├── Advancing_EdgeAI ├── 1-slm-limitation-tst.py ├── 10-Create-Persistent-Vector-Database.py ├── 2-simple_agent.py ├── 20-Query-the-Persistent-RAG-Database.py ├── 25-optimized_RAG_query.py ├── 3-ollama-calculator-agent.py ├── 30-advanced_agentic_rag.py ├── 4-ollama-search-agent.py └── 5-validate_response.py ├── FLORENCE-2 ├── docs │ └── Florence2-paper.pdf ├── images │ ├── cover.jpg │ ├── dogs-cats.jpg │ ├── flyer.png │ └── table.jpg └── notebooks │ ├── 10-florence2_test.ipynb │ ├── 20-florence_2.ipynb │ └── 30-Finetune_florence_2_on_detection_dataset_box_vs_wheel.ipynb ├── IMG_CLASS ├── dataset │ ├── background │ │ ├── background.jpg │ │ ├── image_20240826-213556.jpg │ │ ├── image_20240826-213557.jpg │ │ ├── image_20240826-213558.jpg │ │ ├── image_20240826-213559.jpg │ │ ├── image_20240826-213600.jpg │ │ ├── image_20240826-213601.jpg │ │ ├── image_20240826-213602.jpg │ │ ├── image_20240826-213603.jpg │ │ ├── image_20240826-213604.jpg │ │ ├── image_20240826-213605.jpg │ │ ├── image_20240826-213606.jpg │ │ ├── image_20240826-213607.jpg │ │ ├── image_20240826-213608.jpg │ │ ├── image_20240826-213609.jpg │ │ ├── image_20240826-213610.jpg │ │ ├── image_20240826-213611.jpg │ │ ├── image_20240826-213612.jpg │ │ ├── image_20240826-213613.jpg │ │ └── image_20240826-213614.jpg │ ├── periquito │ │ ├── image_20240826-201300.jpg │ │ ├── image_20240826-201303.jpg │ │ ├── image_20240826-201305.jpg │ │ ├── image_20240826-201308.jpg │ │ ├── image_20240826-201311.jpg │ │ ├── image_20240826-201318.jpg │ │ ├── image_20240826-201954.jpg │ │ ├── image_20240826-201955.jpg │ │ ├── image_20240826-201956.jpg │ │ ├── image_20240826-203428.jpg │ │ ├── image_20240826-203429.jpg │ │ ├── image_20240826-203430.jpg │ │ ├── image_20240826-203431.jpg │ │ ├── image_20240826-203433.jpg │ │ ├── image_20240826-205245.jpg │ │ ├── image_20240826-205246.jpg │ │ ├── image_20240826-205247.jpg │ │ ├── image_20240826-205249.jpg │ │ ├── image_20240826-205708.jpg │ │ ├── image_20240826-205709.jpg │ │ ├── image_20240826-205711.jpg │ │ ├── image_20240826-205713.jpg │ │ ├── image_20240826-205718.jpg │ │ ├── image_20240826-205719.jpg │ │ ├── image_20240826-205722.jpg │ │ ├── image_20240826-205723.jpg │ │ ├── image_20240826-210724.jpg │ │ ├── image_20240826-210725.jpg │ │ ├── image_20240826-210726.jpg │ │ ├── image_20240826-210727.jpg │ │ ├── image_20240826-210729.jpg │ │ ├── image_20240826-210730.jpg │ │ ├── image_20240826-210731.jpg │ │ └── periquito.jpg │ └── robot │ │ ├── image_20240826-201342.jpg │ │ ├── image_20240826-201344.jpg │ │ ├── image_20240826-201346.jpg │ │ ├── image_20240826-201348.jpg │ │ ├── image_20240826-201350.jpg │ │ ├── image_20240826-201352.jpg │ │ ├── image_20240826-202012.jpg │ │ ├── image_20240826-202016.jpg │ │ ├── image_20240826-202017.jpg │ │ ├── image_20240826-202018.jpg │ │ ├── image_20240826-203449.jpg │ │ ├── image_20240826-203450.jpg │ │ ├── image_20240826-203451.jpg │ │ ├── image_20240826-203452.jpg │ │ ├── image_20240826-205305.jpg │ │ ├── image_20240826-205306.jpg │ │ ├── image_20240826-205307.jpg │ │ ├── image_20240826-205308.jpg │ │ ├── image_20240826-205739.jpg │ │ ├── image_20240826-205740.jpg │ │ ├── image_20240826-205744.jpg │ │ ├── image_20240826-205745.jpg │ │ ├── image_20240826-205747.jpg │ │ ├── image_20240826-205748.jpg │ │ ├── image_20240826-205751.jpg │ │ ├── image_20240826-210642.jpg │ │ ├── image_20240826-210643.jpg │ │ ├── image_20240826-210644.jpg │ │ ├── image_20240826-210647.jpg │ │ ├── image_20240826-210651.jpg │ │ ├── image_20240826-210653.jpg │ │ ├── image_20240826-210655.jpg │ │ 
├── image_20240826-210702.jpg │ │ ├── image_20240826-210704.jpg │ │ ├── image_20240826-210707.jpg │ │ └── robot.jpg ├── images │ ├── Cat03.jpg │ ├── background.jpg │ ├── car_1.jpg │ ├── car_2.jpg │ ├── car_3.jpg │ ├── car_4.jpg │ ├── car_5.jpg │ ├── cat_1.jpg │ ├── cat_2.jpg │ ├── cat_2.png │ ├── dog_1.jpg │ ├── dog_2.jpg │ ├── dog_3.jpg │ ├── periquito.jpg │ ├── periquito_2.jpg │ ├── robot.jpg │ ├── ship_1.jpg │ ├── ship_2.jpg │ └── teste_img.jpg ├── models │ ├── cifar10.tflite │ ├── ei-periquito-vs-robot-img-class-int8-quantized-model.lite │ ├── ei-raspi-img-class-float32-model.tflite │ ├── ei-raspi-img-class-int8-quantized-model.tflite │ ├── labels.txt │ └── mobilenet_v2_1.0_224_quant.tflite ├── notebooks │ ├── 10_Image_Classification.ipynb │ ├── 20_Cifar_10_Image_Classification.ipynb │ ├── CNN_Cifar_10_TFLite.ipynb │ └── setup_test.ipynb └── python_scripts │ ├── capture_and_serve.py │ ├── capture_image.py │ ├── get_img_data.py │ ├── img_class_live_infer.py │ └── setup_test.py ├── KD-Knowledge_Destilation ├── KD-From MNIST_2_LLMs.pdf ├── KD_Knowledge_Destilation_with_MNIST.ipynb └── kd_mnist.py ├── LICENSE ├── OBJ_DETEC ├── images │ ├── beagles.jpg │ ├── beatch.jpg │ ├── cat_dog.jpeg │ ├── cats_dogs.jpg │ ├── home-office.jpg │ ├── man_cat_dog.jpg │ ├── office.jpeg │ └── ship_2.jpg ├── models │ ├── box_wheel_320_yolo.pt │ ├── coco_labels.txt │ ├── ei-raspi-object-detection-FOMO-160x160-int8.lite │ ├── ei-raspi-object-detection-SSD-MobileNetv2-320x0320-int8.lite │ ├── lite-model_efficientdet_lite0_detection_metadata_1.tflite │ ├── raspi-object-detection-linux-aarch64-FOMO-int8.eim │ ├── ssd-mobilenet-v1-tflite-default-v1.tar.gz │ └── ssd-mobilenet-v1-tflite-default-v1.tflite ├── notebooks │ ├── EI-Linux-FOMO.ipynb │ ├── EI-SSD-MobileNetV2.ipynb │ ├── SSD_EfficientDet.ipynb │ ├── SSD_MobileNetV1.ipynb │ └── yolov8_box_vs_wheel.ipynb └── python_scripts │ ├── get_img_data.py │ └── object_detection_app.py ├── OLLAMA_SLMs ├── 10-Ollama_Python_Library.ipynb ├── 20-Ollama_Function_Calling.ipynb ├── 30-Function_Calling_with_images.ipynb ├── 40-RAG-simple-bee.ipynb ├── calc_distance_image.py ├── image _test_2.jpg ├── image_test_1.jpg └── image_test_3.jpg ├── PHYSICAL_COMPUTING ├── GPIOS │ ├── button_test.py │ ├── check_pins.py │ ├── led_test.py │ └── led_test_2.py ├── Notebooks │ ├── Physical_Computing_Raspi.ipynb │ ├── SLM_Raspi_Commands.ipynb │ ├── SLM_reaction_test.ipynb │ ├── SLM_test.ipynb │ └── notebook_test.ipynb └── Sensors │ ├── blinka_test.py │ ├── bmp280_test.py │ └── dht_test.py ├── README.md └── SLMs_for_IoT_CONTROL ├── Raspi-Physical-Computing.fzz ├── SLM_IOT_CONTROL.pdf ├── monitor.py ├── monitor_log.py ├── slm_basic_analysis.py ├── slm_basic_analysis_action.py ├── slm_basic_interaction.py ├── slm_basic_interaction_log.py └── system_log.csv /Advancing_EdgeAI/1-slm-limitation-tst.py: -------------------------------------------------------------------------------- 1 | # Example: Knowledge limitation demonstration 2 | import ollama 3 | 4 | response = ollama.generate( 5 | model="llama3.2:3b", 6 | prompt="Multiply 123456 by 123456" 7 | ) 8 | print(response['response']) 9 | # Output will likely show a wrong result -------------------------------------------------------------------------------- /Advancing_EdgeAI/10-Create-Persistent-Vector-Database.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # 10 Create Persistent Vector Database for RAG 5 | # - Edge AI 6 | 7 | 8 | import warnings 9 | import os 
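# NOTE: the LangChain imports that follow assume the split packages and their backends are installed -- something like "pip install langchain langchain-community langchain-ollama chromadb pypdf tiktoken beautifulsoup4" (package names inferred from the imports, not pinned by this repo; adjust versions to your setup).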
10 | from langchain.text_splitter import RecursiveCharacterTextSplitter 11 | from langchain_community.document_loaders import WebBaseLoader 12 | from langchain_community.document_loaders import PyPDFLoader 13 | from langchain_ollama import OllamaEmbeddings 14 | from langchain_community.vectorstores import Chroma 15 | 16 | # Suppress LangSmith warnings 17 | warnings.filterwarnings("ignore", 18 | message="API key must be provided when using hosted LangSmith API", 19 | category=UserWarning) 20 | 21 | 22 | # Vector Database 23 | 24 | # Define persistent directory for Chroma 25 | PERSIST_DIRECTORY = "chroma_db" 26 | 27 | 28 | # PDF documents to include 29 | pdf_paths = ["./data/2025_Edge_AI_Technology_Report.pdf"] 30 | 31 | 32 | # Define URLs for document sources 33 | urls = [ 34 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/object_detection/object_detection.html", 35 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/image_classification/image_classification.html", 36 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/setup/setup.html", 37 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/counting_objects_yolo/counting_objects_yolo.html", 38 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/llm/llm.html", 39 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/vlm/vlm.html", 40 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/physical_comp/RPi_Physical_Computing.html", 41 | "https://mjrovai.github.io/EdgeML_Made_Ease_ebook/raspi/iot/slm_iot.html", 42 | ] 43 | 44 | 45 | def create_vectorstore(): 46 | """Create the vector store with document data and persist it to disk""" 47 | print("Creating persistent vector store...") 48 | 49 | # Load documents from PDFs 50 | docs_list = [] 51 | for path in pdf_paths: 52 | if os.path.exists(path): 53 | print(f"Loading PDF: {path}") 54 | loader = PyPDFLoader(path) 55 | docs_list.extend(loader.load()) 56 | else: 57 | print(f"Warning: PDF file {path} not found") 58 | 59 | # Load documents from URLs 60 | print("Loading documents from URLs...") 61 | try: 62 | web_docs = [] 63 | for url in urls: 64 | print(f"Loading URL: {url}") 65 | loader = WebBaseLoader(url) 66 | web_docs.extend(loader.load()) 67 | docs_list.extend(web_docs) 68 | except Exception as e: 69 | print(f"Error loading URL documents: {e}") 70 | 71 | if not docs_list: 72 | print("Error: No documents were loaded. 
Check file paths and URLs.") 73 | return None 74 | 75 | print(f"Total documents loaded: {len(docs_list)}") 76 | 77 | # Split documents 78 | print("Splitting documents into chunks...") 79 | text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( 80 | chunk_size=300, chunk_overlap=30 81 | ) 82 | doc_splits = text_splitter.split_documents(docs_list) 83 | print(f"Created {len(doc_splits)} document chunks") 84 | 85 | # Create embedding function 86 | print("Initializing embedding model...") 87 | embedding_function = OllamaEmbeddings(model="nomic-embed-text") 88 | 89 | # Create and persist vectorstore to disk 90 | print("Creating vector database...") 91 | vectorstore = Chroma.from_documents( 92 | documents=doc_splits, 93 | collection_name="rag-edgeai-eng-chroma", 94 | embedding=embedding_function, 95 | persist_directory=PERSIST_DIRECTORY 96 | ) 97 | 98 | # Important: persist to disk 99 | vectorstore.persist() 100 | 101 | print(f"Vector store created and saved to {PERSIST_DIRECTORY}") 102 | print(f"Total document chunks indexed: {len(doc_splits)}") 103 | 104 | return vectorstore 105 | 106 | 107 | # Check if database already exists 108 | if os.path.exists(PERSIST_DIRECTORY): 109 | choice = input(f"Database already exists at {PERSIST_DIRECTORY}. Recreate? (y/n): ") 110 | if choice.lower() != 'y': 111 | print("Exiting without changes.") 112 | exit() 113 | 114 | # Create the vector store 115 | create_vectorstore() 116 | print("Database creation complete!") 117 | 118 | 119 | -------------------------------------------------------------------------------- /Advancing_EdgeAI/2-simple_agent.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | 4 | # Configuration 5 | OLLAMA_URL = "http://localhost:11434/api" 6 | MODEL = "llama3.2:3b" # You can change this to any model you have installed 7 | VERBOSE = True 8 | 9 | def ask_ollama_for_classification(user_input): 10 | """ 11 | Ask Ollama to classify whether the query is a multiplication request or a general question. 12 | """ 13 | classification_prompt = f""" 14 | Analyze the following query and determine if it's asking for multiplication or if it's a general question. 15 | 16 | Query: "{user_input}" 17 | 18 | If it's asking for multiplication, respond with a JSON object in this format: 19 | {{ 20 | "type": "multiplication", 21 | "numbers": [number1, number2] 22 | }} 23 | 24 | If it's a general question, respond with a JSON object in this format: 25 | {{ 26 | "type": "general_question" 27 | }} 28 | 29 | Respond ONLY with the JSON object, nothing else. 
30 | """ 31 | 32 | try: 33 | if VERBOSE: 34 | print(f"Sending classification request to Ollama") 35 | 36 | response = requests.post( 37 | f"{OLLAMA_URL}/generate", 38 | json={ 39 | "model": MODEL, 40 | "prompt": classification_prompt, 41 | "stream": False 42 | } 43 | ) 44 | 45 | if response.status_code == 200: 46 | response_text = response.json().get("response", "").strip() 47 | if VERBOSE: 48 | print(f"Classification response: {response_text}") 49 | 50 | # Try to parse the JSON response 51 | try: 52 | # Find JSON content if there's any surrounding text 53 | start_index = response_text.find('{') 54 | end_index = response_text.rfind('}') + 1 55 | if start_index >= 0 and end_index > start_index: 56 | json_str = response_text[start_index:end_index] 57 | return json.loads(json_str) 58 | return {"type": "general_question"} 59 | except json.JSONDecodeError: 60 | if VERBOSE: 61 | print(f"Failed to parse JSON: {response_text}") 62 | return {"type": "general_question"} 63 | else: 64 | if VERBOSE: 65 | print(f"Error: Received status code {response.status_code} from Ollama.") 66 | return {"type": "general_question"} 67 | 68 | except Exception as e: 69 | if VERBOSE: 70 | print(f"Error connecting to Ollama: {str(e)}") 71 | return {"type": "general_question"} 72 | 73 | def multiply(a, b): 74 | """ 75 | Perform multiplication and return a formatted response. 76 | """ 77 | result = a * b 78 | return f"The product of {a} and {b} is {result}." 79 | 80 | def ask_ollama(query): 81 | """ 82 | Send a query to Ollama for general question answering. 83 | """ 84 | try: 85 | if VERBOSE: 86 | print(f"Sending query to Ollama") 87 | 88 | response = requests.post( 89 | f"{OLLAMA_URL}/generate", 90 | json={ 91 | "model": MODEL, 92 | "prompt": query, 93 | "stream": False 94 | } 95 | ) 96 | 97 | if response.status_code == 200: 98 | return response.json().get("response", "") 99 | else: 100 | return f"Error: Received status code {response.status_code} from Ollama." 101 | 102 | except Exception as e: 103 | return f"Error connecting to Ollama: {str(e)}" 104 | 105 | def process_query(user_input): 106 | """ 107 | Process the user input by first asking Ollama to classify it, 108 | then either performing multiplication or sending it back as a general question. 109 | """ 110 | # Let Ollama classify the query 111 | classification = ask_ollama_for_classification(user_input) 112 | 113 | if VERBOSE: 114 | print("Ollama classification:", classification) 115 | 116 | if classification.get("type") == "multiplication": 117 | numbers = classification.get("numbers", [0, 0]) 118 | if len(numbers) >= 2: 119 | return multiply(numbers[0], numbers[1]) 120 | else: 121 | return "I understood you wanted multiplication, but couldn't extract the numbers properly." 122 | else: 123 | return ask_ollama(user_input) 124 | 125 | def main(): 126 | """ 127 | Main function to run the agent interactively. 
128 | """ 129 | print("Ollama Agent (Type 'exit' to quit)") 130 | print("-----------------------------------") 131 | 132 | while True: 133 | user_input = input("\nYou: ") 134 | 135 | if user_input.lower() in ["exit", "quit", "bye"]: 136 | print("Goodbye!") 137 | break 138 | 139 | response = process_query(user_input) 140 | print(f"\nAgent: {response}") 141 | 142 | # Example usage 143 | if __name__ == "__main__": 144 | # Set to True to see detailed logging 145 | VERBOSE = True 146 | main() -------------------------------------------------------------------------------- /Advancing_EdgeAI/20-Query-the-Persistent-RAG-Database.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # 20 Query the Persistent RAG Database 5 | 6 | 7 | import time 8 | import warnings 9 | import os 10 | from langchain_ollama import ChatOllama 11 | from langchain_ollama import OllamaEmbeddings 12 | from langchain_community.vectorstores import Chroma 13 | from langchain_core.output_parsers import StrOutputParser 14 | from langchain import hub 15 | 16 | # Suppress LangSmith warnings 17 | warnings.filterwarnings("ignore", 18 | message="API key must be provided when using hosted LangSmith API", 19 | category=UserWarning) 20 | 21 | 22 | 23 | # Define persistent directory for Chroma 24 | PERSIST_DIRECTORY = "chroma_db" 25 | 26 | 27 | # Initialize the LLM 28 | local_llm = "llama3.2:3b" 29 | llm = ChatOllama(model=local_llm, temperature=0) 30 | 31 | 32 | def load_retriever(): 33 | """Load the vector store from disk and create a retriever""" 34 | if not os.path.exists(PERSIST_DIRECTORY): 35 | raise FileNotFoundError(f"Database directory {PERSIST_DIRECTORY} not found. Please run create-database.py first.") 36 | 37 | print("Loading existing vector store...") 38 | 39 | embedding_function = OllamaEmbeddings(model="nomic-embed-text") 40 | vectorstore = Chroma( 41 | collection_name="rag-edgeai-eng-chroma", 42 | embedding_function=embedding_function, 43 | persist_directory=PERSIST_DIRECTORY 44 | ) 45 | 46 | # Create retriever 47 | retriever = vectorstore.as_retriever(k=3) 48 | return retriever 49 | 50 | 51 | def answer_question(question, retriever): 52 | """Generate an answer using the RAG system""" 53 | # Start timing 54 | start_time = time.time() 55 | 56 | # Retrieve relevant documents 57 | print(f"Question: {question}") 58 | print("Retrieving documents...") 59 | docs = retriever.invoke(question) 60 | docs_content = "\n\n".join(doc.page_content for doc in docs) 61 | print(f"Retrieved {len(docs)} document chunks") 62 | 63 | # Generate answer using RAG prompt 64 | print("Generating answer...") 65 | rag_prompt = hub.pull("rlm/rag-prompt") 66 | 67 | # Create the chain 68 | rag_chain = rag_prompt | llm | StrOutputParser() 69 | 70 | # Generate the answer 71 | answer = rag_chain.invoke({"context": docs_content, "question": question}) 72 | 73 | # Calculate and print latency 74 | end_time = time.time() 75 | latency = end_time - start_time 76 | print(f"Response latency: {latency:.2f} seconds using model: {local_llm}") 77 | 78 | return answer 79 | 80 | 81 | # Load the retriever once 82 | retriever = load_retriever() 83 | 84 | 85 | def interactive_mode(): 86 | """Run an interactive query session""" 87 | try: 88 | # Load the retriever once 89 | retriever = load_retriever() 90 | 91 | print("==== RAG Query System ====") 92 | print("Type your questions and press Enter. 
Type 'quit' to exit.") 93 | 94 | while True: 95 | question = input("\nYour question: ") 96 | if question.lower() in ['quit', 'exit', 'q']: 97 | break 98 | 99 | print("\nGenerating answer...\n") 100 | answer = answer_question(question, retriever) 101 | 102 | print("\nANSWER:") 103 | print("="*50) 104 | print(answer) 105 | print("="*50) 106 | 107 | except KeyboardInterrupt: 108 | print("\nExiting...") 109 | except Exception as e: 110 | print(f"Error: {e}") 111 | 112 | if __name__ == "__main__": 113 | interactive_mode() 114 | 115 | -------------------------------------------------------------------------------- /Advancing_EdgeAI/25-optimized_RAG_query.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # Optimized RAG Query System 5 | # - Edge AI Engineering with Raspberry Pi 6 | 7 | import time 8 | import warnings 9 | import os 10 | import requests 11 | import concurrent.futures 12 | from functools import lru_cache 13 | from langchain_community.vectorstores import Chroma 14 | from langchain_core.output_parsers import StrOutputParser 15 | 16 | # Suppress warnings 17 | warnings.filterwarnings("ignore", 18 | message="API key must be provided when using hosted LangSmith API", 19 | category=UserWarning) 20 | 21 | # models 22 | MODEL = "llama3.2:3b" 23 | EMBED = "nomic-embed-text" 24 | 25 | # Define persistent directory for Chroma 26 | PERSIST_DIRECTORY = "chroma_db" 27 | 28 | # Direct Ollama API functions for better performance 29 | def direct_ollama_embed(text): 30 | """Get embeddings directly from Ollama API""" 31 | response = requests.post( 32 | "http://localhost:11434/api/embeddings", 33 | json={"model": EMBED, "prompt": text} 34 | ) 35 | if response.status_code == 200: 36 | return response.json()["embedding"] 37 | else: 38 | raise Exception(f"Error from Ollama API: {response.status_code}") 39 | 40 | # Cache embeddings to avoid recalculating 41 | @lru_cache(maxsize=100) 42 | def cached_embed_query(text): 43 | """Cache embeddings for repeated queries""" 44 | return direct_ollama_embed(text) 45 | 46 | # Custom embedding class that uses Ollama directly and implements caching 47 | class OptimizedOllamaEmbeddings: 48 | def embed_query(self, text): 49 | """Get embeddings for a query with caching""" 50 | return cached_embed_query(text) 51 | 52 | def embed_documents(self, documents): 53 | """Get embeddings for documents - not cached as this is mainly used during DB creation""" 54 | results = [] 55 | # Process in batches of 4 for efficiency 56 | batch_size = 4 57 | for i in range(0, len(documents), batch_size): 58 | batch = documents[i:i+batch_size] 59 | with concurrent.futures.ThreadPoolExecutor() as executor: 60 | batch_results = list(executor.map(direct_ollama_embed, batch)) 61 | results.extend(batch_results) 62 | return results 63 | 64 | def generate_llm_response(prompt): 65 | """Generate response directly from Ollama API""" 66 | response = requests.post( 67 | "http://localhost:11434/api/generate", 68 | json={ 69 | "model": MODEL, 70 | "prompt": prompt, 71 | "stream": False, 72 | "options": { 73 | "num_predict": 512, 74 | "temperature": 0, 75 | "top_k": 40, 76 | "top_p": 0.9, 77 | "seed": 42 # Fixed seed for consistent outputs 78 | } 79 | } 80 | ) 81 | if response.status_code == 200: 82 | return response.json()["response"] 83 | else: 84 | return f"Error: Received status code {response.status_code} from Ollama API" 85 | 86 | def preload_models(): 87 | """Preload models to avoid cold starts""" 88 | 
print("Preloading models...") 89 | try: 90 | # Preload embedding model 91 | _ = requests.post( 92 | "http://localhost:11434/api/embeddings", 93 | json={"model": EMBED, "prompt": "warmup"}, 94 | timeout=30 95 | ) 96 | 97 | # Preload LLM 98 | _ = requests.post( 99 | "http://localhost:11434/api/generate", 100 | json={ 101 | "model": MODEL, 102 | "prompt": "warmup", 103 | "stream": False, 104 | "options": {"num_predict": 1} 105 | }, 106 | timeout=30 107 | ) 108 | print("Models preloaded successfully") 109 | except Exception as e: 110 | print(f"Warning: Model preloading failed: {e}") 111 | print("Continuing anyway - first query may be slower") 112 | 113 | def load_retriever(): 114 | """Load the vector store from disk and create an optimized retriever""" 115 | if not os.path.exists(PERSIST_DIRECTORY): 116 | raise FileNotFoundError(f"Database directory {PERSIST_DIRECTORY} not found. Please run create-database.py first.") 117 | 118 | print("Loading existing vector store...") 119 | 120 | embedding_function = OptimizedOllamaEmbeddings() 121 | vectorstore = Chroma( 122 | collection_name="rag-edgeai-eng-chroma", 123 | embedding_function=embedding_function, 124 | persist_directory=PERSIST_DIRECTORY 125 | ) 126 | 127 | # Create retriever with optimized settings 128 | retriever = vectorstore.as_retriever( 129 | search_type="similarity", # Basic similarity is fastest 130 | search_kwargs={ 131 | "k": 2 # Retrieve fewer documents 132 | } 133 | ) 134 | return retriever 135 | 136 | def answer_question(question, retriever): 137 | """Generate an answer using the RAG system with optimized processing""" 138 | # Start timing 139 | start_time = time.time() 140 | 141 | # Retrieve relevant documents 142 | print(f"Question: {question}") 143 | print("Retrieving documents...") 144 | docs = retriever.invoke(question) 145 | 146 | # Early check if we found any relevant documents 147 | if not docs: 148 | end_time = time.time() 149 | latency = end_time - start_time 150 | print(f"No relevant documents found. Response latency: {latency:.2f} seconds") 151 | return "I don't have enough information to answer this question accurately." 152 | 153 | # Process documents - extract only what we need 154 | docs_content = "\n\n".join(doc.page_content for doc in docs) 155 | print(f"Retrieved {len(docs)} document chunks") 156 | 157 | # Generate answer 158 | print("Generating answer...") 159 | 160 | # Simplified RAG prompt for efficiency 161 | rag_prompt = f""" 162 | You are an AI assistant specialized in Edge AI Engineering with Raspberry Pi. 163 | Answer the following question based only on the information provided in the context below. 164 | Be concise and direct. If the context doesn't contain relevant information, admit that you don't know. 165 | 166 | Context: 167 | {docs_content} 168 | 169 | Question: {question} 170 | 171 | Answer: 172 | """ 173 | 174 | # Generate answer through direct API call 175 | answer = generate_llm_response(rag_prompt) 176 | 177 | # Calculate and print latency 178 | end_time = time.time() 179 | latency = end_time - start_time 180 | print(f"Response latency: {latency:.2f} seconds using model: {MODEL}") 181 | 182 | return answer 183 | 184 | def interactive_mode(): 185 | """Run an interactive query session""" 186 | try: 187 | # Preload models to avoid cold start latency 188 | preload_models() 189 | 190 | # Load the retriever once 191 | retriever = load_retriever() 192 | 193 | print("\n==== Optimized RAG Query System ====") 194 | print("Type your questions and press Enter. 
Type 'quit' to exit.") 195 | 196 | while True: 197 | question = input("\nYour question: ") 198 | if question.lower() in ['quit', 'exit', 'q']: 199 | break 200 | 201 | print("\nGenerating answer...\n") 202 | answer = answer_question(question, retriever) 203 | 204 | print("\nANSWER:") 205 | print("="*50) 206 | print(answer) 207 | print("="*50) 208 | 209 | except KeyboardInterrupt: 210 | print("\nExiting...") 211 | except Exception as e: 212 | print(f"Error: {e}") 213 | 214 | if __name__ == "__main__": 215 | interactive_mode() -------------------------------------------------------------------------------- /Advancing_EdgeAI/3-ollama-calculator-agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | import requests 5 | import json 6 | import re 7 | import time 8 | import threading 9 | import concurrent.futures 10 | 11 | # Configuration 12 | OLLAMA_URL = "http://localhost:11434/api" 13 | MODEL = "llama3.2:3b" # Main model for answering 14 | CLASSIFICATION_MODEL = "llama3.2:3b" # Model for classification 15 | VERBOSE = True 16 | 17 | # Keep a persistent session to reuse connections 18 | session = requests.Session() 19 | 20 | def ask_ollama_for_classification(user_input): 21 | """ 22 | Ask Ollama to classify whether the query is a calculation request. 23 | Using a more efficient, simpler prompt and potentially a smaller model. 24 | """ 25 | # Simplified classification prompt 26 | classification_prompt = f""" 27 | Is this a calculation request? "{user_input}" 28 | 29 | Respond with JSON only: 30 | {{ 31 | "type": "calculation" or "general_question", 32 | "operation": "add|subtract|multiply|divide" if calculation, 33 | "numbers": [number1, number2] if calculation 34 | }} 35 | """ 36 | 37 | try: 38 | if VERBOSE: 39 | print(f"Sending classification request using {CLASSIFICATION_MODEL}") 40 | 41 | # Use the session for connection reuse 42 | response = session.post( 43 | f"{OLLAMA_URL}/generate", 44 | json={ 45 | "model": CLASSIFICATION_MODEL, 46 | "prompt": classification_prompt, 47 | "stream": False, 48 | # Add to prevent model reloading 49 | "keep_alive": "5m" 50 | } 51 | ) 52 | 53 | if response.status_code == 200: 54 | response_text = response.json().get("response", "").strip() 55 | 56 | # Try to extract JSON from the response 57 | try: 58 | # Find JSON content if there's any surrounding text 59 | start_index = response_text.find('{') 60 | end_index = response_text.rfind('}') + 1 61 | if start_index >= 0 and end_index > start_index: 62 | json_str = response_text[start_index:end_index] 63 | classification = json.loads(json_str) 64 | 65 | # If model failed to include numbers or operation 66 | if classification.get("type") == "calculation": 67 | # Extract numbers from query if needed 68 | if not classification.get("numbers"): 69 | numbers = extract_numbers_from_query(user_input) 70 | if numbers and len(numbers) >= 2: 71 | classification["numbers"] = numbers[:2] 72 | # Ensure numbers are floats, not strings 73 | elif classification.get("numbers"): 74 | classification["numbers"] = [float(n) if isinstance(n, str) else n 75 | for n in classification["numbers"]] 76 | 77 | # Determine operation if needed 78 | if not classification.get("operation"): 79 | classification["operation"] = determine_operation(user_input) 80 | 81 | return classification 82 | return {"type": "general_question"} 83 | except json.JSONDecodeError: 84 | if VERBOSE: 85 | print(f"Failed to parse JSON: {response_text}") 86 | 87 | # Fallback to 
rule-based classification 88 | numbers = extract_numbers_from_query(user_input) 89 | if len(numbers) >= 2 and has_calculation_keywords(user_input): 90 | return { 91 | "type": "calculation", 92 | "operation": determine_operation(user_input), 93 | "numbers": [float(n) if isinstance(n, str) else n for n in numbers[:2]] 94 | } 95 | return {"type": "general_question"} 96 | else: 97 | if VERBOSE: 98 | print(f"Error: Received status code {response.status_code} from Ollama.") 99 | return {"type": "general_question"} 100 | 101 | except Exception as e: 102 | if VERBOSE: 103 | print(f"Error connecting to Ollama: {str(e)}") 104 | return {"type": "general_question"} 105 | 106 | def has_calculation_keywords(query): 107 | """Check if the query contains calculation keywords""" 108 | query = query.lower() 109 | calc_words = ["add", "plus", "+", "subtract", "minus", "-", 110 | "multiply", "times", "*", "×", "divide", "/", "÷"] 111 | return any(word in query for word in calc_words) 112 | 113 | def extract_numbers_from_query(query): 114 | """ 115 | Extract numbers from the query string. 116 | """ 117 | # Look for floating point or integer numbers 118 | numbers = re.findall(r'(\d+\.?\d*)', query) 119 | return [float(num) for num in numbers] 120 | 121 | def determine_operation(query): 122 | """ 123 | Determine the arithmetic operation based on keywords in the query. 124 | """ 125 | query = query.lower() 126 | 127 | if any(word in query for word in ["add", "addition", "plus", "sum", "+"]): 128 | return "add" 129 | elif any(word in query for word in ["subtract", "subtraction", "minus", "difference", "-"]): 130 | return "subtract" 131 | elif any(word in query for word in ["multiply", "multiplication", "times", "product", "*", "×"]): 132 | return "multiply" 133 | elif any(word in query for word in ["divide", "division", "/", "÷"]): 134 | return "divide" 135 | else: 136 | # Default to addition if unclear 137 | return "add" 138 | 139 | def calculate(operation, a, b): 140 | """ 141 | Perform the specified calculation and return a formatted response with comma separators. 142 | """ 143 | def format_number(num): 144 | """Format a number with comma separators for thousands""" 145 | if isinstance(num, int): 146 | # For integers 147 | return f"{num:,}" 148 | else: 149 | # For floats: format with appropriate decimal places 150 | # Handle different decimal precision based on the value 151 | if abs(num) < 0.01: 152 | # Scientific notation for very small numbers 153 | return f"{num:.10g}" 154 | elif abs(num) < 1: 155 | return f"{num:,.6f}".rstrip('0').rstrip('.') if '.' in f"{num:,.6f}" else f"{num:,}" 156 | elif abs(num) < 1000: 157 | return f"{num:,.4f}".rstrip('0').rstrip('.') if '.' in f"{num:,.4f}" else f"{num:,}" 158 | else: 159 | return f"{num:,.2f}".rstrip('0').rstrip('.') if '.' in f"{num:,.2f}" else f"{num:,}" 160 | 161 | if operation == "add": 162 | result = a + b 163 | return f"The sum of {format_number(a)} and {format_number(b)} is {format_number(result)}." 164 | elif operation == "subtract": 165 | result = a - b 166 | return f"The difference between {format_number(a)} and {format_number(b)} is {format_number(result)}." 167 | elif operation == "multiply": 168 | result = a * b 169 | return f"The product of {format_number(a)} and {format_number(b)} is {format_number(result)}." 170 | elif operation == "divide": 171 | if b == 0: 172 | return "Cannot divide by zero." 173 | result = a / b 174 | return f"The result of dividing {format_number(a)} by {format_number(b)} is {format_number(result)}." 
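# Note: division reuses format_number(), so very small quotients fall back to the scientific-notation branch (abs(num) < 0.01) defined above.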
175 | else: 176 | return "Unsupported operation." 177 | 178 | def ask_ollama(query): 179 | """ 180 | Send a query to Ollama for general question answering. 181 | """ 182 | try: 183 | if VERBOSE: 184 | print(f"Sending query to Ollama using {MODEL}") 185 | 186 | # Use the session for connection reuse 187 | response = session.post( 188 | f"{OLLAMA_URL}/generate", 189 | json={ 190 | "model": MODEL, 191 | "prompt": query, 192 | "stream": False, 193 | # Add to prevent model reloading 194 | "keep_alive": "5m" 195 | } 196 | ) 197 | 198 | if response.status_code == 200: 199 | return response.json().get("response", "") 200 | else: 201 | return f"Error: Received status code {response.status_code} from Ollama." 202 | 203 | except Exception as e: 204 | return f"Error connecting to Ollama: {str(e)}" 205 | 206 | def process_query(user_input): 207 | """ 208 | Process the user input by first asking Ollama to classify it, 209 | then either performing a calculation or sending it back as a general question. 210 | """ 211 | # Classify the query 212 | classification = ask_ollama_for_classification(user_input) 213 | 214 | if VERBOSE: 215 | print("Classification:", classification) 216 | 217 | if classification.get("type") == "calculation": 218 | operation = classification.get("operation", "add") 219 | numbers = classification.get("numbers", [0, 0]) 220 | if len(numbers) >= 2: 221 | # Convert numbers to float if they're strings 222 | a = float(numbers[0]) if isinstance(numbers[0], str) else numbers[0] 223 | b = float(numbers[1]) if isinstance(numbers[1], str) else numbers[1] 224 | return calculate(operation, a, b) 225 | else: 226 | return "I understood you wanted a calculation, but couldn't extract the numbers properly." 227 | else: 228 | return ask_ollama(user_input) 229 | 230 | def initialize_models(): 231 | """ 232 | Initialize both models to keep them loaded in memory. 233 | This prevents the cold start problem. 234 | """ 235 | print(f"Initializing models: {MODEL} and {CLASSIFICATION_MODEL}") 236 | 237 | # Start both model initializations in parallel 238 | with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: 239 | future1 = executor.submit( 240 | session.post, 241 | f"{OLLAMA_URL}/generate", 242 | json={"model": MODEL, "prompt": "Hello", "stream": False, "keep_alive": "5m"} 243 | ) 244 | 245 | future2 = executor.submit( 246 | session.post, 247 | f"{OLLAMA_URL}/generate", 248 | json={"model": CLASSIFICATION_MODEL, "prompt": "Hello", "stream": False, "keep_alive": "5m"} 249 | ) 250 | 251 | # Wait for both to complete 252 | concurrent.futures.wait([future1, future2]) 253 | 254 | print("Models initialized and ready") 255 | 256 | def main(): 257 | """ 258 | Main function to run the calculator agent interactively. 
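Runtime commands: 'model <model_name>' swaps the answering model, 'classmodel <model_name>' swaps the classification model, and 'exit' quits.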
259 | """ 260 | global MODEL, CLASSIFICATION_MODEL 261 | 262 | print(f"Optimized Ollama Calculator Agent") 263 | print(f"Main model: {MODEL}, Classification model: {CLASSIFICATION_MODEL}") 264 | print("Type 'exit' to quit, 'model ' to change main model, or 'classmodel ' to change classification model") 265 | print("-" * 50) 266 | 267 | # Initialize models at startup 268 | initialize_models() 269 | 270 | while True: 271 | user_input = input("\nYou: ") 272 | 273 | if user_input.lower() in ["exit", "quit", "bye"]: 274 | print("Goodbye!") 275 | break 276 | 277 | # Check if user wants to change the models 278 | if user_input.lower().startswith("model "): 279 | MODEL = user_input[6:].strip() 280 | print(f"Main model changed to: {MODEL}") 281 | continue 282 | 283 | if user_input.lower().startswith("classmodel "): 284 | CLASSIFICATION_MODEL = user_input[11:].strip() 285 | print(f"Classification model changed to: {CLASSIFICATION_MODEL}") 286 | continue 287 | 288 | start_time = time.time() 289 | response = process_query(user_input) 290 | elapsed_time = time.time() - start_time 291 | 292 | print(f"\nAgent: {response}") 293 | print(f"\nTime elapsed: {elapsed_time:.2f} seconds") 294 | print("-" * 50) 295 | 296 | # Example usage 297 | if __name__ == "__main__": 298 | # Set to True to see detailed logging 299 | VERBOSE = True 300 | 301 | # Run interactive mode 302 | main() -------------------------------------------------------------------------------- /Advancing_EdgeAI/4-ollama-search-agent.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import re 4 | import time 5 | from datetime import datetime 6 | from tavily import TavilyClient 7 | 8 | # Configuration 9 | OLLAMA_URL = "http://localhost:11434/api" 10 | MODEL = "llama3.2:3b" # Main model for answering 11 | CLASSIFICATION_MODEL = "llama3.2:3b" # Model for classification 12 | TAVILY_API_KEY = "tvly-YOUR_API_KEY" # Replace with your actual API key 13 | VERBOSE = True 14 | 15 | # Keep a persistent session to reuse connections 16 | session = requests.Session() 17 | 18 | def ask_ollama_for_classification(user_input): 19 | """ 20 | Ask Ollama to classify whether the query is: 21 | 1. Something it can answer from its knowledge 22 | 2. Something that requires recent information from the web 23 | """ 24 | current_date = datetime.now().strftime("%Y-%m-%d") 25 | current_year = datetime.now().year 26 | 27 | classification_prompt = f""" 28 | Today's date is {current_date} and the current year is {current_year}. 29 | 30 | Analyze if the following query requires current or recent information that would be OUTSIDE 31 | your training data, or if it's about general knowledge that doesn't change over time. 
32 | 33 | Query: "{user_input}" 34 | 35 | Respond with JSON only: 36 | {{ 37 | "type": "general_knowledge" or "needs_web_search", 38 | "reason": "brief explanation of why this is general knowledge or needs web search", 39 | "search_query": "optimized search terms for web search" if needs_web_search 40 | }} 41 | """ 42 | 43 | try: 44 | if VERBOSE: 45 | print(f"Sending classification request using {CLASSIFICATION_MODEL}") 46 | 47 | # Use the session for connection reuse 48 | response = session.post( 49 | f"{OLLAMA_URL}/generate", 50 | json={ 51 | "model": CLASSIFICATION_MODEL, 52 | "prompt": classification_prompt, 53 | "stream": False, 54 | "keep_alive": "5m" 55 | }, 56 | timeout=30 57 | ) 58 | 59 | if response.status_code == 200: 60 | response_text = response.json().get("response", "").strip() 61 | 62 | # Try to extract JSON from the response 63 | try: 64 | # Find JSON content if there's any surrounding text 65 | start_index = response_text.find('{') 66 | end_index = response_text.rfind('}') + 1 67 | if start_index >= 0 and end_index > start_index: 68 | json_str = response_text[start_index:end_index] 69 | return json.loads(json_str) 70 | 71 | # Fallback 72 | return {"type": "needs_web_search", "reason": "Failed to parse model output", "search_query": user_input} 73 | except json.JSONDecodeError: 74 | if VERBOSE: 75 | print(f"Failed to parse JSON: {response_text}") 76 | 77 | # Default to web search when in doubt 78 | return { 79 | "type": "needs_web_search", 80 | "reason": "JSON parsing error - defaulting to web search", 81 | "search_query": user_input 82 | } 83 | else: 84 | if VERBOSE: 85 | print(f"Error: Received status code {response.status_code} from Ollama.") 86 | return {"type": "needs_web_search", "reason": "API error", "search_query": user_input} 87 | 88 | except Exception as e: 89 | if VERBOSE: 90 | print(f"Error in classification: {str(e)}") 91 | return {"type": "needs_web_search", "reason": f"Error: {str(e)}", "search_query": user_input} 92 | 93 | def search_tavily(query): 94 | """ 95 | Search the web using Tavily API which is designed for RAG applications. 96 | Returns formatted search results. 97 | """ 98 | try: 99 | if VERBOSE: 100 | print(f"Searching Tavily for: {query}") 101 | 102 | # Initialize the Tavily client 103 | tavily_client = TavilyClient(api_key=TAVILY_API_KEY) 104 | 105 | # Search with Tavily 106 | response = tavily_client.search( 107 | query=query, 108 | search_depth="basic", # Use 'basic' for faster, more focused results 109 | max_results=3, # Limit to 3 results to avoid overloading the context 110 | include_answer=True, # Include an AI-generated answer 111 | ) 112 | 113 | if VERBOSE: 114 | print(f"Tavily search successful, found {len(response.get('results', []))} results") 115 | 116 | # Format the results 117 | formatted_results = "Search Results:\n\n" 118 | 119 | # First add the Tavily-generated answer if available 120 | if response.get('answer'): 121 | formatted_results += f"Summary: {response['answer']}\n\n" 122 | 123 | # Then add a limited number of search results 124 | for i, result in enumerate(response.get('results', [])[:3], 1): 125 | title = result.get('title', 'No title') 126 | content = result.get('content', 'No content available') 127 | if len(content) > 300: # Limit long content 128 | content = content[:300] + "..." 129 | 130 | formatted_results += f"{i}. {title}\n" 131 | formatted_results += f" {content}\n\n" 132 | 133 | if not response.get('results'): 134 | return "No search results found for this query." 
135 | 136 | return formatted_results 137 | 138 | except Exception as e: 139 | if VERBOSE: 140 | print(f"Error during Tavily search: {str(e)}") 141 | 142 | # Check if it's an authentication error 143 | if "authentication" in str(e).lower() or "api key" in str(e).lower(): 144 | return "Tavily API authentication failed. Please check your API key." 145 | 146 | return f"Error during web search: {str(e)}. Using built-in knowledge instead." 147 | 148 | def ask_ollama(query, context=""): 149 | """ 150 | Send a query to Ollama for answering with improved timeout and streaming. 151 | """ 152 | try: 153 | if VERBOSE: 154 | print(f"Sending query to Ollama using {MODEL}") 155 | 156 | prompt = query 157 | if context: 158 | # Create a more focused prompt 159 | prompt = f""" 160 | Here is some recent information related to your question: 161 | 162 | {context} 163 | 164 | Based on this information, please provide a concise answer to: {query} 165 | 166 | If the information doesn't provide a clear answer, please state what you know about the topic. 167 | """ 168 | 169 | # Use shorter prompts for general knowledge questions 170 | # This helps reduce processing time 171 | if not context: 172 | prompt = f"Please provide a brief, concise answer to: {query}" 173 | 174 | # CRITICAL FIX: Use streaming for general knowledge queries 175 | # This prevents timeouts by starting to process the output immediately 176 | stream = not bool(context) # Stream for general knowledge, don't stream for search results 177 | 178 | if stream: 179 | # Streaming response 180 | if VERBOSE: 181 | print("Using streaming response") 182 | 183 | response = session.post( 184 | f"{OLLAMA_URL}/generate", 185 | json={ 186 | "model": MODEL, 187 | "prompt": prompt, 188 | "stream": True, 189 | "keep_alive": "5m", 190 | "options": {"num_predict": 150} # Shorter response for general knowledge (Ollama's generate API reads options.num_predict; a top-level max_tokens field is silently ignored) 191 | }, 192 | stream=True, 193 | timeout=45 194 | ) 195 | 196 | if response.status_code == 200: 197 | # Process the streaming response 198 | full_response = "" 199 | for line in response.iter_lines(): 200 | if line: 201 | try: 202 | json_line = json.loads(line.decode('utf-8')) 203 | if 'response' in json_line: 204 | full_response += json_line['response'] 205 | except json.JSONDecodeError: 206 | continue 207 | return full_response 208 | else: 209 | return f"Error: Received status code {response.status_code} from Ollama." 210 | 211 | else: 212 | # Non-streaming for context-based answers 213 | response = session.post( 214 | f"{OLLAMA_URL}/generate", 215 | json={ 216 | "model": MODEL, 217 | "prompt": prompt, 218 | "stream": False, 219 | "keep_alive": "5m", 220 | "options": {"num_predict": 300} # Longer response for search results 221 | }, 222 | timeout=45 # Increased timeout 223 | ) 224 | 225 | if response.status_code == 200: 226 | return response.json().get("response", "") 227 | else: 228 | return f"Error: Received status code {response.status_code} from Ollama." 229 | 230 | except requests.exceptions.Timeout: 231 | # Specific handling for timeout errors 232 | if context: 233 | # If we have context but Ollama timed out, return the context summary directly 234 | summary_match = re.search(r"Summary: (.*?)(?:\n\n|\Z)", context, re.DOTALL) 235 | if summary_match: 236 | return f"Based on search results: {summary_match.group(1)}" 237 | 238 | return "The search found information, but the language model timed out while processing it." 239 | else: 240 | return "The language model timed out while processing your query. Please try a simpler question or try again later." 
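# Falling back to the Tavily-generated summary captured above keeps the agent responsive when the local model stalls on a long context.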
241 | 242 | except Exception as e: 243 | return f"Error connecting to Ollama: {str(e)}" 244 | 245 | def process_query(user_input): 246 | """ 247 | Process the user input by first classifying whether it needs web search, 248 | then either searching the web or using the model's knowledge directly. 249 | """ 250 | start_time = time.time() 251 | 252 | # Classify the query 253 | classification = ask_ollama_for_classification(user_input) 254 | 255 | if VERBOSE: 256 | print("Classification:", classification) 257 | 258 | if classification.get("type") == "needs_web_search": 259 | # Use the provided search query or fall back to the original query 260 | search_query = classification.get("search_query", user_input) 261 | 262 | if VERBOSE: 263 | print(f"Searching web for: {search_query}") 264 | 265 | # Get search results using Tavily 266 | search_results = search_tavily(search_query) 267 | 268 | # Let the model answer based on search results 269 | answer = ask_ollama(user_input, context=search_results) 270 | 271 | elapsed_time = time.time() - start_time 272 | 273 | # Format the response to show it used web search 274 | return { 275 | "answer": answer, 276 | "source": "web_search", 277 | "search_query": search_query, 278 | "time": elapsed_time 279 | } 280 | else: 281 | # Use the model's knowledge directly 282 | answer = ask_ollama(user_input) 283 | 284 | elapsed_time = time.time() - start_time 285 | 286 | return { 287 | "answer": answer, 288 | "source": "model_knowledge", 289 | "reason": classification.get("reason", "Query classified as general knowledge"), 290 | "time": elapsed_time 291 | } 292 | 293 | def initialize_models(): 294 | """ 295 | Initialize both models to keep them loaded in memory. 296 | This prevents the cold start problem. 297 | """ 298 | print(f"Initializing models: {MODEL} and {CLASSIFICATION_MODEL}") 299 | 300 | try: 301 | # Use simple prompts to warm up the models 302 | session.post( 303 | f"{OLLAMA_URL}/generate", 304 | json={"model": MODEL, "prompt": "Hello", "stream": False, "keep_alive": "5m"} 305 | ) 306 | 307 | session.post( 308 | f"{OLLAMA_URL}/generate", 309 | json={"model": CLASSIFICATION_MODEL, "prompt": "Hello", "stream": False, "keep_alive": "5m"} 310 | ) 311 | 312 | print("Models initialized and ready") 313 | except Exception as e: 314 | print(f"Warning: Model initialization failed. Error: {str(e)}") 315 | 316 | def main(): 317 | """ 318 | Main function to run the knowledge agent interactively. 
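Queries classified as needing fresh information are routed to Tavily web search; everything else is answered from the local model's own knowledge.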
319 | """ 320 | global MODEL, CLASSIFICATION_MODEL 321 | 322 | print(f"Knowledge Router Agent with Tavily Search") 323 | print(f"Main model: {MODEL}, Classification model: {CLASSIFICATION_MODEL}") 324 | print(f"Current date: {datetime.now().strftime('%Y-%m-%d')}") 325 | print("Type 'exit' to quit, 'model ' to change main model, or 'classmodel ' to change classification model") 326 | print("-" * 50) 327 | 328 | # Initialize models at startup 329 | initialize_models() 330 | 331 | while True: 332 | user_input = input("\nYou: ") 333 | 334 | if user_input.lower() in ["exit", "quit", "bye"]: 335 | print("Goodbye!") 336 | break 337 | 338 | # Check if user wants to change the models 339 | if user_input.lower().startswith("model "): 340 | MODEL = user_input[6:].strip() 341 | print(f"Main model changed to: {MODEL}") 342 | continue 343 | 344 | if user_input.lower().startswith("classmodel "): 345 | CLASSIFICATION_MODEL = user_input[11:].strip() 346 | print(f"Classification model changed to: {CLASSIFICATION_MODEL}") 347 | continue 348 | 349 | result = process_query(user_input) 350 | 351 | # Format the response based on source 352 | if result["source"] == "web_search": 353 | print(f"\nAgent (via web search): {result['answer']}") 354 | print(f"\nSearch query: {result['search_query']}") 355 | else: 356 | print(f"\nAgent (from knowledge): {result['answer']}") 357 | 358 | print(f"\nTime elapsed: {result['time']:.2f} seconds") 359 | print("-" * 50) 360 | 361 | # Example usage 362 | if __name__ == "__main__": 363 | # Run interactive mode 364 | main() -------------------------------------------------------------------------------- /Advancing_EdgeAI/5-validate_response.py: -------------------------------------------------------------------------------- 1 | import ollama 2 | import json 3 | 4 | def validate_response(query, response): 5 | """Validate that the response is appropriate for the query""" 6 | validation_prompt = f""" 7 | User query: {query} 8 | Generated response: {response} 9 | 10 | Evaluate if this response: 11 | 1. Directly addresses the user's query 12 | 2. Is factually accurate to the best of your knowledge 13 | 3. Is helpful and complete 14 | 15 | Respond in the following JSON format: 16 | {{ 17 | "valid": true/false, 18 | "reason": "Explanation if invalid", 19 | "score": 0-10 20 | }} 21 | """ 22 | 23 | try: 24 | validation = ollama.generate( 25 | model="llama3.2:3b", 26 | prompt=validation_prompt 27 | ) 28 | 29 | result = json.loads(validation['response']) 30 | return result 31 | except Exception as e: 32 | print(f"Error during validation: {e}") 33 | return {"valid": False, "reason": "Validation error", "score": 0} 34 | 35 | # Test 36 | query = "What is the Raspberry Pi 5?" 
37 | response = "It is a pie created with raspberry and cooked in an oven" 38 | validation = validate_response(query, response) 39 | print(validation) 40 | -------------------------------------------------------------------------------- /FLORENCE-2/docs/Florence2-paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/FLORENCE-2/docs/Florence2-paper.pdf -------------------------------------------------------------------------------- /FLORENCE-2/images/cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/FLORENCE-2/images/cover.jpg -------------------------------------------------------------------------------- /FLORENCE-2/images/dogs-cats.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/FLORENCE-2/images/dogs-cats.jpg -------------------------------------------------------------------------------- /FLORENCE-2/images/flyer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/FLORENCE-2/images/flyer.png -------------------------------------------------------------------------------- /FLORENCE-2/images/table.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/FLORENCE-2/images/table.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/background.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/background.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213556.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213556.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213557.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213557.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213558.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213558.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213559.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213559.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213600.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213600.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213601.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213601.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213602.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213602.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213603.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213603.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213604.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213604.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213605.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213605.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213606.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213606.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213607.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213607.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213608.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213608.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213609.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213609.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213610.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213610.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213611.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213611.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213612.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213612.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213613.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213613.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/background/image_20240826-213614.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/background/image_20240826-213614.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201300.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201300.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201303.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201303.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201305.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201305.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201308.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201308.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201311.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201311.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201318.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201318.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201954.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201954.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201955.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201955.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-201956.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-201956.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-203428.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-203428.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-203429.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-203429.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-203430.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-203430.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-203431.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-203431.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-203433.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-203433.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205245.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205245.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205246.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205246.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205247.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205247.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205249.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205249.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205708.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205708.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205709.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205709.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205711.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205711.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205713.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205713.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205718.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205718.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205719.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205719.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205722.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205722.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-205723.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-205723.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-210724.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-210724.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-210725.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-210725.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-210726.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-210726.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-210727.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-210727.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-210729.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-210729.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-210730.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-210730.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/image_20240826-210731.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/image_20240826-210731.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/periquito/periquito.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/periquito/periquito.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-201342.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-201342.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-201344.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-201344.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-201346.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-201346.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-201348.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-201348.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-201350.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-201350.jpg 
-------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-201352.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-201352.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-202012.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-202012.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-202016.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-202016.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-202017.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-202017.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-202018.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-202018.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-203449.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-203449.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-203450.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-203450.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-203451.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-203451.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-203452.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-203452.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205305.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205305.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205306.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205306.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205307.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205307.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205308.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205308.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205739.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205739.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205740.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205740.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205744.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205744.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205745.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205745.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205747.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205747.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205748.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205748.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-205751.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-205751.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210642.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210642.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210643.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210643.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210644.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210644.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210647.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210647.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210651.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210651.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210653.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210653.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210655.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210655.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210702.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210702.jpg 
-------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210704.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210704.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/image_20240826-210707.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/image_20240826-210707.jpg -------------------------------------------------------------------------------- /IMG_CLASS/dataset/robot/robot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/dataset/robot/robot.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/Cat03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/Cat03.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/background.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/background.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/car_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/car_1.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/car_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/car_2.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/car_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/car_3.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/car_4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/car_4.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/car_5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/car_5.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/cat_1.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/cat_1.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/cat_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/cat_2.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/cat_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/cat_2.png -------------------------------------------------------------------------------- /IMG_CLASS/images/dog_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/dog_1.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/dog_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/dog_2.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/dog_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/dog_3.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/periquito.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/periquito.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/periquito_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/periquito_2.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/robot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/robot.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/ship_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/ship_1.jpg -------------------------------------------------------------------------------- /IMG_CLASS/images/ship_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/ship_2.jpg 
-------------------------------------------------------------------------------- /IMG_CLASS/images/teste_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/images/teste_img.jpg -------------------------------------------------------------------------------- /IMG_CLASS/models/cifar10.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/models/cifar10.tflite -------------------------------------------------------------------------------- /IMG_CLASS/models/ei-periquito-vs-robot-img-class-int8-quantized-model.lite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/models/ei-periquito-vs-robot-img-class-int8-quantized-model.lite -------------------------------------------------------------------------------- /IMG_CLASS/models/ei-raspi-img-class-float32-model.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/models/ei-raspi-img-class-float32-model.tflite -------------------------------------------------------------------------------- /IMG_CLASS/models/ei-raspi-img-class-int8-quantized-model.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/models/ei-raspi-img-class-int8-quantized-model.tflite -------------------------------------------------------------------------------- /IMG_CLASS/models/mobilenet_v2_1.0_224_quant.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/IMG_CLASS/models/mobilenet_v2_1.0_224_quant.tflite -------------------------------------------------------------------------------- /IMG_CLASS/notebooks/setup_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "576c970a-0ec9-4be8-86d5-a77d5f99417e", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import tflite_runtime.interpreter as tflite\n", 11 | "import numpy as np\n", 12 | "from PIL import Image" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 4, 18 | "id": "e32ee236-fe88-4cb2-983f-9721acd13e9f", 19 | "metadata": {}, 20 | "outputs": [ 21 | { 22 | "name": "stdout", 23 | "output_type": "stream", 24 | "text": [ 25 | "NumPy: 1.23.2\n", 26 | "Pillow: 10.4.0\n" 27 | ] 28 | } 29 | ], 30 | "source": [ 31 | "print(\"NumPy:\", np.__version__)\n", 32 | "print(\"Pillow:\", Image.__version__)" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 5, 38 | "id": "98e49bdd-c710-45e8-a1e3-0275c356e2c9", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "model_path = \"./models/mobilenet_v2_1.0_224_quant.tflite\"" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 6, 48 | "id": "5c9f129a-afe2-480e-9c54-097fcf83c279", 49 | 
"metadata": {}, 50 | "outputs": [ 51 | { 52 | "name": "stdout", 53 | "output_type": "stream", 54 | "text": [ 55 | "TFLite Interpreter created successfully!\n" 56 | ] 57 | } 58 | ], 59 | "source": [ 60 | "# Try to create a TFLite Interpreter\n", 61 | "interpreter = tflite.Interpreter(model_path=model_path)\n", 62 | "interpreter.allocate_tensors()\n", 63 | "print(\"TFLite Interpreter created successfully!\")" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "id": "9019827f-49f2-4268-bc49-73a1ad08aae1", 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [] 73 | } 74 | ], 75 | "metadata": { 76 | "kernelspec": { 77 | "display_name": "Python 3 (ipykernel)", 78 | "language": "python", 79 | "name": "python3" 80 | }, 81 | "language_info": { 82 | "codemirror_mode": { 83 | "name": "ipython", 84 | "version": 3 85 | }, 86 | "file_extension": ".py", 87 | "mimetype": "text/x-python", 88 | "name": "python", 89 | "nbconvert_exporter": "python", 90 | "pygments_lexer": "ipython3", 91 | "version": "3.8.16" 92 | } 93 | }, 94 | "nbformat": 4, 95 | "nbformat_minor": 5 96 | } 97 | -------------------------------------------------------------------------------- /IMG_CLASS/python_scripts/capture_and_serve.py: -------------------------------------------------------------------------------- 1 | from picamera2 import Picamera2 2 | from flask import Flask, send_from_directory, render_template_string 3 | import time 4 | import os 5 | import threading 6 | 7 | app = Flask(__name__) 8 | 9 | # Global variable to store the latest image filename 10 | latest_image = None 11 | 12 | def capture_dataset(num_images, interval, output_dir="dataset"): 13 | global latest_image 14 | os.makedirs(output_dir, exist_ok=True) 15 | picam2 = Picamera2() 16 | config = picam2.create_still_configuration(main={"size": (640, 480)}) 17 | picam2.configure(config) 18 | 19 | try: 20 | picam2.start() 21 | time.sleep(2) # Wait for camera to warm up 22 | 23 | for i in range(num_images): 24 | timestamp = time.strftime("%Y%m%d-%H%M%S") 25 | filename = f"image_{timestamp}_{i+1:04d}.jpg" 26 | full_path = os.path.join(output_dir, filename) 27 | 28 | picam2.capture_file(full_path) 29 | print(f"Captured image {i+1}/{num_images}: {filename}") 30 | 31 | latest_image = filename # Update the latest image filename 32 | 33 | if i < num_images - 1: 34 | time.sleep(interval) 35 | 36 | except Exception as e: 37 | print(f"An error occurred: {str(e)}") 38 | finally: 39 | picam2.stop() 40 | print("Dataset capture completed.") 41 | 42 | @app.route('/') 43 | def index(): 44 | return render_template_string(''' 45 | 46 | 47 | 48 | Bee Dataset Viewer 49 | 56 | 57 | 58 |

<h1>Latest Captured Image</h1>
59 |     <img src="/latest_image" alt="Latest captured image">
60 |     </body></html>
61 |     ''')
62 | 
63 | 
64 | @app.route('/latest_image')
65 | def serve_latest_image():
66 |     # Named differently from the route so the view function does not shadow
67 |     # the global 'latest_image' variable that holds the most recent filename.
68 |     if latest_image:
69 |         return send_from_directory('dataset', latest_image)
70 |     else:
71 |         return "No image captured yet", 404
72 | 
73 | if __name__ == '__main__':
74 |     # Start the image capture in a separate thread
75 |     capture_thread = threading.Thread(target=capture_dataset, args=(100, 5, "dataset"))
76 |     capture_thread.start()
77 | 
78 |     # Run the Flask app
79 |     app.run(host='0.0.0.0', port=5000)
80 | 
-------------------------------------------------------------------------------- /IMG_CLASS/python_scripts/capture_image.py: --------------------------------------------------------------------------------
1 | from picamera2 import Picamera2
2 | import time
3 | 
4 | # Initialize the camera
5 | picam2 = Picamera2()
6 | 
7 | # Configure the camera
8 | config = picam2.create_still_configuration(main={"size": (640, 480)})
9 | picam2.configure(config)
10 | 
11 | # Start the camera
12 | picam2.start()
13 | 
14 | # Wait for the camera to warm up
15 | time.sleep(2)
16 | 
17 | # Capture an image
18 | picam2.capture_file("usb_camera_image.jpg")
19 | print("Image captured and saved as 'usb_camera_image.jpg'")
20 | 
21 | # Stop the camera
22 | picam2.stop()
23 | 
-------------------------------------------------------------------------------- /IMG_CLASS/python_scripts/get_img_data.py: --------------------------------------------------------------------------------
1 | from flask import Flask, Response, render_template_string, request, redirect, url_for
2 | from picamera2 import Picamera2
3 | import io
4 | import threading
5 | import time
6 | import os
7 | import signal
8 | 
9 | app = Flask(__name__)
10 | 
11 | # Global variables
12 | base_dir = "dataset"
13 | picam2 = None
14 | frame = None
15 | frame_lock = threading.Lock()
16 | capture_counts = {}
17 | current_label = None
18 | shutdown_event = threading.Event()
19 | 
20 | def initialize_camera():
21 |     global picam2
22 |     picam2 = Picamera2()
23 |     config = picam2.create_preview_configuration(main={"size": (320, 240)})
24 |     picam2.configure(config)
25 |     picam2.start()
26 |     time.sleep(2)  # Wait for camera to warm up
27 | 
28 | def get_frame():
29 |     global frame
30 |     while not shutdown_event.is_set():
31 |         stream = io.BytesIO()
32 |         picam2.capture_file(stream, format='jpeg')
33 |         with frame_lock:
34 |             frame = stream.getvalue()
35 |         time.sleep(0.1)  # Adjust as needed for smooth preview
36 | 
37 | def generate_frames():
38 |     while not shutdown_event.is_set():
39 |         with frame_lock:
40 |             if frame is not None:
41 |                 yield (b'--frame\r\n'
42 |                        b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
43 |         time.sleep(0.1)  # Adjust as needed for smooth streaming
44 | 
45 | def shutdown_server():
46 |     shutdown_event.set()
47 |     if picam2:
48 |         picam2.stop()
49 |     # Give some time for other threads to finish
50 |     time.sleep(2)
51 |     # Send SIGINT to the main process
52 |     os.kill(os.getpid(), signal.SIGINT)
53 | 
54 | @app.route('/', methods=['GET', 'POST'])
55 | def index():
56 |     global current_label
57 |     if request.method == 'POST':
58 |         current_label = request.form['label']
59 |         if current_label not in capture_counts:
60 |             capture_counts[current_label] = 0
61 |         os.makedirs(os.path.join(base_dir, current_label), exist_ok=True)
62 |         return redirect(url_for('capture_page'))
63 |     return render_template_string('''
64 |     <!DOCTYPE html>
65 |     <html>
66 |     <head>
67 |         <title>Dataset Capture - Label Entry</title>
68 |     </head>
69 |     <body>
70 |         <h1>Enter Label for Dataset</h1>
71 |         <form method="post">
72 |             <input type="text" name="label" required>
73 |             <input type="submit" value="Start Capture">
74 |         </form>
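<!-- Submitting this form POSTs the 'label' field back to the index route above,
     which creates dataset/<label>/ if needed and then redirects to /capture. -->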
75 |     </body>
76 |     </html>
77 |     ''')
78 | 
79 | @app.route('/capture')
80 | def capture_page():
81 |     return render_template_string('''
82 |     <!DOCTYPE html>
83 |     <html>
84 |     <head>
85 |         <title>Dataset Capture</title>
86 |         <style>/* page style block (original lines 86-102) lost in extraction */</style>
103 |     </head>
104 |     <body>
105 |         <h1>Dataset Capture</h1>
106 |         <p>Current Label: {{ label }}</p>
107 |         <p>Images captured for this label: {{ capture_count }}</p>
108 |         <img src="{{ url_for('video_feed') }}" width="320" height="240">
109 |         <script>/* status script (original lines 109-111) lost in extraction */</script>
112 |         <br>
113 |         <form action="{{ url_for('capture_image') }}" method="post">
114 |             <button type="submit">Capture Image</button>
115 |         </form>
116 |         <form action="{{ url_for('stop') }}" method="post">
117 |             <button type="submit">Stop Capture</button>
118 |         </form>
119 |     </body>
120 |     </html>
121 | 122 | 123 | ''', label=current_label, capture_count=capture_counts.get(current_label, 0)) 124 | 125 | @app.route('/video_feed') 126 | def video_feed(): 127 | return Response(generate_frames(), 128 | mimetype='multipart/x-mixed-replace; boundary=frame') 129 | 130 | @app.route('/capture_image', methods=['POST']) 131 | def capture_image(): 132 | global capture_counts 133 | if current_label and not shutdown_event.is_set(): 134 | capture_counts[current_label] += 1 135 | timestamp = time.strftime("%Y%m%d-%H%M%S") 136 | filename = f"image_{timestamp}.jpg" 137 | full_path = os.path.join(base_dir, current_label, filename) 138 | 139 | picam2.capture_file(full_path) 140 | 141 | return redirect(url_for('capture_page')) 142 | 143 | @app.route('/stop', methods=['POST']) 144 | def stop(): 145 | summary = render_template_string(''' 146 | 147 | 148 | 149 | Dataset Capture - Stopped 150 | 151 | 152 |

<h1>Dataset Capture Stopped</h1>
153 |     <p>The capture process has been stopped. You can close this window.</p>
154 |     <p>Summary of captures:</p>
155 |     <ul>
156 |     {% for label, count in capture_counts.items() %}
157 |         <li>{{ label }}: {{ count }} images</li>
158 |     {% endfor %}
159 |     </ul>
160 | 161 | 162 | ''', capture_counts=capture_counts) 163 | 164 | # Start a new thread to shutdown the server 165 | threading.Thread(target=shutdown_server).start() 166 | 167 | return summary 168 | 169 | @app.route('/check_shutdown') 170 | def check_shutdown(): 171 | return {'shutdown': shutdown_event.is_set()} 172 | 173 | if __name__ == '__main__': 174 | initialize_camera() 175 | threading.Thread(target=get_frame, daemon=True).start() 176 | app.run(host='0.0.0.0', port=5000, threaded=True) 177 | -------------------------------------------------------------------------------- /IMG_CLASS/python_scripts/img_class_live_infer.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, Response, render_template_string, request, jsonify 2 | from picamera2 import Picamera2 3 | import io 4 | import threading 5 | import time 6 | import numpy as np 7 | from PIL import Image 8 | import tflite_runtime.interpreter as tflite 9 | from queue import Queue 10 | 11 | app = Flask(__name__) 12 | 13 | # Global variables 14 | picam2 = None 15 | frame = None 16 | frame_lock = threading.Lock() 17 | is_classifying = False 18 | confidence_threshold = 0.8 19 | model_path = "./models/ei-raspi-img-class-int8-quantized-model.tflite" 20 | labels = ['background', 'periquito', 'robot'] 21 | interpreter = None 22 | classification_queue = Queue(maxsize=1) 23 | 24 | def initialize_camera(): 25 | global picam2 26 | picam2 = Picamera2() 27 | config = picam2.create_preview_configuration(main={"size": (320, 240)}) 28 | picam2.configure(config) 29 | picam2.start() 30 | time.sleep(2) # Wait for camera to warm up 31 | 32 | def get_frame(): 33 | global frame 34 | while True: 35 | stream = io.BytesIO() 36 | picam2.capture_file(stream, format='jpeg') 37 | with frame_lock: 38 | frame = stream.getvalue() 39 | time.sleep(0.1) # Capture frames more frequently 40 | 41 | def generate_frames(): 42 | while True: 43 | with frame_lock: 44 | if frame is not None: 45 | yield (b'--frame\r\n' 46 | b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') 47 | time.sleep(0.1) 48 | 49 | def load_model(): 50 | global interpreter 51 | if interpreter is None: 52 | interpreter = tflite.Interpreter(model_path=model_path) 53 | interpreter.allocate_tensors() 54 | return interpreter 55 | 56 | def classify_image(img, interpreter): 57 | input_details = interpreter.get_input_details() 58 | output_details = interpreter.get_output_details() 59 | 60 | img = img.resize((input_details[0]['shape'][1], input_details[0]['shape'][2])) 61 | input_data = np.expand_dims(np.array(img), axis=0).astype(input_details[0]['dtype']) 62 | 63 | interpreter.set_tensor(input_details[0]['index'], input_data) 64 | interpreter.invoke() 65 | 66 | predictions = interpreter.get_tensor(output_details[0]['index'])[0] 67 | # Handle output based on type 68 | output_dtype = output_details[0]['dtype'] 69 | if output_dtype in [np.int8, np.uint8]: 70 | # Dequantize the output 71 | scale, zero_point = output_details[0]['quantization'] 72 | predictions = (predictions.astype(np.float32) - zero_point) * scale 73 | return predictions 74 | 75 | def classification_worker(): 76 | interpreter = load_model() 77 | while True: 78 | if is_classifying: 79 | with frame_lock: 80 | if frame is not None: 81 | img = Image.open(io.BytesIO(frame)) 82 | predictions = classify_image(img, interpreter) 83 | max_prob = np.max(predictions) 84 | if max_prob >= confidence_threshold: 85 | label = labels[np.argmax(predictions)] 86 | else: 87 | label = 'Uncertain' 88 | 
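# classification_queue is created with maxsize=1, so this put() blocks whenever an
# unconsumed result is still waiting: the worker only advances as fast as the page
# polls /get_classification. A non-blocking variant (a sketch, not in the original
# script) would skip the new result instead of blocking:
#     from queue import Full
#     try:
#         classification_queue.put_nowait({'label': label, 'probability': float(max_prob)})
#     except Full:
#         pass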
classification_queue.put({'label': label, 'probability': float(max_prob)})
89 |         time.sleep(0.1)  # Adjust based on your needs
90 | 
91 | @app.route('/')
92 | def index():
93 |     return render_template_string('''
94 |     <!DOCTYPE html>
95 |     <html>
96 |     <head>
97 |         <title>Image Classification</title>
98 | 
99 |         <style>/* style and control script (original lines 99-122) lost in extraction */</style>
123 |     </head>
124 |     <body>

<h1>Image Classification</h1>
126 |     <img src="{{ url_for('video_feed') }}" width="320" height="240">
127 |     <div>
128 |         <button onclick="fetch('/start', {method: 'POST'})">Start</button>
129 |         <button onclick="fetch('/stop', {method: 'POST'})">Stop</button>
130 |     </div>
131 |     <label>Confidence threshold:
132 |         <input type="range" min="0" max="1" step="0.05" value="0.8" oninput="fetch('/update_confidence', {method: 'POST', body: new URLSearchParams({confidence: this.value})})">
133 |     </label>
134 |     <div id="result">Waiting for classification...</div>
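<!-- A page script (lost from this dump) would poll GET /get_classification, which
     returns JSON of the form {"label": ..., "probability": ...}, and write the
     result into the #result div above. -->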
135 |     </body>
136 |     </html>
137 |     ''')
138 | 
139 | @app.route('/video_feed')
140 | def video_feed():
141 |     return Response(generate_frames(),
142 |                     mimetype='multipart/x-mixed-replace; boundary=frame')
143 | 
144 | @app.route('/start', methods=['POST'])
145 | def start_classification():
146 |     global is_classifying
147 |     is_classifying = True
148 |     return '', 204
149 | 
150 | @app.route('/stop', methods=['POST'])
151 | def stop_classification():
152 |     global is_classifying
153 |     is_classifying = False
154 |     return '', 204
155 | 
156 | @app.route('/update_confidence', methods=['POST'])
157 | def update_confidence():
158 |     global confidence_threshold
159 |     confidence_threshold = float(request.form['confidence'])
160 |     return '', 204
161 | 
162 | @app.route('/get_classification')
163 | def get_classification():
164 |     if not is_classifying:
165 |         return jsonify({'label': 'Not classifying', 'probability': 0})
166 |     from queue import Empty  # get_nowait() raises the module-level queue.Empty, not an attribute of Queue
167 |     try:
168 |         result = classification_queue.get_nowait()
169 |     except Empty:
170 |         result = {'label': 'Processing', 'probability': 0}
171 |     return jsonify(result)
172 | 
173 | if __name__ == '__main__':
174 |     initialize_camera()
175 |     threading.Thread(target=get_frame, daemon=True).start()
176 |     threading.Thread(target=classification_worker, daemon=True).start()
177 |     app.run(host='0.0.0.0', port=5000, threaded=True)
178 | 
-------------------------------------------------------------------------------- /IMG_CLASS/python_scripts/setup_test.py: --------------------------------------------------------------------------------
1 | import tflite_runtime.interpreter as tflite
2 | import numpy as np
3 | from PIL import Image
4 | 
5 | print("NumPy:", np.__version__)
6 | print("Pillow:", Image.__version__)
7 | 
8 | # Try to create a TFLite Interpreter
9 | model_path = "./models/mobilenet_v2_1.0_224_quant.tflite"
10 | interpreter = tflite.Interpreter(model_path=model_path)
11 | interpreter.allocate_tensors()
12 | print("TFLite Interpreter created successfully!")
13 | 
-------------------------------------------------------------------------------- /KD-Knowledge_Destilation/KD-From MNIST_2_LLMs.pdf: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/KD-Knowledge_Destilation/KD-From MNIST_2_LLMs.pdf
-------------------------------------------------------------------------------- /OBJ_DETEC/images/beagles.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/beagles.jpg
-------------------------------------------------------------------------------- /OBJ_DETEC/images/beatch.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/beatch.jpg
-------------------------------------------------------------------------------- /OBJ_DETEC/images/cat_dog.jpeg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/cat_dog.jpeg
-------------------------------------------------------------------------------- /OBJ_DETEC/images/cats_dogs.jpg: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/cats_dogs.jpg -------------------------------------------------------------------------------- /OBJ_DETEC/images/home-office.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/home-office.jpg -------------------------------------------------------------------------------- /OBJ_DETEC/images/man_cat_dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/man_cat_dog.jpg -------------------------------------------------------------------------------- /OBJ_DETEC/images/office.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/office.jpeg -------------------------------------------------------------------------------- /OBJ_DETEC/images/ship_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/images/ship_2.jpg -------------------------------------------------------------------------------- /OBJ_DETEC/models/box_wheel_320_yolo.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/models/box_wheel_320_yolo.pt -------------------------------------------------------------------------------- /OBJ_DETEC/models/coco_labels.txt: -------------------------------------------------------------------------------- 1 | person 2 | bicycle 3 | car 4 | motorbike 5 | aeroplane 6 | bus 7 | train 8 | truck 9 | boat 10 | trafficlight 11 | firehydrant 12 | streetsign 13 | stopsign 14 | parkingmeter 15 | bench 16 | bird 17 | cat 18 | dog 19 | horse 20 | sheep 21 | cow 22 | elephant 23 | bear 24 | zebra 25 | giraffe 26 | hat 27 | backpack 28 | umbrella 29 | shoe 30 | eyeglasses 31 | handbag 32 | tie 33 | suitcase 34 | frisbee 35 | skis 36 | snowboard 37 | sportsball 38 | kite 39 | baseballbat 40 | baseballglove 41 | skateboard 42 | surfboard 43 | tennisracket 44 | bottle 45 | plate 46 | wineglass 47 | cup 48 | fork 49 | knife 50 | spoon 51 | bowl 52 | banana 53 | apple 54 | sandwich 55 | orange 56 | broccoli 57 | carrot 58 | hotdog 59 | pizza 60 | donut 61 | cake 62 | chair 63 | sofa 64 | pottedplant 65 | bed 66 | mirror 67 | diningtable 68 | window 69 | desk 70 | toilet 71 | door 72 | tvmonitor 73 | laptop 74 | mouse 75 | remote 76 | keyboard 77 | cellphone 78 | microwave 79 | oven 80 | toaster 81 | sink 82 | refrigerator 83 | blender 84 | book 85 | clock 86 | vase 87 | scissors 88 | teddybear 89 | hairdrier 90 | toothbrush 91 | hairbrush -------------------------------------------------------------------------------- /OBJ_DETEC/models/ei-raspi-object-detection-FOMO-160x160-int8.lite: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/models/ei-raspi-object-detection-FOMO-160x160-int8.lite -------------------------------------------------------------------------------- /OBJ_DETEC/models/ei-raspi-object-detection-SSD-MobileNetv2-320x0320-int8.lite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/models/ei-raspi-object-detection-SSD-MobileNetv2-320x0320-int8.lite -------------------------------------------------------------------------------- /OBJ_DETEC/models/lite-model_efficientdet_lite0_detection_metadata_1.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/models/lite-model_efficientdet_lite0_detection_metadata_1.tflite -------------------------------------------------------------------------------- /OBJ_DETEC/models/raspi-object-detection-linux-aarch64-FOMO-int8.eim: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/models/raspi-object-detection-linux-aarch64-FOMO-int8.eim -------------------------------------------------------------------------------- /OBJ_DETEC/models/ssd-mobilenet-v1-tflite-default-v1.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/models/ssd-mobilenet-v1-tflite-default-v1.tar.gz -------------------------------------------------------------------------------- /OBJ_DETEC/models/ssd-mobilenet-v1-tflite-default-v1.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OBJ_DETEC/models/ssd-mobilenet-v1-tflite-default-v1.tflite -------------------------------------------------------------------------------- /OBJ_DETEC/python_scripts/get_img_data.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, Response, render_template_string, request, redirect, url_for 2 | from picamera2 import Picamera2 3 | import io 4 | import threading 5 | import time 6 | import os 7 | import signal 8 | 9 | app = Flask(__name__) 10 | 11 | # Global variables 12 | base_dir = "dataset" 13 | picam2 = None 14 | frame = None 15 | frame_lock = threading.Lock() 16 | capture_counts = {} 17 | current_label = None 18 | shutdown_event = threading.Event() 19 | 20 | def initialize_camera(): 21 | global picam2 22 | picam2 = Picamera2() 23 | config = picam2.create_preview_configuration(main={"size": (320, 240)}) 24 | picam2.configure(config) 25 | picam2.start() 26 | time.sleep(2) # Wait for camera to warm up 27 | 28 | def get_frame(): 29 | global frame 30 | while not shutdown_event.is_set(): 31 | stream = io.BytesIO() 32 | picam2.capture_file(stream, format='jpeg') 33 | with frame_lock: 34 | frame = stream.getvalue() 35 | time.sleep(0.1) # Adjust as needed for smooth preview 36 | 37 | def generate_frames(): 38 | while not shutdown_event.is_set(): 39 | with frame_lock: 40 | if frame is not None: 41 | yield (b'--frame\r\n' 42 | 
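# Each yielded chunk is one JPEG framed as a part of a multipart/x-mixed-replace
# response; the browser swaps in each new part as it arrives, giving a live MJPEG preview.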
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') 43 | time.sleep(0.1) # Adjust as needed for smooth streaming 44 | 45 | def shutdown_server(): 46 | shutdown_event.set() 47 | if picam2: 48 | picam2.stop() 49 | # Give some time for other threads to finish 50 | time.sleep(2) 51 | # Send SIGINT to the main process 52 | os.kill(os.getpid(), signal.SIGINT) 53 | 54 | @app.route('/', methods=['GET', 'POST']) 55 | def index(): 56 | global current_label 57 | if request.method == 'POST': 58 | current_label = request.form['label'] 59 | if current_label not in capture_counts: 60 | capture_counts[current_label] = 0 61 | os.makedirs(os.path.join(base_dir, current_label), exist_ok=True) 62 | return redirect(url_for('capture_page')) 63 | return render_template_string(''' 64 | 65 | 66 | 67 | Dataset Capture - Label Entry 68 | 69 | 70 |

<h1>Enter Label for Dataset</h1>
<!-- Form markup reconstructed; the original HTML tags were stripped when this dump was generated -->
71 | <form method="POST"> 72 | <input type="text" name="label" required> 73 | <input type="submit" value="Start Capture"> 74 | </form> 75 | </body> 76 | </html> 77 | ''') 78 | 79 | @app.route('/capture') 80 | def capture_page(): 81 | return render_template_string(''' 82 | <html> 83 | <head> 84 | <title>Dataset Capture</title> 85 | <!-- This page's style/script block (original lines 87-102) was stripped with the HTML tags --> 103 | </head> 104 | <body> 105 | <h1>Dataset Capture</h1>
106 | <p>Current Label: {{ label }}</p>
107 | <p>Images captured for this label: {{ capture_count }}</p>
108 | <img src="{{ url_for('video_feed') }}" width="320" height="240"> 109 | <br>
<!-- Control forms reconstructed from this app's routes; button labels are inferred -->
112 | <form action="{{ url_for('capture_image') }}" method="POST"> 113 | <input type="submit" value="Capture Image"> 114 | </form>
115 | <form action="{{ url_for('stop') }}" method="POST"> 116 | <input type="submit" value="Stop Capture"> 117 | </form>
118 | <form action="{{ url_for('index') }}" method="GET"> 119 | <input type="submit" value="Change Label"> 120 | </form>
121 | 122 | 123 | ''', label=current_label, capture_count=capture_counts.get(current_label, 0)) 124 | 125 | @app.route('/video_feed') 126 | def video_feed(): 127 | return Response(generate_frames(), 128 | mimetype='multipart/x-mixed-replace; boundary=frame') 129 | 130 | @app.route('/capture_image', methods=['POST']) 131 | def capture_image(): 132 | global capture_counts 133 | if current_label and not shutdown_event.is_set(): 134 | capture_counts[current_label] += 1 135 | timestamp = time.strftime("%Y%m%d-%H%M%S") 136 | filename = f"image_{timestamp}.jpg" 137 | full_path = os.path.join(base_dir, current_label, filename) 138 | 139 | picam2.capture_file(full_path) 140 | 141 | return redirect(url_for('capture_page')) 142 | 143 | @app.route('/stop', methods=['POST']) 144 | def stop(): 145 | summary = render_template_string(''' 146 | 147 | 148 | 149 | Dataset Capture - Stopped 150 | 151 | 152 |

<h1>Dataset Capture Stopped</h1>
<!-- Markup reconstructed; the original HTML tags were stripped when this dump was generated -->
153 | <p>The capture process has been stopped. You can close this window.</p>
154 | <p>Summary of captures:</p>
155 | <ul>
156 | {% for label, count in capture_counts.items() %} 157 | <li>{{ label }}: {{ count }} images</li> 158 | {% endfor %} 159 | </ul>
160 | 161 | 162 | ''', capture_counts=capture_counts) 163 | 164 | # Start a new thread to shutdown the server 165 | threading.Thread(target=shutdown_server).start() 166 | 167 | return summary 168 | 169 | @app.route('/check_shutdown') 170 | def check_shutdown(): 171 | return {'shutdown': shutdown_event.is_set()} 172 | 173 | if __name__ == '__main__': 174 | initialize_camera() 175 | threading.Thread(target=get_frame, daemon=True).start() 176 | app.run(host='0.0.0.0', port=5000, threaded=True) 177 | -------------------------------------------------------------------------------- /OBJ_DETEC/python_scripts/object_detection_app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, Response, render_template_string, request, jsonify 2 | from picamera2 import Picamera2 3 | import io 4 | import threading 5 | import time 6 | import numpy as np 7 | from PIL import Image, ImageDraw, ImageFont 8 | import tflite_runtime.interpreter as tflite 9 | from queue import Queue, Empty 10 | import matplotlib.pyplot as plt 11 | from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas 12 | import os 13 | import signal 14 | 15 | app = Flask(__name__) 16 | 17 | # Global variables 18 | picam2 = None 19 | frame = None 20 | frame_lock = threading.Lock() 21 | is_detecting = False 22 | confidence_threshold = 0.5 23 | model_path = "./models/ssd-mobilenet-v1-tflite-default-v1.tflite" 24 | labels_path = "./models/coco_labels.txt" 25 | interpreter = None 26 | detection_queue = Queue(maxsize=1) 27 | latest_detections = [] 28 | detections_lock = threading.Lock() 29 | 30 | def load_labels(path): 31 | with open(path, 'r') as f: 32 | return {i: line.strip() for i, line in enumerate(f.readlines())} 33 | 34 | labels = load_labels(labels_path) 35 | 36 | def initialize_camera(): 37 | global picam2 38 | picam2 = Picamera2() 39 | config = picam2.create_preview_configuration(main={"size": (640, 480)}) 40 | picam2.configure(config) 41 | picam2.start() 42 | time.sleep(2) # Wait for camera to warm up 43 | 44 | def get_frame(): 45 | global frame 46 | while True: 47 | stream = io.BytesIO() 48 | picam2.capture_file(stream, format='jpeg') 49 | stream.seek(0) 50 | img = Image.open(stream) 51 | 52 | # Draw detections on the image 53 | img_with_detections = draw_detections(img) 54 | 55 | # Convert the image back to bytes 56 | img_byte_arr = io.BytesIO() 57 | img_with_detections.save(img_byte_arr, format='JPEG') 58 | img_byte_arr = img_byte_arr.getvalue() 59 | 60 | with frame_lock: 61 | frame = img_byte_arr 62 | print("Frame captured and processed") 63 | time.sleep(0.1) # Capture frames more frequently 64 | 65 | def generate_frames(): 66 | while True: 67 | with frame_lock: 68 | if frame is not None: 69 | yield (b'--frame\r\n' 70 | b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') 71 | time.sleep(0.03) # Adjust this value to control frame rate 72 | 73 | def load_model(): 74 | global interpreter 75 | if interpreter is None: 76 | interpreter = tflite.Interpreter(model_path=model_path) 77 | interpreter.allocate_tensors() 78 | return interpreter 79 | 80 | def detect_objects(img, interpreter): 81 | input_details = interpreter.get_input_details() 82 | output_details = interpreter.get_output_details() 83 | 84 | # Get the expected input shape 85 | input_shape = input_details[0]['shape'] 86 | height, width = input_shape[1], input_shape[2] 87 | 88 | # Resize and preprocess the image 89 | img = img.resize((width, height)) 90 | img = img.convert('RGB') # Ensure the image is in 
RGB format 91 | input_data = np.expand_dims(np.array(img, dtype=np.uint8), axis=0) 92 | 93 | # Check if the model expects float input 94 | if input_details[0]['dtype'] == np.float32: 95 | input_data = (np.float32(input_data) - 127.5) / 127.5 96 | 97 | # Set the tensor 98 | interpreter.set_tensor(input_details[0]['index'], input_data) 99 | 100 | # Run inference 101 | interpreter.invoke() 102 | 103 | # Get outputs 104 | boxes = interpreter.get_tensor(output_details[0]['index'])[0] 105 | classes = interpreter.get_tensor(output_details[1]['index'])[0] 106 | scores = interpreter.get_tensor(output_details[2]['index'])[0] 107 | num_detections = int(interpreter.get_tensor(output_details[3]['index'])[0]) 108 | 109 | return boxes, classes, scores, num_detections 110 | 111 | def detection_worker(): 112 | global frame, is_detecting, latest_detections 113 | interpreter = load_model() 114 | print("Model loaded successfully") 115 | 116 | while True: 117 | if is_detecting: 118 | try: 119 | current_frame = None 120 | with frame_lock: 121 | if frame is not None: 122 | current_frame = frame 123 | 124 | if current_frame is not None: 125 | img = Image.open(io.BytesIO(current_frame)) 126 | 127 | boxes, classes, scores, num_detections = detect_objects(img, interpreter) 128 | 129 | new_detections = [] 130 | for i in range(int(num_detections)): 131 | if scores[i] >= confidence_threshold: 132 | ymin, xmin, ymax, xmax = boxes[i] 133 | (left, right, top, bottom) = (xmin * img.width, xmax * img.width, 134 | ymin * img.height, ymax * img.height) 135 | 136 | class_id = int(classes[i]) 137 | class_name = labels.get(class_id, f"Class {class_id}") 138 | 139 | new_detections.append({ 140 | 'class': class_name, 141 | 'score': float(scores[i]), 142 | 'box': [left, top, right, bottom] 143 | }) 144 | 145 | with detections_lock: 146 | latest_detections = new_detections # Replace instead of append 147 | 148 | except Exception as e: 149 | print(f"Error in detection worker: {e}") 150 | import traceback 151 | traceback.print_exc() 152 | time.sleep(0.1) # Process frames more frequently 153 | 154 | def draw_detections(img): 155 | draw = ImageDraw.Draw(img) 156 | try: 157 | font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 12) 158 | except IOError: 159 | font = ImageFont.load_default() 160 | 161 | with detections_lock: 162 | for detection in latest_detections: 163 | left, top, right, bottom = detection['box'] 164 | draw.rectangle([left, top, right, bottom], outline="red", width=2) 165 | 166 | label = f"{detection['class']}: {detection['score']:.2f}" 167 | draw.text((left, top-15), label, font=font, fill="red") 168 | 169 | return img 170 | 171 | @app.route('/') 172 | def index(): 173 | return render_template_string(''' 174 | 175 | 176 | 177 | Object Detection 178 | 179 | 232 | 233 | 234 | 235 |

<h1>Object Detection</h1>
<!-- Markup reconstructed from this app's routes; the original HTML tags and the style/script
     block (original lines 179-232, presumably the CSS plus a script polling /get_detections)
     were stripped when this dump was generated -->
236 | <img src="{{ url_for('video_feed') }}" width="640" height="480"> 237 | <br>
238 | <button onclick="fetch('/start', {method: 'POST'})">Start Detection</button> 239 | <button onclick="fetch('/stop', {method: 'POST'})">Stop Detection</button> 240 | <button onclick="fetch('/close', {method: 'POST', headers: {'X-Requested-With': 'XMLHttpRequest'}})">Close App</button> 241 | <br>
242 | <label>Confidence: <input type="range" min="0.1" max="0.9" step="0.05" value="0.5" 243 | onchange="fetch('/update_confidence', {method: 'POST', body: new URLSearchParams({confidence: this.value})})"></label> 244 | <br>
245 | <div id="detections">Waiting for detections...</div>
246 | 247 | 248 | ''') 249 | 250 | @app.route('/video_feed') 251 | def video_feed(): 252 | return Response(generate_frames(), 253 | mimetype='multipart/x-mixed-replace; boundary=frame') 254 | 255 | @app.route('/start', methods=['POST']) 256 | def start_detection(): 257 | global is_detecting 258 | is_detecting = True 259 | return '', 204 260 | 261 | @app.route('/stop', methods=['POST']) 262 | def stop_detection(): 263 | global is_detecting 264 | is_detecting = False 265 | return '', 204 266 | 267 | @app.route('/update_confidence', methods=['POST']) 268 | def update_confidence(): 269 | global confidence_threshold 270 | confidence_threshold = float(request.form['confidence']) 271 | return '', 204 272 | 273 | @app.route('/get_detections') 274 | def get_detections(): 275 | global latest_detections 276 | if not is_detecting: 277 | return jsonify([]) 278 | with detections_lock: 279 | return jsonify(latest_detections) 280 | 281 | @app.route('/close', methods=['POST']) 282 | def close_app(): 283 | global is_detecting 284 | is_detecting = False 285 | cleanup() 286 | if request.headers.get('X-Requested-With') == 'XMLHttpRequest': 287 | # This is an AJAX request, so it's an intentional close 288 | def shutdown(): 289 | os.kill(os.getpid(), signal.SIGINT) 290 | threading.Thread(target=shutdown).start() 291 | return 'Server shutting down...', 200 292 | else: 293 | # This is not an AJAX request, so it's probably a page refresh 294 | return 'Page refreshed', 200 295 | 296 | def cleanup(): 297 | global picam2, is_detecting 298 | is_detecting = False 299 | if picam2: 300 | picam2.stop() 301 | # You might want to add any additional cleanup code here 302 | 303 | if __name__ == '__main__': 304 | try: 305 | initialize_camera() 306 | detection_thread = threading.Thread(target=detection_worker, daemon=True) 307 | detection_thread.start() 308 | frame_thread = threading.Thread(target=get_frame, daemon=True) 309 | frame_thread.start() 310 | app.run(host='0.0.0.0', port=5000, threaded=True) 311 | except KeyboardInterrupt: 312 | print("Shutting down...") 313 | finally: 314 | cleanup() -------------------------------------------------------------------------------- /OLLAMA_SLMs/calc_distance_image.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from haversine import haversine 3 | import ollama 4 | from openai import OpenAI 5 | from pydantic import BaseModel, Field 6 | import instructor 7 | import time 8 | 9 | start_time = time.perf_counter() # Start timing 10 | 11 | img = sys.argv[1] 12 | MODEL = 'llava-phi3:3.8b' 13 | mylat = -33.33 14 | mylon = -70.51 15 | 16 | def image_description(img): 17 | with open(img, 'rb') as file: 18 | response = ollama.chat( 19 | model=MODEL, 20 | messages=[ 21 | { 22 | 'role': 'user', 23 | 'content': 'return the decimal latitude and decimal longitude of the city in the image, its name, and what country it is located', 24 | 'images': [file.read()], 25 | }, 26 | ], 27 | options = { 28 | 'temperature': 0, 29 | } 30 | ) 31 | #print(response['message']['content']) 32 | return response['message']['content'] 33 | 34 | 35 | class CityCoord(BaseModel): 36 | city: str = Field(..., description="Name of the city in the image") 37 | country: str = Field(..., description="Name of the country where the city in the image is located") 38 | lat: float = Field(..., description="Decimal Latitude of the city in the image") 39 | lon: float = Field(..., description="Decimal Longitude of the city in the image") 40 | 41 | # enables `response_model` in create 
call 42 | client = instructor.patch( 43 | OpenAI( 44 | base_url="http://localhost:11434/v1", 45 | api_key="ollama", 46 | ), 47 | mode=instructor.Mode.JSON, 48 | ) 49 | 50 | image_description = image_description(img) 51 | # Send this description to the model 52 | resp = client.chat.completions.create( 53 | model=MODEL, 54 | messages=[ 55 | { 56 | "role": "user", 57 | "content": image_description, 58 | } 59 | ], 60 | response_model=CityCoord, 61 | max_retries=10, 62 | temperature=0, 63 | ) 64 | 65 | distance = haversine((mylat, mylon), (resp.lat, resp.lon), unit='km') 66 | 67 | print(f"\n The image shows {resp.city}, with lat:{round(resp.lat, 2)} and long: {round(resp.lon, 2)}, located in {resp.country} and about {int(round(distance, -1)):,} kilometers away from Santiago, Chile.\n") 68 | 69 | end_time = time.perf_counter() # End timing 70 | elapsed_time = end_time - start_time # Calculate elapsed time 71 | print(f" [INFO] ==> The code (running {MODEL}), took {elapsed_time:.1f} seconds to execute.\n") -------------------------------------------------------------------------------- /OLLAMA_SLMs/image _test_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OLLAMA_SLMs/image _test_2.jpg -------------------------------------------------------------------------------- /OLLAMA_SLMs/image_test_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OLLAMA_SLMs/image_test_1.jpg -------------------------------------------------------------------------------- /OLLAMA_SLMs/image_test_3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/OLLAMA_SLMs/image_test_3.jpg -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/GPIOS/button_test.py: -------------------------------------------------------------------------------- 1 | from gpiozero import Button 2 | button = Button(20) 3 | while True: 4 | if button.is_pressed: 5 | print("Button is pressed") 6 | else: 7 | print("Button is not pressed") 8 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/GPIOS/check_pins.py: -------------------------------------------------------------------------------- 1 | import RPi.GPIO as GPIO 2 | 3 | def check_all_pins(): 4 | # Using BCM numbering 5 | GPIO.setmode(GPIO.BCM) 6 | 7 | # Standard GPIO pins on Raspberry Pi (adjust this list based on your Pi model) 8 | gpio_pins = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] 9 | 10 | pins_in_use = [] 11 | available_pins = [] 12 | 13 | for pin in gpio_pins: 14 | try: 15 | GPIO.setup(pin, GPIO.IN) 16 | # If setup succeeds, the pin is available 17 | GPIO.cleanup(pin) 18 | available_pins.append(pin) 19 | except: 20 | # If we get an error, the pin is in use 21 | pins_in_use.append(pin) 22 | 23 | # Clean up all GPIO settings 24 | GPIO.cleanup() 25 | 26 | return pins_in_use, available_pins 27 | 28 | def print_pin_status(): 29 | in_use, available = check_all_pins() 30 | 31 | print("\nPins currently in use:") 32 | if in_use: 33 | for pin in sorted(in_use): 34 | print(f"GPIO {pin} (BCM)") 35 | else: 36 | print("None") 37 | 
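# Note: check_all_pins() treats any GPIO.setup() error as "in use".
# This is a heuristic -- pins claimed by kernel overlays (I2C, SPI, UART)
# can still probe as available, which is why the reserved-pin list is
# printed below.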
38 | print("\nAvailable pins:") 39 | if available: 40 | for pin in sorted(available): 41 | print(f"GPIO {pin} (BCM)") 42 | else: 43 | print("None") 44 | 45 | print("\nNote: Some pins might be reserved for special functions (I2C, SPI, UART)") 46 | print("Common reserved pins:") 47 | print("- GPIO 0, 1: Reserved for ID EEPROM") 48 | print("- GPIO 2, 3: Reserved for I2C1 (SDA, SCL)") 49 | print("- GPIO 14, 15: Reserved for UART (TXD, RXD)") 50 | print("- GPIO 8, 9, 10, 11: Reserved for SPI0") 51 | 52 | if __name__ == "__main__": 53 | try: 54 | print_pin_status() 55 | except Exception as e: 56 | print(f"An error occurred: {str(e)}") 57 | print("Note: This script requires root privileges to run") 58 | print("Try running with: sudo python3 check_gpio.py") 59 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/GPIOS/led_test.py: -------------------------------------------------------------------------------- 1 | from gpiozero import LED 2 | from time import sleep 3 | led = LED(13) 4 | while True: 5 | led.on() 6 | sleep(1) 7 | led.off() 8 | sleep(1) 9 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/GPIOS/led_test_2.py: -------------------------------------------------------------------------------- 1 | from gpiozero import LED 2 | from signal import pause 3 | red = LED(13) 4 | red.blink() 5 | pause() 6 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/Notebooks/Physical_Computing_Raspi.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 3, 6 | "id": "38b56598-eebf-4730-af9c-401547de6057", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import time\n", 11 | "import datetime\n", 12 | "import board" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 7, 18 | "id": "ef293f21-d4cc-4b46-abcd-7cd6477be406", 19 | "metadata": {}, 20 | "outputs": [ 21 | { 22 | "name": "stdout", 23 | "output_type": "stream", 24 | "text": [ 25 | "Unable to set line 16 to input\n" 26 | ] 27 | } 28 | ], 29 | "source": [ 30 | "# Adafruit DHT library (Temperature/Humidity)\n", 31 | "import adafruit_dht\n", 32 | "DHT22Sensor = adafruit_dht.DHT22(board.D16)" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 3, 38 | "id": "720d1827-898e-4f14-9de4-d23cdeb56743", 39 | "metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "# BMP library (Pressure/Temperature)\n", 43 | "import adafruit_bmp280\n", 44 | "i2c = board.I2C()\n", 45 | "bmp280Sensor = adafruit_bmp280.Adafruit_BMP280_I2C(i2c, address = 0x76)\n", 46 | "bmp280Sensor.sea_level_pressure = 1013.25" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 4, 52 | "id": "a2c90c53-fac1-44d8-b5cd-6c0629cf983f", 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [ 56 | "# LEDs\n", 57 | "from gpiozero import LED\n", 58 | "\n", 59 | "ledRed = LED(13)\n", 60 | "ledYlw = LED(19)\n", 61 | "ledGrn = LED(26)\n", 62 | "\n", 63 | "ledRed.off()\n", 64 | "ledYlw.off()\n", 65 | "ledGrn.off()\n", 66 | "\n", 67 | "# Push-Button\n", 68 | "from gpiozero import Button\n", 69 | "button = Button(20)" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "id": "a7e18341-d3d8-4f95-87b7-e4a6c3447a42", 75 | "metadata": {}, 76 | "source": [ 77 | "### Getting GPIOs Status" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 5, 83 | "id": 
"b64d0ccc-1263-42d0-a667-36ed4e0574c9", 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [ 87 | "# Get GPIO status data \n", 88 | "def getGpioStatus():\n", 89 | " global timeString\n", 90 | " global buttonSts\n", 91 | " global ledRedSts\n", 92 | " global ledYlwSts\n", 93 | " global ledGrnSts\n", 94 | "\n", 95 | " # Get time of reading\n", 96 | " now = datetime.datetime.now()\n", 97 | " timeString = now.strftime(\"%Y-%m-%d %H:%M\")\n", 98 | " \n", 99 | " # Read GPIO Status\n", 100 | " buttonSts = button.is_pressed\n", 101 | " ledRedSts = ledRed.is_lit\n", 102 | " ledYlwSts = ledYlw.is_lit\n", 103 | " ledGrnSts = ledGrn.is_lit " 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": 6, 109 | "id": "58278040-24d9-4277-9fdc-db35eebcff80", 110 | "metadata": {}, 111 | "outputs": [], 112 | "source": [ 113 | "# Print GPIO status data \n", 114 | "def PrintGpioStatus():\n", 115 | " print (\"Local Station Time: \", timeString)\n", 116 | " print (\"Led Red Status: \", ledRedSts)\n", 117 | " print (\"Led Yellow Status: \", ledYlwSts)\n", 118 | " print (\"Led Green Status: \", ledGrnSts)\n", 119 | " print (\"Push-Button Status: \", buttonSts)" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": 7, 125 | "id": "3da731d4-93d8-45e6-b164-7c375ed71e7d", 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "ledRed.on()\n", 130 | "ledYlw.on()\n", 131 | "ledGrn.on()" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 9, 137 | "id": "c58b7c20-308e-448c-a3b1-502fcd089820", 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "name": "stdout", 142 | "output_type": "stream", 143 | "text": [ 144 | "Local Station Time: 2025-02-07 14:34\n", 145 | "Led Red Status: True\n", 146 | "Led Yellow Status: True\n", 147 | "Led Green Status: True\n", 148 | "Push-Button Status: True\n" 149 | ] 150 | } 151 | ], 152 | "source": [ 153 | "getGpioStatus()\n", 154 | "PrintGpioStatus()" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 10, 160 | "id": "4e4bf18e-bd0a-4e15-bd0a-59e92ba8a2e1", 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "ledRed.off()\n", 165 | "ledYlw.off()\n", 166 | "ledGrn.off()" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 11, 172 | "id": "50a567c5-8533-49f9-81a2-f8b0097042d7", 173 | "metadata": {}, 174 | "outputs": [ 175 | { 176 | "name": "stdout", 177 | "output_type": "stream", 178 | "text": [ 179 | "Local Station Time: 2025-02-07 14:34\n", 180 | "Led Red Status: False\n", 181 | "Led Yellow Status: False\n", 182 | "Led Green Status: False\n", 183 | "Push-Button Status: False\n" 184 | ] 185 | } 186 | ], 187 | "source": [ 188 | "getGpioStatus()\n", 189 | "PrintGpioStatus()" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": 12, 195 | "id": "27b875a0-011d-45f0-9338-7c60145d0e6f", 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [ 199 | "# Acting on GPIOs and printing Status\n", 200 | "def controlLeds(r, y, g):\n", 201 | " if (r):\n", 202 | " ledRed.on()\n", 203 | " else:\n", 204 | " ledRed.off() \n", 205 | " if (y):\n", 206 | " ledYlw.on()\n", 207 | " else:\n", 208 | " ledYlw.off() \n", 209 | " if (g):\n", 210 | " ledGrn.on()\n", 211 | " else:\n", 212 | " ledGrn.off() \n", 213 | " \n", 214 | " getGpioStatus()\n", 215 | " PrintGpioStatus()" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 17, 221 | "id": "104aee8b-6e43-4a5e-8fc2-065ad10b286f", 222 | "metadata": {}, 223 | "outputs": [ 
224 | { 225 | "name": "stdout", 226 | "output_type": "stream", 227 | "text": [ 228 | "Local Station Time: 2025-02-07 14:35\n", 229 | "Led Red Status: False\n", 230 | "Led Yellow Status: False\n", 231 | "Led Green Status: False\n", 232 | "Push-Button Status: False\n" 233 | ] 234 | } 235 | ], 236 | "source": [ 237 | "controlLeds(0, 0, 0)" 238 | ] 239 | }, 240 | { 241 | "cell_type": "markdown", 242 | "id": "88f9e0d0-354a-46f2-abfb-1276ee272633", 243 | "metadata": {}, 244 | "source": [ 245 | "## Getting and displaying Sensor Data" 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": 18, 251 | "id": "bc1418e2-ce33-47c1-9dd0-c55b3eae6b47", 252 | "metadata": {}, 253 | "outputs": [], 254 | "source": [ 255 | "# Read data from BMP280\n", 256 | "def bmp280GetData(real_altitude):\n", 257 | " \n", 258 | " temp = bmp280Sensor.temperature\n", 259 | " pres = bmp280Sensor.pressure\n", 260 | " alt = bmp280Sensor.altitude\n", 261 | " presSeaLevel = pres / pow(1.0 - real_altitude/44330.0, 5.255) \n", 262 | " \n", 263 | " temp = round (temp, 1)\n", 264 | " pres = round (pres, 2) # absolute pressure in mbar\n", 265 | " alt = round (alt)\n", 266 | " presSeaLevel = round (presSeaLevel, 2) # absolute pressure in mbar\n", 267 | " \n", 268 | " return temp, pres, alt, presSeaLevel" 269 | ] 270 | }, 271 | { 272 | "cell_type": "code", 273 | "execution_count": 19, 274 | "id": "3f5eb8b7-6fdb-454f-9257-0871970f9675", 275 | "metadata": {}, 276 | "outputs": [ 277 | { 278 | "data": { 279 | "text/plain": [ 280 | "(29.7, 907.54, 920, 1018.2)" 281 | ] 282 | }, 283 | "execution_count": 19, 284 | "metadata": {}, 285 | "output_type": "execute_result" 286 | } 287 | ], 288 | "source": [ 289 | "bmp280GetData(960)" 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": 20, 295 | "id": "ef8c687d-0f3d-494b-96e4-038d4ab8cb84", 296 | "metadata": {}, 297 | "outputs": [], 298 | "source": [ 299 | "# Get data (from local sensors)\n", 300 | "def getSensorData(altReal=0):\n", 301 | " global timeString\n", 302 | " global humExt\n", 303 | " global tempLab\n", 304 | " global tempExt\n", 305 | " global presSL\n", 306 | " global altLab\n", 307 | " global presAbs\n", 308 | " global buttonSts\n", 309 | "\t\n", 310 | " # Get time of reading\n", 311 | " now = datetime.datetime.now()\n", 312 | " timeString = now.strftime(\"%Y-%m-%d %H:%M\")\n", 313 | " \n", 314 | " tempLab, presAbs, altLab, presSL = bmp280GetData(altReal) \n", 315 | " \n", 316 | " tempDHT = DHT22Sensor.temperature\n", 317 | " humDHT = DHT22Sensor.humidity\n", 318 | "\t\n", 319 | " if humDHT is not None and tempDHT is not None:\n", 320 | " tempExt = round (tempDHT)\n", 321 | " humExt = round (humDHT)" 322 | ] 323 | }, 324 | { 325 | "cell_type": "code", 326 | "execution_count": 21, 327 | "id": "b6bb1800-1ea8-4c3e-aea7-93fd2289df83", 328 | "metadata": {}, 329 | "outputs": [ 330 | { 331 | "data": { 332 | "text/plain": [ 333 | "(29.1, 36.5)" 334 | ] 335 | }, 336 | "execution_count": 21, 337 | "metadata": {}, 338 | "output_type": "execute_result" 339 | } 340 | ], 341 | "source": [ 342 | "DHT22Sensor.temperature, DHT22Sensor.humidity" 343 | ] 344 | }, 345 | { 346 | "cell_type": "code", 347 | "execution_count": 22, 348 | "id": "3dff1b73-41c6-476e-bff5-9601150d4442", 349 | "metadata": {}, 350 | "outputs": [], 351 | "source": [ 352 | "# Display important data on-screen\n", 353 | "def printData():\n", 354 | " print (\"Local Station Time: \", timeString)\n", 355 | " print (\"External Air Temperature (DHT): \", tempExt, \"oC\")\n", 356 | " print 
(\"External Air Humidity (DHT): \", humExt, \"%\")\n", 357 | " print (\"Station Air Temperature (BMP): \", tempLab, \"oC\")\n", 358 | " print (\"Sea Level Air Pressure: \", presSL, \"mBar\")\n", 359 | " print (\"Absolute Station Air Pressure: \", presAbs, \"mBar\")\n", 360 | " print (\"Station Measured Altitude: \", altLab, \"m\")" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": 23, 366 | "id": "1632d9d6-79d4-43a6-ae46-740f092faeba", 367 | "metadata": {}, 368 | "outputs": [ 369 | { 370 | "name": "stdout", 371 | "output_type": "stream", 372 | "text": [ 373 | "Local Station Time: 2025-02-07 14:37\n", 374 | "External Air Temperature (DHT): 30 oC\n", 375 | "External Air Humidity (DHT): 36 %\n", 376 | "Station Air Temperature (BMP): 29.6 oC\n", 377 | "Sea Level Air Pressure: 1018.15 mBar\n", 378 | "Absolute Station Air Pressure: 907.5 mBar\n", 379 | "Station Measured Altitude: 920 m\n" 380 | ] 381 | } 382 | ], 383 | "source": [ 384 | "real_altitude = 960 # real altitude of where the BMP280 is installed\n", 385 | "getSensorData(real_altitude)\n", 386 | "printData()" 387 | ] 388 | }, 389 | { 390 | "cell_type": "markdown", 391 | "id": "0802aafd-e4f3-4db7-b821-67925a57c81f", 392 | "metadata": {}, 393 | "source": [ 394 | "## Widgets" 395 | ] 396 | }, 397 | { 398 | "cell_type": "code", 399 | "execution_count": 24, 400 | "id": "3eab97b5-1b37-42ee-bc9a-bef4e5f2bd5f", 401 | "metadata": {}, 402 | "outputs": [], 403 | "source": [ 404 | "# widget library\n", 405 | "from ipywidgets import interactive\n", 406 | "import ipywidgets as widgets\n", 407 | "from IPython.display import display" 408 | ] 409 | }, 410 | { 411 | "cell_type": "code", 412 | "execution_count": 25, 413 | "id": "83f1fb95-b06f-481e-aea8-02ce7a712870", 414 | "metadata": {}, 415 | "outputs": [ 416 | { 417 | "data": { 418 | "application/vnd.jupyter.widget-view+json": { 419 | "model_id": "f9e4f2c46066476fa8bf93ed971ea3d0", 420 | "version_major": 2, 421 | "version_minor": 0 422 | }, 423 | "text/plain": [ 424 | "interactive(children=(IntSlider(value=0, description='r', max=1), IntSlider(value=0, description='y', max=1), …" 425 | ] 426 | }, 427 | "metadata": {}, 428 | "output_type": "display_data" 429 | } 430 | ], 431 | "source": [ 432 | "f = interactive(controlLeds, r=(0,1,1), y=(0,1,1), g=(0,1,1))\n", 433 | "display(f)" 434 | ] 435 | }, 436 | { 437 | "cell_type": "code", 438 | "execution_count": null, 439 | "id": "d3337aa6-1f8b-443c-b4d6-eacc21025845", 440 | "metadata": {}, 441 | "outputs": [], 442 | "source": [] 443 | }, 444 | { 445 | "cell_type": "code", 446 | "execution_count": null, 447 | "id": "ba2cd653-8f9b-4b5f-89a5-974edd66a8d9", 448 | "metadata": {}, 449 | "outputs": [], 450 | "source": [] 451 | } 452 | ], 453 | "metadata": { 454 | "kernelspec": { 455 | "display_name": "Python 3 (ipykernel)", 456 | "language": "python", 457 | "name": "python3" 458 | }, 459 | "language_info": { 460 | "codemirror_mode": { 461 | "name": "ipython", 462 | "version": 3 463 | }, 464 | "file_extension": ".py", 465 | "mimetype": "text/x-python", 466 | "name": "python", 467 | "nbconvert_exporter": "python", 468 | "pygments_lexer": "ipython3", 469 | "version": "3.11.2" 470 | } 471 | }, 472 | "nbformat": 4, 473 | "nbformat_minor": 5 474 | } 475 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/Notebooks/SLM_reaction_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 
| "id": "7246b132-9e7d-4352-a46f-715bd6204f60", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import time\n", 11 | "import datetime\n", 12 | "import board\n", 13 | "import adafruit_dht\n", 14 | "import adafruit_bmp280\n", 15 | "from gpiozero import LED, Button\n", 16 | "from transformers import pipeline" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "id": "0414a056-5151-43a4-adb7-38fd2e7608a7", 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "# Initialize sensors and actuators\n", 27 | "DHT22Sensor = adafruit_dht.DHT22(board.D16)\n", 28 | "i2c = board.I2C()\n", 29 | "bmp280Sensor = adafruit_bmp280.Adafruit_BMP280_I2C(i2c, address=0x76)\n", 30 | "bmp280Sensor.sea_level_pressure = 1013.25\n", 31 | "\n", 32 | "# Initialize LEDs and Button\n", 33 | "ledRed = LED(13)\n", 34 | "ledYlw = LED(19)\n", 35 | "ledGrn = LED(26)\n", 36 | "button = Button(20)" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "id": "a58bf7bf-fdc5-4ff9-9070-75acd31ae8f1", 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "# Initialize the SLM pipeline\n", 47 | "# We're using a small model suitable for Raspberry Pi\n", 48 | "generator = pipeline('text-generation', \n", 49 | " model='TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T',\n", 50 | " device='cpu')" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "id": "d23b8eb2-f9a3-441d-b8cf-a2eff00c76f4", 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "def get_sensor_data():\n", 61 | " \"\"\"Get current readings from all sensors\"\"\"\n", 62 | " try:\n", 63 | " temp_dht = DHT22Sensor.temperature\n", 64 | " humidity = DHT22Sensor.humidity\n", 65 | " temp_bmp = bmp280Sensor.temperature\n", 66 | " pressure = bmp280Sensor.pressure\n", 67 | " \n", 68 | " return {\n", 69 | " 'temperature_dht': round(temp_dht, 1) if temp_dht else None,\n", 70 | " 'humidity': round(humidity, 1) if humidity else None,\n", 71 | " 'temperature_bmp': round(temp_bmp, 1),\n", 72 | " 'pressure': round(pressure, 1)\n", 73 | " }\n", 74 | " except RuntimeError:\n", 75 | " return None\n", 76 | "\n", 77 | "def control_leds(red=False, yellow=False, green=False):\n", 78 | " \"\"\"Control LED states\"\"\"\n", 79 | " ledRed.value = red\n", 80 | " ledYlw.value = yellow\n", 81 | " ledGrn.value = green" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "id": "efa710e6-a9d5-4a49-b2c7-e7bdf308c3ca", 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "def generate_response(sensor_data):\n", 92 | " \"\"\"Generate response based on sensor data using SLM\"\"\"\n", 93 | " if not sensor_data:\n", 94 | " return \"Unable to read sensor data\"\n", 95 | " \n", 96 | " prompt = f\"\"\"Based on these sensor readings:\n", 97 | " Temperature: {sensor_data['temperature_dht']}°C\n", 98 | " Humidity: {sensor_data['humidity']}%\n", 99 | " Pressure: {sensor_data['pressure']} hPa\n", 100 | " \n", 101 | " Provide a brief status and recommendation in 2 sentences.\n", 102 | " \"\"\"\n", 103 | " \n", 104 | " # Generate response from SLM\n", 105 | " response = generator(prompt, \n", 106 | " max_length=100,\n", 107 | " num_return_sequences=1,\n", 108 | " temperature=0.7)[0]['generated_text']\n", 109 | " \n", 110 | " return response\n", 111 | "\n", 112 | "def process_conditions(sensor_data):\n", 113 | " \"\"\"Process sensor data and control LEDs based on conditions\"\"\"\n", 114 | " if not sensor_data:\n", 115 | " control_leds(red=True) # Error 
condition\n", 116 | " return\n", 117 | " \n", 118 | " temp = sensor_data['temperature_dht']\n", 119 | " humidity = sensor_data['humidity']\n", 120 | " \n", 121 | " # Example conditions for LED control\n", 122 | " if temp > 30: # Hot\n", 123 | " control_leds(red=True)\n", 124 | " elif humidity > 70: # Humid\n", 125 | " control_leds(yellow=True)\n", 126 | " else: # Normal conditions\n", 127 | " control_leds(green=True)" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": 5, 133 | "id": "cadea267-0134-43e2-a054-8d44548ee28d", 134 | "metadata": {}, 135 | "outputs": [], 136 | "source": [ 137 | "def main_loop():\n", 138 | " \"\"\"Main program loop\"\"\"\n", 139 | " print(\"Starting Physical Computing with SLM Integration...\")\n", 140 | " print(\"Press the button to get a reading and SLM response.\")\n", 141 | " \n", 142 | " try:\n", 143 | " while True:\n", 144 | " if button.is_pressed:\n", 145 | " # Get sensor readings\n", 146 | " sensor_data = get_sensor_data()\n", 147 | " \n", 148 | " # Process conditions and control LEDs\n", 149 | " process_conditions(sensor_data)\n", 150 | " \n", 151 | " if sensor_data:\n", 152 | " # Get SLM response\n", 153 | " response = generate_response(sensor_data)\n", 154 | " \n", 155 | " # Print current status\n", 156 | " print(\"\\nCurrent Readings:\")\n", 157 | " print(f\"Temperature: {sensor_data['temperature_dht']}°C\")\n", 158 | " print(f\"Humidity: {sensor_data['humidity']}%\")\n", 159 | " print(f\"Pressure: {sensor_data['pressure']} hPa\")\n", 160 | " print(\"\\nSLM Response:\")\n", 161 | " print(response)\n", 162 | " \n", 163 | " time.sleep(2) # Debounce and allow time to read\n", 164 | " \n", 165 | " time.sleep(0.1) # Reduce CPU usage\n", 166 | " \n", 167 | " except KeyboardInterrupt:\n", 168 | " print(\"\\nShutting down...\")\n", 169 | " control_leds(False, False, False) # Turn off all LEDs\n", 170 | " " 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": null, 176 | "id": "59629b25-cdd1-49da-b300-cbebc46a3c8d", 177 | "metadata": {}, 178 | "outputs": [ 179 | { 180 | "name": "stdout", 181 | "output_type": "stream", 182 | "text": [ 183 | "Starting Physical Computing with SLM Integration...\n", 184 | "Press the button to get a reading and SLM response.\n" 185 | ] 186 | }, 187 | { 188 | "name": "stderr", 189 | "output_type": "stream", 190 | "text": [ 191 | "Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n", 192 | "/home/mjrovai/.local/lib/python3.11/site-packages/transformers/generation/configuration_utils.py:628: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.7` -- this flag is only used in sample-based generation modes. 
You should set `do_sample=True` or unset `temperature`.\n", 193 | " warnings.warn(\n" 194 | ] 195 | }, 196 | { 197 | "name": "stdout", 198 | "output_type": "stream", 199 | "text": [ 200 | "\n", 201 | "Current Readings:\n", 202 | "Temperature: 26.6°C\n", 203 | "Humidity: 42.0%\n", 204 | "Pressure: 907.4 hPa\n", 205 | "\n", 206 | "SLM Response:\n", 207 | "Based on these sensor readings:\n", 208 | " Temperature: 26.6°C\n", 209 | " Humidity: 42.0%\n", 210 | " Pressure: 907.4 hPa\n", 211 | " \n", 212 | " Provide a brief status and recommendation in 2 sentences.\n", 213 | " \n", 214 | " The temperature is a bit high, but the humidity is low.\n", 215 | " \n", 216 | " The temperature is a bit high, but the humidity is low.\n" 217 | ] 218 | } 219 | ], 220 | "source": [ 221 | "main_loop()" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": null, 227 | "id": "a31ec134-1c26-4a8c-80c5-3d9c0e025004", 228 | "metadata": {}, 229 | "outputs": [], 230 | "source": [ 231 | "if __name__ == \"__main__\":\n", 232 | " main_loop()" 233 | ] 234 | } 235 | ], 236 | "metadata": { 237 | "kernelspec": { 238 | "display_name": "Python 3 (ipykernel)", 239 | "language": "python", 240 | "name": "python3" 241 | }, 242 | "language_info": { 243 | "codemirror_mode": { 244 | "name": "ipython", 245 | "version": 3 246 | }, 247 | "file_extension": ".py", 248 | "mimetype": "text/x-python", 249 | "name": "python", 250 | "nbconvert_exporter": "python", 251 | "pygments_lexer": "ipython3", 252 | "version": "3.11.2" 253 | } 254 | }, 255 | "nbformat": 4, 256 | "nbformat_minor": 5 257 | } 258 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/Notebooks/SLM_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 3, 6 | "id": "63f138d2-2618-4159-b954-16e91eb95f13", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import time\n", 11 | "from transformers import pipeline\n", 12 | "import torch" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 6, 18 | "id": "e952af05-bed1-4b19-af64-35c0449e1e64", 19 | "metadata": {}, 20 | "outputs": [ 21 | { 22 | "name": "stdout", 23 | "output_type": "stream", 24 | "text": [ 25 | "Using device: cpu\n" 26 | ] 27 | } 28 | ], 29 | "source": [ 30 | "# Check if CUDA is available (it won't be on our case, Raspberry Pi)\n", 31 | "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", 32 | "print(f\"Using device: {device}\")" 33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "id": "31f6319d-1a08-4886-b023-a9a2e24fb094", 38 | "metadata": {}, 39 | "source": [ 40 | "## TinyLlama-1.1B" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 7, 46 | "id": "669e8dd1-05b1-4b53-aff0-ff8c97800058", 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | " model='TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T'" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": 8, 56 | "id": "b6f9d44b-103e-475e-b2a0-33d73db84b27", 57 | "metadata": {}, 58 | "outputs": [ 59 | { 60 | "name": "stderr", 61 | "output_type": "stream", 62 | "text": [ 63 | "Device set to use cpu\n" 64 | ] 65 | }, 66 | { 67 | "name": "stdout", 68 | "output_type": "stream", 69 | "text": [ 70 | "Model loading time: 1.28 seconds\n" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "# Load the model and measure loading time\n", 76 | "start_time = time.time()\n", 77 | "generator = 
pipeline('text-generation', \n", 78 | " model=model,\n", 79 | " device=device)\n", 80 | "load_time = time.time() - start_time\n", 81 | "print(f\"Model loading time: {load_time:.2f} seconds\")" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 5, 87 | "id": "fdf067f4-e93f-4f34-8825-5074c9ef214d", 88 | "metadata": {}, 89 | "outputs": [ 90 | { 91 | "name": "stderr", 92 | "output_type": "stream", 93 | "text": [ 94 | "Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n", 95 | "/home/mjrovai/.local/lib/python3.11/site-packages/transformers/generation/configuration_utils.py:628: UserWarning: `do_sample` is set to `False`. However, `temperature` is set to `0.7` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `temperature`.\n", 96 | " warnings.warn(\n" 97 | ] 98 | }, 99 | { 100 | "name": "stdout", 101 | "output_type": "stream", 102 | "text": [ 103 | "\n", 104 | "Test prompt: The weather today is\n", 105 | "Generated response: The weather today is going to be sunny and warm with a high of 80 degrees.\n", 106 | "The weather today is going to be sunny and warm with a high of 80 degrees.\n", 107 | "The weather today is going to be\n", 108 | "Inference time: 199.41 seconds\n" 109 | ] 110 | } 111 | ], 112 | "source": [ 113 | "# Test prompt\n", 114 | "test_prompt = \"The weather today is\"\n", 115 | "\n", 116 | "# Measure inference time\n", 117 | "start_time = time.time()\n", 118 | "response = generator(test_prompt, \n", 119 | " max_length=50,\n", 120 | " num_return_sequences=1,\n", 121 | " temperature=0.7)\n", 122 | "inference_time = time.time() - start_time\n", 123 | "\n", 124 | "print(f\"\\nTest prompt: {test_prompt}\")\n", 125 | "print(f\"Generated response: {response[0]['generated_text']}\")\n", 126 | "print(f\"Inference time: {inference_time:.2f} seconds\")\n", 127 | "\n", 128 | "# Memory usage\n", 129 | "if device == \"cuda\":\n", 130 | " print(f\"\\nGPU Memory allocated: {torch.cuda.memory_allocated()/1024**2:.2f} MB\")\n", 131 | " print(f\"GPU Memory cached: {torch.cuda.memory_reserved()/1024**2:.2f} MB\")" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": null, 137 | "id": "a73e2306-596d-4033-9204-6e2f279dc476", 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [] 141 | } 142 | ], 143 | "metadata": { 144 | "kernelspec": { 145 | "display_name": "Python 3 (ipykernel)", 146 | "language": "python", 147 | "name": "python3" 148 | }, 149 | "language_info": { 150 | "codemirror_mode": { 151 | "name": "ipython", 152 | "version": 3 153 | }, 154 | "file_extension": ".py", 155 | "mimetype": "text/x-python", 156 | "name": "python", 157 | "nbconvert_exporter": "python", 158 | "pygments_lexer": "ipython3", 159 | "version": "3.11.2" 160 | } 161 | }, 162 | "nbformat": 4, 163 | "nbformat_minor": 5 164 | } 165 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/Notebooks/notebook_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "8c5da647-ce71-43f1-8362-0348633021b9", 7 | "metadata": {}, 8 | "outputs": [], 9 | 
"source": [ 10 | "import time\n", 11 | "import board\n", 12 | "import adafruit_dht\n", 13 | "dhtDevice = adafruit_dht.DHT22(board.D16)" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 2, 19 | "id": "6b20ee2a-c611-4442-8697-1d7a1e998c3a", 20 | "metadata": {}, 21 | "outputs": [ 22 | { 23 | "name": "stdout", 24 | "output_type": "stream", 25 | "text": [ 26 | "Temp: 85.1 F / 29.5 C Humidity: 30.3% \n", 27 | "Temp: 87.4 F / 30.8 C Humidity: 28.9% \n", 28 | "Temp: 87.3 F / 30.7 C Humidity: 28.9% \n", 29 | "\n" 30 | ] 31 | }, 32 | { 33 | "name": "stderr", 34 | "output_type": "stream", 35 | "text": [ 36 | "received SIGINT\n" 37 | ] 38 | }, 39 | { 40 | "ename": "KeyboardInterrupt", 41 | "evalue": "", 42 | "output_type": "error", 43 | "traceback": [ 44 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 45 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 46 | "Cell \u001b[0;32mIn[2], line 22\u001b[0m\n\u001b[1;32m 19\u001b[0m dhtDevice\u001b[38;5;241m.\u001b[39mexit()\n\u001b[1;32m 20\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m error\n\u001b[0;32m---> 22\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m2.0\u001b[39;49m\u001b[43m)\u001b[49m\n", 47 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 48 | ] 49 | } 50 | ], 51 | "source": [ 52 | "while True:\n", 53 | " try:\n", 54 | " # Print the values to the serial port\n", 55 | " temperature_c = dhtDevice.temperature\n", 56 | " temperature_f = temperature_c * (9 / 5) + 32\n", 57 | " humidity = dhtDevice.humidity\n", 58 | " print(\n", 59 | " \"Temp: {:.1f} F / {:.1f} C Humidity: {}% \".format(\n", 60 | " temperature_f, temperature_c, humidity\n", 61 | " )\n", 62 | " )\n", 63 | "\n", 64 | " except RuntimeError as error:\n", 65 | " # Errors happen fairly often, DHT's are hard to read, just keep going\n", 66 | " print(error.args[0])\n", 67 | " time.sleep(2.0)\n", 68 | " continue\n", 69 | " except Exception as error:\n", 70 | " dhtDevice.exit()\n", 71 | " raise error\n", 72 | "\n", 73 | " time.sleep(2.0)" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "id": "d9e6f7d3-b880-4c3d-8aad-420b49792e28", 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [] 83 | } 84 | ], 85 | "metadata": { 86 | "kernelspec": { 87 | "display_name": "Python 3 (ipykernel)", 88 | "language": "python", 89 | "name": "python3" 90 | }, 91 | "language_info": { 92 | "codemirror_mode": { 93 | "name": "ipython", 94 | "version": 3 95 | }, 96 | "file_extension": ".py", 97 | "mimetype": "text/x-python", 98 | "name": "python", 99 | "nbconvert_exporter": "python", 100 | "pygments_lexer": "ipython3", 101 | "version": "3.11.2" 102 | } 103 | }, 104 | "nbformat": 4, 105 | "nbformat_minor": 5 106 | } 107 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/Sensors/blinka_test.py: -------------------------------------------------------------------------------- 1 | import board 2 | import digitalio 3 | import busio 4 | 5 | print("Hello, blinka!") 6 | 7 | # Try to create a Digital input 8 | pin = digitalio.DigitalInOut(board.D4) 9 | print("Digital IO ok!") 10 | 11 | # Try to create an I2C device 12 | i2c = busio.I2C(board.SCL, board.SDA) 13 | print("I2C ok!") 14 | 15 | # Try to create an SPI device 16 | spi = busio.SPI(board.SCLK, board.MOSI, board.MISO) 17 | print("SPI ok!") 18 | 19 | print("done!") 20 | 
-------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/Sensors/bmp280_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import board 3 | 4 | import adafruit_bmp280 5 | 6 | i2c = board.I2C() 7 | bmp280 = adafruit_bmp280.Adafruit_BMP280_I2C(i2c, address = 0x76) 8 | bmp280.sea_level_pressure = 1013.25 9 | 10 | while True: 11 | print("\nTemperature: %0.1f C" % bmp280.temperature) 12 | print("Pressure: %0.1f hPa" % bmp280.pressure) 13 | print("Altitude = %0.2f meters" % bmp280.altitude) 14 | time.sleep(2) 15 | -------------------------------------------------------------------------------- /PHYSICAL_COMPUTING/Sensors/dht_test.py: -------------------------------------------------------------------------------- 1 | import time 2 | import board 3 | import adafruit_dht 4 | dhtDevice = adafruit_dht.DHT22(board.D16) 5 | 6 | while True: 7 | try: 8 | # Print the values to the serial port 9 | temperature_c = dhtDevice.temperature 10 | temperature_f = temperature_c * (9 / 5) + 32 11 | humidity = dhtDevice.humidity 12 | print( 13 | "Temp: {:.1f} F / {:.1f} C Humidity: {}% ".format( 14 | temperature_f, temperature_c, humidity 15 | ) 16 | ) 17 | 18 | except RuntimeError as error: 19 | # Errors happen fairly often, DHT's are hard to read, just keep going 20 | print(error.args[0]) 21 | time.sleep(2.0) 22 | continue 23 | except Exception as error: 24 | dhtDevice.exit() 25 | raise error 26 | 27 | time.sleep(2.0) 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EdgeAI-with-Raspberry-Pi 2 | DL and GenAI Hands-On with the Raspberry Pi 3 | -------------------------------------------------------------------------------- /SLMs_for_IoT_CONTROL/Raspi-Physical-Computing.fzz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/SLMs_for_IoT_CONTROL/Raspi-Physical-Computing.fzz -------------------------------------------------------------------------------- /SLMs_for_IoT_CONTROL/SLM_IOT_CONTROL.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mjrovai/EdgeML-with-Raspberry-Pi/d4622ccf90fde21f22a297a207405a4d6cb5e527/SLMs_for_IoT_CONTROL/SLM_IOT_CONTROL.pdf -------------------------------------------------------------------------------- /SLMs_for_IoT_CONTROL/monitor.py: -------------------------------------------------------------------------------- 1 | import time 2 | import board 3 | import adafruit_dht 4 | import adafruit_bmp280 5 | from gpiozero import LED, Button 6 | 7 | DHT22Sensor = adafruit_dht.DHT22(board.D16) 8 | i2c = board.I2C() 9 | bmp280Sensor = adafruit_bmp280.Adafruit_BMP280_I2C(i2c, address=0x76) 10 | bmp280Sensor.sea_level_pressure = 1013.25 11 | 12 | ledRed = LED(13) 13 | ledYlw = LED(19) 14 | ledGrn = LED(26) 15 | button = Button(20) 16 | 17 | def collect_data(): 18 | try: 19 | temperature_dht = DHT22Sensor.temperature 20 | humidity = DHT22Sensor.humidity 21 | temperature_bmp = bmp280Sensor.temperature 22 | pressure = bmp280Sensor.pressure 23 | button_pressed = button.is_pressed 24 | return temperature_dht, humidity, temperature_bmp, pressure, button_pressed 25 | except RuntimeError: 26 | return None, None, None, None, None 27 | 28 | def led_status(): 29 | 
ledRedSts = ledRed.is_lit 30 | ledYlwSts = ledYlw.is_lit 31 | ledGrnSts = ledGrn.is_lit 32 | return ledRedSts, ledYlwSts, ledGrnSts 33 | 34 | 35 | def control_leds(red, yellow, green): 36 | ledRed.on() if red else ledRed.off() 37 | ledYlw.on() if yellow else ledYlw.off() 38 | ledGrn.on() if green else ledGrn.off() 39 | 40 | 41 | if __name__ == "__main__": 42 | while True: 43 | ledRedSts, ledYlwSts, ledGrnSts = led_status() 44 | temp_dht, hum, temp_bmp, press, button_state = collect_data() 45 | 46 | #control_leds(True, True, True) 47 | 48 | if all(v is not None for v in [temp_dht, hum, temp_bmp, press]): 49 | print(f"\nMonitor Data") 50 | print(f"DHT22 Temp: {temp_dht:.1f}°C, Humidity: {hum:.1f}%") 51 | print(f"BMP280 Temp: {temp_bmp:.1f}°C, Pressure: {press:.2f}hPa") 52 | print(f"Button {'pressed' if button_state else 'not pressed'}") 53 | print(f"Red LED {'is on' if ledRedSts else 'is off'}") 54 | print(f"Yellow LED {'is on' if ledYlwSts else 'is off'}") 55 | print(f"Green LED {'is on' if ledGrnSts else 'is off'}") 56 | 57 | 58 | time.sleep(2) -------------------------------------------------------------------------------- /SLMs_for_IoT_CONTROL/monitor_log.py: -------------------------------------------------------------------------------- 1 | # monitor_log.py 2 | import csv 3 | import os 4 | from datetime import datetime 5 | import pandas as pd 6 | from threading import Event, Thread 7 | import time 8 | from monitor import collect_data, led_status 9 | 10 | # Global variables for logging 11 | LOG_FILE = 'system_log.csv' 12 | stop_logging = Event() 13 | 14 | def setup_log_file(): 15 | """Create or verify log file with headers""" 16 | headers = ['timestamp', 'temp_dht', 'humidity', 'temp_bmp', 'pressure', 17 | 'button_state', 'led_red', 'led_yellow', 'led_green', 'command'] 18 | 19 | if not os.path.exists(LOG_FILE): 20 | with open(LOG_FILE, 'w', newline='') as f: 21 | writer = csv.writer(f) 22 | writer.writerow(headers) 23 | 24 | def log_data(timestamp, sensors, leds, command=""): 25 | """Log system data to CSV file""" 26 | temp_dht, hum, temp_bmp, press, button = sensors 27 | red, yellow, green = leds 28 | 29 | row = [ 30 | timestamp, 31 | f"{temp_dht:.1f}" if temp_dht is not None else "NA", 32 | f"{hum:.1f}" if hum is not None else "NA", 33 | f"{temp_bmp:.1f}" if temp_bmp is not None else "NA", 34 | f"{press:.1f}" if press is not None else "NA", 35 | "1" if button else "0", 36 | "1" if red else "0", 37 | "1" if yellow else "0", 38 | "1" if green else "0", 39 | command 40 | ] 41 | 42 | with open(LOG_FILE, 'a', newline='') as f: 43 | writer = csv.writer(f) 44 | writer.writerow(row) 45 | 46 | def automatic_logging(): 47 | """Background thread for automatic logging every minute""" 48 | while not stop_logging.is_set(): 49 | try: 50 | sensors = collect_data() 51 | leds = led_status() 52 | if any(v is None for v in sensors): 53 | continue 54 | 55 | timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') 56 | log_data(timestamp, sensors, leds) 57 | 58 | # Wait for one minute or until stop signal 59 | stop_logging.wait(60) 60 | 61 | except Exception as e: 62 | print(f"Logging error: {e}") 63 | time.sleep(60) 64 | 65 | def count_state_changes(series): 66 | """Count actual state changes in a binary series""" 67 | # Convert to int and get the changes 68 | series = series.astype(int) 69 | changes = 0 70 | last_state = series.iloc[0] 71 | 72 | # Count each time the state changes 73 | for state in series[1:]: 74 | if state != last_state: 75 | changes += 1 76 | last_state = state 77 | 78 | return 
changes 79 | 80 | def calculate_trend(series): 81 | """Calculate trend with improved statistical analysis""" 82 | # Convert to numeric and handle NaN values 83 | series = pd.to_numeric(series, errors='coerce') 84 | series = series.dropna() 85 | 86 | if len(series) < 2: 87 | return 0.0, "insufficient data" 88 | 89 | # Calculate simple moving average to smooth noise 90 | window = min(5, len(series)) 91 | smoothed = series.rolling(window=window, center=True).mean() 92 | 93 | # Calculate overall trend 94 | total_change = smoothed.iloc[-1] - smoothed.iloc[0] 95 | time_periods = len(smoothed) - 1 96 | 97 | if time_periods == 0: 98 | return 0.0, "stable" 99 | 100 | trend_per_period = total_change / time_periods 101 | 102 | # Determine trend direction 103 | if abs(trend_per_period) < 0.1: # Threshold for "stable" 104 | direction = "stable" 105 | else: 106 | direction = "increasing" if trend_per_period > 0 else "decreasing" 107 | 108 | return trend_per_period, direction 109 | 110 | def analyze_log_data(): 111 | """Analyze log data and return statistics""" 112 | try: 113 | df = pd.read_csv(LOG_FILE) 114 | df['timestamp'] = pd.to_datetime(df['timestamp']) 115 | 116 | # Convert numeric columns 117 | numeric_columns = ['temp_dht', 'humidity', 'temp_bmp', 'pressure'] 118 | for col in numeric_columns: 119 | df[col] = pd.to_numeric(df[col], errors='coerce') 120 | 121 | # Calculate trends using rolling mean to smooth out noise 122 | window = min(5, len(df)) 123 | if len(df) >= 2: # Need at least 2 points for trend 124 | temp_dht_trend = df['temp_dht'].diff().mean() 125 | temp_bmp_trend = df['temp_bmp'].diff().mean() 126 | humidity_trend = df['humidity'].diff().mean() 127 | pressure_trend = df['pressure'].diff().mean() 128 | else: 129 | temp_dht_trend = temp_bmp_trend = humidity_trend = pressure_trend = 0.0 130 | 131 | # Calculate statistics 132 | stats = { 133 | 'temp_dht_trend': temp_dht_trend if not pd.isna(temp_dht_trend) else 0.0, 134 | 'temp_bmp_trend': temp_bmp_trend if not pd.isna(temp_bmp_trend) else 0.0, 135 | 'humidity_trend': humidity_trend if not pd.isna(humidity_trend) else 0.0, 136 | 'pressure_trend': pressure_trend if not pd.isna(pressure_trend) else 0.0, 137 | 'avg_temp_dht': df['temp_dht'].mean(), 138 | 'avg_humidity': df['humidity'].mean(), 139 | 'avg_temp_bmp': df['temp_bmp'].mean(), 140 | 'avg_pressure': df['pressure'].mean(), 141 | 'led_red_changes': count_state_changes(df['led_red']), 142 | 'led_yellow_changes': count_state_changes(df['led_yellow']), 143 | 'led_green_changes': count_state_changes(df['led_green']), 144 | 'button_changes': count_state_changes(df['button_state']), 145 | 'recent_data': df.tail(1) 146 | } 147 | 148 | return stats 149 | 150 | except Exception as e: 151 | print(f"Error analyzing log data: {e}") 152 | return None 153 | 154 | def get_log_summary(): 155 | """Get a formatted summary of log data for SLM prompts""" 156 | stats = analyze_log_data() 157 | if not stats: 158 | return "Error: Unable to analyze log data" 159 | 160 | # Add trend direction indicators 161 | def get_trend_indicator(value): 162 | if abs(value) < 0.01: # threshold for "stable" 163 | return "stable" 164 | return "increasing" if value > 0 else "decreasing" 165 | 166 | temp_dht_direction = get_trend_indicator(stats['temp_dht_trend']) 167 | temp_bmp_direction = get_trend_indicator(stats['temp_bmp_trend']) 168 | humidity_direction = get_trend_indicator(stats['humidity_trend']) 169 | pressure_direction = get_trend_indicator(stats['pressure_trend']) 170 | 171 | summary = f""" 172 | Recent 
174 | - Temperature (DHT22): {stats['temp_dht_trend']:.3f}°C per interval ({temp_dht_direction})
175 | - Temperature (BMP280): {stats['temp_bmp_trend']:.3f}°C per interval ({temp_bmp_direction})
176 | - Humidity: {stats['humidity_trend']:.3f}% per interval ({humidity_direction})
177 | - Pressure: {stats['pressure_trend']:.3f}hPa per interval ({pressure_direction})
178 | 
179 | Averages:
180 | - Average Temperature (DHT22): {stats['avg_temp_dht']:.1f}°C
181 | - Average Temperature (BMP280): {stats['avg_temp_bmp']:.1f}°C
182 | - Average Humidity: {stats['avg_humidity']:.1f}%
183 | - Average Pressure: {stats['avg_pressure']:.1f}hPa
184 | 
185 | LED and Button Activity (transitions):
186 | - Red LED changes: {stats['led_red_changes']}
187 | - Yellow LED changes: {stats['led_yellow_changes']}
188 | - Green LED changes: {stats['led_green_changes']}
189 | - Button state changes: {stats['button_changes']}
190 | 
191 | Most recent values:
192 | {stats['recent_data'][['timestamp', 'temp_dht', 'humidity', 'temp_bmp', 'pressure']].to_string()}
193 | """
194 | 
195 |     return summary
196 | 
197 | if __name__ == "__main__":
198 |     # Setup the log file if it doesn't exist
199 |     setup_log_file()
200 | 
201 |     # Start the automatic logging in a separate thread
202 |     logging_thread = Thread(target=automatic_logging, daemon=True)
203 |     logging_thread.start()
204 | 
205 |     print("Starting log summary test (Press Ctrl+C to stop)")
206 | 
207 |     try:
208 |         while True:
209 |             summary = get_log_summary()
210 |             print("\nLog Summary:")
211 |             print(summary)
212 |             print("="*50)
213 | 
214 |             time.sleep(2)
215 | 
216 |     except KeyboardInterrupt:
217 |         print("\nStopping test...")
218 |         stop_logging.set()
219 |     finally:
220 |         stop_logging.set()
--------------------------------------------------------------------------------
/SLMs_for_IoT_CONTROL/slm_basic_analysis.py:
--------------------------------------------------------------------------------
1 | import ollama
2 | from monitor import collect_data, led_status
3 | 
4 | ledRedSts, ledYlwSts, ledGrnSts = led_status()
5 | temp_dht, hum, temp_bmp, press, button_state = collect_data()
6 | 
7 | prompt = f"""
8 | You are an experienced environmental scientist.
9 | Analyze the information received from an IoT system:
10 | 
11 | DHT22 Temp: {temp_dht:.1f}°C and Humidity: {hum:.1f}%
12 | BMP280 Temp: {temp_bmp:.1f}°C and Pressure: {press:.2f}hPa
13 | Button {"pressed" if button_state else "not pressed"}
14 | Red LED {"is on" if ledRedSts else "is off"}
15 | Yellow LED {"is on" if ledYlwSts else "is off"}
16 | Green LED {"is on" if ledGrnSts else "is off"}
17 | 
18 | Where:
19 | - The button, when not pressed, indicates normal operation
20 | - The button, when pressed, indicates an emergency
21 | - The Red LED, when on, indicates a problem/emergency.
22 | - The Yellow LED, when on, indicates a warning situation.
23 | - The Green LED, when on, indicates the system is OK.
24 | 
25 | If the temperature is over 20°C, it means a warning situation.
26 | 
27 | You should answer only with: "Activate Red LED" or "Activate Yellow LED" or "Activate Green LED"
28 | 
29 | """
30 | 
31 | MODEL = 'llama3.2:1b'
32 | PROMPT = prompt
33 | 
34 | response = ollama.generate(
35 |     model=MODEL,
36 |     prompt=PROMPT
37 | )
38 | 
39 | print(f"\nSmart IoT Analyser using {MODEL} model\n")
40 | 
41 | print("SYSTEM REAL DATA")
42 | print(f" - DHT22 ==> Temp: {temp_dht:.1f}°C, Humidity: {hum:.1f}%")
43 | print(f" - BMP280 => Temp: {temp_bmp:.1f}°C, Pressure: {press:.2f}hPa")
44 | print(f" - Button {'pressed' if button_state else 'not pressed'}")
45 | print(f" - Red LED {'is on' if ledRedSts else 'is off'}")
46 | print(f" - Yellow LED {'is on' if ledYlwSts else 'is off'}")
47 | print(f" - Green LED {'is on' if ledGrnSts else 'is off'}")
48 | 
49 | print(f"\n>> {MODEL} Response: {response['response']}")
50 | 
51 | 
--------------------------------------------------------------------------------
/SLMs_for_IoT_CONTROL/slm_basic_analysis_action.py:
--------------------------------------------------------------------------------
1 | import ollama
2 | from monitor import collect_data, led_status, control_leds
3 | import time
4 | 
5 | # Available models
6 | MODELS = {
7 |     1: ('deepseek-r1:1.5b', 'DeepSeek R1 1.5B'),
8 |     2: ('llama3.2:1b', 'Llama 3.2 1B'),
9 |     3: ('llama3.2:3b', 'Llama 3.2 3B'),
10 |     4: ('phi3:latest', 'Phi-3'),
11 |     5: ('gemma:2b', 'Gemma 2B'),
12 | }
13 | 
14 | def parse_llm_response(response_text):
15 |     """Parse the LLM response to extract LED control instructions."""
16 |     response_lower = response_text.lower()
17 |     red_led = 'activate red led' in response_lower
18 |     yellow_led = 'activate yellow led' in response_lower
19 |     green_led = 'activate green led' in response_lower
20 |     return (red_led, yellow_led, green_led)
21 | 
22 | def get_user_input():
23 |     """Get user input for model selection and temperature threshold"""
24 |     print("\nAvailable Models:")
25 |     for num, (_, name) in MODELS.items():
26 |         print(f"{num}. {name}")
{name}") 27 | 28 | while True: 29 | try: 30 | model_num = int(input("\nSelect model (1-4): ")) 31 | if model_num in MODELS: 32 | break 33 | print("Please select a number between 1 and 4.") 34 | except ValueError: 35 | print("Please enter a valid number.") 36 | 37 | while True: 38 | try: 39 | temp_threshold = float(input("Enter temperature threshold (°C): ")) 40 | break 41 | except ValueError: 42 | print("Please enter a valid number for temperature threshold.") 43 | 44 | return MODELS[model_num][0], MODELS[model_num][1], temp_threshold 45 | 46 | def monitor_system(model, model_name, temp_threshold): 47 | 48 | """Monitor system continuously""" 49 | print(f"\nStarting monitoring with {model_name}") 50 | print(f"Temperature threshold: {temp_threshold}°C") 51 | print("Press Ctrl+C to stop monitoring\n") 52 | 53 | while True: 54 | try: 55 | # Collect sensor data 56 | temp_dht, hum, temp_bmp, press, button_state = collect_data() 57 | 58 | if any(v is None for v in [temp_dht, hum, temp_bmp, press]): 59 | print("Error: Failed to read sensor data") 60 | time.sleep(2) 61 | continue 62 | 63 | prompt = f""" 64 | 65 | You are monitoring an IoT system which is showing the following sensor status: 66 | - DHT22 Temp: {temp_dht:.1f}°C and Humidity: {hum:.1f}% 67 | - BMP280 Temp: {temp_bmp:.1f}°C and Pressure: {press:.2f}hPa 68 | - Button {"pressed" if button_state else "not pressed"} 69 | 70 | Based on the Rules: 71 | - If system is working in normal conditions → Activate Green LED 72 | - If DHT22 Temp or BMP280 Temp are greater than Temperature 73 | Threshold ({temp_threshold}°C) → Activate Yellow LED 74 | - If Button pressed, it is an emergency → Activate Red LED 75 | 76 | You should provide a brief answer only with: "Activate Red LED" or "Activate Yellow LED" 77 | or "Activate Green LED" 78 | 79 | """ 80 | 81 | 82 | # Format prompt with current data 83 | current_prompt = prompt.format( 84 | temp_dht=temp_dht, 85 | hum=hum, 86 | temp_bmp=temp_bmp, 87 | press=press, 88 | button_state="pressed" if button_state else "not pressed" 89 | ) 90 | 91 | # Get SLM response 92 | response = ollama.generate( 93 | model=model, 94 | prompt=current_prompt 95 | ) 96 | 97 | # Parse response and control LEDs 98 | red, yellow, green = parse_llm_response(response['response']) 99 | control_leds(red, yellow, green) 100 | 101 | # Print status 102 | print("\n" + "="*50) 103 | print(f"Time: {time.strftime('%H:%M:%S')}") 104 | print(f"DHT22: {temp_dht:.1f}°C, {hum:.1f}%") 105 | print(f"BMP280: {temp_bmp:.1f}°C, {press:.1f}hPa") 106 | print(f"Button: {'pressed' if button_state else 'not pressed'}") 107 | print(f"SLM Response: {response['response'].strip()}") 108 | print(f"LED Status: R={'ON' if red else 'off'}, " 109 | f"Y={'ON' if yellow else 'off'}, " 110 | f"G={'ON' if green else 'off'}") 111 | 112 | time.sleep(2) 113 | 114 | except KeyboardInterrupt: 115 | print("\nMonitoring stopped by user") 116 | control_leds(False, False, False) # Turn off all LEDs 117 | break 118 | except Exception as e: 119 | print(f"\nError occurred: {str(e)}") 120 | time.sleep(2) 121 | 122 | def main(): 123 | # Get initial user input 124 | model, model_name, temp_threshold = get_user_input() 125 | 126 | # Start continuous monitoring 127 | monitor_system(model, model_name, temp_threshold) 128 | 129 | if __name__ == "__main__": 130 | main() -------------------------------------------------------------------------------- /SLMs_for_IoT_CONTROL/slm_basic_interaction.py: -------------------------------------------------------------------------------- 1 | import 
ollama 2 | from monitor import collect_data, led_status, control_leds 3 | import time 4 | 5 | # Available models 6 | MODELS = { 7 | 1: ('deepseek-r1:1.5b', 'DeepSeek R1 1.5B'), 8 | 2: ('llama3.2:1b', 'Llama 3.2 1B'), 9 | 3: ('llama3.2:3b', 'Llama 3.2 3B'), 10 | 4: ('phi3:latest', 'Phi-3'), 11 | 5: ('gemma:2b', 'Gemma 2B'), 12 | } 13 | 14 | def parse_llm_response(response_text): 15 | """Parse the LLM response to extract LED control instructions.""" 16 | response_lower = response_text.lower() 17 | red_led = 'activate red led' in response_lower 18 | yellow_led = 'activate yellow led' in response_lower 19 | green_led = 'activate green led' in response_lower 20 | return (red_led, yellow_led, green_led) 21 | 22 | def get_user_input(): 23 | """Get user input for model selection and temperature threshold""" 24 | print("\nAvailable Models:") 25 | for num, (_, name) in MODELS.items(): 26 | print(f"{num}. {name}") 27 | 28 | while True: 29 | try: 30 | model_num = int(input("\nSelect model (1-5): ")) 31 | if model_num in MODELS: 32 | break 33 | print("Please select a number between 1 and 5.") 34 | except ValueError: 35 | print("Please enter a valid number.") 36 | 37 | while True: 38 | try: 39 | temp_threshold = float(input("Enter temperature threshold (°C): ")) 40 | break 41 | except ValueError: 42 | print("Please enter a valid number for temperature threshold.") 43 | 44 | return MODELS[model_num][0], MODELS[model_num][1], temp_threshold 45 | 46 | def process_command(model, temp_threshold, user_input): 47 | """Process a single user command""" 48 | try: 49 | # Collect sensor data 50 | temp_dht, hum, temp_bmp, press, button_state = collect_data() 51 | 52 | if any(v is None for v in [temp_dht, hum, temp_bmp, press]): 53 | return "Error: Failed to read sensor data" 54 | 55 | prompt = f""" 56 | You are monitoring an IoT system which is showing the following sensor status: 57 | - DHT22 Temp: {temp_dht:.1f}°C and Humidity: {hum:.1f}% 58 | - BMP280 Temp: {temp_bmp:.1f}°C and Pressure: {press:.2f}hPa 59 | - Button {"pressed" if button_state else "not pressed"} 60 | 61 | The user command is: "{user_input}" 62 | 63 | You should: 64 | 1. Understand what the user wants 65 | 2. If it's a question about sensor data, provide ONLY the relevant information. 66 | Be concise and stop. 67 | 3. If it's a command to control LEDs, you should provide a concise answer only with: 68 | "Activate Red LED" or "Activate Yellow LED" or "Activate Green LED" 69 | 4. If temperature is above {temp_threshold}°C, mention it in your response. 
70 |         """
71 | 
72 |         # Get SLM response
73 |         response = ollama.generate(
74 |             model=model,
75 |             prompt=prompt
76 |         )
77 | 
78 |         # Parse response and control LEDs
79 |         red, yellow, green = parse_llm_response(response['response'])
80 |         control_leds(red, yellow, green)
81 | 
82 |         # Print status
83 |         print("\n" + "="*50)
84 |         print(f"Time: {time.strftime('%H:%M:%S')}")
85 |         print(f"DHT22: {temp_dht:.1f}°C, {hum:.1f}%")
86 |         print(f"BMP280: {temp_bmp:.1f}°C, {press:.1f}hPa")
87 |         print(f"Button: {'pressed' if button_state else 'not pressed'}")
88 |         print(f"SLM Response: {response['response'].strip()}")
89 |         print(f"LED Status: R={'ON' if red else 'off'}, "
90 |               f"Y={'ON' if yellow else 'off'}, "
91 |               f"G={'ON' if green else 'off'}")
92 |         print("="*50 + "\n")
93 | 
94 |     except Exception as e:
95 |         return f"Error occurred: {str(e)}"
96 | 
97 | def main():
98 |     try:
99 |         # Get initial user input
100 |         model, model_name, temp_threshold = get_user_input()
101 | 
102 |         print(f"\nStarting IoT control system with {model_name}")
103 |         print(f"Temperature threshold: {temp_threshold}°C")
104 |         print("Type 'quit' to exit\n")
105 | 
106 |         while True:
107 |             user_input = input("Command: ").strip().lower()
108 | 
109 |             if user_input == 'quit':
110 |                 print("\nShutting down...")
111 |                 control_leds(False, False, False)  # Turn off all LEDs
112 |                 break
113 | 
114 |             # Process single command (it returns an error string on failure)
115 |             result = process_command(model, temp_threshold, user_input)
116 |             if result: print(result)
117 |     except KeyboardInterrupt:
118 |         print("\nShutting down...")
119 |         control_leds(False, False, False)
120 |     except Exception as e:
121 |         print(f"Error: {str(e)}")
122 |     finally:
123 |         control_leds(False, False, False)  # Ensure LEDs are off on exit
124 | 
125 | if __name__ == "__main__":
126 |     main()
--------------------------------------------------------------------------------
/SLMs_for_IoT_CONTROL/slm_basic_interaction_log.py:
--------------------------------------------------------------------------------
1 | # slm_basic_interaction_log.py
2 | import ollama
3 | from monitor import collect_data, led_status, control_leds
4 | import monitor_log as mlog
5 | import time
6 | from datetime import datetime
7 | from threading import Thread
8 | 
9 | # Available models
10 | MODELS = {
11 |     1: ('deepseek-r1:1.5b', 'DeepSeek R1 1.5B'),
12 |     2: ('llama3.2:1b', 'Llama 3.2 1B'),
13 |     3: ('llama3.2:3b', 'Llama 3.2 3B'),
14 |     4: ('phi3:latest', 'Phi-3'),
15 |     5: ('gemma:2b', 'Gemma 2B'),
16 | }
17 | 
18 | def parse_llm_response(response_text):
19 |     """Parse the LLM response to extract LED control instructions."""
20 |     response_lower = response_text.lower()
21 |     red_led = 'activate red led' in response_lower
22 |     yellow_led = 'activate yellow led' in response_lower
23 |     green_led = 'activate green led' in response_lower
24 |     return (red_led, yellow_led, green_led)
25 | 
26 | def get_user_input():
27 |     """Get user input for model selection and temperature threshold"""
28 |     print("\nAvailable Models:")
29 |     for num, (_, name) in MODELS.items():
30 |         print(f"{num}. 
{name}") 31 | 32 | while True: 33 | try: 34 | model_num = int(input("\nSelect model (1-5): ")) 35 | if model_num in MODELS: 36 | break 37 | print("Please select a number between 1 and 5.") 38 | except ValueError: 39 | print("Please enter a valid number.") 40 | 41 | while True: 42 | try: 43 | temp_threshold = float(input("Enter temperature threshold (°C): ")) 44 | break 45 | except ValueError: 46 | print("Please enter a valid number for temperature threshold.") 47 | 48 | return MODELS[model_num][0], MODELS[model_num][1], temp_threshold 49 | 50 | def query_log(query, model): 51 | """Query the log data using SLM with improved context and structure""" 52 | try: 53 | # Get log summary from monitor_log 54 | log_summary = mlog.get_log_summary() 55 | 56 | # Create a more structured prompt with explicit instructions 57 | prompt = f""" 58 | You are an IoT system analyst examining sensor data logs. Here is the current analysis: 59 | 60 | {log_summary} 61 | 62 | The user asks: "{query}" 63 | 64 | Follow these guidelines to answer: 65 | 1. For temperature trends: 66 | - Look at the temperature trend values (both DHT22 and BMP280) 67 | - Report if temperatures are increasing, decreasing, or stable 68 | - Include the rate of change in °C per interval 69 | 70 | 2. For button or LED history: 71 | - Report the total number of state changes 72 | - Mention if activity is high, moderate, or low 73 | 74 | 3. For specific measurements: 75 | - Use the most recent values 76 | - Include both sensor readings when relevant 77 | - Report the average if asked 78 | 79 | 4. For general analysis: 80 | - Focus on notable patterns 81 | - Highlight any unusual values 82 | - Compare different sensor readings if relevant 83 | 84 | Provide a clear, concise response focusing ONLY on the requested information. 85 | """ 86 | 87 | # Get SLM response 88 | response = ollama.generate( 89 | model=model, 90 | prompt=prompt 91 | ) 92 | 93 | return response['response'].strip() 94 | 95 | except Exception as e: 96 | return f"Error querying log: {e}" 97 | 98 | def process_command(model, temp_threshold, user_input): 99 | """Process a single user command with improved log query detection""" 100 | try: 101 | # Enhanced log query detection 102 | log_keywords = [ 103 | 'trend', 'history', 'past', 'record', 'log', 104 | 'average', 'changes', 'times', 'pattern', 105 | 'statistics', 'stats', 'summary' 106 | ] 107 | 108 | # Check if this is a log query 109 | if any(keyword in user_input.lower() for keyword in log_keywords): 110 | response = query_log(user_input, model) 111 | 112 | # Print status with log query results 113 | print("\n" + "="*50) 114 | print(f"Time: {time.strftime('%H:%M:%S')}") 115 | print(f"Log Query: {user_input}") 116 | print(f"Analysis: {response}") 117 | print("="*50 + "\n") 118 | return response 119 | 120 | # Rest of the existing command processing code... 121 | sensors = collect_data() 122 | if any(v is None for v in sensors): 123 | return "Error: Failed to read sensor data" 124 | 125 | temp_dht, hum, temp_bmp, press, button_state = sensors 126 | 127 | # Original prompt for real-time commands 128 | prompt = f""" 129 | You are monitoring an IoT system which is showing the following sensor status: 130 | - DHT22 Temp: {temp_dht:.1f}°C and Humidity: {hum:.1f}% 131 | - BMP280 Temp: {temp_bmp:.1f}°C and Pressure: {press:.2f}hPa 132 | - Button {"pressed" if button_state else "not pressed"} 133 | 134 | The user command is: "{user_input}" 135 | 136 | Based on the command: 137 | 1. 
For sensor queries, provide only the relevant current readings
138 |         2. For LED control, respond only with:
139 |            "Activate Red LED" or "Activate Yellow LED" or "Activate Green LED"
140 |         3. If temperature is above {temp_threshold}°C, mention it
141 |         """
142 | 
143 |         # Get the SLM response and act on it, as in slm_basic_interaction.py;
144 |         # in addition, the manual command is written to the log, matching
145 |         # the 'command' column visible in system_log.csv.
146 |         response = ollama.generate(
147 |             model=model,
148 |             prompt=prompt
149 |         )
150 | 
151 |         # Parse response and control LEDs
152 |         red, yellow, green = parse_llm_response(response['response'])
153 |         control_leds(red, yellow, green)
154 | 
155 |         # Log the manual command together with the current readings
156 |         timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
157 |         mlog.log_data(timestamp, sensors, (red, yellow, green), command=user_input)
158 | 
159 |         # Print status
160 |         print("\n" + "="*50)
161 |         print(f"Time: {time.strftime('%H:%M:%S')}")
162 |         print(f"DHT22: {temp_dht:.1f}°C, {hum:.1f}%")
163 |         print(f"BMP280: {temp_bmp:.1f}°C, {press:.1f}hPa")
164 |         print(f"Button: {'pressed' if button_state else 'not pressed'}")
165 |         print(f"SLM Response: {response['response'].strip()}")
166 |         print(f"LED Status: R={'ON' if red else 'off'}, "
167 |               f"Y={'ON' if yellow else 'off'}, "
168 |               f"G={'ON' if green else 'off'}")
169 |         print("="*50 + "\n")
170 | 
171 |     except Exception as e:
172 |         return f"Error occurred: {str(e)}"
173 | 
174 | 
175 | def main():
176 |     try:
177 |         # Setup logging
178 |         mlog.setup_log_file()
179 | 
180 |         # Start automatic logging thread
181 |         logging_thread = Thread(target=mlog.automatic_logging, daemon=True)
182 |         logging_thread.start()
183 | 
184 |         # Get initial user input
185 |         model, model_name, temp_threshold = get_user_input()
186 | 
187 |         print(f"\nStarting IoT control system with {model_name}")
188 |         print(f"Temperature threshold: {temp_threshold}°C")
189 |         print("Type 'quit' to exit")
190 |         print("\nYou can:")
191 |         print("- Control LEDs (e.g., 'turn on red led')")
192 |         print("- Query sensors (e.g., 'what's the temperature?')")
193 |         print("- Query logs (e.g., 'show temperature trend', 'led history')\n")
194 | 
195 |         while True:
196 |             user_input = input("Command: ").strip().lower()
197 | 
198 |             if user_input == 'quit':
199 |                 print("\nShutting down...")
200 |                 mlog.stop_logging.set()  # Signal logging thread to stop
201 |                 control_leds(False, False, False)
202 |                 break
203 | 
204 |             # Process single command (it returns an error string on failure)
205 |             result = process_command(model, temp_threshold, user_input)
206 |             if result and result.startswith("Error"): print(result)
207 |     except KeyboardInterrupt:
208 |         print("\nShutting down...")
209 |         mlog.stop_logging.set()
210 |         control_leds(False, False, False)
211 |     except Exception as e:
212 |         print(f"Error: {str(e)}")
213 |     finally:
214 |         mlog.stop_logging.set()
215 |         control_leds(False, False, False)
216 | 
217 | if __name__ == "__main__":
218 |     main()
--------------------------------------------------------------------------------
/SLMs_for_IoT_CONTROL/system_log.csv:
--------------------------------------------------------------------------------
1 | timestamp,temp_dht,humidity,temp_bmp,pressure,button_state,led_red,led_yellow,led_green,command
2 | 2025-02-18 13:08:15,24.4,40.2,26.2,905.1,0,0,0,0,
3 | 2025-02-18 13:09:15,26.0,38.6,26.2,905.1,0,0,0,0,
4 | 2025-02-18 13:09:29,26.0,38.6,26.2,905.1,0,1,0,0,turn on red led
5 | 2025-02-18 13:10:15,26.1,38.4,26.3,905.1,0,1,0,0,
6 | 2025-02-18 13:10:49,26.0,38.7,26.3,905.1,0,0,0,0,"what is the humidity, temperature and pressure?"
7 | 2025-02-18 13:11:16,26.1,38.2,26.4,905.1,0,0,0,0,
8 | 2025-02-18 13:12:16,26.1,38.2,26.4,905.1,0,0,0,0,
9 | 2025-02-18 13:13:16,26.3,37.8,26.5,905.1,0,0,0,0,
10 | 2025-02-18 13:14:17,26.4,37.3,26.5,905.1,0,0,0,0,
11 | 2025-02-18 13:15:17,26.5,38.2,26.4,905.1,0,0,0,0,
12 | 2025-02-18 13:16:17,28.0,99.9,26.4,905.1,0,0,0,0,
13 | 2025-02-18 13:16:18,26.3,38.3,26.5,905.1,0,0,0,0,what was the average humidity in the last 10 minutes?
14 | 2025-02-18 13:17:18,29.2,46.3,26.4,905.0,0,0,0,0,
15 | 2025-02-18 13:18:18,28.2,38.0,26.4,905.1,0,0,0,0,
16 | 2025-02-18 13:19:18,27.6,37.9,26.5,905.0,0,0,0,0,
17 | 2025-02-18 13:20:19,27.2,37.8,26.5,905.0,0,0,0,0,
18 | 2025-02-18 13:20:48,27.2,37.8,26.5,905.0,0,0,0,0,what is the average temperature in the last 10 minutes?
19 | 2025-02-18 13:21:19,27.0,37.9,26.5,905.1,0,0,0,0, 20 | 2025-02-18 13:22:19,26.8,38.2,26.4,905.0,0,0,0,0, 21 | 2025-02-18 13:23:20,26.6,38.8,26.5,905.0,0,0,0,0, 22 | 2025-02-18 13:24:20,26.5,41.2,26.5,905.0,0,0,0,0, 23 | 2025-02-18 13:25:20,26.6,40.0,26.5,905.0,0,0,0,0, 24 | 2025-02-18 13:26:21,26.5,39.4,26.6,905.0,0,0,0,0, 25 | 2025-02-18 13:27:21,26.6,38.7,26.8,904.9,0,0,0,0, 26 | 2025-02-18 13:28:18,26.7,40.0,26.8,905.0,0,0,0,0, 27 | 2025-02-18 13:29:18,26.7,40.0,26.8,905.0,0,0,0,0, 28 | 2025-02-18 13:30:19,26.9,38.1,26.8,904.9,0,0,0,0, 29 | 2025-02-18 13:31:12,26.8,37.5,26.8,904.9,1,0,0,0,what is the button status? 30 | 2025-02-18 13:31:19,26.8,37.6,26.7,905.0,0,0,0,0, 31 | 2025-02-18 13:32:15,26.7,38.5,26.7,905.0,1,1,0,0,"if the button is pressed, turn on the red led" 32 | 2025-02-18 13:32:19,26.7,39.0,26.6,904.9,0,1,0,0, 33 | 2025-02-18 13:33:20,26.6,38.4,26.5,904.9,0,1,0,0, 34 | 2025-02-18 13:34:20,26.4,39.0,26.5,905.0,0,1,0,0, 35 | 2025-02-18 13:35:20,26.4,39.2,26.6,905.0,0,1,0,0, 36 | 2025-02-18 13:35:58,26.3,38.6,26.6,904.9,0,0,0,0,show the average humidity fo the last hour 37 | 2025-02-18 13:36:21,26.4,40.1,26.7,905.0,0,0,0,0, 38 | 2025-02-18 13:37:21,26.5,39.3,26.8,904.9,0,0,0,0, 39 | 2025-02-18 13:38:21,26.7,39.0,26.8,904.9,0,0,0,0, 40 | 2025-02-18 13:39:22,26.8,38.1,26.7,904.9,0,0,0,0, 41 | 2025-02-18 13:40:22,26.7,38.4,26.6,904.9,0,0,0,0, 42 | 2025-02-18 13:41:22,26.6,38.8,26.6,904.9,0,0,0,0, 43 | 2025-02-18 13:42:23,26.5,39.0,26.6,904.9,0,0,0,0, 44 | 2025-02-18 13:43:23,26.4,39.1,26.5,904.9,0,0,0,0, 45 | 2025-02-18 13:44:23,26.3,39.3,26.5,904.8,0,0,0,0, 46 | 2025-02-18 13:45:24,26.3,39.4,26.5,904.9,0,0,0,0, 47 | 2025-02-18 13:46:24,26.2,39.4,26.5,904.8,0,0,0,0, 48 | 2025-02-18 13:47:24,26.2,39.6,26.5,904.9,0,0,0,0, 49 | 2025-02-18 13:48:25,26.2,38.6,26.5,904.8,0,0,0,0, 50 | 2025-02-18 13:49:25,26.2,39.7,26.4,904.8,0,0,0,0, 51 | 2025-02-18 13:50:25,26.2,39.8,26.4,904.9,0,0,0,0, 52 | 2025-02-18 13:51:26,26.1,39.8,26.4,904.8,0,0,0,0, 53 | 2025-02-18 13:52:26,26.1,39.9,26.4,904.8,0,0,0,0, 54 | 2025-02-18 13:53:26,26.1,39.9,26.4,904.8,0,0,0,0, 55 | 2025-02-18 13:54:27,26.1,40.0,26.4,904.8,0,0,0,0, 56 | 2025-02-18 13:55:27,26.0,40.0,26.4,904.8,0,0,0,0, 57 | 2025-02-18 13:56:27,26.1,40.1,26.4,904.8,0,0,0,0, 58 | 2025-02-18 13:57:27,26.0,40.1,26.4,904.8,0,0,0,0, 59 | 2025-02-18 13:58:28,26.0,40.1,26.4,904.8,0,0,0,0, 60 | 2025-02-18 13:59:28,26.0,40.1,26.3,904.8,0,0,0,0, 61 | 2025-02-18 14:00:28,26.0,40.1,26.3,904.7,0,0,0,0, 62 | 2025-02-18 14:01:29,26.0,40.2,26.3,904.7,0,0,0,0, 63 | 2025-02-18 14:02:29,26.0,40.3,26.3,904.7,0,0,0,0, 64 | 2025-02-18 14:03:29,25.9,40.3,26.3,904.7,0,0,0,0, 65 | 2025-02-18 14:04:30,25.9,40.4,26.2,904.6,0,0,0,0, 66 | 2025-02-18 14:05:30,25.8,40.4,26.2,904.7,0,0,0,0, 67 | 2025-02-18 14:06:30,25.8,40.4,26.2,904.7,0,0,0,0, 68 | 2025-02-18 14:07:31,25.8,40.5,26.2,904.6,0,0,0,0, 69 | 2025-02-18 14:08:31,25.8,40.5,26.2,904.6,0,0,0,0, 70 | 2025-02-18 14:09:31,25.8,40.5,26.2,904.6,0,0,0,0, 71 | 2025-02-18 14:10:32,25.8,40.4,26.2,904.7,0,0,0,0, 72 | 2025-02-18 14:11:32,25.8,40.4,26.2,904.6,0,0,0,0, 73 | 2025-02-18 14:12:32,25.8,40.4,26.2,904.7,0,0,0,0, 74 | 2025-02-18 14:13:33,25.8,40.4,26.2,904.6,0,0,0,0, 75 | 2025-02-18 14:14:33,25.8,40.4,26.2,904.7,0,0,0,0, 76 | 2025-02-18 14:15:33,25.8,40.4,26.2,904.7,0,0,0,0, 77 | 2025-02-18 14:16:34,25.8,40.5,26.2,904.6,0,0,0,0, 78 | 2025-02-18 14:17:34,25.8,40.5,26.1,904.6,0,0,0,0, 79 | 2025-02-18 14:18:34,25.8,40.5,26.1,904.6,0,0,0,0, 80 | 2025-02-18 14:19:35,25.8,40.5,26.1,904.6,0,0,0,0, 81 | 2025-02-18 
14:20:35,25.8,40.5,26.1,904.6,0,0,0,0, 82 | 2025-02-18 14:21:35,25.8,40.5,26.1,904.5,0,0,0,0, 83 | 2025-02-18 14:22:36,25.8,40.5,26.1,904.5,0,0,0,0, 84 | 2025-02-18 14:23:36,25.8,40.5,26.1,904.5,0,0,0,0, 85 | 2025-02-18 14:24:36,25.8,40.5,26.1,904.5,0,0,0,0, 86 | 2025-02-18 14:25:37,25.7,40.4,26.1,904.5,0,0,0,0, 87 | 2025-02-18 14:26:37,25.7,40.4,26.1,904.6,0,0,0,0, 88 | 2025-02-18 14:27:37,25.8,40.5,26.2,904.6,0,0,0,0, 89 | 2025-02-18 14:28:38,25.7,40.4,26.1,904.5,0,0,0,0, 90 | 2025-02-18 14:29:38,25.8,40.2,26.1,904.5,0,0,0,0, 91 | 2025-02-18 14:30:38,25.8,40.3,26.2,904.5,0,0,0,0, 92 | 2025-02-18 14:31:39,25.8,40.0,26.1,904.5,0,0,0,0, 93 | 2025-02-18 14:32:39,25.8,39.9,26.1,904.5,0,0,0,0, 94 | 2025-02-18 14:33:39,25.8,40.0,26.1,904.5,0,0,0,0, 95 | 2025-02-18 14:34:40,25.7,39.8,26.1,904.5,0,0,0,0, 96 | 2025-02-18 14:35:40,25.8,39.8,26.1,904.4,0,0,0,0, 97 | 2025-02-18 14:36:40,25.8,39.9,26.1,904.5,0,0,0,0, 98 | 2025-02-18 14:37:41,25.7,39.9,26.1,904.4,0,0,0,0, 99 | 2025-02-18 14:38:41,25.8,40.3,26.1,904.5,0,0,0,0, 100 | 2025-02-18 14:39:41,25.7,40.2,26.1,904.4,0,0,0,0, 101 | 2025-02-18 14:40:42,25.7,40.1,26.1,904.5,0,0,0,0, 102 | 2025-02-18 14:41:42,25.7,40.1,26.1,904.4,0,0,0,0, 103 | 2025-02-18 14:42:42,25.7,40.2,26.1,904.4,0,0,0,0, 104 | 2025-02-18 14:43:43,25.7,40.7,26.0,904.5,0,0,0,0, 105 | 2025-02-18 14:44:43,25.7,40.9,26.1,904.4,0,0,0,0, 106 | 2025-02-18 14:45:43,25.7,40.8,26.0,904.4,0,0,0,0, 107 | 2025-02-18 14:46:44,25.7,40.6,26.0,904.4,0,0,0,0, 108 | 2025-02-18 14:47:44,25.7,40.6,26.1,904.4,0,0,0,0, 109 | 2025-02-18 14:48:44,25.7,42.4,26.1,904.4,0,0,0,0, 110 | 2025-02-18 14:49:45,25.8,41.1,26.1,904.4,0,0,0,0, 111 | 2025-02-18 14:50:45,25.8,41.6,26.2,904.4,0,0,0,0, 112 | 2025-02-18 14:51:45,25.9,41.5,26.2,904.4,0,0,0,0, 113 | 2025-02-18 14:52:46,26.0,41.1,26.3,904.3,0,0,0,0, 114 | 2025-02-18 14:53:46,26.0,39.4,26.3,904.3,0,0,0,0, 115 | 2025-02-18 14:54:46,26.2,38.2,26.5,904.3,0,0,0,0, 116 | 2025-02-18 14:55:47,26.4,37.4,26.6,904.2,0,0,0,0, 117 | 2025-02-18 14:56:47,26.6,36.2,26.7,904.2,0,0,0,0, 118 | 2025-02-18 14:57:47,26.7,36.2,26.8,904.2,0,0,0,0, 119 | 2025-02-18 14:58:48,26.9,36.4,26.9,904.1,0,0,0,0, 120 | 2025-02-18 14:59:48,27.0,35.9,27.0,904.1,0,0,0,0, 121 | 2025-02-18 15:00:48,27.2,35.0,26.9,904.1,0,0,0,0, 122 | 2025-02-18 15:01:49,27.2,34.3,26.9,904.1,0,0,0,0, 123 | 2025-02-18 15:02:49,27.2,34.6,27.1,904.1,0,0,0,0, 124 | 2025-02-18 15:03:49,27.3,37.3,27.2,904.1,0,0,0,0, 125 | 2025-02-18 15:04:50,27.4,36.4,27.2,904.0,0,0,0,0, 126 | 2025-02-18 15:05:50,27.5,35.0,27.4,904.1,0,0,0,0, 127 | 2025-02-18 15:06:51,27.6,34.2,27.3,904.0,0,0,0,0, 128 | 2025-02-18 15:07:50,27.6,35.6,27.3,904.1,0,0,0,0,what is the temperature? 129 | 2025-02-18 15:07:52,27.6,34.8,27.4,904.1,0,0,0,0, 130 | 2025-02-18 15:08:52,27.6,34.8,27.4,904.0,0,0,0,0, 131 | 2025-02-18 15:09:52,27.7,32.9,27.4,904.0,0,0,0,0, 132 | 2025-02-18 15:10:53,27.7,34.3,27.5,904.0,0,0,0,0, 133 | 2025-02-18 15:11:26,27.7,33.7,27.5,904.0,0,0,0,0,what is the average humidity? 
134 | 2025-02-18 15:11:53,27.7,33.9,27.4,904.0,0,0,0,0, 135 | 2025-02-18 15:12:53,27.7,34.4,27.4,903.9,0,0,0,0, 136 | 2025-02-18 15:13:54,27.7,34.4,27.4,903.9,0,0,0,0, 137 | 2025-02-18 15:14:54,27.6,35.5,27.6,903.9,0,0,0,0, 138 | 2025-02-18 15:19:47,27.7,35.5,27.8,903.9,0,0,0,0, 139 | 2025-02-18 15:20:42,28.0,32.8,27.9,903.8,0,0,0,0, 140 | 2025-02-18 15:21:42,29.8,40.2,27.9,903.9,0,0,0,0, 141 | 2025-02-18 15:22:43,29.1,32.1,27.8,903.8,0,0,0,0, 142 | 2025-02-18 15:27:19,28.7,33.6,28.0,903.7,0,0,0,0, 143 | 2025-02-18 15:28:29,28.3,32.3,28.1,903.7,0,0,0,0, 144 | 2025-02-18 15:29:29,28.4,32.1,28.1,903.7,0,0,0,0, 145 | 2025-02-18 15:29:43,28.3,31.7,28.1,903.7,0,0,0,0, 146 | 2025-02-18 15:38:33,28.5,31.6,28.3,903.6,0,0,0,0, 147 | 2025-02-18 15:42:37,28.4,32.3,28.3,903.6,0,0,0,0, 148 | 2025-02-18 15:43:37,28.4,31.9,28.5,903.7,0,0,0,0, 149 | 2025-02-18 15:45:53,28.5,32.8,28.3,903.5,0,0,0,0, 150 | 2025-02-18 15:46:53,28.4,31.4,28.4,903.6,0,0,0,0, 151 | 2025-02-18 15:47:54,28.4,32.2,28.5,903.5,0,0,0,0, 152 | 2025-02-18 15:48:54,28.5,32.2,28.5,903.5,0,0,0,0, 153 | 2025-02-18 15:49:54,28.6,32.3,28.6,903.6,0,0,0,0, 154 | 2025-02-18 15:50:55,28.7,31.4,28.5,903.5,0,0,0,0, 155 | 2025-02-18 15:51:55,28.7,30.2,28.4,903.5,0,0,0,0, 156 | 2025-02-18 15:52:55,28.5,30.9,28.4,903.5,0,0,0,0, 157 | 2025-02-18 15:53:56,28.4,30.7,28.5,903.5,0,0,0,0, 158 | 2025-02-18 15:54:56,28.4,30.1,28.5,903.5,0,0,0,0, 159 | 2025-02-18 15:55:56,28.4,31.5,28.5,903.5,0,0,0,0, 160 | 2025-02-18 15:56:57,28.4,32.1,28.5,903.5,0,0,0,0, 161 | 2025-02-18 15:57:57,28.4,32.8,28.6,903.5,0,0,0,0, 162 | 2025-02-18 15:58:57,28.5,31.7,28.6,903.5,0,0,0,0, 163 | 2025-02-18 15:59:58,28.6,31.5,28.7,903.5,0,0,0,0, 164 | 2025-02-18 16:00:58,28.7,31.3,28.7,903.5,0,0,0,0, 165 | 2025-02-18 16:01:58,28.8,31.6,28.7,903.5,0,0,0,0, 166 | 2025-02-18 16:02:59,28.7,31.1,28.8,903.4,0,0,0,0, 167 | 2025-02-18 16:03:59,28.8,31.6,28.8,903.4,0,0,0,0, 168 | 2025-02-18 16:04:59,28.9,30.1,28.7,903.5,0,0,0,0, 169 | 2025-02-18 16:06:00,28.8,30.9,28.9,903.4,0,0,0,0, 170 | 2025-02-18 16:07:00,28.8,30.4,28.9,903.4,0,0,0,0, 171 | 2025-02-18 16:08:00,29.0,30.1,28.9,903.4,0,0,0,0, 172 | 2025-02-18 16:09:01,29.0,29.6,28.8,903.4,0,0,0,0, 173 | 2025-02-18 16:10:01,28.7,29.5,28.8,903.4,0,0,0,0, 174 | 2025-02-18 16:11:01,28.7,30.2,28.8,903.4,0,0,0,0, 175 | 2025-02-18 16:12:02,28.8,30.5,28.9,903.4,0,0,0,0, 176 | 2025-02-18 16:13:02,28.8,29.7,28.8,903.4,0,0,0,0, 177 | 2025-02-18 16:14:02,28.8,30.0,28.9,903.4,0,0,0,0, 178 | 2025-02-18 16:15:03,28.8,30.0,28.8,903.4,0,0,0,0, 179 | 2025-02-18 16:15:58,28.8,30.9,28.7,903.4,0,0,0,0, 180 | 2025-02-18 16:16:25,28.6,31.8,28.7,903.3,0,0,0,0, 181 | 2025-02-18 16:17:12,28.6,32.1,28.8,903.3,0,0,0,0,what is the temperature? 182 | 2025-02-18 16:17:27,28.6,33.1,28.8,903.4,0,0,0,0, 183 | 2025-02-18 16:18:28,28.7,31.4,28.9,903.4,0,0,0,0, 184 | 2025-02-18 16:18:35,28.7,32.2,28.8,903.4,0,0,0,0,what is the current temperature? 185 | 2025-02-18 16:19:28,28.8,30.5,29.0,903.4,0,0,0,0, 186 | 2025-02-18 16:19:38,28.8,30.5,28.9,903.4,0,0,0,0,what is the humidity and pressure? 
187 | 2025-02-18 16:20:27,28.9,30.9,28.9,903.4,0,0,0,0,turn on the red led 188 | 2025-02-18 16:20:28,28.9,31.3,28.9,903.4,0,0,0,0, 189 | 2025-02-18 16:21:14,28.8,31.4,28.9,903.4,0,1,0,0,turn on the red led 190 | 2025-02-18 16:21:29,28.8,31.4,29.0,903.4,0,1,0,0, 191 | 2025-02-18 16:22:16,28.9,30.5,29.0,903.4,1,0,0,0,"if the button is pressed, turn on the yellow led" 192 | 2025-02-18 16:22:29,28.9,30.7,28.9,903.4,0,0,0,0, 193 | 2025-02-18 16:23:29,28.9,30.7,28.9,903.4,0,0,0,0, 194 | 2025-02-18 16:24:30,28.9,30.9,28.8,903.4,0,0,0,0, 195 | 2025-02-18 16:25:30,28.8,31.4,28.8,903.4,0,0,0,0, 196 | 2025-02-18 16:26:30,28.7,31.6,28.8,903.4,0,0,0,0, 197 | 2025-02-18 16:27:31,28.7,32.0,28.9,903.3,0,0,0,0, 198 | 2025-02-18 16:28:31,28.8,31.1,28.9,903.4,0,0,0,0, 199 | 2025-02-18 16:29:31,28.8,31.6,28.9,903.4,0,0,0,0, 200 | 2025-02-18 16:30:32,28.8,31.8,28.9,903.4,0,0,0,0, 201 | 2025-02-18 16:31:32,28.7,31.8,28.9,903.4,0,0,0,0, 202 | 2025-02-18 16:32:32,28.7,31.7,28.9,903.4,0,0,0,0, 203 | 2025-02-18 16:33:33,28.8,30.5,28.8,903.4,0,0,0,0, 204 | 2025-02-18 16:34:33,28.7,31.1,28.9,903.4,0,0,0,0, 205 | 2025-02-18 16:35:33,28.7,31.9,28.8,903.4,0,0,0,0, 206 | 2025-02-18 16:36:34,28.6,32.7,28.8,903.4,0,0,0,0, 207 | 2025-02-18 16:37:34,28.6,30.5,28.9,903.5,0,0,0,0, 208 | 2025-02-18 16:38:34,28.6,30.4,28.8,903.5,0,0,0,0, 209 | 2025-02-18 16:39:34,28.6,30.4,28.8,903.5,0,0,0,0, 210 | 2025-02-18 16:40:35,28.6,30.9,28.9,903.5,0,0,0,0, 211 | 2025-02-18 16:41:19,28.6,30.9,28.9,903.5,0,0,0,0, 212 | 2025-02-18 16:42:19,28.7,31.0,28.9,903.5,0,0,0,0, 213 | 2025-02-18 16:43:19,28.7,31.0,28.8,903.5,0,0,0,0, 214 | --------------------------------------------------------------------------------
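
Two short sketches complement the scripts above. Both are illustrative additions rather than files from the repository: the file names are hypothetical, and the only assumptions carried over are the system_log.csv column names and the "Activate <color> LED" reply convention used by the slm_basic_* scripts.

The first sketch answers one of the questions recorded in the log ("what was the average humidity in the last 10 minutes?") directly with pandas, with no SLM call, which makes a useful cross-check on the model's answers.

/SLMs_for_IoT_CONTROL/example_log_query.py (hypothetical sketch):
--------------------------------------------------------------------------------
1 | # Sketch: compute recent averages straight from system_log.csv.
2 | # Column names are taken from the CSV header above; "NA" entries
3 | # become NaN via to_numeric and are ignored by mean().
4 | import pandas as pd
5 | 
6 | df = pd.read_csv('system_log.csv', parse_dates=['timestamp'])
7 | for col in ['temp_dht', 'humidity', 'temp_bmp', 'pressure']:
8 |     df[col] = pd.to_numeric(df[col], errors='coerce')
9 | 
10 | # Keep only the rows logged within 10 minutes of the newest entry
11 | cutoff = df['timestamp'].max() - pd.Timedelta(minutes=10)
12 | recent = df[df['timestamp'] >= cutoff]
13 | print(f"Average humidity (last 10 min): {recent['humidity'].mean():.1f}%")
14 | print(f"Average DHT22 temp (last 10 min): {recent['temp_dht'].mean():.1f}°C")

The second sketch is a stricter alternative to the parse_llm_response() helper repeated in the scripts above. The substring checks used there can set several LEDs at once if a verbose reply mentions more than one command; matching only the first "Activate <color> LED" phrase guarantees that at most one LED is selected, and an unrecognizable reply selects none.

/SLMs_for_IoT_CONTROL/example_strict_parser.py (hypothetical sketch):
--------------------------------------------------------------------------------
1 | # Sketch: extract exactly one LED command from a model reply.
2 | import re
3 | 
4 | def parse_llm_response_strict(response_text):
5 |     """Return (red, yellow, green) from the first 'Activate <color> LED' phrase."""
6 |     m = re.search(r'activate\s+(red|yellow|green)\s+led', response_text, re.IGNORECASE)
7 |     color = m.group(1).lower() if m else None
8 |     return (color == 'red', color == 'yellow', color == 'green')
9 | 
10 | if __name__ == "__main__":
11 |     print(parse_llm_response_strict("Activate Yellow LED"))        # (False, True, False)
12 |     print(parse_llm_response_strict("All readings look normal."))  # (False, False, False)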