├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── app_factory ├── __init__.py ├── app.py ├── data │ ├── meeting_templates.json │ └── sample_questions.json ├── data_generator │ ├── __init__.py │ ├── data_pools.json │ └── sqlite-synthetic-mes-data.py ├── mes_chat │ ├── __init__.py │ └── app.py ├── production_meeting │ ├── __init__.py │ ├── action_tracker.py │ ├── ai_insights.py │ ├── app.py │ ├── dashboards │ │ ├── __init__.py │ │ ├── equipment.py │ │ ├── inventory.py │ │ ├── production.py │ │ ├── productivity.py │ │ ├── quality.py │ │ ├── root_cause.py │ │ └── weekly.py │ └── report.py └── shared │ ├── __init__.py │ ├── bedrock_utils.py │ └── database.py ├── assets ├── MES-chatbot-sys-architecture.png ├── ProductionDashboard.gif ├── chatwithMES.gif ├── mes-chatbot-architecture-sequence-diagram.png ├── mes-chatbot-example-screenshot.png ├── mes-chatbot.gif ├── postgres-installation-confirmation.png └── table-list.png ├── models.json ├── requirements.txt └── text-to-sql-notebook.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | **/__pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # local testing 156 | .dev 157 | .vscode 158 | .streamlit 159 | .DS_Store 160 | .kiro 161 | 162 | #notebooks for testing 163 | #*.ipynb 164 | 165 | #sqllite simulation database 166 | mes.db 167 | *.db-journal 168 | 169 | #meeting notes 170 | app_factory/reports 171 | data/action_items 172 | reports/ -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. 
Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 17 | 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Manufacturing Operations Hub 2 | 3 | A comprehensive platform providing manufacturing operations interfaces powered by Amazon Bedrock. This project offers a combination of **natural language interaction with MES** (Manufacturing Execution System) data structured dashboards for daily lean production meetings with AI data analysis. 4 | 5 | ![MES chatbot UI](assets/mes-chatbot-example-screenshot.png) 6 | 7 | ## Overview 8 | 9 | This application provides two integrated interfaces for manufacturing operations: 10 | 11 | 1. **MES Insight Chat** - An interactive AI-powered chatbot for analyzing Manufacturing Execution System (MES) data 12 | 2. **Daily Production Meeting** - A structured interface for daily lean meetings and production status reviews 13 | 3. 
**Educational Jupyter Notebook** - A demonstration of text-to-SQL patterns used in the chatbot 14 | 15 | The application is built on a synthetic MES database for an e-bike manufacturing facility, providing a realistic environment for exploring production data, inventory management, quality control, and equipment efficiency metrics. 16 | 17 | ## Key Features 18 | 19 | ### MES Insight Chat 20 | 21 | - Natural language interface to query MES data without SQL knowledge 22 | - Interactive conversation with an AI assistant 23 | - Data visualization for query results 24 | - Deep insights into production processes, inventory, quality, and equipment 25 | 26 | ### Daily Production Meeting 27 | 28 | The Daily Production Meeting dashboard eliminates the need for teams to spend a lot of preparation time by, for example, gathering data and running pivot table reports before meetings. Instead, team members arrive with answers to the basic questions already available and an overview of the state of the factory, allowing the meeting to focus on actions and solving problems. 29 | 30 | Key benefits include: 31 | - Instant access to critical production metrics - no more preparing slides before meetings 32 | - Real-time dashboards that present a consistent view across all stakeholders 33 | - Natural language querying of top issues (e.g., "What are the top quality issues from yesterday that we should investigate?") 34 | - AI-powered insights that highlight patterns humans might miss 35 | - Function-specific views that allow teams to quickly answer common questions: 36 | - Production: "What was our completion rate yesterday?" 37 | - Quality: "Which products have the highest defect rates?" 38 | - Equipment: "What machines need maintenance today?" 39 | - Inventory: "Which materials are below reorder level?" 
40 | 41 | Features include: 42 | - **📈 Production Summary** - KPIs, completion rates, and current work orders 43 | - **🔧 Equipment Status** - Machine availability, upcoming maintenance, and downtime impact 44 | - **⚠️ Quality Issues** - Defect rates, top issues, and problem products 45 | - **📦 Inventory Alerts** - Items below reorder level with days of supply analysis 46 | - **👥 Productivity** - Employee and shift performance metrics 47 | - **🔍 Root Cause Analysis** - Interactive defect analysis tools 48 | - **🤖 AI Insights** - Predictive analytics and decision intelligence 49 | - **📋 Action Items** - Track and manage action items 50 | - **📝 Meeting Notes** - Document discussions and decisions 51 | - **📄 Reports** - Generate meeting summaries and weekly reports 52 | 53 | ### System Architecture 54 | 55 | This architecture enables natural language queries against manufacturing databases using LLMs. The system follows a schema-first approach where the LLM first learns the database structure before generating SQL queries. When users ask questions in plain English, the application bridges the gap between natural language and structured data by having the LLM generate appropriate SQL, execute it against the MES database, and then transform the results into insightful, business-relevant responses with visualizations. The pattern includes error handling with query reformulation when needed, ensuring robust performance even with complex manufacturing questions. 56 | 57 | This is the Sequence Diagram of the chatbot: 58 | 59 | ![MES System Architecture](assets/mes-chatbot-architecture-sequence-diagram.png) 60 | 61 | ## Installation 62 | 63 | ### Prerequisites 64 | 65 | - Python 3.9 or higher 66 | - [SQLite](https://www.sqlite.org/download.html) 67 | - AWS account with access to Amazon Bedrock 68 | (see AWS Configuration section for required permissions and models) 69 | 70 | ### Setup 71 | 72 | 1. 
**Environment Setup** 73 | 74 | If using Amazon SageMaker AI JupyterLab (recommended), you can skip to step 3. 75 | 76 | Create and activate a Python virtual environment: 77 | 78 | ```bash 79 | python3 -m venv .venv 80 | source .venv/bin/activate # On Windows: .venv\Scripts\activate 81 | ``` 82 | 83 | 2. **AWS Configuration** 84 | 85 | Configure AWS environment variables by creating a `.env` file: 86 | 87 | ```text 88 | AWS_REGION="YourRegion" #example us-east-1 89 | AWS_PROFILE="myprofile" #from ~/.aws/config 90 | ``` 91 | 92 | 3. **Install Required Packages** 93 | 94 | ```bash 95 | pip install -r requirements.txt 96 | ``` 97 | 98 | 4. **Generate the MES Database** 99 | 100 | ```bash 101 | # Create tables and simulation data (auto-detects if database exists) 102 | python3 app_factory/data_generator/sqlite-synthetic-mes-data.py --config app_factory/data_generator/data_pools.json --lookback 90 --lookahead 14 103 | ``` 104 | 105 | This will create the database file `mes.db` in the project root directory if it doesn't exist, or refresh the data if it does. 106 | 107 | **Additional Options** 108 | 109 | ```bash 110 | # Get help on all configuration options 111 | python3 app_factory/data_generator/sqlite-synthetic-mes-data.py --help 112 | ``` 113 | 114 | ## Running the Applications 115 | 116 | You can run the applications independently or together: 117 | 118 | ### Run All Components Together 119 | 120 | ```bash 121 | # Start the combined application 122 | streamlit run app_factory/app.py 123 | ``` 124 | 125 | ### Run Components Independently 126 | 127 | ```bash 128 | # Run only the MES Insight Chat 129 | streamlit run app_factory/mes_chat/app.py 130 | 131 | # Run only the Daily Production Meeting 132 | streamlit run app_factory/production_meeting/app.py 133 | ``` 134 | 135 | ### Educational Jupyter Notebook 136 | 137 | The repository includes a Jupyter notebook (`text-to-sql-notebook.ipynb`) that demonstrates the text-to-SQL patterns used in the chatbot. 
It's located at the root level for easy access to the database. 138 | 139 | ```bash 140 | # Start Jupyter to access the notebook 141 | jupyter notebook 142 | ``` 143 | 144 | ## Database and Simulation 145 | 146 | The synthetic MES database (`mes.db`) contains a comprehensive manufacturing data model for an e-bike production facility, including: 147 | 148 | - **Products & BOM**: E-bikes, components, subassemblies, and raw materials with hierarchical bill of materials 149 | - **Inventory & Suppliers**: Stock levels, reorder points, lead times, and supplier information 150 | - **Work Centers & Machines**: Manufacturing areas, equipment capabilities, capacity, and status 151 | - **Employees & Shifts**: Personnel profiles, skills, shift assignments, and work schedules 152 | - **Work Orders**: Production schedules, actual production, and order status tracking 153 | - **Quality Control**: Inspection results, defects, root causes, severity, and corrective actions 154 | - **Downtimes**: Equipment failures, planned maintenance, and operational interruptions 155 | - **OEE Metrics**: Overall Equipment Effectiveness tracking (Availability, Performance, Quality) 156 | - **Material Consumption**: Component usage, variance reporting, and lot tracking 157 | 158 | The simulation includes realistic manufacturing patterns such as: 159 | - Production bottlenecks and constraints in specific work centers 160 | - Maintenance cycles affecting equipment performance over time 161 | - Quality issues correlated with process variables, equipment, and materials 162 | - Inventory fluctuations and occasional shortages with lead time impacts 163 | - Downtime events with appropriate distributions (planned vs. unplanned) 164 | - Seasonal and weekly production patterns reflecting real-world manufacturing 165 | 166 | Use the configuration options to control the date ranges and data characteristics when generating the database. 
167 | 168 | ## Project Structure 169 | 170 | ```text 171 | ./ 172 | ├── LICENSE # MIT License 173 | ├── README.md # This file 174 | ├── CONTRIBUTING.md # Contribution guidelines 175 | ├── CODE_OF_CONDUCT.md # Code of conduct 176 | ├── requirements.txt # Project dependencies 177 | ├── .env # Environment variables (user-created) 178 | ├── .gitignore # Git ignore file 179 | ├── text-to-sql-notebook.ipynb # Educational Jupyter notebook 180 | ├── app_factory/ # Main application code 181 | │ ├── app.py # Combined application entry point 182 | │ ├── shared/ # Shared utilities 183 | │ │ ├── database.py # Database access 184 | │ │ └── bedrock_utils.py # Amazon Bedrock client 185 | │ ├── mes_chat/ # MES Chat application 186 | │ │ └── app.py # Chat interface 187 | │ ├── production_meeting/ # Production Meeting application 188 | │ │ ├── app.py # Main dashboard 189 | │ │ ├── dashboards/ # Individual dashboard components 190 | │ │ │ ├── equipment.py # Equipment status dashboard 191 | │ │ │ ├── inventory.py # Inventory dashboard 192 | │ │ │ ├── production.py # Production metrics dashboard 193 | │ │ │ ├── productivity.py # Productivity dashboard 194 | │ │ │ ├── quality.py # Quality issues dashboard 195 | │ │ │ ├── root_cause.py # Root cause analysis 196 | │ │ │ └── weekly.py # Weekly summary dashboard 197 | │ │ ├── action_tracker.py # Action item management 198 | │ │ ├── report.py # Meeting report generation 199 | │ │ └── ai_insights.py # AI-powered insights 200 | │ ├── data_generator/ # Database generator 201 | │ │ ├── sqlite-synthetic-mes-data.py # MES database generator 202 | │ │ └── data_pools.json # Configuration for database generator 203 | │ └── data/ # Data files 204 | │ ├── sample_questions.json # Example questions 205 | │ └── meeting_templates.json # Meeting templates 206 | ├── assets/ # Images and media files 207 | ├── mes.db # Generated MES database (not in repo) 208 | └── reports/ # Generated reports directory (not in repo) 209 | ``` 210 | 211 | ## Using the 
Applications 212 | 213 | ### MES Insight Chat 214 | 215 | In the MES Chat interface, you can: 216 | 217 | 1. Ask questions about production data in natural language 218 | 2. Select example questions from predefined categories 219 | 3. View query results in tabular or chart format 220 | 4. Download data as CSV 221 | 222 | Example questions: 223 | 224 | - "What's our current production schedule for the next week?" 225 | - "Which inventory items are below their reorder level?" 226 | - "What's the OEE for our Frame Welding machines?" 227 | - "Show me the most common defect types and their severity" 228 | 229 | ![mes-chatbot-gif](assets/mes-chatbot.gif) 230 | 231 | ### Daily Production Meeting 232 | 233 | The Production Meeting dashboard includes: 234 | 235 | 1. **Production Summary** - Daily production metrics, completion rates, OEE, and real-time work order status 236 | 2. **Equipment Status** - Machine availability, downtime analysis, and upcoming maintenance schedule 237 | 3. **Quality Issues** - Top defects, problem products, root causes, and trend analysis 238 | 4. **Inventory Alerts** - Critical shortages, days of supply analysis, and material requirements 239 | 5. **Productivity** - Employee and shift performance metrics with comparative analysis 240 | 6. **Root Cause Analysis** - Interactive tools to drill into quality issues and identify patterns 241 | 7. **AI Insights** - AI-powered analytics including predictive insights and decision intelligence 242 | 8. **Action Items** - Track and assign action items to team members 243 | 9. **Meeting Notes** - Document discussions and decisions with templates 244 | 10. **Reports** - Generate comprehensive meeting summaries and weekly reports 245 | 246 | The dashboard updates in real-time, providing a consistent view for all stakeholders and eliminating the need for manual report preparation before meetings. This allows teams to focus on problem-solving rather than data collection and reporting. 
247 | 248 | ![daily-lean-meetings](assets/ProductionDashboard.gif) 249 | 250 | ## AWS Configuration 251 | 252 | This application uses Amazon Bedrock for natural language understanding and AI capabilities. The following configuration is required: 253 | 254 | ### IAM Permissions 255 | 256 | Your AWS role needs these specific permissions: 257 | 258 | ```json 259 | { 260 | "Version": "2012-10-17", 261 | "Statement": [ 262 | { 263 | "Effect": "Allow", 264 | "Action": [ 265 | "bedrock:ListFoundationModels", 266 | "bedrock:GetFoundationModel", 267 | "bedrock-runtime:InvokeModel" 268 | ], 269 | "Resource": "*" //narrow the scope based on where you run this application 270 | } 271 | ] 272 | } 273 | ``` 274 | 275 | ### Required Model Access 276 | 277 | You must enable at least one model in Amazon Bedrock that supports **System Prompt**, **Converse API**, and **Tool use**. 278 | 279 | Compatible models include Anthropic Claude 3.x models, Amazon Nova, Mistral, etc. See [Supported models and features](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference-supported-models-features.html) for the full list. 280 | 281 | To enable these models see [Add or remove access to foundation models](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access-modify.html) 282 | 283 | ## License 284 | 285 | This project is licensed under the MIT License - see the LICENSE file for details. 286 | -------------------------------------------------------------------------------- /app_factory/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Manufacturing Operations Hub - MES and Production Meeting applications. 
3 | """ 4 | 5 | __version__ = "1.1.0" -------------------------------------------------------------------------------- /app_factory/app.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main entry point for the MES Demo Application 3 | Allows selection between MES Chat and Production Meeting modes 4 | """ 5 | 6 | import streamlit as st 7 | import os 8 | import sys 9 | from pathlib import Path 10 | 11 | # Add the current directory to the path so we can import modules 12 | sys.path.append(os.path.dirname(os.path.abspath(__file__))) 13 | 14 | # Import the app modules 15 | from mes_chat.app import run_mes_chat 16 | from production_meeting.app import run_production_meeting 17 | 18 | # Page configuration 19 | st.set_page_config( 20 | page_title="Manufacturing Operations Hub", 21 | page_icon="🏭", 22 | layout="wide", 23 | initial_sidebar_state="expanded" 24 | ) 25 | 26 | def main(): 27 | """Main application entry point""" 28 | 29 | # Application header 30 | col1, col2 = st.columns([1, 5]) 31 | 32 | with col1: 33 | st.image("https://upload.wikimedia.org/wikipedia/commons/9/93/Amazon_Web_Services_Logo.svg", width=80) 34 | 35 | with col2: 36 | st.title("🏭 Manufacturing Operations Hub") 37 | 38 | st.markdown(""" 39 | Welcome to the Manufacturing Operations Hub for your e-bike manufacturing facility. 40 | Choose from the following applications: 41 | """) 42 | 43 | # Application selector using native Streamlit components 44 | col1, col2 = st.columns(2) 45 | 46 | with col1: 47 | st.subheader("⚙️ MES Insight Chat") 48 | st.write(""" 49 | Interactive chat interface for MES data analysis. Ask questions about production, inventory, 50 | machine status, quality control, and more using natural language. 51 | 52 | **Use this when:** You need to analyze specific MES data, investigate issues, 53 | or generate custom reports. 
54 | """) 55 | 56 | if st.button("Launch MES Chat", key="launch_mes", use_container_width=True): 57 | st.session_state.app_mode = "mes_chat" 58 | st.rerun() 59 | 60 | with col2: 61 | st.subheader("📊 Daily Production Meeting") 62 | st.write(""" 63 | Structured interface for daily lean meetings with production KPIs, issue tracking, 64 | action items, and performance metrics focused on today's operations. 65 | 66 | **Use this when:** Running daily stand-up meetings, shift handovers, 67 | or production status reviews. 68 | """) 69 | 70 | if st.button("Launch Production Meeting", key="launch_prod", use_container_width=True): 71 | st.session_state.app_mode = "production_meeting" 72 | st.rerun() 73 | 74 | # App selector logic 75 | if 'app_mode' not in st.session_state: 76 | st.session_state.app_mode = None 77 | 78 | # Run the selected app 79 | if st.session_state.app_mode == "mes_chat": 80 | run_mes_chat() 81 | elif st.session_state.app_mode == "production_meeting": 82 | run_production_meeting() 83 | 84 | # Footer 85 | st.divider() 86 | st.caption("E-bike Manufacturing Facility Demo • MES & Production Meeting Simulator") 87 | 88 | if __name__ == "__main__": 89 | main() -------------------------------------------------------------------------------- /app_factory/data/meeting_templates.json: -------------------------------------------------------------------------------- 1 | { 2 | "templates": { 3 | "standard_production_meeting": { 4 | "name": "Standard Daily Production Meeting", 5 | "duration": 15, 6 | "sections": [ 7 | { 8 | "name": "Safety Share", 9 | "description": "Brief safety topic or reminder", 10 | "time_allocated": 1, 11 | "prompts": [ 12 | "Any safety incidents in the last 24 hours?", 13 | "Safety observations or near-misses to share?", 14 | "Safety topic of the day" 15 | ] 16 | }, 17 | { 18 | "name": "Production Review", 19 | "description": "Yesterday's production performance vs. targets", 20 | "time_allocated": 4, 21 | "prompts": [ 22 | "What was our target vs. 
actual output?", 23 | "What were the main challenges affecting production?", 24 | "Which products exceeded or missed targets and why?" 25 | ] 26 | }, 27 | { 28 | "name": "Quality Issues", 29 | "description": "Key quality metrics and issues", 30 | "time_allocated": 3, 31 | "prompts": [ 32 | "What is our current defect rate?", 33 | "Are there any recurring quality issues?", 34 | "What countermeasures are we implementing?" 35 | ] 36 | }, 37 | { 38 | "name": "Equipment Status", 39 | "description": "Machine availability and maintenance", 40 | "time_allocated": 3, 41 | "prompts": [ 42 | "What is our current equipment availability?", 43 | "Any machines down or in maintenance?", 44 | "Upcoming planned maintenance activities" 45 | ] 46 | }, 47 | { 48 | "name": "Material/Inventory Issues", 49 | "description": "Inventory constraints and shortages", 50 | "time_allocated": 2, 51 | "prompts": [ 52 | "Any material shortages affecting production?", 53 | "Inventory items below reorder level", 54 | "Expected deliveries today" 55 | ] 56 | }, 57 | { 58 | "name": "Action Items", 59 | "description": "Review open actions and assign new ones", 60 | "time_allocated": 2, 61 | "prompts": [ 62 | "Update on previously assigned actions", 63 | "New action items to be assigned", 64 | "Urgent issues requiring immediate attention" 65 | ] 66 | } 67 | ] 68 | }, 69 | "shift_handover": { 70 | "name": "Shift Handover Meeting", 71 | "duration": 10, 72 | "sections": [ 73 | { 74 | "name": "Safety & Quality", 75 | "description": "Safety incidents and quality issues", 76 | "time_allocated": 2, 77 | "prompts": [ 78 | "Safety incidents or near-misses during shift", 79 | "Quality issues encountered", 80 | "Quality holds or quarantined material" 81 | ] 82 | }, 83 | { 84 | "name": "Production Status", 85 | "description": "Work orders status and progress", 86 | "time_allocated": 3, 87 | "prompts": [ 88 | "Work orders completed", 89 | "Work in progress and percent complete", 90 | "Production vs target for the 
shift" 91 | ] 92 | }, 93 | { 94 | "name": "Equipment Status", 95 | "description": "Machine condition and issues", 96 | "time_allocated": 2, 97 | "prompts": [ 98 | "Equipment problems encountered", 99 | "Maintenance performed during shift", 100 | "Equipment requiring attention next shift" 101 | ] 102 | }, 103 | { 104 | "name": "Handover Items", 105 | "description": "Critical information for next shift", 106 | "time_allocated": 3, 107 | "prompts": [ 108 | "Priority work for next shift", 109 | "Pending issues requiring follow-up", 110 | "Special instructions or considerations" 111 | ] 112 | } 113 | ] 114 | }, 115 | "weekly_production_planning": { 116 | "name": "Weekly Production Planning", 117 | "duration": 30, 118 | "sections": [ 119 | { 120 | "name": "Previous Week Review", 121 | "description": "Performance review of previous week", 122 | "time_allocated": 5, 123 | "prompts": [ 124 | "Production targets vs actuals", 125 | "Key challenges and successes", 126 | "Quality and delivery performance" 127 | ] 128 | }, 129 | { 130 | "name": "Upcoming Orders", 131 | "description": "New orders and production schedule", 132 | "time_allocated": 8, 133 | "prompts": [ 134 | "Customer orders to be scheduled", 135 | "Production priorities for the week", 136 | "Capacity planning and constraints" 137 | ] 138 | }, 139 | { 140 | "name": "Resource Planning", 141 | "description": "Staff, equipment, and materials", 142 | "time_allocated": 7, 143 | "prompts": [ 144 | "Staffing requirements and availability", 145 | "Equipment availability and maintenance schedules", 146 | "Material requirements and inventory status" 147 | ] 148 | }, 149 | { 150 | "name": "Continuous Improvement", 151 | "description": "Improvement initiatives and projects", 152 | "time_allocated": 5, 153 | "prompts": [ 154 | "Status of ongoing improvement projects", 155 | "New improvement opportunities identified", 156 | "Resources required for implementation" 157 | ] 158 | }, 159 | { 160 | "name": "Action Planning", 161 | 
"description": "Assign actions and responsibilities", 162 | "time_allocated": 5, 163 | "prompts": [ 164 | "Review of open actions", 165 | "New actions to be assigned", 166 | "Critical path items and dependencies" 167 | ] 168 | } 169 | ] 170 | } 171 | }, 172 | "kpi_definitions": { 173 | "production": { 174 | "completion_rate": { 175 | "name": "Production Completion Rate", 176 | "description": "Percentage of planned production actually completed", 177 | "formula": "ActualProduction / PlannedQuantity * 100", 178 | "target": 95, 179 | "warning_threshold": 90, 180 | "critical_threshold": 85 181 | }, 182 | "schedule_adherence": { 183 | "name": "Schedule Adherence", 184 | "description": "Percentage of work orders completed on time", 185 | "formula": "OnTimeOrders / TotalOrders * 100", 186 | "target": 90, 187 | "warning_threshold": 85, 188 | "critical_threshold": 80 189 | } 190 | }, 191 | "quality": { 192 | "first_pass_yield": { 193 | "name": "First Pass Yield", 194 | "description": "Percentage of products that pass inspection on first attempt", 195 | "formula": "PassCount / InspectionCount * 100", 196 | "target": 95, 197 | "warning_threshold": 90, 198 | "critical_threshold": 85 199 | }, 200 | "defect_rate": { 201 | "name": "Defect Rate", 202 | "description": "Percentage of defective products", 203 | "formula": "DefectiveCount / TotalProduction * 100", 204 | "target": 2, 205 | "warning_threshold": 3, 206 | "critical_threshold": 5, 207 | "lower_is_better": true 208 | } 209 | }, 210 | "equipment": { 211 | "availability": { 212 | "name": "Equipment Availability", 213 | "description": "Percentage of time equipment is available for production", 214 | "formula": "UpTime / TotalTime * 100", 215 | "target": 90, 216 | "warning_threshold": 85, 217 | "critical_threshold": 80 218 | }, 219 | "oee": { 220 | "name": "Overall Equipment Effectiveness", 221 | "description": "Combined measure of availability, performance, and quality", 222 | "formula": "Availability * Performance * Quality", 
223 | "target": 85, 224 | "warning_threshold": 75, 225 | "critical_threshold": 65 226 | } 227 | }, 228 | "inventory": { 229 | "stock_coverage": { 230 | "name": "Inventory Coverage", 231 | "description": "Days of inventory on hand", 232 | "formula": "CurrentInventory / AverageDailyUsage", 233 | "target": "Varies by item", 234 | "warning_threshold": "Varies by item", 235 | "critical_threshold": "Varies by item" 236 | }, 237 | "shortage_events": { 238 | "name": "Material Shortage Events", 239 | "description": "Count of production interruptions due to material shortages", 240 | "formula": "Count of shortage events", 241 | "target": 0, 242 | "warning_threshold": 1, 243 | "critical_threshold": 3, 244 | "lower_is_better": true 245 | } 246 | } 247 | } 248 | } -------------------------------------------------------------------------------- /app_factory/data/sample_questions.json: -------------------------------------------------------------------------------- 1 | { 2 | "categories": { 3 | "🏭 Production": [ 4 | "What's our current production schedule for the next week?", 5 | "How many orders have we completed for eBike T101?", 6 | "Which work center has the highest production volume?", 7 | "What's our on-time completion rate for completed orders?", 8 | "What's the current status distribution of all work orders?", 9 | "Which products have the highest production quantities this month?" 
10 | ], 11 | "📦 Inventory": [ 12 | "Which inventory items are below their reorder level?", 13 | "What components are used in the eBike T101?", 14 | "Which supplier provides our battery components?", 15 | "Calculate the total value of current inventory", 16 | "Which inventory items have the longest lead times?", 17 | "Show me inventory items with quantity less than 100 units" 18 | ], 19 | "🔧 Machines": [ 20 | "Which machines are due for maintenance in the next 7 days?", 21 | "What's the OEE for our Frame Welding machines?", 22 | "Which machine has the most unplanned downtime?", 23 | "Show me the efficiency trends for the Final Assembly machines", 24 | "Compare the availability metrics across all machine types", 25 | "Which machines have efficiency factors below 0.85?" 26 | ], 27 | "⚠️ Quality": [ 28 | "What's our overall defect rate by product category?", 29 | "Show me the most common defect types and their severity", 30 | "Which work centers have the highest scrap rates?", 31 | "Compare quality metrics between morning and afternoon shifts", 32 | "What products have the highest yield rates?", 33 | "Which inspectors report the most defects?" 34 | ], 35 | "👥 Personnel": [ 36 | "Who are our most productive operators based on completed work orders?", 37 | "Which employees have the highest hourly rates?", 38 | "Show me the distribution of employees across different shifts", 39 | "What's the average tenure of our technicians?", 40 | "Which operators are assigned to Final Assembly work centers?" 41 | ], 42 | "⏱️ Performance": [ 43 | "What are our top bottleneck machines based on downtime?", 44 | "Which suppliers have the best reliability scores?", 45 | "Show material consumption variance for the most recent eBike T101 orders", 46 | "What's our average setup time across different machine types?", 47 | "Compare actual vs. 
planned production times across work centers" 48 | ] 49 | }, 50 | "general": { 51 | "work_orders": "Show me the current work orders and their status", 52 | "production_stats": "What's our production output for completed work orders in the last 30 days?", 53 | "inventory_status": "Which inventory items are below their reorder level?", 54 | "machines_maintenance": "Which machines are due for maintenance in the next week?", 55 | "quality_control": "What are our most common defect types and their severity?", 56 | "employee_productivity": "Who are our most productive operators based on completed work orders?", 57 | "material_consumption": "Show me material usage variance for eBike T101 production", 58 | "oee_metrics": "What's the OEE trend for our assembly lines over the past month?", 59 | "downtime_analysis": "Which machines have the most unplanned downtime?", 60 | "bom_analysis": "What components go into the eBike T101 and what's their total cost?", 61 | "machine_efficiency": "Which machines have the highest and lowest efficiency factors?", 62 | "quality_by_shift": "Compare quality metrics between different work shifts", 63 | "work_center_capacity": "What's the current utilization of our work centers?", 64 | "schedule_adherence": "What percentage of our work orders are completed on time?", 65 | "inventory_value": "What's the total value of our current inventory?", 66 | "defect_rate": "What's our overall defect rate across different products?", 67 | "supplier_performance": "Which suppliers have the longest lead times and lowest reliability?", 68 | "production_bottlenecks": "Which work centers have the lowest throughput?", 69 | "component_usage": "Which components are used in the most products?", 70 | "machine_status": "Show me the current status distribution of all machines" 71 | } 72 | } -------------------------------------------------------------------------------- /app_factory/data_generator/__init__.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Data Generator for MES Database 3 | Contains synthetic data generation scripts for simulating a manufacturing execution system 4 | """ 5 | 6 | __version__ = "1.0.0" -------------------------------------------------------------------------------- /app_factory/data_generator/data_pools.json: -------------------------------------------------------------------------------- 1 | { 2 | "inventory_names": [ 3 | "Aluminum Tubing", 4 | "Steel Bolts", 5 | "Rubber Grips", 6 | "Brake Cables", 7 | "Gear Shifters", 8 | "Ball Bearings", 9 | "Wheel Spokes", 10 | "Tire Rubber", 11 | "Chain Links", 12 | "Pedal Assemblies", 13 | "Lithium-ion Cells", 14 | "Electric Motors", 15 | "Control Circuits", 16 | "Seat Padding", 17 | "Handlebar Tubing", 18 | "Brake Pads", 19 | "Derailleur Springs", 20 | "Gear Cogs", 21 | "Bottom Bracket Shells", 22 | "Battery Casings", 23 | "Motor Magnets", 24 | "Microcontrollers", 25 | "Frame Paint", 26 | "Lubricating Oil", 27 | "Suspension Forks", 28 | "Rim Strips", 29 | "Valve Stems", 30 | "Chainring Bolts", 31 | "Dropout Hangers", 32 | "Hydraulic Fluid" 33 | ], 34 | "product_names": [ 35 | "eBike T101", 36 | "eBike T200", 37 | "eBike C150", 38 | "eBike M300", 39 | "Forks", 40 | "Frame", 41 | "Seat", 42 | "Handlebar", 43 | "Bearings", 44 | "Wheels", 45 | "Wheel", 46 | "Thru_Axle", 47 | "Tires", 48 | "Drive_Train", 49 | "Cassette", 50 | "Crank", 51 | "Brakes", 52 | "Brake_Lever", 53 | "Gear_Lever", 54 | "Front_Derailleur", 55 | "Rear_Derailleur", 56 | "Chain", 57 | "Bottom_Bracket", 58 | "Battery", 59 | "Motor", 60 | "Control_Unit", 61 | "Bolt", 62 | "Washer", 63 | "Motor_Assembly", 64 | "MRO_Part1", 65 | "MRO_Part2", 66 | "MRO_Part3" 67 | ], 68 | "product_descriptions": [ 69 | "eBike T101: High-performance electric bicycle with advanced components and sleek design", 70 | "eBike T200: Premium mountain e-bike with rugged frame and high-capacity battery", 71 | "eBike 
C150: Comfortable city commuter e-bike with integrated lights and fenders", 72 | "eBike M300: Mid-drive electric mountain bike with advanced suspension and trail capabilities", 73 | "Forks: Durable suspension forks for smooth ride on various terrains", 74 | "Frame: Lightweight aluminum frame with internal cable routing", 75 | "Seat: Ergonomic seat with memory foam for extended comfort", 76 | "Handlebar: Adjustable handlebar with integrated display mount", 77 | "Bearings: Sealed cartridge bearings for smooth, low-maintenance operation", 78 | "Wheels: 27.5-inch alloy wheels with reinforced spokes", 79 | "Wheel: Individual 27.5-inch alloy wheel compatible with tubeless tires", 80 | "Thru_Axle: 15mm front and 12mm rear thru-axles for improved stiffness", 81 | "Tires: Puncture-resistant, all-terrain tires with reflective sidewalls", 82 | "Drive_Train: 11-speed drivetrain system for versatile gear range", 83 | "Cassette: 11-speed cassette with wide gear ratio for varied terrain", 84 | "Crank: Forged aluminum crank arms with integrated torque sensor", 85 | "Brakes: Hydraulic disc brakes for powerful, all-weather stopping", 86 | "Brake_Lever: Ergonomic brake levers with reach adjustment", 87 | "Gear_Lever: Intuitive trigger shifters for precise gear changes", 88 | "Front_Derailleur: Electronic front derailleur for smooth shifting", 89 | "Rear_Derailleur: Electronic rear derailleur with motor-driven actuation", 90 | "Chain: Rust-resistant, high-strength chain optimized for e-bike use", 91 | "Bottom_Bracket: Sealed cartridge bottom bracket for durability", 92 | "Battery: 36V, 14Ah lithium-ion battery with smart BMS", 93 | "Motor: 250W mid-drive motor with 80Nm torque output", 94 | "Control_Unit: Advanced controller with Bluetooth connectivity and app support", 95 | "Bolt: Stainless steel bolts in various sizes for assembly", 96 | "Washer: Nylon and metal washers for proper component fit", 97 | "Motor_Assembly: Pre-assembled motor unit with integrated gearbox", 98 | 
"MRO_Part1: Specialized tools for e-bike maintenance", 99 | "MRO_Part2: Cleaning and lubrication kit for regular upkeep", 100 | "MRO_Part3: Diagnostic equipment for electronic systems" 101 | ], 102 | "machine_types": [ 103 | "Frame Welding", 104 | "Wheel Assembly", 105 | "Paint Booth", 106 | "Battery Assembly", 107 | "Motor Assembly", 108 | "Final Assembly", 109 | "Quality Control", 110 | "Packaging" 111 | ], 112 | "capacity_uom": { 113 | "Frame Welding": "frames/hour", 114 | "Wheel Assembly": "wheels/hour", 115 | "Paint Booth": "frames/hour", 116 | "Battery Assembly": "batteries/hour", 117 | "Motor Assembly": "motors/hour", 118 | "Final Assembly": "bikes/hour", 119 | "Quality Control": "bikes/hour", 120 | "Packaging": "bikes/hour" 121 | }, 122 | "nominal_capacity": { 123 | "Frame Welding": [5, 10], 124 | "Wheel Assembly": [20, 30], 125 | "Paint Booth": [10, 15], 126 | "Battery Assembly": [15, 25], 127 | "Motor Assembly": [20, 30], 128 | "Final Assembly": [5, 10], 129 | "Quality Control": [10, 15], 130 | "Packaging": [10, 20] 131 | }, 132 | "work_centers": [ 133 | { 134 | "name": "Frame Fabrication", 135 | "description": "Area for cutting, shaping, and welding bike frames", 136 | "capacity": 10, 137 | "capacity_uom": "frames/hour", 138 | "associated_machines": ["Frame Welding"] 139 | }, 140 | { 141 | "name": "Wheel Production", 142 | "description": "Assembly line for wheel building and truing", 143 | "capacity": 30, 144 | "capacity_uom": "wheels/hour", 145 | "associated_machines": ["Wheel Assembly"] 146 | }, 147 | { 148 | "name": "Paint and Finish", 149 | "description": "Automated paint booth and manual finishing area", 150 | "capacity": 15, 151 | "capacity_uom": "frames/hour", 152 | "associated_machines": ["Paint Booth"] 153 | }, 154 | { 155 | "name": "Battery Production", 156 | "description": "Clean room for battery cell assembly and testing", 157 | "capacity": 20, 158 | "capacity_uom": "batteries/hour", 159 | "associated_machines": ["Battery Assembly"] 160 | }, 
161 | { 162 | "name": "Motor Assembly", 163 | "description": "Precision assembly line for electric motors", 164 | "capacity": 25, 165 | "capacity_uom": "motors/hour", 166 | "associated_machines": ["Motor Assembly"] 167 | }, 168 | { 169 | "name": "Final Assembly Line 1", 170 | "description": "Main assembly line for complete e-bikes", 171 | "capacity": 8, 172 | "capacity_uom": "bikes/hour", 173 | "associated_machines": ["Final Assembly"] 174 | }, 175 | { 176 | "name": "Final Assembly Line 2", 177 | "description": "Secondary assembly line for overflow and special orders", 178 | "capacity": 6, 179 | "capacity_uom": "bikes/hour", 180 | "associated_machines": ["Final Assembly"] 181 | }, 182 | { 183 | "name": "Quality Control Station", 184 | "description": "Comprehensive testing and inspection area", 185 | "capacity": 12, 186 | "capacity_uom": "bikes/hour", 187 | "associated_machines": ["Quality Control"] 188 | }, 189 | { 190 | "name": "Packaging and Shipping", 191 | "description": "Area for final packaging and preparation for shipment", 192 | "capacity": 15, 193 | "capacity_uom": "bikes/hour", 194 | "associated_machines": ["Packaging"] 195 | } 196 | ], 197 | "qc_comments": { 198 | "frame": [ 199 | "Frame alignment within specified tolerances", 200 | "Welding quality meets or exceeds standards", 201 | "No visible defects or stress points in frame structure", 202 | "Frame size and geometry match specifications", 203 | "All mounting points correctly positioned and threaded" 204 | ], 205 | "paint": [ 206 | "Paint finish smooth and even across all surfaces", 207 | "No runs, drips, or orange peel effect observed", 208 | "Color matches specified shade within acceptable range", 209 | "Decals applied correctly and without bubbles", 210 | "Clear coat evenly applied and fully cured" 211 | ], 212 | "wheels": [ 213 | "Wheel true and round within 0.5mm tolerance", 214 | "Spoke tension even and within specified range", 215 | "Hub bearings smooth with no play", 216 | "Rim tape correctly 
installed", 217 | "Tire bead seated properly with even spacing" 218 | ], 219 | "drivetrain": [ 220 | "Chain tension set to specifications", 221 | "Derailleur alignment correct for all gears", 222 | "Gear indexing smooth and accurate", 223 | "Crankset properly torqued and aligned", 224 | "Bottom bracket smoothly rotating without play" 225 | ], 226 | "brakes": [ 227 | "Brake pads aligned correctly with rim/rotor", 228 | "Hydraulic system properly bled with no leaks", 229 | "Brake lever feel firm with appropriate travel", 230 | "Braking power meets safety standards in testing", 231 | "Rotors true and free from contamination" 232 | ], 233 | "electronics": [ 234 | "Battery capacity meets or exceeds rated specification", 235 | "Motor output and efficiency within expected range", 236 | "Control unit firmware up to date", 237 | "All electrical connections secure and waterproof", 238 | "Display functions correctly with accurate readings" 239 | ], 240 | "final_assembly": [ 241 | "All components securely fastened to torque specifications", 242 | "Cables and housing routed correctly without kinks", 243 | "Headset adjusted for smooth rotation without play", 244 | "Pedals thread in smoothly and are secure", 245 | "Quick release/thru-axles function correctly and are secure" 246 | ], 247 | "quality_control": [ 248 | "Test ride completed, all systems functioning as expected", 249 | "Shifting smooth and accurate across all gears", 250 | "Braking performance even and within safety standards", 251 | "No unusual noises or vibrations during operation", 252 | "All reflectors and safety features present and secure" 253 | ], 254 | "packaging": [ 255 | "All included accessories present and undamaged", 256 | "Protective packaging correctly applied to frame and components", 257 | "User manual and warranty information included", 258 | "Shipping box undamaged and correctly labeled", 259 | "Battery charge level within safe shipping range" 260 | ] 261 | }, 262 | "suppliers": [ 263 | { "name": 
"Acme Electronics", "lead_time": 5 }, 264 | { "name": "BikeFrame Co.", "lead_time": 10 }, 265 | { "name": "Wheel Masters", "lead_time": 7 }, 266 | { "name": "Battery Tech", "lead_time": 14 }, 267 | { "name": "Control Systems Inc.", "lead_time": 8 }, 268 | { "name": "MetalWorks Supply", "lead_time": 12 }, 269 | { "name": "Precision Parts Ltd.", "lead_time": 6 }, 270 | { "name": "Global Components", "lead_time": 21 } 271 | ], 272 | "cost_ranges": { 273 | "products": { "min": 50, "max": 1000 }, 274 | "components": { "min": 1, "max": 100 }, 275 | "work_centers": { "min": 50, "max": 200 }, 276 | "machines": { "min": 20, "max": 100 } 277 | }, 278 | "lead_time_range": { "min": 1, "max": 30 }, 279 | "employee_hourly_rate_range": { "min": 15, "max": 50 }, 280 | "downtime_reasons": { 281 | "unplanned": [ 282 | "Equipment Failure", 283 | "Power Outage", 284 | "Material Shortage", 285 | "Operator Absence", 286 | "Quality Issue", 287 | "Tool Breakage", 288 | "Software Error", 289 | "Safety Incident", 290 | "Unexpected Maintenance" 291 | ], 292 | "planned": [ 293 | "Scheduled Maintenance", 294 | "Shift Change", 295 | "Setup/Changeover", 296 | "Cleaning", 297 | "Training", 298 | "Meeting", 299 | "Planned Downtime", 300 | "Software Update", 301 | "Certification" 302 | ] 303 | }, 304 | "material_categories": [ 305 | "Raw Material", 306 | "Electronic Component", 307 | "Mechanical Component", 308 | "Assembly", 309 | "Packaging", 310 | "MRO", 311 | "Consumable", 312 | "Finished Good" 313 | ], 314 | "product_categories": [ 315 | "Electric Bikes", 316 | "Components", 317 | "Accessories", 318 | "Spare Parts", 319 | "MRO" 320 | ], 321 | "storage_locations": [ 322 | "Warehouse A", 323 | "Warehouse B", 324 | "Production Floor", 325 | "Assembly Area", 326 | "External Storage", 327 | "Secured Storage", 328 | "Temperature Controlled" 329 | ], 330 | "defect_severities": { 331 | "1": "Minor - Cosmetic issue, no functional impact", 332 | "2": "Low - Slight functional impact, easily fixed", 333 | 
"3": "Medium - Noticeable functional impact, requires rework", 334 | "4": "High - Significant functional issue, may require replacement", 335 | "5": "Critical - Safety concern or complete failure" 336 | } 337 | } -------------------------------------------------------------------------------- /app_factory/mes_chat/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | MES Chatbot application for querying and analyzing MES data. 3 | """ 4 | 5 | from mes_chat.app import run_mes_chat -------------------------------------------------------------------------------- /app_factory/production_meeting/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Production Meeting Dashboard for daily lean meetings and production status reviews. 3 | """ 4 | 5 | from production_meeting.app import run_production_meeting -------------------------------------------------------------------------------- /app_factory/production_meeting/action_tracker.py: -------------------------------------------------------------------------------- 1 | """ 2 | Action Item Tracker for Production Meetings 3 | Handles creating, updating, and managing action items from meetings 4 | """ 5 | 6 | import streamlit as st 7 | import pandas as pd 8 | import json 9 | from datetime import datetime, timedelta 10 | import os 11 | from pathlib import Path 12 | 13 | class ActionTracker: 14 | """Manager for tracking action items from production meetings""" 15 | 16 | def __init__(self): 17 | """Initialize the action tracker""" 18 | # Create the data directory if it doesn't exist 19 | self.data_dir = Path("data/action_items") 20 | self.data_dir.mkdir(exist_ok=True, parents=True) 21 | 22 | def add_action_item(self, description, owner, priority, due_date, meeting_date, notes=""): 23 | """ 24 | Add a new action item 25 | 26 | Args: 27 | description (str): Description of the action item 28 | owner (str): Person responsible for 
the action 29 | priority (str): Priority level (High, Medium, Low) 30 | due_date (str): Due date in YYYY-MM-DD format 31 | meeting_date (str): Meeting date in YYYY-MM-DD format 32 | notes (str, optional): Additional notes 33 | 34 | Returns: 35 | dict: The created action item 36 | """ 37 | # Generate a unique ID 38 | action_id = f"ACT-{datetime.now().strftime('%Y%m%d%H%M%S')}" 39 | 40 | # Create the action item 41 | action_item = { 42 | "id": action_id, 43 | "description": description, 44 | "owner": owner, 45 | "priority": priority, 46 | "due_date": due_date, 47 | "created_date": datetime.now().strftime("%Y-%m-%d"), 48 | "meeting_date": meeting_date, 49 | "status": "Open", 50 | "notes": notes, 51 | "updates": [] 52 | } 53 | 54 | # Save to file 55 | self._save_action_item(action_item) 56 | 57 | return action_item 58 | 59 | def update_action_status(self, action_id, new_status, update_note=""): 60 | """ 61 | Update an action item's status 62 | 63 | Args: 64 | action_id (str): The ID of the action item 65 | new_status (str): New status (Open, In Progress, Completed, Closed) 66 | update_note (str, optional): Note about the update 67 | 68 | Returns: 69 | bool: True if updated successfully, False otherwise 70 | """ 71 | action_item = self.get_action_item(action_id) 72 | 73 | if not action_item: 74 | return False 75 | 76 | # Update the status 77 | action_item["status"] = new_status 78 | 79 | # Add update record 80 | update = { 81 | "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 82 | "status": new_status, 83 | "note": update_note 84 | } 85 | 86 | action_item["updates"].append(update) 87 | 88 | # Save changes 89 | self._save_action_item(action_item) 90 | 91 | return True 92 | 93 | def get_action_item(self, action_id): 94 | """ 95 | Get a specific action item by ID 96 | 97 | Args: 98 | action_id (str): The ID of the action item 99 | 100 | Returns: 101 | dict: The action item or None if not found 102 | """ 103 | filepath = self.data_dir / f"{action_id}.json" 104 | 
105 | if not filepath.exists(): 106 | return None 107 | 108 | try: 109 | with open(filepath, 'r') as f: 110 | return json.load(f) 111 | except Exception as e: 112 | print(f"Error loading action item {action_id}: {e}") 113 | return None 114 | 115 | def get_all_action_items(self, filter_status=None): 116 | """ 117 | Get all action items, optionally filtered by status 118 | 119 | Args: 120 | filter_status (str or list, optional): Status(es) to filter by 121 | 122 | Returns: 123 | list: List of action items 124 | """ 125 | actions = [] 126 | 127 | # Load all action item files 128 | for file in self.data_dir.glob("*.json"): 129 | try: 130 | with open(file, 'r') as f: 131 | action = json.load(f) 132 | 133 | # Apply status filter if specified 134 | if filter_status: 135 | if isinstance(filter_status, list): 136 | if action["status"] in filter_status: 137 | actions.append(action) 138 | else: 139 | if action["status"] == filter_status: 140 | actions.append(action) 141 | else: 142 | actions.append(action) 143 | except Exception as e: 144 | print(f"Error loading action item from {file}: {e}") 145 | 146 | # Sort by due date 147 | actions.sort(key=lambda x: x.get("due_date", "9999-99-99")) 148 | 149 | return actions 150 | 151 | def get_open_actions_by_owner(self): 152 | """ 153 | Get open actions grouped by owner 154 | 155 | Returns: 156 | dict: Dictionary of owners with their open actions 157 | """ 158 | open_actions = self.get_all_action_items(filter_status=["Open", "In Progress"]) 159 | 160 | # Group by owner 161 | owners = {} 162 | for action in open_actions: 163 | owner = action["owner"] 164 | if owner not in owners: 165 | owners[owner] = [] 166 | owners[owner].append(action) 167 | 168 | return owners 169 | 170 | def get_overdue_actions(self): 171 | """ 172 | Get all overdue actions (due date in the past and not completed) 173 | 174 | Returns: 175 | list: List of overdue action items 176 | """ 177 | open_actions = self.get_all_action_items(filter_status=["Open", "In 
Progress"]) 178 | today = datetime.now().strftime("%Y-%m-%d") 179 | 180 | overdue = [ 181 | action for action in open_actions 182 | if action["due_date"] < today 183 | ] 184 | 185 | # Sort by due date (oldest first) 186 | overdue.sort(key=lambda x: x["due_date"]) 187 | 188 | return overdue 189 | 190 | def delete_action_item(self, action_id): 191 | """ 192 | Delete an action item 193 | 194 | Args: 195 | action_id (str): The ID of the action item 196 | 197 | Returns: 198 | bool: True if deleted successfully, False otherwise 199 | """ 200 | filepath = self.data_dir / f"{action_id}.json" 201 | 202 | if not filepath.exists(): 203 | return False 204 | 205 | try: 206 | filepath.unlink() 207 | return True 208 | except Exception as e: 209 | print(f"Error deleting action item {action_id}: {e}") 210 | return False 211 | 212 | def _save_action_item(self, action_item): 213 | """ 214 | Save an action item to file 215 | 216 | Args: 217 | action_item (dict): The action item to save 218 | """ 219 | action_id = action_item["id"] 220 | filepath = self.data_dir / f"{action_id}.json" 221 | 222 | try: 223 | with open(filepath, 'w') as f: 224 | json.dump(action_item, f, indent=2) 225 | except Exception as e: 226 | print(f"Error saving action item {action_id}: {e}") 227 | 228 | def get_action_items_for_meeting(self, meeting_date): 229 | """ 230 | Get action items created in a specific meeting 231 | 232 | Args: 233 | meeting_date (str): Meeting date in YYYY-MM-DD format 234 | 235 | Returns: 236 | list: List of action items from the meeting 237 | """ 238 | all_actions = self.get_all_action_items() 239 | 240 | meeting_actions = [ 241 | action for action in all_actions 242 | if action.get("meeting_date") == meeting_date 243 | ] 244 | 245 | return meeting_actions 246 | 247 | def export_actions_to_csv(self, filename=None): 248 | """ 249 | Export all action items to CSV 250 | 251 | Args: 252 | filename (str, optional): Output filename 253 | 254 | Returns: 255 | str: Path to the exported CSV file 
256 | """ 257 | if not filename: 258 | timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") 259 | filename = f"action_items_export_{timestamp}.csv" 260 | 261 | # Get all action items 262 | actions = self.get_all_action_items() 263 | 264 | # Convert to DataFrame for export 265 | df = pd.DataFrame(actions) 266 | 267 | # Create exports directory if it doesn't exist 268 | exports_dir = Path("exports") 269 | exports_dir.mkdir(exist_ok=True) 270 | 271 | # Export to CSV 272 | export_path = exports_dir / filename 273 | df.to_csv(export_path, index=False) 274 | 275 | return str(export_path) 276 | 277 | # Create a Streamlit component for displaying and managing action items 278 | def display_action_tracker(meeting_date=None): 279 | """ 280 | Streamlit component for displaying and managing action items 281 | 282 | Args: 283 | meeting_date (str, optional): Current meeting date (for filtering) 284 | """ 285 | # Initialize tracker 286 | tracker = ActionTracker() 287 | 288 | # Layout 289 | st.subheader("📋 Action Item Tracker") 290 | 291 | # Tabs for different views 292 | tab1, tab2, tab3 = st.tabs(["Current Actions", "Add New Action", "Action History"]) 293 | 294 | # Tab 1: Current Actions 295 | with tab1: 296 | # Filter controls 297 | col1, col2, col3 = st.columns(3) 298 | 299 | with col1: 300 | status_filter = st.multiselect( 301 | "Status", 302 | options=["Open", "In Progress", "Completed", "Closed"], 303 | default=["Open", "In Progress"] 304 | ) 305 | 306 | with col2: 307 | priority_filter = st.multiselect( 308 | "Priority", 309 | options=["High", "Medium", "Low"], 310 | default=["High", "Medium", "Low"] 311 | ) 312 | 313 | with col3: 314 | owner_filter = st.text_input("Owner Filter", "") 315 | 316 | # Get action items 317 | actions = tracker.get_all_action_items() 318 | 319 | # Apply filters 320 | filtered_actions = [] 321 | for action in actions: 322 | if status_filter and action["status"] not in status_filter: 323 | continue 324 | 325 | if priority_filter and 
action["priority"] not in priority_filter: 326 | continue 327 | 328 | if owner_filter and owner_filter.lower() not in action["owner"].lower(): 329 | continue 330 | 331 | filtered_actions.append(action) 332 | 333 | # Show results 334 | if filtered_actions: 335 | st.write(f"Showing {len(filtered_actions)} action items") 336 | 337 | for action in filtered_actions: 338 | with st.container(): 339 | col1, col2, col3 = st.columns([3, 1, 1]) 340 | 341 | # Set priority color 342 | priority_color = "🟢" # Default green 343 | if action["priority"] == "High": 344 | priority_color = "🔴" 345 | elif action["priority"] == "Medium": 346 | priority_color = "🟠" 347 | 348 | col1.markdown(f"**{action['id']}: {action['description']}**") 349 | col2.markdown(f"**Owner:** {action['owner']}") 350 | col3.markdown(f"**Priority:** {priority_color} {action['priority']}") 351 | 352 | col4, col5, col6 = st.columns([1, 1, 3]) 353 | col4.markdown(f"**Due:** {action['due_date']}") 354 | col5.markdown(f"**Status:** {action['status']}") 355 | 356 | if action.get("notes"): 357 | col6.markdown(f"**Notes:** {action['notes']}") 358 | 359 | # Action management 360 | action_col1, action_col2 = st.columns([3, 1]) 361 | 362 | with action_col1: 363 | new_status = st.selectbox( 364 | "Update Status", 365 | options=["Open", "In Progress", "Completed", "Closed"], 366 | index=["Open", "In Progress", "Completed", "Closed"].index(action["status"]), 367 | key=f"status_{action['id']}" 368 | ) 369 | 370 | status_note = st.text_input( 371 | "Update Note", 372 | key=f"note_{action['id']}", 373 | placeholder="Add optional note about the update" 374 | ) 375 | 376 | with action_col2: 377 | if st.button("Update", key=f"update_{action['id']}"): 378 | if new_status != action["status"]: 379 | tracker.update_action_status(action["id"], new_status, status_note) 380 | st.success(f"Updated status to {new_status}") 381 | st.rerun() 382 | 383 | if st.button("Delete", key=f"delete_{action['id']}"): 384 | 
tracker.delete_action_item(action["id"]) 385 | st.success("Action item deleted") 386 | st.rerun() 387 | 388 | st.markdown("---") 389 | else: 390 | st.info("No action items match your filters") 391 | 392 | # Tab 2: Add New Action 393 | with tab2: 394 | with st.form("new_action_form"): 395 | st.write("Create New Action Item") 396 | 397 | description = st.text_input("Description", placeholder="What needs to be done?") 398 | 399 | col1, col2 = st.columns(2) 400 | with col1: 401 | owner = st.text_input("Owner", placeholder="Who is responsible?") 402 | with col2: 403 | priority = st.selectbox("Priority", options=["High", "Medium", "Low"]) 404 | 405 | col3, col4 = st.columns(2) 406 | with col3: 407 | due_date = st.date_input("Due Date", datetime.now() + timedelta(days=1)) 408 | with col4: 409 | if meeting_date: 410 | meeting_date_val = datetime.strptime(meeting_date, "%Y-%m-%d").date() 411 | else: 412 | meeting_date_val = datetime.now().date() 413 | 414 | meeting_date_input = st.date_input("Meeting Date", meeting_date_val) 415 | 416 | notes = st.text_area("Notes", placeholder="Additional details or context") 417 | 418 | submitted = st.form_submit_button("Create Action Item") 419 | 420 | if submitted: 421 | if description and owner: 422 | # Create new action 423 | tracker.add_action_item( 424 | description=description, 425 | owner=owner, 426 | priority=priority, 427 | due_date=due_date.strftime("%Y-%m-%d"), 428 | meeting_date=meeting_date_input.strftime("%Y-%m-%d"), 429 | notes=notes 430 | ) 431 | 432 | st.success("Action item created!") 433 | st.rerun() 434 | else: 435 | st.error("Description and Owner are required") 436 | 437 | # Tab 3: Action History 438 | with tab3: 439 | st.write("Action Item History") 440 | 441 | # Get completed/closed actions 442 | closed_actions = tracker.get_all_action_items(filter_status=["Completed", "Closed"]) 443 | 444 | if closed_actions: 445 | st.write(f"{len(closed_actions)} completed or closed actions") 446 | 447 | # Group by month 448 | 
# Allow running this module standalone for a quick manual smoke test
# (normally it is embedded in the production meeting app).
if __name__ == "__main__":
    st.set_page_config(
        page_title="Action Tracker",
        layout="wide",
    )
    st.title("Action Item Tracker Test")
    display_action_tracker()
def run_production_meeting() -> None:
    """Render the daily production meeting dashboard page.

    Builds the full page in one pass: a six-column control bar (date,
    attendees, duration, status, AI-insight toggle, main-menu button)
    followed by ten tabs that delegate to the dashboard and AI-insight
    modules imported at the top of this file.

    Meeting state survives Streamlit reruns via
    ``st.session_state.meeting_data`` (date, action_items, notes,
    attendees, meeting_status, selected_section); the per-tab AI toggle
    lives in ``st.session_state.enable_tab_insights``.
    """

    # Set up the page header
    st.title("📊 Daily Production Meeting Dashboard")

    # Initialize session state for meeting data (only on the first run
    # of a session; later reruns keep whatever the user has entered).
    if "meeting_data" not in st.session_state:
        st.session_state.meeting_data = {
            "date": datetime.now().strftime("%Y-%m-%d"),
            "action_items": [],
            "notes": "",
            "attendees": "",
            "meeting_status": "Not Started",  # Not Started, In Progress, Completed
            "selected_section": "summary"
        }

    # Initialize session state for AI settings (tab insights on by default)
    if "enable_tab_insights" not in st.session_state:
        st.session_state.enable_tab_insights = True

    # Top control bar
    control_cols = st.columns([1, 2, 1, 1, 1, 1])

    with control_cols[0]:
        # Date widget is seeded from session state, then the (possibly
        # changed) value is written straight back so it persists.
        meeting_date = st.date_input(
            "Meeting Date",
            value=datetime.strptime(st.session_state.meeting_data["date"], "%Y-%m-%d"),
            key="meeting_date"
        )
        st.session_state.meeting_data["date"] = meeting_date.strftime("%Y-%m-%d")

    with control_cols[1]:
        st.text_input(
            "Attendees",
            value=st.session_state.meeting_data["attendees"],
            key="attendees",
            placeholder="List meeting attendees"
        )
        # Widget value is mirrored into meeting_data via its "attendees" key.
        st.session_state.meeting_data["attendees"] = st.session_state.attendees

    with control_cols[2]:
        # Time controls — display-only schedule; start is fixed at 09:00 today.
        start_time = datetime.now().replace(hour=9, minute=0, second=0)
        meeting_duration = st.slider(
            "Duration (min)",
            min_value=5,
            max_value=30,
            value=15,
            step=5,
            key="meeting_duration"
        )

        # Calculate end time
        end_time = start_time + timedelta(minutes=meeting_duration)
        st.write(f"⏰ {start_time.strftime('%H:%M')}-{end_time.strftime('%H:%M')}")

    with control_cols[3]:
        # index= keeps the selectbox on the previously saved status across reruns.
        meeting_status = st.selectbox(
            "Status",
            options=["Not Started", "In Progress", "Completed"],
            index=["Not Started", "In Progress", "Completed"].index(st.session_state.meeting_data["meeting_status"]),
            key="meeting_status"
        )
        st.session_state.meeting_data["meeting_status"] = meeting_status

    with control_cols[4]:
        # Toggle for AI insights shown inside the dashboard tabs
        ai_enabled = st.checkbox(
            "Tab Insights",
            value=st.session_state.enable_tab_insights,
            help="Enable/disable AI insights in dashboard tabs"
        )
        st.session_state.enable_tab_insights = ai_enabled

    with control_cols[5]:
        # app_mode is presumably read by an outer launcher to route between
        # sub-apps — TODO confirm against app_factory/app.py.
        if st.button("🏠 Main Menu", use_container_width=True):
            st.session_state.app_mode = None
            st.rerun()

    # Main navigation tabs
    tabs = st.tabs([
        "📈 Production Summary",
        "🔧 Equipment Status",
        "⚠️ Quality Issues",
        "📦 Inventory Alerts",
        "👥 Productivity",
        "🔍 Root Cause Analysis",
        "🤖 AI Insights",
        "📋 Action Items",
        "📝 Meeting Notes",
        "📄 Reports"
    ])

    # Tab 1: Production Summary
    with tabs[0]:
        production_summary_dashboard()
        provide_tab_insights("production")

    # Tab 2: Equipment Status
    with tabs[1]:
        equipment_status_dashboard()
        provide_tab_insights("machines")

    # Tab 3: Quality Issues
    with tabs[2]:
        quality_dashboard()
        provide_tab_insights("quality")

    # Tab 4: Inventory Alerts
    with tabs[3]:
        inventory_dashboard()
        provide_tab_insights("inventory")

    # Tab 5: Productivity (no AI insight hook for this tab)
    with tabs[4]:
        productivity_dashboard()

    # Tab 6: Root Cause Analysis
    with tabs[5]:
        add_root_cause_analysis()

    # Tab 7: AI Insights - Enhanced with structured selection options
    with tabs[6]:
        # Let user select which type of AI analysis to show
        analysis_type = st.radio(
            "Select Analysis Type:",
            options=[
                "General Insights",
                "Predictive Analysis",
                "Decision Intelligence",
                "Data Storytelling",
                "Conversational Q&A"
            ],
            horizontal=True
        )

        # Dispatch to the matching generator from ai_insights.
        if analysis_type == "General Insights":
            display_ai_insights_tab()
        elif analysis_type == "Predictive Analysis":
            generate_predictive_insights()
        elif analysis_type == "Decision Intelligence":
            generate_decision_intelligence()
        elif analysis_type == "Data Storytelling":
            generate_narrative_summary()
        else:  # Conversational Q&A
            add_conversational_analysis()

    # Tab 8: Action Items — scoped to the currently selected meeting date.
    with tabs[7]:
        display_action_tracker(st.session_state.meeting_data["date"])

    # Tab 9: Meeting Notes
    with tabs[8]:
        st.header("📝 Meeting Notes")

        # Meeting notes input
        notes = st.text_area(
            "Meeting Notes",
            value=st.session_state.meeting_data["notes"],
            height=300,
            key="meeting_notes"
        )
        st.session_state.meeting_data["notes"] = notes

        # Weekly overview section
        with st.expander("Weekly Performance Overview", expanded=False):
            weekly_overview_dashboard()

    # Tab 10: Reports
    with tabs[9]:
        report_options = st.radio(
            "Report Type:",
            options=["Standard Report", "AI-Enhanced Executive Summary"],
            horizontal=True
        )

        if report_options == "Standard Report":
            display_report_generator(
                meeting_date=st.session_state.meeting_data["date"],
                meeting_data=st.session_state.meeting_data
            )
        else:
            st.subheader("AI-Enhanced Executive Summary")
            st.info("This summary combines production data with AI analysis for executive review")

            if st.button("Generate Executive Summary", use_container_width=True):
                with st.spinner("Analyzing data and generating executive summary..."):
                    # Simply call the narrative summary function - it already provides what we need
                    generate_narrative_summary()
# Standalone entry point for manual testing of this module.
if __name__ == "__main__":
    st.set_page_config(
        page_title="Manufacturing Operations Hub",
        page_icon="🏭",
        layout="wide",
        initial_sidebar_state="collapsed",
    )

    # A fresh session starts on the welcome screen.
    if "show_welcome" not in st.session_state:
        st.session_state.show_welcome = True

    # Route to the welcome screen until the user launches the demo.
    if not st.session_state.show_welcome:
        run_production_meeting()
    else:
        show_welcome_screen()
def equipment_status_dashboard() -> None:
    """Display the enhanced equipment status dashboard.

    Renders four sections in order:
      1. Fleet summary — availability gauge, status pie, and per-type
         efficiency/status bars from ``db_manager.get_machine_status_summary()``.
      2. Downtime impact — last-7-days downtime ranked by estimated lost
         units, with a Pareto chart of downtime reasons.
      3. Maintenance effectiveness — unplanned breakdowns since each
         machine's last maintenance, plotted as a quadrant scatter.
      4. Upcoming maintenance — machines due within 7 days, colour-coded
         by urgency.
    Each section degrades to an informational message when its query
    returns no rows.
    """
    st.header("🔧 Equipment Status")

    # Get machine status summary (expected columns: TotalMachines, Running,
    # Idle, Maintenance, Breakdown, MachineType, AvgEfficiency — inferred
    # from usage below; the query itself lives in shared.database).
    machine_status = db_manager.get_machine_status_summary()

    if not machine_status.empty:
        # Summary metrics for machines
        col1, col2 = st.columns(2)

        with col1:
            # Overall equipment status — fleet-wide totals across all types.
            total_machines = machine_status['TotalMachines'].sum()
            running_machines = machine_status['Running'].sum()
            idle_machines = machine_status['Idle'].sum()
            maintenance_machines = machine_status['Maintenance'].sum()
            breakdown_machines = machine_status['Breakdown'].sum()

            # Guard against division by zero when the fleet is empty.
            availability = running_machines / total_machines * 100 if total_machines > 0 else 0

            # Create gauge chart for availability (red <50, orange 50-80,
            # light green 80-100, with an 85% target threshold line).
            fig = go.Figure(go.Indicator(
                mode = "gauge+number",
                value = availability,
                domain = {'x': [0, 1], 'y': [0, 1]},
                title = {'text': "Machine Availability"},
                gauge = {
                    'axis': {'range': [0, 100]},
                    'bar': {'color': "green"},
                    'steps': [
                        {'range': [0, 50], 'color': "red"},
                        {'range': [50, 80], 'color': "orange"},
                        {'range': [80, 100], 'color': "lightgreen"}
                    ],
                    'threshold': {
                        'line': {'color': "black", 'width': 4},
                        'thickness': 0.75,
                        'value': 85
                    }
                }
            ))

            st.plotly_chart(fig, use_container_width=True)

            # Status breakdown
            status_data = pd.DataFrame({
                'Status': ['Running', 'Idle', 'Maintenance', 'Breakdown'],
                'Count': [running_machines, idle_machines, maintenance_machines, breakdown_machines]
            })

            fig = px.pie(
                status_data,
                values='Count',
                names='Status',
                title='Machine Status Distribution',
                color='Status',
                color_discrete_map={
                    'Running': 'green',
                    'Idle': 'blue',
                    'Maintenance': 'orange',
                    'Breakdown': 'red'
                }
            )
            st.plotly_chart(fig, use_container_width=True)

        with col2:
            # Machine type breakdown with efficiency
            st.subheader("Machine Performance by Type")

            fig = px.bar(
                machine_status,
                x='MachineType',
                y='AvgEfficiency',
                title='Average Efficiency by Machine Type',
                labels={'AvgEfficiency': 'Efficiency (%)', 'MachineType': 'Machine Type'},
                color='AvgEfficiency',
                color_continuous_scale='RdYlGn'
            )
            fig.update_layout(coloraxis_colorbar=dict(title='Efficiency (%)'))
            st.plotly_chart(fig, use_container_width=True)

            # Stacked bar chart of machine status by type
            fig = px.bar(
                machine_status,
                x='MachineType',
                y=['Running', 'Idle', 'Maintenance', 'Breakdown'],
                title='Machine Status by Type',
                labels={'value': 'Number of Machines', 'variable': 'Status', 'MachineType': 'Machine Type'},
                color_discrete_map={
                    'Running': 'green',
                    'Idle': 'blue',
                    'Maintenance': 'orange',
                    'Breakdown': 'red'
                }
            )
            st.plotly_chart(fig, use_container_width=True)
    else:
        st.info("No machine status data available")

    # NEW: Downtime Impact Analysis
    st.subheader("🔍 Downtime Impact Analysis")

    # Get downtime data with production impact. Lost units are estimated
    # as downtime hours * HourlyRate — assumes HourlyRate is units/hour
    # and Duration is minutes (column names suggest so; TODO confirm
    # against the data generator schema).
    downtime_query = """
    SELECT
        m.Name as MachineName,
        m.Type as MachineType,
        d.Reason as DowntimeReason,
        d.Category as DowntimeCategory,
        d.Duration as DurationMinutes,
        m.HourlyRate as UnitsPerHour,
        (d.Duration / 60.0 * m.HourlyRate) as EstimatedLostUnits
    FROM
        Downtimes d
    JOIN
        Machines m ON d.MachineID = m.MachineID
    WHERE
        d.StartTime >= date('now', '-7 day')
    ORDER BY
        EstimatedLostUnits DESC
    LIMIT 10
    """

    result = db_manager.execute_query(downtime_query)

    if result["success"] and result["row_count"] > 0:
        downtime_df = pd.DataFrame(result["rows"])

        # Create visualization of production impact
        fig = px.bar(
            downtime_df,
            x='MachineName',
            y='EstimatedLostUnits',
            color='DowntimeCategory',
            title='Estimated Production Impact of Downtime (Last 7 Days)',
            labels={
                'EstimatedLostUnits': 'Estimated Lost Units',
                'MachineName': 'Machine',
                'DowntimeCategory': 'Downtime Category'
            },
            color_discrete_map={
                'planned': 'blue',
                'unplanned': 'red'
            },
            hover_data=['DurationMinutes', 'DowntimeReason', 'MachineType']
        )

        st.plotly_chart(fig, use_container_width=True)

        # Calculate and display total impact. NOTE(review): totals cover
        # only the top-10 rows returned by the LIMIT above, not all
        # downtime in the window.
        total_lost_units = downtime_df['EstimatedLostUnits'].sum()
        total_downtime_minutes = downtime_df['DurationMinutes'].sum()

        cols = st.columns(2)
        cols[0].metric("Total Lost Production", f"{int(total_lost_units):,} units")
        cols[1].metric("Total Downtime", f"{total_downtime_minutes:.0f} minutes")

        # Show most impactful downtime reasons
        reason_impact = downtime_df.groupby('DowntimeReason').agg({
            'EstimatedLostUnits': 'sum',
            'DurationMinutes': 'sum'
        }).reset_index().sort_values('EstimatedLostUnits', ascending=False)

        st.subheader("Most Impactful Downtime Reasons")

        # Create Pareto chart of downtime reasons
        fig = go.Figure()

        # Add bars for lost units
        fig.add_trace(go.Bar(
            x=reason_impact['DowntimeReason'],
            y=reason_impact['EstimatedLostUnits'],
            name='Lost Units'
        ))

        # Add cumulative percentage line (requires the descending sort above
        # so the running total follows Pareto order).
        reason_impact['CumulativeLostUnits'] = reason_impact['EstimatedLostUnits'].cumsum()
        reason_impact['CumulativePercentage'] = reason_impact['CumulativeLostUnits'] / reason_impact['EstimatedLostUnits'].sum() * 100

        fig.add_trace(go.Scatter(
            x=reason_impact['DowntimeReason'],
            y=reason_impact['CumulativePercentage'],
            name='Cumulative %',
            mode='lines+markers',
            line=dict(color='red', width=2),
            yaxis='y2'
        ))

        # Update layout with second y-axis for the cumulative-% trace.
        fig.update_layout(
            title='Pareto Analysis of Downtime Impact',
            yaxis=dict(title='Lost Units'),
            yaxis2=dict(
                title='Cumulative %',
                overlaying='y',
                side='right',
                range=[0, 100]
            ),
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1
            )
        )

        # Add 80% line (Pareto principle)
        fig.add_shape(
            type="line",
            x0=-0.5,
            x1=len(reason_impact)-0.5,
            y0=80,
            y1=80,
            line=dict(color="black", width=1, dash="dash"),
            yref='y2'
        )

        st.plotly_chart(fig, use_container_width=True)
    else:
        st.info("No downtime data available for impact analysis")

    # NEW: Maintenance Effectiveness Analysis
    st.subheader("🛠️ Maintenance Effectiveness")

    # Get data on machine breakdowns after maintenance. The LEFT JOIN keeps
    # machines with zero unplanned downtimes (COUNT -> 0); HAVING filters
    # machines that never had a maintenance date recorded.
    maintenance_query = """
    SELECT
        m.Name as MachineName,
        m.Type as MachineType,
        MAX(m.LastMaintenanceDate) as LastMaintenance,
        julianday('now') - julianday(MAX(m.LastMaintenanceDate)) as DaysSinceMaintenance,
        COUNT(d.DowntimeID) as BreakdownsAfterMaintenance
    FROM
        Machines m
    LEFT JOIN
        Downtimes d ON m.MachineID = d.MachineID AND
                       d.StartTime > m.LastMaintenanceDate AND
                       d.Category = 'unplanned'
    GROUP BY
        m.MachineID, m.Name, m.Type
    HAVING
        LastMaintenance IS NOT NULL
    ORDER BY
        BreakdownsAfterMaintenance DESC
    LIMIT 10
    """

    result = db_manager.execute_query(maintenance_query)

    if result["success"] and result["row_count"] > 0:
        maintenance_df = pd.DataFrame(result["rows"])

        # Create visualization of maintenance effectiveness
        fig = px.scatter(
            maintenance_df,
            x='DaysSinceMaintenance',
            y='BreakdownsAfterMaintenance',
            color='MachineType',
            size='BreakdownsAfterMaintenance',
            hover_data=['MachineName', 'LastMaintenance'],
            title='Machine Reliability After Maintenance',
            labels={
                'DaysSinceMaintenance': 'Days Since Maintenance',
                'BreakdownsAfterMaintenance': 'Number of Breakdowns',
                'MachineType': 'Machine Type'
            }
        )

        # Add reference quadrants. NOTE(review): the rectangles assume the
        # data fits 0-90 days and 0-10 breakdowns; points outside that range
        # fall outside any quadrant shading.
        fig.add_shape(
            type="rect",
            x0=0, y0=0,
            x1=30, y1=1,
            line=dict(color="green", width=1),
            fillcolor="rgba(0,255,0,0.1)"
        )

        fig.add_shape(
            type="rect",
            x0=30, y0=0,
            x1=90, y1=1,
            line=dict(color="yellow", width=1),
            fillcolor="rgba(255,255,0,0.1)"
        )

        fig.add_shape(
            type="rect",
            x0=0, y0=1,
            x1=90, y1=10,
            line=dict(color="red", width=1),
            fillcolor="rgba(255,0,0,0.1)"
        )

        # Add annotations labelling the three quadrants.
        fig.add_annotation(
            x=15, y=0.5,
            text="Good: Recent Maintenance, Few Breakdowns",
            showarrow=False,
            font=dict(size=10)
        )

        fig.add_annotation(
            x=60, y=0.5,
            text="Warning: Aging Maintenance, Few Breakdowns",
            showarrow=False,
            font=dict(size=10)
        )

        fig.add_annotation(
            x=45, y=5,
            text="Critical: Multiple Breakdowns After Maintenance",
            showarrow=False,
            font=dict(size=10)
        )

        st.plotly_chart(fig, use_container_width=True)

        # Show machines with concerning maintenance effectiveness
        # (more than one unplanned breakdown since last maintenance).
        st.subheader("Machines Needing Maintenance Improvement")

        concern_machines = maintenance_df[maintenance_df['BreakdownsAfterMaintenance'] > 1].sort_values(
            'BreakdownsAfterMaintenance', ascending=False
        )

        if not concern_machines.empty:
            for i, row in concern_machines.head(5).iterrows():
                st.markdown(f"""
                ⚠️ **{row['MachineName']}** ({row['MachineType']})
                **Breakdowns after maintenance:** {int(row['BreakdownsAfterMaintenance'])}
                **Last maintenance:** {row['LastMaintenance']} ({row['DaysSinceMaintenance']:.1f} days ago)

                **Recommendation:** Review maintenance procedures for this machine type
                """)
                st.markdown("---")
        else:
            st.success("All machines showing good maintenance effectiveness!")
    else:
        st.info("No maintenance effectiveness data available")

    # Upcoming maintenance
    st.subheader("Upcoming Maintenance")

    # Get machines due for maintenance within the next week.
    maintenance_data = db_manager.get_upcoming_maintenance(days_ahead=7)

    if not maintenance_data.empty:
        # Display machines needing maintenance
        st.write(f"**{len(maintenance_data)} machines scheduled for maintenance in the next 7 days:**")

        for i, row in maintenance_data.iterrows():
            days_until = float(row['DaysUntilMaintenance'])

            # Create color coding based on urgency
            if days_until <= 1:
                severity = "🔴"  # Red for urgent (today or tomorrow)
            elif days_until <= 3:
                severity = "🟠"  # Orange for approaching
            else:
                severity = "🟢"  # Green for scheduled future maintenance

            st.markdown(f"""
            {severity} **{row['MachineName']}** ({row['MachineType']}) in {row['WorkCenterName']}
            **Due:** {row['MaintenanceDate']} ({days_until:.1f} days)
            **Last Maintenance:** {row['LastMaintenance']}
            """)

            # Add a separator between items
            st.markdown("---")
    else:
        st.success("No machines scheduled for maintenance in the next 7 days")
def inventory_dashboard() -> None:
    """Display the enhanced inventory dashboard.

    Sections, in render order:
      1. Alert metrics — counts of items below reorder level and critical
         shortages, plus the average supplier lead time for alerted items.
      2. Critical shortage chart — current quantity vs. reorder level for
         the ten largest shortages.
      3. Full inventory table — every item, colour-coded by StockStatus.
      4. Days-of-supply analysis — 30-day average consumption vs. stock on
         hand for items below reorder level.

    Fixes vs. previous revision: the styled inventory table is now actually
    rendered (``styled_inventory`` was built but the plain frame was shown),
    and the "No Recent Usage" branch in the details list is reachable again
    (infinite days-of-supply values are no longer overwritten before the
    loop; the chart clips them instead).
    """
    st.header("📦 Inventory Status")

    # Items currently below their reorder level (query lives in shared.database).
    inventory_alerts = db_manager.get_inventory_alerts()

    # Get all inventory items for the complete inventory view.
    # StockStatus thresholds: Critical < 50% of reorder level, Low < 100%,
    # Adequate < 150%, otherwise Well-Stocked.
    all_inventory_query = """
    SELECT
        i.ItemID,
        i.Name as ItemName,
        i.Category as Category,
        i.Quantity as CurrentQuantity,
        i.ReorderLevel,
        i.LeadTime as LeadTimeInDays,
        s.Name as SupplierName,
        CASE
            WHEN i.Quantity < i.ReorderLevel THEN i.ReorderLevel - i.Quantity
            ELSE 0
        END as ShortageAmount,
        CASE
            WHEN i.Quantity < i.ReorderLevel * 0.5 THEN 'Critical'
            WHEN i.Quantity < i.ReorderLevel THEN 'Low'
            WHEN i.Quantity < i.ReorderLevel * 1.5 THEN 'Adequate'
            ELSE 'Well-Stocked'
        END as StockStatus
    FROM
        Inventory i
    LEFT JOIN
        Suppliers s ON i.SupplierID = s.SupplierID
    ORDER BY
        CASE
            WHEN i.Quantity < i.ReorderLevel THEN 1
            ELSE 2
        END,
        ShortageAmount DESC
    """

    all_inventory_result = db_manager.execute_query(all_inventory_query)
    all_inventory = pd.DataFrame(all_inventory_result["rows"]) if all_inventory_result["success"] else pd.DataFrame()

    if not inventory_alerts.empty:
        # Summary metrics. "Critical" here mirrors the SQL definition:
        # shortage greater than half the reorder level <=> quantity below
        # half the reorder level.
        total_items_below = len(inventory_alerts)
        critical_items = len(inventory_alerts[inventory_alerts['ShortageAmount'] > inventory_alerts['ReorderLevel'] * 0.5])

        metrics_cols = st.columns(3)
        metrics_cols[0].metric("Items Below Reorder", total_items_below)
        metrics_cols[1].metric("Critical Shortages", critical_items)

        # Average lead time across items below reorder — how long relief takes.
        avg_lead_time = inventory_alerts['LeadTimeInDays'].mean()
        metrics_cols[2].metric("Avg Lead Time", f"{avg_lead_time:.1f} days")

        # Display critical shortage items
        st.subheader("Critical Shortage Items")

        # Ten largest shortages first.
        critical_items_df = inventory_alerts.sort_values('ShortageAmount', ascending=False).head(10)

        fig = px.bar(
            critical_items_df,
            x='ItemName',
            y=['CurrentQuantity', 'ReorderLevel'],
            barmode='group',
            title='Inventory Levels vs. Reorder Points',
            labels={
                'value': 'Quantity',
                'variable': 'Metric',
                'ItemName': 'Item'
            },
            color_discrete_map={
                'CurrentQuantity': 'red',
                'ReorderLevel': 'blue'
            }
        )

        # Fixed tick spacing keeps the axis on whole-number gridlines.
        fig.update_yaxes(dtick=10, tick0=0)

        # Add data labels on top of bars
        fig.update_traces(texttemplate='%{y}', textposition='outside')

        # Horizontal legend above the plot for better readability.
        fig.update_layout(
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1
            )
        )

        st.plotly_chart(fig, use_container_width=True)

        # Full inventory table, colour-coded by StockStatus.
        with st.expander("All Inventory Levels", expanded=False):
            if not all_inventory.empty:
                def highlight_status(val):
                    # Cell background by status; Adequate and Well-Stocked
                    # intentionally share the same green.
                    if val == 'Critical':
                        return 'background-color: #ffcccc'
                    elif val == 'Low':
                        return 'background-color: #ffffcc'
                    elif val == 'Adequate':
                        return 'background-color: #ccffcc'
                    else:  # Well-Stocked
                        return 'background-color: #ccffcc'

                # Apply the styling and display the styled frame (previously
                # the unstyled all_inventory was shown, leaving the Styler
                # unused; the no-op styling pass on the other columns has
                # also been dropped).
                styled_inventory = all_inventory.style.applymap(
                    highlight_status, subset=['StockStatus']
                )

                st.dataframe(styled_inventory)
            else:
                st.info("No inventory data available")

        st.subheader("Days of Supply Analysis")

        # Get consumption data to calculate days of supply. The 999999
        # divisor in ORDER BY pushes zero-consumption items to the end.
        consumption_query = """
        SELECT
            i.ItemID,
            i.Name as ItemName,
            i.Category as ItemCategory,
            i.Quantity as CurrentQuantity,
            i.ReorderLevel,
            AVG(mc.ActualQuantity) as AvgDailyConsumption
        FROM
            Inventory i
        LEFT JOIN
            MaterialConsumption mc ON i.ItemID = mc.ItemID
        WHERE
            i.Quantity < i.ReorderLevel
            AND mc.ConsumptionDate >= date('now', '-30 day')
        GROUP BY
            i.ItemID, i.Name, i.Category, i.Quantity, i.ReorderLevel
        ORDER BY
            (i.Quantity / CASE WHEN AVG(mc.ActualQuantity) > 0 THEN AVG(mc.ActualQuantity) ELSE 999999 END) ASC
        LIMIT 10
        """

        result = db_manager.execute_query(consumption_query)

        if result["success"] and result["row_count"] > 0:
            consumption_df = pd.DataFrame(result["rows"])

            # Calculate days of supply; items with no positive consumption
            # get inf so the details loop can flag them as "No Recent Usage".
            # (Previously inf was replaced with 90 here, which made that
            # branch unreachable; the chart already clips to 90 below.)
            consumption_df['DaysOfSupply'] = np.where(
                consumption_df['AvgDailyConsumption'] > 0,
                consumption_df['CurrentQuantity'] / consumption_df['AvgDailyConsumption'],
                float('inf')  # Infinite days if no consumption
            )

            # Create days of supply visualization
            fig = go.Figure()

            # Add horizontal bars for days of supply
            fig.add_trace(go.Bar(
                y=consumption_df['ItemName'],
                x=consumption_df['DaysOfSupply'].clip(upper=90),  # Clip (incl. inf) at 90 days for display
                orientation='h',
                name='Days of Supply',
                marker_color=consumption_df['DaysOfSupply'].apply(lambda x:
                    'red' if x < 5 else
                    'orange' if x < 10 else
                    'green'
                )
            ))

            # Add vertical reference lines at the 5- and 10-day thresholds.
            fig.add_shape(
                type="line",
                x0=5, y0=-0.5,
                x1=5, y1=len(consumption_df) - 0.5,
                line=dict(color="red", width=2, dash="dash")
            )

            fig.add_shape(
                type="line",
                x0=10, y0=-0.5,
                x1=10, y1=len(consumption_df) - 0.5,
                line=dict(color="orange", width=2, dash="dash")
            )

            # Label the reference lines.
            fig.add_annotation(
                x=5, y=len(consumption_df),
                text="Critical (5 days)",
                showarrow=False,
                yshift=10,
                font=dict(color="red")
            )

            fig.add_annotation(
                x=10, y=len(consumption_df),
                text="Warning (10 days)",
                showarrow=False,
                yshift=10,
                font=dict(color="orange")
            )

            # Update layout
            fig.update_layout(
                title='Days of Supply for Critical Items',
                xaxis_title='Days of Supply',
                yaxis_title='Item',
                height=400,
                margin=dict(l=20, r=20, t=50, b=20),
                xaxis=dict(range=[0, 30])  # 30 days
            )

            st.plotly_chart(fig, use_container_width=True)

            # Display details for these critical items
            with st.expander("Critical Items Details", expanded=False):
                for i, row in consumption_df.iterrows():
                    days = row['DaysOfSupply']

                    if days < float('inf'):
                        if days < 5:
                            urgency = "🔴 Critical"
                        elif days < 10:
                            urgency = "🟠 Warning"
                        else:
                            urgency = "🟢 Adequate"

                        st.markdown(f"""
                        **{row['ItemName']}** ({row['ItemCategory']}) - {urgency}
                        Current Quantity: {int(row['CurrentQuantity']):,} | Daily Usage: {row['AvgDailyConsumption']:.1f} | Days Remaining: {days:.1f}
                        """, unsafe_allow_html=True)

                        # Progress bar visualizes days of supply against a 30-day horizon.
                        if days < 30:
                            st.progress(min(days / 30, 1.0))
                        else:
                            st.progress(1.0)

                        st.markdown("---")
                    else:
                        # No measurable consumption in the window — stock is not draining.
                        st.markdown(f"""
                        **{row['ItemName']}** ({row['ItemCategory']}) - 🟢 No Recent Usage
                        Current Quantity: {int(row['CurrentQuantity']):,} | No consumption in last 30 days
                        """)
                        st.progress(1.0)
                        st.markdown("---")
        else:
            st.info("No consumption data available to calculate days of supply")
    else:
        st.success("No inventory items are below reorder level")
"""
Productivity / personnel dashboard functionality
"""

import streamlit as st
import pandas as pd
import plotly.express as px

from shared.database import DatabaseManager


# Initialize database manager
db_manager = DatabaseManager()


def productivity_dashboard():
    """Display employee productivity dashboard"""
    st.header("👥 Productivity Dashboard")

    # Per-employee completed-order statistics for the trailing 30 days.
    productivity_query = """
    SELECT
        e.Name as EmployeeName,
        e.Role as EmployeeRole,
        s.Name as ShiftName,
        COUNT(DISTINCT wo.OrderID) as CompletedOrders,
        SUM(wo.ActualProduction) as TotalProduction,
        ROUND(AVG(julianday(wo.ActualEndTime) - julianday(wo.ActualStartTime)) * 24, 2) as AvgOrderHours,
        MAX(wo.ActualEndTime) as LastCompletedOrder
    FROM
        Employees e
    JOIN
        WorkOrders wo ON e.EmployeeID = wo.EmployeeID
    JOIN
        Shifts s ON e.ShiftID = s.ShiftID
    WHERE
        wo.Status = 'completed'
        AND wo.ActualEndTime >= date('now', '-30 day')
    GROUP BY
        e.EmployeeID
    ORDER BY
        CompletedOrders DESC
    LIMIT 10
    """

    result = db_manager.execute_query(productivity_query)

    # Guard clause: nothing to show without query results.
    if not (result["success"] and result["row_count"] > 0):
        st.info("No productivity data available for the last 30 days")
        return

    employee_df = pd.DataFrame(result["rows"])

    # Headline chart: completed orders per employee, colored by role.
    st.subheader("Top Employees by Completed Orders (Last 30 Days)")
    orders_fig = px.bar(
        employee_df,
        x='EmployeeName',
        y='CompletedOrders',
        color='EmployeeRole',
        title='Completed Orders by Employee',
        labels={
            'CompletedOrders': 'Number of Orders',
            'EmployeeName': 'Employee',
            'EmployeeRole': 'Role'
        }
    )
    st.plotly_chart(orders_fig, use_container_width=True)

    # Two side-by-side aggregate views: by role and by shift.
    left_col, right_col = st.columns(2)

    with left_col:
        by_role = employee_df.groupby('EmployeeRole').agg({
            'CompletedOrders': 'sum',
            'TotalProduction': 'sum'
        }).reset_index()

        role_fig = px.pie(
            by_role,
            values='TotalProduction',
            names='EmployeeRole',
            title='Production by Employee Role',
            hover_data=['CompletedOrders']
        )
        st.plotly_chart(role_fig, use_container_width=True)

    with right_col:
        by_shift = employee_df.groupby('ShiftName').agg({
            'CompletedOrders': 'sum',
            'TotalProduction': 'sum',
            'AvgOrderHours': 'mean'
        }).reset_index()

        shift_fig = px.bar(
            by_shift,
            x='ShiftName',
            y='TotalProduction',
            color='AvgOrderHours',
            title='Production by Shift',
            labels={
                'TotalProduction': 'Total Units',
                'ShiftName': 'Shift',
                'AvgOrderHours': 'Avg Hours per Order'
            },
            color_continuous_scale='Blues'
        )
        st.plotly_chart(shift_fig, use_container_width=True)

    # Raw table for drill-down.
    with st.expander("Employee Productivity Details", expanded=False):
        st.dataframe(employee_df)
def add_root_cause_analysis():
    """Add root cause analysis based on actual defect data from the database.

    Lets the user pick a defect type seen in the last 30 days, then breaks
    its occurrences down by product, machine, recorded root cause and
    location, and checks for a correlation with time since last maintenance.
    """
    st.header("🔍 Root Cause Analysis")

    # Get defect types from database for selection
    defect_query = """
    SELECT
        d.DefectType,
        COUNT(d.DefectID) as DefectCount
    FROM
        Defects d
    JOIN
        QualityControl qc ON d.CheckID = qc.CheckID
    WHERE
        qc.Date >= date('now', '-30 day')
    GROUP BY
        d.DefectType
    ORDER BY
        DefectCount DESC
    LIMIT 15
    """

    result = db_manager.execute_query(defect_query)

    if result["success"] and result["row_count"] > 0:
        defect_df = pd.DataFrame(result["rows"])

        # Let user select defect type to analyze
        selected_defect = st.selectbox(
            "Select defect type to analyze:",
            options=defect_df['DefectType'].tolist(),
            format_func=lambda x: f"{x} ({defect_df[defect_df['DefectType']==x]['DefectCount'].values[0]} occurrences)"
        )

        # The value is interpolated into SQL below; escape single quotes so a
        # DefectType containing ' cannot break (or inject into) the query.
        # TODO: prefer parameterized queries if execute_query grows support.
        safe_defect = selected_defect.replace("'", "''")

        if st.button("Run Root Cause Analysis"):
            with st.spinner("Analyzing patterns..."):
                # Get detailed data on the selected defect type
                detail_query = f"""
                SELECT
                    d.DefectType,
                    d.Severity,
                    d.Location,
                    d.RootCause,
                    d.ActionTaken,
                    p.Name as ProductName,
                    p.Category as ProductCategory,
                    wc.Name as WorkCenterName,
                    m.Name as MachineName,
                    m.Type as MachineType,
                    e.Name as EmployeeName,
                    e.Role as EmployeeRole
                FROM
                    Defects d
                JOIN
                    QualityControl qc ON d.CheckID = qc.CheckID
                JOIN
                    WorkOrders wo ON qc.OrderID = wo.OrderID
                JOIN
                    Products p ON wo.ProductID = p.ProductID
                JOIN
                    WorkCenters wc ON wo.WorkCenterID = wc.WorkCenterID
                JOIN
                    Machines m ON wo.MachineID = m.MachineID
                JOIN
                    Employees e ON wo.EmployeeID = e.EmployeeID
                WHERE
                    d.DefectType = '{safe_defect}'
                    AND qc.Date >= date('now', '-30 day')
                """

                detail_result = db_manager.execute_query(detail_query)

                if detail_result["success"] and detail_result["row_count"] > 0:
                    detail_df = pd.DataFrame(detail_result["rows"])

                    # Frequency tables for each candidate causal dimension.

                    # Product distribution
                    product_counts = detail_df['ProductName'].value_counts().reset_index()
                    product_counts.columns = ['ProductName', 'Count']

                    # Machine distribution
                    machine_counts = detail_df['MachineName'].value_counts().reset_index()
                    machine_counts.columns = ['MachineName', 'Count']

                    # Root cause distribution
                    cause_counts = detail_df['RootCause'].value_counts().reset_index()
                    cause_counts.columns = ['RootCause', 'Count']

                    # Location distribution
                    location_counts = detail_df['Location'].value_counts().reset_index()
                    location_counts.columns = ['Location', 'Count']

                    # Display analysis results
                    st.write(f"### Root Cause Analysis: {selected_defect}")

                    # Key metrics
                    st.write(f"**Total occurrences**: {len(detail_df)}")
                    st.write(f"**Average severity**: {detail_df['Severity'].mean():.1f} / 5")

                    # Create columns for distribution charts
                    col1, col2 = st.columns(2)

                    with col1:
                        # Product distribution
                        fig1 = px.bar(
                            product_counts.head(5),
                            x='ProductName',
                            y='Count',
                            title='Top Products with this Defect'
                        )
                        st.plotly_chart(fig1, use_container_width=True)

                        # Root cause distribution
                        fig3 = px.pie(
                            cause_counts,
                            values='Count',
                            names='RootCause',
                            title='Root Causes'
                        )
                        st.plotly_chart(fig3, use_container_width=True)

                    with col2:
                        # Machine distribution
                        fig2 = px.bar(
                            machine_counts.head(5),
                            x='MachineName',
                            y='Count',
                            title='Top Machines with this Defect'
                        )
                        st.plotly_chart(fig2, use_container_width=True)

                        # Location distribution
                        fig4 = px.pie(
                            location_counts,
                            values='Count',
                            names='Location',
                            title='Defect Locations'
                        )
                        st.plotly_chart(fig4, use_container_width=True)

                    # Identify correlations
                    st.write("### Key Findings")

                    # Report primary product affected
                    if not product_counts.empty:
                        primary_product = product_counts.iloc[0]['ProductName']
                        product_percent = product_counts.iloc[0]['Count'] / product_counts['Count'].sum() * 100
                        st.info(f"**Primary Product**: {primary_product} accounts for {product_percent:.1f}% of these defects")

                    # Report primary machine affected
                    if not machine_counts.empty:
                        primary_machine = machine_counts.iloc[0]['MachineName']
                        machine_percent = machine_counts.iloc[0]['Count'] / machine_counts['Count'].sum() * 100
                        st.info(f"**Primary Machine**: {primary_machine} accounts for {machine_percent:.1f}% of these defects")

                    # Report primary root cause
                    if not cause_counts.empty:
                        primary_cause = cause_counts.iloc[0]['RootCause']
                        cause_percent = cause_counts.iloc[0]['Count'] / cause_counts['Count'].sum() * 100
                        st.info(f"**Primary Root Cause**: {primary_cause} accounts for {cause_percent:.1f}% of these defects")

                    # Get actions taken
                    actions = detail_df['ActionTaken'].value_counts().reset_index()
                    actions.columns = ['Action', 'Count']

                    st.write("### Recommended Actions")

                    # Recommend actions based on data patterns
                    if not actions.empty:
                        st.write("**Based on effective actions taken so far:**")

                        for i, row in actions.head(3).iterrows():
                            effectiveness = row['Count'] / actions['Count'].sum() * 100
                            st.write(f"- **{row['Action']}** (Used in {effectiveness:.1f}% of cases)")

                    # Check for machine maintenance correlation.
                    # FIX: the original computed LastMaintenanceDate - inspection
                    # date, which is NEGATIVE when the defect occurs after
                    # maintenance, so the avg_days > 14 warning below could never
                    # trigger. Days-since-maintenance must be qc.Date minus
                    # LastMaintenanceDate.
                    maintenance_query = f"""
                    SELECT
                        julianday(qc.Date) - julianday(m.LastMaintenanceDate) as DaysSinceMaintenance
                    FROM
                        Defects d
                    JOIN
                        QualityControl qc ON d.CheckID = qc.CheckID
                    JOIN
                        WorkOrders wo ON qc.OrderID = wo.OrderID
                    JOIN
                        Machines m ON wo.MachineID = m.MachineID
                    WHERE
                        d.DefectType = '{safe_defect}'
                        AND qc.Date >= date('now', '-30 day')
                    """

                    maintenance_result = db_manager.execute_query(maintenance_query)

                    if maintenance_result["success"] and maintenance_result["row_count"] > 0:
                        maintenance_df = pd.DataFrame(maintenance_result["rows"])

                        avg_days = maintenance_df['DaysSinceMaintenance'].mean()

                        if avg_days > 14:  # If average is more than 2 weeks
                            st.warning(f"**Maintenance Correlation**: Defects occur on average {avg_days:.1f} days after maintenance. Consider reviewing maintenance frequency.")
                else:
                    st.error("Error retrieving defect details")
    else:
        st.info("No defect data available for analysis")
def weekly_overview_dashboard():
    """Display weekly overview dashboard.

    Shows a 7-day window (ending at a user-selected date) of production,
    OEE and quality trends, plus a summary metrics row.
    """
    st.header("📅 Weekly Performance Overview")

    # Date range selector
    col1, col2 = st.columns(2)
    with col1:
        end_date = st.date_input(
            "End Date",
            value=datetime.now().date(),
            key="weekly_end_date"
        )
    with col2:
        # Calculate start date (7 days before end date)
        start_date = end_date - timedelta(days=6)
        st.write(f"Date Range: {start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}")

    # Convert to strings for SQL (values come from st.date_input, so the
    # interpolation below is safe — always YYYY-MM-DD).
    end_date_str = end_date.strftime('%Y-%m-%d')
    start_date_str = start_date.strftime('%Y-%m-%d')

    # FIX: set explicitly instead of probing `'weekly_oee' in locals()` later —
    # the locals() check was fragile and breaks silently under refactoring.
    weekly_oee = None

    # Weekly production data
    weekly_production_query = f"""
    SELECT
        date(wo.ActualEndTime) as ProductionDate,
        COUNT(wo.OrderID) as CompletedOrders,
        SUM(wo.Quantity) as PlannedQuantity,
        SUM(wo.ActualProduction) as ActualProduction,
        SUM(wo.Scrap) as ScrapQuantity,
        ROUND(SUM(wo.ActualProduction) * 100.0 / SUM(wo.Quantity), 2) as CompletionPercentage
    FROM
        WorkOrders wo
    WHERE
        wo.Status = 'completed'
        AND wo.ActualEndTime BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
    GROUP BY
        date(wo.ActualEndTime)
    ORDER BY
        ProductionDate
    """

    result = db_manager.execute_query(weekly_production_query)
    if result["success"] and result["row_count"] > 0:
        weekly_production = pd.DataFrame(result["rows"])

        # Production trend chart
        st.subheader("Daily Production Trend")

        fig = px.line(
            weekly_production,
            x='ProductionDate',
            y=['PlannedQuantity', 'ActualProduction'],
            title='Daily Production (Planned vs Actual)',
            labels={
                'value': 'Units',
                'variable': 'Metric',
                'ProductionDate': 'Date'
            },
            markers=True
        )
        st.plotly_chart(fig, use_container_width=True)

        # Weekly OEE data
        weekly_oee_query = f"""
        SELECT
            date(m.Date) as MeasurementDate,
            AVG(m.Availability) * 100 as AvgAvailability,
            AVG(m.Performance) * 100 as AvgPerformance,
            AVG(m.Quality) * 100 as AvgQuality,
            AVG(m.OEE) * 100 as AvgOEE
        FROM
            OEEMetrics m
        WHERE
            m.Date BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
        GROUP BY
            date(m.Date)
        ORDER BY
            MeasurementDate
        """

        result = db_manager.execute_query(weekly_oee_query)
        if result["success"] and result["row_count"] > 0:
            weekly_oee = pd.DataFrame(result["rows"])

            # OEE trend chart
            st.subheader("Daily OEE Metrics")

            fig = px.line(
                weekly_oee,
                x='MeasurementDate',
                y=['AvgAvailability', 'AvgPerformance', 'AvgQuality', 'AvgOEE'],
                title='Daily OEE Components',
                labels={
                    'value': 'Percentage (%)',
                    'variable': 'Metric',
                    'MeasurementDate': 'Date'
                },
                markers=True
            )

            # Add target line at 85%
            fig.add_shape(
                type="line",
                x0=weekly_oee['MeasurementDate'].min(),
                y0=85,
                x1=weekly_oee['MeasurementDate'].max(),
                y1=85,
                line=dict(color="red", width=2, dash="dash"),
            )

            st.plotly_chart(fig, use_container_width=True)
        else:
            st.info("No OEE data available for the selected period")

        # Weekly quality data
        weekly_quality_query = f"""
        SELECT
            date(qc.Date) as InspectionDate,
            COUNT(qc.CheckID) as InspectionCount,
            ROUND(AVG(qc.DefectRate) * 100, 2) as AvgDefectRate,
            ROUND(AVG(qc.YieldRate) * 100, 2) as AvgYieldRate,
            SUM(CASE WHEN qc.Result = 'pass' THEN 1 ELSE 0 END) as PassCount,
            SUM(CASE WHEN qc.Result = 'fail' THEN 1 ELSE 0 END) as FailCount
        FROM
            QualityControl qc
        WHERE
            qc.Date BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
        GROUP BY
            date(qc.Date)
        ORDER BY
            InspectionDate
        """

        result = db_manager.execute_query(weekly_quality_query)
        if result["success"] and result["row_count"] > 0:
            weekly_quality = pd.DataFrame(result["rows"])

            # Quality trend chart
            st.subheader("Daily Quality Metrics")

            fig = px.line(
                weekly_quality,
                x='InspectionDate',
                y=['AvgDefectRate', 'AvgYieldRate'],
                title='Daily Quality Metrics',
                labels={
                    'value': 'Percentage (%)',
                    'variable': 'Metric',
                    'InspectionDate': 'Date'
                },
                markers=True
            )
            st.plotly_chart(fig, use_container_width=True)
        else:
            st.info("No quality data available for the selected period")

        # Weekly summary
        st.subheader("Weekly Performance Summary")

        # Calculate weekly totals
        total_planned = weekly_production['PlannedQuantity'].sum()
        total_actual = weekly_production['ActualProduction'].sum()
        total_scrap = weekly_production['ScrapQuantity'].sum()
        avg_completion = weekly_production['CompletionPercentage'].mean()

        # Create summary metrics
        col1, col2, col3, col4 = st.columns(4)
        col1.metric("Total Planned", f"{int(total_planned):,}")
        col2.metric("Total Produced", f"{int(total_actual):,}")
        col3.metric("Total Scrap", f"{int(total_scrap):,}")
        col4.metric("Avg Completion", f"{avg_completion:.1f}%")

        # OEE summary only when the OEE query above returned data.
        if weekly_oee is not None:
            avg_oee = weekly_oee['AvgOEE'].mean()
            st.metric("Average OEE", f"{avg_oee:.1f}%")
    else:
        st.info("No production data available for the selected period")
class ReportGenerator:
    """Generator for meeting reports and summaries.

    Reports are stored as JSON files under ``reports/``, one per meeting
    date, named ``production_meeting_<YYYY-MM-DD>.json``. Summary methods
    return Markdown strings.
    """

    def __init__(self):
        """Initialize the report generator and ensure the reports directory exists."""
        self.reports_dir = Path("reports")
        self.reports_dir.mkdir(exist_ok=True, parents=True)

    def _report_path(self, meeting_date):
        """Return the Path of the report file for *meeting_date* (YYYY-MM-DD)."""
        return self.reports_dir / f"production_meeting_{meeting_date}.json"

    def save_meeting_report(self, meeting_data):
        """
        Save meeting data to a JSON file.

        Args:
            meeting_data (dict): Meeting data including date, notes,
                attendees, etc. Must contain a 'date' key (YYYY-MM-DD).

        Returns:
            str: Path to the saved report file, or None on failure.
        """
        filepath = self._report_path(meeting_data['date'])

        try:
            # Explicit UTF-8 so reports are portable across platforms.
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(meeting_data, f, indent=2)

            return str(filepath)
        except Exception as e:
            # Best-effort save: report the problem, signal failure to caller.
            print(f"Error saving meeting report: {e}")
            return None

    def load_meeting_report(self, meeting_date):
        """
        Load a meeting report from file.

        Args:
            meeting_date (str): Meeting date in YYYY-MM-DD format

        Returns:
            dict: Meeting data or None if not found (or unreadable)
        """
        filepath = self._report_path(meeting_date)

        if not filepath.exists():
            return None

        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            print(f"Error loading meeting report: {e}")
            return None

    def list_available_reports(self):
        """
        Get a list of all available meeting reports.

        Returns:
            list: Meeting date strings (newest first) that have saved reports.
        """
        # The glob pattern guarantees the prefix, so stripping it cannot fail;
        # the original bare `except: pass` here was unnecessary and hid errors.
        reports = [
            f.stem.replace("production_meeting_", "")
            for f in self.reports_dir.glob("production_meeting_*.json")
        ]

        # Sort by date (newest first) — ISO dates sort lexicographically.
        reports.sort(reverse=True)
        return reports

    def generate_meeting_summary(self, meeting_date, meeting_data, include_data=True):
        """
        Generate a markdown summary of the meeting.

        Args:
            meeting_date (str): Meeting date in YYYY-MM-DD format
            meeting_data (dict): Meeting data including notes, attendees, etc.
            include_data (bool): Whether to include production data in the summary

        Returns:
            str: Markdown formatted meeting summary
        """
        # Build the document line by line. The original assembled indented
        # triple-quoted blocks and then ran summary.replace("        ", ""),
        # which also mangled any user-entered notes containing runs of eight
        # spaces. Emitting unindented lines avoids the hack entirely.
        lines = [
            f"# Production Meeting Summary - {meeting_date}",
            "",
            f"**Status:** {meeting_data.get('meeting_status', 'Unknown')}",
            f"**Attendees:** {meeting_data.get('attendees', 'Not recorded')}",
            "",
        ]

        if include_data:
            # Yesterday's production data
            yesterday_data = db_manager.get_daily_production_summary(days_back=1)

            if not yesterday_data.empty:
                total_planned = yesterday_data['PlannedQuantity'].sum()
                total_actual = yesterday_data['ActualProduction'].sum()
                completion_rate = (total_actual / total_planned * 100) if total_planned > 0 else 0

                lines += [
                    "## Production Performance",
                    f"- Yesterday's completion rate: {completion_rate:.1f}% ({total_actual} of {total_planned} units)",
                ]

            # Machine status
            machine_status = db_manager.get_machine_status_summary()

            if not machine_status.empty:
                total_machines = machine_status['TotalMachines'].sum()
                running_machines = machine_status['Running'].sum()
                availability = running_machines / total_machines * 100 if total_machines > 0 else 0
                machines_in_maintenance = machine_status['Maintenance'].sum()

                lines += [
                    f"- Current machine availability: {availability:.1f}% ({running_machines} of {total_machines} machines running)",
                    f"- {machines_in_maintenance} machines currently in maintenance",
                ]

            # Quality data
            quality_data = db_manager.get_quality_summary(days_back=1)

            if not quality_data.empty:
                avg_defect_rate = quality_data['AvgDefectRate'].mean()
                avg_yield_rate = quality_data['AvgYieldRate'].mean()

                lines.append(
                    f"- Quality yield rate: {avg_yield_rate:.1f}% (defect rate: {avg_defect_rate:.1f}%)"
                )

            # Inventory alerts
            inventory_alerts = db_manager.get_inventory_alerts()
            inventory_alert_count = len(inventory_alerts) if not inventory_alerts.empty else 0

            lines.append(f"- {inventory_alert_count} inventory items below reorder level")

        # Add action items
        if meeting_data.get('action_items'):
            lines += ["", "## Action Items"]
            for item in meeting_data['action_items']:
                lines.append(
                    f"- {item['description']} (Owner: {item['owner']}, Due: {item['due_date']}, Status: {item['status']})"
                )

        # Add notes verbatim — user content is never rewritten.
        if meeting_data.get('notes'):
            lines += ["", "## Notes", meeting_data['notes']]

        return "\n".join(lines) + "\n"

    def generate_weekly_summary(self, end_date=None):
        """
        Generate a weekly summary report.

        Args:
            end_date (str|date): End date in YYYY-MM-DD format (defaults to today)

        Returns:
            str: Markdown formatted weekly summary
        """
        if end_date is None:
            end_date = datetime.now().date()
        elif isinstance(end_date, str):
            end_date = datetime.strptime(end_date, "%Y-%m-%d").date()

        # Calculate start date (7 days before end date)
        start_date = end_date - timedelta(days=6)

        # Convert to strings for SQL — strftime output is fixed-format, so the
        # f-string interpolation in the queries below is safe.
        end_date_str = end_date.strftime('%Y-%m-%d')
        start_date_str = start_date.strftime('%Y-%m-%d')

        lines = [
            "# Weekly Production Summary",
            f"**Period:** {start_date_str} to {end_date_str}",
            "",
        ]

        # Weekly production data
        weekly_production_query = f"""
        SELECT
            date(wo.ActualEndTime) as ProductionDate,
            COUNT(wo.OrderID) as CompletedOrders,
            SUM(wo.Quantity) as PlannedQuantity,
            SUM(wo.ActualProduction) as ActualProduction,
            SUM(wo.Scrap) as ScrapQuantity,
            ROUND(SUM(wo.ActualProduction) * 100.0 / SUM(wo.Quantity), 2) as CompletionPercentage
        FROM
            WorkOrders wo
        WHERE
            wo.Status = 'completed'
            AND wo.ActualEndTime BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
        GROUP BY
            date(wo.ActualEndTime)
        ORDER BY
            ProductionDate
        """

        result = db_manager.execute_query(weekly_production_query)
        if result["success"] and result["row_count"] > 0:
            weekly_production = pd.DataFrame(result["rows"])

            # Calculate weekly totals
            total_planned = weekly_production['PlannedQuantity'].sum()
            total_actual = weekly_production['ActualProduction'].sum()
            total_scrap = weekly_production['ScrapQuantity'].sum()
            avg_completion = weekly_production['CompletionPercentage'].mean()

            lines += [
                "## Production Summary",
                f"- **Total Planned Production:** {int(total_planned):,} units",
                f"- **Total Actual Production:** {int(total_actual):,} units",
                f"- **Average Completion Rate:** {avg_completion:.1f}%",
                f"- **Total Scrap:** {int(total_scrap):,} units ({(total_scrap/total_planned*100 if total_planned > 0 else 0):.1f}% of planned)",
                "",
                "### Daily Production Trend",
                "| Date | Planned | Actual | Completion % |",
                "|------|---------|--------|-------------|",
            ]

            for _, row in weekly_production.iterrows():
                lines.append(
                    f"| {row['ProductionDate']} | {int(row['PlannedQuantity']):,} | {int(row['ActualProduction']):,} | {row['CompletionPercentage']}% |"
                )
        else:
            lines.append("No production data available for this period.")

        # Quality data, broken down by product category.
        weekly_quality_query = f"""
        SELECT
            p.Category as ProductCategory,
            COUNT(qc.CheckID) as InspectionCount,
            ROUND(AVG(qc.DefectRate) * 100, 2) as AvgDefectRate,
            ROUND(AVG(qc.ReworkRate) * 100, 2) as AvgReworkRate,
            ROUND(AVG(qc.YieldRate) * 100, 2) as AvgYieldRate,
            SUM(CASE WHEN qc.Result = 'pass' THEN 1 ELSE 0 END) as PassCount,
            SUM(CASE WHEN qc.Result = 'fail' THEN 1 ELSE 0 END) as FailCount,
            SUM(CASE WHEN qc.Result = 'rework' THEN 1 ELSE 0 END) as ReworkCount
        FROM
            QualityControl qc
        JOIN
            WorkOrders wo ON qc.OrderID = wo.OrderID
        JOIN
            Products p ON wo.ProductID = p.ProductID
        WHERE
            qc.Date BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
        GROUP BY
            p.Category
        ORDER BY
            InspectionCount DESC
        """

        result = db_manager.execute_query(weekly_quality_query)
        if result["success"] and result["row_count"] > 0:
            weekly_quality = pd.DataFrame(result["rows"])

            lines += [
                "## Quality Summary",
                "| Product Category | Inspections | Pass Rate | Defect Rate | Rework Rate |",
                "|-----------------|------------|-----------|-------------|-------------|",
            ]

            for _, row in weekly_quality.iterrows():
                pass_rate = row['PassCount'] / row['InspectionCount'] * 100 if row['InspectionCount'] > 0 else 0
                lines.append(
                    f"| {row['ProductCategory']} | {int(row['InspectionCount']):,} | {pass_rate:.1f}% | {row['AvgDefectRate']}% | {row['AvgReworkRate']}% |"
                )

            # Get top defects
            top_defects_query = f"""
            SELECT
                d.DefectType,
                COUNT(d.DefectID) as DefectCount,
                AVG(d.Severity) as AvgSeverity,
                p.Category as ProductCategory
            FROM
                Defects d
            JOIN
                QualityControl qc ON d.CheckID = qc.CheckID
            JOIN
                WorkOrders wo ON qc.OrderID = wo.OrderID
            JOIN
                Products p ON wo.ProductID = p.ProductID
            WHERE
                qc.Date BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
            GROUP BY
                d.DefectType
            ORDER BY
                DefectCount DESC
            LIMIT 5
            """

            result = db_manager.execute_query(top_defects_query)
            if result["success"] and result["row_count"] > 0:
                top_defects = pd.DataFrame(result["rows"])

                lines.append("### Top 5 Defect Types")
                for _, row in top_defects.iterrows():
                    lines.append(
                        f"- **{row['DefectType']}** ({row['DefectCount']} occurrences, Avg Severity: {row['AvgSeverity']:.1f}/5) in {row['ProductCategory']}"
                    )

        # Equipment performance (OEE by machine type).
        weekly_oee_query = f"""
        SELECT
            m.Type as MachineType,
            AVG(oee.Availability) * 100 as AvgAvailability,
            AVG(oee.Performance) * 100 as AvgPerformance,
            AVG(oee.Quality) * 100 as AvgQuality,
            AVG(oee.OEE) * 100 as AvgOEE,
            COUNT(DISTINCT m.MachineID) as MachineCount
        FROM
            OEEMetrics oee
        JOIN
            Machines m ON oee.MachineID = m.MachineID
        WHERE
            oee.Date BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
        GROUP BY
            m.Type
        ORDER BY
            AvgOEE DESC
        """

        result = db_manager.execute_query(weekly_oee_query)
        if result["success"] and result["row_count"] > 0:
            weekly_oee = pd.DataFrame(result["rows"])

            lines += [
                "## Equipment Performance",
                "| Machine Type | # Machines | Availability | Performance | Quality | OEE |",
                "|-------------|------------|--------------|-------------|---------|-----|",
            ]

            for _, row in weekly_oee.iterrows():
                lines.append(
                    f"| {row['MachineType']} | {int(row['MachineCount']):,} | {row['AvgAvailability']:.1f}% | {row['AvgPerformance']:.1f}% | {row['AvgQuality']:.1f}% | {row['AvgOEE']:.1f}% |"
                )

            # Overall OEE = A × P × Q; the three factors are percentages, so
            # divide by 100² to get a percentage back.
            overall_availability = weekly_oee['AvgAvailability'].mean()
            overall_performance = weekly_oee['AvgPerformance'].mean()
            overall_quality = weekly_oee['AvgQuality'].mean()
            overall_oee = overall_availability * overall_performance * overall_quality / 10000

            lines += [
                f"### Overall OEE: {overall_oee:.1f}%",
                f"- Availability: {overall_availability:.1f}%",
                f"- Performance: {overall_performance:.1f}%",
                f"- Quality: {overall_quality:.1f}%",
            ]

        # Downtime events
        downtime_query = f"""
        SELECT
            d.Reason as DowntimeReason,
            d.Category as DowntimeCategory,
            COUNT(d.DowntimeID) as EventCount,
            SUM(d.Duration) as TotalMinutes,
            AVG(d.Duration) as AvgDuration
        FROM
            Downtimes d
        WHERE
            d.StartTime BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
        GROUP BY
            d.Reason, d.Category
        ORDER BY
            TotalMinutes DESC
        LIMIT 5
        """

        result = db_manager.execute_query(downtime_query)
        if result["success"] and result["row_count"] > 0:
            downtimes = pd.DataFrame(result["rows"])

            lines.append("### Top Downtime Reasons")
            for _, row in downtimes.iterrows():
                hours = row['TotalMinutes'] / 60
                lines.append(
                    f"- **{row['DowntimeReason']}** ({row['DowntimeCategory']}): {hours:.1f} hours total across {int(row['EventCount'])} events"
                )

        # Generate timestamp footer
        lines += [
            "",
            "---",
            f"*Report generated on {datetime.now().strftime('%Y-%m-%d %H:%M')}*",
        ]

        return "\n".join(lines) + "\n"

    def export_to_pdf(self, markdown_content, output_file=None):
        """
        Export markdown content to PDF (placeholder function).

        In a real implementation, this would use a library like reportlab,
        weasyprint, or pdfkit to convert markdown to PDF. Currently it just
        writes the markdown to a file under the reports directory.

        Args:
            markdown_content (str): Markdown content
            output_file (str): Output file path (defaults to a timestamped .md)

        Returns:
            str: Path to the written file
        """
        if output_file is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            output_file = f"report_{timestamp}.md"

        output_path = self.reports_dir / output_file

        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(markdown_content)

        return str(output_path)
else: 496 | st.error("Failed to save meeting report") 497 | else: 498 | st.warning("No meeting data available. Please fill in meeting details first.") 499 | 500 | # Tab 2: Weekly report 501 | with tab2: 502 | st.write("Generate a weekly summary report") 503 | 504 | end_date = st.date_input( 505 | "Week Ending", 506 | value=datetime.now().date(), 507 | key="weekly_report_date" 508 | ) 509 | 510 | if st.button("Generate Weekly Report"): 511 | with st.spinner("Generating weekly report..."): 512 | summary = generator.generate_weekly_summary(end_date) 513 | st.markdown(summary) 514 | 515 | # Provide download link 516 | st.download_button( 517 | "Download Weekly Report", 518 | summary, 519 | f"weekly_report_{end_date.strftime('%Y%m%d')}.md", 520 | "text/markdown", 521 | key="download_weekly_report" 522 | ) 523 | 524 | # Tab 3: Past reports 525 | with tab3: 526 | st.write("View past meeting reports") 527 | 528 | # Get available reports 529 | reports = generator.list_available_reports() 530 | 531 | if reports: 532 | selected_report = st.selectbox( 533 | "Select a past report", 534 | options=reports, 535 | key="past_report_select" 536 | ) 537 | 538 | if selected_report: 539 | # Load the selected report 540 | report_data = generator.load_meeting_report(selected_report) 541 | 542 | if report_data: 543 | st.write(f"**Meeting Date:** {selected_report}") 544 | st.write(f"**Status:** {report_data.get('meeting_status', 'Unknown')}") 545 | st.write(f"**Attendees:** {report_data.get('attendees', 'Not recorded')}") 546 | 547 | if report_data.get('notes'): 548 | st.subheader("Meeting Notes") 549 | st.write(report_data['notes']) 550 | 551 | if report_data.get('action_items'): 552 | st.subheader("Action Items") 553 | for item in report_data['action_items']: 554 | st.markdown(f""" 555 | **{item['description']}** 556 | Owner: {item['owner']} | Priority: {item['priority']} | Due: {item['due_date']} | Status: {item['status']} 557 | """) 558 | 559 | # Generate summary option 560 | if 
st.button("Generate Summary from this Report"): 561 | summary = generator.generate_meeting_summary( 562 | selected_report, report_data, include_data=True 563 | ) 564 | st.markdown(summary) 565 | 566 | # Provide download link 567 | st.download_button( 568 | "Download Summary", 569 | summary, 570 | f"meeting_summary_{selected_report}.md", 571 | "text/markdown", 572 | key="download_past_summary" 573 | ) 574 | else: 575 | st.error(f"Could not load report for {selected_report}") 576 | else: 577 | st.info("No past reports available") 578 | 579 | # For testing the module directly 580 | if __name__ == "__main__": 581 | st.set_page_config(page_title="Report Generator", layout="wide") 582 | st.title("Meeting Report Generator Test") 583 | 584 | # Sample meeting data for testing 585 | test_meeting_data = { 586 | "date": datetime.now().strftime("%Y-%m-%d"), 587 | "attendees": "John Smith, Jane Doe, Bob Johnson", 588 | "meeting_status": "Completed", 589 | "notes": "This is a test meeting with sample notes.\n\n- Discussed production issues\n- Reviewed quality metrics\n- Assigned action items", 590 | "action_items": [ 591 | { 592 | "id": 1, 593 | "description": "Investigate machine downtime", 594 | "owner": "John Smith", 595 | "priority": "High", 596 | "due_date": (datetime.now() + timedelta(days=1)).strftime("%Y-%m-%d"), 597 | "status": "Open" 598 | }, 599 | { 600 | "id": 2, 601 | "description": "Order replacement parts", 602 | "owner": "Jane Doe", 603 | "priority": "Medium", 604 | "due_date": (datetime.now() + timedelta(days=3)).strftime("%Y-%m-%d"), 605 | "status": "In Progress" 606 | } 607 | ] 608 | } 609 | 610 | display_report_generator(test_meeting_data["date"], test_meeting_data) -------------------------------------------------------------------------------- /app_factory/shared/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared utilities for MES and Production Meeting applications. 
3 | """ 4 | 5 | from shared.database import DatabaseManager, get_tool_config 6 | from shared.bedrock_utils import ( 7 | get_bedrock_client, 8 | get_available_models, 9 | get_best_available_model 10 | ) -------------------------------------------------------------------------------- /app_factory/shared/bedrock_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Amazon Bedrock utilities 3 | """ 4 | 5 | import os 6 | import logging 7 | import boto3 8 | from typing import List, Dict, Any, Optional 9 | from dotenv import load_dotenv 10 | 11 | # Load environment variables 12 | load_dotenv() 13 | 14 | # Configure logging 15 | logging.basicConfig(level=logging.INFO) 16 | logger = logging.getLogger(__name__) 17 | 18 | 19 | def get_bedrock_client(): 20 | """Create a bedrock-runtime client""" 21 | return boto3.client( 22 | service_name='bedrock-runtime', 23 | region_name=os.getenv("AWS_REGION", "us-east-1") 24 | ) 25 | 26 | 27 | def get_bedrock_management_client(): 28 | """Create a bedrock management client for listing models""" 29 | return boto3.client( 30 | service_name='bedrock', 31 | region_name=os.getenv("AWS_REGION", "us-east-1") 32 | ) 33 | 34 | 35 | def get_supported_models(): 36 | """ 37 | Returns the supported models with their basic information. 38 | All models support text input/output, Converse API, tool use, and system prompts. 39 | Models can use either ON_DEMAND or INFERENCE_PROFILE access. 
40 | """ 41 | 42 | return { 43 | # Claude Models (includes both ON_DEMAND and INFERENCE_PROFILE models) 44 | "anthropic.claude-3-haiku-20240307-v1:0": { 45 | "name": "Claude 3 Haiku", 46 | "provider": "Anthropic", 47 | "tier": "fast" 48 | }, 49 | "anthropic.claude-3-sonnet-20240229-v1:0": { 50 | "name": "Claude 3 Sonnet", 51 | "provider": "Anthropic", 52 | "tier": "balanced" 53 | }, 54 | "anthropic.claude-3-5-sonnet-20240620-v1:0": { 55 | "name": "Claude 3.5 Sonnet", 56 | "provider": "Anthropic", 57 | "tier": "balanced" 58 | }, 59 | "anthropic.claude-3-5-sonnet-20241022-v2:0": { 60 | "name": "Claude 3.5 Sonnet v2", 61 | "provider": "Anthropic", 62 | "tier": "balanced" 63 | }, 64 | "anthropic.claude-3-5-haiku-20241022-v1:0": { 65 | "name": "Claude 3.5 Haiku", 66 | "provider": "Anthropic", 67 | "tier": "fast" 68 | }, 69 | "anthropic.claude-3-7-sonnet-20250219-v1:0": { 70 | "name": "Claude 3.7 Sonnet", 71 | "provider": "Anthropic", 72 | "tier": "premium" 73 | }, 74 | 75 | # Amazon Nova Models 76 | "amazon.nova-micro-v1:0": { 77 | "name": "Amazon Nova Micro", 78 | "provider": "Amazon", 79 | "tier": "fast" 80 | }, 81 | "amazon.nova-lite-v1:0": { 82 | "name": "Amazon Nova Lite", 83 | "provider": "Amazon", 84 | "tier": "fast" 85 | }, 86 | "amazon.nova-pro-v1:0": { 87 | "name": "Amazon Nova Pro", 88 | "provider": "Amazon", 89 | "tier": "balanced" 90 | }, 91 | 92 | # Mistral Model 93 | "mistral.mistral-large-2402-v1:0": { 94 | "name": "Mistral Large", 95 | "provider": "Mistral AI", 96 | "tier": "balanced" 97 | }, 98 | 99 | # Cohere Model 100 | "cohere.command-r-plus-v1:0": { 101 | "name": "Command R+", 102 | "provider": "Cohere", 103 | "tier": "balanced" 104 | } 105 | } 106 | 107 | 108 | def get_available_models(client=None, use_cache=True): 109 | """ 110 | Get models that are actually available in the user's account. 111 | Accepts both ON_DEMAND and INFERENCE_PROFILE models. 
112 | 113 | Args: 114 | client: Optional bedrock management client 115 | use_cache: Whether to use cached results (default: True) 116 | 117 | Returns: 118 | List of available model dictionaries 119 | """ 120 | # Try to use cached results first 121 | if use_cache: 122 | try: 123 | import streamlit as st 124 | if hasattr(st, 'session_state') and 'bedrock_available_models' in st.session_state: 125 | cached_models = st.session_state.bedrock_available_models 126 | if cached_models: 127 | logger.debug(f"Using cached models: {len(cached_models)} models") 128 | return cached_models 129 | except ImportError: 130 | # Not in Streamlit environment, skip caching 131 | pass 132 | 133 | if client is None: 134 | client = get_bedrock_management_client() 135 | 136 | supported_models = get_supported_models() 137 | available_models = [] 138 | 139 | try: 140 | logger.info("Fetching available models from AWS Bedrock...") 141 | response = client.list_foundation_models() 142 | accessible_model_ids = { 143 | model['modelId'] for model in response['modelSummaries'] 144 | if model.get('inferenceTypesSupported') and 145 | ( 146 | 'ON_DEMAND' in model.get('inferenceTypesSupported') or 147 | 'INFERENCE_PROFILE' in model.get('inferenceTypesSupported') 148 | ) 149 | } 150 | 151 | logger.info(f"Found {len(accessible_model_ids)} accessible models in Bedrock") 152 | 153 | for model_id, info in supported_models.items(): 154 | if model_id in accessible_model_ids: 155 | available_models.append({ 156 | "id": model_id, 157 | "name": info["name"], 158 | "provider": info["provider"], 159 | "tier": info["tier"] 160 | }) 161 | logger.debug(f"Added available model: {model_id}") 162 | 163 | # Sort by provider and tier for consistent ordering 164 | available_models.sort(key=lambda x: (x['provider'], x['tier'], x['name'])) 165 | 166 | # Cache the results if in Streamlit environment 167 | if use_cache: 168 | try: 169 | import streamlit as st 170 | if hasattr(st, 'session_state'): 171 | 
st.session_state.bedrock_available_models = available_models 172 | logger.info(f"Cached {len(available_models)} models in session state") 173 | except ImportError: 174 | pass 175 | 176 | logger.info(f"Returning {len(available_models)} supported models") 177 | return available_models 178 | 179 | except Exception as e: 180 | logger.error(f"Error retrieving available models: {e}") 181 | return [] 182 | 183 | 184 | def clear_model_cache(): 185 | """Clear the cached model list to force a refresh on next call""" 186 | try: 187 | import streamlit as st 188 | if hasattr(st, 'session_state') and 'bedrock_available_models' in st.session_state: 189 | del st.session_state.bedrock_available_models 190 | logger.info("Cleared model cache") 191 | except ImportError: 192 | pass 193 | 194 | def debug_available_models(): 195 | """ 196 | Debug function to show what models are actually available in Bedrock 197 | """ 198 | try: 199 | client = get_bedrock_management_client() 200 | response = client.list_foundation_models() 201 | 202 | print("=== ALL MODELS AVAILABLE IN YOUR BEDROCK ACCOUNT ===") 203 | models_by_provider = {} 204 | 205 | for model in response['modelSummaries']: 206 | model_id = model['modelId'] 207 | provider = model_id.split('.')[0].title() 208 | 209 | if provider not in models_by_provider: 210 | models_by_provider[provider] = [] 211 | 212 | # Check for both ON_DEMAND and INFERENCE_PROFILE support 213 | inference_types = model.get('inferenceTypesSupported', []) 214 | supports_usage = 'ON_DEMAND' in inference_types or 'INFERENCE_PROFILE' in inference_types 215 | 216 | models_by_provider[provider].append({ 217 | 'id': model_id, 218 | 'name': model.get('modelName', 'Unknown'), 219 | 'on_demand': supports_usage, 220 | 'inference_types': inference_types 221 | }) 222 | 223 | for provider in sorted(models_by_provider.keys()): 224 | print(f"\n{provider}:") 225 | for model in sorted(models_by_provider[provider], key=lambda x: x['id']): 226 | status = "✅" if model['on_demand'] 
else "❌" 227 | types_str = ", ".join(model['inference_types']) if model['inference_types'] else "None" 228 | print(f" {status} {model['id']} - {model['name']} ({types_str})") 229 | 230 | print(f"\n=== SUPPORTED BY OUR APP ===") 231 | supported = get_supported_models() 232 | for model_id, info in supported.items(): 233 | print(f" {info['provider']} - {info['name']}: {model_id}") 234 | 235 | print(f"\n=== MATCHES ===") 236 | available = get_available_models(client) 237 | for model in available: 238 | print(f" ✅ {model['provider']} - {model['name']} ({model['tier']}): {model['id']}") 239 | 240 | except Exception as e: 241 | print(f"Error debugging models: {e}") 242 | print(f"Check your AWS credentials and permissions") 243 | 244 | 245 | def get_best_available_model(available_models=None, prefer_tier="fast", use_cache=True): 246 | """ 247 | Get the best available model, preferring fast/cheap models by default. 248 | 249 | Args: 250 | available_models: Optional list of available models 251 | prefer_tier: Preferred tier ("fast", "balanced", "premium") 252 | 253 | Returns: 254 | str: Model ID to use 255 | """ 256 | if available_models is None: 257 | available_models = get_available_models(use_cache=use_cache) 258 | 259 | if not available_models: 260 | # Fallback to most common model 261 | fallback = "anthropic.claude-3-haiku-20240307-v1:0" 262 | logger.warning(f"No available models found. 
Using fallback: {fallback}") 263 | return fallback 264 | 265 | # Define preference order by tier and provider 266 | tier_priority = {"fast": 0, "balanced": 1, "premium": 2} 267 | provider_priority = {"Anthropic": 0, "Amazon": 1, "Mistral AI": 2, "Cohere": 3} 268 | 269 | # If specific tier requested, try to find it first 270 | if prefer_tier != "fast": 271 | tier_models = [m for m in available_models if m['tier'] == prefer_tier] 272 | if tier_models: 273 | available_models = tier_models 274 | 275 | # Sort by preference (tier, then provider) 276 | available_models.sort(key=lambda x: ( 277 | tier_priority.get(x['tier'], 99), 278 | provider_priority.get(x['provider'], 99), 279 | x['name'] 280 | )) 281 | 282 | selected_model = available_models[0]['id'] 283 | logger.debug(f"Selected model: {selected_model} ({available_models[0]['name']})") 284 | 285 | return selected_model 286 | 287 | 288 | if __name__ == "__main__": 289 | debug_available_models() -------------------------------------------------------------------------------- /app_factory/shared/database.py: -------------------------------------------------------------------------------- 1 | """ 2 | Shared database utilities for accessing the MES database 3 | """ 4 | 5 | import logging 6 | import sqlite3 7 | import pandas as pd 8 | from datetime import datetime, timedelta 9 | import time 10 | import os 11 | from pathlib import Path 12 | 13 | # Configure logging 14 | logging.basicConfig(level=logging.INFO, 15 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 16 | logger = logging.getLogger(__name__) 17 | 18 | class DatabaseManager: 19 | """Database manager for accessing the MES database with common queries""" 20 | 21 | def __init__(self, db_path=None): 22 | """Initialize with the database path""" 23 | if db_path is None: 24 | # Always use mes.db in the root directory, not relative to this file 25 | db_path = 'mes.db' 26 | 27 | self.db_path = db_path 28 | self._schema_cache = None 29 | 
"""
Shared database utilities for accessing the MES database
"""

import logging
import sqlite3
import pandas as pd
from datetime import datetime, timedelta
import time
import os
from pathlib import Path

# Configure logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class DatabaseManager:
    """Database manager for accessing the MES database with common queries"""

    def __init__(self, db_path=None):
        """Initialize with the database path (defaults to ./mes.db)."""
        if db_path is None:
            # Always use mes.db in the root directory, not relative to this file
            db_path = 'mes.db'

        self.db_path = db_path
        self._schema_cache = None        # cached schema dict
        self._schema_cache_time = None   # time.time() of the last refresh
        self._cache_expiry = 60 * 5      # cache expires after 5 minutes

        # Verify database exists (warn only; queries will fail later if not)
        if not os.path.exists(self.db_path):
            logger.warning(f"Database file not found: {self.db_path}")

    def get_connection(self):
        """Get a new sqlite3 connection to the configured database."""
        return sqlite3.connect(self.db_path)

    def execute_query(self, sql_query):
        """Execute a SQL query and return a JSON-serializable result dict.

        Returns:
            dict: on success {"success": True, "rows", "column_names",
            "row_count", "execution_time_ms"}; on failure {"success": False,
            "error", "execution_time_ms"} with a friendlier error message for
            common SQLite failures.
        """
        logger.info(f"Executing SQL query: {sql_query}")
        start_time = time.time()

        try:
            # BUG FIX: close the connection even when pd.read_sql_query
            # raises; previously an exception leaked the open connection.
            conn = self.get_connection()
            try:
                df = pd.read_sql_query(sql_query, conn)
            finally:
                conn.close()

            # Best-effort: render string columns that look like timestamps
            # (contain both '-' and ':') as 'YYYY-MM-DD HH:MM' for display.
            for col in df.columns:
                if df[col].dtype == 'object':
                    try:
                        if df[col].str.contains('-').any() and df[col].str.contains(':').any():
                            df[col] = pd.to_datetime(df[col])
                            # Format datetime for display
                            df[col] = df[col].dt.strftime('%Y-%m-%d %H:%M')
                    except Exception:
                        # Not actually datetimes (or mixed types) -- leave as-is.
                        # (Was a bare except; narrowed to Exception so that
                        # KeyboardInterrupt/SystemExit are not swallowed.)
                        pass

            # Round float columns to 2 decimal places for display
            for col in df.select_dtypes(include=['float']).columns:
                df[col] = df[col].round(2)

            # Convert to JSON-serializable format
            result = {
                "success": True,
                "rows": df.to_dict(orient="records"),
                "column_names": df.columns.tolist(),
                "row_count": len(df),
                "execution_time_ms": round((time.time() - start_time) * 1000, 2)
            }

            logger.info(f"Query executed successfully: {len(df)} rows returned in {result['execution_time_ms']}ms")
            return result

        except Exception as e:
            error_msg = str(e)
            logger.error(f"Error executing SQL query: {error_msg}")

            # Provide more helpful error messages for common issues
            if "no such table" in error_msg.lower():
                table_name = error_msg.split("no such table:", 1)[1].strip() if "no such table:" in error_msg else "unknown"
                error_msg = f"Table '{table_name}' doesn't exist. Please check the schema and table names."
            elif "no such column" in error_msg.lower():
                col_name = error_msg.split("no such column:", 1)[1].strip() if "no such column:" in error_msg else "unknown"
                error_msg = f"Column '{col_name}' doesn't exist. Please check the schema and column names."
            elif "syntax error" in error_msg.lower():
                error_msg = f"SQL syntax error: {error_msg}. Please check your query syntax."

            return {
                "success": False,
                "error": error_msg,
                "execution_time_ms": round((time.time() - start_time) * 1000, 2)
            }

    def get_schema(self):
        """Get the database schema (tables, columns, FKs, row counts, samples)
        with caching for performance."""
        current_time = time.time()

        # Return cached schema if available and fresh
        if (self._schema_cache is not None and
            self._schema_cache_time is not None and
            current_time - self._schema_cache_time < self._cache_expiry):
            logger.info("Returning cached schema")
            return self._schema_cache

        logger.info("Retrieving fresh database schema")
        start_time = time.time()

        try:
            # BUG FIX: close the connection on the error path too (previously
            # an exception mid-introspection leaked the open connection).
            conn = self.get_connection()
            try:
                cursor = conn.cursor()

                # Get all tables
                cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
                tables = cursor.fetchall()

                schema = {}
                for table in tables:
                    table_name = table[0]

                    # NOTE: table names come from sqlite_master, not user
                    # input, so direct interpolation is safe here.
                    cursor.execute(f"PRAGMA table_info({table_name});")
                    columns = cursor.fetchall()

                    # Format column information
                    column_info = []
                    for col in columns:
                        column_info.append({
                            "name": col[1],
                            "type": col[2],
                            "notnull": bool(col[3]),
                            "pk": bool(col[5])
                        })

                    # Get foreign key relationships
                    cursor.execute(f"PRAGMA foreign_key_list({table_name});")
                    foreign_keys = cursor.fetchall()

                    fk_info = []
                    for fk in foreign_keys:
                        fk_info.append({
                            "id": fk[0],
                            "seq": fk[1],
                            "table": fk[2],
                            "from": fk[3],
                            "to": fk[4]
                        })

                    # Get table row count
                    cursor.execute(f"SELECT COUNT(*) FROM {table_name};")
                    row_count = cursor.fetchone()[0]

                    # Get sample data (limited to 3 rows for performance)
                    cursor.execute(f"SELECT * FROM {table_name} LIMIT 3;")
                    sample_data = cursor.fetchall()

                    # Get column names for the sample data
                    column_names = [col[1] for col in columns]

                    # Format sample data as records
                    sample_data_records = []
                    for row in sample_data:
                        record = {}
                        for i, value in enumerate(row):
                            record[column_names[i]] = value
                        sample_data_records.append(record)

                    # Add table information to schema
                    schema[table_name] = {
                        "columns": column_info,
                        "foreign_keys": fk_info,
                        "row_count": row_count,
                        "sample_data": sample_data_records
                    }

                # Add schema metadata.
                # BUG FIX: os.path.basename handles Windows separators too;
                # the old split("/") returned the full path on Windows.
                schema["__metadata__"] = {
                    "database_name": os.path.basename(self.db_path),
                    "total_tables": len(tables),
                    "generated_at": datetime.now().isoformat(),
                    "schema_version": "1.1"
                }
            finally:
                conn.close()

            # Update cache
            self._schema_cache = schema
            self._schema_cache_time = current_time

            logger.info(f"Schema retrieved in {round((time.time() - start_time) * 1000, 2)}ms")
            return schema

        except Exception as e:
            logger.error(f"Error retrieving schema: {e}")
            return {
                "error": f"Failed to retrieve schema: {str(e)}",
                "timestamp": datetime.now().isoformat()
            }

    # ---- Production Meeting Specific Queries ---- #
    # NOTE: the f-string dates below are generated internally via strftime,
    # never from user input, so they cannot inject SQL.

    def get_daily_production_summary(self, days_back=1):
        """Get a summary of production for the specified days back from today"""
        today = datetime.now()
        target_date = today - timedelta(days=days_back)

        # Format date for SQL query
        date_str = target_date.strftime('%Y-%m-%d')

        query = f"""
        SELECT 
            p.Name as ProductName,
            COUNT(wo.OrderID) as TotalOrders,
            SUM(wo.Quantity) as PlannedQuantity,
            SUM(wo.ActualProduction) as ActualProduction,
            SUM(wo.Scrap) as ScrapQuantity,
            ROUND(SUM(wo.ActualProduction) * 100.0 / SUM(wo.Quantity), 2) as CompletionPercentage
        FROM 
            WorkOrders wo
        JOIN 
            Products p ON wo.ProductID = p.ProductID
        WHERE 
            wo.ActualStartTime LIKE '{date_str}%'
        GROUP BY 
            p.Name
        ORDER BY 
            TotalOrders DESC
        """

        result = self.execute_query(query)
        if result["success"]:
            return pd.DataFrame(result["rows"])
        else:
            logger.error(f"Error getting daily production summary: {result['error']}")
            return pd.DataFrame()

    def get_machine_status_summary(self):
        """Get a summary of current machine status"""
        query = """
        SELECT 
            m.Type as MachineType,
            COUNT(m.MachineID) as TotalMachines,
            SUM(CASE WHEN m.Status = 'running' THEN 1 ELSE 0 END) as Running,
            SUM(CASE WHEN m.Status = 'idle' THEN 1 ELSE 0 END) as Idle,
            SUM(CASE WHEN m.Status = 'maintenance' THEN 1 ELSE 0 END) as Maintenance,
            SUM(CASE WHEN m.Status = 'breakdown' THEN 1 ELSE 0 END) as Breakdown,
            ROUND(AVG(m.EfficiencyFactor) * 100, 2) as AvgEfficiency
        FROM 
            Machines m
        GROUP BY 
            m.Type
        ORDER BY 
            TotalMachines DESC
        """

        result = self.execute_query(query)
        if result["success"]:
            return pd.DataFrame(result["rows"])
        else:
            logger.error(f"Error getting machine status summary: {result['error']}")
            return pd.DataFrame()

    def get_quality_summary(self, days_back=1, range_days=30):
        """Get a summary of quality metrics for a range of days

        Args:
            days_back (int): Days ago to start the range
            range_days (int): Number of days to look back from the start date
        """
        today = datetime.now()
        end_date = today - timedelta(days=days_back)
        start_date = end_date - timedelta(days=range_days)

        # Format dates for SQL query
        end_date_str = end_date.strftime('%Y-%m-%d')
        start_date_str = start_date.strftime('%Y-%m-%d')

        query = f"""
        SELECT 
            p.Name as ProductName,
            p.Category as ProductCategory,
            COUNT(qc.CheckID) as InspectionCount,
            ROUND(AVG(qc.DefectRate) * 100, 2) as AvgDefectRate,
            ROUND(AVG(qc.ReworkRate) * 100, 2) as AvgReworkRate,
            ROUND(AVG(qc.YieldRate) * 100, 2) as AvgYieldRate,
            SUM(CASE WHEN qc.Result = 'pass' THEN 1 ELSE 0 END) as PassCount,
            SUM(CASE WHEN qc.Result = 'fail' THEN 1 ELSE 0 END) as FailCount,
            SUM(CASE WHEN qc.Result = 'rework' THEN 1 ELSE 0 END) as ReworkCount
        FROM 
            QualityControl qc
        JOIN 
            WorkOrders wo ON qc.OrderID = wo.OrderID
        JOIN 
            Products p ON wo.ProductID = p.ProductID
        WHERE 
            qc.Date BETWEEN '{start_date_str}' AND '{end_date_str} 23:59:59'
        GROUP BY 
            p.Name, p.Category
        ORDER BY 
            InspectionCount DESC
        """

        result = self.execute_query(query)
        if result["success"]:
            return pd.DataFrame(result["rows"])
        else:
            logger.error(f"Error getting quality summary: {result['error']}")
            return pd.DataFrame()

    def get_inventory_alerts(self):
        """Get inventory items that are below reorder level"""
        query = """
        SELECT 
            i.Name as ItemName,
            i.Category as Category,
            i.Quantity as CurrentQuantity,
            i.ReorderLevel as ReorderLevel,
            i.LeadTime as LeadTimeInDays,
            s.Name as SupplierName,
            (i.ReorderLevel - i.Quantity) as ShortageAmount
        FROM 
            Inventory i
        JOIN 
            Suppliers s ON i.SupplierID = s.SupplierID
        WHERE 
            i.Quantity < i.ReorderLevel
        ORDER BY 
            ShortageAmount DESC
        """

        result = self.execute_query(query)
        if result["success"]:
            return pd.DataFrame(result["rows"])
        else:
            logger.error(f"Error getting inventory alerts: {result['error']}")
            return pd.DataFrame()

    def get_upcoming_maintenance(self, days_ahead=7):
        """Get machines due for maintenance in the next X days"""
        today = datetime.now()
        future_date = today + timedelta(days=days_ahead)

        # Format dates for SQL query
        today_str = today.strftime('%Y-%m-%d')
        future_str = future_date.strftime('%Y-%m-%d')

        query = f"""
        SELECT 
            m.Name as MachineName,
            m.Type as MachineType,
            wc.Name as WorkCenterName,
            m.NextMaintenanceDate as MaintenanceDate,
            m.MaintenanceFrequency as FrequencyHours,
            m.LastMaintenanceDate as LastMaintenance,
            julianday(m.NextMaintenanceDate) - julianday('{today_str}') as DaysUntilMaintenance
        FROM 
            Machines m
        JOIN 
            WorkCenters wc ON m.WorkCenterID = wc.WorkCenterID
        WHERE 
            m.NextMaintenanceDate BETWEEN '{today_str}' AND '{future_str}'
        ORDER BY 
            m.NextMaintenanceDate ASC
        """

        result = self.execute_query(query)
        if result["success"]:
            return pd.DataFrame(result["rows"])
        else:
            logger.error(f"Error getting upcoming maintenance: {result['error']}")
            return pd.DataFrame()

    def get_work_order_status(self):
        """Get current work order status summary"""
        query = """
        SELECT 
            wo.Status as Status,
            COUNT(wo.OrderID) as OrderCount,
            SUM(wo.Quantity) as TotalQuantity,
            ROUND(AVG(julianday(wo.PlannedEndTime) - julianday(wo.PlannedStartTime)) * 24, 2) as AvgPlanHours
        FROM 
            WorkOrders wo
        GROUP BY 
            wo.Status
        ORDER BY 
            OrderCount DESC
        """

        result = self.execute_query(query)
        if result["success"]:
            return pd.DataFrame(result["rows"])
        else:
            logger.error(f"Error getting work order status: {result['error']}")
            return pd.DataFrame()

# Bedrock tool configuration for the chat interface
def get_tool_config():
    """Get the tool configuration for the Bedrock converse API"""

    return {
        "tools": [
            {
                "toolSpec": {
                    "name": "get_schema",
                    "description": "ALWAYS use this tool FIRST to get the schema of the MES database before attempting any SQL queries. This provides details about all tables, columns, relationships, and sample data.",
                    "inputSchema": {
                        "json": {
                            "type": "object",
                            "properties": {}
                        }
                    }
                }
            },
            {
                "toolSpec": {
                    "name": "execute_sql",
                    "description": "Execute SQL queries against the MES database ONLY after you have retrieved and examined the schema. Write efficient SQL that joins relevant tables and focuses on answering the user's specific question.",
                    "inputSchema": {
                        "json": {
                            "type": "object",
                            "properties": {
                                "sql_query": {
                                    "type": "string",
                                    "description": "The SQL query to execute against the MES database. Write clean, efficient SQL that joins necessary tables to answer the user's question in one query when possible. The queries must be SQLite compatible"
                                }
                            },
                            "required": [
                                "sql_query"
                            ]
                        }
                    }
                }
            }
        ]
    }
-------------------------------------------------------------------------------- /assets/mes-chatbot.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/industrial-data-store-simulation-chatbot/faa7ed0da3c0dc2f31bf7a35dc7805a28931bdf7/assets/mes-chatbot.gif -------------------------------------------------------------------------------- /assets/postgres-installation-confirmation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/industrial-data-store-simulation-chatbot/faa7ed0da3c0dc2f31bf7a35dc7805a28931bdf7/assets/postgres-installation-confirmation.png -------------------------------------------------------------------------------- /assets/table-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/industrial-data-store-simulation-chatbot/faa7ed0da3c0dc2f31bf7a35dc7805a28931bdf7/assets/table-list.png -------------------------------------------------------------------------------- /models.json: -------------------------------------------------------------------------------- 1 | { 2 | "modelSummaries": [ 3 | { 4 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-instant-v1:2:100k", 5 | "modelId": "anthropic.claude-instant-v1:2:100k", 6 | "modelName": "Claude Instant", 7 | "providerName": "Anthropic", 8 | "inputModalities": [ 9 | "TEXT" 10 | ], 11 | "outputModalities": [ 12 | "TEXT" 13 | ], 14 | "responseStreamingSupported": true, 15 | "customizationsSupported": [], 16 | "inferenceTypesSupported": [ 17 | "PROVISIONED" 18 | ], 19 | "modelLifecycle": { 20 | "status": "LEGACY" 21 | } 22 | }, 23 | { 24 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-instant-v1", 25 | "modelId": "anthropic.claude-instant-v1", 26 | "modelName": "Claude Instant", 27 | "providerName": "Anthropic", 
28 | "inputModalities": [ 29 | "TEXT" 30 | ], 31 | "outputModalities": [ 32 | "TEXT" 33 | ], 34 | "responseStreamingSupported": true, 35 | "customizationsSupported": [], 36 | "inferenceTypesSupported": [ 37 | "ON_DEMAND" 38 | ], 39 | "modelLifecycle": { 40 | "status": "LEGACY" 41 | } 42 | }, 43 | { 44 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2:0:18k", 45 | "modelId": "anthropic.claude-v2:0:18k", 46 | "modelName": "Claude", 47 | "providerName": "Anthropic", 48 | "inputModalities": [ 49 | "TEXT" 50 | ], 51 | "outputModalities": [ 52 | "TEXT" 53 | ], 54 | "responseStreamingSupported": true, 55 | "customizationsSupported": [], 56 | "inferenceTypesSupported": [ 57 | "PROVISIONED" 58 | ], 59 | "modelLifecycle": { 60 | "status": "LEGACY" 61 | } 62 | }, 63 | { 64 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2:0:100k", 65 | "modelId": "anthropic.claude-v2:0:100k", 66 | "modelName": "Claude", 67 | "providerName": "Anthropic", 68 | "inputModalities": [ 69 | "TEXT" 70 | ], 71 | "outputModalities": [ 72 | "TEXT" 73 | ], 74 | "responseStreamingSupported": true, 75 | "customizationsSupported": [], 76 | "inferenceTypesSupported": [ 77 | "PROVISIONED" 78 | ], 79 | "modelLifecycle": { 80 | "status": "LEGACY" 81 | } 82 | }, 83 | { 84 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2:1:18k", 85 | "modelId": "anthropic.claude-v2:1:18k", 86 | "modelName": "Claude", 87 | "providerName": "Anthropic", 88 | "inputModalities": [ 89 | "TEXT" 90 | ], 91 | "outputModalities": [ 92 | "TEXT" 93 | ], 94 | "responseStreamingSupported": true, 95 | "customizationsSupported": [], 96 | "inferenceTypesSupported": [ 97 | "PROVISIONED" 98 | ], 99 | "modelLifecycle": { 100 | "status": "LEGACY" 101 | } 102 | }, 103 | { 104 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2:1:200k", 105 | "modelId": "anthropic.claude-v2:1:200k", 106 | "modelName": "Claude", 107 | "providerName": 
"Anthropic", 108 | "inputModalities": [ 109 | "TEXT" 110 | ], 111 | "outputModalities": [ 112 | "TEXT" 113 | ], 114 | "responseStreamingSupported": true, 115 | "customizationsSupported": [], 116 | "inferenceTypesSupported": [ 117 | "PROVISIONED" 118 | ], 119 | "modelLifecycle": { 120 | "status": "LEGACY" 121 | } 122 | }, 123 | { 124 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2:1", 125 | "modelId": "anthropic.claude-v2:1", 126 | "modelName": "Claude", 127 | "providerName": "Anthropic", 128 | "inputModalities": [ 129 | "TEXT" 130 | ], 131 | "outputModalities": [ 132 | "TEXT" 133 | ], 134 | "responseStreamingSupported": true, 135 | "customizationsSupported": [], 136 | "inferenceTypesSupported": [ 137 | "ON_DEMAND" 138 | ], 139 | "modelLifecycle": { 140 | "status": "LEGACY" 141 | } 142 | }, 143 | { 144 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2", 145 | "modelId": "anthropic.claude-v2", 146 | "modelName": "Claude", 147 | "providerName": "Anthropic", 148 | "inputModalities": [ 149 | "TEXT" 150 | ], 151 | "outputModalities": [ 152 | "TEXT" 153 | ], 154 | "responseStreamingSupported": true, 155 | "customizationsSupported": [], 156 | "inferenceTypesSupported": [ 157 | "ON_DEMAND" 158 | ], 159 | "modelLifecycle": { 160 | "status": "LEGACY" 161 | } 162 | }, 163 | { 164 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0:28k", 165 | "modelId": "anthropic.claude-3-sonnet-20240229-v1:0:28k", 166 | "modelName": "Claude 3 Sonnet", 167 | "providerName": "Anthropic", 168 | "inputModalities": [ 169 | "TEXT", 170 | "IMAGE" 171 | ], 172 | "outputModalities": [ 173 | "TEXT" 174 | ], 175 | "responseStreamingSupported": true, 176 | "customizationsSupported": [], 177 | "inferenceTypesSupported": [ 178 | "PROVISIONED" 179 | ], 180 | "modelLifecycle": { 181 | "status": "LEGACY" 182 | } 183 | }, 184 | { 185 | "modelArn": 
"arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0:200k", 186 | "modelId": "anthropic.claude-3-sonnet-20240229-v1:0:200k", 187 | "modelName": "Claude 3 Sonnet", 188 | "providerName": "Anthropic", 189 | "inputModalities": [ 190 | "TEXT", 191 | "IMAGE" 192 | ], 193 | "outputModalities": [ 194 | "TEXT" 195 | ], 196 | "responseStreamingSupported": true, 197 | "customizationsSupported": [], 198 | "inferenceTypesSupported": [ 199 | "PROVISIONED" 200 | ], 201 | "modelLifecycle": { 202 | "status": "LEGACY" 203 | } 204 | }, 205 | { 206 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-sonnet-20240229-v1:0", 207 | "modelId": "anthropic.claude-3-sonnet-20240229-v1:0", 208 | "modelName": "Claude 3 Sonnet", 209 | "providerName": "Anthropic", 210 | "inputModalities": [ 211 | "TEXT", 212 | "IMAGE" 213 | ], 214 | "outputModalities": [ 215 | "TEXT" 216 | ], 217 | "responseStreamingSupported": true, 218 | "customizationsSupported": [], 219 | "inferenceTypesSupported": [ 220 | "ON_DEMAND" 221 | ], 222 | "modelLifecycle": { 223 | "status": "LEGACY" 224 | } 225 | }, 226 | { 227 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-haiku-20240307-v1:0:48k", 228 | "modelId": "anthropic.claude-3-haiku-20240307-v1:0:48k", 229 | "modelName": "Claude 3 Haiku", 230 | "providerName": "Anthropic", 231 | "inputModalities": [ 232 | "TEXT", 233 | "IMAGE" 234 | ], 235 | "outputModalities": [ 236 | "TEXT" 237 | ], 238 | "responseStreamingSupported": true, 239 | "customizationsSupported": [], 240 | "inferenceTypesSupported": [ 241 | "PROVISIONED" 242 | ], 243 | "modelLifecycle": { 244 | "status": "ACTIVE" 245 | } 246 | }, 247 | { 248 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-haiku-20240307-v1:0:200k", 249 | "modelId": "anthropic.claude-3-haiku-20240307-v1:0:200k", 250 | "modelName": "Claude 3 Haiku", 251 | "providerName": "Anthropic", 252 | "inputModalities": [ 253 | "TEXT", 254 | 
"IMAGE" 255 | ], 256 | "outputModalities": [ 257 | "TEXT" 258 | ], 259 | "responseStreamingSupported": true, 260 | "customizationsSupported": [], 261 | "inferenceTypesSupported": [ 262 | "PROVISIONED" 263 | ], 264 | "modelLifecycle": { 265 | "status": "ACTIVE" 266 | } 267 | }, 268 | { 269 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-haiku-20240307-v1:0", 270 | "modelId": "anthropic.claude-3-haiku-20240307-v1:0", 271 | "modelName": "Claude 3 Haiku", 272 | "providerName": "Anthropic", 273 | "inputModalities": [ 274 | "TEXT", 275 | "IMAGE" 276 | ], 277 | "outputModalities": [ 278 | "TEXT" 279 | ], 280 | "responseStreamingSupported": true, 281 | "customizationsSupported": [], 282 | "inferenceTypesSupported": [ 283 | "ON_DEMAND" 284 | ], 285 | "modelLifecycle": { 286 | "status": "ACTIVE" 287 | } 288 | }, 289 | { 290 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-opus-20240229-v1:0:12k", 291 | "modelId": "anthropic.claude-3-opus-20240229-v1:0:12k", 292 | "modelName": "Claude 3 Opus", 293 | "providerName": "Anthropic", 294 | "inputModalities": [ 295 | "TEXT", 296 | "IMAGE" 297 | ], 298 | "outputModalities": [ 299 | "TEXT" 300 | ], 301 | "responseStreamingSupported": true, 302 | "customizationsSupported": [], 303 | "inferenceTypesSupported": [ 304 | "PROVISIONED" 305 | ], 306 | "modelLifecycle": { 307 | "status": "ACTIVE" 308 | } 309 | }, 310 | { 311 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-opus-20240229-v1:0:28k", 312 | "modelId": "anthropic.claude-3-opus-20240229-v1:0:28k", 313 | "modelName": "Claude 3 Opus", 314 | "providerName": "Anthropic", 315 | "inputModalities": [ 316 | "TEXT", 317 | "IMAGE" 318 | ], 319 | "outputModalities": [ 320 | "TEXT" 321 | ], 322 | "responseStreamingSupported": true, 323 | "customizationsSupported": [], 324 | "inferenceTypesSupported": [ 325 | "PROVISIONED" 326 | ], 327 | "modelLifecycle": { 328 | "status": "ACTIVE" 329 | } 330 | }, 331 | { 
332 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-opus-20240229-v1:0:200k", 333 | "modelId": "anthropic.claude-3-opus-20240229-v1:0:200k", 334 | "modelName": "Claude 3 Opus", 335 | "providerName": "Anthropic", 336 | "inputModalities": [ 337 | "TEXT", 338 | "IMAGE" 339 | ], 340 | "outputModalities": [ 341 | "TEXT" 342 | ], 343 | "responseStreamingSupported": true, 344 | "customizationsSupported": [], 345 | "inferenceTypesSupported": [ 346 | "PROVISIONED" 347 | ], 348 | "modelLifecycle": { 349 | "status": "ACTIVE" 350 | } 351 | }, 352 | { 353 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-opus-20240229-v1:0", 354 | "modelId": "anthropic.claude-3-opus-20240229-v1:0", 355 | "modelName": "Claude 3 Opus", 356 | "providerName": "Anthropic", 357 | "inputModalities": [ 358 | "TEXT", 359 | "IMAGE" 360 | ], 361 | "outputModalities": [ 362 | "TEXT" 363 | ], 364 | "responseStreamingSupported": true, 365 | "customizationsSupported": [], 366 | "inferenceTypesSupported": [ 367 | "INFERENCE_PROFILE" 368 | ], 369 | "modelLifecycle": { 370 | "status": "ACTIVE" 371 | } 372 | }, 373 | { 374 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-5-sonnet-20240620-v1:0", 375 | "modelId": "anthropic.claude-3-5-sonnet-20240620-v1:0", 376 | "modelName": "Claude 3.5 Sonnet", 377 | "providerName": "Anthropic", 378 | "inputModalities": [ 379 | "TEXT", 380 | "IMAGE" 381 | ], 382 | "outputModalities": [ 383 | "TEXT" 384 | ], 385 | "responseStreamingSupported": true, 386 | "customizationsSupported": [], 387 | "inferenceTypesSupported": [ 388 | "ON_DEMAND", 389 | "INFERENCE_PROFILE" 390 | ], 391 | "modelLifecycle": { 392 | "status": "ACTIVE" 393 | } 394 | }, 395 | { 396 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-5-sonnet-20241022-v2:0", 397 | "modelId": "anthropic.claude-3-5-sonnet-20241022-v2:0", 398 | "modelName": "Claude 3.5 Sonnet v2", 399 | "providerName": "Anthropic", 400 
| "inputModalities": [ 401 | "TEXT", 402 | "IMAGE" 403 | ], 404 | "outputModalities": [ 405 | "TEXT" 406 | ], 407 | "responseStreamingSupported": true, 408 | "customizationsSupported": [], 409 | "inferenceTypesSupported": [ 410 | "INFERENCE_PROFILE" 411 | ], 412 | "modelLifecycle": { 413 | "status": "ACTIVE" 414 | } 415 | }, 416 | { 417 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-7-sonnet-20250219-v1:0", 418 | "modelId": "anthropic.claude-3-7-sonnet-20250219-v1:0", 419 | "modelName": "Claude 3.7 Sonnet", 420 | "providerName": "Anthropic", 421 | "inputModalities": [ 422 | "TEXT", 423 | "IMAGE" 424 | ], 425 | "outputModalities": [ 426 | "TEXT" 427 | ], 428 | "responseStreamingSupported": true, 429 | "customizationsSupported": [], 430 | "inferenceTypesSupported": [ 431 | "INFERENCE_PROFILE" 432 | ], 433 | "modelLifecycle": { 434 | "status": "ACTIVE" 435 | } 436 | }, 437 | { 438 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-5-haiku-20241022-v1:0", 439 | "modelId": "anthropic.claude-3-5-haiku-20241022-v1:0", 440 | "modelName": "Claude 3.5 Haiku", 441 | "providerName": "Anthropic", 442 | "inputModalities": [ 443 | "TEXT" 444 | ], 445 | "outputModalities": [ 446 | "TEXT" 447 | ], 448 | "responseStreamingSupported": true, 449 | "customizationsSupported": [], 450 | "inferenceTypesSupported": [ 451 | "INFERENCE_PROFILE" 452 | ], 453 | "modelLifecycle": { 454 | "status": "ACTIVE" 455 | } 456 | }, 457 | { 458 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-opus-4-20250514-v1:0", 459 | "modelId": "anthropic.claude-opus-4-20250514-v1:0", 460 | "modelName": "Claude Opus 4", 461 | "providerName": "Anthropic", 462 | "inputModalities": [ 463 | "TEXT", 464 | "IMAGE" 465 | ], 466 | "outputModalities": [ 467 | "TEXT" 468 | ], 469 | "responseStreamingSupported": true, 470 | "customizationsSupported": [], 471 | "inferenceTypesSupported": [ 472 | "INFERENCE_PROFILE" 473 | ], 474 | 
"modelLifecycle": { 475 | "status": "ACTIVE" 476 | } 477 | }, 478 | { 479 | "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-sonnet-4-20250514-v1:0", 480 | "modelId": "anthropic.claude-sonnet-4-20250514-v1:0", 481 | "modelName": "Claude Sonnet 4", 482 | "providerName": "Anthropic", 483 | "inputModalities": [ 484 | "TEXT", 485 | "IMAGE" 486 | ], 487 | "outputModalities": [ 488 | "TEXT" 489 | ], 490 | "responseStreamingSupported": true, 491 | "customizationsSupported": [], 492 | "inferenceTypesSupported": [ 493 | "INFERENCE_PROFILE" 494 | ], 495 | "modelLifecycle": { 496 | "status": "ACTIVE" 497 | } 498 | } 499 | ] 500 | } 501 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Core dependencies 2 | streamlit>=1.42.0 3 | pandas>=2.2.0 4 | numpy>=1.24.0 5 | plotly>=6.0.0 6 | python-dotenv>=1.0.0 7 | boto3>=1.37.0 8 | 9 | # For the database generator 10 | Faker>=22.0.0 11 | SQLAlchemy>=2.0.0 --------------------------------------------------------------------------------