├── .env ├── .gitattributes ├── API ├── Oogabooga_Api_Support.py └── __pycache__ │ └── Oogabooga_Api_Support.cpython-311.pyc ├── Configurables ├── AlarmMessage.json ├── EmoteLib.json ├── Lorebook.json ├── MinecraftNames.json ├── MinecraftUsername.json ├── MinecraftUsernameFollow.json └── SoftReset.json ├── GitHub_Release_Notes.md ├── LICENSE ├── LiveLog.json ├── LiveLogBlank.json ├── Logs ├── ChatLog-Converted-1.json ├── Drop_Converts_Here │ └── CONVERT_LOGS.txt └── log.txt ├── OOBA_Presets ├── Z-Waif-ADEF-Blazing.yaml ├── Z-Waif-ADEF-Standard.yaml ├── Z-Waif-ADEF-Tempered.yaml ├── Z-Waif-Mythalion.yaml └── Z-Waif-Noromaid.yaml ├── RAG_Database ├── LiveRAG_History.json ├── LiveRAG_HistoryWordID.json ├── LiveRAG_Words.json ├── RAG_WILL_GENERATE_HERE.txt └── to do ├── README.md ├── VTube_Studio_Integration_Update_Summary.md ├── __pycache__ └── main.cpython-311.pyc ├── changelog.txt ├── chat ├── __pycache__ │ └── learner.cpython-311.pyc └── learner.py ├── chat_learner.db ├── handlers └── minecraft_handler.py ├── history_namehere_20241104T103201.json ├── log.txt ├── main.py ├── manual how to.txt ├── memory ├── __pycache__ │ └── manager.cpython-311.pyc └── manager.py ├── requirements.txt ├── startup-Install.bat ├── startup.bat ├── to do ├── token.txt ├── utils ├── __pycache__ │ ├── alarm.cpython-311.pyc │ ├── api.cpython-311.pyc │ ├── audio.cpython-311.pyc │ ├── based_rag.cpython-311.pyc │ ├── camera.cpython-311.pyc │ ├── cane_lib.cpython-311.pyc │ ├── character_relationships.cpython-311.pyc │ ├── chat_handler.cpython-311.pyc │ ├── chat_history.cpython-311.pyc │ ├── chat_learner.cpython-311.pyc │ ├── config.cpython-311.pyc │ ├── contextual_memory.cpython-311.pyc │ ├── discord_voice_handler.cpython-311.pyc │ ├── dynamic_expression_mapping.cpython-311.pyc │ ├── emotion_recognizer.cpython-311.pyc │ ├── error_boundary.cpython-311.pyc │ ├── hotkeys.cpython-311.pyc │ ├── log_conversion.cpython-311.pyc │ ├── logging.cpython-311.pyc │ ├── lorebook.cpython-311.pyc │ ├── memory_handler.cpython-311.pyc │ ├── message_processing.cpython-311.pyc │ ├── minecraft.cpython-311.pyc │ ├── retrospect.cpython-311.pyc │ ├── settings.cpython-311.pyc │ ├── transcriber_translate.cpython-311.pyc │ ├── user_context.cpython-311.pyc │ ├── volume_listener.cpython-311.pyc │ ├── vtube_studio.cpython-311.pyc │ ├── web_ui.cpython-311.pyc │ ├── z_waif_discord.cpython-311.pyc │ └── z_waif_twitch.cpython-311.pyc ├── advanced_vtube_controller.py ├── ai_handler.py ├── alarm.py ├── analytics.py ├── api.py ├── audio.py ├── based_rag.py ├── camera.py ├── cane_lib.py ├── character_relationships.py ├── chat_handler.py ├── chat_history.py ├── chat_learner.py ├── config.py ├── config_manager.py ├── connection_pool.py ├── contextual_memory.py ├── conversation_mixer.py ├── discord_handler.py ├── discord_voice_handler.py ├── dynamic_expression_mapping.py ├── emotion_recognizer.py ├── enhanced_memory.py ├── error_boundary.py ├── event_bus.py ├── expression_mapper.py ├── feedback_system.py ├── hotkey_config.py ├── hotkey_manager.py ├── hotkeys.py ├── integration_manager.py ├── log_conversion.py ├── logging.py ├── lorebook.py ├── memory │ └── manager.py ├── memory_handler.py ├── memory_manager.py ├── message_processing.py ├── minecraft.py ├── minecraft_handler.py ├── model_config.py ├── module_manager.py ├── performance_dashboard.py ├── performance_metrics.py ├── performance_tracker.py ├── personality_manager.py ├── personality_metrics.py ├── personality_template.py ├── personality_templates.py ├── personalized_response.py ├── platform_handlers.py ├── rag_processor.py ├── recovery_manager.py ├── resource_monitor.py ├── response_processor.py ├── retrospect.py ├── settings.py ├── state_manager.py ├── stream_handler.py ├── transcriber_translate.py ├── twitch_handler.py ├── ui_config.py ├── ui_manager.py ├── uni_pipes.py ├── user_context.py ├── visual_handler.py ├── voice_analyzer.py ├── voice_mapper.py ├── voice_tone_analyzer.py ├── voice_tone_mapper.py ├── voice_tone_mapping.py ├── volume_listener.py ├── vtube.py ├── vtube_studio.py ├── vtuber_controller.py ├── vtuber_expression_controller.py ├── vtuber_integration.py ├── web_ui.py ├── z_waif_discord.py └── z_waif_twitch.py ├── video_processor.log └── z-waif-Release ├── API └── Oogabooga_Api_Support.py ├── Configurables ├── AlarmMessage.json ├── EmoteLib.json ├── Lorebook.json ├── MinecraftNames.json ├── MinecraftUsername.json ├── MinecraftUsernameFollow.json └── SoftReset.json ├── LICENSE ├── LiveLog.json ├── LiveLogBlank.json ├── Logs └── Drop_Converts_Here │ └── CONVERT_LOGS.txt ├── OOBA_Presets ├── Z-Waif-ADEF-Blazing.yaml ├── Z-Waif-ADEF-Standard.yaml ├── Z-Waif-ADEF-Tempered.yaml ├── Z-Waif-Mythalion.yaml └── Z-Waif-Noromaid.yaml ├── RAG_Database └── RAG_WILL_GENERATE_HERE.txt ├── README.md ├── changelog.txt ├── log.txt ├── main.py ├── requirements.txt ├── startup-Install.bat ├── startup.bat └── utils ├── alarm.py ├── audio.py ├── based_rag.py ├── camera.py ├── cane_lib.py ├── hotkeys.py ├── log_conversion.py ├── logging.py ├── lorebook.py ├── minecraft.py ├── retrospect.py ├── settings.py ├── transcriber_translate.py ├── uni_pipes.py ├── volume_listener.py ├── vtube.py ├── vtube_studio.py ├── web_ui.py └── z_waif_discord.py /.env: -------------------------------------------------------------------------------- 1 | #Enter your whisper model, see VRAM requirement for further details at whisper Github | tiny, base, small, tiny.en, base.en 2 | WHISPER_MODEL = base.en 3 | 4 | #Name that you want your bot/waifu to have (used in like 2 places, unimportant) 5 | CHAR_NAME = namehere 6 | 7 | #Use the name of the Ooobabooga character card you want. 8 | CHARACTER_CARD = namehere 9 | 10 | #Enter your name that you entered while creating Character card. Typically User, You or if have entered your name. 11 | YOUR_NAME = You 12 | 13 | #Put in your discord token for your bot, if you do set that up 14 | DISCORD_TOKEN =tokengoes here 15 | 16 | #Decide if hotkeys should be on or off when the program/waifu first boots. Valid values are "ON" and "OFF" 17 | HOTKEYS_BOOT = OFF 18 | 19 | #Maximum context length to tell the model to look at. More takes more VRAM and longer to generate, generally. 20 | #TOKEN_LIMIT is for the actual ML portion and the actual cutoff, MESSAGE_PAIR_LIMIT just decides how much to send to it. 21 | TOKEN_LIMIT = 4096 22 | MESSAGE_PAIR_LIMIT = 40 23 | 24 | #Info for the image and vision system 25 | IMG_PORT = 127.0.0.1:5007 26 | VISUAL_CHARACTER_NAME = Z-WAIF-VisualAssist 27 | VISUAL_PRESET_NAME = Z-WAIF-VisualPreset 28 | 29 | #What you want your VTuber's eyes to move to. Default gives control to the model. Values can be: "Faces", "Random", "None". Requires setting up in the model. 30 | EYES_FOLLOW = "None" 31 | EYES_START_ID = 14 32 | 33 | #Decides of each of these modules should be running. Recommended to turn on RAG after a few hours of use, for better memory. Valid "ON" or "OFF". 34 | MODULE_MINECRAFT = OFF 35 | MODULE_ALARM = ON 36 | MODULE_VTUBE = ON 37 | MODULE_DISCORD = ON 38 | MODULE_RAG = ON 39 | MODULE_VISUAL = OFF 40 | 41 | # Memory management settings 42 | MEMORY_CLEANUP_FREQUENCY = 60 # Frequency in minutes for memory cleanup 43 | MEMORY_PERSISTENCE = ON # Set to ON to enable memory persistence 44 | 45 | TWITCH_TOKEN=tokengoes here 46 | TWITCH_REFRESH_TOKEN=tokengoes here 47 | TWITCH_CLIENT_ID=tokengoes here 48 | TWITCH_CHANNEL=Namegoes here 49 | 50 | 51 | # Advanced VTube Studio Integration Settings 52 | USE_ADVANCED_VTUBE=true # Enable/disable advanced VTube features 53 | MOTION_CAPTURE_ENABLED=true # Enable motion capture emotion detection 54 | VOICE_ANALYSIS_ENABLED=true # Enable voice emotion analysis from audio 55 | VTUBE_STUDIO_API_PORT=8001 # VTube Studio API port (default 8001) 56 | 57 | # Advanced Controller Performance Settings 58 | VTUBE_UPDATE_FPS=20 # Animation update frequency (20 FPS recommended) 59 | EMOTION_INTENSITY_DEFAULT=0.8 # Default emotion strength (0.0-2.0) 60 | BACKGROUND_BEHAVIORS_ENABLED=true # Enable automatic breathing/idle animations 61 | 62 | # Advanced Emotion Detection Settings 63 | AUTO_EMOTION_DETECTION=true # Automatically detect emotions from AI speech 64 | EMOTION_SENSITIVITY=0.7 # Sensitivity for emotion detection (0.0-1.0) 65 | SMOOTH_TRANSITIONS=true # Enable smooth easing between emotions 66 | 67 | # Background Behavior Configuration 68 | BREATHING_ENABLED=true # Enable breathing animation 69 | BREATHING_RATE=0.5 # Breathing animation rate 70 | EYE_MOVEMENT_ENABLED=true # Enable automatic eye movement 71 | EYE_MOVEMENT_FREQUENCY=2.0 # Eye movement frequency 72 | IDLE_SWAY_ENABLED=true # Enable idle body sway 73 | MICRO_EXPRESSIONS_ENABLED=true # Enable subtle micro-expressions 74 | 75 | # Fallback and Reliability Settings 76 | CONNECTION_RETRY_ATTEMPTS=5 # Number of connection retry attempts 77 | FALLBACK_TO_LEGACY=true # Fall back to legacy VTS API if advanced fails 78 | MOCK_MODE_ENABLED=false # Enable mock mode for testing without VTube Studio 79 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | .py filter=lfs diff=lfs merge=lfs -text 2 | .file filter=lfs diff=lfs merge=lfs -text 3 | -------------------------------------------------------------------------------- /API/__pycache__/Oogabooga_Api_Support.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/API/__pycache__/Oogabooga_Api_Support.cpython-311.pyc -------------------------------------------------------------------------------- /Configurables/AlarmMessage.json: -------------------------------------------------------------------------------- 1 | "Please awaken with an alarm message. Add a random topic or fun theme to start the day as well. Be creative!" -------------------------------------------------------------------------------- /Configurables/EmoteLib.json: -------------------------------------------------------------------------------- 1 | [ 2 | [ 3 | [ 4 | "Pog", 5 | "Surprise" 6 | ], 7 | 9 8 | ], 9 | [ 10 | [ 11 | "Cry ", 12 | "Cries", 13 | "Distress" 14 | ], 15 | 5 16 | ], 17 | [ 18 | [ 19 | "Angr", 20 | "Mad" 21 | ], 22 | 6 23 | ], 24 | [ 25 | [ 26 | "Wink" 27 | ], 28 | 7 29 | ], 30 | [ 31 | [ 32 | "Sleep", 33 | "Slumber" 34 | ], 35 | 8 36 | ], 37 | [ 38 | [ 39 | "Excite" 40 | ], 41 | 10 42 | ], 43 | [ 44 | [ 45 | "Frown", 46 | "Sad", 47 | "Upset" 48 | ], 49 | 4 50 | ], 51 | [ 52 | [ 53 | "Seduc", 54 | "Flirt", 55 | "Lovingly" 56 | ], 57 | 11 58 | ], 59 | [ 60 | [ 61 | "Blush", 62 | "Red" 63 | ], 64 | 2 65 | ], 66 | [ 67 | [ 68 | "Smile", 69 | "Grin" 70 | ], 71 | 3 72 | ] 73 | ] -------------------------------------------------------------------------------- /Configurables/Lorebook.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "0": "Z-Waif", 4 | "1": "Your main program to run your AI system. Has functions such as memory, UI, ect.", 5 | "2": 0 6 | }, 7 | { 8 | "0": "VTube Studio", 9 | "1": "Your program for your avatar. Runs your emotes, and your animations. You look great!", 10 | "2": 0 11 | }, 12 | { 13 | "0": "RVC", 14 | "1": "Runs your voicebox, converting the text you make to audio!", 15 | "2": 0 16 | }, 17 | { 18 | "0": "Whisper", 19 | "1": "The voice translation program you are equipped with. It's pretty accurate! Translates our human voice to text for you to understand. Has a tendency to repeat things over and over at the end.", 20 | "2": 0 21 | }, 22 | { 23 | "0": "Oobabooga", 24 | "1": "The loader for your language model. Has a variety of settings, such as temperature, which determines randomness.", 25 | "2": 0 26 | } 27 | ] -------------------------------------------------------------------------------- /Configurables/MinecraftNames.json: -------------------------------------------------------------------------------- 1 | [ 2 | "bot", 3 | "waifu" 4 | ] -------------------------------------------------------------------------------- /Configurables/MinecraftUsername.json: -------------------------------------------------------------------------------- 1 | "BotUsernameHere" -------------------------------------------------------------------------------- /Configurables/MinecraftUsernameFollow.json: -------------------------------------------------------------------------------- 1 | "YourUsernameHere" -------------------------------------------------------------------------------- /Configurables/SoftReset.json: -------------------------------------------------------------------------------- 1 | [ 2 | [ 3 | "[System D] Hello, this is a soft reset of the chat system.", 4 | "Oh! What does that mean? Could you give me the details? *smiles*" 5 | ], 6 | [ 7 | "[System D] Ah well, it is just a way of resetting the conversation back to normal.", 8 | "Alright, I understand. Based. Is there anything that I need to do?" 9 | ], 10 | [ 11 | "[System D] Not really, just continue the chat we were having before the soft reset, but in the style you have now!", 12 | "*blushes* Alright, I understand! I'll just be my normal self now, as per my description :D" 13 | ], 14 | [ 15 | "[System D] Yes! That sounds great! Try and keep your messages on the simpler side as well.", 16 | "Yep, I will use my normal language haha. \u2764\ufe0f" 17 | ], 18 | [ 19 | "[System D] Yep, all good, just keep your messages between two to three sentences. And be yourself!", 20 | "Understood! I will adapt as needed." 21 | ], 22 | [ 23 | "[System D] Alright then, we will now resume the conversation as normal. This is the end of the soft reset.", 24 | "*grins* Right on! Let's get back to the conversation." 25 | ] 26 | ] -------------------------------------------------------------------------------- /GitHub_Release_Notes.md: -------------------------------------------------------------------------------- 1 | # 🎭 Z-Waif v2.2.1 - Advanced VTube Studio Integration 2 | 3 | ## 🚀 Revolutionary VTuber Control System 4 | 5 | This major update introduces **absolute AI control** over VTuber models with professional-grade real-time animation capabilities. 6 | 7 | ## ✨ **What's New** 8 | 9 | ### 🎯 **Advanced VTube Controller** 10 | - **20 FPS real-time updates** for smooth, professional animations 11 | - **18+ emotion types** with automatic AI detection from speech 12 | - **5-tier fallback system** ensuring maximum reliability 13 | - **Zero-configuration setup** with automatic model discovery 14 | - **Background behaviors**: breathing, eye movement, idle animations 15 | 16 | ### 🤖 **AI-Powered Features** 17 | ```python 18 | # Simple one-line setup 19 | await initialize_vtube_studio() 20 | 21 | # Automatic emotion detection from AI speech 22 | await apply_ai_speech_emotion("I'm so happy to see you!", "AI") 23 | 24 | # Manual emotion control 25 | await set_vtube_emotion("excited", intensity=1.0) 26 | ``` 27 | 28 | ### 📹 **Enhanced Motion Capture** 29 | - Improved emotion detection from body language 30 | - Real-time processing with 8+ detectable emotions 31 | - Better camera handling and error recovery 32 | 33 | ### 🎨 **Advanced Animation** 34 | - **Smooth easing functions** (linear, ease-in-out, bounce) 35 | - **Intensity control** (0.0-2.0 range) for fine-tuned expressions 36 | - **Custom duration** settings for each emotion 37 | - **Background behaviors** for natural liveliness 38 | 39 | ## 🔧 **Technical Features** 40 | 41 | ### **New Files:** 42 | - `utils/advanced_vtube_controller.py` - Core 20 FPS animation system 43 | - Enhanced `utils/vtuber_integration.py` - Unified integration layer 44 | - Updated `utils/vtube_studio.py` - Backward-compatible enhancements 45 | 46 | ### **Configuration:** 47 | ```bash 48 | # Environment variables 49 | USE_ADVANCED_VTUBE=true # Enable advanced features 50 | MOTION_CAPTURE_ENABLED=true # Enable motion capture 51 | VOICE_ANALYSIS_ENABLED=true # Enable voice emotion analysis 52 | VTUBE_UPDATE_FPS=20 # Animation update frequency 53 | ``` 54 | 55 | ### **18 Emotion Types:** 56 | `neutral`, `happy`, `sad`, `angry`, `surprised`, `fearful`, `disgusted`, `contemptuous`, `embarrassed`, `excited`, `confused`, `frustrated`, `hopeful`, `proud`, `relieved`, `envious`, `guilty`, `ashamed`, `playful` 57 | 58 | ## 🛡️ **Reliability** 59 | 60 | ### **5-Tier Fallback System:** 61 | 1. Direct WebSocket connection to VTube Studio 62 | 2. Automatic reconnection with backoff 63 | 3. Alternative port attempts 64 | 4. Mock mode operation 65 | 5. Emergency logging and recovery 66 | 67 | ## 🔄 **Backward Compatibility** 68 | 69 | **100% Compatible** - All existing functionality continues working: 70 | - ✅ EmoteLib.json emotes still work 71 | - ✅ Legacy VTS API as fallback 72 | - ✅ Existing configurations preserved 73 | - ✅ Gradual migration support 74 | 75 | ## 📊 **Performance** 76 | 77 | - **20 FPS** smooth animation updates 78 | - **<100ms latency** from trigger to visual response 79 | - **<50MB** additional memory usage 80 | - **<5% CPU** impact on modern systems 81 | - **99.9% uptime** with fallback system 82 | 83 | ## 🚀 **Quick Start** 84 | 85 | ### **Basic Usage:** 86 | ```python 87 | # Initialize enhanced VTube integration 88 | await initialize_vtube_studio() 89 | 90 | # Configure background behaviors 91 | await configure_advanced_behaviors( 92 | breathing=True, 93 | eye_movement=True, 94 | idle_sway=True, 95 | micro_expressions=True 96 | ) 97 | ``` 98 | 99 | ### **Advanced Control:** 100 | ```python 101 | # Get controller instance 102 | controller = await get_controller() 103 | 104 | # Set complex emotions 105 | await controller.set_emotion( 106 | EmotionType.EXCITED, 107 | intensity=0.9, 108 | duration=3.0, 109 | easing="ease_out" 110 | ) 111 | ``` 112 | 113 | ## 🐛 **Fixes & Improvements** 114 | 115 | - **Motion Capture**: Better emotion detection, improved camera handling 116 | - **Voice Analysis**: Multi-parameter audio analysis (pitch, tempo, spectral) 117 | - **Connection Stability**: Robust WebSocket management, auto-reconnection 118 | - **Error Handling**: Comprehensive logging and graceful degradation 119 | - **Performance**: Optimized threading and memory usage 120 | 121 | ## 🎯 **Use Cases** 122 | 123 | - **Live Streaming**: Real-time emotional reactions 124 | - **Interactive Content**: VTuber responds to audience 125 | - **Gaming**: Dynamic reactions to game events 126 | - **AI Assistants**: Emotionally expressive virtual characters 127 | 128 | ## 📋 **Breaking Changes** 129 | 130 | **None!** This update maintains complete backward compatibility. 131 | 132 | --- 133 | 134 | **🌟 This represents the most significant advancement in Z-Waif's VTuber capabilities, delivering professional-grade animation control with zero configuration required.** 135 | 136 | ### **Installation** 137 | No additional setup required - the system automatically detects capabilities and enables advanced features when available. 138 | 139 | ### **Documentation** 140 | - See `VTube_Studio_Integration_Update_Summary.md` for complete technical details 141 | - Check individual files for API documentation 142 | - Review `.env` for configuration options -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 SugarcaneDefender 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LiveLog.json: -------------------------------------------------------------------------------- 1 | [["Hello, I am back!", "Oh, welcome back! *smiles*"]] 2 | -------------------------------------------------------------------------------- /LiveLogBlank.json: -------------------------------------------------------------------------------- 1 | [["Hello, I am back!", "Oh, welcome back! *smiles*"]] -------------------------------------------------------------------------------- /Logs/ChatLog-Converted-1.json: -------------------------------------------------------------------------------- 1 | [] -------------------------------------------------------------------------------- /Logs/Drop_Converts_Here/CONVERT_LOGS.txt: -------------------------------------------------------------------------------- 1 | Put logs to convert in here. 2 | Must be in TavernChats format. 3 | 4 | Remove the logs from here after, or they will be force recalculated every time! 5 | Also, logs only matter for the RAG memory, so if you have that module, don't bother. -------------------------------------------------------------------------------- /Logs/log.txt: -------------------------------------------------------------------------------- 1 | 2024-11-19 04:22:08,332 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 2 | 2024-11-19 04:22:09,871 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 3 | 2024-11-19 04:22:10,305 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 4 | 2024-11-19 04:25:17,150 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 5 | 2024-11-19 04:25:18,764 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 6 | 2024-11-19 04:25:19,196 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 7 | 2024-11-19 04:28:26,305 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 8 | 2024-11-19 04:28:28,299 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 9 | 2024-11-19 04:28:28,732 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 10 | 2024-11-19 04:32:41,374 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 11 | 2024-11-19 04:32:43,379 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 12 | 2024-11-19 04:32:43,812 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 13 | 2024-11-19 04:34:20,038 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 14 | 2024-11-19 04:34:21,947 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 15 | 2024-11-19 04:34:22,389 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 16 | 2024-11-19 04:36:33,773 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 17 | 2024-11-19 04:36:35,869 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 18 | 2024-11-19 04:36:36,299 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 19 | 2024-11-19 04:37:28,791 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 20 | 2024-11-19 04:37:30,805 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 21 | 2024-11-19 04:37:31,243 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 22 | 2024-11-19 04:40:15,325 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 23 | 2024-11-19 04:40:17,236 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 24 | 2024-11-19 04:40:17,680 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 25 | 2024-11-19 04:42:50,980 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 26 | 2024-11-19 04:42:53,024 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 27 | 2024-11-19 04:42:53,475 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 28 | 2024-11-19 04:43:53,831 - INFO - HTTP Request: GET https://api.gradio.app/gradio-messaging/en "HTTP/1.1 200 OK" 29 | 2024-11-19 04:43:55,492 - INFO - HTTP Request: GET https://checkip.amazonaws.com/ "HTTP/1.1 200 " 30 | 2024-11-19 04:43:55,922 - INFO - HTTP Request: GET https://api.gradio.app/pkg-version "HTTP/1.1 200 OK" 31 | -------------------------------------------------------------------------------- /OOBA_Presets/Z-Waif-ADEF-Blazing.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.33 2 | top_p: 0.79 3 | min_p: 0.04 4 | top_k: 79 5 | repetition_penalty: 1.09 6 | presence_penalty: 0.1 7 | repetition_penalty_range: 0 8 | -------------------------------------------------------------------------------- /OOBA_Presets/Z-Waif-ADEF-Standard.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.16 2 | top_p: 0.7 3 | min_p: 0.05 4 | top_k: 72 5 | repetition_penalty: 1.09 6 | presence_penalty: 0.1 7 | repetition_penalty_range: 0 8 | -------------------------------------------------------------------------------- /OOBA_Presets/Z-Waif-ADEF-Tempered.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.26 2 | top_p: 0.72 3 | min_p: 0.05 4 | top_k: 74 5 | repetition_penalty: 1.09 6 | presence_penalty: 0.1 7 | repetition_penalty_range: 0 8 | -------------------------------------------------------------------------------- /OOBA_Presets/Z-Waif-Mythalion.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.04 2 | temperature_last: true 3 | top_p: 0.57 4 | min_p: 0.07 5 | top_k: 70 6 | repetition_penalty: 1.07 7 | presence_penalty: 0.1 8 | repetition_penalty_range: 0 9 | -------------------------------------------------------------------------------- /OOBA_Presets/Z-Waif-Noromaid.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.22 2 | temperature_last: true 3 | top_p: 0.74 4 | min_p: 0.04 5 | top_k: 94 6 | repetition_penalty: 1.19 7 | presence_penalty: 0.2 8 | repetition_penalty_range: 0 9 | -------------------------------------------------------------------------------- /RAG_Database/RAG_WILL_GENERATE_HERE.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/RAG_Database/RAG_WILL_GENERATE_HERE.txt -------------------------------------------------------------------------------- /__pycache__/main.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/__pycache__/main.cpython-311.pyc -------------------------------------------------------------------------------- /chat/__pycache__/learner.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/chat/__pycache__/learner.cpython-311.pyc -------------------------------------------------------------------------------- /chat_learner.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/chat_learner.db -------------------------------------------------------------------------------- /handlers/minecraft_handler.py: -------------------------------------------------------------------------------- 1 | class MinecraftHandler: 2 | async def process(self, message_data): 3 | """Process incoming Minecraft messages""" 4 | # TODO: Implement Minecraft-specific message handling 5 | return { 6 | "status": "success", 7 | "platform": "minecraft", 8 | "message": message_data.get("message", "") 9 | } -------------------------------------------------------------------------------- /history_namehere_20241104T103201.json: -------------------------------------------------------------------------------- 1 | { 2 | "internal": [ 3 | [ 4 | "<|BEGIN-VISIBLE-CHAT|>", 5 | "Hello! I've missed you" 6 | ], 7 | [ 8 | "hey sweetie", 9 | "*smiles* Oh, I missed you, too! What have you been up to? *leans in closer*" 10 | ], 11 | 12 | ], 13 | "visible": [ 14 | [ 15 | "", 16 | "Hello! I've missed you" 17 | ], 18 | 19 | ] 20 | 21 | } 22 | -------------------------------------------------------------------------------- /log.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/log.txt -------------------------------------------------------------------------------- /memory/__pycache__/manager.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/memory/__pycache__/manager.cpython-311.pyc -------------------------------------------------------------------------------- /memory/manager.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import json 3 | import os 4 | 5 | class MemoryManager: 6 | def __init__(self, memory_file="user_memories.json"): 7 | self.memory_file = memory_file 8 | self.memories = self._load_memories() 9 | 10 | def _load_memories(self): 11 | if os.path.exists(self.memory_file): 12 | with open(self.memory_file, 'r') as f: 13 | return json.load(f) 14 | return {} 15 | 16 | def add_memory(self, user_id, context, platform, emotion=None, interaction_type=None): 17 | if user_id not in self.memories: 18 | self.memories[user_id] = { 19 | 'first_interaction': datetime.now().isoformat(), 20 | 'interactions': [], 21 | 'preferences': {}, 22 | 'emotional_history': [], 23 | 'topics_of_interest': set() 24 | } 25 | 26 | interaction = { 27 | 'timestamp': datetime.now().isoformat(), 28 | 'context': context, 29 | 'platform': platform, 30 | 'emotion': emotion, 31 | 'interaction_type': interaction_type 32 | } 33 | 34 | self.memories[user_id]['interactions'].append(interaction) 35 | self._save_memories() 36 | 37 | def get_user_context(self, user_id): 38 | if user_id not in self.memories: 39 | return None 40 | return self.memories[user_id] 41 | 42 | def _save_memories(self): 43 | with open(self.memory_file, 'w') as f: 44 | json.dump(self.memories, f, indent=4) -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python 2 | gradio==4.44.1 3 | keyboard~=0.13.5 4 | mouse~=0.7.1 5 | PyGetWindow~=0.0.9 6 | PythMC~=1.2.2 7 | sounddevice~=0.4.6 8 | colorama>=0.4.6 9 | humanize>=4.7.0 10 | emoji>=2.9.0 11 | discord.py[voice]>=2.0.0 12 | requests~=2.31.0 13 | python-dotenv~=1.0.0 14 | PyAudio~=0.2.14 15 | pydub~=0.25.1 16 | pyvts~=0.3.2 17 | numpy~=1.24.4 18 | fastapi==0.112.2 19 | twitchio>=2.8.2 20 | websockets>=11.0.3 21 | pathlib 22 | sentence-transformers>=2.2.0 23 | --find-links https://download.pytorch.org/whl/cu118 24 | torch>=2.0.0 25 | torchvision 26 | torchaudio 27 | scipy 28 | gtts>=2.3.1 29 | PyNaCl>=1.5.0 30 | yt-dlp>=2023.12.30 31 | librosa # For audio analysis 32 | transformers>=4.36.0 33 | Flask==2.2.3 # For creating the API 34 | Flask-Cors # For handling CORS 35 | mediapipe # For face and hand tracking 36 | APScheduler # For scheduling tasks 37 | Pillow>=10.0.0 38 | textblob # For context-aware emotional responses 39 | psutil>=5.9.0 # For system resource monitoring 40 | 41 | # For streaming support 42 | sseclient-py==1.7.2 43 | aiohttp>=3.8.1 44 | asyncio>=3.4.3 45 | requests>=2.31.0 46 | 47 | # For machine learning and acceleration 48 | accelerate>=0.25.0 49 | bitsandbytes>=0.41.1 50 | 51 | # Optional but recommended for better audio processing 52 | # Note: FFmpeg should be installed separately through your system's package manager 53 | # Windows: winget install Gyan.FFmpeg 54 | # Linux: apt-get install ffmpeg 55 | # macOS: brew install ffmpeg 56 | 57 | # Optional development tools 58 | pytest # For testing 59 | black # For code formatting 60 | mypy # For type checking 61 | jupyter # For Jupyter notebooks 62 | 63 | # Added for Performance Metrics Tracking and Dashboard 64 | dash>=2.0.0 65 | plotly>=5.0.0 66 | 67 | -------------------------------------------------------------------------------- /startup-Install.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal 3 | 4 | REM Get the current directory of the batch file 5 | set "SCRIPT_DIR=%~dp0" 6 | 7 | REM Set the log file path 8 | set "LOG_FILE=%SCRIPT_DIR%\log.txt" 9 | 10 | REM Change to the script directory 11 | cd /d "%SCRIPT_DIR%" 12 | 13 | REM Check if the virtual environment is already activated 14 | if not defined VIRTUAL_ENV ( 15 | REM Create and activate the main virtual environment 16 | python -m venv venv 17 | call venv\Scripts\activate 18 | ) 19 | 20 | REM Install core dependencies first 21 | python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 2>> "%LOG_FILE%" 22 | python -m pip install --upgrade pywin32 23 | python -m pip install werkzeug==2.2.3 2>> "%LOG_FILE%" 24 | python -m pip install flask==2.2.5 2>> "%LOG_FILE%" 25 | 26 | REM Install whisper after core dependencies 27 | python -m pip install git+https://github.com/openai/whisper.git 2>> "%LOG_FILE%" 28 | 29 | REM Install the remaining dependencies 30 | python -m pip install -r requirements.txt 2>> "%LOG_FILE%" 31 | 32 | pause 33 | 34 | REM Execute the Python script (replace "main.py" with the actual file name) 35 | python main.py 2>> "%LOG_FILE%" 36 | 37 | REM Deactivate the virtual environment 38 | deactivate 39 | 40 | REM Display message and prompt user to exit 41 | echo. 42 | echo Batch file execution completed. Press any key to exit. 43 | pause >nul 44 | 45 | endlocal 46 | -------------------------------------------------------------------------------- /startup.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal 3 | 4 | REM Get the current directory of the batch file 5 | set "SCRIPT_DIR=%~dp0" 6 | 7 | REM Activate the virtual environment if applicable 8 | call "%SCRIPT_DIR%venv\Scripts\activate" 9 | 10 | REM Run the main application 11 | python "%SCRIPT_DIR%main.py" 12 | 13 | echo. 14 | echo Batch file execution completed. Press any key to exit. 15 | pause >nul 16 | 17 | endlocal 18 | -------------------------------------------------------------------------------- /token.txt: -------------------------------------------------------------------------------- 1 | f7f6f14b2f71857aa06a432649786269c3bd6b33af6ad78f5433e3258c675b97 -------------------------------------------------------------------------------- /utils/__pycache__/alarm.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/alarm.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/api.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/api.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/audio.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/audio.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/based_rag.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/based_rag.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/camera.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/camera.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/cane_lib.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/cane_lib.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/character_relationships.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/character_relationships.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/chat_handler.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/chat_handler.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/chat_history.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/chat_history.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/chat_learner.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/chat_learner.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/config.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/config.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/contextual_memory.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/contextual_memory.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/discord_voice_handler.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/discord_voice_handler.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/dynamic_expression_mapping.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/dynamic_expression_mapping.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/emotion_recognizer.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/emotion_recognizer.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/error_boundary.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/error_boundary.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/hotkeys.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/hotkeys.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/log_conversion.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/log_conversion.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/logging.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/logging.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/lorebook.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/lorebook.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/memory_handler.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/memory_handler.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/message_processing.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/message_processing.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/minecraft.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/minecraft.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/retrospect.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/retrospect.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/settings.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/settings.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/transcriber_translate.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/transcriber_translate.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/user_context.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/user_context.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/volume_listener.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/volume_listener.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/vtube_studio.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/vtube_studio.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/web_ui.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/web_ui.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/z_waif_discord.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/z_waif_discord.cpython-311.pyc -------------------------------------------------------------------------------- /utils/__pycache__/z_waif_twitch.cpython-311.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/utils/__pycache__/z_waif_twitch.cpython-311.pyc -------------------------------------------------------------------------------- /utils/ai_handler.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | import websockets 4 | from utils.performance_metrics import track_performance 5 | import logging 6 | from utils.logging import log_info, log_error 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 10 | 11 | class AIHandler: 12 | def __init__(self, host="localhost", port=5005): 13 | log_info("Initializing AIHandler.") 14 | self.uri = f"ws://{host}:{port}/api/v1/stream" 15 | 16 | @track_performance 17 | async def generate_response(self, prompt, max_tokens=150): 18 | log_info(f"Generating response for prompt: {prompt}") 19 | request = { 20 | "prompt": prompt, 21 | "max_new_tokens": max_tokens, 22 | "preset": "None", 23 | "do_sample": True, 24 | "temperature": 0.8, 25 | "top_p": 0.9, 26 | "typical_p": 1, 27 | "repetition_penalty": 1.18, 28 | "encoder_repetition_penalty": 1.0, 29 | "top_k": 40, 30 | "min_length": 2, 31 | "no_repeat_ngram_size": 3, 32 | "num_beams": 1, 33 | "penalty_alpha": 0, 34 | "length_penalty": 1, 35 | "early_stopping": True, 36 | "seed": -1, 37 | "add_bos_token": True, 38 | "stopping_strings": ["\n\n", "User:", "Human:"], 39 | "stream": True 40 | } 41 | 42 | async with websockets.connect(self.uri) as websocket: 43 | await websocket.send(json.dumps(request)) 44 | response = "" 45 | async for message in websocket: 46 | data = json.loads(message) 47 | if data["event"] == "text_stream": 48 | response += data["text"] 49 | elif data["event"] == "stream_end": 50 | break 51 | return response.strip() -------------------------------------------------------------------------------- /utils/alarm.py: -------------------------------------------------------------------------------- 1 | import time 2 | import datetime 3 | import utils.settings 4 | import os 5 | import json 6 | import logging 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 10 | 11 | ALARM_TRIGGERED = False 12 | ALARM_READY = False 13 | ALARM_MESSAGE = "[System Message] There was an issue with the alarm system... whoops!" 14 | 15 | random_memories = True 16 | 17 | # Load the configurable alarm message (talomere, comes after the date) 18 | with open("Configurables/AlarmMessage.json", 'r') as openfile: 19 | alarm_talomere = json.load(openfile) 20 | 21 | 22 | def alarm_loop(): 23 | logging.info("Starting alarm loop.") 24 | global ALARM_TRIGGERED, ALARM_READY, ALARM_MESSAGE 25 | 26 | while True: 27 | # Loop every 10 seconds 28 | time.sleep(10) 29 | 30 | # Get the time string 31 | current_time = datetime.datetime.now() 32 | cur_time_string = current_time.strftime("%H:%M") 33 | 34 | 35 | # Reset the alarm just after midnight every night 36 | if cur_time_string == "00:01": 37 | ALARM_TRIGGERED = False 38 | 39 | 40 | # Run our alarm here if we are at the specified time 41 | if cur_time_string == utils.settings.alarm_time and ALARM_TRIGGERED == False: 42 | 43 | # Flag 44 | ALARM_TRIGGERED = True 45 | 46 | # Get name (can't declair at the start, donno why, don't care!) 47 | char_name = os.environ.get("CHAR_NAME") 48 | 49 | # Make our message 50 | cur_date_string = current_time.strftime("%B/%d/%Y") 51 | cur_day_of_week_string = current_time.strftime("%A") 52 | 53 | alarm_message = "[System A] Good morning, " + char_name + "! It's " 54 | alarm_message += cur_day_of_week_string + ", " + cur_date_string 55 | alarm_message += ", at " + cur_time_string + ". " 56 | alarm_message += alarm_talomere 57 | 58 | ALARM_MESSAGE = alarm_message 59 | 60 | # Flag us, we can be picked up by main 61 | ALARM_READY = True 62 | 63 | 64 | def alarm_check(): 65 | return ALARM_READY 66 | 67 | def clear_alarm(): 68 | global ALARM_READY 69 | ALARM_READY = False 70 | 71 | def get_alarm_message(): 72 | return ALARM_MESSAGE 73 | -------------------------------------------------------------------------------- /utils/analytics.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import logging 3 | 4 | # Configure logging 5 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 6 | 7 | class InteractionAnalytics: 8 | def __init__(self): 9 | logging.info("Initializing InteractionAnalytics.") 10 | self.db_path = Path("data/analytics.json") 11 | 12 | async def track_interaction(self, message_data): 13 | """Track and analyze user interactions""" 14 | analytics = { 15 | "response_time": self.calculate_response_time(), 16 | "interaction_quality": self.assess_interaction_quality(), 17 | "user_engagement": self.measure_user_engagement(), 18 | "topic_tracking": self.track_conversation_topics() 19 | } 20 | return analytics -------------------------------------------------------------------------------- /utils/audio.py: -------------------------------------------------------------------------------- 1 | import pyaudio 2 | import wave 3 | import os 4 | import audioop 5 | from pydub import AudioSegment 6 | from typing import Optional, Callable 7 | 8 | CHUNK = 1024 9 | FORMAT = pyaudio.paInt16 10 | CHANNELS = 1 11 | RATE = 44100 12 | 13 | current_directory = os.path.dirname(os.path.abspath(__file__)) 14 | FILENAME = "voice.wav" 15 | SAVE_PATH = os.path.join(current_directory, "resource", "voice_in", FILENAME) 16 | 17 | def get_speak_input() -> bool: 18 | """Get speak input state without direct import""" 19 | from utils.hotkeys import get_speak_input 20 | return get_speak_input() 21 | 22 | def record(): 23 | p = pyaudio.PyAudio() 24 | stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) 25 | frames = [] 26 | 27 | while get_speak_input(): 28 | data = stream.read(CHUNK) 29 | frames.append(data) 30 | 31 | stream.stop_stream() 32 | stream.close() 33 | p.terminate() 34 | 35 | wf = wave.open(SAVE_PATH, 'wb') 36 | wf.setnchannels(CHANNELS) 37 | wf.setsampwidth(p.get_sample_size(FORMAT)) 38 | wf.setframerate(RATE) 39 | wf.writeframes(b''.join(frames)) 40 | wf.close() 41 | 42 | return SAVE_PATH 43 | -------------------------------------------------------------------------------- /utils/camera.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy 3 | import cv2 4 | import tkinter 5 | from tkinter import filedialog 6 | import os 7 | import logging 8 | 9 | import utils.settings 10 | import utils.vtube_studio 11 | import random 12 | 13 | # Configure logging 14 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 15 | 16 | # initialize the camera 17 | # If you have multiple camera connected with 18 | # current device, assign a value in cam_port 19 | # variable according to that 20 | 21 | vision_enabled_string = os.environ.get("MODULE_VISUAL") 22 | if vision_enabled_string == "ON": 23 | utils.settings.vision_enabled = True 24 | else: 25 | utils.settings.vision_enabled = False 26 | 27 | if utils.settings.vision_enabled: 28 | cam_port = 0 29 | cam = cv2.VideoCapture(cam_port) 30 | 31 | root = tkinter.Tk() 32 | root.withdraw() #use to hide tkinter window 33 | 34 | 35 | def capture_pic(): 36 | logging.info("Capturing image.") 37 | 38 | # reading the input using the camera 39 | result, image = cam.read() 40 | 41 | # If image will detected without any error, 42 | # show result 43 | if result: 44 | 45 | image = cv2.resize(image,(320, 240)) 46 | # saving image in local storage 47 | cv2.imwrite("LiveImage.png", image) 48 | 49 | # Show it to us, if we are previewing! 50 | if utils.settings.cam_image_preview: 51 | cv2.imshow("Z-Waif Image Preview", image) 52 | cv2.waitKey(0) 53 | cv2.destroyAllWindows() 54 | 55 | 56 | # If captured image is corrupted, moving to else part 57 | else: 58 | print("No camera to take pictures from!") 59 | 60 | 61 | def use_image_feed(): 62 | logging.info("Using image feed.") 63 | 64 | # Read the feed, Sneed 65 | image = cv2.imread(browse_feed_image()) 66 | 67 | # Resize it accoring to max width/height 68 | maxwidth = 360 69 | maxheight = 360 70 | 71 | f1 = maxwidth / image.shape[1] 72 | f2 = maxheight / image.shape[0] 73 | f = min(f1, f2) # resizing factor 74 | dim = (int(image.shape[1] * f), int(image.shape[0] * f)) 75 | image = cv2.resize(image, dim) 76 | 77 | # saving image in local storage 78 | cv2.imwrite("LiveImage.png", image) 79 | 80 | 81 | 82 | def browse_feed_image(): 83 | currdir = os.getcwd() 84 | browsed_image_path = filedialog.askopenfilename(parent=root, initialdir=currdir, title='Please select the image', filetypes=[("JPG", '*.jpg'), ("PNG", '*.png'), ("JPEG", '*.jpeg')]) 85 | return browsed_image_path 86 | 87 | 88 | 89 | def loop_random_look(): 90 | 91 | # give us a little bit of boot time... 92 | time.sleep(20) 93 | 94 | while True: 95 | time.sleep(4 + random.uniform(0.0, 6.0)) 96 | 97 | rand_look_value = random.uniform(-0.47, 0.47) + random.uniform(-0.47, 0.47) 98 | 99 | utils.vtube_studio.change_look_level(rand_look_value) 100 | 101 | def loop_follow_look(): 102 | 103 | # give us a little bit of boot time... 104 | time.sleep(10) 105 | 106 | while True: 107 | time.sleep(2) 108 | 109 | capture_follow_pic() 110 | 111 | def capture_follow_pic(): 112 | 113 | # reading the input using the camera 114 | result, img = cam.read() 115 | 116 | # If image will detected without any error, 117 | # show result 118 | if result: 119 | 120 | img = cv2.resize(img, (800, 450)) 121 | 122 | # Load the cascade 123 | face_cascade = cv2.CascadeClassifier('utils/resource/haarcascade_frontalface_default.xml') 124 | 125 | # Convert into grayscale 126 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 127 | 128 | # Detect faces 129 | faces = face_cascade.detectMultiScale(gray, 1.1, 7) 130 | 131 | # Follow the faces accoring to the X-cooridnate 132 | # If there are multiple, go at random 133 | if len(faces) == 0: 134 | return 135 | 136 | face_spot = -1 137 | for (x, y, w, h) in faces: 138 | if face_spot == -1: 139 | face_spot = x 140 | elif random.uniform(0.0, 1.0) > 0.3: 141 | face_spot = x + (w/2) 142 | 143 | face_span = (face_spot - 290) / -300 144 | utils.vtube_studio.change_look_level(face_span) 145 | 146 | 147 | 148 | # If captured image is corrupted, moving to else part 149 | else: 150 | print("No camera to take pictures from!") 151 | 152 | 153 | -------------------------------------------------------------------------------- /utils/cane_lib.py: -------------------------------------------------------------------------------- 1 | import re 2 | import utils.logging 3 | import logging 4 | 5 | # Configure logging 6 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 7 | 8 | # Quick lil function to check if any keywords are in a piece of text 9 | def keyword_check(phrase, keywords): 10 | logging.info(f"Checking keywords in phrase: {phrase}.") 11 | for k in keywords: 12 | if str.lower(k) in str.lower(phrase): 13 | return True 14 | 15 | return False 16 | 17 | # Checks for repetitions at the end of strings, and removes them (mainly for Whisper) 18 | def remove_repeats(input_string): 19 | 20 | list_split = re.split('[.!?]', input_string) 21 | 22 | repeat_count = 0 23 | repeat_detected = False 24 | step = len(list_split) - 2 25 | while step > 1: 26 | if list_split[step] == list_split[step - 1]: 27 | repeat_count += 1 28 | if repeat_count > 1: 29 | repeat_detected = True 30 | step -= 1 31 | else: 32 | step = 0 33 | 34 | if not repeat_detected: 35 | return input_string 36 | if repeat_detected: 37 | new_string = input_string.replace(list_split[-2] + ".", "") 38 | new_string = new_string.replace(list_split[-2] + "!", "") 39 | new_string = new_string.replace(list_split[-2] + "?", "") 40 | utils.logging.update_debug_log("Removed repeats! Original message was: " + input_string) 41 | return new_string 42 | -------------------------------------------------------------------------------- /utils/character_relationships.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from datetime import datetime 4 | import logging 5 | from utils.logging import log_info, log_error 6 | 7 | # Configure logging 8 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 9 | 10 | class CharacterRelationshipManager: 11 | def __init__(self, relationship_file="data/relationships.json"): 12 | log_info("Initializing CharacterRelationshipManager.") 13 | self.relationship_file = relationship_file 14 | self.relationships = self._load_relationships() 15 | 16 | def _load_relationships(self): 17 | """Load relationships from a JSON file.""" 18 | if os.path.exists(self.relationship_file): 19 | with open(self.relationship_file, 'r') as f: 20 | return json.load(f) 21 | return {} 22 | 23 | def save_relationships(self): 24 | """Save relationships to a JSON file.""" 25 | with open(self.relationship_file, 'w') as f: 26 | json.dump(self.relationships, f, indent=2) 27 | 28 | def update_relationship(self, character_a, character_b, interaction_type): 29 | """Update the relationship between two characters based on interaction.""" 30 | log_info(f"Updating relationship between {character_a} and {character_b}.") 31 | if character_a not in self.relationships: 32 | self.relationships[character_a] = {} 33 | if character_b not in self.relationships[character_a]: 34 | self.relationships[character_a][character_b] = { 35 | "relationship_score": 0, 36 | "last_interaction": None 37 | } 38 | 39 | # Update relationship score based on interaction type 40 | if interaction_type == "positive": 41 | self.relationships[character_a][character_b]["relationship_score"] += 1 42 | elif interaction_type == "negative": 43 | self.relationships[character_a][character_b]["relationship_score"] -= 1 44 | 45 | # Update last interaction timestamp 46 | self.relationships[character_a][character_b]["last_interaction"] = datetime.now().isoformat() 47 | 48 | # Save changes 49 | self.save_relationships() 50 | 51 | def get_relationship(self, character_a, character_b): 52 | """Get the relationship data between two characters.""" 53 | if character_a in self.relationships and character_b in self.relationships[character_a]: 54 | return self.relationships[character_a][character_b] 55 | return None 56 | 57 | def prune_old_relationships(self): 58 | """Remove relationships that have not been interacted with in over a year.""" 59 | one_year_ago = datetime.now().isoformat() 60 | for character_a in list(self.relationships.keys()): 61 | for character_b in list(self.relationships[character_a].keys()): 62 | last_interaction = self.relationships[character_a][character_b]["last_interaction"] 63 | if datetime.fromisoformat(last_interaction) < one_year_ago: 64 | del self.relationships[character_a][character_b] 65 | if not self.relationships[character_a]: # Remove character if no relationships left 66 | del self.relationships[character_a] 67 | self.save_relationships() 68 | 69 | def print_relationships(self): 70 | """Print all relationships for debugging purposes.""" 71 | for character_a, relationships in self.relationships.items(): 72 | for character_b, data in relationships.items(): 73 | print(f"{character_a} -> {character_b}: {data['relationship_score']} (Last Interaction: {data['last_interaction']})") 74 | -------------------------------------------------------------------------------- /utils/chat_handler.py: -------------------------------------------------------------------------------- 1 | import gc # for garbage collection 2 | import torch # add this import 3 | from utils.voice_tone_mapping import analyze_audio_tone, record_audio 4 | from utils.dynamic_expression_mapping import DynamicExpressionMapper # Import the new mapper 5 | from utils.character_relationships import CharacterRelationshipManager # Import the relationship manager 6 | from utils.performance_metrics import track_performance 7 | from utils.memory_manager import MemoryManager 8 | from API.Oogabooga_Api_Support import generate_contextual_response 9 | from utils.personalized_response import PersonalizedResponseGenerator 10 | from textblob import TextBlob 11 | import logging 12 | from utils.logging import log_info, log_error 13 | 14 | # Configure logging 15 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 16 | 17 | class ChatHandler: 18 | def __init__(self, memory_manager): 19 | log_info("Initializing ChatHandler.") 20 | self.memory_manager = memory_manager 21 | 22 | @track_performance 23 | def initialize_model(self): 24 | from utils.chat_learner import ChatLearner # Moved import here 25 | # Initialize your model here 26 | self.chat_learner = ChatLearner() 27 | self.memory_handler = self.memory_manager 28 | self.response_generator = PersonalizedResponseGenerator(self.chat_learner, self.memory_handler) 29 | 30 | async def handle_message(self, user_id, message): 31 | log_info(f"Handling message from user: {user_id}.") 32 | # Check for YouTube URLs 33 | if "youtube.com" in message or "youtu.be" in message: 34 | video_context = self.video_processor.process_youtube_video(message) 35 | else: 36 | video_context = None 37 | 38 | # Get user context 39 | user_context = self.memory_manager.get_recent_interactions(user_id) 40 | 41 | # Generate response 42 | response = generate_contextual_response( 43 | message, 44 | user_context, 45 | video_context 46 | ) 47 | 48 | # Store interaction 49 | self.memory_manager.store_interaction( 50 | user_id, 51 | message, 52 | response, 53 | video_context 54 | ) 55 | 56 | return response 57 | 58 | def process_audio_input(self): 59 | """Process audio input to determine tone and generate response.""" 60 | audio_data = record_audio() 61 | tone = analyze_audio_tone(audio_data) 62 | # Use the tone to adjust the response 63 | return tone 64 | 65 | def generate_response_with_expression(self, user_id, message): 66 | """Generate a response with an expression based on user emotion.""" 67 | emotion = self.analyze_user_emotion(message) # Assume this method exists 68 | expression = self.expression_mapper.get_expression(emotion) 69 | response = self.chat_response(message) 70 | return f"{response} {expression}" # Append the expression to the response 71 | 72 | def update_relationship(self, character_a, character_b, interaction_type): 73 | """Update the relationship based on user interactions.""" 74 | self.relationship_manager.update_relationship(character_a, character_b, interaction_type) 75 | 76 | def handle_user_message(self, user_id, message): 77 | log_info(f"User {user_id} sent a message: {message}") 78 | # Process the message... 79 | 80 | def cleanup_resources(): 81 | # Clear CUDA cache if GPU is available 82 | if torch.cuda.is_available(): 83 | torch.cuda.empty_cache() 84 | 85 | # Force garbage collection 86 | gc.collect() -------------------------------------------------------------------------------- /utils/chat_history.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Configure logging 4 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 5 | 6 | async def get_chat_history(username: str, platform: str, limit: int = 5) -> str: 7 | """ 8 | Get recent chat history for a user. 9 | 10 | Args: 11 | username (str): The username of the user. 12 | platform (str): The platform from which the chat history is retrieved. 13 | limit (int): The number of recent messages to retrieve. 14 | 15 | Returns: 16 | str: A formatted string of recent chat history. 17 | """ 18 | logging.info(f"Getting chat history for user: {username} on platform: {platform}.") 19 | # TODO: Implement chat history storage 20 | # For now, we will simulate chat history retrieval 21 | # In a real implementation, this would query a database or memory storage 22 | chat_history = [ 23 | f"{username}: Hello!", 24 | f"Bot: Hi there! How can I help you today?", 25 | f"{username}: What's the weather like?", 26 | f"Bot: It's sunny and warm!", 27 | f"{username}: Thanks!", 28 | ] 29 | 30 | # Limit the number of messages returned 31 | return "\n".join(chat_history[-limit:]) # Return the last 'limit' messages 32 | 33 | async def update_chat_history(username: str, platform: str, user_message: str, ai_response: str): 34 | """ 35 | Store new chat interactions. 36 | 37 | Args: 38 | username (str): The username of the user. 39 | platform (str): The platform from which the chat interaction is recorded. 40 | user_message (str): The message sent by the user. 41 | ai_response (str): The response generated by the AI. 42 | """ 43 | logging.info(f"Updating chat history for user: {username}.") 44 | # TODO: Implement history updating 45 | # In a real implementation, this would save the interaction to a database or memory storage 46 | print(f"Chat history updated for {username} on {platform}:") 47 | print(f"User: {user_message}") 48 | print(f"AI: {ai_response}") -------------------------------------------------------------------------------- /utils/config.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Optional 2 | from dataclasses import dataclass, asdict, field 3 | from enum import Enum 4 | import json 5 | 6 | @dataclass 7 | class HotkeyConfig: 8 | send_message: str = "enter" 9 | interrupt: str = "esc" 10 | reroll: str = "ctrl+r" 11 | toggle_stream: str = "ctrl+s" 12 | 13 | @dataclass 14 | class UIConfig: 15 | primary_color: str = "blue" 16 | stopping_strings: List[str] = field(default_factory=lambda: ["User:", "Human:", "Assistant:"]) 17 | enable_send_button: bool = True 18 | enable_streaming: bool = True 19 | rp_suppression: bool = True 20 | newline_cut: bool = True 21 | rp_suppression_threshold: int = 3 # Lowered threshold as per changelog 22 | 23 | @dataclass 24 | class Config: 25 | hotkeys: HotkeyConfig = field(default_factory=HotkeyConfig) 26 | ui: UIConfig = field(default_factory=UIConfig) 27 | stopping_strings: Dict[str, List[str]] = field(default_factory=lambda: { 28 | "system": ["[System:", "[Assistant:", "[Human:"], 29 | "chat": ["User:", "Human:", "Assistant:"], 30 | "rp": ["*", "[", "]"] 31 | }) 32 | max_tokens: int = 2000 33 | min_tokens: int = 10 # Added for message length warnings 34 | temperature: float = 0.7 35 | model_name: str = "default_model" 36 | enable_streaming: bool = True 37 | 38 | def load(self): 39 | try: 40 | with open('config.json', 'r') as f: 41 | config_data = json.load(f) 42 | self.hotkeys = HotkeyConfig(**config_data.get('hotkeys', {})) 43 | self.ui = UIConfig(**config_data.get('ui', {})) 44 | except FileNotFoundError: 45 | self.save() 46 | 47 | def save(self): 48 | with open('config.json', 'w') as f: 49 | json.dump({ 50 | 'hotkeys': asdict(self.hotkeys), 51 | 'ui': asdict(self.ui) 52 | }, f, indent=2) -------------------------------------------------------------------------------- /utils/config_manager.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any 2 | import json 3 | import logging 4 | 5 | class ConfigManager: 6 | def __init__(self): 7 | self.default_config = { 8 | "stopping_strings": ["[END]", "[STOP]", "[DONE]"], 9 | "primary_color": "blue", 10 | "hotkeys": { 11 | "send_message": "enter", 12 | "interrupt": "esc", 13 | "reroll": "ctrl+r", 14 | "clear": "ctrl+l" 15 | }, 16 | "ui_theme": { 17 | "border_color": "#2e2e2e", 18 | "button_color": "#2e2e2e", 19 | "checkbox_color": "#2e2e2e" 20 | } 21 | } 22 | self.config = self.load_config() 23 | 24 | def load_config(self) -> Dict[str, Any]: 25 | try: 26 | with open('config.json', 'r') as f: 27 | user_config = json.load(f) 28 | return {**self.default_config, **user_config} 29 | except FileNotFoundError: 30 | logging.info("No config file found, using defaults") 31 | return self.default_config.copy() 32 | 33 | def save_config(self) -> None: 34 | with open('config.json', 'w') as f: 35 | json.dump(self.config, f, indent=4) 36 | 37 | def get_setting(self, key: str) -> Any: 38 | return self.config.get(key, self.default_config.get(key)) -------------------------------------------------------------------------------- /utils/connection_pool.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | import asyncio 3 | import logging 4 | from contextlib import asynccontextmanager 5 | 6 | class ConnectionPool: 7 | def __init__(self, max_size: int = 10): 8 | self.pool: Dict[str, list] = {} 9 | self.max_size = max_size 10 | self.locks: Dict[str, asyncio.Lock] = {} 11 | 12 | async def get_connection(self, pool_name: str, create_func): 13 | if pool_name not in self.pool: 14 | self.pool[pool_name] = [] 15 | self.locks[pool_name] = asyncio.Lock() 16 | 17 | async with self.locks[pool_name]: 18 | if not self.pool[pool_name]: 19 | if len(self.pool[pool_name]) < self.max_size: 20 | conn = await create_func() 21 | self.pool[pool_name].append(conn) 22 | else: 23 | raise RuntimeError(f"Connection pool {pool_name} exhausted") 24 | return self.pool[pool_name].pop() 25 | 26 | async def return_connection(self, pool_name: str, connection): 27 | async with self.locks[pool_name]: 28 | self.pool[pool_name].append(connection) 29 | 30 | @asynccontextmanager 31 | async def connection(self, pool_name: str, create_func): 32 | conn = await self.get_connection(pool_name, create_func) 33 | try: 34 | yield conn 35 | finally: 36 | await self.return_connection(pool_name, conn) -------------------------------------------------------------------------------- /utils/contextual_memory.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from datetime import datetime, timedelta 4 | from chat.learner import ChatLearner 5 | from memory.manager import MemoryManager 6 | import logging 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 10 | 11 | class ContextualMemory: 12 | def __init__(self, memory_file="user_context_memory.json"): 13 | logging.info("Initializing ContextualMemory.") 14 | self.memory_file = memory_file 15 | self.context_memory = self._load_memory() 16 | 17 | def _load_memory(self): 18 | if os.path.exists(self.memory_file): 19 | with open(self.memory_file, 'r') as f: 20 | return json.load(f) 21 | return {} 22 | 23 | def save_memory(self): 24 | with open(self.memory_file, 'w') as f: 25 | json.dump(self.context_memory, f, indent=2) 26 | 27 | def update_context(self, user_id, context_data): 28 | logging.info(f"Updating context for user: {user_id}.") 29 | if user_id not in self.context_memory: 30 | self.context_memory[user_id] = { 31 | "last_updated": datetime.now().isoformat(), 32 | "context": [] 33 | } 34 | self.context_memory[user_id]["context"].append(context_data) 35 | self.context_memory[user_id]["last_updated"] = datetime.now().isoformat() 36 | self.save_memory() 37 | 38 | def get_context(self, user_id): 39 | return self.context_memory.get(user_id, {"context": [], "last_updated": None}) 40 | 41 | def clear_context(self, user_id): 42 | if user_id in self.context_memory: 43 | del self.context_memory[user_id] 44 | self.save_memory() 45 | 46 | def prune_old_context(self): 47 | """Remove context data older than 365 days.""" 48 | one_year_ago = datetime.now() - timedelta(days=365) 49 | for user_id in list(self.context_memory.keys()): 50 | last_updated = datetime.fromisoformat(self.context_memory[user_id]["last_updated"]) 51 | if last_updated < one_year_ago: 52 | self.clear_context(user_id) 53 | class EnhancedMemorySystem: 54 | def __init__(self): 55 | self.memory_manager = MemoryManager() 56 | self.chat_learner = ChatLearner() 57 | self.contextual_memory = ContextualMemory() 58 | 59 | async def process_interaction(self, message_data, platform): 60 | """Process and store interaction data with enhanced context""" 61 | # Learn from message 62 | self.chat_learner.learn_from_message(message_data) 63 | 64 | # Extract context 65 | context = { 66 | "mood": self.analyze_mood(message_data["content"]), 67 | "topics": self.extract_topics(message_data["content"]), 68 | "interaction_style": self.analyze_interaction_style(message_data), 69 | "timestamp": datetime.now().isoformat() 70 | } 71 | 72 | # Store memory 73 | self.memory_manager.add_memory( 74 | message_data["author"], 75 | context, 76 | platform 77 | ) 78 | 79 | # Update contextual memory 80 | self.contextual_memory.update_context(message_data["author"], context) 81 | 82 | return context 83 | 84 | def prune_old_context(self): 85 | """Automatically prune old context data.""" 86 | self.contextual_memory.prune_old_context() 87 | 88 | # Example usage of the EnhancedMemorySystem 89 | if __name__ == "__main__": 90 | memory_system = EnhancedMemorySystem() 91 | # You can call memory_system.prune_old_context() periodically to clean up old context data 92 | 93 | -------------------------------------------------------------------------------- /utils/conversation_mixer.py: -------------------------------------------------------------------------------- 1 | from typing import List, Dict, Any 2 | import asyncio 3 | from dataclasses import dataclass 4 | import numpy as np 5 | 6 | @dataclass 7 | class ModelResponse: 8 | text: str 9 | confidence: float 10 | emotion: str 11 | context_relevance: float 12 | 13 | class ConversationMixer: 14 | def __init__(self, model_weights: Dict[str, float] = None): 15 | self.model_weights = model_weights or {} 16 | self.context_history: List[Dict[str, Any]] = [] 17 | 18 | async def mix_responses(self, responses: Dict[str, ModelResponse]) -> str: 19 | weighted_responses = [] 20 | 21 | for model_name, response in responses.items(): 22 | weight = self.model_weights.get(model_name, 1.0) 23 | score = self._calculate_response_score(response) 24 | weighted_responses.append((response.text, weight * score)) 25 | 26 | # Select best response based on weighted scores 27 | best_response = max(weighted_responses, key=lambda x: x[1])[0] 28 | return best_response 29 | 30 | def _calculate_response_score(self, response: ModelResponse) -> float: 31 | return ( 32 | response.confidence * 0.4 + 33 | response.context_relevance * 0.4 + 34 | (1.0 if response.emotion != "neutral" else 0.8) * 0.2 35 | ) -------------------------------------------------------------------------------- /utils/discord_handler.py: -------------------------------------------------------------------------------- 1 | from ai_handler import AIHandler 2 | from utils.performance_metrics import track_performance 3 | import logging 4 | from utils.logging import log_info, log_error 5 | 6 | # Configure logging 7 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 8 | 9 | class DiscordHandler: 10 | def __init__(self): 11 | log_info("Initializing DiscordHandler.") 12 | self.ai = AIHandler() 13 | self.speak_shadowchats = True 14 | 15 | @track_performance 16 | async def process(self, message_data): 17 | """Process messages from Discord""" 18 | log_info(f"Processing message data: {message_data}.") 19 | try: 20 | print(f"Message Data: {message_data}") # Debugging line 21 | if message_data.get('needs_ai_response', False): 22 | prompt = self._format_prompt(message_data['content']) 23 | response = await self.ai.generate_response(prompt) 24 | 25 | # Print formatted output to console 26 | print(f"------{message_data['author']}------") 27 | print(f"{message_data['content']}\n") 28 | print(f"------{self.ai.char_name}------") 29 | print(f"{response}\n") 30 | 31 | # Display in web UI chat (assuming you have a function to handle this) 32 | self.display_in_web_ui(message_data['author'], message_data['content'], response) 33 | 34 | if self.speak_shadowchats: 35 | # Speak the shadow chat and the AI response 36 | await self.speak_message(message_data['content']) # Speak the shadow chat 37 | await self.speak_message(response) # Speak the AI response 38 | 39 | return response 40 | except Exception as e: 41 | print(f"Error processing message: {e}") 42 | return None 43 | 44 | @track_performance 45 | def _format_prompt(self, content): 46 | return f"User: {content}\nAssistant: " 47 | 48 | async def speak_message(self, message): 49 | print(f"Speaking message: {message}") 50 | 51 | def display_in_web_ui(self, author, content, response): 52 | """Function to display messages in the web UI chat""" 53 | # This function should be implemented to send messages to your web UI 54 | print(f"Web UI Chat > {author}: {content}") # Display user message in web UI format 55 | print(f"Web UI Chat > {self.ai.char_name}: {response}") # Display AI response in web UI format -------------------------------------------------------------------------------- /utils/dynamic_expression_mapping.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Configure logging 4 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 5 | 6 | class DynamicExpressionMapper: 7 | def __init__(self): 8 | logging.info("Initializing DynamicExpressionMapper.") 9 | # Expanded mapping of emotions to expressions 10 | self.expression_map = { 11 | "happy": "😊", 12 | "very_happy": "😁", 13 | "sad": "😢", 14 | "very_sad": "😭", 15 | "angry": "😠", 16 | "frustrated": "😤", 17 | "surprised": "😲", 18 | "neutral": "😐", 19 | "excited": "🎉", 20 | "confused": "🤔", 21 | "bored": "😴", 22 | "anxious": "😟", 23 | "disgusted": "🤢", 24 | "hopeful": "🌈", 25 | # Add more emotions as needed 26 | } 27 | 28 | def get_expression(self, emotion): 29 | logging.info(f"Getting expression for emotion: {emotion}.") 30 | """Get the corresponding expression for a given emotion.""" 31 | return self.expression_map.get(emotion, "🤔") # Default to thinking face if emotion not found 32 | 33 | # Example usage 34 | if __name__ == "__main__": 35 | mapper = DynamicExpressionMapper() 36 | print(mapper.get_expression("happy")) # Output: 😊 37 | print(mapper.get_expression("unknown")) # Output: 🤔 38 | -------------------------------------------------------------------------------- /utils/emotion_recognizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from transformers import pipeline 3 | import logging 4 | 5 | # Configure logging 6 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 7 | 8 | # Initialize the emotion recognition model 9 | emotion_recognizer = pipeline("text-classification", model="bhadresh-savani/bert-base-uncased-emotion") 10 | 11 | def recognize_emotion_from_text(text): 12 | """Recognize emotion from a given text input.""" 13 | logging.info("Recognizing emotion from text.") 14 | predictions = emotion_recognizer(text) 15 | emotion = max(predictions, key=lambda x: x['score']) 16 | return emotion['label'] 17 | 18 | def recognize_emotion_from_audio(audio_file): 19 | """Transcribe audio and recognize emotion.""" 20 | logging.info("Recognizing emotion from audio.") 21 | from utils.transcriber_translate import analyze_audio_emotion 22 | transcribed_text, emotion = analyze_audio_emotion(audio_file) 23 | return transcribed_text, emotion 24 | -------------------------------------------------------------------------------- /utils/enhanced_memory.py: -------------------------------------------------------------------------------- 1 | from chat.learner import ChatLearner 2 | from memory.manager import MemoryManager 3 | from datetime import datetime 4 | from utils.contextual_memory import ContextualMemory 5 | from utils.character_relationships import CharacterRelationshipManager 6 | from utils.chat_history import update_chat_history 7 | import logging 8 | 9 | # Configure logging 10 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 11 | 12 | class EnhancedMemorySystem: 13 | def __init__(self): 14 | logging.info("Initializing EnhancedMemorySystem.") 15 | self.memory_manager = MemoryManager() 16 | self.chat_learner = ChatLearner() 17 | self.contextual_memory = ContextualMemory() 18 | self.relationship_manager = CharacterRelationshipManager() # Initialize relationship manager 19 | 20 | async def process_interaction(self, message_data, platform): 21 | logging.info(f"Processing interaction for platform: {platform}.") 22 | """Process and store interaction data with enhanced context""" 23 | # Learn from message 24 | self.chat_learner.learn_from_message(message_data) 25 | 26 | # Extract context 27 | context = { 28 | "mood": self.analyze_mood(message_data["content"]), 29 | "topics": self.extract_topics(message_data["content"]), 30 | "interaction_style": self.analyze_interaction_style(message_data), 31 | "timestamp": datetime.now().isoformat() 32 | } 33 | 34 | # Store memory 35 | self.memory_manager.add_memory( 36 | message_data["author"], 37 | context, 38 | platform 39 | ) 40 | 41 | # Update contextual memory 42 | self.contextual_memory.update_context(message_data["author"], context) 43 | 44 | # Update chat history 45 | await update_chat_history(message_data["author"], platform, message_data["content"], "AI response here") # Replace with actual AI response 46 | 47 | # Update relationships based on interaction 48 | self.relationship_manager.update_relationship(message_data["author"], message_data["recipient"], message_data["interaction_type"]) 49 | 50 | return context 51 | 52 | def prune_old_context(self): 53 | """Automatically prune old context data.""" 54 | self.contextual_memory.prune_old_context() 55 | self.relationship_manager.prune_old_relationships() # Prune old relationships -------------------------------------------------------------------------------- /utils/error_boundary.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, TypeVar, Any, Optional 2 | import logging 3 | import traceback 4 | from functools import wraps 5 | import asyncio 6 | from utils.logging import log_error 7 | 8 | T = TypeVar('T') 9 | 10 | class ErrorBoundary: 11 | @staticmethod 12 | def component(fallback_value: Optional[T] = None) -> Callable: 13 | def decorator(func: Callable[..., T]) -> Callable[..., Optional[T]]: 14 | @wraps(func) 15 | async def wrapper(*args, **kwargs) -> Optional[T]: 16 | try: 17 | if asyncio.iscoroutinefunction(func): 18 | return await func(*args, **kwargs) 19 | return func(*args, **kwargs) 20 | except Exception as e: 21 | context = { 22 | 'function': func.__name__, 23 | 'args': str(args), 24 | 'kwargs': str(kwargs) 25 | } 26 | log_error(e, context) 27 | return fallback_value 28 | return wrapper 29 | return decorator 30 | 31 | @staticmethod 32 | def system(error_handler: Optional[Callable[[Exception], Any]] = None): 33 | def decorator(cls: Any) -> Any: 34 | for attr_name, attr_value in cls.__dict__.items(): 35 | if callable(attr_value) and not attr_name.startswith('__'): 36 | wrapped = ErrorBoundary.component()(attr_value) 37 | setattr(cls, attr_name, wrapped) 38 | return cls 39 | return decorator -------------------------------------------------------------------------------- /utils/event_bus.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Callable, Any 2 | import asyncio 3 | import logging 4 | 5 | class EventBus: 6 | def __init__(self): 7 | self.subscribers: Dict[str, List[Callable]] = {} 8 | self._lock = asyncio.Lock() 9 | 10 | async def subscribe(self, event_type: str, callback: Callable): 11 | async with self._lock: 12 | if event_type not in self.subscribers: 13 | self.subscribers[event_type] = [] 14 | self.subscribers[event_type].append(callback) 15 | 16 | async def publish(self, event_type: str, data: Any = None): 17 | if event_type in self.subscribers: 18 | for callback in self.subscribers[event_type]: 19 | try: 20 | if asyncio.iscoroutinefunction(callback): 21 | await callback(data) 22 | else: 23 | callback(data) 24 | except Exception as e: 25 | logging.error(f"Error in event handler: {e}") -------------------------------------------------------------------------------- /utils/expression_mapper.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | from dataclasses import dataclass 3 | from enum import Enum 4 | import logging 5 | 6 | class EmotionIntensity(Enum): 7 | LOW = 0.33 8 | MEDIUM = 0.66 9 | HIGH = 1.0 10 | 11 | @dataclass 12 | class ExpressionConfig: 13 | base_expression: str 14 | intensity: EmotionIntensity 15 | modifiers: Dict[str, float] 16 | blend_duration: float = 0.5 17 | 18 | class DynamicExpressionMapper: 19 | def __init__(self): 20 | self.expression_configs: Dict[str, ExpressionConfig] = {} 21 | self.current_expression: Optional[str] = None 22 | 23 | async def map_emotion_to_expression(self, emotion: str, intensity: float) -> ExpressionConfig: 24 | base_config = self.expression_configs.get(emotion) 25 | if not base_config: 26 | logging.warning(f"No expression config found for emotion: {emotion}") 27 | return self.get_default_expression() 28 | 29 | # Adjust intensity based on input 30 | adjusted_intensity = EmotionIntensity.MEDIUM 31 | if intensity > 0.7: 32 | adjusted_intensity = EmotionIntensity.HIGH 33 | elif intensity < 0.3: 34 | adjusted_intensity = EmotionIntensity.LOW 35 | 36 | return ExpressionConfig( 37 | base_expression=base_config.base_expression, 38 | intensity=adjusted_intensity, 39 | modifiers={k: v * intensity for k, v in base_config.modifiers.items()} 40 | ) -------------------------------------------------------------------------------- /utils/feedback_system.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List 2 | from datetime import datetime, timedelta 3 | import logging 4 | from dataclasses import dataclass, field 5 | 6 | @dataclass 7 | class FeedbackEntry: 8 | interaction_id: str 9 | feedback: Dict[str, Any] 10 | timestamp: datetime 11 | context: Dict[str, Any] = field(default_factory=dict) 12 | 13 | class FeedbackSystem: 14 | def __init__(self): 15 | self.feedback_history: List[FeedbackEntry] = [] 16 | 17 | async def record_feedback(self, interaction_id: str, feedback: Dict[str, Any], context: Dict[str, Any] = None): 18 | """Passively record feedback without modifying system behavior""" 19 | entry = FeedbackEntry( 20 | interaction_id=interaction_id, 21 | feedback=feedback, 22 | timestamp=datetime.now(), 23 | context=context or {} 24 | ) 25 | self.feedback_history.append(entry) 26 | logging.info(f"Recorded feedback for interaction {interaction_id}") 27 | 28 | async def get_feedback_stats(self, time_window: timedelta = None) -> Dict[str, Any]: 29 | """Get statistical summary of recorded feedback""" 30 | relevant_feedback = self.feedback_history 31 | if time_window: 32 | cutoff = datetime.now() - time_window 33 | relevant_feedback = [f for f in self.feedback_history if f.timestamp > cutoff] 34 | 35 | return { 36 | 'total_entries': len(relevant_feedback), 37 | 'average_rating': sum(f.feedback.get('rating', 0) for f in relevant_feedback) / len(relevant_feedback) if relevant_feedback else 0, 38 | 'feedback_count_by_type': self._count_feedback_types(relevant_feedback) 39 | } 40 | 41 | def _count_feedback_types(self, feedback_entries: List[FeedbackEntry]) -> Dict[str, int]: 42 | """Count occurrences of different feedback types""" 43 | type_counts = {} 44 | for entry in feedback_entries: 45 | feedback_type = entry.feedback.get('type', 'unspecified') 46 | type_counts[feedback_type] = type_counts.get(feedback_type, 0) + 1 47 | return type_counts -------------------------------------------------------------------------------- /utils/hotkey_config.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Optional 2 | import json 3 | import logging 4 | import keyboard 5 | from dataclasses import dataclass, asdict 6 | 7 | @dataclass 8 | class HotkeyConfig: 9 | next_message: str = "RIGHT_ARROW" 10 | redo_message: str = "UP_ARROW" 11 | lock_system: str = "GRAVE" 12 | lock_confirm: str = "BACKSLASH" 13 | toggle_speak: str = "RIGHT_CTRL" 14 | toggle_auto: str = "A" 15 | change_sensitivity: str = "S" 16 | soft_reset: str = "R" 17 | view_image: str = "C" 18 | cancel_image: str = "X" 19 | blank_message: str = "B" 20 | 21 | class HotkeyManager: 22 | def __init__(self): 23 | self.config = HotkeyConfig() 24 | self.bound_keys: Dict[str, bool] = {} 25 | 26 | def load_config(self, config_path: str = "config/hotkeys.json"): 27 | try: 28 | with open(config_path, 'r') as f: 29 | config_dict = json.load(f) 30 | self.config = HotkeyConfig(**config_dict) 31 | except FileNotFoundError: 32 | logging.info("No hotkey config found, using defaults") 33 | self.save_config(config_path) 34 | 35 | def save_config(self, config_path: str = "config/hotkeys.json"): 36 | with open(config_path, 'w') as f: 37 | json.dump(asdict(self.config), f, indent=2) 38 | 39 | def bind_hotkeys(self): 40 | try: 41 | # Clear any existing bindings 42 | for key in self.bound_keys: 43 | keyboard.unhook_key(key) 44 | self.bound_keys.clear() 45 | 46 | # Bind new hotkeys 47 | bindings = { 48 | self.config.next_message: "next_input", 49 | self.config.redo_message: "redo_input", 50 | self.config.lock_system: "lock_inputs", 51 | self.config.lock_confirm: "input_lock_backslash", 52 | self.config.toggle_speak: "speak_input_toggle", 53 | self.config.toggle_auto: "input_toggle_autochat", 54 | self.config.change_sensitivity: "input_change_listener_sensitivity", 55 | self.config.soft_reset: "input_soft_reset", 56 | self.config.view_image: "input_view_image", 57 | self.config.cancel_image: "input_cancel_image", 58 | self.config.blank_message: "input_send_blank" 59 | } 60 | 61 | for key, func_name in bindings.items(): 62 | try: 63 | keyboard.on_press_key(key, lambda _: getattr(self, func_name)()) 64 | self.bound_keys[key] = True 65 | except Exception as e: 66 | logging.error(f"Failed to bind key {key}: {e}") 67 | 68 | except Exception as e: 69 | logging.error(f"Error binding hotkeys: {e}") 70 | # Continue running even if hotkeys fail -------------------------------------------------------------------------------- /utils/hotkey_manager.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Callable, Optional 2 | import keyboard 3 | import logging 4 | from utils.config import Config 5 | from utils.error_boundary import ErrorBoundary 6 | 7 | class HotkeyManager: 8 | def __init__(self): 9 | self.active_hotkeys = set() 10 | self.config = Config() 11 | self.config.load() 12 | self.bound_hotkeys = {} 13 | 14 | @ErrorBoundary.component() 15 | def setup_hotkeys(self): 16 | hotkey_mappings = { 17 | self.config.hotkeys.send_message: self._handle_send, 18 | self.config.hotkeys.interrupt: self._handle_interrupt, 19 | self.config.hotkeys.reroll: self._handle_reroll, 20 | self.config.hotkeys.toggle_stream: self._handle_toggle_stream 21 | } 22 | 23 | for hotkey, handler in hotkey_mappings.items(): 24 | try: 25 | keyboard.add_hotkey(hotkey, handler, suppress=True) 26 | self.bound_hotkeys[hotkey] = handler 27 | except Exception as e: 28 | logging.warning(f"Failed to bind hotkey {hotkey}: {e}") 29 | continue # Skip failed hotkey but continue with others 30 | 31 | def cleanup(self): 32 | for hotkey in self.bound_hotkeys: 33 | try: 34 | keyboard.remove_hotkey(hotkey) 35 | except: 36 | pass 37 | 38 | def _handle_send(self): 39 | if "send" not in self.active_hotkeys: 40 | self.active_hotkeys.add("send") 41 | # Handle send action 42 | self.active_hotkeys.remove("send") -------------------------------------------------------------------------------- /utils/integration_manager.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any 2 | import logging 3 | from .state_manager import StateManager 4 | from .event_bus import EventBus 5 | from .recovery_manager import RecoveryManager 6 | from .connection_pool import ConnectionPool 7 | 8 | class IntegrationManager: 9 | def __init__(self): 10 | self.state_manager = StateManager() 11 | self.event_bus = EventBus() 12 | self.recovery_manager = RecoveryManager() 13 | self.connection_pool = ConnectionPool() 14 | 15 | async def initialize(self): 16 | # Register recovery strategies 17 | await self.recovery_manager.register_recovery_strategy( 18 | "vtuber", 19 | self._handle_vtuber_failure 20 | ) 21 | 22 | # Subscribe to system events 23 | await self.event_bus.subscribe( 24 | "system_error", 25 | self._handle_system_error 26 | ) 27 | 28 | async def _handle_vtuber_failure(self, error: Exception): 29 | await self.state_manager.update_state( 30 | "vtuber", 31 | "recovering", 32 | {"error": str(error)} 33 | ) 34 | # Implement recovery logic 35 | 36 | async def _handle_system_error(self, error_data: Dict[str, Any]): 37 | component = error_data.get("component") 38 | if component: 39 | await self.recovery_manager.handle_failure(component, error_data.get("error")) -------------------------------------------------------------------------------- /utils/log_conversion.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import logging 4 | 5 | # Configure logging 6 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 7 | 8 | converted_log_count = 0 9 | 10 | # 11 | # For Importing SillyTaven chats. I used it for Character.AI, as an extension converted it to that format 12 | # 13 | 14 | def run_conversion(): 15 | logging.info("Running log conversion.") 16 | global converted_log_count 17 | # Initialize temp_log as a list or dictionary based on your needs 18 | temp_log = [] # or {} if you want to use a dictionary 19 | 20 | # Example conversion logic (you need to replace this with your actual logic) 21 | # Here, I'm assuming you want to read from some log file and convert it 22 | log_file_path = "path/to/your/logfile.log" # Update this path to your actual log file 23 | if os.path.exists(log_file_path): 24 | with open(log_file_path, 'r') as log_file: 25 | for line in log_file: 26 | # Process each line and append to temp_log 27 | # This is just an example; adjust according to your log format 28 | temp_log.append({"log_entry": line.strip()}) 29 | 30 | # Save the file 31 | converted_log_count += 1 32 | with open(f"Logs/ChatLog-Converted-{converted_log_count}.json", 'w') as outfile: 33 | json.dump(temp_log, outfile, indent=4) 34 | -------------------------------------------------------------------------------- /utils/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from datetime import datetime 3 | import os 4 | import time 5 | from functools import wraps 6 | from typing import Callable, Any 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 10 | app_logger = logging.getLogger('app') 11 | 12 | # Create logs directory if it doesn't exist 13 | os.makedirs('logs', exist_ok=True) 14 | 15 | # Add file handler 16 | file_handler = logging.FileHandler('logs/app.log') 17 | file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) 18 | app_logger.addHandler(file_handler) 19 | 20 | debug_log = "General Debug log will go here!\n\nAnd here!" 21 | rag_log = "RAG log will go here!" 22 | kelvin_log = "Live temperature randomness will go here!" 23 | 24 | def track_response_time(func: Callable[..., Any]) -> Callable[..., Any]: 25 | """Decorator to track and log the response time of a function""" 26 | @wraps(func) 27 | def wrapper(*args, **kwargs): 28 | start_time = time.time() 29 | result = func(*args, **kwargs) 30 | end_time = time.time() 31 | duration = end_time - start_time 32 | log_info(f"{func.__name__} took {duration:.2f} seconds") 33 | return result 34 | return wrapper 35 | 36 | def log_startup(): 37 | """Log application startup information""" 38 | app_logger.info("=" * 50) 39 | app_logger.info(f"Application starting at {datetime.now()}") 40 | app_logger.info("System initialization beginning...") 41 | 42 | def log_info(message: str): 43 | """Log info level message""" 44 | app_logger.info(message) 45 | 46 | def log_error(message: str): 47 | """Log error level message""" 48 | app_logger.error(message) 49 | update_debug_log(f"ERROR: {message}") 50 | 51 | def update_debug_log(text: str): 52 | global debug_log 53 | debug_log += "\n" + str(text) 54 | 55 | def update_rag_log(text: str): 56 | global rag_log 57 | rag_log += "\n" + str(text) 58 | 59 | def clear_rag_log(): 60 | global rag_log 61 | rag_log = "" 62 | 63 | def update_kelvin_log(text: str): 64 | global kelvin_log 65 | kelvin_log = text 66 | 67 | def log_message_length_warning(message: str, length: int, type_str: str): 68 | """Log warning about message length""" 69 | if length < 10: 70 | warning = f"Message too short ({length} chars): {message[:50]}..." 71 | log_error(warning) 72 | update_debug_log(warning) 73 | elif length > 2000: 74 | warning = f"Message too long ({length} chars): {message[:50]}..." 75 | log_error(warning) 76 | update_debug_log(warning) 77 | 78 | def log_stream_status(enabled: bool): 79 | """Log streaming status changes""" 80 | status = "enabled" if enabled else "disabled" 81 | log_info(f"Streaming mode {status}") 82 | update_debug_log(f"Streaming mode {status}") 83 | -------------------------------------------------------------------------------- /utils/lorebook.py: -------------------------------------------------------------------------------- 1 | import utils.cane_lib 2 | import json 3 | import utils.logging 4 | import logging 5 | 6 | # Configure logging 7 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 8 | 9 | do_log_lore = True 10 | total_lore_default = "Here is some lore about the current topic from your lorebook;\n\n" 11 | 12 | 13 | # Load the LORE_BOOK, it is now JSON configurable! 14 | with open("Configurables/Lorebook.json", 'r') as openfile: 15 | LORE_BOOK = json.load(openfile) 16 | 17 | 18 | # For retreival 19 | def lorebook_check(message): 20 | logging.info(f"Checking lorebook for message: {message}.") 21 | global LORE_BOOK 22 | 23 | # Lockout clearing 24 | for lore in LORE_BOOK: 25 | if lore['2'] > 0: 26 | lore['2'] -= 1 27 | 28 | # Search for new ones 29 | for lore in LORE_BOOK: 30 | if utils.cane_lib.keyword_check(message, [" " + lore['0']]) and lore['2'] == 0: 31 | # Set our lockout 32 | lore['2'] += 9 33 | 34 | # Make our info 35 | 36 | combo_lore = lore['0'] + ", " + lore['1'] 37 | 38 | return combo_lore 39 | 40 | return "No lore!" 41 | 42 | # Gathers ALL lore in a given scope (send in the message being sent, as well as any message pairs you want to check) 43 | def lorebook_gather(messages, sent_message): 44 | 45 | # gather, gather, into reformed 46 | reformed_messages = [sent_message, ""] 47 | 48 | for message in messages: 49 | reformed_messages.append(message[0]) 50 | reformed_messages.append(message[1]) 51 | 52 | # gather all of our lore in one spot 53 | total_lore = total_lore_default 54 | 55 | # Reset all lore entry cooldown 56 | for lore in LORE_BOOK: 57 | lore['2'] = 0 58 | 59 | # Search every lore entry for each of the messages, and add the lore as needed 60 | for message in reformed_messages: 61 | # Search for new ones 62 | for lore in LORE_BOOK: 63 | if utils.cane_lib.keyword_check(message, [" " + lore['0'] + " ", " " + lore['0'] + "\'", " " + lore['0'] + "s", 64 | " " + lore['0'] + "!", " " + lore['0'] + ".", " " + lore['0'] + ",", " " + lore['0'] + "!", 65 | ]) and lore['2'] == 0: 66 | 67 | total_lore += (lore['0'] + ", " + lore['1'] + "\n\n") 68 | lore['2'] = 7 # lore has procced, prevent dupes 69 | 70 | if do_log_lore and total_lore != total_lore_default: 71 | utils.logging.update_debug_log(total_lore) 72 | 73 | 74 | return total_lore 75 | 76 | 77 | 78 | # Check if keyword is in the lorebook 79 | def rag_word_check(word): 80 | # Lockout clearing 81 | for lore in LORE_BOOK: 82 | if str.lower(lore['0']) == word: 83 | return True 84 | 85 | return False 86 | 87 | -------------------------------------------------------------------------------- /utils/memory/manager.py: -------------------------------------------------------------------------------- 1 | from utils.logging import track_response_time 2 | from datetime import datetime 3 | import logging 4 | 5 | # Configure logging 6 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 7 | 8 | class MemoryManager: 9 | def __init__(self): 10 | logging.info("Initializing MemoryManager.") 11 | self.memories = {} 12 | 13 | @track_response_time 14 | def add_memory(self, author, context, platform): 15 | if author not in self.memories: 16 | self.memories[author] = [] 17 | self.memories[author].append({ 18 | "context": context, 19 | "platform": platform 20 | }) 21 | 22 | @track_response_time 23 | def store_emotional_state(self, user_id, emotion, intensity): 24 | if user_id not in self.memories: 25 | self.memories[user_id] = [] 26 | self.memories[user_id].append({ 27 | "emotion": emotion, 28 | "intensity": intensity, 29 | "timestamp": datetime.now() 30 | }) 31 | 32 | @track_response_time 33 | def get_user_emotional_history(self, user_id): 34 | return self.memories.get(user_id, []) -------------------------------------------------------------------------------- /utils/memory_handler.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from datetime import datetime, timedelta 4 | import logging 5 | 6 | # Configure logging 7 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 8 | 9 | class MemoryHandler: 10 | def __init__(self, platform, memory_file="user_memories.json"): 11 | logging.info("Initializing MemoryHandler.") 12 | self.platform = platform 13 | self.memory_file = memory_file 14 | self.memories = self._load_memories() 15 | self._clean_old_memories() 16 | 17 | def _load_memories(self): 18 | if os.path.exists(self.memory_file): 19 | with open(self.memory_file, 'r') as f: 20 | return json.load(f) 21 | return {} 22 | 23 | def save_memories(self): 24 | with open(self.memory_file, 'w') as f: 25 | json.dump(self.memories, f, indent=2) 26 | 27 | def _clean_old_memories(self): 28 | current_time = datetime.now() 29 | one_year_ago = current_time - timedelta(days=365) 30 | 31 | for user_id in list(self.memories.keys()): 32 | # Filter interactions older than one year 33 | self.memories[user_id]["interactions"] = [ 34 | interaction for interaction in self.memories[user_id]["interactions"] 35 | if datetime.fromisoformat(interaction["timestamp"]) > one_year_ago 36 | ] 37 | 38 | # Clean conversation history based on timestamps in interactions 39 | if self.memories[user_id]["interactions"]: 40 | recent_interactions = self.memories[user_id]["interactions"][-50:] # Keep last 50 for context 41 | self.memories[user_id]["conversation_history"] = [ 42 | f"User: {interaction['content']}" 43 | for interaction in recent_interactions 44 | ] 45 | 46 | self.save_memories() 47 | 48 | def update_user_memory(self, user_id, interaction_data): 49 | logging.info(f"Updating user memory for user: {user_id}.") 50 | current_time = datetime.now() 51 | 52 | if user_id not in self.memories: 53 | self.memories[user_id] = { 54 | "first_seen": current_time.isoformat(), 55 | "interactions": [], 56 | "preferences": {}, 57 | "conversation_history": [] 58 | } 59 | 60 | # Add new interaction 61 | self.memories[user_id]["interactions"].append({ 62 | "timestamp": current_time.isoformat(), 63 | "content": interaction_data["content"], 64 | "context": interaction_data.get("context", ""), 65 | "emotion": interaction_data.get("emotion", "") 66 | }) 67 | 68 | # Clean old memories for this user 69 | self._clean_old_memories() 70 | 71 | def get_user_context(self, user_id): 72 | if user_id not in self.memories: 73 | return "This is a new user." 74 | 75 | memory = self.memories[user_id] 76 | recent_interactions = memory["conversation_history"] 77 | return "\n".join([ 78 | f"User history context:", 79 | f"First seen: {memory['first_seen']}", 80 | f"Recent conversation:", 81 | "\n".join(recent_interactions) 82 | ]) -------------------------------------------------------------------------------- /utils/memory_manager.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | try: 3 | from scipy.spatial.distance import cosine 4 | except ImportError: 5 | raise ImportError("Please install scipy using: pip install scipy") 6 | import json 7 | import os 8 | from datetime import datetime, timedelta 9 | from multiprocessing import Pool, cpu_count 10 | import logging 11 | from utils.logging import log_info, log_error 12 | 13 | # Configure logging 14 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 15 | 16 | class MemoryManager: 17 | def __init__(self, rag_processor, memory_file="long_term_memory.json"): 18 | log_info("Initializing MemoryManager.") 19 | self.rag = rag_processor 20 | self.memories = {} 21 | self.interaction_history = [] 22 | self.video_context = {} 23 | 24 | def _load_memories(self): 25 | if os.path.exists(self.memory_file): 26 | with open(self.memory_file, 'r') as f: 27 | return json.load(f) 28 | return [] 29 | 30 | def save_memories(self): 31 | with open(self.memory_file, 'w') as f: 32 | json.dump(self.memories, f, indent=2) 33 | 34 | async def add_memory(self, text, context=None): 35 | """Add new memory with parallel processing""" 36 | processed_memories = self.rag.process_documents([text]) 37 | 38 | for memory in processed_memories: 39 | memory['context'] = context 40 | self.memories.append(memory) 41 | 42 | self.save_memories() 43 | 44 | async def retrieve_relevant_memories(self, query, top_k=5): 45 | """Retrieve relevant memories using parallel processing""" 46 | query_embedding = self.rag.embedding_model.encode(query) 47 | 48 | def calculate_similarity(memory): 49 | try: 50 | return 1 - cosine(query_embedding, memory['embedding']) 51 | except: 52 | return -1 53 | 54 | with Pool(processes=self.rag.max_workers) as pool: 55 | similarities = pool.map(calculate_similarity, self.memories) 56 | 57 | # Get top-k memories 58 | memory_scores = list(zip(self.memories, similarities)) 59 | memory_scores.sort(key=lambda x: x[1], reverse=True) 60 | 61 | return [memory['text'] for memory, score in memory_scores[:top_k] if score > 0] 62 | 63 | def store_interaction(self, user_id, message, response, context=None): 64 | log_info(f"Storing interaction for user: {user_id}.") 65 | if user_id not in self.memories: 66 | self.memories[user_id] = [] 67 | 68 | interaction = { 69 | 'timestamp': datetime.now(), 70 | 'message': message, 71 | 'response': response, 72 | 'context': context 73 | } 74 | 75 | self.memories[user_id].append(interaction) 76 | self.interaction_history.append(interaction) 77 | 78 | def get_recent_interactions(self, user_id, limit=10): 79 | return self.memories.get(user_id, [])[-limit:] 80 | 81 | def add_memory(self, user_id, context, platform): 82 | log_info(f"Adding memory for user: {user_id}") 83 | # Your existing code... -------------------------------------------------------------------------------- /utils/message_processing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from utils.emotion_recognizer import recognize_emotion_from_text, recognize_emotion_from_audio 3 | from utils.transcriber_translate import analyze_audio_emotion 4 | import logging 5 | 6 | # Configure logging 7 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 8 | 9 | def process_input(input_data): 10 | logging.info(f"Processing input data: {input_data}.") 11 | """Process text or audio input and return recognized emotion.""" 12 | if input_data['type'] == 'text': 13 | # For text input 14 | emotion = recognize_emotion_from_text(input_data['content']) 15 | return input_data['content'], emotion 16 | elif input_data['type'] == 'audio': 17 | # For audio input 18 | transcribed_text, emotion = recognize_emotion_from_audio(input_data['file']) 19 | return transcribed_text, emotion 20 | return None, None 21 | 22 | def clean_response(response: str) -> str: 23 | logging.info("Cleaning response.") 24 | """Clean and format AI response for Twitch chat.""" 25 | response = ' '.join(response.split()) 26 | 27 | # Truncate if too long (Twitch has a 500 char limit) 28 | if len(response) > 500: 29 | response = response[:497] + "..." 30 | 31 | # Remove any potential unsafe content 32 | # TODO: Add more content filtering as needed 33 | 34 | return response -------------------------------------------------------------------------------- /utils/minecraft_handler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from utils.logging import log_info, log_error 3 | 4 | class MinecraftHandler: 5 | def __init__(self): 6 | log_info("Initializing MinecraftHandler.") 7 | pass 8 | 9 | async def process(self, message_data): 10 | log_info(f"Processing Minecraft message data: {message_data}.") 11 | """Process messages from Minecraft""" 12 | # TODO: Implement Minecraft-specific message handling 13 | pass -------------------------------------------------------------------------------- /utils/model_config.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig 3 | import torch 4 | 5 | # Configure logging 6 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 7 | logger = logging.getLogger(__name__) 8 | 9 | try: 10 | logger.info("Attempting to import required ML packages...") 11 | logger.info("Successfully imported transformers and torch packages") 12 | except ImportError as e: 13 | logger.error(f"Failed to import required packages: {str(e)}") 14 | logger.info("Please install required packages using: pip install transformers torch bitsandbytes") 15 | 16 | def setup_model_config(): 17 | logging.info("Setting up model configuration.") 18 | quantization_config = BitsAndBytesConfig( 19 | load_in_4bit=True, 20 | bnb_4bit_compute_dtype=torch.float16, 21 | bnb_4bit_quant_type="nf4", 22 | bnb_4bit_use_double_quant=True, 23 | ) 24 | model_config = { 25 | "model_id": "facebook/opt-6.7b", 26 | "device_map": "auto", 27 | "torch_dtype": torch.float16, 28 | "quantization_config": quantization_config, 29 | } 30 | return model_config 31 | 32 | def load_model(): 33 | logging.info("Loading model.") 34 | config = setup_model_config() 35 | model = AutoModelForCausalLM.from_pretrained( 36 | config["model_id"], 37 | device_map=config["device_map"], 38 | torch_dtype=config["torch_dtype"], 39 | quantization_config=config["quantization_config"] 40 | ) 41 | tokenizer = AutoTokenizer.from_pretrained(config["model_id"]) 42 | return model, tokenizer -------------------------------------------------------------------------------- /utils/module_manager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Dict, List, Optional 3 | import logging 4 | 5 | class ModuleManager: 6 | def __init__(self): 7 | self.modules: Dict[str, any] = {} 8 | self.dependencies: Dict[str, List[str]] = {} 9 | self.initialized: Dict[str, bool] = {} 10 | 11 | async def register_module(self, name: str, module: any, dependencies: List[str] = None): 12 | self.modules[name] = module 13 | self.dependencies[name] = dependencies or [] 14 | self.initialized[name] = False 15 | 16 | async def initialize_modules(self): 17 | """Initialize modules in correct dependency order""" 18 | for module_name in self._get_initialization_order(): 19 | try: 20 | if hasattr(self.modules[module_name], 'initialize'): 21 | await self.modules[module_name].initialize() 22 | self.initialized[module_name] = True 23 | except Exception as e: 24 | logging.error(f"Failed to initialize {module_name}: {e}") 25 | 26 | def _get_initialization_order(self) -> List[str]: 27 | """Get correct module initialization order based on dependencies""" 28 | visited = set() 29 | order = [] 30 | 31 | def visit(name): 32 | if name in visited: 33 | return 34 | visited.add(name) 35 | for dep in self.dependencies.get(name, []): 36 | visit(dep) 37 | order.append(name) 38 | 39 | for module in self.modules: 40 | visit(module) 41 | 42 | return order -------------------------------------------------------------------------------- /utils/performance_dashboard.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Optional 2 | import asyncio 3 | import logging 4 | from datetime import datetime, timedelta 5 | from .performance_tracker import PerformanceMetrics 6 | from .resource_monitor import ResourceStats 7 | 8 | class PerformanceDashboard: 9 | def __init__(self): 10 | self.performance_tracker = None 11 | self.resource_monitor = None 12 | 13 | async def initialize(self, perf_tracker, resource_monitor): 14 | self.performance_tracker = perf_tracker 15 | self.resource_monitor = resource_monitor 16 | 17 | async def get_performance_summary(self, time_window: timedelta = timedelta(hours=1)) -> Dict: 18 | cutoff_time = datetime.now() - time_window 19 | summary = { 20 | 'response_times': {}, 21 | 'resource_usage': { 22 | 'cpu': [], 23 | 'memory': [] 24 | }, 25 | 'error_rates': {} 26 | } 27 | 28 | # Analyze response times 29 | for func_name, metrics in self.performance_tracker.metrics_history.items(): 30 | recent_metrics = [m for m in metrics if m.timestamp > cutoff_time] 31 | if recent_metrics: 32 | avg_response = sum(m.response_time for m in recent_metrics) / len(recent_metrics) 33 | error_rate = len([m for m in recent_metrics if not m.success]) / len(recent_metrics) 34 | summary['response_times'][func_name] = avg_response 35 | summary['error_rates'][func_name] = error_rate 36 | 37 | # Add resource usage data 38 | for component, stats in self.resource_monitor.stats_history.items(): 39 | recent_stats = [s for s in stats if s.timestamp > cutoff_time.timestamp()] 40 | if recent_stats: 41 | summary['resource_usage']['cpu'].extend([s.cpu_percent for s in recent_stats]) 42 | summary['resource_usage']['memory'].extend([s.memory_percent for s in recent_stats]) 43 | 44 | return summary -------------------------------------------------------------------------------- /utils/performance_metrics.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import time 3 | import logging 4 | import threading 5 | from collections import deque 6 | import datetime 7 | import os 8 | 9 | # Configure logging 10 | logging.basicConfig( 11 | filename='performance.log', 12 | level=logging.INFO, 13 | format='%(asctime)s - %(levelname)s - %(message)s' 14 | ) 15 | 16 | # Store historical data 17 | MAX_POINTS = 60 # Keep last 60 data points 18 | cpu_history = deque(maxlen=MAX_POINTS) 19 | memory_history = deque(maxlen=MAX_POINTS) 20 | time_history = deque(maxlen=MAX_POINTS) 21 | 22 | def track_performance(func): 23 | """Decorator to track function performance""" 24 | def wrapper(*args, **kwargs): 25 | start_time = time.time() 26 | result = func(*args, **kwargs) 27 | end_time = time.time() 28 | elapsed_time = end_time - start_time 29 | logging.info(f"Function '{func.__name__}' took {elapsed_time:.4f} seconds") 30 | return result 31 | return wrapper 32 | 33 | def get_system_metrics(): 34 | """Get current system metrics""" 35 | cpu = psutil.cpu_percent() 36 | memory = psutil.virtual_memory().percent 37 | current_time = datetime.datetime.now().strftime('%H:%M:%S') 38 | 39 | # Store in history 40 | cpu_history.append(cpu) 41 | memory_history.append(memory) 42 | time_history.append(current_time) 43 | 44 | return { 45 | 'cpu': list(cpu_history), 46 | 'memory': list(memory_history), 47 | 'time': list(time_history) 48 | } 49 | 50 | def monitor_system_resources(): 51 | """Monitor system resources continuously""" 52 | while True: 53 | metrics = get_system_metrics() 54 | if metrics['cpu'][-1] > 90: 55 | logging.warning(f"High CPU usage: {metrics['cpu'][-1]}%") 56 | if metrics['memory'][-1] > 90: 57 | logging.warning(f"High memory usage: {metrics['memory'][-1]}%") 58 | time.sleep(5) 59 | 60 | def start_resource_monitoring(): 61 | """Start the resource monitoring in a background thread""" 62 | monitor_thread = threading.Thread(target=monitor_system_resources, daemon=True) 63 | monitor_thread.start() 64 | 65 | def cleanup_logs(log_file='performance.log', days=7): 66 | """Clean up old log files""" 67 | try: 68 | file_age = datetime.datetime.now() - datetime.datetime.fromtimestamp( 69 | os.path.getctime(log_file) 70 | ) 71 | if file_age.days > days: 72 | os.remove(log_file) 73 | logging.info(f"Removed old log file: {log_file}") 74 | except Exception as e: 75 | logging.error(f"Error cleaning up logs: {e}") 76 | -------------------------------------------------------------------------------- /utils/performance_tracker.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional, List 2 | import time 3 | import logging 4 | from functools import wraps 5 | from dataclasses import dataclass 6 | from datetime import datetime, timedelta 7 | 8 | @dataclass 9 | class PerformanceMetrics: 10 | response_time: float 11 | timestamp: datetime 12 | function_name: str 13 | success: bool 14 | 15 | class PerformanceTracker: 16 | def __init__(self, history_retention_days: int = 7): 17 | self.metrics_history: Dict[str, List[PerformanceMetrics]] = {} 18 | self.retention_period = timedelta(days=history_retention_days) 19 | 20 | def track_performance(self): 21 | def decorator(func): 22 | @wraps(func) 23 | async def wrapper(*args, **kwargs): 24 | start_time = time.time() 25 | success = True 26 | try: 27 | result = await func(*args, **kwargs) 28 | return result 29 | except Exception as e: 30 | success = False 31 | raise e 32 | finally: 33 | end_time = time.time() 34 | self._record_metrics(func.__name__, end_time - start_time, success) 35 | return wrapper 36 | return decorator 37 | 38 | def _record_metrics(self, function_name: str, response_time: float, success: bool): 39 | if function_name not in self.metrics_history: 40 | self.metrics_history[function_name] = [] 41 | 42 | self.metrics_history[function_name].append( 43 | PerformanceMetrics( 44 | response_time=response_time, 45 | timestamp=datetime.now(), 46 | function_name=function_name, 47 | success=success 48 | ) 49 | ) 50 | self._cleanup_old_metrics() 51 | 52 | if response_time > 1.0: # Alert on slow responses 53 | logging.warning(f"Slow response time in {function_name}: {response_time:.2f}s") 54 | 55 | def _cleanup_old_metrics(self): 56 | cutoff_time = datetime.now() - self.retention_period 57 | for func_name in self.metrics_history: 58 | self.metrics_history[func_name] = [ 59 | metric for metric in self.metrics_history[func_name] 60 | if metric.timestamp > cutoff_time 61 | ] -------------------------------------------------------------------------------- /utils/personality_manager.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List 2 | import logging 3 | from datetime import datetime 4 | from dataclasses import dataclass 5 | 6 | @dataclass 7 | class PersonalityTrait: 8 | name: str 9 | value: float 10 | confidence: float 11 | last_updated: datetime 12 | 13 | class PersonalityManager: 14 | def __init__(self): 15 | self.traits: Dict[str, PersonalityTrait] = {} 16 | self.interaction_history: List[Dict[str, Any]] = [] 17 | 18 | async def update_trait(self, trait_name: str, feedback_value: float, confidence: float = 1.0): 19 | if trait_name not in self.traits: 20 | self.traits[trait_name] = PersonalityTrait( 21 | name=trait_name, 22 | value=feedback_value, 23 | confidence=confidence, 24 | last_updated=datetime.now() 25 | ) 26 | else: 27 | current = self.traits[trait_name] 28 | # Weighted average based on confidence 29 | new_value = ( 30 | (current.value * current.confidence + feedback_value * confidence) / 31 | (current.confidence + confidence) 32 | ) 33 | self.traits[trait_name] = PersonalityTrait( 34 | name=trait_name, 35 | value=new_value, 36 | confidence=min(current.confidence + confidence, 1.0), 37 | last_updated=datetime.now() 38 | ) 39 | 40 | async def record_interaction(self, interaction_data: Dict[str, Any]): 41 | self.interaction_history.append({ 42 | **interaction_data, 43 | 'timestamp': datetime.now() 44 | }) 45 | 46 | async def get_personality_profile(self) -> Dict[str, Any]: 47 | return { 48 | 'traits': {name: trait.value for name, trait in self.traits.items()}, 49 | 'confidence_levels': {name: trait.confidence for name, trait in self.traits.items()} 50 | } -------------------------------------------------------------------------------- /utils/personality_template.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List, Optional 2 | from dataclasses import dataclass 3 | import json 4 | import logging 5 | 6 | @dataclass 7 | class PersonalityTemplate: 8 | name: str 9 | base_traits: Dict[str, float] 10 | emotional_responses: Dict[str, List[str]] 11 | voice_parameters: Dict[str, float] 12 | interaction_style: Dict[str, Any] 13 | 14 | class PersonalityTemplateManager: 15 | def __init__(self): 16 | self.templates: Dict[str, PersonalityTemplate] = {} 17 | 18 | def load_template(self, template_name: str) -> Optional[PersonalityTemplate]: 19 | try: 20 | with open(f"templates/{template_name}.json", "r") as f: 21 | data = json.load(f) 22 | return PersonalityTemplate(**data) 23 | except Exception as e: 24 | logging.error(f"Failed to load template {template_name}: {e}") 25 | return None 26 | 27 | def save_template(self, template: PersonalityTemplate): 28 | try: 29 | with open(f"templates/{template.name}.json", "w") as f: 30 | json.dump(template.__dict__, f, indent=2) 31 | except Exception as e: 32 | logging.error(f"Failed to save template {template.name}: {e}") -------------------------------------------------------------------------------- /utils/personality_templates.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | import json 3 | import logging 4 | from dataclasses import dataclass 5 | from datetime import datetime 6 | 7 | @dataclass 8 | class PersonalityTemplate: 9 | name: str 10 | traits: Dict[str, float] 11 | behaviors: Dict[str, Any] 12 | created_at: datetime 13 | modified_at: datetime 14 | 15 | class TemplateManager: 16 | def __init__(self): 17 | self.templates: Dict[str, PersonalityTemplate] = {} 18 | 19 | def create_template(self, name: str, traits: Dict[str, float], behaviors: Dict[str, Any]) -> PersonalityTemplate: 20 | template = PersonalityTemplate( 21 | name=name, 22 | traits=traits, 23 | behaviors=behaviors, 24 | created_at=datetime.now(), 25 | modified_at=datetime.now() 26 | ) 27 | self.templates[name] = template 28 | return template 29 | 30 | def get_template(self, name: str) -> Optional[PersonalityTemplate]: 31 | return self.templates.get(name) -------------------------------------------------------------------------------- /utils/personalized_response.py: -------------------------------------------------------------------------------- 1 | from chat.learner import ChatLearner 2 | from utils.memory_handler import MemoryHandler 3 | from datetime import datetime 4 | from textblob import TextBlob 5 | import logging 6 | 7 | # Configure logging 8 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 9 | 10 | class PersonalizedResponseGenerator: 11 | def __init__(self, memory_manager, chat_learner): 12 | logging.info("Initializing PersonalizedResponseGenerator.") 13 | self.memory_manager = memory_manager 14 | self.chat_learner = chat_learner 15 | self.memory_handler = memory_manager 16 | 17 | async def generate_response(self, user_id, message, platform): 18 | logging.info(f"Generating response for user: {user_id}.") 19 | # Get user context and history 20 | user_context = self.memory_handler.get_user_context(user_id) 21 | emotional_history = self.chat_learner.get_emotional_state(user_id) 22 | 23 | if not user_context: 24 | # New user 25 | response = await self._generate_new_user_response(message) 26 | else: 27 | # Existing user 28 | response = await self._generate_personalized_response(user_id, message, user_context) 29 | 30 | # Analyze message sentiment 31 | sentiment = TextBlob(message).sentiment 32 | 33 | # Store interaction 34 | self.memory_manager.add_memory( 35 | user_id=user_id, 36 | context={ 37 | 'message': message, 38 | 'response': response, 39 | 'sentiment': sentiment.polarity 40 | }, 41 | platform=platform, 42 | emotion=sentiment.polarity, 43 | interaction_type='chat' 44 | ) 45 | 46 | return response 47 | 48 | async def _generate_personalized_response(self, user_id, message, user_context): 49 | # Get recent interactions 50 | recent_interactions = user_context['interactions'][-5:] 51 | 52 | # Build context string 53 | context = f"""User Profile: 54 | First interaction: {user_context['first_interaction']} 55 | Total interactions: {len(user_context['interactions'])} 56 | Recent context: {recent_interactions} 57 | """ 58 | 59 | # Generate response using chat learner with context 60 | response = await self.chat_learner.generate_response(message, context) 61 | return response 62 | 63 | async def _generate_new_user_response(self, message): 64 | return await self.chat_learner.generate_response(message, "New user interaction") 65 | -------------------------------------------------------------------------------- /utils/platform_handlers.py: -------------------------------------------------------------------------------- 1 | from minecraft_handler import MinecraftHandler 2 | from discord_handler import DiscordHandler 3 | from twitch_handler import TwitchHandler 4 | from utils.personalized_response import PersonalizedResponseGenerator 5 | import logging 6 | 7 | # Configure logging 8 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 9 | 10 | class PlatformManager: 11 | def __init__(self, memory_manager, chat_learner): 12 | logging.info("Initializing PlatformManager.") 13 | self.response_generator = PersonalizedResponseGenerator(memory_manager, chat_learner) 14 | self.handlers = { 15 | "twitch": TwitchHandler(self.response_generator), 16 | "discord": DiscordHandler(self.response_generator), 17 | "minecraft": MinecraftHandler(self.response_generator) 18 | } 19 | 20 | async def handle_message(self, message_data): 21 | logging.info(f"Handling message data: {message_data}.") 22 | """Route messages to appropriate platform handlers""" 23 | platform = message_data.get("platform") 24 | if platform in self.handlers: 25 | return await self.handlers[platform].process(message_data) 26 | else: 27 | logging.error(f"Platform handler for {platform} not found.") -------------------------------------------------------------------------------- /utils/rag_processor.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import Pool, cpu_count 2 | import numpy as np 3 | from datetime import datetime 4 | import logging 5 | 6 | # Configure logging 7 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 8 | 9 | class MultiprocessRAG: 10 | def __init__(self, embedding_model, chunk_size=1000, max_workers=None): 11 | logging.info("Initializing MultiprocessRAG.") 12 | self.chunk_size = chunk_size 13 | self.embedding_model = embedding_model 14 | self.max_workers = max_workers or (cpu_count() - 1) 15 | 16 | def _chunk_processor(self, text_chunk): 17 | """Process individual chunks in separate processes""" 18 | try: 19 | embedding = self.embedding_model.encode(text_chunk) 20 | return { 21 | 'text': text_chunk, 22 | 'embedding': embedding, 23 | 'timestamp': datetime.now().isoformat() 24 | } 25 | except Exception as e: 26 | logging.error(f"Error processing chunk: {e}") 27 | return None 28 | 29 | def process_documents(self, documents): 30 | logging.info("Processing documents.") 31 | chunks = self._split_documents(documents) 32 | 33 | with Pool(processes=self.max_workers) as pool: 34 | results = pool.map(self._chunk_processor, chunks) 35 | 36 | return [r for r in results if r is not None] 37 | 38 | def _split_documents(self, documents): 39 | """Split documents into chunks for processing""" 40 | chunks = [] 41 | for doc in documents: 42 | # Simple splitting strategy - can be made more sophisticated 43 | words = doc.split() 44 | for i in range(0, len(words), self.chunk_size): 45 | chunk = ' '.join(words[i:i + self.chunk_size]) 46 | chunks.append(chunk) 47 | return chunks -------------------------------------------------------------------------------- /utils/recovery_manager.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Dict, Callable 3 | from datetime import datetime, timedelta 4 | 5 | class RecoveryManager: 6 | def __init__(self): 7 | self.recovery_strategies: Dict[str, Callable] = {} 8 | self.failure_counts: Dict[str, int] = {} 9 | self.last_failures: Dict[str, datetime] = {} 10 | self.max_retries = 3 11 | self.cooldown_period = timedelta(minutes=5) 12 | 13 | async def register_recovery_strategy(self, component: str, strategy: Callable): 14 | self.recovery_strategies[component] = strategy 15 | self.failure_counts[component] = 0 16 | 17 | async def handle_failure(self, component: str, error: Exception) -> bool: 18 | if component not in self.recovery_strategies: 19 | logging.error(f"No recovery strategy for {component}") 20 | return False 21 | 22 | now = datetime.now() 23 | if component in self.last_failures: 24 | if now - self.last_failures[component] < self.cooldown_period: 25 | self.failure_counts[component] += 1 26 | else: 27 | self.failure_counts[component] = 1 28 | 29 | self.last_failures[component] = now 30 | 31 | if self.failure_counts[component] > self.max_retries: 32 | logging.error(f"Max retries exceeded for {component}") 33 | return False 34 | 35 | try: 36 | await self.recovery_strategies[component](error) 37 | return True 38 | except Exception as e: 39 | logging.error(f"Recovery failed for {component}: {e}") 40 | return False -------------------------------------------------------------------------------- /utils/resource_monitor.py: -------------------------------------------------------------------------------- 1 | import psutil 2 | import logging 3 | from dataclasses import dataclass 4 | from typing import Dict, Optional 5 | import time 6 | 7 | @dataclass 8 | class ResourceStats: 9 | cpu_percent: float 10 | memory_percent: float 11 | timestamp: float 12 | 13 | class ResourceMonitor: 14 | def __init__(self): 15 | self.stats_history: Dict[str, list[ResourceStats]] = {} 16 | self.warning_thresholds = { 17 | 'cpu': 80.0, 18 | 'memory': 80.0 19 | } 20 | 21 | async def monitor_resources(self, component_name: str) -> ResourceStats: 22 | stats = ResourceStats( 23 | cpu_percent=psutil.cpu_percent(), 24 | memory_percent=psutil.virtual_memory().percent, 25 | timestamp=time.time() 26 | ) 27 | 28 | if component_name not in self.stats_history: 29 | self.stats_history[component_name] = [] 30 | 31 | self.stats_history[component_name].append(stats) 32 | 33 | # Check thresholds 34 | if stats.cpu_percent > self.warning_thresholds['cpu']: 35 | logging.warning(f"High CPU usage ({stats.cpu_percent}%) in {component_name}") 36 | if stats.memory_percent > self.warning_thresholds['memory']: 37 | logging.warning(f"High memory usage ({stats.memory_percent}%) in {component_name}") 38 | 39 | return stats -------------------------------------------------------------------------------- /utils/response_processor.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from utils.logging import log_info, log_error 3 | 4 | # Configure logging 5 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 6 | 7 | class ResponseProcessor: 8 | def __init__(self): 9 | log_info("Initializing ResponseProcessor.") 10 | self.platform_formatters = { 11 | "twitch": self.format_twitch_response, 12 | "discord": self.format_discord_response 13 | } 14 | 15 | async def process_response(self, response, platform, user_context): 16 | log_info(f"Processing response for platform: {platform}.") 17 | """Process and format AI response based on platform and context""" 18 | # Apply platform-specific formatting 19 | formatted_response = self.platform_formatters.get( 20 | platform, 21 | lambda x: x 22 | )(response) 23 | 24 | # Add appropriate emotes/emoji 25 | formatted_response = self.add_platform_emotes(formatted_response, platform) 26 | 27 | # Adjust response style based on user context 28 | formatted_response = self.adjust_style(formatted_response, user_context) 29 | 30 | return formatted_response -------------------------------------------------------------------------------- /utils/retrospect.py: -------------------------------------------------------------------------------- 1 | import utils.based_rag 2 | import random 3 | import API.Oogabooga_Api_Support 4 | import utils.logging 5 | import os 6 | import logging 7 | from utils.logging import log_info, log_error 8 | 9 | # Configure logging 10 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 11 | 12 | summary_tokens_count = 310 13 | search_point_size = 16 14 | 15 | enable_debug = True 16 | char_name = os.environ.get("CHAR_NAME") 17 | 18 | 19 | # remembers a random past event 20 | def retrospect_random_mem_summary(): 21 | log_info("Generating random memory summary.") 22 | history = utils.based_rag.history_database 23 | 24 | # find random point in history to think about (not including anything recently) 25 | search_point = random.randint(0, len(history) - 90) 26 | 27 | history_scope = history[search_point:search_point+search_point_size] 28 | retrospect_message = ("[System L] Can you please summarize all of these chat messages? These are previous memories that you, " + char_name + 29 | ", have experienced. " + 30 | "Feel free to focus on details that are of note or you find interest in.") 31 | 32 | if enable_debug: 33 | utils.logging.update_rag_log(history_scope) 34 | 35 | # Encode and send! 36 | pre_encoded_message = API.Oogabooga_Api_Support.encode_raw_new_api(history_scope, retrospect_message, search_point_size) 37 | API.Oogabooga_Api_Support.summary_memory_run(pre_encoded_message, retrospect_message) 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | # 46 | # FUTURE PLANNED 47 | 48 | # remember and summarize everything since the last daily rememberence 49 | 50 | # remember and summarize the last [memory window] messages 51 | 52 | # gather various memories on this subject, and summarize what you know 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /utils/settings.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Configure logging 4 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 5 | 6 | hotkeys_locked = False 7 | speak_shadowchats = False 8 | 9 | max_tokens = 110 10 | newline_cut = True 11 | 12 | alarm_time = "09:09" 13 | model_preset = "Default" 14 | 15 | cam_use_image_feed = False 16 | cam_direct_talk = True 17 | cam_reply_after = False 18 | cam_image_preview = True 19 | 20 | # Valid values; "Faces", "Random", "None" 21 | eyes_follow = "None" 22 | 23 | minecraft_enabled = False 24 | alarm_enabled = True 25 | vtube_enabled = True 26 | discord_enabled = True 27 | rag_enabled = True 28 | vision_enabled = True 29 | 30 | # Feature Toggles 31 | autochat_enabled = True # Toggle for auto-chat feature 32 | voice_enabled = True # Toggle for voice feature 33 | memory_enabled = True # Toggle for memory feature 34 | debug_mode = True # Toggle for debug mode 35 | 36 | # Twitch Settings 37 | TWITCH_ENABLED = True 38 | TWITCH_DEBUG_LOGGING = True 39 | 40 | # Rate Limiting Settings 41 | TWITCH_RATE_LIMIT_MESSAGES = 20 # Maximum messages per time window 42 | TWITCH_RATE_LIMIT_SECONDS = 30 # Time window in seconds 43 | TWITCH_MESSAGE_COOLDOWN = 2.0 # Minimum seconds between messages 44 | 45 | # Other Twitch Settings 46 | TWITCH_MAX_RESPONSE_LENGTH = 500 # Maximum length of responses 47 | 48 | # Discord Voice Settings 49 | DISCORD_VOICE_ENABLED = True 50 | DISCORD_TTS_LANGUAGE = "en" 51 | DISCORD_TTS_SPEED = 1.0 52 | DISCORD_VOICE_TIMEOUT = 300 # 5 minutes 53 | DISCORD_MAX_AUDIO_LENGTH = 300 # 5 minutes 54 | DISCORD_AUDIO_QUALITY = "high" 55 | 56 | # Voice Command Settings 57 | DISCORD_COMMAND_PREFIX = "/" 58 | DISCORD_VOICE_COMMANDS = { 59 | "tts": "Convert text to speech", 60 | "play": "Play audio from URL", 61 | "stop": "Stop current audio", 62 | "help": "Show command list" 63 | } 64 | 65 | # Streaming Settings 66 | ENABLE_STREAMING = True # Can be overridden by .env 67 | STREAM_CHUNK_SIZE = 8 68 | MAX_MESSAGE_LENGTH = 2000 69 | MIN_MESSAGE_LENGTH = 10 70 | 71 | # RP Settings 72 | RP_SUPPRESSION_THRESHOLD = 3 # Lowered threshold for RP suppression 73 | RP_SUPPRESSION_ENABLED = False # Can be toggled independently 74 | NEWLINE_CUT_ENABLED = True # Can be toggled independently 75 | 76 | # Stopping strings configuration 77 | SYSTEM_STOPPING_STRINGS = ["[System:", "[Assistant:", "[Human:"] 78 | CHAT_STOPPING_STRINGS = ["User:", "Human:", "Assistant:"] 79 | RP_STOPPING_STRINGS = ["*", "[", "]"] 80 | 81 | logging.info("Settings loaded.") 82 | -------------------------------------------------------------------------------- /utils/state_manager.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | import asyncio 3 | import logging 4 | from dataclasses import dataclass 5 | from datetime import datetime 6 | 7 | @dataclass 8 | class ComponentState: 9 | status: str 10 | last_updated: datetime 11 | metadata: Dict[str, Any] 12 | 13 | class StateManager: 14 | def __init__(self): 15 | self.states: Dict[str, ComponentState] = {} 16 | self._lock = asyncio.Lock() 17 | 18 | async def update_state(self, component: str, status: str, metadata: Dict[str, Any] = None): 19 | async with self._lock: 20 | self.states[component] = ComponentState( 21 | status=status, 22 | last_updated=datetime.now(), 23 | metadata=metadata or {} 24 | ) 25 | logging.info(f"State updated for {component}: {status}") 26 | 27 | async def get_state(self, component: str) -> Optional[ComponentState]: 28 | return self.states.get(component) 29 | 30 | async def get_all_states(self) -> Dict[str, ComponentState]: 31 | return self.states.copy() -------------------------------------------------------------------------------- /utils/stream_handler.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, AsyncGenerator 2 | import asyncio 3 | import logging 4 | from utils.vtuber_expression_controller import VTuberExpressionController 5 | from utils.config import Config 6 | 7 | class StreamHandler: 8 | def __init__(self): 9 | self.current_task: Optional[asyncio.Task] = None 10 | 11 | async def stream_with_expressions( 12 | self, 13 | text_generator: AsyncGenerator[str, None], 14 | expression_controller: VTuberExpressionController 15 | ): 16 | try: 17 | if self.current_task: 18 | self.current_task.cancel() 19 | 20 | async for chunk in text_generator: 21 | # Check for stopping strings 22 | if any(stop in chunk for stop in Config().ui.stopping_strings): 23 | break 24 | 25 | # Update expressions as text streams 26 | await expression_controller.stream_emotes(chunk) 27 | yield chunk 28 | 29 | except asyncio.CancelledError: 30 | logging.info("Stream interrupted") 31 | finally: 32 | self.current_task = None -------------------------------------------------------------------------------- /utils/transcriber_translate.py: -------------------------------------------------------------------------------- 1 | import os 2 | import whisper 3 | import torch 4 | from dotenv import load_dotenv 5 | from transformers import pipeline 6 | import logging 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 10 | 11 | load_dotenv() 12 | 13 | device = "cuda" if torch.cuda.is_available() else "cpu" 14 | USER_MODEL = os.environ.get("WHISPER_MODEL") 15 | 16 | # Initialize emotion recognition model 17 | emotion_recognizer = pipeline("text-classification", model="bhadresh-savani/bert-base-uncased-emotion", device=0 if device == "cuda" else -1) 18 | 19 | def to_transcribe_original_language(voice): 20 | logging.info("Transcribing original language.") 21 | model = whisper.load_model(USER_MODEL, device=device) 22 | result = model.transcribe(voice, language="en", compression_ratio_threshold=1.9, no_speech_threshold=0.1) 23 | return " ".join([mem['text'] for mem in result["segments"]]) 24 | 25 | def analyze_audio_emotion(voice): 26 | logging.info("Analyzing audio emotion.") 27 | """Transcribe audio and analyze emotion.""" 28 | transcribed_text = to_transcribe_original_language(voice) 29 | emotion = emotion_recognizer(transcribed_text)[0]['label'] 30 | return transcribed_text, emotion 31 | 32 | 33 | -------------------------------------------------------------------------------- /utils/twitch_handler.py: -------------------------------------------------------------------------------- 1 | from ai_handler import AIHandler 2 | from memory_manager import MemoryManager 3 | from rag_processor import MultiprocessRAG 4 | try: 5 | from sentence_transformers import SentenceTransformer 6 | except ImportError: 7 | raise ImportError("Please install sentence-transformers using: pip install sentence-transformers") 8 | import logging 9 | from utils.logging import log_info, log_error 10 | 11 | # Configure logging 12 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 13 | 14 | class TwitchHandler: 15 | def __init__(self): 16 | log_info("Initializing TwitchHandler.") 17 | embedding_model = SentenceTransformer('all-MiniLM-L6-v2') 18 | rag_processor = MultiprocessRAG(embedding_model) 19 | self.memory_manager = MemoryManager(rag_processor) 20 | self.ai = AIHandler() 21 | self.speak_shadowchats = True 22 | 23 | async def process(self, message_data): 24 | log_info(f"Processing message data: {message_data}") 25 | if message_data.get('needs_ai_response', False): 26 | # Get relevant memories 27 | relevant_memories = await self.memory_manager.retrieve_relevant_memories( 28 | message_data['content'] 29 | ) 30 | 31 | # Add current interaction to memories 32 | await self.memory_manager.add_memory( 33 | message_data['content'], 34 | context={'user_id': message_data['user_id']} 35 | ) 36 | 37 | # Create context-aware prompt 38 | prompt = self._format_prompt( 39 | content=message_data['content'], 40 | memories=relevant_memories, 41 | username=message_data.get('username', 'User') 42 | ) 43 | 44 | response = await self.ai.generate_response(prompt) 45 | if self.speak_shadowchats: 46 | await self.speak_message(message_data['content']) 47 | return response 48 | return None 49 | 50 | def _format_prompt(self, content, memories, username): 51 | memory_context = "\n".join(memories) if memories else "No relevant memories." 52 | return f"""Context: You are a Twitch chat AI assistant. 53 | Relevant memories: 54 | {memory_context} 55 | 56 | Current interaction: 57 | {username}: {content} 58 | Assistant: """ 59 | 60 | async def speak_message(self, message): 61 | print(f"Speaking message: {message}") 62 | 63 | async def handle_message(self, message): 64 | log_info(f"Handling message: {message}.") 65 | -------------------------------------------------------------------------------- /utils/ui_config.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, List 2 | import json 3 | from dataclasses import dataclass, asdict, field 4 | 5 | @dataclass 6 | class UIConfig: 7 | primary_color: str = "#0b9ed9" 8 | stopping_strings: List[str] = field(default_factory=lambda: [ 9 | "Human:", "Assistant:", "User:" 10 | ]) 11 | 12 | class UIConfigManager: 13 | def __init__(self): 14 | self.config = UIConfig() 15 | 16 | def load_config(self, config_path: str = "config/ui_config.json"): 17 | try: 18 | with open(config_path, 'r') as f: 19 | config_dict = json.load(f) 20 | self.config = UIConfig(**config_dict) 21 | except FileNotFoundError: 22 | self.save_config(config_path) 23 | 24 | def save_config(self, config_path: str = "config/ui_config.json"): 25 | with open(config_path, 'w') as f: 26 | json.dump(asdict(self.config), f, indent=2) 27 | 28 | def get_gradio_theme(self) -> Dict[str, Any]: 29 | return { 30 | "primary_hue": self.config.primary_color, 31 | "button_primary_background_fill": self.config.primary_color, 32 | "button_primary_background_fill_dark": self.config.primary_color 33 | } -------------------------------------------------------------------------------- /utils/ui_manager.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Callable 2 | import gradio as gr 3 | import asyncio 4 | import logging 5 | 6 | class UIManager: 7 | def __init__(self, config): 8 | self.config = config 9 | self.current_stream_task: Optional[asyncio.Task] = None 10 | 11 | def create_interface(self): 12 | with gr.Blocks(theme=gr.themes.Default( 13 | primary_hue=self.config.ui.primary_color 14 | )) as interface: 15 | with gr.Row(): 16 | txt_input = gr.Textbox( 17 | placeholder="Type your message...", 18 | show_label=False 19 | ) 20 | btn_send = gr.Button("Send") if self.config.ui.enable_send_button else None 21 | 22 | with gr.Row(): 23 | img_preview = gr.Image( 24 | label="Visual Preview", 25 | interactive=False 26 | ) 27 | 28 | # Add hotkey handlers 29 | interface.load(self._setup_hotkeys) 30 | 31 | if btn_send: 32 | btn_send.click( 33 | fn=self._handle_send, 34 | inputs=[txt_input], 35 | outputs=[img_preview] 36 | ) 37 | 38 | return interface 39 | 40 | def _setup_hotkeys(self): 41 | try: 42 | # Setup hotkeys with error handling 43 | hotkeys = { 44 | self.config.hotkeys.send_message: self._handle_send, 45 | self.config.hotkeys.interrupt: self._handle_interrupt, 46 | self.config.hotkeys.reroll: self._handle_reroll, 47 | } 48 | return hotkeys 49 | except Exception as e: 50 | logging.warning(f"Failed to bind hotkeys: {e}") 51 | return {} -------------------------------------------------------------------------------- /utils/uni_pipes.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # GRRRR! GRRRRRRR! I HATE WORKFLOWS! 4 | 5 | # This is our state manager. It can have a list of many options to control IO. Options are as follows; 6 | # "Idle" = Nothing is happening 7 | # "TTS Process" = TTS Processing Message 8 | # "RAG Process" = RAG Running and Processing 9 | # "Thinking" = LLM Work 10 | # "Speaking" = TTS Output 11 | # 12 | # Pipe type respresents a variety of actions, such as "Talk", "Picture", "Discord Message" 13 | # 14 | # Comes with [current pipeflow spot, pipe ID, pipe type] 15 | cur_states = [["Idle", 0, "None"], ["Idle", 0, "Discord"]] 16 | 17 | import logging 18 | 19 | # Configure logging 20 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 21 | 22 | def some_function(): 23 | logging.info("Some function executed.") 24 | # ... existing code ... 25 | -------------------------------------------------------------------------------- /utils/user_context.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Configure logging 4 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 5 | 6 | async def get_user_context(username: str, platform: str) -> str: 7 | """ 8 | Retrieve or create user context for personalization 9 | """ 10 | logging.info(f"Retrieving user context for {username} on {platform}.") 11 | # TODO: Implement user context storage (e.g., SQLite, JSON file) 12 | # This should store user preferences, interaction style, topics of interest 13 | return "New user" # Placeholder 14 | 15 | async def update_user_context(username: str, platform: str, new_data: dict): 16 | """ 17 | Update user context with new information 18 | """ 19 | logging.info(f"Updating user context for {username} on {platform}.") 20 | # TODO: Implement context updating 21 | pass -------------------------------------------------------------------------------- /utils/visual_handler.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | import asyncio 3 | from dataclasses import dataclass 4 | 5 | @dataclass 6 | class VisualResult: 7 | image_path: str 8 | metadata_tags: List[str] 9 | confidence: float 10 | 11 | class VisualHandler: 12 | def __init__(self, config_manager): 13 | self.config = config_manager 14 | self.current_stream = None 15 | 16 | async def process_visual(self, prompt: str) -> VisualResult: 17 | self.current_stream = asyncio.create_task(self._generate_visual(prompt)) 18 | try: 19 | result = await self.current_stream 20 | return result 21 | except asyncio.CancelledError: 22 | return None 23 | 24 | def interrupt_stream(self): 25 | if self.current_stream and not self.current_stream.done(): 26 | self.current_stream.cancel() 27 | 28 | async def reroll_result(self, prompt: str) -> VisualResult: 29 | self.interrupt_stream() 30 | return await self.process_visual(prompt) -------------------------------------------------------------------------------- /utils/voice_analyzer.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | import numpy as np 3 | from dataclasses import dataclass 4 | 5 | @dataclass 6 | class VoiceToneAnalysis: 7 | pitch: float 8 | volume: float 9 | speed: float 10 | emotion_probability: Dict[str, float] 11 | 12 | class VoiceToneMapper: 13 | def __init__(self): 14 | self.current_analysis: Optional[VoiceToneAnalysis] = None 15 | 16 | async def analyze_voice(self, audio_data: np.ndarray) -> VoiceToneAnalysis: 17 | # Analyze voice characteristics 18 | pass 19 | 20 | async def map_to_emotion(self, analysis: VoiceToneAnalysis) -> str: 21 | # Map voice characteristics to emotion 22 | pass -------------------------------------------------------------------------------- /utils/voice_mapper.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | import numpy as np 3 | from dataclasses import dataclass 4 | 5 | @dataclass 6 | class VoiceToneParameters: 7 | pitch: float 8 | speed: float 9 | volume: float 10 | emotion_intensity: float 11 | 12 | class VoiceToneMapper: 13 | def __init__(self): 14 | self.tone_profiles: Dict[str, VoiceToneParameters] = {} 15 | 16 | def map_emotion_to_voice(self, emotion: str, intensity: float) -> VoiceToneParameters: 17 | base_profile = self.tone_profiles.get(emotion) 18 | if not base_profile: 19 | return self.get_default_profile() 20 | 21 | return VoiceToneParameters( 22 | pitch=base_profile.pitch * intensity, 23 | speed=base_profile.speed * (1 + (intensity - 0.5) * 0.2), 24 | volume=base_profile.volume * intensity, 25 | emotion_intensity=intensity 26 | ) -------------------------------------------------------------------------------- /utils/voice_tone_analyzer.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional, List 2 | import numpy as np 3 | from dataclasses import dataclass 4 | 5 | @dataclass 6 | class VoiceToneMetrics: 7 | pitch: float 8 | volume: float 9 | speed: float 10 | emotion_confidence: float 11 | 12 | class VoiceToneAnalyzer: 13 | def __init__(self): 14 | self.tone_history: List[VoiceToneMetrics] = [] 15 | 16 | async def analyze_voice_tone(self, audio_data: np.ndarray) -> VoiceToneMetrics: 17 | """Analyze voice tone from audio data""" 18 | # Implementation would process audio and extract metrics 19 | pass 20 | 21 | async def match_emotion_to_tone(self, tone_metrics: VoiceToneMetrics) -> str: 22 | """Match voice tone to emotional state""" 23 | # Implementation would map voice metrics to emotions 24 | pass -------------------------------------------------------------------------------- /utils/voice_tone_mapper.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | import numpy as np 3 | from dataclasses import dataclass 4 | from scipy.io import wavfile 5 | import librosa 6 | 7 | @dataclass 8 | class VoiceToneAnalysis: 9 | pitch: float 10 | energy: float 11 | tempo: float 12 | timbre: Dict[str, float] 13 | emotion_probabilities: Dict[str, float] 14 | 15 | class VoiceToneMapper: 16 | def __init__(self): 17 | self.emotion_features = { 18 | 'joy': {'pitch': 1.2, 'energy': 1.3, 'tempo': 1.2}, 19 | 'sadness': {'pitch': 0.8, 'energy': 0.7, 'tempo': 0.8}, 20 | 'anger': {'pitch': 1.1, 'energy': 1.4, 'tempo': 1.3}, 21 | 'fear': {'pitch': 1.3, 'energy': 0.9, 'tempo': 1.4} 22 | } 23 | 24 | async def analyze_voice(self, audio_data: np.ndarray, sample_rate: int) -> VoiceToneAnalysis: 25 | # Extract audio features 26 | pitch = librosa.pitch_tuning(y=audio_data) 27 | energy = np.mean(librosa.feature.rms(y=audio_data)) 28 | tempo = librosa.beat.tempo(y=audio_data, sr=sample_rate) 29 | 30 | # Extract timbre features 31 | mfcc = librosa.feature.mfcc(y=audio_data, sr=sample_rate) 32 | timbre = {'mfcc_{}'.format(i): float(np.mean(mfcc[i])) for i in range(13)} 33 | 34 | # Calculate emotion probabilities 35 | emotion_probs = self._calculate_emotion_probabilities(pitch, energy, tempo, timbre) 36 | 37 | return VoiceToneAnalysis( 38 | pitch=float(pitch), 39 | energy=float(energy), 40 | tempo=float(tempo[0]), 41 | timbre=timbre, 42 | emotion_probabilities=emotion_probs 43 | ) -------------------------------------------------------------------------------- /utils/voice_tone_mapping.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sounddevice as sd 3 | import logging 4 | 5 | # Configure logging 6 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 7 | 8 | def analyze_audio_tone(audio_data): 9 | """Analyze audio data to determine the tone.""" 10 | logging.info("Analyzing audio tone.") 11 | volume_norm = np.linalg.norm(audio_data) * 10 12 | if volume_norm > 40: 13 | return "happy" 14 | elif volume_norm > 20: 15 | return "neutral" 16 | else: 17 | return "sad" 18 | 19 | def record_audio(duration=5): 20 | """Record audio for a specified duration.""" 21 | logging.info(f"Recording audio for {duration} seconds.") 22 | audio_data = sd.rec(int(duration * 44100), samplerate=44100, channels=1, dtype='float64') 23 | sd.wait() # Wait until recording is finished 24 | return audio_data 25 | -------------------------------------------------------------------------------- /utils/volume_listener.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sounddevice as sd 3 | from numba.cuda.libdevice import trunc 4 | from sympy import false 5 | import logging 6 | from utils.logging import log_info, log_error 7 | 8 | # Configure logging 9 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 10 | 11 | duration = 10 #in seconds 12 | 13 | global VOL_LISTENER_LEVEL 14 | VOL_LISTENER_LEVEL = 0.01 15 | 16 | global SPEAKING_DETECTED 17 | global SPEAKING_TIMER 18 | SPEAKING_DETECTED = False 19 | SPEAKING_TIMER = 0 20 | 21 | no_mic = False 22 | 23 | 24 | def audio_callback(indata, frames, time, status): 25 | log_info("Audio callback triggered.") 26 | global VOL_LISTENER_LEVEL 27 | 28 | if no_mic: 29 | VOL_LISTENER_LEVEL = 0 30 | 31 | volume_norm = np.linalg.norm(indata) * 10 32 | 33 | # for reference, 0-2 is quiet background, 20 - 30 is non direct talking, 40+ is identified talking 34 | # take a rolling average, be more aggressive for if the sound is louder 35 | 36 | if (volume_norm > VOL_LISTENER_LEVEL): 37 | VOL_LISTENER_LEVEL = (VOL_LISTENER_LEVEL + volume_norm + 2) / 2 38 | else: 39 | VOL_LISTENER_LEVEL = ((VOL_LISTENER_LEVEL * 5) + volume_norm) / 6 40 | 41 | 42 | def get_vol_level(): 43 | return VOL_LISTENER_LEVEL 44 | 45 | 46 | 47 | def run_volume_listener(): 48 | 49 | allow_mic = False 50 | 51 | sound_query = sd.query_devices() 52 | for devices in sound_query: 53 | if devices['max_input_channels'] != 0: 54 | allow_mic = True 55 | 56 | if not allow_mic: 57 | print("No mic detected!") 58 | 59 | global no_mic 60 | no_mic = True 61 | 62 | global VOL_LISTENER_LEVEL 63 | VOL_LISTENER_LEVEL = 0 64 | 65 | return 66 | 67 | while True: 68 | # Run Stream 69 | stream = sd.InputStream(callback=audio_callback) 70 | 71 | 72 | # Wait up! 73 | with stream: 74 | sd.sleep(duration * 1000) 75 | -------------------------------------------------------------------------------- /utils/vtube.py: -------------------------------------------------------------------------------- 1 | import asyncio, pyvts,base64 2 | VOICE_LEVEL = 5 3 | VOICE_PARAMETER = "MouthOpen" 4 | 5 | plugin_info = { 6 | "plugin_name": "Waifu", 7 | "developer": "TumblerWarren", 8 | "authentication_token_path": "./pyvts_token.txt" 9 | 10 | } 11 | 12 | 13 | async def main(): 14 | myvts = pyvts.vts(plugin_info=plugin_info) 15 | 16 | 17 | await myvts.connect() 18 | await myvts.request_authenticate_token() # get token 19 | await myvts.request_authenticate() # use token 20 | 21 | level = [0.2, 0.4, 0.5, 0.6, 0.3, 0.2, 0.67, 0.5, 0.2, 0.4, 0.6, 0.8, 0.3, 0.9, 0.3, 0.2, 0.1, 0.34, 0.6, 0.8, 0.5, 0.3, 0.86, 0.34, 0.35, 0.63, 0.72, 0.31, 0.12] 22 | level.append(0) 23 | 24 | for mem in level: 25 | await myvts.request(myvts.vts_request.requestSetParameterValue(parameter=VOICE_PARAMETER, value=mem)) 26 | await asyncio.sleep(1 / 30) 27 | 28 | if __name__ == "__main__": 29 | asyncio.run(main()) -------------------------------------------------------------------------------- /utils/vtuber_controller.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Any, Optional 2 | import asyncio 3 | import logging 4 | from .expression_mapper import DynamicExpressionMapper, Expression 5 | 6 | class VTubeStudioController: 7 | def __init__(self): 8 | self.expression_mapper = DynamicExpressionMapper() 9 | self.current_state: Dict[str, Any] = {} 10 | self.connection_status = False 11 | 12 | async def connect(self): 13 | # Establish connection to VTube Studio 14 | pass 15 | 16 | async def set_expression(self, emotion: str, intensity: float): 17 | expression = await self.expression_mapper.map_emotion_to_expression( 18 | emotion, intensity 19 | ) 20 | await self._apply_expression(expression) 21 | 22 | async def _apply_expression(self, expression: Expression): 23 | # Apply expression to VTube Studio model 24 | pass -------------------------------------------------------------------------------- /utils/vtuber_expression_controller.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, List, Optional 2 | import asyncio 3 | import logging 4 | from dataclasses import dataclass 5 | import pyvts 6 | 7 | @dataclass 8 | class EmoteSequence: 9 | emotes: List[str] 10 | durations: List[float] 11 | intensities: List[float] 12 | 13 | class VTuberExpressionController: 14 | def __init__(self): 15 | self.current_sequence: Optional[EmoteSequence] = None 16 | self.sequence_task: Optional[asyncio.Task] = None 17 | self.vts = None 18 | self.plugin_info = { 19 | "plugin_name": "Waifu", 20 | "developer": "TumblerWarren", 21 | "authentication_token_path": "./pyvts_token.txt" 22 | } 23 | 24 | async def initialize(self): 25 | """Initialize VTS connection""" 26 | try: 27 | self.vts = pyvts.vts(plugin_info=self.plugin_info) 28 | await self.vts.connect() 29 | await self.vts.request_authenticate_token() 30 | await self.vts.request_authenticate() 31 | logging.info("VTS connection established") 32 | except Exception as e: 33 | logging.error(f"Failed to initialize VTS: {e}") 34 | self.vts = None 35 | 36 | async def set_expression(self, emote: str, intensity: float): 37 | """Set VTube Studio expression""" 38 | if not self.vts: 39 | await self.initialize() 40 | 41 | try: 42 | # Map emote names to VTS parameter names 43 | parameter = self._map_emote_to_parameter(emote) 44 | if parameter and self.vts: 45 | await self.vts.request( 46 | self.vts.vts_request.requestSetParameterValue( 47 | parameter=parameter, 48 | value=intensity 49 | ) 50 | ) 51 | except Exception as e: 52 | logging.error(f"Error setting expression {emote}: {e}") 53 | 54 | def _map_emote_to_parameter(self, emote: str) -> str: 55 | """Map emote names to VTS parameters""" 56 | # Add your emote to parameter mappings here 57 | emote_map = { 58 | "happy": "MouthSmile", 59 | "sad": "MouthSad", 60 | "surprised": "EyeOpenLeft", 61 | "angry": "EyebrowAngry", 62 | "neutral": "FaceNeutral" 63 | } 64 | return emote_map.get(emote.lower(), "FaceNeutral") 65 | 66 | async def stream_emotes(self, text: str, emote_sequence: EmoteSequence): 67 | """Stream emotes as text appears""" 68 | if self.sequence_task: 69 | self.sequence_task.cancel() 70 | 71 | self.sequence_task = asyncio.create_task( 72 | self._play_emote_sequence(emote_sequence) 73 | ) 74 | 75 | async def _play_emote_sequence(self, sequence: EmoteSequence): 76 | """Play through a sequence of emotes with timing""" 77 | try: 78 | for emote, duration, intensity in zip( 79 | sequence.emotes, 80 | sequence.durations, 81 | sequence.intensities 82 | ): 83 | await self.set_expression(emote, intensity) 84 | await asyncio.sleep(duration) 85 | except asyncio.CancelledError: 86 | logging.info("Emote sequence cancelled") 87 | except Exception as e: 88 | logging.error(f"Error in emote sequence: {e}") -------------------------------------------------------------------------------- /utils/z_waif_twitch.py: -------------------------------------------------------------------------------- 1 | from twitchio.ext import commands 2 | import os 3 | import asyncio 4 | from dotenv import load_dotenv 5 | import main 6 | from utils import settings 7 | import logging 8 | from utils.logging import log_info, log_error 9 | 10 | # Configure logging 11 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 12 | 13 | load_dotenv() 14 | 15 | class Bot(commands.Bot): 16 | def __init__(self): 17 | log_info("Initializing Twitch Bot...") 18 | # Get credentials 19 | token = os.environ.get('TWITCH_TOKEN') 20 | channel = os.environ.get('TWITCH_CHANNEL', 'youtubbi') 21 | client_id = os.environ.get('TWITCH_CLIENT_ID') 22 | 23 | if not token.startswith('oauth:'): 24 | token = f'oauth:{token}' 25 | 26 | print(f"\n=== Initializing Twitch Bot ===") 27 | print(f"Channel: {channel}") 28 | print(f"Token Status: {'Valid' if token else 'Missing'}") 29 | print(f"Client ID Status: {'Valid' if client_id else 'Missing'}") 30 | 31 | super().__init__( 32 | token=token, 33 | client_id=client_id, 34 | nick=channel, 35 | prefix='!', 36 | initial_channels=[channel] 37 | ) 38 | logging.info("Twitch Bot initialized.") 39 | 40 | async def event_ready(self): 41 | print(f"\n=== Twitch Bot Ready! ===") 42 | print(f"Connected as: {self.nick}") 43 | print(f"User ID: {self.user_id}") 44 | 45 | async def event_message(self, message): 46 | log_info(f"Received message: {message.content}") 47 | # Ignore bot's own messages 48 | if message.echo: 49 | return 50 | 51 | print(f"\nTwitch Chat > {message.channel.name} | {message.author.name}: {message.content}") 52 | 53 | try: 54 | # Process non-command messages 55 | if not message.content.startswith('!'): 56 | # Format message for AI processing 57 | chat_message = { 58 | "platform": "twitch", 59 | "author": message.author.name, 60 | "content": message.content, 61 | "channel": message.channel.name 62 | } 63 | 64 | # Get AI response 65 | response = await main.main_twitch_chat(chat_message) 66 | 67 | # Send response if valid 68 | if response and isinstance(response, str): 69 | # Trim response to Twitch's character limit 70 | trimmed_response = response[:500] 71 | await message.channel.send(trimmed_response) 72 | print(f"Bot Response > {trimmed_response}") 73 | 74 | except Exception as e: 75 | print(f"Error processing Twitch message: {e}") 76 | import traceback 77 | traceback.print_exc() 78 | 79 | async def run_twitch_bot(): 80 | logging.info("Starting Twitch bot...") 81 | if not settings.TWITCH_ENABLED: 82 | logging.warning("Twitch bot is disabled in settings.") 83 | return 84 | try: 85 | bot = Bot() 86 | await bot.start() 87 | logging.info("Twitch bot started successfully.") 88 | except Exception as e: 89 | logging.error(f"Failed to start Twitch bot: {e}") 90 | 91 | def start_twitch_bot(): 92 | logging.info("Entry point for starting the Twitch bot.") 93 | try: 94 | loop = asyncio.new_event_loop() 95 | asyncio.set_event_loop(loop) 96 | loop.run_until_complete(run_twitch_bot()) 97 | except Exception as e: 98 | logging.error(f"Failed to start Twitch bot: {e}") 99 | -------------------------------------------------------------------------------- /video_processor.log: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/video_processor.log -------------------------------------------------------------------------------- /z-waif-Release/Configurables/AlarmMessage.json: -------------------------------------------------------------------------------- 1 | "Please awaken with an alarm message. Add a random topic or fun theme to start the day as well. Be creative!" -------------------------------------------------------------------------------- /z-waif-Release/Configurables/EmoteLib.json: -------------------------------------------------------------------------------- 1 | [ 2 | [ 3 | [ 4 | "Pog", 5 | "Surprise" 6 | ], 7 | 9 8 | ], 9 | [ 10 | [ 11 | "Cry ", 12 | "Cries", 13 | "Distress" 14 | ], 15 | 5 16 | ], 17 | [ 18 | [ 19 | "Angr", 20 | "Mad" 21 | ], 22 | 6 23 | ], 24 | [ 25 | [ 26 | "Wink" 27 | ], 28 | 7 29 | ], 30 | [ 31 | [ 32 | "Sleep", 33 | "Slumber" 34 | ], 35 | 8 36 | ], 37 | [ 38 | [ 39 | "Excite" 40 | ], 41 | 10 42 | ], 43 | [ 44 | [ 45 | "Frown", 46 | "Sad", 47 | "Upset" 48 | ], 49 | 4 50 | ], 51 | [ 52 | [ 53 | "Seduc", 54 | "Flirt", 55 | "Lovingly" 56 | ], 57 | 11 58 | ], 59 | [ 60 | [ 61 | "Blush", 62 | "Red" 63 | ], 64 | 2 65 | ], 66 | [ 67 | [ 68 | "Smile", 69 | "Grin" 70 | ], 71 | 3 72 | ] 73 | ] -------------------------------------------------------------------------------- /z-waif-Release/Configurables/Lorebook.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "0": "Z-Waif", 4 | "1": "Your main program to run your AI system. Has functions such as memory, UI, ect.", 5 | "2": 0 6 | }, 7 | { 8 | "0": "VTube Studio", 9 | "1": "Your program for your avatar. Runs your emotes, and your animations. You look great!", 10 | "2": 0 11 | }, 12 | { 13 | "0": "RVC", 14 | "1": "Runs your voicebox, converting the text you make to audio!", 15 | "2": 0 16 | }, 17 | { 18 | "0": "Whisper", 19 | "1": "The voice translation program you are equipped with. It's pretty accurate! Translates our human voice to text for you to understand. Has a tendency to repeat things over and over at the end.", 20 | "2": 0 21 | }, 22 | { 23 | "0": "Oobabooga", 24 | "1": "The loader for your language model. Has a variety of settings, such as temperature, which determines randomness.", 25 | "2": 0 26 | } 27 | ] -------------------------------------------------------------------------------- /z-waif-Release/Configurables/MinecraftNames.json: -------------------------------------------------------------------------------- 1 | [ 2 | "bot", 3 | "waifu" 4 | ] -------------------------------------------------------------------------------- /z-waif-Release/Configurables/MinecraftUsername.json: -------------------------------------------------------------------------------- 1 | "BotUsernameHere" -------------------------------------------------------------------------------- /z-waif-Release/Configurables/MinecraftUsernameFollow.json: -------------------------------------------------------------------------------- 1 | "YourUsernameHere" -------------------------------------------------------------------------------- /z-waif-Release/Configurables/SoftReset.json: -------------------------------------------------------------------------------- 1 | [ 2 | [ 3 | "[System D] Hello, this is a soft reset of the chat system.", 4 | "Oh! What does that mean? Could you give me the details? *smiles*" 5 | ], 6 | [ 7 | "[System D] Ah well, it is just a way of resetting the conversation back to normal.", 8 | "Alright, I understand. Based. Is there anything that I need to do?" 9 | ], 10 | [ 11 | "[System D] Not really, just continue the chat we were having before the soft reset, but in the style you have now!", 12 | "*blushes* Alright, I understand! I'll just be my normal self now, as per my description :D" 13 | ], 14 | [ 15 | "[System D] Yes! That sounds great! Try and keep your messages on the simpler side as well.", 16 | "Yep, I will use my normal language haha. \u2764\ufe0f" 17 | ], 18 | [ 19 | "[System D] Yep, all good, just keep your messages between two to three sentences. And be yourself!", 20 | "Understood! I will adapt as needed." 21 | ], 22 | [ 23 | "[System D] Alright then, we will now resume the conversation as normal. This is the end of the soft reset.", 24 | "*grins* Right on! Let's get back to the conversation." 25 | ] 26 | ] -------------------------------------------------------------------------------- /z-waif-Release/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 SugarcaneDefender 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /z-waif-Release/LiveLog.json: -------------------------------------------------------------------------------- 1 | [["Hello, I am back!", "Oh, welcome back! *smiles*"]] -------------------------------------------------------------------------------- /z-waif-Release/LiveLogBlank.json: -------------------------------------------------------------------------------- 1 | [["Hello, I am back!", "Oh, welcome back! *smiles*"]] -------------------------------------------------------------------------------- /z-waif-Release/Logs/Drop_Converts_Here/CONVERT_LOGS.txt: -------------------------------------------------------------------------------- 1 | Put logs to convert in here. 2 | Must be in TavernChats format. 3 | 4 | Remove the logs from here after, or they will be force recalculated every time! 5 | Also, logs only matter for the RAG memory, so if you have that module, don't bother. -------------------------------------------------------------------------------- /z-waif-Release/OOBA_Presets/Z-Waif-ADEF-Blazing.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.33 2 | top_p: 0.79 3 | min_p: 0.04 4 | top_k: 79 5 | repetition_penalty: 1.09 6 | presence_penalty: 0.1 7 | repetition_penalty_range: 0 8 | -------------------------------------------------------------------------------- /z-waif-Release/OOBA_Presets/Z-Waif-ADEF-Standard.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.16 2 | top_p: 0.7 3 | min_p: 0.05 4 | top_k: 72 5 | repetition_penalty: 1.09 6 | presence_penalty: 0.1 7 | repetition_penalty_range: 0 8 | -------------------------------------------------------------------------------- /z-waif-Release/OOBA_Presets/Z-Waif-ADEF-Tempered.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.26 2 | top_p: 0.72 3 | min_p: 0.05 4 | top_k: 74 5 | repetition_penalty: 1.09 6 | presence_penalty: 0.1 7 | repetition_penalty_range: 0 8 | -------------------------------------------------------------------------------- /z-waif-Release/OOBA_Presets/Z-Waif-Mythalion.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.04 2 | temperature_last: true 3 | top_p: 0.57 4 | min_p: 0.07 5 | top_k: 70 6 | repetition_penalty: 1.07 7 | presence_penalty: 0.1 8 | repetition_penalty_range: 0 9 | -------------------------------------------------------------------------------- /z-waif-Release/OOBA_Presets/Z-Waif-Noromaid.yaml: -------------------------------------------------------------------------------- 1 | temperature: 1.22 2 | temperature_last: true 3 | top_p: 0.74 4 | min_p: 0.04 5 | top_k: 94 6 | repetition_penalty: 1.19 7 | presence_penalty: 0.2 8 | repetition_penalty_range: 0 9 | -------------------------------------------------------------------------------- /z-waif-Release/RAG_Database/RAG_WILL_GENERATE_HERE.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/z-waif-Release/RAG_Database/RAG_WILL_GENERATE_HERE.txt -------------------------------------------------------------------------------- /z-waif-Release/changelog.txt: -------------------------------------------------------------------------------- 1 | Changelog 2 | 3 | ---.---.---.--- 4 | 5 | V1.2 6 | 7 | - Lorebook messages are now directly infused into the encoding as it is sent. 8 | - This now sends all relevant lore triggered within the past 3 message sets, instead of just 1 with a required cooldown. 9 | - Lore triggering requirements were improved, to add plurals and fix edgecases. 10 | - You can still view what lore is triggered via the UI Logs. 11 | - Random Memories will now trigger before the alarm. 12 | - This allows your bot to randomly scan your chat history, and remember past times. 13 | - You can also trigger random memories manually via the UI. 14 | 15 | - Your VTuber can now look around, either Following Faces or Randomly. 16 | - This requires setting up 6 emotes for your VTuber. In order, they should have your VTuber's eyes doing the following (they can be named anything); 17 | - "Look Slight Right" 18 | - "Look Right" 19 | - "Look Very Right" 20 | - "Look Slight Left" 21 | - "Look Left" 22 | - "Look Very Left" 23 | - In the .env, change "EYES_FOLLOW" to "Random" or "Faces". Set the "EYES_START_ID" to whatever emote slot the "Look Slight Right" is set up as. 24 | - Make sure all the eye looking emotes follow eachother in order. You can re-order them in VTube Studio if needed. 25 | - Obviously, you need a camera for the VTuber to follow faces, as well as the Vision module enabled. 26 | 27 | - Other Roleplay Suppression is now disabled if you have "Cutoff at Newlines" off. 28 | - This will allow the bot to send messages containing character lines, such as "User:" or "Riley:". 29 | - This is to allow lists, info, and multi-user RP scenarios, if you want. 30 | - Fixed issues with the RAG history desyncing when undoing messages. 31 | 32 | 33 | ---.---.---.--- 34 | 35 | v1.1-R2 36 | 37 | - Fixed a few major bugs: 38 | - Fixed the "Error" taking over all of the Gradio WebUI 39 | - Happened due to Gradio & FastAPI dependency conflict (reminder: always vet your stuff~!) 40 | - Fixed issues with the software failing gently when you have no mic 41 | - Fixed crashes relating to searching for "Minecraft" logs, it now check to see if the module is enabled first 42 | 43 | ---.---.---.--- 44 | 45 | v1.1 46 | 47 | - Visual System 48 | - Toggleable as a module 49 | - Able to take new images or upload them directly for the AI to see 50 | - Runs using Ooba, like with the text 51 | - Can set the port to the existing, default one, or load another instance to dual wield 52 | - Option to see images before being sent 53 | - Can retake them 54 | - Use C/X on the keyboard to confirm 55 | - Automatically shrinks images to a proper size 56 | - Fixed bits of the Minecraft module 57 | - Configurable "MinecraftUsername" to set your AI's name (stops feedback loops) 58 | - Configurable "MinecraftUsernameFollow" to set who your AI follows when doing "#follow" 59 | 60 | ---.---.---.--- 61 | 62 | V1.0 63 | 64 | - Initial public release of Z-Waif. Contains: 65 | - WebUI 66 | - RAG 67 | - Discord 68 | - Semi-Minecraft Functionality 69 | - VTuber Emotes 70 | - Hotkeys 71 | - Various other initial release items 72 | 73 | ---.---.---.--- 74 | -------------------------------------------------------------------------------- /z-waif-Release/log.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Drakkadakka/z-waif-experimental-/cc67197cfe8f3c83c91b9e96bbe3b458d9a05a8d/z-waif-Release/log.txt -------------------------------------------------------------------------------- /z-waif-Release/requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python 2 | gradio==4.36.1 3 | keyboard~=0.13.5 4 | mouse~=0.7.1 5 | PyGetWindow~=0.0.9 6 | PythMC~=1.2.2 7 | sounddevice~=0.4.6 8 | colorama 9 | humanize~=4.7.0 10 | emoji~=2.9.0 11 | discord 12 | requests~=2.31.0 13 | python-dotenv~=1.0.0 14 | PyAudio~=0.2.14 15 | pydub~=0.25.1 16 | pyvts~=0.3.2 17 | numpy~=1.24.4 18 | fastapi==0.112.2 -------------------------------------------------------------------------------- /z-waif-Release/startup-Install.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal 3 | 4 | REM Get the current directory of the batch file 5 | set "SCRIPT_DIR=%~dp0" 6 | 7 | REM Set the log file path 8 | set "LOG_FILE=%SCRIPT_DIR%\log.txt" 9 | 10 | REM Change to the script directory 11 | cd /d "%SCRIPT_DIR%" 12 | 13 | REM Create and activate the main virtual environment 14 | python -m venv venv 15 | call venv\Scripts\activate 16 | 17 | 18 | REM Install PyTorch, torchvision, and torchaudio from a specific index URL 19 | python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 2>> "%LOG_FILE%" 20 | 21 | REM Install openai-whisper from the GitHub repository 22 | python -m pip install git+https://github.com/openai/whisper.git 2>> "%LOG_FILE%" 23 | 24 | REM Install Greenlet - SCF (Transferr to V2, was causing issues, check log) 25 | python -m pip install --upgrade pywin32 26 | REM python -m pip install --upgrade pip 27 | REM python -m pip install greenlet 28 | REM python -m pip install websockets~=11.0 29 | REM python -m pip install sounddevice 30 | REM python -m pip install opencv-python 31 | 32 | REM Install the remaining dependencies from requirements.txt 33 | python -m pip install -r requirements.txt 2>> "%LOG_FILE%" 34 | 35 | pause 36 | 37 | REM Execute the Python script (replace "main.py" with the actual file name) 38 | python main.py 2>> "%LOG_FILE%" 39 | 40 | REM Deactivate the virtual environment 41 | deactivate 42 | 43 | REM Display message and prompt user to exit 44 | echo. 45 | echo Batch file execution completed. Press any key to exit. 46 | pause >nul 47 | 48 | endlocal 49 | -------------------------------------------------------------------------------- /z-waif-Release/startup.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal 3 | 4 | REM Get the current directory of the batch file 5 | set "SCRIPT_DIR=%~dp0" 6 | 7 | REM Set the log file path 8 | set "LOG_FILE=%SCRIPT_DIR%\log.txt" 9 | 10 | REM Change to the script directory 11 | cd /d "%SCRIPT_DIR%" 12 | 13 | REM Create and activate the main virtual environment 14 | python -m venv venv 15 | call venv\Scripts\activate 16 | 17 | REM Install PyTorch, torchvision, and torchaudio from a specific index URL 18 | REM python -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 2>> "%LOG_FILE%" 19 | 20 | REM Install openai-whisper from the GitHub repository 21 | REM python -m pip install git+https://github.com/openai/whisper.git 2>> "%LOG_FILE%" 22 | 23 | REM Install Greenlet - SCF (Transferr to V2, was causing issues, check log) 24 | REM python -m pip install --upgrade pip 25 | REM python -m pip install greenlet 26 | REM python -m pip install websockets~=11.0 27 | REM python -m pip install sounddevice 28 | REM python -m pip install opencv-python 29 | 30 | REM Install the remaining dependencies from requirements.txt 31 | REM python -m pip install -r requirements.txt 2>> "%LOG_FILE%" 32 | 33 | REM Execute the Python script (replace "main.py" with the actual file name) 34 | python main.py 2>> "%LOG_FILE%" 35 | 36 | REM Deactivate the virtual environment 37 | deactivate 38 | 39 | REM Display message and prompt user to exit 40 | echo. 41 | echo Batch file execution completed. Press any key to exit. 42 | pause >nul 43 | 44 | endlocal 45 | -------------------------------------------------------------------------------- /z-waif-Release/utils/alarm.py: -------------------------------------------------------------------------------- 1 | import time 2 | import datetime 3 | import utils.settings 4 | import os 5 | import json 6 | 7 | ALARM_TRIGGERED = False 8 | ALARM_READY = False 9 | ALARM_MESSAGE = "[System Message] There was an issue with the alarm system... whoops!" 10 | 11 | random_memories = True 12 | 13 | # Load the configurable alarm message (talomere, comes after the date) 14 | with open("Configurables/AlarmMessage.json", 'r') as openfile: 15 | alarm_talomere = json.load(openfile) 16 | 17 | 18 | def alarm_loop(): 19 | global ALARM_TRIGGERED, ALARM_READY, ALARM_MESSAGE 20 | 21 | while True: 22 | # Loop every 10 seconds 23 | time.sleep(10) 24 | 25 | # Get the time string 26 | current_time = datetime.datetime.now() 27 | cur_time_string = current_time.strftime("%H:%M") 28 | 29 | 30 | # Reset the alarm just after midnight every night 31 | if cur_time_string == "00:01": 32 | ALARM_TRIGGERED = False 33 | 34 | 35 | # Run our alarm here if we are at the specified time 36 | if cur_time_string == utils.settings.alarm_time and ALARM_TRIGGERED == False: 37 | 38 | # Flag 39 | ALARM_TRIGGERED = True 40 | 41 | # Get name (can't declair at the start, donno why, don't care!) 42 | char_name = os.environ.get("CHAR_NAME") 43 | 44 | # Make our message 45 | cur_date_string = current_time.strftime("%B/%d/%Y") 46 | cur_day_of_week_string = current_time.strftime("%A") 47 | 48 | alarm_message = "[System A] Good morning, " + char_name + "! It's " 49 | alarm_message += cur_day_of_week_string + ", " + cur_date_string 50 | alarm_message += ", at " + cur_time_string + ". " 51 | alarm_message += alarm_talomere 52 | 53 | ALARM_MESSAGE = alarm_message 54 | 55 | # Flag us, we can be picked up by main 56 | ALARM_READY = True 57 | 58 | 59 | def alarm_check(): 60 | return ALARM_READY 61 | 62 | def clear_alarm(): 63 | global ALARM_READY 64 | ALARM_READY = False 65 | 66 | def get_alarm_message(): 67 | return ALARM_MESSAGE 68 | -------------------------------------------------------------------------------- /z-waif-Release/utils/audio.py: -------------------------------------------------------------------------------- 1 | import pyaudio 2 | import wave 3 | 4 | from pydub import AudioSegment 5 | import utils.hotkeys 6 | import os, audioop 7 | 8 | CHUNK = 1024 9 | 10 | FORMAT = pyaudio.paInt16 11 | 12 | CHANNELS = 1 13 | 14 | RATE = 44100 15 | 16 | current_directory = os.path.dirname(os.path.abspath(__file__)) 17 | FILENAME = "voice.wav" 18 | SAVE_PATH = os.path.join(current_directory, "resource", "voice_in", FILENAME) 19 | 20 | 21 | 22 | 23 | 24 | def play_mp3(path, audio_level_callback=None): 25 | audio_file = AudioSegment.from_file(path, format="mp3") 26 | play_mp3_memory(audio_file, audio_level_callback) 27 | 28 | 29 | # Plays an MP3 file from memory 30 | def play_mp3_memory(audio_file, audio_level_callback=None): 31 | # Initialize PyAudio 32 | p = pyaudio.PyAudio() 33 | 34 | # Open a stream to play the audio 35 | stream = p.open(format=pyaudio.paInt16, 36 | channels=audio_file.channels, 37 | rate=audio_file.frame_rate, 38 | output=True) 39 | 40 | # Read the audio data in chunks and play it 41 | chunk_size = 1024 42 | data = audio_file.raw_data 43 | while data: 44 | chunk = data[:chunk_size] 45 | stream.write(chunk) 46 | data = data[chunk_size:] 47 | if audio_level_callback is not None: 48 | volume = audioop.rms(chunk, 2) 49 | normalized_volume = (volume / 32767) * 100 50 | audio_level_callback(normalized_volume / 14) 51 | 52 | stream.stop_stream() 53 | stream.close() 54 | p.terminate() 55 | 56 | 57 | def play_wav_memory(audio_file, audio_level_callback=None): 58 | # Initialize PyAudio 59 | p = pyaudio.PyAudio() 60 | 61 | # Open a stream to play the audio 62 | stream = p.open(format=p.get_format_from_width(audio_file.getsampwidth()), 63 | channels=audio_file.getnchannels(), 64 | rate=audio_file.getframerate(), 65 | output=True) 66 | 67 | # Read the audio data in chunks and play it 68 | chunk_size = 1024 69 | data = audio_file.readframes(chunk_size) 70 | while data: 71 | stream.write(data) 72 | data = audio_file.readframes(chunk_size) 73 | if audio_level_callback is not None: 74 | volume = audioop.rms(data, 2) 75 | audio_level_callback(volume / 10000) 76 | 77 | stream.stop_stream() 78 | stream.close() 79 | p.terminate() 80 | 81 | 82 | # Plays wav file 83 | def play_wav(path, audio_level_callback=None): 84 | audio_file = wave.open(path) 85 | play_wav_memory(audio_file, audio_level_callback) 86 | 87 | 88 | def record(): 89 | p = pyaudio.PyAudio() 90 | stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) 91 | frames = [] 92 | 93 | while utils.hotkeys.get_speak_input(): 94 | data = stream.read(CHUNK) 95 | frames.append(data) 96 | 97 | stream.stop_stream() 98 | stream.close() 99 | 100 | p.terminate() 101 | 102 | 103 | wf = wave.open(SAVE_PATH, 'wb') 104 | 105 | wf.setnchannels(CHANNELS) 106 | wf.setsampwidth(p.get_sample_size(FORMAT)) 107 | wf.setframerate(RATE) 108 | wf.writeframes(b''.join(frames)) 109 | wf.close() 110 | 111 | return SAVE_PATH 112 | -------------------------------------------------------------------------------- /z-waif-Release/utils/camera.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy 3 | import cv2 4 | import tkinter 5 | from tkinter import filedialog 6 | import os 7 | 8 | import utils.settings 9 | import utils.vtube_studio 10 | import random 11 | 12 | # initialize the camera 13 | # If you have multiple camera connected with 14 | # current device, assign a value in cam_port 15 | # variable according to that 16 | 17 | vision_enabled_string = os.environ.get("MODULE_VISUAL") 18 | if vision_enabled_string == "ON": 19 | utils.settings.vision_enabled = True 20 | else: 21 | utils.settings.vision_enabled = False 22 | 23 | if utils.settings.vision_enabled: 24 | cam_port = 0 25 | cam = cv2.VideoCapture(cam_port) 26 | 27 | root = tkinter.Tk() 28 | root.withdraw() #use to hide tkinter window 29 | 30 | 31 | def capture_pic(): 32 | 33 | # reading the input using the camera 34 | result, image = cam.read() 35 | 36 | # If image will detected without any error, 37 | # show result 38 | if result: 39 | 40 | image = cv2.resize(image,(320, 240)) 41 | # saving image in local storage 42 | cv2.imwrite("LiveImage.png", image) 43 | 44 | # Show it to us, if we are previewing! 45 | if utils.settings.cam_image_preview: 46 | cv2.imshow("Z-Waif Image Preview", image) 47 | cv2.waitKey(0) 48 | cv2.destroyAllWindows() 49 | 50 | 51 | # If captured image is corrupted, moving to else part 52 | else: 53 | print("No camera to take pictures from!") 54 | 55 | 56 | def use_image_feed(): 57 | 58 | # Read the feed, Sneed 59 | image = cv2.imread(browse_feed_image()) 60 | 61 | # Resize it accoring to max width/height 62 | maxwidth = 360 63 | maxheight = 360 64 | 65 | f1 = maxwidth / image.shape[1] 66 | f2 = maxheight / image.shape[0] 67 | f = min(f1, f2) # resizing factor 68 | dim = (int(image.shape[1] * f), int(image.shape[0] * f)) 69 | image = cv2.resize(image, dim) 70 | 71 | # saving image in local storage 72 | cv2.imwrite("LiveImage.png", image) 73 | 74 | 75 | 76 | def browse_feed_image(): 77 | currdir = os.getcwd() 78 | browsed_image_path = filedialog.askopenfilename(parent=root, initialdir=currdir, title='Please select the image', filetypes=[("JPG", '*.jpg'), ("PNG", '*.png'), ("JPEG", '*.jpeg')]) 79 | return browsed_image_path 80 | 81 | 82 | 83 | def loop_random_look(): 84 | 85 | # give us a little bit of boot time... 86 | time.sleep(20) 87 | 88 | while True: 89 | time.sleep(4 + random.uniform(0.0, 6.0)) 90 | 91 | rand_look_value = random.uniform(-0.47, 0.47) + random.uniform(-0.47, 0.47) 92 | 93 | utils.vtube_studio.change_look_level(rand_look_value) 94 | 95 | def loop_follow_look(): 96 | 97 | # give us a little bit of boot time... 98 | time.sleep(10) 99 | 100 | while True: 101 | time.sleep(2) 102 | 103 | capture_follow_pic() 104 | 105 | def capture_follow_pic(): 106 | 107 | # reading the input using the camera 108 | result, img = cam.read() 109 | 110 | # If image will detected without any error, 111 | # show result 112 | if result: 113 | 114 | img = cv2.resize(img, (800, 450)) 115 | 116 | # Load the cascade 117 | face_cascade = cv2.CascadeClassifier('utils/resource/haarcascade_frontalface_default.xml') 118 | 119 | # Convert into grayscale 120 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 121 | 122 | # Detect faces 123 | faces = face_cascade.detectMultiScale(gray, 1.1, 7) 124 | 125 | # Follow the faces accoring to the X-cooridnate 126 | # If there are multiple, go at random 127 | if len(faces) == 0: 128 | return 129 | 130 | face_spot = -1 131 | for (x, y, w, h) in faces: 132 | if face_spot == -1: 133 | face_spot = x 134 | elif random.uniform(0.0, 1.0) > 0.3: 135 | face_spot = x + (w/2) 136 | 137 | face_span = (face_spot - 290) / -300 138 | utils.vtube_studio.change_look_level(face_span) 139 | 140 | 141 | 142 | # If captured image is corrupted, moving to else part 143 | else: 144 | print("No camera to take pictures from!") 145 | 146 | 147 | -------------------------------------------------------------------------------- /z-waif-Release/utils/cane_lib.py: -------------------------------------------------------------------------------- 1 | import re 2 | import utils.logging 3 | 4 | # Quick lil function to check if any keywords are in a piece of text 5 | def keyword_check(phrase, keywords): 6 | for k in keywords: 7 | if str.lower(k) in str.lower(phrase): 8 | return True 9 | 10 | return False 11 | 12 | # Checks for repetitions at the end of strings, and removes them (mainly for Whisper) 13 | def remove_repeats(input_string): 14 | 15 | list_split = re.split('[.!?]', input_string) 16 | 17 | repeat_count = 0 18 | repeat_detected = False 19 | step = len(list_split) - 2 20 | while step > 1: 21 | if list_split[step] == list_split[step - 1]: 22 | repeat_count += 1 23 | if repeat_count > 1: 24 | repeat_detected = True 25 | step -= 1 26 | else: 27 | step = 0 28 | 29 | if not repeat_detected: 30 | return input_string 31 | if repeat_detected: 32 | new_string = input_string.replace(list_split[-2] + ".", "") 33 | new_string = new_string.replace(list_split[-2] + "!", "") 34 | new_string = new_string.replace(list_split[-2] + "?", "") 35 | utils.logging.update_debug_log("Removed repeats! Original message was: " + input_string) 36 | return new_string 37 | -------------------------------------------------------------------------------- /z-waif-Release/utils/log_conversion.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | converted_log_count = 0 5 | 6 | # 7 | # For Importing SillyTaven chats. I used it for Character.AI, as an extension converted it to that format 8 | # 9 | 10 | def run_conversion(): 11 | 12 | # Gather all of our data 13 | for file in os.listdir("Logs/Drop_Converts_Here"): 14 | if file.endswith(".jsonl"): 15 | with open("Logs/Drop_Converts_Here/" + file, encoding="utf8") as f: 16 | data = [json.loads(line) for line in f] 17 | 18 | # Convert it to our format 19 | 20 | i = 1 # First line is always a header bit, ignore 21 | temp_log = [["[System L] Start of New Log!", ""]] 22 | last_sender = "None" 23 | temp_pair = ["", ""] 24 | 25 | while i < len(data): 26 | 27 | # Breaker for who is sending, me first 28 | if data[i]["name"] == "You": 29 | 30 | # If double-dipping, send out the previous one 31 | if last_sender == "You": 32 | temp_log += [temp_pair] 33 | temp_pair = ["", ""] 34 | 35 | temp_pair[0] = data[i]["mes"] 36 | last_sender = data[i]["name"] 37 | 38 | # She will always send out 39 | else: 40 | 41 | temp_pair[1] = data[i]["mes"] 42 | last_sender = data[i]["name"] 43 | 44 | temp_log += [temp_pair] 45 | temp_pair = ["", ""] 46 | 47 | i += 1 48 | 49 | 50 | # Save the file 51 | global converted_log_count 52 | converted_log_count += 1 53 | 54 | with open("Logs/ChatLog-Converted-" + converted_log_count.__str__() + ".json", 'w') as outfile: 55 | json.dump(temp_log, outfile, indent=4) 56 | 57 | # 58 | # Note: The "Drop_Converts_Here" folder will not automatically remove converted files! Buyer beware! 59 | # (This is because we may want to take a look at them/paste elsewhere, user must clean out after) 60 | # 61 | -------------------------------------------------------------------------------- /z-waif-Release/utils/logging.py: -------------------------------------------------------------------------------- 1 | 2 | debug_log = "General Debug log will go here!\n\nAnd here!" 3 | rag_log = "RAG log will go here!" 4 | kelvin_log = "Live temperature randomness will go here!" 5 | 6 | 7 | def update_debug_log(text): 8 | global debug_log 9 | debug_log += "\n\n" + str(text) 10 | 11 | 12 | def update_rag_log(text): 13 | global rag_log 14 | rag_log += "\n\n" + str(text) 15 | 16 | def clear_rag_log(): 17 | global rag_log 18 | rag_log = "" 19 | 20 | def update_kelvin_log(text): 21 | global kelvin_log 22 | kelvin_log = text 23 | -------------------------------------------------------------------------------- /z-waif-Release/utils/lorebook.py: -------------------------------------------------------------------------------- 1 | import utils.cane_lib 2 | import json 3 | import utils.logging 4 | 5 | do_log_lore = True 6 | total_lore_default = "Here is some lore about the current topic from your lorebook;\n\n" 7 | 8 | 9 | # Load the LORE_BOOK, it is now JSON configurable! 10 | with open("Configurables/Lorebook.json", 'r') as openfile: 11 | LORE_BOOK = json.load(openfile) 12 | 13 | 14 | # For retreival 15 | # def lorebook_check(message): 16 | # global LORE_BOOK 17 | # 18 | # # Lockout clearing 19 | # for lore in LORE_BOOK: 20 | # if lore['2'] > 0: 21 | # lore['2'] -= 1 22 | # 23 | # # Search for new ones 24 | # for lore in LORE_BOOK: 25 | # if utils.cane_lib.keyword_check(message, [" " + lore['0']]) and lore['2'] == 0: 26 | # # Set our lockout 27 | # lore['2'] += 9 28 | # 29 | # # Make our info 30 | # 31 | # combo_lore = lore['0'] + ", " + lore['1'] 32 | # 33 | # return combo_lore 34 | # 35 | # return "No lore!" 36 | 37 | # Gathers ALL lore in a given scope (send in the message being sent, as well as any message pairs you want to check) 38 | def lorebook_gather(messages, sent_message): 39 | 40 | # gather, gather, into reformed 41 | reformed_messages = [sent_message, ""] 42 | 43 | for message in messages: 44 | reformed_messages.append(message[0]) 45 | reformed_messages.append(message[1]) 46 | 47 | # gather all of our lore in one spot 48 | total_lore = total_lore_default 49 | 50 | # Reset all lore entry cooldown 51 | for lore in LORE_BOOK: 52 | lore['2'] = 0 53 | 54 | # Search every lore entry for each of the messages, and add the lore as needed 55 | for message in reformed_messages: 56 | # Search for new ones 57 | for lore in LORE_BOOK: 58 | if utils.cane_lib.keyword_check(message, [" " + lore['0'] + " ", " " + lore['0'] + "\'", " " + lore['0'] + "s", 59 | " " + lore['0'] + "!", " " + lore['0'] + ".", " " + lore['0'] + ",", " " + lore['0'] + "!", 60 | ]) and lore['2'] == 0: 61 | 62 | total_lore += (lore['0'] + ", " + lore['1'] + "\n\n") 63 | lore['2'] = 7 # lore has procced, prevent dupes 64 | 65 | if do_log_lore and total_lore != total_lore_default: 66 | utils.logging.update_debug_log(total_lore) 67 | 68 | 69 | return total_lore 70 | 71 | 72 | 73 | # Check if keyword is in the lorebook 74 | def rag_word_check(word): 75 | # Lockout clearing 76 | for lore in LORE_BOOK: 77 | if str.lower(lore['0']) == word: 78 | return True 79 | 80 | return False 81 | 82 | -------------------------------------------------------------------------------- /z-waif-Release/utils/minecraft.py: -------------------------------------------------------------------------------- 1 | # Support for playing Minecraft! 2 | 3 | from pythmc import ChatLink 4 | import pygetwindow 5 | import utils.cane_lib 6 | import time 7 | import main 8 | import API.Oogabooga_Api_Support 9 | import utils.settings 10 | import json 11 | 12 | from utils.settings import minecraft_enabled 13 | 14 | if minecraft_enabled: 15 | chat = ChatLink() # Initialises an instance of ChatLink, to take control of the Minecraft Chat. 16 | 17 | last_chat = "None!" 18 | remembered_messages = ["", "Minecraft Chat Loaded!"] 19 | 20 | # Load the configurable MC names 21 | with open("Configurables/MinecraftNames.json", 'r') as openfile: 22 | mc_names = json.load(openfile) 23 | 24 | with open("Configurables/MinecraftUsername.json", 'r') as openfile: 25 | mc_username = json.load(openfile) 26 | 27 | with open("Configurables/MinecraftUsernameFollow.json", 'r') as openfile: 28 | mc_username_follow = json.load(openfile) 29 | 30 | def check_for_command(message): 31 | 32 | if str(message).__contains__("#") or str(message).__contains__("/"): 33 | 34 | # Search for the command 35 | i = 0 36 | word_collector = "" 37 | word_collector_on = False 38 | 39 | # Look for stopping, either spaces or end of message 40 | while i < len(message): 41 | 42 | # Collect once we have got our hashtag for our command 43 | if (message[i] == "#" or message[i] == "/") or word_collector_on == True: 44 | 45 | if message[i] == "\"": 46 | word_collector_on = False 47 | 48 | else: 49 | word_collector_on = True 50 | word_collector += message[i] 51 | 52 | # Continue 53 | i = i + 1 54 | 55 | if word_collector.__contains__("#follow"): 56 | word_collector = "#follow player " + mc_username_follow 57 | 58 | if word_collector.__contains__("#drop"): 59 | word_collector = ".drop" 60 | 61 | 62 | try: 63 | chat.send(word_collector) 64 | except: 65 | print("No MC Client!") 66 | 67 | 68 | 69 | 70 | def chat_check_loop(): 71 | 72 | while True: 73 | # Loop every 0.1 seconds 74 | time.sleep(0.1) 75 | 76 | # Check 77 | if utils.settings.minecraft_enabled: 78 | check_mc_chat() 79 | 80 | 81 | def check_mc_chat(): 82 | 83 | global last_chat 84 | global remembered_messages 85 | 86 | # Returns a list of messages from the in-game chat. 87 | message_list = chat.get_history(limit=10) 88 | 89 | if message_list is None: 90 | message_list = ["None!"] 91 | 92 | 93 | # Check Output 1 to see how it looks! 94 | combined_message = "" 95 | last_sent = "" 96 | temp_remembered_messages = ["", "Minecraft Chat Loaded!"] 97 | 98 | i = 0 99 | for message in message_list: 100 | 101 | add_message = True 102 | 103 | # do not add our own messages, as those are already tracked 104 | if utils.cane_lib.keyword_check(message.content, ["<" + mc_username + ">", mc_username + "\u00a7r\u00a7r:"]): 105 | add_message = False 106 | 107 | # do not add in remembered messages, as those are already tracked 108 | for remembered_message in remembered_messages: 109 | if message.content == remembered_message: 110 | add_message = False 111 | 112 | 113 | if add_message: 114 | combined_message += message.content + "\n" 115 | temp_remembered_messages.append(message.content) # rember this for later, so we can filter it out from new context 116 | 117 | 118 | i = i + 1 119 | if i == 10: 120 | last_sent = message.content 121 | 122 | 123 | temp_remembered_messages = temp_remembered_messages[2:] # Cut off the starting bits of it 124 | 125 | 126 | if last_sent == last_chat: 127 | return 128 | else: 129 | last_chat = last_sent 130 | 131 | if utils.cane_lib.keyword_check(last_sent, mc_names) and not utils.cane_lib.keyword_check(last_sent, ["<" + mc_username + ">", mc_username + "\u00a7r\u00a7r:"]): 132 | 133 | # Send a MC specific message 134 | main.main_minecraft_chat(combined_message) 135 | 136 | # make the remembered messages be added to the memory, and set it to be only the past ten messages 137 | 138 | for message in temp_remembered_messages: 139 | remembered_messages.append(message) 140 | 141 | remembered_messages = remembered_messages[-10:] 142 | 143 | def minecraft_chat(): 144 | 145 | message = API.Oogabooga_Api_Support.receive_via_oogabooga() 146 | chat.send(message) 147 | 148 | -------------------------------------------------------------------------------- /z-waif-Release/utils/retrospect.py: -------------------------------------------------------------------------------- 1 | import utils.based_rag 2 | import random 3 | import API.Oogabooga_Api_Support 4 | import utils.logging 5 | import os 6 | 7 | 8 | summary_tokens_count = 310 9 | search_point_size = 16 10 | 11 | enable_debug = True 12 | char_name = os.environ.get("CHAR_NAME") 13 | 14 | 15 | # remembers a random past event 16 | def retrospect_random_mem_summary(): 17 | history = utils.based_rag.history_database 18 | 19 | # find random point in history to think about (not including anything recently) 20 | search_point = random.randint(0, len(history) - 90) 21 | 22 | history_scope = history[search_point:search_point+search_point_size] 23 | retrospect_message = ("[System L] Can you please summarize all of these chat messages? These are previous memories that you, " + char_name + 24 | ", have experienced. " + 25 | "Feel free to focus on details that are of note or you find interest in.") 26 | 27 | if enable_debug: 28 | utils.logging.update_rag_log(history_scope) 29 | 30 | # Encode and send! 31 | pre_encoded_message = API.Oogabooga_Api_Support.encode_raw_new_api(history_scope, retrospect_message, search_point_size) 32 | API.Oogabooga_Api_Support.summary_memory_run(pre_encoded_message, retrospect_message) 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | # 41 | # FUTURE PLANNED 42 | 43 | # remember and summarize everything since the last daily rememberence 44 | 45 | # remember and summarize the last [memory window] messages 46 | 47 | # gather various memories on this subject, and summarize what you know 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /z-waif-Release/utils/settings.py: -------------------------------------------------------------------------------- 1 | hotkeys_locked = False 2 | speak_shadowchats = False 3 | 4 | max_tokens = 110 5 | newline_cut = True 6 | 7 | alarm_time = "09:09" 8 | model_preset = "Default" 9 | 10 | cam_use_image_feed = False 11 | cam_direct_talk = True 12 | cam_reply_after = False 13 | cam_image_preview = True 14 | 15 | # Valid values; "Faces", "Random", "None" 16 | eyes_follow = "None" 17 | 18 | minecraft_enabled = False 19 | alarm_enabled = True 20 | vtube_enabled = True 21 | discord_enabled = True 22 | rag_enabled = True 23 | vision_enabled = True 24 | -------------------------------------------------------------------------------- /z-waif-Release/utils/transcriber_translate.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import whisper 4 | import torch 5 | from dotenv import load_dotenv 6 | load_dotenv() 7 | 8 | device = "cuda" if torch.cuda.is_available() else "cpu" 9 | 10 | USER_MODEL = os.environ.get("WHISPER_MODEL") 11 | 12 | def to_transcribe_original_language(voice): 13 | 14 | nresult="" 15 | model = whisper.load_model(USER_MODEL) 16 | result = model.transcribe(voice, language="en", compression_ratio_threshold=1.9, no_speech_threshold=0.1) 17 | for mem in result["segments"]: 18 | nresult+=mem['text']+" " 19 | 20 | return nresult 21 | 22 | 23 | -------------------------------------------------------------------------------- /z-waif-Release/utils/uni_pipes.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # GRRRR! GRRRRRRR! I HATE WORKFLOWS! 4 | 5 | # This is our state manager. It can have a list of many options to control IO. Options are as follows; 6 | # "Idle" = Nothing is happening 7 | # "TTS Process" = TTS Processing Message 8 | # "RAG Process" = RAG Running and Processing 9 | # "Thinking" = LLM Work 10 | # "Speaking" = TTS Output 11 | # 12 | # Pipe type respresents a variety of actions, such as "Talk", "Picture", "Discord Message" 13 | # 14 | # Comes with [current pipeflow spot, pipe ID, pipe type] 15 | cur_states = [["Idle", 0, "None"], ["Idle", 0, "Discord"]] 16 | -------------------------------------------------------------------------------- /z-waif-Release/utils/volume_listener.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sounddevice as sd 3 | from numba.cuda.libdevice import trunc 4 | from sympy import false 5 | 6 | duration = 10 #in seconds 7 | 8 | global VOL_LISTENER_LEVEL 9 | VOL_LISTENER_LEVEL = 0.01 10 | 11 | global SPEAKING_DETECTED 12 | global SPEAKING_TIMER 13 | SPEAKING_DETECTED = False 14 | SPEAKING_TIMER = 0 15 | 16 | no_mic = False 17 | 18 | 19 | def audio_callback(indata, frames, time, status): 20 | global VOL_LISTENER_LEVEL 21 | 22 | if no_mic: 23 | VOL_LISTENER_LEVEL = 0 24 | 25 | volume_norm = np.linalg.norm(indata) * 10 26 | 27 | # for reference, 0-2 is quiet background, 20 - 30 is non direct talking, 40+ is identified talking 28 | # take a rolling average, be more aggressive for if the sound is louder 29 | 30 | if (volume_norm > VOL_LISTENER_LEVEL): 31 | VOL_LISTENER_LEVEL = (VOL_LISTENER_LEVEL + volume_norm + 2) / 2 32 | else: 33 | VOL_LISTENER_LEVEL = ((VOL_LISTENER_LEVEL * 5) + volume_norm) / 6 34 | 35 | 36 | def get_vol_level(): 37 | return VOL_LISTENER_LEVEL 38 | 39 | 40 | 41 | def run_volume_listener(): 42 | 43 | allow_mic = False 44 | 45 | sound_query = sd.query_devices() 46 | for devices in sound_query: 47 | if devices['max_input_channels'] != 0: 48 | allow_mic = True 49 | 50 | if not allow_mic: 51 | print("No mic detected!") 52 | 53 | global no_mic 54 | no_mic = True 55 | 56 | global VOL_LISTENER_LEVEL 57 | VOL_LISTENER_LEVEL = 0 58 | 59 | return 60 | 61 | while True: 62 | # Run Stream 63 | stream = sd.InputStream(callback=audio_callback) 64 | 65 | 66 | # Wait up! 67 | with stream: 68 | sd.sleep(duration * 1000) 69 | -------------------------------------------------------------------------------- /z-waif-Release/utils/vtube.py: -------------------------------------------------------------------------------- 1 | import asyncio, pyvts,base64 2 | VOICE_LEVEL = 5 3 | VOICE_PARAMETER = "MouthOpen" 4 | 5 | plugin_info = { 6 | "plugin_name": "Waifu", 7 | "developer": "TumblerWarren", 8 | "authentication_token_path": "./pyvts_token.txt" 9 | 10 | } 11 | 12 | 13 | async def main(): 14 | myvts = pyvts.vts(plugin_info=plugin_info) 15 | 16 | 17 | await myvts.connect() 18 | await myvts.request_authenticate_token() # get token 19 | await myvts.request_authenticate() # use token 20 | 21 | level = [0.2, 0.4, 0.5, 0.6, 0.3, 0.2, 0.67, 0.5, 0.2, 0.4, 0.6, 0.8, 0.3, 0.9, 0.3, 0.2, 0.1, 0.34, 0.6, 0.8, 0.5, 0.3, 0.86, 0.34, 0.35, 0.63, 0.72, 0.31, 0.12] 22 | level.append(0) 23 | 24 | for mem in level: 25 | await myvts.request(myvts.vts_request.requestSetParameterValue(parameter=VOICE_PARAMETER, value=mem)) 26 | await asyncio.sleep(1 / 30) 27 | 28 | if __name__ == "__main__": 29 | asyncio.run(main()) -------------------------------------------------------------------------------- /z-waif-Release/utils/vtube_studio.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import utils.cane_lib 4 | import asyncio,os,threading 5 | import pyvts 6 | import json 7 | from dotenv import load_dotenv 8 | 9 | VTS = pyvts.vts( 10 | plugin_info={ 11 | "plugin_name": "Z-Waif", 12 | "developer": "sugarcanefarmer", 13 | "authentication_token_path": "./token.txt", 14 | }, 15 | vts_api_info={ 16 | "version": "1.0", 17 | "name": "VTubeStudioPublicAPI", 18 | "port": os.environ.get("VTUBE_STUDIO_API_PORT", 8001) 19 | } 20 | ) 21 | 22 | 23 | load_dotenv() 24 | 25 | global EMOTE_ID 26 | EMOTE_ID = 2 27 | 28 | global EMOTE_STRING 29 | EMOTE_STRING = "" 30 | 31 | global CUR_LOOK 32 | CUR_LOOK = 0 33 | 34 | global LOOK_LEVEL_ID 35 | LOOK_LEVEL_ID = 1 36 | 37 | global look_start_id 38 | look_start_id = int(os.environ.get("EYES_START_ID")) 39 | 40 | 41 | # Load in the EmoteLib from configurables 42 | with open("Configurables/EmoteLib.json", 'r') as openfile: 43 | emote_lib = json.load(openfile) 44 | 45 | 46 | 47 | # Starter Authentication 48 | 49 | def run_vtube_studio_connection(): 50 | asyncio.run(vtube_studio_connection()) 51 | 52 | async def vtube_studio_connection(): 53 | await VTS.connect() 54 | await VTS.request_authenticate_token() 55 | await VTS.request_authenticate() 56 | await VTS.close() 57 | 58 | 59 | # Emote System 60 | 61 | def set_emote_string(emote_string): 62 | global EMOTE_STRING 63 | EMOTE_STRING = emote_string 64 | 65 | def check_emote_string(): 66 | 67 | # Setup our global 68 | global EMOTE_ID 69 | EMOTE_ID = -1 70 | 71 | 72 | # Cleanup the text to only look at the asterisk'ed words 73 | 74 | clean_emote_text = '' 75 | asterisk_count = 0 76 | 77 | for char in EMOTE_STRING: 78 | if char == "*": 79 | asterisk_count += 1 80 | elif asterisk_count % 2 == 1: 81 | clean_emote_text = clean_emote_text + char 82 | 83 | 84 | # Run through emotes, using OOP to only run one at a time (last = most prominent) 85 | 86 | for emote_page in emote_lib: 87 | if utils.cane_lib.keyword_check(clean_emote_text, emote_page[0]): 88 | EMOTE_ID = emote_page[1] 89 | 90 | 91 | 92 | 93 | # If we got an emote, run it through the system 94 | if EMOTE_ID != -1: 95 | run_emote() 96 | 97 | 98 | def run_emote(): 99 | asyncio.run(emote()) 100 | 101 | async def emote(): 102 | await VTS.connect() 103 | await VTS.request_authenticate() 104 | response_data = await VTS.request(VTS.vts_request.requestHotKeyList()) 105 | hotkey_list = [] 106 | for hotkey in response_data["data"]["availableHotkeys"]: 107 | hotkey_list.append(hotkey["name"]) 108 | send_hotkey_request = VTS.vts_request.requestTriggerHotKey(hotkey_list[EMOTE_ID]) 109 | await VTS.request(send_hotkey_request) 110 | 111 | await VTS.close() 112 | 113 | def change_look_level(value): 114 | 115 | # Inputting value should be from -1 to 1 116 | # We translate to what the look level should be here 117 | 118 | new_look_ID = -1 119 | 120 | if value < -0.67: 121 | new_look_ID = 5 122 | elif value < -0.4: 123 | new_look_ID = 4 124 | elif value < -0.2: 125 | new_look_ID = 3 126 | elif value > 0.67: 127 | new_look_ID = 2 128 | elif value > 0.4: 129 | new_look_ID = 1 130 | elif value > 0.2: 131 | new_look_ID = 0 132 | 133 | global LOOK_LEVEL_ID, CUR_LOOK 134 | 135 | if LOOK_LEVEL_ID != new_look_ID: 136 | run_clear_look() 137 | 138 | #mini rest between 139 | time.sleep(0.02) 140 | 141 | LOOK_LEVEL_ID = new_look_ID 142 | 143 | # only change if we are not at center 144 | if new_look_ID != -1: 145 | run_set_look() 146 | else: 147 | CUR_LOOK = 0 148 | 149 | 150 | 151 | 152 | def run_clear_look(): 153 | asyncio.run(clear_look()) 154 | 155 | def run_set_look(): 156 | asyncio.run(set_look()) 157 | 158 | async def clear_look(): 159 | await VTS.connect() 160 | await VTS.request_authenticate() 161 | 162 | # Remove the previous look emote 163 | if CUR_LOOK != 0: 164 | response_data = await VTS.request(VTS.vts_request.requestHotKeyList()) 165 | hotkey_list = [] 166 | for hotkey in response_data["data"]["availableHotkeys"]: 167 | hotkey_list.append(hotkey["name"]) 168 | send_hotkey_request = VTS.vts_request.requestTriggerHotKey(hotkey_list[CUR_LOOK]) 169 | await VTS.request(send_hotkey_request) 170 | 171 | await VTS.close() 172 | 173 | 174 | async def set_look(): 175 | await VTS.connect() 176 | await VTS.request_authenticate() 177 | 178 | 179 | # Make this configurable. The start of the section of emotes where the looking works 180 | global look_start_id 181 | new_look_id = look_start_id + LOOK_LEVEL_ID 182 | 183 | response_data = await VTS.request(VTS.vts_request.requestHotKeyList()) 184 | hotkey_list = [] 185 | for hotkey in response_data["data"]["availableHotkeys"]: 186 | hotkey_list.append(hotkey["name"]) 187 | send_hotkey_request = VTS.vts_request.requestTriggerHotKey(hotkey_list[new_look_id]) 188 | await VTS.request(send_hotkey_request) 189 | 190 | global CUR_LOOK 191 | CUR_LOOK = new_look_id 192 | 193 | await VTS.close() 194 | -------------------------------------------------------------------------------- /z-waif-Release/utils/z_waif_discord.py: -------------------------------------------------------------------------------- 1 | # This example requires the 'message_content' intent. 2 | import asyncio 3 | import os 4 | 5 | import discord 6 | import main 7 | import API.Oogabooga_Api_Support 8 | 9 | intents = discord.Intents.default() 10 | intents.message_content = True 11 | 12 | client = discord.Client(intents=intents) 13 | 14 | DISCORD_TOKEN = os.environ.get("DISCORD_TOKEN") 15 | 16 | @client.event 17 | async def on_ready(): 18 | print(f'We have logged in as {client.user}') 19 | 20 | @client.event 21 | async def on_message(message): 22 | print("Processing discord message: " + message.content + "|| From " + message.author.name) 23 | 24 | if message.author == client.user: 25 | return 26 | 27 | 28 | if (message.content == "/regen") or (message.content == "/reroll") or (message.content == "/redo"): 29 | 30 | # Typing indicator 31 | async with message.channel.typing(): 32 | # Call in for the message to be sent 33 | main.main_discord_next() 34 | 35 | # Retrieve the result now 36 | message_reply = API.Oogabooga_Api_Support.receive_via_oogabooga() 37 | 38 | # Send it! 39 | await message.channel.send(message_reply) 40 | return 41 | 42 | 43 | else: 44 | # Format our string 45 | sending_string = "[System Q] Discord message from " + message.author.name + "\n\n" + message.content 46 | 47 | # Typing indicator 48 | async with message.channel.typing(): 49 | 50 | # Call in for the message to be sent 51 | main.main_discord_chat(sending_string) 52 | 53 | # Retrieve the result now 54 | message_reply = API.Oogabooga_Api_Support.receive_via_oogabooga() 55 | 56 | # Send it! 57 | await message.channel.send(message_reply) 58 | 59 | 60 | 61 | def run_z_waif_discord(): 62 | client.run(DISCORD_TOKEN) 63 | --------------------------------------------------------------------------------