├── .gitignore ├── LICENSE ├── README.md ├── main.py ├── previews ├── bars.gif ├── circle.gif ├── flame.gif ├── fractal_universe.gif ├── matrix_rain.gif ├── neural_dreamscape.gif ├── particles.gif └── wave.gif ├── requirements.txt ├── visualizer_base.py └── visualizers ├── README.md ├── __init__.py ├── bars.py ├── circle.py ├── cosmic_pulsar.py ├── flame.py ├── fractal_universe.py ├── fractal_universe_lite.py ├── guitar_tuner.py ├── matrix_rain.py ├── neural_dreamscape.py ├── neural_dreamscape_lite.py ├── particles.py ├── starfield_warp.py ├── stick_figure.py └── wave.py /.gitignore: -------------------------------------------------------------------------------- 1 | .summary_files/ 2 | .DS_Store 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | *.py,cover 52 | .hypothesis/ 53 | .pytest_cache/ 54 | cover/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | .pybuilder/ 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | # For a library or package, you might want to ignore these files since the code is 89 | # intended to run in multiple environments; otherwise, check them in: 90 | # .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # UV 100 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 101 | # This is especially recommended for binary packages to ensure reproducibility, and is more 102 | # commonly ignored for libraries. 103 | #uv.lock 104 | 105 | # poetry 106 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 107 | # This is especially recommended for binary packages to ensure reproducibility, and is more 108 | # commonly ignored for libraries. 
109 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 110 | #poetry.lock 111 | 112 | # pdm 113 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 114 | #pdm.lock 115 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 116 | # in version control. 117 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 118 | .pdm.toml 119 | .pdm-python 120 | .pdm-build/ 121 | 122 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 123 | __pypackages__/ 124 | 125 | # Celery stuff 126 | celerybeat-schedule 127 | celerybeat.pid 128 | 129 | # SageMath parsed files 130 | *.sage.py 131 | 132 | # Environments 133 | .env 134 | .venv 135 | env/ 136 | venv/ 137 | ENV/ 138 | env.bak/ 139 | venv.bak/ 140 | 141 | # Spyder project settings 142 | .spyderproject 143 | .spyproject 144 | 145 | # Rope project settings 146 | .ropeproject 147 | 148 | # mkdocs documentation 149 | /site 150 | 151 | # mypy 152 | .mypy_cache/ 153 | .dmypy.json 154 | dmypy.json 155 | 156 | # Pyre type checker 157 | .pyre/ 158 | 159 | # pytype static type analyzer 160 | .pytype/ 161 | 162 | # Cython debug symbols 163 | cython_debug/ 164 | 165 | # PyCharm 166 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 167 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 168 | # and can be added to the global gitignore or merged into this file. For a more nuclear 169 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
170 | #.idea/ 171 | 172 | # Ruff stuff: 173 | .ruff_cache/ 174 | 175 | # PyPI configuration file 176 | .pypirc 177 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CLI-Viz: Terminal Audio Visualizer 2 | 3 | A powerful terminal-based audio visualizer with a plugin system that renders beautiful, responsive visualizations in your terminal. CLI-Viz processes audio from your microphone in real-time and transforms it into captivating visual displays. 4 | 5 | ## Included Visualizations 6 | 7 | 1. **Spectrum Bars**: Classic frequency spectrum analyzer with colorful bars 8 | - Controls: **b/B** - Increase/decrease bass boost 9 | 10 | ![Spectrum Bars](previews/bars.gif) 11 | 12 | 2. **Wave**: Audio-reactive sine wave that changes with different frequencies 13 | 14 | ![Wave](previews/wave.gif) 15 | 16 | 3. **Circle**: Circular spectrum visualization with pulsating rings 17 | 18 | ![Circle](previews/circle.gif) 19 | 20 | 4. 
**Particles**: Particle system that responds to beats and energy in the music 21 | - Controls: **p/P** - Increase/decrease maximum number of particles 22 | 23 | ![Particles](previews/particles.gif) 24 | 25 | 5. **Flame**: A realistic flame that dances to your music 26 | - Controls: **w/W** - Increase/decrease flame width 27 | - Controls: **h/H** - Increase/decrease flame height 28 | 29 | ![Flame](previews/flame.gif) 30 | 31 | 6. **Fractal Universe**: Hypnotic fractal patterns that evolve and respond to audio frequencies 32 | 33 | ![Fractal Universe](previews/fractal_universe.gif) 34 | 35 | 7. **Matrix Rain**: Digital rain effect inspired by The Matrix, with characters that flow and respond to the music 36 | 37 | ![Matrix Rain](previews/matrix_rain.gif) 38 | 39 | 8. **Neural Dreamscape**: Abstract visualization resembling neural networks that pulse and evolve with audio input 40 | 41 | ![Neural Dreamscape](previews/neural_dreamscape.gif) 42 | 43 | 9. **Neural Dreamscape Lite**: Optimized version of Neural Dreamscape designed specifically for low-end devices like Raspberry Pi Zero W 44 | 45 | ... and more!
46 | 47 | ## Features 48 | 49 | - Modular plugin system for visualizers 50 | - Multiple audio-reactive visualizations included 51 | - Real-time audio processing 52 | - Customizable with keyboard controls 53 | - Low resource usage 54 | 55 | ## Global Controls 56 | 57 | - **Q**: Quit the program 58 | - **M**: Switch to the next visualization 59 | - **Space**: Pause/resume visualizations 60 | - **+/-**: Increase/decrease audio sensitivity 61 | 62 | ## Requirements 63 | 64 | - Python 3.6+ 65 | - numpy 66 | - pyaudio 67 | - curses (included in the standard library) 68 | 69 | ## Installation 70 | 71 | ### Quick Install 72 | 73 | ```bash 74 | # Clone the repository 75 | git clone https://github.com/sam1am/cli-viz.git 76 | cd cli-viz 77 | 78 | # Create venv (optional) 79 | python -m venv venv 80 | source ./venv/bin/activate 81 | 82 | # Install dependencies 83 | pip install -r requirements.txt 84 | 85 | # Run the visualizer 86 | python main.py 87 | ``` 88 | 89 | ### Troubleshooting PyAudio Installation 90 | 91 | If you encounter issues installing PyAudio: 92 | 93 | #### On Linux: 94 | ```bash 95 | # Install portaudio development package 96 | sudo apt-get install portaudio19-dev python-pyaudio 97 | 98 | # Then install PyAudio 99 | pip install pyaudio 100 | ``` 101 | 102 | #### On macOS: 103 | ```bash 104 | # Using Homebrew 105 | brew install portaudio 106 | pip install pyaudio 107 | ``` 108 | 109 | #### On Windows: 110 | You may need to install PyAudio from a pre-built wheel: 111 | ```bash 112 | pip install pipwin 113 | pipwin install pyaudio 114 | ``` 115 | 116 | ## Creating Your Own Visualizers 117 | 118 | CLI-Viz has a plugin system that makes it easy to create your own visualizations. See the [visualizer documentation](visualizers/README.md) for details on creating custom visualizers. 119 | 120 | ## Contributing 121 | 122 | Contributions are welcome! Feel free to submit a pull request or open an issue on GitHub. 123 | 124 | 1. Fork the repository 125 | 2. 
Create your feature branch (`git checkout -b feature/amazing-visualization`) 126 | 3. Commit your changes (`git commit -am 'Add amazing visualization'`) 127 | 4. Push to the branch (`git push origin feature/amazing-visualization`) 128 | 5. Create a new Pull Request -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | # main.py 2 | import numpy as np 3 | import pyaudio 4 | import curses 5 | import time 6 | import importlib 7 | import pkgutil 8 | import os 9 | import inspect 10 | 11 | # Import the base visualizer 12 | from visualizer_base import VisualizerBase 13 | 14 | # Import the visualizers package 15 | import visualizers 16 | 17 | class TerminalAudioVisualizer: 18 | def __init__(self): 19 | # Audio setup 20 | self.CHUNK = 1024 * 2 21 | self.FORMAT = pyaudio.paInt16 22 | self.CHANNELS = 1 23 | self.RATE = 44100 24 | self.pause = False 25 | 26 | # FFT and visualization parameters 27 | self.spectrum = np.zeros(self.CHUNK) 28 | self.smoothed_spectrum = np.zeros(self.CHUNK // 2) 29 | self.previous_spectrum = np.zeros(self.CHUNK // 2) 30 | self.smoothing = 0.8 # Smoothing factor 31 | self.energy = 0 # Current energy level 32 | 33 | # Sensitivity control 34 | self.sensitivity = 1.0 # Default sensitivity multiplier 35 | self.sensitivity_step = 0.1 # How much to change per keystroke 36 | 37 | # Visual effects 38 | self.hue_offset = 0 39 | 40 | # Load visualizers 41 | self.visualizers = self.load_visualizers() 42 | self.current_visualizer_index = 0 43 | 44 | # Initialize audio stream 45 | self.p = pyaudio.PyAudio() 46 | self.stream = self.p.open( 47 | format=self.FORMAT, 48 | channels=self.CHANNELS, 49 | rate=self.RATE, 50 | input=True, 51 | output=False, 52 | frames_per_buffer=self.CHUNK 53 | ) 54 | 55 | def load_visualizers(self): 56 | """Dynamically load all visualizer plugins""" 57 | visualizers_list = [] 58 | 59 | # Iterate through all modules in 
the visualizers package 60 | for _, name, _ in pkgutil.iter_modules(visualizers.__path__): 61 | try: 62 | # Import the module 63 | module = importlib.import_module(f"visualizers.{name}") 64 | 65 | # Find all classes in the module that inherit from VisualizerBase 66 | for _, obj in inspect.getmembers(module, inspect.isclass): 67 | if issubclass(obj, VisualizerBase) and obj is not VisualizerBase: 68 | # Create an instance of the visualizer and add it to the list 69 | visualizers_list.append(obj()) 70 | print(f"Loaded visualizer: {obj().name}") 71 | except Exception as e: 72 | print(f"Error loading visualizer {name}: {e}") 73 | 74 | if not visualizers_list: 75 | print("No visualizers found!") 76 | 77 | return visualizers_list 78 | 79 | def get_audio_data(self): 80 | # Read audio data 81 | data = np.frombuffer(self.stream.read(self.CHUNK, exception_on_overflow=False), dtype=np.int16) 82 | 83 | # Apply FFT to get frequency domain 84 | spectrum = np.abs(np.fft.fft(data)[:self.CHUNK // 2]) 85 | 86 | # Normalize and apply smoothing 87 | spectrum = spectrum / (128 * self.CHUNK) 88 | self.previous_spectrum = self.smoothed_spectrum 89 | self.smoothed_spectrum = self.previous_spectrum * self.smoothing + spectrum * (1 - self.smoothing) 90 | 91 | # Apply sensitivity to the spectrum 92 | adjusted_spectrum = self.smoothed_spectrum * self.sensitivity 93 | 94 | # Calculate energy (for beat detection) 95 | self.energy = np.mean(adjusted_spectrum[:self.CHUNK//4]) * 2 96 | 97 | return adjusted_spectrum 98 | 99 | def setup_colors(self, stdscr): 100 | # Initialize color pairs for curses 101 | curses.start_color() 102 | curses.use_default_colors() 103 | 104 | # Check how many colors the terminal supports 105 | if curses.COLORS < 256: 106 | # Limited color mode - just set up basic color pairs 107 | color_count = min(curses.COLORS - 1, 7) # Reserve 0 for default 108 | for i in range(color_count): 109 | curses.init_pair(i + 1, i + 1, -1) # -1 means default background 110 | else: 111 | # Full 
color mode - create color cube 112 | color_count = 216 # 6x6x6 color cube 113 | 114 | # Create color pairs 115 | for i in range(color_count): 116 | # Convert index to r,g,b (0-5 range for each) 117 | r = (i // 36) % 6 118 | g = (i // 6) % 6 119 | b = i % 6 120 | 121 | # Scale to 0-1000 range for curses 122 | r_curses = int((r * 1000) / 5) 123 | g_curses = int((g * 1000) / 5) 124 | b_curses = int((b * 1000) / 5) 125 | 126 | # Define color and color pair 127 | curses.init_color(i + 16, r_curses, g_curses, b_curses) 128 | curses.init_pair(i + 1, i + 16, -1) # -1 means default background 129 | 130 | def run(self, stdscr): 131 | # Setup curses 132 | curses.curs_set(0) # Hide cursor 133 | 134 | # Check terminal color support 135 | has_color = curses.has_colors() 136 | can_change = curses.can_change_color() if has_color else False 137 | 138 | if has_color: 139 | self.setup_colors(stdscr) 140 | 141 | stdscr.timeout(0) # Non-blocking input 142 | stdscr.erase() 143 | 144 | # Initialize all visualizers 145 | for visualizer in self.visualizers: 146 | visualizer.setup() 147 | 148 | try: 149 | while True: 150 | # Handle keypresses 151 | try: 152 | key = stdscr.getkey() 153 | if key == 'q': 154 | break 155 | elif key == 'm': 156 | self.current_visualizer_index = (self.current_visualizer_index + 1) % len(self.visualizers) 157 | elif key == ' ': 158 | self.pause = not self.pause 159 | elif key == '+' or key == '=': # Both + and = (unshifted +) keys 160 | self.sensitivity += self.sensitivity_step 161 | elif key == '-': 162 | self.sensitivity = max(0.1, self.sensitivity - self.sensitivity_step) 163 | else: 164 | # Pass the key to the current visualizer 165 | current_vis = self.visualizers[self.current_visualizer_index] 166 | current_vis.handle_keypress(key) 167 | except: 168 | pass 169 | 170 | if not self.pause: 171 | # Get current terminal dimensions 172 | height, width = stdscr.getmaxyx() 173 | 174 | # Get audio data 175 | spectrum = self.get_audio_data() 176 | 177 | # Clear screen 
178 | stdscr.erase() 179 | 180 | # Update hue offset 181 | self.hue_offset = (self.hue_offset + 0.005) % 1.0 182 | 183 | # Get current visualizer 184 | current_vis = self.visualizers[self.current_visualizer_index] 185 | 186 | # Draw info 187 | stdscr.addstr(0, 0, f"Terminal Audio Visualizer | {current_vis.name} | {self.current_visualizer_index+1}/{len(self.visualizers)} | Sensitivity: {self.sensitivity:.1f} | [Q]uit | [M]ode | [+/-] Sensitivity | [Space] Pause") 188 | 189 | # Draw the current visualization 190 | current_vis.draw(stdscr, spectrum, height, width, self.energy, self.hue_offset) 191 | 192 | # Update screen 193 | stdscr.refresh() 194 | 195 | # Control frame rate 196 | time.sleep(0.016) # ~60fps 197 | finally: 198 | # Cleanup 199 | self.stream.stop_stream() 200 | self.stream.close() 201 | self.p.terminate() 202 | 203 | # Run the visualizer 204 | if __name__ == "__main__": 205 | visualizer = TerminalAudioVisualizer() 206 | curses.wrapper(visualizer.run) -------------------------------------------------------------------------------- /previews/bars.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/bars.gif -------------------------------------------------------------------------------- /previews/circle.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/circle.gif -------------------------------------------------------------------------------- /previews/flame.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/flame.gif -------------------------------------------------------------------------------- /previews/fractal_universe.gif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/fractal_universe.gif -------------------------------------------------------------------------------- /previews/matrix_rain.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/matrix_rain.gif -------------------------------------------------------------------------------- /previews/neural_dreamscape.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/neural_dreamscape.gif -------------------------------------------------------------------------------- /previews/particles.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/particles.gif -------------------------------------------------------------------------------- /previews/wave.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sam1am/cli-viz/9fe57a04bb7745e97c7eded235f6a481442f999a/previews/wave.gif -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | pyaudio 3 | colorama 4 | scipy -------------------------------------------------------------------------------- /visualizer_base.py: -------------------------------------------------------------------------------- 1 | # visualizer_base.py 2 | import curses 3 | 4 | class VisualizerBase: 5 | """Base class for audio visualizers""" 6 | 7 | def __init__(self, name="Base Visualizer"): 8 | 
self.name = name 9 | 10 | def setup(self): 11 | """Initialize any visualizer-specific state""" 12 | pass 13 | 14 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 15 | """ 16 | Draw the visualization 17 | 18 | Args: 19 | stdscr: curses screen object 20 | spectrum: audio frequency spectrum data 21 | height: screen height 22 | width: screen width 23 | energy: current audio energy level 24 | hue_offset: current color hue offset 25 | """ 26 | raise NotImplementedError("Visualizers must implement the draw method") 27 | 28 | def handle_keypress(self, key): 29 | """ 30 | Handle visualizer-specific keypresses 31 | 32 | Args: 33 | key: key that was pressed 34 | 35 | Returns: 36 | True if the key was handled, False otherwise 37 | """ 38 | return False 39 | 40 | def get_color_pair(self, stdscr, r, g, b): 41 | """Helper method to get color pair for RGB values""" 42 | # Check if we're in limited color mode 43 | if curses.COLORS < 256: 44 | # Map RGB to one of the 8 basic colors 45 | # Basic approximation: use brightness to choose between colors 46 | brightness = (r + g + b) / 3 47 | if brightness < 85: # Dark 48 | if r > g and r > b: return curses.color_pair(1) # Red 49 | if g > r and g > b: return curses.color_pair(2) # Green 50 | if b > r and b > g: return curses.color_pair(4) # Blue 51 | return curses.color_pair(0) # Black 52 | else: # Bright 53 | if r > g and r > b: return curses.color_pair(1) | curses.A_BOLD # Bright Red 54 | if g > r and g > b: return curses.color_pair(2) | curses.A_BOLD # Bright Green 55 | if b > r and b > g: return curses.color_pair(4) | curses.A_BOLD # Bright Blue 56 | if r > 200 and g > 200 and b > 200: return curses.color_pair(7) | curses.A_BOLD # White 57 | return curses.color_pair(3) # Yellow 58 | else: 59 | # Original 256-color logic 60 | r_idx = min(5, r * 6 // 256) 61 | g_idx = min(5, g * 6 // 256) 62 | b_idx = min(5, b * 6 // 256) 63 | 64 | # Calculate color index 65 | color_idx = 36 * r_idx + 6 * g_idx + b_idx + 1 66 | 67 
| return curses.color_pair(color_idx) 68 | 69 | def hsv_to_color_pair(self, stdscr, h, s, v): 70 | """Helper method to get color pair for HSV values""" 71 | # Convert HSV to RGB 72 | h = h % 1.0 73 | c = v * s 74 | x = c * (1 - abs((h * 6) % 2 - 1)) 75 | m = v - c 76 | 77 | if h < 1/6: 78 | r, g, b = c, x, 0 79 | elif h < 2/6: 80 | r, g, b = x, c, 0 81 | elif h < 3/6: 82 | r, g, b = 0, c, x 83 | elif h < 4/6: 84 | r, g, b = 0, x, c 85 | elif h < 5/6: 86 | r, g, b = x, 0, c 87 | else: 88 | r, g, b = c, 0, x 89 | 90 | r = int((r + m) * 255) 91 | g = int((g + m) * 255) 92 | b = int((b + m) * 255) 93 | 94 | return self.get_color_pair(stdscr, r, g, b) -------------------------------------------------------------------------------- /visualizers/README.md: -------------------------------------------------------------------------------- 1 | # Creating Custom Visualizers for CLI-Viz 2 | 3 | This guide explains how to create custom visualizers for the CLI-Viz audio visualization system. 4 | 5 | ## Basic Structure 6 | 7 | All visualizers must: 8 | 1. Inherit from the `VisualizerBase` class 9 | 2. Implement at least the `draw()` method 10 | 3. 
Be placed in the `visualizers` directory with a `.py` extension 11 | 12 | ## Minimal Example 13 | 14 | Here's a minimal example of a custom visualizer: 15 | 16 | ```python 17 | # visualizers/minimal.py 18 | from visualizer_base import VisualizerBase 19 | import curses 20 | 21 | class MinimalVisualizer(VisualizerBase): 22 | def __init__(self): 23 | super().__init__(name="Minimal Example") 24 | 25 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 26 | # Draw a simple text at the center of the screen 27 | message = "Audio Level: {:.2f}".format(energy) 28 | y = height // 2 29 | x = width // 2 - len(message) // 2 30 | 31 | # Use the color helpers from the base class 32 | color = self.hsv_to_color_pair(stdscr, hue_offset, 1.0, 1.0) 33 | stdscr.addstr(y, x, message, color | curses.A_BOLD) 34 | ``` 35 | 36 | ## Essential Methods 37 | 38 | ### Required Methods 39 | 40 | - `__init__(self)`: Initialize your visualizer and set its name by calling `super().__init__(name="Your Visualizer Name")` 41 | - `draw(self, stdscr, spectrum, height, width, energy, hue_offset)`: The main method that draws your visualization 42 | 43 | ### Optional Methods 44 | 45 | - `setup(self)`: Called once when the visualizer is loaded; use for initialization 46 | - `handle_keypress(self, key)`: Handle keyboard input specific to your visualizer; return True if handled 47 | 48 | ## Parameters 49 | 50 | When implementing the `draw()` method, you'll have access to these parameters: 51 | 52 | - `stdscr`: The curses screen object used for drawing 53 | - `spectrum`: NumPy array containing the audio frequency spectrum (normalized) 54 | - `height, width`: Current terminal dimensions 55 | - `energy`: Overall audio energy level (useful for beat detection) 56 | - `hue_offset`: Current color hue offset (for creating color cycles) 57 | 58 | ## Helpful Utilities 59 | 60 | The base class provides useful methods: 61 | 62 | - `self.hsv_to_color_pair(stdscr, h, s, v)`: Convert HSV color (0-1 range) 
to a curses color pair 63 | - `self.get_color_pair(stdscr, r, g, b)`: Convert RGB color (0-255 range) to a curses color pair 64 | 65 | ## Audio Data Usage 66 | 67 | ### Frequency Spectrum 68 | 69 | The `spectrum` parameter is a numpy array containing the frequency components of the audio: 70 | - Lower indices (e.g., `spectrum[0:10]`) represent bass frequencies 71 | - Middle indices (e.g., `spectrum[10:30]`) represent mid-range frequencies 72 | - Higher indices represent treble frequencies 73 | 74 | Example: 75 | ```python 76 | # Get bass, mid, and treble energy 77 | bass = np.mean(spectrum[:10]) 78 | mids = np.mean(spectrum[10:30]) 79 | treble = np.mean(spectrum[30:]) 80 | ``` 81 | 82 | ### Overall Energy 83 | 84 | The `energy` parameter represents the overall energy level of the audio and is useful for detecting beats. 85 | 86 | Example: 87 | ```python 88 | if energy > 0.2: # Detected a beat 89 | # Do something exciting 90 | ``` 91 | 92 | ## Best Practices 93 | 94 | 1. **Performance**: Terminal rendering can be slow, so keep your visualizations efficient 95 | - Avoid redrawing areas that haven't changed 96 | - Limit the number of characters you draw 97 | 98 | 2. **Responsiveness**: Make your visualization responsive to audio 99 | - Use spectrum data to influence your visual elements 100 | - React to beats using the energy parameter 101 | 102 | 3. **Terminal Compatibility**: Use ASCII characters that work across different terminals 103 | - For wide compatibility, stick to basic ASCII 104 | - For more visual appeal, use Unicode when appropriate 105 | 106 | 4. **Size Adaptation**: Handle different terminal sizes gracefully 107 | - Adapt your visualization to the available dimensions 108 | - Check bounds before drawing to avoid errors 109 | 110 | 5. 
**State Management**: Keep visualizer state in instance variables 111 | - Initialize them in `__init__` or `setup` 112 | - Update them in the `draw` method 113 | 114 | ## Advanced Example: Pulsing Text 115 | 116 | Here's a more advanced example showing audio-reactive text: 117 | 118 | ```python 119 | # visualizers/pulsing_text.py 120 | from visualizer_base import VisualizerBase 121 | import curses 122 | import numpy as np 123 | import math 124 | import time 125 | class PulsingTextVisualizer(VisualizerBase): 126 | def __init__(self): 127 | super().__init__(name="Pulsing Text") 128 | self.message = "CLI-VIZ" 129 | self.beat_intensity = 0 130 | 131 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 132 | # Update beat intensity (rise quickly, fall slowly) 133 | if energy > self.beat_intensity: 134 | self.beat_intensity = energy 135 | else: 136 | self.beat_intensity *= 0.9 # Decay factor 137 | 138 | # Calculate text size based on beat intensity 139 | size_multiplier = 1.0 + self.beat_intensity * 2.0 140 | 141 | # Draw each letter with different color and size 142 | message_width = len(self.message) * 2 # Estimate width 143 | start_x = width // 2 - int(message_width * size_multiplier) // 2 144 | y = height // 2 145 | 146 | for i, char in enumerate(self.message): 147 | # Calculate position with some oscillation 148 | char_x = start_x + int(i * 2 * size_multiplier) 149 | char_y = y + int(math.sin(time.time() * 2 + i) * self.beat_intensity * 2) 150 | 151 | # Skip if out of bounds 152 | if not (0 <= char_y < height - 1 and 0 <= char_x < width - 1): 153 | continue 154 | 155 | # Calculate color based on spectrum and position 156 | freq_idx = (i * 5) % (len(spectrum) // 4) 157 | freq_intensity = spectrum[freq_idx] * 3.0 158 | 159 | hue = (i / len(self.message) + hue_offset) % 1.0 160 | sat = 0.7 + 0.3 * freq_intensity 161 | val = 0.7 + 0.3 * self.beat_intensity 162 | 163 | # Draw character with appropriate attributes 164 | color = self.hsv_to_color_pair(stdscr, hue,
sat, val) 165 | 166 | # Choose character style based on energy 167 | attrs = curses.A_BOLD 168 | if freq_intensity > 0.5: 169 | attrs |= curses.A_REVERSE 170 | 171 | # Draw the character 172 | stdscr.addstr(char_y, char_x, char, color | attrs) 173 | ``` 174 | 175 | ## Loading Your Visualizer 176 | 177 | Once you've created your visualizer, save it in the `visualizers` directory with a `.py` extension. CLI-Viz will automatically detect and load it the next time you start the application. 178 | 179 | If your visualizer doesn't appear, check for errors in the terminal output when starting the application. 180 | -------------------------------------------------------------------------------- /visualizers/__init__.py: -------------------------------------------------------------------------------- 1 | # visualizers/__init__.py 2 | from .bars import BarsVisualizer 3 | from .circle import CircleVisualizer 4 | from .wave import WaveVisualizer 5 | from .particles import ParticlesVisualizer 6 | from .flame import FlameVisualizer 7 | from .matrix_rain import MatrixRainVisualizer 8 | from .neural_dreamscape import NeuralDreamscapeVisualizer 9 | from .neural_dreamscape_lite import NeuralDreamscapeLiteVisualizer 10 | from .fractal_universe import FractalUniverseVisualizer 11 | from .fractal_universe_lite import FractalUniverseLiteVisualizer 12 | from .stick_figure import StickFigureVisualizer 13 | from .starfield_warp import StarfieldWarpVisualizer 14 | from .cosmic_pulsar import CosmicPulsarVisualizer 15 | from .guitar_tuner import GuitarTunerVisualizer 16 | 17 | # Dictionary mapping visualizer names to classes 18 | visualizers = { 19 | "bars": BarsVisualizer, 20 | "circle": CircleVisualizer, 21 | "wave": WaveVisualizer, 22 | "particles": ParticlesVisualizer, 23 | "flame": FlameVisualizer, 24 | "matrix": MatrixRainVisualizer, 25 | "neural": NeuralDreamscapeVisualizer, 26 | "neural-lite": NeuralDreamscapeLiteVisualizer, 27 | "fractal": FractalUniverseVisualizer, 28 | 
"fractal-lite": FractalUniverseLiteVisualizer, 29 | "stick": StickFigureVisualizer, 30 | "starfield": StarfieldWarpVisualizer, 31 | "pulsar": CosmicPulsarVisualizer, 32 | "tuner": GuitarTunerVisualizer, 33 | } -------------------------------------------------------------------------------- /visualizers/bars.py: -------------------------------------------------------------------------------- 1 | # visualizers/bars.py 2 | import curses 3 | from visualizer_base import VisualizerBase 4 | 5 | class BarsVisualizer(VisualizerBase): 6 | def __init__(self): 7 | super().__init__(name="Spectrum Bars") 8 | self.bars = 50 # Number of bars in visualization 9 | self.boost = 1.5 # Bass boost factor 10 | 11 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 12 | # Classic spectrum analyzer with bars 13 | bar_width = max(1, width // self.bars) 14 | for i in range(min(self.bars, width // bar_width)): 15 | # Get frequency amplitude 16 | freq_index = int(i ** 1.3) + 1 # Non-linear mapping for better visualization 17 | freq_index = min(freq_index, len(spectrum) - 1) 18 | amplitude = spectrum[freq_index] * (1 + self.boost * (1 - i / self.bars)) 19 | 20 | # Calculate bar height 21 | bar_height = int(amplitude * (height - 4) * 3) 22 | bar_height = min(bar_height, height - 4) 23 | 24 | # Draw bar with color gradient 25 | for j in range(bar_height): 26 | # Calculate color (HSV: hue based on frequency, saturation and value based on amplitude) 27 | hue = (i / self.bars + hue_offset) % 1.0 28 | sat = 0.8 + 0.2 * (j / bar_height if bar_height > 0 else 0) 29 | val = 0.7 + 0.3 * (j / bar_height if bar_height > 0 else 0) 30 | 31 | # Get color pair and draw 32 | color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val) 33 | stdscr.addstr(height - 3 - j, i * bar_width, "█" * bar_width, color_attr | curses.A_BOLD) 34 | 35 | def handle_keypress(self, key): 36 | if key == 'b': 37 | self.boost = max(0.5, min(5.0, self.boost + 0.1)) 38 | return True 39 | elif key == 'B': 40 | 
class CircleVisualizer(VisualizerBase):
    """Concentric rings around the screen center that swell with the bass."""

    def __init__(self):
        super().__init__(name="Circle Spectrum")

    def draw(self, stdscr, spectrum, height, width, energy, hue_offset):
        """Draw five concentric circles whose radius tracks low-frequency energy.

        Character choice, saturation and brightness all scale with the
        overall energy level; hue rotates around each ring.
        """
        mid_y, mid_x = height // 2, width // 2

        # Base radius from the smaller screen dimension; low bins drive the swell
        base_radius = min(height, width) // 4
        swell = spectrum[:20].mean() * base_radius * 1.5

        for ring in range(5):
            radius = base_radius - ring * 3 + swell

            # Sample the ring every 5 degrees
            for deg in range(0, 360, 5):
                theta = math.radians(deg)
                px = int(mid_x + radius * math.cos(theta))
                py = int(mid_y + radius * math.sin(theta))

                # Skip points outside the drawable area (bottom row excluded)
                if not (0 <= py < height - 1 and 0 <= px < width):
                    continue

                # Hue follows the angle; energy brightens and saturates
                hue = (deg / 360 + hue_offset) % 1.0
                sat = 0.7 + 0.3 * energy
                val = 0.7 + 0.3 * energy

                # Louder audio selects heavier glyphs
                if energy < 0.1:
                    glyph = "•"
                elif energy < 0.2:
                    glyph = "*"
                else:
                    glyph = "★"

                attr = self.hsv_to_color_pair(stdscr, hue, sat, val)
                stdscr.addstr(py, px, glyph, attr | curses.A_BOLD)
class CosmicPulsarVisualizer(VisualizerBase):
    """Audio-reactive pulsar: expanding rings, orbiting particles, gravity wells.

    Keys: 'c' cycles color modes; 't'/'T' lower/raise the ring trigger
    threshold; 'd'/'D' raise/lower ring density; 's' cycles rotational
    symmetry (2..8, then off).
    """

    def __init__(self):
        """Initialise tunable parameters and empty element pools."""
        super().__init__(name="Cosmic Pulsar Reactive")
        self.pulsar_rings = []
        self.energy_threshold = 0.3  # Lowered threshold for more frequent rings
        self.ring_density = 1.0
        self.color_mode = 0  # 0: rainbow, 1: monochrome, 2: complementary
        self.orbital_particles = []
        self.orbital_count = 80
        self.gravity_wells = []
        self.max_wells = 4  # Allow slightly more wells
        self.symmetry = 4  # Rotational symmetry
        self.bass_sensitivity = 15.0  # Multiplier for bass effects (thickness, well strength)
        self.mids_sensitivity = 10.0  # Multiplier for mid-range effects (waves)
        self.highs_sensitivity = 5.0  # Multiplier for high-range effects (waves)
        self.particle_brightness_factor = 2.5  # How much spectrum affects particle brightness

    def setup(self):
        """Reset animation time and pools; seed the initial orbital particles."""
        self.time = 0
        self.pulsar_rings = []
        self.orbital_particles = []
        self.gravity_wells = []

        # Create initial orbital particles
        for i in range(self.orbital_count):
            angle = random.uniform(0, 2 * math.pi)
            base_radius = random.uniform(8, 20)  # Store base radius
            orbital_speed = 0.02 + random.uniform(-0.01, 0.01)
            # Assign a frequency band index based on particle index
            # (Could also be random, but this gives some spatial distribution)
            freq_idx_rel = i / self.orbital_count
            self.orbital_particles.append({
                'angle': angle,
                'base_radius': base_radius,
                'current_radius': base_radius,  # Add current radius for modulation
                'orbital_speed': orbital_speed,
                'size': random.choice(['*', '.', '+', '·', '•']),
                'freq_idx_rel': freq_idx_rel,  # Relative position in spectrum (0 to 1)
                'hue': freq_idx_rel  # Use frequency position for hue
            })

    def _get_freq_bands(self, spectrum, width):
        """Helper to get average energy in bass, mids, highs.

        Args:
            spectrum: audio frequency spectrum data
            width: screen width  # NOTE(review): unused — kept for interface stability

        Returns:
            (bass, mids, highs) tuple, each scaled by its sensitivity and
            clamped to at most 1.0.
        """
        if len(spectrum) == 0:
            return 0, 0, 0

        # Define frequency band ranges (adjust as needed)
        # Use relative indices based on spectrum length
        spec_len = len(spectrum)
        bass_end = spec_len // 10
        mids_end = spec_len // 3

        bass = np.mean(spectrum[:bass_end]) if bass_end > 0 else 0
        mids = np.mean(spectrum[bass_end:mids_end]) if mids_end > bass_end else 0
        highs = np.mean(spectrum[mids_end:]) if spec_len > mids_end else 0

        # Apply sensitivities and clamp
        bass = min(1.0, bass * self.bass_sensitivity)
        mids = min(1.0, mids * self.mids_sensitivity)
        highs = min(1.0, highs * self.highs_sensitivity)

        return bass, mids, highs

    def draw(self, stdscr, spectrum, height, width, energy, hue_offset):
        """Render one frame: background shimmer, rings, wells, particles, core.

        Args:
            stdscr: curses screen object
            spectrum: audio frequency spectrum data (numpy array)
            height, width: current terminal dimensions
            energy: overall audio energy level
            hue_offset: current color hue offset
        """
        center_x = width // 2
        center_y = height // 2
        self.time += 0.05
        max_dim = max(width, height)

        # Ensure spectrum is not empty before processing
        if len(spectrum) == 0:
            spectrum = np.zeros(1)  # Avoid errors with empty spectrum

        # Get frequency band energies
        bass_energy, mids_energy, highs_energy = self._get_freq_bands(spectrum, width)

        # --- Subtle Background Shimmer ---
        if energy > 0.05:  # Only draw if some sound
            bg_char = '.'
            # Vary brightness slightly with energy
            bg_val = min(0.3, energy * 0.5)
            # Slow color cycle for background
            bg_hue = (hue_offset * 0.1) % 1.0
            bg_sat = 0.1 + energy * 0.2  # Slightly more saturation with energy
            bg_attr = self.hsv_to_color_pair(stdscr, bg_hue, bg_sat, bg_val)
            # Draw sparsely
            for y_bg in range(0, height, 7):  # Wider spacing
                for x_bg in range(0, width, 14):
                    # Add slight random positional jitter based on time
                    jitter_x = int(math.sin(self.time + x_bg * 0.1) * 1)
                    jitter_y = int(math.cos(self.time + y_bg * 0.1) * 1)
                    draw_x, draw_y = x_bg + jitter_x, y_bg + jitter_y
                    if 0 <= draw_x < width and 0 <= draw_y < height:
                        try:
                            # Random chance to draw based on energy
                            if random.random() < energy * 2.0:
                                stdscr.addch(draw_y, draw_x, bg_char, bg_attr)
                        except curses.error:
                            pass  # Ignore errors at screen edges

        # --- Pulsar Ring Creation ---
        # Trigger more frequently based on energy peaks
        if energy > self.energy_threshold and random.random() < (energy - self.energy_threshold) * 1.5:
            ring_count = int(1 + energy * 2 * self.ring_density)
            for _ in range(ring_count):
                # Thickness based on bass energy
                thickness = max(1, min(4, 1 + int(bass_energy * 3)))
                # Wave amplitude based on highs, frequency based on mids
                wave_amplitude = highs_energy * 2.0
                wave_frequency = 4 + mids_energy * 8.0
                # Store energy at creation for brightness/saturation control
                initial_energy = energy

                self.pulsar_rings.append({
                    'radius': 1,
                    'growth_rate': 0.4 + energy * 1.2,  # More variation in growth speed
                    'life': 1.0,
                    'thickness': thickness,
                    'hue': random.random(),  # Keep random hue for variety
                    'wave_freq': wave_frequency,
                    'wave_amp': wave_amplitude,
                    'segments': random.randint(self.symmetry, self.symmetry * 3) if random.random() > 0.7 else 0,
                    'initial_energy': initial_energy
                })

        # --- Gravity Well Creation & Update ---
        # More likely to spawn on bass hits
        if len(self.gravity_wells) < self.max_wells and random.random() < (0.005 + energy * 0.01 + bass_energy * 0.08):
            angle = random.uniform(0, 2 * math.pi)
            distance = random.uniform(max_dim * 0.1, max_dim * 0.3)
            lifetime = random.uniform(5, 15)
            base_strength = random.uniform(0.5, 2.0)  # Store base strength

            self.gravity_wells.append({
                'x': center_x + distance * math.cos(angle),
                'y': center_y + distance * math.sin(angle),
                'base_strength': base_strength,
                'current_strength': base_strength,  # Initialize current strength
                'life': 1.0,
                'lifetime': lifetime,
                'hue': random.random()
            })

        # Update existing gravity wells (pulsate strength with bass)
        new_wells = []
        for well in self.gravity_wells:
            well['life'] -= (1.0 / well['lifetime']) / 30.0  # Slower decay based on update rate (~30fps)
            # Pulsate strength with bass
            well['current_strength'] = well['base_strength'] * (0.8 + bass_energy * 1.5)

            if well['life'] > 0:
                new_wells.append(well)
        self.gravity_wells = new_wells

        # --- Draw Pulsar Rings ---
        new_rings = []
        for ring in self.pulsar_rings:
            ring['radius'] += ring['growth_rate']
            ring['life'] -= 0.015  # Slightly faster fade

            if ring['life'] <= 0 or ring['radius'] > max_dim * 1.5:  # Allow going slightly off-screen
                continue

            radius = ring['radius']
            thickness = ring['thickness']
            segments = ring['segments']  # TODO: Implement drawing for segmented rings if needed

            # Color based on mode and initial energy
            brightness_factor = (ring['life'] * 0.4 + ring['initial_energy'] * 0.6)  # Mix lifetime fade and creation energy
            brightness_factor = max(0, min(1.0, brightness_factor))  # Clamp

            if self.color_mode == 0:  # Rainbow
                hue = (ring['hue'] + hue_offset) % 1.0
            elif self.color_mode == 1:  # Monochrome
                hue = hue_offset
            else:  # Complementary
                hue = (hue_offset + 0.5 * (ring['hue'] > 0.5)) % 1.0

            # Saturation and Value linked to brightness_factor
            sat = 0.5 + 0.5 * brightness_factor
            val = 0.4 + 0.6 * brightness_factor
            color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val)

            # --- Continuous Ring Drawing ---
            steps = max(10, int(2 * math.pi * radius))  # Ensure minimum steps
            # ** FIXED LINE BELOW **
            for i in range(0, steps, max(1, int(steps / (100 + mids_energy * 100)))):  # Density slightly affected by mids
                angle = i * 2 * math.pi / steps

                # Add wave effect
                wave_offset = ring['wave_amp'] * math.sin(angle * ring['wave_freq'] + self.time * 3)  # Faster wave time
                current_radius = radius + wave_offset

                # Apply rotational symmetry
                for sym in range(self.symmetry):
                    sym_angle = angle + sym * (2 * math.pi / self.symmetry)

                    for r_offset in range(thickness):  # Draw thickness inward
                        r = current_radius - r_offset
                        if r <= 0: continue  # Don't draw negative radius

                        x = center_x + r * math.cos(sym_angle)
                        y = center_y + r * math.sin(sym_angle)

                        if 0 <= x < width and 0 <= y < height:
                            try:
                                # Use different characters based on thickness/radius?
                                char = '*' if r_offset < thickness / 2 else '.'
                                stdscr.addstr(int(y), int(x), char, color_attr)
                            except curses.error:
                                pass  # Ignore screen edge errors

            new_rings.append(ring)
        self.pulsar_rings = new_rings

        # --- Draw and Update Gravity Wells ---
        for well in self.gravity_wells:
            x, y = int(well['x']), int(well['y'])

            # Well appearance based on pulsating strength and life
            intensity = well['life'] * well['current_strength']
            chars = ['·', '∘', '○', '◎', '●']
            char_idx = min(len(chars) - 1, int(intensity * (len(chars) / 1.5)))  # Scale index

            # Color based on current mode and intensity
            if self.color_mode == 0: hue = (well['hue'] + hue_offset) % 1.0
            elif self.color_mode == 1: hue = hue_offset
            else: hue = (hue_offset + 0.5) % 1.0
            sat = 0.6 + 0.4 * intensity  # More saturation with intensity
            val = 0.4 + 0.6 * intensity  # More brightness with intensity
            color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val)

            if 0 <= x < width and 0 <= y < height:
                try:
                    stdscr.addstr(y, x, chars[char_idx], color_attr | curses.A_BOLD)

                    # Draw a fading glow, intensity based on well strength
                    glow_intensity = well['current_strength'] * well['life']
                    for r in range(1, 3):
                        glow_val = val * (1 - r / 3) * (0.5 + glow_intensity * 0.5)  # Glow pulses
                        glow_attr = self.hsv_to_color_pair(stdscr, hue, sat * 0.8, max(0, min(1.0, glow_val)))
                        for dx in range(-r, r + 1):
                            for dy in range(-r, r + 1):
                                dist_sq = dx*dx + dy*dy
                                # Draw approx circle
                                if r*r - r < dist_sq <= r*r + r:
                                    nx, ny = x + dx, y + dy
                                    if 0 <= nx < width and 0 <= ny < height:
                                        try:
                                            stdscr.addstr(ny, nx, '·', glow_attr)
                                        except curses.error: pass
                except curses.error: pass

        # --- Update and Draw Orbital Particles ---
        spec_len = len(spectrum)
        for particle in self.orbital_particles:
            # Update orbital speed based on overall energy
            particle['angle'] = (particle['angle'] + particle['orbital_speed'] * (1 + energy * 1.5)) % (2 * math.pi)  # More speed variation

            # Get energy for this particle's frequency band
            freq_idx = int(particle['freq_idx_rel'] * (spec_len - 1)) if spec_len > 0 else 0
            freq_energy = spectrum[freq_idx] if spec_len > 0 else 0

            # Modulate radius slightly based on frequency energy (push outward)
            radial_push = freq_energy * 2.0
            particle['current_radius'] = particle['base_radius'] * (1 + radial_push * 0.1)  # Subtle radius change

            # Calculate base position
            base_x = center_x + particle['current_radius'] * math.cos(particle['angle'])
            base_y = center_y + particle['current_radius'] * math.sin(particle['angle'])

            # Apply influence from gravity wells
            final_x, final_y = base_x, base_y
            for well in self.gravity_wells:
                well_vector_x = well['x'] - final_x
                well_vector_y = well['y'] - final_y
                dist_sq = well_vector_x**2 + well_vector_y**2
                dist = math.sqrt(dist_sq) if dist_sq > 0.01 else 0.1

                # Use pulsating strength
                force = well['current_strength'] * well['life'] / dist_sq * 3  # Slightly stronger pull
                force = min(force, dist / 2)  # Prevent excessive jumps

                final_x += well_vector_x / dist * force
                final_y += well_vector_y / dist * force

            # Draw particle if in bounds
            if 0 <= final_x < width and 0 <= final_y < height:
                # Color based on mode
                if self.color_mode == 0: hue = (particle['hue'] + hue_offset) % 1.0
                elif self.color_mode == 1: hue = hue_offset
                else: hue = (hue_offset + 0.5 * (particle['hue'] > 0.5)) % 1.0

                # Brightness (Value) based on frequency energy
                particle_val = 0.4 + freq_energy * self.particle_brightness_factor
                particle_val = max(0.1, min(1.0, particle_val))  # Clamp value
                particle_sat = 0.6 + freq_energy * 0.4  # Saturation also slightly affected
                particle_sat = max(0.2, min(1.0, particle_sat))

                color_attr = self.hsv_to_color_pair(stdscr, hue, particle_sat, particle_val)

                try:
                    # Maybe make size react slightly?
                    size = particle['size']
                    if freq_energy > 0.5: size = '#'  # Use brighter char on high energy
                    stdscr.addstr(int(final_y), int(final_x), size, color_attr)
                except curses.error:
                    pass

        # --- Draw the Pulsar Core ---
        # More reactive size and intensity range
        core_radius = 1.0 + energy * 5.0
        core_intensity = 0.2 + energy * 0.8  # Wider brightness range
        core_intensity = max(0, min(1.0, core_intensity))

        core_hue = (hue_offset + bass_energy * 0.1) % 1.0  # Subtle hue shift with bass
        core_sat = 0.1 + energy * 0.5  # Saturation pulses with energy

        # Draw core as a bright, multi-layered spot
        for r_layer in range(int(core_radius), -1, -1):  # Draw from outside in
            layer_radius = r_layer
            # Intensity falls off from center
            intensity_at_layer = core_intensity * max(0, 1 - (layer_radius / (core_radius + 1e-6)))**2
            if intensity_at_layer < 0.05: continue  # Skip very dim layers

            char = '*' if layer_radius < core_radius / 2 else '+' if layer_radius < core_radius * 0.8 else '.'
            color_attr = self.hsv_to_color_pair(stdscr, core_hue, core_sat, intensity_at_layer)
            attr = curses.A_BOLD if layer_radius < core_radius / 3 else 0

            # Draw circle points for this layer
            steps = max(6, int(layer_radius * 4))  # More points for larger radii
            for i in range(steps):
                angle = i * 2 * math.pi / steps
                x = center_x + layer_radius * math.cos(angle)
                y = center_y + layer_radius * math.sin(angle)
                if 0 <= x < width and 0 <= y < height:
                    try:
                        stdscr.addstr(int(y), int(x), char, color_attr | attr)
                    except curses.error: pass

        # Draw center point ('O') - always visible
        try:
            center_color = self.hsv_to_color_pair(stdscr, core_hue, 0.0, 1.0)  # Whiteish center
            stdscr.addstr(center_y, center_x, "O", center_color | curses.A_BOLD)
        except curses.error: pass

    def handle_keypress(self, key):
        """Adjust color mode, threshold, density, or symmetry; True if handled."""
        if key == 'c':  # Cycle color modes
            self.color_mode = (self.color_mode + 1) % 3
            return True
        elif key == 't':  # Lower energy threshold
            self.energy_threshold = max(0.05, self.energy_threshold - 0.05)
            return True
        elif key == 'T':  # Raise energy threshold
            self.energy_threshold = min(0.9, self.energy_threshold + 0.05)
            return True
        elif key == 'd':  # Increase ring density
            self.ring_density = min(3.0, self.ring_density + 0.2)
            return True
        elif key == 'D':  # Decrease ring density
            self.ring_density = max(0.2, self.ring_density - 0.2)
            return True
        elif key == 's':  # Change symmetry
            current_sym = self.symmetry
            if current_sym == 0: self.symmetry = 2  # From none to 2
            elif current_sym == 8: self.symmetry = 0  # From 8 to none
            else: self.symmetry = current_sym + 1
            # Effectively cycles: 2, 3, 4, 5, 6, 7, 8, 0
            return True
        # Add keys to adjust sensitivity?
        # elif key == 'b': self.bass_sensitivity = max(1.0, self.bass_sensitivity - 1.0); return True
        # elif key == 'B': self.bass_sensitivity += 1.0; return True
        # elif key == 'm': self.mids_sensitivity = max(1.0, self.mids_sensitivity - 1.0); return True
        # elif key == 'M': self.mids_sensitivity += 1.0; return True
        # elif key == 'h': self.highs_sensitivity = max(1.0, self.highs_sensitivity - 1.0); return True
        # elif key == 'H': self.highs_sensitivity += 1.0; return True
        return False
class FlameVisualizer(VisualizerBase):
    """Audio-driven fire simulation rendered with ASCII intensity characters.

    Keys: 'w'/'W' grow/shrink the flame width (20-200), 'h'/'H' grow/shrink
    the flame height (10-50); each change resets the simulation grid.
    """

    def __init__(self):
        super().__init__(name="Flame")
        self.flame_width = 80
        self.flame_height = 30
        self.flame_grid = np.zeros((self.flame_height, self.flame_width))
        self.flame_cooling = np.zeros((self.flame_height, self.flame_width))

    def setup(self):
        """Re-initialise the heat and cooling grids at the configured size."""
        # Initialize flame grid
        self.flame_grid = np.zeros((self.flame_height, self.flame_width))
        self.flame_cooling = np.zeros((self.flame_height, self.flame_width))

    def draw(self, stdscr, spectrum, height, width, energy, hue_offset):
        """Advance the flame simulation one step and render it.

        Args:
            stdscr: curses screen object
            spectrum: audio frequency spectrum data (numpy array)
            height, width: current terminal dimensions
            energy: overall audio energy level (unused; bass/mids drive the fire)
            hue_offset: current color hue offset (unused; flame uses fixed hues)
        """
        # Flame simulation
        # Adjust flame simulation dimensions to fit the screen
        flame_height = min(height - 2, self.flame_height)
        flame_width = min(width, self.flame_width)

        # Reset flame grid if dimensions have changed
        if self.flame_grid.shape != (flame_height, flame_width):
            self.flame_grid = np.zeros((flame_height, flame_width))
            self.flame_cooling = np.zeros((flame_height, flame_width))

        # Create cooling effect (random values to make flame flicker)
        for i in range(flame_width):
            # More cooling at the edges
            edge_cooling = 0.2 * (1.0 - min(i, flame_width - i - 1) / (flame_width / 2.0))
            for j in range(flame_height):
                # More cooling at the top
                height_cooling = 0.1 * (j / flame_height)  # More cooling at the top (low j)
                self.flame_cooling[j, i] = random.random() * 0.2 + edge_cooling + height_cooling

        # Apply cooling
        self.flame_grid -= self.flame_cooling
        self.flame_grid = np.clip(self.flame_grid, 0, 1)

        # Generate new heat at the bottom based on audio
        bass = np.mean(spectrum[:10]) * 3  # Use bass frequencies
        mids = np.mean(spectrum[10:30]) * 2  # Use mid frequencies

        # Add audio-reactive heat sources
        for i in range(flame_width):
            # Center bias (more heat in the middle)
            center_bias = 1.0 - 0.5 * abs(i - flame_width/2) / (flame_width/2)

            # Add random fluctuations for natural look
            fluctuation = random.random() * 0.3 + 0.7

            # Calculate heat intensity
            heat = bass * center_bias * fluctuation

            # Add extra heat at random positions for crackling effect
            if random.random() < 0.1 * (bass + mids):
                heat += random.random() * 0.5

            # Apply heat at the bottom row
            if flame_height > 0:
                self.flame_grid[flame_height-1, i] = min(1.0, heat)

        # Propagate heat upwards (fix the direction)
        for y in range(1, flame_height):
            target_y = flame_height - y - 1
            source_y = target_y + 1

            if 0 <= source_y < flame_height and 0 <= target_y < flame_height:
                for x in range(flame_width):
                    # Get surrounding cells for diffusion
                    left = max(0, x - 1)
                    right = min(flame_width - 1, x + 1)

                    # Calculate new heat from cells below with some diffusion
                    left_val = self.flame_grid[source_y, left] * 0.2
                    center_val = self.flame_grid[source_y, x] * 0.6
                    right_val = self.flame_grid[source_y, right] * 0.2

                    # Apply upward drift with audio reactivity
                    drift = 0.95 + 0.05 * bass
                    new_val = (left_val + center_val + right_val) * drift

                    # Apply the new heat value
                    self.flame_grid[target_y, x] = min(1.0, new_val)

        # Render the flame
        flame_chars = ' .,:;=+*#%@'  # Characters from least to most intense

        for y in range(flame_height):
            for x in range(flame_width):
                # Correctly map the flame grid to screen coordinates
                screen_y = height - flame_height + y - 1  # Flame starts at bottom of screen
                screen_x = (width - flame_width) // 2 + x

                if 0 <= screen_y < height and 0 <= screen_x < width:
                    # Get heat value at this cell
                    heat = self.flame_grid[y, x]

                    if heat > 0.01:  # Only draw if visible
                        # Map heat to character
                        char_idx = min(len(flame_chars) - 1, int(heat * len(flame_chars)))
                        char = flame_chars[char_idx]

                        # Color based on heat (red->orange->yellow)
                        h = 0.05 + (1.0 - heat) * 0.08  # Hue: red to yellow
                        s = 0.8 + heat * 0.2  # Saturation
                        v = 0.6 + heat * 0.4  # Value/brightness

                        # Get color and draw
                        color_attr = self.hsv_to_color_pair(stdscr, h, s, v)
                        stdscr.addstr(screen_y, screen_x, char, color_attr)

        # Draw logs at the bottom
        log_y = height - 1
        log_width = flame_width // 2
        log_start = (width - log_width) // 2

        for x in range(log_width):
            # Draw log with brown color
            brown_color = self.get_color_pair(stdscr, 139, 69, 19)  # RGB for brown
            stdscr.addstr(log_y, log_start + x, "▄", brown_color)

        # Add some red embers in the log
        for _ in range(5):
            ember_x = log_start + random.randint(0, log_width - 1)
            ember_color = self.hsv_to_color_pair(stdscr, 0.05, 1.0, 0.8)  # Bright red-orange
            stdscr.addstr(log_y, ember_x, "▄", ember_color)

    def handle_keypress(self, key):
        """Resize the flame grid via 'w'/'W' (width) and 'h'/'H' (height)."""
        if key == 'w':
            self.flame_width = min(200, self.flame_width + 5)
            self.setup()
            return True
        elif key == 'W':
            self.flame_width = max(20, self.flame_width - 5)
            self.setup()
            return True
        elif key == 'h':
            self.flame_height = min(50, self.flame_height + 2)
            self.setup()
            return True
        elif key == 'H':
            self.flame_height = max(10, self.flame_height - 2)
            self.setup()
            return True
        return False
self.julia_seed_history.append(self.julia_seed) 45 | 46 | def compute_fractal(self, width, height, spectrum, energy): 47 | # Create buffer for fractal data 48 | buffer = np.zeros((height, width), dtype=float) 49 | 50 | # Adjust complexity based on mid frequencies 51 | mid_energy = np.mean(spectrum[10:30]) 52 | self.pattern_complexity = 1.0 + mid_energy * 3.0 53 | current_max_iter = int(self.max_iterations * self.pattern_complexity) 54 | 55 | # Calculate aspect ratio correction 56 | aspect_ratio = height / width 57 | 58 | # Calculate zoom based on bass frequencies 59 | bass = np.mean(spectrum[:10]) * 2 60 | self.zoom_target = max(0.5, min(10.0, 2.5 - bass * 5.0)) 61 | self.zoom += (self.zoom_target - self.zoom) * self.zoom_speed 62 | 63 | # Affect rotation based on treble frequencies 64 | treble = np.mean(spectrum[30:]) 65 | self.rotation += treble * 0.1 66 | 67 | # Determine if a beat has occurred 68 | current_time = time.time() 69 | beat_detected = False 70 | big_beat_detected = False 71 | 72 | if energy > 0.2 and current_time - self.last_beat_time > 0.2: 73 | beat_detected = True 74 | self.last_beat_time = current_time 75 | self.beat_intensity = min(1.0, energy * 2) 76 | 77 | # Check for big beats (stronger and less frequent) 78 | if energy > 0.4 and current_time - self.last_big_beat > 1.5: 79 | big_beat_detected = True 80 | self.last_big_beat = current_time 81 | self.warp_active = True 82 | self.warp_progress = 0 83 | 84 | # Update beat intensity decay 85 | self.beat_intensity *= 0.9 86 | 87 | # Update warp effect 88 | if self.warp_active: 89 | self.warp_progress += 0.05 90 | if self.warp_progress >= 1.0: 91 | self.warp_active = False 92 | 93 | # After warp, toggle between Mandelbrot and Julia modes 94 | if big_beat_detected or random.random() < 0.3: 95 | self.julia_mode = not self.julia_mode 96 | 97 | # Sometimes change render mode on big beats 98 | if big_beat_detected and random.random() < 0.4: 99 | self.render_mode = (self.render_mode + 1) % 3 100 | 101 | # 
Update Julia seed based on bass and beat 102 | if self.julia_mode: 103 | # Create orbital motion with audio influence 104 | angle = current_time * (0.2 + bass * 0.5) 105 | radius = 0.7 + 0.2 * np.sin(current_time * 0.25) + bass * 0.3 106 | 107 | target_seed = complex( 108 | radius * np.cos(angle), 109 | radius * np.sin(angle) 110 | ) 111 | 112 | # Move seed toward target 113 | seed_diff = target_seed - self.julia_seed 114 | self.julia_seed += seed_diff * (0.1 + bass * 0.3) 115 | 116 | # Add current seed to history and remove oldest 117 | self.julia_seed_history.append(self.julia_seed) 118 | if len(self.julia_seed_history) > 20: 119 | self.julia_seed_history.pop(0) 120 | 121 | # Compute the fractal 122 | for y in range(height): 123 | for x in range(width): 124 | # Map pixel coordinates to complex plane 125 | real = (x - width/2) * (4.0 / width) * self.zoom + self.center_x 126 | imag = (y - height/2) * (4.0 / width) * aspect_ratio * self.zoom + self.center_y 127 | c = complex(real, imag) 128 | 129 | # Apply rotation if active 130 | if self.rotation != 0: 131 | rot = cmath.exp(complex(0, self.rotation)) 132 | c = c * rot 133 | 134 | # Apply warp effect 135 | if self.warp_active: 136 | # Spiral warp 137 | warp_factor = self.warp_progress * 2 * np.pi 138 | c = c * cmath.exp(complex(0, warp_factor * self.beat_intensity)) 139 | 140 | # Initialize z based on mode 141 | if self.julia_mode: 142 | z = c 143 | c = self.julia_seed 144 | else: # Mandelbrot mode 145 | z = complex(0, 0) 146 | 147 | # Iterate to determine if point is in set 148 | iteration = 0 149 | escape_radius = 4.0 + self.beat_intensity * 12.0 # Dynamic escape radius 150 | 151 | while abs(z) < escape_radius and iteration < current_max_iter: 152 | # Apply variations based on render mode 153 | if self.render_mode == 1: # Gradient mode 154 | z = z*z + c + complex(0, 0.01 * np.sin(current_time)) 155 | elif self.render_mode == 2: # Psychedelic mode 156 | # Fractal variations 157 | z = z*z*z/(z*z+c) + c 158 | else: # 
Normal mode 159 | z = z*z + c 160 | 161 | iteration += 1 162 | 163 | # If point escapes, calculate smoothed iteration count 164 | if iteration < current_max_iter: 165 | # Smooth coloring formula 166 | smooth_iteration = iteration + 1 - np.log(np.log(abs(z))) / np.log(2) 167 | buffer[y, x] = smooth_iteration / current_max_iter 168 | else: 169 | buffer[y, x] = 0 170 | 171 | return buffer, beat_detected 172 | 173 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 174 | # Compute the fractal field 175 | fractal_field, beat_detected = self.compute_fractal(width, height, spectrum, energy) 176 | 177 | # Get additional audio metrics 178 | bass = np.mean(spectrum[:10]) * 2 179 | mids = np.mean(spectrum[10:30]) 180 | treble = np.mean(spectrum[30:]) 181 | 182 | # Draw title with information 183 | mode_name = "Julia" if self.julia_mode else "Mandelbrot" 184 | render_names = ["Classic", "Gradient", "Psychedelic"] 185 | title = f"Fractal Universe | {mode_name} | {render_names[self.render_mode]} | Zoom: {1/self.zoom:.2f}x" 186 | title_color = self.hsv_to_color_pair(stdscr, hue_offset, 0.8, 1.0) 187 | stdscr.addstr(0, width//2 - len(title)//2, title, title_color | curses.A_BOLD) 188 | 189 | # Draw the fractal field 190 | for y in range(min(height-1, fractal_field.shape[0])): 191 | for x in range(min(width, fractal_field.shape[1])): 192 | value = fractal_field[y, x] 193 | 194 | if value == 0: # Inside the set 195 | if self.julia_mode: 196 | # For Julia sets, use inner coloring based on position 197 | inner_hue = ((x / width + y / height) / 2 + hue_offset + bass * 0.2) % 1.0 198 | inner_sat = 0.7 + 0.3 * mids 199 | inner_val = 0.5 + 0.5 * bass 200 | color = self.hsv_to_color_pair(stdscr, inner_hue, inner_sat, inner_val) 201 | stdscr.addstr(y+1, x, "●", color) 202 | else: 203 | # Void character for Mandelbrot inside 204 | stdscr.addstr(y+1, x, " ") 205 | else: 206 | # Outside the set - map to colors and characters based on iteration count 207 | 208 | # Adjust hue 
based on value and audio 209 | point_hue = (value + hue_offset + treble * 0.3) % 1.0 210 | 211 | # Dynamic saturation and value 212 | sat = 0.7 + 0.3 * max(0, min(1, value * 2)) 213 | val = 0.7 + 0.3 * value 214 | 215 | # Add beat pulse to brightness 216 | if beat_detected or self.beat_intensity > 0.1: 217 | val = min(1.0, val + self.beat_intensity * 0.3) 218 | 219 | # Get the color 220 | color = self.hsv_to_color_pair(stdscr, point_hue, sat, val) 221 | 222 | # Map value to character density 223 | char_idx = min(len(self.density_chars)-1, 224 | int(value * len(self.density_chars))) 225 | char = self.density_chars[char_idx] 226 | 227 | # Apply bold attribute for higher values 228 | attrs = curses.A_BOLD if value > 0.7 else 0 229 | 230 | # Draw the character 231 | try: 232 | stdscr.addstr(y+1, x, char, color | attrs) 233 | except curses.error: 234 | pass 235 | 236 | # If in Julia mode, draw the seed trail 237 | if self.julia_mode: 238 | for i, seed in enumerate(self.julia_seed_history): 239 | # Map complex position to screen coordinates 240 | screen_x = int(width/2 + (seed.real / self.zoom) * (width/4)) 241 | screen_y = int(height/2 + (seed.imag / self.zoom) * (width/4)) 242 | 243 | # Skip if out of bounds 244 | if 0 <= screen_x < width and 1 <= screen_y < height: 245 | # Fade opacity based on age 246 | opacity = i / len(self.julia_seed_history) 247 | trail_hue = (hue_offset + 0.5) % 1.0 # Complementary color 248 | trail_color = self.hsv_to_color_pair(stdscr, trail_hue, 1.0, opacity) 249 | 250 | # Use different characters for trail based on position 251 | trail_chars = "·•●★✧" 252 | char_idx = min(len(trail_chars)-1, int(opacity * len(trail_chars))) 253 | 254 | try: 255 | stdscr.addstr(screen_y, screen_x, trail_chars[char_idx], 256 | trail_color | curses.A_BOLD) 257 | except curses.error: 258 | pass 259 | 260 | # Draw controls help at bottom 261 | controls = "J: Toggle Mode | R: Reset | 1-3: Rendering Style | Arrow Keys: Navigate" 262 | stdscr.addstr(height-1, 
width//2 - len(controls)//2, controls, title_color) 263 | 264 | # Draw beat indicator 265 | if beat_detected or self.beat_intensity > 0.1: 266 | beat_size = int((width-2) * self.beat_intensity) 267 | beat_bar = "█" * beat_size 268 | beat_color = self.hsv_to_color_pair(stdscr, (hue_offset + 0.3) % 1.0, 1.0, 1.0) 269 | stdscr.addstr(height-2, 1, beat_bar, beat_color | curses.A_BOLD) 270 | 271 | # Create corner effects on big beats or warp transitions 272 | if self.warp_active: 273 | corner_chars = ["╔", "╗", "╚", "╝"] 274 | corners = [(1, 1), (1, width-2), (height-2, 1), (height-2, width-2)] 275 | for i, (y, x) in enumerate(corners): 276 | if 0 <= y < height and 0 <= x < width: 277 | corner_color = self.hsv_to_color_pair(stdscr, 278 | (hue_offset + self.warp_progress) % 1.0, 279 | 1.0, 1.0) 280 | try: 281 | stdscr.addstr(y, x, corner_chars[i], corner_color | curses.A_BOLD) 282 | except curses.error: 283 | pass 284 | 285 | def handle_keypress(self, key): 286 | # Toggle fractal mode 287 | if key == 'j' or key == 'J': 288 | self.julia_mode = not self.julia_mode 289 | return True 290 | 291 | # Reset view 292 | elif key == 'r' or key == 'R': 293 | self.zoom = 2.5 294 | self.zoom_target = 2.5 295 | self.center_x = 0 296 | self.center_y = 0 297 | self.rotation = 0 298 | return True 299 | 300 | # Change rendering style 301 | elif key in ['1', '2', '3']: 302 | self.render_mode = int(key) - 1 303 | return True 304 | 305 | # Navigation 306 | elif key == 'KEY_UP': 307 | self.center_y -= 0.1 * self.zoom 308 | return True 309 | elif key == 'KEY_DOWN': 310 | self.center_y += 0.1 * self.zoom 311 | return True 312 | elif key == 'KEY_LEFT': 313 | self.center_x -= 0.1 * self.zoom 314 | return True 315 | elif key == 'KEY_RIGHT': 316 | self.center_x += 0.1 * self.zoom 317 | return True 318 | 319 | # Zoom controls 320 | elif key == '+' or key == '=': 321 | self.zoom_target = max(0.01, self.zoom_target / 1.2) 322 | return True 323 | elif key == '-': 324 | self.zoom_target = min(10.0, 
self.zoom_target * 1.2) 325 | return True 326 | 327 | return False -------------------------------------------------------------------------------- /visualizers/fractal_universe_lite.py: -------------------------------------------------------------------------------- 1 | # visualizers/fractal_universe_lite.py 2 | import curses 3 | import numpy as np 4 | import cmath 5 | import random 6 | import time 7 | from visualizer_base import VisualizerBase 8 | 9 | class FractalUniverseLiteVisualizer(VisualizerBase): 10 | def __init__(self): 11 | super().__init__(name="Fractal Universe Lite") 12 | # Core fractal parameters (reduced for low-end devices) 13 | self.max_iterations = 15 # Lower detail level 14 | self.zoom = 2.5 # Starting zoom level 15 | self.center_x = 0 16 | self.center_y = 0 17 | self.julia_mode = False # Start in Mandelbrot mode 18 | 19 | # Audio influence parameters 20 | self.zoom_target = 2.5 21 | self.zoom_speed = 0.03 22 | self.julia_seed = complex(-0.8, 0.156) 23 | 24 | # Animation state (simplified) 25 | self.beat_intensity = 0 26 | self.last_beat_time = 0 27 | 28 | # Rendering characters (reduced set) 29 | self.density_chars = " .,:;+*#@" 30 | self.render_mode = 0 # 0=normal, 1=psychedelic 31 | 32 | # Performance optimization 33 | self.downsample = 2 # Compute at lower resolution 34 | self.skip_frames = 0 # For skipping computation on some frames 35 | self.frame_counter = 0 36 | self.fractal_buffer = None # Cache to store computed fractal 37 | 38 | def setup(self): 39 | self.start_time = time.time() 40 | 41 | def compute_fractal(self, width, height, spectrum, energy): 42 | # Increment frame counter 43 | self.frame_counter += 1 44 | 45 | # Skip computation on some frames to improve performance 46 | if self.fractal_buffer is not None and self.frame_counter % (self.skip_frames + 1) != 0: 47 | return self.fractal_buffer, False 48 | 49 | # Create buffer for fractal data (at reduced resolution) 50 | ds_width = width // self.downsample 51 | ds_height = height 
// self.downsample 52 | buffer = np.zeros((ds_height, ds_width), dtype=float) 53 | 54 | # Adjust iterations based on mid frequencies (simplified) 55 | mid_energy = np.mean(spectrum[5:15]) * 2 # Use fewer bands 56 | current_max_iter = int(self.max_iterations * (1.0 + mid_energy)) 57 | 58 | # Calculate aspect ratio correction 59 | aspect_ratio = ds_height / ds_width 60 | 61 | # Calculate zoom based on bass (simplified) 62 | bass = np.mean(spectrum[:5]) * 2 # Use fewer bands 63 | self.zoom_target = max(0.8, min(5.0, 2.5 - bass * 3.0)) 64 | self.zoom += (self.zoom_target - self.zoom) * self.zoom_speed 65 | 66 | # Determine if a beat has occurred (simplified) 67 | current_time = time.time() 68 | beat_detected = False 69 | 70 | if energy > 0.25 and current_time - self.last_beat_time > 0.3: 71 | beat_detected = True 72 | self.last_beat_time = current_time 73 | self.beat_intensity = min(1.0, energy * 1.5) 74 | 75 | # Toggle modes occasionally on beats 76 | if self.julia_mode and random.random() < 0.2: 77 | self.julia_mode = False 78 | elif not self.julia_mode and random.random() < 0.1: 79 | self.julia_mode = True 80 | 81 | # Update Julia seed (simplified) 82 | angle = current_time * 0.2 83 | radius = 0.7 + bass * 0.2 84 | self.julia_seed = complex( 85 | radius * np.cos(angle), 86 | radius * np.sin(angle) 87 | ) 88 | 89 | # Update beat intensity decay 90 | self.beat_intensity *= 0.9 91 | 92 | # Compute the fractal - use step size to skip pixels for better performance 93 | step_size = 1 # Can be increased to 2 or 3 on very low-end devices 94 | for y in range(0, ds_height, step_size): 95 | for x in range(0, ds_width, step_size): 96 | # Map pixel coordinates to complex plane 97 | real = (x - ds_width/2) * (4.0 / ds_width) * self.zoom + self.center_x 98 | imag = (y - ds_height/2) * (4.0 / ds_width) * aspect_ratio * self.zoom + self.center_y 99 | c = complex(real, imag) 100 | 101 | # Initialize z based on mode 102 | if self.julia_mode: 103 | z = c 104 | c = self.julia_seed 105 | 
else: # Mandelbrot mode 106 | z = complex(0, 0) 107 | 108 | # Iterate to determine if point is in set 109 | iteration = 0 110 | while abs(z) < 4.0 and iteration < current_max_iter: 111 | # Apply simplified formula based on render mode 112 | if self.render_mode == 1: # Psychedelic mode 113 | z = z*z*z + c 114 | else: # Normal mode 115 | z = z*z + c 116 | 117 | iteration += 1 118 | 119 | # Calculate color value (simplified) 120 | if iteration < current_max_iter: 121 | value = iteration / current_max_iter 122 | buffer[y, x] = value 123 | else: 124 | buffer[y, x] = 0 125 | 126 | # Fill in skipped pixels by copying 127 | if step_size > 1: 128 | for dy in range(step_size): 129 | for dx in range(step_size): 130 | ny, nx = y + dy, x + dx 131 | if ny < ds_height and nx < ds_width: 132 | buffer[ny, nx] = buffer[y, x] 133 | 134 | # Store buffer for frame skipping 135 | self.fractal_buffer = (buffer, beat_detected) 136 | return self.fractal_buffer 137 | 138 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 139 | # Compute the fractal field 140 | fractal_field, beat_detected = self.compute_fractal(width, height, spectrum, energy) 141 | 142 | # Get audio metrics (simplified) 143 | bass = np.mean(spectrum[:5]) * 2 144 | 145 | # Draw title with information 146 | mode_name = "Julia" if self.julia_mode else "Mandelbrot" 147 | title = f"Fractal Universe Lite | {mode_name} | Zoom: {1/self.zoom:.1f}x" 148 | title_color = self.hsv_to_color_pair(stdscr, hue_offset, 0.8, 1.0) 149 | try: 150 | stdscr.addstr(0, max(0, width//2 - len(title)//2), title, title_color | curses.A_BOLD) 151 | except curses.error: 152 | pass 153 | 154 | # Draw the fractal field (upscale from lower resolution) 155 | ds_height, ds_width = fractal_field.shape 156 | 157 | # Limit drawing to alternate lines for better performance 158 | draw_step = 1 if height < 30 else 1 # Increase step for very small terminals 159 | 160 | for y in range(0, height-1, draw_step): 161 | # Map to downsampled 
coordinates 162 | ds_y = min(ds_height-1, y // self.downsample) 163 | 164 | for x in range(0, width, draw_step): 165 | # Map to downsampled coordinates 166 | ds_x = min(ds_width-1, x // self.downsample) 167 | 168 | value = fractal_field[ds_y, ds_x] 169 | 170 | if value == 0: # Inside the set 171 | if self.julia_mode: 172 | # Simple coloring for Julia sets 173 | inner_hue = (hue_offset + bass * 0.2) % 1.0 174 | color = self.hsv_to_color_pair(stdscr, inner_hue, 0.7, 0.5) 175 | try: 176 | stdscr.addstr(y+1, x, "●", color) 177 | except curses.error: 178 | pass 179 | else: 180 | # Simple black for Mandelbrot inside 181 | try: 182 | stdscr.addstr(y+1, x, " ") 183 | except curses.error: 184 | pass 185 | else: 186 | # Outside the set - map to colors and characters 187 | 188 | # Adjust hue based on value and audio (simplified) 189 | point_hue = (value + hue_offset) % 1.0 190 | 191 | # Add beat pulse to brightness 192 | val = 0.7 + 0.3 * value 193 | if beat_detected or self.beat_intensity > 0.1: 194 | val = min(1.0, val + self.beat_intensity * 0.3) 195 | 196 | # Get the color 197 | color = self.hsv_to_color_pair(stdscr, point_hue, 0.8, val) 198 | 199 | # Map value to character density (simplified) 200 | char_idx = min(len(self.density_chars)-1, 201 | int(value * len(self.density_chars))) 202 | char = self.density_chars[char_idx] 203 | 204 | # Draw the character 205 | try: 206 | stdscr.addstr(y+1, x, char, color) 207 | except curses.error: 208 | pass 209 | 210 | # Draw simple controls help at bottom 211 | controls = "J: Toggle Mode | R: Reset | Arrow Keys: Navigate" 212 | try: 213 | stdscr.addstr(height-1, max(0, width//2 - len(controls)//2), controls, title_color) 214 | except curses.error: 215 | pass 216 | 217 | def handle_keypress(self, key): 218 | # Toggle fractal mode 219 | if key == 'j' or key == 'J': 220 | self.julia_mode = not self.julia_mode 221 | return True 222 | 223 | # Reset view 224 | elif key == 'r' or key == 'R': 225 | self.zoom = 2.5 226 | self.zoom_target = 
2.5 227 | self.center_x = 0 228 | self.center_y = 0 229 | return True 230 | 231 | # Change rendering style 232 | elif key == '1': 233 | self.render_mode = 0 234 | return True 235 | elif key == '2': 236 | self.render_mode = 1 237 | return True 238 | 239 | # Navigation 240 | elif key == 'KEY_UP': 241 | self.center_y -= 0.1 * self.zoom 242 | return True 243 | elif key == 'KEY_DOWN': 244 | self.center_y += 0.1 * self.zoom 245 | return True 246 | elif key == 'KEY_LEFT': 247 | self.center_x -= 0.1 * self.zoom 248 | return True 249 | elif key == 'KEY_RIGHT': 250 | self.center_x += 0.1 * self.zoom 251 | return True 252 | 253 | # Zoom controls 254 | elif key == '+' or key == '=': 255 | self.zoom_target = max(0.1, self.zoom_target / 1.2) 256 | return True 257 | elif key == '-': 258 | self.zoom_target = min(8.0, self.zoom_target * 1.2) 259 | return True 260 | 261 | # Performance options 262 | elif key == '9': 263 | # Decrease quality for better performance 264 | self.downsample = min(4, self.downsample + 1) 265 | self.skip_frames = min(3, self.skip_frames + 1) 266 | self.fractal_buffer = None # Clear cache to force redraw 267 | return True 268 | elif key == '0': 269 | # Increase quality at expense of performance 270 | self.downsample = max(1, self.downsample - 1) 271 | self.skip_frames = max(0, self.skip_frames - 1) 272 | self.fractal_buffer = None # Clear cache to force redraw 273 | return True 274 | 275 | return False -------------------------------------------------------------------------------- /visualizers/guitar_tuner.py: -------------------------------------------------------------------------------- 1 | # visualizers/guitar_tuner.py 2 | import curses 3 | import math 4 | import numpy as np 5 | from visualizer_base import VisualizerBase 6 | import time # Import time for the pulsing text example, if needed - though not used in this tuner 7 | 8 | class GuitarTunerVisualizer(VisualizerBase): 9 | def __init__(self): 10 | super().__init__(name="Guitar Tuner") 11 | # 
Standard guitar tuning frequencies (E2, A2, D3, G3, B3, E4) 12 | self.guitar_strings = { 13 | 'E2': 82.41, 14 | 'A2': 110.00, 15 | 'D3': 146.83, 16 | 'G3': 196.00, 17 | 'B3': 246.94, 18 | 'E4': 329.63 19 | } 20 | self.selected_string = 'E2' # Default selected string 21 | self.string_names = list(self.guitar_strings.keys()) 22 | self.string_index = 0 23 | self.needle_position = 0.5 # Centered position 24 | self.needle_momentum = 0 25 | self.accuracy = 0 # How close to target frequency (0-1) 26 | self.animation_frames = 0 27 | 28 | def setup(self): 29 | pass 30 | 31 | def get_string_frequency(self, spectrum, target_freq): 32 | """Find the strongest frequency near the target frequency""" 33 | # Convert target frequency to FFT bin index 34 | # Crude approximation, assuming sample rate 44100 and chunk size used in main.py 35 | # bin = freq * N / sample_rate 36 | # N = self.CHUNK from main.py (2048) 37 | # sample_rate = self.RATE from main.py (44100) 38 | # This might need access to main.py's CHUNK/RATE or pass them during init/draw 39 | # For now, let's make a more educated guess based on typical params 40 | N = 2048 # Assuming CHUNK size from main.py - might need adjustment if this changes 41 | sample_rate = 44100 # Assuming RATE from main.py 42 | spectrum_length = len(spectrum) # Should be N // 2 43 | 44 | if spectrum_length == 0: return 0, 0 # Avoid division by zero 45 | 46 | bin_width = sample_rate / N # Frequency represented by each bin 47 | 48 | if bin_width == 0: return 0, 0 # Avoid division by zero 49 | 50 | # Find the bin closest to our target frequency 51 | target_bin = int(target_freq / bin_width) 52 | 53 | # Ensure target_bin is within valid range 54 | if target_bin >= spectrum_length or target_bin < 0: 55 | # Fallback: search the lower spectrum if target is too high/low for FFT resolution 56 | target_bin = min(max(0, target_bin), spectrum_length - 1) 57 | # return 0, 0 # Or handle this case differently 58 | 59 | # Search around this bin for the strongest 
peak 60 | # Search range should be relative to target freq (e.g., +/- 5%) 61 | search_radius_hz = target_freq * 0.10 # Search +/- 10% of target frequency 62 | search_radius_bins = int(search_radius_hz / bin_width) 63 | search_radius_bins = max(1, search_radius_bins) # Minimum search radius of 1 bin 64 | 65 | start_bin = max(0, target_bin - search_radius_bins) 66 | end_bin = min(spectrum_length - 1, target_bin + search_radius_bins) 67 | 68 | # Find the strongest bin in the range, weighted towards target bin if magnitudes are similar 69 | strongest_bin = target_bin # Default to target bin 70 | max_magnitude = 0 71 | if start_bin <= end_bin and 0 <= start_bin < spectrum_length: # Check range validity 72 | max_magnitude = spectrum[start_bin] 73 | strongest_bin = start_bin 74 | for i in range(start_bin, end_bin + 1): 75 | if spectrum[i] > max_magnitude: 76 | max_magnitude = spectrum[i] 77 | strongest_bin = i 78 | 79 | # Convert bin back to frequency 80 | detected_freq = strongest_bin * bin_width 81 | 82 | # Calculate how close we are to the target 83 | # Use cents for more musical accuracy (100 cents per semitone) 84 | if detected_freq <= 0 or target_freq <= 0: 85 | accuracy = 0 86 | freq_diff_cents = 0 87 | else: 88 | freq_diff_cents = 1200 * math.log2(detected_freq / target_freq) 89 | 90 | # Define accuracy based on cents deviation (e.g., +/- 10 cents is good) 91 | max_diff_cents = 50 # Maximum deviation allowed (50 cents = quarter tone) 92 | accuracy = max(0, 1 - abs(freq_diff_cents / max_diff_cents)) 93 | 94 | # Adjust target position based on cents difference 95 | # Map -max_diff_cents to 0, 0 to 0.5, +max_diff_cents to 1 96 | target_position = 0.5 + (freq_diff_cents / (2 * max_diff_cents)) 97 | target_position = max(0, min(1, target_position)) # Clamp between 0 and 1 98 | 99 | return detected_freq, accuracy, target_position 100 | 101 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 102 | # Update animation counter 103 | self.animation_frames 
+= 1 104 | 105 | # Get current guitar string and its target frequency 106 | current_string = self.string_names[self.string_index] 107 | target_freq = self.guitar_strings[current_string] 108 | 109 | # Get current detected frequency and accuracy 110 | detected_freq, accuracy, target_position = self.get_string_frequency(spectrum, target_freq) 111 | self.accuracy = accuracy 112 | 113 | # Add some "physics" to the needle movement 114 | # Target position is now calculated in get_string_frequency 115 | force = (target_position - self.needle_position) * 0.2 # Spring force 116 | damping = self.needle_momentum * -0.2 # Damping force 117 | self.needle_momentum += force + damping 118 | self.needle_momentum *= 0.85 # Air resistance / friction 119 | self.needle_position += self.needle_momentum 120 | 121 | # Clamp needle position after momentum update 122 | self.needle_position = max(0.05, min(0.95, self.needle_position)) # Limit movement slightly inside the meter 123 | 124 | # Draw tuner background 125 | middle_y = height // 2 126 | min_tuner_height = 16 # Ensure minimum height for all elements 127 | 128 | # Adaptive tuner height based on screen, but with a minimum 129 | tuner_height = max(min_tuner_height, height // 3 * 2) 130 | tuner_height = min(tuner_height, height - 6) # Ensure space for title/instructions 131 | 132 | # Center tuner vertically if possible 133 | tuner_start_y = max(3, (height - tuner_height) // 2) # Ensure space below title 134 | 135 | # Tuner width 136 | tuner_width = min(width - 4, 80) # Ensure padding 137 | tuner_start_x = (width - tuner_width) // 2 138 | 139 | # Ensure tuner fits 140 | if tuner_height < min_tuner_height or tuner_width < 20: 141 | stdscr.addstr(0, 0, "Terminal too small!") 142 | return # Stop drawing if too small 143 | 144 | # --- Draw UI Elements --- 145 | 146 | # Draw tuner name and instructions (check bounds) 147 | title = f"《 GUITAR TUNER - {current_string} ({target_freq:.2f} Hz) 》" 148 | instructions = "← → : Change String" # Removed 
Space: Auto-tune as it wasn't functional 149 | title_x = (width - len(title)) // 2 150 | instr_x = (width - len(instructions)) // 2 151 | if 2 < height and 0 <= title_x < width and title_x + len(title) <= width: 152 | stdscr.addstr(1, title_x, title, curses.A_BOLD) # Move title down slightly 153 | if height - 2 >= 0 and 0 <= instr_x < width and instr_x + len(instructions) <= width: 154 | stdscr.addstr(height - 2, instr_x, instructions) 155 | 156 | # Draw tuner outline 157 | meter_y = tuner_start_y + tuner_height // 4 # Position meter higher 158 | freq_y = meter_y + 3 159 | tune_msg_y = freq_y + 2 160 | string_display_start_y = tune_msg_y + 2 # Y pos where strings drawing starts 161 | 162 | # Top/Bottom borders 163 | if tuner_start_y < height and tuner_start_x + tuner_width <= width: 164 | stdscr.addstr(tuner_start_y, tuner_start_x, "┌" + "─" * (tuner_width - 2) + "┐") 165 | if tuner_start_y + tuner_height -1 < height and tuner_start_x + tuner_width <= width: 166 | stdscr.addstr(tuner_start_y + tuner_height - 1, tuner_start_x, "└" + "─" * (tuner_width - 2) + "┘") 167 | 168 | # Left/Right borders 169 | for y in range(tuner_start_y + 1, tuner_start_y + tuner_height - 1): 170 | if 0 <= y < height: 171 | if 0 <= tuner_start_x < width: 172 | stdscr.addch(y, tuner_start_x, "│") 173 | if 0 <= tuner_start_x + tuner_width - 1 < width: 174 | stdscr.addch(y, tuner_start_x + tuner_width - 1, "│") 175 | 176 | # Draw tick marks for the meter 177 | meter_width = tuner_width - 6 # Width of the scale inside the box 178 | meter_start_x = tuner_start_x + 3 179 | 180 | if 0 <= meter_y < height and 0 <= meter_y + 2 < height: # Check if meter area fits vertically 181 | for i in range(meter_width): 182 | x_pos = meter_start_x + i 183 | if not (0 <= x_pos < width): continue # Skip if horizontally out of bounds 184 | 185 | x_ratio = i / (meter_width -1) if meter_width > 1 else 0.5 186 | 187 | # Determine tick character and labels 188 | tick = "╌" 189 | label = "" 190 | label_y = meter_y + 2 
191 | label_offset = 0 192 | is_center_mark = abs(x_ratio - 0.5) < 0.01 193 | is_quarter_mark = abs(x_ratio - 0.25) < 0.01 or abs(x_ratio - 0.75) < 0.01 194 | is_end_mark = abs(x_ratio - 0) < 0.01 or abs(x_ratio - 1) < 0.01 195 | 196 | if is_center_mark: 197 | tick = "┼" 198 | label = "IN TUNE" 199 | label_offset = -len(label) // 2 200 | elif is_quarter_mark: 201 | tick = "┴" 202 | elif is_end_mark: 203 | tick = "┴" 204 | label = "FLAT" if x_ratio < 0.1 else "SHARP" 205 | label_offset = -len(label) // 2 if x_ratio < 0.1 else -len(label)//2 # Adjust label pos slightly 206 | 207 | 208 | # Color the ticks based on position (relative to center 0.5) 209 | if abs(x_ratio - 0.5) < 0.05: # In tune - green zone (5%) 210 | color = self.hsv_to_color_pair(stdscr, 0.33, 0.8, 0.9) 211 | elif abs(x_ratio - 0.5) < 0.15: # Slightly off - yellow zone (15%) 212 | color = self.hsv_to_color_pair(stdscr, 0.15, 0.8, 0.9) 213 | else: # Off - red zone 214 | color = self.hsv_to_color_pair(stdscr, 0.0, 0.8, 0.9) 215 | 216 | stdscr.addstr(meter_y, x_pos, tick, color) 217 | if label: 218 | label_x = x_pos + label_offset 219 | if 0 <= label_x < width and label_x + len(label) <= width: 220 | stdscr.addstr(label_y, label_x, label) 221 | 222 | # Draw frequency readout 223 | freq_str = f"Detected: {detected_freq:.2f} Hz" 224 | freq_x = (width - len(freq_str)) // 2 225 | if 0 <= freq_y < height and 0 <= freq_x < width and freq_x + len(freq_str) <= width: 226 | stdscr.addstr(freq_y, freq_x, freq_str) 227 | 228 | # Draw the needle 229 | needle_meter_width = tuner_width - 7 # Width available for needle travel 230 | needle_x = int(meter_start_x + needle_meter_width * self.needle_position) 231 | needle_x = max(meter_start_x, min(meter_start_x + needle_meter_width -1, needle_x)) # Clamp needle x 232 | 233 | # Determine needle color based on accuracy 234 | if accuracy > 0.9: # In tune - green (+/- 5 cents) 235 | needle_color = self.hsv_to_color_pair(stdscr, 0.33, 0.9, 1.0) 236 | elif accuracy > 0.7: # 
Close - yellow (+/- 15 cents) 237 | needle_color = self.hsv_to_color_pair(stdscr, 0.15, 0.9, 1.0) 238 | else: # Off - red 239 | needle_color = self.hsv_to_color_pair(stdscr, 0.0, 0.9, 1.0) 240 | 241 | for i in range(1, 5): # Draw needle lines 242 | needle_char = "█" if i < 4 else "▼" 243 | needle_y = meter_y - i 244 | if 0 <= needle_y < height and 0 <= needle_x < width: 245 | stdscr.addstr(needle_y, needle_x, needle_char, needle_color | curses.A_BOLD) 246 | 247 | # Draw tuning indicator if in tune 248 | if accuracy > 0.9: # Threshold for "IN TUNE" display 249 | tune_msg = "•• IN TUNE ••" 250 | blink = (self.animation_frames // 6) % 2 == 0 # Slower blink 251 | tune_msg_x = (width - len(tune_msg)) // 2 252 | if 0 <= tune_msg_y < height and 0 <= tune_msg_x < width and tune_msg_x + len(tune_msg) <= width: 253 | color = self.hsv_to_color_pair(stdscr, 0.33, 0.9, 1.0) 254 | attr = curses.A_BOLD | (curses.A_BLINK if blink else 0) 255 | stdscr.addstr(tune_msg_y, tune_msg_x, tune_msg, color | attr) 256 | 257 | # Draw guitar strings visualization 258 | string_display_height = tuner_start_y + tuner_height - string_display_start_y - 1 259 | num_strings = len(self.guitar_strings) 260 | vertical_spacing = 2 # Lines between strings 261 | 262 | # Check if there's enough vertical space to draw strings 263 | if string_display_height >= num_strings * vertical_spacing: 264 | string_length = tuner_width - 4 265 | string_start_x = tuner_start_x + 2 266 | 267 | for i, string_name in enumerate(self.guitar_strings.keys()): 268 | is_selected = string_name == current_string 269 | string_base_y = string_display_start_y + i * vertical_spacing 270 | 271 | # Ensure base string position is valid before drawing anything for this string 272 | if not (0 <= string_base_y < height): 273 | continue 274 | 275 | # Calculate string thickness 276 | string_char = "╌" # Thin (B, E4) 277 | if string_name in ['D3', 'G3']: string_char = "─" # Medium 278 | if string_name in ['E2', 'A2']: string_char = "═" # Thick 
279 | 280 | # Draw string name (check bounds) 281 | name_x = string_start_x - 3 # Adjusted position 282 | name_color = curses.A_BOLD if is_selected else curses.A_NORMAL 283 | if 0 <= name_x < width and name_x + len(string_name) <= width: 284 | stdscr.addstr(string_base_y, name_x, string_name, name_color) 285 | 286 | # Draw the string itself (vibrating or straight) 287 | if is_selected and energy > 0.01: # Only vibrate selected string if there's sound 288 | # Adjust vibration based on how far off tune and overall energy 289 | cents_off = abs(1200 * math.log2(detected_freq / target_freq)) if detected_freq > 0 else max_diff_cents 290 | off_tune_factor = min(1, cents_off / 20) # Max vibration when > 20 cents off 291 | 292 | vibration_amplitude = (off_tune_factor * 0.5 + energy * 0.5) * 2.0 # Mix off-tune and energy 293 | vibration_amplitude = min(1.5, vibration_amplitude) # Limit max amplitude 294 | wave_speed = 0.15 + off_tune_factor * 0.2 # Faster wave if more off-tune 295 | 296 | # Determine color based on accuracy 297 | if accuracy > 0.9: color = self.hsv_to_color_pair(stdscr, 0.33, 0.8, 0.9) 298 | elif accuracy > 0.7: color = self.hsv_to_color_pair(stdscr, 0.15, 0.8, 0.9) 299 | else: color = self.hsv_to_color_pair(stdscr, 0.0, 0.8, 0.9) 300 | 301 | # Draw vibrating string (sine wave) 302 | if 0 <= string_start_x < width and string_start_x + string_length <= width: # Check horizontal bounds for the string area 303 | phase = self.animation_frames * wave_speed 304 | for x in range(string_length): 305 | x_pos = string_start_x + x 306 | x_ratio = x / (string_length -1) if string_length > 1 else 0.5 307 | 308 | # Standing wave pattern 309 | wave = math.sin(x_ratio * math.pi * 2 + phase) # Simple sine wave 310 | y_offset = round(wave * vibration_amplitude) 311 | 312 | y_pos = string_base_y + y_offset 313 | if 0 <= y_pos < height: # Check y bound for this specific char 314 | stdscr.addstr(y_pos, x_pos, string_char, color | curses.A_BOLD) 315 | 316 | else: 317 | # Draw 
straight string (not vibrating or not selected) 318 | color = curses.A_DIM # Use DIM for non-selected strings 319 | y_pos = string_base_y 320 | 321 | # --- Boundary Check FIX applied here --- 322 | # Check if the line is within vertical bounds AND horizontal bounds 323 | if 0 <= y_pos < height and 0 <= string_start_x < width and string_start_x + string_length <= width: 324 | stdscr.addstr(y_pos, string_start_x, string_char * string_length, color) 325 | # --- End FIX --- 326 | 327 | 328 | def handle_keypress(self, key): 329 | if key == curses.KEY_RIGHT or key == 'd' or key == 'l': 330 | # Move to next string 331 | self.string_index = (self.string_index + 1) % len(self.string_names) 332 | self.selected_string = self.string_names[self.string_index] 333 | self.needle_momentum = 0 # Reset needle momentum when changing strings 334 | self.needle_position = 0.5 335 | return True 336 | elif key == curses.KEY_LEFT or key == 'a' or key == 'h': 337 | # Move to previous string 338 | self.string_index = (self.string_index - 1 + len(self.string_names)) % len(self.string_names) 339 | self.selected_string = self.string_names[self.string_index] 340 | self.needle_momentum = 0 # Reset needle momentum 341 | self.needle_position = 0.5 342 | return True 343 | # Removed space key binding as 'auto-tune' wasn't implemented 344 | # elif key == ' ': 345 | # # Placeholder for a potential auto-tune feature trigger 346 | # # self.needle_position = 0.5 # Simulate tuning instantly 347 | # # self.accuracy = 1.0 348 | # return True # Consume the keypress even if feature isn't there 349 | return False -------------------------------------------------------------------------------- /visualizers/matrix_rain.py: -------------------------------------------------------------------------------- 1 | # visualizers/matrix_rain.py 2 | import curses 3 | import random 4 | import numpy as np 5 | from visualizer_base import VisualizerBase 6 | 7 | class MatrixRainVisualizer(VisualizerBase): 8 | def __init__(self): 
class MatrixRainVisualizer(VisualizerBase):
    """Matrix-style digital rain driven by the audio spectrum.

    Drops spawn with a probability weighted by spectral amplitude, fall
    at a speed modulated by the frequency bin they were born from, and
    the scene flashes on detected beats.
    """

    def __init__(self):
        super().__init__(name="Matrix Rain")
        # Configuration parameters (adjustable at runtime via keypresses)
        self.density = 0.2           # Controls density of drops (0.0-1.0)
        self.speed_factor = 1.0      # Base speed multiplier
        self.length_factor = 1.0     # Length of trails
        self.spawn_threshold = 0.05  # Minimum energy to spawn drops

        # Internal state
        self.drops = []  # List of active raindrop trails
        # ASCII printables plus the katakana block for the classic Matrix look
        self.chars = "".join([chr(i) for i in range(33, 127)] + [chr(i) for i in range(0x30A0, 0x30FF)])
        self.last_beat = 0
        self.beat_active = False
        self.beat_cooldown = 0

    def setup(self):
        # Re-seed the RNG so every run gets a different rain pattern
        random.seed()

    def spawn_drops(self, count, width, spectrum):
        """Spawn up to *count* new drops based on the audio spectrum.

        A bin from the lower half of the spectrum is sampled per attempt;
        higher-amplitude bins are more likely to actually produce a drop,
        and each drop remembers its bin so it can react to it later.
        """
        for _ in range(count):
            # Determine position - influenced by spectrum
            spectrum_pos = min(len(spectrum) - 1, random.randint(0, len(spectrum) // 2))
            spawn_weight = spectrum[spectrum_pos] * 3.0

            if random.random() < spawn_weight:
                x = random.randint(0, width - 1)
                # Create a new drop
                drop = {
                    'x': x,
                    'y': 0,
                    'speed': random.uniform(0.2, 0.8) * self.speed_factor,
                    'length': random.randint(3, 20) * self.length_factor,
                    'char_idx': 0,
                    'chars': ''.join(random.choice(self.chars) for _ in range(random.randint(5, 15))),
                    'bright': random.random() < 0.2,  # Occasional bright character
                    'freq_index': spectrum_pos,  # Frequency bin this drop responds to
                }
                self.drops.append(drop)

    def draw(self, stdscr, spectrum, height, width, energy, hue_offset):
        """Render one frame: detect beats, spawn, update, draw and cull drops."""
        # Handle beat detection for dramatic effects
        beat_detected = False
        if energy > 0.2 and self.beat_cooldown == 0:
            self.beat_active = True
            self.beat_cooldown = 5
            beat_detected = True
        elif self.beat_cooldown > 0:
            self.beat_cooldown -= 1

        # Bass energy modulates the drop-head hue below.
        bass = np.mean(spectrum[:10]) * 2

        # Base spawn rate on overall energy and beat detection
        base_spawn_rate = int(width * 0.15 * self.density)
        spawn_boost = 5 if beat_detected else 0
        spawn_count = base_spawn_rate + int(energy * 15) + spawn_boost

        # Spawn new drops
        if energy > self.spawn_threshold:
            self.spawn_drops(spawn_count, width, spectrum)

        # Update and draw existing drops
        new_drops = []
        for drop in self.drops:
            # Update drop position - speed affected by corresponding frequency
            freq_amplitude = spectrum[drop['freq_index']] * 3.0
            drop['y'] += drop['speed'] * (1 + freq_amplitude)

            # Periodically change the character
            if random.random() < 0.1:
                drop['char_idx'] = (drop['char_idx'] + 1) % len(drop['chars'])

            # Cull drops that fell off the bottom of the screen
            if drop['y'] >= height:
                continue

            # Calculate how much of the trail to draw
            trail_start = int(max(0, drop['y'] - drop['length']))
            trail_end = int(drop['y'])

            # Draw the trail
            for y in range(trail_start, trail_end + 1):
                if 0 <= y < height:
                    # Closer to head = brighter.  BUGFIX: clamp to [0, 1];
                    # integer truncation of trail_start could push the raw
                    # ratio slightly negative, yielding a negative saturation.
                    proximity = max(0.0, min(1.0, 1.0 - (trail_end - y) / drop['length']))

                    # Get a character from the drop's character set
                    char_position = (drop['char_idx'] + (y % len(drop['chars']))) % len(drop['chars'])
                    char = drop['chars'][char_position]

                    # Determine color based on position and audio frequencies
                    if y == trail_end:  # Head of the drop
                        # Head color influenced by beat and bass
                        hue = (drop['x'] / width + hue_offset + bass * 0.3) % 1.0
                        saturation = 0.7 if drop['bright'] else 0.5
                        value = 1.0 if drop['bright'] else 0.7
                        attr = curses.A_BOLD
                    else:
                        # Trail color with gradient
                        hue = (drop['x'] / width + hue_offset) % 1.0
                        saturation = 0.7 * proximity
                        value = max(0.4, proximity) * 0.8
                        attr = 0

                    # Adjust color based on beat detection
                    if self.beat_active and drop['bright']:
                        saturation = min(1.0, saturation + 0.3)
                        value = min(1.0, value + 0.3)

                    # Apply color and draw character
                    color = self.hsv_to_color_pair(stdscr, hue, saturation, value)
                    try:
                        stdscr.addstr(y, drop['x'], char, color | attr)
                    except curses.error:
                        pass  # Ignore errors from writing at screen edge

            # Keep this drop for next frame
            new_drops.append(drop)

        # Replace drops list with updated list
        self.drops = new_drops

        # Draw info text.  BUGFIX: this was the only unguarded curses write
        # in the class; on a narrow terminal (or whenever the text reaches
        # the bottom-right cell, which always raises in curses) it would
        # crash the visualizer.  Truncate to the row and swallow the error.
        info_text = f"Drops: {len(self.drops)} | Energy: {energy:.2f} | Bass: {bass:.2f}"
        color = self.hsv_to_color_pair(stdscr, hue_offset, 0.7, 1.0)
        try:
            stdscr.addstr(height - 1, 0, info_text[:max(0, width - 1)], color | curses.A_BOLD)
        except curses.error:
            pass

        # Additional visual effect on beat
        if beat_detected:
            # Flash screen corners on strong beats
            for corner_y, corner_x in [(1, 1), (1, width - 2), (height - 2, 1), (height - 2, width - 2)]:
                if 0 <= corner_y < height and 0 <= corner_x < width:
                    try:
                        stdscr.addstr(corner_y, corner_x, "✧", self.hsv_to_color_pair(stdscr, hue_offset, 1.0, 1.0) | curses.A_BOLD)
                    except curses.error:
                        pass

        # Decay beat active state
        if self.beat_active and not beat_detected:
            self.beat_active = False

    def handle_keypress(self, key):
        """Adjust density ('d'/'D'), speed ('s'/'S'), trail length ('l'/'L')
        or reset all three ('r').  Returns True if the key was handled."""
        if key == 'd':
            self.density = min(1.0, self.density + 0.05)
            return True
        elif key == 'D':
            self.density = max(0.05, self.density - 0.05)
            return True
        elif key == 's':
            self.speed_factor = min(3.0, self.speed_factor + 0.1)
            return True
        elif key == 'S':
            self.speed_factor = max(0.1, self.speed_factor - 0.1)
            return True
        elif key == 'l':
            self.length_factor = min(3.0, self.length_factor + 0.1)
            return True
        elif key == 'L':
            self.length_factor = max(0.1, self.length_factor - 0.1)
            return True
        elif key == 'r':
            self.density = 0.2
            self.speed_factor = 1.0
            self.length_factor = 1.0
            return True
        return False
connections 53 | self.synapses = [] 54 | 55 | # Seed initial activity 56 | self.seed_initial_activity(height-2, width) 57 | 58 | def seed_initial_activity(self, height, width): 59 | # Create initial neurons in a pleasing pattern 60 | neuron_count = int(min(width, height) * 0.2) 61 | 62 | # Create some neurons in a circular pattern 63 | center_x, center_y = width // 2, height // 2 64 | radius = min(width, height) // 4 65 | 66 | for i in range(neuron_count): 67 | angle = 2 * math.pi * i / neuron_count 68 | x = int(center_x + radius * math.cos(angle)) 69 | y = int(center_y + radius * math.sin(angle)) 70 | 71 | if 0 <= x < width and 0 <= y < height: 72 | # Create a neuron with random properties 73 | self.active_neurons[(y, x)] = { 74 | 'strength': random.uniform(0.5, 1.0), 75 | 'hue': random.random(), 76 | 'pulse_rate': random.uniform(0.05, 0.2), 77 | 'age': 0 78 | } 79 | 80 | # Add some random energy to the energy field 81 | self.energy_field[y, x] = random.uniform(0.5, 1.0) 82 | 83 | # Create some synaptic connections 84 | for i in range(len(self.active_neurons) * 3): 85 | neurons = list(self.active_neurons.keys()) 86 | if len(neurons) >= 2: 87 | start = random.choice(neurons) 88 | end = random.choice(neurons) 89 | if start != end: 90 | self.synapses.append({ 91 | 'start': start, 92 | 'end': end, 93 | 'strength': random.uniform(0.3, 1.0), 94 | 'active': 0.0 95 | }) 96 | 97 | def update_neural_field(self, spectrum, energy, height, width): 98 | # Increase time counter 99 | self.time_counter += 1 100 | 101 | # Calculate neural parameters based on audio 102 | bass = np.mean(spectrum[:10]) * 2 103 | mids = np.mean(spectrum[10:30]) 104 | treble = np.mean(spectrum[30:]) 105 | 106 | # Beat detection 107 | current_time = self.time_counter 108 | beat_detected = False 109 | 110 | # Energy affects evolution speed 111 | self.evolution_speed = 0.1 + energy * 0.4 112 | 113 | # Resonance represents harmonic balance 114 | target_resonance = mids + treble * 0.5 115 | self.resonance = 
self.resonance * 0.9 + target_resonance * 0.1 116 | 117 | # Consciousness represents treble vs bass balance 118 | consciousness_target = treble / (bass + 0.01) # Avoid division by zero 119 | self.consciousness_level = self.consciousness_level * 0.95 + consciousness_target * 0.05 120 | 121 | # Dream intensity represents overall energy 122 | self.dream_intensity = self.dream_intensity * 0.9 + energy * 0.1 123 | 124 | # Check for beats (energy spikes) 125 | if energy > 0.25 and current_time - self.last_beat_time > 5: 126 | beat_detected = True 127 | self.last_beat_time = current_time 128 | 129 | # Strong beat - create thought particles 130 | thought_count = int(energy * 10) 131 | for _ in range(thought_count): 132 | # Create a thought particle at a random location 133 | x = random.randint(0, width-1) 134 | y = random.randint(0, height-1) 135 | 136 | # Angle based on current position 137 | angle = math.atan2(y - height/2, x - width/2) 138 | 139 | self.thought_particles.append({ 140 | 'x': x, 141 | 'y': y, 142 | 'vx': math.cos(angle) * (0.5 + energy * 2), 143 | 'vy': math.sin(angle) * (0.5 + energy * 2), 144 | 'life': random.randint(10, 30), 145 | 'hue': random.random(), 146 | 'size': random.uniform(0.5, 1.5) 147 | }) 148 | 149 | # Add energy to the wave field 150 | if 0 <= y < height and 0 <= x < width: 151 | self.wave_field[y, x] = 1.0 152 | 153 | # Spontaneous neuron creation/deletion based on music 154 | if random.random() < 0.1 * energy: 155 | # Create new neurons 156 | for _ in range(int(energy * 5)): 157 | x = random.randint(0, width-1) 158 | y = random.randint(0, height-1) 159 | 160 | self.active_neurons[(y, x)] = { 161 | 'strength': random.uniform(0.5, 1.0) * energy, 162 | 'hue': (bass + treble) % 1.0, 163 | 'pulse_rate': random.uniform(0.05, 0.2), 164 | 'age': 0 165 | } 166 | 167 | # Add energy 168 | self.energy_field[y, x] = energy 169 | 170 | # Neural death based on age and low energy 171 | neurons_to_remove = [] 172 | for pos, neuron in 
self.active_neurons.items(): 173 | neuron['age'] += 1 174 | 175 | # Death conditions 176 | if (neuron['age'] > 100 and random.random() < 0.02) or \ 177 | (neuron['strength'] < 0.1 and random.random() < 0.1): 178 | neurons_to_remove.append(pos) 179 | 180 | # Remove dead neurons 181 | for pos in neurons_to_remove: 182 | del self.active_neurons[pos] 183 | 184 | # Create synapses when certain frequencies are strong 185 | if random.random() < treble * 0.3: 186 | neurons = list(self.active_neurons.keys()) 187 | if len(neurons) >= 2: 188 | start = random.choice(neurons) 189 | end = random.choice(neurons) 190 | if start != end: 191 | self.synapses.append({ 192 | 'start': start, 193 | 'end': end, 194 | 'strength': random.uniform(0.3, 1.0) * mids, 195 | 'active': 0.0 196 | }) 197 | 198 | # Remove old synapses 199 | self.synapses = [s for s in self.synapses if s['strength'] > 0.1] 200 | 201 | # Update energy field - diffusion 202 | new_energy = np.zeros_like(self.energy_field) 203 | 204 | # Simple diffusion kernel 205 | for y in range(1, height-1): 206 | for x in range(1, width-1): 207 | # Average of neighbors 208 | new_energy[y, x] = ( 209 | self.energy_field[y-1, x] + 210 | self.energy_field[y+1, x] + 211 | self.energy_field[y, x-1] + 212 | self.energy_field[y, x+1] 213 | ) * 0.23 + self.energy_field[y, x] * 0.08 214 | 215 | # Apply energy decay 216 | self.energy_field = new_energy * (0.95 + 0.05 * energy) 217 | 218 | # Add energy from active neurons 219 | for (y, x), neuron in self.active_neurons.items(): 220 | if 0 <= y < height and 0 <= x < width: 221 | # Neurons pulse based on their pulse rate 222 | pulse = (math.sin(self.time_counter * neuron['pulse_rate']) + 1) / 2 223 | self.energy_field[y, x] += neuron['strength'] * pulse * 0.2 224 | 225 | # Add energy from bass frequencies 226 | if bass > 0.2: 227 | center_y, center_x = height // 2, width // 2 228 | radius = int(min(width, height) * 0.3 * bass) 229 | 230 | for y in range(max(0, center_y-radius), min(height, 
center_y+radius)): 231 | for x in range(max(0, center_x-radius), min(width, center_x+radius)): 232 | dist = math.sqrt((y-center_y)**2 + (x-center_x)**2) 233 | if dist < radius: 234 | # Add energy that decreases with distance from center 235 | energy_val = (1 - dist/radius) * bass * 0.5 236 | self.energy_field[y, x] += energy_val 237 | 238 | # Update wave field (ripple effect) 239 | new_wave = np.zeros_like(self.wave_field) 240 | 241 | # Wave propagation algorithm 242 | for y in range(1, height-1): 243 | for x in range(1, width-1): 244 | # Classic wave equation discretization 245 | new_wave[y, x] = ( 246 | self.wave_field[y-1, x] + 247 | self.wave_field[y+1, x] + 248 | self.wave_field[y, x-1] + 249 | self.wave_field[y, x+1] 250 | ) * 0.25 - self.wave_field[y, x] 251 | 252 | # Damping 253 | new_wave[y, x] *= 0.99 254 | 255 | self.wave_field = new_wave + self.wave_field * 0.5 256 | 257 | # Apply energy to wave field (energy causes waves) 258 | self.wave_field += self.energy_field * 0.03 259 | 260 | # Clamp wave field values 261 | self.wave_field = np.clip(self.wave_field, -1.0, 1.0) 262 | 263 | # Update thought particles 264 | new_thoughts = [] 265 | for thought in self.thought_particles: 266 | # Update position 267 | thought['x'] += thought['vx'] 268 | thought['y'] += thought['vy'] 269 | 270 | # Age the thought 271 | thought['life'] -= 1 272 | 273 | # Add energy where thoughts are 274 | y, x = int(thought['y']), int(thought['x']) 275 | if 0 <= y < height and 0 <= x < width: 276 | self.energy_field[y, x] += 0.2 * thought['size'] 277 | self.wave_field[y, x] += 0.1 278 | 279 | # Keep if still alive and on screen 280 | if (thought['life'] > 0 and 281 | 0 <= thought['x'] < width and 282 | 0 <= thought['y'] < height): 283 | new_thoughts.append(thought) 284 | 285 | self.thought_particles = new_thoughts 286 | 287 | # Update synapse activity based on neuron activity 288 | for synapse in self.synapses: 289 | start_pos = synapse['start'] 290 | end_pos = synapse['end'] 291 | 292 
| if start_pos in self.active_neurons and end_pos in self.active_neurons: 293 | start_neuron = self.active_neurons[start_pos] 294 | 295 | # Synapse activation based on start neuron strength and pulse 296 | pulse = (math.sin(self.time_counter * start_neuron['pulse_rate']) + 1) / 2 297 | activation = start_neuron['strength'] * pulse * synapse['strength'] 298 | 299 | # Smooth activation change 300 | synapse['active'] = 0.8 * synapse['active'] + 0.2 * activation 301 | 302 | # Transfer energy along synapse 303 | y1, x1 = start_pos 304 | y2, x2 = end_pos 305 | 306 | # Simple linear interpolation of points along the synapse 307 | steps = max(abs(y2-y1), abs(x2-x1)) + 1 308 | for i in range(steps): 309 | t = i / steps 310 | y = int(y1 + (y2-y1) * t) 311 | x = int(x1 + (x2-x1) * t) 312 | 313 | if 0 <= y < height and 0 <= x < width: 314 | self.energy_field[y, x] += synapse['active'] * 0.05 315 | else: 316 | # One of the connected neurons died, weaken synapse 317 | synapse['strength'] *= 0.9 318 | 319 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 320 | # Initialize fields if needed 321 | self.initialize_fields(height, width) 322 | 323 | # Update the neural state based on audio 324 | self.update_neural_field(spectrum, energy, height-2, width) 325 | 326 | # Get bass, mid, and treble energy for color modulation 327 | bass = np.mean(spectrum[:10]) * 2 328 | mids = np.mean(spectrum[10:30]) 329 | treble = np.mean(spectrum[30:]) 330 | 331 | # Clear the screen first 332 | stdscr.clear() 333 | 334 | # Draw title information bar 335 | title = f"Neural Dreamscape | Consciousness: {self.consciousness_level:.2f} | Resonance: {self.resonance:.2f} | Energy: {energy:.2f}" 336 | title_color = self.hsv_to_color_pair(stdscr, hue_offset, 1.0, 1.0) 337 | stdscr.addstr(0, width//2 - len(title)//2, title, title_color | curses.A_BOLD) 338 | 339 | # Draw the neural field 340 | for y in range(height-2): 341 | for x in range(width): 342 | # Get total intensity from energy and 
wave fields 343 | energy_val = self.energy_field[y, x] 344 | wave_val = (self.wave_field[y, x] + 1) / 2 # Normalize to 0-1 345 | 346 | # Skip drawing empty space to improve performance 347 | if energy_val < 0.05 and wave_val < 0.1: 348 | continue 349 | 350 | # Calculate display values 351 | intensity = min(1.0, energy_val * 2 + wave_val * 0.5) 352 | if intensity < 0.05: 353 | continue 354 | 355 | # Calculate color 356 | # Base hue varies with position and offset 357 | point_hue = ((x / width + y / height) / 2 + hue_offset) % 1.0 358 | 359 | # Modulate hue based on audio frequencies 360 | hue = (point_hue + bass * 0.2 + mids * 0.1 + treble * 0.05) % 1.0 361 | 362 | # Saturation and value based on intensity 363 | sat = 0.7 + 0.3 * intensity 364 | val = min(1.0, 0.5 + 0.5 * intensity) 365 | 366 | # Get color 367 | color = self.hsv_to_color_pair(stdscr, hue, sat, val) 368 | 369 | # Choose character based on intensity and wave field 370 | idx = min(len(self.symbols)-1, int(intensity * len(self.symbols))) 371 | char = self.symbols[idx] 372 | 373 | # Apply bold for higher intensity 374 | attrs = curses.A_BOLD if intensity > 0.7 else 0 375 | 376 | # Draw the character 377 | try: 378 | stdscr.addstr(y+1, x, char, color | attrs) 379 | except curses.error: 380 | pass 381 | 382 | # Draw active neural connections (synapses) 383 | for synapse in self.synapses: 384 | if synapse['active'] > 0.1: 385 | y1, x1 = synapse['start'] 386 | y2, x2 = synapse['end'] 387 | 388 | # Draw more points for stronger connections 389 | steps = max(3, int(10 * synapse['active'])) 390 | 391 | for i in range(steps): 392 | t = i / (steps - 1) 393 | y = int(y1 + (y2-y1) * t) 394 | x = int(x1 + (x2-x1) * t) 395 | 396 | # Skip if out of bounds 397 | if not (0 <= y < height-2 and 0 <= x < width): 398 | continue 399 | 400 | # Calculate pulse effect along synapse 401 | pulse_offset = (t + self.time_counter * 0.05) % 1.0 402 | pulse_intensity = (math.sin(pulse_offset * 2 * math.pi) + 1) / 2 403 | 404 | # 
Determine synapse appearance 405 | intensity = synapse['active'] * (0.5 + 0.5 * pulse_intensity) 406 | 407 | # Skip dim points 408 | if intensity < 0.1: 409 | continue 410 | 411 | # Calculate synapse color (bluish for inhibitory, reddish for excitatory) 412 | base_hue = 0.6 if synapse['strength'] < 0.5 else 0.0 413 | hue = (base_hue + hue_offset + pulse_offset * 0.2) % 1.0 414 | sat = 0.8 415 | val = 0.7 + 0.3 * intensity 416 | 417 | color = self.hsv_to_color_pair(stdscr, hue, sat, val) 418 | 419 | # Get character based on intensity 420 | chars = "·•◦○●" 421 | char_idx = min(len(chars)-1, int(intensity * len(chars))) 422 | 423 | try: 424 | stdscr.addstr(y+1, x, chars[char_idx], color | curses.A_BOLD) 425 | except curses.error: 426 | pass 427 | 428 | # Draw active neurons (cell bodies) 429 | for (y, x), neuron in self.active_neurons.items(): 430 | if 0 <= y < height-2 and 0 <= x < width: 431 | # Calculate neuron pulse 432 | pulse = (math.sin(self.time_counter * neuron['pulse_rate']) + 1) / 2 433 | intensity = neuron['strength'] * (0.7 + 0.3 * pulse) 434 | 435 | # Skip dim neurons 436 | if intensity < 0.2: 437 | continue 438 | 439 | # Calculate color 440 | neuron_hue = (neuron['hue'] + hue_offset) % 1.0 441 | neuron_sat = 0.8 + 0.2 * pulse 442 | neuron_val = 0.7 + 0.3 * intensity 443 | 444 | color = self.hsv_to_color_pair(stdscr, neuron_hue, neuron_sat, neuron_val) 445 | 446 | # Draw the neuron 447 | try: 448 | # Use different symbols based on neuron characteristics 449 | if intensity > 0.8: 450 | char = "❂" # Highly active 451 | elif intensity > 0.5: 452 | char = "☼" # Moderately active 453 | else: 454 | char = "○" # Less active 455 | 456 | stdscr.addstr(y+1, x, char, color | curses.A_BOLD) 457 | except curses.error: 458 | pass 459 | 460 | # Draw thought particles 461 | for thought in self.thought_particles: 462 | y, x = int(thought['y']), int(thought['x']) 463 | 464 | # Skip if out of bounds 465 | if not (0 <= y < height-2 and 0 <= x < width): 466 | continue 467 | 
468 | # Calculate color 469 | thought_hue = (thought['hue'] + hue_offset) % 1.0 470 | thought_sat = 0.9 471 | thought_val = 0.8 * (thought['life'] / 30) 472 | 473 | color = self.hsv_to_color_pair(stdscr, thought_hue, thought_sat, thought_val) 474 | 475 | # Draw different characters based on thought size 476 | if thought['size'] > 1.0: 477 | char = "✺" 478 | else: 479 | char = "✧" 480 | 481 | try: 482 | stdscr.addstr(y+1, x, char, color | curses.A_BOLD) 483 | except curses.error: 484 | pass 485 | 486 | # Draw consciousness wave at the bottom - FIX: avoid the last column to prevent cursor error 487 | consciousness_bar = "" 488 | consciousness_width = width - 1 # Avoid the very last column 489 | 490 | for x in range(consciousness_width): 491 | # Create wave pattern 492 | wave_val = math.sin(x / 10 + self.time_counter * 0.1) * 0.5 + 0.5 493 | intensity = wave_val * self.consciousness_level 494 | 495 | if intensity < 0.2: 496 | bar_char = "░" 497 | elif intensity < 0.5: 498 | bar_char = "▒" 499 | else: 500 | bar_char = "█" 501 | 502 | consciousness_bar += bar_char 503 | 504 | # Draw the consciousness wave - safely avoiding the last position 505 | wave_color = self.hsv_to_color_pair(stdscr, (hue_offset + 0.7) % 1.0, 0.8, 0.9) 506 | try: 507 | stdscr.addstr(height-1, 0, consciousness_bar, wave_color) 508 | except curses.error: 509 | # Fallback if even the modified version has issues 510 | stdscr.addstr(height-1, 0, consciousness_bar[:-1], wave_color) -------------------------------------------------------------------------------- /visualizers/neural_dreamscape_lite.py: -------------------------------------------------------------------------------- 1 | # visualizers/neural_dreamscape_lite.py 2 | import curses 3 | import numpy as np 4 | import random 5 | import math 6 | from visualizer_base import VisualizerBase 7 | 8 | class NeuralDreamscapeLiteVisualizer(VisualizerBase): 9 | def __init__(self): 10 | super().__init__(name="Neural Dreamscape Lite") 11 | # Neural field 12 | 
self.field = None 13 | self.energy_field = None 14 | 15 | # Dynamic state 16 | self.evolution_speed = 0.0 17 | self.time_counter = 0 18 | self.last_beat_time = 0 19 | self.consciousness_level = 0.0 20 | self.beat_memory = [0] * 8 # Remember last 8 beats for patterns 21 | self.color_mode = 0 # Different color modes 22 | self.particle_mode = 0 # Different particle behaviors 23 | 24 | # Rendering options 25 | self.symbols = "·•○★✧♦◆❅✺" # More interesting symbols 26 | self.active_neurons = {} 27 | self.synapses = [] 28 | self.special_particles = [] 29 | 30 | # User controls 31 | self.density = 0.5 # Controls visualization density 32 | self.show_info = True 33 | 34 | def setup(self): 35 | # Will initialize fields when we know the screen dimensions 36 | pass 37 | 38 | def handle_keypress(self, key): 39 | """Handle visualization-specific key commands""" 40 | if key == 'c': 41 | # Cycle through color modes 42 | self.color_mode = (self.color_mode + 1) % 3 43 | return True 44 | elif key == 'p': 45 | # Cycle through particle modes 46 | self.particle_mode = (self.particle_mode + 1) % 3 47 | return True 48 | elif key == 'd': 49 | # Toggle density 50 | self.density = 1.0 if self.density < 0.7 else 0.5 51 | return True 52 | elif key == 'i': 53 | # Toggle info display 54 | self.show_info = not self.show_info 55 | return True 56 | elif key == 'b': 57 | # Create a burst of activity 58 | self.trigger_burst() 59 | return True 60 | return False 61 | 62 | def trigger_burst(self): 63 | """Create a burst of neural activity""" 64 | ds_height, ds_width = self.field.shape 65 | center_y, center_x = ds_height // 2, ds_width // 2 66 | 67 | # Create a burst of neurons in a circular pattern 68 | for i in range(12): # Add 12 neurons in a circle 69 | angle = 2 * math.pi * i / 12 70 | dist = min(ds_width, ds_height) // 4 71 | y = int(center_y + dist * math.sin(angle)) 72 | x = int(center_x + dist * math.cos(angle)) 73 | 74 | if 0 <= x < ds_width and 0 <= y < ds_height: 75 | # Create neuron with 
random properties 76 | self.active_neurons[(y, x)] = { 77 | 'strength': random.uniform(0.8, 1.0), 78 | 'hue': (i / 12.0 + self.time_counter / 100) % 1.0, 79 | 'pulse_rate': random.uniform(0.1, 0.3), 80 | 'age': 0, 81 | 'type': random.randint(0, 2) # Different neuron types 82 | } 83 | 84 | # Add energy 85 | self.energy_field[y, x] = 1.0 86 | 87 | # Create special particle 88 | self.special_particles.append({ 89 | 'x': x, 90 | 'y': y, 91 | 'vx': math.cos(angle) * 0.5, 92 | 'vy': math.sin(angle) * 0.5, 93 | 'life': 30, 94 | 'hue': (i / 12.0 + 0.5) % 1.0, 95 | 'size': random.uniform(0.7, 1.3) 96 | }) 97 | 98 | # Connect the neurons with synapses 99 | neurons = list(self.active_neurons.keys()) 100 | for i in range(len(neurons)): 101 | if i > 0: # Connect each neuron to the previous one 102 | start = neurons[i] 103 | end = neurons[i-1] 104 | self.synapses.append({ 105 | 'start': start, 106 | 'end': end, 107 | 'strength': 0.9, 108 | 'active': 0.5 109 | }) 110 | 111 | def initialize_fields(self, height, width): 112 | # Only initialize if needed or dimensions changed 113 | if (self.field is None or 114 | self.field.shape[0] != height//2 or 115 | self.field.shape[1] != width//2): 116 | 117 | # Downsample dimensions for performance 118 | ds_height = height // 2 119 | ds_width = width // 2 120 | 121 | # Neural activity field - use downsampled dimensions 122 | self.field = np.zeros((ds_height, ds_width), dtype=float) 123 | 124 | # Energy propagation field - use downsampled dimensions 125 | self.energy_field = np.zeros((ds_height, ds_width), dtype=float) 126 | 127 | # Active neurons (coordinate -> properties) 128 | self.active_neurons = {} 129 | 130 | # Neural connections - reduced count 131 | self.synapses = [] 132 | 133 | # Special particles 134 | self.special_particles = [] 135 | 136 | # Seed initial activity 137 | self.seed_initial_activity(ds_height, ds_width) 138 | 139 | def seed_initial_activity(self, height, width): 140 | # Create fewer initial neurons in a simple 
pattern 141 | neuron_count = int(min(width, height) * 0.1) # Reduced count 142 | 143 | # Create some neurons in a spiral pattern for more interest 144 | center_x, center_y = width // 2, height // 2 145 | 146 | for i in range(neuron_count): 147 | # Spiral pattern 148 | angle = 0.5 * i 149 | radius = 2 + (i / neuron_count) * min(width, height) // 3 150 | x = int(center_x + radius * math.cos(angle)) 151 | y = int(center_y + radius * math.sin(angle)) 152 | 153 | if 0 <= x < width and 0 <= y < height: 154 | # Create a neuron with random properties 155 | self.active_neurons[(y, x)] = { 156 | 'strength': random.uniform(0.5, 1.0), 157 | 'hue': (i / neuron_count) % 1.0, # Different colors along spiral 158 | 'pulse_rate': random.uniform(0.05, 0.2), 159 | 'age': 0, 160 | 'type': random.randint(0, 2) # Different neuron types 161 | } 162 | 163 | # Add some random energy to the energy field 164 | self.energy_field[y, x] = random.uniform(0.5, 1.0) 165 | 166 | # Create fewer synaptic connections 167 | for i in range(neuron_count // 2): # Reduced count 168 | neurons = list(self.active_neurons.keys()) 169 | if len(neurons) >= 2: 170 | start = random.choice(neurons) 171 | end = random.choice(neurons) 172 | if start != end: 173 | self.synapses.append({ 174 | 'start': start, 175 | 'end': end, 176 | 'strength': random.uniform(0.3, 1.0), 177 | 'active': 0.0 178 | }) 179 | 180 | def update_neural_field(self, spectrum, energy, height, width): 181 | # Increase time counter 182 | self.time_counter += 1 183 | 184 | # Calculate neural parameters based on audio - with more frequency bands 185 | bass = np.mean(spectrum[:6]) * 2 186 | mid_low = np.mean(spectrum[6:12]) 187 | mid_high = np.mean(spectrum[12:20]) 188 | treble = np.mean(spectrum[20:]) 189 | 190 | # Beat detection with memory 191 | current_time = self.time_counter 192 | beat_detected = False 193 | 194 | # Shift beat memory and add new value 195 | if current_time - self.last_beat_time > 10: 196 | if energy > 0.3: 197 | beat_detected = 
True 198 | self.last_beat_time = current_time 199 | self.beat_memory = [1.0] + self.beat_memory[:-1] 200 | else: 201 | self.beat_memory = [0.0] + self.beat_memory[:-1] 202 | 203 | # Calculate rhythm pattern strength - how regular are the beats? 204 | rhythm_pattern = 0 205 | for i in range(4): 206 | if i < len(self.beat_memory)-1: 207 | # Check if adjacent beat timings are similar 208 | rhythm_pattern += self.beat_memory[i] * self.beat_memory[i+1] 209 | 210 | # Consciousness represents treble vs bass balance 211 | consciousness_target = treble / (bass + 0.01) # Avoid division by zero 212 | self.consciousness_level = self.consciousness_level * 0.95 + consciousness_target * 0.05 213 | 214 | # Create neurons on beat - more variety based on which frequencies are strong 215 | if beat_detected: 216 | # Determine neuron generation approach based on dominant frequency 217 | dominant = max(bass, mid_low, mid_high, treble) 218 | 219 | if dominant == bass: 220 | # Bass creates neurons in center 221 | center_y, center_x = height // 2, width // 2 222 | radius = int(min(width, height) * 0.15) 223 | count = int(3 * energy) 224 | 225 | for _ in range(count): 226 | angle = random.random() * 2 * math.pi 227 | dist = random.random() * radius 228 | y = int(center_y + dist * math.sin(angle)) 229 | x = int(center_x + dist * math.cos(angle)) 230 | 231 | if 0 <= x < width and 0 <= y < height: 232 | self.active_neurons[(y, x)] = { 233 | 'strength': random.uniform(0.7, 1.0) * energy, 234 | 'hue': (0.0 + self.time_counter / 100) % 1.0, # Reddish 235 | 'pulse_rate': random.uniform(0.05, 0.15), # Slower pulse 236 | 'age': 0, 237 | 'type': 0 # Bass type 238 | } 239 | self.energy_field[y, x] = energy 240 | 241 | elif dominant == mid_low or dominant == mid_high: 242 | # Mids create neurons in a line pattern 243 | count = int(4 * energy) 244 | step = width // (count + 1) 245 | 246 | for i in range(count): 247 | x = (i + 1) * step 248 | y = height // 2 + int((random.random() - 0.5) * height * 0.3) 
249 | 250 | if 0 <= x < width and 0 <= y < height: 251 | self.active_neurons[(y, x)] = { 252 | 'strength': random.uniform(0.6, 0.9) * energy, 253 | 'hue': (0.3 + self.time_counter / 100) % 1.0, # Greenish 254 | 'pulse_rate': random.uniform(0.1, 0.2), 255 | 'age': 0, 256 | 'type': 1 # Mid type 257 | } 258 | self.energy_field[y, x] = energy 259 | 260 | else: # treble 261 | # Treble creates neurons around edges 262 | count = int(5 * energy) 263 | 264 | for _ in range(count): 265 | # Choose edge 266 | edge = random.randint(0, 3) 267 | if edge == 0: # Top 268 | x = random.randint(0, width-1) 269 | y = random.randint(0, height//5) 270 | elif edge == 1: # Right 271 | x = random.randint(width*4//5, width-1) 272 | y = random.randint(0, height-1) 273 | elif edge == 2: # Bottom 274 | x = random.randint(0, width-1) 275 | y = random.randint(height*4//5, height-1) 276 | else: # Left 277 | x = random.randint(0, width//5) 278 | y = random.randint(0, height-1) 279 | 280 | if 0 <= x < width and 0 <= y < height: 281 | self.active_neurons[(y, x)] = { 282 | 'strength': random.uniform(0.8, 1.0) * energy, 283 | 'hue': (0.6 + self.time_counter / 100) % 1.0, # Bluish 284 | 'pulse_rate': random.uniform(0.15, 0.25), # Faster pulse 285 | 'age': 0, 286 | 'type': 2 # Treble type 287 | } 288 | self.energy_field[y, x] = energy 289 | 290 | # Create special particle 291 | for _ in range(int(energy * 3)): 292 | x = random.randint(0, width-1) 293 | y = random.randint(0, height-1) 294 | angle = random.random() * 2 * math.pi 295 | 296 | self.special_particles.append({ 297 | 'x': x, 298 | 'y': y, 299 | 'vx': math.cos(angle) * 0.7, 300 | 'vy': math.sin(angle) * 0.7, 301 | 'life': random.randint(20, 40), 302 | 'hue': random.random(), 303 | 'size': random.uniform(0.5, 1.5) 304 | }) 305 | 306 | # Neural death based on age and low energy - different lifespans for different types 307 | neurons_to_remove = [] 308 | for pos, neuron in self.active_neurons.items(): 309 | neuron['age'] += 1 310 | 311 | # Death 
conditions - vary by type 312 | max_age = 40 if neuron['type'] == 0 else 60 if neuron['type'] == 1 else 80 313 | death_chance = 0.08 if neuron['type'] == 0 else 0.05 if neuron['type'] == 1 else 0.03 314 | 315 | if (neuron['age'] > max_age and random.random() < death_chance) or \ 316 | (neuron['strength'] < 0.1): 317 | neurons_to_remove.append(pos) 318 | 319 | # Remove dead neurons 320 | for pos in neurons_to_remove: 321 | del self.active_neurons[pos] 322 | 323 | # Create synapses occasionally based on frequency content 324 | synapse_chance = treble * 0.1 + mid_high * 0.05 325 | if random.random() < synapse_chance and len(self.active_neurons) > 2: 326 | neurons = list(self.active_neurons.keys()) 327 | start = random.choice(neurons) 328 | 329 | # Find a nearby neuron to connect to 330 | potential_ends = [] 331 | for end in neurons: 332 | if start != end: 333 | y1, x1 = start 334 | y2, x2 = end 335 | dist = math.sqrt((y2-y1)**2 + (x2-x1)**2) 336 | if dist < width//4: # Only connect to nearby neurons 337 | potential_ends.append((end, dist)) 338 | 339 | if potential_ends: 340 | # Sort by distance and pick one of the closest 341 | potential_ends.sort(key=lambda x: x[1]) 342 | end = potential_ends[min(2, len(potential_ends)-1)][0] 343 | 344 | self.synapses.append({ 345 | 'start': start, 346 | 'end': end, 347 | 'strength': random.uniform(0.3, 1.0), 348 | 'active': 0.0 349 | }) 350 | 351 | # Remove old synapses 352 | self.synapses = [s for s in self.synapses if s['strength'] > 0.2] 353 | 354 | # Update energy field - improved diffusion 355 | # Process a subset of points each frame in a more strategic pattern 356 | new_energy = np.zeros_like(self.energy_field) 357 | 358 | # Process points in a grid pattern that changes each frame 359 | grid_size = 4 # Process every 4th point 360 | offset_y = self.time_counter % grid_size 361 | offset_x = (self.time_counter // 2) % grid_size 362 | 363 | for y in range(offset_y, height, grid_size): 364 | for x in range(offset_x, width, 
grid_size): 365 | # Safely get neighbors 366 | neighbors = [] 367 | neighbor_count = 0 368 | 369 | for dy, dx in [(-1,0), (1,0), (0,-1), (0,1)]: 370 | ny, nx = y + dy, x + dx 371 | if 0 <= ny < height and 0 <= nx < width: 372 | neighbors.append(self.energy_field[ny, nx]) 373 | neighbor_count += 1 374 | 375 | if neighbor_count > 0: 376 | # Average of neighbors 377 | avg = sum(neighbors) / neighbor_count 378 | new_energy[y, x] = avg * 0.8 + self.energy_field[y, x] * 0.1 379 | 380 | # Apply energy decay 381 | self.energy_field = self.energy_field * 0.9 382 | # Add new diffused energy where calculated 383 | for y in range(offset_y, height, grid_size): 384 | for x in range(offset_x, width, grid_size): 385 | if new_energy[y, x] > 0: 386 | self.energy_field[y, x] += new_energy[y, x] * 0.1 387 | 388 | # Add energy from active neurons - different patterns based on type 389 | for (y, x), neuron in self.active_neurons.items(): 390 | if 0 <= y < height and 0 <= x < width: 391 | # Simple pulse with neuron type variation 392 | if neuron['type'] == 0: # Bass type - strong central pulse 393 | pulse = (math.sin(self.time_counter * neuron['pulse_rate']) + 1) / 2 394 | self.energy_field[y, x] += neuron['strength'] * pulse * 0.3 395 | 396 | elif neuron['type'] == 1: # Mid type - wave pattern 397 | pulse = (math.sin(self.time_counter * neuron['pulse_rate']) + 1) / 2 398 | 399 | # Add energy in a small radius 400 | radius = 2 401 | for dy in range(-radius, radius+1): 402 | for dx in range(-radius, radius+1): 403 | ny, nx = y + dy, x + dx 404 | if 0 <= ny < height and 0 <= nx < width: 405 | dist = math.sqrt(dy*dy + dx*dx) 406 | if dist <= radius: 407 | energy_val = (1 - dist/radius) * pulse * neuron['strength'] * 0.1 408 | self.energy_field[ny, nx] += energy_val 409 | 410 | else: # Treble type - sparking pattern 411 | # Random sparks 412 | if random.random() < 0.2: 413 | # Choose a random direction 414 | angle = random.random() * 2 * math.pi 415 | dist = random.randint(1, 3) 416 | 
spark_y = int(y + dist * math.sin(angle)) 417 | spark_x = int(x + dist * math.cos(angle)) 418 | 419 | if 0 <= spark_y < height and 0 <= spark_x < width: 420 | self.energy_field[spark_y, spark_x] += neuron['strength'] * 0.4 421 | 422 | # Add energy from bass frequencies in a more interesting pattern 423 | if bass > 0.2: 424 | center_y, center_x = height // 2, width // 2 425 | 426 | # Create ripple pattern 427 | ripple_count = int(bass * 8) 428 | for i in range(ripple_count): 429 | radius = (i / ripple_count) * min(width, height) * 0.4 * bass 430 | points_to_add = int(min(2 * math.pi * radius, 20)) # Points proportional to circumference 431 | 432 | for j in range(points_to_add): 433 | angle = 2 * math.pi * j / points_to_add 434 | y = int(center_y + radius * math.sin(angle)) 435 | x = int(center_x + radius * math.cos(angle)) 436 | 437 | if 0 <= y < height and 0 <= x < width: 438 | energy_val = (1 - i/ripple_count) * bass * 0.3 439 | self.energy_field[y, x] += energy_val 440 | 441 | # Update special particles 442 | new_particles = [] 443 | for particle in self.special_particles: 444 | # Update position 445 | particle['x'] += particle['vx'] 446 | particle['y'] += particle['vy'] 447 | 448 | # Age the particle 449 | particle['life'] -= 1 450 | 451 | # Add energy where particles are 452 | py, px = int(particle['y']), int(particle['x']) 453 | if 0 <= py < height and 0 <= px < width: 454 | self.energy_field[py, px] += 0.3 * particle['size'] 455 | 456 | # Keep if still alive and on screen 457 | if (particle['life'] > 0 and 458 | 0 <= particle['x'] < width and 459 | 0 <= particle['y'] < height): 460 | # Modify velocity based on particle mode 461 | if self.particle_mode == 1: # Swirling mode 462 | # Add circular motion 463 | speed = math.sqrt(particle['vx']**2 + particle['vy']**2) 464 | angle = math.atan2(particle['vy'], particle['vx']) + 0.1 # Rotate 465 | particle['vx'] = math.cos(angle) * speed 466 | particle['vy'] = math.sin(angle) * speed 467 | elif self.particle_mode == 
2: # Chaotic mode 468 | # Random jitter 469 | particle['vx'] += (random.random() - 0.5) * 0.2 470 | particle['vy'] += (random.random() - 0.5) * 0.2 471 | # Limit speed 472 | speed = math.sqrt(particle['vx']**2 + particle['vy']**2) 473 | if speed > 1.0: 474 | particle['vx'] /= speed 475 | particle['vy'] /= speed 476 | 477 | new_particles.append(particle) 478 | 479 | self.special_particles = new_particles 480 | 481 | # Update synapse activity based on neuron activity - improved for more visual interest 482 | for synapse in self.synapses: 483 | start_pos = synapse['start'] 484 | end_pos = synapse['end'] 485 | 486 | if start_pos in self.active_neurons and end_pos in self.active_neurons: 487 | start_neuron = self.active_neurons[start_pos] 488 | end_neuron = self.active_neurons[end_pos] 489 | 490 | # Synapse activation based on both neurons 491 | start_pulse = (math.sin(self.time_counter * start_neuron['pulse_rate']) + 1) / 2 492 | activation = start_neuron['strength'] * start_pulse * synapse['strength'] 493 | 494 | # Synapse activity affected by neuron types 495 | if start_neuron['type'] == end_neuron['type']: 496 | # Same type - stronger connection 497 | activation *= 1.5 498 | 499 | # Smooth activation change 500 | synapse['active'] = 0.7 * synapse['active'] + 0.3 * activation 501 | 502 | # Transfer energy - now with pulse wave effect along synapse 503 | y1, x1 = start_pos 504 | y2, x2 = end_pos 505 | 506 | # Calculate length 507 | length = max(abs(y2-y1), abs(x2-x1)) 508 | pulse_pos = (self.time_counter * 0.1) % 1.0 # Position of pulse along synapse 509 | 510 | # Only transfer at a few points for performance 511 | num_points = min(length, 3) 512 | for i in range(num_points): 513 | t = i / max(1, num_points-1) 514 | y = int(y1 + (y2-y1) * t) 515 | x = int(x1 + (x2-x1) * t) 516 | 517 | # Pulse intensity based on distance from pulse position 518 | pulse_dist = abs(t - pulse_pos) 519 | pulse_intensity = max(0, 1 - pulse_dist * 5) # Narrow pulse 520 | 521 | if 0 <= y < 
height and 0 <= x < width: 522 | self.energy_field[y, x] += synapse['active'] * 0.05 * (1 + pulse_intensity) 523 | else: 524 | # One of the connected neurons died, weaken synapse 525 | synapse['strength'] *= 0.8 526 | 527 | def get_display_color(self, x, y, energy_val, hue_offset): 528 | """Get color based on current color mode""" 529 | if self.color_mode == 0: # Normal color mode - position based 530 | point_hue = ((x + y) / 40 + hue_offset) % 1.0 531 | sat = 0.7 + 0.3 * min(1.0, energy_val * 2) 532 | val = min(1.0, 0.5 + 0.5 * min(1.0, energy_val * 2)) 533 | return point_hue, sat, val 534 | 535 | elif self.color_mode == 1: # Energy based coloring 536 | # Energy determines hue 537 | point_hue = (energy_val * 0.7 + hue_offset) % 1.0 538 | sat = 0.8 539 | val = min(1.0, 0.5 + 0.5 * min(1.0, energy_val * 2)) 540 | return point_hue, sat, val 541 | 542 | else: # Complementary color mode 543 | # Base hue varies with position but in a different pattern 544 | point_hue = ((x * y) / 1000 + hue_offset) % 1.0 545 | # Higher energy points get complementary color 546 | if energy_val > 0.5: 547 | point_hue = (point_hue + 0.5) % 1.0 548 | sat = 0.7 + 0.3 * min(1.0, energy_val * 2) 549 | val = min(1.0, 0.5 + 0.5 * min(1.0, energy_val * 2)) 550 | return point_hue, sat, val 551 | 552 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 553 | # Initialize fields if needed 554 | self.initialize_fields(height, width) 555 | 556 | # Get downsampled dimensions 557 | ds_height = height // 2 558 | ds_width = width // 2 559 | 560 | # Update the neural state based on audio 561 | self.update_neural_field(spectrum, energy, ds_height, ds_width) 562 | 563 | # Get bass and treble energy for color modulation 564 | bass = np.mean(spectrum[:6]) * 2 565 | mid_low = np.mean(spectrum[6:12]) 566 | mid_high = np.mean(spectrum[12:20]) 567 | treble = np.mean(spectrum[20:]) 568 | 569 | # Clear the screen first 570 | stdscr.clear() 571 | 572 | # Draw title information bar - with more info 
based on show_info setting 573 | if self.show_info: 574 | title = f"Neural Dreamscape Lite | Energy: {energy:.2f} | Color: {self.color_mode} | Particles: {self.particle_mode} | Density: {self.density:.1f}" 575 | controls = "[C]olor [P]articles [D]ensity [B]urst [I]nfo" 576 | else: 577 | title = "Neural Dreamscape Lite" 578 | controls = "" 579 | 580 | title_color = self.hsv_to_color_pair(stdscr, hue_offset, 1.0, 1.0) 581 | stdscr.addstr(0, max(0, width//2 - len(title)//2), title, title_color | curses.A_BOLD) 582 | 583 | if controls: 584 | controls_color = self.hsv_to_color_pair(stdscr, (hue_offset + 0.3) % 1.0, 0.8, 0.9) 585 | try: 586 | stdscr.addstr(height-1, max(0, width - len(controls) - 2), controls, controls_color) 587 | except curses.error: 588 | pass 589 | 590 | # Draw the neural field - with density control and improved visuals 591 | # Calculate how many points to sample based on density setting 592 | points_to_draw = min(ds_height * ds_width * self.density, 300) # Cap for performance 593 | 594 | # Sample points to draw 595 | sampled_points = [] 596 | 597 | # Strategic sampling: 598 | # 1. Always include points with high energy 599 | # 2. Always include active neurons 600 | # 3. 
Fill remaining quota with random sampling 601 | 602 | # First add high energy points 603 | for y in range(ds_height): 604 | for x in range(ds_width): 605 | if self.energy_field[y, x] > 0.5: # High energy threshold 606 | sampled_points.append((y, x)) 607 | 608 | # Stop if we've reached our quota 609 | if len(sampled_points) >= points_to_draw: 610 | break 611 | if len(sampled_points) >= points_to_draw: 612 | break 613 | 614 | # Add active neurons if not already included 615 | for (y, x) in self.active_neurons.keys(): 616 | if (y, x) not in sampled_points and len(sampled_points) < points_to_draw: 617 | sampled_points.append((y, x)) 618 | 619 | # Fill remaining quota with random sampling 620 | remaining = points_to_draw - len(sampled_points) 621 | if remaining > 0: 622 | # Create a list of all possible points not already sampled 623 | all_points = [(y, x) for y in range(ds_height) for x in range(ds_width) 624 | if (y, x) not in sampled_points and self.energy_field[y, x] > 0.1] 625 | 626 | # Random sample from remaining points 627 | if all_points: 628 | random_samples = random.sample(all_points, min(remaining, len(all_points))) 629 | sampled_points.extend(random_samples) 630 | 631 | # Draw sampled points 632 | for ds_y, ds_x in sampled_points: 633 | # Upscale to screen coordinates 634 | y = ds_y * 2 635 | x = ds_x * 2 636 | 637 | # Skip if out of bounds 638 | if not (0 <= y < height-2 and 0 <= x < width): 639 | continue 640 | 641 | # Get value from downsampled field 642 | energy_val = self.energy_field[ds_y, ds_x] 643 | 644 | # Skip drawing empty space 645 | if energy_val < 0.1: 646 | continue 647 | 648 | # Calculate intensity 649 | intensity = min(1.0, energy_val * 2) 650 | 651 | # Get color based on current color mode 652 | hue, sat, val = self.get_display_color(ds_x, ds_y, energy_val, hue_offset) 653 | 654 | # Get color 655 | color = self.hsv_to_color_pair(stdscr, hue, sat, val) 656 | 657 | # Choose character based on intensity and mode 658 | idx = 
min(len(self.symbols)-1, int(intensity * len(self.symbols))) 659 | char = self.symbols[idx] 660 | 661 | # Apply bold for higher intensity 662 | attrs = curses.A_BOLD if intensity > 0.7 else 0 663 | 664 | # Draw the character 665 | try: 666 | stdscr.addstr(y+1, x, char, color | attrs) 667 | except curses.error: 668 | pass 669 | 670 | # Draw active neural connections (synapses) - more visually interesting 671 | active_synapses = [s for s in self.synapses if s['active'] > 0.1] 672 | sample_size = min(len(active_synapses), int(15 * self.density)) # More with higher density 673 | 674 | if sample_size > 0: 675 | for synapse in random.sample(active_synapses, sample_size): 676 | ds_y1, ds_x1 = synapse['start'] 677 | ds_y2, ds_x2 = synapse['end'] 678 | 679 | # Upscale to screen coordinates 680 | y1, x1 = ds_y1 * 2, ds_x1 * 2 681 | y2, x2 = ds_y2 * 2, ds_x2 * 2 682 | 683 | # Calculate distance to determine how many points to draw 684 | distance = math.sqrt((y2-y1)**2 + (x2-x1)**2) 685 | steps = min(int(distance / 2) + 1, 5) # More points for longer synapses 686 | 687 | # Pulse wave effect along synapses 688 | pulse_pos = (self.time_counter * 0.1) % 1.0 # Position of pulse (0-1) 689 | 690 | for i in range(steps): 691 | t = i / (steps - 1) if steps > 1 else 0.5 692 | y = int(y1 + (y2-y1) * t) 693 | x = int(x1 + (x2-x1) * t) 694 | 695 | # Skip if out of bounds 696 | if not (0 <= y < height-2 and 0 <= x < width): 697 | continue 698 | 699 | # Calculate pulse intensity - brightest at pulse position 700 | pulse_dist = abs(t - pulse_pos) 701 | pulse_intensity = max(0, 1 - pulse_dist * 3) # Narrower pulse = more visible movement 702 | 703 | # Determine synapse appearance 704 | intensity = synapse['active'] * (0.6 + 0.4 * pulse_intensity) 705 | 706 | # Skip dim points 707 | if intensity < 0.2: 708 | continue 709 | 710 | # Calculate synapse color - based on color mode 711 | if self.color_mode == 0: 712 | hue = (0.6 + hue_offset + pulse_intensity * 0.2) % 1.0 # Bluish with pulse 
highlight 713 | elif self.color_mode == 1: 714 | hue = (0.0 + hue_offset + intensity * 0.3) % 1.0 # Reddish intensity 715 | else: 716 | hue = (0.85 + hue_offset) % 1.0 # Purple 717 | 718 | color = self.hsv_to_color_pair(stdscr, hue, 0.8, 0.7 + 0.3 * intensity) 719 | 720 | # Choose character based on pulse position 721 | chars = "·•+*❃" 722 | char_idx = min(len(chars)-1, int((intensity + pulse_intensity) * 0.7 * len(chars))) 723 | char = chars[char_idx] 724 | 725 | try: 726 | attrs = curses.A_BOLD if pulse_intensity > 0.5 else 0 727 | stdscr.addstr(y+1, x, char, color | attrs) 728 | except curses.error: 729 | pass 730 | 731 | # Draw active neurons (cell bodies) - with type differentiation 732 | active_neurons = list(self.active_neurons.items()) 733 | sample_size = min(len(active_neurons), int(30 * self.density)) # More with higher density 734 | 735 | if sample_size > 0: 736 | for (ds_y, ds_x), neuron in random.sample(active_neurons, sample_size): 737 | # Upscale to screen coordinates 738 | y, x = ds_y * 2, ds_x * 2 739 | 740 | if 0 <= y < height-2 and 0 <= x < width: 741 | # Calculate neuron pulse 742 | pulse = (math.sin(self.time_counter * neuron['pulse_rate']) + 1) / 2 743 | intensity = neuron['strength'] * (0.7 + 0.3 * pulse) 744 | 745 | # Skip dim neurons 746 | if intensity < 0.2: 747 | continue 748 | 749 | # Calculate color - based on neuron type and color mode 750 | if self.color_mode == 0: 751 | # Type-based coloring 752 | base_hue = 0.0 if neuron['type'] == 0 else 0.3 if neuron['type'] == 1 else 0.6 753 | neuron_hue = (base_hue + hue_offset + pulse * 0.1) % 1.0 754 | elif self.color_mode == 1: 755 | # Intensity-based coloring 756 | neuron_hue = (intensity * 0.8 + hue_offset) % 1.0 757 | else: 758 | # Position-based with complementary pulses 759 | neuron_hue = ((ds_x * ds_y) / 1000 + hue_offset) % 1.0 760 | if pulse > 0.7: 761 | neuron_hue = (neuron_hue + 0.5) % 1.0 762 | 763 | neuron_sat = 0.8 + 0.2 * pulse 764 | neuron_val = 0.7 + 0.3 * intensity 765 | 766 
| color = self.hsv_to_color_pair(stdscr, neuron_hue, neuron_sat, neuron_val) 767 | 768 | # Draw the neuron - different symbols based on type and intensity 769 | try: 770 | if neuron['type'] == 0: # Bass type 771 | char = "★" if intensity > 0.7 else "✧" 772 | elif neuron['type'] == 1: # Mid type 773 | char = "❅" if intensity > 0.7 else "♦" 774 | else: # Treble type 775 | char = "✺" if intensity > 0.7 else "◆" 776 | 777 | stdscr.addstr(y+1, x, char, color | curses.A_BOLD) 778 | except curses.error: 779 | pass 780 | 781 | # Draw special particles - with different behaviors based on particle mode 782 | for particle in self.special_particles: 783 | # Convert to screen coordinates 784 | y = int(particle['y'] * 2) 785 | x = int(particle['x'] * 2) 786 | 787 | if 0 <= y < height-2 and 0 <= x < width: 788 | # Calculate fade based on lifetime 789 | fade = particle['life'] / 40.0 # Normalize to 0-1 790 | 791 | # Calculate color based on particle mode 792 | if self.particle_mode == 0: # Normal mode 793 | hue = (particle['hue'] + hue_offset) % 1.0 794 | sat = 0.9 795 | val = 0.7 * fade + 0.3 796 | elif self.particle_mode == 1: # Swirling mode - color changes with direction 797 | angle = math.atan2(particle['vy'], particle['vx']) 798 | hue = ((angle / (2 * math.pi)) + hue_offset) % 1.0 799 | sat = 0.9 800 | val = 0.8 * fade + 0.2 801 | else: # Chaotic mode - rapid color changes 802 | hue = ((particle['hue'] + self.time_counter * 0.03) % 1.0 + hue_offset) % 1.0 803 | sat = 0.9 804 | val = 0.8 * fade + 0.2 805 | 806 | color = self.hsv_to_color_pair(stdscr, hue, sat, val) 807 | 808 | # Character based on particle size and mode 809 | if self.particle_mode == 0: 810 | char = "✧" if particle['size'] > 1.0 else "•" 811 | elif self.particle_mode == 1: 812 | char = "✺" if particle['size'] > 1.0 else "✧" 813 | else: 814 | chars = "•✧✺★✦" 815 | idx = int((self.time_counter + particle['life']) * 0.1) % len(chars) 816 | char = chars[idx] 817 | 818 | try: 819 | stdscr.addstr(y+1, x, char, 
color | curses.A_BOLD) 820 | except curses.error: 821 | pass 822 | 823 | # Draw enhanced consciousness wave at the bottom - with reactive design 824 | consciousness_bar = "" 825 | consciousness_width = min(width - 1, 60) # Reasonable width for performance 826 | 827 | # Determine pattern based on audio characteristics 828 | if bass > treble * 1.5: # Bass dominant 829 | pattern = "▁▂▃▄▅▆▇█" # Blocky pattern 830 | wave_hue = (0.0 + hue_offset) % 1.0 # Red 831 | elif treble > bass * 1.5: # Treble dominant 832 | pattern = "∿⌇⌇∿⌇⌇∿" # Wavy pattern 833 | wave_hue = (0.6 + hue_offset) % 1.0 # Blue 834 | else: # Balanced 835 | pattern = "▁▄▂▇▃█▅▆" # Mixed pattern 836 | wave_hue = (0.3 + hue_offset) % 1.0 # Green 837 | 838 | # Generate the bar 839 | for x in range(consciousness_width): 840 | # Create wave pattern with more variation 841 | wave_val = 0.5 + 0.5 * math.sin(x / 5 + self.time_counter * 0.1) 842 | beat_factor = sum(self.beat_memory) / len(self.beat_memory) # How regular the beats are 843 | intensity = (wave_val * self.consciousness_level) + (beat_factor * 0.3) 844 | 845 | # Choose symbol based on intensity 846 | if intensity < 0.3: 847 | idx = 0 848 | else: 849 | idx = min(len(pattern) - 1, int(intensity * len(pattern))) 850 | bar_char = pattern[idx] 851 | 852 | consciousness_bar += bar_char 853 | 854 | # Draw the consciousness wave 855 | wave_color = self.hsv_to_color_pair(stdscr, wave_hue, 0.8, 0.9) 856 | try: 857 | stdscr.addstr(height-1, 0, consciousness_bar, wave_color) 858 | except curses.error: 859 | # Fallback if there's an issue 860 | try: 861 | stdscr.addstr(height-1, 0, consciousness_bar[:-1], wave_color) 862 | except curses.error: 863 | pass -------------------------------------------------------------------------------- /visualizers/particles.py: -------------------------------------------------------------------------------- 1 | # visualizers/particles.py 2 | import curses 3 | import random 4 | from visualizer_base import VisualizerBase 5 | 6 | class 
ParticlesVisualizer(VisualizerBase): 7 | def __init__(self): 8 | super().__init__(name="Particles") 9 | self.particles = [] 10 | self.max_particles = 100 11 | 12 | def draw(self, stdscr, spectrum, height, width, energy, hue_offset): 13 | # Particle system visualization 14 | 15 | # Create new particles based on audio energy 16 | if len(self.particles) < self.max_particles and energy > 0.1: 17 | spawn_count = int(energy * 10) 18 | for _ in range(spawn_count): 19 | # Create particle at the bottom center 20 | self.particles.append({ 21 | 'x': width // 2 + random.randint(-10, 10), 22 | 'y': height - 5, 23 | 'vx': random.uniform(-2, 2), 24 | 'vy': random.uniform(-5, -2), 25 | 'life': random.uniform(0.5, 1.0), 26 | 'hue': random.random(), 27 | 'size': random.choice(['.', '*', '+', '•', '○', '◌', '◦']) 28 | }) 29 | 30 | # Update and draw particles 31 | new_particles = [] 32 | for p in self.particles: 33 | # Update position 34 | p['x'] += p['vx'] 35 | p['y'] += p['vy'] 36 | p['vy'] += 0.1 # Gravity 37 | p['life'] -= 0.02 38 | 39 | # If particle is still alive and on screen 40 | if p['life'] > 0 and 0 <= p['y'] < height - 1 and 0 <= p['x'] < width: 41 | # Draw particle 42 | x, y = int(p['x']), int(p['y']) 43 | 44 | # Color based on life and initial hue 45 | hue = (p['hue'] + hue_offset) % 1.0 46 | sat = 0.8 47 | val = 0.7 + 0.3 * p['life'] 48 | 49 | # Get color pair and draw 50 | color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val) 51 | stdscr.addstr(y, x, p['size'], color_attr | curses.A_BOLD) 52 | 53 | # Keep this particle 54 | new_particles.append(p) 55 | 56 | # Update particle list 57 | self.particles = new_particles 58 | 59 | def handle_keypress(self, key): 60 | if key == 'p': 61 | self.max_particles = min(500, self.max_particles + 50) 62 | return True 63 | elif key == 'P': 64 | self.max_particles = max(50, self.max_particles - 50) 65 | return True 66 | return False -------------------------------------------------------------------------------- 
# visualizers/starfield_warp.py
import curses
import random
import math
import numpy as np
from visualizer_base import VisualizerBase

class StarfieldWarpVisualizer(VisualizerBase):
    """Audio-reactive starfield: stars stream outward from screen center while
    beat-triggered sinusoidal waveforms ripple toward the edges.

    Key bindings (see handle_keypress): w/W warp speed, i/I waveform
    intensity, n/N star density.
    """

    def __init__(self):
        super().__init__(name="Starfield Warp")
        self.stars = []                # live star particles (dicts)
        self.star_count = 100          # base cap on simultaneous stars
        self.warp_speed = 0.5          # user-adjustable speed multiplier
        self.waveform_intensity = 1.0  # user-adjustable wave amplitude scale
        self.waveform_count = 6        # soft cap on simultaneous waveforms
        self.waveform_life = []        # active waveforms (dicts with a 'life' in (0,1])
        self.star_density = 1.0        # user-adjustable multiplier on star_count
        # Audio reactivity state
        self.beat_pulse = 0.0          # decaying pulse, boosted on detected beats
        self.bass_level = 0.0
        self.mid_level = 0.0
        self.treble_level = 0.0
        self.beat_history = [0.0] * 10  # rolling energy window for beat detection

    def setup(self):
        """Reset per-run state (called when the visualizer is activated)."""
        self.phase = 0
        self.stars = []
        self.waveform_life = []
        self.beat_pulse = 0.0

    def draw(self, stdscr, spectrum, height, width, energy, hue_offset):
        """Render one frame.

        spectrum: sequence of FFT magnitudes, low frequencies first
                  (bands are only re-sampled when len(spectrum) > 30).
        energy:   overall loudness for this frame.
        """
        center_x = width // 2
        center_y = height // 2

        # Process audio spectrum into frequency bands
        if len(spectrum) > 30:
            self.bass_level = np.mean(spectrum[:10]) * 2   # Bass frequencies
            self.mid_level = np.mean(spectrum[10:20])      # Mid frequencies
            self.treble_level = np.mean(spectrum[20:30])   # Treble frequencies

        # Beat detection: a beat is a sudden jump above the rolling average
        self.beat_history.append(energy)
        self.beat_history.pop(0)
        avg_energy = sum(self.beat_history) / len(self.beat_history)
        is_beat = energy > 1.5 * avg_energy and energy > 0.4

        # Create beat pulse effect
        if is_beat:
            self.beat_pulse = min(1.0, self.beat_pulse + 0.7 * self.bass_level)
        else:
            self.beat_pulse *= 0.8  # Decay the pulse

        # Update phase with audio reactivity
        self.phase += (0.5 + energy) * self.warp_speed

        # Generate new stars - AUDIO REACTIVE: more stars with bass
        star_gen_chance = 0.3 + self.bass_level * 2
        if (len(self.stars) < self.star_count * self.star_density and
                random.random() < star_gen_chance):
            # Create a new star at a random position near the center
            angle = random.uniform(0, 2 * math.pi)
            distance = random.uniform(1, 3)

            # Vary star type based on audio spectrum
            if self.bass_level > 0.5:
                star_char = '*'   # Bass-heavy stars are brighter
            elif self.mid_level > 0.3:
                star_char = '+'   # Mid-range gets plus signs
            elif self.treble_level > 0.2:
                star_char = '·'   # Treble gets small dots
            else:
                star_char = '.'   # Default tiny dot

            self.stars.append({
                'x': center_x + distance * math.cos(angle),
                'y': center_y + distance * math.sin(angle),
                'angle': angle,
                'speed': 0.1 + random.uniform(0, 0.4) * (1 + self.bass_level),  # Faster with bass
                'size': star_char,
                'hue': random.random(),
                'birth_time': self.phase  # Track when star was created
            })

        # VISUALIZE BEAT: Draw expanding circle on beats
        if self.beat_pulse > 0.1:
            pulse_radius = int(max(width, height) * 0.4 * self.beat_pulse)
            pulse_color = self.hsv_to_color_pair(stdscr, hue_offset + 0.6, 1.0, 1.0)

            for angle in range(0, 360, 15):  # Draw segments for performance
                rad_angle = math.radians(angle)
                x = int(center_x + pulse_radius * math.cos(rad_angle))
                y = int(center_y + pulse_radius * math.sin(rad_angle))

                if 0 <= x < width and 0 <= y < height:
                    try:
                        char = '°' if angle % 30 == 0 else '·'
                        stdscr.addstr(y, x, char, pulse_color)
                    except curses.error:
                        pass  # writing the terminal's last cell can still raise

        # Update and draw stars with enhanced audio reactivity
        new_stars = []
        for star in self.stars:
            # Bass affects speed, treble adds a wobble to the trajectory
            wobble = self.treble_level * math.sin(self.phase * 5 + star['birth_time'])
            star_angle = star['angle'] + wobble * 0.2

            # Accelerate based on bass and energy
            star['speed'] += 0.02 * self.bass_level + 0.01 * energy

            # Move star outward with audio-reactive speed
            star['x'] += math.cos(star_angle) * star['speed'] * (1 + energy)
            star['y'] += math.sin(star_angle) * star['speed'] * (1 + energy)

            # Check if star is still on screen
            distance = math.sqrt((star['x'] - center_x)**2 + (star['y'] - center_y)**2)
            if (0 <= star['x'] < width and 0 <= star['y'] < height and
                    distance < max(width, height)):
                x, y = int(star['x']), int(star['y'])

                # Base hue on original + offset, modulated with mid frequencies
                hue = (star['hue'] + hue_offset + self.mid_level * 0.2) % 1.0

                # Saturation and brightness grow with distance and beat pulse
                sat = min(1.0, 0.5 + distance / 20 + self.beat_pulse * 0.3)
                val = min(1.0, 0.5 + distance / 20 + self.beat_pulse * 0.5)

                color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val)

                # Streak characters for fast stars, oriented along motion
                char = star['size']
                if star['speed'] > 0.5 + self.bass_level:
                    char = '-' if abs(math.cos(star_angle)) > abs(math.sin(star_angle)) else '|'
                if star['speed'] > 1.0 + self.bass_level * 0.5:
                    char = '=' if abs(math.cos(star_angle)) > abs(math.sin(star_angle)) else '‖'

                # On strong beats, make some stars flash
                if is_beat and random.random() < self.bass_level:
                    color_attr |= curses.A_REVERSE

                try:
                    stdscr.addstr(y, x, char, color_attr | curses.A_BOLD)
                except curses.error:
                    pass  # Skip if we can't draw at this position

                # Keep this star
                new_stars.append(star)

        # Update star list (off-screen stars are dropped)
        self.stars = new_stars

        # Generate new waveforms on beat detection
        if is_beat and self.bass_level > 0.3:
            if len(spectrum) > 0:
                # Create multiple waveforms on strong beats
                waveforms_to_create = 1 + int(self.bass_level * 3)

                for _ in range(waveforms_to_create):
                    # Pick a (randomly weighted) dominant frequency band
                    max_band = max(range(min(len(spectrum), 30)),
                                   key=lambda i: spectrum[i] * (1 if random.random() < 0.7 else 0.5))
                    norm_freq = max_band / min(len(spectrum), 30)  # Normalized frequency (0-1)

                    # Add new waveform with the dominant frequency
                    angle = random.uniform(0, 2 * math.pi)
                    self.waveform_life.append({
                        'angle': angle,
                        'life': 1.0,
                        'freq': 3 + norm_freq * 10,              # Frequency scaled
                        'amplitude': 5 + self.bass_level * 15,   # Bass-based amplitude
                        'hue': (hue_offset + norm_freq) % 1.0,   # Frequency-based color
                        'width': 1 + int(energy * 2)             # Wider on stronger beats
                    })

                    # Limit waveform count
                    if len(self.waveform_life) > self.waveform_count + int(self.bass_level * 3):
                        self.waveform_life.pop(0)

        # Draw waveforms with enhanced audio reactivity
        for wf in self.waveform_life:
            # Decrease life; treble makes waves dissipate faster
            wf['life'] -= 0.03 + 0.02 * self.treble_level

            if wf['life'] <= 0:
                continue

            # Draw waveform line from center outward
            max_distance = min(width, height) // 2 * wf['life']
            angle = wf['angle']
            amplitude = wf['amplitude'] * self.waveform_intensity
            frequency = wf['freq'] * (1 + self.mid_level * 0.5)  # Mid frequencies affect wavelength
            # BUGFIX: this local used to be named `width`, shadowing the
            # screen-width parameter and making every bounds check below
            # compare against the waveform thickness (1-3) instead of the
            # terminal width — clipping all waveforms to the left edge.
            wf_width = wf['width']

            # Get color for this waveform with audio-reactive intensity
            hue = (wf['hue'] + self.phase * 0.01) % 1.0
            sat = 0.8 + 0.2 * self.mid_level
            val = 0.7 + 0.3 * wf['life'] + 0.2 * self.beat_pulse

            # Draw a sinusoidal line extending outward
            for dist in range(1, int(max_distance), 2):  # Step by 2 for performance
                # Calculate position with audio-reactive wave pattern
                wave_offset = amplitude * math.sin(dist / frequency * math.pi) * wf['life']

                # Offset is applied perpendicular to the main angle
                perp_angle = angle + math.pi / 2
                x = center_x + dist * math.cos(angle) + wave_offset * math.cos(perp_angle)
                y = center_y + dist * math.sin(angle) + wave_offset * math.sin(perp_angle)

                # Check bounds against the actual screen size
                if 0 <= x < width and 0 <= y < height:
                    # Draw with intensity based on life and audio
                    intensity = wf['life'] * (1 + self.bass_level * 0.5)
                    color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val * intensity)

                    # Use different characters based on wave intensity and audio
                    if dist % 3 == 0:
                        char = '*' if intensity > 0.7 or self.beat_pulse > 0.5 else '+'
                    else:
                        char = '·' if intensity > 0.5 else '.'

                    # Draw the wave point
                    try:
                        stdscr.addstr(int(y), int(x), char, color_attr)

                        # For wide waves, add extra points
                        if wf_width > 1:
                            for w in range(1, wf_width):
                                if random.random() < 0.7:  # Sparse additional points
                                    try:
                                        wx = int(x + w * math.cos(perp_angle))
                                        wy = int(y + w * math.sin(perp_angle))
                                        if 0 <= wx < width and 0 <= wy < height:
                                            stdscr.addstr(wy, wx, '.', color_attr)
                                    except curses.error:
                                        pass
                    except curses.error:
                        pass  # Skip if we can't draw at this position

        # Filter out dead waveforms
        self.waveform_life = [wf for wf in self.waveform_life if wf['life'] > 0]

        # Display audio levels for debugging
        debug_color = self.hsv_to_color_pair(stdscr, 0.3, 0.7, 0.9)
        try:
            level_bar = f"Bass: {'█' * int(self.bass_level * 10):<10} Mid: {'█' * int(self.mid_level * 10):<10} Treble: {'█' * int(self.treble_level * 10):<10}"
            stdscr.addstr(height-1, 0, level_bar, debug_color)
        except curses.error:
            pass

    def handle_keypress(self, key):
        """Adjust tunables; return True when the key was consumed."""
        if key == 'w':    # Increase warp speed
            self.warp_speed = min(2.0, self.warp_speed + 0.1)
            return True
        elif key == 'W':  # Decrease warp speed
            self.warp_speed = max(0.1, self.warp_speed - 0.1)
            return True
        elif key == 'i':  # Increase waveform intensity
            self.waveform_intensity = min(2.0, self.waveform_intensity + 0.1)
            return True
        elif key == 'I':  # Decrease waveform intensity
            self.waveform_intensity = max(0.1, self.waveform_intensity - 0.1)
            return True
        elif key == 'n':  # Increase star density
            self.star_density = min(3.0, self.star_density + 0.2)
            return True
        elif key == 'N':  # Decrease star density
            self.star_density = max(0.2, self.star_density - 0.2)
            return True
        return False
class StickFigureVisualizer(VisualizerBase):
    """A stick figure that dances to the audio, with three dance styles
    (regular, breakdance, robot) and a color floor that widens with energy.

    Key bindings (see handle_keypress): d cycles dance mode, s/S speed.
    """

    def __init__(self):
        super().__init__(name="Dancing Stick Figure")
        self.dance_mode = 0  # 0: regular, 1: breakdance, 2: robot
        self.speed = 0.5     # animation speed multiplier

    def setup(self):
        self.phase = 0  # accumulated animation phase, advanced by energy

    def draw(self, stdscr, spectrum, height, width, energy, hue_offset):
        """Render one frame of the dancing figure plus a reactive floor."""
        # Centers for stick figure
        center_x = width // 2
        center_y = height // 2

        # Clear figure area (guarded: the last cell of a row can raise)
        for y in range(center_y - 10, center_y + 10):
            if 0 <= y < height:
                for x in range(center_x - 15, center_x + 15):
                    if 0 <= x < width:
                        try:
                            stdscr.addstr(y, x, " ")
                        except curses.error:
                            pass

        # Animate based on audio energy and current phase
        self.phase += energy * self.speed

        # Calculate limb positions based on phase and dance mode
        if self.dance_mode == 0:
            # Regular dancing: arms and legs swing in opposite phase
            head_x = center_x
            head_y = center_y - 6

            body_bottom_y = center_y + 1

            left_arm_x = center_x - 4 * math.cos(self.phase)
            left_arm_y = center_y - 3 - math.sin(self.phase)
            right_arm_x = center_x + 4 * math.cos(self.phase + math.pi)
            right_arm_y = center_y - 3 - math.sin(self.phase + math.pi)

            left_leg_x = center_x - 3 * math.cos(self.phase * 0.7)
            left_leg_y = center_y + 5 + math.sin(self.phase * 0.7)
            right_leg_x = center_x + 3 * math.cos(self.phase * 0.7 + math.pi)
            right_leg_y = center_y + 5 + math.sin(self.phase * 0.7 + math.pi)

        elif self.dance_mode == 1:
            # Breakdance mode: even the head orbits the body
            head_x = center_x + 4 * math.cos(self.phase)
            head_y = center_y - 2 - 2 * math.sin(self.phase)

            body_bottom_y = center_y + 1

            left_arm_x = center_x - 5 * math.cos(self.phase * 2)
            left_arm_y = center_y - 1 - 2 * math.sin(self.phase * 2)
            right_arm_x = center_x + 5 * math.cos(self.phase * 2 + math.pi/2)
            right_arm_y = center_y - 1 - 2 * math.sin(self.phase * 2 + math.pi/2)

            left_leg_x = center_x - 5 * math.cos(self.phase * 1.5)
            left_leg_y = center_y + 3 + 3 * math.sin(self.phase * 1.5)
            right_leg_x = center_x + 5 * math.cos(self.phase * 1.5 + math.pi/3)
            right_leg_y = center_y + 3 + 3 * math.sin(self.phase * 1.5 + math.pi/3)

        else:
            # Robot dance: jerky, quantized positions with energy-driven jitter
            head_x = center_x + (energy > 0.5) * random.randint(-1, 1)
            head_y = center_y - 6 + (energy > 0.7) * random.randint(-1, 1)

            body_bottom_y = center_y + 1

            # Arms cycle through four fixed poses
            arm_phase = int(self.phase * 4) % 4
            if arm_phase == 0:
                left_arm_x, left_arm_y = center_x - 4, center_y - 3
                right_arm_x, right_arm_y = center_x + 4, center_y - 3
            elif arm_phase == 1:
                left_arm_x, left_arm_y = center_x - 4, center_y - 5
                right_arm_x, right_arm_y = center_x + 4, center_y - 3
            elif arm_phase == 2:
                left_arm_x, left_arm_y = center_x - 4, center_y - 5
                right_arm_x, right_arm_y = center_x + 4, center_y - 5
            else:
                left_arm_x, left_arm_y = center_x - 4, center_y - 3
                right_arm_x, right_arm_y = center_x + 4, center_y - 5

            # Legs alternate between two stances
            leg_phase = int(self.phase * 2) % 2
            if leg_phase == 0:
                left_leg_x, left_leg_y = center_x - 3, center_y + 5
                right_leg_x, right_leg_y = center_x + 3, center_y + 5
            else:
                left_leg_x, left_leg_y = center_x - 2, center_y + 5
                right_leg_x, right_leg_y = center_x + 2, center_y + 5

        # Calculate color based on energy and hue_offset
        hue = (hue_offset + energy) % 1.0
        sat = 0.8
        val = 0.7 + 0.3 * energy
        color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val)

        # Draw stick figure
        # Head — BUGFIX: this write was unguarded; dance modes can push the
        # head off a small terminal and curses raises on out-of-window writes.
        if 0 <= int(head_y) < height and 0 <= int(head_x) < width:
            try:
                stdscr.addstr(int(head_y), int(head_x), "O", color_attr | curses.A_BOLD)
            except curses.error:
                pass

        # Body: vertical bar of 5 cells below the head
        for i in range(1, 6):
            body_y = head_y + i
            if 0 <= int(body_y) < height and 0 <= int(center_x) < width:
                try:
                    stdscr.addstr(int(body_y), int(center_x), "|", color_attr)
                except curses.error:
                    pass

        # Arms (anchored 2 cells below the head)
        self.draw_line(stdscr, center_x, head_y + 2, left_arm_x, left_arm_y, "/", color_attr)
        self.draw_line(stdscr, center_x, head_y + 2, right_arm_x, right_arm_y, "\\", color_attr)

        # Legs (anchored at the body bottom)
        self.draw_line(stdscr, center_x, body_bottom_y, left_leg_x, left_leg_y, "/", color_attr)
        self.draw_line(stdscr, center_x, body_bottom_y, right_leg_x, right_leg_y, "\\", color_attr)

        # Add a dancing floor that reacts to the music
        floor_y = body_bottom_y + 6
        floor_width = int(15 + 10 * energy)
        for x in range(center_x - floor_width, center_x + floor_width + 1):
            if 0 <= x < width and 0 <= floor_y < height:
                floor_hue = (hue_offset + (x - center_x) / floor_width / 2) % 1.0
                floor_color = self.hsv_to_color_pair(stdscr, floor_hue, 0.7, 0.8)
                try:
                    stdscr.addstr(floor_y, x, "_", floor_color)
                except curses.error:
                    pass

    def draw_line(self, stdscr, x1, y1, x2, y2, char, attr):
        """Draw a line between two points using Bresenham's algorithm."""
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        dx = abs(x2 - x1)
        dy = abs(y2 - y1)
        sx = 1 if x1 < x2 else -1
        sy = 1 if y1 < y2 else -1
        err = dx - dy

        height, width = stdscr.getmaxyx()

        while True:
            if 0 <= y1 < height and 0 <= x1 < width:
                try:
                    stdscr.addstr(y1, x1, char, attr)
                except curses.error:
                    pass  # bottom-right cell writes raise even when in bounds

            if x1 == x2 and y1 == y2:
                break

            e2 = 2 * err
            if e2 > -dy:
                err -= dy
                x1 += sx
            if e2 < dx:
                err += dx
                y1 += sy

    def handle_keypress(self, key):
        """Adjust dance mode/speed; return True when the key was consumed."""
        if key == 'd':    # Change dance mode
            self.dance_mode = (self.dance_mode + 1) % 3
            return True
        elif key == 's':  # Speed up
            self.speed = min(1.5, self.speed + 0.1)
            return True
        elif key == 'S':  # Slow down
            self.speed = max(0.1, self.speed - 0.1)
            return True
        return False
class WaveVisualizer(VisualizerBase):
    """Sum-of-sines oscilloscope: the first 20 spectrum bands each drive one
    sine component whose amplitude follows the band magnitude."""

    def __init__(self):
        super().__init__(name="Wave")

    def draw(self, stdscr, spectrum, height, width, energy, hue_offset):
        """Render one frame of the composite wave across the full width."""
        mid_y = height // 2
        samples = width

        # One timestamp per frame so every component shares a consistent
        # phase reference (previously time.time() was sampled once per band,
        # letting components drift within a single frame).
        now = time.time()

        wave = [0.0] * samples
        for i in range(min(20, len(spectrum))):
            # Each band contributes a sine of increasing frequency
            freq = (i + 1) * 2
            amp = spectrum[i] * 10 * (height / 4)
            phase = now * (i + 1) * 1.5

            for x in range(samples):
                wave[x] += amp * math.sin(2 * math.pi * freq * x / samples + phase)

        # Draw wave (samples == width, so no extra x bound is needed)
        for x in range(samples):
            y = int(mid_y + wave[x])
            if 0 <= y < height - 1:
                # Hue follows horizontal position; saturation/brightness grow
                # with the local amplitude relative to a quarter screen.
                rel_amp = abs(wave[x]) / (height / 4) if height > 0 else 0
                hue = (x / width + hue_offset) % 1.0
                # BUGFIX: clamp to 1.0 — |wave[x]| can exceed height/4, which
                # used to push invalid saturation/value into the HSV mapping.
                sat = min(1.0, 0.8 + 0.2 * rel_amp)
                val = min(1.0, 0.7 + 0.3 * rel_amp)

                color_attr = self.hsv_to_color_pair(stdscr, hue, sat, val)
                try:
                    stdscr.addstr(y, x, "•", color_attr | curses.A_BOLD)
                except curses.error:
                    pass  # writing the last cell of a row can raise harmlessly