├── .github
├── funding.yaml
└── workflows
│ └── python-publish.yml
├── .gitignore
├── .readthedocs.yaml
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── docs
├── README.md
├── conf.py
├── index.rst
└── requirements.txt
├── examples
├── __init__.py
└── mug_objectron
│ ├── __init__.py
│ └── demo.py
├── justpyplot
├── __init__.py
├── justpyplot.py
└── textrender.py
├── pyproject.toml
├── resources
├── demo.gif
├── jupyter_pil.png
└── sinus.gif
└── tests
├── __init__.py
├── test.py
├── test_basic.py
├── test_basic_plot.py
├── test_plot.py
├── test_plot_components.py
├── test_standalone.ipynb
├── test_standalone.py
└── test_textrender.py
/.github/funding.yaml:
--------------------------------------------------------------------------------
1 | github: bedbad
2 | buy_me_a_coffee: bedbad
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package to PyPI when a release is created
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
3 |
4 | # This workflow uses actions that are not certified by GitHub.
5 | # They are provided by a third-party and are governed by
6 | # separate terms of service, privacy policy, and support
7 | # documentation.
8 |
9 | name: Upload Python Package
10 |
11 | on:
12 | release:
13 | types: [published]
14 |
15 | permissions:
16 | contents: read
17 |
18 | jobs:
19 | release-build:
20 | runs-on: ubuntu-latest
21 |
22 | steps:
23 | - uses: actions/checkout@v4
24 |
25 | - uses: actions/setup-python@v5
26 | with:
27 | python-version: "3.x"
28 |
29 | - name: Build release distributions
30 | run: |
31 | # NOTE: put your own distribution build steps here.
32 | python -m pip install build
33 | python -m build
34 |
35 | - name: Upload distributions
36 | uses: actions/upload-artifact@v4
37 | with:
38 | name: release-dists
39 | path: dist/
40 |
41 | pypi-publish:
42 | runs-on: ubuntu-latest
43 | needs:
44 | - release-build
45 | permissions:
46 | # IMPORTANT: this permission is mandatory for trusted publishing
47 | id-token: write
48 |
49 | # Dedicated environments with protections for publishing are strongly recommended.
50 | # For more information, see: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules
51 | environment:
52 | name: pypi
53 | # OPTIONAL: uncomment and update to include your PyPI project URL in the deployment status:
54 | # url: https://pypi.org/p/YOURPROJECT
55 | #
56 | # ALTERNATIVE: if your GitHub Release name is the PyPI project version string
57 | # ALTERNATIVE: exactly, uncomment the following line instead:
58 | url: https://pypi.org/project/justpyplot/${{ github.event.release.name }}
59 |
60 | steps:
61 | - name: Retrieve release distributions
62 | uses: actions/download-artifact@v4
63 | with:
64 | name: release-dists
65 | path: dist/
66 |
67 | - name: Publish release distributions to PyPI
68 | uses: pypa/gh-action-pypi-publish@release/v1
69 | with:
70 | packages-dir: dist/
71 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv/
2 | .vscode/
3 | dist/
4 | *.egg-info/
5 | */**/__pycache__
6 | dist/justpyplot-0.0.9-py3-none-any.whl
7 | dist/justpyplot-0.0.9.tar.gz
8 | dist/justpyplot-0.1-py3-none-any.whl
9 | dist/justpyplot-0.1.tar.gz
10 | docs/_*
11 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | build:
4 | os: ubuntu-22.04
5 | tools:
6 | python: "3.8"
7 |
8 | sphinx:
9 | configuration: docs/conf.py
10 |
11 | python:
12 | install:
13 | - requirements: docs/requirements.txt
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to JustPyPlot
2 |
3 | Justpyplot is still in seed stage and is rapidly inflecting
4 | We welcome contributions from all, the code of justpyplot itself is perfect exercise to
5 | learn vectorized tensor operations. Competent contributors should focus on the codebase while for
6 | beginner contributors we reserve documentation, formatting and presentation issues under regular feedback
7 |
8 | ## How to contribute
9 |
10 | - Read the post from this list with suggestions of community, these posts are updating:
11 | - https://www.reddit.com/r/Python/comments/1f7jfgd/why_not_just_get_your_plots_in_numpy/
12 |
13 | - Create an issue that is raised in the posts or you noticed in documentation or README.md if not exists already
14 | - Choose correct issue tag (feature request, documentation, bug, package)
15 | - Proof read your text with tools like grammarly
16 | - If it's a bug be clear about what the actual and expected behaviors are
17 | - If it's a feature be clear about proposed steps to implement it
18 | - If it's a usecase - feature request please specify in detail your *real* need and how you work it currently
19 | - add a PR for README.md with your usecase using justpyplot
20 | - **Fork the repository**
21 | - Click on the "Fork" button at the top right of the repository page to create your own copy of the repository.
22 | - If you are adding a usecase modify your version of README.md right there and then press the Pull Request button
23 | - *Your contribution will be reviewed very quickly(within day or even hours)! Thank you for contributing!*
24 | - If it's documentation or other readme documents it is not necessary to clone the repository
25 | - **Clone the repository**
26 | - Create new branch with github handle/name ( `git branch -c`)
27 | - Commit your work in that branch **as frequently as you can** `git commit -m` with descriptive messages
28 | - it's best to use auto generated commit messages, there is vscode plugin for that as well as tools for other editors
29 | - Test your full change to confirm your contribution works as expected
30 | - After it's been successfully tested merge to main branch `git merge`
31 | - Press on PR button for main branch
32 | - Gather all your commit messages to the Pull Request text, and summarize them in the second half. Don't put any other
33 | information with your Pull Request. Use Issues for that.
34 | - *Your contribution will be reviewed very quickly(within day or even hours)! Thank you for contributing!*
35 |
36 | - **Repeat**
37 | - Any contribution is welcome however at this stage we are looking for contributors who will stick to it
38 | - Amass 10 documentation or 5 code contributions successfully and you will automatically be added to maintainer with write rights to the repo
39 | - **Become an Owner**
40 | - We want to make it clear that a single maintainer who contributed the bulk of the project, by a clear margin more than any other maintainer, will get the ownership of the project. We state that this rule will not change.
41 |
42 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 bedbad
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
Why not just get your plots in numpy?!
2 | Why not just have a simple API
3 |
4 | __plot__(__values__, __grid_options__, __figure_options__, ...) ->
5 | (__figures__, __grid__, __axis__,__labels__)
6 |
7 | which renders plot in numpy/tensors and you can overlay, mask, transform, publish in any way you want whilst fully controlling every detail of your plot and figures?
8 |
9 | ## Installation
10 | ```bash
11 | pip install justpyplot
12 | ```
13 | JustPyPlot: Fast NumPy-Native Plotting
14 |
15 |
16 | ## So Why Yes?
17 | - **Fast**: Vectorized plotting directly to NumPy arrays - no memory screenshots or buffer copies
18 | A high-performance plotting library that renders directly to NumPy arrays. Measured 20-100x faster than matplotlib:
19 | ```bash
20 | timer "full justpyplot + rendering": avg 382 µs ± 135 µs, max 962 µs
21 | ```
22 | - **Simple**: Get all plots parts as arrays with a single function call
23 | ```python
24 | figure, grid, axis, labels = jplt.plot(values,grid_opts,figure_opts,title,(600, 400))
25 | ```
26 | and put them on, in or into everywhere you need in 1-2 subsequent lines
27 |
28 | - **Flexible**: Control every plot element (figures, grid, axis, labels) and every detail of plotting options independently, all design options are parsed from json-style figure and grid option dicts with full documentation available: https://justpyplot.readthedocs.io/en/latest/
29 |
30 | - **No dependencies**: Just vectorized python with numpy(+ other tensor lib you need), which makes standalone and compatible with any use case
31 |
32 | - **Small** Less than 20K total and 1000 lines of core code which you can all understand
33 |
34 |
35 | - **Real-time**: Was initially scrapped for computer vision and robotics for real-time visualization and physical debugging tasks where standard plotting failed
36 |
37 | - **ML/AI Pluggable**: You can, for example stick it onto visual layers of neural network for it to learn your plotted characteristic without changing any dimensions.
38 |
39 | ## BASIC USAGE
40 |
41 | 
42 |
43 | You can explore the documentation for the plot function which supports any flexing or adjusting of the plot as you need such as colors, scatter, connected with line thickness, labels, etc.
44 |
45 | ```python
46 | import numpy as np
47 | import cv2
48 | import time
49 | from justpyplot import justpyplot as jplt
50 |
51 | t0 = time.perf_counter()
52 | xs, ys = [], []
53 |
54 | while(cv2.waitKey(1) != 27): # Press ESC to exit
55 | xt = time.perf_counter() - t0
56 | yx = np.sin(xt)
57 | xs.append(xt)
58 | ys.append(yx)
59 |
60 | # Create plot data array
61 | plot_data = np.array([xs, ys])
62 |
63 | # Generate plot arrays with customized options
64 | figure, grid, labels, title_img = jplt.plot(
65 | plot_data,
66 | grid={'nticks': 5, 'color': (128, 128, 128, 255)},
67 | figure={'scatter': True, 'line_color': (255, 0, 0, 255), 'line_width': 2},
68 | title="sin() from Clock",
69 | size=(600, 400)
70 | )
71 |
72 | # Blend all plot layers into single image
73 | plotted_array = jplt.blend(figure, grid, labels, title_img)
74 |
75 |
76 | cv2.imshow('np array plot', plotted_array)
77 | ```
78 |
79 |
80 | ### Overlaying Multiple Plots
81 |
82 | You get to take your plot, which you can mask, stretch, transform and render wherever in the frame array/canvas/memory you need — and just overlay it in one line.
83 |
84 | Here 1-liner function implementing those basic steps stuck on running mug neural network.
85 | In 3 lines of code it shows how to mess and visually debug 3d position of computer vision/robotics object in physical world (try in examples/mug_objectron/demo.py):
86 |
87 |
88 |
89 |
90 |
91 | ```python
92 | jplt.plot1_at(image, ang_zs,
93 | title='Angle from Z axis', offset=(50,50), size=(270, 300),
94 | point_color=(255,0,0),line_color=(255,0,0), label_color=(255,0,0), grid_color=(126,126,126))
95 | jplt.plot1_at(image, ang_ys,
96 | title='Angle from Y axis', offset=(400,50), size=(270, 300),
97 | point_color=(0,255,0), line_color=(0,255,0),label_color=(0,255,0), grid_color=(126,126,126),
98 | scatter=False)
99 | jplt.plot1_at(image,ang_xs,
100 | title='Angle from X axis', offset=(750,50), size=(270, 300),
101 | point_color=(0,0,255), line_color=(0,0,255),label_color=(0,0,255), grid_color=(126,126,126),
102 | scatter=False)
103 | ```
104 |
105 | ### Jupyter Notebook Usage
106 |
107 | For jupyter support you can render images anyhow you like. One of the simplest ways is to use the blend2PIL function, depending on the Pillow package, which will blend all the plot layers into a single PIL image buffer which you display in the notebook
108 |
109 | ```python
110 | import numpy as np
111 | from IPython.display import Image as ipyImage, display
112 | from justpyplot import justpyplot as jplt
113 |
114 | # Make some dependency data
115 | x = np.linspace(0, 10, 50)
116 | y = np.sin(x)
117 |
118 | # Just plot in numpy using the plot() function
119 | figure_img, grid_img, labels_img, title_img = jplt.plot(
120 | np.array([x, y]),
121 | grid={'nticks': 10, 'color': (128, 128, 128, 255), 'label_color': (255, 0, 0, 255),'precision': 1, 'label_font_size': 0.9},
122 | figure={'scatter':False,'point_color': (255, 0, 0, 255), 'point_radius':3, 'line_color':(0,64,64, 255), 'line_width': 2, 'marker_style':'circle'},
123 | title='Sine Wave',
124 | size=(300, 400),
125 | max_len=100
126 | )
127 |
128 | # blend arrays into PIL picture buffer (requires Pillow)
129 | buffer = jplt.blend2PIL(grid_img, figure_img, labels_img, title_img, format='PNG')
130 |
131 | # Display the image
132 | display(ipyImage(buffer.getvalue()))
133 | ```
134 |
135 |
136 |
137 |
138 |
139 | ### Documentation
140 |
141 | Documentation is fully available at https://justpyplot.readthedocs.io/en/latest/
142 |
143 |
144 |
145 |
146 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Building Documentation for justpyplot
2 |
3 | ## Requirements
4 |
5 | Install documentation dependencies:
6 |
7 | ```bash
8 | pip install -r docs/requirements.txt
9 | ```
10 |
11 | The requirements include:
12 | - sphinx
13 | - sphinx_rtd_theme
14 | - numpy
15 | - opencv-python
16 | - Pillow
17 |
18 | ## Building the Documentation
19 |
20 | From the root directory of the project, run:
21 |
22 | ```bash
23 | sphinx-build -b html docs docs/_build/html
24 | ```
25 |
26 | This will:
27 | 1. Generate HTML documentation from docstrings
28 | 2. Create API reference automatically
29 | 3. Output files to docs/_build/html directory
30 |
31 | ## Development
32 |
33 | For live preview while writing documentation:
34 |
35 | ```bash
36 | sphinx-autobuild docs docs/_build/html
37 | ```
38 |
39 | This will:
40 | 1. Start a local server (usually at http://127.0.0.1:8000)
41 | 2. Auto-rebuild when files change
42 | 3. Auto-reload the browser
43 |
44 | ## Documentation Style Guide
45 |
46 | Use NumPy style docstrings for all Python functions:
47 |
48 | ```python
49 | def function_name(param1: type, param2: type) -> return_type:
50 | """Short description of function.
51 |
52 | Detailed description of function behavior.
53 |
54 | Parameters
55 | ----------
56 | param1 : type
57 | Description of first parameter
58 | param2 : type
59 | Description of second parameter
60 |
61 | Returns
62 | -------
63 | return_type
64 | Description of return value
65 |
66 | Examples
67 | --------
68 | >>> result = function_name(1, 2)
69 | >>> print(result)
70 | 3
71 | """
72 | ```
73 |
74 | ## Project Structure
75 |
76 | ```
77 | docs/
78 | ├── conf.py # Sphinx configuration
79 | ├── index.rst # Main documentation page
80 | ├── requirements.txt # Documentation dependencies
81 | ├── _build/ # Generated documentation
82 | └── _static/ # Static files (images, etc)
83 | ```
84 |
85 | ## Read the Docs Integration
86 |
87 | The documentation automatically builds on [Read the Docs](https://readthedocs.org/) when you push to the main branch. Configuration is in `.readthedocs.yaml` at the root of the project.
88 |
89 | ## Troubleshooting
90 |
91 | If builds fail:
92 | 1. Check the build logs on Read the Docs
93 | 2. Verify all dependencies are in docs/requirements.txt
94 | 3. Test locally with:
95 | ```bash
96 | sphinx-build -b html docs docs/_build/html -a -E
97 | ```
98 | 4. Clear build directory and rebuild:
99 | ```bash
100 | rm -rf docs/_build
101 | sphinx-build -b html docs docs/_build/html
102 | ```
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
import os
import sys

# Add the project root directory to Python path so autodoc can import justpyplot
# when Sphinx builds from the docs/ directory.
sys.path.insert(0, os.path.abspath('..'))

# Configuration file for the Sphinx documentation builder
project = 'justpyplot'
copyright = '2024'
author = 'bedbad'

# Basic Sphinx settings
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',  # For NumPy style docstrings
    'sphinx.ext.viewcode',  # To show source code
]

# Autodoc settings: document only explicitly listed members; the
# autodoc-skip-member hook below further restricts output to __all__.
autodoc_default_options = {
    'members': None,
    'undoc-members': False,
    'private-members': False,
    'special-members': False,
    'imported-members': False
}
27 |
def skip_non_all(app, what, name, obj, skip, options):
    """Autodoc hook: skip any member that is absent from its module's __all__.

    Returns True (skip) when the member is not exported via __all__;
    otherwise defers to Sphinx's own decision in *skip*.
    """
    # ``name`` is a dotted path: everything before the last dot is the module,
    # the final component is the member being documented.
    module_path = name.rsplit('.', 1)[0]
    member = name.split('.')[-1]

    try:
        module = sys.modules[module_path]
    except KeyError:
        # Module not imported — leave Sphinx's decision untouched.
        return skip

    exported = getattr(module, '__all__', [])
    return skip if member in exported else True
45 |
def setup(app):
    """Sphinx extension entry point: register the __all__-based member filter."""
    app.connect('autodoc-skip-member', skip_non_all)
48 |
# Theme and layout settings
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'sphinx_rtd_theme'

# Napoleon settings: the project uses NumPy-style docstrings exclusively.
napoleon_google_docstring = False
napoleon_numpy_docstring = True
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | justpyplot documentation
2 | ========================
3 |
4 | A fast, lightweight plotting library for real-time visualization.
5 |
6 | Basic Usage
7 | -----------
8 |
9 | .. code-block:: python
10 |
11 | import numpy as np
12 | from justpyplot import justpyplot as jplt
13 |
14 | x = np.linspace(0, 10, 50)
15 | y = np.sin(x)
16 |
17 | # Create plot components
18 | figure, grid, labels, title = jplt.plot(
19 | np.array([x, y]),
20 | title='Sine Wave'
21 | )
22 |
23 | # Blend components
24 | final_image = jplt.blend(grid, figure, labels, title)
25 |
26 | Installation
27 | ------------
28 |
29 | .. code-block:: bash
30 |
31 | pip install justpyplot
32 |
33 | API Reference
34 | -------------
35 |
36 | Main Functions
37 | ~~~~~~~~~~~~~~
38 |
39 | .. automodule:: justpyplot.justpyplot
40 | :members:
41 | :imported-members: False
42 | :special-members: False
43 | :private-members: False
44 | :undoc-members: False
45 |
46 | Text Rendering
47 | ~~~~~~~~~~~~~~
48 |
49 | .. automodule:: justpyplot.textrender
50 | :members:
51 | :imported-members: False
52 | :special-members: False
53 | :private-members: False
54 | :undoc-members: False
55 |
56 | Indices
57 | =======
58 |
59 | * :ref:`genindex`
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | sphinx_rtd_theme
3 | numpy
4 | opencv-python
5 | Pillow
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bedbad/justpyplot/5d8b33fbc6ed697273fba113016ec7df72772e1b/examples/__init__.py
--------------------------------------------------------------------------------
/examples/mug_objectron/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bedbad/justpyplot/5d8b33fbc6ed697273fba113016ec7df72772e1b/examples/mug_objectron/__init__.py
--------------------------------------------------------------------------------
/examples/mug_objectron/demo.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import mediapipe as mp
3 | import numpy as np
4 |
5 | from scipy.spatial.transform import Rotation as R
6 |
7 | from justpyplot import justpyplot as jplt
8 |
9 |
10 | # Most popular - MacBook Air 13", in centimeters
11 | # px_sz = 0.01119
12 | # monitor= screeninfo.get_monitors()[0]
13 | # screen_width, screen_height = monitor.width, monitor.height
14 | # screen_width_phys, screen_height_phys = monitor.width_mm, monitor.height_mm
15 | # print("Screen Width: {}, Screen Height: {}\n\n".format(screen_width_phys, screen_height_phys))
16 | # px_sz = screen_width_phys / screen_width
17 | # print("Pixel Size on system: {}, cm".format(px_sz*.1))
18 |
19 |
def reject_outliers(data, max_len=10, m=2):
    """Filter outliers from the trailing window of a 1-D array.

    Keeps the leading ``data[:-max_len]`` untouched and drops values in the
    last ``max_len`` samples that deviate more than ``m`` standard deviations
    from the mean of the whole array.

    Parameters:
        data (ndarray): 1-D array of measurements.
        max_len (int): Size of the trailing window to filter.
        m (float): Number of standard deviations used as the rejection band.

    Returns:
        ndarray: ``data`` with outliers removed from the trailing window.
    """
    if data.size < 3:
        return data

    tail, last_data = data[:-max_len], data[-max_len:]
    u = np.mean(data)
    s = np.std(data)
    # Bug fix: `m` was previously unused, so the band was always 1 std dev;
    # the documented/intended threshold is m standard deviations.
    filtered_data = last_data[np.abs(last_data - u) < m * s]
    data = np.concatenate([tail, filtered_data])
    return data
30 |
31 |
def main():
    """Run a webcam loop that tracks a mug with MediaPipe Objectron and
    overlays live justpyplot angle plots onto each frame.

    Controls: ESC exits the loop, SPACE clears the accumulated angle history.
    """
    # Initialize MediaPipe Objectron with the pretrained 'Cup' model.
    mp_objectron = mp.solutions.objectron
    objectron = mp_objectron.Objectron(
        static_image_mode=False,
        max_num_objects=1,
        min_detection_confidence=0.4,
        min_tracking_confidence=0.25,
        model_name='Cup',
    )

    # Initialize MediaPipe Drawing Utils
    mp_drawing = mp.solutions.drawing_utils

    # Initialize the webcam (device 0)
    cap = cv2.VideoCapture(0)

    # Per-frame measurement history; the angle lists feed the live plots.
    # NOTE(review): distances/volumes are collected but never plotted here.
    angles_x = []
    angles_y = []
    angles_z = []
    distances = []
    volumes = []

    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print('Ignoring empty camera frame.')
            continue

        # Convert the BGR image to RGB (MediaPipe expects RGB input)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Process the image and retrieve the results
        results = objectron.process(image)

        if results.detected_objects:
            for detected_object in results.detected_objects:
                # Retrieve rotation, translation, and size/distance of the cup
                rotation = detected_object.rotation
                translation = detected_object.translation
                # Distance is the norm of the translation vector
                distance = np.linalg.norm(translation)
                dimensions = detected_object.scale  # per-axis scale of the box
                volume_size = np.prod(dimensions)  # product of dimensions ≈ volume

                # Decompose the rotation matrix into z-y-x Euler angles (degrees)
                r = R.from_matrix(rotation)
                rotations = r.as_euler('zyx', degrees=True)
                # Accumulate history for the three plots below
                angles_z.append(rotations[0])
                angles_y.append(rotations[1])
                angles_x.append(rotations[2])
                volumes.append(volume_size)
                distances.append(distance)
                ang_zs = np.array(angles_z)
                ang_ys = np.array(angles_y)
                ang_xs = np.array(angles_x)

                # Draw the three angle plots directly onto the camera frame,
                # side by side (offsets 50/400/750), color-coded per axis.
                jplt.plot1_at(
                    image,
                    ang_zs,
                    title='Angle from Z axis',
                    offset=(50, 50),
                    size=(270, 300),
                    point_color=(255, 0, 0),
                    line_color=(255, 0, 0),
                    label_color=(255, 0, 0),
                    grid_color=(126, 126, 126),
                )
                jplt.plot1_at(
                    image,
                    ang_ys,
                    title='Angle from Y axis',
                    offset=(400, 50),
                    size=(270, 300),
                    point_color=(0, 255, 0),
                    line_color=(0, 255, 0),
                    label_color=(0, 255, 0),
                    grid_color=(126, 126, 126),
                    scatter=False,
                )
                jplt.plot1_at(
                    image,
                    ang_xs,
                    title='Angle from X axis',
                    offset=(750, 50),
                    size=(270, 300),
                    point_color=(0, 0, 255),
                    line_color=(0, 0, 255),
                    label_color=(0, 0, 255),
                    grid_color=(126, 126, 126),
                    scatter=False,
                )

                # print("Rotation:\n", rotations)
                # print("Volume Size:", volume_size)
                # print("3D Distance:", distance)

                # Draw the 3D bounding box and axis
                mp_drawing.draw_landmarks(
                    image, detected_object.landmarks_2d, mp_objectron.BOX_CONNECTIONS
                )
                mp_drawing.draw_axis(image, rotation, translation)

        # Display the annotated image (convert back to BGR for OpenCV)
        cv2.imshow('MediaPipe Objectron', cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        c = cv2.waitKey(5)
        if c == 27:  # ESC: quit
            break
        elif c == 32:  # SPACE: reset plotted angle history
            angles_z.clear()
            angles_x.clear()
            angles_y.clear()

    # Release the webcam and destroy all windows
    cap.release()
    cv2.destroyAllWindows()
148 |
149 |
150 | if __name__ == '__main__':
151 | main()
152 |
--------------------------------------------------------------------------------
/justpyplot/__init__.py:
--------------------------------------------------------------------------------
1 | VERSION = '0.1.0'
2 |
--------------------------------------------------------------------------------
/justpyplot/justpyplot.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2023 bedbad
2 | import numpy as np
3 | from typing import Tuple, Optional
4 | import functools
5 | import importlib
6 | from justpyplot.textrender import vectorized_text
7 |
8 | __all__ = [
9 | 'plot', # Main plotting function
10 | 'blend', # Core blending function for numpy arrays
11 | 'blend2PIL', # Specialized blending for Jupyter/PIL output
12 | 'plot_at', # Plot directly onto existing array
13 | 'plot1_at', # Plot 1D array onto existing array
14 | ]
15 |
16 |
17 | # Attempt to import optional modules
def is_module_available(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        importlib.import_module(module_name)
    except ImportError:
        return False
    return True
24 |
# Feature flags for optional dependencies; each guards a conditional import
# below so the library degrades gracefully when the package is missing.
cv2_available = is_module_available("cv2")
perf_timer_available = is_module_available("perf_timer")
PIL_available = is_module_available("PIL")

if cv2_available:
    import cv2

if perf_timer_available:
    from perf_timer import PerfTimer
    # Named timers used by the debug_performance decorator; empty dict when
    # perf_timer is absent, which makes the decorator a no-op.
    perf_timers = {
        '_veclinesperf': PerfTimer('vectorized lines render'),
        '_plotperf': PerfTimer('full plot rendering')
    }
else:
    perf_timers = {}
40 |
41 |
def debug_performance(perf_name: str):
    """Decorator factory: time the wrapped call with the named PerfTimer.

    When *perf_name* has no entry in the module-level ``perf_timers`` dict
    (i.e. perf_timer is not installed), the wrapped function runs untimed.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            timer = perf_timers.get(perf_name)
            if timer is None:
                return func(*args, **kwargs)
            with timer:
                return func(*args, **kwargs)
        return wrapper
    return decorator
54 |
def adjust_values(values, grid_shape):
    """
    Scale values so they maximally fill a grid box.

    Parameters:
        values (ndarray): 2 x N array of input values (one row per axis).
        grid_shape (tuple): The shape of the grid box.

    Returns:
        ndarray: Values shifted and scaled into the grid box.
        ndarray: Per-axis [min, max] bounds of the raw values.
        ndarray: Per-axis scaling factor applied.
        ndarray: Rounded per-axis median of the raw values.
    """
    lows = np.min(values, axis=1)
    highs = np.max(values, axis=1)
    bounds = np.array([lows, highs])

    # Per-axis span; constant axes get a span of 1 to avoid division by zero.
    span = highs - lows
    span[span == 0] = 1

    scale = np.array(grid_shape) / span

    # Shift each axis to start at its minimum, then stretch to the grid box.
    adjusted = (values - lows[:, np.newaxis]) * scale[:, np.newaxis]

    # Rounded per-axis median of the raw (unscaled) values.
    median_degree = np.round(np.median(values, axis=1)).astype(int)

    return adjusted, bounds, scale, median_degree
86 |
87 |
def adjust_values_maxlen(values, grid_shape, max_len):
    """
    Clip values to the trailing max_len columns and scale to fit a grid box.

    Parameters:
        values (ndarray): 2 x N array; row 0 is x, row 1 is y.
        grid_shape (tuple): Shape of the grid box. Note it is reversed before
            scaling, so scale[0] applies to x and scale[1] to y.
        max_len (int): Maximum number of trailing columns to keep.

    Returns:
        ndarray: The adjusted values array.
        ndarray: Bounds, column-stacked as [[xmin, ymin], [xmax, ymax]].
        ndarray: The per-axis scaling factor.
        ndarray: Rounded log10 of the per-axis median magnitude (0 when an
            axis is degenerate).
    """
    # y-bounds come from the FULL history, x-bounds from the clipped window,
    # so the y-range stays stable while the x-window slides.
    ybounds = np.array([np.min(values[1, :]), np.max(values[1, :])])
    values = values[:, -max_len:]
    xbounds = np.array([np.min(values[0]), np.max(values[0])])
    bounds = np.stack([xbounds, ybounds], axis=1)

    value_range = np.array([xbounds[1] - xbounds[0], ybounds[1] - ybounds[0]])

    # Bug fix: a degenerate (constant) axis previously did
    # `value_range[0] = values[0]`, assigning a whole row into a single slot —
    # a ValueError for any window longer than one sample. Fall back to a range
    # of 1 instead, matching adjust_values(); the adjusted output for such an
    # axis is all zeros either way.
    degenerate = value_range == 0
    value_range = np.where(degenerate, 1, value_range)

    median_degree = np.zeros(2, dtype=int)
    if not degenerate.any():
        # Order of magnitude of the typical value on each axis.
        median_degree = np.round(
            np.log10(np.median(np.abs(values), axis=1))
        ).astype(int)

    # grid_shape is reversed so the factor order matches (x, y).
    scale = np.array(grid_shape[::-1]) / value_range

    # Shift each axis to its minimum and stretch to fill the grid box.
    adjusted_values = (values - bounds[0, :, np.newaxis]) * (scale[:, np.newaxis])

    return adjusted_values, bounds, scale, median_degree
132 |
133 |
def vectorized_line(y0, x0, y1, x1, canvas_size, thickness):
    """
    Build a boolean mask with a rasterized line segment set to True.

    Parameters:
        y0 (int): y-coordinate of the line start.
        x0 (int): x-coordinate of the line start.
        y1 (int): y-coordinate of the line end.
        x1 (int): x-coordinate of the line end.
        canvas_size (tuple): (height, width) of the mask.
        thickness (int): Line thickness. NOTE(review): currently unused by
            this implementation — the rendered line is always 1 px.

    Returns:
        numpy.ndarray: Boolean mask of shape canvas_size with the line True.
    """
    # One sample per pixel along the longest axis span guarantees coverage.
    steps = max(np.max(abs(x1 - x0)), np.max(abs(y1 - y0))) + 1
    t = np.linspace(0, 1, steps)

    # Interpolate integer pixel coordinates along the segment.
    xs = (x0 + np.outer(t, (x1 - x0))).astype(int)
    ys = (y0 + np.outer(t, (y1 - y0))).astype(int)

    mask = np.zeros(canvas_size, dtype=bool)
    mask[ys.ravel(), xs.ravel()] = True
    return mask
160 |
161 |
@debug_performance('_veclinesperf')
def vectorized_lines(y0, x0, y1, x1, img_array, clr=(0, 0, 255)):
    """
    Rasterize line segments into an image array in one vectorized pass.

    Parameters:
        y0 (array-like): Start y-coordinates of the lines.
        x0 (array-like): Start x-coordinates of the lines.
        y1 (array-like): End y-coordinates of the lines.
        x1 (array-like): End x-coordinates of the lines.
        img_array (ndarray): Image to draw into; modified in place.
        clr (tuple, optional): BGR/RGB color written to line pixels.
            Defaults to (0, 0, 255).

    Returns:
        ndarray: The same img_array with the lines painted.
    """
    # One sample per pixel along the longest axis span guarantees coverage.
    steps = max(np.max(abs(x1 - x0)), np.max(abs(y1 - y0))) + 1
    t = np.linspace(0, 1, steps)

    # Interpolate integer pixel coordinates for every segment at once.
    xs = (x0 + np.outer(t, (x1 - x0))).astype(int)
    ys = (y0 + np.outer(t, (y1 - y0))).astype(int)

    img_array[ys.ravel(), xs.ravel()] = clr
    return img_array
187 |
188 |
@debug_performance('_veclinesperf')
def vectorized_lines_with_thickness(
    y0, x0, y1, x1, img_array, thickness, clr=(0, 0, 255)
):
    """
    Draw multiple lines with specified thickness on an image array.

    This function uses vectorized operations to draw lines between pairs of points
    defined by corresponding elements in the x0, y0 (start points) and x1, y1 (end points)
    arrays. It modifies the input image array in-place by setting the color of the pixels
    along the lines to the specified color.

    Parameters:
        y0 (np.ndarray): An array of y-coordinates for the start points of the lines.
        x0 (np.ndarray): An array of x-coordinates for the start points of the lines.
        y1 (np.ndarray): An array of y-coordinates for the end points of the lines.
        x1 (np.ndarray): An array of x-coordinates for the end points of the lines.
        img_array (np.ndarray): The image array on which to draw the lines. This array will be modified in-place.
        thickness (int): The thickness of the lines to be drawn.
        clr (tuple): A tuple of three integers representing the color of the lines in BGR (blue, green, red) format.

    Returns:
        np.ndarray: The modified image array with the lines drawn on it.

    Example:
        >>> img = np.zeros((100, 100, 3), dtype=np.uint8)
        >>> y0 = np.array([10, 20])
        >>> x0 = np.array([10, 20])
        >>> y1 = np.array([80, 80])
        >>> x1 = np.array([80, 30])
        >>> vectorized_lines_with_thickness(y0, x0, y1, x1, img, 3, (255, 0, 0))
    """
    # One sample per pixel step along the longest axis of any segment.
    num_points = max(np.max(abs(x1 - x0)), np.max(abs(y1 - y0))) + 1
    t = np.linspace(0, 1, num_points)
    # Interpolated pixel coordinates; rows index t, columns index the segment.
    x = (x0 + np.outer(t, (x1 - x0))).astype(int)
    y = (y0 + np.outer(t, (y1 - y0))).astype(int)
    # Offsets of -thickness//2 .. +thickness//2 used to fatten the line.
    shift_indices = np.arange(-thickness // 2, thickness // 2 + 1)
    # Add a trailing axis so per-segment signs broadcast against the
    # (num_points, n_segments) coordinate grids below.
    y1 = y1[:, np.newaxis]
    y0 = y0[:, np.newaxis]
    x1 = x1[:, np.newaxis]
    x0 = x0[:, np.newaxis]
    # NOTE(review): the thickness offsets are applied along the sign of each
    # axis delta, not perpendicular to the line; for an axis-aligned segment
    # one of the two shifts collapses to zero — confirm this is the intended
    # thickness behavior.
    x_shifted = x[..., np.newaxis] + shift_indices * np.sign(x1 - x0)
    y_shifted = y[..., np.newaxis] + shift_indices * np.sign(y1 - y0)
    # Clip so shifted pixels never index outside the image.
    x_shifted = np.clip(x_shifted, 0, img_array.shape[1] - 1)
    y_shifted = np.clip(y_shifted, 0, img_array.shape[0] - 1)
    # Color every (point, segment, shift) pixel in one vectorized write.
    img_array[y_shifted.ravel(), x_shifted.ravel()] = clr
    return img_array
243 |
244 |
def plot2_at(
    img_array: np.ndarray,
    values: np.array,
    offset: Tuple[int, int],
    title: str = 'Measuring',
    size: Tuple[int, int] = (300, 300),
    point_color: Tuple[int, int, int, int] = (0, 0, 255),
    r=2,
    nticks: int = 16,
    grid_color: Tuple[int, int, int, int] = (128, 128, 128),
    precision: int = 4,
    default_font_size: float = 0.5,
    default_font_size_small: float = 0.4,
    label_color: Tuple[int, int, int, int] = (0, 0, 255),
    scatter=True,
    thickness=2,
    line_color: Tuple[int, int, int, int] = (0, 0, 255),
    max_len: int = 100,
) -> np.ndarray:
    """Overlay an adaptive plot of 2D `values` onto `img_array` at `offset`.

    Scales the data to fit a grid box derived from `size`, then draws the
    grid, data points (optionally connected by thick segments), y-axis tick
    labels and a title — all with vectorized NumPy operations plus
    cv2.putText for text. Measured 20-100x faster than matplotlib; useful
    for overlaying real-time plots on images and video frames.

    Args:
        img_array: Image (e.g. a video frame) the plot is drawn onto, in place.
        values: 2xN NumPy array of (x, y) series; rescaled to the grid by
            adjust_values_maxlen.
        offset: (row, col) of the plot's top-left corner inside img_array.
        title: Plot title; a ', 10^k' magnitude suffix is appended.
        size: Plot extent in pixels; index 0 is used for rows, index 1 for
            columns — NOTE(review): confirm intended (height, width) order,
            the square default hides any transposition.
        point_color: Color written into data-point pixels.
        r: Point radius; each point is stamped as a (2r+1)x(2r+1) square.
        nticks: Number of grid cells per axis.
        grid_color: Color of the grid lines.
        precision: Total digit budget for y-axis tick labels.
        default_font_size: Title font scale.
        default_font_size_small: Tick-label font scale.
        label_color: Color for tick labels and the title.
        scatter: If True draw points only; if False also connect them.
        thickness: Thickness of connecting segments.
        line_color: Color of connecting segments.
        max_len: History limit forwarded to adjust_values_maxlen.

    Returns:
        img_array: The same array with the plot drawn in.
    """

    font_size = default_font_size
    font_size_small = default_font_size_small
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_size_title = cv2.getTextSize(title, font, font_size, 1)[0]
    # Vertical margin reserves room for the title above the grid.
    margin_ver = int(text_size_title[1] * 2.0)

    # Horizontal margin reserves room for `precision`-wide tick labels.
    axlablen = cv2.getTextSize('A' * precision, font, font_size_small, 1)[0][0]
    margin_hor = int(axlablen * 1.5)

    # Grid corners in (row, col) order, inset by the margins.
    grid_topleft = np.array((margin_ver, margin_hor))
    grid_botright = np.array(size) - grid_topleft
    gsize = grid_botright - grid_topleft
    # Shrink the grid so it divides evenly into nticks cells, splitting the
    # remainder (iota) between both sides to keep it centered.
    gsize2 = gsize - (gsize % nticks)
    iota = (gsize - gsize2) / 2
    gsize = gsize2
    grid_topleft = (grid_topleft + iota).astype(int)
    grid_botright = (grid_botright - iota).astype(int)
    pxdelta = (gsize // nticks).astype(int)  # pixels per grid cell

    gh, gw = tuple(gsize)
    # Rescale data into grid coordinates; bounds/scale/median_degree describe
    # the fitted value range (see adjust_values_maxlen).
    adjusted_values, bounds, scale, median_degree = adjust_values_maxlen(
        values, gsize, max_len=max_len
    )
    pxdelta = (gsize // nticks).astype(int)
    # Adjust the title to include the multiplier

    # Grid corners translated into image coordinates.
    gtl_img = grid_topleft + offset
    gbr_img = grid_botright + offset

    # Append the decimal magnitude of the y series to the title.
    title += f', 10^{int(median_degree[1])}'
    text_x_title = int(
        gtl_img[1] + gw / 2 - cv2.getTextSize(title, font, font_size, 1)[0][0] / 2
    )
    text_y_title = gtl_img[0] - int(text_size_title[1] * 1.5)

    # Draw horizontal, then vertical, grid lines via strided slicing.
    img_array[
        gtl_img[0] : gbr_img[0] + 1 : pxdelta[0], gtl_img[1] : gbr_img[1] + 1, :
    ] = grid_color
    img_array[
        gtl_img[0] : gbr_img[0] + 1, gtl_img[1] : gbr_img[1] + 1 : pxdelta[1], :
    ] = grid_color

    # Render points

    # Image coordinates of the data: x grows rightward, y grows upward from
    # the grid's bottom edge.
    x = gtl_img[1] + (adjusted_values[0, ...]).astype(int)
    y = gbr_img[0] - (adjusted_values[1, ...]).astype(int)

    # Keep only points that fall inside the grid box.
    valid_mask = (
        (gtl_img[0] <= y) & (y <= gbr_img[0]) & (gtl_img[1] <= x) & (x <= gbr_img[1])
    )
    valsx = x[valid_mask]
    valsy = y[valid_mask]
    # Stamp each point as a (2r+1)x(2r+1) square of neighbor offsets.
    x_offset = np.arange(-r, r + 1)
    y_offset = np.arange(-r, r + 1)
    xx, yy = np.meshgrid(x_offset, y_offset)

    # Apply offsets to the original x and y coordinates
    xx = xx.ravel() + valsx[:, None]
    yy = yy.ravel() + valsy[:, None]

    # Flatten the arrays
    xx = xx.ravel()
    yy = yy.ravel()

    # Assign color to the corresponding pixels and the surrounding pixels
    img_array[yy, xx] = point_color

    if not scatter and values.shape[1] >= 2:
        # Connect consecutive points with thick segments (vectorized).
        with _veclinesperf:
            img_array = vectorized_lines_with_thickness(
                y[:-1],
                x[:-1],
                y[1:],
                x[1:],
                img_array,
                clr=line_color,
                thickness=thickness,
            )

    # Y-axis tick labels: one label every second grid row.
    n = gsize[0] // (2 * pxdelta[0])
    tick_color = label_color
    yscale = bounds[1, 1] - bounds[0, 1]
    for i in range(n + 1):
        # Tick value in data units; dotp pads to `precision` total digits.
        tickval = bounds[0, 1] + (yscale / n) * i
        dotp = precision - len(str(tickval).split('.')[0])
        val = '{:.{}f}'.format(tickval, dotp)
        text_size, _ = cv2.getTextSize(val, font, font_size_small, 1)
        text_width, text_height = text_size
        text_x = offset[1] + pxdelta[1] // 2  # Adjust position to the left of the grid
        text_y = gbr_img[0] - i * 2 * pxdelta[0] + text_height // 2
        cv2.putText(
            img_array, val, (text_x, text_y), font, font_size_small, tick_color, 1
        )

    # Draw the title centered above the grid.
    cv2.putText(
        img_array, title, (text_x_title, text_y_title), font, font_size, label_color, 1
    )
    return img_array
401 |
402 |
@debug_performance('_plotperf')
def plot2(
    values: np.array,
    title: str = 'Measuring',
    size: Tuple[int, int] = (300, 300),
    point_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    r=2,
    nticks: int = 16,
    grid_color: Tuple[int, int, int, int] = (128, 128, 128, 255),
    precision: int = 4,
    default_font_size: float = 0.5,
    default_font_size_small: float = 0.4,
    label_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    scatter=True,
    thickness=2,
    line_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    max_len: int = 100,
) -> np.array:
    """Render an adaptive plot of 2D `values` into a new RGBA image array.

    Allocates a transparent (size[0], size[1], 4) uint8 array, rescales the
    2xN `values` to fit the grid (via adjust_values_maxlen) and draws grid,
    points, optional connecting segments, tick labels and title — all with
    vectorized NumPy operations plus cv2.putText for text. Measured
    20-100x faster than matplotlib.

    Args:
        values: 2xN NumPy array of (x, y) series to plot.
        title: Plot title; a ', 10^k' magnitude suffix is appended.
        size: Plot extent in pixels; index 0 is used for rows, index 1 for
            columns — NOTE(review): confirm intended (height, width) order.
        point_color: RGBA color written into data-point pixels.
        r: Point radius; each point is stamped as a (2r+1)x(2r+1) square.
        nticks: Number of grid cells per axis.
        grid_color: RGBA color of the grid lines.
        precision: Total digit budget for y-axis tick labels.
        default_font_size: Title font scale.
        default_font_size_small: Tick-label font scale.
        label_color: RGBA color for tick labels and the title.
        scatter: If True draw points only; if False also connect them.
        thickness: Thickness of connecting segments.
        line_color: RGBA color of connecting segments.
        max_len: History limit forwarded to adjust_values_maxlen.

    Returns:
        np.ndarray: New RGBA image array containing the rendered plot.
    """

    font_size = default_font_size
    font_size_small = default_font_size_small
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_size_title = cv2.getTextSize(title, font, font_size, 1)[0]
    text_y_title = int(text_size_title[1] * 1.5)
    # Vertical margin reserves room for the title above the grid.
    margin_ver = int(text_y_title + text_size_title[1] * 0.5)

    # Horizontal margin reserves room for `precision`-wide tick labels.
    axlablen = cv2.getTextSize('A' * precision, font, font_size_small, 1)[0][0]
    margin_hor = int(axlablen * 1.5)

    # Grid corners in (row, col) order, inset by the margins.
    grid_topleft = np.array((margin_ver, margin_hor))
    grid_botright = np.array(size) - grid_topleft
    gsize = grid_botright - grid_topleft
    # Shrink the grid so it divides evenly into nticks cells, splitting the
    # remainder (iota) between both sides to keep it centered.
    gsize2 = gsize - (gsize % nticks)
    iota = (gsize - gsize2) / 2
    gsize = gsize2
    grid_topleft = (grid_topleft + iota).astype(int)
    grid_botright = (grid_botright - iota).astype(int)
    pxdelta = (gsize // nticks).astype(int)  # pixels per grid cell

    gh, gw = tuple(gsize)
    # Rescale data into grid coordinates; bounds/scale/median_degree describe
    # the fitted value range (see adjust_values_maxlen).
    adjusted_values, bounds, scale, median_degree = adjust_values_maxlen(
        values, gsize, max_len=max_len
    )
    # Append the decimal magnitude of the y series to the title.
    title += f', 10^{int(median_degree[1])}'
    text_x_title = int(
        grid_topleft[1] + gw / 2 - cv2.getTextSize(title, font, font_size, 1)[0][0] / 2
    )
    pxdelta = (gsize // nticks).astype(int)

    # Fresh transparent RGBA canvas for the plot.
    img_array = np.zeros((*size, 4), np.uint8)
    # Adjust the title to include the multiplier

    # Draw horizontal, then vertical, grid lines via strided slicing.
    img_array[
        grid_topleft[0] : grid_botright[0] + 1 : pxdelta[0],
        grid_topleft[1] : grid_botright[1] + 1,
        :,
    ] = grid_color
    img_array[
        grid_topleft[0] : grid_botright[0] + 1,
        grid_topleft[1] : grid_botright[1] + 1 : pxdelta[1],
        :,
    ] = grid_color

    # Render points

    # Image coordinates of the data: x grows rightward, y grows upward from
    # the grid's bottom edge.
    x = grid_topleft[1] + (adjusted_values[0, ...]).astype(int)
    y = grid_botright[0] - (adjusted_values[1, ...]).astype(int)

    # Keep only points that fall inside the grid box.
    valid_mask = (
        (grid_topleft[0] <= y)
        & (y <= grid_botright[0])
        & (grid_topleft[1] <= x)
        & (x <= grid_botright[1])
    )
    valsx = x[valid_mask]
    valsy = y[valid_mask]
    # Stamp each point as a (2r+1)x(2r+1) square of neighbor offsets.
    x_offset = np.arange(-r, r + 1)
    y_offset = np.arange(-r, r + 1)
    xx, yy = np.meshgrid(x_offset, y_offset)

    # Apply offsets to the original x and y coordinates
    xx = xx.ravel() + valsx[:, None]
    yy = yy.ravel() + valsy[:, None]

    # Flatten the arrays
    xx = xx.ravel()
    yy = yy.ravel()

    # Assign color to the corresponding pixels and the surrounding pixels
    img_array[yy, xx] = point_color

    if not scatter and values.shape[1] >= 2:
        # Connect consecutive points with thick segments (vectorized).
        with _veclinesperf:
            img_array = vectorized_lines_with_thickness(
                y[:-1],
                x[:-1],
                y[1:],
                x[1:],
                img_array,
                clr=line_color,
                thickness=thickness,
            )

    # Y-axis tick labels: one label every second grid row.
    n = gsize[0] // (2 * pxdelta[0])
    tick_color = label_color
    yscale = bounds[1, 1] - bounds[0, 1]
    for i in range(n + 1):
        # Tick value in data units; dotp pads to `precision` total digits.
        tickval = bounds[0, 1] + (yscale / n) * i
        dotp = precision - len(str(tickval).split('.')[0])
        val = '{:.{}f}'.format(tickval, dotp)
        text_size, _ = cv2.getTextSize(val, font, font_size_small, 1)
        text_width, text_height = text_size
        text_x = pxdelta[1] // 2  # Adjust position to the left of the grid
        text_y = grid_botright[0] - i * 2 * pxdelta[0] + text_height // 2
        cv2.putText(
            img_array, val, (text_x, text_y), font, font_size_small, tick_color, 1
        )

    # Draw the title centered above the grid.
    cv2.putText(
        img_array, title, (text_x_title, text_y_title), font, font_size, label_color, 1
    )
    return img_array
563 |
564 |
@debug_performance('_plotperf')
def plot1_cv(
    values: np.array,
    title: str = 'Measuring',
    size: Tuple[int, int] = (300, 300),
    point_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    r=2,
    nticks: int = 16,
    grid_color: Tuple[int, int, int, int] = (128, 128, 128, 255),
    precision: int = 2,
    default_font_size: float = 0.5,
    default_font_size_small: float = 0.4,
    label_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    scatter=True,
    thickness=2,
    line_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    max_len: int = 100,
) -> np.array:
    """Render an adaptive plot of 1D `values` into a new RGBA image array.

    Allocates a transparent (size[0], size[1], 4) uint8 array, rescales
    `values` to fill the grid vertically and draws grid, points, optional
    connecting segments, tick labels and title using vectorized NumPy
    operations plus cv2.putText for text. Measured 20-100x faster than
    matplotlib.

    Args:
        values: NumPy 1D array of values to plot over time.
        title: Plot title; a ', 10^k' magnitude suffix is appended.
        size: Plot extent in pixels; index 0 is used for rows, index 1 for
            columns.
        point_color: RGBA color written into data-point pixels.
        r: Point radius; each point is stamped as a (2r+1)x(2r+1) square.
        nticks: Number of grid cells per axis.
        grid_color: RGBA color of the grid lines.
        precision: Decimal places for y-axis tick labels.
        default_font_size: Title font scale.
        default_font_size_small: Tick-label font scale.
        label_color: RGBA color for tick labels and the title.
        scatter: If True draw points only; if False also connect them.
        thickness: Thickness of connecting segments.
        line_color: RGBA color of connecting segments.
        max_len: Keep only the last max_len values (<= 0 keeps all).

    Returns:
        np.ndarray: New RGBA image array containing the rendered plot.

    Example:
        values = sensor_data[-100:]
        plot_img = plot1_cv(values, title="Sensor Data")
    """
    # Trim history BEFORE computing the value range, so the plot scale
    # reflects only the window actually drawn. (Bug fix: min/max were
    # previously computed over the full array and the array truncated
    # afterwards; sibling plot1 already uses this order.)
    if max_len > 0:
        values = values[-max_len:]

    min_val = np.min(values)
    max_val = np.max(values)

    # Calculate adjustment factor and shift. `power` is the decimal
    # magnitude shown in the title; `shift` moves the minimum to zero.
    if max_val - min_val == 0:
        # Flat series: synthesize a non-degenerate range.
        if min_val == 0:
            scale = 1.0
            shift = -0.5
            power = 1
        else:
            scale = min_val / 2
            # abs() so a constant negative series does not produce NaN.
            power = np.floor(np.log10(np.abs(min_val)))
            shift = -scale
    else:
        scale = max_val - min_val
        power = np.ceil(np.log10((np.abs(min_val) + np.abs(max_val)) / 2))
        shift = -min_val

    # Multiplier that rescales tick values by the data's magnitude.
    multiplier = 10**-power

    title += f', 10^{int(power)}'

    font_size = default_font_size
    font_size_small = default_font_size_small
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_size_title = cv2.getTextSize(title, font, font_size, 1)[0]
    text_y_title = int(text_size_title[1] * 1.5)
    # Vertical margin reserves room for the title above the grid.
    margin_ver = int(text_y_title + text_size_title[1] * 0.5)

    # Horizontal margin reserves room for `precision`-wide tick labels.
    axlablen = cv2.getTextSize('A' * precision, font, font_size_small, 1)[0][0]
    margin_hor = int(axlablen * 1.5)

    # Grid corners in (row, col) order, inset by the margins.
    grid_topleft = np.array((margin_ver, margin_hor))
    grid_botright = np.array(size) - grid_topleft
    gsize = grid_botright - grid_topleft
    # Shrink the grid so it divides evenly into nticks cells, splitting the
    # remainder (iota) between both sides to keep it centered.
    gsize2 = gsize - (gsize % nticks)
    iota = (gsize - gsize2) / 2
    grid_topleft = (grid_topleft + iota).astype(int)
    grid_botright = (grid_botright - iota).astype(int)
    gsize = gsize2

    gh, gw = tuple(gsize)
    text_x_title = int(grid_topleft[1] + gw / 2 - text_size_title[0] / 2)
    pxdelta = (gsize // nticks).astype(int)  # pixels per grid cell

    # Fresh transparent RGBA canvas for the plot.
    img_array = np.zeros((*size, 4), np.uint8)

    # Vertical pixels per data unit; map values so the minimum sits on the
    # grid's bottom edge.
    adjust_factor = gsize[0] / scale
    adjusted_values = (values + shift) * adjust_factor

    # Draw horizontal, then vertical, grid lines via strided slicing.
    img_array[
        grid_topleft[0] : grid_botright[0] + 1 : pxdelta[0],
        grid_topleft[1] : grid_botright[1] + 1,
        :,
    ] = grid_color
    img_array[
        grid_topleft[0] : grid_botright[0] + 1,
        grid_topleft[1] : grid_botright[1] + 1 : pxdelta[1],
        :,
    ] = grid_color

    # Render points: newest value at the right edge, marching left.
    i = np.arange(len(adjusted_values))
    x = grid_botright[1] - ((i + 1) * gw // len(adjusted_values))
    y = grid_botright[0] - (adjusted_values).astype(int)

    # Keep only points that fall inside the grid box.
    valid_mask = (
        (grid_topleft[0] <= y)
        & (y <= grid_botright[0])
        & (grid_topleft[1] <= x)
        & (x <= grid_botright[1])
    )
    valsx = x[valid_mask]
    valsy = y[valid_mask]
    # Stamp each point as a (2r+1)x(2r+1) square of neighbor offsets.
    x_offset = np.arange(-r, r + 1)
    y_offset = np.arange(-r, r + 1)
    xx, yy = np.meshgrid(x_offset, y_offset)

    # Apply offsets to the original x and y coordinates.
    xx = xx.ravel() + valsx[:, None]
    yy = yy.ravel() + valsy[:, None]

    # Flatten and write the point color in one vectorized assignment.
    xx = xx.ravel()
    yy = yy.ravel()
    img_array[yy, xx] = point_color

    if not scatter and values.shape[0] >= 2:
        # Connect consecutive points with thick segments (vectorized).
        with _veclinesperf:
            img_array = vectorized_lines_with_thickness(
                y[:-1],
                x[:-1],
                y[1:],
                x[1:],
                img_array,
                clr=line_color,
                thickness=thickness,
            )

    # Y-axis tick labels: one label every second grid row.
    n = gsize[0] // (2 * pxdelta[0])
    tick_color = label_color
    for i in range(n + 1):
        # Scale the tick label by the multiplier.
        val = '{:.{}f}'.format((scale / n * i) * multiplier, precision)
        text_size, _ = cv2.getTextSize(val, font, font_size_small, 1)
        text_width, text_height = text_size
        text_x = pxdelta[1] // 2  # Adjust position to the left of the grid
        text_y = grid_botright[0] - i * 2 * pxdelta[0] + text_height // 2
        cv2.putText(
            img_array, val, (text_x, text_y), font, font_size_small, tick_color, 1
        )

    # Draw the title centered above the grid.
    cv2.putText(
        img_array, title, (text_x_title, text_y_title), font, font_size, label_color, 1
    )
    return img_array
755 |
@debug_performance('_plotperf')
def plot1(
    values: np.array,
    title: str = 'Measuring',
    size: Tuple[int, int] = (300, 300),
    point_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    r=2,
    nticks: int = 16,
    grid_color: Tuple[int, int, int, int] = (128, 128, 128, 255),
    precision: int = 2,
    default_font_size: float = 0.8,
    default_font_size_small: float = 0.6,
    label_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    scatter=True,
    thickness=2,
    line_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    max_len: int = 100,
) -> np.array:
    """Draw a plot on a new NumPy image array using textrender for text rendering.

    Creates a new NumPy ndarray and plots the given `values` on it,
    adapting the plot scale and size to fit the input data.
    Plots fast - no single loop in the code, even if you want to connect points with
    line segments, measured 20-100x faster then matplotlib.
    Useful for creating standalone plot images.

    Args:
        values: NumPy 1D array of values to plot over time
        title: Plot title string
        size: (width, height) tuple for plot size in pixels
        point_color: (R, G, B, A) tuple for plot color
        r: Radius of points
        nticks: Number of ticks on the y-axis
        grid_color: (R, G, B, A) tuple for grid color
        precision: Floating point precision for y-axis labels
        default_font_size: Font size for title
        default_font_size_small: Font size for axis labels
        label_color: (R, G, B, A) tuple for label color
        scatter: If True, plot points without connecting lines
        thickness: Thickness of connecting lines
        line_color: (R, G, B, A) tuple for line color
        max_len: Maximum history length for values array

    Returns:
        img_array: New image array with plot

    Example:
        values = sensor_data[-100:]
        plot_img = plot1(values, title="Sensor Data")
    """
    # Trim to the plotting window before computing the value range.
    if max_len > 0:
        values = values[-max_len:]

    min_val = np.min(values)
    max_val = np.max(values)

    # Calculate adjustment factor and shift; `power` is the decimal
    # magnitude shown in the title, `shift` moves the minimum to zero.
    if max_val - min_val == 0:
        # Flat series: synthesize a non-degenerate range.
        if min_val == 0:
            scale = 1.0
            shift = -0.5
            power = 1
            adjust_factor = 1
        else:
            scale = min_val / 2
            # NOTE(review): log10 of a negative constant series yields NaN —
            # plot1_atcv applies np.abs(min_val) here; confirm intended.
            power = np.floor(np.log10(min_val))
            adjust_factor = 1
            shift = -scale
    else:
        scale = max_val - min_val
        power = np.ceil(np.log10((np.abs(min_val) + np.abs(max_val)) / 2))
        shift = -min_val

    # Multiplier that rescales tick values by the data's magnitude.
    multiplier = 10**-power

    title += f', 10^{int(power)}'

    # Estimate text sizes and positions
    font_size = default_font_size  # Adjust this factor as needed
    font_size_small = default_font_size_small  # Adjust this factor as needed

    # Estimate margins and grid size
    margin_ver = int(size[1] * 0.1)  # 10% of height for vertical margin
    margin_hor = int(size[0] * 0.15)  # 15% of width for horizontal margin
    # NOTE(review): (margin_hor, margin_ver) order is transposed relative to
    # plot1_cv/plot2, which use (margin_ver, margin_hor) — confirm intended;
    # the square default size hides any transposition.
    grid_topleft = np.array((margin_hor, margin_ver))
    grid_botright = np.array(size) - grid_topleft
    gsize = grid_botright - grid_topleft

    # Adjust grid size to be divisible by nticks, keeping it centered by
    # splitting the remainder (iota) between both sides.
    gsize2 = gsize - (gsize % nticks)
    iota = (gsize - gsize2) / 2
    grid_topleft = (grid_topleft + iota).astype(int)
    grid_botright = (grid_botright - iota).astype(int)
    gsize = gsize2

    pxdelta = (gsize // nticks).astype(int)  # pixels per grid cell

    # Create image array (transparent RGBA canvas)
    img_array = np.zeros((*size, 4), np.uint8)

    # Vertical pixels per data unit; map values so the minimum sits on the
    # grid's bottom edge.
    adjust_factor = gsize[0] / scale
    adjusted_values = (values + shift) * adjust_factor

    # Draw horizontal, then vertical, grid lines via strided slicing.
    img_array[
        grid_topleft[0] : grid_botright[0] + 1 : pxdelta[0],
        grid_topleft[1] : grid_botright[1] + 1,
        :,
    ] = grid_color
    img_array[
        grid_topleft[0] : grid_botright[0] + 1,
        grid_topleft[1] : grid_botright[1] + 1 : pxdelta[1],
        :,
    ] = grid_color

    # Render points: newest value at the right edge, marching left.
    i = np.arange(len(adjusted_values))
    x = grid_botright[1] - ((i + 1) * gsize[1] // len(adjusted_values))
    y = grid_botright[0] - (adjusted_values).astype(int)

    # Keep only points that fall inside the grid box.
    valid_mask = (
        (grid_topleft[0] <= y)
        & (y <= grid_botright[0])
        & (grid_topleft[1] <= x)
        & (x <= grid_botright[1])
    )
    valsx = x[valid_mask]
    valsy = y[valid_mask]
    # Stamp each point as a (2r+1)x(2r+1) square of neighbor offsets.
    x_offset = np.arange(-r, r + 1)
    y_offset = np.arange(-r, r + 1)
    xx, yy = np.meshgrid(x_offset, y_offset)

    xx = xx.ravel() + valsx[:, None]
    yy = yy.ravel() + valsy[:, None]

    xx = xx.ravel()
    yy = yy.ravel()

    img_array[yy, xx] = point_color

    if not scatter and values.shape[0] >= 2:
        # Connect consecutive points with thick segments (vectorized).
        with _veclinesperf:
            img_array = vectorized_lines_with_thickness(
                y[:-1],
                x[:-1],
                y[1:],
                x[1:],
                img_array,
                clr=line_color,
                thickness=thickness,
            )

    # Render y-axis labels
    tick_color = label_color[:3]  # Remove alpha channel for vectorized_text
    for i in range(nticks + 1):
        val = '{:.{}f}'.format((scale / nticks * i) * multiplier, precision)
        l = len(val)
        # Rough pixel width estimate for the 5px-wide bitmap font glyphs.
        dx = int(l * 5 * font_size_small * 2)
        text_x = grid_topleft[1] - dx  # Approximate text width
        text_y = grid_botright[0] - i * pxdelta[0] - 5 * int(font_size_small)  # Adjust for text height
        img_array = vectorized_text(
            img_array, val, (text_x, text_y), color=tick_color, font_size=font_size_small
        )

    # Draw title
    title_color = label_color[:3]  # Remove alpha channel for vectorized_text
    text_x_title = grid_topleft[1] + (grid_botright[1] - grid_topleft[1]) // 2 - len(title) * 5 * int(font_size * 2) // 2  # Approximate text width
    text_y_title = grid_topleft[0] - int(font_size * 5*2)
    img_array = vectorized_text(
        img_array, title, (text_x_title, text_y_title), color=title_color, font_size=font_size
    )

    return img_array
929 |
930 |
def blend(*arrays) -> np.ndarray:
    """
    Composite any number of RGBA NumPy arrays, first to last.

    The first array is the backdrop; every subsequent array is laid on top
    of it, weighted by its own alpha channel (last array channel). If any
    array's shape differs from the backdrop's, the backdrop is returned
    unchanged.

    Parameters:
        *arrays: NumPy arrays to composite, in drawing order.

    Returns:
        np.ndarray: The composited image, or the first array when the
        shapes are inconsistent.

    Raises:
        ValueError: If called with no arrays at all.
    """
    if not arrays:
        raise ValueError("At least one array must be provided")

    backdrop = arrays[0]

    # Bail out (returning the backdrop untouched) on any shape mismatch.
    if any(layer.shape != backdrop.shape for layer in arrays):
        return backdrop

    # Composite each subsequent layer over the running result.
    result = backdrop.copy()
    for layer in arrays[1:]:
        opacity = layer[..., 3] / 255.0  # alpha channel drives the mix
        result[..., :3] = (1 - opacity[..., None]) * result[..., :3] + opacity[..., None] * layer[..., :3]

    return result
960 |
def blend_at(
    dst_img: np.ndarray, paste_img: np.ndarray, offset: Tuple[int, int]
) -> np.ndarray:
    """Alpha-blend `paste_img` onto `dst_img` in place at `offset`.

    Args:
        dst_img: Destination image, modified in place; must have the same
            number of dimensions as paste_img.
        paste_img: RGBA uint8 image whose alpha channel weights the blend.
        offset: (row, col) of the paste area's top-left corner in dst_img.

    Returns:
        np.ndarray: dst_img with the blended region written back.

    Raises:
        AssertionError: If the pasted image does not fit inside dst_img at
            the given offset, or paste_img is not an RGBA uint8 image.
    """
    # Pasted image must fit: compare offset + size against the destination
    # size elementwise. (Bug fix: the previous check concatenated the offset
    # tuple with the shape tuple and compared the tuples lexicographically,
    # which accepted out-of-bounds pastes.)
    assert dst_img.ndim == paste_img.ndim and np.all(
        np.asarray(offset) + paste_img.shape[0:2] <= np.asarray(dst_img.shape[0:2])
    )
    # Must be RGBA uint8 so the alpha arithmetic below is well defined.
    assert paste_img.shape[2] == 4 and paste_img.dtype == np.uint8
    # Per-pixel opacity in [0, 1], with a trailing axis for broadcasting.
    alpha = paste_img[..., 3][..., None].astype(np.float32) / 255.0
    img = paste_img[..., 0:3]
    sz = img.shape[0:2]
    y0 = offset[0]
    y1 = y0 + sz[0]
    x0 = offset[1]
    x1 = x0 + sz[1]

    # Weighted sum; assigning back into the uint8 slice truncates to int.
    dst_img[y0:y1, x0:x1] = dst_img[y0:y1, x0:x1] * (1 - alpha) + img * alpha
    return dst_img
980 |
981 |
982 | @debug_performance('_plotperf')
983 | def plot1_atcv(
984 | img_array: np.ndarray,
985 | values: np.ndarray,
986 | title: str = 'Measuring',
987 | offset: Tuple[int, int] = (50, 50),
988 | size: Tuple[int, int] = (300, 270),
989 | point_color: Tuple[int, int, int, int] = (0, 0, 255),
990 | r=2,
991 | pxdelta: int = 15,
992 | grid_color: Tuple[int, int, int, int] = (128, 128, 128),
993 | precision: int = 2,
994 | default_font_size: float = 0.75,
995 | default_font_size_small: float = 0.5,
996 | label_color: Tuple[int, int, int, int] = (0, 0, 255),
997 | scatter=False,
998 | thickness=2,
999 | line_color: Tuple[int, int, int, int] = (0, 0, 255),
1000 | max_len: int = 100,
1001 | ) -> np.ndarray:
1002 | """Adaptively draw a plot on a NumPy image array.
1003 |
1004 | Plots given `values` to a given NumPy ndarray, adapting
1005 | the plot scale and size to fit the input data.
1006 | Plots fast - no single loop in the code, even if you want to connect points with
1007 | line segments, measured 20-100x faster then matplotlib.
1008 | Useful for overlaying real-time plots on images and video frames.
1009 |
1010 | Args:
1011 | img_array: NumPy ndarray to draw the plot on, likely a video frame
1012 | values: NumPy 1D array of values to plot over time
1013 | title: Plot title string
1014 | offset: (x, y) offset tuple for the top-left of plot
1015 | size: (width, height) tuple for plot size in pixels
1016 | clr: (R, G, B) tuple for plot color
1017 | pxdelta: Grid size in pixels
1018 | precision: Floating point precision for y-axis labels
1019 | default_font_size: Font size for title
1020 | default_font_size_small: Font size for axis labels
1021 | opacity: Opacity value 0-1 for plot elements
1022 | max_len: Maximum history length for values array
1023 |
1024 | Returns:
1025 | img_array: Image array with overlaid adaptive plot
1026 |
1027 | Example:
1028 | frame = cv2.imread('frame.jpg')
1029 | values = sensor_data[-100:]
1030 | frame = draw_adaptive_plot(frame, values)
1031 | """
1032 | min_val = np.min(values)
1033 | max_val = np.max(values)
1034 |
1035 | if max_len > 0:
1036 | values = values[-max_len:]
1037 |
1038 | # Calculate adjustment factor and shift
1039 | if max_val - min_val == 0:
1040 | if min_val == 0:
1041 | scale = 1.0
1042 | shift = -0.5
1043 | power = 1
1044 | adjust_factor = 1
1045 | else:
1046 | scale = min_val / 2
1047 | power = np.floor(np.log10(np.abs(min_val)))
1048 | adjust_factor = 1
1049 | shift = -scale
1050 | else:
1051 | scale = max_val - min_val
1052 | power = np.ceil(np.log10((np.abs(min_val) + np.abs(max_val)) / 2))
1053 | adjust_factor = size[1] / scale
1054 | shift = -min_val
1055 |
1056 | # Determine the multiplier to scale the tick values above 1.0
1057 | multiplier = 10**-power
1058 |
1059 | # Adjust the title to include the multiplier
1060 | title += f', 10^{int(power)}'
1061 |
1062 | # Adjust values
1063 | adjusted_values = (values + shift) * adjust_factor
1064 |
1065 | # Draw the plot
1066 | height = size[1]
1067 | width = size[0]
1068 |
1069 | top_left = (offset[0], offset[1])
1070 | bottom_right = (offset[0] + width, offset[1] + height)
1071 |
1072 | font_size = default_font_size
1073 | font = cv2.FONT_HERSHEY_SIMPLEX
1074 |
1075 | # Draw grid and rectangle with opacity
1076 | img_array[
1077 | top_left[1] : bottom_right[1] + 1 : pxdelta,
1078 | top_left[0] : bottom_right[0] + 1,
1079 | :,
1080 | ] = grid_color
1081 | img_array[
1082 | top_left[1] : bottom_right[1] + 1,
1083 | top_left[0] : bottom_right[0] + 1 : pxdelta,
1084 | :,
1085 | ] = grid_color
1086 |
1087 | # Render points
1088 | point_color = point_color
1089 |
1090 | # Create an array of indices
1091 | i = np.arange(len(adjusted_values))
1092 | x = bottom_right[0] - ((i + 1) * width // len(adjusted_values))
1093 | y = bottom_right[1] - (adjusted_values).astype(int)
1094 |
1095 | # Create a mask for valid indices
1096 | valid_mask = (
1097 | (top_left[0] <= x)
1098 | & (x <= bottom_right[0])
1099 | & (top_left[1] <= y)
1100 | & (y <= bottom_right[1])
1101 | )
1102 | valsx = x[valid_mask]
1103 | valsy = y[valid_mask]
1104 | # Create a grid of offsets
1105 | x_offset = np.arange(-r, r + 1)
1106 | y_offset = np.arange(-r, r + 1)
1107 | xx, yy = np.meshgrid(x_offset, y_offset)
1108 |
1109 | # Apply offsets to the original x and y coordinates
1110 | xx = xx.ravel() + valsx[:, None]
1111 | yy = yy.ravel() + valsy[:, None]
1112 |
1113 | # Flatten the arrays
1114 | xx = xx.ravel()
1115 | yy = yy.ravel()
1116 |
1117 | # Assign color to the corresponding pixels and the surrounding pixels
1118 | img_array[yy, xx] = point_color
1119 |
1120 | if not scatter and values.shape[0] >= 2:
1121 | # Create pairs of adjacent points
1122 | with _veclinesperf:
1123 | img_array = vectorized_lines_with_thickness(
1124 | y[:-1],
1125 | x[:-1],
1126 | y[1:],
1127 | x[1:],
1128 | img_array,
1129 | clr=line_color,
1130 | thickness=thickness,
1131 | )
1132 |
1133 | # rendering text
1134 | font_size_small = default_font_size_small
1135 | n = height // (2 * pxdelta)
1136 | tick_color = label_color
1137 | for i in range(n + 1):
1138 | # Scale the tick label by the multiplier
1139 | val = '{:.{}f}'.format((scale / n * i) * multiplier, precision)
1140 | text_size, _ = cv2.getTextSize(val, font, font_size_small, 1)
1141 | text_width, text_height = text_size
1142 | text_x = top_left[0] - text_width # Adjust position to the left of the grid
1143 | text_y = bottom_right[1] - i * 2 * pxdelta + pxdelta // 2
1144 | cv2.putText(
1145 | img_array, val, (text_x, text_y), font, font_size_small, tick_color, 3
1146 | )
1147 |
1148 | # Draw title with opacity
1149 | title_color = label_color
1150 | text_size_title = cv2.getTextSize(title, font, font_size, 1)[0]
1151 | text_x_title = top_left[0] + width // 2 - text_size_title[0] // 2
1152 | text_y_title = top_left[1] - text_size_title[1] - pxdelta // 2
1153 | cv2.putText(
1154 | img_array, title, (text_x_title, text_y_title), font, font_size, title_color, 3
1155 | )
1156 |
1157 | return img_array
1158 |
@debug_performance('_plotperf')
def plot1_at(
    img_array: np.ndarray,
    values: np.ndarray,
    title: str = 'Measuring',
    offset: Tuple[int, int] = (50, 50),
    size: Tuple[int, int] = (300, 270),
    point_color: Tuple[int, int, int, int] = (0, 0, 255),
    r=2,
    pxdelta: int = 15,
    grid_color: Tuple[int, int, int, int] = (128, 128, 128),
    precision: int = 2,
    default_font_size: float = 0.75,
    default_font_size_small: float = 0.5,
    label_color: Tuple[int, int, int, int] = (0, 0, 255),
    scatter=False,
    thickness=2,
    line_color: Tuple[int, int, int, int] = (0, 0, 255),
    max_len: int = 100,
) -> np.ndarray:
    """Adaptively draw a plot onto a NumPy image array.

    Plots `values` over the given image, adapting the plot scale to fit
    the (truncated) input data. Fully vectorized point rendering; text is
    drawn with the dependency-free `vectorized_text` renderer.
    Useful for overlaying real-time plots on images and video frames.

    Args:
        img_array: NumPy ndarray to draw the plot on, likely a video frame
        values: NumPy 1D array of values to plot over time
        title: Plot title string; the detected power of ten is appended
        offset: (x, y) offset tuple for the top-left of plot
        size: (width, height) tuple for plot size in pixels
        point_color: Color tuple for plotted points
        r: Point radius in pixels; points are stamped as (2r+1)^2 squares
        pxdelta: Grid cell size in pixels
        grid_color: Color tuple for grid lines
        precision: Floating point precision for y-axis labels
        default_font_size: Font scale for title
        default_font_size_small: Font scale for axis labels
        label_color: Color tuple for tick labels and title
        scatter: If True draw points only, else connect them with lines
        thickness: Connecting line thickness in pixels
        line_color: Color tuple for connecting lines
        max_len: Maximum history length for values array (<= 0 keeps all)

    Returns:
        img_array: Image array with overlaid adaptive plot
    """
    # Truncate history BEFORE computing bounds so the plot scale adapts
    # to the values actually drawn (previously min/max were taken over
    # the untruncated array, which could mis-scale the visible plot).
    if max_len > 0:
        values = values[-max_len:]

    min_val = np.min(values)
    max_val = np.max(values)

    # Calculate adjustment factor and shift; degenerate (constant) data
    # gets a synthetic scale so the points render mid-plot.
    if max_val - min_val == 0:
        if min_val == 0:
            scale = 1.0
            shift = -0.5
            power = 1
            adjust_factor = 1
        else:
            scale = min_val / 2
            power = np.floor(np.log10(np.abs(min_val)))
            adjust_factor = 1
            shift = -scale
    else:
        scale = max_val - min_val
        power = np.ceil(np.log10((np.abs(min_val) + np.abs(max_val)) / 2))
        adjust_factor = size[1] / scale
        shift = -min_val

    # Determine the multiplier to scale the tick values above 1.0
    multiplier = 10**-power

    # Adjust the title to include the multiplier
    title += f', 10^{int(power)}'

    # Map raw values into pixel units within the plot height
    adjusted_values = (values + shift) * adjust_factor

    height = size[1]
    width = size[0]

    top_left = (offset[0], offset[1])
    bottom_right = (offset[0] + width, offset[1] + height)

    font_size = default_font_size

    # Draw grid: horizontal lines every `pxdelta` rows, then vertical
    # lines every `pxdelta` columns
    img_array[
        top_left[1] : bottom_right[1] + 1 : pxdelta,
        top_left[0] : bottom_right[0] + 1,
        :,
    ] = grid_color
    img_array[
        top_left[1] : bottom_right[1] + 1,
        top_left[0] : bottom_right[0] + 1 : pxdelta,
        :,
    ] = grid_color

    # Pixel coordinates of the data points (newest value at the right edge)
    i = np.arange(len(adjusted_values))
    x = bottom_right[0] - ((i + 1) * width // len(adjusted_values))
    y = bottom_right[1] - (adjusted_values).astype(int)

    # Keep only points that fall inside the plot rectangle
    valid_mask = (
        (top_left[0] <= x)
        & (x <= bottom_right[0])
        & (top_left[1] <= y)
        & (y <= bottom_right[1])
    )
    valsx = x[valid_mask]
    valsy = y[valid_mask]

    # Stamp a (2r+1)x(2r+1) square of pixels around every valid point
    x_offset = np.arange(-r, r + 1)
    y_offset = np.arange(-r, r + 1)
    xx, yy = np.meshgrid(x_offset, y_offset)

    xx = xx.ravel() + valsx[:, None]
    yy = yy.ravel() + valsy[:, None]
    xx = xx.ravel()
    yy = yy.ravel()

    # Clip to the image so stamping near a border cannot wrap around
    # (negative indices) or raise IndexError.
    inside = (
        (0 <= xx) & (xx < img_array.shape[1]) & (0 <= yy) & (yy < img_array.shape[0])
    )
    img_array[yy[inside], xx[inside]] = point_color

    if not scatter and values.shape[0] >= 2:
        # Connect consecutive points with thick line segments
        with _veclinesperf:
            img_array = vectorized_lines_with_thickness(
                y[:-1],
                x[:-1],
                y[1:],
                x[1:],
                img_array,
                clr=line_color,
                thickness=thickness,
            )

    # Render y-axis tick labels to the left of the grid
    font_size_small = default_font_size_small
    n = height // (2 * pxdelta)
    tick_color = label_color[:3]  # Remove alpha channel for vectorized_text
    for i in range(n + 1):
        # Scale the tick label by the multiplier
        val = '{:.{}f}'.format((scale / n * i) * multiplier, precision)
        text_x = top_left[0] - len(val) * 5 * int(font_size_small * 2)  # Approximate text width
        text_y = bottom_right[1] - i * 2 * pxdelta + pxdelta // 2
        img_array = vectorized_text(
            img_array, val, (text_x, text_y), color=tick_color, scale=int(font_size_small * 2)
        )

    # Draw title centered above the grid
    title_color = label_color[:3]  # Remove alpha channel for vectorized_text
    text_x_title = top_left[0] + width // 2 - len(title) * 5 * int(font_size * 2) // 2  # Approximate text width
    text_y_title = top_left[1] - 15 * int(font_size * 2) - pxdelta // 2  # Approximate text height
    img_array = vectorized_text(
        img_array, title, (text_x_title, text_y_title), color=title_color, scale=int(font_size * 2)
    )

    return img_array
1327 |
@debug_performance('_plotperf')
def plot1_components(
    values: np.array,
    bounds: Optional[np.ndarray] = None,
    title: str = 'Measuring',
    size: Tuple[int, int] = (300, 300),
    point_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    r=2,
    nticks: int = 16,
    grid_color: Tuple[int, int, int, int] = (128, 128, 128, 255),
    precision: int = 2,
    default_font_size: float = 0.8,
    default_font_size_small: float = 0.6,
    label_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    scatter=True,
    thickness=2,
    line_color: Tuple[int, int, int, int] = (0, 0, 255, 255),
    max_len: int = 100,
    *args,
    **kwargs
) -> Tuple[np.array, np.array, np.array, np.array]:
    """
    Create a plot and return its components as separate NumPy arrays.

    Args:
        (same as plot1 function)
        bounds: Optional[np.ndarray], custom bounds for the plot (shape: (2, 2)
            holding (min, max) per axis); entries left as None are filled
            from the data.

    Returns:
        Tuple[np.array, np.array, np.array, np.array]:
            - figure: The main plot content (points and lines)
            - grid: The grid lines
            - labels: The axis labels
            - title_img: The title image
    """
    if max_len > 0:
        values = values[:, -max_len:]

    # Estimate margins and grid size
    margin_ver = int(size[1] * 0.15)  # 15% of height for vertical margin
    margin_hor = int(size[0] * 0.15)  # 15% of width for horizontal margin
    grid_topleft = np.array((margin_hor, margin_ver))
    grid_botright = np.array(size) - grid_topleft
    gsize = grid_botright - grid_topleft

    # Adjust grid size to be divisible by nticks
    gsize2 = gsize - (gsize % nticks)
    iota = (gsize - gsize2) / 2
    grid_topleft = (grid_topleft + iota).astype(int)
    grid_botright = (grid_botright - iota).astype(int)
    gsize = gsize2

    pxdelta = (gsize // nticks).astype(int)

    if bounds is None:
        bounds = np.array([
            [np.min(values[0]), np.max(values[0])],
            [np.min(values[1]), np.max(values[1])]
        ])
    else:
        bounds = np.array(bounds, copy=True)
        # Ensure bounds cover both dimensions
        if bounds.shape != (2, 2):
            raise ValueError("Bounds should have shape (2, 2) for min/max of x and y")

        # If any bound is None, calculate it from the values
        for i in range(2):
            if bounds[i, 0] is None:
                bounds[i, 0] = np.min(values[i])
            if bounds[i, 1] is None:
                bounds[i, 1] = np.max(values[i])
        # A bounds array that contained None is object-dtype; make it
        # numeric before the arithmetic below.
        bounds = bounds.astype(float)

    value_range = bounds[:, 1] - bounds[:, 0]
    # Guard degenerate (constant) data: a zero range would give an
    # infinite scale and garbage pixel coordinates; scale such an axis
    # by 1 instead (tick labels still show the true constant value).
    safe_range = np.where(value_range == 0, 1, value_range)
    scale = gsize[::-1] / safe_range

    # Correct the broadcasting for adjusted_values
    adjusted_values = (values - bounds[:, 0][:, np.newaxis]) * scale[:, np.newaxis]

    # Order of magnitude of each bound; substitute 1 (magnitude 0) for
    # zero bounds to avoid log10(0) -> -inf.
    magnitude = np.floor(np.log10(np.where(bounds == 0, 1, np.abs(bounds)).astype(float)))
    # Maximum magnitude per dimension: rows are dimensions, so reduce
    # along axis=1 (axis=0 mixed the x and y bounds together).
    max_magnitude = np.max(magnitude, axis=1)

    # Update title with the power of 10 for the y-axis (second dimension)
    title += f', 10^{int(max_magnitude[1])}'

    # Create separate image arrays for each component
    figure = np.zeros((*size, 4), np.uint8)
    grid = np.zeros((*size, 4), np.uint8)
    labels = np.zeros((*size, 4), np.uint8)
    title_img = np.zeros((*size, 4), np.uint8)

    # Draw grid
    grid[
        grid_topleft[0] : grid_botright[0] + 1 : pxdelta[0],
        grid_topleft[1] : grid_botright[1] + 1,
        :,
    ] = grid_color
    grid[
        grid_topleft[0] : grid_botright[0] + 1,
        grid_topleft[1] : grid_botright[1] + 1 : pxdelta[1],
        :,
    ] = grid_color

    # Render points
    x = grid_topleft[1] + adjusted_values[0].astype(int)
    y = grid_botright[0] - adjusted_values[1].astype(int)

    valid_mask = (
        (grid_topleft[0] < y) &
        (y < grid_botright[0]) &
        (grid_topleft[1] < x) &
        (x < grid_botright[1])
    )
    valsx = x[valid_mask]
    valsy = y[valid_mask]
    x_offset = np.arange(-r, r + 1)
    y_offset = np.arange(-r, r + 1)
    xx, yy = np.meshgrid(x_offset, y_offset)

    xx = xx.ravel() + valsx[:, None]
    yy = yy.ravel() + valsy[:, None]

    xx = xx.ravel()
    yy = yy.ravel()

    # Clip the stamped point squares to the image so a large radius near
    # the grid edge cannot wrap around or raise IndexError.
    inside = (0 <= xx) & (xx < figure.shape[1]) & (0 <= yy) & (yy < figure.shape[0])
    figure[yy[inside], xx[inside]] = point_color

    if not scatter and values.shape[1] >= 2:
        with _veclinesperf:
            figure = vectorized_lines_with_thickness(
                y[:-1], x[:-1], y[1:], x[1:],
                figure,
                clr=line_color,
                thickness=thickness,
            )

    # Render y-axis labels
    tick_color = label_color[:3]  # Remove alpha channel for vectorized_text
    for i in range(nticks + 1):
        tick_value = bounds[1, 0] + (value_range[1] * i / nticks)
        val = '{:.{}f}'.format(tick_value, precision)
        l = len(val)
        dx = int(l * 5 * default_font_size_small * 2)
        text_x = grid_topleft[1] - dx  # Approximate text width
        text_y = grid_botright[0] - i * pxdelta[0] - int(5 * default_font_size_small)  # Adjust for text height

        labels = vectorized_text(
            labels, val, (text_x, text_y), color=tick_color, font_size=default_font_size_small
        )

    # Render x-axis labels
    for i in range(nticks + 1):
        tick_value = bounds[0, 0] + (value_range[0] * i / nticks)
        val = '{:.{}f}'.format(tick_value, precision)
        l = len(val)
        dy = int(5 * default_font_size_small * 2)
        text_x = grid_topleft[1] + i * pxdelta[1] - int(l * 2.5 * default_font_size_small)  # Center text
        text_y = grid_botright[0] + dy  # Position below x-axis

        labels = vectorized_text(
            labels, val, (text_x, text_y), color=tick_color, font_size=default_font_size_small
        )

    # Draw title
    title_color = label_color[:3]  # Remove alpha channel for vectorized_text
    text_x_title = grid_topleft[1] + (grid_botright[1] - grid_topleft[1]) // 2 - len(title) * 5 * int(default_font_size * 2) // 2  # Approximate text width
    text_y_title = grid_topleft[0] - int(default_font_size * 5 * 2)
    title_img = vectorized_text(
        title_img, title, (text_x_title, text_y_title), color=title_color, font_size=default_font_size
    )

    return figure, grid, labels, title_img
1501 |
@debug_performance('_plotperf')
def plot(values: np.array,
         grid: dict = None,
         figure: dict = None,
         title: str = 'Plot',
         size: Tuple[int, int] = (300, 300),
         bounds: Optional[np.ndarray] = None,
         max_len: int = 100) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Generate plot components as separate RGBA numpy arrays.

    Creates a plot from input values with customizable grid and figure options.
    Returns separate arrays for figure, grid, labels and title that can be blended together.

    Parameters
    ----------
    values : np.array
        2D array of shape (2, N) containing x and y coordinates
    grid : dict, optional
        Grid customization options:

        nticks : int
            Number of grid divisions (default: 16)

        color : tuple
            RGBA color for grid lines (default: (128, 128, 128, 255))

        label_color : tuple
            RGBA color for axis labels (default: (0, 0, 255, 255))

        label_font_size : float
            Font size for axis labels (default: 0.6)

        precision : int
            Decimal precision for axis labels (default: 2)

        title_margin : int
            Margin above title in pixels (default: 30)

        y_tick_offset : int
            Offset for y-axis labels (default: 5)

        x_tick_offset : int
            Offset for x-axis labels (default: 5)

    figure : dict, optional
        Figure customization options:

        scatter : bool
            Whether to draw points only, without lines (default: True)

        line_color : tuple
            RGBA color for lines (default: (0, 0, 255, 255))

        point_color : tuple
            RGBA color for points (default: (0, 0, 255, 255))

        point_radius : int
            Radius of points in pixels (default: 2)

        marker_style : str
            Point marker style ('circle', 'square', 'triangle', 'cross')
            (default: 'circle')

        line_thickness : int
            Thickness of connecting lines (default: 2)

        line_style : str
            Line style; accepted but currently unused — lines are always
            solid (default: 'solid')

    title : str, optional
        Plot title (default: 'Plot')
    size : tuple of int
        (width, height) of plot in pixels (default: (300, 300))
    bounds : np.ndarray, optional
        Custom bounds for the plot, shape (2, 2) holding (min, max) per
        axis; entries left as None are filled from the data
    max_len : int, optional
        Maximum number of points to plot (default: 100)

    Returns
    -------
    tuple of np.ndarray
        (figure_array, grid_array, labels_array, title_array)
        Each array has shape (height, width, 4) with RGBA channels

    Examples
    --------
    >>> x = np.linspace(0, 10, 50)
    >>> y = np.sin(x)
    >>> plot_arrays = plot(np.array([x, y]))
    >>> final_image = blend(*plot_arrays)
    """
    # Default values from plot1_components
    default_grid = {
        'nticks': 16,
        'color': (128, 128, 128, 255),
        'label_color': (0, 0, 255, 255),
        'label_font_size': 0.6,
        'precision': 2,
        'title_margin': 30,
        'y_tick_offset': 5,
        'x_tick_offset': 5
    }

    default_figure = {
        'scatter': True,
        'point_color': (0, 0, 255, 255),
        'point_radius': 2,
        'line_color': (0, 0, 255, 255),
        'line_thickness': 2,
        'marker_style': 'circle',
        'line_style': 'solid'
    }

    # Create new dicts with defaults, updated by user-provided values
    grid_style = default_grid.copy()
    if grid is not None:
        grid_style.update(grid)

    figure_style = default_figure.copy()
    if figure is not None:
        figure_style.update(figure)

    if max_len > 0:
        values = values[:, -max_len:]

    # Extract grid options
    nticks = grid_style['nticks']
    grid_color = grid_style['color']
    label_color = grid_style['label_color']
    label_font_size = grid_style['label_font_size']
    precision = grid_style['precision']
    title_margin = grid_style['title_margin']
    y_tick_offset = grid_style['y_tick_offset']
    x_tick_offset = grid_style['x_tick_offset']

    # Extract figure options
    scatter = figure_style['scatter']
    point_color = figure_style['point_color']
    r = figure_style['point_radius']
    line_color = figure_style['line_color']
    thickness = figure_style['line_thickness']
    marker_style = figure_style['marker_style']
    # NOTE: line_style is accepted for forward compatibility but not yet
    # implemented — connecting lines are always solid.
    line_style = figure_style['line_style']

    # Estimate margins and grid size
    margin_ver = int(size[1] * 0.15)
    margin_hor = int(size[0] * 0.15)
    grid_topleft = np.array((margin_hor, margin_ver))
    grid_botright = np.array(size) - grid_topleft
    gsize = grid_botright - grid_topleft

    # Adjust grid size to be divisible by nticks
    gsize2 = gsize - (gsize % nticks)
    iota = (gsize - gsize2) / 2
    grid_topleft = (grid_topleft + iota).astype(int)
    grid_botright = (grid_botright - iota).astype(int)
    gsize = gsize2

    pxdelta = (gsize // nticks).astype(int)

    if bounds is None:
        bounds = np.array([
            [np.min(values[0]), np.max(values[0])],
            [np.min(values[1]), np.max(values[1])]
        ])
    else:
        bounds = np.array(bounds, copy=True)
        if bounds.shape != (2, 2):
            raise ValueError("Bounds should have shape (2, 2) for min/max of x and y")

    for i in range(2):
        if bounds[i, 0] is None:
            bounds[i, 0] = np.min(values[i])
        if bounds[i, 1] is None:
            bounds[i, 1] = np.max(values[i])
    # A user-supplied bounds array that contained None is object-dtype;
    # make it numeric before the arithmetic below.
    bounds = bounds.astype(float)

    value_range = bounds[:, 1] - bounds[:, 0]
    # Guard degenerate (constant) data: a zero range would give an
    # infinite scale and garbage pixel coordinates; scale such an axis
    # by 1 instead (tick labels still show the true constant value).
    safe_range = np.where(value_range == 0, 1, value_range)
    scale = gsize[::-1] / safe_range

    adjusted_values = (values - bounds[:, 0][:, np.newaxis]) * scale[:, np.newaxis]

    # Calculate y magnitude based on the range of y values
    y_range = bounds[1, 1] - bounds[1, 0]
    y_magnitude = int(np.floor(np.log10(y_range))) if y_range != 0 else 0

    title += f', 10^{y_magnitude}'

    figure_img = np.zeros((*size, 4), np.uint8)
    grid_img = np.zeros((*size, 4), np.uint8)
    labels_img = np.zeros((*size, 4), np.uint8)
    title_img = np.zeros((*size, 4), np.uint8)

    # Draw grid
    grid_img[
        grid_topleft[0] : grid_botright[0] + 1 : pxdelta[0],
        grid_topleft[1] : grid_botright[1] + 1,
        :,
    ] = grid_color
    grid_img[
        grid_topleft[0] : grid_botright[0] + 1,
        grid_topleft[1] : grid_botright[1] + 1 : pxdelta[1],
        :,
    ] = grid_color

    # Render points
    x = grid_topleft[1] + adjusted_values[0].astype(int)
    y = grid_botright[0] - adjusted_values[1].astype(int)

    if not scatter and values.shape[1] >= 2:
        # with _veclinesperf:
        figure_img = vectorized_lines_with_thickness(
            y[:-1], x[:-1], y[1:], x[1:],
            figure_img,
            clr=line_color,
            thickness=thickness,
        )

    valid_mask = (
        (grid_topleft[0] <= y) & (y < grid_botright[0]) &
        (grid_topleft[1] <= x) & (x < grid_botright[1])
    )
    valsx = x[valid_mask]
    valsy = y[valid_mask]

    if marker_style == 'circle':
        x_offset = np.arange(-r, r + 1)
        y_offset = np.arange(-r, r + 1)
        xx, yy = np.meshgrid(x_offset, y_offset)
        mask = xx**2 + yy**2 <= r**2
    elif marker_style == 'square':
        x_offset = np.arange(-r, r + 1)
        y_offset = np.arange(-r, r + 1)
        xx, yy = np.meshgrid(x_offset, y_offset)
        mask = np.ones_like(xx, dtype=bool)
    elif marker_style == 'triangle':
        x_offset = np.arange(-r, r + 1)
        y_offset = np.arange(-r, r + 1)
        xx, yy = np.meshgrid(x_offset, y_offset)
        mask = (yy <= 0) & (xx + yy >= -r) & (-xx + yy >= -r)
    elif marker_style == 'cross':
        x_offset = np.arange(-r, r + 1)
        y_offset = np.arange(-r, r + 1)
        xx, yy = np.meshgrid(x_offset, y_offset)
        mask = (xx == 0) | (yy == 0)
    else:
        # Default to circle if unsupported style is specified
        x_offset = np.arange(-r, r + 1)
        y_offset = np.arange(-r, r + 1)
        xx, yy = np.meshgrid(x_offset, y_offset)
        mask = xx**2 + yy**2 <= r**2

    # mask to separate shape out of bounded rectangle
    xx = xx[mask]
    yy = yy[mask]

    # 1st - despise the term 'ravel', its more 'unravel' and even more 'straighten'
    # 2nd - this hack allows stamped form vectorized rendering
    # by using the fact that reshaped indexing on both pairs keeps matching exactly
    xx = xx.reshape(1, -1) + valsx.reshape(-1, 1)
    yy = yy.reshape(1, -1) + valsy.reshape(-1, 1)
    # now all point pixels are there in straightened form

    # cut out pixels that are out of bounds
    valid_points = (0 <= xx) & (xx < figure_img.shape[1]) & (0 <= yy) & (yy < figure_img.shape[0])
    # fold points back into plot tensor asigning their places color
    figure_img[yy[valid_points], xx[valid_points]] = point_color

    # Render y-axis labels
    tick_color = label_color[:3]
    for i in range(nticks + 1):
        tick_value = bounds[1, 0] + (value_range[1] * i / nticks)
        val = '{:.{}f}'.format(tick_value, precision)
        l = len(val)
        dx = int(l * 5 * label_font_size * 2)
        text_x = grid_topleft[1] - dx - y_tick_offset  # Apply y-axis offset
        text_y = grid_botright[0] - i * pxdelta[0] - int(5 * label_font_size)

        labels_img = vectorized_text(
            labels_img, val, (text_x, text_y), color=tick_color, font_size=label_font_size, spacing=0.2
        )

    # Render x-axis labels
    for i in range(nticks + 1):
        tick_value = bounds[0, 0] + (value_range[0] * i / nticks)
        val = '{:.{}f}'.format(tick_value, precision)
        l = len(val)
        dy = int(5 * label_font_size * 2)
        text_x = grid_topleft[1] + i * pxdelta[1] - int(l * 2.5 * label_font_size)
        text_y = grid_botright[0] + dy + x_tick_offset  # Apply x-axis offset

        labels_img = vectorized_text(
            labels_img, val, (text_x, text_y), color=tick_color, font_size=label_font_size, spacing=0.1
        )

    # Draw title
    title_color = label_color[:3]
    text_x_title = grid_topleft[1] + (grid_botright[1] - grid_topleft[1]) // 2 - len(title) * 5 * int(label_font_size * 2) // 2
    # Adjust the title's y-position to be closer to the grid
    text_y_title = grid_topleft[0] - int(label_font_size * 10)  # Reduced margin

    title_img = vectorized_text(
        title_img, title, (text_x_title, text_y_title), color=title_color, font_size=label_font_size, spacing=0.2
    )

    return figure_img, grid_img, labels_img, title_img
1810 |
if PIL_available:
    from PIL import Image
    from io import BytesIO

    def blend2PIL(arrays, format='PNG') -> BytesIO:
        """Blend plot component arrays into an encoded PIL image buffer.

        Alpha-composites the four RGBA component arrays produced by
        ``plot`` — figure, grid, labels and title — into one image and
        encodes it into an in-memory buffer, which is convenient for
        direct display in Jupyter notebooks. Requires the Pillow (PIL)
        package to be installed.

        Parameters
        ----------
        arrays : tuple of np.ndarray
            RGBA arrays to blend, in order:
            (figure_array, grid_array, labels_array, title_array).
            Each array should have shape (height, width, 4).

        format : str, optional
            Output image format ('PNG', 'JPEG', etc) (default: 'PNG')

        Returns
        -------
        BytesIO
            Buffer containing the blended image in specified format

        Raises
        ------
        ImportError
            If Pillow package is not installed
        ValueError
            If input arrays have different shapes

        Examples
        --------
        >>> plot_arrays = plot(np.array([x, y]))
        >>> buffer = blend2PIL(plot_arrays)
        >>> display(Image(buffer.getvalue()))  # Jupyter display
        """
        # The grid is the bottom layer; the figure, labels and title are
        # composited on top of it, in that order.
        composite = Image.fromarray(arrays[1], 'RGBA')
        for layer_index in (0, 2, 3):
            overlay = Image.fromarray(arrays[layer_index], 'RGBA')
            composite = Image.alpha_composite(composite, overlay)
        buffer = BytesIO()
        composite.save(buffer, format=format)
        return buffer
1862 |
1863 |
--------------------------------------------------------------------------------
/justpyplot/textrender.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from typing import Tuple
3 |
4 | # Define a 5x5 pixel font for uppercase letters, numbers, and some punctuation
5 | FONT = {
6 | 'A': np.array([
7 | [0,1,1,0,0],
8 | [1,0,0,1,0],
9 | [1,1,1,1,0],
10 | [1,0,0,1,0],
11 | [1,0,0,1,0]
12 | ]),
13 | 'B': np.array([
14 | [1,1,1,0,0],
15 | [1,0,0,1,0],
16 | [1,1,1,0,0],
17 | [1,0,0,1,0],
18 | [1,1,1,0,0]
19 | ]),
20 | 'C': np.array([
21 | [0,1,1,1,0],
22 | [1,0,0,0,0],
23 | [1,0,0,0,0],
24 | [1,0,0,0,0],
25 | [0,1,1,1,0]
26 | ]),
27 | 'D': np.array([
28 | [1,1,1,0,0],
29 | [1,0,0,1,0],
30 | [1,0,0,1,0],
31 | [1,0,0,1,0],
32 | [1,1,1,0,0]
33 | ]),
34 | 'E': np.array([
35 | [1,1,1,1,0],
36 | [1,0,0,0,0],
37 | [1,1,1,0,0],
38 | [1,0,0,0,0],
39 | [1,1,1,1,0]
40 | ]),
41 | 'F': np.array([
42 | [1,1,1,1,0],
43 | [1,0,0,0,0],
44 | [1,1,1,0,0],
45 | [1,0,0,0,0],
46 | [1,0,0,0,0]
47 | ]),
48 | 'G': np.array([
49 | [0,1,1,1,0],
50 | [1,0,0,0,0],
51 | [1,0,1,1,0],
52 | [1,0,0,1,0],
53 | [0,1,1,1,0]
54 | ]),
55 | 'H': np.array([
56 | [1,0,0,1,0],
57 | [1,0,0,1,0],
58 | [1,1,1,1,0],
59 | [1,0,0,1,0],
60 | [1,0,0,1,0]
61 | ]),
62 | 'I': np.array([
63 | [1,1,1,0,0],
64 | [0,1,0,0,0],
65 | [0,1,0,0,0],
66 | [0,1,0,0,0],
67 | [1,1,1,0,0]
68 | ]),
69 | 'J': np.array([
70 | [0,0,1,1,0],
71 | [0,0,0,1,0],
72 | [0,0,0,1,0],
73 | [1,0,0,1,0],
74 | [0,1,1,0,0]
75 | ]),
76 | 'K': np.array([
77 | [1,0,0,1,0],
78 | [1,0,1,0,0],
79 | [1,1,0,0,0],
80 | [1,0,1,0,0],
81 | [1,0,0,1,0]
82 | ]),
83 | 'L': np.array([
84 | [1,0,0,0,0],
85 | [1,0,0,0,0],
86 | [1,0,0,0,0],
87 | [1,0,0,0,0],
88 | [1,1,1,1,0]
89 | ]),
90 | 'M': np.array([
91 | [1,0,0,0,1],
92 | [1,1,0,1,1],
93 | [1,0,1,0,1],
94 | [1,0,0,0,1],
95 | [1,0,0,0,1]
96 | ]),
97 | 'N': np.array([
98 | [1,0,0,0,1],
99 | [1,1,0,0,1],
100 | [1,0,1,0,1],
101 | [1,0,0,1,1],
102 | [1,0,0,0,1]
103 | ]),
104 | 'O': np.array([
105 | [0,1,1,1,0],
106 | [1,0,0,0,1],
107 | [1,0,0,0,1],
108 | [1,0,0,0,1],
109 | [0,1,1,1,0]
110 | ]),
111 | 'P': np.array([
112 | [1,1,1,0,0],
113 | [1,0,0,1,0],
114 | [1,1,1,0,0],
115 | [1,0,0,0,0],
116 | [1,0,0,0,0]
117 | ]),
118 | 'Q': np.array([
119 | [0,1,1,0,0],
120 | [1,0,0,1,0],
121 | [1,0,0,1,0],
122 | [1,0,1,0,0],
123 | [0,1,0,1,0]
124 | ]),
125 | 'R': np.array([
126 | [1,1,1,0,0],
127 | [1,0,0,1,0],
128 | [1,1,1,0,0],
129 | [1,0,1,0,0],
130 | [1,0,0,1,0]
131 | ]),
132 | 'S': np.array([
133 | [0,1,1,1,0],
134 | [1,0,0,0,0],
135 | [0,1,1,0,0],
136 | [0,0,0,1,0],
137 | [1,1,1,0,0]
138 | ]),
139 | 'T': np.array([
140 | [1,1,1,1,1],
141 | [0,0,1,0,0],
142 | [0,0,1,0,0],
143 | [0,0,1,0,0],
144 | [0,0,1,0,0]
145 | ]),
146 | 'U': np.array([
147 | [1,0,0,0,1],
148 | [1,0,0,0,1],
149 | [1,0,0,0,1],
150 | [1,0,0,0,1],
151 | [0,1,1,1,0]
152 | ]),
153 | 'V': np.array([
154 | [1,0,0,0,1],
155 | [1,0,0,0,1],
156 | [1,0,0,0,1],
157 | [0,1,0,1,0],
158 | [0,0,1,0,0]
159 | ]),
160 | 'W': np.array([
161 | [1,0,0,0,1],
162 | [1,0,0,0,1],
163 | [1,0,1,0,1],
164 | [1,0,1,0,1],
165 | [0,1,0,1,0]
166 | ]),
167 | 'X': np.array([
168 | [1,0,0,0,1],
169 | [0,1,0,1,0],
170 | [0,0,1,0,0],
171 | [0,1,0,1,0],
172 | [1,0,0,0,1]
173 | ]),
174 | 'Y': np.array([
175 | [1,0,0,0,1],
176 | [0,1,0,1,0],
177 | [0,0,1,0,0],
178 | [0,0,1,0,0],
179 | [0,0,1,0,0]
180 | ]),
181 | 'Z': np.array([
182 | [1,1,1,1,1],
183 | [0,0,0,1,0],
184 | [0,0,1,0,0],
185 | [0,1,0,0,0],
186 | [1,1,1,1,1]
187 | ]),
188 | '0': np.array([
189 | [0,1,1,1,0],
190 | [1,0,0,0,1],
191 | [1,0,0,0,1],
192 | [1,0,0,0,1],
193 | [0,1,1,1,0]
194 | ]),
195 | '1': np.array([
196 | [0,0,1,0,0],
197 | [0,1,1,0,0],
198 | [0,0,1,0,0],
199 | [0,0,1,0,0],
200 | [0,1,1,1,0]
201 | ]),
202 | '2': np.array([
203 | [0,1,1,1,0],
204 | [1,0,0,0,1],
205 | [0,0,1,1,0],
206 | [0,1,0,0,0],
207 | [1,1,1,1,1]
208 | ]),
209 | '3': np.array([
210 | [1,1,1,1,0],
211 | [0,0,0,0,1],
212 | [0,1,1,1,0],
213 | [0,0,0,0,1],
214 | [1,1,1,1,0]
215 | ]),
216 | '4': np.array([
217 | [0,0,1,1,0],
218 | [0,1,0,1,0],
219 | [1,0,0,1,0],
220 | [1,1,1,1,1],
221 | [0,0,0,1,0]
222 | ]),
223 | '5': np.array([
224 | [1,1,1,1,1],
225 | [1,0,0,0,0],
226 | [1,1,1,1,0],
227 | [0,0,0,0,1],
228 | [1,1,1,1,0]
229 | ]),
230 | '6': np.array([
231 | [0,1,1,1,0],
232 | [1,0,0,0,0],
233 | [1,1,1,1,0],
234 | [1,0,0,0,1],
235 | [0,1,1,1,0]
236 | ]),
237 | '7': np.array([
238 | [1,1,1,1,1],
239 | [0,0,0,0,1],
240 | [0,0,0,1,0],
241 | [0,0,1,0,0],
242 | [0,1,0,0,0]
243 | ]),
244 | '8': np.array([
245 | [0,1,1,1,0],
246 | [1,0,0,0,1],
247 | [0,1,1,1,0],
248 | [1,0,0,0,1],
249 | [0,1,1,1,0]
250 | ]),
251 | '9': np.array([
252 | [0,1,1,1,0],
253 | [1,0,0,0,1],
254 | [0,1,1,1,1],
255 | [0,0,0,0,1],
256 | [0,1,1,1,0]
257 | ]),
258 | '.': np.array([
259 | [0,0,0,0,0],
260 | [0,0,0,0,0],
261 | [0,0,0,0,0],
262 | [0,0,0,0,0],
263 | [0,0,1,0,0]
264 | ]),
265 | ',': np.array([
266 | [0,0,0,0,0],
267 | [0,0,0,0,0],
268 | [0,0,0,0,0],
269 | [0,0,1,0,0],
270 | [0,1,0,0,0]
271 | ]),
272 | '!': np.array([
273 | [0,0,1,0,0],
274 | [0,0,1,0,0],
275 | [0,0,1,0,0],
276 | [0,0,0,0,0],
277 | [0,0,1,0,0]
278 | ]),
279 | '?': np.array([
280 | [0,1,1,1,0],
281 | [1,0,0,0,1],
282 | [0,0,0,1,0],
283 | [0,0,0,0,0],
284 | [0,0,1,0,0]
285 | ]),
286 | '-': np.array([
287 | [0,0,0,0,0],
288 | [0,0,0,0,0],
289 | [1,1,1,1,0],
290 | [0,0,0,0,0],
291 | [0,0,0,0,0]
292 | ]),
293 | '+': np.array([
294 | [0,0,1,0,0],
295 | [0,0,1,0,0],
296 | [1,1,1,1,1],
297 | [0,0,1,0,0],
298 | [0,0,1,0,0]
299 | ]),
300 | '=': np.array([
301 | [0,0,0,0,0],
302 | [1,1,1,1,0],
303 | [0,0,0,0,0],
304 | [1,1,1,1,0],
305 | [0,0,0,0,0]
306 | ]),
307 | ':': np.array([
308 | [0,0,0,0,0],
309 | [0,0,1,0,0],
310 | [0,0,0,0,0],
311 | [0,0,1,0,0],
312 | [0,0,0,0,0]
313 | ]),
314 | ';': np.array([
315 | [0,0,0,0,0],
316 | [0,0,1,0,0],
317 | [0,0,0,0,0],
318 | [0,0,1,0,0],
319 | [0,1,0,0,0]
320 | ]),
321 | '(': np.array([
322 | [0,0,1,0,0],
323 | [0,1,0,0,0],
324 | [0,1,0,0,0],
325 | [0,1,0,0,0],
326 | [0,0,1,0,0]
327 | ]),
328 | ')': np.array([
329 | [0,0,1,0,0],
330 | [0,0,0,1,0],
331 | [0,0,0,1,0],
332 | [0,0,0,1,0],
333 | [0,0,1,0,0]
334 | ]),
335 | '[': np.array([
336 | [0,1,1,0,0],
337 | [0,1,0,0,0],
338 | [0,1,0,0,0],
339 | [0,1,0,0,0],
340 | [0,1,1,0,0]
341 | ]),
342 | ']': np.array([
343 | [0,0,1,1,0],
344 | [0,0,0,1,0],
345 | [0,0,0,1,0],
346 | [0,0,0,1,0],
347 | [0,0,1,1,0]
348 | ]),
349 | '/': np.array([
350 | [0,0,0,0,1],
351 | [0,0,0,1,0],
352 | [0,0,1,0,0],
353 | [0,1,0,0,0],
354 | [1,0,0,0,0]
355 | ]),
356 | '\\': np.array([
357 | [1,0,0,0,0],
358 | [0,1,0,0,0],
359 | [0,0,1,0,0],
360 | [0,0,0,1,0],
361 | [0,0,0,0,1]
362 | ]),
363 | ' ': np.zeros((5, 5)),
364 | '^': np.array([
365 | [0,0,1,0,0],
366 | [0,1,0,1,0],
367 | [1,0,0,0,1],
368 | [0,0,0,0,0],
369 | [0,0,0,0,0]
370 | ])
371 | }
372 |
def render_text(text: str, scale: int = 1) -> np.ndarray:
    """Render *text* as a single binary (0/1) bitmap array.

    Each character is looked up case-insensitively in the 5x5 ``FONT``
    table (unknown glyphs render blank), the glyphs are concatenated
    horizontally, and the result is upscaled by ``scale`` via a Kronecker
    product with a block of ones.

    Args:
        text: The text to render; may be empty.
        scale: Integer pixel-multiplication factor per font cell.

    Returns:
        np.ndarray: uint8 array of shape (5*scale, 5*scale*len(text)) with
        values 0/1, or a minimal (5*scale, 1) array for empty text.
    """
    if not text:
        # Minimal placeholder so callers always receive a 2-D array.
        return np.zeros((5 * scale, 1), dtype=np.uint8)

    char_arrays = [FONT.get(char.upper(), np.zeros((5, 5))) for char in text]
    text_array = np.hstack(char_arrays)
    # np.kron with a ones block replicates every font pixel into a
    # scale x scale square.  Cast to uint8 so both return paths agree on
    # dtype (np.kron would otherwise promote the result to float64).
    return np.kron(text_array, np.ones((scale, scale))).astype(np.uint8)
381 |
def vectorized_text(
    img_array: np.ndarray,
    text: str,
    position: Tuple[int, int],
    color: Tuple[int, int, int] = (255, 255, 255),
    font_size: float = 1,
    spacing: float = 1.0
) -> np.ndarray:
    """
    Render text onto a NumPy array using optimized vectorized operations.

    Args:
        img_array (np.ndarray): The input image as a NumPy array; indexed as
            H x W x C below, so a channel axis is required (3 or 4 channels).
        text (str): The text to render.
        position (Tuple[int, int]): The (x, y) position to place the text.
        color (Tuple[int, int, int]): RGB color of the text.
        font_size (float): Font size, similar to CV2's font scale.
        spacing (float): Spacing between characters, as a fraction of the
            character width.

    Returns:
        np.ndarray: The image array with the text rendered on it
        (modified in place and also returned).
    """
    x, y = position

    # Map the CV2-style font scale to an integer pixel multiplier.  Clamp to
    # a minimum of 1: font_size < 0.5 would otherwise give scale == 0, which
    # makes render_text() return a zero-area bitmap and breaks the spacing
    # arithmetic below (char_width == 0 -> invalid reshape).
    scale = max(1, int(font_size * 2))

    # Render the entire text at once
    text_array = render_text(text, scale)

    # Add spacing between characters
    if spacing > 0 and text:
        char_width = 5 * scale
        # Total width with fractional spacing between consecutive glyphs.
        total_width = int(text_array.shape[1] + spacing * (len(text) - 1) * char_width)
        spaced_text_array = np.zeros((text_array.shape[0], total_width), dtype=text_array.dtype)

        # Compute each glyph's x-offset; '.' gets no trailing spacing so the
        # dot hugs the preceding digit.
        char_positions = []
        current_position = 0
        for char in text:
            char_positions.append(current_position)
            if char == '.':
                current_position += char_width  # No additional spacing for '.'
            else:
                current_position += char_width + int(spacing * char_width)

        # Scatter every glyph column-block into its slot with one
        # fancy-indexed assignment instead of a Python loop.
        char_positions = np.array(char_positions)
        spaced_text_array[:, char_positions[:, None] + np.arange(char_width)] = text_array.reshape(text_array.shape[0], -1, char_width)
        text_array = spaced_text_array

    # Calculate the region of the image the text will occupy.
    y_start = max(0, y)
    x_start = max(0, x)
    y_end = min(img_array.shape[0], y + text_array.shape[0])
    x_end = min(img_array.shape[1], x + text_array.shape[1])

    # Check if the text is completely out of bounds
    if y_end <= y_start or x_end <= x_start:
        return img_array  # Text is completely out of bounds, return original image

    # Visible portion of the text bitmap (handles negative x/y offsets).
    text_y_start = max(0, -y)
    text_x_start = max(0, -x)
    text_y_end = text_y_start + (y_end - y_start)
    text_x_end = text_x_start + (x_end - x_start)

    # Crop text_array to the visible portion
    visible_text_array = text_array[text_y_start:text_y_end, text_x_start:text_x_end]

    # Get the section of the image we're working with
    img_section = img_array[y_start:y_end, x_start:x_end]

    # 0/1 mask broadcast over the image's channel axis.
    mask = np.repeat(visible_text_array[:, :, np.newaxis], img_section.shape[2], axis=2)

    # Prepare the color array
    color_array = np.array(color)
    if img_section.shape[2] == 4:  # If the image has an alpha channel
        color_array = np.append(color_array, 255)  # Add full opacity

    # Create a color overlay
    color_overlay = np.tile(color_array, (mask.shape[0], mask.shape[1], 1))

    # Hard blend: text pixels take the color, background keeps the image.
    blended = img_section * (1 - mask) + color_overlay * mask

    # Assign the blended result back to the image array
    img_array[y_start:y_end, x_start:x_end] = blended.astype(img_array.dtype)

    return img_array
475 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ['hatchling']
3 | build-backend = 'hatchling.build'
4 |
5 | [project]
6 | name='justpyplot'
7 | version='0.2.4'
8 | authors=[{ name='bedbad',email='antonyuk@bu.edu'}]
description='Get your plot in your array, plot fast'
10 | readme='README.md'
11 | classifiers=[
12 | 'Programming Language :: Python :: 3',
13 | 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
14 | 'Operating System :: OS Independent',
15 | ]
urls = { Repository = 'https://github.com/bedbad/justpyplot' }
17 | dependencies=[
18 | 'numpy',
19 | #'opencv-python',
20 | ]
21 |
22 | [project.optional-dependencies]
23 | demo=[
24 | 'mediapipe',
25 | 'scipy'
26 | ]
27 | cv2 = ["opencv-python"]
28 | perf_timer = ["perf_timer"]
PIL = ["Pillow"]
30 | dev = [
31 | 'ruff>=0.0.279', # Specify the Ruff version
32 | 'perf_timer',
33 | 'opencv-python',
34 | ]
35 |
36 | [tool.hatch.build]
37 | include = [
38 | "justpyplot/*.py",
39 | "examples/*",
40 | "docs/*",
41 | "LICENSE",
42 | ]
43 | exclude = [
44 | "README.md",
45 | "tests/*",
46 | "scripts/*",
47 | "*.pyc",
48 | "__pycache__",
49 | "*.so"
50 | ]
51 |
52 | [tool.ruff]
53 | line-length = 88
54 |
55 | lint.ignore = [
56 | "W191", # tab-indentation
57 | "E111", # indentation-with-invalid-multiple
58 | "E114", # indentation-with-invalid-multiple-comment
59 | "E117", # over-indented
60 | "D206", # indent-with-spaces
61 | "D300", # triple-single-quotes
62 | "Q000", # bad-quotes-inline-string
63 | "Q001", # bad-quotes-multiline-string
64 | "Q002", # bad-quotes-docstring
65 | "Q003", # avoidable-escaped-quote
66 | "COM812", # missing-trailing-comma
67 | "COM819", # prohibited-trailing-comma
68 | "ISC001", # single-line-implicit-string-concatenation
69 | "ISC002", # multi-line-implicit-string-concatenation
70 | ]
71 |
72 | [tool.ruff.format]
73 | quote-style='single'
74 |
--------------------------------------------------------------------------------
/resources/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bedbad/justpyplot/5d8b33fbc6ed697273fba113016ec7df72772e1b/resources/demo.gif
--------------------------------------------------------------------------------
/resources/jupyter_pil.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bedbad/justpyplot/5d8b33fbc6ed697273fba113016ec7df72772e1b/resources/jupyter_pil.png
--------------------------------------------------------------------------------
/resources/sinus.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bedbad/justpyplot/5d8b33fbc6ed697273fba113016ec7df72772e1b/resources/sinus.gif
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bedbad/justpyplot/5d8b33fbc6ed697273fba113016ec7df72772e1b/tests/__init__.py
--------------------------------------------------------------------------------
/tests/test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import time
3 |
4 | import cv2
5 |
6 | import perf_timer
7 | from justpyplot import justpyplot as jplt
8 |
_plottimer = perf_timer.PerfTimer('full justpyplot + rendering')

start = time.perf_counter()

xs, ys = [], []
key = 0
# Keep sampling sin(elapsed) and re-plotting until ESC (27) is pressed.
while key != 27:
    elapsed = time.perf_counter() - start
    xs.append(elapsed)
    ys.append(np.sin(elapsed))
    frame = np.full((400, 400, 3), (255, 255, 255), dtype=np.uint8)
    vals = np.array([xs, ys])
    with _plottimer:
        final = jplt.plot2_at(
            img_array=frame, values=vals, offset=(50, 50), title='sin() from Clock'
        )
    # Alternative APIs, kept for reference:
    # final = jplt.blend_at(frame, plot, offset=(50,50))
    # drawn = jplt.plot1_at(frame, vals,title="sin() from Clock", offset=(50,50), scatter=True, max_len=100)

    cv2.imshow('frame', final)
    key = cv2.waitKey(1)
    if key == 32:  # space pauses until the next keypress
        key = cv2.waitKey(0)
33 |
--------------------------------------------------------------------------------
/tests/test_basic.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import time
4 | from justpyplot import justpyplot as jplt
5 |
start_time = time.perf_counter()
xs, ys = [], []
# Append (t, sin(t)) samples and re-plot until ESC (27) is pressed.
while cv2.waitKey(1) != 27:
    now = time.perf_counter() - start_time
    xs.append(now)
    ys.append(np.sin(now))

    plotted_array = jplt.plot1(np.array(ys), title='sin() from Clock')

    cv2.imshow('np array plot', plotted_array)
18 |
--------------------------------------------------------------------------------
/tests/test_basic_plot.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import time
4 | from justpyplot import justpyplot as jplt
5 |
start = time.perf_counter()
xs, ys = [], []

# Grow the (t, sin(t)) series and re-render until ESC is pressed.
while cv2.waitKey(1) != 27:
    elapsed = time.perf_counter() - start
    xs.append(elapsed)
    ys.append(np.sin(elapsed))

    # Plot layers with custom grid/figure styling ...
    figure, grid, labels, title_img = jplt.plot(
        np.array([xs, ys]),
        grid={'nticks': 5, 'color': (128, 128, 128, 255)},
        figure={'scatter': True, 'line_color': (255, 0, 0, 255), 'line_width': 2},
        title="sin() from Clock",
        size=(600, 400)
    )

    # ... composited into one image for display.
    plotted_array = jplt.blend(figure, grid, labels, title_img)

    cv2.imshow('np array plot', plotted_array)
--------------------------------------------------------------------------------
/tests/test_plot.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | from justpyplot import justpyplot as jplt
4 |
5 | print(jplt.__file__)
6 |
def test_plot():
    """Animate jplt.plot() output in an OpenCV window.

    Renders a sliding damped sine for up to 10000 frames.  Keys: ESC quits,
    space pauses until any other key is pressed.
    """
    # Create a window
    cv2.namedWindow("Plot Test", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("Plot Test", 600, 400)

    # Define plot options outside the loop (invariant across frames)
    grid_options = {
        'nticks': 10,
        'color': (128, 128, 128, 255),
        'label_color': (0, 0, 255, 255),
        'label_font_size': 0.8,
        'precision': 2,
        'title_margin': 30,
        'y_tick_offset': 5,
        'x_tick_offset': 5
    }

    figure_options = {
        'line_color': (255, 0, 0, 255),
        'line_width': 2,
        'scatter': False,
        'point_color': (0, 255, 0, 255),
        'point_radius': 3,
        'marker_style': 'circle',
        'line_thickness': 2,
        'line_style': 'solid'
    }

    for i in range(10000):
        # Generate sample data: damped sine on a window sliding with i
        x = np.linspace(i * 0.1, 4 * np.pi+ i * 0.1, 100)
        y = np.sin(x) * np.exp(-x / 10)

        # Define the title for each frame
        title = f"Dynamic Plot (Frame {i})"

        # Call the plot function; it returns four separate RGBA layers
        figure, grid, labels, title_img = jplt.plot(
            np.vstack((x, y)),
            grid=grid_options,
            figure=figure_options,
            title=title,
            size=(600, 400),
            max_len=100
        )

        # Blend all components
        # blended = grid + figure + labels + title_img
        blended = jplt.blend(grid, figure, labels, title_img)

        # Convert to uint8 for display
        blended = blended.astype(np.uint8)

        # Display the result (layers are RGBA; OpenCV wants BGR)
        cv2.imshow("Plot Test", cv2.cvtColor(blended, cv2.COLOR_RGBA2BGR))

        # Check for key press
        key = cv2.waitKey(30)
        if key == 27: # ESC key
            break
        elif key == 32: # Spacebar key
            cv2.waitKey(0) # Wait indefinitely until another key is pressed

    cv2.destroyAllWindows()
71 |
# Manual (interactive) test entry point — opens an OpenCV window.
if __name__ == "__main__":
    test_plot()
--------------------------------------------------------------------------------
/tests/test_plot_components.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | from justpyplot.justpyplot import plot1_components, plot
4 |
def dumped_sine_wave(N, t):
    """Return N samples (x, y) of a damped sine on a window sliding with t."""
    xs = np.linspace(0 + 0.05 * t, 4 * np.pi + 0.05 * t, N)
    return xs, np.sin(xs) * np.exp(-xs / 10)
9 |
def dumped_sine_wave_noise(N, t):
    """Like dumped_sine_wave, but with N(0, 0.1) gaussian noise added to y."""
    xs = np.linspace(0 + 0.05 * t, 4 * np.pi + 0.05 * t, N)
    noise = np.random.normal(0, 0.1, N)
    return xs, np.sin(xs) * np.exp(-xs / 10) + noise
14 |
15 |
def simple_test_plot_components():
    """Render one static damped sine wave and show it until a key is pressed."""
    xs = np.linspace(0, 4 * np.pi, 100)
    ys = np.sin(xs) * np.exp(-xs / 10)

    figure, grid, labels, title_img = plot1_components(
        np.vstack((xs, ys)),
        title="Damped Sine Wave",
        size=(600, 400),
        scatter=False,
    )

    # Naive additive blend of the four layers, cast for display.
    composite = (figure + grid + labels + title_img).astype(np.uint8)

    cv2.imshow("Simple Damped Sine Wave Plot", cv2.cvtColor(composite, cv2.COLOR_RGBA2BGR))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
38 |
def test_in_loop(plotfunc, N, *args, **kwargs):
    """Animate plotfunc for N frames in an OpenCV window.

    plotfunc(i, *args, **kwargs) must return the four layer arrays
    (figure, grid, labels, title).  Space pauses (space again resumes);
    ESC quits both while running and while paused.
    """
    cv2.namedWindow("Dynamic Plot", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("Dynamic Plot", 600, 400)

    for i in range(N):

        figure, grid, labels, title_img = plotfunc(i, *args, **kwargs)
        # Additive blend, but wherever grid and figure overlap let the figure win.
        blended = grid + labels + title_img + figure
        overlay = np.where((grid>0) & (figure>0))
        blended[overlay] = figure[overlay]
        cv2.imshow("Dynamic Plot", cv2.cvtColor(blended, cv2.COLOR_RGBA2BGR))

        # Toggle pause state if space is pressed, quit on ESC
        key = cv2.waitKey(30)
        if key == 32: # Space key: block until the next keypress
            key = cv2.waitKey(0)
            if key == 32:
                continue
            if key == 27:
                break
        elif key == 27: # ESC key
            break


    cv2.destroyAllWindows()
64 |
def test_plot_components_loop():
    """Animate a phase-shifting damped sine for 500 frames; ESC aborts early."""
    cv2.namedWindow("Dynamic Plot", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("Dynamic Plot", 600, 400)

    for frame in range(500):
        xs = np.linspace(0, 4 * np.pi, 100)
        ys = np.sin(xs + frame * 0.1) * np.exp(-xs / 10)

        figure, grid, labels, title_img = plot1_components(
            np.vstack((xs, ys)),
            title=f"Dynamic Wave (Frame {frame})",
            size=(600, 400),
            scatter=False,
        )

        # Naive additive blend of the four layers, cast for display.
        composite = (figure + grid + labels + title_img).astype(np.uint8)
        cv2.imshow("Dynamic Plot", cv2.cvtColor(composite, cv2.COLOR_RGBA2BGR))

        if cv2.waitKey(30) == 27:  # ESC
            break

    cv2.destroyAllWindows()
95 |
def scatter_plot(i, *args, **kwargs):
    """Noisy damped sine rendered as a scatter plot; frame index i slides the window."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 50)
    ys = np.sin(xs) * np.exp(-xs / 10) + np.random.normal(0, 0.1, 50)

    return plot1_components(
        np.vstack((xs, ys)),
        *args,
        title=f"Scatter Plot (Frame {i})",
        size=(600, 400),
        scatter=True,
        point_color=(255, 0, 0, 255),  # red markers
        r=3,  # larger marker radius
        **kwargs,
    )
112 |
def line_plot_with_custom_colors(i, *args, **kwargs):
    """Damped sine line plot with custom line, grid and label colors."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 100)
    ys = np.sin(xs) * np.exp(-xs / 10)

    return plot1_components(
        np.vstack((xs, ys)),
        *args,
        title=f"Custom Color Line Plot (Frame {i})",
        size=(600, 400),
        scatter=False,
        line_color=(0, 255, 0, 255),  # green line
        grid_color=(200, 200, 200, 128),  # light gray, semi-transparent grid
        label_color=(255, 0, 0, 255),  # red labels
        **kwargs,
    )
130 |
def high_precision_plot(i):
    """Small-amplitude curve to exercise high-precision axis labels."""
    xs = np.linspace(i * 0.1, i * 0.1 + 1, 100)
    ys = xs * np.sin(xs) * 0.1

    return plot1_components(
        np.vstack((xs, ys)),
        title=f"High Precision Plot (Frame {i})",
        size=(800, 600),
        scatter=False,
        precision=6,  # six decimal places on y-axis labels
        nticks=20,  # denser tick grid
    )
145 |
def thick_line_plot(i, *args, **kwargs):
    """Phase-shifted sine drawn with a thick orange line."""
    xs = np.linspace(0, 4 * np.pi, 100)
    ys = np.sin(xs + i * 0.1)

    return plot1_components(
        np.vstack((xs, ys)),
        *args,
        title=f"Thick Line Plot (Frame {i})",
        size=(600, 400),
        scatter=False,
        thickness=5,  # thicker stroke
        line_color=(255, 165, 0, 255),  # orange line
        **kwargs,
    )
162 |
def plot_dict_line(i, *args, **kwargs):
    """Line plot styled via grid/figure dicts; frame index i slides the window."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 100)
    ys = np.sin(xs) * np.exp(-xs / 10)

    return plot(
        np.vstack((xs, ys)),
        *args,
        grid={
            'color': (200, 200, 200, 128),  # light gray, semi-transparent grid
            'label_color': (255, 0, 0, 255),  # red labels
        },
        figure={
            'scatter': False,
            'line_color': (0, 255, 0, 255),  # green line
        },
        title=f"Dict Line Plot (Frame {i})",
        size=(600, 400),
        **kwargs,
    )
188 |
def plot_dict_scatter(i, *args, **kwargs):
    """Noisy damped sine as a dict-styled scatter plot."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 50)
    ys = np.sin(xs) * np.exp(-xs / 10) + np.random.normal(0, 0.1, 50)

    return plot(
        np.vstack((xs, ys)),
        *args,
        grid={
            'color': (128, 128, 128, 200),  # gray, semi-transparent grid
            'label_color': (0, 0, 255, 255),  # blue labels
            'nticks': 8,
        },
        figure={
            'scatter': True,
            'point_color': (255, 0, 0, 255),  # red points
            'point_radius': 3,  # larger points
        },
        title=f"Dict Scatter Plot (Frame {i})",
        size=(600, 400),
        **kwargs,
    )
216 |
def plot_dict_thick_line(i, *args, **kwargs):
    """Phase-shifted sine drawn with a thick orange line, dict-styled."""
    xs = np.linspace(0, 4 * np.pi, 100)
    ys = np.sin(xs + i * 0.1)

    return plot(
        np.vstack((xs, ys)),
        *args,
        grid={
            'color': (200, 200, 200, 128),  # light gray, semi-transparent grid
            'label_color': (0, 0, 0, 255),  # black labels
            'precision': 3,
        },
        figure={
            'scatter': False,
            'line_color': (255, 165, 0, 255),  # orange line
            'thickness': 5,  # thicker stroke
        },
        title=f"Dict Thick Line Plot (Frame {i})",
        size=(600, 400),
        **kwargs,
    )
244 |
def plot_dict_dashed_line(i, *args, **kwargs):
    """Damped sine drawn as a dashed red line with filled circle markers."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 100)
    ys = np.sin(xs) * np.exp(-xs / 10)

    return plot(
        np.vstack((xs, ys)),
        *args,
        grid={
            'color': (200, 200, 200, 128),  # light gray, semi-transparent grid
            'label_color': (0, 0, 255, 255),  # blue labels
        },
        figure={
            'scatter': False,
            'line_color': (255, 0, 0, 255),  # red line
            'line_style': 'dashed',
            'line_thickness': 2,
            'marker_style': 'o_filled',
        },
        title=f"Dict Dashed Line Plot (Frame {i})",
        size=(600, 400),
        **kwargs,
    )
273 |
def plot_dict_dotted_line(i, *args, **kwargs):
    """Damped sine drawn as a dashed red line with green triangle markers."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 100)
    ys = np.sin(xs) * np.exp(-xs / 10)

    return plot(
        np.vstack((xs, ys)),
        *args,
        grid={
            'color': (200, 200, 200, 128),  # light gray, semi-transparent grid
            'label_color': (0, 255, 0, 255),  # green labels
        },
        figure={
            'scatter': False,
            'line_color': (255, 0, 0, 255),  # red line
            'line_style': 'dashed',
            'dash_length': 10,
            'line_thickness': 1,
            'marker_style': 'triangle',
            'point_color': (0, 255, 0, 255),  # green markers
            'point_radius': 4,
        },
        title=f"Dict Dotted Line Plot (Frame {i})",
        size=(600, 400),
        **kwargs,
    )
305 |
def plot_dict_cross_markers(i, *args, **kwargs):
    """Noisy damped sine as a scatter plot with cyan cross markers."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 50)
    ys = np.sin(xs) * np.exp(-xs / 10) + np.random.normal(0, 0.1, 50)

    return plot(
        np.vstack((xs, ys)),
        *args,
        grid={
            'color': (128, 128, 128, 200),  # gray, semi-transparent grid
            'label_color': (255, 0, 255, 255),  # magenta labels
            'nticks': 8,
        },
        figure={
            'scatter': True,
            'point_color': (0, 255, 255, 255),  # cyan points
            'point_radius': 4,  # larger points
            'marker_style': 'cross',
        },
        title=f"Dict Cross Markers Plot (Frame {i})",
        size=(600, 400),
        **kwargs,
    )
334 |
def plot_dict_square_markers(i, *args, **kwargs):
    """Noisy damped sine as a scatter plot with purple square markers."""
    xs = np.linspace(i * 0.1, i * 0.1 + 4 * np.pi, 50)
    ys = np.sin(xs) * np.exp(-xs / 10) + np.random.normal(0, 0.1, 50)

    return plot(
        np.vstack((xs, ys)),
        *args,
        grid={
            'color': (128, 128, 128, 200),  # gray, semi-transparent grid
            'label_color': (255, 165, 0, 255),  # orange labels
            'nticks': 10,
        },
        figure={
            'scatter': True,
            'point_color': (128, 0, 128, 255),  # purple points
            'point_radius': 3,  # larger points
            'marker_style': 'square',
        },
        title=f"Dict Square Markers Plot (Frame {i})",
        size=(600, 400),
        **kwargs,
    )
363 |
364 |
365 | if __name__ == "__main__":
366 | # print("Running simple test...")
367 | # simple_test_plot_components()
368 |
369 | print("Running loop test...")
370 | test_in_loop(plot_dict_dotted_line, N=500, bounds=np.array([[None, None],[-1, 1]]))
371 |
--------------------------------------------------------------------------------
/tests/test_standalone.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "data": {
10 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZAAAAEsCAYAAADtt+XCAAAQ1ElEQVR4nO3dC3KjOhYA0GRqFuadNb0z7yxTuKMMIUDA1l/nVLle2o9EBgNXP3Tf3gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGM7H29tHeB29F97f+vdy+/U2e2Xu/TtluVc/17qMV7cH6MbRTe7o5rje5urN8iggbG1zZvtXXQlcZz5z7M8Hs/84DNTi/e3t/WptOfzO+v1nat0x/k6s2v68X1fKDNvvHQ9IQQChKvMN8OpNcGv78HdifZ698pY379/KTXVj3/p8KcqBNSca1VjeYJc3waP3t/69FUzOlB1+N2y/9/NR+Xvlbv3+b59n7+9c3a8r20MTztbGDAIC8CXG7JiRjLrfQN2KjIFoSu/bm0KqFUYrzk4xpn3/Lf0B+Odo+iX0cA6H91Qg+1EkgOzNYFm/tx4QjXHiTdNU5815mk5tNh+Pvye3haxOnpfVXoORTNMkQPYq98l7prz1E8xHr7fb7bCWV+P+Ka//8q6cwzlb2K0ez1Z4DqQCV1tWxkaowdd5d7t9vN9up39vua1zt20CSA0WrYql5UNpXz/f75rHFPWjAnO/v/43BJImCSAV1N52g8Wno4fVfnsfsrjf37fO4fV7j/E7laBumIVVeqbK/f4tQPzWnbW3zIeZLeRwVIFZBor1/z9bCXIet0ULBDht7wZ/5cYf429QBwEko1jdTz+6CuZByZ1xFIjKecaCAJJR7JqXGhslzBWWvfG6pypB89+7MIuLegggPdTewqDk5983oE7S8/fzfIu1XP76/KUdAkiDtbdN9/v7PJ3yz+fTwKZGkjJ4RCeINEkAabj2BjmUqIio/LTBNN4eam+mRpJ6yvnnw4JJT6S5Ff2vzG9dsc7femmBJBa6lFIzNRLITQukl9obNN6i1YpujxZIR76WijA1kgZbtFrR7RFAEii9XpULDug+odSZm13shFI5VLVe1TyAb/E6rpqXaF90weY6d0M5j26r0IJ+YrVfOm2BhP7M8DqqlV/ZlhVBg4ir7OYutpXK4uiq7sIKQSO8mjmpPhPshBpU1AcGIbVaHubzcGH1qg4gzbdA7vevQe1S5T/+e7t5Kh2Irkit+GhcY93SiD0Gkjpn8bTor50qmAW19RxK0aBG9cI5XMP5W+Pn+c00TXobepU86f3c3F40uZOXt7Iub9kFmGJ9rNL7p7zI5YXzN1G30VP798LnKX48O1d1FxZtTyemPV/5zWuahKErtlqeRG9szatmpxPT2soJVU1a+Qps1siqihZILGaKAIMRQHpu+sv8RgddnbV/vpEJIC9aDkzPQaTWk1pzn1bXoKr9841MAAHgKQLISE1rT/bS6MoJP7piK/t8oxJAXqBpTVcKrXt1xdcYnkpQFQSQUbkAaVFlk1RGJ4C8qPam/w8uQHaW3oGrBJBItfjqAwf0VrHQii5OABn14pu5AIEXCCCDB7xqZ4wx3NI7p7X2eTtmLaxBa+/WFgJepQUCI2q8AtTdfjSqSAC5kpsiVR6LkZvTzT0ASbIWaCtJmnq47nqUvQtrK+PgXtdJU3nQG2KZ93H9WLb9fn9rPT+l+0Q51Y+BxE5pG4VmMxRnHK+86sdAwsN5e7XmohpuRltbaDy6LontvbZWxVYX1962pXIW/5mmx0232f7jnSeRe9gffj931/5uvNfiftSyP9M0qYz16pUAshzQPzuwHyNgXfFUeXOX3JPdck3sn/JeOodr3b8z+9Lb91eb6ruwgHRrt5WurdM2AeSkIfqPTQ7oW2drtxnHK08AOanr3B8NTwYAOULKEUBgNL1VGHrbn4YIID3n/niGWlyffK8kIIAM2n/8g1ocPRAosxJAYCS9VhR63a/KCSBXjXCiqsUBJwggZ4xyQx0hOI5olPN31P0tSADhrfvnW
xijgtD7/lWo+tV4qzLACWqF086ojZOQFgjQH4EzCwHkNwOdiEMs1zKqAVrPQ+1nJQSQswY4MbtermVQyy7J0aj4pGcM5MhArQ86T107UNrXZdCc84b8GWjfh2iBXM1FUCJvwaiscApUG0BCxsGzaWrXGQqLGKD7as0Kp+0aeSxr5H0voeoxkNLBY+T+Y9o18ljWyPteQtUBZN19lasW8a3M+33M2suArS7gmiI3ieUNeV0z2Gt1xGqNnMlZPA+8rY2Y+nP6bIFNoTuL5r67uQI04rkbruO5Kzb3+TtNk8pXr34LIMsB/vUrRXmxRS1vHv8IrxzlnaC8ON9b1PIuyFreheMQy5T5eJZWfRdWbvpQF3RjAQcEEOjV6BWA0fc/AwFk1NS1V5nOC6wIIKOlrr1KLa4tAv02xyUJAQTolhmEaQkge9S8f1KLa4fzlwwEEOiFAH/M8YlOAFlygm1Tm22L7+s7xyMZAYRLhlzWBdgkgGxRY/nhsSZYWB7D8vr10Xo+x3GKSgAJnFgAlwgga1ofP8ix0BDn7zbHJQkBhF9ZH6x+ctecp+ITj5zoM91XNGrk3OevBNlw3Byr12iBcIpc6UAVAeRKjo1X83Fcop/0fK50ijM2dZ5j1UkACZkFw+voC7yy7UufSf/xdbr9ijM2dZ5jNeAYSPhyU/VX6j8G6HgMZNkKKf1Z0M1XG7lrznscn3CsjONFUeQALmv+6y8xBIz1dnvbP5uz+M807W7z9+D/8fY2fXb5WSq7PN9FfcdsmiaBqVfLpPfLAfpUA/XL8nLIUt48/vH56nL/Wilv8T1kKS+CKsp78ZhdLq9j1XdhASRhIkjfg+iphf7QeQBdfyjNMi5FIVogpvJd54ZFy5y/0YwbQDRfaZnzlwqMG0CIOqMFmiQQv0QA0ZwFeIoAwnME3jr4HihozACi2QpjE3ijGDOAEJUVTTNTAaISAghPm5+fCSsZZ1tyH2ITkJ82dgDRjKVlzl8KGzuA8DQJemieAPyy4QKI5xbikKCnkNvtQwI0ajFcAIFWPcaZwpjTPP6ktReNY/mccQOI5uvL5tZGyJ0iQQ8tMhGkkQByJedGyvwcpFvV2GyWdIw5MWwAWaalnV9HJ/2VbS/Tf0yjjDnFJyhXFkBSZ/d7hf7jhHQH0iBBubKEUg48pCMBGrXJdhIua/zrkz90W53Z9tWy18IgMPGmR09hTIRoHN+0/kzTI0DHOLbTNAnuvVl3r+XqwpqmKWtXWdHy5kH0xAPpQx3PpXBsIx/favavdHmRju+Uef9KG3caL7TIeBMVGSaAhFld659JwHReWiMwP2WYABIY86A5AjKVGi6AkJBaHD0QsE8TQKAVAnRaju9lAghpqMVB9wQQqJlAXIbjfooAQly6AdJwXKmQAEIy1hujOQJ1ubWwYLZc8TgEEc/dPEE3CpXTAgHYIoD/SgAhKjkWEtCtQqUEEKKSYyEeyc8KEbBPE0CgMsvVoiU/K8uxP2YQnehCa+OR52XOr6BGR2NMBKmsBXIlD0etaXF5LoOewchriY22uBYYNoCEjIPhdXQxXNkWRlktWu03HxNBCgWQGBn/lt0f61S3NEbXFQ0yEaTQGEisAyxwMLLQ9Tf3w7sOqFm2k3PZIllfFMuAsdVyiXkRjZazuLRpMRg5hTERdjle9Y1JzQH9yncxTZOg36vcAUR5n4PokQbSq9y/mMKxyjTxoPfj+XJ5F7+PabAKqudAoEbGj2iAAEI+pvPSGoH8kAACtRBgaYwAQnpqcfRAgP9BAIHaCLh18X3sEkDISy0OuiGAQA0E1jb4nr4RQMhDN8ApHh6kJQII2VmUjuaoAG2SD4Rs5FjYoVuERmmBAFwh4H8RQMhCjoUTdJPQGAGELORYONe1R8UE+B8EEChkmXhtDiJaY+3wXf1jEJ1sljlfHkmT1OhojIkghVogV1PdvpIWlzYy7o1s79yeExhBK7IEkJBxMLx+CwxS2g5i4Nkse+f3XwGkWiaCJ
O7CWh/IZ4KA4AHUaK/y+z7w/SpqC2TZynjloH4bXBw4unfL2MdXV17oznv1moESsp2wy0CwvlD2Wh0pWiOj5Syu1fQ5GDny2k+OQbvmsar3223zu5umSUWgV7kDiPIOxj/CK0d5T0pW3s7+d7N/vZdXyfdXmudAoCTdeTRMAKEMN05a5vx9EEAgt4GnL3fpNu73KYBQ3sAXIA27a4UIIAA8RQChnNFrcKPvf09uY7aiBRAAniKAUIVhVhwYtKbarfvYrUjLuVPUsMtjD37j6dJtvMqBFggATxFAKGa45bFvtw/pazt0H7c1KYBQzEh50h8rTH8GD+lr6YUAAhB5heVRCCAUtcyD8ciP0Wnr48r7NOg+ZjeWAEJdedI7nMkyUlfd0O7jBZFsASRkGTxT67qyLZ0Y8OKjP9NcEeqwElT0OZB1ZsGjTINyotNzC2seQHd+d+4+TmUoagtk2XJ4pfWgBTL4hddbDW6xP4IHPYnaAol1cZxtrUBTBqqZMoZsJ/SyNbEOCFtdXHvbvmq0nMUtT4N89Cd3tE+97A/HpmlSUehV7gCivCe7fHa6sZrbv7AvJ7vlmts/5Q1dQTWNF3LQfUWHBBDq1dtgOnRGAKE+vdTWQwDsZX9gRQChaqZyQ70klKJKzSea0v3GALRAICXdV3RMAKE6PaxeK3EUIxBAqE7Lq9cul+CROIreCSAAPEUAoY1EUw0sA9JD1xtcIYBQtRa6rXroeoNnCCAAPMVzILQxFfbCgoTF3G7zqtIPEkcxAgEEYrvf5/Eb6J4uLJp6IG+ZLwQYpAVyJUlUyoRSkETt3WvQagskZBwMr6NpjVe2ZVA136wtXcJA/pPiKdzl07hXhaARXlogbN2cq6pYWLadQUXtwopxs9/Kjy6I0M0qvdCRbBfe0bjGVtDY2/ZVo+Us7s2fafrx3t+N93JZDupPDTwtT3rTNKnQ9Cp3AFFeui7SV7tLY+zfo+yIYzI9f3/K649pvDSjpqVCrLoLAggATxJAaMo83lF6lV6r7sI/AghNKjnzqqauNChJAKFd4bmL3A8W1vwgI2QkgNCHzDf1R/dZ6EpbJL+CkQggtC3309+rQCVwMDIBhH7cbkmfCfnGsiUgHwgduN+/LbqZbIkTa17BN1ogcIIpuvCTAELzUj6XMa+99e2Jcwmt4IsAQvM8lwFlCCCwwxPncMwgOl0IrY1HaoD/L2/y8cqA915GTC0b+EcLhK48bu6roPF0i8ET51BPADl7IefI88AAU3sXy6xfOZ+WOT6+Ld7oiXMoE0CuXLzhQt3rQoAsOT4WM64EDkg8BrK+2S8vOsGAWgbAH2MkWwsxmqIL5QKIWhq12B0ADwPsq/GNvec75udAymVch7plu+EvL+Z1oAndVme2fdVoOYtHNt/81+YxjWknWGwFkXl7uGKaJhVp6Lo7K7RAFi2R5eQNkzgAuEzgAAAAAAAAAAAAAAAAAAAAAACAbLpeNfLsqr4xV/9dryycsryc+3eU6yVFeeu/lXv/jv5WD99fb/uXs7xnVw//SLjKOJGtL5ijBENntjtb5m+/H6u8EvuXu7xlOtrcx7O38s6W3+r5mau89TX+Ufg67Cqh1OhyZl0MNZgzNfRYctagzrTkUpQZfu7teK7LTV3e+lrIXV7r5bRCAGlY7pvsutmeI4jkKu+3JGepy8z1XZYqp8TxTFkW/3TdD7dXA0qVATFneUd92jnKy3E8j/5+D/tX4vzc+v2e9i91eVu/+5F5PwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4G8D/ALSyfrzW4eYQAAAAAElFTkSuQmCC",
11 | "text/plain": [
12 | ""
13 | ]
14 | },
15 | "metadata": {},
16 | "output_type": "display_data"
17 | }
18 | ],
19 | "source": [
20 | "import numpy as np\n",
21 | "from IPython.display import Image as ipyImage, display\n",
22 | "from justpyplot import justpyplot as jplt\n",
23 | "# Make some dependency data\n",
24 | "x = np.linspace(0, 10, 50)\n",
25 | "y = np.sin(x)\n",
26 | "# Just plot in numpy using the plot() function\n",
27 | "figure_img, grid_img, labels_img, title_img = jplt.plot(\n",
28 | " np.array([x, y]),\n",
29 | " grid={'nticks': 10, 'color': (128, 128, 128, 255), 'label_color': (255, 0, 0, 255),'precision': 1, 'label_font_size': 0.9},\n",
30 | " figure={'scatter':False,'point_color': (255, 0, 0, 255), 'point_radius':3, 'line_color':(0,64,64, 255), 'line_width': 2, 'marker_style':'circle'},\n",
31 | " title='Sine Wave',\n",
32 | " size=(300, 400),\n",
33 | " max_len=100\n",
34 | ")\n",
35 | "#blend arrays into PIL picture buffer\n",
36 | "buffer = jplt.blend2PIL(grid_img, figure_img, labels_img, title_img, format='PNG')\n",
37 | "# Display the image\n",
38 | "display(ipyImage(buffer.getvalue()))"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "metadata": {},
45 | "outputs": [],
46 | "source": []
47 | }
48 | ],
49 | "metadata": {
50 | "kernelspec": {
51 | "display_name": "Python 3 (ipykernel)",
52 | "language": "python",
53 | "name": "python3"
54 | },
55 | "language_info": {
56 | "codemirror_mode": {
57 | "name": "ipython",
58 | "version": 3
59 | },
60 | "file_extension": ".py",
61 | "mimetype": "text/x-python",
62 | "name": "python",
63 | "nbconvert_exporter": "python",
64 | "pygments_lexer": "ipython3",
65 | "version": "3.12.5"
66 | }
67 | },
68 | "nbformat": 4,
69 | "nbformat_minor": 4
70 | }
71 |
--------------------------------------------------------------------------------
/tests/test_standalone.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pyvista as pv
3 | from justpyplot import justpyplot as jplt
4 | import time
5 |
# Set up the interactive PyVista plotter reused for every animation frame.
plotter = pv.Plotter()
# Show orientation axes in the corner so the camera rotation below is visible.
plotter.add_axes()
9 |
def update_plot(phase=0):
    """Build a textured surface of revolution from a phase-shifted sine wave.

    The 2D curve ``sin(x + phase)`` is rendered with justpyplot, the plot
    layers are blended into a single image, and that image is applied as a
    texture to a PyVista structured grid obtained by revolving the curve
    around the x axis (the wave value acts as the radius).
    """
    samples = np.linspace(-10, 10, 100)
    wave = np.sin(samples + phase)

    # Render the 2D plot and flatten its layer stack into one RGBA image.
    layers = jplt.plot(
        np.array([samples, wave]),
        grid={'nticks': 5, 'color': (128, 128, 128, 255)},
        figure={'scatter': False, 'line_color': (255, 0, 0, 255), 'line_width': 2},
        title='Sine Wave',
        size=(400, 300)
    )
    texture_img = jplt.blend(*layers)

    # Revolve the curve: at each x the sine value is the radius of the ring.
    angles = np.linspace(0, 2 * np.pi, 100)
    xx, aa = np.meshgrid(samples, angles)
    yy = wave * np.cos(aa)
    zz = wave * np.sin(aa)

    # Wrap the coordinate arrays in a PyVista structured grid and texture it.
    surface = pv.StructuredGrid(xx, yy, zz)
    surface.texture_map_to_plane()
    surface.active_texture = pv.numpy_to_texture(texture_img)

    return surface
41 |
# Start recording frames to a movie file.
# NOTE(review): presumably requires an ffmpeg/imageio backend to be
# installed for pyvista movie writing — confirm in the environment.
plotter.open_movie('animation.mp4', framerate=24)
mesh = None
phase = 0

# Render 120 frames; each iteration rebuilds the surface, repositions the
# camera, and appends one frame to the movie.
for i in range(120):  # More frames for smoother animation
    # Drop the previous frame's actor so meshes don't accumulate.
    if mesh:
        plotter.remove_actor(mesh)

    grid = update_plot(phase)
    mesh = plotter.add_mesh(grid)

    # Orbit the camera around the scene.
    plotter.camera.azimuth = i * 3  # Rotate 3 degrees per frame
    plotter.camera.elevation = 20  # Fixed elevation angle

    plotter.write_frame()
    phase += 0.1  # Smaller phase increment for smoother wave motion

    # No sleep: write_frame paces the movie, so real-time delay is unneeded.

# Finalize the movie file and release the render window.
plotter.close()
--------------------------------------------------------------------------------
/tests/test_textrender.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | from justpyplot.textrender import vectorized_text
4 |
def test_text_rendering():
    """Visual smoke test for ``justpyplot.textrender.vectorized_text``.

    Draws several text samples (plain, colored, scaled, symbols, and
    manually wrapped lines) onto a blank canvas and shows the result in an
    OpenCV window for manual inspection. There are no assertions; this is
    an eyeball test, not an automated one.
    """
    # Blank canvas to draw on (OpenCV convention: height x width x BGR).
    img_array = np.zeros((300, 600, 3), dtype=np.uint8)

    # Test 1: Basic text rendering
    img_array = vectorized_text(img_array, "JUST TEXT", (50, 50), color=(255, 255, 255), spacing=2)

    # Test 2: Colored text — (0, 0, 255) is red in OpenCV's BGR order
    img_array = vectorized_text(img_array, "RED TEXT", (50, 100), color=(0, 0, 255), spacing=2)

    # Test 3: Larger scale (font_size=1.25 enlarges the glyphs;
    # the original comment said "Smaller scale", which was wrong)
    img_array = vectorized_text(img_array, "Large Text", (50, 150), color=(0, 255, 0), font_size=1.25, spacing=1)

    # Test 4: Numbers and punctuation
    img_array = vectorized_text(img_array, "123!@#$%", (50, 200), color=(255, 255, 0), spacing=2)

    # Test 5: Long text wrapped manually, three words per rendered line
    long_text = "This is a long text that will wrap to the next line"
    words = long_text.split()
    y_position = 250
    for i in range(0, len(words), 3):
        line = " ".join(words[i:i+3])
        img_array = vectorized_text(img_array, line, (50, y_position), color=(255, 0, 255), font_size=0.5, spacing=1)
        y_position += 15

    # Display the result using OpenCV until any key is pressed.
    cv2.imshow('Text Rendering Test', img_array)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == "__main__":
    test_text_rendering()
--------------------------------------------------------------------------------