├── .github ├── ISSUE_TEMPLATE │ ├── bug-report.yml │ └── feature-request.yml └── workflows │ └── welcome.yml ├── .gitignore ├── CITATION.cff ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── TEST_COVERAGE.md ├── requirements.txt ├── setup.py ├── tests ├── .DS_Store ├── __pycache__ │ ├── test_visionscript.cpython-39-pytest-7.1.2.pyc │ └── test_visionscript.cpython-39-pytest-7.4.0.pyc ├── images │ ├── .DS_Store │ ├── bus.jpg │ ├── bus.svg │ ├── cans.png │ ├── cat.jpeg │ ├── corrupted_image.jpg │ ├── gettext.png │ ├── ocr.png │ ├── pose.jpg │ ├── qr.png │ ├── scissors.png │ └── webmention_button.webp ├── manual_vics │ ├── breakpoint.vic │ ├── compare_raw.vic │ ├── compare_with_detections.vic │ ├── compare_with_segments.vic │ ├── getfps.vic │ ├── roboflow.vic │ ├── show.vic │ ├── show_with_detections.vic │ ├── show_with_segments.vic │ ├── showtext.vic │ └── usecamera.vic ├── output │ ├── blur.jpg │ ├── bus.jpg │ ├── bus_brightness.jpg │ ├── bus_cutout.jpg │ ├── bus_cutout_saved.jpg │ ├── bus_edges.jpg │ ├── bus_resized.jpg │ ├── greyscale.jpg │ ├── predictions.csv │ ├── replace_in_images.jpg │ ├── replace_with_color.jpg │ ├── rotate.jpg │ └── web.html ├── test_visionscript.py ├── valid_output │ ├── blur.jpg │ ├── bus.jpg │ ├── bus_brightness.jpg │ ├── bus_brightness.png │ ├── bus_cutout.png │ ├── bus_cutout_saved.png │ ├── bus_edges.jpg │ ├── bus_edges.png │ ├── bus_resized.jpg │ ├── classify_image.vic.txt │ ├── find_in_images.vic.txt │ ├── greyscale.jpg │ ├── load_detect_save.vic.txt │ ├── replace_in_images.png │ ├── replace_with_color.jpg │ └── rotate.jpg ├── vics │ ├── associative_array.vic │ ├── blur.vic │ ├── break.vic │ ├── buffer_overload_prevention.vic │ ├── caption.vic │ ├── classify_image.vic │ ├── comment.vic │ ├── compare_pose.vic │ ├── count.vic │ ├── count_in_region.vic │ ├── cutout.vic │ ├── decrement.vic │ ├── describe.vic │ ├── detect_pose.vic │ ├── equal_to.vic │ ├── exit.vic │ ├── filter_by_class.vic │ ├── find.vic │ ├── find_in_images.vic │ ├── first.vic │ ├── get.vic │ ├── get_distinct_scenes.vic │ ├── get_text.vic │ ├── getcolors.vic │ ├── getcolours.vic │ ├── getedges.vic │ ├── greater_than.vic │ ├── greater_than_or_equal_to.vic │ ├── greyscale.vic │ ├── grid.vic │ ├── if.vic │ ├── import.vic │ ├── in_video.vic │ ├── increment.vic │ ├── input.vic │ ├── is.vic │ ├── last.vic │ ├── less_than.vic │ ├── less_than_or_equal_to.vic │ ├── list.vic │ ├── load_detect_save.vic │ ├── load_image.vic │ ├── load_video.vic │ ├── make.vic │ ├── merge.vic │ ├── models │ │ ├── fastsam.vic │ │ ├── groundingdino.vic │ │ ├── roboflow.vic │ │ ├── yolov8.vic │ │ └── yolov8s-pose.vic │ ├── not.vic │ ├── not_equal_to.vic │ ├── profile.vic │ ├── raises_exceptions │ │ ├── image_corrupted.vic │ │ ├── image_not_supported.vic │ │ ├── image_out_of_bounds.vic │ │ ├── path_not_exists.vic │ │ ├── set_function_error.vic │ │ └── stack_empty.vic │ ├── random.vic │ ├── read.vic │ ├── readqr.vic │ ├── remove.vic │ ├── replace_in_images.vic │ ├── replace_with_color.vic │ ├── reset.vic │ ├── resize.vic │ ├── rotate.vic │ ├── save.vic │ ├── save_video.vic │ ├── say.vic │ ├── search.vic │ ├── segment_image.vic │ ├── set.vic │ ├── setbrightness.vic │ ├── setconfidence.vic │ ├── similarity.vic │ ├── size.vic │ ├── use.vic │ ├── use_background.vic │ ├── use_roboflow.vic │ ├── variable_assignment.vic │ ├── wait.vic │ └── web.vic └── videos │ └── cars.mp4 └── visionscript ├── .DS_Store ├── __init__.py ├── cloud.py ├── config.py ├── constants.py ├── error_handling.py ├── 
grammar.py ├── lang.py ├── notebook.py ├── paper_ocr_correction.py ├── pose.py ├── reference.json ├── registry.py ├── rf_models.py ├── state.py ├── static ├── .DS_Store ├── deploy_intro_styles.css ├── deploy_styles.css ├── drag_and_drop.png ├── examples.js ├── functions.js ├── main.js ├── manifest.json ├── purify.min.js ├── renderCells.js └── styles.css ├── templates ├── deployintro.html ├── error.html ├── index.html ├── notebook.html ├── public_notebook.html └── public_notebook_embed.html └── usage.py /.github/ISSUE_TEMPLATE/bug-report.yml: -------------------------------------------------------------------------------- 1 | name: 🐞 Bug Report 2 | description: Problems with VisionScript 3 | labels: [bug] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thank you for submitting a VisionScript Bug Report! 9 | 10 | - type: checkboxes 11 | attributes: 12 | label: Search before asking 13 | description: > 14 | Please search the [issues](https://github.com/capjamesg/visionscript/issues) to see if a similar bug report already exists. 15 | options: 16 | - label: > 17 | I have searched the VisionScript [issues](https://github.com/capjamesg/visionscript/issues) and found no similar bug report. 18 | required: true 19 | 20 | - type: textarea 21 | attributes: 22 | label: What is the issue? 23 | description: Provide a detailed description of the issue you are facing. Also note the environment in which you are working (REPL, Notebook, running a .vic file, or using VisionScript Cloud). 24 | placeholder: | 25 | 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks, etc.). 26 | validations: 27 | required: true 28 | 29 | - type: textarea 30 | attributes: 31 | label: Environment 32 | description: Please specify the software and hardware you used to produce the bug. 33 | placeholder: | 34 | - VisionScript: 0.0.3 35 | - OS: Ubuntu 20.04 36 | - Python: 3.8.10 37 | validations: 38 | required: false 39 | 40 | - type: textarea 41 | attributes: 42 | label: How can we replicate the bug? 43 | description: > 44 | Provide instructions on how to replicate the bug you have encountered. 45 | validations: 46 | required: false 47 | 48 | - type: textarea 49 | attributes: 50 | label: Additional 51 | description: Anything else you would like to share? 52 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Suggest a VisionScript idea 3 | labels: [enhancement] 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thank you for submitting a VisionScript Feature Request! 9 | 10 | - type: checkboxes 11 | attributes: 12 | label: Search before asking 13 | description: > 14 | Please search the [issues](https://github.com/capjamesg/visionscript/issues) to see if a similar feature request already exists. 15 | options: 16 | - label: > 17 | I have searched the VisionScript [issues](https://github.com/capjamesg/visionscript/issues) and found no similar feature requests. 18 | required: true 19 | 20 | - type: textarea 21 | attributes: 22 | label: Project area 23 | description: Does your idea concern the VisionScript language, VisionScript Notebooks, VisionScript Cloud, or the project documentation? 24 | placeholder: | 25 | e.g. the VisionScript language, Notebooks, Cloud, or the documentation.
26 | validations: 27 | required: true 28 | 29 | - type: textarea 30 | attributes: 31 | label: Description 32 | description: A short description of your feature. 33 | placeholder: | 34 | What new feature would you like to see in VisionScript? 35 | validations: 36 | required: true 37 | 38 | - type: textarea 39 | attributes: 40 | label: Use case 41 | description: | 42 | Describe the use case for your feature request. This will help us understand and prioritize it. 43 | placeholder: | 44 | How would this feature be used, and who would use it? 45 | 46 | - type: textarea 47 | attributes: 48 | label: Additional 49 | description: Anything else you would like to share? 50 | 51 | - type: checkboxes 52 | attributes: 53 | label: Are you willing to submit a PR? 54 | description: > 55 | (Optional) We encourage you to submit a [Pull Request](https://github.com/capjamesg/visionscript/pulls) (PR) to help improve the language and its toolset for everyone. 56 | options: 57 | - label: Yes, I'd like to help by submitting a PR! -------------------------------------------------------------------------------- /.github/workflows/welcome.yml: -------------------------------------------------------------------------------- 1 | name: Welcome Workflow 2 | 3 | on: 4 | issues: 5 | types: [opened] 6 | pull_request_target: 7 | types: [opened] 8 | 9 | jobs: 10 | build: 11 | name: 👋 Welcome 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/first-interaction@v1.1.1 15 | with: 16 | repo-token: ${{ secrets.GITHUB_TOKEN }} 17 | issue-message: "Hello there, thank you for opening an issue! 🙏🏻 The team has been notified and will get back to you as soon as possible." 18 | pr-message: "Hello there, thank you for opening a PR! 🙏🏻 The team has been notified and will get back to you as soon as possible." -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | 162 | *.png 163 | *.jpg 164 | folder 165 | *.pt 166 | *.vic 167 | *.json 168 | tmp/* 169 | runs/* 170 | *.svg -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - given-names: James (capjamesg) 5 | title: "VisionScript" 6 | version: 0.0.02 7 | date-released: 2023-07-08 -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ![VisionScript logo](https://visionscript.dev/assets/full_logo.svg) 2 | 3 | # Contribute to VisionScript 4 | 5 | *Before you read on, please read the project README in full. The README explains some additional background on VisionScript you will need before contributing.* 6 | 7 | Thank you for your interest in contributing to VisionScript! 8 | 9 | The aim of VisionScript is to provide an abstract programming language for experimenting with computer vision. VisionScript provides powerful primitives that, combined, enable people to express their creativity. 10 | 11 | VisionScript is a programming language. VisionScript Notebooks and Deploy, bundled with the core VisionScript project, are consumers of the language. The former lets you write and run code in an interactive web interface on desktop and mobile devices. The latter lets you deploy an API that runs VisionScript code. 12 | 13 | ## Contributing to VisionScript 14 | 15 | There are many ways you can contribute to VisionScript, including: 16 | 17 | 1. Adding a new function or primitive to the programming language; 18 | 2. Improving the VisionScript Notebooks web interface; 19 | 3. Improving the VisionScript deployment option; 20 | 4. Improving the documentation (see the [visionscript-docs](https://github.com/capjamesg/visionscript) repository for the project documentation); 21 | 5. Adding tests for existing functions and logic; 22 | 6. Reporting bugs; and 23 | 7. Sharing ideas on how we can improve the VisionScript language, Notebooks, and deployment. 24 | 25 | **Before you add a new feature, please open an Issue on the project repository so we can discuss and refine the idea before implementation.** 26 | 27 | To get started with a development environment, first clone the VisionScript GitHub repository: 28 | 29 | ``` 30 | git clone https://github.com/capjamesg/visionscript 31 | ``` 32 | 33 | Then, install the `visionscript` package and its dependencies: 34 | 35 | ``` 36 | pip3 install -e . 37 | ``` 38 | 39 | Once you have installed VisionScript, you can start working on the language, Notebooks, or deployment: 40 | 41 | ``` 42 | visionscript --repl # create a REPL 43 | visionscript --notebook # run an interactive notebook 44 | visionscript --cloud # create an HTTP server to which you can deploy VisionScript code 45 | ``` 46 | 47 | ## How the Language Works 48 | 49 | Suppose a user writes a program like this: 50 | 51 | ``` 52 | Load["./person.png"] 53 | Detect["person"] 54 | Replace["blue"] 55 | Show[] 56 | ``` 57 | 58 | This program would replace all people in an image with a blue box. 59 | 60 | VisionScript will first generate a syntax tree for this code. This is done using `lark`, a Python parsing library.
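To make that step concrete, here is a minimal, self-contained sketch of how `lark` turns a grammar and a program into a tree. The toy grammar below is an assumption for illustration only; the real grammar is defined in `grammar.py`:

```
from lark import Lark

# A toy grammar in the spirit of VisionScript's real one: each expression is a
# capitalized function name followed by zero or more quoted arguments in brackets.
toy_grammar = r"""
start: expr+
expr: NAME "[" [ESCAPED_STRING ("," ESCAPED_STRING)*] "]"
NAME: /[A-Z][A-Za-z]*/
%import common.ESCAPED_STRING
%import common.WS
%ignore WS
"""

parser = Lark(toy_grammar)
tree = parser.parse('Load["./person.png"]\nDetect["person"]')
print(tree.pretty())  # prints the nested tree of expr nodes
```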
61 | 62 | Here is the syntax tree associated with our program: 63 | 64 | ``` 65 | start 66 | expr 67 | load "./person.png" 68 | 69 | 70 | expr 71 | detect "person" 72 | 73 | 74 | expr 75 | replace "blue" 76 | 77 | 78 | expr 79 | show 80 | ``` 81 | 82 | *Pro tip: You can generate a syntax tree for a program without evaluating the program by using the `--debug=True` parameter when executing a `.vic` file.* 83 | 84 | This syntax tree has a `start` entry point that contains four expressions. Those expressions each contain one child (load, detect, replace, and show). If we had a more complicated program, elements of the tree may be nested. For instance, a `Load[Input["text"]]` statement will be nested like this: 85 | 86 | ``` 87 | start 88 | expr 89 | load 90 | input "text" 91 | ``` 92 | 93 | Here, `input` is nested within `load`. 94 | 95 | After VisionScript generates a syntax tree, the tree is evaluated. 96 | 97 | The tree is evaluated in a Python function called `parse_tree`. This function will recursively traverse the tree. Statements may either update `state`, a global state object that is maintained throughout program execution, or return a value. A statement that explicitly returns a value may or may not also update `state`, depending on the statement. 98 | 99 | `parse_tree` contains a lot of logic for both fundamental program parsing and control flow (e.g. `If` statements and `Make` function definitions). 100 | 101 | ## Add a Function 102 | 103 | To add a function to VisionScript, first edit `grammar.py` to add a grammar rule for your function. There are examples in the file already that show functions that accept zero, one, or an arbitrary number of arguments. 104 | 105 | Then, create a new Python function in `lang.py` with the logic you want to enable. Take a look at other functions that are related to your idea within the codebase to see how functions are implemented. 106 | 107 | Once you have implemented a function, add it to the `self.function_calls` value in the `VisionScript` `__init__` code. 108 | 109 | After making these changes, your function will be part of the language and available when executing `.vic` files or using the VisionScript REPL. 110 | 111 | VisionScript Notebooks is a _consumer of_ the VisionScript language, not the language itself. Thus, to add your function to the web interface, you need to make a few more changes. 112 | 113 | Edit `visionscript/static/functions.js` and add your function as an entry under the header that makes the most sense (e.g. Input, Output, Process). Look at other entries to see how they work. Entries can accept file or text arguments. 114 | 115 | If your function involves additional control flow, you will need to add that logic into the `visionscript/static/main.js` file. 116 | 117 | ## Code Organization 118 | 119 | - `visionscript/lang.py`: Core language code. 120 | - `tests/test_visionscript.py`: The automated test suite (run with `pytest tests/`). 121 | - `visionscript/usage.py`: Variables referenced for usage instructions in `lang.py`. 122 | - `visionscript/grammar.py`: The VisionScript grammar. 123 | - `visionscript/notebook.py`: The VisionScript notebook environment code. 124 | - `visionscript/cloud.py`: The VisionScript cloud environment code. 125 | - `tests/`: VisionScript tests. 126 | 127 | ## How to Make a Change 128 | 129 | Whether you want to improve documentation, submit a bug fix, or add to the library, you'll first need to get our repo set up on your local machine. 130 | 131 | First, fork the repository to your own GitHub account. This fork is where you will push your code changes.
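In outline, the whole process looks like this (a sketch; `YOUR_USERNAME` and the branch name are placeholders):

```
git clone https://github.com/YOUR_USERNAME/visionscript
cd visionscript
pip3 install -e .
git checkout -b my-new-feature
# make your changes, then run the tests
pytest tests/
git commit -am "Describe your change"
git push origin my-new-feature
```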
132 | 133 | Next, you'll need to download and configure the project on your local machine. You can do this by following the instructions in our [README](README.md) file. The README outlines how to install the library and run test cases. 134 | 135 | Please create a new branch for your changes by using the `git checkout -b <branch-name>` command. 136 | 137 | Once you have made a change, please run all test cases according to the instructions in the [README](README.md) file. This helps us ensure the quality of the code submitted to the library. 138 | 139 | When you are ready to submit a change, commit your changes to the branch. Then, submit a pull request to this repository. 140 | 141 | A contributor will review your request and either approve it or provide feedback and proposed changes. 142 | 143 | ## Feedback 144 | 145 | Do you have some feedback on how we can improve this file? Let us know by submitting an issue in the [Issues](https://github.com/capjamesg/visionscript/issues) section of this repository. 146 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | - capjamesg 4 | - mahimairaja -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 capjamesg 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | tests: 2 | pytest tests/ 3 | build: 4 | python3 -m pip install --upgrade build 5 | python3 -m build 6 | install: 7 | pip3 install .
8 | deploy: 9 | pip3 install twine 10 | python3 -m twine upload --repository pypi dist/* 11 | clean: 12 | rm -rf dist 13 | rm -rf build 14 | rm -rf *.egg-info 15 | lint: 16 | black visionscript/* 17 | isort visionscript/* -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![VisionScript logo](https://visionscript.dev/assets/full_logo.svg) 2 | 3 | # VisionScript 4 | 5 | [VisionScript](https://visionscript.dev) is an abstract programming language for doing common computer vision tasks, fast. 6 | 7 | VisionScript is built in Python, offering a simple syntax for running object detection, classification, and segmentation models. [Read the documentation](https://visionscript.dev/docs/). 8 | 9 | [View the demo](https://vimeo.com/856043804). 10 | 11 | ## Get Started 🚀 12 | 13 | First, install VisionScript: 14 | 15 | ```bash 16 | pip install visionscript 17 | ``` 18 | 19 | You can then run VisionScript using: 20 | 21 | ```bash 22 | visionscript 23 | ``` 24 | 25 | This will open a VisionScript REPL in which you can type commands. 26 | 27 | ### Run a File 📁 28 | 29 | To run a VisionScript file, use: 30 | 31 | ```bash 32 | visionscript ./your_file.vic 33 | ``` 34 | 35 | ### Use VisionScript in a Notebook 📓 36 | 37 | VisionScript offers an interactive web notebook through which you can run VisionScript code. 38 | 39 | To use the notebook, run: 40 | 41 | ```bash 42 | visionscript --notebook 43 | ``` 44 | 45 | This will open a notebook in your browser. Notebooks are ephemeral; you will need to copy your code to a file to save it. 46 | 47 | ## Quickstart 🚀 48 | 49 | ### Find people in an image using object detection 50 | 51 | ``` 52 | Load["./photo.jpg"] 53 | Detect["person"] 54 | Say[] 55 | ``` 56 | 57 | ### Find people in all images in a folder using object detection 58 | 59 | ``` 60 | In["./images"] 61 | Detect["person"] 62 | Say[] 63 | ``` 64 | 65 | ### Replace people in a photo with an emoji 66 | 67 | ``` 68 | Load["./abbey.jpg"] 69 | Size[] 70 | Say[] 71 | Detect["person"] 72 | Replace["emoji.png"] 73 | Save["./abbey2.jpg"] 74 | ``` 75 | 76 | ### Classify an image 77 | 78 | ``` 79 | Load["./photo.jpg"] 80 | Classify["apple", "banana"] 81 | ``` 82 | 83 | ## Installation 👷 84 | 85 | To install VisionScript, clone this repository and run `pip install -r requirements.txt`. 86 | 87 | Then, make a file ending in `.vic` in which to write your VisionScript code. 88 | 89 | When you have written your code, run: 90 | 91 | ```bash 92 | visionscript ./your_file.vic 93 | ``` 94 | 95 | ### Run in debug mode 96 | 97 | Running in debug mode shows the full Abstract Syntax Tree (AST) of your code. 98 | 99 | ```bash 100 | visionscript ./your_file.vic --showtree=True 101 | ``` 102 | 103 | Debug mode is useful for debugging code while adding new features to the VisionScript language. 104 | 105 | ## Inspiration 🌟 106 | 107 | The inspiration behind this project was to build a simple way of doing one-off computer vision tasks. 108 | 109 | Consider a scenario where you want to run zero-shot classification on a folder of images. With VisionScript, you can do this in three lines of code: 110 | 111 | ``` 112 | In["./images"] 113 | Classify["cat", "dog"] 114 | Say[] 115 | ``` 116 | 117 | VisionScript is not meant to be a full programming language for all vision tasks, but rather an abstract way of doing common tasks.
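For a sense of what those three lines abstract away, here is a rough sketch of the same zero-shot task written directly in Python against OpenAI's CLIP. This illustrates the boilerplate involved; it is not the exact code VisionScript runs:

```python
import os

import clip  # https://github.com/openai/CLIP
import torch
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)

labels = ["cat", "dog"]
text = clip.tokenize(labels).to(device)

for name in os.listdir("./images"):
    if not name.lower().endswith((".jpg", ".jpeg", ".png")):
        continue

    image = preprocess(Image.open(os.path.join("./images", name))).unsqueeze(0).to(device)

    with torch.no_grad():
        # score the image against each text label
        logits_per_image, _ = model(image, text)
        probs = logits_per_image.softmax(dim=-1)

    print(name, labels[probs.argmax().item()])
```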
118 | 119 | VisionScript is ideal if you are new to concepts like "classify" and "segment" and want to explore what they do to an image. 120 | 121 | ### Syntax 122 | 123 | The syntax is inspired by both Python and the Wolfram Language. VisionScript is an interpreted language, run line-by-line like Python. Statements use the format: 124 | 125 | ``` 126 | Statement[argument1, argument2, ...] 127 | ``` 128 | 129 | This is the same format as the Wolfram Language. 130 | 131 | ### Lexical Inference and Memory 132 | 133 | An (I think!) unique feature in VisionScript compared to other languages is lexical inference. 134 | 135 | You don't need to declare variables to store images, etc. Rather, you can let VisionScript do the work. Consider this example: 136 | 137 | ``` 138 | Load["./photo.jpg"] 139 | Size[] 140 | Say[] 141 | ``` 142 | 143 | Here, `Size[]` and `Say[]` take no arguments. Rather, they operate on the last result. The Wolfram Language lets you reference the most recent output using `%`; VisionScript applies the same concept implicitly, with no special symbol needed. 144 | 145 | This is why quick experiments stay short: you rarely need to name intermediate values. 146 | 147 | ## Developer Setup 🛠 148 | 149 | If you want to add new features or fix bugs in the VisionScript language, you will need to set up a developer environment. 150 | 151 | To do so, clone the language repository: 152 | 153 | ```bash 154 | git clone https://github.com/capjamesg/VisionScript 155 | ``` 156 | 157 | Then, install the required dependencies and VisionScript: 158 | 159 | ```bash 160 | pip install -r requirements.txt 161 | pip install -e . 162 | ``` 163 | 164 | Now, you can run VisionScript using: 165 | 166 | ```bash 167 | visionscript 168 | ``` 169 | 170 | ## Supported Models 📚 171 | 172 | VisionScript provides abstract wrappers around: 173 | 174 | - [CLIP](https://github.com/openai/clip) by OpenAI (Classification) 175 | - [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) (Object Detection Training, Segmentation Training) 176 | - [FastSAM](https://github.com/CASIA-IVA-Lab/FastSAM) by CASIA-IVA-Lab (Segmentation) 177 | - [GroundedSAM](https://docs.autodistill.com/base_models/groundedsam/) (Object Detection, Segmentation) 178 | - [BLIP](https://github.com/salesforce/BLIP) (Caption Generation) 179 | - [ViT](https://github.com/autodistill/autodistill-vit) (Classification Training) 180 | 181 | ## License 📝 182 | 183 | This project is licensed under an [MIT license](LICENSE). 184 | -------------------------------------------------------------------------------- /TEST_COVERAGE.md: -------------------------------------------------------------------------------- 1 | # VisionScript Language Test Coverage 2 | 3 | The VisionScript Language is the language used to write VisionScript programs. This document lists all built-in VisionScript functions and the state of their test coverage. 4 | 5 | This document pertains only to the VisionScript Language and runtime, not Notebooks or Cloud. 6 | 7 | This document does not say which tests are passing. It only says whether a function has tests. 8 | 9 | To run the tests, run `pytest tests/` in the root directory of this repository. 10 | 11 | ## Manual Testing 12 | 13 | Some methods need manual testing because they use a webcam.
The following functions must be tested manually: 14 | 15 | - [X] `Breakpoint[]` 16 | - [X] `Compare[]` 17 | - [X] `GetFPS[]` 18 | - [X] `Show[]` 19 | - [X] `ShowText[]` 20 | - [X] `UseCamera[]` 21 | - [ ] `Deploy[]` 22 | 23 | ## Automated Testing 24 | 25 | A [1] indicates a test `.vic` file has been written but the corresponding Python test has not been added to `tests/test_visionscript.py`. 26 | 27 | ### Functions 28 | 29 | - [X] `Blur[]` 30 | - [X] `Break[]` 31 | - [X] `Caption[]` 32 | - [X] `Classify[]` 33 | - [X] `ComparePose[]` 34 | - [X] `Count[]` 35 | - [X] `CountInRegion[]` 36 | - [X] `Cutout[]` 37 | - [X] `Describe[]` 38 | - [X] `Detect[]` 39 | - [X] `DetectPose[]` 40 | - [X] `Exit[]` 41 | - [X] `FilterByClass[]` 42 | - [X] `Find[]` 43 | - [X] `First[]` 44 | - [x] `GetDistinctScenes[]` 45 | - [X] `GetEdges[]` 46 | - [X] `GetText[]` 47 | - [X] `Greyscale[]` 48 | - [X] `If[]` 49 | - [X] `Import[]` 50 | - [X] `In[]` (folder of images) 51 | - [X] `In[]` (video file) 52 | - [X] `Input[]` 53 | - [X] `Last[]` 54 | - [ ] `Load[]` 55 | - [X] `Make[]` 56 | - [X] `Not[]` 57 | - [ ] `Paste[]` 58 | - [ ] `PasteRandom[]` 59 | - [X] `Random[]` 60 | - [X] `Read[]` 61 | - [X] `ReadQR[]` 62 | - [X] `Replace[]` 63 | - [X] `Reset[]` 64 | - [X] `Resize[]` 65 | - [X] `Rotate[]` 66 | - [X] `Save[]` 67 | - [X] `Say[]` 68 | - [X] `Search[]` 69 | - [X] `Segment[]` 70 | - [X] `Select[]` 71 | - [X] `SetBrightness[]` 72 | - [X] `SetConfidence[]` 73 | - [X] `Similarity[]` 74 | - [X] `Size[]` 75 | - [X] `Use[]` 76 | - [X] `Profile[]` 77 | - [X] `Web[]` 78 | - [ ] `Crop[]` 79 | - [ ] `Contains[]` 80 | - [X] `Get[]` 81 | - [X] `Set[]` 82 | - [ ] `Remove[]` [1] 83 | - [X] `Wait[]` 84 | - [ ] `Track[]` 85 | - [ ] `GetUniqueAppearances[]` 86 | - [ ] `Apply[]` 87 | - [X] `Grid[]` 88 | - [ ] `Shuffle[]` 89 | - [X] `GetColors[]` 90 | - [X] `GetColours[]` 91 | - [X] `IsItA[]` 92 | - [X] `Is[]` 93 | - [ ] `Merge[]` 94 | - [ ] `Say[]` [1] 95 | 96 | ### Language Features 97 | 98 | - [X] Increment 99 | - [X] Decrement 100 | - [X] Comment 101 | - [X] Assignment 102 | - [X] List 103 | - [ ] Associative array [1] 104 | - [X] Greater than 105 | - [X] Less than 106 | - [X] Greater than or equal to 107 | - [X] Less than or equal to 108 | - [X] Equal to 109 | - [X] Not equal to 110 | 111 | ### Exceptions 112 | 113 | - [X] `visionscript.errors.PathNotExists` 114 | - [ ] `visionscript.errors.StackEmpty` 115 | - [ ] `visionscript.errors.SetFunctionError` 116 | - [ ] `visionscript.errors.ImageOutOfBounds` 117 | - [ ] `visionscript.errors.CameraNotAccessible` 118 | 119 | ### States 120 | 121 | - [ ] Ensure a buffer overflow does not occur when loading more than 1000 large images into memory 122 | - [X] Ensure the `image_stack` never exceeds 100 images 123 | 124 | ## Models 125 | 126 | - [ ] YOLOv8 Object Detection (small) [1] 127 | - [ ] FastSAM [1] 128 | - [ ] Grounding DINO [1] 129 | - [ ] YOLOv8 Pose (small) [1] 130 | - [ ] Roboflow `rock paper scissors` [1] -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | lark 2 | pyspellchecker 3 | supervision 4 | numpy 5 | Pillow 6 | requests 7 | ultralytics 8 | autodistill 9 | autodistill_fastsam 10 | autodistill_grounded_sam 11 | autodistill_blip 12 | autodistill_vit 13 | click 14 | validators 15 | werkzeug 16 | faiss-cpu 17 | torch 18 | easyocr 19 | scikit-learn 20 | webcolors 21 | transformers==4.38.0 22 | matplotlib 23 | watchdog 24 | qrcode 25 | flask 26 | 
cryptography 27 | pygtrie 28 | roboflow -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | from setuptools import find_packages 3 | import re 4 | import os 5 | import subprocess 6 | 7 | with open("./visionscript/__init__.py", 'r') as f: 8 | content = f.read() 9 | # from https://www.py4u.net/discuss/139845 10 | version = re.search(r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content).group(1) 11 | 12 | with open("README.md", "r") as fh: 13 | long_description = fh.read() 14 | 15 | with open("./requirements.txt", "r") as f: 16 | reqs = f.read().splitlines() 17 | 18 | def install_fast_sam_for_segmentation() -> None: 19 | commands = [ 20 | "pip install -r requirements.txt", 21 | "curl" # install weights 22 | ] 23 | 24 | HOME = os.getcwd() 25 | 26 | # shell=True is required because the command is passed as a single string 27 | subprocess.run(f"cd {HOME} && git clone https://github.com/CASIA-IVA-Lab/FastSAM", shell=True) 28 | 29 | for command in commands: 30 | subprocess.run(f"cd {HOME} && {command}", shell=True) 31 | 32 | 33 | setuptools.setup( 34 | name="visionscript", 35 | version=version, 36 | author="capjamesg", 37 | author_email="jamesg@jamesg.blog", 38 | description="VisionScript is an abstract programming language for doing common computer vision tasks, fast.", 39 | long_description=long_description, 40 | long_description_content_type="text/markdown", 41 | url="https://github.com/capjamesg/visionscript", 42 | install_requires=reqs, 43 | packages=find_packages(exclude=("tests",), include=("visionscript",)), 44 | extras_require={ 45 | "dev": ["flake8", "black==22.3.0", "isort", "twine", "pytest", "wheel"], 46 | }, 47 | entry_points={ 48 | "console_scripts": [ 49 | "visionscript=visionscript.lang:main", 50 | ], 51 | }, 52 | classifiers=[ 53 | "Programming Language :: Python :: 3", 54 | "License :: OSI Approved :: MIT License", 55 | "Operating System :: OS Independent", 56 | ], 57 | python_requires=">=3.7", 58 | ) 59 | 60 | # install_fast_sam_for_segmentation() -------------------------------------------------------------------------------- /tests/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/.DS_Store -------------------------------------------------------------------------------- /tests/__pycache__/test_visionscript.cpython-39-pytest-7.1.2.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/__pycache__/test_visionscript.cpython-39-pytest-7.1.2.pyc -------------------------------------------------------------------------------- /tests/__pycache__/test_visionscript.cpython-39-pytest-7.4.0.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/__pycache__/test_visionscript.cpython-39-pytest-7.4.0.pyc -------------------------------------------------------------------------------- /tests/images/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/.DS_Store -------------------------------------------------------------------------------- /tests/images/bus.jpg:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/bus.jpg -------------------------------------------------------------------------------- /tests/images/bus.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/bus.svg -------------------------------------------------------------------------------- /tests/images/cans.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/cans.png -------------------------------------------------------------------------------- /tests/images/cat.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/cat.jpeg -------------------------------------------------------------------------------- /tests/images/corrupted_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/corrupted_image.jpg -------------------------------------------------------------------------------- /tests/images/gettext.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/gettext.png -------------------------------------------------------------------------------- /tests/images/ocr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/ocr.png -------------------------------------------------------------------------------- /tests/images/pose.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/pose.jpg -------------------------------------------------------------------------------- /tests/images/qr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/qr.png -------------------------------------------------------------------------------- /tests/images/scissors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/scissors.png -------------------------------------------------------------------------------- /tests/images/webmention_button.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/images/webmention_button.webp -------------------------------------------------------------------------------- /tests/manual_vics/breakpoint.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | 
Detect["person"] 3 | Breakpoint[] 4 | Count[] 5 | Say[] -------------------------------------------------------------------------------- /tests/manual_vics/compare_raw.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Load["./tests/images/bus.jpg"] 3 | Compare[] -------------------------------------------------------------------------------- /tests/manual_vics/compare_with_detections.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Detect["person"] 3 | Load["./tests/images/bus.jpg"] 4 | Detect["person"] 5 | Compare[] -------------------------------------------------------------------------------- /tests/manual_vics/compare_with_segments.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Segment[] 3 | Load["./tests/images/bus.jpg"] 4 | Segment[] 5 | Compare[] -------------------------------------------------------------------------------- /tests/manual_vics/getfps.vic: -------------------------------------------------------------------------------- 1 | UseCamera[] 2 | GetFPS[] 3 | Show[] 4 | EndCamera -------------------------------------------------------------------------------- /tests/manual_vics/roboflow.vic: -------------------------------------------------------------------------------- 1 | Use["roboflow rock paper scissors"] 2 | 3 | UseCamera["background"] 4 | Detect[] 5 | 6 | If["rock"] 7 | Break[] 8 | End 9 | 10 | Say[] 11 | EndCamera -------------------------------------------------------------------------------- /tests/manual_vics/show.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Show[] -------------------------------------------------------------------------------- /tests/manual_vics/show_with_detections.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Detect["person"] 3 | Show[] -------------------------------------------------------------------------------- /tests/manual_vics/show_with_segments.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Segment[] 3 | Show[] -------------------------------------------------------------------------------- /tests/manual_vics/showtext.vic: -------------------------------------------------------------------------------- 1 | UseCamera[] 2 | Load[] 3 | Count["person"] 4 | ShowText[] 5 | EndCamera -------------------------------------------------------------------------------- /tests/manual_vics/usecamera.vic: -------------------------------------------------------------------------------- 1 | UseCamera[] 2 | Load[] 3 | Detect["person"] 4 | Show[] 5 | EndCamera -------------------------------------------------------------------------------- /tests/output/blur.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/blur.jpg -------------------------------------------------------------------------------- /tests/output/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/bus.jpg 
-------------------------------------------------------------------------------- /tests/output/bus_brightness.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/bus_brightness.jpg -------------------------------------------------------------------------------- /tests/output/bus_cutout.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/bus_cutout.jpg -------------------------------------------------------------------------------- /tests/output/bus_cutout_saved.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/bus_cutout_saved.jpg -------------------------------------------------------------------------------- /tests/output/bus_edges.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/bus_edges.jpg -------------------------------------------------------------------------------- /tests/output/bus_resized.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/bus_resized.jpg -------------------------------------------------------------------------------- /tests/output/greyscale.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/greyscale.jpg -------------------------------------------------------------------------------- /tests/output/predictions.csv: -------------------------------------------------------------------------------- 1 | [ 48.739 399.26 244.5 902.5],0.86898017,0,person 2 | [ 670.27 380.28 809.86 875.69],0.8536039,0,person 3 | [ 221.39 405.79 344.72 857.39],0.8193051,0,person 4 | [ 0 551.01 67.105 873.94],0.30129424,0,person 5 | -------------------------------------------------------------------------------- /tests/output/replace_in_images.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/replace_in_images.jpg -------------------------------------------------------------------------------- /tests/output/replace_with_color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/replace_with_color.jpg -------------------------------------------------------------------------------- /tests/output/rotate.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/output/rotate.jpg -------------------------------------------------------------------------------- /tests/valid_output/blur.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/blur.jpg -------------------------------------------------------------------------------- /tests/valid_output/bus.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus.jpg -------------------------------------------------------------------------------- /tests/valid_output/bus_brightness.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus_brightness.jpg -------------------------------------------------------------------------------- /tests/valid_output/bus_brightness.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus_brightness.png -------------------------------------------------------------------------------- /tests/valid_output/bus_cutout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus_cutout.png -------------------------------------------------------------------------------- /tests/valid_output/bus_cutout_saved.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus_cutout_saved.png -------------------------------------------------------------------------------- /tests/valid_output/bus_edges.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus_edges.jpg -------------------------------------------------------------------------------- /tests/valid_output/bus_edges.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus_edges.png -------------------------------------------------------------------------------- /tests/valid_output/bus_resized.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/bus_resized.jpg -------------------------------------------------------------------------------- /tests/valid_output/classify_image.vic.txt: -------------------------------------------------------------------------------- 1 | banana -------------------------------------------------------------------------------- /tests/valid_output/find_in_images.vic.txt: -------------------------------------------------------------------------------- 1 | Object found: person 2 | Confidence: 86.90% 3 | xyxy Coordinates: 48, 399, 244, 902 4 | 5 | Object found: person 6 | Confidence: 85.36% 7 | xyxy Coordinates: 670, 380, 809, 875 8 | 9 | Object found: person 10 | Confidence: 81.93% 11 | xyxy Coordinates: 221, 405, 344, 857 12 | 13 | Object found: person 14 | Confidence: 30.13% 15 | xyxy Coordinates: 
0, 551, 67, 873 16 | 17 | -------------------------------------------------------------------------------- /tests/valid_output/greyscale.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/greyscale.jpg -------------------------------------------------------------------------------- /tests/valid_output/load_detect_save.vic.txt: -------------------------------------------------------------------------------- 1 | Saved to ./bus1.jpg -------------------------------------------------------------------------------- /tests/valid_output/replace_in_images.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/replace_in_images.png -------------------------------------------------------------------------------- /tests/valid_output/replace_with_color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/replace_with_color.jpg -------------------------------------------------------------------------------- /tests/valid_output/rotate.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/valid_output/rotate.jpg -------------------------------------------------------------------------------- /tests/vics/associative_array.vic: -------------------------------------------------------------------------------- 1 | images = ["image1": "./tests/images/bus.jpg"] 2 | 3 | Get[images, "image1"] 4 | Load[] 5 | 6 | Find["person"] -------------------------------------------------------------------------------- /tests/vics/blur.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Blur[] 3 | Save["./tests/output/blur.jpg"] 4 | -------------------------------------------------------------------------------- /tests/vics/break.vic: -------------------------------------------------------------------------------- 1 | counter = 0 2 | 3 | In["./tests/images"] 4 | counter += 1 5 | 6 | Read[] 7 | 8 | Break[] 9 | EndIn -------------------------------------------------------------------------------- /tests/vics/buffer_overload_prevention.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] -------------------------------------------------------------------------------- /tests/vics/caption.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Caption[] -------------------------------------------------------------------------------- /tests/vics/classify_image.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Classify["apple", "banana"] -------------------------------------------------------------------------------- /tests/vics/comment.vic: -------------------------------------------------------------------------------- 1 | # Profile[] 2 | 3 | Load["./tests/images/bus.jpg"] 4 | Classify["apple", "banana"] 
-------------------------------------------------------------------------------- /tests/vics/compare_pose.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | DetectPose[] 3 | 4 | Load["./tests/images/pose.jpg"] 5 | DetectPose[] 6 | 7 | ComparePose[] 8 | Say[] -------------------------------------------------------------------------------- /tests/vics/count.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Count["person"] 3 | Say[] -------------------------------------------------------------------------------- /tests/vics/count_in_region.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Detect["person"] 3 | CountInRegion["bottom half"] 4 | -------------------------------------------------------------------------------- /tests/vics/cutout.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Detect["bus"] 3 | Cutout[] 4 | Save["./tests/output/bus_cutout.jpg"] -------------------------------------------------------------------------------- /tests/vics/decrement.vic: -------------------------------------------------------------------------------- 1 | counter = 0 2 | 3 | In["./tests/directory_list_test"] 4 | Load[] 5 | counter-- 6 | EndIn 7 | 8 | Say[counter] -------------------------------------------------------------------------------- /tests/vics/describe.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Describe[] -------------------------------------------------------------------------------- /tests/vics/detect_pose.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | DetectPose[] 3 | Say[] -------------------------------------------------------------------------------- /tests/vics/equal_to.vic: -------------------------------------------------------------------------------- 1 | x = 1 2 | 3 | If[x == 1] 4 | Say["x is equal to 1"] 5 | End -------------------------------------------------------------------------------- /tests/vics/exit.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/ocr.png"] 2 | Exit[] 3 | GetText[] 4 | Say[] -------------------------------------------------------------------------------- /tests/vics/filter_by_class.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Detect[] 3 | FilterByClass["bus"] 4 | Count[] -------------------------------------------------------------------------------- /tests/vics/find.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Find["person"] 3 | Save["./bus1.jpg"] 4 | -------------------------------------------------------------------------------- /tests/vics/find_in_images.vic: -------------------------------------------------------------------------------- 1 | In["./tests/directory_list_test"] 2 | Load[] 3 | Detect["person"] 4 | Say[] 5 | EndIn -------------------------------------------------------------------------------- /tests/vics/first.vic: -------------------------------------------------------------------------------- 1 | x = [1, 2, 3] 2 | 3 | First[x] 
-------------------------------------------------------------------------------- /tests/vics/get.vic: -------------------------------------------------------------------------------- 1 | images = ["image1": "./tests/images/bus.jpg"] 2 | 3 | Get[images, "image1"] 4 | Load[] 5 | 6 | Find["person"] 7 | Count[] -------------------------------------------------------------------------------- /tests/vics/get_distinct_scenes.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/videos/cars.mp4"] 2 | GetDistinctScenes[] -------------------------------------------------------------------------------- /tests/vics/get_text.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/ocr.png"] 2 | GetText[] 3 | Say[] -------------------------------------------------------------------------------- /tests/vics/getcolors.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Segment["hat"] 3 | 4 | GetColors[] -------------------------------------------------------------------------------- /tests/vics/getcolours.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/pose.jpg"] 2 | Segment["hat"] 3 | 4 | GetColours[] -------------------------------------------------------------------------------- /tests/vics/getedges.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | GetEdges[] 3 | Save["./tests/output/bus_edges.jpg"] -------------------------------------------------------------------------------- /tests/vics/greater_than.vic: -------------------------------------------------------------------------------- 1 | x = 1 2 | 3 | If[x > 1] 4 | Say["x is greater than 1"] 5 | End -------------------------------------------------------------------------------- /tests/vics/greater_than_or_equal_to.vic: -------------------------------------------------------------------------------- 1 | x = 1 2 | 3 | If[x >= 1] 4 | Say["x is greater than or equal to 1"] 5 | End -------------------------------------------------------------------------------- /tests/vics/greyscale.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Greyscale[] 3 | Save["./tests/output/greyscale.jpg"] 4 | -------------------------------------------------------------------------------- /tests/vics/grid.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/cat.jpeg"] 2 | Load["./tests/images/bus.jpg"] 3 | Load["./tests/images/pose.jpg"] 4 | 5 | Grid[3] 6 | 7 | Save["./tests/output/grid.png"] -------------------------------------------------------------------------------- /tests/vics/if.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | 3 | If [Count["person"] > 2] 4 | Say["More than two people!"] 5 | End -------------------------------------------------------------------------------- /tests/vics/import.vic: -------------------------------------------------------------------------------- 1 | Import["./tests/vics/classify_image.vic"] 2 | -------------------------------------------------------------------------------- /tests/vics/in_video.vic: -------------------------------------------------------------------------------- 1 | counter = 0 2 | 3 | 
In["./tests/videos/cars.mp4"] 4 | counter += 1 5 | End 6 | 7 | Say[] -------------------------------------------------------------------------------- /tests/vics/increment.vic: -------------------------------------------------------------------------------- 1 | counter = 0 2 | 3 | In["./tests/directory_list_test"] 4 | Load[] 5 | counter++ 6 | EndIn 7 | 8 | Say[counter] -------------------------------------------------------------------------------- /tests/vics/input.vic: -------------------------------------------------------------------------------- 1 | Load[Input["file"]] 2 | Count["person"] 3 | Say[] -------------------------------------------------------------------------------- /tests/vics/is.vic: -------------------------------------------------------------------------------- 1 | images = ["image1": "./tests/images/bus.jpg"] 2 | 3 | Get[images, "image1"] 4 | Load[] 5 | 6 | Find["person"] 7 | Is[] -------------------------------------------------------------------------------- /tests/vics/last.vic: -------------------------------------------------------------------------------- 1 | x = [1, 2, 3] 2 | 3 | Last[x] -------------------------------------------------------------------------------- /tests/vics/less_than.vic: -------------------------------------------------------------------------------- 1 | x = 1 2 | 3 | If[x < 1] 4 | Say["x is less than 1"] 5 | End -------------------------------------------------------------------------------- /tests/vics/less_than_or_equal_to.vic: -------------------------------------------------------------------------------- 1 | x = 1 2 | 3 | If[x <= 1] 4 | Say["x is less than or equal to 1"] 5 | End -------------------------------------------------------------------------------- /tests/vics/list.vic: -------------------------------------------------------------------------------- 1 | x = [1, 2, 3] 2 | First[x] -------------------------------------------------------------------------------- /tests/vics/load_detect_save.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Detect["person"] 3 | Save["./bus1.jpg"] 4 | -------------------------------------------------------------------------------- /tests/vics/load_image.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] -------------------------------------------------------------------------------- /tests/vics/load_video.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/videos/cars.mp4"] -------------------------------------------------------------------------------- /tests/vics/make.vic: -------------------------------------------------------------------------------- 1 | Make count [] 2 | If [Count["person"] > 2] 3 | Say["More than two people!"] 4 | End 5 | End 6 | 7 | Load["./tests/images/bus.jpg"] 8 | count[] -------------------------------------------------------------------------------- /tests/vics/merge.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | results = Detect["person"] 3 | Load["./tests/images/bus.jpg"] 4 | results1 = Detect["person"] 5 | 6 | Merge[results, results1] -------------------------------------------------------------------------------- /tests/vics/models/fastsam.vic: -------------------------------------------------------------------------------- 1 | Use["fastsam"] 2 | 3 | Load["./tests/images/bus.jpg"] 4 | 
Segment["bus"] 5 | Count[] -------------------------------------------------------------------------------- /tests/vics/models/groundingdino.vic: -------------------------------------------------------------------------------- 1 | Use["groundingdino"] 2 | 3 | Load["./tests/images/bus.jpg"] 4 | Detect["bus"] 5 | Count[] -------------------------------------------------------------------------------- /tests/vics/models/roboflow.vic: -------------------------------------------------------------------------------- 1 | # this model comes out of the box 2 | # with VisionScript 3 | # but an API key is needed for use 4 | Use["roboflow rock paper scissors"] 5 | Load["./tests/images/scissors.png"] 6 | Detect["scissors"] 7 | Count[] -------------------------------------------------------------------------------- /tests/vics/models/yolov8.vic: -------------------------------------------------------------------------------- 1 | Use["yolov8"] 2 | 3 | Load["./tests/images/bus.jpg"] 4 | Detect["bus"] 5 | Count[] -------------------------------------------------------------------------------- /tests/vics/models/yolov8s-pose.vic: -------------------------------------------------------------------------------- 1 | Use["yolov8s-pose"] 2 | 3 | Load["./tests/images/pose.jpg"] 4 | DetectPose[] 5 | Say[] -------------------------------------------------------------------------------- /tests/vics/not.vic: -------------------------------------------------------------------------------- 1 | Not[True] 2 | Read[] -------------------------------------------------------------------------------- /tests/vics/not_equal_to.vic: -------------------------------------------------------------------------------- 1 | x = 1 2 | 3 | If[x != 1] 4 | Say["x is not equal to 1"] 5 | End -------------------------------------------------------------------------------- /tests/vics/profile.vic: -------------------------------------------------------------------------------- 1 | Profile[] 2 | 3 | Load["./tests/images/bus.jpg"] 4 | Classify["apple", "banana"] -------------------------------------------------------------------------------- /tests/vics/raises_exceptions/image_corrupted.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/corrupted_image.jpg"] 2 | Show[] -------------------------------------------------------------------------------- /tests/vics/raises_exceptions/image_not_supported.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/webmention_button.webp"] 2 | Show[] -------------------------------------------------------------------------------- /tests/vics/raises_exceptions/image_out_of_bounds.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Crop[1000, 1000, 1000, 1000] -------------------------------------------------------------------------------- /tests/vics/raises_exceptions/path_not_exists.vic: -------------------------------------------------------------------------------- 1 | Load["test"] -------------------------------------------------------------------------------- /tests/vics/raises_exceptions/set_function_error.vic: -------------------------------------------------------------------------------- 1 | Use["coffee"] -------------------------------------------------------------------------------- /tests/vics/raises_exceptions/stack_empty.vic: -------------------------------------------------------------------------------- 1 | 
Detect["person"] -------------------------------------------------------------------------------- /tests/vics/random.vic: -------------------------------------------------------------------------------- 1 | x = [1, 2, 3] 2 | 3 | Random[x] 4 | Say[] -------------------------------------------------------------------------------- /tests/vics/read.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Count["person"] 3 | 4 | If [Read[] > 2] 5 | Say["More than two people!"] 6 | End -------------------------------------------------------------------------------- /tests/vics/readqr.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/qr.png"] 2 | ReadQR[] -------------------------------------------------------------------------------- /tests/vics/remove.vic: -------------------------------------------------------------------------------- 1 | x = ["coffee", "tea", "cookies"] 2 | 3 | Remove[x, "coffee"] 4 | Say[x] -------------------------------------------------------------------------------- /tests/vics/replace_in_images.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Size[] 3 | Say[] 4 | Detect["person"] 5 | Replace["./tests/images/emoji.png"] 6 | Save["./tests/output/replace_in_images.jpg"] 7 | -------------------------------------------------------------------------------- /tests/vics/replace_with_color.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Find["person"] 3 | Cutout[] 4 | Replace["blue"] 5 | Save["./tests/output/replace_with_color.jpg"] 6 | -------------------------------------------------------------------------------- /tests/vics/reset.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Classify["apple", "banana"] -------------------------------------------------------------------------------- /tests/vics/resize.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Resize[200, 200] 3 | Save["./tests/output/bus_resized.jpg"] -------------------------------------------------------------------------------- /tests/vics/rotate.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Rotate[90] 3 | Save["./tests/output/rotate.jpg"] 4 | -------------------------------------------------------------------------------- /tests/vics/save.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Detect["bus"] 3 | Cutout[] 4 | Save["./tests/output/bus_cutout_saved.jpg"] -------------------------------------------------------------------------------- /tests/vics/save_video.vic: -------------------------------------------------------------------------------- 1 | counter = 0 2 | 3 | In["./tests/videos/cars.mp4"] 4 | Greyscale[] 5 | End 6 | 7 | Save["./tests/output/video.mp4"] 8 | 9 | Say[] -------------------------------------------------------------------------------- /tests/vics/say.vic: -------------------------------------------------------------------------------- 1 | Count["person"] 2 | Say[] -------------------------------------------------------------------------------- /tests/vics/search.vic: 
-------------------------------------------------------------------------------- 1 | In["./tests/images"] 2 | Load[] 3 | EndIn 4 | 5 | Search["bus"] 6 | -------------------------------------------------------------------------------- /tests/vics/segment_image.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Segment["person"] 3 | Say[] 4 | -------------------------------------------------------------------------------- /tests/vics/set.vic: -------------------------------------------------------------------------------- 1 | images = [] 2 | 3 | Set[images, "image1", "./tests/images/bus.jpg"] 4 | 5 | Get[images, "image1"] 6 | 7 | Load[] 8 | 9 | Find["person"] 10 | Count[] -------------------------------------------------------------------------------- /tests/vics/setbrightness.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | SetBrightness[100] 3 | Save["./tests/output/bus_brightness.jpg"] -------------------------------------------------------------------------------- /tests/vics/setconfidence.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | SetConfidence[75] 3 | Find["person"] 4 | Say[] -------------------------------------------------------------------------------- /tests/vics/similarity.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Load["./tests/images/bus.jpg"] 3 | 4 | Similarity[] 5 | Say[] 6 | -------------------------------------------------------------------------------- /tests/vics/size.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Size[] -------------------------------------------------------------------------------- /tests/vics/use.vic: -------------------------------------------------------------------------------- 1 | Use["groundingdino"] -------------------------------------------------------------------------------- /tests/vics/use_background.vic: -------------------------------------------------------------------------------- 1 | Use["background"] -------------------------------------------------------------------------------- /tests/vics/use_roboflow.vic: -------------------------------------------------------------------------------- 1 | Use["roboflow rock paper scissors"] -------------------------------------------------------------------------------- /tests/vics/variable_assignment.vic: -------------------------------------------------------------------------------- 1 | x = Count["person"] 2 | 3 | Say[x] -------------------------------------------------------------------------------- /tests/vics/wait.vic: -------------------------------------------------------------------------------- 1 | Load["./tests/images/bus.jpg"] 2 | Wait[1] 3 | Say["Complete!"] -------------------------------------------------------------------------------- /tests/vics/web.vic: -------------------------------------------------------------------------------- 1 | Web["https://example.com"] 2 | Say[] -------------------------------------------------------------------------------- /tests/videos/cars.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/tests/videos/cars.mp4 
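The visionscript package source follows. cloud.py (next) exposes a small deployment API: POST /create registers a script or notebook under a slug, guarded by the API key the server prints at startup. Below is a hedged sketch of registering an app. The host, port, and key value are placeholders, and the script is the one from tests/vics/input.vic above.

```python
# Hedged sketch: register a VisionScript app via cloud.py's /create endpoint.
# The server address is a placeholder (Flask's default port is assumed), and
# the API key must be the one printed when the cloud server starts.
import requests

payload = {
    "api_key": "PASTE-THE-PRINTED-KEY-HERE",
    "slug": "count-people",  # lowercased by the server and used as the app id
    "title": "Count People",
    "description": "Counts people in an uploaded photo.",
    "script": 'Load[Input["file"]]\nCount["person"]\nSay[]',
    "variables": {"file": "image"},  # matches the Input["file"] in the script
    "publish_as_noninteractive_webpage": False,
}

response = requests.post("http://localhost:5000/create", json=payload)
print(response.json())  # e.g. {"id": "http://localhost:5000/count-people"}
```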
-------------------------------------------------------------------------------- /visionscript/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/visionscript/.DS_Store -------------------------------------------------------------------------------- /visionscript/__init__.py: -------------------------------------------------------------------------------- 1 | from visionscript.lang import * 2 | 3 | __version__ = "0.0.7" 4 | -------------------------------------------------------------------------------- /visionscript/cloud.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import string 4 | import uuid 5 | from io import BytesIO 6 | 7 | import markdown 8 | import numpy as np 9 | from flask import Flask, jsonify, redirect, render_template, request, send_file 10 | 11 | from visionscript import lang, parser 12 | 13 | app = Flask(__name__) 14 | 15 | API_KEY = uuid.uuid4().hex 16 | 17 | if not os.path.exists("scripts.json"): 18 | with open("scripts.json", "w") as f: 19 | json.dump({}, f) 20 | 21 | if not os.path.exists("notebooks.json"): 22 | with open("notebooks.json", "w") as f: 23 | json.dump({}, f) 24 | 25 | with open("scripts.json", "r") as f: 26 | scripts = json.load(f) 27 | 28 | for script in scripts: 29 | scripts[script]["session"] = lang.VisionScript() 30 | 31 | with open("notebooks.json", "r") as f: 32 | notebooks = json.load(f) 33 | 34 | print("Your API key is", API_KEY) 35 | print("Keep it safe and don't share it with anyone!") 36 | 37 | 38 | @app.route("/") 39 | def index_page(): 40 | return render_template("deployintro.html", url_root=request.url_root.strip("/")) 41 | 42 | 43 | @app.route("/<id>", methods=["GET", "POST"]) 44 | def home(id): 45 | if request.method == "POST": 46 | if scripts.get(id) is None: 47 | return jsonify({"error": "Invalid ID"}) 48 | 49 | # if no session for the script, make it 50 | if scripts[id].get("session") is None: 51 | scripts[id]["session"] = lang.VisionScript() 52 | 53 | data = request.form 54 | files = request.files 55 | 56 | results = {} 57 | 58 | for variable in scripts[id]["variables"]: 59 | if not data.get(variable) and not files.get(variable): 60 | return jsonify({"error": f"Missing variable {variable}"}) 61 | # if data is an image, turn into numpy array 62 | elif scripts[id]["variables"][variable] == "image": 63 | from PIL import Image 64 | 65 | ram_file = BytesIO() 66 | 67 | files[variable].save(ram_file) 68 | 69 | ram_file.seek(0) 70 | 71 | image = Image.open(ram_file).convert("RGB") 72 | 73 | results[variable] = np.array(image)[:, :, ::-1] 74 | else: 75 | results[variable] = data[variable] 76 | 77 | try: 78 | session = scripts[id]["session"] 79 | 80 | session.state["input_variables"] = { 81 | **session.state["input_variables"], 82 | **results, 83 | } 84 | 85 | session.notebook = True 86 | 87 | session.parse_tree(parser.parse(scripts[id]["script"])) 88 | except Exception as e: 89 | # raise e  # re-raising here made the JSON error response below unreachable 90 | return jsonify({"error": str(e)}) 91 | 92 | output = session.state["output"] 93 | 94 | if isinstance(output, dict) and output.get("image"): 95 | # output is base64, convert to png 96 | import base64 97 | 98 | image = BytesIO(base64.b64decode(output["image"])) 99 | image.seek(0) 100 | return send_file(image, mimetype="image/png") 101 | 102 | return jsonify({"output": session.state["output"]}) 103 | 104 | # make sure any new scripts are added 105 | # but
preserve any existing scripts and 106 | # their state 107 | with open("scripts.json", "r") as f: 108 | new_scripts = json.load(f) 109 | 110 | for script in new_scripts: 111 | if scripts.get(script) is None: 112 | scripts[script] = new_scripts[script].copy() 113 | 114 | if not scripts.get(id): 115 | return redirect("/") 116 | 117 | image_inputs = [[v, k] for k, v in scripts[id]["variables"].items() if v == "image"] 118 | text_inputs = [[v, k] for k, v in scripts[id]["variables"].items() if v == "text"] 119 | 120 | return render_template( 121 | "index.html", 122 | id=id, 123 | image_inputs=image_inputs, 124 | text_inputs=text_inputs, 125 | title=scripts[id]["title"], 126 | ) 127 | 128 | 129 | @app.route("/notebook/<id>") 130 | @app.route("/notebook/<id>/embed") 131 | @app.route("/notebook/<id>/export_vic") 132 | @app.route("/notebook/<id>/export_vicnb") 133 | def notebook(id): 134 | with open("notebooks.json", "r") as f: 135 | notebooks = json.load(f) 136 | 137 | notebook_data = notebooks.get(id) 138 | 139 | if notebook_data is None: 140 | return redirect("/notebook") 141 | 142 | if request.path.endswith("/export_vicnb"): 143 | # force download with Content-Disposition so that user doesn't see raw JSON 144 | return ( 145 | jsonify(notebook_data), 146 | 200, 147 | {"Content-Disposition": f"attachment; filename={id}.vicnb"}, 148 | ) 149 | elif request.path.endswith("/export_vic"): 150 | # concatenate all cells 151 | cells = [i["data"] for i in notebook_data["notebook"]] 152 | 153 | code = "\n".join(cells) + "\n" 154 | 155 | return ( 156 | jsonify(code), 157 | 200, 158 | {"Content-Disposition": f"attachment; filename={id}.vic"}, 159 | ) 160 | 161 | # merge cells and output 162 | cells = [] 163 | 164 | for i, cell in enumerate(notebook_data["notebook"]): 165 | # if output has editable_text key, parse with markdown 166 | if cell.get("type") == "editable_text": 167 | cell["data"] = markdown.markdown(cell["data"]) 168 | 169 | cells.append( 170 | { 171 | "type": "code", 172 | "data": cell, 173 | "output": notebook_data["output"][i], 174 | "id": i, 175 | } 176 | ) 177 | 178 | if request.path.endswith("/embed"): 179 | template = "public_notebook_embed.html" 180 | else: 181 | template = "public_notebook.html" 182 | 183 | return render_template( 184 | template, 185 | cells=cells, 186 | url_root=request.url_root.strip("/"), 187 | title=notebook_data["title"], 188 | description=notebook_data["description"], 189 | id=id, 190 | notebook_url=request.url_root.strip("/") + "/notebook/" + id, 191 | ) 192 | 193 | 194 | @app.route("/create", methods=["POST"]) 195 | def create(): 196 | data = request.json 197 | 198 | if data.get("api_key") != API_KEY: 199 | return jsonify({"error": "Invalid API key"}), 401 200 | 201 | id = data["slug"].lower() 202 | 203 | publish_as_noninteractive_webpage = data.get("publish_as_noninteractive_webpage") 204 | 205 | if publish_as_noninteractive_webpage: 206 | # add to notebooks.json 207 | notebooks = json.load(open("notebooks.json", "r")) 208 | 209 | notebooks[id] = { 210 | "title": data["title"], 211 | "notebook": data["notebook"], 212 | "output": data["output"], 213 | "description": data.get("description"), 214 | } 215 | 216 | app_slug = ( 217 | data["title"] 218 | .translate(str.maketrans("", "", string.punctuation.replace("-", ""))) 219 | .replace(" ", "-") 220 | ) 221 | 222 | notebooks[id]["app_slug"] = app_slug 223 | 224 | with open("notebooks.json", "w") as f: 225 | json.dump(notebooks, f) 226 | 227 | return jsonify({"id": request.url_root + "notebook/" + id}) 228 | 229 | with
open("scripts.json", "r") as f: 230 | scripts = json.load(f) 231 | 232 | scripts[id] = { 233 | "title": data["title"], 234 | "script": data["script"], 235 | "variables": data["variables"], 236 | "description": data.get("description"), 237 | } 238 | 239 | app_slug = ( 240 | data["title"] 241 | .translate(str.maketrans("", "", string.punctuation.replace("-", ""))) 242 | .replace(" ", "-") 243 | ) 244 | 245 | scripts[id]["app_slug"] = app_slug 246 | 247 | with open("scripts.json", "w") as f: 248 | json.dump(scripts, f) 249 | 250 | scripts = json.load(open("scripts.json", "r")) 251 | 252 | return jsonify({"id": request.url_root + id}) 253 | 254 | 255 | @app.errorhandler(404) 256 | def page_not_found(e): 257 | return render_template("error.html", title="Page Not Found"), 404 258 | 259 | 260 | @app.errorhandler(500) 261 | def internal_server_error(e): 262 | return render_template("error.html", title="Internal Server Error"), 500 263 | -------------------------------------------------------------------------------- /visionscript/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import psutil 5 | import torch 6 | from PIL import Image 7 | 8 | import supervision as sv 9 | from visionscript import registry 10 | from visionscript.pose import Pose 11 | 12 | DATA_TYPES = { 13 | sv.Detections: "Detection", 14 | np.ndarray: "Image", 15 | torch.Tensor: "Image", 16 | Image.Image: "Image", 17 | str: "String", 18 | int: "Integer", 19 | Pose: "Pose", 20 | } 21 | 22 | STACK_MAXIMUM = { 23 | "image_stack": { 24 | # 50% of available memory 25 | "maximum": 0.5 * psutil.virtual_memory().available, 26 | "also_reset": ["detections_stack"], 27 | } 28 | } 29 | 30 | CONCURRENT_MAXIMUM = 10 31 | 32 | VIDEO_STRIDE = 2 33 | 34 | CACHE_DIRECTORY = os.path.join(os.path.expanduser("~"), ".visionscript") 35 | 36 | FASTSAM_DIR = os.path.join(CACHE_DIRECTORY, "FastSAM") 37 | FASTSAM_WEIGHTS_DIR = os.path.join(FASTSAM_DIR, "weights") 38 | 39 | CONCURRENT_VIDEO_TRANSFORMATIONS = ["showtext", "greyscale", "show"] 40 | 41 | DEVICE = "cuda" if torch.cuda.is_available() else "cpu" 42 | MAX_FILE_SIZE = 10000000 # 10MB 43 | 44 | SUPPORTED_INFERENCE_MODELS = { 45 | "groundingdino": lambda self, classes: registry.grounding_dino_base(self, classes), 46 | "yolov8": lambda self, classes: registry.yolov8_base(self, classes), 47 | "fastsam": lambda self, classes: registry.fast_sam_base(self, classes), 48 | "yolov8s-pose": lambda self, _: registry.yolov8_pose_base(self, _), 49 | "roboflow": lambda self, _: registry.use_roboflow_hosted_inference(self, _), 50 | } 51 | 52 | SUPPORTED_TRAIN_MODELS = { 53 | "vit": lambda self, folder: registry.vit_target(self, folder), 54 | "yolov8": lambda self, folder: registry.yolov8_target(self, folder), 55 | } 56 | 57 | ALIASED_FUNCTIONS = { 58 | "isita": "classify", 59 | "find": "detect", 60 | "describe": "caption", 61 | "getcolors": "getcolours", 62 | } 63 | -------------------------------------------------------------------------------- /visionscript/constants.py: -------------------------------------------------------------------------------- 1 | ERROR_CODE = "\033[1;34mVisionScript Error\033[0m:" 2 | KEYBOARD_INTERRUPT_CODE = "\033[1;33mVisionScript Program Stopped\033[0m:" 3 | -------------------------------------------------------------------------------- /visionscript/error_handling.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from spellchecker import 
SpellChecker 4 | 5 | from visionscript.constants import ERROR_CODE 6 | from visionscript.usage import ( 7 | language_grammar_reference, 8 | lowercase_language_grammar_reference, 9 | ) 10 | 11 | 12 | def visionscript_exception_handler(_, exception, _1): 13 | print(ERROR_CODE, exception) 14 | 15 | 16 | class PathNotExists(Exception): 17 | def __init__(self, path): 18 | sys.excepthook = visionscript_exception_handler 19 | self.path = path 20 | 21 | def __str__(self): 22 | return f"The file '{self.path}' does not exist." 23 | 24 | 25 | class StackEmpty(Exception): 26 | def __init__(self, stack): 27 | sys.excepthook = visionscript_exception_handler 28 | self.stack = stack 29 | 30 | def __str__(self): 31 | if self.stack == "image_stack": 32 | return "You need to load an image before you can use the image stack." 33 | elif self.stack == "pose_stack": 34 | return "You need to run DetectPose[] before you can use the pose stack." 35 | 36 | 37 | class SetFunctionError(Exception): 38 | def __init__(self, function): 39 | sys.excepthook = visionscript_exception_handler 40 | self.function = function 41 | 42 | def __str__(self): 43 | return f"The '{self.function}' model is not available." 44 | 45 | 46 | class ImageOutOfBounds(Exception): 47 | def __init__(self, x, y): 48 | sys.excepthook = visionscript_exception_handler 49 | self.x = x 50 | self.y = y 51 | 52 | def __str__(self): 53 | return f"The image is out of bounds at ({self.x}, {self.y})." 54 | 55 | 56 | class CameraNotAccessible(Exception): 57 | def __init__(self): 58 | sys.excepthook = visionscript_exception_handler 59 | 60 | def __str__(self): 61 | return "The camera is not accessible." 62 | 63 | 64 | class ImageNotSupported(Exception): 65 | def __init__(self, image): 66 | sys.excepthook = visionscript_exception_handler 67 | self.image = image 68 | 69 | def __str__(self): 70 | return f"The image '{self.image}' is in an unsupported format. Supported formats are: .jpg, .jpeg, and .png" 71 | 72 | 73 | class ImageCorrupted(Exception): 74 | def __init__(self, image): 75 | sys.excepthook = visionscript_exception_handler 76 | self.image = image 77 | 78 | def __str__(self): 79 | return f"The image '{self.image}' is corrupt and cannot be opened." 80 | 81 | 82 | class ModelNotAvailable(Exception): 83 | def __init__(self, message): 84 | sys.excepthook = visionscript_exception_handler 85 | self.message = message 86 | 87 | def __str__(self): 88 | return self.message 89 | 90 | 91 | class UndefinedVariableOrFunction(Exception): 92 | def __init__(self, variable): 93 | sys.excepthook = visionscript_exception_handler 94 | self.variable = variable 95 | 96 | def __str__(self): 97 | return f"The variable or function '{self.variable}' is undefined." 98 | 99 | 100 | class NestedCameraNotAllowed(Exception): 101 | def __init__(self): 102 | sys.excepthook = visionscript_exception_handler 103 | 104 | def __str__(self): 105 | return "You cannot use UseCamera[] inside another UseCamera[] statement." 
106 | 107 | 108 | spell = SpellChecker() 109 | 110 | 111 | def handle_unexpected_characters(e, code, interactive=False): 112 | # if line doesn't end with ], add it 113 | if not code.strip().endswith("]"): 114 | code += "]" 115 | 116 | return 117 | 118 | # if space between statement and [, remove it 119 | # get position of [ 120 | position = code.find("[") 121 | 122 | if code[position - 1] == " ": 123 | code = code[: position - 1] + code[position:] 124 | 125 | return 126 | 127 | # replace all “ with " 128 | code = code.replace("“", '"') 129 | code = code.replace("”", '"') 130 | 131 | # raise error if character not in grammar 132 | if e.char not in ["[", "]", "'", '"', ",", " ", '"', '"', "\n", "\t", "\r"]: 133 | print(ERROR_CODE, f"Syntax error on line {e.line}, column {e.column}.") 134 | print(ERROR_CODE, f"Unexpected character: {e.char!r}") 135 | exit(1) 136 | 137 | # raise error if class doesn't exist 138 | line = e.line 139 | column = e.column 140 | 141 | # check if function name in grammar 142 | function_name = code.strip().split("\n")[line - 1].split("[")[0].strip() 143 | 144 | language_grammar_reference_keys = language_grammar_reference.keys() 145 | 146 | if function_name in language_grammar_reference_keys: 147 | print(ERROR_CODE, f"Syntax error on line {line}, column {column}.") 148 | print(ERROR_CODE, f"Unexpected character: {e.char!r}") 149 | exit(1) 150 | 151 | spell.known(lowercase_language_grammar_reference) 152 | spell.word_frequency.load_words(lowercase_language_grammar_reference) 153 | 154 | alternatives = spell.candidates(function_name) 155 | 156 | if len(alternatives) == 0: 157 | print(ERROR_CODE, f"Function {function_name} does not exist.") 158 | exit(1) 159 | 160 | print( 161 | ERROR_CODE, 162 | f"Function '{function_name}' does not exist. 
Did you mean one of these?", 163 | ) 164 | print("-" * 10) 165 | 166 | for item in list(alternatives): 167 | if item.lower() in lowercase_language_grammar_reference: 168 | print( 169 | list(language_grammar_reference.keys())[ 170 | lowercase_language_grammar_reference.index(item.lower()) 171 | ] 172 | ) 173 | 174 | if interactive is False: 175 | exit(1) 176 | 177 | return 178 | 179 | 180 | def handle_unexpected_token(e, interactive=False): 181 | line = e.line 182 | column = e.column 183 | 184 | print(ERROR_CODE, f"Syntax error on line {line}, column {column}.") 185 | print(f"Unexpected token: {e.token!r}") 186 | if interactive is False: 187 | exit(1) 188 | -------------------------------------------------------------------------------- /visionscript/grammar.py: -------------------------------------------------------------------------------- 1 | grammar = """ 2 | start: (expr | EOL)* 3 | 4 | expr: (count | set | var | make | in | if | break | random | train | label | detect | countinregion | help | get | exit | read | compare | count | cutout | show | size | caption | say | save | load | use | replace | var | classify | segment | comment | contains | if | else | end | run | find | describe | import | rotate | getcolours | getcolors | get_text | greyscale | paste | pasterandom | resize | blur | literal | setbrightness | search | similarity | readqr | reset | negate | BOOL | INT | equality | not_equality | input | deploy | getedges | setconfidence | setregion | filterbyclass | crop | shuffle | grid | run | camera | showtext | getfps | gt | lt | expr | increment | decrement | track | getdistinctscenes | getuniqueappearances | usecamera | breakpoint | profile | math | first | last | is | merge | remove | web | wait | apply | opposite | detectpose | comparepose | associative_array | list | STRING | EOL) 5 | classify: "Classify" "[" STRING ("," STRING)* "]" 6 | merge: "Merge" "[" (variable | list | associative_array) ("," (variable | list | associative_array))* "]" 7 | var: variable "=" (expr | STRING | INT) 8 | replace: "Replace" "[" STRING "]" 9 | use: "Use" "[" STRING "]" 10 | load: "Load[]" | "Load" "[" (STRING | input) "]" 11 | save: "Save" "[" STRING "]" 12 | getfps: "GetFPS[]" 13 | is: "Is" "[" (expr | STRING) "]" | "Is[]" 14 | getdistinctscenes: "GetDistinctScenes[]" 15 | getuniqueappearances: "GetUniqueAppearances" ("[" STRING "]")? 16 | say: "Say" "[" (STRING | math | variable | expr) "]" | "Say[]" 17 | get_text: "GetText[]" 18 | camera: "Camera[]" 19 | greyscale: "Greyscale" "[]" 20 | opposite: "Opposite" "[" (BOOL | expr) "]" 21 | detectpose: "DetectPose[]" 22 | comparepose: "ComparePose" "[" (expr) "," (expr) "]" | "ComparePose[]" 23 | showtext: "ShowText" "[" (STRING | expr) "]" | "ShowText[]" 24 | random: "Random" "[" (STRING | expr) ("," (STRING | expr))* "]" 25 | search: "Search" "[" STRING "]" 26 | deploy: "Deploy" "[" STRING "]" 27 | getedges: "GetEdges" ("[" "]")? 
28 | profile: "Profile[]" 29 | filterbyclass: "FilterByClass" "[" STRING ("," STRING)* "]" | "FilterByClass[]" 30 | describe: "Describe[]" 31 | setregion: "SetRegion" "[" INT "," INT "," INT "," INT "]" | "SetRegion[]" 32 | readqr: "ReadQR[]" 33 | setconfidence: "SetConfidence" "[" INT "]" | "SetConfidence[]" 34 | rotate: "Rotate" "[" (INT | STRING) "]" 35 | resize: "Resize" "[" INT "," INT "]" 36 | getcolors: "GetColors[]" | "GetColors" "[" INT "]" 37 | getcolours: "GetColours[]" | "GetColours" "[" INT "]" 38 | find: "Find" "[" STRING "]" 39 | args: ((STRING | INT | FLOAT | expr) ("," (STRING | INT | FLOAT | expr))*) | (STRING | INT | FLOAT | expr)? 40 | make: "Make " literal ("[" args "]") EOL (INDENT (expr+))* "End" EOL 41 | caption: "Caption[]" 42 | size: "Size[]" 43 | import: "Import" "[" STRING "]" 44 | run: "Run" "[" "]" 45 | shuffle: "Shuffle[]" 46 | grid: "Grid" "[" INT "]" 47 | show: "Show[]" 48 | paste: "Paste" "[" INT "," INT "]" 49 | pasterandom: "PasteRandom[]" 50 | cutout: "Cutout[]" 51 | crop: "Crop" "[" (INT | STRING) "," (INT | STRING) "," (INT | STRING) "," (INT | STRING) "]" 52 | count: "Count" "[" STRING "]" | "Count[]" 53 | input: "Input" ("[" STRING "]")? 54 | contains: "Contains" "[" STRING "]" 55 | compare: "Compare[]" 56 | setbrightness: "SetBrightness" "[" INT "]" 57 | read: "Read" "[" STRING "]" | "Read[]" 58 | exit: "Exit[]" 59 | blur: "Blur[]" 60 | similarity: "Similarity" ("[" (INT | FLOAT) "]")? 61 | get: "Get" "[" (INT | expr) ("," (STRING))* "]" 62 | set: "Set" "[" (INT | expr) ("," (STRING))* "]" 63 | remove: "Remove" "[" (variable) ("," (expr))* "]" 64 | help: "Help" "[" STRING "]" 65 | end: "End[]" 66 | wait: "Wait" "[" (INT | expr) "]" 67 | track: "Track[]" 68 | countinregion: "CountInRegion" "[" INT "," INT "," INT "," INT "]" | "CountInRegion" "[" STRING "]" 69 | detect: "Detect" "[" STRING "]" | "Detect" "[" expr "]" | "Detect[]" 70 | segment: "Segment" "[" STRING "]" 71 | else: "Else" 72 | breakpoint: "Breakpoint[]" 73 | usecamera: "UseCamera" "[" (STRING)? "]" EOL expr+ "EndCamera" EOL 74 | in: "In" "[" (STRING | expr) "]" EOL expr+ "EndIn" EOL 75 | if: "If" "[" comparison_expressions "]" EOL expr+ "End" EOL 76 | web: "Web" "[" (STRING | expr) ("," (STRING | expr))? "]" 77 | reset: "Reset[]" 78 | negate: "Not" "[" expr "]" 79 | math: expr (OPERAND expr)* 80 | OPERAND: "+" | "-" | "*" | "/" | "^" 81 | equality: (expr | INT | STRING | FLOAT | BOOL) "==" (expr | INT | STRING | FLOAT | BOOL) 82 | not_equality: (INT | STRING | expr | BOOL) "!=" (INT | STRING | expr | BOOL) 83 | train: "Train" "[" STRING "," STRING "]" | "Train" "[" STRING "]" 84 | label: "Label" "[" STRING "," STRING ("," STRING )* "]" 85 | break: "Break[]" 86 | associative_expr: STRING ":" (STRING | INT | FLOAT | expr) 87 | associative_array: "[" EOL? (associative_expr ("," EOL? associative_expr)*)? EOL? "]" | "AssociativeArray[]" 88 | list: "[" EOL? (INT | STRING | FLOAT | expr) ("," EOL? (STRING | INT | FLOAT | expr))* EOL?"]" | "List[]" 89 | first: "First[]" | "First" "[" (variable | expr) "]" 90 | last: "Last[]" | "Last" "[" (variable | expr) "]" 91 | apply: "Apply" "[" (variable | expr) "," expr "]" 92 | literal: /([a-z][a-zA-Z0-9_]*)/ ( "[" (STRING | INT | FLOAT | expr) ("," (STRING | INT | FLOAT | expr))* "]" )? 
| /([a-z][a-zA-Z0-9_]*)/ "[" "]" 93 | variable: /[a-zA-Z_][a-zA-Z0-9_]*/ 94 | comment: /#.*?\\n/ 95 | comparison_expressions: gt | lt | gte | lte | equality | not_equality 96 | gt: expr ">" expr 97 | lt: expr "<" expr 98 | gte: expr ">=" expr 99 | lte: expr "<=" expr 100 | increment: variable "++" 101 | decrement: variable "--" 102 | EOL: "\\n" 103 | EOF: "\\Z" 104 | INT: /-?\d+/ 105 | FLOAT: /-?\d+\.\d+/ 106 | INDENT: " " | "\\t" 107 | BOOL: "True" | "False" 108 | %import common.ESCAPED_STRING -> STRING 109 | %import common.WS_INLINE 110 | %ignore WS_INLINE 111 | %ignore EOL 112 | """ 113 | -------------------------------------------------------------------------------- /visionscript/notebook.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import copy 3 | import json 4 | import os 5 | import re 6 | import string 7 | import time 8 | import uuid 9 | from io import BytesIO 10 | 11 | import numpy as np 12 | import qrcode 13 | import requests 14 | from flask import Flask, jsonify, redirect, render_template, request, url_for 15 | 16 | import visionscript.lang as lang 17 | from visionscript.lang import parser 18 | 19 | app = Flask(__name__) 20 | 21 | API_URL = None 22 | 23 | notebooks = {} 24 | 25 | 26 | def init_notebook(): 27 | # cells have a session that contains state and an output 28 | # notebook schema looks like: 29 | # { 30 | # "session": session, 31 | # "cells": [ 32 | # { 33 | # "type": "code", 34 | # "data": "code" 35 | # }, 36 | # { 37 | # "type": "comment", 38 | # "data": "comment" 39 | # } 40 | # ], 41 | # "output": [ 42 | # "output", 43 | # ... 44 | # ] 45 | # } 46 | return {"session": None, "cells": [], "output": []} 47 | 48 | 49 | @app.route("/") 50 | def home(): 51 | return redirect(url_for("notebook")) 52 | 53 | 54 | @app.route("/notebook", methods=["GET", "POST"]) 55 | def notebook(): 56 | # generate random id, then redirect user 57 | if request.method == "POST": 58 | data = request.json 59 | session_id = data["state_id"] 60 | is_text_cell = data.get("is_text_cell", False) 61 | 62 | user_input = data["code"] 63 | 64 | if ( 65 | notebooks.get(session_id) is None 66 | or notebooks[session_id].get("session") is None 67 | ): 68 | session = lang.VisionScript() 69 | 70 | session.notebook = True 71 | 72 | session.state["session_id"] = session_id 73 | 74 | notebooks[session_id]["session"] = session 75 | 76 | session = notebooks[session_id]["session"] 77 | 78 | if is_text_cell: 79 | notebooks[session_id]["cells"].append( 80 | {"type": "editable_text", "data": user_input} 81 | ) 82 | notebooks[session_id]["output"].append( 83 | {"type": "editable_text", "data": ""} 84 | ) 85 | 86 | # save notebook 87 | with open(os.path.join("tmp", session_id + ".vicnb"), "w") as f: 88 | json.dump( 89 | { 90 | "cells": notebooks[session_id]["cells"], 91 | "output": notebooks[session_id]["output"], 92 | }, 93 | f, 94 | ) 95 | 96 | return jsonify({"output": "", "time": 0}) 97 | 98 | start_time = time.time() 99 | 100 | # if Load[] in line, replace content between "" with tmp/{session_id}/{image_name} 101 | user_input = re.sub( 102 | r"Load\[\s*\"(.*)\"\s*\]", 103 | r"Load[\"tmp/" + session_id + r"/\1\"]", 104 | user_input, 105 | ) 106 | 107 | user_input = user_input.replace('\\"', '"') 108 | 109 | code = parser.parse(user_input.strip() + "\n") 110 | 111 | session.check_inputs(code) 112 | 113 | if len(session.state["input_variables"]) == 0: 114 | try: 115 | session.parse_tree(code) 116 | except Exception as e: 117 | raise e 118 | return 
jsonify({"error": str(e)}) 119 | 120 | end_time = time.time() 121 | 122 | run_time = round(end_time - start_time, 1) 123 | 124 | notebooks[session_id]["cells"].append({"type": "code", "data": user_input}) 125 | notebooks[session_id]["output"].append(session.state["output"]) 126 | 127 | # if output is ndarray, convert to base64 image 128 | if session.state.get("output") and isinstance( 129 | session.state["output"].get("text"), np.ndarray 130 | ): 131 | import base64 132 | from io import BytesIO 133 | 134 | image = BytesIO() 135 | # load from np array 136 | from PIL import Image 137 | 138 | Image.fromarray(session.state["output"]["text"]).save(image, format="PNG") 139 | 140 | notebooks[session_id]["output"][-1] = { 141 | "image": base64.b64encode(image.getvalue()).decode("utf-8"), 142 | "type": "image", 143 | } 144 | 145 | # save notebook 146 | with open(os.path.join("tmp", session_id + ".vicnb"), "w") as f: 147 | json.dump( 148 | { 149 | "cells": notebooks[session_id]["cells"], 150 | "output": notebooks[session_id]["output"], 151 | }, 152 | f, 153 | ) 154 | 155 | if len(notebooks[session_id]["output"]) > 0: 156 | return jsonify( 157 | {"output": notebooks[session_id]["output"][-1], "time": run_time} 158 | ) 159 | else: 160 | return jsonify({"output": "Success", "time": run_time}) 161 | 162 | if request.args.get("state_id"): 163 | state_id = request.args.get("state_id") 164 | else: 165 | state_id = uuid.uuid4().hex 166 | 167 | notebooks[state_id] = init_notebook() 168 | 169 | return render_template( 170 | "notebook.html", 171 | state_id=state_id, 172 | api_url=API_URL or request.url_root, 173 | url_root=request.url_root.strip("/"), 174 | ) 175 | 176 | 177 | @app.route("/notebook/upload", methods=["POST"]) 178 | def upload(): 179 | from werkzeug.utils import secure_filename 180 | 181 | session_id = request.args.get("state_id") 182 | file = request.files["file"] 183 | 184 | file.filename = secure_filename(file.filename) 185 | 186 | if session_id and notebooks.get(session_id) is None: 187 | return jsonify({"error": "No session found"}), 404 188 | 189 | # if file is taken 190 | if os.path.exists(os.path.join("tmp", file.filename)): 191 | # add unique id 192 | while os.path.exists(os.path.join("tmp", file.filename)): 193 | file.filename = uuid.uuid4().hex[:4] + file.filename 194 | 195 | # save as tmp file 196 | file_name = file.filename 197 | 198 | # only allow image uploads 199 | import mimetypes 200 | 201 | if file_name == "": 202 | return jsonify({"error": "No file provided"}) 203 | 204 | if mimetypes.guess_type(file_name)[0]: 205 | if not mimetypes.guess_type(file_name)[0].startswith( 206 | "text" 207 | ) and not mimetypes.guess_type(file_name)[0].startswith("image"): 208 | return jsonify({"error": "File type not allowed"}), 415 209 | elif not file_name.endswith(("png", "jpg", "jpeg", "gif", "vicnb", "vic", "avif")): 210 | return jsonify({"error": "File type not allowed"}), 415 211 | 212 | # remove special chars 213 | file_name = "".join([c for c in file_name if c.isalnum() or c == "." 
or c == "_"]) 214 | 215 | file_name = file_name.replace("..", "") 216 | 217 | # mkdir tmp if not exists 218 | if not os.path.exists("tmp"): 219 | os.mkdir("tmp") 220 | 221 | # mkdir tmp/session 222 | if not os.path.exists(os.path.join("tmp", session_id)): 223 | os.mkdir(os.path.join("tmp", session_id)) 224 | 225 | with open(os.path.join("tmp", session_id, file_name), "wb") as f: 226 | f.write(file.read()) 227 | 228 | # if filename ends in .vicnb, reset state 229 | if file_name.endswith(".vicnb"): 230 | notebooks[session_id] = init_notebook() 231 | 232 | notebooks[session_id]["session"] = lang.VisionScript() 233 | notebooks[session_id]["session"].notebook = True 234 | 235 | notebooks[session_id]["session"].state["session_id"] = session_id 236 | 237 | with open(os.path.join("tmp", file_name), "r") as f: 238 | # file is json 239 | notebook = json.load(f) 240 | 241 | notebooks[session_id]["cells"] = notebook["cells"] 242 | notebooks[session_id]["output"] = notebook["output"] 243 | 244 | # zip cells and output 245 | result = [] 246 | for cell, output in zip(notebook["cells"], notebook["output"]): 247 | result.append({"cell": cell, "output": output}) 248 | 249 | return jsonify({"cells": result}) 250 | 251 | # notebook session should have state id 252 | if notebooks[session_id]["session"] is None: 253 | notebooks[session_id]["session"] = lang.VisionScript() 254 | notebooks[session_id]["session"].notebook = True 255 | 256 | notebooks[session_id]["session"].state["session_id"] = session_id 257 | 258 | return jsonify({"file_name": file_name}) 259 | 260 | 261 | # save 262 | @app.route("/notebook/save", methods=["POST"]) 263 | def save(): 264 | session_id = request.json.get("state_id") 265 | file_name = "export.vic" 266 | 267 | if session_id and notebooks.get(session_id) is None: 268 | return jsonify({"error": "No session found"}) 269 | 270 | if file_name is None: 271 | return jsonify({"error": "No file name provided"}) 272 | 273 | notebook = copy.deepcopy(notebooks[session_id]) 274 | 275 | # delete session 276 | del notebook["session"] 277 | 278 | return jsonify({"file": notebook}) 279 | 280 | 281 | @app.route("/notebook/deploy", methods=["POST"]) 282 | def deploy(): 283 | session_id = request.json.get("state_id") 284 | name = request.json.get("name") 285 | api_url = request.json.get("api_url") 286 | api_key = request.json.get("api_key") 287 | description = request.json.get("description") 288 | publish_as_noninteractive_webpage = request.json.get( 289 | "publish_as_noninteractive_webpage" 290 | ) 291 | 292 | if session_id and notebooks.get(session_id) is None: 293 | return jsonify({"error": "No session found"}), 404 294 | 295 | if name is None: 296 | return jsonify({"error": "No file name provided"}), 400 297 | 298 | # make a post request 299 | notebook = copy.deepcopy(notebooks[session_id]) 300 | 301 | app_slug = name.translate( 302 | str.maketrans("", "", string.punctuation.replace("-", "")) 303 | ).replace(" ", "-") 304 | 305 | deploy_request = requests.post( 306 | api_url, 307 | json={ 308 | "title": name, 309 | "slug": app_slug, 310 | "api_key": api_key, 311 | "description": description, 312 | "script": "\n".join([cell["data"] for cell in notebook["cells"]]), 313 | "notebook": notebook["cells"], 314 | "output": notebook["output"], 315 | "variables": notebook["session"].state["input_variables"], 316 | "publish_as_noninteractive_webpage": publish_as_noninteractive_webpage, 317 | }, 318 | ) 319 | qr_code = qrcode.make( 320 | api_url.strip().strip("/create") + "/notebook?state_id=" + session_id 321 
| ) 322 | 323 | image = BytesIO() 324 | qr_code.save(image, format="PNG") 325 | 326 | qr_code = "data:image/png;base64," + base64.b64encode(image.getvalue()).decode( 327 | "utf-8" 328 | ) 329 | 330 | if deploy_request.ok: 331 | return jsonify( 332 | { 333 | "success": True, 334 | "message": deploy_request.json()["id"], 335 | "qr_code": qr_code, 336 | } 337 | ) 338 | 339 | return jsonify({"success": False, "message": deploy_request.text}) 340 | 341 | 342 | @app.route("/static/<path:path>") 343 | def static_files(path): 344 | return app.send_static_file(path) 345 | 346 | 347 | @app.route("/quit") 348 | def quit(): 349 | exit() 350 | 351 | 352 | @app.errorhandler(404) 353 | def page_not_found(e): 354 | return render_template("error.html", title="Page Not Found"), 404 355 | 356 | 357 | @app.errorhandler(500) 358 | def internal_server_error(e): 359 | return render_template("error.html", title="Internal Server Error"), 500 360 | -------------------------------------------------------------------------------- /visionscript/paper_ocr_correction.py: -------------------------------------------------------------------------------- 1 | # Syntax correction for text run through paper OCR 2 | 3 | import os 4 | 5 | import pygtrie 6 | 7 | from visionscript.grammar import grammar 8 | 9 | CONTEXT_MANAGERS = ("If[", "In[", "UseCamera[") 10 | INDENTATION_MANAGER = "Next[" 11 | 12 | lines = grammar.split("\n") 13 | 14 | functions = [ 15 | l.split(" ")[1].replace('"', "") 16 | for l in lines 17 | if len(l.split(" ")) > 1 and l.split(" ")[1].istitle() 18 | ] 19 | 20 | functions = [ 21 | "".join([c for c in f if c.isalnum() or c == "." or c == "_"]) for f in functions 22 | ] 23 | 24 | # if < 2 letters long, remove (these are false positives) 25 | functions = [f for f in functions if len(f) >= 2] 26 | 27 | # get longest prefix, then chomp 28 | trie = pygtrie.CharTrie() 29 | 30 | for f in functions: 31 | trie[f] = True 32 | 33 | 34 | def line_processing(string: str) -> str: 35 | context_level = 0 36 | 37 | lines = string.split("\n") 38 | 39 | all_lines = [] 40 | 41 | for l in lines: 42 | if context_level > 0: 43 | l = "\t" * context_level + l 44 | 45 | # if Next, -1 context 46 | if l.strip().startswith(INDENTATION_MANAGER): 47 | context_level -= 1 48 | if context_level < 0: 49 | context_level = 0 50 | 51 | if l.startswith(CONTEXT_MANAGERS): 52 | context_level += 1 53 | 54 | all_lines.append(l) 55 | 56 | return "\n".join(all_lines) 57 | 58 | 59 | def syntax_correction(string: str) -> str: 60 | # string = "Load[Detect[dog]Replace[Calpng]"  # leftover hard-coded test input, kept commented out so the argument is actually used 61 | # string = "Load['test.png']\nDetect['dog']\nReplace['cat']\n"  # leftover hard-coded test input 62 | 63 | final_string = "" 64 | 65 | while True: 66 | longest_prefix, _ = trie.longest_prefix(string) 67 | # if longest prefix is none, strip chars until we find a prefix 68 | if len(string) == 0 and longest_prefix == None: 69 | break 70 | 71 | if longest_prefix == None: 72 | string = string[1:] 73 | continue 74 | 75 | if longest_prefix == "": 76 | break 77 | else: 78 | final_string += longest_prefix 79 | # if ] before next function, then we are done 80 | next_longest_prefix, _ = trie.longest_prefix(string[len(longest_prefix) :]) 81 | 82 | # if len between longest_prefix and next_longest_prefix is 0, then add a ] and move on 83 | if next_longest_prefix == None: 84 | final_string += string[len(longest_prefix) :] 85 | # if not ends with ], then add one 86 | if not final_string.endswith("]"): 87 | final_string += "]" 88 | break 89 | 90 | longest_prefix_idx = string.find(longest_prefix) 91 | next_longest_prefix_idx =
string.find(next_longest_prefix) 92 | 93 | # print("Longest prefix idx: " + str(longest_prefix_idx), next_longest_prefix_idx) 94 | 95 | if (longest_prefix_idx + len(longest_prefix)) == next_longest_prefix_idx: 96 | final_string += "]\n" 97 | string = string[len(longest_prefix) :] 98 | continue 99 | 100 | text_between_current_and_next = string[len(longest_prefix) :].split("[")[1] 101 | # print("Text between current and next: " + text_between_current_and_next) 102 | 103 | text_between_current_and_next = text_between_current_and_next.split("]")[0] 104 | 105 | # print("Text between current and next: " + text_between_current_and_next) 106 | 107 | final_string += text_between_current_and_next + "]\n" 108 | string = string[ 109 | len(longest_prefix) + len(text_between_current_and_next) + 2 : 110 | ] 111 | 112 | final_string = final_string.replace("]", "]\n") 113 | 114 | # chomp all new lines from end 115 | while final_string.endswith("\n") or final_string.endswith("]"): 116 | final_string = final_string[:-1] 117 | 118 | final_string += "]" 119 | 120 | return final_string 121 | -------------------------------------------------------------------------------- /visionscript/pose.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | 4 | @dataclass 5 | class Pose: 6 | """ 7 | A pose. 8 | """ 9 | 10 | keypoints: list 11 | confidence: float 12 | -------------------------------------------------------------------------------- /visionscript/reference.json: -------------------------------------------------------------------------------- 1 | {"Blur":{"body":"### Syntax\n\n```\nBlur[]\n```\n\n### Examples\n\nThe following example loads an image, detects people, blurs them, and displays the new image.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"person\"]\nBlur[]\nShow[]\n```"},"Break":{"body":"### Syntax\n\n```\nIn[\"./folder/\"]\n ...\n If[...]\n Break[]\n```\n\n### Examples\n\nThe following example loads a folder of images, looks for a cat in each image, and exits the loop if more than three cats were found in an image:\n\n```\nIn[\"./folder/\"]\n Load[]\n Detect[\"cat\"]\n If[Count[] > 3]\n Break[]\n```"},"Breakpoint":{"body":"### Syntax\n\n```\nBreakpoint[]\n```\n\n### Examples\n\nThe following example loads an image, then detects cats in the image. A breakpoint runs which starts an interactive [debugging session](/debugging). Once exiting from the debugging session, the program continues to run.\n\n```\nLoad[\"./image.jpg\"]\nDetect[\"cat\"]\nBreakpoint[]\nIf[Count[] > 4]\n Say[\"There are more than four cats in this image!\"]\n```"},"Caption":{"body":"
The model behind Caption[] will be downloaded automatically the first time you use it on your computer. This can take a few minutes depending on your internet connection.\n\n### Syntax\n\n```\nCaption[]\n```\n\n### Examples\n\nThe following example loads an image, generates a caption, and displays the results.\n\n```\nLoad[\"./tmp/bus.jpg\"]\nCaption[]\nShow[]\n```\n\n![A caption reading \"a bus is driving down the street in front of a building\"](/assets/caption.png)\n\n### Supported Models\n\n- [BLIP](https://github.com/salesforce/BLIP)"},"Classify":{"body":"Classify[] uses a zero-shot model which means you can specify any class you like.\n\n
The model behind Classify[] will be downloaded automatically the first time you use it on your computer. This can take a few minutes depending on your internet connection.
\n\n### Syntax\n\n```\nClassify[\"cat\", \"dog\"]\n```\n\n### Arguments\n\n- An arbitrary number of `class` arguments that specify the labels to use in classification.\n\n### Examples\n\nThe following example loads an image, classifies if the image is a cat or a dog, and displays the results.\n\n```\nLoad[\"./tmp/cat.jpg\"]\nClassify[\"cat\", \"dog\"]\nShow[]\n```\n\n![A photo of a cat classified as a cat](/assets/classify.png)\n\n### Supported Models\n\n- [CLIP](https://github.com/openai/clip)"},"Compare":{"body":"If you have called `Detect[]` or `Segment[]`, the respective bounding boxes or masks will be shown on the image.\n\n### Syntax\n\n```\nCompare[]\n```\n\n### Examples\n\nIn this example, two images are loaded and displayed side-by-side for comparison.\n\n```\nLoad[\"./photo.jpg\"]\nLoad[\"./photo1.jpg\"]\nCompare[]\n```"},"Count":{"body":"### Syntax\n\n```\nCount[]\n```\n\n### Examples\n\nThe following example loads an image, detects all the people in the image, counts the number of people, and says how many were identified in the image.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"person\"]\nCount[]\nSay[]\n```\n\n![A photo of people outside with a count below indicating there are eight people in the image](/assets/count.png)"},"CountInRegion":{"body":"### Syntax\n\n```\nCountInRegion[x, y, width, height]\n```\n\n### Arguments\n\n- `x` - The x coordinate of the top left corner of the region.\n- `y` - The y coordinate of the top left corner of the region.\n- `width` - The width of the region.\n- `height` - The height of the region.\n\n### Examples\n\nThe following example loads an image, detects all the people in the image, gets the number of people in a region of the image, and shows how many detections were identified in the region.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"person\"]\nCountInRegion[0, 0, 100, 100]\nSay[]\n```"},"Cutout":{"body":"### Syntax\n\n```\nCutout[]\n```\n\n### Examples\n\nIn this example, an image is loaded, faces are detected, and the first face is cut out and added to the image stack. The original image preserves the cut out area.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"face\"]\nCutout[]\n```"},"Describe":{"body":"A synonym for Caption[]."},"Detect":{"body":"Detect[] uses a zero-shot model which means you can specify any class you like.\n\n
\nThe model behind Detect[] will be downloaded automatically the first time you use it on your computer. This can take a few minutes depending on your internet connection.\n
\n\n### Syntax\n\n```\nDetect[]\nDetect[\"person\"]\n```\n\n### Arguments\n\n- `class` - The class to detect.\n\n### Examples\n\nThe following example loads an image, runs inference to find all objects matching the \"person\" class, and displays the results.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"person\"]\nShow[]\n```\n\n### Supported Models\n\n- [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO)\n- [Ultralytics YOLOv8 (COCO checkpoint)](https://github.com/ultralytics/ultralytics)\n\n### Synonyms\n\n- Find[]"},"Equality (==, !=)":{"body":"### Syntax\n\n```\nx == y\nx != y\n```\n\n### Arguments\n\n- `x` - The first statement to evaluate\n- `y` - The second statement to evaluate\n\n### Examples\n\nThe following example loads an image, reads the text in the image, and shows the image if it contains \"tea\".\n\n```\nLoad[\"./photo.jpg\"]\nGetText[]\nIf[Read[] == \"tea\"]\n Show[]\n```"},"Exit":{"body":"### Syntax\n\n```\nExit[]\n```"},"FilterByClass":{"body":"You can set a class filter before or after you run `Detect[]` or `Segment[]`. By default, no classes are filtered.\n\nTo reset the class filter, use `FilterByClass[]`.\n\n### Syntax\n\n```\nFilterByClass[]\nFilterByClass[class]\nFilterByClass[class1, class2, ...]\n```\n\n### Arguments\n\n- `class`: The class(es) to filter by. Can be a single class or a list of classes.\n\n### Examples\n\nThe following example finds cats in an image called `house.jpg`. `Detect[]` finds all the objects in the image. Then, a filter is applied so that only cats are used in future cells. Then, the cat predictions are displayed on the `house.jpg` image that was loaded at the beginning of the example.\n\n```\nLoad[\"./house.jpg\"]\nDetect[]\nFilterByClass[\"cat\"]\nShow[]\n```"},"Find":{"body":"A synonym for Detect[]."},"GetDistinctScenes":{"body":"You can only use this function after you have used `In[\"video.mov\"]` on a video and applied a `Classify[]` operation to each frame.\n\n### Syntax\n\n```\nGetDistinctScenes[]\n```\n\n### Examples\n\nThe following example loads a video, classifies each frame, and returns the timestamps at which classifications change:\n\n```\nIn[\"video.mov\"]\n Classify[\"sports\", \"cafe\", \"beach\"]\n\nGetDistinctScenes[]\n```"},"GetEdges":{"body":"### Syntax\n\n```\nGetEdges[]\n```
\n\n### Examples\n\nThe following example loads an image, gets the edges of objects in the image using Sobel edge detection, and shows the result.\n\n```\nLoad[\"./tmp/bus.jpg\"]\nGetEdges[]\nShow[]\n```\n\n![A bus with Sobel edge detection applied to it](/assets/get_edges.png)"},"GetFPS":{"body":"### Syntax\n\n```\nGetFPS[]\n```\n\n### Examples\n\nThe following example loads a webcam, gets the webcam stream FPS, writes the FPS to the top left corner of the image, and displays the resultant image:\n\n```\nUseCamera[]\n GetFPS[]\n Read[]\n WriteText[]\n```"},"GetText":{"body":"### Syntax\n\n```\nGetText[]\n```\n\n### Examples\n\nThe following example loads an image, retrieves the text, and displays it.\n\n```\nLoad[\"./photo.jpg\"]\nGetText[]\nSay[]\n```"},"Greyscale":{"body":"### Syntax\n\n```\nGreyscale[]\n```\n\n### Examples\n\nThe following example loads an image, converts it to greyscale, and displays it.\n\n```\nLoad[\"./photo.jpg\"]\nGreyscale[]\nShow[]\n```"},"If":{"body":"### Syntax\n\n```\nIf[statement]\n ...\n```\n\n### Arguments\n\n- `statement`: A statement to evaluate. The statement must evaluate to `True` or `False`. Statements can also be comparisons using `>`, `<`, `>=`, `<=`, `==`, and `!=`.\n- `...`: An arbitrary number of commands indented with a tab character. These commands are run if the `if` statement evaluates to `True`.\n\n### Examples\n\nThe following example loads an image, reads the text in the image, and shows the image if it contains \"tea\".\n\n```\nLoad[\"./photo.jpg\"]\nGetText[]\nIf[Read[] == \"tea\"]\n Show[]\n```\n\nIn this example, `Read[] == \"tea\"` is the statement to evaluate. If this statement returns `True`, the indented statements are run. Otherwise, the indented statements are not run."},"Import":{"body":"### Syntax\n\n```\nImport[\"./file.vic\"]\n```\n\n### Arguments\n\n- `path` - The path to the .vic file to import.\n\n### Examples\n\nThe following example loads a .vic file and calls a function within it:\n\n```\nImport[\"./counter.vic\"]\n\ncountcars\nSay[]\n```"},"In":{"body":"### Syntax\n\n```\nIn[\"./folder/\"]\n ...\n```\n\n### Arguments\n\n- `path` - The path to the folder of images to iterate over.\n- `...` - Statements to evaluate for each image in the specified folder.\n\n### Examples\n\nThe following example loads a folder of images, searches for images related to a plane, and displays the images ordered by relevance in descending order.\n\n```\nIn[\"./folder/\"]\n Load[]\n\nSearch[\"plane\"]\nCompare[]\n```"},"Input":{"body":"Inputs let you accept image or text input from a user.\n\nInputs are used as part of VisionScript Cloud, a method through which you can deploy code to an API or an HTML web page that you can share with others.\n\nIf a script contains an `Input[]`, it cannot be executed as a VisionScript script from the command line or a notebook environment. We recommend adding `Input[]` when you have already written the logic for your program and are ready to share it with the world.\n\n### Syntax\n\n```\nInput[\"Form Name\"]\n```\n\n### Arguments\n\n- `Form Name`: A unique identifier for the input. This will be shown as the form name in VisionScript Cloud deployments.\n\n### Examples\n\nThe following example asks a user to provide an image and classifies whether the image is a cat or a dog.
The classification is then displayed.\n\n```\nLoad[Input[]]\nClassify[\"cat\", \"dog\"]\nShow[]\n```"},"Load":{"body":"### Syntax\n\n```\nLoad[\"./photo.jpg\"]\nLoad[\"https://example.com/photo.jpg\"]\nLoad[\"./folder/\"]\n```\n\n### Arguments\n\n- `path` - The path to the image or folder to load.\n\n### Example\n\nThe following example loads an image and displays it.\n\n```\nLoad[\"./photo.jpg\"]\nShow[]\n```"},"Make":{"body":"### Syntax\n\n#### Declaration\n\n```\nMake showgreyscale\n ...\n```\n\n#### Call\n\n```\nshowgreyscale[]\n```\n\n### Arguments\n\n- `name` - The name of the function.\n- `...` - The body of the function.\n\n### Examples\n\nThe following example declares a function that converts an image to greyscale and displays it.\n\nThis function is applied to all images in a folder.\n\n```\nMake showgreyscale\n Greyscale[]\n Show[]\n\nIn[\"./folder\"]\n showgreyscale[]\n```"},"Paste":{"body":"### Syntax\n\n```\nPaste[x, y]\n```\n\n### Arguments\n\n- `x` - The x position to start pasting the image.\n- `y` - The y position to start pasting the image.\n\n### Examples\n\nThe following example cuts out the first cat in an image then pastes it in the top corner of the image.\n\n```\nLoad[\"./image.jpg\"]\nDetect[\"cat\"]\nGet[1]\nCutout[]\nPaste[0, 0]\n```"},"PasteRandom":{"body":"### Syntax\n\n```\nPasteRandom[]\n```\n\n### Examples\n\nThe following example cuts out the first cat in an image then pastes it at a random x, y coordinate in the image.\n\n```\nLoad[\"./image.jpg\"]\nDetect[\"cat\"]\nGet[1]\nCutout[]\nPasteRandom[]\n```"},"Profile":{"body":"`Profile[]` groups run time by function rather than documenting the length of individual function calls.\n\n### Syntax\n\n```\nProfile[]\n```\n\n### Examples\n\nThe following code prints a statement saying how many cats are in an image if a cat is found in an image:\n\n```\nProfile[]\n\nLoad[\"./garden.jpg\"]\nDetect[\"cat\"]\nCount[]\nIf[Read[] > 0]\n Say[]\nEnd\n\nSay[]\n```\n\nThe code starts with a `Profile[]` statement, which enables the profiler.\n\nThis code runs and then shows the following information:\n\n```\n--------------------\nProfile:\n--------------------\ndetect : 5.28s\nread : 0.00s\ngt : 0.00s\nexpr : 0.00s\nload : 0.00s\nsay : 0.00s\ncount : 0.00s\nif : 0.00s\nmake : 0.00s\nTotal run time: 5.28s\n```\n\nHere, we can see program execution time as well as the time spent to run each function type."},"Read":{"body":"When writing an `if` statement, you may want to use a value from a previous computation.\n\nTo do so, you need to use `Read[]` to retrieve the last computed value before an if statement was evaluated.\n\n### Syntax\n\n```\nRead[]\n```\n\n### Examples\n\nThe following example loads an image, reads the text in the image, and shows the image if it contains \"tea\".\n\n```\nLoad[\"./photo.jpg\"]\nGetText[]\nIf[Read[] == \"tea\"]\n Show[]\n```"},"ReadQR":{"body":"### Syntax\n\n```\nReadQR[]\n```\n\n### Examples\n\nThe following example loads an image, detects a QR code, and displays the text associated with the QR code.\n\n```\nLoad[\"./photo1.jpg\"]\nReadQR[]\nSay[]\n```"},"Replace":{"body":"### Syntax\n\n```\nReplace[]\nReplace[\"blue\"]\n```\n\n### Examples\n\nIn this example, an image is loaded, faces are detected, and the first face is replaced with a blue box.
The new image is then saved to a file.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"face\"]\nReplace[\"blue\"]\nSave[\"./new_photo.jpg\"]\n```"},"Reset":{"body":"### Syntax\n\n```\nReset[]\n```\n\n### Examples\n\nThe following example loads an image, detects solar panels, then clears the program state.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"solar panel\"]\nReset[]\n```"},"Resize":{"body":"### Syntax\n\n```\nResize[100, 100]\n```\n\n### Arguments\n\n- `width` - The width to resize the image to.\n- `height` - The height to resize the image to.\n\n### Examples\n\nThe following example loads an image, resizes it to 250x250, and displays it.\n\n```\nLoad[\"./tmp/cat.jpg\"]\nResize[250, 250]\nShow[]\n```\n\n![A photo of a cat resized to 250x250](/assets/resize.png)"},"Rotate":{"body":"### Syntax\n\n```\nRotate[90]\n```\n\n### Arguments\n\n- `angle` - The angle to rotate the image by, in degrees.\n\n### Examples\n\nThe following example loads an image, rotates it by 90 degrees, and displays it.\n\n```\nLoad[\"./photo.jpg\"]\nRotate[90]\nShow[]\n```"},"Save":{"body":"You can save two types of data to a file:\n\n1. An image\n2. Detections from Detect[] or Segment[]\n\n### Syntax\n\n```\nSave[\"filename\"]\n```\n\n### Arguments\n\n- `filename` - The name of the file to save the image or detections to. Use `.csv` as the file extension to save detections. Use `.jpg` or `.png` to save an image.\n\n### Examples\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"people\"]\nSave[\"./photo_with_detections.jpg\"]\n```"},"Say":{"body":"### Syntax\n\n```\nSay[]\nSay[\"Hello, world!\"]\n```\n\n### Arguments\n\n- `message` - The message to say.\n\n### Examples\n\nThe following example runs inference on an image and prints the results.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"person\"]\nSay[]\n```","signatures":["string","expr"]},"Search":{"body":"### Syntax\n\n```\nSearch[\"query\"]\n```\n\n### Arguments\n\n- `query` - The text query.\n\n### Examples\n\nThe following example loads a folder of images, searches for images related to a plane, and displays the images ordered by relevance in descending order.\n\n```\nIn[\"./folder/\"]\n Load[]\n\nSearch[\"plane\"]\nCompare[]\n```"},"Segment":{"body":"Segment[] uses a zero-shot model which means you can specify any class you like.\n\n
\nThe model behind Segment[] will be downloaded automatically the first time you use it on your computer. This can take a few minutes depending on your internet connection.\n
\n\n### Syntax\n\n```\nSegment[]\nSegment[\"person\"]\n```\n\n### Arguments\n\n- `class` - The class to detect.\n\n### Examples\n\nThe following example loads an image, runs inference to find all objects matching the \"person\" class, and displays the results.\n\n```\nLoad[\"./photo.jpg\"]\nSegment[\"person\"]\nShow[]\n```\n\n### Supported Models\n\n- [GroundedSAM](https://github.com/autodistill/autodistill-grounded-sam)"},"Select":{"body":"### Syntax\n\n```\nSelect[1]\n```\n\n### Arguments\n\n- `idx` - The index of the item to select.\n\n### Examples\n\nThe following example loads an image, finds all of the people, and retrieves the first three predictions.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"person\"]\nSelect[3]\nShow[]\n```"},"SetBrightness":{"body":"### Syntax\n\n```\nSetBrightness[10]\n```\n\n### Arguments\n\n- `brightness` - The percentage by which to increase or decrease the brightness of the image. Minimum value is -100, maximum value is 100.\n\n### Examples\n\nThe following example loads an image, detects solar panels, increases the brightness of the solar panels by 10%, and displays the new image.\n\n```\nLoad[\"./photo.jpg\"]\nSegment[\"solar panel\"]\nSetBrightness[10]\nShow[]\n```"},"SetConfidence":{"body":"You can set the confidence threshold before or after you run `Detect[]` or `Segment[]`. The default confidence value is 50%.\n\n### Syntax\n\n```\nSetConfidence[]\nSetConfidence[confidence]\n```\n\n### Arguments\n\n- `confidence`: The confidence threshold value represented as a percentage whole number (e.g., 20, 50, 90). Min: 0, Max: 100. If no value is provided, the default value of 50 is used.\n\n### Examples\n\nThe following example finds cats in an image. `Detect[]` finds all the cats in the image. Then, a filter is applied so that only predictions with a confidence of 90% or higher are returned. Then, the predictions that meet the criteria are displayed on the image of the cat.\n\n```\nLoad[\"./cat.jpg\"]\nDetect[\"cat\"]\nSetConfidence[90]\nShow[]\n```"},"SetRegion":{"body":"SetRegion[] must be called before Detect[] or Segment[] to filter detections by region. If SetRegion[] is not called, the entire image is used as the region.\n\n### Syntax\n\n```\nSetRegion[x, y, width, height]\n```\n\n### Arguments\n\n- `x`, `y` - The x and y coordinates of the top left corner of the region.\n- `width`, `height` - The width and height of the region.\n\n### Examples\n\nThe following example loads an image and sets the region in which detections must appear to be returned by Detect[]. Then, Detect[] is run to detect solar panels. Predictions not in the specified region are not returned.
Then, the filtered predictions are displayed on the the image on which detection was run.\n\nThe region is the top left quadrant of the image, assuming the image is 500x500 pixels.\n\n```\nLoad[\"./photo.jpg\"]\nSetRegion[0, 0, 250, 250]\nDetect[\"solar panel\"]\nShow[]\n```"},"Show":{"body":"If you have called `Detect[]` or `Segment[]`, the respective bounding boxes or masks will be shown on the image.\n\n### Syntax\n\n```\nShow[]\n```\n\n### Examples\n\nThe following example loads an image, detects the people, and displays the image with bounding boxes around the people.\n\n```\nLoad[\"./photo.jpg\"]\nDetect[\"person\"]\nShow[]\n```"},"ShowText":{"body":"### Syntax\n\n```\nUseCamera[]\n ...\n```\n\nWhere `...` is the code you want to run on each frame in the image.\n\n### Examples\n\nThe following example loads a webcam, turns each frame greyscale, and shows the resultant frame:\n\n```\nUseCamera[]\n Greyscale[]\n Show[]\n```"},"Similarity":{"body":"### Syntax\n\n```\nSimilarity[]\nSimilarity[3]\n```\n\n### Arguments\n\n- `n` - The number of images to compare. Defaults to 2.\n\n### Examples\n\nThe following example loads two images, compares them, and displays a similarity score.\n\n```\nLoad[\"./photo.jpg\"]\nLoad[\"./photo1.jpg\"]\nSimilarity[]\nSay[]\n```"},"Size":{"body":"### Syntax\n\n```\nSize[]\n```\n\n### Examples\n\nThe following example loads a folder of images. The code displays each image in the folder, then prints out the size of each image to the console.\n\n```\nIn[\"./folder/\"]\n Load[]\n Show[]\n Size[]\n```"},"Use":{"body":"### Syntax\n\n```\nUse[\"model\"]\n```\n\n### Examples\n\nThe following example declares that the YOLOv8 (COCO checkpoint) model should be used, before loading an image, detecting objects, and saying what they are.\n\n```\nUse[\"yolov8\"]\nLoad[\"./photo.jpg\"]\nDetect[]\nSay[]\n```\n\n### Supported Models\n\nSee Supported Models section in Detect[], Segment[], Classify[], and Caption[] to find which models are supported for the task type you are using."}} -------------------------------------------------------------------------------- /visionscript/registry.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | import io 3 | import json 4 | import logging 5 | import os 6 | import sys 7 | 8 | import numpy as np 9 | import torch 10 | from PIL import Image 11 | from roboflow import Roboflow 12 | 13 | import supervision as sv 14 | from visionscript.error_handling import ModelNotAvailable 15 | from visionscript.rf_models import STANDARD_ROBOFLOW_MODELS 16 | 17 | if os.environ.get("ROBOFLOW_API_KEY"): 18 | rf = Roboflow(api_key=os.environ["ROBOFLOW_API_KEY"]) 19 | else: 20 | rf = None 21 | 22 | DEVICE = "cuda" if torch.cuda.is_available() else "cpu" 23 | 24 | CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache", "visionscript") 25 | 26 | # retrieve rf_models.json from ~/.cache/visionscript 27 | # this is where the user keeps a registry of custom models 28 | # which is combined with the standard RF models 29 | if not os.path.exists(CACHE_DIR): 30 | os.makedirs(CACHE_DIR) 31 | 32 | if not os.path.exists(os.path.join(CACHE_DIR, "rf_models.json")): 33 | with open(os.path.join(CACHE_DIR, "rf_models.json"), "w") as f: 34 | json.dump({}, f) 35 | 36 | with open(os.path.join(CACHE_DIR, "rf_models.json"), "r") as f: 37 | ROBOFLOW_MODELS = json.load(f) 38 | 39 | ROBOFLOW_MODELS = {**ROBOFLOW_MODELS, **STANDARD_ROBOFLOW_MODELS} 40 | 41 | 42 | def use_roboflow_hosted_inference(self, _) -> list: 43 | with 
open(os.devnull, "w") as f, contextlib.redirect_stdout(f): 44 | if not rf: 45 | raise ModelNotAvailable( 46 | "The model you have tried to use requires an API key. Run export ROBOFLOW_API_KEY='<your API key>' and then run your script again." 47 | ) 48 | 49 | # base64 image 50 | 51 | image = self._get_item(-1, "image_stack") 52 | # PIL to base64 53 | buffered = io.BytesIO() 54 | 55 | # read into PIL 56 | 57 | # bgr to rgb 58 | image = image[:, :, ::-1] 59 | image = Image.fromarray(image) 60 | 61 | if self.state.get("last_loaded_image_name") and self.state.get( 62 | "last_loaded_image_name", "" 63 | ).endswith(".jpg"): 64 | image.save(buffered, format="JPEG") 65 | else: 66 | image.save(buffered, format="PNG") 67 | 68 | if self.state.get("model") is None: 69 | model = ROBOFLOW_MODELS.get( 70 | self.state["current_active_model"].lower().split("roboflow")[1].strip() 71 | ) 72 | model["labels"] = [l.lower() for l in model["labels"]] 73 | project = rf.workspace().project(model["model_id"]) 74 | 75 | self.state["last_classes"] = [ 76 | i.lower() for i in list(sorted(project.classes.keys())) 77 | ] 78 | 79 | if os.environ.get("ROBOFLOW_INFER_SERVER_DESTINATION"): 80 | inference_model = project.version( 81 | model["version"], 82 | local=os.environ.get("ROBOFLOW_INFER_SERVER_DESTINATION"), 83 | ).model 84 | else: 85 | inference_model = project.version(model["version"]).model 86 | 87 | self.state["model"] = inference_model 88 | else: 89 | model = ROBOFLOW_MODELS.get( 90 | self.state["current_active_model"].lower().split("roboflow")[1].strip() 91 | ) 92 | model["labels"] = [l.lower() for l in model["labels"]] 93 | inference_model = self.state["model"] 94 | 95 | with open("temp.jpg", "wb") as f: 96 | f.write(buffered.getvalue()) 97 | 98 | predictions = inference_model.predict("temp.jpg", confidence=0.3) 99 | predictions = predictions.json() 100 | 101 | for p in predictions["predictions"]: 102 | p["class"] = p["class"].lower() 103 | 104 | classes = [i.lower() for i in list(sorted(self.state["last_classes"]))] 105 | not_sorted_classes = [i.lower() for i in self.state["last_classes"]] 106 | 107 | processed_detections = sv.Detections.from_roboflow(predictions, classes) 108 | 109 | idx_to_class = {idx: item for idx, item in enumerate(not_sorted_classes)} 110 | 111 | return processed_detections, idx_to_class, ",".join(model["labels"]) 112 | 113 | 114 | def yolov8_pose_base(self, _) -> list: 115 | # returns 1x17 vector 116 | from ultralytics import YOLO 117 | 118 | logging.disable(logging.CRITICAL) 119 | 120 | if self.state.get("model") and self.state["current_active_model"].lower() == "yolo": 121 | model = self.state["model"]  # reuse the cached model instead of reloading it 122 | else: 123 | model = YOLO("yolov8s-pose.pt") 124 | 125 | inference_results = model(self._get_item(-1, "image_stack"))[0] 126 | 127 | logging.disable(logging.NOTSET) 128 | 129 | return inference_results.keypoints[0] 130 | 131 | 132 | def yolov8_base(self, user_classes) -> sv.Detections: 133 | from ultralytics import YOLO 134 | 135 | if self.state.get("model") and self.state["current_active_model"].lower() == "yolo": 136 | model = self.state["model"]  # reuse the cached model instead of reloading it 137 | else: 138 | model = YOLO("yolov8n.pt") 139 | 140 | inference_results = model(self._get_item(-1, "image_stack"))[0] 141 | classes = inference_results.names 142 | 143 | logging.disable(logging.NOTSET) 144 | 145 | results = sv.Detections.from_yolov8(inference_results) 146 | 147 | return results, classes, user_classes 148 | 149 | 150 | def grounding_dino_base(self, classes) -> sv.Detections: 151 | from autodistill.detection import CaptionOntology 152 | from 
autodistill_grounding_dino import GroundingDINO 153 | 154 | mapped_items = {item: item for item in classes} 155 | 156 | base_model = GroundingDINO(CaptionOntology(mapped_items)) 157 | 158 | inference_results = base_model.predict(self.state["last_loaded_image_name"]) 159 | 160 | return inference_results, classes 161 | 162 | 163 | def fast_sam_base(self, text_prompt) -> sv.Detections: 164 | from autodistill.detection import CaptionOntology 165 | from autodistill_fastsam import FastSAM 166 | 167 | mapped_items = {item: item for item in text_prompt} 168 | 169 | base_model = FastSAM(CaptionOntology(mapped_items)) 170 | 171 | inference_results = base_model.predict(self.state["last_loaded_image_name"]) 172 | 173 | return inference_results, text_prompt 174 | 175 | 176 | def yolov8_target(self, folder): 177 | # if "autodistill_yolov8" not in sys.modules: 178 | # from autodistill_yolov8 import YOLOv8 179 | 180 | base_model = YOLOv8("yolov8n.pt") 181 | 182 | model = base_model.train(os.path.join(folder, "data.yaml"), epochs=10) 183 | 184 | return model, model.names 185 | 186 | 187 | def vit_target(self, folder): 188 | if "autodistill_vit" not in sys.modules: 189 | import autodistill_vit as ViT 190 | 191 | base_model = ViT("ViT-B/32") 192 | 193 | model = base_model.train(folder, "ViT-B/32") 194 | 195 | return model, model.names 196 | -------------------------------------------------------------------------------- /visionscript/rf_models.py: -------------------------------------------------------------------------------- 1 | STANDARD_ROBOFLOW_MODELS = { 2 | "rock paper scissors": { 3 | "workspace_id": "roboflow-58fyf", 4 | "model_id": "rock-paper-scissors-sxsw", 5 | "version": 11, 6 | "labels": ["rock", "paper", "scissors"], 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /visionscript/state.py: -------------------------------------------------------------------------------- 1 | def init_state(): 2 | return { 3 | "functions": {}, 4 | "last_loaded_image": None, 5 | "last_loaded_image_name": None, 6 | "last": None, 7 | "last_function_type": None, 8 | "last_function_args": None, 9 | "image_stack": [], 10 | "detections_stack": [], 11 | "poses_stack": [], 12 | "history": [], 13 | "search_index_stack": [], 14 | "current_active_model": None, 15 | "output": None, 16 | "input_variables": {}, 17 | "last_classes": [], 18 | "confidence": 50, 19 | "active_region": None, 20 | "active_filters": {"class": None, "region": None}, 21 | "load_queue": [], 22 | "stack_size": {"image_stack": 0}, 23 | "show_text_count": 0, 24 | "in_concurrent_context": False, 25 | "ctx": {}, 26 | "tracker": None, 27 | "run_video_in_background": False, 28 | "raw_detections_stack": [], 29 | } 30 | -------------------------------------------------------------------------------- /visionscript/static/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/visionscript/static/.DS_Store -------------------------------------------------------------------------------- /visionscript/static/deploy_intro_styles.css: -------------------------------------------------------------------------------- 1 | 2 | * { 3 | font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif; 4 | box-sizing: border-box; 5 | padding: 0; 6 | margin: 0; 7 | line-height: 1.5; 8 | } 9 | html { 10 | 
background-color: #f7f7f7; 11 | } 12 | ul { 13 | list-style-type: none; 14 | padding: 0; 15 | } 16 | #cells, .cell { 17 | max-width: 40em; 18 | } 19 | .cell { 20 | background-color: #eee; 21 | padding: 1rem; 22 | border-radius: 0.5rem; 23 | margin-bottom: 1rem; 24 | } 25 | textarea { 26 | width: 100%; 27 | border: 1px solid #ccc; 28 | resize: none; 29 | border-radius: 0.5rem; 30 | padding: 0.5rem; 31 | display: block; 32 | } 33 | input[type="submit"] { 34 | margin-top: 0.5rem; 35 | background-color: darkgreen; 36 | color: white; 37 | border: none; 38 | padding: 0.5rem 1rem; 39 | border-radius: 0.5rem; 40 | cursor: pointer; 41 | resize: none; 42 | overflow: hidden; 43 | } 44 | img { 45 | max-width: 100%; 46 | } 47 | nav { 48 | background-color: darkgreen; 49 | color: white; 50 | width: 100%; 51 | padding: 1rem; 52 | text-align: center; 53 | } 54 | main { 55 | padding: 1rem; 56 | max-width: 40em; 57 | margin: 0 auto; 58 | } 59 | #current_count { 60 | font-size: 1rem; 61 | font-weight: bold; 62 | margin-bottom: 0.5rem; 63 | } 64 | select { 65 | margin-bottom: 1rem; 66 | display: block; 67 | width: 100%; 68 | border: 1px solid #ccc; 69 | border-radius: 0.5rem; 70 | padding: 0.5rem; 71 | } 72 | .small_links a { 73 | color: grey; 74 | font-size: 12px; 75 | } 76 | .small_links li { 77 | display: inline-block; 78 | margin-bottom: 10px; 79 | } 80 | h2 { 81 | font-size: 18px; 82 | margin-bottom: 10px; 83 | } 84 | #export_vic, #export_vicnb { 85 | color: grey; 86 | font-size: 12px; 87 | text-decoration: underline; 88 | cursor: pointer; 89 | } 90 | #files li { 91 | border: 1px solid lightgrey; 92 | display: inline-block; 93 | padding: 10px; 94 | border-radius: 10px; 95 | } 96 | #files_section { 97 | display: none; 98 | margin-bottom: 10px; 99 | } 100 | pre { 101 | margin-top: 0.5rem; 102 | white-space: pre-wrap; 103 | word-wrap: break-word; 104 | } 105 | main { 106 | min-height: 100vh; 107 | } 108 | .cta { 109 | border: 3px solid darkgreen; 110 | border-radius: 0.5rem; 111 | margin-bottom: 1rem; 112 | padding: 10px; 113 | display: block; 114 | margin-top: 20px; 115 | } 116 | .cta a { 117 | background-color: darkgreen; 118 | color: white; 119 | border: none; 120 | padding: 0.5rem 1rem; 121 | border-radius: 0.5rem; 122 | cursor: pointer; 123 | resize: none; 124 | overflow: hidden; 125 | text-decoration: none; 126 | display: block; 127 | margin-top: 20px; 128 | } -------------------------------------------------------------------------------- /visionscript/static/deploy_styles.css: -------------------------------------------------------------------------------- 1 | 2 | * { 3 | font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif; 4 | box-sizing: border-box; 5 | padding: 0; 6 | margin: 0; 7 | line-height: 1.5em; 8 | } 9 | html { 10 | background-color: #f7f7f7; 11 | } 12 | ul { 13 | list-style-type: none; 14 | padding: 0; 15 | } 16 | #cells, .cell { 17 | max-width: 40em; 18 | } 19 | .cell { 20 | background-color: #eee; 21 | padding: 1rem; 22 | border-radius: 0.5rem; 23 | margin-bottom: 1rem; 24 | } 25 | textarea { 26 | width: 100%; 27 | border: 1px solid #ccc; 28 | resize: none; 29 | border-radius: 0.5rem; 30 | padding: 0.5rem; 31 | display: block; 32 | } 33 | input[type="submit"] { 34 | margin-top: 0.5rem; 35 | background-color: darkgreen; 36 | color: white; 37 | border: none; 38 | padding: 0.5rem 1rem; 39 | border-radius: 0.5rem; 40 | cursor: pointer; 41 | resize: none; 42 | overflow: hidden; 43 | } 44 | img { 
45 | max-width: 100%; 46 | } 47 | nav { 48 | background-color: darkgreen; 49 | color: white; 50 | width: 100%; 51 | padding: 1rem; 52 | text-align: center; 53 | } 54 | main { 55 | padding: 1rem; 56 | max-width: 40em; 57 | margin: 0 auto; 58 | } 59 | #current_count { 60 | font-size: 1rem; 61 | font-weight: bold; 62 | margin-bottom: 0.5rem; 63 | } 64 | select { 65 | margin-bottom: 1rem; 66 | display: block; 67 | width: 100%; 68 | border: 1px solid #ccc; 69 | border-radius: 0.5rem; 70 | padding: 0.5rem; 71 | } 72 | .small_links a { 73 | color: grey; 74 | font-size: 12px; 75 | } 76 | .small_links li { 77 | display: inline-block; 78 | margin-bottom: 10px; 79 | } 80 | h2 { 81 | font-size: 18px; 82 | margin-bottom: 10px; 83 | } 84 | #export_vic, #export_vicnb { 85 | color: grey; 86 | font-size: 12px; 87 | text-decoration: underline; 88 | cursor: pointer; 89 | } 90 | #files li { 91 | border: 1px solid lightgrey; 92 | display: inline-block; 93 | padding: 10px; 94 | border-radius: 10px; 95 | } 96 | #files_section { 97 | display: none; 98 | margin-bottom: 10px; 99 | } 100 | pre { 101 | margin-top: 0.5rem; 102 | white-space: pre-wrap; 103 | word-wrap: break-word; 104 | } 105 | .time { 106 | font-size: 12px; 107 | color: grey; 108 | margin-bottom: 0.5rem; 109 | } 110 | main { 111 | min-height: 100vh; 112 | } 113 | .cell_run { 114 | background-color: #eee; 115 | padding: 1rem; 116 | border-radius: 0.5rem; 117 | margin-bottom: 1rem; 118 | cursor: pointer; 119 | } 120 | .cell_run:focus { 121 | outline: none; 122 | } 123 | label { 124 | margin-top: 10px; 125 | margin-bottom: 5px; 126 | font-weight: bold; 127 | display: block; 128 | } 129 | input { 130 | margin-bottom: 10px; 131 | display: block; 132 | width: 100%; 133 | border: 1px solid #ccc; 134 | border-radius: 0.5rem; 135 | padding: 0.5rem; 136 | } 137 | form { 138 | margin-top: 10px; 139 | border: 1px solid lightgrey; 140 | padding: 10px; 141 | border-radius: 10px; 142 | } 143 | .cta { 144 | border: 3px solid darkgreen; 145 | border-radius: 0.5rem; 146 | margin-bottom: 1rem; 147 | padding: 10px; 148 | display: block; 149 | margin-top: 20px; 150 | } 151 | .cta a { 152 | background-color: darkgreen; 153 | color: white; 154 | border: none; 155 | padding: 0.5rem 1rem; 156 | border-radius: 0.5rem; 157 | cursor: pointer; 158 | resize: none; 159 | overflow: hidden; 160 | text-decoration: none; 161 | display: block; 162 | margin-top: 20px; 163 | } 164 | .cell { 165 | background-color: #eee; 166 | padding: 1rem; 167 | border-radius: 0.5rem; 168 | margin-bottom: 1rem; 169 | } 170 | .download a { 171 | background-color: darkgreen; 172 | color: white; 173 | border: none; 174 | padding: 0.5rem 1rem; 175 | border-radius: 0.5rem; 176 | cursor: pointer; 177 | resize: none; 178 | overflow: hidden; 179 | text-decoration: none; 180 | display: block; 181 | margin-top: 20px; 182 | margin-bottom: 20px; 183 | } 184 | .small_link, .small_link a { 185 | color: grey; 186 | font-size: 12px; 187 | text-decoration: underline; 188 | cursor: pointer; 189 | } -------------------------------------------------------------------------------- /visionscript/static/drag_and_drop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/capjamesg/visionscript/75a70c7f05a50cd3f7d33a094af7826638985792/visionscript/static/drag_and_drop.png -------------------------------------------------------------------------------- /visionscript/static/examples.js: -------------------------------------------------------------------------------- 
1 | 2 | var examples = { 3 | "Classify an image": `Load["./folder/abbey.jpg"] 4 | Classify["person", "cookie"] 5 | Say[]`, 6 | "Detect objects in an image": `Load["./folder/abbey.jpg"] 7 | Detect["person"] 8 | Say[]`, 9 | "Replace objects with a blue box": `Load["./folder/abbey.jpg"] 10 | Detect["car"] 11 | Replace["blue"] 12 | Show[]`, 13 | "Build a search engine": `In["./folder"] 14 | \tLoad[] 15 | Search["taylor swift"] 16 | Compare[]`, 17 | 18 | "Count hot dogs in an image": `Load["./folder/abbey.jpg"] 19 | Detect["hot dogs"] 20 | Count[] 21 | Say[]`, 22 | "Rotate and greyscale an image": `Load["./folder/abbey.jpg"] 23 | Rotate[90] 24 | Greyscale[] 25 | Say[]` 26 | }; 27 | 28 | var examples_select = document.getElementById("examples"); 29 | 30 | examples_select.addEventListener("change", function (event) { 31 | var example = examples_select.value; 32 | var code = examples[example]; 33 | textarea.value = code; 34 | }); -------------------------------------------------------------------------------- /visionscript/static/functions.js: -------------------------------------------------------------------------------- 1 | 2 | const FUNCTIONS = { 3 | "Input": { 4 | "Load": { 5 | "args": ["file"], 6 | "description": "Load an image from a file", 7 | "example": "Load[\"./folder/abbey.jpg\"]", 8 | "supports_arguments": true, 9 | "argument_default": "\"\"" 10 | }, 11 | }, 12 | "Process": { 13 | "Greyscale": { 14 | "args": [], 15 | "description": "Convert an image to greyscale", 16 | "example": "Greyscale[]", 17 | "supports_arguments": false 18 | }, 19 | "Rotate": { 20 | "args": ["angle"], 21 | "description": "Rotate an image", 22 | "example": "Rotate[90]", 23 | "supports_arguments": true 24 | }, 25 | "Resize": { 26 | "args": ["width", "height"], 27 | "description": "Resize an image", 28 | "example": "Resize[100, 100]", 29 | "supports_arguments": true 30 | }, 31 | "Crop": { 32 | "args": ["x", "y", "width", "height"], 33 | "description": "Crop an image", 34 | "example": "Crop[0, 0, 100, 100]", 35 | "supports_arguments": true, 36 | "argument_default": "0, 0, 0, 0" 37 | }, 38 | "Blur": { 39 | "args": [], 40 | "description": "Blur an image", 41 | "example": "Blur[]", 42 | "supports_arguments": false 43 | }, 44 | "SetBrightness": { 45 | "args": ["amount"], 46 | "description": "Adjust the brightness of an image", 47 | "example": "SetBrightness[50]", 48 | "supports_arguments": true 49 | }, 50 | "Replace": { 51 | "args": ["file"], 52 | "description": "Replace part of an image", 53 | "example": "Replace[\"red\"]", 54 | "supports_arguments": true, 55 | "argument_default": "\"\"" 56 | }, 57 | "Cutout": { 58 | "args": ["x", "y", "width", "height"], 59 | "description": "Cut out part of an image", 60 | "example": "Cutout[0, 0, 100, 100]", 61 | "supports_arguments": true, 62 | "argument_default": "0, 0, 0, 0" 63 | }, 64 | "Size": { 65 | "args": [], 66 | "description": "Get the size of an image", 67 | "example": "Size[]", 68 | "supports_arguments": false 69 | }, 70 | "Contrast": { 71 | "args": ["amount"], 72 | "description": "Adjust the contrast of an image", 73 | "example": "Contrast[1.5]", 74 | "supports_arguments": true 75 | } 76 | }, 77 | "Find": { 78 | "SetRegion": { 79 | "args": ["x", "y", "width", "height"], 80 | "description": "Set the region to search for objects in (use before Detect[] or Segment[])", 81 | "example": "SetRegion[0, 0, 100, 100]", 82 | "supports_arguments": true, 83 | "argument_default": "0, 0, 0, 0" 84 | }, 85 | "FilterByClass": { 86 | "args": ["object"], 87 | "description": "Filter 
objects by class", 88 | "example": "FilterByClass[\"person\"]", 89 | "supports_arguments": true, 90 | "argument_default": "\"\"" 91 | }, 92 | "Classify": { 93 | "args": ["object"], 94 | "description": "Classify an image", 95 | "example": "Classify[\"person\", \"cat\"]", 96 | "supports_arguments": true, 97 | "argument_default": "\"\", \"\"" 98 | }, 99 | "Detect": { 100 | "args": ["object"], 101 | "description": "Detect objects in an image", 102 | "example": "Detect[\"person\"]", 103 | "supports_arguments": true, 104 | "argument_default": "\"\"" 105 | }, 106 | "DetectPose": { 107 | "args": ["object"], 108 | "description": "Detect a pose in an image", 109 | "example": "DetectPose[]", 110 | "supports_arguments": false 111 | }, 112 | "ComparePose": { 113 | "args": ["object"], 114 | "description": "Compare poses in two images", 115 | "example": "ComparePose[]", 116 | "supports_arguments": false 117 | }, 118 | "Segment": { 119 | "args": ["object"], 120 | "description": "Segment objects in an image", 121 | "example": "Segment[\"person\"]", 122 | "supports_arguments": true, 123 | "argument_default": "\"\"" 124 | }, 125 | "Search": { 126 | "args": ["file"], 127 | "description": "Build a search engine with loaded images", 128 | "example": "Search[\"./image.png\"]", 129 | "supports_arguments": true, 130 | "argument_default": "\"\"" 131 | }, 132 | "Caption": { 133 | "args": [], 134 | "description": "Caption an image", 135 | "example": "Caption[]", 136 | "supports_arguments": false 137 | }, 138 | "Count": { 139 | "args": ["object"], 140 | "description": "Count objects in an image", 141 | "example": "Count[\"person\"]", 142 | "supports_arguments": true, 143 | "argument_default": "\"\"" 144 | }, 145 | "ReadQR": { 146 | "args": [], 147 | "description": "Read a QR code in an image", 148 | "example": "ReadQR[]", 149 | "supports_arguments": false 150 | }, 151 | "GetText": { 152 | "args": [], 153 | "description": "Get the text in an image", 154 | "example": "GetText[]", 155 | "supports_arguments": false 156 | }, 157 | "Similarity": { 158 | "args": [], 159 | "description": "Find the similarity between two images", 160 | "example": "Similarity[]", 161 | "supports_arguments": false, 162 | }, 163 | "GetColours": { 164 | "args": [], 165 | "description": "Get the most common colours in an image", 166 | "example": "GetColours[]", 167 | "supports_arguments": false 168 | }, 169 | "GetEdges": { 170 | "args": [], 171 | "description": "Get the edges in an image", 172 | "example": "GetEdges[]", 173 | "supports_arguments": false 174 | } 175 | }, 176 | "Output": { 177 | "Say": { 178 | "args": [], 179 | "description": "Output the result of the previous function", 180 | "example": "Say[]", 181 | "supports_arguments": false 182 | }, 183 | "Show": { 184 | "args": [], 185 | "description": "Show the result of the previous function", 186 | "example": "Show[]", 187 | "supports_arguments": false 188 | }, 189 | "Compare": { 190 | "args": ["file"], 191 | "description": "Compare two or more images", 192 | "example": "Compare[]", 193 | "supports_arguments": false 194 | }, 195 | "Save": { 196 | "args": ["file"], 197 | "description": "Save the result of the previous function to a file", 198 | "example": "Save[\"./folder/abbey.jpg\"]", 199 | "supports_arguments": true, 200 | "argument_default": "\"\"" 201 | }, 202 | "Read": { 203 | "args": ["file"], 204 | "description": "Read the last value from state", 205 | "example": "Read[]", 206 | "supports_arguments": false 207 | }, 208 | "GetDistinctScenes": { 209 | "args": [], 210 | "description": 
"Get the distinct scenes in a video", 211 | "example": "GetDistinctScenes[]", 212 | "supports_arguments": false 213 | }, 214 | "GetUniqueAppearances": { 215 | "args": [], 216 | "description": "Get the unique appearances of an object in a video", 217 | "example": "GetUniqueAppearances[\"person\"]", 218 | "supports_arguments": true, 219 | "argument_default": "\"\"" 220 | } 221 | }, 222 | "Logic": { 223 | "If": { 224 | "args": ["condition"], 225 | "description": "If a condition is true, run the next function", 226 | "example": "If[\"person\"]", 227 | "supports_arguments": true, 228 | "argument_default": "\"\"" 229 | }, 230 | "In": { 231 | "args": [], 232 | "description": "Iterate over a folder of images", 233 | "example": "In[\"./folder\"]", 234 | "supports_arguments": true, 235 | "argument_default": "\"\"" 236 | }, 237 | "Use": { 238 | "args": ["file"], 239 | "description": "Specify a model for use", 240 | "example": "Use[\"groundingdino\"]", 241 | "supports_arguments": true, 242 | "argument_default": "\"\"" 243 | }, 244 | "Web": { 245 | "args": ["url"], 246 | "description": "Make a web request", 247 | "example": "Web[\"https://example.com/turn-on-lights\"]", 248 | "supports_arguments": true, 249 | "argument_default": "\"\"" 250 | }, 251 | "Reset": { 252 | "args": [], 253 | "description": "Reset the state of the program", 254 | "example": "Reset[]", 255 | "supports_arguments": false 256 | } 257 | }, 258 | "Deploy": { 259 | "Input": { 260 | "args": [], 261 | "description": "Specify a custom field users can input with a deployed model", 262 | "example": "Input[]", 263 | "supports_arguments": false 264 | }, 265 | } 266 | }; -------------------------------------------------------------------------------- /visionscript/static/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "Notebook", 3 | "name": "Notebook", 4 | "id": "/?source=pwa", 5 | "start_url": "/?source=pwa", 6 | "background_color": "#fff", 7 | "display": "standalone", 8 | "scope": "/", 9 | "theme_color": "darkgreen", 10 | "description": "An interactive VisionScript notebook environment for building computer vision apps.", 11 | "icons": [ 12 | { 13 | "src": "https://visionscript.dev/assets/logo.png", 14 | "sizes": "any", 15 | "type": "image/png" 16 | } 17 | ] 18 | } -------------------------------------------------------------------------------- /visionscript/static/purify.min.js: -------------------------------------------------------------------------------- 1 | /*! 
@license DOMPurify 3.0.5 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/3.0.5/LICENSE */ 2 | !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).DOMPurify=t()}(this,(function(){"use strict";const{entries:e,setPrototypeOf:t,isFrozen:n,getPrototypeOf:o,getOwnPropertyDescriptor:r}=Object;let{freeze:i,seal:a,create:l}=Object,{apply:c,construct:s}="undefined"!=typeof Reflect&&Reflect;c||(c=function(e,t,n){return e.apply(t,n)}),i||(i=function(e){return e}),a||(a=function(e){return e}),s||(s=function(e,t){return new e(...t)});const m=b(Array.prototype.forEach),u=b(Array.prototype.pop),f=b(Array.prototype.push),p=b(String.prototype.toLowerCase),d=b(String.prototype.toString),h=b(String.prototype.match),g=b(String.prototype.replace),T=b(String.prototype.indexOf),y=b(String.prototype.trim),E=b(RegExp.prototype.test),A=(_=TypeError,function(){for(var e=arguments.length,t=new Array(e),n=0;n1?n-1:0),r=1;r/gm),H=a(/\${[\w\W]*}/gm),z=a(/^data-[\-\w.\u00B7-\uFFFF]/),B=a(/^aria-[\-\w]+$/),W=a(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|sms|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),G=a(/^(?:\w+script|data):/i),Y=a(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),j=a(/^html$/i);var q=Object.freeze({__proto__:null,MUSTACHE_EXPR:P,ERB_EXPR:F,TMPLIT_EXPR:H,DATA_ATTR:z,ARIA_ATTR:B,IS_ALLOWED_URI:W,IS_SCRIPT_OR_DATA:G,ATTR_WHITESPACE:Y,DOCTYPE_NAME:j});const X=()=>"undefined"==typeof window?null:window,K=function(e,t){if("object"!=typeof e||"function"!=typeof e.createPolicy)return null;let n=null;const o="data-tt-policy-suffix";t&&t.hasAttribute(o)&&(n=t.getAttribute(o));const r="dompurify"+(n?"#"+n:"");try{return e.createPolicy(r,{createHTML:e=>e,createScriptURL:e=>e})}catch(e){return console.warn("TrustedTypes policy "+r+" could not be created."),null}};var V=function t(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:X();const o=e=>t(e);if(o.version="3.0.5",o.removed=[],!n||!n.document||9!==n.document.nodeType)return o.isSupported=!1,o;const r=n.document,a=r.currentScript;let{document:l}=n;const{DocumentFragment:c,HTMLTemplateElement:s,Node:_,Element:b,NodeFilter:P,NamedNodeMap:F=n.NamedNodeMap||n.MozNamedAttrMap,HTMLFormElement:H,DOMParser:z,trustedTypes:B}=n,G=b.prototype,Y=R(G,"cloneNode"),V=R(G,"nextSibling"),$=R(G,"childNodes"),Z=R(G,"parentNode");if("function"==typeof s){const e=l.createElement("template");e.content&&e.content.ownerDocument&&(l=e.content.ownerDocument)}let J,Q="";const{implementation:ee,createNodeIterator:te,createDocumentFragment:ne,getElementsByTagName:oe}=l,{importNode:re}=r;let ie={};o.isSupported="function"==typeof e&&"function"==typeof Z&&ee&&void 0!==ee.createHTMLDocument;const{MUSTACHE_EXPR:ae,ERB_EXPR:le,TMPLIT_EXPR:ce,DATA_ATTR:se,ARIA_ATTR:me,IS_SCRIPT_OR_DATA:ue,ATTR_WHITESPACE:fe}=q;let{IS_ALLOWED_URI:pe}=q,de=null;const he=N({},[...w,...D,...L,...x,...k]);let ge=null;const Te=N({},[...O,...I,...M,...U]);let ye=Object.seal(Object.create(null,{tagNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},attributeNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},allowCustomizedBuiltInElements:{writable:!0,configurable:!1,enumerable:!0,value:!1}})),Ee=null,Ae=null,_e=!0,be=!0,Ne=!1,Se=!0,Re=!1,we=!1,De=!1,Le=!1,ve=!1,xe=!1,Ce=!1,ke=!0,Oe=!1;const Ie="user-content-";let 
Me=!0,Ue=!1,Pe={},Fe=null;const He=N({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]);let ze=null;const Be=N({},["audio","video","img","source","image","track"]);let We=null;const Ge=N({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),Ye="http://www.w3.org/1998/Math/MathML",je="http://www.w3.org/2000/svg",qe="http://www.w3.org/1999/xhtml";let Xe=qe,Ke=!1,Ve=null;const $e=N({},[Ye,je,qe],d);let Ze;const Je=["application/xhtml+xml","text/html"],Qe="text/html";let et,tt=null;const nt=l.createElement("form"),ot=function(e){return e instanceof RegExp||e instanceof Function},rt=function(e){if(!tt||tt!==e){if(e&&"object"==typeof e||(e={}),e=S(e),Ze=Ze=-1===Je.indexOf(e.PARSER_MEDIA_TYPE)?Qe:e.PARSER_MEDIA_TYPE,et="application/xhtml+xml"===Ze?d:p,de="ALLOWED_TAGS"in e?N({},e.ALLOWED_TAGS,et):he,ge="ALLOWED_ATTR"in e?N({},e.ALLOWED_ATTR,et):Te,Ve="ALLOWED_NAMESPACES"in e?N({},e.ALLOWED_NAMESPACES,d):$e,We="ADD_URI_SAFE_ATTR"in e?N(S(Ge),e.ADD_URI_SAFE_ATTR,et):Ge,ze="ADD_DATA_URI_TAGS"in e?N(S(Be),e.ADD_DATA_URI_TAGS,et):Be,Fe="FORBID_CONTENTS"in e?N({},e.FORBID_CONTENTS,et):He,Ee="FORBID_TAGS"in e?N({},e.FORBID_TAGS,et):{},Ae="FORBID_ATTR"in e?N({},e.FORBID_ATTR,et):{},Pe="USE_PROFILES"in e&&e.USE_PROFILES,_e=!1!==e.ALLOW_ARIA_ATTR,be=!1!==e.ALLOW_DATA_ATTR,Ne=e.ALLOW_UNKNOWN_PROTOCOLS||!1,Se=!1!==e.ALLOW_SELF_CLOSE_IN_ATTR,Re=e.SAFE_FOR_TEMPLATES||!1,we=e.WHOLE_DOCUMENT||!1,ve=e.RETURN_DOM||!1,xe=e.RETURN_DOM_FRAGMENT||!1,Ce=e.RETURN_TRUSTED_TYPE||!1,Le=e.FORCE_BODY||!1,ke=!1!==e.SANITIZE_DOM,Oe=e.SANITIZE_NAMED_PROPS||!1,Me=!1!==e.KEEP_CONTENT,Ue=e.IN_PLACE||!1,pe=e.ALLOWED_URI_REGEXP||W,Xe=e.NAMESPACE||qe,ye=e.CUSTOM_ELEMENT_HANDLING||{},e.CUSTOM_ELEMENT_HANDLING&&ot(e.CUSTOM_ELEMENT_HANDLING.tagNameCheck)&&(ye.tagNameCheck=e.CUSTOM_ELEMENT_HANDLING.tagNameCheck),e.CUSTOM_ELEMENT_HANDLING&&ot(e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck)&&(ye.attributeNameCheck=e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck),e.CUSTOM_ELEMENT_HANDLING&&"boolean"==typeof e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements&&(ye.allowCustomizedBuiltInElements=e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements),Re&&(be=!1),xe&&(ve=!0),Pe&&(de=N({},[...k]),ge=[],!0===Pe.html&&(N(de,w),N(ge,O)),!0===Pe.svg&&(N(de,D),N(ge,I),N(ge,U)),!0===Pe.svgFilters&&(N(de,L),N(ge,I),N(ge,U)),!0===Pe.mathMl&&(N(de,x),N(ge,M),N(ge,U))),e.ADD_TAGS&&(de===he&&(de=S(de)),N(de,e.ADD_TAGS,et)),e.ADD_ATTR&&(ge===Te&&(ge=S(ge)),N(ge,e.ADD_ATTR,et)),e.ADD_URI_SAFE_ATTR&&N(We,e.ADD_URI_SAFE_ATTR,et),e.FORBID_CONTENTS&&(Fe===He&&(Fe=S(Fe)),N(Fe,e.FORBID_CONTENTS,et)),Me&&(de["#text"]=!0),we&&N(de,["html","head","body"]),de.table&&(N(de,["tbody"]),delete Ee.tbody),e.TRUSTED_TYPES_POLICY){if("function"!=typeof e.TRUSTED_TYPES_POLICY.createHTML)throw A('TRUSTED_TYPES_POLICY configuration option must provide a "createHTML" hook.');if("function"!=typeof e.TRUSTED_TYPES_POLICY.createScriptURL)throw A('TRUSTED_TYPES_POLICY configuration option must provide a "createScriptURL" hook.');J=e.TRUSTED_TYPES_POLICY,Q=J.createHTML("")}else void 0===J&&(J=K(B,a)),null!==J&&"string"==typeof Q&&(Q=J.createHTML(""));i&&i(e),tt=e}},it=N({},["mi","mo","mn","ms","mtext"]),at=N({},["foreignobject","desc","title","annotation-xml"]),lt=N({},["title","style","font","a","script"]),ct=N({},D);N(ct,L),N(ct,v);const 
st=N({},x);N(st,C);const mt=function(e){let t=Z(e);t&&t.tagName||(t={namespaceURI:Xe,tagName:"template"});const n=p(e.tagName),o=p(t.tagName);return!!Ve[e.namespaceURI]&&(e.namespaceURI===je?t.namespaceURI===qe?"svg"===n:t.namespaceURI===Ye?"svg"===n&&("annotation-xml"===o||it[o]):Boolean(ct[n]):e.namespaceURI===Ye?t.namespaceURI===qe?"math"===n:t.namespaceURI===je?"math"===n&&at[o]:Boolean(st[n]):e.namespaceURI===qe?!(t.namespaceURI===je&&!at[o])&&(!(t.namespaceURI===Ye&&!it[o])&&(!st[n]&&(lt[n]||!ct[n]))):!("application/xhtml+xml"!==Ze||!Ve[e.namespaceURI]))},ut=function(e){f(o.removed,{element:e});try{e.parentNode.removeChild(e)}catch(t){e.remove()}},ft=function(e,t){try{f(o.removed,{attribute:t.getAttributeNode(e),from:t})}catch(e){f(o.removed,{attribute:null,from:t})}if(t.removeAttribute(e),"is"===e&&!ge[e])if(ve||xe)try{ut(t)}catch(e){}else try{t.setAttribute(e,"")}catch(e){}},pt=function(e){let t,n;if(Le)e=""+e;else{const t=h(e,/^[\r\n\t ]+/);n=t&&t[0]}"application/xhtml+xml"===Ze&&Xe===qe&&(e=''+e+"");const o=J?J.createHTML(e):e;if(Xe===qe)try{t=(new z).parseFromString(o,Ze)}catch(e){}if(!t||!t.documentElement){t=ee.createDocument(Xe,"template",null);try{t.documentElement.innerHTML=Ke?Q:o}catch(e){}}const r=t.body||t.documentElement;return e&&n&&r.insertBefore(l.createTextNode(n),r.childNodes[0]||null),Xe===qe?oe.call(t,we?"html":"body")[0]:we?t.documentElement:r},dt=function(e){return te.call(e.ownerDocument||e,e,P.SHOW_ELEMENT|P.SHOW_COMMENT|P.SHOW_TEXT,null,!1)},ht=function(e){return e instanceof H&&("string"!=typeof e.nodeName||"string"!=typeof e.textContent||"function"!=typeof e.removeChild||!(e.attributes instanceof F)||"function"!=typeof e.removeAttribute||"function"!=typeof e.setAttribute||"string"!=typeof e.namespaceURI||"function"!=typeof e.insertBefore||"function"!=typeof e.hasChildNodes)},gt=function(e){return"object"==typeof _?e instanceof _:e&&"object"==typeof e&&"number"==typeof e.nodeType&&"string"==typeof e.nodeName},Tt=function(e,t,n){ie[e]&&m(ie[e],(e=>{e.call(o,t,n,tt)}))},yt=function(e){let t;if(Tt("beforeSanitizeElements",e,null),ht(e))return ut(e),!0;const n=et(e.nodeName);if(Tt("uponSanitizeElement",e,{tagName:n,allowedTags:de}),e.hasChildNodes()&&!gt(e.firstElementChild)&&(!gt(e.content)||!gt(e.content.firstElementChild))&&E(/<[/\w]/g,e.innerHTML)&&E(/<[/\w]/g,e.textContent))return ut(e),!0;if(!de[n]||Ee[n]){if(!Ee[n]&&At(n)){if(ye.tagNameCheck instanceof RegExp&&E(ye.tagNameCheck,n))return!1;if(ye.tagNameCheck instanceof Function&&ye.tagNameCheck(n))return!1}if(Me&&!Fe[n]){const t=Z(e)||e.parentNode,n=$(e)||e.childNodes;if(n&&t){for(let o=n.length-1;o>=0;--o)t.insertBefore(Y(n[o],!0),V(e))}}return ut(e),!0}return e instanceof b&&!mt(e)?(ut(e),!0):"noscript"!==n&&"noembed"!==n&&"noframes"!==n||!E(/<\/no(script|embed|frames)/i,e.innerHTML)?(Re&&3===e.nodeType&&(t=e.textContent,t=g(t,ae," "),t=g(t,le," "),t=g(t,ce," "),e.textContent!==t&&(f(o.removed,{element:e.cloneNode()}),e.textContent=t)),Tt("afterSanitizeElements",e,null),!1):(ut(e),!0)},Et=function(e,t,n){if(ke&&("id"===t||"name"===t)&&(n in l||n in nt))return!1;if(be&&!Ae[t]&&E(se,t));else if(_e&&E(me,t));else if(!ge[t]||Ae[t]){if(!(At(e)&&(ye.tagNameCheck instanceof RegExp&&E(ye.tagNameCheck,e)||ye.tagNameCheck instanceof Function&&ye.tagNameCheck(e))&&(ye.attributeNameCheck instanceof RegExp&&E(ye.attributeNameCheck,t)||ye.attributeNameCheck instanceof Function&&ye.attributeNameCheck(t))||"is"===t&&ye.allowCustomizedBuiltInElements&&(ye.tagNameCheck instanceof 
RegExp&&E(ye.tagNameCheck,n)||ye.tagNameCheck instanceof Function&&ye.tagNameCheck(n))))return!1}else if(We[t]);else if(E(pe,g(n,fe,"")));else if("src"!==t&&"xlink:href"!==t&&"href"!==t||"script"===e||0!==T(n,"data:")||!ze[e]){if(Ne&&!E(ue,g(n,fe,"")));else if(n)return!1}else;return!0},At=function(e){return e.indexOf("-")>0},_t=function(e){let t,n,r,i;Tt("beforeSanitizeAttributes",e,null);const{attributes:a}=e;if(!a)return;const l={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:ge};for(i=a.length;i--;){t=a[i];const{name:c,namespaceURI:s}=t;if(n="value"===c?t.value:y(t.value),r=et(c),l.attrName=r,l.attrValue=n,l.keepAttr=!0,l.forceKeepAttr=void 0,Tt("uponSanitizeAttribute",e,l),n=l.attrValue,l.forceKeepAttr)continue;if(ft(c,e),!l.keepAttr)continue;if(!Se&&E(/\/>/i,n)){ft(c,e);continue}Re&&(n=g(n,ae," "),n=g(n,le," "),n=g(n,ce," "));const m=et(e.nodeName);if(Et(m,r,n)){if(!Oe||"id"!==r&&"name"!==r||(ft(c,e),n=Ie+n),J&&"object"==typeof B&&"function"==typeof B.getAttributeType)if(s);else switch(B.getAttributeType(m,r)){case"TrustedHTML":n=J.createHTML(n);break;case"TrustedScriptURL":n=J.createScriptURL(n)}try{s?e.setAttributeNS(s,c,n):e.setAttribute(c,n),u(o.removed)}catch(e){}}}Tt("afterSanitizeAttributes",e,null)},bt=function e(t){let n;const o=dt(t);for(Tt("beforeSanitizeShadowDOM",t,null);n=o.nextNode();)Tt("uponSanitizeShadowNode",n,null),yt(n)||(n.content instanceof c&&e(n.content),_t(n));Tt("afterSanitizeShadowDOM",t,null)};return o.sanitize=function(e){let t,n,i,a,l=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(Ke=!e,Ke&&(e="\x3c!--\x3e"),"string"!=typeof e&&!gt(e)){if("function"!=typeof e.toString)throw A("toString is not a function");if("string"!=typeof(e=e.toString()))throw A("dirty is not a string, aborting")}if(!o.isSupported)return e;if(De||rt(l),o.removed=[],"string"==typeof e&&(Ue=!1),Ue){if(e.nodeName){const t=et(e.nodeName);if(!de[t]||Ee[t])throw A("root node is forbidden and cannot be sanitized in-place")}}else if(e instanceof _)t=pt("\x3c!----\x3e"),n=t.ownerDocument.importNode(e,!0),1===n.nodeType&&"BODY"===n.nodeName||"HTML"===n.nodeName?t=n:t.appendChild(n);else{if(!ve&&!Re&&!we&&-1===e.indexOf("<"))return J&&Ce?J.createHTML(e):e;if(t=pt(e),!t)return ve?null:Ce?Q:""}t&&Le&&ut(t.firstChild);const s=dt(Ue?e:t);for(;i=s.nextNode();)yt(i)||(i.content instanceof c&&bt(i.content),_t(i));if(Ue)return e;if(ve){if(xe)for(a=ne.call(t.ownerDocument);t.firstChild;)a.appendChild(t.firstChild);else a=t;return(ge.shadowroot||ge.shadowrootmode)&&(a=re.call(r,a,!0)),a}let m=we?t.outerHTML:t.innerHTML;return we&&de["!doctype"]&&t.ownerDocument&&t.ownerDocument.doctype&&t.ownerDocument.doctype.name&&E(j,t.ownerDocument.doctype.name)&&(m="\n"+m),Re&&(m=g(m,ae," "),m=g(m,le," "),m=g(m,ce," ")),J&&Ce?J.createHTML(m):m},o.setConfig=function(e){rt(e),De=!0},o.clearConfig=function(){tt=null,De=!1},o.isValidAttribute=function(e,t,n){tt||rt({});const o=et(e),r=et(t);return Et(o,r,n)},o.addHook=function(e,t){"function"==typeof t&&(ie[e]=ie[e]||[],f(ie[e],t))},o.removeHook=function(e){if(ie[e])return u(ie[e])},o.removeHooks=function(e){ie[e]&&(ie[e]=[])},o.removeAllHooks=function(){ie={}},o}();return V})); 3 | //# sourceMappingURL=purify.min.js.map -------------------------------------------------------------------------------- /visionscript/static/renderCells.js: -------------------------------------------------------------------------------- 1 | function uploadNotebook (event, mode, file = null) { 2 | var file = file || event.dataTransfer.files[0]; 3 | var body = new 
FormData(); 4 | body.append("file", file) 5 | body.append("state_id", STATE_ID); 6 | // base64 file 7 | var reader = new FileReader(); 8 | // read file 9 | reader.readAsDataURL(file); 10 | 11 | // post to /notebook/upload with state id 12 | fetch(`${API_URL}/notebook/upload?state_id=${STATE_ID}`, { 13 | method: 'POST', 14 | body: body 15 | }) 16 | .then(response => response.json()) 17 | .then(data => { 18 | if (data.cells) { 19 | if (mode == "code") { 20 | data.cells.forEach(function (cell) { 21 | var code = cell.cell; 22 | var output = cell.output; 23 | if (output == null) { 24 | output = ""; 25 | } 26 | if (output.image) { 27 | output = ``; 28 | } 29 | cells.innerHTML += ` 30 |
<li class="cell" data-code="${code}"> 31 | <p>#${cells.children.length + 1}</p> 32 | <pre> 33 | ${output} 34 | </pre><button class="cell_run">Rerun</button> 35 | </li> 36 | `; 37 | }); 38 | return; 39 | } else { 40 | var interactive_notebook = document.getElementById("drag_drop_notebook"); 41 | 42 | data.cells.forEach(function (cell) { 43 | var code = cell.cell; 44 | // for item in newline 45 | var code = code.split("\n"); 46 | for (var i = 0; i < code.length; i++) { 47 | // set background to white 48 | interactive_notebook.style.background = "white"; 49 | var line = code[i]; 50 | var function_name = line.split("[")[0]; 51 | var argument = line.split("[")[1]; 52 | if (argument) { 53 | argument = argument.split("]")[0]; 54 | } 55 | var color = mapped_functions[function_name].element.firstElementChild.style.color; 56 | var html = ""; 57 | if (mapped_functions[function_name].supports_arguments) { 58 | html = ` 59 |
    60 |

    ${function_name}[]

    61 |
    62 | `; 63 | } else { 64 | html = ` 65 |
    66 |

    ${function_name}[]

    67 |
    68 | `; 69 | } 70 | interactive_notebook.appendChild(document.createRange().createContextualFragment(html)); 71 | } 72 | }); 73 | 74 | 75 | } 76 | } 77 | var file_name = data.file_name; 78 | var files = document.getElementById("files"); 79 | var files_section = document.getElementById("files_section"); 80 | 81 | files_section.style.display = "block"; 82 | 83 | var base64 = reader.result; 84 | 85 | // if already exists, don't add 86 | var file_names = document.getElementsByClassName("file_name"); 87 | 88 | for (var i = 0; i < file_names.length; i++) { 89 | var file_name_element = file_names[i]; 90 | if (file_name_element.innerText == file_name) { 91 | return; 92 | } 93 | } 94 | 95 | files.innerHTML += ` 96 |
  • ${file_name}${file_name}
  • 97 | `; 98 | // if dragged over a Load statement, add the image to the argument block 99 | 100 | if (event.target.classList.contains("argument_block")) { 101 | // replace argument block 102 | var argument_block = event.target; 103 | var new_element = ` 104 | ${file_name} 105 | `; 106 | 107 | // add before argument block 108 | argument_block.insertAdjacentHTML("beforebegin", new_element); 109 | 110 | // remove argument block 111 | argument_block.parentNode.removeChild(argument_block); 112 | } 113 | }) 114 | .catch(err => { 115 | var dialog = document.getElementById("dialog"); 116 | var error_message = document.getElementById("error_message"); 117 | // if file ends with .vicnb 118 | 119 | if (file.name.endsWith(".vicnb")) { 120 | error_message.innerText = "Please import your notebook in interactive mode."; 121 | dialog.showModal(); 122 | return; 123 | } 124 | 125 | error_message.innerText = "Your file could not be uploaded. Please make sure you have uploaded a supported format."; 126 | dialog.showModal(); 127 | }); 128 | } -------------------------------------------------------------------------------- /visionscript/static/styles.css: -------------------------------------------------------------------------------- 1 | * { 2 | font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif; 3 | box-sizing: border-box; 4 | padding: 0; 5 | margin: 0; 6 | } 7 | html { 8 | background-color: #f7f7f7; 9 | } 10 | a { 11 | color: darkgreen; 12 | text-decoration: none; 13 | } 14 | ul { 15 | list-style-type: none; 16 | padding: 0; 17 | } 18 | .cell { 19 | background-color: #eee; 20 | padding: 1rem; 21 | border-radius: 0.5rem; 22 | margin-bottom: 1rem; 23 | } 24 | .sidebar_cell { 25 | background-color: #eee; 26 | padding: 1rem; 27 | border-radius: 0.5rem; 28 | margin-bottom: 1rem; 29 | cursor: grab; 30 | } 31 | textarea { 32 | width: 100%; 33 | border: 1px solid #ccc; 34 | resize: none; 35 | border-radius: 0.5rem; 36 | padding: 0.5rem; 37 | display: block; 38 | } 39 | input[type="submit"], #create_comment { 40 | margin-top: 0.5rem; 41 | background-color: darkgreen; 42 | color: white; 43 | border: none; 44 | padding: 0.5rem 1rem; 45 | border-radius: 0.5rem; 46 | cursor: pointer; 47 | resize: none; 48 | overflow: hidden; 49 | width: 100%; 50 | } 51 | img { 52 | max-width: 100%; 53 | } 54 | nav { 55 | background-color: darkgreen; 56 | color: white; 57 | width: 100%; 58 | padding: 1rem; 59 | } 60 | h2 { 61 | font-size: 2rem; 62 | margin-bottom: 0.5rem; 63 | } 64 | h3 { 65 | font-size: 14px; 66 | margin-bottom: 0.5rem; 67 | } 68 | section, #notebook { 69 | padding: 1rem; 70 | max-width: 50em; 71 | margin: 0 auto; 72 | flex: 1; 73 | } 74 | #current_count { 75 | font-size: 1rem; 76 | font-weight: bold; 77 | margin-bottom: 0.5rem; 78 | } 79 | main { 80 | display: flex; 81 | flex-direction: row; 82 | justify-content: center; 83 | align-items: flex-start; 84 | flex-wrap: wrap; 85 | } 86 | select { 87 | margin-bottom: 1rem; 88 | display: block; 89 | width: 100%; 90 | border: 1px solid #ccc; 91 | border-radius: 0.5rem; 92 | padding: 0.5rem; 93 | } 94 | .small_links a { 95 | color: grey; 96 | font-size: 12px; 97 | } 98 | .small_links li { 99 | display: inline-block; 100 | margin-bottom: 10px; 101 | } 102 | h2 { 103 | font-size: 18px; 104 | margin-bottom: 10px; 105 | } 106 | #files li { 107 | border: 1px solid lightgrey; 108 | display: inline-block; 109 | padding: 10px; 110 | border-radius: 10px; 111 | white-space: pre-wrap; 112 |
word-wrap: break-word; 113 | max-width: 150px; 114 | margin-bottom: 20px; 115 | } 116 | pre { 117 | margin-top: 0.5rem; 118 | white-space: pre-wrap; 119 | word-wrap: break-word; 120 | } 121 | .time { 122 | font-size: 12px; 123 | color: grey; 124 | margin-bottom: 0.5rem; 125 | } 126 | main { 127 | min-height: 100vh; 128 | } 129 | .cell_run { 130 | background-color: #eee; 131 | padding: 1rem; 132 | border-radius: 0.5rem; 133 | margin-bottom: 1rem; 134 | cursor: pointer; 135 | } 136 | .cell_run:focus { 137 | outline: none; 138 | } 139 | footer { 140 | margin-top: 20px; 141 | } 142 | #drag_drop_notebook { 143 | border: 1px solid lightgrey; 144 | min-height: 400px; 145 | border-radius: 0.5rem; 146 | background-image: url("/static/drag_and_drop.png"); 147 | background-repeat: no-repeat; 148 | background-position: center; 149 | background-size: 100%; 150 | } 151 | #function_box, #files_section { 152 | width: 20em; 153 | padding: 1rem; 154 | background-color: white; 155 | overflow-y: scroll; 156 | border: 1px solid #d3d3d3; 157 | height: 100vh; 158 | line-height: 1.5em; 159 | position: sticky; 160 | top: 0; 161 | } 162 | #function_box h2 { 163 | font-size: 18px; 164 | margin-bottom: 10px; 165 | } 166 | #function_box h3 { 167 | font-size: 16px; 168 | margin-bottom: 10px; 169 | } 170 | #function_box p { 171 | font-size: 14px; 172 | margin-bottom: 10px; 173 | } 174 | #function_box .function { 175 | margin-bottom: 1rem; 176 | cursor: grab; 177 | border: 1px solid lightgrey; 178 | padding: 5px; 179 | border-radius: 7.5px; 180 | padding-top: 7.5px; 181 | } 182 | #drag_drop_notebook { 183 | width: 100%; 184 | margin: 0 auto; 185 | margin-top: 15px; 186 | margin-bottom: 15px; 187 | box-sizing: border-box; 188 | } 189 | #drag_drop_notebook .cell { 190 | padding: 10px; 191 | border-radius: 5px; 192 | height: 100%; 193 | display: table; 194 | margin: 10px; 195 | } 196 | #drag_drop_notebook .cell p { 197 | display: table-cell; 198 | vertical-align: middle; 199 | text-align: center; 200 | font-size: 14px; 201 | } 202 | #drag_drop_notebook .cell p:focus { 203 | outline: none; 204 | } 205 | #code_mode { 206 | display: none; 207 | } 208 | .argument_block { 209 | background: white; 210 | padding: 7.5px; 211 | border-radius: 5px; 212 | margin-left: 10px; 213 | } 214 | #error, .error { 215 | display: none; 216 | background-color: lightcoral; 217 | border-radius: 5px; 218 | padding: 10px; 219 | margin-top: 10px; 220 | } 221 | #run input { 222 | font-size: 1.1em; 223 | margin-top: 10px; 224 | } 225 | #output { 226 | margin-top: 10px; 227 | border: 1px solid darkgreen; 228 | border-radius: 5px; 229 | padding: 10px; 230 | } 231 | dialog { 232 | border-radius: 5px; 233 | padding: 50px; 234 | background-color: white; 235 | margin: auto; 236 | max-width: 40em; 237 | min-width: 20em; 238 | top: 0; 239 | left: 0; 240 | right: 0; 241 | bottom: 0; 242 | } 243 | dialog h2 { 244 | font-size: 18px; 245 | margin-bottom: 10px; 246 | } 247 | .success { 248 | background-color: lightgreen; 249 | border-radius: 5px; 250 | padding: 10px; 251 | margin-top: 10px; 252 | } 253 | dialog section { 254 | margin: auto; 255 | width: 35em; 256 | } 257 | dialog button, input[type="submit"] { 258 | background-color: darkgreen; 259 | color: white; 260 | border: none; 261 | padding: 0.5rem 1rem; 262 | border-radius: 0.5rem; 263 | cursor: pointer; 264 | resize: none; 265 | overflow: hidden; 266 | margin-top: 10px; 267 | width: 100%; 268 | } 269 | dialog input { 270 | margin-top: 5px; 271 | width: 100%; 272 | border: 1px solid #ccc; 273 | resize: none; 
274 | border-radius: 0.5rem; 275 | padding: 0.5rem; 276 | display: block; 277 | margin-bottom: 10px; 278 | } 279 | nav { 280 | display: flex; 281 | justify-content: space-between; 282 | align-items: center; 283 | } 284 | nav ul { 285 | display: flex; 286 | justify-content: space-between; 287 | align-items: center; 288 | } 289 | nav ul li { 290 | margin-right: 25px; 291 | background-color: white; 292 | color: darkgreen; 293 | border-radius: 5px; 294 | padding: 7.5px; 295 | cursor: pointer 296 | } 297 | nav ul li a { 298 | color: darkgreen; 299 | text-decoration: none; 300 | } 301 | nav ul li:last-child { 302 | margin-right: 0; 303 | } 304 | #files { 305 | min-height: 50vh; 306 | } 307 | #toggle_menu { 308 | display: none; 309 | } 310 | .hide_on_mobile { 311 | display: inline; 312 | } 313 | .hide_on_desktop { 314 | display: none; 315 | } 316 | #nav_menu { 317 | display: flex; 318 | justify-content: space-between; 319 | align-items: center; 320 | } 321 | #toast { 322 | position: fixed; 323 | top: 0; 324 | right: 0; 325 | margin: 10px; 326 | padding: 10px; 327 | border-radius: 5px; 328 | background-color: white; 329 | border: 1px solid lightgrey; 330 | box-shadow: 0 0 10px lightgrey; 331 | z-index: 100; 332 | display: none; 333 | } 334 | #deploy_message { 335 | display: none; 336 | margin-bottom: 10px; 337 | border: 1px solid lightgrey; 338 | padding: 10px; 339 | } 340 | dialog p { 341 | margin-bottom: 10px; 342 | } 343 | label { 344 | display: block; 345 | font-weight: bold; 346 | } 347 | #hide_tab { 348 | display: none; 349 | } 350 | @media screen and (max-width: 800px) { 351 | #nav_menu { 352 | display: none; 353 | } 354 | #files_section { 355 | display: none; 356 | } 357 | #hide_tab { 358 | display: block; 359 | } 360 | #function_box { 361 | position: fixed; 362 | bottom: 0; 363 | left: 0; 364 | width: 100%; 365 | background-color: white; 366 | z-index: 100; 367 | justify-content: center; 368 | align-items: center; 369 | flex-direction: column; 370 | padding: 50px; 371 | top: initial; 372 | height: 300px; 373 | } 374 | #function_box h2 { 375 | display: none; 376 | } 377 | #nav_menu { 378 | display: none; 379 | position: fixed; 380 | top: 0; 381 | left: 0; 382 | width: 100%; 383 | height: 100vh; 384 | background-color: darkgreen; 385 | z-index: 200; 386 | justify-content: center; 387 | align-items: center; 388 | flex-direction: column; 389 | padding: 50px; 390 | } 391 | #nav_menu ul { 392 | display: flex; 393 | flex-direction: column; 394 | justify-content: center; 395 | align-items: center; 396 | } 397 | #nav_menu li { 398 | margin-top: 0; 399 | margin-bottom: 25px; 400 | padding: 20px; 401 | width: 100%; 402 | } 403 | #nav_menu { 404 | margin-right: 0 !important; 405 | } 406 | nav ul li:last-child { 407 | margin-left: initial !important; 408 | } 409 | #toggle_menu { 410 | display: block; 411 | background-color: lightskyblue; 412 | color: white; 413 | } 414 | #nav_menu li { 415 | margin-right: 0; 416 | } 417 | .hide_on_mobile { 418 | display: none; 419 | } 420 | #drag_drop_notebook { 421 | min-height: 300px; 422 | } 423 | .hide_on_desktop { 424 | display: block; 425 | } 426 | } -------------------------------------------------------------------------------- /visionscript/templates/deployintro.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | VisionScript Deploy 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 29 |
    30 |

    VisionScript Deploy

    31 |

    Run VisionScript code via a web application and an API.

    32 |
    33 |

    Build your own VisionScript apps

    34 |

    Using VisionScript, you can build computer vision apps with a drag-and-drop interface or a concise programming language.

    35 | Get started ➡️ 36 |
    37 |
    38 | 39 | -------------------------------------------------------------------------------- /visionscript/templates/error.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | VisionScript Deploy 8 | 9 | 10 | 11 | 12 | 13 | 14 | 19 |
    20 |

    {{ title }}

    21 |

    The page you are looking for cannot be found.

    22 |

    Try going back to the home page.

    23 |
    24 | 25 | -------------------------------------------------------------------------------- /visionscript/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | VisionScript Deploy 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 29 |
    30 |

    {{ title }}

    31 |
    32 | {% for input in image_inputs %} 33 | 34 | 35 | {% endfor %} 36 | {% for input in text_inputs %} 37 | 38 | 39 | {% endfor %} 40 | 41 |
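    <!-- Inputs for the deployed app: one form field per entry in image_inputs (image uploads) and one per entry in text_inputs (text parameters); the field markup itself was stripped from this copy. -->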
    42 |
    43 |

    Build your own VisionScript apps

    44 |

    Using VisionScript, you can build computer vision apps with a drag-and-drop interface or a concise programming language.

    45 | Get started ➡️ 46 |
    47 |
    48 | 49 | -------------------------------------------------------------------------------- /visionscript/templates/notebook.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | VisionScript Notebook 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 30 | 31 | 32 | 33 | 34 | 35 | 53 |
    54 | 75 | 107 | 112 |
    113 |
    114 |

    Make something cool

    115 |

    Drag and drop functions in the box below to build your program.

    116 |
    117 |
    Waiting for your first program! ✨
    118 |
    119 |
    120 |

    Notebook

    121 |

    Write your program in the boxes below.

    122 |
      123 |
    124 |
    125 |

    #1

    126 | 130 | 131 | 132 |
    133 |
    134 |

    135 | 136 |
    137 | 150 | 151 |

    Close

    152 |
    153 |

    Message

    154 |

    155 |
    156 |
    157 | 158 |

    Close

    159 |
    160 |

    Deploy your app

    161 |

    162 |

    You can deploy your app as a web app or a notebook.

    163 |

    People can use your apps, and you can use them in your own projects.

    164 |

    Notebooks, on the other hand, are a great way to share your code with others.

    165 |

    Learn more in the VisionScript documentation.

    166 |
    167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 |
    178 |
    179 |
    180 | 181 |

    Close

    182 |
    183 |

    Export your app

    184 |

    185 |

    You can export your app as a .vic file (a script) or a .vicnb file (a notebook).

    186 |

    These files can be used in your own projects or shared with others.

    187 | 191 |
    192 |
    193 | 194 |

    Close

    195 |

    Take a photo

    196 |

    Take a photo of VisionScript code to bring it into your notebook.

    197 | 198 | 199 | 200 | 201 | 202 | 203 |
    204 | 205 |

    Prepare your code

    206 |

    Fill in the inputs below to run your code.

    207 |
    208 |

    209 |
    210 | 211 |
    212 |
    213 |
    214 |
    215 | 216 | 217 | 218 | 219 | 220 | -------------------------------------------------------------------------------- /visionscript/templates/public_notebook.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | {{ title }} - VisionScript Notebook 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 29 |
    30 |

    {{ title }}

    31 |

    {{ description }}

    32 |
      33 | {% for cell in cells %} 34 |
    • 35 | {% if cell.data.type != "editable_text" %} 36 |

      Cell #{{ cell.id + 1 }}

      37 | 38 | {% else %} 39 |

      {{ cell.data.data | safe }}

      40 | {% endif %} 41 | {% if cell.output %} 42 | {% if cell.output.image %} 43 | 44 | {% elif cell.output.text %} 45 |
      {{ cell.output.text }}
      46 | {% endif %} 47 | {% endif %} 48 |
    • 49 | {% endfor %} 50 |
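    <!-- The loop above renders each notebook cell: code cells appear under a "Cell #N" heading, editable_text cells are injected directly via the safe filter, and a cell's output, when present, is shown as an image or as preformatted text. -->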
    51 |
    52 |

    Download as .vic

    53 |

    Download as .vicnb

    54 |
    55 |
    56 |

    Embed this notebook

    57 |

    Embed this notebook in your website using the following code:
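    <!-- The embed snippet shown to users belongs here but did not survive in this copy. As a purely hypothetical illustration (the real route and attributes are not shown in this template), it would resemble: <iframe src="/notebook/NOTEBOOK_ID/embed" width="100%" height="500"></iframe> -->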

    58 | 59 |
    60 |
    61 |

    Build your own VisionScript apps

    62 |

    Using VisionScript, you can build computer vision apps with a drag-and-drop interface or a concise programming language.

    63 | Get started ➡️ 64 |
    65 |
    66 | 67 | -------------------------------------------------------------------------------- /visionscript/templates/public_notebook_embed.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | {{ title }} - VisionScript Notebook 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 |
    25 |

    {{ title }}

    26 |

    {{ description }}

    27 |
      28 | {% for cell in cells %} 29 |
    • 30 | {% if cell.data.type != "editable_text" %} 31 |

      Cell #{{ cell.id + 1 }}

      32 | 33 | {% else %} 34 |

      {{ cell.data.data | safe }}

      35 | {% endif %} 36 | {% if cell.output %} 37 | {% if cell.output.image %} 38 | 39 | {% elif cell.output.text %} 40 |
      {{ cell.output.text }}
      41 | {% endif %} 42 | {% endif %} 43 |
    • 44 | {% endfor %} 45 |
    46 |

    View notebook

    47 | 48 |
    49 | 50 | -------------------------------------------------------------------------------- /visionscript/usage.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | with open(os.path.join(os.path.dirname(__file__), "reference.json")) as f: 5 | language_grammar_reference = json.load(f) 6 | 7 | lowercase_language_grammar_reference = [ 8 | item.lower() for item in language_grammar_reference 9 | ] 10 | 11 | USAGE = """ 12 | VisionScript (VIC) is a visual programming language for computer vision. 13 | 14 | VisionScript is a line-based language. Each line is a function call. 15 | 16 | View the full documentation at: 17 | 18 | https://visionscript.dev/docs 19 | """ 20 | --------------------------------------------------------------------------------
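# A minimal sketch (not part of usage.py) of how the two lists above can back
# a case-insensitive lookup of a VisionScript function name. It assumes that
# iterating language_grammar_reference yields function-name strings, as the
# lowercase list comprehension in usage.py suggests; canonical_function_name
# is a hypothetical helper, not part of the package.
from visionscript.usage import (
    language_grammar_reference,
    lowercase_language_grammar_reference,
)

def canonical_function_name(query):
    """Return the canonically cased grammar entry matching query, or None."""
    query = query.lower()
    # The two lists are index-aligned, so walk them together.
    for name, lowered in zip(
        language_grammar_reference, lowercase_language_grammar_reference
    ):
        if lowered == query:
            return name
    return None

# Example: canonical_function_name("load") would return "Load" if the grammar
# reference contains an entry named "Load".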