├── LICENSE ├── README.md ├── docs ├── annapalm │ ├── annapalm-cover.gif │ ├── annapalm-example.png │ ├── annapalm-gs-engine-tag.gif │ ├── annapalm-launchpad.png │ ├── annapalm-load-model-to-engine.gif │ ├── annapalm-loss.png │ ├── annapalm-plugin-engines.png │ ├── annapalm-plugin-entry.png │ ├── annapalm-plugin-tags.png │ ├── annapalm-prediction.png │ ├── annapalm-principle.png │ └── annapalm-tubulin-zoom-in.gif ├── care │ ├── care-configure-training.png │ ├── care-cover.gif │ ├── care-dashboard-loss.png │ ├── care-dashboard-results.png │ ├── care-find-models.gif │ ├── care-gs-engine-tag.gif │ ├── care-gs-prediction-inspection.gif │ ├── care-gs-prediction.gif │ ├── care-gs-training-progress.gif │ ├── care-gs-training.gif │ ├── care-launchpad.png │ ├── care-plugin-engines.png │ ├── care-plugin-menu.png │ ├── care-prediction.gif │ ├── care-screenshot.png │ ├── care-select-folder.png │ ├── care-tags.png │ ├── care-workspace.gif │ └── care-workspace.png ├── dpnunet │ ├── dpnunet-annotation.png │ ├── dpnunet-annotator-load-data.gif │ ├── dpnunet-cover.png │ ├── dpnunet-launchpad.png │ ├── dpnunet-neural_network.png │ ├── dpnunet-nuclei-annotation.png │ ├── dpnunet-nuclei-mask-border.png │ ├── dpnunet-nuclei-mask-fill.png │ ├── dpnunet-prediction-dashboard.png │ ├── dpnunet-prediction-example.png │ ├── dpnunet-prediction-image-name.png │ ├── dpnunet-training-config.png │ ├── dpnunet-training_validation.png │ └── dpnunet-upload.gif ├── hpa-classification │ └── hpa-classification-cover.gif ├── hpa-umap │ └── hpa-umap-cover.gif ├── image-annotator │ ├── annotator-annotations.gif │ ├── annotator-channels.gif │ ├── annotator-cover.gif │ └── annotator-import-local.gif └── maui │ └── maui-training.png ├── imjoy-plugins ├── 3D-visualization.imjoy.html ├── CARE.imjoy.html ├── DPNUnet.imjoy.html ├── DeepBindScan.imjoy.html ├── HPA-Classification.imjoy.html ├── HPA-Image-Selection.imjoy.html ├── HPA-UMAP.imjoy.html ├── ImageAnnotator.imjoy.html ├── Noise2Self.imjoy.html ├── PyImageJ-Demo.imjoy.html ├── SchemaIO.imjoy.html ├── Skin-Lesion-Analyzer.imjoy.html ├── annaPalm.imjoy.html └── maui-demo.imjoy.html ├── manifest.imjoy.json └── update_manifest.js /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 imjoy-team 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # Repository of plugins for [ImJoy](https://imjoy.io)
 2 | Plugin repository for ImJoy.IO, with a special focus on deep learning applications.
 3 | For more details on ImJoy, see https://imjoy.io.
 4 | 
 5 | These example plugins are described in our publication [Ouyang et al., arXiv:1905.13105, ImJoy: an open-source computational platform for the deep learning era](https://arxiv.org/abs/1905.13105).
 6 | 
 7 | The provided versions are frozen at the time of publication. For more recent versions,
 8 | please also visit the plugin repositories:
 9 | 
10 | * [**ImJoy Plugins**](https://github.com/oeway/ImJoy-Plugins)
11 | * [**ImJoy Demo Plugins**](https://github.com/oeway/ImJoy-Demo-Plugins)
12 | 
13 | ## Dependencies and tags
14 | The provided deep learning plugins are built on open-source Python libraries
15 | with specific dependencies. For these plugins we therefore provide different
16 | `tags` that control which libraries are installed.
17 | 
18 | Deep learning methods require large computational resources, e.g. training is
19 | often performed on GPUs rather than CPUs. For a quick primer on 'CPU vs GPU computing'
20 | see the dedicated section below.
21 | 
22 | For the ImJoy plugins, we hence provide tags that determine whether a plugin will
23 | run on a GPU or a CPU. Below we detail the operating systems and hardware
24 | configurations where the plugins were tested.
25 | 
26 | Some plugins, such as `DPNUnet`, are based on implementations that
27 | only permit GPU computation. Here, no `CPU` tag is provided.
28 | 
29 | Additional tags can be added for other configurations. For this, please
30 | file an issue [here](https://github.com/imjoy-team/example-plugins/issues).
31 | 
32 | **Table 1. Tested environments for the DPNUnet plugin**
33 | 
34 | | Operating System | CPU | GPU | Hardware specification |
35 | | ------------------------ | --- | --- | ------------------------------------------------------------------------------- |
36 | | Ubuntu 16.04 | [X] | [X] | Application pod running on an NVIDIA DGX-1 node, with 4 Tesla V100 GPUs |
37 | | macOS (Mojave 10.14.4) | [X] | [ ] | iMac (Retina 5K, 27-inch, Late 2014), 3.5 GHz Intel Core i5, 32 GB 1600 MHz DDR3 |
38 | | Windows 10 | [ ] | [ ] | Not tested yet |
39 | 
40 | 
41 | 
42 | **Table 2. Tested environments for the CARE plugin**
43 | 
44 | | Operating System | CPU | GPU | Hardware specification |
45 | | ------------------------ | --- | --- | ------------------------------------------------------------------------------- |
46 | | Ubuntu 16.04 | [X] | [X] | Application pod running on an NVIDIA DGX-1 node, with 4 Tesla V100 GPUs |
47 | | macOS (Mojave 10.14.4) | [X] | [ ] | iMac (Retina 5K, 27-inch, Late 2014), 3.5 GHz Intel Core i5, 32 GB 1600 MHz DDR3 |
48 | | Windows 10 | [X] | [ ] | Windows 10 (Core i7, 16 GB DDR4) |
49 | 
50 | 
51 | 
52 | **Table 3. Tested environments for the ANNA-PALM plugin**
53 | 
54 | | Operating System | CPU | GPU | Hardware specification |
55 | | ------------------------ | --- | --- | ------------------------------------------------------------------------------- |
56 | | Ubuntu 16.04 | [X] | [X] | Application pod running on an NVIDIA DGX-1 node, with 4 Tesla V100 GPUs |
57 | | macOS (Mojave 10.14.4) | [X] | [ ] | iMac (Retina 5K, 27-inch, Late 2014), 3.5 GHz Intel Core i5, 32 GB 1600 MHz DDR3 |
58 | | Windows 10 | [ ] | [ ] | Not tested yet |
59 | 
60 | ## GPU vs CPU computing
61 | Below we provide only a very brief overview of these two computational modes.
62 | More details can be found in many online tutorials, for instance [here](https://medium.com/altumea/gpu-vs-cpu-computing-what-to-choose-a9788a2370c4).
63 | 
64 | - Training of large neural networks is frequently
65 | performed on so-called **GPUs (graphics processing units)**. These are specialised
66 | devices, which are ideally suited for training and prediction in deep learning
67 | applications. Many of the open-source libraries require NVIDIA GPUs. Such a device
68 | will not be available on every computer. Libraries might further depend on the
69 | specific GPU being used.
70 | - As an alternative, computations can be performed on the **CPU (central processing unit)**,
71 | the general-purpose processing unit of every computer. Here, training is
72 | often slow and in extreme cases not even possible. However, trained networks
73 | can frequently be used with a **CPU**. While every computer will have a CPU, libraries will depend
74 | on the precise hardware specification and also the operating system.
75 | - Installation can depend on the **operating system**, e.g. to account for
76 | differences between Windows and Linux. This can be handled with
77 | operating-system-specific tags, such as `CPU_Windows` for a plugin that runs on
78 | CPUs under Windows.
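As a concrete illustration of the tag mechanism: in each plugin's `<config>` block, the `env` and `requirements` fields can be keyed by tag, so that the selected tag decides which conda environment is created and which packages are installed. A minimal sketch following the pattern of the CARE and Noise2Self plugins in this repository (plugin name and package versions are placeholders):

```json
{
  "name": "My-DL-Plugin",
  "type": "native-python",
  "tags": ["CPU", "GPU"],
  "env": {
    "CPU": "conda create -n myplugin-cpu python=3.6",
    "GPU": "conda create -n myplugin-gpu python=3.6"
  },
  "requirements": {
    "CPU": ["pip: tensorflow==1.15.0"],
    "GPU": ["pip: tensorflow-gpu==1.15.0"]
  }
}
```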
79 | 80 | 81 | -------------------------------------------------------------------------------- /docs/annapalm/annapalm-cover.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-cover.gif -------------------------------------------------------------------------------- /docs/annapalm/annapalm-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-example.png -------------------------------------------------------------------------------- /docs/annapalm/annapalm-gs-engine-tag.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-gs-engine-tag.gif -------------------------------------------------------------------------------- /docs/annapalm/annapalm-launchpad.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-launchpad.png -------------------------------------------------------------------------------- /docs/annapalm/annapalm-load-model-to-engine.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-load-model-to-engine.gif -------------------------------------------------------------------------------- /docs/annapalm/annapalm-loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-loss.png -------------------------------------------------------------------------------- /docs/annapalm/annapalm-plugin-engines.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-plugin-engines.png -------------------------------------------------------------------------------- /docs/annapalm/annapalm-plugin-entry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-plugin-entry.png -------------------------------------------------------------------------------- /docs/annapalm/annapalm-plugin-tags.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-plugin-tags.png -------------------------------------------------------------------------------- /docs/annapalm/annapalm-prediction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-prediction.png -------------------------------------------------------------------------------- 
/docs/annapalm/annapalm-principle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-principle.png -------------------------------------------------------------------------------- /docs/annapalm/annapalm-tubulin-zoom-in.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/annapalm/annapalm-tubulin-zoom-in.gif -------------------------------------------------------------------------------- /docs/care/care-configure-training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-configure-training.png -------------------------------------------------------------------------------- /docs/care/care-cover.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-cover.gif -------------------------------------------------------------------------------- /docs/care/care-dashboard-loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-dashboard-loss.png -------------------------------------------------------------------------------- /docs/care/care-dashboard-results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-dashboard-results.png -------------------------------------------------------------------------------- /docs/care/care-find-models.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-find-models.gif -------------------------------------------------------------------------------- /docs/care/care-gs-engine-tag.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-gs-engine-tag.gif -------------------------------------------------------------------------------- /docs/care/care-gs-prediction-inspection.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-gs-prediction-inspection.gif -------------------------------------------------------------------------------- /docs/care/care-gs-prediction.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-gs-prediction.gif -------------------------------------------------------------------------------- /docs/care/care-gs-training-progress.gif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-gs-training-progress.gif -------------------------------------------------------------------------------- /docs/care/care-gs-training.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-gs-training.gif -------------------------------------------------------------------------------- /docs/care/care-launchpad.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-launchpad.png -------------------------------------------------------------------------------- /docs/care/care-plugin-engines.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-plugin-engines.png -------------------------------------------------------------------------------- /docs/care/care-plugin-menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-plugin-menu.png -------------------------------------------------------------------------------- /docs/care/care-prediction.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-prediction.gif -------------------------------------------------------------------------------- /docs/care/care-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-screenshot.png -------------------------------------------------------------------------------- /docs/care/care-select-folder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-select-folder.png -------------------------------------------------------------------------------- /docs/care/care-tags.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-tags.png -------------------------------------------------------------------------------- /docs/care/care-workspace.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-workspace.gif -------------------------------------------------------------------------------- /docs/care/care-workspace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/care/care-workspace.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-annotation.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-annotation.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-annotator-load-data.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-annotator-load-data.gif -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-cover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-cover.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-launchpad.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-launchpad.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-neural_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-neural_network.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-nuclei-annotation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-nuclei-annotation.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-nuclei-mask-border.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-nuclei-mask-border.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-nuclei-mask-fill.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-nuclei-mask-fill.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-prediction-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-prediction-dashboard.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-prediction-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-prediction-example.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-prediction-image-name.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-prediction-image-name.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-training-config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-training-config.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-training_validation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-training_validation.png -------------------------------------------------------------------------------- /docs/dpnunet/dpnunet-upload.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/dpnunet/dpnunet-upload.gif -------------------------------------------------------------------------------- /docs/hpa-classification/hpa-classification-cover.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/hpa-classification/hpa-classification-cover.gif -------------------------------------------------------------------------------- /docs/hpa-umap/hpa-umap-cover.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/hpa-umap/hpa-umap-cover.gif -------------------------------------------------------------------------------- /docs/image-annotator/annotator-annotations.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/image-annotator/annotator-annotations.gif -------------------------------------------------------------------------------- /docs/image-annotator/annotator-channels.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/image-annotator/annotator-channels.gif -------------------------------------------------------------------------------- /docs/image-annotator/annotator-cover.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/image-annotator/annotator-cover.gif -------------------------------------------------------------------------------- /docs/image-annotator/annotator-import-local.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/image-annotator/annotator-import-local.gif -------------------------------------------------------------------------------- /docs/maui/maui-training.png: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/imjoy-team/example-plugins/483447db80dd56ee5dad6425de401d6c51b2a200/docs/maui/maui-training.png
--------------------------------------------------------------------------------
/imjoy-plugins/3D-visualization.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | 
 3 | This plugin demonstrates that 3D rendering can be readily used in ImJoy plugins.
 4 | 
 5 | The plugin uses [vtk-js](https://kitware.github.io/vtk-js/)
 6 | 
 7 | Specifically, it shows the [PiecewiseGaussianWidget](https://kitware.github.io/vtk-js/examples/PiecewiseGaussianWidget.html)
 8 | 
 9 | 
10 | # From the documentation:
11 | 
12 | **Double click**:
13 | * creates a gaussian at the given position and height with no bias.
14 | 
15 | **Left click**:
16 | * On a gaussian: this will activate it, so if you click on the (-) button, that will be the one removed.
17 | * On a button: perform the action of the button (add or remove a selected gaussian).
18 | 
19 | **Right click**:
20 | * deletes the gaussian underneath.
21 | 
22 | **Drag**:
23 | * Bottom of a gaussian will adjust the width. Dragging from left to right will increase the width.
24 | * Middle of the gaussian will adjust the bias (x and y).
25 | * Top of the gaussian will adjust the height.
26 | * Anywhere else will adjust its position.
27 | 
28 | 
29 | 
30 | 
31 | 
32 | 
33 | {
34 |   "name": "3D-visualization",
35 |   "type": "window",
36 |   "tags": [],
37 |   "ui": "Illustrate 3D visualization with vtk in the browser",
38 |   "version": "0.1.0",
39 |   "api_version": "0.1.2",
40 |   "description": "Demonstration of 3D visualization with vtk.",
41 |   "icon": "extension",
42 |   "inputs": null,
43 |   "outputs": null,
44 |   "dependencies": [],
45 |   "defaults": {"w": 30, "h": 20}
46 | }
47 | 
48 | 
49 | 
65 | 
66 | 
67 | 
68 | 
69 | 70 |
71 | 
--------------------------------------------------------------------------------
/imjoy-plugins/CARE.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | # CARE
 3 | 
 4 | Weigert et al., Content-aware image restoration: pushing the limits of fluorescence microscopy, Nature Methods, 2018
 5 | 
 6 | [Paper on Nature Methods](https://www.nature.com/articles/s41592-018-0216-7)
 7 | 
 8 | This is a demo plugin ported from https://github.com/CSBDeep/CSBDeep/blob/master/examples/denoising2D_probabilistic/1_training.ipynb
 9 | 
10 | 
11 | 
12 | 
13 | {
14 |   "name": "CARE",
15 |   "type": "native-python",
16 |   "version": "0.1.20",
17 |   "api_version": "0.1.2",
18 |   "description": "This plugin demonstrates denoising using CARE.",
19 |   "tags": ["Linux CPU", "Linux GPU", "Windows CPU", "Windows GPU", "MacOS CPU"],
20 |   "ui": "",
21 |   "inputs": null,
22 |   "outputs": null,
23 |   "flags": [],
24 |   "icon": null,
25 |   "env": {
26 |     "CPU": "conda create -n care-cpu python=3.6.7",
27 |     "GPU": "conda create -n care-gpu python=3.6.7",
28 |     "macOS CPU": "conda create -n care-mac-cpu python=3.6.7"
29 |   },
30 |   "requirements": {
31 |     "Linux CPU": ["pip: tensorflow==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six"],
32 |     "Linux GPU": ["pip: tensorflow-gpu==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six gputil"],
33 |     "Windows CPU": ["pip: tensorflow==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six"],
34 |     "Windows GPU": ["pip: tensorflow-gpu==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six gputil"],
35 |     "MacOS CPU": ["pip: tensorflow==1.5.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six"]
36 |   },
37 |   "dependencies": [
38 |     "oeway/ImJoy-Plugins:Im2Im-Dashboard",
39 |     "oeway/ImJoy-Plugins:launchpad",
40 |     "oeway/ImJoy-Plugins:Tabbed-Docs"
41 |   ],
42 |   "cover": ["https://oeway.github.io/ImJoy-Plugins/care/care-cover.gif"]
43 | }
44 | 
45 | 
46 | 
458 | 
459 | 
460 | 
461 | 
462 |

463 | The CARE framework allows you to perform content-aware restoration of fluorescence microscopy images.
464 | In this documentation, we describe the ImJoy implementation of a CARE example for 3D denoising.

466 | 
467 | More details about CARE can be found here:
468 | 
469 | * **Publication**: Weigert et al., Content-aware image restoration: pushing the limits of fluorescence microscopy,
470 | Nature Methods, 2018
471 | * **GitHub**: https://github.com/CSBDeep/CSBDeep
472 | 
473 | The ImJoy plugins are ported from an **example notebook** to
474 | perform denoising of 3D images.
475 | 
476 | 
477 | care-dashboard-results.png 478 |
Example of CARE denoising: input image, target image, predicted image.
479 |
480 | 481 | 482 | ## Installing the plugin and main features 483 | 484 | If you don't already have the plugin, you can install it from this **link.** 485 | 486 | When starting the CARE plugin, you will see a **central launch pad** allowing to select the task to be performed: 487 | 488 |
489 | care-launchpad.png 490 |
Screenshot of the CARE plugin launchpad.
491 |
492 | 493 | 494 | Some of the **main features** are: 495 | * Training can be performed either on example data or your own data. 496 | * Prediction can be performed once the model is trained. 497 | 498 |
499 | 500 | 501 | 502 |

These steps require that the data is already on the engine. As an alternative, you can choose the option to train from a URL.

503 | 
504 | Here we describe how to quickly get started with the CARE plugin.
505 | 
506 | ## Select plugin engine and computational mode
507 | We select a remote engine (`imjoy.pasteur.cloud`) and GPU computation (`TAG: GPU`) for fast training and prediction.
508 | 
509 | 
510 | care-gs-engine-tag.gif 511 |
Select remote engine and GPU tag.
512 |
513 | 514 | ## Training 515 | We perform training on the example data on the plugin engine. 516 | We then set the model name to `CARE_demo_v1` and train for 60 epochs. 517 | 518 |
519 | care-gs-training.gif 520 |
Training with CARE.
521 |
522 | 
523 | While the network is training, progress can be monitored in the **dashboard**: it shows how the loss changes, and how
524 | the trained model performs on validation data.
525 | 
526 | 
527 | care-gs-training-progress.gif 528 |
Monitoring training with CARE.
529 |
530 | 
531 | ## Prediction
532 | Prediction with the trained model is performed as follows:
533 | 1. Select the trained model `CARE_demo_v1`.
534 | 2. Select the test data `test` of the provided example on the remote server.
535 | 3. This will open a dedicated interface where prediction progress is shown.
536 | 
537 | 
538 | care-gs-prediction.gif 539 |
Prediction with CARE.
540 |
541 | 542 | Once the prediction is done, you can inspect the prediction results and scroll through the z-stack of the sample. 543 | 544 |
545 | care-gs-prediction-inspection.gif 546 |
Inspecting the prediction results.
547 |
548 | 549 | 550 |
551 | 552 | 553 |

554 | Here we describe briefly how the training images have to be provided: image format, naming scheme, and data organization. 555 |

556 | 
557 | The provided data from the CARE example is organized like this:
558 | * Data for the 3D denoising corresponds to **pairs of low and high signal-to-noise ratio (SNR) stacks**.
559 | Images are 3D TIFF with identical filenames and are stored in two folders "low" and "GT", corresponding
560 | to low and high-SNR stacks.
561 | * The training data for this demo corresponds to **one Tribolium stack pair**, whereas in an actual
562 | application the authors recommend acquiring at least 10-50 stacks from different developmental timepoints.
563 | * Training and validation data are stored in separate folders `train` and `test`.
564 | 
565 | ```
566 | ├─ tribolium/
567 | │ ├─ test/
568 | │ │ ├─ GT
569 | │ │ │ ├─ nGFP_0.1_0.2_0.5_20_14_late.tif
570 | │ │ ├─ low
571 | │ │ │ ├─ nGFP_0.1_0.2_0.5_20_14_late.tif
572 | │ ├─ train/
573 | │ │ ├─ GT
574 | │ │ │ ├─ nGFP_0.1_0.2_0.5_20_13_late.tif
575 | │ │ ├─ low
576 | │ │ │ ├─ nGFP_0.1_0.2_0.5_20_13_late.tif
577 | │ │ ├─ img58
578 | ```
579 | To use these **example data** you have two options:
580 | 1. They can be directly used for training with the option `Train with data from the web`.
581 | The provided default URL points to these data.
582 | 2. Alternatively, the data can be downloaded with this **link**.
583 | After unzipping, you can load the data into the plugin engine with drag & drop and specify it with the option `Train with data from the engine`.
584 | 
585 | A detailed description of how to generate the training data can be found
586 | here.
587 | 
588 | 
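For reference, this pairing of `low` (source) and `GT` (target) folders is exactly what the underlying CSBDeep library consumes. A minimal sketch of loading such pairs and cutting training patches with `csbdeep` (the patch size is an illustrative choice, not the plugin's fixed setting):

```python
from csbdeep.data import RawData, create_patches

# Pair each stack in 'low' with the identically named stack in 'GT'.
raw_data = RawData.from_folder(
    basepath='tribolium/train',
    source_dirs=['low'],   # low-SNR input stacks
    target_dir='GT',       # high-SNR ground-truth stacks
    axes='ZYX',
)

# Cut random 3D patches from the stack pairs for training.
X, Y, XY_axes = create_patches(raw_data, patch_size=(16, 64, 64),
                               n_patches_per_image=1024)
```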
590 | 591 | 592 |
593 |

594 | Here we describe how to select a plugin engine, a computational environment, and the training data to perform training. 595 |

596 | 
597 | ## Where to perform training: plugin engine
598 | Training is performed on an ImJoy plugin engine, which can run either remotely or on your
599 | own local workstation. To install a local plugin engine, please follow
600 | **these instructions.**
601 | 
602 | Once your ImJoy app is connected to one (or more) plugin engine(s), you can choose on which engine the CARE
603 | plugin should run. For this, press on the icon next to the plugin name in the plugin menu.
604 | 
605 | 
606 | care_plugin_menu.png 607 |
CARE plugin in the plugin menu.
608 |
609 | This will show a dropdown menu, where you can determine how and where the plugin is running.
610 | In the lower part of the dropdown menu you can then choose on which of the available plugin engines the plugin should run.
611 | 
612 | care-plugin-engines.png 613 |
Choosing the plugin engine.
614 |
615 | 
616 | ## GPU or CPU computation
617 | Training can be performed on CPUs or GPUs. The latter is substantially faster. To switch
618 | between these computational modes, you can select the corresponding "tag". The currently supported tags are:
619 | 
620 | care-tags.png 621 |
Choosing between GPU and CPU computing.
622 |
623 | 
624 | ## Providing training data
625 | Two options exist to provide the training data; they can be selected from the CARE launchpad prior to starting the training.
626 | For either option, we provide the **CARE example data** as a default.
627 | 
628 | * `Train with data from the web`: here you can specify a URL containing the data as a zip archive.
629 | The default points to the zipped example data.
630 | * `Train with data from the engine`: here you can select data from a plugin engine. More precisely, you have to select the folder
631 | containing the training data, e.g. the folder `tribolium` for the example data.
632 | 
633 | 
634 | care-select-folder.png 635 |
Selecting training data from the plugin engine.
636 |
637 | 
638 | You can also provide **your own data**. Your own data has to be organized as explained in the dedicated section **Data**.
639 | * You can create *a zip archive of your folder*, upload it to the web, and use a URL
640 | * You can **drag and drop a folder containing subfolders 'train' and 'test'** into the plugin engine, where the CARE plugin is running:
641 | 1. In the ImJoy interface, select `Files` and `Open Engine files`.
642 | 2. If you have multiple plugin engines running, select the engine on which the CARE plugin is running in the upper part of the interface.
643 | 3. Navigate to the folder where you want to load your data.
644 | 4. Drag & drop the folder containing subfolders 'train' and 'test' into this folder.
645 | 5. You can then use this folder for training.
646 | 
647 | ## Configure training for CARE
648 | Once you have specified the data, a dialog will be shown to configure the training (a sketch of the corresponding CSBDeep calls follows the screenshot below). Here you have to specify:
649 | * Under which name the trained model should be stored
650 | * For how many epochs the training should be performed. There are 20 training steps per epoch. Longer training
651 | might improve the prediction but can also lead to overfitting.
652 | * The name of the folder containing the source images, e.g. the folder `low` containing the low SNR images.
653 | * The name of the folder containing the target images, e.g. the folder `GT` containing the high SNR images.
654 | 
655 | 
656 | care-configure-training.png 657 |
Configuration parameters to train the CARE network.
658 |
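Behind this dialog, the plugin drives the CSBDeep training API; a hedged sketch of roughly what happens with these settings (variable names are ours; `X`, `Y`, `X_val`, `Y_val` are training/validation patch arrays as produced in the Data section):

```python
from csbdeep.models import Config, CARE

# 60 epochs with 20 steps per epoch, as described above.
config = Config(XY_axes, n_channel_in=1, n_channel_out=1,
                train_epochs=60, train_steps_per_epoch=20)

# The model name chosen in the dialog becomes the folder name on disk.
model = CARE(config, 'CARE_demo_v1', basedir='models')
model.train(X, Y, validation_data=(X_val, Y_val))
```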
659 | 
660 | 
661 | ## Monitoring training progress
662 | After launching the training, a dashboard will be shown that allows you to inspect training progress.
663 | 
664 | * In the upper part, the dashboard shows the loss functions for the training and validation data. You can hover
665 | over the curves to see the current value and which loss function it is.
666 | * In the lower part, it shows the prediction results on randomly selected z-slices of the validation data.
667 | * The slider allows you to scroll through the different training steps and inspect how the quality
668 | of the prediction changes.
669 | 
670 | If you want to **terminate the training**, click on the icon next to the plugin name, select `Terminate`, and close
671 | the window.
672 | 
673 | ## Trained model
674 | Once training is finished, the trained model is stored in a folder with the name specified in the configuration.
675 | This folder can be found in the current ImJoy workspace in a folder called `models`. You can find the name of your current workspace
676 | by hovering over the four squares in the upper left part of the interface, e.g. `default` for the example below.
677 | 
678 | 
679 | care-workspace.gif 680 |
How to identify the current workspace.
681 |
682 | 
683 | To find the trained model,
684 | 1. Click `Files` in the ImJoy interface, and `Open Engine files`.
685 | 2. Select the plugin engine where you performed training, e.g. where the CARE plugin was running.
686 | 3. Navigate to your workspace name. The dialog opens in the `default` workspace; if you are in another
687 | workspace, you have to move up one folder and select your workspace name.
688 | 4. The folder `CARE-MODELS` contains all trained models.
689 | 
690 | 
691 | 
692 | 
693 | Once a model is trained, you can use it for prediction.
694 | 
695 | Data for prediction has to be stored in a folder with the same name as the input images, e.g. `low` in the example data.
696 | Prediction will then be performed for each image in this folder.
697 | Results are stored in the folder `CARE_results`, where one folder with the same name is created for each data set. Predictions
698 | for each z-slice are stored as separate PNGs.
699 | 
700 | 1. Press on `Predict`.
701 | 2. Specify the folder containing the trained model.
702 | 3. Specify the folder containing the images you want to process.
703 | 
704 | This will then open a new interface, where progress on the prediction is reported.
705 | 
706 | 
707 | care-prediction.gif 708 |
CARE prediction interface.
709 |
710 | 711 |
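For orientation, prediction in the underlying CSBDeep library amounts to loading the model by name and applying it stack by stack; a minimal sketch (the file path is illustrative):

```python
from csbdeep.models import CARE
from tifffile import imread

# config=None loads the configuration stored with the trained model.
model = CARE(config=None, name='CARE_demo_v1', basedir='models')

x = imread('tribolium/test/low/nGFP_0.1_0.2_0.5_20_14_late.tif')
restored = model.predict(x, axes='ZYX')  # denoised stack, same shape as x
```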
712 | 
713 | 
714 | 
715 | Here we provide answers to frequently asked questions and encountered problems.
716 | 
717 | ### Example training data is not available on the remote engine
718 | In case the example data is not available anymore, you can either train with data by specifying the URL, or
719 | download the data and upload it to the remote engine again.
720 | 
721 | 
722 | 
--------------------------------------------------------------------------------
/imjoy-plugins/HPA-Classification.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | 
 3 | This plugin allows you to predict the subcellular protein localization pattern for a provided image.
 4 | 
 5 | ## Main features
 6 | * Prediction is performed with the winner of the special prize of the 2018 Kaggle challenge (https://github.com/CellProfiling/HPA-Special-Prize).
 7 | * Annotated localization classes are from the **HPA cell atlas**.
 8 | * The plugin uses a trained **ShuffleNet (v2)** to perform the prediction directly in the browser.
 9 | * Class Activation Map: https://arxiv.org/pdf/1512.04150.pdf
10 | 
11 | 
12 | 
13 | 
14 | {
15 |   "name": "HPA-Classification",
16 |   "type": "window",
17 |   "tags": [],
18 |   "ui": [{
19 |     "model": {
20 |       "id": "model",
21 |       "type": "choose",
22 |       "options": ["ShuffleNet-V2"],
23 |       "placeholder": "ShuffleNet-V2"
24 |     }
25 |   }],
26 |   "version": "0.1.23",
27 |   "api_version": "0.1.2",
28 |   "url": "",
29 |   "description": "ShuffleNetV2 for HPA.",
30 |   "icon": "extension",
31 |   "inputs": null,
32 |   "outputs": null,
33 |   "env": null,
34 |   "requirements": [
35 |     "https://cdn.jsdelivr.net/npm/apexcharts",
36 |     "https://cdn.jsdelivr.net/npm/@tensorflow/tfjs",
37 |     "https://cdn.jsdelivr.net/npm/simpleheat@0.4.0/simpleheat.min.js",
38 |     "https://cdn.jsdelivr.net/gh/photopea/UTIF.js@4f1b10cb09e244cfd4f9631245d2231537148be7/UTIF.js"
39 |   ],
40 |   "dependencies": ["https://raw.githubusercontent.com/imjoy-team/example-plugins/master/imjoy-plugins/HPA-Image-Selection.imjoy.html"],
41 |   "defaults": {"w": 60, "h": 20, "standalone": true},
42 |   "cover": "https://imjoy-team.github.io/example-plugins/hpa-classification/hpa-classification-cover.gif"
43 | }
44 | 
45 | 
46 | 
47 | 
748 | 
749 | 
750 | 
751 | 
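The class activation map (CAM) linked above boils down to weighting the feature maps of the last convolutional layer by the classifier weights of the predicted class. A minimal NumPy sketch of the idea (the plugin itself computes this in TensorFlow.js in the browser; shapes and names here are illustrative):

```python
import numpy as np

def class_activation_map(feature_maps, fc_weights, class_idx):
    """feature_maps: (H, W, C) output of the last conv layer;
    fc_weights: (C, n_classes) weights of the final dense layer."""
    # Weight each feature channel by its contribution to the chosen class.
    cam = feature_maps @ fc_weights[:, class_idx]  # -> (H, W)
    # Normalize to [0, 1] so it can be rendered as a heatmap overlay.
    cam -= cam.min()
    if cam.max() > 0:
        cam /= cam.max()
    return cam
```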
752 |

753 | 754 | 755 |
756 |
757 |

Click the bar to see Class Activation Map.

758 |
759 |
760 |
761 |
762 |
763 | 764 |
765 |
766 |
767 |
768 | 769 | 839 | -------------------------------------------------------------------------------- /imjoy-plugins/HPA-Image-Selection.imjoy.html: -------------------------------------------------------------------------------- 1 | 2 | [TODO: write documentation for this plugin.] 3 | 4 | 5 | 6 | { 7 | "name": "HPA-Image-Selection", 8 | "type": "window", 9 | "tags": [], 10 | "ui": "", 11 | "version": "0.1.4", 12 | "api_version": "0.1.2", 13 | "description": "This is a backbone plugin for using Vue.js in ImJoy", 14 | "icon": "extension", 15 | "inputs": null, 16 | "outputs": null, 17 | "env": "", 18 | "requirements": [ 19 | "https://cdnjs.cloudflare.com/ajax/libs/vue/2.5.22/vue.min.js" 20 | ], 21 | "dependencies": [], 22 | "defaults": {"w": 50, "h": 30}, 23 | "runnable": true 24 | } 25 | 26 | 27 | 592 | 593 | 594 |
595 |
596 |
597 | 598 |
599 |
600 |
601 |
602 | 
603 | 
643 | 
--------------------------------------------------------------------------------
/imjoy-plugins/HPA-UMAP.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | 
 3 | This plugin allows you to visualize the results of a classification of protein
 4 | localization patterns from microscope images of the entire HPA Cell Atlas (v18, 63,955 images).
 5 | 
 6 | Classification is performed with the winning approach of a 2018 Kaggle challenge
 7 | (https://www.kaggle.com/c/human-protein-atlas-image-classification).
 8 | 
 9 | The plot shows a UMAP projection (Uniform Manifold Approximation and Projection) calculated from the
10 | last layer of the network (densenet121, dimensions: 1024). Each dot is an image and when clicking on a
11 | dot, the corresponding image is displayed together with information about the cell line, the
12 | protein, and a link to the HPA website. The plugin also allows you to search for a gene, cell line, or
13 | a localization pattern.
14 | 
15 | 
16 | 
17 | 
18 | {
19 |   "name": "HPA-UMAP",
20 |   "type": "window",
21 |   "tags": [],
22 |   "ui": "Explore protein localization patterns in HPA images.",
23 |   "version": "0.4.2",
24 |   "api_version": "0.1.2",
25 |   "description": "Explore protein localization patterns in HPA images.",
26 |   "icon": "photo",
27 |   "inputs": null,
28 |   "outputs": null,
29 |   "requirements": [
30 |     "https://cdn.plot.ly/plotly-latest.min.js",
31 |     "https://cdn.jsdelivr.net/npm/papaparse@5.2.0/papaparse.min.js",
32 |     "https://cdn.jsdelivr.net/npm/vue@2.6.10/dist/vue.min.js",
33 |     "https://unpkg.com/spectre.css/dist/spectre.min.css",
34 |     "https://unpkg.com/spectre.css/dist/spectre-exp.min.css",
35 |     "https://unpkg.com/spectre.css/dist/spectre-icons.min.css"],
36 |   "dependencies": [],
37 |   "defaults": {"w": 50, "h":20, "fullscreen": true},
38 |   "cover": "https://imjoy-team.github.io/imjoy-plugins/hpa-umap/hpa-umap-cover.gif"
39 | }
40 | 
41 | 
42 | 
411 | 
412 | 
413 | 
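Such a projection can be reproduced offline with the umap-learn package; a minimal sketch, assuming `features` holds the 1024-dimensional DenseNet-121 feature vectors, one row per image (file path and parameters are illustrative choices, not necessarily those used for the published plot):

```python
import numpy as np
import umap

# Placeholder path; shape (n_images, 1024), one feature vector per image.
features = np.load('hpa_densenet121_features.npy')

reducer = umap.UMAP(n_neighbors=15, min_dist=0.1, metric='euclidean')
embedding = reducer.fit_transform(features)  # -> (n_images, 2) for plotting
```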
414 | 454 |
455 |
Loading...
456 |
457 |
458 |
459 | 460 |
461 |
462 |

{{images[current_index].meta['gene']}} ({{images[current_index].meta['cell_line']}})

463 |

cell line: {{images[current_index].meta['cell_line']}}

464 |

gene: {{images[current_index].meta['gene']}}

465 |

location: {{images[current_index].meta['location']}}

466 |

image id: {{ images[current_index].meta.id}}

467 | 468 | 469 |
470 |

471 |
472 | Click on the UMAP to see the sample image. 473 |

474 | 475 | 492 |
493 |
494 |
495 |
496 | 497 |
498 | 
499 | 
522 | 
--------------------------------------------------------------------------------
/imjoy-plugins/Noise2Self.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | 
 3 | ## An ImJoy plugin using the Noise2Self method for denoising.
 4 | 
 5 | This demo is ported from: https://github.com/czbiohub/noise2self/blob/master/notebooks/Intro%20to%20Neural%20Nets.ipynb
 6 | 
 7 | ## About Noise2Self
 8 | 
 9 | Paper: https://arxiv.org/abs/1901.11365
10 | Authors: Joshua Batson, Loic Royer
11 | 
12 | 
13 | 
14 | 
15 | {
16 |   "name": "Noise2Self",
17 |   "type": "native-python",
18 |   "version": "0.1.9",
19 |   "tags": ["CPU", "GPU"],
20 |   "api_version": "0.1.2",
21 |   "description": "A demo plugin for Noise2Self (Blind Denoising by Self-Supervision)",
22 |   "ui": "training steps:{id:'steps', type:'number', min:1, placeholder:1000}",
23 |   "inputs": null,
24 |   "outputs": null,
25 |   "flags": [],
26 |   "icon": "extension",
27 |   "env": {
28 |     "CPU": "conda create -n noise2self-cpu python=3.6.2",
29 |     "GPU": "conda create -n noise2self-gpu python=3.6.2"
30 |   },
31 |   "requirements": {
32 |     "CPU": ["repo:https://github.com/czbiohub/noise2self.git", "conda: -c pytorch six pillow numpy scipy pytorch=1.0 torchvision"],
33 |     "GPU": ["repo:https://github.com/czbiohub/noise2self.git", "conda: -c pytorch six pillow numpy scipy cudatoolkit=9.0 pytorch=1.0 torchvision"]
34 |   },
35 |   "dependencies": ["oeway/ImJoy-Plugins:Im2Im-Dashboard"]
36 | }
37 | 
38 | 
39 | 
147 | 
--------------------------------------------------------------------------------
/imjoy-plugins/PyImageJ-Demo.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | ## PyImageJ demo
 3 | 
 4 | A PyImageJ demo ported from https://github.com/imagej/pyimagej
 5 | 
 6 | 
 7 | 
 8 | 
 9 | {
10 |   "name": "PyImageJ-Demo",
11 |   "type": "native-python",
12 |   "version": "0.1.0",
13 |   "api_version": "0.1.2",
14 |   "description": "This plugin processes images with the frangiVesselness function in ImageJ",
15 |   "tags": [],
16 |   "ui": "",
17 |   "inputs": null,
18 |   "outputs": null,
19 |   "flags": [],
20 |   "icon": "extension",
21 |   "env": "conda create -n imagej2 openjdk=8",
22 |   "requirements": ["conda: -c conda-forge pyimagej", "pip: scikit-image pillow"],
23 |   "dependencies": []
24 | }
25 | 
26 | 
27 | 
70 | 
--------------------------------------------------------------------------------
/imjoy-plugins/SchemaIO.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | # Introduction
 3 | 
 4 | This plugin provides schema-based input and output views.
 5 | 
 6 | ### API
 7 | 
 8 | #### clear_view (id)
 9 | 
10 | Clears the input view
11 | 
12 | **id** `int` view id
13 | 
14 | 
15 | 
16 | #### vega_view_insert (args)
17 | 
18 | Inserts vega data into the chart
19 | 
20 | ##### args `object`
21 | 
22 | - id: `int` vega view index
23 | - data: `array` vega data value
24 | - dataName: `string` vega data name
25 | 
26 | e.g.
27 | 
28 | ~~~
29 | {
30 |   id: 0,
31 |   data: [{ "category": "C", "position": 0, "value": 0.6 }],
32 |   dataName: 'table'
33 | }
34 | ~~~
35 | 
36 | 
37 | 
38 | #### vega_schema_output (args)
39 | 
40 | Outputs vega schema data; the page displays the chart
41 | 
42 | ##### args `object`
43 | 
44 | - schema: `object` vega schema
45 | - message: `string` chart description
46 | - id: `int` vega view index
47 | - title: `string` view title
48 | - show: `boolean` show status
49 | - buttons: `array` button list
50 | 
51 | e.g.
52 | 
53 | ~~~
54 | {
55 |   schema: vega schema,
56 |   message: '',
57 |   id: 1,
58 |   title: 'vega output',
59 |   show: true,
60 |   buttons: []
61 | }
62 | ~~~
63 | 
64 | 
65 | 
66 | #### form_schema_input (args) / run (args)
67 | 
68 | Adds an input panel / initializes the SchemaIO window
69 | 
70 | ##### args `object`
71 | 
72 | - schema: `object` form-schema
73 | - model: `object` form data
74 | - callback: `object` submit function
75 | - title: `string` view title
76 | - show: `boolean` show status
77 | - formOptions: `object` form options
78 | - id: `int` view id
79 | 
80 | e.g.
81 | 
82 | ~~~
83 | {
84 |   schema: schema,
85 |   model: {
86 |     DNA: 'GGAGGCG...',
87 |     modelType: 'all species'
88 |   },
89 |   callback: plugin.show_schema_data1,
90 |   title: 'input vue schema',
91 |   show: true,
92 |   formOptions: {
93 |     validateAfterLoad: true,
94 |     validateAfterChanged: true
95 |   },
96 |   id: 1
97 | }
98 | ~~~
99 | 
100 | 
101 | 
102 | #### Input Form Schema
103 | 
104 | **GitHub:** https://github.com/vue-generators/vue-form-generator
105 | 
106 | **Example:** https://jsfiddle.net/zoul0813/d8excp36
107 | 
108 | #### Output Vega Schema
109 | 
110 | **GitHub:**
111 | 
112 | **Example:**
113 | 
114 | 
115 | {
116 |   "name": "SchemaIO",
117 |   "type": "window",
118 |   "tags": [],
119 |   "ui": "",
120 |   "version": "0.2.6",
121 |   "api_version": "0.1.3",
122 |   "description": "[TODO: describe this plugin with one sentence.]",
123 |   "icon": "extension",
124 |   "inputs": null,
125 |   "outputs": null,
126 |   "env": "",
127 |   "requirements": [
128 |     "https://cdnjs.cloudflare.com/ajax/libs/vue/2.5.22/vue.min.js",
129 |     "https://unpkg.com/vue-form-generator@2.3.4/dist/vfg.js",
130 |     "https://unpkg.com/vue-form-generator@2.3.4/dist/vfg.css",
131 |     "https://cdn.jsdelivr.net/npm/vega@5.3.4/build/vega.js",
132 |     "https://cdn.jsdelivr.net/npm/vega-lite@3.1.0",
133 |     "https://cdn.jsdelivr.net/npm/vega-embed@4.0.0"
134 |   ],
135 |   "dependencies": [],
136 |   "defaults": {"w": 20, "h": 10},
137 |   "runnable": false
138 | }
139 | 
140 | 
141 | 
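To make the API above concrete, here is a hedged sketch of how a Python plugin might open a SchemaIO window and request a form. The argument names follow the documentation above; the window-handle call style follows ImJoy's usual pattern and may differ between ImJoy versions:

```python
# Sketch of plugin-side usage; `api` is injected by ImJoy into every plugin.
async def show_form(self):
    win = await api.createWindow(type='SchemaIO', name='SchemaIO demo', data={})
    await win.form_schema_input({
        'schema': my_form_schema,            # a vue-form-generator schema (assumed defined elsewhere)
        'model': {'DNA': 'GGAGGCG...', 'modelType': 'all species'},
        'callback': self.show_schema_data1,  # receives the submitted form data
        'title': 'input vue schema',
        'show': True,
        'formOptions': {'validateAfterLoad': True, 'validateAfterChanged': True},
        'id': 1,
    })
```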
142 |
143 |
{{item.title}}
144 |
145 | 146 |
147 | 148 |
149 |
150 |
151 |
152 |
{{item.title}}
153 |
154 |
155 |
156 |

{{item.message}}

157 |
158 | 159 |
160 |
161 |
162 |
163 | 243 | 244 |
245 |
246 |
247 | 
350 | 
--------------------------------------------------------------------------------
/imjoy-plugins/Skin-Lesion-Analyzer.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | This plugin allows you to classify 7 types of skin lesions as described in the publication
 3 | [The HAM10000 Dataset: A Large Collection of Multi-Source Dermatoscopic Images of Common Pigmented Skin Lesions](https://arxiv.org/abs/1803.10417)
 4 | 
 5 | Images are analyzed with a pretrained model built with Tensorflow.js.
 6 | 
 7 | When loading the plugin, a default image is provided. You can also use your own images: on a **computer** you can upload an image, on a **smartphone** you can
 8 | snap an image.
 9 | 
10 | # Source code
11 | This plugin is ported from: [Skin Lesion Analyzer Web App built using Tensorflow.js](https://github.com/vbookshelf/Skin-Lesion-Analyzer).
12 | 
13 | The entire model building and training process is described in this Kaggle kernel:
14 | https://www.kaggle.com/vbookshelf/skin-lesion-analyzer-tensorflow-js-web-app
15 | 
16 | 
17 | 
18 | 
19 | {
20 |   "name": "Skin-Lesion-Analyzer",
21 |   "type": "window",
22 |   "tags": [],
23 |   "ui": "A pretrained MobileNet image classifier for images of skin lesions built with Tensorflow.js",
24 |   "version": "0.1.2",
25 |   "api_version": "0.1.2",
26 |   "description": "A pretrained MobileNet image classifier built with Tensorflow.js",
27 |   "icon": "extension",
28 |   "inputs": null,
29 |   "outputs": null,
30 |   "env": "",
31 |   "requirements": [
32 |     "https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@0.14.1",
33 |     "https://unpkg.com/spectre.css/dist/spectre.min.css",
34 |     "https://unpkg.com/spectre.css/dist/spectre-exp.min.css",
35 |     "https://unpkg.com/spectre.css/dist/spectre-icons.min.css"
36 |   ],
37 |   "dependencies": [],
38 |   "defaults": {"w": 15, "h": 20}
39 | }
40 | 
41 | 
42 | 
184 | 
185 | 
186 | 
187 | 188 |
189 |
190 |

191 |

192 |
193 |
194 | 195 | 196 |
197 | 198 |
199 | 200 |
201 | 202 | 203 |
204 | 205 |
206 |
207 |
208 | 
209 | 
225 | 
--------------------------------------------------------------------------------
/imjoy-plugins/annaPalm.imjoy.html:
--------------------------------------------------------------------------------
 1 | 
 2 | 
 3 | # ANNA-PALM
 4 | Deep learning powered plugin to massively accelerate super-resolution localization microscopy.
 5 | 
 6 | More details about ANNA-PALM can be found here:
 7 | 
 8 | * **Documentation**: available from the plugin launchpad.
 9 | 
10 | * **Publication**: Ouyang et al., Deep learning massively accelerates super-resolution localization microscopy,
11 | Nature Biotechnology, 2018
12 | 
13 | * **GitHub**: https://github.com/imodpasteur/ANNA-PALM
14 | 
15 | 
16 | 
17 | 
18 | {
19 |   "name": "ANNA-PALM",
20 |   "type": "native-python",
21 |   "version": "0.1.18",
22 |   "api_version": "0.1.5",
23 |   "description": "A plugin for training models with ANNA-PALM.",
24 |   "tags": ["CPU", "GPU"],
25 |   "ui": null,
26 |   "inputs": null,
27 |   "outputs": null,
28 |   "icon": null,
29 |   "env": {"CPU": ["conda create -n annapalm-cpu python=3.6"], "GPU": ["conda create -n annapalm-gpu python=3.6"]},
30 |   "requirements": {
31 |     "CPU": ["repo:https://github.com/imodpasteur/ANNA-PALM", "cmd:pip install -r ANNA-PALM/requirements.txt"],
32 |     "GPU": ["repo:https://github.com/imodpasteur/ANNA-PALM", "pip: Pillow numpy==1.15.0 scipy matplotlib scikit-image tensorflow-gpu==1.8.0 gputil==1.4.0"]
33 |   },
34 |   "dependencies": [
35 |     "oeway/ImJoy-Plugins:Im2Im-Dashboard",
36 |     "oeway/ImJoy-Plugins:launchpad",
37 |     "oeway/ImJoy-Plugins:Tabbed-Docs"
38 |   ],
39 |   "cover": "https://imjoy-team.github.io/example-plugins/annapalm/annapalm-cover.gif"
40 | }
41 | 
42 | 
43 | 
352 | 
353 | 
354 | 
355 |

356 | This plugin implements ANNA-PALM, which uses deep learning to reconstruct super-resolution 357 | views from sparse, rapidly acquired localization images and/or widefield images. 358 |

359 | 
360 | More details about ANNA-PALM can be found here:
361 | 
362 | * **Publication**: Ouyang et al., Deep learning massively accelerates super-resolution localization microscopy,
363 | Nature Biotechnology, 2018
364 | * **GitHub**: https://github.com/imodpasteur/ANNA-PALM
365 | 
366 | Here, we provide workflows to train and use ANNA-PALM to reconstruct super-resolution views from sparse localization and widefield images.
367 | We further provide example data for PALM experiments performed on microtubules.
368 | 
369 | 
370 | annapalm-example 371 |
Example of ANNA-PALM reconstruction: (left) input = sparse localization image, (right) output = predicted super-resolution image.
372 |
373 | 
374 | ## Installing the plugin and main features
375 | 
376 | If you don't already have the plugin, you can install it from this **link.**
377 | 
378 | When starting the ANNA-PALM plugin, you will see a **central launch pad** allowing you to select the task to be performed:
379 | 
380 | 
381 | anna-palm-launchpad 382 |
ANNA-PALM plugin launchpad.
383 |
384 | 
385 | The **main features** are:
386 | * Training can be performed either on a provided remote plugin engine or on a local plugin engine.
387 | * A trained model can be loaded and used for prediction or re-training, on both simulated and experimental data.
388 | * Prediction can be performed once the model is trained.
389 | 
390 | 
391 | 392 | 393 |

394 | Here we describe how to load a pre-trained model that can be used either directly for prediction or for re-training. 395 |

396 | 397 | ## Trained model for microtubules 398 | A pre-trained model for microtubules is available **here**. 399 | 1. The actual model is contained in the folder `__model__`. 400 | 2. After downloading, you can load the trained model onto the plugin engine (see the FAQ for how to load data onto the engine). 401 | 402 | ## Load model into ANNA-PALM plugin 403 | Pressing the button `Load trained model` will open a file dialog where you can select 404 | a model stored on the plugin engine on which the plugin is running. 405 | 406 | This model is then available either for re-training or for prediction. 407 |
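For orientation, the downloaded folder might look as sketched below. This layout is hypothetical (the exact file names depend on the ANNA-PALM version); the fixed convention is only that the model itself lives in the `__model__` subfolder:

```
├─ anna-palm-model/
│ ├─ __model__/
│ │ ├─ checkpoint
│ │ ├─ model.ckpt.index
│ │ ├─ model.ckpt.data-00000-of-00001
│ │ ├─ ...
```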
409 | 410 | 411 | 412 |

413 | Here we describe how to select a plugin engine, a computational environment, and the training data to perform training. 414 |

415 | 416 | ## Training data 417 | * Training data consist of widefield and dense PALM/STORM images. The localization data must be 418 | provided in the CSV format of ThunderSTORM (see the example snippet after this section). Each data pair shares the same filename, except for the extension. 419 | Training is then performed on sub-sampled versions of the PALM/STORM images. 420 | * Training and validation data are placed in the separate folders 'train' and 'test'. 421 | 422 | The training data (the folder containing the subfolders 'train' and 'test') have to be loaded onto 423 | the plugin engine where the ANNA-PALM plugin is running. See the FAQ for how to load data 424 | onto the engine. 425 | 426 | Experimental test data is available **here**. 427 | 428 | The provided example data for the microtubules are organized like this: 429 | 430 | ``` 431 | ├─ experimental_microtubules/ 432 | │ ├─ test/ 433 | │ │ ├─ Ctrl-lowDens_pos2.csv 434 | │ │ ├─ Ctrl-lowDens_pos2.png 435 | │ ├─ train/ 436 | │ │ ├─ Ctrl-lowDens_pos08.csv 437 | │ │ ├─ Ctrl-lowDens_pos08.png 438 | │ │ ├─ Ctrl-lowDens_pos10.csv 439 | │ │ ├─ Ctrl-lowDens_pos10.png 440 | │ │ ├─ ... 441 | ``` 442 | 443 | ## Where to perform training: plugin engine 444 | Training is performed on an ImJoy plugin engine, which can run either remotely or on your 445 | own local workstation. To install a local plugin engine, please follow 446 | **these instructions**. 447 | Once your ImJoy app is connected to one (or more) plugin engine(s), you can choose on which engine the ANNA-PALM 448 | plugin should run. For this, press the icon next to the plugin name in the plugin menu. 449 | 450 |
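For reference, the first lines of a ThunderSTORM localization CSV typically look like the sketch below. The exact column set depends on the ThunderSTORM export settings, so treat this as an illustration rather than a required schema:

```
"id","frame","x [nm]","y [nm]","sigma [nm]","intensity [photon]","uncertainty [nm]"
1,1,10235.4,20482.1,148.2,1024.5,25.1
2,1,10312.9,20397.8,155.6,987.3,27.9
3,2,9876.2,21054.0,139.8,1201.7,22.4
```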
451 | annapalm-plugin-entry 452 |
ANNA-PALM plugin in the plugin menu.
453 |
454 | This will show a dropdown menu, where you can determine how and where the plugin is running. 455 | In the lower part of the dropdown menu, you can then choose on which of the available plugin engines the plugin should run.
457 | annapalm-plugin-engines 458 |
Choosing the plugin engine.
459 |
460 | 461 | ## GPU or CPU computation 462 | Training can be performed on CPUs or on GPUs; the latter is substantially faster. To switch 463 | between these computational modes, you can select the corresponding "tag". The currently supported tags are shown below: 464 |
465 | annapalm-tags 466 |
Choosing between GPU and CPU computing.
467 |
467 | 468 | 469 | ## Monitoring training progress 470 | Once training data is provided, training starts and a dashboard is shown that allows you to inspect training progress. 471 | * In the upper part, the dashboard shows the loss functions for the training and validation data. You can hover 472 | over the curves to see the current value and which loss function is displayed. The slider allows you to scroll through the different training steps and inspect how the quality 473 | of the prediction changes. 474 | 475 |
476 | annapalm-loss 477 |
Loss functions for the training and validation data.
478 |
478 | 479 | 480 | * In the lower part, it shows the prediction results on a randomly selected image. 481 | 'A' stands for the sparse (input) image, 'B' for the high-resolution image. The following images are shown: 482 | - real_A: sparse input image 483 | - real_B: high-resolution target image 484 | - reco_B: reconstructed high-resolution image 485 | - squirrel_error_map: error map calculated with the SQUIRREL approach 486 | 487 | If you want to **terminate the training**, click on the icon next to the plugin name, 488 | select `Terminate`, and then close the window with the red x. 489 | 490 | ## Trained model 491 | Once training is finished, the trained model can be used for prediction. 492 | 493 |
494 | 495 | 496 | To predict an image, you can either load a pre-trained model into the engine or 497 | train a new model. For either option, we refer to the dedicated sections in this documentation. 498 | 499 | 500 | 1. Press `Predict`. This will use either the loaded model or (if available) the newly trained one. 501 | 2. Specify the folder containing the data you want to predict (it must contain a 'test' subfolder), e.g. the folder of the provided example data. 502 | 3. This will open a new interface, where progress on the prediction is reported. 503 | The plugin will use the data in the 'test' folder. 504 | It will create 30 different samples by sub-sampling with vastly different frame numbers (see the sketch below). 505 | 4. Once prediction is finished, you can scroll through these data. 'Step' corresponds to one 506 | created dataset. 507 |
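To make the sub-sampling step concrete: one way to obtain samples with vastly different frame numbers is to keep only the localizations up to a growing frame cutoff. The sketch below is purely illustrative and not the plugin's actual code:

```javascript
// Hypothetical sketch: build progressively denser sub-samples of a
// localization table by keeping only localizations up to a frame cutoff.
function subsampleByFrame(locs, cutoffs) {
  // locs: array of {frame, x, y} records parsed from a localization CSV
  return cutoffs.map(maxFrame => locs.filter(l => l.frame <= maxFrame));
}

// e.g. 30 cutoffs spanning vastly different frame numbers (geometric spacing)
const cutoffs = Array.from({ length: 30 }, (_, i) => Math.round(100 * Math.pow(1.25, i)));
```

Geometric spacing of the cutoffs yields sparse reconstructions at the low end and nearly complete ones at the high end.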
509 | annapalm-prediction 510 |
ANNA-PALM prediction interface.
511 |
512 | 513 |
513 | 514 | 515 | Here we provide answers to frequently asked questions and commonly encountered problems. 516 | 517 | 518 | ## How can I load data or a model into the plugin engine? 519 | You can simply drag and drop a folder containing either training data or a model: 520 | 521 | 1. In ImJoy, press `Files`, then `Open Engine File`. 522 | 2. If ImJoy is connected to multiple plugin engines, select the one where the ANNA-PALM plugin is running in the upper part of the interface. 523 | 3. Navigate to the folder where you want to store your data. 524 | 4. Drag & drop the folder from Finder (MacOS) or Explorer (WIN) into the ImJoy file dialog. 525 |
527 | annapalm-load-model-into-engine 528 |
Loading a pre-trained model onto a plugin engine.
529 |
530 | 531 |
531 | 532 | -------------------------------------------------------------------------------- /imjoy-plugins/maui-demo.imjoy.html: -------------------------------------------------------------------------------- 1 | 2 | ## A demo plugin for maui 3 | 4 | maui: Multi-omics Autoencoder Integration: a deep-learning-based heterogeneous data analysis toolkit 5 | 6 | https://github.com/BIMSBbioinfo/maui 7 | 8 | This plugin is ported from: https://github.com/BIMSBbioinfo/maui/blob/master/vignette/maui_vignette.ipynb 9 | 10 | 11 | 12 | { 13 | "name": "maui-demo", 14 | "type": "native-python", 15 | "version": "0.1.0", 16 | "description": "A demo plugin for Multi-omics Autoencoder Integration: Deep learning-based heterogeneous data analysis toolkit", 17 | "tags": [], 18 | "ui": "", 19 | "cover": "", 20 | "inputs": null, 21 | "outputs": null, 22 | "flags": [], 23 | "icon": "extension", 24 | "api_version": "0.1.7", 25 | "env": "conda create -n maui python=3.6", 26 | "permissions": [], 27 | "requirements": ["maui-tools", "scikit-learn", "pandas", "matplotlib", "seaborn==0.9.0", "lifelines==0.14.6"], 28 | "dependencies": ["oeway/ImJoy-Plugins:Im2Im-Dashboard"], 29 | "cover": "https://imjoy-team.github.io/example-plugins/maui/maui-training.png" 30 | } 31 | 32 | 33 | 231 | -------------------------------------------------------------------------------- /manifest.imjoy.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ImJoy Repository", 3 | "description": "ImJoy plugin repository.", 4 | "uri_root": "", 5 | "version": "0.2.0", 6 | "plugins": [ 7 | { 8 | "name": "3D-visualization", 9 | "type": "window", 10 | "tags": [], 11 | "ui": "Illustrate 3D visualization with vtk in the browser", 12 | "version": "0.1.0", 13 | "api_version": "0.1.2", 14 | "description": "Demonstration of 3D visualization with vtk.", 15 | "icon": "extension", 16 | "inputs": null, 17 | "outputs": null, 18 | "dependencies": [], 19 | "defaults": { 20 | "w": 30, 21 | "h": 20 22 | }, 23 | "uri": "imjoy-plugins/3D-visualization.imjoy.html" 24 | }, 25 | { 26 | "name": "CARE", 27 | "type": "native-python", 28 | "version": "0.1.20", 29 | "api_version": "0.1.2", 30 | "description": "This plugin demonstrates denoising using CARE.", 31 | "tags": [ 32 | "Linux CPU", 33 | "Linux GPU", 34 | "Windows CPU", 35 | "Windows GPU", 36 | "MacOS CPU" 37 | ], 38 | "ui": "", 39 | "inputs": null, 40 | "outputs": null, 41 | "flags": [], 42 | "icon": null, 43 | "env": { 44 | "CPU": "conda create -n care-cpu python=3.6.7", 45 | "GPU": "conda create -n care-gpu python=3.6.7", 46 | "macOS CPU": "conda create -n care-mac-cpu python=3.6.7" 47 | }, 48 | "requirements": { 49 | "Linux CPU": [ 50 | "pip: tensorflow==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six" 51 | ], 52 | "Linux GPU": [ 53 | "pip: tensorflow-gpu==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six gputil" 54 | ], 55 | "Windows CPU": [ 56 | "pip: tensorflow==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six" 57 | ], 58 | "Windows GPU": [ 59 | "pip: tensorflow-gpu==1.15.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six gputil" 60 | ], 61 | "MacOS CPU": [ 62 | "pip: tensorflow==1.5.0 keras==2.1.5 Pillow csbdeep==0.4.1 tifffile six" 63 | ] 64 | }, 65 | "dependencies": [ 66 | "oeway/ImJoy-Plugins:Im2Im-Dashboard", 67 | "oeway/ImJoy-Plugins:launchpad", 68 | "oeway/ImJoy-Plugins:Tabbed-Docs" 69 | ], 70 | "cover": [ 71 | "https://oeway.github.io/ImJoy-Plugins/care/care-cover.gif" 72 | ], 73 | "uri": "imjoy-plugins/CARE.imjoy.html" 74 | }, 75 | { 76 |
"name": "DPNUnet", 77 | "type": "native-python", 78 | "version": "0.2.10", 79 | "description": "Nuclei segmentation with deep learning.", 80 | "tags": [ 81 | "GPU" 82 | ], 83 | "ui": "", 84 | "cover": "https://imjoy-team.github.io/example-plugins/dpnunet/dpnunet-cover.png", 85 | "inputs": null, 86 | "outputs": null, 87 | "flags": [], 88 | "icon": "extension", 89 | "api_version": "0.1.5", 90 | "env": { 91 | "CPU": [ 92 | "conda create -n dsb2018-cpu python=3.6.8" 93 | ], 94 | "GPU": [ 95 | "conda create -n dsb2018-gpu python=3.6.8" 96 | ] 97 | }, 98 | "requirements": { 99 | "CPU": [ 100 | "pip: descartes palettable geojson read-roi gputil namedlist", 101 | "pip: lightgbm imgaug pandas imageio", 102 | "conda: opencv tqdm scipy=1.0.0", 103 | "conda: pytorch=0.4.1 torchvision==0.2.0 -c pytorch", 104 | "pip: tensorboardX", 105 | "repo: https://github.com/oeway/DPNUnet-Segmentation" 106 | ], 107 | "GPU": [ 108 | "pip: descartes palettable geojson read-roi gputil namedlist", 109 | "pip: lightgbm imgaug pandas imageio", 110 | "conda: opencv tqdm scipy=1.0.0", 111 | "conda: pytorch=0.4.1 torchvision==0.2.0 cuda90 -c pytorch", 112 | "pip: tensorboardX", 113 | "repo: https://github.com/oeway/DPNUnet-Segmentation" 114 | ] 115 | }, 116 | "dependencies": [ 117 | "oeway/ImJoy-Plugins:Im2Im-Dashboard", 118 | "oeway/ImJoy-Plugins:ImageAnnotator", 119 | "oeway/ImJoy-Plugins:launchpad", 120 | "oeway/ImJoy-Plugins:Tabbed-Docs" 121 | ], 122 | "uri": "imjoy-plugins/DPNUnet.imjoy.html" 123 | }, 124 | { 125 | "name": "DeepBindScan", 126 | "type": "native-python", 127 | "version": "0.2.15", 128 | "api_version": "0.1.3", 129 | "description": "Predict binding affinity of TF/RBP to a given DNA/RNA sequence.", 130 | "tags": [], 131 | "ui": "", 132 | "inputs": null, 133 | "outputs": null, 134 | "flags": [], 135 | "icon": "extension", 136 | "env": "conda create -n deepbindscan python=3.6.8", 137 | "requirements": [ 138 | "conda: h5py bioconda::pybedtools bioconda::pyfaidx numpy pandas", 139 | "pip: -U tensorflow==1.15.0 keras==2.1.6 kipoi kipoiseq" 140 | ], 141 | "dependencies": [ 142 | "imjoy-team/example-plugins:SchemaIO" 143 | ], 144 | "cmd": "KERAS_BACKEND=tensorflow python", 145 | "uri": "imjoy-plugins/DeepBindScan.imjoy.html" 146 | }, 147 | { 148 | "name": "HPA-Classification", 149 | "type": "window", 150 | "tags": [], 151 | "ui": [ 152 | { 153 | "model": { 154 | "id": "model", 155 | "type": "choose", 156 | "options": [ 157 | "ShuffleNet-V2" 158 | ], 159 | "placeholder": "ShuffleNet-V2" 160 | } 161 | } 162 | ], 163 | "version": "0.1.23", 164 | "api_version": "0.1.2", 165 | "url": "", 166 | "description": "ShuffleNetV2 for HPA.", 167 | "icon": "extension", 168 | "inputs": null, 169 | "outputs": null, 170 | "env": null, 171 | "requirements": [ 172 | "https://cdn.jsdelivr.net/npm/apexcharts", 173 | "https://cdn.jsdelivr.net/npm/@tensorflow/tfjs", 174 | "https://cdn.jsdelivr.net/npm/simpleheat@0.4.0/simpleheat.min.js", 175 | "https://cdn.jsdelivr.net/gh/photopea/UTIF.js@4f1b10cb09e244cfd4f9631245d2231537148be7/UTIF.js" 176 | ], 177 | "dependencies": [ 178 | "https://raw.githubusercontent.com/imjoy-team/example-plugins/master/imjoy-plugins/HPA-Image-Selection.imjoy.html" 179 | ], 180 | "defaults": { 181 | "w": 60, 182 | "h": 20, 183 | "standalone": true 184 | }, 185 | "cover": "https://imjoy-team.github.io/example-plugins/hpa-classification/hpa-classification-cover.gif", 186 | "uri": "imjoy-plugins/HPA-Classification.imjoy.html" 187 | }, 188 | { 189 | "name": "HPA-Image-Selection", 190 | "type": "window", 191 | "tags": 
[], 192 | "ui": "", 193 | "version": "0.1.4", 194 | "api_version": "0.1.2", 195 | "description": "This is a backbone plugin for using Vue.js in ImJoy", 196 | "icon": "extension", 197 | "inputs": null, 198 | "outputs": null, 199 | "env": "", 200 | "requirements": [ 201 | "https://cdnjs.cloudflare.com/ajax/libs/vue/2.5.22/vue.min.js" 202 | ], 203 | "dependencies": [], 204 | "defaults": { 205 | "w": 50, 206 | "h": 30 207 | }, 208 | "runnable": true, 209 | "uri": "imjoy-plugins/HPA-Image-Selection.imjoy.html" 210 | }, 211 | { 212 | "name": "HPA-UMAP", 213 | "type": "window", 214 | "tags": [], 215 | "ui": "Explore protein localization patterns in HPA images.", 216 | "version": "0.3.11", 217 | "api_version": "0.1.2", 218 | "description": "Explore protein localization patterns in HPA images.", 219 | "icon": "photo", 220 | "inputs": null, 221 | "outputs": null, 222 | "requirements": [ 223 | "https://cdn.plot.ly/plotly-latest.min.js", 224 | "https://unpkg.com/papaparse@latest/papaparse.min.js", 225 | "https://cdn.jsdelivr.net/npm/vue@2.6.10/dist/vue.min.js", 226 | "https://unpkg.com/spectre.css/dist/spectre.min.css", 227 | "https://unpkg.com/spectre.css/dist/spectre-exp.min.css", 228 | "https://unpkg.com/spectre.css/dist/spectre-icons.min.css" 229 | ], 230 | "dependencies": [], 231 | "defaults": { 232 | "w": 50, 233 | "h": 20, 234 | "fullscreen": true 235 | }, 236 | "cover": "https://oeway.github.io/ImJoy-Plugins/hpa-umap/hpa-umap-cover.gif", 237 | "uri": "imjoy-plugins/HPA-UMAP.imjoy.html" 238 | }, 239 | { 240 | "name": "ImageAnnotator", 241 | "type": "window", 242 | "tags": [], 243 | "ui": "", 244 | "version": "0.5.76", 245 | "api_version": "0.1.5", 246 | "description": "An image annotator made with OpenLayers", 247 | "icon": "extension", 248 | "inputs": null, 249 | "outputs": null, 250 | "env": "", 251 | "requirements": [ 252 | "https://cdn.jsdelivr.net/npm/vue@2.6.10/dist/vue.min.js", 253 | "https://cdn.jsdelivr.net/npm/openlayers@4.6.5/css/ol.css", 254 | "https://cdn.jsdelivr.net/npm/openlayers@4.6.5/dist/ol.min.js", 255 | "https://static.imjoy.io/spectre.css/spectre.min.css", 256 | "https://static.imjoy.io/spectre.css/spectre-exp.min.css", 257 | "https://static.imjoy.io/spectre.css/spectre-icons.min.css", 258 | "https://cdnjs.cloudflare.com/ajax/libs/noUiSlider/13.1.5/nouislider.min.css", 259 | "https://cdnjs.cloudflare.com/ajax/libs/noUiSlider/13.1.5/nouislider.min.js", 260 | "https://cdnjs.cloudflare.com/ajax/libs/pako/1.0.10/pako.min.js", 261 | "https://cdnjs.cloudflare.com/ajax/libs/upng-js/2.1.0/UPNG.min.js", 262 | "https://cdnjs.cloudflare.com/ajax/libs/jszip/3.2.0/jszip.min.js", 263 | "https://use.fontawesome.com/releases/v5.8.2/css/all.css", 264 | "cache:https://use.fontawesome.com/releases/v5.8.2/webfonts/fa-solid-900.woff2", 265 | "cache:https://images.proteinatlas.org/19661/221_G2_1_blue_red_green.jpg", 266 | "cache:https://images.proteinatlas.org/19663/395_C4_2_blue_red_green.jpg" 267 | ], 268 | "dependencies": [ 269 | "oeway/ImJoy-Plugins:ImageSelection", 270 | "oeway/ImJoy-Plugins:Tif File Importer" 271 | ], 272 | "defaults": { 273 | "w": 35, 274 | "h": 25, 275 | "fullscreen": true 276 | }, 277 | "cover": "https://oeway.github.io/ImJoy-Plugins/image-annotator/annotator-cover.gif", 278 | "uri": "imjoy-plugins/ImageAnnotator.imjoy.html" 279 | }, 280 | { 281 | "name": "Noise2Self", 282 | "type": "native-python", 283 | "version": "0.1.9", 284 | "tags": [ 285 | "CPU", 286 | "GPU" 287 | ], 288 | "api_version": "0.1.2", 289 | "description": "A demo plugin for Noise2Self (Blind 
Denoising by Self-Supervision)", 290 | "ui": "training steps:{id:'steps', type:'number', min:1, placeholder:1000}", 291 | "inputs": null, 292 | "outputs": null, 293 | "flags": [], 294 | "icon": "extension", 295 | "env": { 296 | "CPU": "conda create -n noise2self-cpu python=3.6.2", 297 | "GPU": "conda create -n noise2self-gpu python=3.6.2" 298 | }, 299 | "requirements": { 300 | "CPU": [ 301 | "repo:https://github.com/czbiohub/noise2self.git", 302 | "conda: -c pytorch six pillow numpy scipy pytorch=1.0 torchvision" 303 | ], 304 | "GPU": [ 305 | "repo:https://github.com/czbiohub/noise2self.git", 306 | "conda: -c pytorch six pillow numpy scipy cudatoolkit=9.0 pytorch=1.0 torchvision" 307 | ] 308 | }, 309 | "dependencies": [ 310 | "oeway/ImJoy-Plugins:Im2Im-Dashboard" 311 | ], 312 | "uri": "imjoy-plugins/Noise2Self.imjoy.html" 313 | }, 314 | { 315 | "name": "PyImageJ-Demo", 316 | "type": "native-python", 317 | "version": "0.1.0", 318 | "api_version": "0.1.2", 319 | "description": "This plugin processes images with the frangiVesselness function in ImageJ", 320 | "tags": [], 321 | "ui": "", 322 | "inputs": null, 323 | "outputs": null, 324 | "flags": [], 325 | "icon": "extension", 326 | "env": "conda create -n imagej2 openjdk=8", 327 | "requirements": [ 328 | "conda: -c conda-forge pyimagej", 329 | "pip: scikit-image pillow" 330 | ], 331 | "dependencies": [], 332 | "uri": "imjoy-plugins/PyImageJ-Demo.imjoy.html" 333 | }, 334 | { 335 | "name": "SchemaIO", 336 | "type": "window", 337 | "tags": [], 338 | "ui": "", 339 | "version": "0.2.6", 340 | "api_version": "0.1.3", 341 | "description": "[TODO: describe this plugin with one sentence.]", 342 | "icon": "extension", 343 | "inputs": null, 344 | "outputs": null, 345 | "env": "", 346 | "requirements": [ 347 | "https://cdnjs.cloudflare.com/ajax/libs/vue/2.5.22/vue.min.js", 348 | "https://unpkg.com/vue-form-generator@2.3.4/dist/vfg.js", 349 | "https://unpkg.com/vue-form-generator@2.3.4/dist/vfg.css", 350 | "https://cdn.jsdelivr.net/npm/vega@5.3.4/build/vega.js", 351 | "https://cdn.jsdelivr.net/npm/vega-lite@3.1.0", 352 | "https://cdn.jsdelivr.net/npm/vega-embed@4.0.0" 353 | ], 354 | "dependencies": [], 355 | "defaults": { 356 | "w": 20, 357 | "h": 10 358 | }, 359 | "runnable": false, 360 | "uri": "imjoy-plugins/SchemaIO.imjoy.html" 361 | }, 362 | { 363 | "name": "Skin-Lesion-Analyzer", 364 | "type": "window", 365 | "tags": [], 366 | "ui": "A pretrained MobileNet image classifier for images of skin lesions built with Tensorflow.js", 367 | "version": "0.1.1", 368 | "api_version": "0.1.2", 369 | "description": "A pretrained MobileNet image classifier built with Tensorflow.js", 370 | "icon": "extension", 371 | "inputs": null, 372 | "outputs": null, 373 | "env": "", 374 | "requirements": [ 375 | "https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@0.14.1", 376 | "https://unpkg.com/spectre.css/dist/spectre.min.css", 377 | "https://unpkg.com/spectre.css/dist/spectre-exp.min.css", 378 | "https://unpkg.com/spectre.css/dist/spectre-icons.min.css" 379 | ], 380 | "dependencies": [], 381 | "defaults": { 382 | "w": 15, 383 | "h": 20 384 | }, 385 | "uri": "imjoy-plugins/Skin-Lesion-Analyzer.imjoy.html" 386 | }, 387 | { 388 | "name": "ANNA-PALM", 389 | "type": "native-python", 390 | "version": "0.1.18", 391 | "api_version": "0.1.5", 392 | "description": "A plugin for training models with ANNA-PALM.", 393 | "tags": [ 394 | "CPU", 395 | "GPU" 396 | ], 397 | "ui": null, 398 | "inputs": null, 399 | "outputs": null, 400 | "icon": null, 401 | "env": { 402 | "CPU": [ 403 | "conda create -n 
annapalm-cpu python=3.6" 404 | ], 405 | "GPU": [ 406 | "conda create -n annapalm-gpu python=3.6" 407 | ] 408 | }, 409 | "requirements": { 410 | "CPU": [ 411 | "repo:https://github.com/imodpasteur/ANNA-PALM", 412 | "cmd:pip install -r ANNA-PALM/requirements.txt" 413 | ], 414 | "GPU": [ 415 | "repo:https://github.com/imodpasteur/ANNA-PALM", 416 | "pip: Pillow numpy==1.15.0 scipy matplotlib scikit-image tensorflow-gpu==1.8.0 gputil==1.4.0" 417 | ] 418 | }, 419 | "dependencies": [ 420 | "oeway/ImJoy-Plugins:Im2Im-Dashboard", 421 | "oeway/ImJoy-Plugins:launchpad", 422 | "oeway/ImJoy-Plugins:Tabbed-Docs" 423 | ], 424 | "cover": "https://imjoy-team.github.io/example-plugins/annapalm/annapalm-cover.gif", 425 | "uri": "imjoy-plugins/annaPalm.imjoy.html" 426 | }, 427 | { 428 | "name": "maui-demo", 429 | "type": "native-python", 430 | "version": "0.1.0", 431 | "description": "A demo plugin for Multi-omics Autoencoder Integration: Deep learning-based heterogeneous data analysis toolkit", 432 | "tags": [], 433 | "ui": "", 434 | "cover": "https://imjoy-team.github.io/example-plugins/maui/maui-training.png", 435 | "inputs": null, 436 | "outputs": null, 437 | "flags": [], 438 | "icon": "extension", 439 | "api_version": "0.1.7", 440 | "env": "conda create -n maui python=3.6", 441 | "permissions": [], 442 | "requirements": [ 443 | "maui-tools", 444 | "scikit-learn", 445 | "pandas", 446 | "matplotlib", 447 | "seaborn==0.9.0", 448 | "lifelines==0.14.6" 449 | ], 450 | "dependencies": [ 451 | "oeway/ImJoy-Plugins:Im2Im-Dashboard" 452 | ], 453 | "uri": "imjoy-plugins/maui-demo.imjoy.html" 454 | } 455 | ], 456 | "collections": [] 457 | } -------------------------------------------------------------------------------- /update_manifest.js: -------------------------------------------------------------------------------- 1 | var https = require('https'); 2 | var fs = require('fs'); 3 | 4 | var childProcess = require('child_process'); 5 | 6 | function runScript(scriptPath, callback) { 7 | // keep track of whether callback has been invoked to prevent multiple invocations 8 | var invoked = false; 9 | 10 | var child = childProcess.fork(scriptPath); 11 | 12 | // listen for errors as they may prevent the exit event from firing 13 | child.on('error', function (err) { 14 | if (invoked) return; 15 | invoked = true; 16 | if(callback) callback(err); 17 | }); 18 | 19 | // execute the callback once the child process has finished running 20 | child.on('exit', function (code) { 21 | if (invoked) return; 22 | invoked = true; 23 | var err = code === 0 ? 
null : new Error('exit code ' + code); 24 | if(callback) callback(err); 25 | }); 26 | 27 | } 28 | 29 | 30 | function downloadScripts(){ 31 | return new Promise((resolve, reject)=>{ 32 | https.get("https://raw.githubusercontent.com/oeway/ImJoy/master/web/src/pluginParser.js", (response)=>{ 33 | if(response.statusCode == 200){ 34 | var file = fs.createWriteStream( './pluginParser.js'); 35 | var code = '' 36 | response.on('data', (d) => { 37 | code = code + d.toString() 38 | }); 39 | response.on('end', () => { 40 | code = code.replace('export function', 'function') 41 | code = code + '\nexports.parseComponent = parseComponent;' 42 | file.write(code); 43 | file.close(()=>{ 44 | https.get("https://raw.githubusercontent.com/oeway/ImJoy/master/web/src/buildManifest.js", (response)=>{ 45 | if(response.statusCode == 200){ 46 | var code2 = '' 47 | response.on('data', (d) => { 48 | code2 = code2 + d.toString() 49 | }); 50 | response.on('end', () => { 51 | var file = fs.createWriteStream( './buildManifest.js'); 52 | file.write(code2); 53 | file.close(resolve); 54 | }) 55 | } 56 | else{ 57 | reject(response.statusCode) 58 | } 59 | 60 | }).on('error', function(err) { // Handle errors 61 | reject(err.message); 62 | }); 63 | }); 64 | }) 65 | } 66 | else{ 67 | reject(response.statusCode) 68 | } 69 | 70 | }).on('error', function(err) { // Handle errors 71 | reject(err.message); 72 | }); 73 | }) 74 | } 75 | 76 | downloadScripts().then(()=>{ 77 | runScript('./buildManifest.js', ()=>{ 78 | fs.unlink('./pluginParser.js', ()=>{}) 79 | fs.unlink('./buildManifest.js', ()=>{}) 80 | }) 81 | 82 | }).catch((err)=>{ 83 | console.error(err) 84 | runScript('./buildManifest.js') 85 | }) 86 | --------------------------------------------------------------------------------
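As a usage note, the manifest generated by this script can be consumed directly. The sketch below is illustrative (it is not part of the repository) and relies only on the `name`, `version`, and `uri` fields that appear in `manifest.imjoy.json` above:

```javascript
// Illustrative only: read the generated manifest and list the plugins.
var fs = require('fs');

var manifest = JSON.parse(fs.readFileSync('./manifest.imjoy.json', 'utf8'));
manifest.plugins.forEach(function (p) {
  // 'name', 'version' and 'uri' are present for every entry in the manifest
  console.log(p.name + '@' + p.version + ' -> ' + p.uri);
});
```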