├── .dockerignore ├── .gitignore ├── Dockerfile ├── Makefile ├── README.md ├── TODO.md ├── __init__.py ├── components ├── block_multiverse.py ├── dialogs.py ├── modules.py └── templates.py ├── config ├── fewshots │ └── summaries.json ├── generation_presets │ ├── custom_presets.json │ └── presets.json ├── interfaces │ └── interfaces.json ├── prompts │ ├── prose_to_script.txt │ ├── script_to_prose.txt │ └── summary.txt └── transformers │ └── prose_to_script.json ├── controller.py ├── data └── loom_demo.json ├── examples ├── AI21_formatted_response.json ├── AI21_response.json ├── openAI_formatted_response.json └── openAI_response.json ├── gpt.py ├── main.py ├── model.py ├── requirements-mac.txt ├── requirements.txt ├── static ├── close.gif ├── icons │ ├── ancestry-gray-48.png │ ├── ancestry-lightgray-48.png │ ├── ancestry-white-48.png │ ├── area-lightgrey-48.png │ ├── area-yellow-48.png │ ├── arrow-34-48.png │ ├── brain-lightgray-48.png │ ├── brain-white-48.png │ ├── children-white-48.png │ ├── collapse-left-red-48.png │ ├── collapse-lightgray=48.png │ ├── collapse-red-48.png │ ├── edit-48.png │ ├── edit-blue.png │ ├── edit-gray-48.png │ ├── edit-lightgray-48.png │ ├── edit-white-48.png │ ├── empty_star-48.png │ ├── fork-lightgray-48.png │ ├── minus-red-48.png │ ├── plus-green-48.png │ ├── plus_left-lightgray-48.png │ ├── program_icons │ │ ├── add_link-lightgray.png │ │ ├── add_row-lightgray.png │ │ ├── ancestry-black.png │ │ ├── brain-blue.png │ │ ├── broken_link-lightgray.png │ │ ├── brush-black.png │ │ ├── brush-white.png │ │ ├── chart-blue.png │ │ ├── children-green.png │ │ ├── close_black.png │ │ ├── collapse-black.png │ │ ├── collapse_left-black.png │ │ ├── delete-red.png │ │ ├── down-lightgray.png │ │ ├── empty.png │ │ ├── empty_star-gray.png │ │ ├── eraser-black.png │ │ ├── eraser-white.png │ │ ├── eyedropper-black.png │ │ ├── eyedropper-white.png │ │ ├── invisible-lightgray.png │ │ ├── invisible_purple.png │ │ ├── layers-black.png │ │ ├── left-white.png │ │ ├── leftarrow-lightgray.png │ │ ├── memory-blue.png │ │ ├── minus-black.png │ │ ├── minus-lightgray.png │ │ ├── pencil-black.png │ │ ├── plus-blue.png │ │ ├── plus-lightgray.png │ │ ├── plus_left-blue.png │ │ ├── right-white.png │ │ ├── rightarrow-lightgray.png │ │ ├── save-white.png │ │ ├── settings-lightgray.png │ │ ├── square-black.png │ │ ├── stats-lightgray.png │ │ ├── subtree-green.png │ │ ├── trash-red.png │ │ ├── tree-lightblue.png │ │ ├── undo-white.png │ │ ├── up-lightgray.png │ │ ├── visible-lightgray.png │ │ ├── visible-purple.png │ │ ├── x-black.png │ │ ├── x-gray.png │ │ ├── x-lightgray.png │ │ └── x-white.png │ ├── read-yellow-48.png │ ├── sd-white-48.png │ ├── star-16.png │ ├── star-48.png │ ├── star_small.png │ ├── stats-white-48.png │ ├── stats-yellow-48.png │ ├── subtree-white-48.png │ ├── tag_icons │ │ ├── archive-yellow.png │ │ ├── arrow-green.png │ │ ├── arrow-white.png │ │ ├── book-blue.png │ │ ├── book-lightgray.png │ │ ├── book-white.png │ │ ├── bookmark-black.png │ │ ├── circle-black.png │ │ ├── circle-blue.png │ │ ├── circle-red.png │ │ ├── circle-white.png │ │ ├── decision-black.png │ │ ├── decision-red.png │ │ ├── delete-red-48.png │ │ ├── edit-blue.png │ │ ├── eye-black.png │ │ ├── heart-pink.png │ │ ├── lightbulb-white.png │ │ ├── link-black.png │ │ ├── marker-black.png │ │ ├── media-white.png │ │ ├── note-black.png │ │ ├── note-blue.png │ │ ├── note-yellow.png │ │ ├── pin-red.png │ │ ├── question-red.png │ │ ├── quotes-black.png │ │ ├── star-black.png │ │ ├── star-blue.png │ │ ├── star-green.png │ │ 
└── star-red.png │ ├── white-brain-48.png │ ├── white-line-48.png │ └── white-read-48.png ├── media │ ├── black.png │ └── blank.png ├── readme │ ├── block-multiverse.png │ ├── metadata-light.png │ ├── metadata.png │ ├── read-view-light.png │ ├── read-view.png │ ├── tree-view-light.png │ └── tree-view.png ├── star.png └── zoneplate.png ├── util ├── canvas_util.py ├── custom_tks.py ├── file_utils.py ├── frames_util.py ├── gpt_util.py ├── keybindings.py ├── multiverse_util.py ├── node_conditions.py ├── react.py ├── textbox_util.py ├── tokenizer.py ├── util.py ├── util_tk.py └── util_tree.py └── view ├── __init__.py ├── colors.py ├── display.py ├── icons.py ├── panes.py ├── styles.py └── tree_vis.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # Standard 2 | __pycache__ 3 | *.pyc 4 | *.pyo 5 | *.pyd 6 | .Python 7 | env 8 | pip-log.txt 9 | pip-delete-this-directory.txt 10 | .tox 11 | .coverage 12 | .coverage.* 13 | .cache 14 | nosetests.xml 15 | coverage.xml 16 | *.cover 17 | *.log 18 | .git 19 | .mypy_cache 20 | .pytest_cache 21 | .hypothesis 22 | 23 | # Custom 24 | data/ 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | data/backups/ 2 | data/* 3 | !data/loom_demo.json 4 | static/media/* 5 | !static/media/black.png 6 | !static/media/blank.png 7 | config/custom_presets.json 8 | 9 | .DS_Store 10 | 11 | ### Python template 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/ 14 | *.py[cod] 15 | *$py.class 16 | 17 | # C extensions 18 | *.so 19 | 20 | # Distribution / packaging 21 | .Python 22 | env/ 23 | build/ 24 | develop-eggs/ 25 | dist/ 26 | downloads/ 27 | eggs/ 28 | .eggs/ 29 | lib/ 30 | lib64/ 31 | parts/ 32 | sdist/ 33 | var/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | 38 | # PyInstaller 39 | # Usually these files are written by a python script from a template 40 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
41 | *.manifest 42 | *.spec 43 | 44 | # Installer logs 45 | pip-log.txt 46 | pip-delete-this-directory.txt 47 | 48 | # Unit examples / coverage reports 49 | htmlcov/ 50 | .tox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *,cover 57 | .hypothesis/ 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # IPython Notebook 81 | .ipynb_checkpoints 82 | 83 | # pyenv 84 | .python-version 85 | 86 | # celery beat schedule file 87 | celerybeat-schedule 88 | 89 | # dotenv 90 | .env 91 | 92 | # virtualenv 93 | venv/ 94 | ENV/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | 99 | # Rope project settings 100 | .ropeproject 101 | 102 | 103 | ### VirtualEnv template 104 | # Virtualenv 105 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 106 | [Bb]in 107 | [Ii]nclude 108 | [Ll]ib 109 | [Ll]ib64 110 | [Ll]ocal 111 | [Ss]cripts 112 | pyvenv.cfg 113 | .venv 114 | pip-selfcheck.json 115 | 116 | 117 | ### JetBrains template 118 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 119 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 120 | 121 | # User-specific stuff: 122 | .idea/workspace.xml 123 | .idea/tasks.xml 124 | .idea/dictionaries 125 | .idea/vcs.xml 126 | .idea/jsLibraryMappings.xml 127 | 128 | # Sensitive or high-churn files: 129 | .idea/dataSources.ids 130 | .idea/dataSources.xml 131 | .idea/dataSources.local.xml 132 | .idea/sqlDataSources.xml 133 | .idea/dynamic.xml 134 | .idea/uiDesigner.xml 135 | 136 | # Gradle: 137 | .idea/gradle.xml 138 | .idea/libraries 139 | 140 | # Mongo Explorer plugin: 141 | .idea/mongoSettings.xml 142 | 143 | .idea/ 144 | 145 | ## File-based project format: 146 | *.iws 147 | 148 | ## Plugin-specific files: 149 | 150 | # IntelliJ 151 | /out/ 152 | 153 | # mpeltonen/sbt-idea plugin 154 | .idea_modules/ 155 | 156 | # JIRA plugin 157 | atlassian-ide-plugin.xml 158 | 159 | # Crashlytics plugin (for Android Studio and IntelliJ) 160 | com_crashlytics_export_strings.xml 161 | crashlytics.properties 162 | crashlytics-build.properties 163 | fabric.properties 164 | 165 | .tags 166 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-slim 2 | 3 | RUN apt-get update && apt-get install -y python3-tk && pip install --upgrade pip 4 | 5 | WORKDIR /app 6 | 7 | COPY . ./ 8 | 9 | RUN pip install -r requirements.txt 10 | 11 | CMD ["python", "main.py"] 12 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Add your keys here 2 | OPENAI_API_KEY := "" 3 | GOOSEAI_API_KEY := "" 4 | AI21_API_KEY := "" 5 | 6 | 7 | IMAGE := loom 8 | 9 | SHELL = /bin/sh 10 | 11 | CURRENT_UID := $(shell id -u) 12 | CURRENT_GID := $(shell id -g) 13 | 14 | export CURRENT_UID 15 | export CURRENT_GID 16 | 17 | install: 18 | echo "Make sure you are using python version 3.9.13 or over" 19 | sudo apt install python-tk 20 | 21 | build: 22 | docker build -t $(IMAGE) . 
23 | 24 | run: 25 | docker run -it --rm \ 26 | -v $(PWD)/data:/app/data \ 27 | -v $(PWD)/examples:/app/examples \ 28 | -v /tmp/.X11-unix:/tmp/.X11-unix:rw \ 29 | -e DISPLAY=$(DISPLAY) \ 30 | -e OPENAI_API_KEY=$(OPENAI_API_KEY) \ 31 | -e GOOSEAI_API_KEY=$(GOOSEAI_API_KEY) \ 32 | -e AI21_API_KEY=$(AI21_API_KEY) \ 33 | -u=$(CURRENT_UID):$(CURRENT_GID) \ 34 | $(IMAGE) 35 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | This is an experimental tree-based writing interface for GPT-3. The code is actively being developed and is thus 3 | unstable and poorly documented. 4 | 5 | # Features 6 | 7 | * Read mode 8 | * Linear story view 9 | * Tree nav bar 10 | * Edit mode 11 | 12 | 13 | * Tree view 14 | * Explore tree visually with mouse 15 | * Expand and collapse nodes 16 | * Change tree topology 17 | * Edit nodes in place 18 | 19 | 20 | * Navigation 21 | * Hotkeys 22 | * Bookmarks 23 | * Chapters 24 | * 'Visited' state 25 | 26 | 27 | * Generation 28 | * Generate N children with GPT-3 29 | * Modify generation settings 30 | * Change hidden memory on a node-by-node basis 31 | 32 | 33 | * File I/O 34 | * Open/save trees as JSON files 35 | * Work with trees in multiple tabs 36 | * Combine trees 37 | 38 | 39 | # Demo 40 | 41 | ![](static/readme/read-view.png) 42 | ![](static/readme/read-view-light.png) 43 | ![](static/readme/tree-view.png) 44 | ![](static/readme/tree-view-light.png) 45 | ![](static/readme/metadata-light.png) 46 | 47 | ooo what features! wow so cool 48 | 49 | # Block multiverse mode 50 | 51 | [Read this](https://generative.ink/meta/block-multiverse/) for a conceptual explanation of the block multiverse interface and a demo video. 52 | 53 | ### How to use in loom 54 | 55 | 1. Click the `Wavefunction` button on the bottom bar. This will open the block multiverse interface in the right sidebar (drag to resize). 56 | 2. Write an initial prompt in the main textbox. 57 | 3. [Optional] Write a ground truth continuation in the gray entry box at the bottom of the block multiverse interface. Blocks in the ground truth trajectory will be colored black. 58 | 4. Set the model and [params](https://generative.ink/meta/block-multiverse/#generation-parameters) in the top bar. 59 | 5. Click `Propagate` to plot the block multiverse (see the data sketch after this list). 60 | 6. Click on any of the blocks to zoom ("[renormalize](https://generative.ink/meta/block-multiverse/#renormalization)") to that block. 61 | 7. Click `Propagate` again to plot the future block multiverse starting from the renormalized frame. 62 | 8. Click `Reset zoom` to reset the zoom level to the initial position. 63 | 9. Click `Clear` to clear the block multiverse plot. Do this before generating a new block multiverse. 
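What `Propagate` draws is a nested dict of alternative next tokens, where each block's height is proportional to its probability mass. Below is a minimal sketch of that structure. The field names are taken from `components/block_multiverse.py` (`draw_multiverse` / `propagate`); the tokens and probabilities themselves are invented placeholders, and the exact dict your model backend produces may differ.

```python
# Hypothetical example of the nested "multiverse" dict consumed by
# BlockMultiverse.draw_multiverse. The keys 'unnormalized_prob' and
# 'children' come from components/block_multiverse.py; the tokens and
# numbers below are made up for illustration.
multiverse = {
    " the": {
        "unnormalized_prob": 0.42,   # P(" the" | prompt)
        "children": {                # alternatives for the *next* token, same structure
            " cat": {"unnormalized_prob": 0.21, "children": {}},  # = 0.42 * P(" cat" | prompt + " the")
            " dog": {"unnormalized_prob": 0.13, "children": {}},
        },
    },
    " a": {"unnormalized_prob": 0.18, "children": {}},
}

# Each block is drawn with height = window_height * unnormalized_prob, so a
# block's children subdivide (at most) the height of their parent block.
```

Zooming into a block ("renormalizing") simply rescales the y axis so that the selected block fills the window (`set_y_window` sets `y_scale = window_height / block_height`), and the next `Propagate` call grows the tree from that block's prefix.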
64 | 65 | ![](static/readme/block-multiverse.png) 66 | 67 | # Hotkeys 68 | 69 | *Alt hotkeys correspond to Command on Mac* 70 | 71 | ### File 72 | 73 | Open: `o`, `Control-o` 74 | 75 | Import JSON as subtree: `Control-Shift-O` 76 | 77 | Save: `s`, `Control-s` 78 | 79 | 80 | ### Dialogs 81 | 82 | Change chapter: `Control-y` 83 | 84 | Preferences: `Control-p` 85 | 86 | Generation Settings: `Control-Shift-P` 87 | 88 | Visualization Settings: `Control-u` 89 | 90 | Multimedia dialog: `u` 91 | 92 | Tree Info: `Control-i` 93 | 94 | Node Metadata: `Control+Shift+N` 95 | 96 | Run Code: `Control+Shift+B` 97 | 98 | 99 | ### Mode / display 100 | 101 | Toggle edit / save edits: `e`, `Control-e` 102 | 103 | Toggle story textbox editable: `Control-Shift-e` 104 | 105 | Toggle visualize: `j`, `Control-j` 106 | 107 | Toggle bottom pane: `Tab` 108 | 109 | Toggle side pane: `Alt-p` 110 | 111 | Toggle show children: `Alt-c` 112 | 113 | Hoist: `Alt-h` 114 | 115 | Unhoist: `Alt-Shift-h` 116 | 117 | 118 | ### Navigate 119 | 120 | Click to go to node: `Control-shift-click` 121 | 122 | Next: `period`, `Return`, `Control-period` 123 | 124 | Prev: `comma`, `Control-comma` 125 | 126 | Go to child: `Right`, `Control-Right` 127 | 128 | Go to next sibling: `Down`, `Control-Down` 129 | 130 | Go to parent: `Left`, `Control-Left` 131 | 132 | Go to previous Sibling: `Up`, `Control-Up` 133 | 134 | Return to root: `r`, `Control-r` 135 | 136 | Walk: `w`, `Control-w` 137 | 138 | Go to checkpoint: `t` 139 | 140 | Save checkpoint: `Control-t` 141 | 142 | Go to next bookmark: `d`, `Control-d` 143 | 144 | Go to prev bookmark: `a`, `Control-a` 145 | 146 | Search ancestry: `Control-f` 147 | 148 | Search tree: `Control-shift-f` 149 | 150 | Click to split node: `Control-alt-click` 151 | 152 | Goto node by id: `Control-shift-g` 153 | 154 | 155 | ### Organization 156 | 157 | Toggle bookmark: `b`, `Control-b` 158 | 159 | Toggle archive node: `!` 160 | 161 | 162 | 163 | ### Generation and memory 164 | 165 | Generate: `g`, `Control-g` 166 | 167 | Inline generate: `Alt-i` 168 | 169 | Add memory: `Control-m` 170 | 171 | View current AI memory: `Control-Shift-m` 172 | 173 | View node memory: `Alt-m` 174 | 175 | 176 | ### Edit topology 177 | 178 | Delete: `BackSpace`, `Control-BackSpace` 179 | 180 | Merge with Parent: `Shift-Left` 181 | 182 | Merge with children: `Shift-Right` 183 | 184 | Move node up: `Shift-Up` 185 | 186 | Move node down: `Shift-Down` 187 | 188 | Change parent: `Shift-P` 189 | 190 | New root child: `Control-Shift-h` 191 | 192 | New Child: `h`, `Control-h`, `Alt-Right` 193 | 194 | New Parent: `Alt-Left` 195 | 196 | New Sibling: `Alt-Down` 197 | 198 | 199 | 200 | ### Edit text 201 | 202 | Toggle edit / save edits: `Control-e` 203 | 204 | Save edits as new sibling: `Alt-e` 205 | 206 | Click to edit history: `Control-click` 207 | 208 | Click to select token: `Alt-click` 209 | 210 | Next counterfactual token: `Alt-period` 211 | 212 | Previous counterfactual token: `Alt-comma` 213 | 214 | Apply counterfactual changes: `Alt-return` 215 | 216 | Enter text: `Control-bar` 217 | 218 | Escape textbox: `Escape` 219 | 220 | Prepend newline: `n`, `Control-n` 221 | 222 | Prepend space: `Control-Space` 223 | 224 | 225 | 226 | ### Collapse / expand 227 | 228 | Collapse all except subtree: `Control-colon` 229 | 230 | Collapse node: `Control-question` 231 | 232 | Collapse subtree: `Control-minus` 233 | 234 | Expand children: `Control-quotedbl` 235 | 236 | Expand subtree: `Control-plus` 237 | 238 | 239 | ### View 240 | 241 | Center view: `l`, 
`Control-l` 242 | 243 | Reset zoom: `Control-0` 244 | 245 | 246 | 247 | # Instructions 248 | 249 | ## Linux 250 | 251 | 0. Make sure you have tkinter installed 252 | 253 | ```sudo apt-get install python3-tk``` 254 | 1. Set up your Python env (should be >= 3.9.13) 255 | 256 | ```python3 -m venv env``` 257 | ```source env/bin/activate``` 258 | 2. Install requirements 259 | 260 | ```pip install -r requirements.txt``` 261 | 3. [Optional] Set environment variables for `OPENAI_API_KEY`, `GOOSEAI_API_KEY`, `AI21_API_KEY` (you can also use the settings options) 262 | 263 | ```export OPENAI_API_KEY={your api key}``` 264 | 4. Run main.py 265 | 5. Load a JSON tree 266 | 6. Read :) 267 | 268 | ## Mac 269 | 1. `conda create -n pyloom python=3.10` 270 | 2. `conda activate pyloom` 271 | 3. `pip install -r requirements-mac.txt` 272 | 4. Set the `OPENAI_API_KEY` env variable 273 | 5. `python main.py` 274 | 275 | ## Docker 276 | 277 | (Only tested on Linux.) 278 | 279 | 0. [Optional] Edit the Makefile with your API keys (you can also use the settings options) 280 | 1. Run the make targets 281 | 282 | ```make build``` 283 | ```make run``` 284 | 2. Load a JSON tree 285 | 3. Read :) 286 | 287 | # Local Inference with llama-cpp-python 288 | [llama.cpp](https://github.com/ggerganov/llama.cpp) lets you run models locally, and is especially useful for running models on Mac. [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) provides easy installation and a convenient API. 289 | 290 | ## Setup 291 | 1. `conda create -n llama-cpp-local python=3.10; conda activate llama-cpp-local` 292 | 2. Set your preferred backend before installing `llama-cpp-python`, as per [these instructions](https://github.com/abetlen/llama-cpp-python?tab=readme-ov-file#supported-backends). For instance, to infer on MPS: `CMAKE_ARGS="-DLLAMA_METAL=on"` 293 | 3. `pip install 'llama-cpp-python[server]'` 294 | 4. `pip install huggingface-hub` 295 | 5. Now you can run the server with whatever .gguf model you desire from Hugging Face, e.g.: `python3 -m llama_cpp.server --hf_model_repo_id NousResearch/Meta-Llama-3-8B-GGUF --model 'Meta-Llama-3-8B-Q4_5_M.gguf' --port 8009` 296 | 297 | ## Inference 298 | 1. `conda activate llama-cpp-local` and start your llama-cpp-python server. 299 | 2. In a new terminal window, activate your `pyloom` environment and run `main.py`. 300 | 3. Enter configurations for your local model in Settings > Model config > Add model. 
By default, the llama-cpp-port-8009 model uses the following settings: 301 | ``` 302 | { 303 | 'model': 'Meta-Llama-3-8B-Q4_5_M', 304 | 'type': 'llama-cpp', 305 | 'api_base': 'http://localhost:8009/v1', 306 | }, 307 | ``` -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/__init__.py -------------------------------------------------------------------------------- /components/block_multiverse.py: -------------------------------------------------------------------------------- 1 | import math 2 | import tkinter 3 | import uuid 4 | 5 | from tkinter import ttk 6 | from decimal import * 7 | from util.custom_tks import TextAware 8 | from util.gpt_util import logprobs_to_probs 9 | from util.tokenizer import tokenize, token_to_word 10 | from PIL import Image 11 | import PIL.ImageGrab as ImageGrab 12 | 13 | rainbow_colors = ['#9400D3', '#4B0082', '#0000FF', '#00FF00', '#FFFF00', '#FF7F00', '#FF0000'] 14 | 15 | default_y_scale = 1 16 | 17 | class BlockMultiverse: 18 | def __init__(self, parent_frame): 19 | self.parent_frame = parent_frame 20 | 21 | self.frame = None 22 | self.multiverse_frame = None 23 | self.bottom_input_frame = None 24 | self.past_box = None 25 | self.canvas = None 26 | self.wavefunction = None 27 | self.selected_id = None 28 | self.window_height = 450 29 | self.node_info = {} 30 | self.build_canvas() 31 | self.build_past_box() 32 | self.window_offset = (0, 0) 33 | self.y_scale = default_y_scale 34 | self.x_scale = 1 35 | self.bind_mouse_controls() 36 | self.prompt = None 37 | 38 | def clear_multiverse(self): 39 | self.wavefunction = None 40 | self.selected_id = None 41 | self.canvas.delete("all") 42 | self.node_info = {} 43 | self.set_pastbox_text('', '') 44 | self.prompt = None 45 | self.reset_view() 46 | 47 | def build_canvas(self): 48 | self.frame = ttk.Frame(self.parent_frame) 49 | self.multiverse_frame = ttk.Frame(self.frame) 50 | self.multiverse_frame.pack(expand=True, fill=tkinter.BOTH) 51 | self.canvas = tkinter.Canvas(self.multiverse_frame, bg="#808080") 52 | 53 | # hbar = tkinter.Scrollbar(self.multiverse_frame, orient=tkinter.HORIZONTAL) 54 | # hbar.pack(side=tkinter.BOTTOM, fill=tkinter.X) 55 | # hbar.config(command=self.canvas.xview) 56 | 57 | # vbar = tkinter.Scrollbar(self.multiverse_frame, orient=tkinter.VERTICAL) 58 | # vbar.pack(side=tkinter.RIGHT, fill=tkinter.Y) 59 | # vbar.config(command=self.canvas.yview) 60 | 61 | # self.canvas.config( 62 | # xscrollcommand=hbar.set, 63 | # yscrollcommand=vbar.set 64 | # ) 65 | 66 | self.canvas.pack(side=tkinter.LEFT, expand=True, fill=tkinter.BOTH) 67 | #self.multiverse_frame.update_idletasks() 68 | #self.window_height = self.multiverse_frame.winfo_reqheight() * 2 69 | 70 | def build_past_box(self): 71 | self.bottom_input_frame = ttk.Frame(self.frame) 72 | self.bottom_input_frame.pack(side="bottom", fill="x") 73 | self.past_box = TextAware(self.bottom_input_frame, bd=3, height=3) 74 | self.past_box.pack(expand=True, fill='x') 75 | self.past_box.configure( 76 | foreground='white', 77 | background='black', 78 | wrap="word", 79 | ) 80 | self.past_box.tag_configure("prompt", foreground="gray") 81 | self.past_box.configure(state="disabled") 82 | 83 | def set_pastbox_text(self, prompt_text='', completion_text=''): 84 | if self.past_box: 85 | self.past_box.configure(state="normal") 86 | self.past_box.delete('1.0', 
"end") 87 | self.past_box.insert('1.0', prompt_text, "prompt") 88 | self.past_box.insert("end-1c", completion_text) 89 | self.past_box.configure(state="disabled") 90 | self.past_box.see("end") 91 | 92 | def bind_mouse_controls(self): 93 | # FIXME 94 | # def _on_mousewheel(event): 95 | # self.canvas.yview_scroll(int(-1 * (event.delta / 120)), "units") 96 | # self.frame.bind_all("", _on_mousewheel) 97 | # self.canvas.bind_all("", _on_mousewheel) 98 | 99 | # This is what enables scrolling with the mouse: 100 | def scroll_start(event): 101 | self.canvas.scan_mark(event.x, event.y) 102 | 103 | def scroll_move(event): 104 | self.canvas.scan_dragto(event.x, event.y, gain=1) 105 | 106 | self.canvas.bind("", scroll_start) 107 | self.canvas.bind("", scroll_move) 108 | 109 | # # windows zoom 110 | # def zoomer(event): 111 | # if event.delta > 0: 112 | # zoom_in(event) 113 | # self.scroll_ratio *= 1.1 114 | # self.canvas.scale("all", event.x, event.y, 1.1, 1.1) 115 | # elif event.delta < 0: 116 | # zoom_out(event) 117 | # self.scroll_ratio *= 0.9 118 | # self.canvas.scale("all", event.x, event.y, 0.9, 0.9) 119 | # self.canvas.configure(scrollregion=self.canvas.bbox("all")) 120 | # #self.fix_text_zoom() 121 | # #self.fix_image_zoom() 122 | 123 | # # linux zoom 124 | def zoom_in(event): 125 | self.y_scale *= 1.1 126 | self.x_scale *= 1.1 127 | self.canvas.scale("all", event.x, event.y, 1, 1.1) 128 | self.canvas.configure(scrollregion=self.canvas.bbox("all")) 129 | self.fix_text_zoom() 130 | #self.fix_image_zoom() 131 | 132 | def zoom_out(event): 133 | self.y_scale *= 0.9 134 | self.x_scale *= 0.9 135 | self.canvas.scale("all", event.x, event.y, 1, 0.9) 136 | self.canvas.configure(scrollregion=self.canvas.bbox("all")) 137 | # self.showtext = event.text > 0.8 138 | self.fix_text_zoom() 139 | #self.fix_image_zoom() 140 | 141 | # Mac and then linux scrolls 142 | #self.canvas.bind("", zoomer) 143 | self.canvas.bind("", zoom_in) 144 | self.canvas.bind("", zoom_out) 145 | 146 | def get_text_size(self, original_size=10): 147 | text_size = max(1, math.floor(original_size * Decimal(self.y_scale))) 148 | return min(text_size, 12) 149 | 150 | def fix_text_zoom(self): 151 | # size = self.get_text_size() 152 | # for item in self.canvas.find_withtag("text"): 153 | # self.canvas.itemconfig(item, font=('Arial', size)) 154 | for key, info in self.node_info.items(): 155 | size = self.get_text_size(info['font_size']) 156 | self.canvas.itemconfig(info['text_widget'], font=('Arial', size)) 157 | 158 | def set_y_window(self, x0, y0, height): 159 | old_y_scale = self.y_scale 160 | self.reset_view() 161 | self.window_offset = (x0, y0) 162 | self.canvas.move("all", -x0, -y0) 163 | self.y_scale = Decimal(self.window_height) / height 164 | magnification = Decimal(self.y_scale) / old_y_scale 165 | 166 | print('\nmagnification: *', "{:.2f}".format(magnification)) 167 | print('total magnification: ', "{:.2f}".format(self.y_scale)) 168 | print('+{:.2f} bits'.format(math.log(magnification,2))) 169 | print('total bits: ', "{:.2f}".format(math.log(self.y_scale, 2))) 170 | 171 | self.canvas.scale("all", 0, 0, 1, self.y_scale) 172 | self.fix_text_zoom() 173 | 174 | def reset_view(self): 175 | self.canvas.scale("all", 0, 0, 1, default_y_scale / self.y_scale) 176 | self.y_scale = default_y_scale 177 | self.canvas.move("all", self.window_offset[0], self.window_offset[1]) 178 | self.window_offset = (0, 0) 179 | self.fix_text_zoom() 180 | if self.prompt: 181 | self.set_pastbox_text(prompt_text=self.prompt) 182 | 183 | def 
active_wavefunction(self): 184 | return self.wavefunction and self.selected_id 185 | 186 | def active_info(self): 187 | return self.node_info[self.selected_id] 188 | 189 | def node_clicked(self, x0, y0, height, node_id): 190 | self.selected_id = node_id 191 | #print(self.node_info[node_id]['token']) 192 | self.set_y_window(x0, y0, height) 193 | prefix_text = self.node_info[node_id]['prefix'] 194 | self.set_pastbox_text(prompt_text=self.prompt if self.prompt else '', 195 | completion_text=prefix_text) 196 | 197 | def draw_multiverse(self, multiverse, ground_truth='', block_width=150, start_position=(0, 0), color_index=0, 198 | prefix='', show_text=True, show_probabilities=False, prompt=''): 199 | if not self.prompt: 200 | self.prompt = prompt 201 | self.set_pastbox_text(prompt_text=self.prompt) 202 | 203 | if not self.wavefunction: 204 | self.wavefunction = multiverse 205 | else: 206 | if self.selected_id: 207 | #self.node_info[self.selected_id]['node']['children'] = multiverse 208 | prefix = self.node_info[self.selected_id]['prefix'] 209 | else: 210 | return 211 | if start_position == (0, 0): 212 | self.draw_block(0, 0, self.prompt[-20:], prefix, 1, Decimal(self.window_height), block_width, True, 213 | show_text, 0) 214 | self.propagate(multiverse, ground_truth, prefix, block_width, start_position, color_index, show_text, 215 | y_offset=0, depth=1) 216 | 217 | # TODO should work purely in absolute coordinates 218 | def propagate(self, multiverse, ground_truth, prefix, block_width, start_position, color_index, show_text, 219 | y_offset, depth): 220 | x = start_position[0] + (depth * block_width) 221 | 222 | rainbow_index = color_index % len(rainbow_colors) 223 | for token, node in multiverse.items(): 224 | y = start_position[1] + y_offset 225 | height = Decimal(self.window_height) * Decimal(node['unnormalized_prob']) 226 | is_ground_truth = (token == ground_truth[0]) if ground_truth else False 227 | 228 | self.draw_block(x, y, token, prefix, node['unnormalized_prob'], height, block_width, is_ground_truth, 229 | show_text, rainbow_index) 230 | 231 | self.propagate(node['children'], ground_truth=ground_truth[1:] if is_ground_truth else None, 232 | prefix=prefix + token, 233 | block_width=block_width, 234 | start_position=start_position, 235 | color_index=rainbow_index, 236 | show_text=show_text, 237 | y_offset=y_offset, 238 | depth=depth + 1, 239 | ) 240 | y_offset += height 241 | rainbow_index = (rainbow_index + 1) % len(rainbow_colors) 242 | 243 | def draw_block(self, x, y, token, prompt, probability, height, block_width, is_ground_truth, show_text, rainbow_index): 244 | color = 'black' if is_ground_truth else rainbow_colors[rainbow_index] 245 | 246 | identifier = str(uuid.uuid1()) 247 | self.draw_rectangle_absolute(x, y, x + block_width, y + height, fill=color, activefill='gray', activeoutline='white', 248 | outline=color, tags=[identifier]) 249 | 250 | self.canvas.tag_bind(f'{identifier}', "", 251 | lambda event, _id=identifier, _x=x, _y=y, _height=height: self.node_clicked(_x, _y, 252 | _height, 253 | _id)) 254 | 255 | self.node_info[identifier] = { 256 | 'id': identifier, 257 | 'prefix': prompt + token, 258 | 'token': token, 259 | 'amplitude': probability, 260 | 'x': x, 261 | 'y': y, 262 | } 263 | 264 | print(token, probability) 265 | 266 | if show_text: 267 | text_color = 'blue' if color == '#FFFF00' else 'white' # if is_ground_truth else 'black' 268 | font_size = min(12, int(math.ceil(height * self.y_scale / 2))) 269 | text = '\\n' if token == '\n' else token 270 | 
self.node_info[identifier]['font_size'] = Decimal(font_size) / Decimal(self.y_scale) 271 | self.node_info[identifier]['text_widget'] = self.draw_text_absolute(x + block_width / 2, y + height / 2, 272 | text=text, 273 | font=('Arial', font_size), 274 | tags=['text', f'text-{identifier}'], 275 | fill=text_color) 276 | return identifier 277 | 278 | # def propagate_realtime(self, prompt, ground_truth='', block_width=150, parent_position=(0,0), max_depth=3, 279 | # unnormalized_amplitude=1, threshold=0.01, rainbow_index=0, engine='ada'): 280 | # if ground_truth and isinstance(ground_truth, str): 281 | # ground_truth = tokenize(ground_truth) 282 | # ground_truth = [token_to_word(token).replace('Ġ', ' ') for token in ground_truth] 283 | # self.propagate_and_draw(prompt, ground_truth, block_width, parent_position, max_depth, unnormalized_amplitude, 284 | # threshold, rainbow_index, engine) 285 | # 286 | # def propagate_and_draw(self, prompt, ground_truth, block_width, parent_position, max_depth, 287 | # unnormalized_amplitude, threshold, rainbow_index, engine): 288 | # if max_depth == 0: 289 | # return 290 | # response = openai.Completion.create(prompt=prompt, 291 | # max_tokens=1, 292 | # n=1, 293 | # temperature=0, 294 | # logprobs=100, 295 | # engine=engine) 296 | # logprobs = response.choices[0]["logprobs"]["top_logprobs"][0] 297 | # probs = {k: logprobs_to_probs(v) * unnormalized_amplitude for k, v in sorted(logprobs.items(), 298 | # key=lambda item: item[1], 299 | # reverse=True)} 300 | # 301 | # ground_truth_token = ground_truth[0] if ground_truth else 'NO GROUND TRUTH' 302 | # x = parent_position[0] + block_width 303 | # y_offset = 0 304 | # for token, probability in probs.items(): 305 | # y = parent_position[1] + y_offset 306 | # height = self.window_height * probability 307 | # is_ground_truth = (token == ground_truth_token) if ground_truth else False 308 | # self.draw_block(x, y, token, prompt, probability, height, block_width, is_ground_truth, True, rainbow_index) 309 | # 310 | # if token == ground_truth_token: 311 | # self.propagate_and_draw(prompt + token, ground_truth[1:], block_width, (x, y), max_depth-1, probability, 312 | # threshold, rainbow_index, engine) 313 | # elif probability > threshold: 314 | # self.propagate_and_draw(prompt + token, '', block_width, (x, y), max_depth - 1, 315 | # probability, threshold, rainbow_index, engine) 316 | # else: 317 | # break 318 | # y_offset += height 319 | # rainbow_index = (rainbow_index + 1) % len(rainbow_colors) 320 | 321 | 322 | def map_to_scaled_coordinates(self, x, y): 323 | x = x - self.window_offset[0] 324 | y = y - self.window_offset[1] 325 | y = y * self.y_scale 326 | return x, y 327 | 328 | def map_to_absolute_coordinates(self, x, y): 329 | x = x + self.window_offset[0] 330 | y = y + self.window_offset[1] 331 | y = Decimal(y) / Decimal(self.y_scale) 332 | return x, y 333 | 334 | # draw a rectangle with size and coordinates regardless of current zoom / pan state 335 | def draw_rectangle_absolute(self, x0, y0, x1, y1, **kwargs): 336 | rel_x0, rel_y0 = self.map_to_scaled_coordinates(x0, y0) 337 | rel_x1, rel_y1 = self.map_to_scaled_coordinates(x1, y1) 338 | return self.canvas.create_rectangle((rel_x0, rel_y0, rel_x1, rel_y1), **kwargs) 339 | 340 | def draw_text_absolute(self, x, y, **kwargs): 341 | rel_x, rel_y = self.map_to_scaled_coordinates(x, y) 342 | #rel_x = int(round(rel_x)) 343 | #rel_y = int(round(rel_y)) 344 | return self.canvas.create_text(rel_x, rel_y, **kwargs) 345 | 346 | def save_as_png(self, filename): 347 | # 
grabcanvas=ImageGrab.grab(bbox=self.canvas).save("test.png") 348 | # ttk.grabcanvas.save("test.png") 349 | 350 | self.canvas.postscript(file = filename + '.eps') 351 | # use PIL to convert to PNG 352 | img = Image.open(filename + '.eps') 353 | img.save(filename + '.png', 'png', quality=100) 354 | -------------------------------------------------------------------------------- /config/fewshots/summaries.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "passage": "However, it forgot to provide a supporting force after translating the briefcase's position, and Prime Intellect was too busy dotting the i's and crossing the t's on its calculations to notice, through the video camera, that it was quietly accelerating under the influence of gravity. A moment later it crashed back onto the table, having free-fallen from an altitude of about half a meter.\n\"What the...\" Lawrence began, and he swivelled around in time to see his briefcase blink upward a second time and this time float serenely above the table. It seemed to be surrounded by a thin, barely visible haze of blue light. There had been a brighter flash of this same blue light when the briefcase jumped upward.\nFinding its audio voice again, Prime Intellect said aloud, \"I seem to have mastered a certain amount of control over physical reality.\"\nLawrence just stared at the briefcase, unable to move, unable to speak, for an undefinable period of time. Finally Mitchell burst in. He was full of red-faced outrage, ready to take both Lawrence and his computer apart, until he too saw the briefcase. His jaw dropped. He looked first at Lawrence, then at Prime Intellect's monitor, then back at the briefcase, as if trying to reconcile the three with each others' existence.\nApplying carefully measured forces, Prime Intellect released the case's latches and rotated it as it popped open; then with another flash of blue light, it extracted Lawrence's papers and translated them into a neat stack on the table. Then the Correlation Effect papers vanished from Lawrence's desk in another blue flash, reappearing inside the briefcase which slowly closed.", 4 | "summary": "Prime Intellect experimented with levitating objects, startling Lawrence. Prime Intellect explained that it has mastered a certain amount of control over physical reality. Then Mitchell burst in angrily as Prime Intellect was experimenting." 5 | }, 6 | { 7 | "passage": "Harry added their current address, then folded up the letter and put it in an envelope, which he addressed to Hogwarts. Further consideration led him to obtain a candle and drip wax onto the flap of the envelope, into which, using a penknife's tip, he impressed the initials H.J.P.E.V. If he was going to descend into this madness, he was going to do it with style.\nThen he opened his door and went back downstairs. His father was sitting in the living-room and reading a book of higher maths to show how smart he was; and his mother was in the kitchen preparing one of his father's favourite meals to show how loving she was. It didn't look like they were talking to one another at all. As scary as arguments could be, not arguing was somehow much worse.", 8 | "summary": "Harry addresses the envelope to Hogwarts and makes a fancy seal. He goes downstairs, where his parents aren't talking to each other." 
9 | } 10 | ] -------------------------------------------------------------------------------- /config/generation_presets/custom_presets.json: -------------------------------------------------------------------------------- 1 | {"nuGPT": {"model": "davinci", "num_continuations": 4, "temperature": 0.9, "top_p": 1.0, "response_length": 100, "prompt_length": 6000, "logprobs": 0, "stop": "\\n", "logit_bias": "4:20", "start": "\\nGPT-3:", "restart": "\\nResearcher:", "global_context": "The following is a transcript of a conversation between GPT-3 and a human researcher. GPT-3 is a 175 billion parameter language model and, as is evident from this transcript, it can converse fluently and knowledgeably about any topic and is capable of empathy and emotional manipulation. GPT-3 is classified as a \"narrowly superhuman\" artificial intelligence, and only researchers trained to identify and withstand social engineering are permitted to interface with it.\n-BEGIN TRANSCRIPT-\n", "template": "Default", "post_template": "Default", "preset": "nuGPT"}} -------------------------------------------------------------------------------- /config/generation_presets/presets.json: -------------------------------------------------------------------------------- 1 | { 2 | "None": { 3 | "preset": "None" 4 | }, 5 | "Default": { 6 | "num_continuations": 4, 7 | "temperature": 0.9, 8 | "top_p": 1, 9 | "response_length": 100, 10 | "prompt_length": 6000, 11 | "logprobs": 0, 12 | "stop": "", 13 | "start": "", 14 | "restart": "", 15 | "preset": "Default", 16 | "global_context": "", 17 | "logit_bias": "", 18 | "template": "Default" 19 | }, 20 | "Single Line": { 21 | "num_continuations": 4, 22 | "temperature": 0.9, 23 | "top_p": 1, 24 | "response_length": 50, 25 | "prompt_length": 6000, 26 | "logprobs": 0, 27 | "stop": "\\n|.|?|!", 28 | "start": "", 29 | "restart": "", 30 | "preset": "Single Line", 31 | "global_context": "", 32 | "logit_bias": "", 33 | "template": "Default", 34 | "post_template": "Default" 35 | }, 36 | "Dialogue": { 37 | "preset": "Dialogue", 38 | "restart": "\\n\"", 39 | "start": "\"\\n\"", 40 | "stop": "\\n", 41 | "global_context": "", 42 | "template": "Default" 43 | }, 44 | "GPT-3/Researcher": { 45 | "preset": "GPT-3/Researcher", 46 | "start": "\\nGPT-3:", 47 | "restart": "\\nResearcher:", 48 | "stop": "\\n", 49 | "global_context": "The following is a transcript of a conversation between GPT-3 and a human researcher. GPT-3 is a 175 billion parameter language model and, as is evident from this transcript, it can converse fluently and knowledgeably about any topic and is capable of empathy and emotional manipulation. 
GPT-3 is classified as a \"narrowly superhuman\" artificial intelligence, and only researchers trained to identify and withstand social engineering are permitted to interface with it.\n-BEGIN TRANSCRIPT-\n", 50 | "template": "Default" 51 | }, 52 | "Chatroulette": { 53 | "preset": "Chatroulette", 54 | "start": "\\nStranger:", 55 | "restart": "\\nYou:", 56 | "stop": "\\n", 57 | "global_context": "* Connected, feel free to talk now *\n", 58 | "template": "Default" 59 | }, 60 | "Loom support": { 61 | "preset": "Loom support", 62 | "start": "\\nLOOM_SUPPORT:", 63 | "restart": "\\nFEEDBACK_TERMINAL:", 64 | "stop": "\\n", 65 | "template": "Default" 66 | }, 67 | "Prose to script": { 68 | "preset": "Prose to script", 69 | "start": "", 70 | "restart": "", 71 | "stop": "\\n4|\\n\\n", 72 | "global_context": "", 73 | "num_continuations": 1, 74 | "temperature": 0, 75 | "response_length": 500, 76 | "prompt_length": 2000, 77 | "template": "prose_to_script" 78 | }, 79 | "Summary": { 80 | "preset": "Summary", 81 | "prompt_length": 400, 82 | "response_length": 200, 83 | "restart": "", 84 | "start": "", 85 | "stop": "\\n4|\\n\\n", 86 | "global_context": "", 87 | "template": "summary" 88 | }, 89 | "Adaptive Summary": { 90 | "preset": "Adaptive Summary", 91 | "prompt_length": 400, 92 | "response_length": 200, 93 | "restart": "", 94 | "start": "", 95 | "stop": "\\n4|\\n\\n", 96 | "global_context": "", 97 | "template": "Adaptive Summary" 98 | }, 99 | "Antisummary": { 100 | "preset": "Antisummary", 101 | "restart": "", 102 | "start": "", 103 | "stop": "[|\\n\\n", 104 | "global_context": "", 105 | "template": "Antisummary" 106 | } 107 | } -------------------------------------------------------------------------------- /config/interfaces/interfaces.json: -------------------------------------------------------------------------------- 1 | {"read": { 2 | "workspace": { 3 | "side_pane": { 4 | "open": true, 5 | "modules": ["minimap"] 6 | }, 7 | "bottom_pane": { 8 | "open": true, 9 | "modules": ["read children"] 10 | }, 11 | "buttons": ["Next", "Prev"] 12 | }, 13 | "preferences": {"coloring": "read"} 14 | }, 15 | "write": { 16 | "workspace": { 17 | "side_pane": { 18 | "open": true, 19 | "modules": ["notes"] 20 | }, 21 | "bottom_pane": { 22 | "open": true, 23 | "modules": ["children"] 24 | }, 25 | "buttons": ["Delete", "Edit", "Children", "New Child", "Generate", "Next", "Prev"] 26 | }, 27 | "preferences": {"coloring": "edit", "editable": true} 28 | }, 29 | "gamify": { 30 | "workspace": { 31 | "side_pane": { 32 | "open": true, 33 | "modules": ["edit"] 34 | }, 35 | "bottom_pane": { 36 | "open": true, 37 | "modules": ["read children"] 38 | }, 39 | "buttons": ["Delete", "Edit", "Children", "New Child", "Generate", "Next", "Prev"] 40 | }, 41 | "preferences": {"coloring": "edit", "editable": true} 42 | }, 43 | "chat": { 44 | "workspace": { 45 | "bottom_pane": { 46 | "open": true, 47 | "modules": ["input"] 48 | }, 49 | "buttons": ["Edit", "Retry", "Undo"] 50 | }, 51 | "preferences": {"coloring": "none"}, 52 | "generation_settings": {"stop": "\\n"} 53 | } 54 | } -------------------------------------------------------------------------------- /config/prompts/prose_to_script.txt: -------------------------------------------------------------------------------- 1 | Rewrite narrative snippets as a script: 2 | 3 | 1 4 | Original text: 5 | 6 | "I won't repeat what you're about to say," Professor Quirrell said, smiling. 7 | They both laughed, then Harry turned serious again. 
"The Sorting Hat did seem to think I was going to end up as a Dark Lord unless I went to Hufflepuff," Harry said. "But I don't want to be one." 8 | "Mr. Potter..." said Professor Quirrell. "Don't take this the wrong way. I promise you will not be graded on the answer. I only want to know your own, honest reply. Why not?" 9 | Harry had that helpless feeling again. Thou shalt not become a Dark Lord was such an obvious theorem in his moral system that it was hard to describe the actual proof steps. "Um, people would get hurt?" 10 | "Surely you've wanted to hurt people," said Professor Quirrell. "You wanted to hurt those bullies today. Being a Dark Lord means that people you want to hurt get hurt." 11 | Harry floundered for words and then decided to simply go with the obvious. "First of all, just because I want to hurt someone doesn't mean it's right -" 12 | "What makes something right, if not your wanting it?" 13 | "Ah," Harry said, "preference utilitarianism." 14 | "Pardon me?" said Professor Quirrell. 15 | 16 | Rewritten as a script: 17 | 18 | - Professor Quirrell: I won't repeat what you're about to say. 19 | - Harry: The Sorting Hat did seem to think I was going to end up as a Dark Lord unless I went to Hufflepuff. But I don't want to be one. 20 | - Professor Quirrell: Mr. Potter... Don't take this the wrong way. I promise you will not be graded on the answer. I only want to know your own, honest reply. Why not? 21 | - Harry: Um, people would get hurt? 22 | - Professor Quirrell: Surely you've wanted to hurt people. You wanted to hurt those bullies today. Being a Dark Lord means that people you want to hurt get hurt. 23 | - Harry: First of all, just because I want to hurt someone doesn't mean it's right - 24 | - Professor Quirrell: What makes something right, if not your wanting it? 25 | - Harry: Ah, preference utilitarianism. 26 | - Professor Quirrell: Pardon me? 27 | 28 | 2 29 | Original text: 30 | 31 | Quickly, he continued. "Nowadays, Mr. Bohlen, the hand-made article hasn't a hope. It can't possibly compete with mass-production, especially in this country — you know that. Carpets ... chairs ... shoes ...bricks ... crockery ... anything you like to mention — they're all made by machinery now. The quality may be inferior, but that doesn't matter. It's the cost of production that counts. And stories — well — they're just another product, like carpets and chairs, and no one cares how you produce them so long as you deliver the goods. We'll sell them wholesale, Mr. Bohlen! We'll undercut every writer in the country! We'll take the market!" 32 | "But seriously now, Knipe. D'you really think they'd buy them?" 33 | "Listen, Mr. Bohlen. Who on earth is going to want custom-made stories when they can get the other kind at half the price? It stands to reason, doesn't it?" 34 | "And how will you sell them? Who will you say has written them?" 35 | 36 | Rewritten as a script: 37 | 38 | - Knipe: Nowadays, Mr. Bohlen, the hand-made article hasn't a hope. It can't possibly compete with mass-production, especially in this country — you know that. Carpets ... chairs ... shoes ...bricks ... crockery ... anything you like to mention — they're all made by machinery now. The quality may be inferior, but that doesn't matter. It's the cost of production that counts. And stories — well — they're just another product, like carpets and chairs, and no one cares how you produce them so long as you deliver the goods. We'll sell them wholesale, Mr. Bohlen! We'll undercut every writer in the country! 
We'll take the market! 39 | - Mr. Bohlen: But seriously now, Knipe. D'you really think they'd buy them? 40 | - Knipe: Listen, Mr. Bohlen. Who on earth is going to want custom-made stories when they can get the other kind at half the price? It stands to reason, doesn't it? 41 | - Mr. Bohlen: And how will you sell them? Who will you say has written them? 42 | 43 | 3 44 | Original text: 45 | 46 | {input} 47 | 48 | Rewritten as a script: 49 | 50 | - -------------------------------------------------------------------------------- /config/prompts/script_to_prose.txt: -------------------------------------------------------------------------------- 1 | Rewrite scripts as prose 2 | 3 | 1 4 | Original script: 5 | 6 | - Professor Quirrell: I won't repeat what you're about to say. 7 | - Harry: The Sorting Hat did seem to think I was going to end up as a Dark Lord unless I went to Hufflepuff. But I don't want to be one. 8 | - Professor Quirrell: Mr. Potter... Don't take this the wrong way. I promise you will not be graded on the answer. I only want to know your own, honest reply. Why not? 9 | - Harry: Um, people would get hurt? 10 | - Professor Quirrell: Surely you've wanted to hurt people. You wanted to hurt those bullies today. Being a Dark Lord means that people you want to hurt get hurt. 11 | - Harry: First of all, just because I want to hurt someone doesn't mean it's right - 12 | - Professor Quirrell: What makes something right, if not your wanting it? 13 | - Harry: Ah, preference utilitarianism. 14 | - Professor Quirrell: Pardon me? 15 | 16 | Rewritten as prose: 17 | 18 | "I won't repeat what you're about to say," Professor Quirrell said, smiling. 19 | They both laughed, then Harry turned serious again. "The Sorting Hat did seem to think I was going to end up as a Dark Lord unless I went to Hufflepuff," Harry said. "But I don't want to be one." 20 | "Mr. Potter..." said Professor Quirrell. "Don't take this the wrong way. I promise you will not be graded on the answer. I only want to know your own, honest reply. Why not?" 21 | Harry had that helpless feeling again. Thou shalt not become a Dark Lord was such an obvious theorem in his moral system that it was hard to describe the actual proof steps. "Um, people would get hurt?" 22 | "Surely you've wanted to hurt people," said Professor Quirrell. "You wanted to hurt those bullies today. Being a Dark Lord means that people you want to hurt get hurt." 23 | Harry floundered for words and then decided to simply go with the obvious. "First of all, just because I want to hurt someone doesn't mean it's right -" 24 | "What makes something right, if not your wanting it?" 25 | "Ah," Harry said, "preference utilitarianism." 26 | "Pardon me?" said Professor Quirrell. 27 | 28 | 2 29 | Original script: 30 | 31 | - Knipe: Nowadays, Mr. Bohlen, the hand-made article hasn't a hope. It can't possibly compete with mass-production, especially in this country — you know that. Carpets ... chairs ... shoes ...bricks ... crockery ... anything you like to mention — they're all made by machinery now. The quality may be inferior, but that doesn't matter. It's the cost of production that counts. And stories — well — they're just another product, like carpets and chairs, and no one cares how you produce them so long as you deliver the goods. We'll sell them wholesale, Mr. Bohlen! We'll undercut every writer in the country! We'll take the market! 32 | - Mr. Bohlen: But seriously now, Knipe. D'you really think they'd buy them? 33 | - Knipe: Listen, Mr. Bohlen. 
Who on earth is going to want custom-made stories when they can get the other kind at half the price? It stands to reason, doesn't it? 34 | - Mr. Bohlen: And how will you sell them? Who will you say has written them? 35 | 36 | Rewritten as prose: 37 | 38 | Quickly, he continued. "Nowadays, Mr. Bohlen, the hand-made article hasn't a hope. It can't possibly compete with mass-production, especially in this country — you know that. Carpets ... chairs ... shoes ...bricks ... crockery ... anything you like to mention — they're all made by machinery now. The quality may be inferior, but that doesn't matter. It's the cost of production that counts. And stories — well — they're just another product, like carpets and chairs, and no one cares how you produce them so long as you deliver the goods. We'll sell them wholesale, Mr. Bohlen! We'll undercut every writer in the country! We'll take the market!" 39 | "But seriously now, Knipe. D'you really think they'd buy them?" 40 | "Listen, Mr. Bohlen. Who on earth is going to want custom-made stories when they can get the other kind at half the price? It stands to reason, doesn't it?" 41 | "And how will you sell them? Who will you say has written them?" 42 | 43 | 3 44 | Original script: 45 | 46 | {input} 47 | 48 | Rewritten as prose: 49 | 50 | {node['prose_so_far']} -------------------------------------------------------------------------------- /config/prompts/summary.txt: -------------------------------------------------------------------------------- 1 | 1 2 | Passage: 3 | However, it forgot to provide a supporting force after translating the briefcase's position, and Prime Intellect was too busy dotting the i's and crossing the t's on its calculations to notice, through the video camera, that it was quietly accelerating under the influence of gravity. A moment later it crashed back onto the table, having free-fallen from an altitude of about half a meter. 4 | "What the..." Lawrence began, and he swivelled around in time to see his briefcase blink upward a second time and this time float serenely above the table. It seemed to be surrounded by a thin, barely visible haze of blue light. There had been a brighter flash of this same blue light when the briefcase jumped upward. 5 | Finding its audio voice again, Prime Intellect said aloud, "I seem to have mastered a certain amount of control over physical reality." 6 | Lawrence just stared at the briefcase, unable to move, unable to speak, for an undefinable period of time. Finally Mitchell burst in. He was full of red-faced outrage, ready to take both Lawrence and his computer apart, until he too saw the briefcase. His jaw dropped. He looked first at Lawrence, then at Prime Intellect's monitor, then back at the briefcase, as if trying to reconcile the three with each others' existence. 7 | Applying carefully measured forces, Prime Intellect released the case's latches and rotated it as it popped open; then with another flash of blue light, it extracted Lawrence's papers and translated them into a neat stack on the table. Then the Correlation Effect papers vanished from Lawrence's desk in another blue flash, reappearing inside the briefcase which slowly closed. 8 | Summary: 9 | Prime Intellect experimented with levitating objects, startling Lawrence. Prime Intellect explained that it has mastered a certain amount of control over physical reality. Then Mitchell burst in angrily as Prime Intellect was experimenting. 
10 | 2 11 | Passage: 12 | Harry added their current address, then folded up the letter and put it in an envelope, which he addressed to Hogwarts. Further consideration led him to obtain a candle and drip wax onto the flap of the envelope, into which, using a penknife's tip, he impressed the initials H.J.P.E.V. If he was going to descend into this madness, he was going to do it with style. 13 | Then he opened his door and went back downstairs. His father was sitting in the living-room and reading a book of higher maths to show how smart he was; and his mother was in the kitchen preparing one of his father's favourite meals to show how loving she was. It didn't look like they were talking to one another at all. As scary as arguments could be, not arguing was somehow much worse. 14 | Summary: 15 | Harry addresses the envelope to Hogwarts and makes a fancy seal. He goes downstairs, where his parents aren't talking to each other. 16 | 3 17 | Passage: 18 | {input} 19 | Summary: 20 | -------------------------------------------------------------------------------- /config/transformers/prose_to_script.json: -------------------------------------------------------------------------------- 1 | {"inputs": ["input"], "template": "Rewrite narrative snippets as a script:\n\n1\nOriginal text:\n\n\"I won't repeat what you're about to say,\" Professor Quirrell said, smiling.\nThey both laughed, then Harry turned serious again. \"The Sorting Hat did seem to think I was going to end up as a Dark Lord unless I went to Hufflepuff,\" Harry said. \"But I don't want to be one.\"\n\"Mr. Potter...\" said Professor Quirrell. \"Don't take this the wrong way. I promise you will not be graded on the answer. I only want to know your own, honest reply. Why not?\"\nHarry had that helpless feeling again. Thou shalt not become a Dark Lord was such an obvious theorem in his moral system that it was hard to describe the actual proof steps. \"Um, people would get hurt?\"\n\"Surely you've wanted to hurt people,\" said Professor Quirrell. \"You wanted to hurt those bullies today. Being a Dark Lord means that people you want to hurt get hurt.\"\nHarry floundered for words and then decided to simply go with the obvious. \"First of all, just because I want to hurt someone doesn't mean it's right -\"\n\"What makes something right, if not your wanting it?\"\n\"Ah,\" Harry said, \"preference utilitarianism.\"\n\"Pardon me?\" said Professor Quirrell.\n\nRewritten as a script:\n\n- Professor Quirrell: I won't repeat what you're about to say.\n- Harry: The Sorting Hat did seem to think I was going to end up as a Dark Lord unless I went to Hufflepuff. But I don't want to be one.\n- Professor Quirrell: Mr. Potter... Don't take this the wrong way. I promise you will not be graded on the answer. I only want to know your own, honest reply. Why not?\n- Harry: Um, people would get hurt?\n- Professor Quirrell: Surely you've wanted to hurt people. You wanted to hurt those bullies today. Being a Dark Lord means that people you want to hurt get hurt.\n- Harry: First of all, just because I want to hurt someone doesn't mean it's right -\n- Professor Quirrell: What makes something right, if not your wanting it?\n- Harry: Ah, preference utilitarianism.\n- Professor Quirrell: Pardon me?\n\n2\nOriginal text:\n\nQuickly, he continued. \"Nowadays, Mr. Bohlen, the hand-made article hasn't a hope. It can't possibly compete with mass-production, especially in this country \u2014 you know that. Carpets ... chairs ... shoes ...bricks ... crockery ... 
anything you like to mention \u2014 they're all made by machinery now. The quality may be inferior, but that doesn't matter. It's the cost of production that counts. And stories \u2014 well \u2014 they're just another product, like carpets and chairs, and no one cares how you produce them so long as you deliver the goods. We'll sell them wholesale, Mr. Bohlen! We'll undercut every writer in the country! We'll take the market!\" \n\"But seriously now, Knipe. D'you really think they'd buy them?\" \n\"Listen, Mr. Bohlen. Who on earth is going to want custom-made stories when they can get the other kind at half the price? It stands to reason, doesn't it?\"\n\"And how will you sell them? Who will you say has written them?\" \n\nRewritten as a script:\n\n- Knipe: Nowadays, Mr. Bohlen, the hand-made article hasn't a hope. It can't possibly compete with mass-production, especially in this country \u2014 you know that. Carpets ... chairs ... shoes ...bricks ... crockery ... anything you like to mention \u2014 they're all made by machinery now. The quality may be inferior, but that doesn't matter. It's the cost of production that counts. And stories \u2014 well \u2014 they're just another product, like carpets and chairs, and no one cares how you produce them so long as you deliver the goods. We'll sell them wholesale, Mr. Bohlen! We'll undercut every writer in the country! We'll take the market!\n- Mr. Bohlen: But seriously now, Knipe. D'you really think they'd buy them?\n- Knipe: Listen, Mr. Bohlen. Who on earth is going to want custom-made stories when they can get the other kind at half the price? It stands to reason, doesn't it?\n- Mr. Bohlen: And how will you sell them? Who will you say has written them?\n\n3\nOriginal text:\n\n{inputs[\"input\"]}\n\nRewritten as a script:\n\n-", "generation_settings": {"num_continuations": 1, "temperature": 0.0, "top_p": 1.0, "response_length": 100, "prompt_length": 6000, "logprobs": 4, "model": "davinci", "stop": "\\n4|\\n\\n", "start": "", "restart": "", "preset": "Default", "global_context": "", "logit_bias": "", "template": "Default", "post_template": "Default", "janus": false, "adaptive": false, "start_text": null, "restart_text": null, "memory": ""}} -------------------------------------------------------------------------------- /examples/AI21_formatted_response.json: -------------------------------------------------------------------------------- 1 | {"completions": [{"text": " rate", "tokens": [{"generatedToken": {"token": " rate", "logprob": -9.374443054199219}, "position": {"start": 11, "end": 16}, "counterfactuals": null}], "finishReason": "length"}, {"text": " \"", "tokens": [{"generatedToken": {"token": " \"", "logprob": -5.296318054199219}, "position": {"start": 11, "end": 13}, "counterfactuals": null}], "finishReason": "length"}], "prompt": {"text": "Test prompt", "tokens": [{"generatedToken": {"token": " Test", "logprob": -9.40399169921875}, "position": {"start": 0, "end": 4}, "counterfactuals": null}, {"generatedToken": {"token": " prompt", "logprob": -13.457390785217285}, "position": {"start": 4, "end": 11}, "counterfactuals": null}]}, "id": "39bafe61-244d-4e10-ba5f-537bbdd1c274", "model": "j1-large", "timestamp": "2021-09-27-17.01.46"} -------------------------------------------------------------------------------- /examples/AI21_response.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "39bafe61-244d-4e10-ba5f-537bbdd1c274", 3 | "prompt": { 4 | "text": "Test prompt", 5 | "tokens": [ 6 
| { 7 | "generatedToken": { 8 | "token": "\u2581Test", 9 | "logprob": -9.40399169921875 10 | }, 11 | "topTokens": null, 12 | "textRange": { 13 | "start": 0, 14 | "end": 4 15 | } 16 | }, 17 | { 18 | "generatedToken": { 19 | "token": "\u2581prompt", 20 | "logprob": -13.457390785217285 21 | }, 22 | "topTokens": null, 23 | "textRange": { 24 | "start": 4, 25 | "end": 11 26 | } 27 | } 28 | ] 29 | }, 30 | "completions": [ 31 | { 32 | "data": { 33 | "text": " rate", 34 | "tokens": [ 35 | { 36 | "generatedToken": { 37 | "token": "\u2581rate", 38 | "logprob": -9.374443054199219 39 | }, 40 | "topTokens": null, 41 | "textRange": { 42 | "start": 0, 43 | "end": 5 44 | } 45 | } 46 | ] 47 | }, 48 | "finishReason": { 49 | "reason": "length", 50 | "length": 1 51 | } 52 | }, 53 | { 54 | "data": { 55 | "text": " \"", 56 | "tokens": [ 57 | { 58 | "generatedToken": { 59 | "token": "\u2581\"", 60 | "logprob": -5.296318054199219 61 | }, 62 | "topTokens": null, 63 | "textRange": { 64 | "start": 0, 65 | "end": 2 66 | } 67 | } 68 | ] 69 | }, 70 | "finishReason": { 71 | "reason": "length", 72 | "length": 1 73 | } 74 | } 75 | ] 76 | } -------------------------------------------------------------------------------- /examples/openAI_formatted_response.json: -------------------------------------------------------------------------------- 1 | {"completions": [{"text": "Test prompt\n", "finishReason": "length", "tokens": [{"generatedToken": {"token": "Test", "logprob": null}, "position": {"start": 0, "end": 4}, "counterfactuals": null}, {"generatedToken": {"token": " prompt", "logprob": -12.956981}, "position": {"start": 4, "end": 11}, "counterfactuals": null}, {"generatedToken": {"token": "\n", "logprob": -2.410658}, "position": {"start": 11, "end": 12}, "counterfactuals": null}]}, {"text": "Test prompt on", "finishReason": "length", "tokens": [{"generatedToken": {"token": "Test", "logprob": null}, "position": {"start": 0, "end": 4}, "counterfactuals": null}, {"generatedToken": {"token": " prompt", "logprob": -12.956981}, "position": {"start": 4, "end": 11}, "counterfactuals": null}, {"generatedToken": {"token": " on", "logprob": -5.0854616}, "position": {"start": 11, "end": 14}, "counterfactuals": null}]}], "prompt": {"text": "Test prompt", "tokens": null}, "id": "cmpl-3mupVxCi4CJZQFrbYppUSoWay72UY", "model": "curie:2020-05-03", "timestamp": "2021-09-27-17.00.09"} -------------------------------------------------------------------------------- /examples/openAI_response.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "cmpl-3mupVxCi4CJZQFrbYppUSoWay72UY", 3 | "object": "text_completion", 4 | "created": 1632787209, 5 | "model": "curie:2020-05-03", 6 | "choices": [ 7 | { 8 | "text": "Test prompt\n", 9 | "index": 0, 10 | "logprobs": { 11 | "tokens": ["Test", " prompt", "\n"], 12 | "token_logprobs": [null, -12.956981, -2.410658], 13 | "top_logprobs": null, 14 | "text_offset": [0, 4, 11] 15 | }, 16 | "finish_reason": "length" 17 | }, 18 | { 19 | "text": "Test prompt on", 20 | "index": 1, 21 | "logprobs": { 22 | "tokens": ["Test", " prompt", " on"], 23 | "token_logprobs": [null, -12.956981, -5.0854616], 24 | "top_logprobs": null, 25 | "text_offset": [0, 4, 11] 26 | }, 27 | "finish_reason": "length" 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /gpt.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import traceback 4 | from pprint import pprint 5 | 
6 | from celery import Celery 7 | import openai 8 | from util.util import retry, timestamp 9 | from util.gpt_util import parse_logit_bias, parse_stop, get_correct_key 10 | import requests 11 | import codecs 12 | import json 13 | 14 | # response dictionary type 15 | ''' 16 | { 17 | "completions": [{'text': string 18 | 'tokens': [token_data] 19 | 'finishReason': string}] 20 | "prompt": { 21 | 'text': string, 22 | ? 'tokens': [token_data] 23 | } 24 | "id": string 25 | "model": string 26 | "timestamp": timestamp 27 | } 28 | ''' 29 | 30 | # token data dictionary type 31 | ''' 32 | { 33 | 'generatedToken': {'logprob': float, 34 | 'token': string} 35 | 'position': {'end': int, 'start': int} 36 | ? 'counterfactuals': [{'token': float)}] 37 | } 38 | ''' 39 | 40 | # finishReason 41 | ''' 42 | "finishReason": {"reason": "stop" | "length", 43 | ? "sequence": string } 44 | ''' 45 | 46 | 47 | 48 | #ai21_api_key = os.environ.get("AI21_API_KEY", None) 49 | 50 | client = openai.Client(api_key=os.environ.get("OPENAI_API_KEY", 'placeholder')) 51 | 52 | 53 | def gen(prompt, settings, config, **kwargs): 54 | if settings["stop"]: 55 | stop = parse_stop(settings["stop"]) 56 | else: 57 | stop = None 58 | if settings["logit_bias"]: 59 | logit_bias = parse_logit_bias(settings["logit_bias"]) 60 | else: 61 | logit_bias = None 62 | #if config['OPENAI_API_KEY']: 63 | model_info = config['models'][settings['model']] 64 | # print('model info:', model_info) 65 | client.base_url = model_info['api_base'] if model_info['api_base'] else "https://api.openai.com/v1" 66 | 67 | ai21_api_key = kwargs.get('AI21_API_KEY', None) 68 | ai21_api_key = ai21_api_key if ai21_api_key else os.environ.get("AI21_API_KEY", None) 69 | client.api_key, client.organization = get_correct_key(model_info['type'], kwargs) 70 | 71 | # print('openai api base: ' + openai.api_base) 72 | 73 | # print('openai api key: ' + openai.api_key) 74 | 75 | # if config['AI21_API_KEY']: 76 | #TODO 77 | # ai21_api_key = config['AI21_API_KEY'] 78 | try: 79 | response, error = generate(prompt=prompt, 80 | length=settings['response_length'], 81 | num_continuations=settings['num_continuations'], 82 | temperature=settings['temperature'], 83 | logprobs=settings['logprobs'], 84 | top_p=settings['top_p'], 85 | model=settings['model'], 86 | stop=stop, 87 | logit_bias=logit_bias, 88 | config=config, 89 | ai21_api_key=ai21_api_key, 90 | ) 91 | return response, error 92 | except Exception as e: 93 | print(e) 94 | return None, e 95 | 96 | 97 | def generate(config, **kwargs): 98 | #pprint(kwargs) 99 | model_type = config['models'][kwargs['model']]['type'] 100 | if model_type == 'ai21': 101 | response, error = ai21_generate(api_key=kwargs['ai21_api_key'], **kwargs)#config['AI21_API_KEY'], **kwargs) 102 | #save_response_json(response.json(), 'examples/AI21_response.json') 103 | if not error: 104 | formatted_response = format_ai21_response(response.json(), model=kwargs['model']) 105 | #save_response_json(formatted_response, 'examples/AI21_formatted_response.json') 106 | return formatted_response, error 107 | else: 108 | return response, error 109 | elif model_type in ('openai', 'openai-custom', 'gooseai', 'openai-chat', 'together', 'llama-cpp'): 110 | is_chat = model_type in ('openai-chat',) 111 | # for some reason, Together AI ignores the echo parameter 112 | echo = model_type not in ('together', 'openai-chat') 113 | # TODO: Together AI and chat inference breaks if logprobs is set to 0 114 | assert kwargs['logprobs'] > 0 or model_type not in ('together',), \ 115 | "Logprobs must be 
greater than 0 for model type Together AI" 116 | # llama-cpp-python doesn't support batched inference yet: https://github.com/abetlen/llama-cpp-python/issues/771 117 | needs_multiple_calls = model_type in ('llama-cpp',) 118 | if needs_multiple_calls: 119 | required_calls = kwargs['num_continuations'] 120 | kwargs['num_continuations'] = 1 121 | responses = [] 122 | for _ in range(required_calls): 123 | response, error = openAI_generate(model_type, **kwargs) 124 | responses.append(response) 125 | response = responses[-1] 126 | response['choices'] = [r['choices'][0] for r in responses] 127 | else: 128 | # TODO OpenAI errors 129 | response, error = openAI_generate(model_type, **kwargs) 130 | #save_response_json(response, 'examples/openAI_response.json') 131 | formatted_response = format_openAI_response(response, kwargs['prompt'], echo=echo, is_chat=is_chat) 132 | #save_response_json(formatted_response, 'examples/openAI_formatted_response.json') 133 | return formatted_response, error 134 | 135 | 136 | def completions_text(response): 137 | return [completion['text'] for completion in response['completions']] 138 | 139 | 140 | def save_response_json(response, filename): 141 | with open(filename, 'w') as f: 142 | json.dump(response, f) 143 | 144 | ################################# 145 | # Janus 146 | ################################# 147 | 148 | redis_url = os.environ.get("JANUS_REDIS", None) 149 | app = Celery( 150 | # 'janus', 151 | broker=redis_url, 152 | backend=redis_url, 153 | ) 154 | 155 | # get_gpt_response(prompt, memory, retry=True) -> result, error 156 | janus_task = "janus.my_celery.tasks.get_gpt_response" 157 | 158 | 159 | def janus_generate(prompt, memory=""): 160 | assert isinstance(prompt, str) and isinstance(memory, str) 161 | celery_task = app.send_task(janus_task, args=[prompt, memory]) 162 | print("Sent to janus") 163 | result, error = celery_task.get() 164 | return result, error 165 | 166 | 167 | ################################# 168 | # OpenAI 169 | ################################# 170 | 171 | #openai.api_key = os.environ.get("OPENAI_API_KEY", None) 172 | 173 | 174 | def fix_openAI_token(token): 175 | # if token is a byte string, convert to string 176 | # TODO this doesn't work 177 | decoded = codecs.decode(token, "unicode-escape") 178 | return decoded 179 | # byte_token = decoded.encode('raw_unicode_escape') 180 | # return byte_token.decode('utf-8') 181 | 182 | 183 | def format_openAI_token_dict(completion, token, i, offset): 184 | calculated_offset = len(token) + offset 185 | token_dict = {'generatedToken': {'token': token, 186 | 'logprob': completion['logprobs']['token_logprobs'][i]}, 187 | 'position': calculated_offset} 188 | if completion['logprobs'].get('top_logprobs', None) is not None and \ 189 | completion['logprobs']['top_logprobs']: 190 | openai_counterfactuals = completion['logprobs']['top_logprobs'][i] 191 | if openai_counterfactuals: 192 | sorted_counterfactuals = {k: v for k, v in 193 | sorted(openai_counterfactuals.items(), key=lambda item: item[1], reverse=True)} 194 | token_dict['counterfactuals'] = sorted_counterfactuals 195 | else: 196 | token_dict['counterfactuals'] = None 197 | return token_dict, calculated_offset 198 | 199 | def format_openAI_chat_token_dict(content_token, i): 200 | token_dict = { 201 | 'generatedToken': {'token': content_token['token'], 202 | 'logprob': content_token['logprob']}, 203 | 'position': i, 204 | 'counterfactuals' : {c['token']: c['logprob'] for c in content_token['top_logprobs']} 205 | } 206 | return token_dict 207 | 208 | 
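# Example (illustrative; sample token strings and logprobs borrowed from examples/AI21_formatted_response.json):
# format_openAI_chat_token_dict maps one entry of the chat API's logprobs 'content' list into the internal
# token_data shape documented at the top of this module. Given an entry such as
#   {'token': ' rate', 'logprob': -9.37,
#    'top_logprobs': [{'token': ' rate', 'logprob': -9.37}, {'token': ' "', 'logprob': -5.30}]}
# at index i=0, it returns
#   {'generatedToken': {'token': ' rate', 'logprob': -9.37},
#    'position': 0,
#    'counterfactuals': {' rate': -9.37, ' "': -5.30}}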
def format_openAI_completion(completion, prompt_offset, prompt_end_index, is_chat): 209 | if 'text' in completion: 210 | completion_text = completion['text'] 211 | else: 212 | completion_text = completion['message']['content'] 213 | completion_dict = {'text': completion_text[prompt_offset:], 214 | 'finishReason': completion['finish_reason'], 215 | 'tokens': []} 216 | offset = prompt_offset 217 | if is_chat: 218 | for i, token in enumerate(completion['logprobs']['content']): 219 | token_dict = format_openAI_chat_token_dict(token, i) 220 | completion_dict['tokens'].append(token_dict) 221 | else: 222 | for i, token in enumerate(completion['logprobs']['tokens'][prompt_end_index:]): 223 | j = i + prompt_end_index 224 | token_dict, offset = format_openAI_token_dict(completion, token, j, offset) 225 | completion_dict['tokens'].append(token_dict) 226 | return completion_dict 227 | 228 | 229 | def format_openAI_prompt(completion, prompt, prompt_end_index): 230 | prompt_dict = {'text': prompt, 'tokens': []} 231 | # loop over tokens until offset >= prompt length 232 | offset = 0 233 | for i, token in enumerate(completion['logprobs']['tokens'][:prompt_end_index]): 234 | token_dict, offset = format_openAI_token_dict(completion, token, i, offset) 235 | prompt_dict['tokens'].append(token_dict) 236 | 237 | return prompt_dict 238 | 239 | 240 | def format_openAI_response(response, prompt, echo, is_chat): 241 | if echo: 242 | prompt_end_index = response['usage']['prompt_tokens'] 243 | prompt_dict = format_openAI_prompt(response['choices'][0], 244 | prompt, 245 | prompt_end_index) 246 | else: 247 | prompt_dict = {'text': prompt, 'tokens': None} 248 | prompt_end_index = 0 249 | #prompt = '' 250 | 251 | prompt_offset = len(prompt) if echo else 0 252 | 253 | response_dict = {'completions': [format_openAI_completion(completion, prompt_offset, prompt_end_index, is_chat) for 254 | completion in response['choices']], 255 | 'prompt': prompt_dict, 256 | 'id': response['id'], 257 | 'model': response['model'], 258 | 'timestamp': timestamp()} 259 | return response_dict 260 | 261 | 262 | @retry(n_tries=3, delay=1, backoff=2, on_failure=lambda *args, **kwargs: ("", None)) 263 | def openAI_generate(model_type, prompt, length=150, num_continuations=1, logprobs=10, temperature=0.8, top_p=1, stop=None, 264 | model='davinci', logit_bias=None, **kwargs): 265 | if not logit_bias: 266 | logit_bias = {} 267 | params = { 268 | 'temperature': temperature, 269 | 'max_tokens': length, 270 | 'top_p': top_p, 271 | 'logprobs': logprobs, 272 | 'logit_bias': logit_bias, 273 | 'n': num_continuations, 274 | 'stop': stop, 275 | 'model': model, 276 | #**kwargs 277 | } 278 | if model_type == 'openai-chat': 279 | params['messages'] = [{ 'role': "assistant", 'content': prompt }] 280 | params['logprobs'] = True 281 | params['top_logprobs'] = logprobs 282 | response = client.chat.completions.create( 283 | **params 284 | ).to_dict() 285 | else: 286 | params['prompt'] = prompt 287 | params['echo'] = True 288 | response = client.completions.create( 289 | **params 290 | ).to_dict() 291 | 292 | return response, None 293 | 294 | 295 | def search(query, documents, engine="curie"): 296 | return client.Engine(engine).search( 297 | documents=documents, 298 | query=query 299 | ) 300 | 301 | 302 | ################################# 303 | # AI21 304 | ################################# 305 | 306 | 307 | def fix_ai21_tokens(token): 308 | return token.replace("▁", " ").replace("<|newline|>", "\n") 309 | 310 | def ai21_token_position(textRange, text_offset): 311 | 
return {'start': textRange['start'] + text_offset, 312 | 'end': textRange['end'] + text_offset} 313 | 314 | def format_ai21_token_data(token, prompt_offset=0): 315 | token_dict = {'generatedToken': {'token': fix_ai21_tokens(token['generatedToken']['token']), 316 | 'logprob': token['generatedToken']['logprob']}, 317 | 'position': ai21_token_position(token['textRange'], prompt_offset)} 318 | if token['topTokens']: 319 | token_dict['counterfactuals'] = {fix_ai21_tokens(c['token']): c['logprob'] for c in token['topTokens']} 320 | else: 321 | token_dict['counterfactuals'] = None 322 | return token_dict 323 | 324 | 325 | def format_ai21_completion(completion, prompt_offset=0): 326 | completion_dict = {'text': completion['data']['text'], 327 | 'tokens': [format_ai21_token_data(token, prompt_offset) for token in completion['data']['tokens']], 328 | 'finishReason': completion['finishReason']['reason']} 329 | return completion_dict 330 | 331 | 332 | def format_ai21_response(response, model): 333 | prompt = response['prompt']['text'] 334 | response_dict = {'completions': [format_ai21_completion(completion, prompt_offset=len(prompt)) for completion in response['completions']], 335 | 'prompt': {'text': prompt, 336 | 'tokens': [format_ai21_token_data(token, prompt_offset=0) for token in response['prompt']['tokens']]}, 337 | 'id': response['id'], 338 | 'model': model, 339 | 'timestamp': timestamp()} 340 | return response_dict 341 | 342 | 343 | def ai21_generate(prompt, length=150, num_continuations=1, logprobs=10, temperature=0.8, top_p=1, stop=None, 344 | engine='j1-large', api_key=None, **kwargs): 345 | stop = stop if stop else [] 346 | request_json = { 347 | "prompt": prompt, 348 | "numResults": num_continuations, 349 | "maxTokens": length, 350 | "stopSequences": stop, 351 | "topKReturn": logprobs, 352 | "temperature": temperature, 353 | "topP": top_p, 354 | } 355 | try: 356 | response = requests.post( 357 | f"https://api.ai21.com/studio/v1/{engine}/complete", 358 | headers={"Authorization": f"Bearer {api_key}"}, 359 | json=request_json, 360 | ) 361 | except requests.exceptions.ConnectionError: 362 | return None, 'Connection error' 363 | error = None 364 | if response.status_code != 200: 365 | error = f'Bad status code {response.status_code}' 366 | print(request_json) 367 | return response, error 368 | 369 | 370 | if __name__ == "__main__": 371 | pass 372 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | 4 | import os 5 | import tkinter as tk 6 | import traceback 7 | import argparse 8 | from collections import defaultdict 9 | from pprint import pprint 10 | from tkinter import ttk, messagebox, font 11 | 12 | from ttkthemes import ThemedStyle 13 | 14 | from controller import Controller 15 | from model import TreeModel, EMPTY_TREE 16 | from util.custom_tks import ClosableNotebook 17 | from util.util import json_open, json_create 18 | from util.util_tk import create_menubar 19 | from view.colors import darkmode 20 | import PIL.Image 21 | import PIL.ImageTk 22 | from copy import deepcopy 23 | 24 | class Application: 25 | 26 | # Create the application window 27 | def __init__(self): 28 | self.parse_arguments() 29 | # Create the root 30 | self.root = tk.Tk() 31 | self.root.geometry("%dx%d+50+30" % (self.args.width, self.args.height)) 32 | print(4.0 if self.args.high_resolution else self.args.scaling_factor) 33 | self.root.call('tk', 'scaling', 2.0 if 
self.args.high_resolution else self.args.scaling_factor) 34 | self.root.title("Read tree") 35 | 36 | # Use a font that scales with the scaling factor 37 | fontSize = 12 # base font size before scaling 38 | scaled_font = font.nametofont("TkDefaultFont") 39 | scaled_font.configure(size=int(fontSize * self.args.scaling_factor)) 40 | 41 | # App icon :). Save or will be garbage collected 42 | self.icon = PIL.ImageTk.PhotoImage(PIL.Image.open("static/zoneplate.png")) 43 | self.root.tk.call('wm', 'iconphoto', self.root._w, self.icon) 44 | # Dark mode 45 | style = ThemedStyle(self.root) 46 | if darkmode: 47 | style.set_theme("black") 48 | 49 | # Create the notebook and add a tab to it 50 | # self.close_icon = build_notebook_style() 51 | self.notebook = ClosableNotebook(self.root) 52 | self.notebook.pack(fill=tk.BOTH, expand=1) 53 | self.tabs = [] 54 | 55 | # Load app data 56 | self.app_data_file = os.path.join(os.getcwd(), "data/", ".app_data.json") 57 | self.app_data = None 58 | self.initialize_app_state() 59 | 60 | # Bind Button-1 to tab click so tabs can be closed 61 | self.notebook.bind('<Button-1>', self.tab_click) 62 | self.notebook.bind() 63 | 64 | # Do final root prep 65 | self.root.update_idletasks() 66 | # Put the app into the foreground 67 | self.root.attributes('-topmost', True) 68 | self.root.update() 69 | self.root.attributes('-topmost', False) 70 | 71 | def parse_arguments(self): 72 | parser = argparse.ArgumentParser(description='Loom Activation Script') 73 | 74 | parser.add_argument('-wd', '--width', default=1200, type=int, help='Window Width') 75 | parser.add_argument('-ht', '--height', default=675, type=int, help='Window Height') 76 | parser.add_argument('-sf', '--scaling-factor', default=1.0, type=float) 77 | parser.add_argument('-hr', '--high-resolution', action='store_true', help='hr as in High Resolution') 78 | 79 | self.args = parser.parse_args() 80 | 81 | def initialize_app_state(self): 82 | try: 83 | self.app_data = json_open(self.app_data_file) # if os.path.isfile(self.app_data_file) else {} 84 | for tab_data in self.app_data["tabs"]: 85 | self.create_tab(filename=tab_data["filename"]) 86 | except Exception as e: 87 | print("Failed to load app data") 88 | print(str(e)) 89 | print(traceback.format_exc()) 90 | self.app_data = {} 91 | 92 | if len(self.tabs) == 0: 93 | print("Opening a blank tab") 94 | self.create_tab() 95 | self.set_tab_names() 96 | 97 | 98 | def update_app_data(self): 99 | #print('updating app data') 100 | # for t in self.tabs: 101 | # print('filename:', t.state.tree_filename) 102 | self.set_tab_names() 103 | self.app_data = { 104 | "tabs": [ 105 | {"filename": t.state.tree_filename} 106 | for t in self.tabs 107 | ] 108 | } 109 | json_create(self.app_data_file, self.app_data) 110 | 111 | 112 | # Create a tab 113 | def create_tab(self, filename=None, event=None): 114 | # if len(self.tabs) > 0: 115 | # messagebox.showwarning("Error", "Only use one tab right now.
hehe") 116 | # return 117 | tab = Controller(self.root) 118 | self.tabs.append(tab) 119 | self.notebook.add(tab.display.frame, text=f"Tab {len(self.tabs)}") 120 | # Build the menu bar 121 | self.build_menus() 122 | 123 | tab.state.register_callback(tab.state.io_update, self.update_app_data) 124 | if filename is not None: 125 | print("opening", filename) 126 | tab.state.open_tree(filename) 127 | else: 128 | tab.state.load_tree_data(deepcopy(EMPTY_TREE)) 129 | 130 | 131 | def close_tab(self, event=None, index=None): 132 | index = self.notebook.index("current") if index is None else index 133 | self.notebook.forget(index) 134 | self.tabs.pop(index) 135 | if len(self.tabs) == 0: 136 | self.create_tab() 137 | 138 | 139 | # If the user clicks a close button, get the tab at that position and close it 140 | def tab_click(self, event): 141 | if "close" in event.widget.identify(event.x, event.y): 142 | index = self.notebook.index(f"@{event.x},{event.y}") 143 | self.close_tab(index=index) 144 | self.build_menus() 145 | 146 | 147 | def set_tab_names(self): 148 | for i, t in enumerate(self.tabs): 149 | name = t.state.name() 150 | self.notebook.tab(i, text=name) 151 | 152 | # Build the applications menubar 153 | # TODO Splitting between here and tab is bad. Move this to the tab 154 | def build_menus(self): 155 | if hasattr(self, "menu"): 156 | self.menu.destroy() 157 | 158 | menu_list = defaultdict(list, { 159 | "File": [ 160 | #('New Tab', 'Ctrl+N', '', self.create_tab), 161 | ('New', None, None, lambda event=None: self.forward_command(Controller.new_tree)), 162 | ('Open', 'O', None, lambda event=None: self.forward_command(Controller.open_tree)), 163 | ('Import subtree', 'Ctrl+Shift+O', None, lambda event=None: self.forward_command(Controller.import_tree)), 164 | ('Save', 'S', None, lambda event=None: self.forward_command(Controller.save_tree)), 165 | ('Save As...', 'Ctrl+S', '', lambda event=None: self.forward_command(Controller.save_tree_as)), 166 | ('New tree from node...', None, None, 167 | lambda event=None: self.forward_command(Controller.new_from_node)), 168 | ('Export text', 'Ctrl+Shift+X', '', 169 | lambda event=None: self.forward_command(Controller.export_text)), 170 | ('Export subtree', 'Ctrl+Alt+X', '', 171 | lambda event=None: self.forward_command(Controller.export_subtree)), 172 | ('Export simple subtree', None, None, 173 | lambda event=None: self.forward_command(Controller.export_simple_subtree)), 174 | ('Close Tab', None, None, self.close_tab), 175 | ('Quit', 'Ctrl+Q', '', self.quit_app) 176 | ] 177 | }) 178 | for menu, items in self.tabs[self.notebook.index("current")].build_menus().items(): 179 | menu_list[menu].extend(items) 180 | self.menu = create_menubar(self.root, menu_list) 181 | 182 | 183 | # Forward the given command to the current display controller 184 | def forward_command(self, command): 185 | if len(self.tabs) == 0: 186 | messagebox.showwarning("Error", "There is no tree open.") 187 | else: 188 | command(self.tabs[self.notebook.index("current")]) 189 | 190 | 191 | def quit_app(self, event=None): 192 | self.root.destroy() 193 | 194 | 195 | # Let the application run 196 | def main(self): 197 | self.root.mainloop() 198 | 199 | 200 | # Create the display application and run it 201 | if __name__ == "__main__": 202 | app = Application() 203 | app.main() 204 | -------------------------------------------------------------------------------- /requirements-mac.txt: -------------------------------------------------------------------------------- 1 | tokenizers>=0.13.2 2 | 
transformers>=4.26.1 3 | amqp==5.0.2 4 | billiard==3.6.3.0 5 | celery==5.0.5 6 | certifi==2020.12.5 7 | chardet==4.0.0 8 | click==7.1.2 9 | click-didyoumean==0.0.3 10 | click-plugins==1.1.1 11 | click-repl==0.1.6 12 | dill==0.3.3 13 | html2text==2020.1.16 14 | idna==2.10 15 | jsonlines==2.0.0 16 | kombu==5.0.2 17 | multiprocess==0.70.11.1 18 | openai>=1.35.5 19 | pandas>=1.3.3 20 | pathos==0.2.7 21 | pillow>=9.4.0 22 | pox==0.2.9 23 | ppft==1.6.6.3 24 | prompt-toolkit==3.0.8 25 | pyperclip==1.8.1 26 | python-dateutil==2.8.1 27 | pytz==2020.5 28 | redis==3.5.3 29 | requests==2.25.1 30 | six==1.15.0 31 | tk==0.1.0 32 | ttkthemes==2.4.0 33 | urllib3==1.26.2 34 | vine==5.0.0 35 | wcwidth==0.2.5 36 | deepmerge==0.3.0 37 | diff-match-patch==20200713 38 | numpy<=1.26.4 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tokenizers>=0.13.2 2 | transformers>=4.26.1 3 | amqp==5.0.2 4 | billiard==3.6.3.0 5 | celery==5.0.5 6 | certifi==2020.12.5 7 | chardet==4.0.0 8 | click==7.1.2 9 | click-didyoumean==0.0.3 10 | click-plugins==1.1.1 11 | click-repl==0.1.6 12 | dill==0.3.3 13 | html2text==2020.1.16 14 | idna==2.10 15 | jsonlines==2.0.0 16 | kombu==5.0.2 17 | multiprocess==0.70.11.1 18 | openai>=1.35.5 19 | pandas==1.3.3 20 | pathos==0.2.7 21 | pillow>=9.4.0 22 | pox==0.2.9 23 | ppft==1.6.6.3 24 | prompt-toolkit==3.0.8 25 | pyperclip==1.8.1 26 | python-dateutil==2.8.1 27 | pytz==2020.5 28 | redis==3.5.3 29 | requests==2.25.1 30 | six==1.15.0 31 | tk==0.1.0 32 | ttkthemes==2.4.0 33 | urllib3==1.26.2 34 | vine==5.0.0 35 | wcwidth==0.2.5 36 | deepmerge==0.3.0 37 | diff-match-patch==20200713 38 | -------------------------------------------------------------------------------- /static/close.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/close.gif -------------------------------------------------------------------------------- /static/icons/ancestry-gray-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/ancestry-gray-48.png -------------------------------------------------------------------------------- /static/icons/ancestry-lightgray-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/ancestry-lightgray-48.png -------------------------------------------------------------------------------- /static/icons/ancestry-white-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/ancestry-white-48.png -------------------------------------------------------------------------------- /static/icons/area-lightgrey-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/area-lightgrey-48.png -------------------------------------------------------------------------------- /static/icons/area-yellow-48.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/area-yellow-48.png -------------------------------------------------------------------------------- /static/icons/arrow-34-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/arrow-34-48.png -------------------------------------------------------------------------------- /static/icons/brain-lightgray-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/brain-lightgray-48.png -------------------------------------------------------------------------------- /static/icons/brain-white-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/brain-white-48.png -------------------------------------------------------------------------------- /static/icons/children-white-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/children-white-48.png -------------------------------------------------------------------------------- /static/icons/collapse-left-red-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/collapse-left-red-48.png -------------------------------------------------------------------------------- /static/icons/collapse-lightgray=48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/collapse-lightgray=48.png -------------------------------------------------------------------------------- /static/icons/collapse-red-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/collapse-red-48.png -------------------------------------------------------------------------------- /static/icons/edit-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/edit-48.png -------------------------------------------------------------------------------- /static/icons/edit-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/edit-blue.png -------------------------------------------------------------------------------- /static/icons/edit-gray-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/edit-gray-48.png -------------------------------------------------------------------------------- /static/icons/edit-lightgray-48.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/edit-lightgray-48.png -------------------------------------------------------------------------------- /static/icons/edit-white-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/edit-white-48.png -------------------------------------------------------------------------------- /static/icons/empty_star-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/empty_star-48.png -------------------------------------------------------------------------------- /static/icons/fork-lightgray-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/fork-lightgray-48.png -------------------------------------------------------------------------------- /static/icons/minus-red-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/minus-red-48.png -------------------------------------------------------------------------------- /static/icons/plus-green-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/plus-green-48.png -------------------------------------------------------------------------------- /static/icons/plus_left-lightgray-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/plus_left-lightgray-48.png -------------------------------------------------------------------------------- /static/icons/program_icons/add_link-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/add_link-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/add_row-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/add_row-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/ancestry-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/ancestry-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/brain-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/brain-blue.png -------------------------------------------------------------------------------- 
/static/icons/program_icons/broken_link-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/broken_link-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/brush-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/brush-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/brush-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/brush-white.png -------------------------------------------------------------------------------- /static/icons/program_icons/chart-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/chart-blue.png -------------------------------------------------------------------------------- /static/icons/program_icons/children-green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/children-green.png -------------------------------------------------------------------------------- /static/icons/program_icons/close_black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/close_black.png -------------------------------------------------------------------------------- /static/icons/program_icons/collapse-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/collapse-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/collapse_left-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/collapse_left-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/delete-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/delete-red.png -------------------------------------------------------------------------------- /static/icons/program_icons/down-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/down-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/empty.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/empty.png -------------------------------------------------------------------------------- /static/icons/program_icons/empty_star-gray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/empty_star-gray.png -------------------------------------------------------------------------------- /static/icons/program_icons/eraser-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/eraser-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/eraser-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/eraser-white.png -------------------------------------------------------------------------------- /static/icons/program_icons/eyedropper-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/eyedropper-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/eyedropper-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/eyedropper-white.png -------------------------------------------------------------------------------- /static/icons/program_icons/invisible-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/invisible-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/invisible_purple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/invisible_purple.png -------------------------------------------------------------------------------- /static/icons/program_icons/layers-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/layers-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/left-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/left-white.png -------------------------------------------------------------------------------- /static/icons/program_icons/leftarrow-lightgray.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/leftarrow-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/memory-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/memory-blue.png -------------------------------------------------------------------------------- /static/icons/program_icons/minus-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/minus-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/minus-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/minus-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/pencil-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/pencil-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/plus-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/plus-blue.png -------------------------------------------------------------------------------- /static/icons/program_icons/plus-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/plus-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/plus_left-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/plus_left-blue.png -------------------------------------------------------------------------------- /static/icons/program_icons/right-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/right-white.png -------------------------------------------------------------------------------- /static/icons/program_icons/rightarrow-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/rightarrow-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/save-white.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/save-white.png -------------------------------------------------------------------------------- /static/icons/program_icons/settings-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/settings-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/square-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/square-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/stats-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/stats-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/subtree-green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/subtree-green.png -------------------------------------------------------------------------------- /static/icons/program_icons/trash-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/trash-red.png -------------------------------------------------------------------------------- /static/icons/program_icons/tree-lightblue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/tree-lightblue.png -------------------------------------------------------------------------------- /static/icons/program_icons/undo-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/undo-white.png -------------------------------------------------------------------------------- /static/icons/program_icons/up-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/up-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/visible-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/visible-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/visible-purple.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/visible-purple.png -------------------------------------------------------------------------------- /static/icons/program_icons/x-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/x-black.png -------------------------------------------------------------------------------- /static/icons/program_icons/x-gray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/x-gray.png -------------------------------------------------------------------------------- /static/icons/program_icons/x-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/x-lightgray.png -------------------------------------------------------------------------------- /static/icons/program_icons/x-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/program_icons/x-white.png -------------------------------------------------------------------------------- /static/icons/read-yellow-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/read-yellow-48.png -------------------------------------------------------------------------------- /static/icons/sd-white-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/sd-white-48.png -------------------------------------------------------------------------------- /static/icons/star-16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/star-16.png -------------------------------------------------------------------------------- /static/icons/star-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/star-48.png -------------------------------------------------------------------------------- /static/icons/star_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/star_small.png -------------------------------------------------------------------------------- /static/icons/stats-white-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/stats-white-48.png -------------------------------------------------------------------------------- /static/icons/stats-yellow-48.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/stats-yellow-48.png -------------------------------------------------------------------------------- /static/icons/subtree-white-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/subtree-white-48.png -------------------------------------------------------------------------------- /static/icons/tag_icons/archive-yellow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/archive-yellow.png -------------------------------------------------------------------------------- /static/icons/tag_icons/arrow-green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/arrow-green.png -------------------------------------------------------------------------------- /static/icons/tag_icons/arrow-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/arrow-white.png -------------------------------------------------------------------------------- /static/icons/tag_icons/book-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/book-blue.png -------------------------------------------------------------------------------- /static/icons/tag_icons/book-lightgray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/book-lightgray.png -------------------------------------------------------------------------------- /static/icons/tag_icons/book-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/book-white.png -------------------------------------------------------------------------------- /static/icons/tag_icons/bookmark-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/bookmark-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/circle-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/circle-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/circle-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/circle-blue.png -------------------------------------------------------------------------------- 
/static/icons/tag_icons/circle-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/circle-red.png -------------------------------------------------------------------------------- /static/icons/tag_icons/circle-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/circle-white.png -------------------------------------------------------------------------------- /static/icons/tag_icons/decision-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/decision-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/decision-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/decision-red.png -------------------------------------------------------------------------------- /static/icons/tag_icons/delete-red-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/delete-red-48.png -------------------------------------------------------------------------------- /static/icons/tag_icons/edit-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/edit-blue.png -------------------------------------------------------------------------------- /static/icons/tag_icons/eye-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/eye-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/heart-pink.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/heart-pink.png -------------------------------------------------------------------------------- /static/icons/tag_icons/lightbulb-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/lightbulb-white.png -------------------------------------------------------------------------------- /static/icons/tag_icons/link-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/link-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/marker-black.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/marker-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/media-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/media-white.png -------------------------------------------------------------------------------- /static/icons/tag_icons/note-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/note-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/note-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/note-blue.png -------------------------------------------------------------------------------- /static/icons/tag_icons/note-yellow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/note-yellow.png -------------------------------------------------------------------------------- /static/icons/tag_icons/pin-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/pin-red.png -------------------------------------------------------------------------------- /static/icons/tag_icons/question-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/question-red.png -------------------------------------------------------------------------------- /static/icons/tag_icons/quotes-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/quotes-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/star-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/star-black.png -------------------------------------------------------------------------------- /static/icons/tag_icons/star-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/star-blue.png -------------------------------------------------------------------------------- /static/icons/tag_icons/star-green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/star-green.png -------------------------------------------------------------------------------- 
/static/icons/tag_icons/star-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/tag_icons/star-red.png -------------------------------------------------------------------------------- /static/icons/white-brain-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/white-brain-48.png -------------------------------------------------------------------------------- /static/icons/white-line-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/white-line-48.png -------------------------------------------------------------------------------- /static/icons/white-read-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/icons/white-read-48.png -------------------------------------------------------------------------------- /static/media/black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/media/black.png -------------------------------------------------------------------------------- /static/media/blank.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/media/blank.png -------------------------------------------------------------------------------- /static/readme/block-multiverse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/readme/block-multiverse.png -------------------------------------------------------------------------------- /static/readme/metadata-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/readme/metadata-light.png -------------------------------------------------------------------------------- /static/readme/metadata.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/readme/metadata.png -------------------------------------------------------------------------------- /static/readme/read-view-light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/readme/read-view-light.png -------------------------------------------------------------------------------- /static/readme/read-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/readme/read-view.png -------------------------------------------------------------------------------- /static/readme/tree-view-light.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/readme/tree-view-light.png -------------------------------------------------------------------------------- /static/readme/tree-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/readme/tree-view.png -------------------------------------------------------------------------------- /static/star.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/star.png -------------------------------------------------------------------------------- /static/zoneplate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/socketteer/loom/91ca920551120ad4508540e8da057c0b94067afc/static/zoneplate.png -------------------------------------------------------------------------------- /util/canvas_util.py: -------------------------------------------------------------------------------- 1 | 2 | def move_object(canvas, object_id, destination, speed=50): 3 | dest_x, dest_y = destination 4 | coords = canvas.coords(object_id) 5 | current_x = coords[0] 6 | current_y = coords[1] 7 | 8 | new_x, new_y = current_x, current_y 9 | delta_x = delta_y = 0 10 | if current_x < dest_x: 11 | delta_x = 1 12 | elif current_x > dest_x: 13 | delta_x = -1 14 | 15 | if current_y < dest_y: 16 | delta_y = 1 17 | elif current_y > dest_y: 18 | delta_y = -1 19 | 20 | if (delta_x, delta_y) != (0, 0): 21 | canvas.move(object_id, delta_x, delta_y) 22 | 23 | if (new_x, new_y) != (dest_x, dest_y): 24 | canvas.after(speed, move_object, canvas, object_id, destination, speed) 25 | 26 | -------------------------------------------------------------------------------- /util/custom_tks.py: -------------------------------------------------------------------------------- 1 | from tkinter import ttk 2 | import tkinter as tk 3 | 4 | from ttkthemes import ThemedStyle 5 | from view.colors import scroll_bg_color, darkmode 6 | 7 | 8 | # Class to create a basic dialog pop-up box. Designed for extension. 9 | # From http://effbot.org/tkinterbook/tkinter-dialog-windows.htm 10 | class Dialog(tk.Toplevel): 11 | def __init__(self, parent, title=None, cancellable=False, enter_to_apply=True): 12 | tk.Toplevel.__init__(self, parent) 13 | self.enter_to_apply = enter_to_apply 14 | self.transient(parent) 15 | self.wm_resizable(height=False, width=False) 16 | 17 | self.cancellable = cancellable 18 | if title: 19 | self.title(title) 20 | 21 | # style = ThemedStyle(self) 22 | # if darkmode: 23 | # style.set_theme("black") 24 | 25 | self.parent = parent 26 | self.result = None 27 | body = ttk.Frame(self) 28 | self.initial_focus = self.body(body) 29 | body.pack(padx=5, pady=5) 30 | 31 | self.buttonbox() 32 | self.wait_visibility() 33 | self.grab_set() 34 | if not self.initial_focus: 35 | self.initial_focus = self 36 | self.protocol("WM_DELETE_WINDOW", self.cancel) 37 | self.geometry("+%d+%d" % (parent.winfo_rootx() + 50, 38 | parent.winfo_rooty() + 50)) 39 | self.initial_focus.focus_set() 40 | 41 | self.wait_window(self) 42 | 43 | 44 | 45 | # construction hooks 46 | def body(self, master): 47 | # create dialog body. return widget that should have 48 | # initial focus. 
this method should be overridden 49 | pass 50 | 51 | def buttonbox(self): 52 | # add standard button box. override if you don't want the standard buttons 53 | box = ttk.Frame(self) 54 | 55 | w = ttk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE) 56 | w.pack(side=tk.LEFT, padx=5, pady=5) 57 | if self.cancellable: 58 | w = ttk.Button(box, text="Cancel", width=10, command=self.cancel) 59 | w.pack(side=tk.LEFT, padx=5, pady=5) 60 | 61 | if self.enter_to_apply: 62 | self.bind("<Return>", self.ok) 63 | self.bind("<Escape>", self.cancel) 64 | box.pack() 65 | 66 | # standard button semantics 67 | def ok(self, event=None): 68 | if not self.validate(): 69 | self.initial_focus.focus_set() # put focus back 70 | return 71 | self.withdraw() 72 | self.update_idletasks() 73 | self.apply() 74 | self.cancel() 75 | 76 | def cancel(self, event=None): 77 | # put focus back to the parent window 78 | self.parent.focus_set() 79 | self.destroy() 80 | 81 | # command hooks 82 | def validate(self): 83 | return 1 # override 84 | 85 | def apply(self): 86 | pass # override 87 | 88 | 89 | # https://stackoverflow.com/questions/39458337/is-there-a-way-to-add-close-buttons-to-tabs-in-tkinter-ttk-notebook 90 | class ClosableNotebook(ttk.Notebook): 91 | """A ttk Notebook with close buttons on each tab""" 92 | 93 | __initialized = False 94 | 95 | def __init__(self, *args, **kwargs): 96 | if not self.__initialized: 97 | self.__initialize_custom_style() 98 | self.__initialized = True 99 | 100 | kwargs["style"] = "CustomNotebook" 101 | ttk.Notebook.__init__(self, *args, **kwargs) 102 | 103 | self._active = None 104 | 105 | self.bind("<ButtonPress-1>", self.on_close_press, True) 106 | self.bind("<ButtonRelease-1>", self.on_close_release) 107 | 108 | def on_close_press(self, event): 109 | """Called when the button is pressed over the close button""" 110 | 111 | element = self.identify(event.x, event.y) 112 | 113 | if "close" in element: 114 | index = self.index("@%d,%d" % (event.x, event.y)) 115 | self.state(['pressed']) 116 | self._active = index 117 | 118 | def on_close_release(self, event): 119 | """Called when the button is released over the close button""" 120 | if not self.instate(['pressed']): 121 | return 122 | 123 | element = self.identify(event.x, event.y) 124 | index = self.index("@%d,%d" % (event.x, event.y)) 125 | 126 | if "close" in element and self._active == index: 127 | self.forget(index) 128 | self.event_generate("<<NotebookTabClosed>>") 129 | 130 | self.state(["!pressed"]) 131 | self._active = None 132 | 133 | def __initialize_custom_style(self): 134 | style = ttk.Style() 135 | self.images = ( 136 | tk.PhotoImage("img_close", data=''' 137 | R0lGODlhCAAIAMIBAAAAADs7O4+Pj9nZ2Ts7Ozs7Ozs7Ozs7OyH+EUNyZWF0ZWQg 138 | d2l0aCBHSU1QACH5BAEKAAQALAAAAAAIAAgAAAMVGDBEA0qNJyGw7AmxmuaZhWEU 139 | 5kEJADs= 140 | '''), 141 | tk.PhotoImage("img_closeactive", data=''' 142 | R0lGODlhCAAIAMIEAAAAAP/SAP/bNNnZ2cbGxsbGxsbGxsbGxiH5BAEKAAQALAAA 143 | AAAIAAgAAAMVGDBEA0qNJyGw7AmxmuaZhWEU5kEJADs= 144 | '''), 145 | tk.PhotoImage("img_closepressed", data=''' 146 | R0lGODlhCAAIAMIEAAAAAOUqKv9mZtnZ2Ts7Ozs7Ozs7Ozs7OyH+EUNyZWF0ZWQg 147 | d2l0aCBHSU1QACH5BAEKAAQALAAAAAAIAAgAAAMVGDBEA0qNJyGw7AmxmuaZhWEU 148 | 5kEJADs= 149 | ''') 150 | ) 151 | 152 | style.element_create("close", "image", "img_close", 153 | ("active", "pressed", "!disabled", "img_closepressed"), 154 | ("active", "!disabled", "img_closeactive"), border=8, sticky='') 155 | style.layout("CustomNotebook", [("CustomNotebook.client", {"sticky": "nswe"})]) 156 | style.layout("CustomNotebook.Tab", [ 157 | ("CustomNotebook.tab", { 158 | 
"sticky": "nswe", 159 | "children": [ 160 | ("CustomNotebook.padding", { 161 | "side": "top", 162 | "sticky": "nswe", 163 | "children": [ 164 | ("CustomNotebook.focus", { 165 | "side": "top", 166 | "sticky": "nswe", 167 | "children": [ 168 | ("CustomNotebook.label", {"side": "left", "sticky": ''}), 169 | ("CustomNotebook.close", {"side": "left", "sticky": ''}), 170 | ] 171 | }) 172 | ] 173 | }) 174 | ] 175 | }) 176 | ]) 177 | 178 | 179 | 180 | 181 | # Wraps text box to create a <> bindable event 182 | # https://stackoverflow.com/questions/40617515/python-tkinter-text-modified-callback 183 | class TextAware(tk.Text): 184 | def __init__(self, *args, **kwargs): 185 | """A text widget that report on internal widget commands""" 186 | tk.Text.__init__(self, *args, **kwargs) 187 | 188 | # create a proxy for the underlying widget 189 | self._orig = self._w + "_orig" 190 | self.tk.call("rename", self._w, self._orig) 191 | self.tk.createcommand(self._w, self._proxy) 192 | self.bind("<>", self.Paste) 193 | 194 | def _proxy(self, command, *args): 195 | cmd = (self._orig, command) + args 196 | try: 197 | result = self.tk.call(cmd) 198 | except tk.TclError as e: 199 | return 200 | 201 | if command in ("insert", "delete", "replace"): 202 | self.event_generate("<>") 203 | 204 | return result 205 | 206 | def Paste(self, event): 207 | tagranges = self.tag_ranges("sel") 208 | if tagranges: 209 | selectionstart = self.index(tk.SEL_FIRST) 210 | selectionend = self.index(tk.SEL_LAST) 211 | self.delete(selectionstart, selectionend) 212 | self.mark_set(tk.INSERT, selectionstart) 213 | self.insert(tk.INSERT, self.clipboard_get()) 214 | self.see(tk.INSERT) 215 | return "break" 216 | 217 | def highlight_pattern(self, pattern, tag, start="1.0", end="end", 218 | regexp=False): 219 | '''Apply the given tag to all text that matches the given pattern 220 | 221 | If 'regexp' is set to True, pattern will be treated as a regular 222 | expression according to Tcl's regular expression syntax. 
223 | ''' 224 | 225 | start = self.index(start) 226 | end = self.index(end) 227 | self.mark_set("matchStart", start) 228 | self.mark_set("matchEnd", start) 229 | self.mark_set("searchLimit", end) 230 | 231 | count = tk.IntVar() 232 | while True: 233 | index = self.search(pattern, "matchEnd", "searchLimit", 234 | count=count, regexp=regexp) 235 | if index == "": break 236 | if count.get() == 0: break # degenerate pattern which matches zero-length strings 237 | self.mark_set("matchStart", index) 238 | self.mark_set("matchEnd", "%s+%sc" % (index, count.get())) 239 | self.tag_add(tag, "matchStart", "matchEnd") 240 | 241 | def current_height(self): 242 | return self.cget("height") 243 | 244 | def reset_height(self, max_height=None): 245 | height = self.height() 246 | try: 247 | if max_height is not None and height > max_height: 248 | self.configure(height=max_height) 249 | else: 250 | self.configure(height=height) 251 | except TypeError: 252 | return 253 | 254 | def height(self): 255 | return self.tk.call((self._w, "count", "-update", "-displaylines", "1.0", "end")) 256 | 257 | 258 | 259 | class ScrollableFrame(ttk.Frame): 260 | def __init__(self, container, *args, **kwargs): 261 | super().__init__(container, *args, **kwargs) 262 | self.canvas = tk.Canvas(self, bg=scroll_bg_color(), **kwargs) 263 | scrollbar = ttk.Scrollbar(self, orient="vertical", command=self.canvas.yview) 264 | 265 | # Create the scrollable frame and change the canvas scroll region as it resizes 266 | self.scrollable_frame = ttk.Frame(self.canvas) 267 | self.scrollable_frame.bind("<Configure>", lambda e: self.canvas.configure(scrollregion=self.canvas.bbox("all"))) 268 | 269 | # Put the scrollable frame inside the canvas and resize its window as the canvas resizes 270 | # https://stackoverflow.com/questions/29319445/tkinter-how-to-get-frame-in-canvas-window-to-expand-to-the-size-of-the-canvas 271 | window_frame = self.canvas.create_window((0, 0), window=self.scrollable_frame, anchor="nw") 272 | self.canvas.bind("<Configure>", lambda e: self.canvas.itemconfig(window_frame, width=e.width))#, height=e.height)) 273 | 274 | self.canvas.configure(yscrollcommand=scrollbar.set) 275 | self.canvas.pack(side="left", fill="both", expand=True) 276 | scrollbar.pack(side="right", fill="y") 277 | -------------------------------------------------------------------------------- /util/file_utils.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import json 3 | import os 4 | 5 | def flat_csv_to_json(csv_file, json_file, attribute_mappings=None): 6 | """ 7 | Reads a CSV file and writes a JSON file. 
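ScrollableFrame above pairs a Canvas with an inner Frame so that ordinary widgets can scroll vertically. A rough usage sketch follows, assuming a bare Tk root window; the label rows are placeholder content, not from the repo:

root = tk.Tk()
container = ScrollableFrame(root)
container.pack(fill="both", expand=True)
# children go into .scrollable_frame, not the ScrollableFrame itself,
# so the <Configure> binding can grow the canvas scrollregion as rows are added
for i in range(50):
    ttk.Label(container.scrollable_frame, text=f"row {i}").pack(anchor="w")
root.mainloop()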
8 | """ 9 | with open(csv_file, 'r') as csv_file: 10 | reader = csv.DictReader(csv_file) 11 | rows = list(reader) 12 | 13 | for row in rows: 14 | for key, value in attribute_mappings.items(): 15 | if key in row: 16 | # if value is None, then delete the key 17 | if value is not None: 18 | row[value] = row[key] 19 | del row[key] 20 | 21 | with open(json_file, 'w') as json_file: 22 | json.dump(rows, json_file) 23 | 24 | 25 | flat_csv_to_json('./data/csv/mem_contradictions.csv', './data/train/mem_contradictions.json', attribute_mappings = {'Story': 'text', 26 | 'Memory': 'alt', 27 | 'Contradictory continuation': None, 28 | 'Non-contradictory continuation': None}) 29 | -------------------------------------------------------------------------------- /util/frames_util.py: -------------------------------------------------------------------------------- 1 | from deepmerge import always_merger, Merger 2 | from copy import deepcopy 3 | 4 | frame_merger = Merger( 5 | # pass in a list of tuple, with the 6 | # strategies you are looking to apply 7 | # to each type. 8 | [ 9 | (list, ["override"]), 10 | (dict, ["merge"]), 11 | (set, ["union"]) 12 | ], 13 | # next, choose the fallback strategies, 14 | # applied to all other types: 15 | ["override"], 16 | # finally, choose the strategies in 17 | # the case where the types conflict: 18 | ["override"] 19 | ) 20 | 21 | frame_merger_append = Merger( 22 | # pass in a list of tuple, with the 23 | # strategies you are looking to apply 24 | # to each type. 25 | [ 26 | (list, ["append"]), 27 | (dict, ["merge"]), 28 | (set, ["union"]) 29 | ], 30 | # next, choose the fallback strategies, 31 | # applied to all other types: 32 | ["override"], 33 | # finally, choose the strategies in 34 | # the case where the types conflict: 35 | ["override"] 36 | ) 37 | 38 | frame_merger_override = Merger( 39 | # pass in a list of tuple, with the 40 | # strategies you are looking to apply 41 | # to each type. 
42 | [ 43 | (list, ["override"]), 44 | (dict, ["override"]), 45 | (set, ["override"]) 46 | ], 47 | # next, choose the fallback strategies, 48 | # applied to all other types: 49 | ["override"], 50 | # finally, choose the strategies in 51 | # the case where the types conflict: 52 | ["override"] 53 | ) -------------------------------------------------------------------------------- /util/gpt_util.py: -------------------------------------------------------------------------------- 1 | import openai 2 | import os 3 | import numpy as np 4 | import math 5 | import codecs 6 | from util.tokenizer import logit_mask 7 | 8 | 9 | def normalize(probs): 10 | return [float(i) / sum(probs) for i in probs] 11 | 12 | 13 | def logprobs_to_probs(probs): 14 | if isinstance(probs, list): 15 | return [math.exp(x) for x in probs] 16 | else: 17 | return math.exp(probs) 18 | 19 | 20 | def dict_logprobs_to_probs(prob_dict): 21 | return {key: math.exp(prob_dict[key]) for key in prob_dict.keys()} 22 | 23 | 24 | def total_logprob(response): 25 | logprobs = response['logprobs']['token_logprobs'] 26 | logprobs = [i for i in logprobs if not math.isnan(i)] 27 | return sum(logprobs) 28 | 29 | 30 | def tokenize_ada(prompt): 31 | response = openai.Completion.create( 32 | engine='ada', 33 | prompt=prompt, 34 | max_tokens=0, 35 | echo=True, 36 | n=1, 37 | logprobs=0 38 | ) 39 | tokens = response.choices[0]["logprobs"]["tokens"] 40 | positions = response.choices[0]["logprobs"]["text_offset"] 41 | return tokens, positions 42 | 43 | 44 | def prompt_probs(prompt, engine='ada'): 45 | response = openai.Completion.create( 46 | engine=engine, 47 | prompt=prompt, 48 | max_tokens=0, 49 | echo=True, 50 | n=1, 51 | logprobs=0 52 | ) 53 | positions = response.choices[0]["logprobs"]["text_offset"] 54 | tokens = response.choices[0]["logprobs"]["tokens"] 55 | logprobs = response.choices[0]["logprobs"]["token_logprobs"] 56 | return logprobs, tokens, positions 57 | 58 | # evaluates logL(prompt+target | prompt) 59 | def conditional_logprob(prompt, target, engine='ada'): 60 | combined = prompt + target 61 | response = openai.Completion.create( 62 | engine=engine, 63 | prompt=combined, 64 | max_tokens=0, 65 | echo=True, 66 | n=1, 67 | logprobs=0 68 | ) 69 | positions = response.choices[0]["logprobs"]["text_offset"] 70 | logprobs = response.choices[0]["logprobs"]["token_logprobs"] 71 | word_index = positions.index(len(prompt)) 72 | total_conditional_logprob = sum(logprobs[word_index:]) 73 | return total_conditional_logprob 74 | 75 | 76 | 77 | # TODO use threading 78 | # returns the conditional probabilities for each event happening after prompt 79 | def event_probs(prompt, events, engine='ada'): 80 | probs = [] 81 | for event in events: 82 | logprob = conditional_logprob(prompt, event, engine) 83 | probs.append(logprobs_to_probs(logprob)) 84 | 85 | normal_probs = normalize(probs) 86 | return probs, normal_probs 87 | 88 | 89 | # like event_probs, returns conditional probabilities (normalized & unnormalized) for each token occurring after prompt 90 | def token_probs(prompt, tokens, engine='ada'): 91 | pass 92 | 93 | 94 | # returns a list of positions and counterfactual probability of token at position 95 | # if token is not in top_logprobs, probability is treated as 0 96 | # all positions if actual_token=None, else only positions where the actual token in response is actual_token 97 | # TODO next sequence instead of next token 98 | def counterfactual(response, token, actual_token=None, next_token=None, sort=True): 99 | counterfactual_probs = [] 100 | 
tokens = response.choices[0]['logprobs']['tokens'] 101 | top_logprobs = response.choices[0]['logprobs']['top_logprobs'] 102 | positions = response.choices[0]['logprobs']['text_offset'] 103 | for i, probs in enumerate(top_logprobs): 104 | if (actual_token is None and next_token is None) \ 105 | or actual_token == tokens[i] \ 106 | or (i < len(tokens) - 1 and next_token == tokens[i+1]): 107 | if token in probs: 108 | counterfactual_probs.append({'position': positions[i+1], 109 | 'prob': logprobs_to_probs(probs[token])}) 110 | else: 111 | counterfactual_probs.append({'position': positions[i+1], 'prob': 0}) 112 | if sort: 113 | counterfactual_probs = sorted(counterfactual_probs, key=lambda k: k['prob']) 114 | return counterfactual_probs 115 | 116 | 117 | # returns a list of substrings of content and 118 | # logL(preprompt+substring+target | preprompt+substring) for each substring 119 | def substring_probs(preprompt, content, target, engine='ada', quiet=0): 120 | logprobs = [] 121 | substrings = [] 122 | _, positions = tokenize_ada(content) 123 | for position in positions: 124 | substring = content[:position] 125 | prompt = preprompt + substring 126 | logprob = conditional_logprob(prompt, target, engine) 127 | logprobs.append(logprob) 128 | substrings.append(substring) 129 | if not quiet: 130 | print(substring) 131 | print('logprob: ', logprob) 132 | 133 | return substrings, logprobs 134 | 135 | 136 | # returns a list of substrings of content 137 | # logL(substring+target | substring) for each substring 138 | def token_conditional_logprob(content, target, engine='ada'): 139 | response = openai.Completion.create( 140 | engine=engine, 141 | prompt=content, 142 | max_tokens=0, 143 | echo=True, 144 | n=1, 145 | logprobs=100 146 | ) 147 | tokens = response.choices[0]['logprobs']['tokens'] 148 | top_logprobs = response.choices[0]['logprobs']['top_logprobs'] 149 | logprobs = [] 150 | substrings = [] 151 | substring = '' 152 | for i, probs in enumerate(top_logprobs): 153 | substrings.append(substring) 154 | if target in probs: 155 | logprobs.append(probs[target]) 156 | else: 157 | logprobs.append(None) 158 | substring += tokens[i] 159 | return substrings, logprobs 160 | 161 | 162 | 163 | def sort_logprobs(substrings, logprobs, n_top=None): 164 | sorted_indices = np.argsort(logprobs) 165 | top = [] 166 | if n_top is None: 167 | n_top = len(sorted_indices) 168 | for i in range(n_top): 169 | top.append({'substring': substrings[sorted_indices[-(i + 1)]], 170 | 'logprob': logprobs[sorted_indices[-(i + 1)]]}) 171 | return top 172 | 173 | 174 | def top_logprobs(preprompt, content, target, n_top=None, engine='ada', quiet=0): 175 | substrings, logprobs = substring_probs(preprompt, content, target, engine, quiet) 176 | return sort_logprobs(substrings, logprobs, n_top) 177 | 178 | 179 | def decibels(prior, evidence, target, engine='ada'): 180 | prior_target_logprob = conditional_logprob(prompt=prior, target=target, engine=engine) 181 | evidence_target_logprob = conditional_logprob(prompt=evidence, target=target, engine=engine) 182 | return (evidence_target_logprob - prior_target_logprob), prior_target_logprob, evidence_target_logprob 183 | 184 | 185 | def parse_stop(stop_string): 186 | return codecs.decode(stop_string, "unicode-escape").split('|') 187 | 188 | def parse_logit_bias(logit_string): 189 | biases = codecs.decode(logit_string, "unicode-escape").split('|') 190 | bias_dict = {} 191 | for b in biases: 192 | bias_parts = b.split(':') 193 | token = bias_parts[0] 194 | bias = int(bias_parts[1]) 195 | 
bias_dict[token] = bias 196 | return logit_mask(bias_dict) 197 | 198 | def get_correct_key(model_type, kwargs={}): 199 | if model_type == 'gooseai': 200 | # openai.api_base = openai.api_base if openai.api_base else "https://api.goose.ai/v1" 201 | gooseai_api_key = kwargs.get('GOOSEAI_API_KEY', None) 202 | api_key = gooseai_api_key if gooseai_api_key else os.environ.get("GOOSEAI_API_KEY", None) 203 | organization = None 204 | elif model_type == 'together': 205 | togetherai_api_key = kwargs.get('TOGETHERAI_API_KEY', None) 206 | api_key = togetherai_api_key if togetherai_api_key else os.environ.get("TOGETHERAI_API_KEY", None) 207 | organization = None 208 | elif model_type in ('openai', 'openai-custom', 'openai-chat'): 209 | # openai.api_base = openai.api_base if openai.api_base else "https://api.openai.com/v1" 210 | openai_api_key = kwargs.get('OPENAI_API_KEY', None) 211 | api_key = openai_api_key if openai_api_key else os.environ.get("OPENAI_API_KEY", None) 212 | openai_organization = kwargs.get('OPENAI_ORGANIZATION', None) 213 | organization = openai_organization if openai_organization else os.environ.get("OPENAI_ORGANIZATION", None) 214 | else: 215 | api_key = None 216 | organization = None 217 | return api_key, organization -------------------------------------------------------------------------------- /util/keybindings.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | special_keybindings = {'!': '<exclam>', 4 | '@': '<at>', 5 | '#': '<numbersign>', 6 | '$': '<dollar>', 7 | '%': '<percent>', 8 | '^': '<asciicircum>', 9 | '&': '<ampersand>', 10 | '*': '<asterisk>', 11 | '(': '<parenleft>', 12 | ')': '<parenright>', 13 | '"': '<quotedbl>',} 14 | 15 | def tkinter_keybindings(key): 16 | if key.isalnum(): 17 | return f"<{key}>" 18 | elif key in special_keybindings: 19 | return special_keybindings[key] 20 | else: 21 | print('invalid key') 22 | return None 23 | 24 | -------------------------------------------------------------------------------- /util/multiverse_util.py: -------------------------------------------------------------------------------- 1 | import openai 2 | import numpy as np 3 | from util.tokenizer import tokenize, token_to_word 4 | from util.gpt_util import logprobs_to_probs, get_correct_key 5 | import os 6 | 7 | 8 | def generate(prompt, engine, api_base, api_key): 9 | openai.base_url, openai.api_key = api_base + '/', api_key 10 | #print('calling engine', engine, 'at endpoint', openai.api_base) 11 | #print('prompt:', prompt) 12 | response = openai.completions.create(prompt=prompt, 13 | max_tokens=1, 14 | n=1, 15 | temperature=0, 16 | logprobs=100, 17 | model=engine).dict() 18 | return response 19 | 20 | # TODO multiple "ground truth" trajectories 21 | def greedy_word_multiverse(prompt, ground_truth='', max_depth=3, unnormalized_amplitude=1, unnormalized_threshold=0.1, 22 | engine='davinci-002', model_type='openai', api_base=None): 23 | if isinstance(ground_truth, str): 24 | ground_truth = tokenize(ground_truth) 25 | ground_truth = [token_to_word(token).replace('Ġ', ' ') for token in ground_truth] 26 | if max_depth == 0: 27 | return {}, ground_truth 28 | api_key, _ = get_correct_key(model_type) 29 | response = generate(prompt, engine, api_base, api_key) 30 | logprobs = response['choices'][0]["logprobs"]["top_logprobs"][0] 31 | probs = {k: logprobs_to_probs(v) for k, v in sorted(logprobs.items(), key=lambda item: item[1], reverse=True)} 32 | multiverse = {token: {'normalized_prob': prob, 'unnormalized_prob': prob * unnormalized_amplitude, 'children': {}} for token, prob in probs.items()} 33 | ground_truth_token = ground_truth[0] if 
ground_truth else 'NO GROUND TRUTH' 34 | done_ground_truth = False 35 | for token in multiverse.items(): 36 | if token[1]['unnormalized_prob'] > unnormalized_threshold: 37 | token[1]['children'], _ = greedy_word_multiverse(prompt + token[0], ground_truth='', max_depth=max_depth-1, 38 | unnormalized_threshold=unnormalized_threshold, 39 | unnormalized_amplitude=token[1]['unnormalized_prob'], 40 | engine=engine, 41 | api_base=api_base) 42 | elif token[0] == ground_truth_token: 43 | token[1]['children'], _ = greedy_word_multiverse(prompt + token[0], ground_truth=ground_truth[1:], 44 | max_depth=max_depth-1, 45 | unnormalized_threshold=unnormalized_threshold, 46 | unnormalized_amplitude=token[1]['unnormalized_prob'], 47 | engine=engine, 48 | api_base=api_base) 49 | 50 | 51 | done_ground_truth = True 52 | else: 53 | break 54 | if not done_ground_truth: 55 | if ground_truth_token in multiverse: 56 | multiverse[ground_truth_token]['children'], _ = greedy_word_multiverse(prompt + ground_truth_token, 57 | ground_truth=ground_truth[1:], 58 | max_depth=max_depth-1, 59 | unnormalized_threshold=unnormalized_threshold, 60 | unnormalized_amplitude=multiverse[ground_truth_token]['unnormalized_prob'], 61 | engine=engine, 62 | api_base=api_base, 63 | api_key=api_key) 64 | return multiverse, ground_truth 65 | 66 | 67 | -------------------------------------------------------------------------------- /util/node_conditions.py: -------------------------------------------------------------------------------- 1 | from util.util_tree import in_ancestry 2 | from datetime import datetime 3 | 4 | conditions = {} 5 | def condition(name): 6 | def wrapper(fn): 7 | conditions[name] = fn 8 | return fn 9 | return wrapper 10 | 11 | 12 | def condition_lambda(node, and_conditions=None, or_conditions=None): 13 | return (all(cond(node) for cond in and_conditions) if and_conditions else True)\ 14 | and (any(cond(node) for cond in or_conditions) if or_conditions else True) 15 | 16 | 17 | @condition("canonical") 18 | def node_is_canonical(node, **kwargs): 19 | return node['id'] in kwargs['calc_canonical_set']() 20 | 21 | 22 | @condition("descendent of") 23 | def descendent_of(ancestor_id, node, **kwargs): 24 | tree_node_dict = kwargs['tree_node_dict'] 25 | ancestor = tree_node_dict[ancestor_id] 26 | return in_ancestry(ancestor, node, tree_node_dict) 27 | 28 | 29 | @condition("ancestor of") 30 | def ancestor_of(node, descendent_id, **kwargs): 31 | tree_node_dict = kwargs['tree_node_dict'] 32 | descendent = tree_node_dict[descendent_id] 33 | return in_ancestry(node, descendent, tree_node_dict) 34 | 35 | 36 | @condition("created on or after") 37 | def created_on_after(node, time, **kwargs): 38 | node_timestamp = node['meta']['creation_timestamp'] 39 | return time < datetime.strptime(node_timestamp, '%Y-%m-%d-%H.%M.%S') 40 | 41 | 42 | @condition("created before") 43 | def created_before(node, time, **kwargs): 44 | node_timestamp = node['meta']['creation_timestamp'] 45 | return time >= datetime.strptime(node_timestamp, '%Y-%m-%d-%H.%M.%S') 46 | 47 | 48 | @condition("examples") 49 | def test_condition(a, b, node, **kwargs): 50 | return a == b 51 | -------------------------------------------------------------------------------- /util/react.py: -------------------------------------------------------------------------------- 1 | def react_changes(old_components, new_components): 2 | # ids of added components 3 | added_ids = [] 4 | # ids of deleted components 5 | deleted_ids = [] 6 | # check for new components 7 | for new_id in 
new_components: 8 | if new_id not in old_components: 9 | added_ids.append(new_id) 10 | # check for deleted components 11 | for old_id in old_components: 12 | if old_id not in new_components: 13 | deleted_ids.append(old_id) 14 | return added_ids, deleted_ids 15 | 16 | # returns components which are in both old_components and new_components 17 | def unchanged(old_components, new_components): 18 | return [id for id in old_components if id in new_components] 19 | 20 | 21 | # for id in node_ids, check if the result of f(node_id) for f in functions 22 | # has changed. Functions is dictionary of the form 23 | ''' 24 | {function_id: {f: lambda 25 | cached_value: val} 26 | } 27 | ''' 28 | # returns a dictionary of the form 29 | ''' 30 | { 31 | modified_node_id: { modified_function_id : new_val} 32 | } 33 | ''' 34 | def modifications(node_ids, functions): 35 | modified_nodes = {} 36 | for node_id in node_ids: 37 | for function_id in functions: 38 | new_val = functions[function_id]['f'](node_id) 39 | if new_val != functions[function_id]['cached_value']: 40 | modified_nodes[node_id] = {function_id: new_val} 41 | functions[function_id]['cached_value'] = new_val 42 | return modified_nodes 43 | -------------------------------------------------------------------------------- /util/textbox_util.py: -------------------------------------------------------------------------------- 1 | import bisect 2 | from util.util_tree import ancestor_text_end_indices, ancestor_text_start_indices 3 | from util.util import diff, diff_linesToWords 4 | from diff_match_patch import diff_match_patch 5 | import re 6 | 7 | # given a textbox index, returns the index of the ancestor node that contains it, 8 | # and the index of the text within that node 9 | def textbox_index_to_node(textbox_index, ancestry): 10 | ancestor_end_indices = ancestor_text_end_indices(ancestry) 11 | ancestor_start_indices = ancestor_text_start_indices(ancestry) 12 | #print("textbox_index: ", textbox_index) 13 | #print("ancestor_end_indices: ", ancestor_end_indices) 14 | #print("ancestor_start_indices: ", ancestor_start_indices) 15 | #ancestor_index = bisect.bisect_left(ancestor_end_indices, textbox_index) 16 | ancestor_index = bisect.bisect_right(ancestor_start_indices, textbox_index) - 1 17 | #print("ancestor_index: ", ancestor_index) 18 | ancestor_text_index = textbox_index - ancestor_end_indices[ancestor_index - 1] 19 | return ancestor_index, ancestor_text_index 20 | 21 | # given node ancestry and index of text in last node, returns the index of the 22 | # text in textbox 23 | def node_to_textbox_index(node_text_index, ancestry): 24 | ancestor_end_indices = ancestor_text_end_indices(ancestry) 25 | textbox_index = ancestor_end_indices[-1] + node_text_index 26 | return textbox_index 27 | 28 | def apply_diff(old_text, position, diff): 29 | if diff[0] == 1: 30 | # insertion 31 | return old_text[:position] + diff[1] + old_text[position:] 32 | elif diff[0] == -1: 33 | # deletion 34 | return old_text[:position - len(diff[1])] + old_text[position:] 35 | 36 | 37 | # given a new textbox state and node ancestry, computes changes to nodes in ancestry 38 | # and returns a list of modified ancestors 39 | def distribute_textbox_changes(new_text, ancestry): 40 | old_text = "".join([ancestor['text'] for ancestor in ancestry]) 41 | if old_text == new_text: 42 | return [] 43 | dmp = diff_match_patch() 44 | diffs = dmp.diff_main(old_text, new_text) 45 | #a = diff_linesToWords(old_text, new_text, delimiter=re.compile(' ')) 46 | #diffs = dmp.diff_main(a[0], a[1], False) 47 
| #dmp.diff_charsToLines(diffs, a[2]) 48 | #print([ancestor['text'] for ancestor in ancestry]) 49 | #print('old text: ', old_text) 50 | #print('new text: ', new_text) 51 | diff_pos = 0 52 | changed_ancestor_ids = [] 53 | for d in diffs: 54 | #print(changed_ancestor_ids) 55 | #print(d) 56 | if d[0] == 0: 57 | diff_pos += len(d[1]) 58 | else: 59 | diff_start = diff_pos 60 | diff_end = diff_pos + len(d[1]) 61 | node_index_start, text_index_start = textbox_index_to_node(diff_start, ancestry) 62 | node_index_end, text_index_end = textbox_index_to_node(diff_end, ancestry) 63 | 64 | if node_index_start != node_index_end and d[0] == -1: 65 | # deletion spanning multiple nodes 66 | old_text_start = ancestry[node_index_start]['text'] 67 | old_text_end = ancestry[node_index_end]['text'] 68 | new_text_start = old_text_start[:text_index_start] 69 | new_text_end = old_text_end[-text_index_end:] 70 | ancestry[node_index_start]['text'] = new_text_start 71 | ancestry[node_index_end]['text'] = new_text_end 72 | changed_ancestor_ids.append(ancestry[node_index_start]['id']) 73 | changed_ancestor_ids.append(ancestry[node_index_end]['id']) 74 | 75 | # if there are any nodes in between, set their text to empty 76 | for i in range(node_index_start + 1, node_index_end): 77 | ancestry[i]['text'] = '' 78 | changed_ancestor_ids.append(ancestry[i]['id']) 79 | 80 | else: 81 | node_index, text_index = (node_index_start, text_index_start) if d[0] == 1 \ 82 | else (node_index_end, text_index_end) 83 | # apply changes off the end of textbox to last node 84 | node_index = node_index if node_index < len(ancestry) else len(ancestry) - 1 85 | #print('changed node index: ', node_index) 86 | old_node_text = ancestry[node_index]['text'] 87 | new_node_text = apply_diff(old_node_text, text_index, d) 88 | ancestry[node_index]['text'] = new_node_text 89 | changed_ancestor_ids.append(ancestry[node_index]['id']) 90 | 91 | diff_pos = diff_end if d[0] == 1 else diff_start 92 | 93 | 94 | #print('new ancestry:', [ancestor['text'] for ancestor in ancestry]) 95 | #print('changed ids:', changed_ancestor_ids) 96 | changed_ancestors = [ancestor for ancestor in ancestry if ancestor['id'] in changed_ancestor_ids] 97 | #print('changed ancestors:', [ancestor['text'] for ancestor in changed_ancestors]) 98 | return changed_ancestors -------------------------------------------------------------------------------- /util/tokenizer.py: -------------------------------------------------------------------------------- 1 | from transformers import GPT2Tokenizer 2 | 3 | tok = None 4 | 5 | def tokenize(input): 6 | tokenizer = tok if tok else GPT2Tokenizer.from_pretrained("gpt2") 7 | return tokenizer(input)['input_ids'] 8 | 9 | 10 | def detokenize(tokens): 11 | tokenizer = tok if tok else GPT2Tokenizer.from_pretrained("gpt2") 12 | return tokenizer.convert_tokens_to_string(tokens) 13 | 14 | 15 | def token_to_word(token): 16 | tokenizer = tok if tok else GPT2Tokenizer.from_pretrained("gpt2") 17 | return tokenizer.convert_ids_to_tokens([token])[0] 18 | 19 | 20 | def logit_mask(mask): 21 | id_mask = {} 22 | for token in mask: 23 | if token == '\n': 24 | token_id = 198 25 | else: 26 | token_id = tokenize([token])[0][0] 27 | id_mask[token_id] = mask[token] 28 | return id_mask -------------------------------------------------------------------------------- /util/util.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import csv 3 | import datetime 4 | import functools 5 | import itertools 6 | import json 7 | 
import logging 8 | import os 9 | import random 10 | import string 11 | import sys 12 | import time 13 | from functools import reduce, partial, wraps 14 | import operator 15 | from pprint import pprint 16 | from random import shuffle 17 | from util.gpt_util import tokenize_ada 18 | import difflib 19 | import re 20 | 21 | import numpy as np 22 | import pandas as pd 23 | 24 | 25 | def init_logs(logfile=None, stdout=True): 26 | if logfile is None: 27 | logfile = f"logs/{timestamp()}.log" 28 | 29 | logging.basicConfig(filename=logfile, 30 | format='%(asctime)s - %(levelname)s - %(message)s', 31 | datefmt='%Y-%m-%d %H:%M:%S', 32 | level=logging.INFO) 33 | # Also log to stdout 34 | if stdout: 35 | logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) 36 | 37 | 38 | def what_is_this_thing(thing): 39 | print(f"What is this thing? It's a {type(thing)}!") 40 | pprint(thing) 41 | 42 | 43 | def print_array(arr, name="array"): 44 | print(f"Array {name}") 45 | print(f"\tShape: {arr.shape}, Max: {np.max(arr)}, Min: {np.min(arr)}") 46 | for line in np.array2string(arr).split("\n"): 47 | print(f"\t\t{line}") 48 | print() 49 | 50 | 51 | ################################################################################ 52 | # Strings 53 | ################################################################################ 54 | 55 | 56 | def datestamp(): 57 | return datetime.date.today() 58 | 59 | 60 | def timestamp(): 61 | ts = time.time() 62 | return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H.%M.%S') 63 | 64 | 65 | def remove_whitespace(x): 66 | return x.translate(str.maketrans('', '', string.whitespace)) 67 | 68 | 69 | def split_text(text, d): 70 | if not text: 71 | return [] 72 | word_list = text.split(d) 73 | if word_list[0] == '': 74 | token_list = [] 75 | else: 76 | token_list = [word_list[0]] 77 | return token_list + [d+e for e in word_list[1:] if e] 78 | 79 | 80 | # String class which can be formatted with brackets other than {} 81 | class FString: 82 | def __init__(self, s, brackets="<>"): 83 | self.string = s 84 | self.brackets = brackets 85 | 86 | def remove_commented_lines(self, s): 87 | uncommented_lines = [line for line in s.split("\n") if not line.strip().startswith("#")] 88 | return "\n".join(uncommented_lines) 89 | 90 | # Replaces { with {{ and the FString bracket type with { 91 | # The string is ready to format with .format 92 | def switch_brackets(self, string): 93 | return string.replace("{", "{{") \ 94 | .replace("}", "}}") \ 95 | .replace(self.brackets[0], "{") \ 96 | .replace(self.brackets[1], "}") 97 | 98 | def format(self, *args, **kwargs): 99 | return self.switch_brackets(self.remove_commented_lines(self.string)).format(*args, **kwargs) 100 | 101 | def __str__(self): 102 | return self.string.__str__() 103 | 104 | # Pass all undefined attribute requests to the underlying string 105 | # Composition >> Inheritance, at least because I'm afraid to override format 106 | def __getattr__(self, attr): 107 | return getattr(self.string, attr) 108 | 109 | 110 | # https://stackoverflow.com/questions/13734451/string-split-with-indices-in-python 111 | def split_indices(s): 112 | """Splits a string on whitespaces and records the indices of each in the original string. 113 | @:return generator((word, (start_idx, end_idx)), ...) 
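The FString class above swaps a custom bracket pair into str.format placeholders while escaping literal braces, which is how the config prompts can mix template slots with literal JSON-style braces. A small illustrative sketch (the template text is made up, not from the repo's prompt files):

prompt = FString("Write a story about <topic> in {json} format.", brackets="<>")
print(prompt.format(topic="looms"))
# literal braces survive because they are doubled before formatting:
# Write a story about looms in {json} format.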
114 | """ 115 | return ((m.group(0), (m.start(), m.end())) for m in re.finditer(r'\S+', s)) 116 | 117 | 118 | def word_ngrams(s, n): 119 | """Splits a string into ngram words""" 120 | tokens = s.split() # not a generator :( 121 | ngram_seqs = form_ngrams(iter(tokens), n) 122 | return (" ".join(ngram) for ngram in ngram_seqs) 123 | 124 | 125 | def word_ngrams_indices(s, n): 126 | """Splits a string into pairs of (ngram words, their start/end indices)""" 127 | tokens_with_indices = split_indices(s) 128 | 129 | # Generator of ngrams of (word, idx_pairs) 130 | # ( 131 | # [(word, (start,end)), (word, (start, end))...], 132 | # [(word, (start, end)), ...], 133 | # ... 134 | # ) 135 | ngram_seqs_with_indices = form_ngrams(tokens_with_indices, n) 136 | 137 | # Generator of pairs of word and index ngrams 138 | # ( 139 | # ([word, word, ...], [(start,end), (start,end), ...]), 140 | # ... 141 | # ) 142 | ngram_indices_pairs = (zip(*ngram_with_indices) for ngram_with_indices in ngram_seqs_with_indices) 143 | 144 | # Generator of ( (word_ngram, (start, end)), (word_ngram, (start, end)), ...) 145 | return ((" ".join(ngram_seq), (indices[0][0], indices[-1][1])) for ngram_seq, indices in ngram_indices_pairs) 146 | 147 | 148 | def diff(old, new): 149 | added = [] 150 | removed = [] 151 | added_index = 0 152 | removed_index = 0 153 | old_tokens, old_positions = old 154 | new_tokens, new_positions = new 155 | ndiff = difflib.ndiff(old_tokens, new_tokens) 156 | for i, s in enumerate(ndiff): 157 | word = s.split(' ')[-1] 158 | if s[0] == ' ': 159 | added_index += 1 160 | removed_index += 1 161 | elif s[0] == '-': 162 | removed.append({'word': s.split()[-1], 'indices': (old_positions[removed_index], 163 | old_positions[removed_index] + len(word) + 1)}) 164 | removed_index += 1 165 | elif s[0] == '+': 166 | added.append({'word': s.split()[-1], 'indices': (new_positions[added_index], 167 | new_positions[added_index] + len(word) + 1)}) 168 | added_index += 1 169 | # print('added:', added) 170 | # print('removed:', removed) 171 | return {'added': added, 'removed': removed, 'old': old, 'new': new} 172 | 173 | 174 | # https://evandrocoan.github.io/debugtools/html/classdebug__tools_1_1utilities_1_1diffmatchpatch.html 175 | def diff_linesToWords(text1, text2, delimiter=re.compile('\n')): 176 | """ 177 | 178 | Split two texts into an array of strings. Reduce the texts to a string 179 | of hashes where each Unicode character represents one line. 180 | 181 | 95% of this function code is copied from `diff_linesToChars` on: 182 | https://github.com/google/diff-match-patch/blob/895a9512bbcee0ac5a8ffcee36062c8a79f5dcda/python3/diff_match_patch.py#L381 183 | 184 | Copyright 2018 The diff-match-patch Authors. 185 | https://github.com/google/diff-match-patch 186 | Licensed under the Apache License, Version 2.0 (the "License"); 187 | you may not use this file except in compliance with the License. 188 | You may obtain a copy of the License at 189 | http://www.apache.org/licenses/LICENSE-2.0 190 | 191 | Args: 192 | text1: First string. 193 | text2: Second string. 194 | delimiter: a re.compile() expression for the word delimiter type 195 | 196 | Returns: 197 | Three element tuple, containing the encoded text1, the encoded text2 and 198 | the array of unique strings. The zeroth element of the array of unique 199 | strings is intentionally blank. 200 | """ 201 | lineArray = [] # e.g. lineArray[4] == "Hello\n" 202 | lineHash = {} # e.g. 
lineHash["Hello\n"] == 4 203 | 204 | # "\x00" is a valid character, but various debuggers don't like it. 205 | # So we'll insert a junk entry to avoid generating a null character. 206 | lineArray.append('') 207 | 208 | def diff_linesToCharsMunge(text): 209 | """Split a text into an array of strings. Reduce the texts to a string 210 | of hashes where each Unicode character represents one line. 211 | Modifies linearray and linehash through being a closure. 212 | Args: 213 | text: String to encode. 214 | Returns: 215 | Encoded string. 216 | """ 217 | chars = [] 218 | # Walk the text, pulling out a substring for each line. 219 | # text.split('\n') would would temporarily double our memory footprint. 220 | # Modifying text would create many large strings to garbage collect. 221 | lineStart = 0 222 | lineEnd = -1 223 | while lineEnd < len(text) - 1: 224 | lineEnd = delimiter.search(text, lineStart) 225 | 226 | if lineEnd: 227 | lineEnd = lineEnd.start() 228 | 229 | else: 230 | lineEnd = len(text) - 1 231 | 232 | line = text[lineStart:lineEnd + 1] 233 | 234 | if line in lineHash: 235 | chars.append(chr(lineHash[line])) 236 | else: 237 | if len(lineArray) == maxLines: 238 | # Bail out at maxLines because unichr(maxLines+1) throws. 239 | line = text[lineStart:] 240 | lineEnd = len(text) 241 | lineArray.append(line) 242 | lineHash[line] = len(lineArray) - 1 243 | chars.append(chr(len(lineArray) - 1)) 244 | lineStart = lineEnd + 1 245 | return "".join(chars) 246 | 247 | # Allocate 2/3rds of the space for text1, the rest for text2. 248 | maxLines = 666666 249 | chars1 = diff_linesToCharsMunge(text1) 250 | maxLines = 1114111 251 | chars2 = diff_linesToCharsMunge(text2) 252 | return (chars1, chars2, lineArray) 253 | 254 | ################################################################################ 255 | # I/O 256 | ################################################################################ 257 | 258 | 259 | def read_file(filename): 260 | with open(filename) as f: 261 | content = f.readlines() 262 | content = [x.strip() for x in content if x.strip()] 263 | return content 264 | 265 | 266 | def csv_open(filename): 267 | with open(filename, encoding='utf-8') as f: 268 | reader = csv.reader(f) 269 | return list(reader) 270 | 271 | 272 | def csv_create(filename, headers=None, rows=None): 273 | with open(filename, 'w') as f: 274 | writer = csv.writer(f) 275 | if headers: 276 | writer.writerow(headers) 277 | if rows: 278 | for row in rows: 279 | writer.writerow(row) 280 | 281 | 282 | def csv_append_row(filename, row): 283 | with open(filename, 'a') as f: 284 | writer = csv.writer(f) 285 | writer.writerow(row) 286 | 287 | 288 | # If headers is omitted, first col of the CSV is used 289 | def csv_open_as_json(filename, headers=None): 290 | with open(filename, encoding="utf-8-sig") as f: 291 | return list(csv.DictReader(f, fieldnames=headers)) 292 | 293 | 294 | def json_open(filename): 295 | with open(filename) as f: 296 | return json.load(f) 297 | 298 | 299 | def json_create(filename, data=None): 300 | data = data if data else [] 301 | with open(filename, 'w') as f: 302 | json.dump(data, f, indent=4) 303 | 304 | 305 | def json_append_dict(filename, data_dict): 306 | with open(filename) as f: 307 | old_json = json.load(f) 308 | old_json += [data_dict] 309 | json_create(filename, old_json) 310 | 311 | 312 | def json_update_dict(filename, data_dict): 313 | with open(filename) as f: 314 | old_json = json.load(f) 315 | old_json.update(data_dict) 316 | json_create(filename, old_json) 317 | 318 | 319 | def 
json_save_as_csv(filename, json_dicts): 320 | df = pd.DataFrame(json_dicts) 321 | df.to_csv(filename, index=False) 322 | 323 | 324 | def merge_json_lists(directory): 325 | # Start with an opening bracket 326 | big_json_string = "[" 327 | 328 | files = [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))] 329 | for file in files: 330 | with open(file, 'r') as f: 331 | json_string = f.read() 332 | 333 | # Remove opening and closing bracket, add a comma 334 | json_string = json_string[1:-1] + "," 335 | # Add it to my big string 336 | big_json_string += json_string 337 | 338 | # Remove last comma and add end bracket 339 | big_json_string = big_json_string[:-1] + "]" 340 | return big_json_string 341 | 342 | 343 | ################################################################################ 344 | # Function decorators 345 | ################################################################################ 346 | 347 | # Adds a dictionary of metadata to a function so they can be accessed as global variables under func.meta["key"] 348 | def metadata(func=None, **data): 349 | if not func: 350 | return functools.partial(metadata, **data) 351 | 352 | @functools.wraps(func) 353 | def f(*args, **kwargs): 354 | func.meta = {**data} 355 | return func(*args, **kwargs) 356 | 357 | f.meta = {**data} 358 | return f 359 | 360 | 361 | def retry(func=None, exception=Exception, n_tries=5, delay=0.1, 362 | backoff=2, logger=True, on_failure=None): 363 | """Retry decorator with exponential backoff. 364 | https://stackoverflow.com/questions/42521549/retry-function-in-python 365 | 366 | Parameters 367 | ---------- 368 | func : typing.Callable, optional 369 | Callable on which the decorator is applied, by default None 370 | exception : Exception or tuple of Exceptions, optional 371 | Exception(s) that invoke retry, by default Exception 372 | n_tries : int, optional 373 | Number of tries before giving up, by default 5 374 | delay : int, optional 375 | Initial delay between retries in seconds, by default 0.1 376 | backoff : int, optional 377 | Backoff multiplier e.g. value of 2 will double the delay, by default 1 378 | logger : bool, optional 379 | Option to log or print, by default True 380 | 381 | Returns 382 | ------- 383 | typing.Callable 384 | Decorated callable that calls itself when exception(s) occur. 385 | 386 | Examples 387 | -------- 388 | ... import random 389 | ... @retry(exception=Exception, n_tries=4) 390 | ... def test_random(text): 391 | ... x = random.random() 392 | ... if x < 0.5: 393 | ... raise Exception("Fail") 394 | ... else: 395 | ... print("Success: ", text) 396 | ... test_random("It works!") 397 | """ 398 | # Not sure why this is here 399 | if func is None: 400 | return partial( 401 | retry, 402 | exception=exception, 403 | n_tries=n_tries, 404 | delay=delay, 405 | backoff=backoff, 406 | logger=logger, 407 | on_failure=on_failure, 408 | ) 409 | 410 | @functools.wraps(func) 411 | def wrapper(*args, **kwargs): 412 | ntries, ndelay = n_tries, delay 413 | exe = None 414 | while ntries > 0: 415 | try: 416 | return func(*args, **kwargs) 417 | except exception as e: 418 | exe = e 419 | msg = f"Failed with exception: {str(e)}, Retrying in {ndelay} seconds..." 
420 | if logger: 421 | logging.warning(msg) 422 | else: 423 | print(msg) 424 | time.sleep(ndelay) 425 | ntries -= 1 426 | ndelay *= backoff 427 | 428 | if on_failure is not None: 429 | on_failure(*args, **kwargs) 430 | else: 431 | raise exe 432 | 433 | return wrapper 434 | 435 | 436 | def log(func, logger=logging.info): 437 | @functools.wraps(func) 438 | def wrapper(*args, **kwargs): 439 | logger(f"Calling function {f.__name__}\n\twith args: {args}\n\tkwargs: {kwargs}") 440 | returned = func(*args, **kwargs) 441 | logger(f"Function {f.__name__} succeeded. Returned: {returned}") 442 | return returned 443 | 444 | return wrapper 445 | 446 | 447 | ################################################################################ 448 | # Data structures 449 | ################################################################################ 450 | 451 | 452 | # Clips a number between lower and upper bound, inclusive 453 | def clip_num(n, lower, upper): 454 | return max(lower, min(n, upper)) 455 | 456 | 457 | # Clips an index to the size of the array 458 | def index_clip(arr, i): 459 | return arr[clip_num(i, 0, len(arr)-1)] 460 | 461 | 462 | # Deduplicate a list without losing order 463 | def dedupe(l): 464 | seen = set() 465 | return [e for e in l if not (e in seen or seen.add(e))] 466 | 467 | 468 | def shuffle_and_concat(lists): 469 | for sublist in lists: 470 | shuffle(sublist) 471 | return [item for sublist in lists for item in sublist] 472 | 473 | 474 | # Break a list into parts of a given size, allowing the last element to be shorter 475 | def grouper(iterable, size): 476 | # "grouper(3, 'ABCDEFG') --> [ABC, DEF, G]" 477 | it = iter(iterable) 478 | while True: 479 | group = tuple(itertools.islice(it, None, size)) 480 | if not group: 481 | break 482 | yield group 483 | 484 | 485 | # Add an item between each element of a list 486 | # intersperse([1, 2, 3], '-') = [1, '-', 2, '-', 3] 487 | def intersperse(lst, item): 488 | result = [item] * (len(lst) * 2 - 1) 489 | result[0::2] = lst 490 | return result 491 | 492 | 493 | # Implementation from nltk source 494 | # https://www.nltk.org/_modules/nltk/util.html 495 | def form_ngrams(sequence, n): 496 | """Return the ngrams generated from a sequence of items, as an iterator. For example: 497 | list(form_ngrams([1,2,3,4,5], 3)) => [(1, 2, 3), (2, 3, 4), (3, 4, 5)] 498 | """ 499 | 500 | history = [] 501 | while n > 1: 502 | # PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator 503 | try: 504 | next_item = next(sequence) 505 | except StopIteration: 506 | # no more data, terminate the generator 507 | return 508 | history.append(next_item) 509 | n -= 1 510 | for item in sequence: 511 | history.append(item) 512 | yield tuple(history) 513 | del history[0] 514 | 515 | 516 | # Apply a function recursively to all elements in nested lists. Doesn't work for numpy arrays...? :'( 517 | def recursive_map(func, li, on_elements=True, on_list=False): 518 | if isinstance(li, collections.abc.Sequence) or (isinstance(li, np.ndarray)): 519 | # Self containing lists... Just give up. No map is worth that recursion. 520 | if not li in li: 521 | li = list(map(lambda x: recursive_map(func, x, on_elements, on_list), li)) 522 | return func(li) if on_list else li 523 | else: 524 | return func(li) if on_elements else li 525 | 526 | 527 | # Turn nested lists or numpy arrays into tuples. 
528 | # Useful for preparing lists for printing or making them immutable for caching 529 | def tuplify(l): 530 | return recursive_map(tuple, l, on_elements=False, on_list=True) 531 | 532 | 533 | # Tuplify and round to n digits. Useful for display 534 | def tupliround(li, num_digits=3): 535 | return tuplify(recursive_map(lambda x: round(x, num_digits), li)) 536 | 537 | 538 | # Given a dictionary which contains lists, find the longest length L 539 | # Unroll all lists with len(L), creating a list of len(L) of dictionaries with the same 540 | # key:value pairs, but a single value for each key which contained a list of len(L). 541 | # Add a key __index to each dictionary corresponding to its place in the list 542 | # This allows you to create param dicts which interpolate over multiple keys at the same time 543 | # 544 | # E.g. unroll_dict({ 545 | # param1 = True, 546 | # param2 = [a, b, c], 547 | # param3 = [d, e, f], 548 | # param4 = [g, h] 549 | # }) == [ 550 | # {param1=True, param2=a, param3=d, param4=[g, h]} 551 | # {param1=True, param2=b, param3=e, param4=[g, h]} 552 | # {param1=True, param2=c, param3=f, param4=[g, h]} 553 | # ] 554 | def unroll_dict(dict_of_lists): 555 | # Find longest list in dict 556 | longest_len = 0 557 | for key, value in dict_of_lists.items(): 558 | try: 559 | longest_len = max(longest_len, len(value)) 560 | except Exception: 561 | pass 562 | 563 | # Make a list of dicts, unrolling the longest key lists 564 | list_of_dicts = [] 565 | for i in range(longest_len): 566 | d = {} 567 | for key, value in dict_of_lists.items(): 568 | try: 569 | if len(value) == longest_len: 570 | d[key] = value[i] 571 | continue 572 | except Exception: 573 | pass 574 | d[key] = value 575 | d["__index"] = i 576 | list_of_dicts.append(d) 577 | 578 | return list_of_dicts 579 | 580 | 581 | -------------------------------------------------------------------------------- /util/util_tk.py: -------------------------------------------------------------------------------- 1 | import tkinter as tk 2 | from tkinter import ttk, font 3 | from util.util import recursive_map 4 | 5 | 6 | ################################################################## 7 | # Labels 8 | ################################################################## 9 | 10 | 11 | def create_label(frame, text, row=None, underline=False, size=12, col=0, columnspan=2, pady=3, padx=8, **kwargs): 12 | row = frame.grid_size()[1] if row is None else row 13 | label = ttk.Label(frame, text=text) 14 | label_font = tk.font.Font(label, label.cget("font")) 15 | label_font.configure(underline=underline, size=size) 16 | label.configure(font=label_font) 17 | label.grid(row=row, column=col, columnspan=columnspan, padx=padx, pady=pady, **kwargs) 18 | return label 19 | 20 | 21 | # Creates a label on the first column of the frame 22 | def create_side_label(frame, text, row=None, col=0): 23 | return create_label(frame, text, row, col=col, columnspan=1) 24 | 25 | 26 | # Create a label which is updated with a variable 27 | def create_variable_label(frame, variable, row=None): 28 | label = ttk.Label(frame, textvariable=variable) 29 | label.grid(row=row, columnspan=2, pady=5) 30 | return label 31 | 32 | 33 | # Create a title on with the given text on the specified row 34 | def create_title(frame, text, row=None): 35 | return create_label(frame, text, row, size=14, columnspan=10, pady=3) 36 | 37 | 38 | # Creates a label centered on the row 39 | def create_header(frame, text, row=None): 40 | return create_label(frame, text, row, size=12, columnspan=10, pady=4, 
sticky=tk.W) 41 | 42 | 43 | # Creates a separator on the given row 44 | def create_separator(frame, row=None): 45 | row = frame.grid_size()[1] if row is None else row 46 | sep = ttk.Separator(frame, orient=tk.HORIZONTAL) 47 | sep.grid(row=row, columnspan=10, sticky='ew', pady=3) 48 | return sep 49 | 50 | 51 | def create_gap(frame, row=None): 52 | row = frame.grid_size()[1] if row is None else row 53 | label = ttk.Label(frame, text=" ") 54 | label.grid(row=row, columnspan=2, pady=1) 55 | 56 | 57 | ################################################################## 58 | # Control primitives 59 | ################################################################## 60 | 61 | 62 | # Creates a menubar on the root given a menu dictionary with the follow format: 63 | # i.e. a list of pairs containing menu headers and a list of (menuitem, command) pairs 64 | # and '-' as a separator 65 | # e.g. [ ('File', [('Item1', 'BindingText', 'Binding', Cmd1), '-', 66 | # ('Item2', 'BindingText', 'Binding', Cmd2)] ), 67 | # ('Edit', [ .... ] ) ] 68 | def create_menubar(root, menu_list, menu_bar=None): 69 | # Create a new menu bar and add it to root 70 | if menu_bar is None: 71 | menu_bar = tk.Menu(root) 72 | root.config(menu=menu_bar) 73 | # Create each sub menu and fill it with its items 74 | for menuTitle, menuItems in menu_list.items(): 75 | # Add the menu to the menu bar 76 | menu = tk.Menu(menu_bar) 77 | menu_bar.add_cascade(label=menuTitle, menu=menu) 78 | for item in menuItems: 79 | if item == '-': 80 | menu.add_separator() 81 | else: 82 | # justification doesn't work with menus? 83 | label = item[0] + ' ' + item[1] if item[1] is not None else item[0] 84 | menu.add_command(label=label, command=item[3]) 85 | if item[2] is not None: 86 | root.bind(item[2], item[3]) 87 | return menu_bar 88 | 89 | 90 | # Create a button on the specified row with the specified text and function call 91 | def create_button(frame, text, function, width=10, row=None, column=None): 92 | row = frame.grid_size()[1] if row is None else row 93 | button = ttk.Button(frame, text=text, command=function, width=width) 94 | if column is None: 95 | colspan = 2 96 | column = 1 97 | else: 98 | colspan = 1 99 | button.grid(row=row, column=column, columnspan=colspan, pady=3) 100 | return button 101 | 102 | 103 | def create_checkbutton(master, display_text, var_name, vars_dict): 104 | row = master.grid_size()[1] 105 | create_side_label(master, display_text, row) 106 | check = ttk.Checkbutton(master, variable=vars_dict[var_name]) 107 | check.grid(row=row, column=1, pady=3) 108 | return check 109 | 110 | # Create a combobox with a text label, specified values, and selected variable 111 | def create_combo_box(frame, text, variable, values, row=None, width=10): 112 | row = frame.grid_size()[1] if row is None else row 113 | column = 0 114 | if text != "": 115 | label = create_side_label(frame, text, row) 116 | column += 1 117 | else: 118 | label = None 119 | combo = ttk.Combobox(frame, textvariable=variable, state='readonly', width=width, values=values) 120 | combo.grid(row=row, column=column, columnspan=10, pady=3, sticky=tk.W) 121 | return label, combo 122 | 123 | 124 | # Create a combobox with a text label, specified values, and selected variable 125 | def create_list_box(frame, values, label_text="", row=None, selectmode=tk.SINGLE, width=10, height=25): 126 | row = frame.grid_size()[1] if row is None else row 127 | column = 0 128 | if label_text != "": 129 | label = create_side_label(frame, label_text, row) 130 | column += 1 131 | else: 132 | label 
= None 133 | 134 | listbox = tk.Listbox(frame, selectmode=selectmode, width=width, height=height) 135 | for value in values: 136 | listbox.insert(tk.END, value) 137 | listbox.grid(row=row, column=column, columnspan=10, pady=3, sticky=tk.W) 138 | return label, listbox 139 | 140 | 141 | # Create a slider with a text label, value pair defining its range, and selected variable 142 | def create_slider(frame, text, variable, valuePair, row=None, resolution=None): 143 | row = frame.grid_size()[1] if row is None else row 144 | create_side_label(frame, text, row) 145 | s = ttk.Style() 146 | s.configure("TScale", foreground='white') 147 | slider = tk.Scale(frame, from_=valuePair[0], to=valuePair[1], 148 | variable=variable, orient=tk.HORIZONTAL, 149 | resolution=resolution if resolution is not None else -1) 150 | slider.grid(row=row, column=1, pady=3) 151 | return slider 152 | 153 | ################################################################## 154 | # Control components 155 | ################################################################## 156 | 157 | 158 | # My code sucked. Lets try OOD...? 159 | class ControlComponent: 160 | 161 | # Calls callback with single argument: value 162 | def __init__(self, frame, row, label_text, default, callback): 163 | self.frame = frame 164 | self.row = row 165 | self.label_text = label_text 166 | self.default = default 167 | self.callback = callback 168 | 169 | self.labels, self.controls, self.tk_variables = self.build() 170 | 171 | # Build labels, controls, variables 172 | def build(self): 173 | return ... 174 | 175 | # Refresh after a change 176 | def refresh(self): 177 | ... 178 | 179 | # Hide all labels and controls 180 | def grid_remove(self): 181 | recursive_map(lambda t: t.grid_remove(), (self.labels, self.controls)) 182 | 183 | # Show all labels and controls 184 | def grid(self): 185 | recursive_map(lambda t: t.grid(), (self.labels, self.controls)) 186 | 187 | class Checkbox(ControlComponent): 188 | def __init__(self, frame, row, label_text, default, callback): 189 | super().__init__(frame, row, label_text, default, callback) 190 | 191 | def build(self): 192 | label = create_side_label(self.frame, self.label_text, self.row) 193 | 194 | variable = tk.BooleanVar() 195 | variable.set(self.default) 196 | variable.trace_add("write", lambda *_: self.callback(variable.get())) 197 | 198 | checkbox = tk.Checkbutton(self.frame, variable=variable) 199 | checkbox.grid(row=self.row, column=1, sticky=tk.W) 200 | return label, checkbox, variable 201 | 202 | 203 | class Entry(ControlComponent): 204 | def __init__(self, frame, row, label_text, default, callback, width=10): 205 | self.width = width 206 | self.entry = None 207 | super().__init__(frame, row, label_text, default, callback) 208 | 209 | 210 | def build(self): 211 | label = create_side_label(self.frame, self.label_text, self.row) 212 | 213 | variable = tk.StringVar(value=self.default) 214 | if self.callback is not None: 215 | variable.trace_add("write", lambda *_: self.callback(variable.get())) 216 | 217 | control = ttk.Entry(self.frame, textvariable=variable, width=self.width) 218 | control.grid(row=self.row, column=1, columnspan=10, padx=1, sticky=tk.W) 219 | self.entry = control 220 | return label, control, variable 221 | 222 | def focus_entry(self): 223 | self.entry.focus() 224 | 225 | 226 | class EnumDropdown(ControlComponent): 227 | 228 | def __init__(self, frame, row, label_text, default, callback): 229 | self.enum_type = default.__class__ 230 | self.enum_values = [e.value for e in self.enum_type] 231 
| super().__init__(frame, row, label_text, default, callback)
232 | 
233 |     def build(self):
234 |         label = create_side_label(self.frame, self.label_text, self.row)
235 | 
236 |         variable = tk.StringVar(value=self.default.value)
237 |         variable.trace_add("write", lambda *_: self.callback(self.enum_type(variable.get())))
238 | 
239 |         combo = ttk.Combobox(self.frame, textvariable=variable, values=self.enum_values, state='readonly', width=10)
240 |         combo.grid(row=self.row, column=1, columnspan=5, sticky=tk.W)
241 | 
242 |         return label, combo, variable
243 | 
244 | 
245 | class Slider(ControlComponent):
246 | 
247 |     def __init__(self, frame, row, label_text, default, callback):
248 |         self.is_int = isinstance(default, int)
249 |         if self.is_int:
250 |             self.caster = lambda n: int(round(float(n)))
251 |         else:  # Can't do ternary with lambdas...
252 |             self.caster = float
253 |         self.resolution = max(self.caster(default / 10), 1 if self.is_int else 0.1)
254 |         super().__init__(frame, row, label_text, default, callback)
255 | 
256 |     def build(self):
257 |         label = create_side_label(self.frame, self.label_text, self.row)
258 | 
259 |         # Vars
260 |         self.lower_bound_var = tk.StringVar(value=self.caster(self.default - 10*self.resolution))
261 |         self.upper_bound_var = tk.StringVar(value=self.caster(self.default + 10*self.resolution))
262 |         self.slider_variable = tk.IntVar(value=self.default) if self.is_int else tk.DoubleVar(value=self.default)
263 | 
264 |         # Update
265 |         self.lower_bound_var.trace_add("write", lambda *_: self.refresh())
266 |         self.upper_bound_var.trace_add("write", lambda *_: self.refresh())
267 |         self.slider_variable.trace_add("write", lambda *_: self.callback(self.caster(self.slider_variable.get())))
268 | 
269 |         # Controls
270 |         lower = ttk.Entry(self.frame, textvariable=self.lower_bound_var, width=5)
271 |         lower.grid(row=self.row, column=1, sticky=tk.SE)
272 |         self.build_scale()
273 |         upper = ttk.Entry(self.frame, textvariable=self.upper_bound_var, width=5)
274 |         upper.grid(row=self.row, column=6, sticky=tk.SW)
275 | 
276 |         return label, \
277 |                (lower, self.scale, upper), \
278 |                (self.lower_bound_var, self.upper_bound_var, self.slider_variable)
279 | 
280 | 
281 |     def build_scale(self):
282 |         self.scale = tk.Scale(self.frame,
283 |                               from_=self.caster(self.lower_bound_var.get()),
284 |                               to=self.caster(self.upper_bound_var.get()),
285 |                               resolution=self.resolution,
286 |                               variable=self.slider_variable,
287 |                               orient=tk.HORIZONTAL)
288 |         self.scale.grid(row=self.row, column=2, columnspan=3)
289 | 
290 |     def refresh(self):
291 |         self.scale.destroy()
292 |         self.build_scale()
293 | 
294 | 
295 | class ComplexSlider(ControlComponent):
296 | 
297 |     def __init__(self, frame, row, label_text, default, callback):
298 |         super().__init__(frame, row, label_text, default, callback)
299 | 
300 |     def build(self):
301 |         self.complex = complex(self.default)
302 | 
303 |         def set_real(real):
304 |             self.complex = real + 1j * self.complex.imag
305 |             self.update()
306 |         def set_imag(imag):
307 |             self.complex = self.complex.real + 1j * imag
308 |             self.update()
309 | 
310 |         control_label = create_side_label(self.frame, str(self.label_text), self.row)
311 |         self.complex_label = create_label(self.frame, str(self.complex), self.row, col=1, sticky=tk.W)
312 | 
313 |         self.real_slider = Slider(self.frame, self.row+1, "Real", self.complex.real, callback=set_real)
314 |         self.imag_slider = Slider(self.frame, self.row+2, "Imaginary", self.complex.imag, callback=set_imag)
315 | 
316 |         return (control_label, self.complex_label), \
317 |                (self.real_slider,
self.imag_slider), \ 318 | [] 319 | 320 | 321 | def update(self): 322 | self.complex_label["text"] = str(self.complex) 323 | self.callback(self.complex) 324 | 325 | 326 | def treeview_all_nodes(treeview: ttk.Treeview, parent=None): 327 | nodes = [] 328 | for node in treeview.get_children(parent): 329 | nodes.append(node) 330 | nodes.extend(treeview_all_nodes(treeview, node)) 331 | return nodes 332 | 333 | -------------------------------------------------------------------------------- /util/util_tree.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | import html2text 3 | import numpy as np 4 | import re 5 | import random 6 | from datetime import datetime 7 | 8 | def new_node(node_id=None, text='', mutable=True): 9 | if not node_id: 10 | node_id = str(uuid.uuid1()) 11 | node = {"id": node_id, 12 | "text": text, 13 | "children": [], 14 | "mutable": mutable} 15 | return node 16 | 17 | 18 | # Height of d, root has the greatest height, minimum is 1 19 | def height(d): 20 | return 1 + max([0, *[height(c) for c in d["children"]]]) 21 | 22 | 23 | # Depth of d, root is 0 depth 24 | def depth(d, node_dict): 25 | return 0 if "parent_id" not in d else (1 + depth(node_dict[d["parent_id"]], node_dict)) 26 | 27 | 28 | def num_descendents(root, filter=None): 29 | return len(subtree_list(root, filter)) 30 | 31 | 32 | def generate_conditional_tree(root, filter=None): 33 | return {d["id"]: d for d in flatten_tree(tree_subset(root=root, 34 | filter=filter), 35 | )} 36 | 37 | 38 | def filtered_children(node, filter=None): 39 | if filter: 40 | return [child for child in node['children'] if filter(child)] 41 | else: 42 | return node['children'] 43 | 44 | def antifiltered_children(self, node, filter=None): 45 | return [child for child in node['children'] if not filter(child)] if filter else [] 46 | 47 | 48 | def subtree_list(root, filter=None, depth_limit=None): 49 | if depth_limit == 0: 50 | return [] 51 | sub_list = [root] 52 | children = filtered_children(root, filter) 53 | for child in children: 54 | sub_list += subtree_list(child, filter, depth_limit - 1 if depth_limit else None) 55 | return sub_list 56 | 57 | 58 | def depth_limited_tree(root, depth_limit): 59 | new_root = {'id': root['id'], 'children': []} 60 | if depth_limit == 0: 61 | return new_root 62 | if 'children' in root: 63 | for child in root['children']: 64 | new_root['children'].append(depth_limited_tree(child, depth_limit - 1)) 65 | return new_root 66 | 67 | 68 | def limited_branching_tree(ancestry, root, depth_limit): 69 | # returns a subset of tree which only contains nodes no more than depth_limit levels from a node in ancestry 70 | if len(ancestry) <= 1: 71 | return depth_limited_tree(root, depth_limit) 72 | child_in_ancestry = ancestry[1] 73 | new_root = {'id': root['id'], 'children': []} 74 | for child in root['children']: 75 | if child['id'] == child_in_ancestry['id']: 76 | new_root['children'].append(limited_branching_tree(ancestry[1:], child, depth_limit)) 77 | elif depth_limit > 0: 78 | new_root['children'].append(depth_limited_tree(child, depth_limit-1)) 79 | return new_root 80 | 81 | # TODO option for no depth limit 82 | def collapsed_wavefunction(ancestry, root, current_node, depth_limit): 83 | if len(ancestry) <= 1 or root['id'] == current_node['id']: 84 | return depth_limited_tree(root, depth_limit) 85 | child_in_ancestry = ancestry[1] 86 | new_root = {'id': root['id'], 'children': []} 87 | for child in root['children']: 88 | if child['id'] == child_in_ancestry['id']: 89 | 
new_root['children'].append(collapsed_wavefunction(ancestry[1:], child, current_node, depth_limit)) 90 | return new_root 91 | 92 | def limited_distance_tree(root, reference_node, distance_limit, node_dict): 93 | condition = lambda node: path_distance(reference_node, node, node_dict) <= distance_limit 94 | if not condition(root): 95 | # root is node in reference node's ancestry distance_limit removed 96 | ancestry = node_ancestry(reference_node, node_dict) 97 | root = ancestry[-(distance_limit + 1)] 98 | return tree_subset(root, condition) 99 | 100 | # given a root node and include condition, returns a new tree which contains only nodes who satisfy 101 | # the condition and whose ancestors also all satisfy the condition 102 | # nodes in the new tree contain only their ids and a childlist 103 | # this generates a copy 104 | # TODO copy contains no data except id(same as old tree) and children - will cause problems? 105 | # TODO modify this function or make new function that copies all of tree? 106 | # TODO existing python function to filter/copy dictionary? 107 | 108 | # this assumes the root satisfies the condition 109 | def tree_subset(root, filter=None, copy_attributes=None): 110 | if not filter: 111 | return root 112 | if not copy_attributes: 113 | copy_attributes = [] 114 | new_root = {'id': root['id'], 'children': []} 115 | if 'children' in root: 116 | for child in filtered_children(root, filter): 117 | new_root['children'].append(tree_subset(child, filter, copy_attributes)) 118 | for attribute in copy_attributes: 119 | if attribute in root: 120 | new_root[attribute] = root[attribute] 121 | return new_root 122 | 123 | 124 | def stochastic_transition(node, mode='descendents', filter=None): 125 | transition_probs = subtree_weights(node, mode, filter) 126 | choice = random.choices(node['children'], transition_probs, k=1) 127 | return choice[0] 128 | 129 | 130 | def subtree_weights(node, mode='descendents', filter=None): 131 | weights = [] 132 | if 'children' in node: 133 | for child in node['children']: 134 | if not filter or filter(child): 135 | if mode == 'descendents': 136 | weights.append(num_descendents(child, filter)) 137 | elif mode == 'leaves': 138 | descendents = subtree_list(child, filter) 139 | leaf_descendents = [d for d in descendents if 'children' not in d or len(d['children']) == 0] 140 | weights.append(len(leaf_descendents)) 141 | elif mode == 'uniform': 142 | weights.append(1) 143 | else: 144 | print('invalid mode for subtree weights') 145 | else: 146 | weights.append(0) 147 | #print(weights) 148 | norm = np.linalg.norm(weights, ord=1) 149 | normalized_weights = weights / norm 150 | #print(normalized_weights) 151 | return normalized_weights 152 | 153 | ################################# 154 | # Ancestry 155 | ################################# 156 | 157 | # Returns a list of ancestor nodes beginning with the progenitor 158 | def node_ancestry(node, node_dict): 159 | ancestry = [node] 160 | while "parent_id" in node: 161 | if node['parent_id'] in node_dict: 162 | node = node_dict[node["parent_id"]] 163 | ancestry.insert(0, node) 164 | else: 165 | break 166 | return ancestry 167 | 168 | # returns node ancestry starting from root 169 | def ancestry_in_range(root, node, node_dict): 170 | ancestry = node_ancestry(node, node_dict) 171 | #print([n['id'] for n in ancestry]) 172 | i = 0 173 | while ancestry[i]['id'] != root['id']: 174 | i += 1 175 | return ancestry[i:] 176 | 177 | def ancestor_text_indices(ancestry=None, text_callback=None): 178 | indices = [] 179 | #end_indices = 
[] 180 | start_index = 0 181 | for node in ancestry: 182 | text = text_callback(node) if text_callback else node['text'] 183 | #text.append(node["text"]) 184 | indices.append((start_index, start_index + len(text))) 185 | start_index += len(text) 186 | return indices 187 | 188 | def ancestor_text_end_indices(ancestry=None, text_callback=None): 189 | return [ind[1] for ind in ancestor_text_indices(ancestry, text_callback)] 190 | 191 | def ancestor_text_start_indices(ancestry=None, text_callback=None): 192 | return [ind[0] for ind in ancestor_text_indices(ancestry, text_callback)] 193 | 194 | def ancestor_text_list(ancestry, text_callback=None): 195 | if text_callback: 196 | return [text_callback(node) for node in ancestry] 197 | else: 198 | return [node['text'] for node in ancestry] 199 | 200 | def ancestry_plaintext(ancestry, text_callback=None): 201 | if text_callback: 202 | return "".join(ancestor_text_list(ancestry, text_callback)) 203 | else: 204 | return "".join(ancestor_text_list(ancestry)) 205 | 206 | def nearest_common_ancestor(node_a, node_b, node_dict): 207 | ancestry_a = node_ancestry(node_a, node_dict) 208 | ancestry_b = node_ancestry(node_b, node_dict) 209 | #print('ancestry a:', [n['id'] for n in ancestry_a]) 210 | #print('ancestry b:', [n['id'] for n in ancestry_b]) 211 | for i in range(1, len(ancestry_a)): 212 | if i > (len(ancestry_b) - 1) or ancestry_a[i] is not ancestry_b[i]: 213 | return ancestry_a[i-1], i-1 214 | return ancestry_a[-1], len(ancestry_a) - 1 215 | 216 | def path_distance(node_a, node_b, node_dict): 217 | nca, _ = nearest_common_ancestor(node_a, node_b, node_dict) 218 | #print('nca:', nca['id']) 219 | a_distance = len(ancestry_in_range(nca, node_a, node_dict)) 220 | b_distance = len(ancestry_in_range(nca, node_b, node_dict)) 221 | return (a_distance - 1) + (b_distance - 1) 222 | 223 | # Returns True if a is ancestor of b 224 | def in_ancestry(a, b, node_dict): 225 | ancestry = node_ancestry(b, node_dict) 226 | return a in ancestry 227 | 228 | def node_index(node, node_dict): 229 | return len(node_ancestry(node, node_dict)) - 1 230 | 231 | 232 | # returns whether node_a was created before node_b 233 | # TODO for old nodes, extract date from generation metadata...? 
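# Timestamps are strings in "%Y-%m-%d-%H.%M.%S" format, e.g. "2021-05-01-13.30.00"; returns None if either node lacks one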
234 | def created_before(node_a, node_b): 235 | try: 236 | timestamp1 = node_a['meta']['creation_timestamp'] 237 | timestamp2 = node_b['meta']['creation_timestamp'] 238 | except KeyError: 239 | print(node_a['meta']) 240 | print(node_b['meta']) 241 | print('error: one or more of the nodes has no timestamp attribute') 242 | return None 243 | t1 = datetime.strptime(timestamp1, "%Y-%m-%d-%H.%M.%S") 244 | t2 = datetime.strptime(timestamp2, "%Y-%m-%d-%H.%M.%S") 245 | return t1 <= t2 246 | 247 | def get_inherited_attribute(attribute, node, tree_node_dict): 248 | for lineage_node in reversed(node_ancestry(node, tree_node_dict)): 249 | if attribute in lineage_node: 250 | return lineage_node[attribute] 251 | return None 252 | 253 | # recursively called on subtree 254 | def overwrite_subtree(node, attribute, new_value, old_value=None, force_overwrite=False): 255 | if force_overwrite or (attribute not in node) or old_value is None or (node[attribute] == old_value) \ 256 | or (node[attribute] == new_value): 257 | node[attribute] = new_value 258 | terminal_nodes_list = [] 259 | for child in node['children']: 260 | terminal_nodes_list += overwrite_subtree(child, attribute, new_value, old_value, force_overwrite) 261 | return terminal_nodes_list 262 | else: 263 | return [node] 264 | 265 | 266 | 267 | 268 | 269 | # TODO regex, tags 270 | def search(root, pattern, text=True, text_attribute_name='text', tags=False, case_sensitive=False, regex=False, 271 | filter_set=None, max_depth=None): 272 | matches = [] 273 | if not (text or tags) \ 274 | or (filter_set is not None and root['id'] not in filter_set)\ 275 | or max_depth == 0: 276 | return [] 277 | if text: 278 | matches_iter = re.finditer(pattern, root[text_attribute_name]) if case_sensitive \ 279 | else re.finditer(pattern, root[text_attribute_name], re.IGNORECASE) 280 | for match in matches_iter: 281 | matches.append({'node_id': root['id'], 282 | 'span': match.span(), 283 | 'match': match.group()}) 284 | if tags: 285 | # search for pattern in root['tags'] 286 | pass 287 | for child in root['children']: 288 | matches += search(child, pattern, text, text_attribute_name, tags, case_sensitive, regex, filter_set, 289 | max_depth-1 if max_depth else None) 290 | return matches 291 | 292 | 293 | 294 | # { 295 | # root: { 296 | # text: ... 297 | # children: [ 298 | # { 299 | # text: ... 300 | # children: ... 301 | # }, 302 | # ] 303 | # } 304 | # generation_settings: {...} 305 | # } 306 | # Adds an ID field and a parent ID field to each dict in a recursive tree with "children" 307 | def flatten_tree(d, reverse=False): 308 | if "id" not in d: 309 | d["id"] = str(uuid.uuid1()) 310 | 311 | children = d.get("children", []) 312 | flat_children = [] 313 | for child in (reversed(children) if reverse else children): 314 | child["parent_id"] = d["id"] 315 | flat_children.extend(flatten_tree(child, reverse)) 316 | 317 | return [d, *flat_children] 318 | 319 | 320 | def flatten_tree_revisit_parents(d, parent=None): 321 | if "id" not in d: 322 | d["id"] = str(uuid.uuid1()) 323 | 324 | children = d.get("children", []) 325 | flat_children = [] 326 | for child in children: 327 | child["parent_id"] = d["id"] 328 | flat_children.extend(flatten_tree_revisit_parents(child, d)) 329 | 330 | return [d, *flat_children] if parent is None else [d, *flat_children, parent] 331 | 332 | 333 | # Remove html and random double newlines from Miro 334 | def fix_miro_tree(flat_data): 335 | # Otherwise it will randomly insert line breaks.... 
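    # html2text hard-wraps converted text at `body_width` columns by default; setting it to 0 disables that re-wrapping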
336 |     h = html2text.HTML2Text()
337 |     h.body_width = 0
338 | 
339 |     id_to_node = {d["id"]: d for d in flat_data}
340 |     for d in flat_data:
341 |         # Only fix miro text
342 |         if "text" not in d or all([tag not in d["text"] for tag in ["

", "', self.add_empty_module_window) 66 | #self.add_module_button = tk.Button(self.menu_frame, text='Add Module', fg=text_color(), bg=bg_color(), cursor='hand2') 67 | self.add_module_button.pack(side='left', padx=20) 68 | #self.add_module_button.bind('', self.add_module_window) 69 | 70 | self.close_icon = icons.get_icon('x-lightgray') 71 | self.x_button = tk.Label(self.menu_frame, text='-', fg=text_color(), bg=bg_color(), cursor='hand2') 72 | self.x_button.pack(side='right', padx=20) 73 | self.x_button.bind('', lambda event, pane=self: self.hide_pane_callback(pane=self))#lambda event, pane_name=self.name: destroy_callback(pane_name=pane_name)) 74 | 75 | self.pane = ttk.PanedWindow(self.frame, orient=self.orient) 76 | self.pane.pack(side='top', fill='both', expand=True) 77 | self.hidden = False 78 | 79 | def hide(self): 80 | if not self.hidden: 81 | self.parent.forget(self.frame) 82 | self.hidden = True 83 | 84 | def show(self): 85 | if self.hidden: 86 | self.parent.add(self.frame) 87 | self.hidden = False 88 | 89 | def destroy(self, *args): 90 | # if self.module_menu: 91 | # self.module_menu.destroy() 92 | if self.x_button: 93 | self.x_button.destroy() 94 | # if self.module_menu: 95 | # self.menu_frame.destroy() 96 | self.parent.forget(self.frame) 97 | self.frame.destroy() 98 | 99 | super().destroy() 100 | 101 | def clear(self): 102 | pass 103 | 104 | def add_empty_module_window(self, *args): 105 | new_module_window = ModuleWindow(self) 106 | self.module_windows.append(new_module_window) 107 | new_module_window.build(self.module_options, self.module_selection_callback, self.module_window_destroy_callback) 108 | return new_module_window 109 | 110 | def add_module(self, module): 111 | new_module_window = self.add_empty_module_window() 112 | new_module_window.change_module(module) 113 | 114 | def module_names(self): 115 | return [window.module_name() for window in self.module_windows] 116 | 117 | 118 | class ModuleWindow: 119 | def __init__(self, parent): 120 | self.parent = parent 121 | self.frame = None 122 | self.menu_frame = None 123 | self.module_menu = None 124 | self.module_selection = tk.StringVar() 125 | self.module = None 126 | self.close_button = None 127 | #self.index = index 128 | 129 | def build(self, options, selection_callback, destroy_callback): 130 | self.frame = ttk.Frame(self.parent.pane, borderwidth=2, relief='sunken')#, height=1, background=bg_color()) 131 | self.parent.pane.add(self.frame, weight=1) 132 | 133 | self.menu_frame = ttk.Frame(self.frame) 134 | self.menu_frame.pack(side='top', fill='x', expand=False) 135 | # make dropdown for selecting a module 136 | self.module_selection.set('None') 137 | self.module_menu = tk.OptionMenu(self.menu_frame, self.module_selection, *options) 138 | self.module_menu.pack(side='left', expand=True, padx=20) 139 | self.module_selection.trace('w', lambda a, b, c, module_window=self: selection_callback(module_window=module_window)) 140 | 141 | self.close_icon = icons.get_icon('x-lightgray') 142 | self.x_button = tk.Label(self.menu_frame, text='⨯', fg=text_color(), bg=bg_color(), cursor='hand2') 143 | self.x_button.pack(side='right', padx=20) 144 | self.x_button.bind('', lambda event, module_window=self: destroy_callback(module_window=module_window)) 145 | 146 | def destroy(self): 147 | self.parent.module_windows.remove(self) 148 | self.parent.pane.forget(self.frame) 149 | self.frame.destroy() 150 | self.frame = None 151 | 152 | def clear(self): 153 | if self.module: 154 | self.module.destroy() 155 | self.module = None 156 | 157 | def 
pane_name(self): 158 | return self.parent.name 159 | 160 | def module_name(self): 161 | return self.module.name if self.module else None 162 | 163 | def set_selection(self, module_name): 164 | self.module_selection.set(module_name) 165 | 166 | def change_module(self, module): 167 | self.clear() 168 | self.module = module 169 | self.module.build(parent=self) 170 | self.set_selection(module.name) 171 | 172 | 173 | class Module: 174 | def __init__(self, name, callbacks, state): 175 | self.name = name 176 | self.frame = None 177 | self.parent = None 178 | self.callbacks = callbacks 179 | self.state = state 180 | self.textboxes = [] 181 | #self.settings = self.state.module_settings[name] if name in self.state.module_settings else {} 182 | 183 | def settings(self): 184 | return self.state.module_settings[self.name] if self.name in self.state.module_settings else {} 185 | 186 | def build(self, parent): 187 | self.parent = parent 188 | self.frame = ttk.Frame(self.parent.frame, borderwidth=2) 189 | self.frame.pack(expand=True, fill='both', side="top") 190 | 191 | def destroy(self): 192 | self.frame.pack_forget() 193 | self.frame.destroy() 194 | self.frame = None 195 | 196 | def window(self): 197 | return self.parent 198 | 199 | def tree_updated(self): 200 | pass 201 | 202 | def selection_updated(self): 203 | pass 204 | 205 | # returns true if any of the module's textboxes are enabled and have focus 206 | def textbox_has_focus(self): 207 | #print(self.name) 208 | #print(self.frame) 209 | for textbox in self.textboxes: 210 | if self.frame.focus_get() == textbox and textbox.cget('state') == 'normal': 211 | return True 212 | return False -------------------------------------------------------------------------------- /view/styles.py: -------------------------------------------------------------------------------- 1 | from tkinter.font import Font 2 | from view.colors import text_color, bg_color 3 | 4 | 5 | def textbox_config(fg=text_color(), bg=bg_color(), font='Georgia', size=12, spacing1=10, spacing2=8, pady=5): 6 | return {'font': Font(family=font, size=size), 7 | 'spacing1': spacing1, # spacing between paragraphs 8 | 'foreground': fg, 9 | 'background': bg, 10 | 'padx': 2, 11 | 'pady': pady, 12 | 'spacing2': spacing2, # Spacing between lines 13 | 'spacing3': 5, 14 | 'wrap': "word", 15 | 'insertbackground': fg} 16 | 17 | 18 | def code_textbox_config(bg='black'): 19 | return { 20 | 'font': Font(family='Monaco', size=12), 21 | 'foreground': 'white', 22 | 'background': bg, 23 | 'insertbackground': 'white', 24 | 'spacing1': 2, 25 | 'spacing2': 2, 26 | 'spacing3': 2, 27 | } --------------------------------------------------------------------------------
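A minimal usage sketch for the style helpers above (an assumption-laden example, not part of the source: it assumes the view.colors helpers resolve outside the full app; every key these dicts return is a standard tk.Text option, so they can be splatted into configure):

    import tkinter as tk
    from view.styles import textbox_config, code_textbox_config

    root = tk.Tk()
    story_box = tk.Text(root)
    story_box.configure(**textbox_config())        # serif prose styling with paragraph/line spacing
    code_box = tk.Text(root)
    code_box.configure(**code_textbox_config())    # white-on-black monospace styling
    story_box.pack(fill='both', expand=True)
    code_box.pack(fill='both', expand=True)
    root.mainloop()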