├── lmao
│   ├── lmao
│   │   ├── __init__.py
│   │   ├── wsgi.py
│   │   ├── urls.py
│   │   └── settings.py
│   ├── endpoints
│   │   ├── __init__.py
│   │   ├── migrations
│   │   │   └── __init__.py
│   │   ├── models.py
│   │   ├── admin.py
│   │   ├── tests.py
│   │   ├── apps.py
│   │   ├── urls.py
│   │   └── views.py
│   ├── requirements.txt
│   └── manage.py
├── popup.jpg
├── lmao_zoomed.gif
├── lmao_integrator
│   ├── images
│   │   ├── small.png
│   │   ├── marquee.png
│   │   ├── extension_thumb.png
│   │   ├── extension_thumb16.png
│   │   ├── extension_thumb32.png
│   │   ├── extension_thumb48.png
│   │   ├── extension_thumb128.png
│   │   └── extension_thumb256.png
│   ├── contentScript.js
│   ├── material
│   │   ├── bower.json
│   │   ├── package.json
│   │   └── material.min.js
│   ├── manifest.json
│   ├── popup.js
│   ├── popup.html
│   ├── injector.js
│   └── background.js
├── .gitignore
├── privacy_policy.md
└── README.md
/lmao/lmao/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lmao/endpoints/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lmao/endpoints/migrations/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/popup.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/popup.jpg
--------------------------------------------------------------------------------
/lmao_zoomed.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_zoomed.gif
--------------------------------------------------------------------------------
/lmao/endpoints/models.py:
--------------------------------------------------------------------------------
1 | from django.db import models
2 |
3 | # Create your models here.
4 |
--------------------------------------------------------------------------------
/lmao/endpoints/admin.py:
--------------------------------------------------------------------------------
1 | from django.contrib import admin
2 |
3 | # Register your models here.
4 |
--------------------------------------------------------------------------------
/lmao/endpoints/tests.py:
--------------------------------------------------------------------------------
1 | from django.test import TestCase
2 |
3 | # Create your tests here.
4 |
--------------------------------------------------------------------------------
/lmao_integrator/images/small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/small.png
--------------------------------------------------------------------------------
/lmao_integrator/images/marquee.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/marquee.png
--------------------------------------------------------------------------------
/lmao_integrator/images/extension_thumb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/extension_thumb.png
--------------------------------------------------------------------------------
/lmao_integrator/images/extension_thumb16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/extension_thumb16.png
--------------------------------------------------------------------------------
/lmao_integrator/images/extension_thumb32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/extension_thumb32.png
--------------------------------------------------------------------------------
/lmao_integrator/images/extension_thumb48.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/extension_thumb48.png
--------------------------------------------------------------------------------
/lmao/endpoints/apps.py:
--------------------------------------------------------------------------------
1 | from django.apps import AppConfig
2 |
3 |
4 | class EndpointsConfig(AppConfig):
5 | name = 'endpoints'
6 |
--------------------------------------------------------------------------------
/lmao_integrator/images/extension_thumb128.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/extension_thumb128.png
--------------------------------------------------------------------------------
/lmao_integrator/images/extension_thumb256.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/images/extension_thumb256.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/*.pyc
2 | **/__pycache__
3 | .vscode
4 | env
5 | **/*.sqlite3
6 | data/
7 | lmao/models/
8 | sagemaker/
9 | *.zip
10 | **/config.json
--------------------------------------------------------------------------------
/lmao/requirements.txt:
--------------------------------------------------------------------------------
1 | Django==3.1.14
2 | djangorestframework==3.11.2
3 | Markdown==3.1.1
4 | numpy==1.22.0
5 | Pillow>=7.1.0
6 | pytz==2019.3
7 | six==1.13.0
8 | sqlparse==0.4.4
9 | torch==1.3.1+cpu
10 | torchvision==0.4.2+cpu
11 | transformers==3.5.1
12 |
--------------------------------------------------------------------------------
/lmao/endpoints/urls.py:
--------------------------------------------------------------------------------
1 | from . import views
2 | from django.urls import path
3 | from django.conf.urls import url, include
4 | from rest_framework.urlpatterns import format_suffix_patterns
5 | # urlpatterns = []
6 |
7 | urlpatterns = [
8 | # path('url_checker', views.check_url),
9 | *format_suffix_patterns([url(r'^infer$', views.Infer.as_view())], allowed=['json', 'html', 'api']),
10 | ]
11 |
--------------------------------------------------------------------------------
/lmao/lmao/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for lmao project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.wsgi import get_wsgi_application
13 |
14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lmao.settings')
15 |
16 | application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/lmao_integrator/contentScript.js:
--------------------------------------------------------------------------------
1 | if(document.getElementById('lmao_injector_script') === null) {
2 | var s = document.createElement('script');
3 | s.id = "lmao_injector_script"
4 | s.src = chrome.runtime.getURL('injector.js');
5 | // s.onload = function() {
6 | // console.log(this)
7 | // this.remove();
8 | // };
9 | (document.head || document.documentElement).appendChild(s);
10 |
11 | chrome.storage.sync.set({local_offline: false}, function() {});
12 | chrome.storage.sync.set({cloud_offline: false}, function() {});
13 | }
14 |
--------------------------------------------------------------------------------
/lmao_integrator/material/bower.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "material-design-lite",
3 | "version": "1.3.0",
4 | "homepage": "https://github.com/google/material-design-lite",
5 | "authors": [
6 | "Material Design Lite team"
7 | ],
8 | "description": "Material Design Components in CSS, JS and HTML",
9 | "main": [
10 | "material.min.css",
11 | "material.min.js"
12 | ],
13 | "keywords": [
14 | "material",
15 | "design",
16 | "styleguide",
17 | "style",
18 | "guide"
19 | ],
20 | "license": "Apache-2",
21 | "ignore": [
22 | "**/.*",
23 | "node_modules",
24 | "bower_components",
25 | "./lib/.bower_components",
26 | "test",
27 | "tests"
28 | ]
29 | }
30 |
--------------------------------------------------------------------------------
/lmao/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Django's command-line utility for administrative tasks."""
3 | import os
4 | import sys
5 |
6 |
7 | def main():
8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lmao.settings')
9 | try:
10 | from django.core.management import execute_from_command_line
11 | except ImportError as exc:
12 | raise ImportError(
13 | "Couldn't import Django. Are you sure it's installed and "
14 | "available on your PYTHONPATH environment variable? Did you "
15 | "forget to activate a virtual environment?"
16 | ) from exc
17 | execute_from_command_line(sys.argv)
18 |
19 |
20 | if __name__ == '__main__':
21 | main()
22 |
--------------------------------------------------------------------------------
/lmao_integrator/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "LMAO Overleaf Integration",
3 | "version": "0.3.5",
4 | "description": "A LaTeX neural autocomplete plugin for Overleaf",
5 | "permissions": ["activeTab", "declarativeContent", "storage", "http://127.0.0.1:8000/*", "https://h0sywlk4gh.execute-api.eu-west-1.amazonaws.com/test-lmao-en/invoke-lmao"],
6 | "background": {
7 | "scripts": ["background.js"],
8 | "persistent": false
9 | },
10 | "page_action": {
11 | "default_popup": "popup.html",
12 | "default_icon": {
13 | "16": "images/extension_thumb16.png",
14 | "32": "images/extension_thumb32.png",
15 | "48": "images/extension_thumb48.png",
16 | "128": "images/extension_thumb128.png",
17 | "256": "images/extension_thumb256.png"
18 | }
19 | },
20 | "icons": {
21 | "16": "images/extension_thumb16.png",
22 | "32": "images/extension_thumb32.png",
23 | "48": "images/extension_thumb48.png",
24 | "128": "images/extension_thumb128.png",
25 | "256": "images/extension_thumb256.png"
26 | },
27 | "web_accessible_resources": ["injector.js", "config.json"],
28 | "externally_connectable": {
29 | "matches": ["https://*.overleaf.com/project/*"]
30 | },
31 | "manifest_version": 2
32 | }
--------------------------------------------------------------------------------
/lmao/lmao/urls.py:
--------------------------------------------------------------------------------
1 | """lmao URL Configuration
2 |
3 | The `urlpatterns` list routes URLs to views. For more information please see:
4 | https://docs.djangoproject.com/en/2.2/topics/http/urls/
5 | Examples:
6 | Function views
7 | 1. Add an import: from my_app import views
8 | 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 | Class-based views
10 | 1. Add an import: from other_app.views import Home
11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 | Including another URLconf
13 | 1. Import the include() function: from django.urls import include, path
14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 | """
16 | from django.contrib import admin
17 | from django.urls import path
18 | from django.conf.urls import url, include
19 | from django.views.generic.base import RedirectView
20 | from lmao import settings
21 | from rest_framework.authtoken import views as token_views
22 | from rest_framework import routers
23 |
24 | dev_urls = [
25 | path('admin/', admin.site.urls),
26 | ]
27 |
28 | urlpatterns = [
29 | url(r'^', include('endpoints.urls')),
30 | # url(r'^', include(router.urls)),
31 | # url(r'^api-v1/api-auth/', include('rest_framework.urls', namespace='rest_framework')),
32 | # url(r'^api-v1/api-token-auth/', token_views.obtain_auth_token)
33 | ]
34 |
35 | if settings.DEBUG:
36 | urlpatterns = dev_urls + urlpatterns
37 |
38 |
--------------------------------------------------------------------------------
/privacy_policy.md:
--------------------------------------------------------------------------------
1 | # LMAO Privacy Policy
2 | LMAO does not store any data of any kind. The only information it ever interacts with is the last several lines of text in an Overleaf document. It processes them in real time and then discards them, i.e. no predictions, historical text, or anything at all is ever stored.
3 |
4 | - LMAO has no home server (only an inference endpoint if you choose to use cloud hosted predictions).
5 | - LMAO doesn't embed any kind of analytic hooks in its code.
6 | - LMAO doesn't cost anything and no feature of it is behind any paywall.
7 | - The only time LMAO connects to a remote server is
8 | - (a) by Chrome when it automatically updates the extension, and
9 | - (b) if you choose to use the cloud hosted predictions (and the prediction server is up and running), then whenever you start a prediction in an Overleaf document, the last several lines of text in the current `.tex` file you are editing are sent via HTTPS to an AWS server, which runs them through GPT2 and returns the predictions. The sent text and computed predictions are ephemeral -- they are immediately discarded after use.
10 |
11 | The project is currently hosted on github.com, which is owned by GitHub, Inc. (now a subsidiary of Microsoft Corporation); GitHub's own data handling is separate from and unrelated to LMAO.
12 |
13 | ### Changes to the privacy policy
14 | Since LMAO does not collect any information on users at all, there is no way to notify you of changes. But I will always update the privacy policy at least 3 months before changes go into effect, so if you are concerned, just check back here a few times a year. That said, I don't have plans to ever change it.
15 |
16 | That is all.
--------------------------------------------------------------------------------
/lmao_integrator/material/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "material-design-lite",
3 | "version": "1.3.0",
4 | "description": "Material Design Components in CSS, JS and HTML",
5 | "private": true,
6 | "license": "Apache-2.0",
7 | "author": "Google",
8 | "repository": "google/material-design-lite",
9 | "main": "dist/material.min.js",
10 | "devDependencies": {
11 | "acorn": "^7.1.1",
12 | "babel-core": "^6.20.0",
13 | "babel-preset-es2015": "^6.18.0",
14 | "browser-sync": "^2.2.3",
15 | "chai": "^3.3.0",
16 | "chai-jquery": "^2.0.0",
17 | "del": "^2.0.2",
18 | "drool": "^0.4.0",
19 | "escodegen": "^1.6.1",
20 | "google-closure-compiler": "",
21 | "gulp": "^3.9.0",
22 | "gulp-autoprefixer": "^3.0.2",
23 | "gulp-cache": "^0.4.5",
24 | "gulp-closure-compiler": "^0.4.0",
25 | "gulp-concat": "^2.4.1",
26 | "gulp-connect": "^5.0.0",
27 | "gulp-css-inline-images": "^0.1.1",
28 | "gulp-csso": "1.0.0",
29 | "gulp-file": "^0.3.0",
30 | "gulp-flatten": "^0.3.1",
31 | "gulp-front-matter": "^1.2.2",
32 | "gulp-header": "^1.2.2",
33 | "gulp-if": "^2.0.0",
34 | "gulp-iife": "^0.3.0",
35 | "gulp-imagemin": "^3.1.0",
36 | "gulp-jscs": "^4.0.0",
37 | "gulp-jshint": "^2.0.4",
38 | "gulp-load-plugins": "^1.3.0",
39 | "gulp-marked": "^1.0.0",
40 | "gulp-mocha-phantomjs": "^0.12.0",
41 | "gulp-open": "^2.0.0",
42 | "gulp-rename": "^1.2.0",
43 | "gulp-replace": "^0.5.3",
44 | "gulp-sass": "3.0.0",
45 | "gulp-shell": "^0.5.2",
46 | "gulp-size": "^2.0.0",
47 | "gulp-sourcemaps": "^2.0.1",
48 | "gulp-subtree": "^0.1.0",
49 | "gulp-tap": "^0.1.3",
50 | "gulp-uglify": "^2.0.0",
51 | "gulp-util": "^3.0.4",
52 | "gulp-zip": "^3.0.2",
53 | "humanize": "0.0.9",
54 | "jquery": "^3.1.1",
55 | "jshint": "^2.9.4",
56 | "jshint-stylish": "^2.2.1",
57 | "merge-stream": "^1.0.0",
58 | "mocha": "^3.0.2",
59 | "prismjs": ">=1.23.0",
60 | "run-sequence": "^1.0.2",
61 | "swig": "^1.4.2",
62 | "through2": "^2.0.0",
63 | "vinyl-paths": "^2.0.0"
64 | },
65 | "engines": {
66 | "node": ">=0.12.0"
67 | },
68 | "scripts": {
69 | "test": "gulp && git status | grep 'working directory clean' >/dev/null || (echo 'Please commit all changes generated by building'; exit 1)"
70 | },
71 | "babel": {
72 | "only": "gulpfile.babel.js",
73 | "presets": [
74 | "es2015"
75 | ]
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/lmao/lmao/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for LMAO [Language Model Accessor Object] project.
3 |
4 | Generated by 'django-admin startproject' using Django 2.2.7.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/2.2/topics/settings/
8 |
9 | For the full list of settings and their values, see
10 | https://docs.djangoproject.com/en/2.2/ref/settings/
11 | """
12 |
13 | import os
14 |
15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17 |
18 |
19 | # Quick-start development settings - unsuitable for production
20 | # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
21 |
22 | # SECURITY WARNING: keep the secret key used in production secret!
23 | SECRET_KEY = '0+$-b3kr0utx98fo@c61fyp$s($*u&da$1gmt0o87$2t+6%0n*'
24 |
25 | # SECURITY WARNING: don't run with debug turned on in production!
26 | DEBUG = True
27 | if os.getenv('GAE_APPLICATION', None):
28 | DEBUG = False
29 |
30 | ALLOWED_HOSTS = ['*']
31 |
32 |
33 | # Application definition
34 |
35 | INSTALLED_APPS = [
36 | 'django.contrib.admin',
37 | 'django.contrib.auth',
38 | 'django.contrib.contenttypes',
39 | 'django.contrib.sessions',
40 | 'django.contrib.messages',
41 | 'django.contrib.staticfiles',
42 | 'rest_framework',
43 | 'rest_framework.authtoken',
44 | # 'simple_email_confirmation',
45 | # 'django_inlinecss',
46 | 'endpoints'
47 | ]
48 |
49 | MIDDLEWARE = [
50 | 'django.middleware.security.SecurityMiddleware',
51 | 'django.contrib.sessions.middleware.SessionMiddleware',
52 | 'django.middleware.common.CommonMiddleware',
53 | 'django.middleware.csrf.CsrfViewMiddleware',
54 | 'django.contrib.auth.middleware.AuthenticationMiddleware',
55 | 'django.contrib.messages.middleware.MessageMiddleware',
56 | 'django.middleware.clickjacking.XFrameOptionsMiddleware',
57 | ]
58 |
59 | ROOT_URLCONF = 'lmao.urls'
60 |
61 | TEMPLATES = [
62 | {
63 | 'BACKEND': 'django.template.backends.django.DjangoTemplates',
64 | 'DIRS': [],
65 | 'APP_DIRS': True,
66 | 'OPTIONS': {
67 | 'context_processors': [
68 | 'django.template.context_processors.debug',
69 | 'django.template.context_processors.request',
70 | 'django.contrib.auth.context_processors.auth',
71 | 'django.contrib.messages.context_processors.messages',
72 | ],
73 | },
74 | },
75 | ]
76 |
77 | WSGI_APPLICATION = 'lmao.wsgi.application'
78 |
79 |
80 | # Database
81 | # https://docs.djangoproject.com/en/2.2/ref/settings/#databases
82 |
83 | DATABASES = {
84 | 'default': {
85 | 'ENGINE': 'django.db.backends.sqlite3',
86 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
87 | }
88 | }
89 |
90 |
91 | # Password validation
92 | # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
93 |
94 | AUTH_PASSWORD_VALIDATORS = [
95 | {
96 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
97 | },
98 | {
99 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
100 | },
101 | {
102 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
103 | },
104 | {
105 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
106 | },
107 | ]
108 |
109 |
110 | # Internationalization
111 | # https://docs.djangoproject.com/en/2.2/topics/i18n/
112 |
113 | LANGUAGE_CODE = 'en-us'
114 |
115 | TIME_ZONE = 'UTC'
116 |
117 | USE_I18N = True
118 |
119 | USE_L10N = True
120 |
121 | USE_TZ = True
122 |
123 |
124 | # Static files (CSS, JavaScript, Images)
125 | # https://docs.djangoproject.com/en/2.2/howto/static-files/
126 |
127 | STATIC_URL = '/static/'
128 |
--------------------------------------------------------------------------------
/lmao_integrator/popup.js:
--------------------------------------------------------------------------------
1 |
2 | function inject_script() {
3 | console.log("Running injector script")
4 | setTimeout(() => {
5 | chrome.tabs.query({active: true, currentWindow: true}, function(tabs) {
6 | chrome.tabs.executeScript(tabs[0].id, {file: 'contentScript.js'});
7 | });
8 | }, 50);
9 | }
10 |
11 | document.addEventListener('DOMContentLoaded', (event) => {
12 | var offRadio = document.getElementById('offRadio');
13 | var onLocal = document.getElementById('onLocalHosted');
14 | var onCloud = document.getElementById('onCloudHosted');
15 |
16 | var offRadioLbl = document.getElementById('offRadioLbl');
17 | var onLocalLbl = document.getElementById('onLocalLbl');
18 | var onCloudLbl = document.getElementById('onHostedLbl');
19 |
20 | var prev = null;
21 |
22 | chrome.storage.sync.get('lm_inference_state', function(data) {
23 | if (data.lm_inference_state === 'off') {
24 | console.log("Inference is off")
25 | offRadio.checked = true
26 | setTimeout(() => {
27 | offRadioLbl.MaterialRadio.check()
28 | }, 40);
29 |
30 | prev = offRadio
31 | } else if(data.lm_inference_state === 'on_local') {
32 | console.log("Getting inference from local machine")
33 | onLocal.checked = true
34 | setTimeout(() => {
35 | onLocalLbl.MaterialRadio.check()
36 | }, 40);
37 | inject_script()
38 | prev = onLocal
39 | } else if(data.lm_inference_state === 'on_cloud') {
40 | console.log("Getting inference from cloud")
41 | onCloud.checked = true
42 | setTimeout(() => {
43 | onCloudLbl.MaterialRadio.check()
44 | }, 40);
45 | inject_script()
46 | prev = onCloud
47 | }
48 | offRadio.addEventListener('change', function() {
49 | if (this !== prev) {prev = this;}
50 | chrome.storage.sync.set({lm_inference_state: this.value}, function() {
51 | console.log("lm_inference_state has been set to", prev.value);
52 | });
53 | });
54 |
55 | onLocal.addEventListener('change', function() {
56 | if (this !== prev) {prev = this;}
57 | chrome.storage.sync.set({lm_inference_state: this.value}, function() {
58 | console.log("lm_inference_state has been set to", prev.value);
59 | inject_script()
60 | });
61 | });
62 |
63 | onCloud.addEventListener('change', function() {
64 | if (this !== prev) {prev = this;}
65 | chrome.storage.sync.set({lm_inference_state: this.value}, function() {
66 | console.log("lm_inference_state has been set to", prev.value);
67 | inject_script()
68 | });
69 | });
70 | });
71 |
72 | var links = document.getElementsByTagName("a");
73 | for (var i = 0; i < links.length; i++) {
74 | (function () {
75 | var ln = links[i];
76 | var location = ln.href;
77 | ln.onclick = function () {
78 | chrome.tabs.create({active: true, url: location});
79 | };
80 | })();
81 | }
82 |
83 | chrome.storage.sync.get('local_offline', function(data) {
84 | console.log(data)
85 | if(data.local_offline === true) {
86 | var spon = document.getElementById('local_status');
87 | spon.textContent = "[OFFLINE]"
88 | }
89 | });
90 | chrome.storage.sync.get('cloud_offline', function(data) {
91 | if(data.cloud_offline === true) {
92 | var spon = document.getElementById('cloud_status');
93 | spon.textContent = "[OFFLINE]"
94 | }
95 | });
96 |
97 | // stuff for prediction length slider
98 | var predlenSlider = document.getElementById('pred_len_slider');
99 | var predlenLbl = document.getElementById('pred_len_lbl');
100 | predlenSlider.addEventListener('change', function() {
101 | const new_val = this.value;
102 | chrome.storage.sync.set({pred_len: new_val}, function() {
103 | console.log("pred_len has been set to", new_val);
104 | predlenLbl.textContent = new_val;
105 | });
106 | });
107 |
108 | chrome.storage.sync.get('pred_len', function(data) {
109 | predlenLbl.textContent = data.pred_len;
110 | setTimeout(() => {
111 | predlenSlider.MaterialSlider.change(data.pred_len);
112 | }, 40);
113 | });
114 | // stuff for context length slider
115 | var ctxlenSlider = document.getElementById('context_len_slider');
116 | var ctxlenLbl = document.getElementById('context_len_lbl');
117 | ctxlenSlider.addEventListener('change', function() {
118 | const new_val = this.value;
119 | chrome.storage.sync.set({context_len: new_val}, function() {
120 | console.log("context_len has been set to", new_val);
121 | ctxlenLbl.textContent = new_val;
122 | });
123 | });
124 |
125 | chrome.storage.sync.get('context_len', function(data) {
126 | ctxlenLbl.textContent = data.context_len;
127 | setTimeout(() => {
128 | ctxlenSlider.MaterialSlider.change(data.context_len);
129 | }, 40);
130 | });
131 | })
132 |
133 |
134 |
135 |
--------------------------------------------------------------------------------
/lmao_integrator/popup.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
Language Model Accessor Object
56 |
57 |
58 | Just turn it on and hit shift+tab to predict
59 |
60 |
61 |
62 |
115 |
116 | Current model: gpt2
117 | Funding for full-time server: 10%
118 | Interested in helping fund the cloud server? Please get in touch with me here.
119 |
120 |
121 |
122 |
--------------------------------------------------------------------------------
/lmao_integrator/injector.js:
--------------------------------------------------------------------------------
1 | util = ace.require('ace/autocomplete/util')
2 | const aceSnippetManager55 = ace.require('ace/snippets').snippetManager
3 | const { Autocomplete } = ace.require('ace/autocomplete')
4 |
5 | console.log("Content script initializing...");
6 | var editorExtensionId = "iolciglhoknmnacbfjgccoibbcffofhk";
7 | var editor_proxy = _debug_editors[0];
8 |
9 | // inserting text at current position:
10 | // var cursorPosition = editor_proxy.getCursorPosition();
11 | // editor_proxy.session.insert(cursorPosition, "MEME REVIEW");
12 |
13 | // gets the last 26 lines
14 | // var lines = editor_proxy.session.getLines(cursorPosition["row"]-25, cursorPosition["row"])
15 | // lines[lines.length - 1] = lines[lines.length - 1].substring(0, cursorPosition["column"])
16 | var lm_prediction_flag = false;
17 |
18 | var languageModelCompleter = {
19 | identifierRegexps: [/.+/],
20 | getCompletions: function(editor, session, pos, prefix, callback) {
21 | // console.log(pos, prefix);
22 |
23 | // don't try autocomplete if we are already doing commands...
24 | if(prefix.startsWith('\\') && prefix.length == 1) return
25 | if(prefix.startsWith('\\') && (prefix.includes(" ") == false) && (prefix.includes("{") == false)) return
26 | if(lm_prediction_flag == false) return
27 |
28 | // gather last n lines
29 | const grab_n_lines = 100;//50;
30 | var lines = editor_proxy.session.getLines(pos["row"] - grab_n_lines, pos["row"])
31 |
32 | lines[lines.length - 1] = lines[lines.length - 1].substring(0, pos["column"]+1)
33 |
34 | console.log(">>> Getting predictions with last 5 lines ", lines.slice(lines.length-5))
35 |
36 | // dispatch message to background script
37 | var resp = "None";
38 | chrome.runtime.sendMessage(editorExtensionId,
39 | {lines: lines},
40 | function(response) {
41 | if(response === null) {
42 | callback(null, []);
43 | lm_prediction_flag = false;
44 | return;
45 | }
46 | resp = response.prediction;
47 | console.log("Inserting text:\t", resp);
48 | const result = resp.map(function(x) {
49 | return {
50 | caption: x, // what is shown in the preview bar
51 | value: x, // what goes onto the line if u smash tab
52 | // snippet: '>>'+x,
53 | completer: {
54 | insertMatch: function (editor, data) {
55 | editor.completer.insertMatch({value: data.value})
56 | Autocomplete.prototype.insertMatch = Autocomplete.prototype._overleafInsertMatch;
57 | }
58 | },
59 | meta: 'gpt2',
60 | score: 90
61 | }
62 | })
63 | callback(null, result)
64 | lm_prediction_flag = false;
65 | });
66 | }
67 | };
68 |
69 | function getLastCommandFragment(lineUpToCursor) {
70 | let index
71 | if ((index = getLastCommandFragmentIndex(lineUpToCursor)) > -1) {
72 | return lineUpToCursor.slice(index)
73 | } else {
74 | return null
75 | }
76 | };
77 |
78 | function getLastCommandFragmentIndex(lineUpToCursor) {
79 | let m
80 | const blankArguments = lineUpToCursor.replace(/\[([^\]]*)\]/g, args =>
81 | Array(args.length + 1).join('.')
82 | )
83 | if ((m = blankArguments.match(/(\\[^\\]*)$/))) {
84 | return m.index
85 | } else {
86 | return -1
87 | }
88 | };
89 |
90 | util.retrievePrecedingIdentifier = function(text, pos, regex) {
91 | let currentLineOffset = 0
92 | for (let i = pos - 1; i >= 0; i--) { // scan backwards for the start of the current line
93 | if (text[i] === '\n') {
94 | currentLineOffset = i + 1
95 | break
96 | }
97 | }
98 | const currentLine = text.slice(currentLineOffset, pos) // problem 2: this fucks up
99 | var fragment = getLastCommandFragment(currentLine) || '';
100 | if (lm_prediction_flag) {
101 | fragment = ''
102 | }
103 | return fragment
104 | };
105 |
106 | editor_proxy.completers.push(languageModelCompleter);
107 |
108 | Autocomplete.prototype._overleafInsertMatch = Autocomplete.prototype.insertMatch
109 | Autocomplete.prototype._lmaoInsertMatch = function(data) {
110 | if (!data)
111 | data = this.popup.getData(this.popup.getRow());
112 | if (!data)
113 | return false;
114 |
115 | if (data.completer && data.completer.insertMatch) {
116 | data.completer.insertMatch(this.editor, data);
117 | } else {
118 | // TODO add support for options.deleteSuffix
119 | if (this.completions.filterText) {
120 | var ranges = this.editor.selection.getAllRanges();
121 | for (var i = 0, range; range = ranges[i]; i++) {
122 | range.start.column -= this.completions.filterText.length;
123 | this.editor.session.remove(range);
124 | }
125 | }
126 | if (data.snippet)
127 | aceSnippetManager55.insertSnippet(this.editor, data.snippet);
128 | else
129 | this.editor.execCommand("insertstring", data.value || data);
130 | }
131 | this.detach();
132 | };
133 |
134 | document.addEventListener("keyup", function (zEvent) {
135 | // console.log
136 | if (zEvent.shiftKey && zEvent.key === "Tab") { // case sensitive
137 | lm_prediction_flag = true;
138 | Autocomplete.prototype.insertMatch = Autocomplete.prototype._lmaoInsertMatch
139 | editor_proxy.execCommand("startAutocomplete");
140 | }
141 | });
142 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # LMAO - Language model accessor object
2 | What? A Chrome extension that adds a neural auto-complete function to LaTeX documents in any Overleaf project. Concretely, it is a GPT2 language model server which hooks into Overleaf's editor to add 'Gmail smart compose' or '[write with transformer](https://transformer.huggingface.co/)'-like functionality, but for LaTeX, and it works seamlessly on top of Overleaf's editor :).
3 |
4 | Some more info:
5 | - Works with all existing Overleaf setups (all color themes, existing hotkeys...)
6 | - Autocompletes the next few words given the historical context in the current document.
7 | - Here is what some of the predictions look like in the editor:
8 |
9 | 
10 |
11 | **STATUS UPDATE Feb 2021: on hiatus**. Please get in contact if you would like to help test out the extension or take the project further. Due to time and other constraints, I haven't been able to continue development of this project. Following the rest of this readme might not work exactly as written, since some libraries have been updated and API calls have changed, so you might need to fiddle around to get things working. Nevertheless, it is still quite fun once you get it working, and if anyone wants to continue work on it, feel free to fork the repo or just see how I've done things.
12 |
13 | **STATUS UPDATE May 2023: indefinite hiatus**. I am archiving the repository since it is very inactive and many of the libraries it is based on have had substantial changes and upgrades in the intervening years. If anyone wishes to pick up this project, please let me know.
14 |
15 | ## TL;DR: how it works
16 | Whenever a certain hotkey is pressed, the Chrome extension sends an HTTPS request to an external inference server (running the GPT2 model), passing along the recent text history around the cursor. The server then returns a few predictions for the next few words, and the Chrome extension adds these as autocompletions in the Overleaf editor. [Feel free to read more about it, and how the GPT2 models were fine-tuned, here](https://rf5.github.io/2019/12/09/lmao-overleaf.html).
17 |
18 | This external server can either be a local Python Django server or the external cloud server that I would like to host. Currently only the local option is available, until I find enough funds to host a persistent GPU server (as any CPU instance takes waaay too long for autocompletes). If you are in a position to help fund this project, please get in [contact with me here](https://rf5.github.io/about.html).
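For concreteness, here is a minimal sketch (not part of the repo or the extension) of what that request/response exchange looks like against the local Django server, assuming it is running on the default port; the `/infer` endpoint, JSON keys, and response shape come from `endpoints/views.py` and `lmao_integrator/background.js`.

```python
# Hypothetical client-side sketch of the local inference request.
# The extension sends the same JSON shape from background.js.
import json
import urllib.request

payload = {
    # recent lines around the cursor (already trimmed to the "context length" setting)
    "lines": ["In this section we show that the proposed method", "achieves state of the art"],
    # number of words to predict -- the popup's "prediction length" setting
    "pred_length": 4,
}
req = urllib.request.Request(
    "http://127.0.0.1:8000/infer",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)

# The server replies with {"message": "Success", "prediction": [<candidate 1>, <candidate 2>, ...]}
print(body["prediction"])
```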
19 |
20 | Note: this project is in no way associated with Overleaf or their team. It's just a cool extension that hooks into Overleaf because their service and editor is quite nice.
21 |
22 | ### Customization
23 | The various settings of LMAO can be adjusted by clicking on the icon when in Overleaf, which brings up a menu that looks something like
24 |
25 |
26 |
27 | The settings are:
28 | - **Prediction length** is the number of words to auto-complete for each prediction, i.e. a value of 4 means that predictions for the next 4 words will be generated. Extremely high values of this may increase prediction times a little.
29 | - **Context length** is the number of words before the current cursor position to use as historical context to condition the text generation on. So a value of 200 means "use the last 200 words to predict the next word". Extremely high values of this may increase prediction times a little (still <800ms even at 600 words with a GPU).
30 |
31 | ## Installation
32 | Simply go to the Google Chrome Web Store, navigate to this extension, and hit 'install'. Then, next time you go to an Overleaf project, click the icon and it should be pretty obvious what to do :).
33 |
34 | ## Setting up python Django server for local inference
35 | If you have a reasonable Nvidia GPU and have Python installed, then you can host the server locally with Django! So, in addition to getting the Chrome extension, you need to:
36 | - Clone this repo
37 | - Create a new python environment (3.6 or newer)
38 | - Open a new terminal with the new Python environment activated and cd into the `lmao/` folder.
39 | - Install pip requirements (`pip install -r requirements.txt`). You might need to run a separate command to install pytorch correctly on your system. See [Pytorch's installation guide for more info](https://pytorch.org/get-started/locally/).
40 | - Create a folder in this directory called `models/`. Download the GPT2 model/config zip file `gpt2-small.zip` under the 'Releases' tab and extract it into the `models/` folder. So now there should be a `lmao/models/gpt2-small` folder.
41 | - Now run `python manage.py runserver`. It should start up the server and just idle in the background. Leave this terminal running while you are using the extension on Overleaf. If you want to run the local server again, just open a terminal, activate your Python environment, and run `python manage.py runserver` again. PS: you might need to run `python manage.py migrate` the first time you try to run the server, depending on the Django version. If the server fails to start, see the sketch after this list for a quick way to check that the model files load on their own.
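As a quick sanity check that the `models/gpt2-small` folder extracted correctly, the snippet below (a minimal sketch, not part of the repo) loads the model and tokenizer with the same `from_pretrained` calls that `endpoints/views.py` makes at startup. Run it from the `lmao/` folder.

```python
# Hypothetical standalone check: does the downloaded GPT2 model directory load?
# Mirrors the startup code in endpoints/views.py.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

output_dir = "./models/gpt2-small"
model = GPT2LMHeadModel.from_pretrained(output_dir)
tokenizer = GPT2Tokenizer.from_pretrained(output_dir)
model.eval()

print("Model and tokenizer loaded OK.")
print("CUDA available:", torch.cuda.is_available())
```

If this runs without errors, `python manage.py runserver` should be able to load the model the same way.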
42 |
43 | ## Setting up cloud hosting on AWS
44 | Don't worry, only I need to do this. Currently just trying to gather enough funds for a persistent GPU inference server.
45 |
46 | ## Current leads
47 | - Acquire funding for a GPU cloud server.
48 | - Allow custom hotkey setup for inference
49 | - Allow inference if user does not type for a certain number of seconds
50 | - Train and use bigger GPT2 models for better predictions + get more quality LaTeX source files to train on.
51 |
52 | ## Past explored leads:
53 | - [Links to host the model with GPU support](https://pytorch.org/blog/model-serving-in-pyorch/)
54 | - Find where the `base` of `editor.completer.base` is set during each call. It defines the start position of the prefix (where the last '\' is).
55 | - Alternatively, try to find a way to change how the prefix is calculated. See line 1508/1542 of ext-language-tools.
56 |
57 | ## Known bugs
58 | - ~~Does not work so well if large portions of the document are commented out.~~ Fixed.
59 |
--------------------------------------------------------------------------------
/lmao_integrator/background.js:
--------------------------------------------------------------------------------
1 | chrome.runtime.onInstalled.addListener(function() {
2 | chrome.storage.sync.set({lm_inference_state: 'off', pred_len: 4, context_len: 200}, function() {
3 | console.log("LMAO reset xD");
4 | });
5 | chrome.declarativeContent.onPageChanged.removeRules(undefined, function() {
6 | chrome.declarativeContent.onPageChanged.addRules([{
7 | conditions: [new chrome.declarativeContent.PageStateMatcher({
8 | pageUrl: {urlMatches: '.*overleaf\.com\\/project/.*'},
9 | })
10 | ],
11 | actions: [
12 | new chrome.declarativeContent.ShowPageAction(),
13 | ]
14 | }]);
15 | });
16 | });
17 |
18 | const endpoint_name = "lmao-test-01";
19 | const url = chrome.runtime.getURL('config.json');
20 | let _api_key = null;
21 | function setAPI(incoming) {
22 | _api_key = incoming["x-api-key"];
23 | }
24 |
25 | fetch(url)
26 | .then((response) => response.json()) //assuming file contains json
27 | .then((json) => setAPI(json));
28 |
29 | function get_word_count(str_arr) {
30 | var wcnt = 0;
31 | for (let i = 0; i < str_arr.length; i++) {
32 | wcnt = wcnt + str_arr[i].split(" ").length
33 | }
34 | return wcnt;
35 | }
36 |
37 | function prune_comments_and_trim(str_arr, ctx_len) {
38 | function _remove_commented_lines(line) {
39 | const hashInd = line.indexOf('%');
40 | if(hashInd === 0) return false
41 | if(hashInd === 1 && line[hashInd - 1] != '\\') return false
42 |
43 | return true
44 | }
45 | str_arr = str_arr.filter(_remove_commented_lines);
46 | for (let i = 0; i < str_arr.length; i++) {
47 | const hashInd = str_arr[i].search(/[^\\]%/);
48 | if(hashInd != -1 && hashInd != null) {
49 | str_arr[i] = str_arr[i].substring(0, hashInd);
50 | }
51 | }
52 | var tmp = str_arr.slice(1);
53 | if(get_word_count(str_arr) > ctx_len) {
54 | while(get_word_count(tmp) > ctx_len) {
55 | str_arr.shift();
56 | tmp = str_arr.slice(1);
57 | }
58 | }
59 | return str_arr;
60 | }
61 |
62 | chrome.runtime.onMessageExternal.addListener(
63 | function(request, sender, sendResponse) {
64 | if(request.lines === null) {
65 | return;
66 | }
67 |
68 | // note: lm_inference_state is either "off", "on_local", or "on_cloud"
69 | chrome.storage.sync.get(['lm_inference_state', 'pred_len', 'context_len'], function(data) {
70 | if (data.lm_inference_state === 'off') {
71 | // console.log("Inference is off");
72 | sendResponse(null);
73 | return;
74 | } else if(data.lm_inference_state === 'on_local') {
75 | // console.log("Getting inference from local machine")
76 | var xhr = new XMLHttpRequest();
77 | xhr.open("POST", "http://127.0.0.1:8000/infer", true);
78 | xhr.setRequestHeader("Content-Type", "application/json;charset=UTF-8");
79 | xhr.onload = function (e) {
80 | if (xhr.readyState === 4) {
81 | if (xhr.status === 200) {
82 | // console.log(xhr.responseText);
83 | var response = JSON.parse(xhr.responseText);
84 | chrome.storage.sync.set({local_offline: false}, function() {});
85 | sendResponse({prediction: response.prediction});
86 | } else {
87 | sendResponse(null);
88 | msg_popup_offline('local');
89 | console.log(xhr.statusText);
90 | }
91 | }
92 | };
93 | xhr.onerror = function (e) {
94 | sendResponse(null);
95 | msg_popup_offline('local');
96 | console.log(xhr.statusText);
97 | };
98 | // console.log("sending " + JSON.stringify(request.lines))
99 | var lines = prune_comments_and_trim(request.lines, data.context_len);
100 | xhr.send(JSON.stringify({lines: lines, pred_length: data.pred_len}));
101 | } else if(data.lm_inference_state === 'on_cloud') {
102 | var xhr = new XMLHttpRequest();
103 | xhr.open("POST", "https://h0sywlk4gh.execute-api.eu-west-1.amazonaws.com/test-lmao-en/invoke-lmao", true);
104 | xhr.setRequestHeader('x-api-key', _api_key);
105 | xhr.setRequestHeader("Content-Type", "application/json;charset=UTF-8");
106 | xhr.onload = function (e) {
107 | if (xhr.readyState === 4) {
108 | if (xhr.status === 200) {
109 | // console.log(xhr.responseText);
110 | var response = JSON.parse(xhr.responseText);
111 | chrome.storage.sync.set({local_offline: false}, function() {});
112 | sendResponse({prediction: response.prediction});
113 | } else {
114 | sendResponse(null);
115 | msg_popup_offline('cloud');
116 | console.log(xhr.statusText);
117 | }
118 | }
119 | };
120 | xhr.onerror = function (e) {
121 | sendResponse(null);
122 | msg_popup_offline('cloud');
123 | console.log(xhr.statusText);
124 | };
125 | var lines = prune_comments_and_trim(request.lines, data.context_len);
126 | xhr.send(JSON.stringify({"data":{"lines": lines, "pred_length": data.pred_len, n_seqs: 3}}));
127 | }
128 | });
129 | });
130 |
131 | function msg_popup_offline(service) {
132 | if(service === 'local') {
133 | chrome.storage.sync.set({local_offline: true}, function() {
134 | });
135 | } else if(service === 'cloud') {
136 | chrome.storage.sync.set({cloud_offline: true}, function() {
137 | });
138 | }
139 | }
140 |
141 |
--------------------------------------------------------------------------------
/lmao/endpoints/views.py:
--------------------------------------------------------------------------------
1 | import os, time
2 | import urllib.request
3 |
4 | from django.conf import settings
5 | from django.http import HttpResponse
6 | from django.shortcuts import render
7 | from django.views.generic import View
8 | from rest_framework.authentication import (SessionAuthentication,
9 | TokenAuthentication)
10 | from rest_framework.authtoken.models import Token
11 | from rest_framework.decorators import action
12 | from rest_framework import status
13 | from rest_framework.permissions import AllowAny, IsAuthenticated
14 | from rest_framework.response import Response
15 | from rest_framework.views import APIView
16 |
17 | import torch
18 | import torch.nn.functional as F
19 | from transformers import GPT2LMHeadModel, GPT2Tokenizer
20 | from transformers import WEIGHTS_NAME, CONFIG_NAME
21 |
22 | '''
23 | LMAO [Language Model Accessor Orchestrator] views
24 | '''
25 |
26 | output_dir = "./models/gpt2-small"
27 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
28 | print("Loading model into memory from dir ", output_dir)
29 | # Step 1: Save a model, configuration and vocabulary that you have fine-tuned
30 |
31 | # If we have a distributed model, save only the encapsulated model
32 | # (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
33 | # model_to_save = model.module if hasattr(model, 'module') else model
34 |
35 | # If we save using the predefined names, we can load using `from_pretrained`
36 | # output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
37 | # output_config_file = os.path.join(output_dir, CONFIG_NAME)
38 |
39 | # torch.save(model_to_save.state_dict(), output_model_file)
40 | # model_to_save.config.to_json_file(output_config_file)
41 | # tokenizer.save_vocabulary(output_dir)
42 |
43 | # Step 2: Re-load the saved model and vocabulary
44 | model = GPT2LMHeadModel.from_pretrained(output_dir)
45 | tokenizer = GPT2Tokenizer.from_pretrained(output_dir)
46 | model.eval()
47 | model.to(device)
48 | print("Done!")
49 | print("Is on GPU: ", next(model.parameters()).is_cuda)
50 |
51 | def gpt2_infer(historical_context, pred_length=10, repetition_penalty=1.0, num_samples=3):
52 | top_p = 0.5
53 | temperature = 0.9 # more temperature -> more entropy
54 |
55 | original_context_tokens = torch.tensor(tokenizer.encode(historical_context)).to(device)
56 | generated = original_context_tokens.unsqueeze(0).repeat(num_samples, 1)
57 | context = generated
58 | # context = torch.tensor([generated]).to(device).repeat(num_samples, 1)
59 | past = None
60 |
61 | for i in range(pred_length):
62 | output, past = model(context, past=past)
63 |
64 | next_token_logits = output[:, -1, :]
65 | next_token_logits /= (temperature if temperature > 0 else 1.)
66 |
67 | filtered_logits = top_k_top_p_filtering(next_token_logits, top_p=top_p)
68 | if temperature == 0: # greedy sampling:
69 | next_token = torch.argmax(filtered_logits, dim=-1).unsqueeze(-1)
70 | else:
71 | next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
72 | # print(next_token, token, next_token.squeeze(), token.unsqueeze(0))
73 | # generated += [next_token.squeeze().tolist()]
74 | generated = torch.cat((generated, next_token), dim=1)
75 | context = next_token
76 | # print(past[0][0].shape) # WATCH OUT TODO: the shape of past grows a lot as u generate more tokens
77 |
78 | gen_seqs = []
79 | gen_lists = generated[:, len(original_context_tokens):].tolist()
80 | for o in gen_lists:
81 | sequence = tokenizer.decode(o, clean_up_tokenization_spaces=True)
82 | # print('>> ', sequence[-500:], 'TRIMMED', sequence[len(historical_context):])
83 | # gen_seqs.append(sequence[len(historical_context):])
84 | if historical_context[-1] == ' ' and sequence[0] == ' ':
85 | gen_seqs.append(sequence[1:])
86 | else:
87 | gen_seqs.append(sequence)
88 |
89 |
90 | return gen_seqs
91 |
92 |
93 | def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
94 | """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
95 | Args:
96 | logits: logits distribution shape (batch size x vocabulary size)
97 | top_k > 0: keep only top k tokens with highest probability (top-k filtering).
98 | top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
99 | Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
100 | From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
101 | """
102 | top_k = min(top_k, logits.size(-1)) # Safety check
103 | if top_k > 0:
104 | # Remove all tokens with a probability less than the last token of the top-k
105 | indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
106 | logits[indices_to_remove] = filter_value
107 |
108 | if top_p > 0.0:
109 | sorted_logits, sorted_indices = torch.sort(logits, descending=True)
110 | cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
111 |
112 | # Remove tokens with cumulative probability above the threshold
113 | sorted_indices_to_remove = cumulative_probs > top_p
114 | # Shift the indices to the right to keep also the first token above the threshold
115 | sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
116 | sorted_indices_to_remove[..., 0] = 0
117 |
118 | # scatter sorted tensors to original indexing
119 | indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
120 | logits[indices_to_remove] = filter_value
121 | return logits
122 |
123 | """
124 | VIEWS
125 | """
126 |
127 | class Infer(APIView):
128 | """ Logout view; only applicable to token authentication """
129 | authentication_classes = (TokenAuthentication, )
130 | permission_classes = (AllowAny, )
131 |
132 | def post(self, request, format=None):
133 | try:
134 | lines = request.data['lines']
135 | except Exception as e:
136 | return Response("Error: Invalid parameters.",
137 | status=status.HTTP_400_BAD_REQUEST)
138 |
139 | if 'pred_length' in request.data:
140 | try:
141 | pred_length = int(request.data['pred_length'])
142 | except Exception as e:
143 | return Response("Error: prediction length specified but in wrong format",
144 | status=status.HTTP_400_BAD_REQUEST)
145 | else:
146 | pred_length = 5
147 |
148 | start = time.time()
149 | pred_text = gpt2_infer('\n'.join(lines), pred_length=pred_length)
150 | end = time.time()
151 | print(f">>>>> Took {end-start} seconds.")
152 | print(">>>>> Predicted text: ", pred_text)
153 | response = {
154 | 'message': 'Success',
155 | 'prediction': pred_text,
156 | }
157 |
158 | return Response(response,
159 | status=status.HTTP_200_OK)
160 |
161 |
162 |
--------------------------------------------------------------------------------
/lmao_integrator/material/material.min.js:
--------------------------------------------------------------------------------
1 | /**
2 | * material-design-lite - Material Design Components in CSS, JS and HTML
3 | * @version v1.3.0
4 | * @license Apache-2.0
5 | * @copyright 2015 Google, Inc.
6 | * @link https://github.com/google/material-design-lite
7 | */
8 | !function(){"use strict";function e(e,t){if(e){if(t.element_.classList.contains(t.CssClasses_.MDL_JS_RIPPLE_EFFECT)){var s=document.createElement("span");s.classList.add(t.CssClasses_.MDL_RIPPLE_CONTAINER),s.classList.add(t.CssClasses_.MDL_JS_RIPPLE_EFFECT);var i=document.createElement("span");i.classList.add(t.CssClasses_.MDL_RIPPLE),s.appendChild(i),e.appendChild(s)}e.addEventListener("click",function(s){if("#"===e.getAttribute("href").charAt(0)){s.preventDefault();var i=e.href.split("#")[1],n=t.element_.querySelector("#"+i);t.resetTabState_(),t.resetPanelState_(),e.classList.add(t.CssClasses_.ACTIVE_CLASS),n.classList.add(t.CssClasses_.ACTIVE_CLASS)}})}}function t(e,t,s,i){function n(){var n=e.href.split("#")[1],a=i.content_.querySelector("#"+n);i.resetTabState_(t),i.resetPanelState_(s),e.classList.add(i.CssClasses_.IS_ACTIVE),a.classList.add(i.CssClasses_.IS_ACTIVE)}if(i.tabBar_.classList.contains(i.CssClasses_.JS_RIPPLE_EFFECT)){var a=document.createElement("span");a.classList.add(i.CssClasses_.RIPPLE_CONTAINER),a.classList.add(i.CssClasses_.JS_RIPPLE_EFFECT);var l=document.createElement("span");l.classList.add(i.CssClasses_.RIPPLE),a.appendChild(l),e.appendChild(a)}i.tabBar_.classList.contains(i.CssClasses_.TAB_MANUAL_SWITCH)||e.addEventListener("click",function(t){"#"===e.getAttribute("href").charAt(0)&&(t.preventDefault(),n())}),e.show=n}var s={upgradeDom:function(e,t){},upgradeElement:function(e,t){},upgradeElements:function(e){},upgradeAllRegistered:function(){},registerUpgradedCallback:function(e,t){},register:function(e){},downgradeElements:function(e){}};s=function(){function e(e,t){for(var s=0;s0&&l(t.children))}function o(t){var s="undefined"==typeof t.widget&&"undefined"==typeof t.widget,i=!0;s||(i=t.widget||t.widget);var n={classConstructor:t.constructor||t.constructor,className:t.classAsString||t.classAsString,cssClass:t.cssClass||t.cssClass,widget:i,callbacks:[]};if(c.forEach(function(e){if(e.cssClass===n.cssClass)throw new Error("The provided cssClass has already been registered: "+e.cssClass);if(e.className===n.className)throw new Error("The provided className has already been registered")}),t.constructor.prototype.hasOwnProperty(C))throw new Error("MDL component classes must not have "+C+" defined as a property.");var a=e(t.classAsString,n);a||c.push(n)}function r(t,s){var i=e(t);i&&i.callbacks.push(s)}function _(){for(var e=0;e0&&this.container_.classList.contains(this.CssClasses_.IS_VISIBLE)&&(e.keyCode===this.Keycodes_.UP_ARROW?(e.preventDefault(),t[t.length-1].focus()):e.keyCode===this.Keycodes_.DOWN_ARROW&&(e.preventDefault(),t[0].focus()))}},d.prototype.handleItemKeyboardEvent_=function(e){if(this.element_&&this.container_){var t=this.element_.querySelectorAll("."+this.CssClasses_.ITEM+":not([disabled])");if(t&&t.length>0&&this.container_.classList.contains(this.CssClasses_.IS_VISIBLE)){var s=Array.prototype.slice.call(t).indexOf(e.target);if(e.keyCode===this.Keycodes_.UP_ARROW)e.preventDefault(),s>0?t[s-1].focus():t[t.length-1].focus();else if(e.keyCode===this.Keycodes_.DOWN_ARROW)e.preventDefault(),t.length>s+1?t[s+1].focus():t[0].focus();else if(e.keyCode===this.Keycodes_.SPACE||e.keyCode===this.Keycodes_.ENTER){e.preventDefault();var i=new MouseEvent("mousedown");e.target.dispatchEvent(i),i=new MouseEvent("mouseup"),e.target.dispatchEvent(i),e.target.click()}else 
[remainder of the vendored, minified Material Design Lite bundle: component definitions and componentHandler registrations for MaterialMenu, MaterialSnackbar, MaterialSpinner, MaterialTextfield, MaterialTooltip, MaterialLayout and MaterialRipple; the full unmodified file is available at https://raw.githubusercontent.com/RF5/lmao/HEAD/lmao_integrator/material/material.min.js]
10 | //# sourceMappingURL=material.min.js.map
11 |
--------------------------------------------------------------------------------