├── __init__.py ├── web ├── __init__.py ├── client │ ├── .gitignore │ ├── .env │ ├── src │ │ ├── components │ │ │ ├── ImageTrace │ │ │ │ ├── ImageTrace.css │ │ │ │ └── ImageTrace.js │ │ │ ├── TestMorph.css │ │ │ ├── UploadFile.js │ │ │ └── TestMorph.js │ │ ├── pages │ │ │ ├── select │ │ │ │ ├── select.css │ │ │ │ └── SelectPage.js │ │ │ ├── global.css │ │ │ ├── decode │ │ │ │ └── DecodePage.js │ │ │ ├── auth │ │ │ │ └── AuthPage.js │ │ │ ├── encode │ │ │ │ └── EncodePage.js │ │ │ ├── App.js │ │ │ └── background.js │ │ ├── index.js │ │ ├── registerServiceWorker.js │ │ └── svg.js │ ├── public │ │ ├── favicon.ico │ │ ├── protected.jpeg │ │ ├── protected.jpg │ │ ├── manifest.json │ │ ├── icon │ │ │ ├── lock.svg │ │ │ └── unlock.svg │ │ ├── index.html │ │ └── js │ │ │ └── svg.js │ ├── config-overrides.js │ └── package.json ├── .gitignore ├── .vscode │ └── settings.json ├── encoder │ └── server.pyc ├── __pycache__ │ └── __init__.cpython-36.pyc └── server │ ├── functions │ ├── facebook_auth.js │ └── __main__.js │ └── package.json ├── images ├── car.jpg ├── cat.jpg └── house.png ├── .gitignore ├── optimizers.py ├── run.py ├── old ├── train_dsc.py ├── train_unet.py ├── losses.py └── modules.py ├── train_amnesia.py ├── encoding.py ├── logger.py ├── README.md ├── utils.py ├── testing.py ├── models.py └── transforms.py /__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /web/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /web/client/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules -------------------------------------------------------------------------------- /web/client/.env: -------------------------------------------------------------------------------- 1 | FACEBOOK_AUTH_ID=1951426491774080 -------------------------------------------------------------------------------- /web/.gitignore: -------------------------------------------------------------------------------- 1 | /node_modules 2 | env.json 3 | .DS_Store -------------------------------------------------------------------------------- /web/.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.formatting.provider": "yapf" 3 | } -------------------------------------------------------------------------------- /images/car.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/images/car.jpg -------------------------------------------------------------------------------- /images/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/images/cat.jpg -------------------------------------------------------------------------------- /images/house.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/images/house.png -------------------------------------------------------------------------------- /web/encoder/server.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/web/encoder/server.pyc 
-------------------------------------------------------------------------------- /web/client/src/components/ImageTrace/ImageTrace.css: -------------------------------------------------------------------------------- 1 | path { 2 | fill: white; 3 | stroke: black; 4 | } -------------------------------------------------------------------------------- /web/client/src/pages/select/select.css: -------------------------------------------------------------------------------- 1 | .bigButton { 2 | transform: scale(5); 3 | margin: 50px; 4 | } -------------------------------------------------------------------------------- /web/client/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/web/client/public/favicon.ico -------------------------------------------------------------------------------- /web/client/public/protected.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/web/client/public/protected.jpeg -------------------------------------------------------------------------------- /web/client/public/protected.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/web/client/public/protected.jpg -------------------------------------------------------------------------------- /web/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nikcheerla/neuralhash/HEAD/web/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .floyd* 2 | __* 3 | __pycache__/* 4 | *.swp 5 | *.pyc 6 | data 7 | sftp-config.json 8 | output 9 | jobs 10 | jobs/* 11 | output/* 12 | *.pth 13 | images/* 14 | .DS_Store 15 | -------------------------------------------------------------------------------- /web/server/functions/facebook_auth.js: -------------------------------------------------------------------------------- 1 | const lib = require('lib'); 2 | 3 | /** 4 | * @returns {any} 5 | */ 6 | module.exports = (context, callback) => { 7 | callback(null, 'hello world'); 8 | }; 9 | 10 | -------------------------------------------------------------------------------- /web/client/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import App from './pages/App'; 4 | import registerServiceWorker from './registerServiceWorker'; 5 | 6 | ReactDOM.render(, document.getElementById('root')); 7 | registerServiceWorker(); 8 | -------------------------------------------------------------------------------- /web/server/functions/__main__.js: -------------------------------------------------------------------------------- 1 | /** 2 | * A basic Hello World function 3 | * @param {string} name Who you're saying hello to 4 | * @returns {string} 5 | */ 6 | module.exports = (name = 'world', context, callback) => { 7 | 8 | callback(null, `hello there ${name}`); 9 | 10 | }; 11 | -------------------------------------------------------------------------------- /web/client/src/components/TestMorph.css: -------------------------------------------------------------------------------- 1 | svg { 2 | 
display: block; 3 | margin: 0 auto; 4 | width: 600px; 5 | } 6 | 7 | #letters { 8 | visibility: hidden; 9 | } 10 | .st0 { 11 | fill: #1d1d1d; 12 | } 13 | .st1 { 14 | fill: #89c540; 15 | } 16 | .st2 { 17 | fill: #e24b6c; 18 | } 19 | -------------------------------------------------------------------------------- /web/client/config-overrides.js: -------------------------------------------------------------------------------- 1 | const { injectBabelPlugin } = require("react-app-rewired"); 2 | 3 | module.exports = function override(config, env) { 4 | config = injectBabelPlugin( 5 | ["import", { libraryName: "antd", libraryDirectory: "es", style: "css" }], 6 | config 7 | ); 8 | return config; 9 | }; 10 | -------------------------------------------------------------------------------- /web/client/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | } 10 | ], 11 | "start_url": "./index.html", 12 | "display": "standalone", 13 | "theme_color": "#000000", 14 | "background_color": "#ffffff" 15 | } 16 | -------------------------------------------------------------------------------- /web/server/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "neuralhash", 3 | "version": "0.0.0", 4 | "description": "", 5 | "author": "isaacpz ", 6 | "main": "functions/__main__.js", 7 | "dependencies": {}, 8 | "private": true, 9 | "stdlib": { 10 | "build": "faaslang", 11 | "name": "isaacpz/neuralhash", 12 | "timeout": 10000, 13 | "publish": true, 14 | "personalize": { 15 | "keys": [], 16 | "user": [] 17 | } 18 | } 19 | } -------------------------------------------------------------------------------- /web/client/src/pages/global.css: -------------------------------------------------------------------------------- 1 | html { 2 | margin: 0px; 3 | height: 100%; 4 | background-image: -webkit-linear-gradient(#edecec, #cecbc9); 5 | background-image: linear-gradient(#edecec, #cecbc9); 6 | } 7 | 8 | .wrapper { 9 | display: flex; 10 | align-items: center; 11 | justify-content: center; 12 | min-height: 100vh; 13 | } 14 | 15 | .switch-wrapper { 16 | position: relative; 17 | } 18 | 19 | .switch-wrapper > div { 20 | position: relative; 21 | } 22 | -------------------------------------------------------------------------------- /web/client/public/icon/lock.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /web/client/src/pages/decode/DecodePage.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import UploadFileComponent from "../../components/UploadFile"; 3 | 4 | export default class DecodePage extends React.Component { 5 | render() { 6 | return ( 7 |
<div className="wrapper">
 8 |         <h1>Decode File</h1>
 9 |         <UploadFileComponent
10 |           endpoint="decode"
11 |           onResponse={async file => {
12 |             alert(await file.text())
13 |           }}
14 |         />
15 |       </div>
16 | ); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /web/client/public/icon/unlock.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /web/client/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | NeuralHash 14 | 15 | 16 | 17 | 20 |
21 | 22 | 23 | -------------------------------------------------------------------------------- /web/client/src/pages/auth/AuthPage.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import FacebookAuth from 'react-facebook-auth'; 3 | import Cookies from 'js-cookie'; 4 | import {Button} from 'antd'; 5 | export default class AuthPage extends React.Component { 6 | Button = ({ onClick }) => ( 7 | 8 | ); 9 | 10 | authenticate = response => { 11 | if(response.userID) { 12 | Cookies.set('facebook-id', response.userID); 13 | this.props.history.push("/sign"); 14 | } 15 | }; 16 | 17 | render() { 18 | return ( 19 |
<div className="wrapper">
20 |         <h1>Facebook Auth</h1>
21 |         <FacebookAuth
22 |           appId="1951426491774080"
23 |           callback={this.authenticate}
24 |           component={this.Button}
25 |         />
26 | 
27 |       </div>
28 | ); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /web/client/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "neuralhash", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "antd": "^3.2.1", 7 | "canny-edge-detector": "^1.0.0", 8 | "gsap": "^1.20.4", 9 | "html-to-react": "^1.3.3", 10 | "image-js": "^0.19.1", 11 | "imagetracerjs": "^1.2.3", 12 | "js-cookie": "^2.2.0", 13 | "jsfeat": "^0.0.8", 14 | "msqr": "^0.2.1", 15 | "react": "^16.2.0", 16 | "react-dom": "^16.2.0", 17 | "react-dropzone": "^4.2.8", 18 | "react-facebook-auth": "^1.3.0", 19 | "react-router": "^4.2.0", 20 | "react-router-transition": "^1.2.1", 21 | "react-scripts": "1.1.1", 22 | "svg-points": "^6.0.1" 23 | }, 24 | "scripts": { 25 | "start": "react-app-rewired start", 26 | "build": "react-app-rewired build", 27 | "test": "react-app-rewired test --env=jsdom", 28 | "eject": "react-app-rewired eject" 29 | }, 30 | "devDependencies": { 31 | "babel-plugin-import": "^1.6.4", 32 | "react-app-rewired": "^1.4.1" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /web/client/src/components/UploadFile.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { Upload, Icon } from "antd"; 3 | 4 | export default class UploadFileComponent extends React.Component { 5 | render() { 6 | return ( 7 |
<div>
 8 |         <Upload.Dragger
 9 |           name="file"
10 |           multiple={false}
11 |           showUploadList={false}
12 |           accept="image/*"
13 |           beforeUpload={info => {
14 |             this.submit(info, this.props.data)
15 |             return false;
16 |           }}
17 |         >
18 |           <p className="ant-upload-drag-icon">
19 |             <Icon type="inbox" />
20 |           </p>
21 |           <p className="ant-upload-text">Click or drag a file to upload</p>
22 |         </Upload.Dragger>
23 |       </div>
24 |     );
25 |   }
26 | 
27 |   // Posts the dropped file (plus optional metadata) to the local encoder/decoder server.
28 |   submit = async (file, data) => {
29 |     let fileName = file.name;
30 |     let formData = new FormData();
31 |     formData.append("file", file);
32 |     if (data) formData.append("data", data);
33 |     let response = await fetch("http://localhost:5000/" + this.props.endpoint, {
34 |       method: "POST",
35 |       body: formData
36 |     });
37 |     this.props.onResponse(response, fileName);
38 |   };
39 | }
-------------------------------------------------------------------------------- /web/client/src/pages/encode/EncodePage.js: --------------------------------------------------------------------------------
 1 | import React from "react";
 2 | import UploadFileComponent from "../../components/UploadFile";
 3 | import Cookies from "js-cookie";
 4 | 
 5 | export default class EncodePage extends React.Component {
 6 |   render() {
 7 |     return (
 8 |       <div className="wrapper">
 9 |         <h1>Sign File</h1>
10 |         <UploadFileComponent
11 |           endpoint="encode"
12 |           data={Cookies.get("facebook-id")}
13 |           onResponse={this.downloadFile}
14 |         />
15 |       </div>
16 | ); 17 | } 18 | 19 | downloadFile = (file, fileName) => { 20 | console.log("Starting timeout..."); 21 | console.log(file); 22 | setTimeout(async () => { 23 | let a = document.createElement("a"); 24 | document.body.appendChild(a); 25 | a.style = "display: none"; 26 | let url = window.URL.createObjectURL(await file.blob()); 27 | a.href = url; 28 | a.download = "protected_" + fileName; 29 | a.click(); 30 | window.URL.revokeObjectURL(url); 31 | }, 2000); 32 | }; 33 | 34 | handleFormChange(event) { 35 | this.setState({ value: event.target.value }); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /web/client/src/pages/select/SelectPage.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import "./select.css"; 3 | import Link from "react-router-dom/Link"; 4 | import { Button, Row, Col, Tooltip } from "antd"; 5 | import TestMorph from '../../components/TestMorph' 6 | 7 | export default class SelectPage extends React.Component { 8 | state = { 9 | page: "" 10 | }; 11 | render() { 12 | return ( 13 |
14 | 15 | 16 | {" "} 17 | 18 | 19 |
43 | ); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /web/client/src/components/TestMorph.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { Button } from "antd"; 3 | import { TweenLite } from "gsap"; 4 | 5 | export default class TestMorph extends React.Component { 6 | render() { 7 | return ( 8 |
9 | 23 | 24 | 34 | 39 | 40 |
41 | ); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /web/client/src/pages/App.js: -------------------------------------------------------------------------------- 1 | import React, { Component } from "react"; 2 | import AuthPage from "./auth/AuthPage"; 3 | import SelectPage from "./select/SelectPage"; 4 | import EncodePage from "./encode/EncodePage"; 5 | import DecodePage from "./decode/DecodePage"; 6 | import "./global.css"; 7 | import Router from "react-router-dom/BrowserRouter"; 8 | import { AnimatedSwitch, spring } from "react-router-transition"; 9 | import Route from "react-router-dom/Route"; 10 | import ImageTrace from '../components/ImageTrace/ImageTrace' 11 | 12 | 13 | function mapStyles(styles) { 14 | return { 15 | opacity: styles.opacity, 16 | transform: `scale(${styles.scale})` 17 | }; 18 | } 19 | 20 | // wrap the `spring` helper to use a bouncy config 21 | function bounce(val) { 22 | return spring(val, { 23 | stiffness: 330, 24 | damping: 22 25 | }); 26 | } 27 | 28 | class App extends Component { 29 | state = { 30 | page: "select" 31 | }; 32 | 33 | render() { 34 | return ( 35 |
36 | 37 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 |
61 | ); 62 | } 63 | } 64 | 65 | export default App; 66 | -------------------------------------------------------------------------------- /optimizers.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import random, sys, os, json, glob, math 4 | 5 | import IPython 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | import torch.optim as optim 11 | from torch.autograd import Variable 12 | 13 | 14 | class Optimizer(nn.Module): 15 | def __init__(self, parameters): 16 | self.parameters = parameters 17 | pass 18 | 19 | def step(self, loss): 20 | loss.backward(create_graph=True, retain_graph=True) 21 | for param in self.parameters: 22 | update = self.forward(param.grad, param) 23 | yield (param + update) 24 | 25 | def forward(self, grad, param=None): 26 | raise NotImplementedError() 27 | 28 | 29 | class Adam(Optimizer): 30 | def __init__(self, parameters, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, differentiable=False): 31 | 32 | super().__init__(parameters) 33 | 34 | self.lr = lr 35 | self.betas = betas 36 | self.eps = eps 37 | self.state = {param: {} for param in parameters} 38 | self.differentiable = differentiable 39 | 40 | def forward(self, grad, param=None): 41 | 42 | state = self.state[param] 43 | step = state["step"] = state.get("step", 0) + 1 44 | exp_avg = state["exp_avg"] = state.get("exp_avg", torch.zeros_like(grad.data)) 45 | exp_avg_sq = state["exp_avg_sq"] = state.get("exp_avg_sq", torch.zeros_like(grad.data)) 46 | beta1, beta2 = self.betas 47 | 48 | exp_avg = exp_avg * beta1 + (1 - beta1) * (grad) 49 | exp_avg_sq = exp_avg_sq * (beta2) + (1 - beta2) * (grad) * (grad) 50 | denom = exp_avg_sq.sqrt() + self.eps 51 | 52 | bias_correction1 = 1 - beta1 ** state["step"] 53 | bias_correction2 = 1 - beta2 ** state["step"] 54 | 55 | step_size = self.lr * math.sqrt(bias_correction2) / bias_correction1 56 | update = -step_size * exp_avg / denom 57 | 58 | state["exp_avg"] = exp_avg.detach().data 59 | state["exp_avg_sq"] = exp_avg_sq.detach().data 60 | 61 | if not self.differentiable: 62 | update = update.data 63 | 64 | return update 65 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | 2 | import random, sys, os, glob, yaml, time 3 | import argparse, subprocess, shutil, shlex 4 | from fire import Fire 5 | from utils import elapsed 6 | 7 | import IPython 8 | 9 | 10 | def execute(cmd, mode="experiment", config="default", shutdown=False, debug=False): 11 | 12 | elapsed() 13 | try: 14 | run_log = yaml.load(open("jobs/runlog.yml")) 15 | except: 16 | run_log = {} 17 | 18 | run_data = run_log[mode] = run_log.get(mode, {}) 19 | run_data["runs"] = run_data.get("runs", 0) + 1 20 | run_name = mode + str(run_data["runs"]) 21 | run_data[run_name] = run_data.get(run_name, {"config": config, "cmd": cmd, "status": "Running"}) 22 | run_data = run_data[run_name] 23 | 24 | print(f"Running job: {run_name}") 25 | 26 | shutil.rmtree("output/", ignore_errors=True) 27 | os.makedirs("output/") 28 | os.makedirs(f"jobs/{run_name}", exist_ok=True) 29 | 30 | with open("jobs/jobinfo.txt", "w") as config_file: 31 | print(config, file=config_file) 32 | 33 | cmd = shlex.split(cmd) 34 | if cmd[0] == "python" and debug: 35 | cmd[0] = "ipython" 36 | cmd.insert(1, "-i") 37 | elif cmd[0] == "python": 38 | cmd.insert(1, "-u") 39 | 40 | print(" ".join(cmd)) 41 | process = subprocess.Popen(cmd, shell=False, 
stdout=subprocess.PIPE, universal_newlines=True) 42 | 43 | try: 44 | with open(f"jobs/{run_name}/stdout.txt", "w") as outfile: 45 | for stdout_line in iter(process.stdout.readline, ""): 46 | print(stdout_line, end="") 47 | outfile.write(stdout_line) 48 | 49 | return_code = process.wait() 50 | run_data["status"] = "Error" if return_code else "Complete" 51 | except KeyboardInterrupt: 52 | print("\nKilled by user.") 53 | process.kill() 54 | run_data["status"] = "Killed" 55 | except OSError: 56 | print("\nSystem error.") 57 | process.kill() 58 | run_data["status"] = "Error" 59 | 60 | process.kill() 61 | 62 | if debug and run_data["status"] != "Complete": 63 | return 64 | 65 | shutil.copytree("output", f"jobs/{run_name}/output") 66 | for file in glob.glob("*.py"): 67 | shutil.copy(file, f"jobs/{run_name}") 68 | 69 | yaml.safe_dump(run_log, open("jobs/runlog.yml", "w"), allow_unicode=True, default_flow_style=False) 70 | yaml.safe_dump(run_data, open(f"jobs/{run_name}/comments.yml", "w"), allow_unicode=True, default_flow_style=False) 71 | 72 | interval = elapsed() 73 | print(f"Program ended after {interval:0.4f} seconds.") 74 | if shutdown and run_data["status"] != "Killed" and interval > 60: 75 | print(f"Shutting down in 1 minute.") 76 | time.sleep(60) 77 | subprocess.call("sudo shutdown -h now", shell=True) 78 | 79 | 80 | def run(cmd, mode="experiment", config="default", shutdown=False, debug=False): 81 | cmd = f""" screen -S {config} bash -c "python run.py execute \\"{cmd}\\" --mode {mode} --config {config} --shutdown {shutdown} --debug {debug}" """ 82 | subprocess.call(shlex.split(cmd)) 83 | 84 | 85 | if __name__ == "__main__": 86 | Fire({"run": run, "execute": execute}) 87 | -------------------------------------------------------------------------------- /old/train_dsc.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import random, sys, os, json, glob 4 | import tqdm, itertools, shutil 5 | 6 | import matplotlib as mpl 7 | 8 | mpl.use("Agg") 9 | import matplotlib.pyplot as plt 10 | 11 | import torch 12 | 13 | torch.backends.cudnn.benchmark = True 14 | import torch.nn as nn 15 | import torch.nn.functional as F 16 | import torch.optim as optim 17 | from torch.autograd import Variable 18 | 19 | from utils import * 20 | import transforms 21 | from modules import UNet 22 | from logger import Logger 23 | 24 | from sklearn.metrics import roc_auc_score 25 | from scipy.stats import pearsonr 26 | import IPython 27 | 28 | DATA_PATH = "data/encode_120" 29 | logger = Logger("train_dsc", ("loss", "corr"), print_every=5, plot_every=20) 30 | 31 | 32 | def loss_func(model, x, y): 33 | cleaned = model.forward(x) 34 | corr, p = pearsonr( 35 | cleaned.data.cpu().numpy().flatten(), y.data.cpu().numpy().flatten() 36 | ) 37 | return (cleaned - y).pow(2).sum(), corr 38 | 39 | 40 | def data_gen(files, batch_size=64): 41 | while True: 42 | enc_files = random.sample(files, batch_size) 43 | orig_files = [f.replace("encoded", "original") for f in enc_files] 44 | print(enc_files) 45 | encoded_ims = [im.load(image) for image in enc_files] 46 | original_ims = [im.load(image) for image in orig_files] 47 | encoded, original = im.stack(encoded_ims), im.stack(original_ims) 48 | 49 | yield encoded, (encoded - original) 50 | 51 | 52 | def viz_preds(model, x, y): 53 | preds = model(x) 54 | for i, (pred, truth, enc) in enumerate(zip(preds, y, x)): 55 | im.save(im.numpy(enc), f"{OUTPUT_DIR}{i}_encoded.jpg") 56 | im.save(3 * np.abs(im.numpy(pred)), 
f"{OUTPUT_DIR}{i}_pred.jpg") 57 | im.save(3 * np.abs(im.numpy(truth)), f"{OUTPUT_DIR}{i}_truth.jpg") 58 | 59 | 60 | if __name__ == "__main__": 61 | 62 | model = nn.DataParallel(UNet()) 63 | model.train() 64 | 65 | optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) 66 | 67 | # optimizer.load_state_dict('output/unet_opt.pth') 68 | model.module.load("jobs/experiment_unet/output/train_unet.pth") 69 | 70 | logger.add_hook( 71 | lambda: [ 72 | print(f"Saving model/opt to {OUTPUT_DIR}train_unet.pth"), 73 | model.module.save(OUTPUT_DIR + "train_unet.pth"), 74 | torch.save(optimizer.state_dict(), OUTPUT_DIR + "unet_opt.pth"), 75 | ], 76 | freq=100, 77 | ) 78 | 79 | files = glob.glob(f"{DATA_PATH}/*encoded*.jpg") 80 | train_files, val_files = files[:-128], files[-128:] 81 | x_val, y_val = next(data_gen(val_files, 128)) 82 | 83 | for i, (x, y) in enumerate(data_gen(train_files, 128)): 84 | loss, corr = loss_func(model, x, y) 85 | 86 | logger.step("loss", min(5000, loss)) 87 | logger.step("corr", corr) 88 | 89 | optimizer.zero_grad() 90 | loss.backward() 91 | optimizer.step() 92 | 93 | if i % 20 == 0: 94 | model.eval() 95 | val_loss = loss_func(model, x_val, y_val) 96 | model.train() 97 | print(f"val_loss = {val_loss}") 98 | 99 | if i == 2000: 100 | break 101 | -------------------------------------------------------------------------------- /old/train_unet.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import random, sys, os, json, glob 4 | import tqdm, itertools, shutil 5 | 6 | import matplotlib as mpl 7 | 8 | mpl.use("Agg") 9 | import matplotlib.pyplot as plt 10 | 11 | import torch 12 | 13 | torch.backends.cudnn.benchmark = True 14 | import torch.nn as nn 15 | import torch.nn.functional as F 16 | import torch.optim as optim 17 | from torch.autograd import Variable 18 | 19 | from utils import * 20 | import transforms 21 | from models import UNet 22 | from logger import Logger 23 | 24 | from sklearn.metrics import roc_auc_score 25 | from scipy.stats import pearsonr 26 | import IPython 27 | 28 | DATA_PATH = "data/encode_120" 29 | logger = Logger("train_dsc", ("loss", "corr"), print_every=5, plot_every=20) 30 | 31 | 32 | def loss_func(model, x, y): 33 | cleaned = model.forward(x) 34 | corr, p = pearsonr( 35 | cleaned.data.cpu().numpy().flatten(), y.data.cpu().numpy().flatten() 36 | ) 37 | return (cleaned - y).pow(2).sum(), corr 38 | 39 | 40 | def data_gen(files, batch_size=64): 41 | while True: 42 | enc_files = random.sample(files, batch_size) 43 | orig_files = [f.replace("encoded", "original") for f in enc_files] 44 | encoded_ims = [im.load(image) for image in enc_files] 45 | original_ims = [im.load(image) for image in orig_files] 46 | encoded, original = im.stack(encoded_ims), im.stack(original_ims) 47 | 48 | yield original, (encoded - original) 49 | 50 | 51 | def viz_preds(model, x, y): 52 | preds = model(x) 53 | for i, (pred, truth, enc) in enumerate(zip(preds, y, x)): 54 | im.save(im.numpy(enc + truth), f"{OUTPUT_DIR}{i}_encoded.jpg") 55 | im.save(3 * np.abs(im.numpy(pred)), f"{OUTPUT_DIR}{i}_pred.jpg") 56 | im.save(3 * np.abs(im.numpy(truth)), f"{OUTPUT_DIR}{i}_truth.jpg") 57 | 58 | 59 | if __name__ == "__main__": 60 | 61 | model = nn.DataParallel(UNet()) 62 | model.train() 63 | 64 | optimizer = torch.optim.Adam(model.parameters(), lr=3e-3) 65 | 66 | # optimizer.load_state_dict('output/unet_opt.pth') 67 | model.module.load("output/train_unet.pth") 68 | 69 | logger.add_hook( 70 | lambda: [ 71 | print(f"Saving model/opt to 
{OUTPUT_DIR}train_unet.pth"), 72 | model.module.save(OUTPUT_DIR + "train_unet.pth"), 73 | torch.save(optimizer.state_dict(), OUTPUT_DIR + "unet_opt.pth"), 74 | ], 75 | freq=100, 76 | ) 77 | 78 | files = glob.glob(f"{DATA_PATH}/*encoded*.jpg") 79 | train_files, val_files = files[:-142], files[-142:] 80 | x_val, y_val = next(data_gen(val_files, 142)) 81 | 82 | for i, (x, y) in enumerate(data_gen(train_files, 142)): 83 | loss, corr = loss_func(model, x, y) 84 | 85 | logger.step("loss", min(5000, loss)) 86 | logger.step("corr", corr) 87 | 88 | optimizer.zero_grad() 89 | loss.backward() 90 | optimizer.step() 91 | 92 | if i % 50 == 0: 93 | model.eval() 94 | val_loss = loss_func(model, x_val, y_val) 95 | viz_preds(model, x_val[:8], y_val[:8]) 96 | model.train() 97 | print(f"val_loss = {val_loss}") 98 | 99 | if i == 2000: 100 | break 101 | -------------------------------------------------------------------------------- /old/losses.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import numpy as np 4 | import random, sys, os, json 5 | 6 | import matplotlib as mpl 7 | 8 | mpl.use("Agg") 9 | import matplotlib.pyplot as plt 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.functional as F 14 | import torch.optim as optim 15 | from torch.autograd import Variable 16 | 17 | from torchvision import models 18 | from utils import * 19 | 20 | from skimage import filters 21 | from skimage.morphology import binary_dilation 22 | 23 | import IPython 24 | 25 | import transforms 26 | 27 | 28 | def getFeatures(model, x): 29 | x = torch.cat( 30 | [ 31 | ((x[0] - 0.485) / (0.229)).unsqueeze(0), 32 | ((x[1] - 0.456) / (0.224)).unsqueeze(0), 33 | ((x[2] - 0.406) / (0.225)).unsqueeze(0), 34 | ], 35 | dim=0, 36 | ) 37 | x = transforms.identity(x).unsqueeze(0) 38 | 39 | features = [] 40 | prev_feat = x 41 | 42 | for i, module in enumerate(model.features.features._modules.values()): 43 | next_feat = module(prev_feat) 44 | features.append(next_feat) 45 | prev_feat = next_feat 46 | 47 | return features 48 | 49 | 50 | def gram_matrix(features, normalize=True): 51 | N, C, H, W = features.shape 52 | featuresReshaped = features.reshape((N, C, H * W)) 53 | featuresTranspose = featuresReshaped.permute(0, 2, 1) 54 | ans = torch.matmul(featuresReshaped, featuresTranspose) 55 | if normalize: 56 | ans /= H * W * C 57 | return ans 58 | 59 | 60 | # features_list is a list of layers of model to calculate content at 61 | # weights_list is a list of corresponding weights for features_list 62 | def content_loss(model, features_list, weights_list, changed_image, original_image): 63 | 64 | original_features = model.getFeatures(original_image) 65 | changed_features = model.getFeatures(changed_image) 66 | 67 | loss = 0 68 | 69 | for i, layer_number in enumerate(features_list): 70 | activation_changed = changed_features[layer_number] 71 | activation_original = original_features[layer_number] 72 | 73 | N, C_1, H_1, W_1 = activation_changed.shape 74 | F_ij = activation_changed.reshape((C_1, H_1 * W_1)) 75 | P_ij = activation_original.reshape((C_1, H_1 * W_1)) 76 | loss += weights_list[i] * (((F_ij - P_ij).norm(2)) ** 2) 77 | 78 | return loss 79 | 80 | 81 | def gram_matrix(features, normalize=True): 82 | N, C, H, W = features.shape 83 | featuresReshaped = features.reshape((N, C, H * W)) 84 | featuresTranspose = featuresReshaped.permute(0, 2, 1) 85 | ans = torch.matmul(featuresReshaped, featuresTranspose) 86 | if normalize: 87 | ans /= H * W * 
C 88 | return ans 89 | 90 | 91 | # features_list is a list of layers of model to calculate style at 92 | # weights_list is a list of corresponding weights for features_list 93 | def style_loss(model, features_list, weights_list, changed_image, original_image): 94 | 95 | original_features = model.getFeatures(original_image) 96 | changed_features = model.getFeatures(changed_image) 97 | 98 | loss = 0 99 | 100 | for i, layer_number in enumerate(features_list): 101 | activation_changed = changed_features[layer_number] 102 | activation_original = original_features[layer_number] 103 | changed_gram = gram_matrix(activation_changed) 104 | original_gram = gram_matrix(activation_original) 105 | loss += weights_list[i] * (((changed_gram - original_gram).norm(2)) ** 2) 106 | 107 | return loss 108 | -------------------------------------------------------------------------------- /train_amnesia.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import random, sys, os, json, glob 4 | import tqdm, itertools, shutil 5 | 6 | import matplotlib as mpl 7 | 8 | mpl.use("Agg") 9 | import matplotlib.pyplot as plt 10 | 11 | import torch 12 | 13 | torch.backends.cudnn.benchmark = True 14 | import torch.nn as nn 15 | import torch.nn.functional as F 16 | import torch.optim as optim 17 | from torch.autograd import Variable 18 | 19 | from utils import * 20 | import transforms 21 | from encoding import encode_binary 22 | from models import DecodingModel, DataParallelModel 23 | from logger import Logger, VisdomLogger 24 | 25 | from skimage.morphology import binary_dilation 26 | import IPython 27 | 28 | from testing import test_transforms 29 | 30 | 31 | def loss_func(model, x, targets): 32 | scores = model.forward(x) 33 | predictions = scores.mean(dim=1) 34 | score_targets = binary.target(targets).unsqueeze(1).expand_as(scores) 35 | 36 | return (F.binary_cross_entropy(scores, score_targets), predictions.cpu().data.numpy().round(2)) 37 | 38 | 39 | def init_data(output_path, n=None): 40 | 41 | shutil.rmtree(output_path) 42 | os.makedirs(output_path) 43 | 44 | image_files = TRAIN_FILES 45 | if n is not None: 46 | image_files = image_files[0:n] 47 | 48 | for k, files in tqdm.tqdm(list(enumerate(batch(image_files, batch_size=BATCH_SIZE))), ncols=50): 49 | 50 | images = im.stack([im.load(img_file) for img_file in files]).detach() 51 | perturbation = nn.Parameter(0.03 * torch.randn(images.size()).to(DEVICE) + 0.0) 52 | targets = [binary.random(n=TARGET_SIZE) for i in range(len(images))] 53 | torch.save((perturbation.data, images.data, targets), f"{output_path}/{k}.pth") 54 | 55 | 56 | if __name__ == "__main__": 57 | 58 | model = DataParallelModel(DecodingModel(n=DIST_SIZE, distribution=transforms.training)) 59 | params = itertools.chain(model.module.classifier.parameters(), model.module.features[-1].parameters()) 60 | optimizer = torch.optim.Adam(params, lr=2.5e-3) 61 | init_data("data/amnesia") 62 | 63 | logger = VisdomLogger("train", server="35.230.67.129", port=8000, env=JOB) 64 | logger.add_hook(lambda x: logger.step(), feature="epoch", freq=20) 65 | logger.add_hook(lambda data: logger.plot(data, "train_loss"), feature="loss", freq=50) 66 | logger.add_hook(lambda data: logger.plot(data, "train_bits"), feature="bits", freq=50) 67 | logger.add_hook(lambda x: model.save("output/train_test.pth", verbose=True), feature="epoch", freq=100) 68 | model.save("output/train_test.pth", verbose=True) 69 | 70 | files = glob.glob(f"data/amnesia/*.pth") 71 | for i, save_file in 
enumerate(random.choice(files) for i in range(0, 2701)): 72 | 73 | perturbation, images, targets = torch.load(save_file) 74 | perturbation = perturbation.requires_grad_() 75 | 76 | perturbation.requires_grad = True 77 | encoded_ims, perturbation = encode_binary( 78 | images, targets, model, max_iter=1, perturbation=perturbation, use_weighting=True 79 | ) 80 | 81 | loss, predictions = loss_func(model, encoded_ims, targets) 82 | error = np.mean([binary.distance(x, y) for x, y in zip(predictions, targets)]) 83 | 84 | logger.update("epoch", i) 85 | logger.update("loss", loss) 86 | logger.update("bits", error) 87 | 88 | loss.backward() 89 | optimizer.step() 90 | optimizer.zero_grad() 91 | 92 | torch.save((perturbation.data, images.data, targets), save_file) 93 | 94 | if i != 0 and i % 300 == 0: 95 | 96 | model.save("output/train_test.pth") 97 | model2 = DataParallelModel( 98 | DecodingModel.load(distribution=transforms.training, n=DIST_SIZE, weights_file="output/train_test.pth") 99 | ) 100 | # test_transforms(model, random.sample(TRAIN_FILES, 16), name=f'iter{i}_train') 101 | test_transforms(model2, VAL_FILES, name=f"iter{i}_test", max_iter=300) 102 | -------------------------------------------------------------------------------- /old/modules.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import print_function 3 | 4 | import numpy as np 5 | import random, sys, os, json 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | import torch.optim as optim 11 | from torch.autograd import Variable 12 | 13 | from torchvision import models 14 | from utils import * 15 | import transforms 16 | 17 | import IPython 18 | 19 | 20 | class UNet_down_block(nn.Module): 21 | def __init__(self, input_channel, output_channel, down_size=True): 22 | super(UNet_down_block, self).__init__() 23 | self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1) 24 | self.bn1 = nn.BatchNorm2d(output_channel) 25 | self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1) 26 | self.bn2 = nn.BatchNorm2d(output_channel) 27 | self.max_pool = nn.MaxPool2d(2, 2) 28 | self.relu = nn.ReLU() 29 | self.down_size = down_size 30 | 31 | def forward(self, x): 32 | 33 | x = self.relu(self.bn1(self.conv1(x))) 34 | x = self.relu(self.bn2(self.conv2(x))) 35 | if self.down_size: 36 | x = self.max_pool(x) 37 | return x 38 | 39 | 40 | class UNet_up_block(nn.Module): 41 | def __init__(self, prev_channel, input_channel, output_channel, up_sample=True): 42 | super(UNet_up_block, self).__init__() 43 | self.up_sampling = nn.Upsample(scale_factor=2, mode="bilinear") 44 | self.conv1 = nn.Conv2d( 45 | prev_channel + input_channel, output_channel, 3, padding=1 46 | ) 47 | self.bn1 = nn.BatchNorm2d(output_channel) 48 | self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1) 49 | self.bn2 = nn.BatchNorm2d(output_channel) 50 | self.relu = torch.nn.ReLU() 51 | self.up_sample = up_sample 52 | 53 | def forward(self, prev_feature_map, x): 54 | if self.up_sample: 55 | x = self.up_sampling(x) 56 | x = torch.cat((x, prev_feature_map), dim=1) 57 | x = self.relu(self.bn1(self.conv1(x))) 58 | x = self.relu(self.bn2(self.conv2(x))) 59 | return x 60 | 61 | 62 | class UNet(nn.Module): 63 | def __init__(self): 64 | super(UNet, self).__init__() 65 | 66 | self.down_block1 = UNet_down_block(3, 16, False) 67 | self.down_block2 = UNet_down_block(16, 32, True) 68 | self.down_block3 = UNet_down_block(32, 64, True) 69 | self.down_block4 = 
UNet_down_block(64, 128, True) 70 | self.down_block5 = UNet_down_block(128, 256, True) 71 | # self.down_block6 = UNet_down_block(256, 512, True) 72 | 73 | self.mid_conv1 = nn.Conv2d(256, 256, 3, padding=1) 74 | self.bn1 = nn.BatchNorm2d(256) 75 | # self.mid_conv2 = nn.Conv2d(512, 512, 3, padding=1) 76 | # self.bn2 = nn.BatchNorm2d(512) 77 | # self.mid_conv3 = torch.nn.Conv2d(512, 512, 3, padding=1) 78 | # self.bn3 = torch.nn.BatchNorm2d(512) 79 | 80 | # self.up_block1 = UNet_up_block(256, 512, 256) 81 | self.up_block2 = UNet_up_block(128, 256, 128) 82 | self.up_block3 = UNet_up_block(64, 128, 64) 83 | self.up_block4 = UNet_up_block(32, 64, 32) 84 | self.up_block5 = UNet_up_block(16, 32, 16) 85 | 86 | self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) 87 | self.last_bn = nn.BatchNorm2d(16) 88 | self.last_conv2 = nn.Conv2d(16, 3, 1, padding=0) 89 | self.relu = nn.ReLU() 90 | self.to(DEVICE) 91 | 92 | def forward(self, x): 93 | self.x1 = self.down_block1(x) 94 | self.x2 = self.down_block2(self.x1) 95 | self.x3 = self.down_block3(self.x2) 96 | self.x4 = self.down_block4(self.x3) 97 | self.x5 = self.down_block5(self.x4) 98 | # self.x6 = self.down_block6(self.x5) 99 | 100 | self.x5 = self.relu(self.bn1(self.mid_conv1(self.x5))) 101 | # self.x6 = self.relu(self.bn2(self.mid_conv2(self.x6))) 102 | # self.x6 = self.relu(self.bn3(self.mid_conv3(self.x6))) 103 | 104 | # x = self.up_block1(self.x5, self.x6) 105 | x = self.up_block2(self.x4, self.x5) 106 | x = self.up_block3(self.x3, x) 107 | x = self.up_block4(self.x2, x) 108 | x = self.up_block5(self.x1, x) 109 | x = self.relu(self.last_bn(self.last_conv1(x))) 110 | x = self.last_conv2(x) 111 | return x 112 | 113 | def load(self, file_path): 114 | self.load_state_dict(torch.load(file_path)) 115 | 116 | def save(self, file_path): 117 | torch.save(self.state_dict(), file_path) 118 | -------------------------------------------------------------------------------- /encoding.py: -------------------------------------------------------------------------------- 1 | 2 | import random, sys, os, json, glob, argparse 3 | 4 | import numpy as np 5 | import matplotlib as mpl 6 | 7 | mpl.use("Agg") 8 | import matplotlib.pyplot as plt 9 | from fire import Fire 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.nn.functional as F 14 | import torch.optim as optim 15 | from torch.autograd import Variable 16 | 17 | from models import DecodingModel, DataParallelModel 18 | from torchvision import models 19 | from logger import Logger, VisdomLogger 20 | from utils import * 21 | 22 | import IPython 23 | 24 | import transforms 25 | 26 | 27 | # LOGGING 28 | logger = VisdomLogger("encoding", server="35.230.67.129", port=8000, env=JOB) 29 | logger.add_hook(lambda x: logger.step(), feature="epoch", freq=20) 30 | logger.add_hook(lambda x: logger.plot(x, "Encoding Loss", opts=dict(ymin=0)), feature="loss", freq=50) 31 | 32 | 33 | """ 34 | Computes the changed images, given a a specified perturbation, standard deviation weighting, 35 | and epsilon. 
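(The weighted perturbation is renormalized so that its per-image L2 norm equals
epsilon * sqrt(#elements); equivalently, the root-mean-square pixel change is epsilon.)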
36 | """ 37 | 38 | 39 | def compute_changed_images(images, perturbation, std_weights, epsilon=EPSILON): 40 | 41 | perturbation_w2 = perturbation * std_weights 42 | perturbation_zc = ( 43 | perturbation_w2 44 | / perturbation_w2.view(perturbation_w2.shape[0], -1) 45 | .norm(2, dim=1, keepdim=True) 46 | .unsqueeze(2) 47 | .unsqueeze(2) 48 | .expand_as(perturbation_w2) 49 | * epsilon 50 | * (perturbation_w2[0].nelement() ** 0.5) 51 | ) 52 | 53 | changed_images = (images + perturbation_zc).clamp(min=0.0, max=1.0) 54 | return changed_images 55 | 56 | 57 | """ 58 | Computes the cross entropy loss of a set of encoded images, given the model and targets. 59 | """ 60 | 61 | 62 | def loss_func(model, x, targets): 63 | scores = model.forward(x) 64 | predictions = scores.mean(dim=1) 65 | score_targets = binary.target(targets).unsqueeze(1).expand_as(scores) 66 | 67 | return (F.binary_cross_entropy(scores, score_targets), predictions.cpu().data.numpy().round(2)) 68 | 69 | 70 | """ 71 | Encodes a set of images with the specified binary targets, for a given number of iterations. 72 | """ 73 | 74 | 75 | def encode_binary( 76 | images, targets, model=DecodingModel(), n=None, max_iter=500, verbose=False, perturbation=None, use_weighting=False 77 | ): 78 | 79 | if n is not None: 80 | if verbose: 81 | print(f"Changing distribution size: {model.n} -> {n}") 82 | n, model.n = (model.n, n) 83 | 84 | returnPerturbation = True 85 | if perturbation is None: 86 | perturbation = nn.Parameter(0.03 * torch.randn(images.size()).to(DEVICE) + 0.0) 87 | returnPerturbation = False 88 | 89 | changed_images = images.detach() 90 | optimizer = torch.optim.Adam([perturbation], lr=ENCODING_LR) 91 | std_weights = get_std_weight(images, alpha=PERT_ALPHA) if use_weighting else 1 92 | 93 | for i in range(0, max_iter): 94 | 95 | changed_images = compute_changed_images(images, perturbation, std_weights) 96 | loss, predictions = loss_func(model, changed_images, targets) 97 | 98 | loss.backward() 99 | optimizer.step() 100 | optimizer.zero_grad() 101 | 102 | error = np.mean([binary.distance(x, y) for x, y in zip(predictions, targets)]) 103 | 104 | if verbose: 105 | logger.update("epoch", i) 106 | logger.update("loss", loss) 107 | logger.update("bits", error) 108 | 109 | changed_images = compute_changed_images(images, perturbation, std_weights) 110 | 111 | if n is not None: 112 | if verbose: 113 | print(f"Fixing distribution size: {model.n} -> {n}") 114 | n, model.n = (model.n, n) 115 | 116 | if returnPerturbation: 117 | return changed_images.detach(), perturbation.detach() 118 | 119 | return changed_images.detach() 120 | 121 | 122 | """ 123 | Command-line interface for encoding a single image. 
124 | """ 125 | 126 | 127 | def encode( 128 | image, 129 | out, 130 | target=binary.str(binary.random(TARGET_SIZE)), 131 | n=96, 132 | model=None, 133 | max_iter=500, 134 | use_weighting=True, 135 | perturbation_out=None, 136 | ): 137 | 138 | if not isinstance(model, DecodingModel): 139 | model = DataParallelModel(DecodingModel.load(distribution=transforms.encoding, n=n, weights_file=model)) 140 | image = im.torch(im.load(image)).unsqueeze(0) 141 | print("Target: ", target) 142 | target = binary.parse(str(target)) 143 | encoded = encode_binary(image, [target], model, n=n, verbose=True, max_iter=max_iter, use_weighting=use_weighting) 144 | im.save(im.numpy(encoded.squeeze()), file=out) 145 | if perturbation_out != None: 146 | im.save(im.numpy((image - encoded).squeeze()), file=perturbation_out) 147 | 148 | 149 | if __name__ == "__main__": 150 | Fire(encode) 151 | -------------------------------------------------------------------------------- /web/client/src/registerServiceWorker.js: -------------------------------------------------------------------------------- 1 | // In production, we register a service worker to serve assets from local cache. 2 | 3 | // This lets the app load faster on subsequent visits in production, and gives 4 | // it offline capabilities. However, it also means that developers (and users) 5 | // will only see deployed updates on the "N+1" visit to a page, since previously 6 | // cached resources are updated in the background. 7 | 8 | // To learn more about the benefits of this model, read https://goo.gl/KwvDNy. 9 | // This link also includes instructions on opting out of this behavior. 10 | 11 | const isLocalhost = Boolean( 12 | window.location.hostname === 'localhost' || 13 | // [::1] is the IPv6 localhost address. 14 | window.location.hostname === '[::1]' || 15 | // 127.0.0.1/8 is considered localhost for IPv4. 16 | window.location.hostname.match( 17 | /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/ 18 | ) 19 | ); 20 | 21 | export default function register() { 22 | if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) { 23 | // The URL constructor is available in all browsers that support SW. 24 | const publicUrl = new URL(process.env.PUBLIC_URL, window.location); 25 | if (publicUrl.origin !== window.location.origin) { 26 | // Our service worker won't work if PUBLIC_URL is on a different origin 27 | // from what our page is served on. This might happen if a CDN is used to 28 | // serve assets; see https://github.com/facebookincubator/create-react-app/issues/2374 29 | return; 30 | } 31 | 32 | window.addEventListener('load', () => { 33 | const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`; 34 | 35 | if (isLocalhost) { 36 | // This is running on localhost. Lets check if a service worker still exists or not. 37 | checkValidServiceWorker(swUrl); 38 | 39 | // Add some additional logging to localhost, pointing developers to the 40 | // service worker/PWA documentation. 41 | navigator.serviceWorker.ready.then(() => { 42 | console.log( 43 | 'This web app is being served cache-first by a service ' + 44 | 'worker. To learn more, visit https://goo.gl/SC7cgQ' 45 | ); 46 | }); 47 | } else { 48 | // Is not local host. 
Just register service worker 49 | registerValidSW(swUrl); 50 | } 51 | }); 52 | } 53 | } 54 | 55 | function registerValidSW(swUrl) { 56 | navigator.serviceWorker 57 | .register(swUrl) 58 | .then(registration => { 59 | registration.onupdatefound = () => { 60 | const installingWorker = registration.installing; 61 | installingWorker.onstatechange = () => { 62 | if (installingWorker.state === 'installed') { 63 | if (navigator.serviceWorker.controller) { 64 | // At this point, the old content will have been purged and 65 | // the fresh content will have been added to the cache. 66 | // It's the perfect time to display a "New content is 67 | // available; please refresh." message in your web app. 68 | console.log('New content is available; please refresh.'); 69 | } else { 70 | // At this point, everything has been precached. 71 | // It's the perfect time to display a 72 | // "Content is cached for offline use." message. 73 | console.log('Content is cached for offline use.'); 74 | } 75 | } 76 | }; 77 | }; 78 | }) 79 | .catch(error => { 80 | console.error('Error during service worker registration:', error); 81 | }); 82 | } 83 | 84 | function checkValidServiceWorker(swUrl) { 85 | // Check if the service worker can be found. If it can't reload the page. 86 | fetch(swUrl) 87 | .then(response => { 88 | // Ensure service worker exists, and that we really are getting a JS file. 89 | if ( 90 | response.status === 404 || 91 | response.headers.get('content-type').indexOf('javascript') === -1 92 | ) { 93 | // No service worker found. Probably a different app. Reload the page. 94 | navigator.serviceWorker.ready.then(registration => { 95 | registration.unregister().then(() => { 96 | window.location.reload(); 97 | }); 98 | }); 99 | } else { 100 | // Service worker found. Proceed as normal. 101 | registerValidSW(swUrl); 102 | } 103 | }) 104 | .catch(() => { 105 | console.log( 106 | 'No internet connection found. App is running in offline mode.' 
107 | ); 108 | }); 109 | } 110 | 111 | export function unregister() { 112 | if ('serviceWorker' in navigator) { 113 | navigator.serviceWorker.ready.then(registration => { 114 | registration.unregister(); 115 | }); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /logger.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import matplotlib as mpl 4 | 5 | mpl.use("Agg") 6 | import matplotlib.pyplot as plt 7 | import random, sys, os, json, math 8 | 9 | import torch 10 | from torchvision import datasets, transforms 11 | import visdom 12 | 13 | from utils import * 14 | import IPython 15 | 16 | 17 | class BaseLogger(object): 18 | def __init__(self, name, verbose=True): 19 | 20 | self.name = name 21 | self.data = {} 22 | self.running_data = {} 23 | self.reset_running = {} 24 | self.verbose = verbose 25 | self.hooks = [] 26 | 27 | def add_hook(self, hook, feature="epoch", freq=40): 28 | self.hooks.append((hook, feature, freq)) 29 | 30 | def update(self, feature, x): 31 | if isinstance(x, torch.Tensor): 32 | x = x.data.cpu().numpy().mean() 33 | 34 | self.data[feature] = self.data.get(feature, []) 35 | self.data[feature].append(x) 36 | if feature not in self.running_data or self.reset_running.pop(feature, False): 37 | self.running_data[feature] = [] 38 | self.running_data[feature].append(x) 39 | 40 | for hook, hook_feature, freq in self.hooks: 41 | if feature == hook_feature and len(self.data[feature]) % freq == 0: 42 | hook(self.data[feature]) 43 | 44 | def step(self): 45 | self.text(f"({self.name}) ", end="") 46 | for feature in self.running_data.keys(): 47 | if len(self.running_data[feature]) == 0: 48 | continue 49 | val = np.mean(self.running_data[feature]) 50 | if float(val).is_integer(): 51 | self.text(f"{feature}: {int(val)}", end=", ") 52 | else: 53 | self.text(f"{feature}: {val:0.4f}", end=", ") 54 | self.reset_running[feature] = True 55 | self.text(f" ... {elapsed():0.2f} sec") 56 | 57 | def text(self, text, end="\n"): 58 | raise NotImplementedError() 59 | 60 | def plot(self, data, plot_name, opts={}): 61 | raise NotImplementedError() 62 | 63 | def images(self, data, image_name): 64 | raise NotImplementedError() 65 | 66 | 67 | class Logger(BaseLogger): 68 | def __init__(self, *args, **kwargs): 69 | self.results = kwargs.pop("results", "output") 70 | super().__init__(*args, **kwargs) 71 | 72 | def text(self, text, end="\n"): 73 | print(text, end=end, flush=True) 74 | 75 | def plot(self, data, plot_name, opts={}): 76 | np.savez_compressed(f"{self.results}/{plot_name}.npz", data) 77 | plt.plot(data) 78 | plt.savefig(f"{self.results}/{plot_name}.jpg") 79 | plt.clf() 80 | 81 | 82 | class VisdomLogger(BaseLogger): 83 | def __init__(self, *args, **kwargs): 84 | self.port = kwargs.pop("port", 7000) 85 | self.server = kwargs.pop("server", "35.230.67.129") 86 | self.env = kwargs.pop("env", "main") 87 | print(f"Logging to environment {self.env}") 88 | self.visdom = visdom.Visdom( 89 | server="http://" + self.server, port=self.port, env=self.env, use_incoming_socket=False 90 | ) 91 | self.visdom.delete_env(self.env) 92 | self.windows = {} 93 | super().__init__(*args, **kwargs) 94 | 95 | def text(self, text, end="\n"): 96 | print(text, end=end) 97 | window, old_text = self.windows.get("text", (None, "")) 98 | if end == "\n": 99 | end = "
" 100 | display = old_text + text + end 101 | 102 | if window is not None: 103 | window = self.visdom.text(display, win=window, append=False) 104 | else: 105 | window = self.visdom.text(display) 106 | 107 | self.windows["text"] = window, display 108 | 109 | def viz(self, viz_name, method, *args, **kwargs): 110 | window = self.windows.get(viz_name, None) 111 | if window is not None: 112 | window = getattr(self.visdom, method)(*args, **kwargs, win=window) 113 | else: 114 | window = getattr(self.visdom, method)(*args, **kwargs, win=window) 115 | self.windows[viz_name] = window 116 | 117 | def plot(self, data, plot_name, opts={}): 118 | 119 | window = self.windows.get(plot_name, None) 120 | opts.update({"title": plot_name}) 121 | 122 | self.viz(plot_name, "line", np.array(data), opts=opts) 123 | 124 | def images(self, data, image_name, opts={}, resize=64): 125 | 126 | transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(resize), transforms.ToTensor()]) 127 | data = torch.stack([transform(x) for x in data.cpu()]) 128 | data = data.data.cpu().numpy() 129 | 130 | window = self.windows.get(image_name, None) 131 | 132 | opts.update({"title": image_name}) 133 | self.viz(image_name, "images", np.array(data), opts=opts) 134 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NeuralHash: An Adversarial Steganographic Method For Robust, Imperceptible Watermarking 2 | Building the next-gen watermark with deep learning: imperceptibly encoding images with un-erasable patterns to verify content ownership. 3 | 4 | ## What it does: 5 | Given an image (like Scream), Neuralhash makes small perturbations to visually encode a unique signature of the author: 6 | 7 | original_to_watermarked 8 | 9 | Which is able to be decoded even after extreme transformations (like a cellphone photo of the encoded image): 10 | 11 |

12 | *(figure: decoding a cellphone photo of the watermarked image)*
13 | 
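For reference, such an encoded image is produced by `encoding.py` (exposed as a CLI through Fire). A minimal sketch with illustrative paths; in practice you would also pass trained decoder weights via `--model`:

```python
from encoding import encode

# Signs the image with a random 32-bit target by default (hypothetical paths).
encode("images/car.jpg", out="output/car_encoded.jpg", max_iter=500)
# Shell equivalent: python encoding.py images/car.jpg output/car_encoded.jpg --max_iter 500
```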

14 | 
15 | Our secure watermarking scheme represents a significant advance in content-ownership protection and piracy prevention on the Internet.
16 | ## Harnessing Adversarial Examples
17 | 
18 | Our key insight is that we can use adversarial-example techniques on a Decoder Network (which maps input images to 32-bit signatures) to generate perturbations that decode to the desired signature. We perform projected gradient descent under the Expectation over Transformation (EoT) framework, as follows:
19 | 

20 | *(figure: projected gradient descent under the Expectation over Transformation framework)*
21 | 
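The encoding loop itself reduces to a few lines. Below is a minimal sketch, simplified from `encode_binary` in `encoding.py`: the real implementation renormalizes the perturbation onto an epsilon-scaled L2 ball and optionally weights it by local pixel statistics, but the clamped version conveys the idea.

```python
import random
import torch
import torch.nn.functional as F

def encode_pgd(model, image, target_bits, transform_dist, steps=500, lr=1e-1, eps=0.03):
    """Minimal EoT/PGD encoder (illustrative, not the repo's exact code)."""
    delta = torch.zeros_like(image, requires_grad=True)
    opt = torch.optim.Adam([delta], lr=lr)
    for _ in range(steps):
        t = random.choice(transform_dist)                # EoT: sample a random transform
        scores = model(t((image + delta).clamp(0, 1)))   # decoder outputs per-bit probabilities
        loss = F.binary_cross_entropy(scores, target_bits.expand_as(scores))
        opt.zero_grad()
        loss.backward()
        opt.step()
        with torch.no_grad():                            # "projection": keep the change small
            delta.clamp_(-eps, eps)
    return (image + delta).clamp(0, 1).detach()
```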

22 | We simulate an attack distribution using a set of differentiable transformations over which we train. Here are some sample transforms:
23 | 

24 | *(figure: sample differentiable transforms from the attack distribution)*
25 | 
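Every transform in the distribution must be differentiable so that gradients flow back to the pixels. A rotation, for instance, can be built from `affine_grid`/`grid_sample`; this is an illustrative stand-in for the versions in `transforms.py`:

```python
import math
import torch
import torch.nn.functional as F

def rotate(x, angle_deg):
    """Differentiably rotate a batch of images shaped (N, C, H, W)."""
    a = math.radians(angle_deg)
    theta = torch.tensor([[math.cos(a), -math.sin(a), 0.0],
                          [math.sin(a),  math.cos(a), 0.0]], device=x.device)
    grid = F.affine_grid(theta.unsqueeze(0).expand(x.size(0), -1, -1), x.size(),
                         align_corners=False)
    return F.grid_sample(x, grid, align_corners=False)
```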

26 | 
27 | ## Training the Network
28 | We also propose a method to train our decoder network under the Expectation-Maximization (EM) framework to learn feature transformations that are more resilient to the threat space of attacks. As shown below, we alternate between encoding images with the network and updating the network's weights to be more robust to attacks.
29 | 

30 | *(figure: EM training loop, alternating encoding steps and decoder updates)*
31 | 
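Concretely, the loop in `train_amnesia.py` interleaves the two steps; slightly simplified, each iteration loads a stored batch, takes one encoding step, then one decoder step:

```python
# E-step / M-step alternation (condensed from train_amnesia.py).
for save_file in (random.choice(files) for _ in range(num_steps)):
    perturbation, images, targets = torch.load(save_file)

    # E-step: one PGD step on the stored perturbation keeps the batch encoded.
    encoded, perturbation = encode_binary(images, targets, model, max_iter=1,
                                          perturbation=perturbation.requires_grad_())

    # M-step: update the decoder weights to read the signatures more reliably.
    loss, predictions = loss_func(model, encoded, targets)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

    torch.save((perturbation.data, images.data, targets), save_file)
```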

32 | 
33 | The plots below show the robustness of our encoded images during training: over successive iterations the curve becomes flatter, indicating robustness to rotation and scaling. As shown later, our approach generalizes to more extreme transformations.
34 | 

35 | *(figure: bit-error curves over rotation and scaling across training iterations)*
36 | 
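Each curve is produced by sweeping a transform parameter and measuring the mean bit error of the decoded signatures. A sketch of that measurement, assuming the `rotate` helper above and the repo's `binary` utilities:

```python
import numpy as np

def bit_error_vs_rotation(model, encoded_images, targets, angles=range(-90, 91, 10)):
    errors = []
    for angle in angles:
        scores = model(rotate(encoded_images, angle))
        predictions = scores.mean(dim=1).cpu().data.numpy()  # per-bit probabilities
        errors.append(np.mean([binary.distance(p, t) for p, t in zip(predictions, targets)]))
    return list(angles), errors
```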

37 | 
38 | ## Sample Encodings
39 | Here are some sample original images (top row) and the corresponding watermarked images (bottom row):
40 | *(figure: original and watermarked image pairs)*
41 | 
42 | 
43 | ## Example Attacks
44 | Some examples where our approach successfully decodes the correct signature and examples where it fails:
45 | *(figure: attacked images with successful and failed decodings)*
46 | 
47 | 
48 | ## Final Thoughts:
49 | 
50 | The development of a secure watermarking scheme is an important problem that has applications in content ownership and piracy prevention. Current state-of-the-art techniques are unable to demonstrate robustness across a variety of affine transformations. We propose a method that harnesses the expressiveness of deep neural networks to covertly embed imperceptible, transformation-resilient binary signatures into images. Given a decoder network, our key insight is that adversarial example generation techniques can be used to encode images by performing projected gradient descent on the image to embed a chosen signature.
51 | 
52 | By performing projected gradient descent on the decoder model with respect to a given image, we can use it to “sign” images robustly (think of a more advanced watermark). We start with the original image, then repeatedly tweak the pixel values such that the image (and all of its transformations, including scaling, rotation, added noise, blurring, random cropping, and more) decodes to a specified 32-bit code. The resulting image is almost indistinguishable from the original, yet contains an easily decodable signature that cannot be removed even by the most dedicated of adversaries.
53 | 
54 | We also propose a method to train our decoder network under the Expectation-Maximization (EM) framework to learn feature transformations that are more resilient to the threat space of attacks. Experimental results indicate that our model achieves robustness across different transformations such as scaling and rotation, with improved results over the course of EM training. Furthermore, we show an inherent trade-off between robustness and imperceptibility, which gives the user of the model flexibility in adjusting parameters to fit a particular task.
55 | 
56 | Paper and more details coming soon.
57 | 
-------------------------------------------------------------------------------- /web/client/src/pages/background.js: --------------------------------------------------------------------------------
 1 | 
 2 | 
 3 | var colorPicker = (function() {
 4 |   var colors = ["#FF6138", "#FFBE53", "#2980B9", "#282741"];
 5 |   var index = 0;
 6 |   function next() {
 7 |     index = index++ < colors.length - 1 ?
index : 0; 8 | return colors[index]; 9 | } 10 | function current() { 11 | return colors[index]; 12 | } 13 | return { 14 | next: next, 15 | current: current 16 | }; 17 | })(); 18 | 19 | function removeAnimation(animation) { 20 | var index = animations.indexOf(animation); 21 | if (index > -1) animations.splice(index, 1); 22 | } 23 | 24 | function calcPageFillRadius(x, y) { 25 | var l = Math.max(x - 0, cW - x); 26 | var h = Math.max(y - 0, cH - y); 27 | return Math.sqrt(Math.pow(l, 2) + Math.pow(h, 2)); 28 | } 29 | 30 | function addClickListeners() { 31 | document.addEventListener("touchstart", handleEvent); 32 | document.addEventListener("mousedown", handleEvent); 33 | } 34 | 35 | function handleEvent(e) { 36 | if (e.touches) { 37 | e.preventDefault(); 38 | e = e.touches[0]; 39 | } 40 | var currentColor = colorPicker.current(); 41 | var nextColor = colorPicker.next(); 42 | var targetR = calcPageFillRadius(e.pageX, e.pageY); 43 | var rippleSize = Math.min(200, cW * 0.4); 44 | var minCoverDuration = 750; 45 | 46 | var pageFill = new Circle({ 47 | x: e.pageX, 48 | y: e.pageY, 49 | r: 0, 50 | fill: nextColor 51 | }); 52 | var fillAnimation = anime({ 53 | targets: pageFill, 54 | r: targetR, 55 | duration: Math.max(targetR / 2, minCoverDuration), 56 | easing: "easeOutQuart", 57 | complete: function() { 58 | bgColor = pageFill.fill; 59 | removeAnimation(fillAnimation); 60 | } 61 | }); 62 | 63 | var ripple = new Circle({ 64 | x: e.pageX, 65 | y: e.pageY, 66 | r: 0, 67 | fill: currentColor, 68 | stroke: { 69 | width: 3, 70 | color: currentColor 71 | }, 72 | opacity: 1 73 | }); 74 | var rippleAnimation = anime({ 75 | targets: ripple, 76 | r: rippleSize, 77 | opacity: 0, 78 | easing: "easeOutExpo", 79 | duration: 900, 80 | complete: removeAnimation 81 | }); 82 | 83 | var particles = []; 84 | for (var i = 0; i < 32; i++) { 85 | var particle = new Circle({ 86 | x: e.pageX, 87 | y: e.pageY, 88 | fill: currentColor, 89 | r: anime.random(24, 48) 90 | }); 91 | particles.push(particle); 92 | } 93 | var particlesAnimation = anime({ 94 | targets: particles, 95 | x: function(particle) { 96 | return particle.x + anime.random(rippleSize, -rippleSize); 97 | }, 98 | y: function(particle) { 99 | return particle.y + anime.random(rippleSize * 1.15, -rippleSize * 1.15); 100 | }, 101 | r: 0, 102 | easing: "easeOutExpo", 103 | duration: anime.random(1000, 1300), 104 | complete: removeAnimation 105 | }); 106 | animations.push(fillAnimation, rippleAnimation, particlesAnimation); 107 | } 108 | 109 | function extend(a, b) { 110 | for (var key in b) { 111 | if (b.hasOwnProperty(key)) { 112 | a[key] = b[key]; 113 | } 114 | } 115 | return a; 116 | } 117 | 118 | var Circle = function(opts) { 119 | extend(this, opts); 120 | }; 121 | 122 | Circle.prototype.draw = function() { 123 | ctx.globalAlpha = this.opacity || 1; 124 | ctx.beginPath(); 125 | ctx.arc(this.x, this.y, this.r, 0, 2 * Math.PI, false); 126 | if (this.stroke) { 127 | ctx.strokeStyle = this.stroke.color; 128 | ctx.lineWidth = this.stroke.width; 129 | ctx.stroke(); 130 | } 131 | if (this.fill) { 132 | ctx.fillStyle = this.fill; 133 | ctx.fill(); 134 | } 135 | ctx.closePath(); 136 | ctx.globalAlpha = 1; 137 | }; 138 | 139 | var animate = anime({ 140 | duration: Infinity, 141 | update: function() { 142 | ctx.fillStyle = bgColor; 143 | ctx.fillRect(0, 0, cW, cH); 144 | animations.forEach(function(anim) { 145 | anim.animatables.forEach(function(animatable) { 146 | animatable.target.draw(); 147 | }); 148 | }); 149 | } 150 | }); 151 | 152 | var resizeCanvas = function() { 
153 | cW = window.innerWidth; 154 | cH = window.innerHeight; 155 | c.width = cW * devicePixelRatio; 156 | c.height = cH * devicePixelRatio; 157 | ctx.scale(devicePixelRatio, devicePixelRatio); 158 | }; 159 | 160 | (function init() { 161 | resizeCanvas(); 162 | if (window.CP) { 163 | // CodePen's loop detection was causin' problems 164 | // and I have no idea why, so... 165 | window.CP.PenTimer.MAX_TIME_IN_LOOP_WO_EXIT = 6000; 166 | } 167 | window.addEventListener("resize", resizeCanvas); 168 | addClickListeners(); 169 | if (!!window.location.pathname.match(/fullcpgrid/)) { 170 | startFauxClicking(); 171 | } 172 | handleInactiveUser(); 173 | })(); 174 | 175 | function handleInactiveUser() { 176 | var inactive = setTimeout(function() { 177 | fauxClick(cW / 2, cH / 2); 178 | }, 2000); 179 | 180 | function clearInactiveTimeout() { 181 | clearTimeout(inactive); 182 | document.removeEventListener("mousedown", clearInactiveTimeout); 183 | document.removeEventListener("touchstart", clearInactiveTimeout); 184 | } 185 | 186 | document.addEventListener("mousedown", clearInactiveTimeout); 187 | document.addEventListener("touchstart", clearInactiveTimeout); 188 | } 189 | 190 | function startFauxClicking() { 191 | setTimeout(function() { 192 | fauxClick( 193 | anime.random(cW * 0.2, cW * 0.8), 194 | anime.random(cH * 0.2, cH * 0.8) 195 | ); 196 | startFauxClicking(); 197 | }, anime.random(200, 900)); 198 | } 199 | 200 | function fauxClick(x, y) { 201 | var fauxClick = new Event("mousedown"); 202 | fauxClick.pageX = x; 203 | fauxClick.pageY = y; 204 | document.dispatchEvent(fauxClick); 205 | } 206 | -------------------------------------------------------------------------------- /web/client/src/components/ImageTrace/ImageTrace.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import "./ImageTrace.css"; 3 | 4 | import Image from "image-js"; 5 | import { toPath, toPoints } from "svg-points"; 6 | import * as simplify from "simplify-js"; 7 | import * as ImageTracer from "imagetracerjs"; 8 | import HtmlToReact, { Parser } from "html-to-react"; 9 | import { TweenLite, morphSVG, TimelineLite, SlowMo, CustomEase } from "gsap"; 10 | 11 | export default class ImageTrace extends React.Component { 12 | state = { 13 | path: null // placeholder; replaced with the traced <path> below 14 | }; 15 | 16 | componentDidMount() { 17 | Image.load("protected.jpeg").then(async img => { 18 | let data = ImageTracer.imagedataToSVG(img, { 19 | ltres: 30, 20 | qtres: 30, 21 | numberofcolors: 2, 22 | pal: [{ r: 255, b: 255, g: 255, a: 0 }, { r: 0, b: 0, g: 0, a: 1 }], 23 | colorsampling: 0, 24 | linefilter: true 25 | }); 26 | 27 | let svgData = this.convertSvgGroupToPath(data); 28 | 29 | this.setState({ 30 | path: <path id="data" d={svgData} /> // the "#data" path animated below 31 | }); 32 | 33 | let tl = new TimelineLite(); 34 | 35 | let orig = document.querySelector("#data"); 36 | let obj = { 37 | length: 0, 38 | pathLength: orig.getTotalLength() 39 | }; 40 | 41 | tl.to(obj, 500, { 42 | length: obj.pathLength, 43 | onUpdate: drawLine, 44 | ease: SlowMo.ease.config(0.1, 0.7, false) 45 | }); 46 | 47 | function drawLine() { 48 | orig.style.strokeDasharray = [obj.length, obj.pathLength].join(" "); 49 | } 50 | 51 | tl.to( 52 | "#data", 53 | 1, 54 | { 55 | morphSVG: { shape: "#lock", shapeIndex: 20 } 56 | }, 57 | "-=498" 58 | ); 59 | tl.play(); 60 | }); 61 | } 62 | 63 | convertSvgGroupToPath(data) { 64 | let finalD = ""; 65 | var processNodeDefinitions = new HtmlToReact.ProcessNodeDefinitions(React); 66 | let component = new Parser().parseWithInstructions( 67 | data, 68 | node => {
return true; 70 | }, 71 | [ 72 | { 73 | shouldProcessNode: function(node) { 74 | return node.name === "svg"; 75 | }, 76 | processNode: function(node, children) { 77 | return children; 78 | } 79 | }, 80 | { 81 | shouldProcessNode: function(node) { 82 | return node.name === "path"; 83 | }, 84 | processNode: function(node, children, index) { 85 | finalD += " " + node.attribs.d; 86 | return React.createElement("path", { 87 | key: index, 88 | d: node.attribs.d 89 | }); 90 | } 91 | }, 92 | { 93 | // Anything else 94 | shouldProcessNode: function(node) { 95 | return true; 96 | }, 97 | processNode: processNodeDefinitions.processDefaultNode 98 | } 99 | ] 100 | ); 101 | return finalD; 102 | } 103 | /* 104 | let svgData = MSQR(edge.getCanvas(), { 105 | width: edge.width, 106 | tolerance: 50, 107 | align: false, 108 | alpha: 1, 109 | bleed: 5, // width of bleed mask (used with multiple shapes only) 110 | maxShapes: 5, 111 | height: edge.height, 112 | path2D: false 113 | }); 114 | 115 | let a = document.createElement("a"); 116 | document.body.appendChild(a); 117 | a.style = "display: none"; 118 | let url = window.URL.createObjectURL(await edge.toBlob()); 119 | a.href = url; 120 | a.download = "path.png"; 121 | a.click(); 122 | window.URL.revokeObjectURL(url); 123 | 124 | console.log(svgData); 125 | this.setState({ path: toPath(svgData) }); 126 | }); 127 | } 128 | 129 | trace = image => { 130 | var point, nextpoint; 131 | let data = []; 132 | 133 | for (var i = 0; i <= image.data.length; i++) { 134 | if (image.data[i] === 255) { 135 | // start pathfinding 136 | point = { x: i % image.width, y: (i / image.width) | 0 }; 137 | 138 | image.data[i] = 0; 139 | 140 | // start a line 141 | var line = []; 142 | line.push(point); 143 | while ((nextpoint = this.lineGobble(image, point))) { 144 | line.push(nextpoint); 145 | point = nextpoint; 146 | } 147 | data.push(line); 148 | } 149 | } 150 | return data; 151 | }; 152 | 153 | lineGobble = (image, point) => { 154 | var neighbor = [ 155 | [0, -1], // n 156 | [1, 0], // s 157 | [0, 1], // e 158 | [-1, 0], // w 159 | [-1, -1], // nw 160 | [1, -1], // ne 161 | [1, 1], // se 162 | [-1, 1] // sw 163 | ]; 164 | var checkpoint = {}; 165 | 166 | for (var i = 0; i < neighbor.length; i++) { 167 | checkpoint.x = point.x + neighbor[i][0]; 168 | checkpoint.y = point.y + neighbor[i][1]; 169 | 170 | var result = this.checkpixel(image, checkpoint); 171 | if (result) { 172 | return checkpoint; 173 | } 174 | } 175 | return false; 176 | }; 177 | 178 | checkpixel = (image, point) => { 179 | if (0 <= point.x < image.width) { 180 | if (0 <= point.y < image.height) { 181 | // point is "in bounds" 182 | var index = point.y * image.width + point.x; 183 | if (image.data[index] === 255) { 184 | image.data[index] = 0; 185 | return true; 186 | } 187 | } 188 | } 189 | return false; 190 | };*/ 191 | 192 | render() { 193 | return ( 194 |
195 | 196 | {this.state.path} 197 | 203 | 204 |
205 | ); 206 | } 207 | } 208 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | # utils.py 2 | 3 | import numpy as np 4 | import random, sys, os, time, glob, math 5 | 6 | import matplotlib as mpl 7 | mpl.use("Agg") 8 | import matplotlib.pyplot as plt 9 | 10 | from skimage import io, color 11 | 12 | import torch 13 | import torch.nn as nn 14 | import torch.nn.functional as F 15 | import torch.optim as optim 16 | from torch.autograd import Variable 17 | import random 18 | import IPython 19 | 20 | # CRITICAL HYPER PARAMS 21 | EPSILON = 9e-3 22 | BATCH_SIZE = 64 23 | DIST_SIZE = 48 24 | ENCODING_DIST_SIZE = 96 25 | TARGET_SIZE = 32 26 | VAL_SIZE = 16 27 | ENCODING_LR = 1e-1 28 | PERT_ALPHA = 0.5 29 | MODEL_TYPE = "DecodingGramNet" 30 | 31 | USE_CUDA = torch.cuda.is_available() 32 | DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 33 | IMAGE_MAX = 255.0 34 | OUTPUT_DIR = "output/" 35 | DATA_FILES = sorted(glob.glob("data/colornet/*.jpg")) 36 | JOB = open("jobs/jobinfo.txt").read().strip() 37 | TRAIN_FILES, VAL_FILES = DATA_FILES[:-VAL_SIZE], DATA_FILES[-VAL_SIZE:] 38 | dtype = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor 39 | 40 | 41 | def corrcoef(x): 42 | mean_x = torch.mean(x, 1).unsqueeze(1) 43 | xm = x.sub(mean_x.expand_as(x)) 44 | c = xm.mm(xm.t()) 45 | c = c / (x.size(1) - 1) 46 | 47 | # normalize covariance matrix 48 | d = torch.diag(c) 49 | stddev = torch.pow(d, 0.5) 50 | c = c.div(stddev.expand_as(c)) 51 | c = c.div(stddev.expand_as(c).t()) 52 | 53 | # clamp between -1 and 1 54 | # probably not necessary but numpy does it 55 | c = torch.clamp(c, -1.0, 1.0) 56 | 57 | return c 58 | 59 | 60 | def get_std_weight(images, n=5, alpha=0.5): 61 | N, C, H, W = images.shape 62 | kernel = torch.ones(C, 1, n, n) 63 | with torch.no_grad(): 64 | padded = F.pad(images, (n // 2, n // 2, n // 2, n // 2), mode="replicate") 65 | sums = F.conv2d(padded, kernel.to(DEVICE), groups=3, padding=0) * (1 / (n ** 2)) 66 | sums_2 = F.conv2d(padded ** 2, kernel.to(DEVICE), groups=3, padding=0) * (1 / (n ** 2)) 67 | stds = (sums_2 - (sums ** 2) + 1e-5) ** alpha 68 | return stds.detach() 69 | 70 | 71 | def gram(input): 72 | N, C, H, W = input.size() 73 | features = input.view(N, C, H * W) # resize F_XL into \hat F_XL 74 | G = torch.bmm(features, features.permute(0, 2, 1)) # compute the gram product 75 | return G.div(N * C * H * W) 76 | 77 | 78 | def zca(x): 79 | sigma = torch.mm(x.t(), x) / x.shape[0] 80 | U, S, _ = torch.svd(sigma) 81 | pcs = torch.mm(torch.mm(U, torch.diag(1.
/ torch.sqrt(S + 1e-7))), U.t()) 82 | 83 | # Apply ZCA whitening 84 | whitex = torch.mm(x, pcs) 85 | return whitex 86 | 87 | 88 | def color_normalize(x): 89 | return torch.cat( 90 | [ 91 | ((x[0] - 0.485) / (0.229)).unsqueeze(0), 92 | ((x[1] - 0.456) / (0.224)).unsqueeze(0), 93 | ((x[2] - 0.406) / (0.225)).unsqueeze(0), 94 | ], 95 | dim=0, 96 | ) 97 | 98 | 99 | def tve_loss(x): 100 | return ((x[:, :-1, :] - x[:, 1:, :]) ** 2).sum() + ((x[:, :, :-1] - x[:, :, 1:]) ** 2).sum() 101 | 102 | 103 | def batch(datagen, batch_size=32): 104 | arr = [] 105 | for data in datagen: 106 | arr.append(data) 107 | if len(arr) == batch_size: 108 | yield arr 109 | arr = [] 110 | if len(arr) != 0: 111 | yield arr 112 | 113 | 114 | def batched(datagen, batch_size=32): 115 | arr = [] 116 | for data in datagen: 117 | arr.append(data) 118 | if len(arr) == batch_size: 119 | yield list(zip(*arr)) 120 | arr = [] 121 | if len(arr) != 0: 122 | yield list(zip(*arr)) 123 | 124 | 125 | def elapsed(times=[time.time()]): 126 | times.append(time.time()) 127 | return times[-1] - times[-2] 128 | 129 | 130 | def create_heatmap(data, labels, filename='output/heatmap.jpg', x_label="", y_label=""): 131 | fig, ax = plt.subplots() 132 | im = ax.imshow(data) 133 | 134 | # We want to show all ticks... 135 | ax.set_xticks(np.arange(len(labels))) 136 | ax.set_yticks(np.arange(len(labels))) 137 | # ... and label them with the respective list entries 138 | ax.set_xticklabels(labels) 139 | ax.set_yticklabels(labels) 140 | 141 | # Rotate the tick labels and set their alignment. 142 | plt.setp(ax.get_xticklabels(), rotation=45, ha="right", 143 | rotation_mode="anchor") 144 | 145 | # Loop over data dimensions and create text annotations. 146 | for i in range(len(labels)): 147 | for j in range(len(labels)): 148 | text = ax.text(j, i, "{0:.2f}".format(round(data[i,j],2)), 149 | ha="center", va="center", color="w") 150 | 151 | ax.set_title("Affinity matrix") 152 | fig.tight_layout() 153 | plt.savefig(filename) 154 | plt.cla() 155 | plt.clf() 156 | plt.close() 157 | 158 | def gaussian_filter(kernel_size=5, sigma=1.0): 159 | 160 | channels = 3 161 | # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2) 162 | x_cord = torch.arange(kernel_size).to(DEVICE) 163 | x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size) 164 | y_grid = x_grid.t() 165 | xy_grid = torch.stack([x_grid, y_grid], dim=-1) 166 | 167 | mean = (kernel_size - 1) / 2. 168 | variance = sigma ** 2. 169 | 170 | # Calculate the 2-dimensional gaussian kernel which is 171 | # the product of two gaussian distributions for two different 172 | # variables (in this case called x and y) 173 | gaussian_kernel = (1. / (2. * math.pi * variance)) * torch.exp( 174 | -torch.sum((xy_grid - mean) ** 2., dim=-1) / (2 * variance) 175 | ) 176 | # Make sure sum of values in gaussian kernel equals 1. 
177 | gaussian_kernel = gaussian_kernel.to(DEVICE) / torch.sum(gaussian_kernel).to(DEVICE) 178 | 179 | # Reshape to 2d depthwise convolutional weight 180 | gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size) 181 | gaussian_kernel = gaussian_kernel.repeat(channels, 1, 1, 1) 182 | 183 | return gaussian_kernel 184 | 185 | 186 | def motion_blur_filter(kernel_size=15): 187 | channels = 3 188 | kernel_motion_blur = torch.zeros((kernel_size, kernel_size)) 189 | kernel_motion_blur[int((kernel_size - 1) / 2), :] = torch.ones(kernel_size) 190 | kernel_motion_blur = kernel_motion_blur / kernel_size 191 | kernel_motion_blur = kernel_motion_blur.view(1, 1, kernel_size, kernel_size) 192 | kernel_motion_blur = kernel_motion_blur.repeat(channels, 1, 1, 1) 193 | return kernel_motion_blur 194 | 195 | 196 | """Image manipulation methods""" 197 | 198 | 199 | class im(object): 200 | @staticmethod 201 | def load(filename): 202 | 203 | img = None 204 | 205 | try: 206 | img = io.imread(filename) / IMAGE_MAX 207 | if len(img.shape) != 3: 208 | return None 209 | img = img[:, :, 0:3] 210 | except (IndexError, OSError) as e: 211 | img = None 212 | 213 | return img 214 | 215 | @staticmethod 216 | def save(image, file="out.jpg"): 217 | io.imsave(file, (image * IMAGE_MAX).astype(np.uint8)) 218 | 219 | @staticmethod 220 | def torch(image): 221 | x = torch.tensor(image).float().permute(2, 0, 1) 222 | return x.to(DEVICE) 223 | 224 | @staticmethod 225 | def numpy(image): 226 | return image.data.permute(1, 2, 0).cpu().numpy() 227 | 228 | @staticmethod 229 | def stack(images): 230 | return torch.cat([im.torch(image).unsqueeze(0) for image in images], dim=0) 231 | 232 | 233 | """Binary array data manipulation methods""" 234 | 235 | 236 | class binary(object): 237 | @staticmethod 238 | def parse(bstr): 239 | return [int(c) for c in bstr] 240 | 241 | @staticmethod 242 | def get(predictions): 243 | if isinstance(predictions, Variable): 244 | predictions = predictions.data.cpu().numpy() 245 | return list(predictions.clip(min=0, max=1).round().astype(int)) 246 | 247 | @staticmethod 248 | def str(vals): 249 | return "".join([str(x) for x in vals]) 250 | 251 | @staticmethod 252 | def target(values): 253 | values = torch.tensor(values).float() 254 | return values.to(DEVICE) 255 | 256 | @staticmethod 257 | def redundant(values, n=3): 258 | return list(values) * n 259 | 260 | @staticmethod 261 | def consensus(values, n=3): 262 | return list((np.reshape(values, (n, -1)).mean(axis=0) >= 0.5).astype(int)) 263 | 264 | @staticmethod 265 | def random(n=10): 266 | return [random.randint(0, 1) for i in range(0, n)] 267 | 268 | @staticmethod 269 | def distance(code1, code2): 270 | code1 = np.array(code1).clip(min=0, max=1).round() 271 | code2 = np.array(code2).clip(min=0, max=1).round() 272 | num = 0 273 | for i in range(len(code1)): 274 | if code1[i] != code2[i]: 275 | num += 1 276 | return num 277 | 278 | @staticmethod 279 | def mse_dist(code1, code2): 280 | a = np.array(code1) 281 | b = np.array(code2) 282 | return np.mean((a - b) ** 2) 283 | 284 | 285 | if __name__ == "__main__": 286 | 287 | data = im.load("test_data/n02108915_4657.jpg") 288 | im.save(data, file="out.jpg") 289 | 290 | print(im.torch(data).size()) 291 | print(im.numpy(im.torch(data)).shape) 292 | im.save(im.numpy(im.torch(data)), file="out2.jpg") 293 | 294 | print(binary.consensus(binary.redundant([1, 1, 0, 1, 0, 0]))) 295 | print(binary.parse("111011")) 296 | -------------------------------------------------------------------------------- /web/client/public/js/svg.js:
-------------------------------------------------------------------------------- 1 | /*! 2 | * VERSION: 0.3.1 3 | * DATE: 2015-10-21 4 | * UPDATES AND DOCS AT: http://greensock.com 5 | * 6 | * @license Copyright (c) 2008-2015, GreenSock. All rights reserved. 7 | * MorphSVGPlugin is a Club GreenSock membership benefit; You must have a valid membership to use 8 | * this code without violating the terms of use. Visit http://greensock.com/club/ to sign up or get more details. 9 | * This work is subject to the software agreement that was issued with your membership. 10 | * 11 | * @author: Jack Doyle, jack@greensock.com 12 | */ 13 | var _gsScope="undefined"!=typeof module&&module.exports&&"undefined"!=typeof global?global:this||window;(_gsScope._gsQueue||(_gsScope._gsQueue=[])).push(function(){"use strict";var a=Math.PI/180,b=180/Math.PI,c=/[achlmqstvz]|(-?\d*\.?\d*(?:e[\-+]?\d+)?)[0-9]/gi,d=/(?:(-|-=|\+=)?\d*\.?\d*(?:e[\-+]?\d+)?)[0-9]/gi,e=/[achlmqstvz]/gi,f=_gsScope.TweenLite,g=function(a){window.console&&console.log(a)},h=function(b,c){var d,e,f,g,h,i,j=Math.ceil(Math.abs(c)/90),k=0,l=[];for(b*=a,c*=a,d=c/j,e=4/3*Math.sin(d/2)/(1+Math.cos(d/2)),i=0;j>i;i++)f=b+i*d,g=Math.cos(f),h=Math.sin(f),l[k++]=g-e*h,l[k++]=h+e*g,f+=d,g=Math.cos(f),h=Math.sin(f),l[k++]=g+e*h,l[k++]=h-e*g,l[k++]=g,l[k++]=h;return l},i=function(c,d,e,f,g,i,j,k,l){if(c!==k||d!==l){e=Math.abs(e),f=Math.abs(f);var m=g%360*a,n=Math.cos(m),o=Math.sin(m),p=(c-k)/2,q=(d-l)/2,r=n*p+o*q,s=-o*p+n*q,t=e*e,u=f*f,v=r*r,w=s*s,x=v/t+w/u;x>1&&(e=Math.sqrt(x)*e,f=Math.sqrt(x)*f,t=e*e,u=f*f);var y=i===j?-1:1,z=(t*u-t*w-u*v)/(t*w+u*v);0>z&&(z=0);var A=y*Math.sqrt(z),B=A*(e*s/f),C=A*-(f*r/e),D=(c+k)/2,E=(d+l)/2,F=D+(n*B-o*C),G=E+(o*B+n*C),H=(r-B)/e,I=(s-C)/f,J=(-r-B)/e,K=(-s-C)/f,L=Math.sqrt(H*H+I*I),M=H;y=0>I?-1:1;var N=y*Math.acos(M/L)*b;L=Math.sqrt((H*H+I*I)*(J*J+K*K)),M=H*J+I*K,y=0>H*K-I*J?-1:1;var O=y*Math.acos(M/L)*b;!j&&O>0?O-=360:j&&0>O&&(O+=360),O%=360,N%=360;var P,Q,R,S=h(N,O),T=n*e,U=o*e,V=o*-f,W=n*f,X=S.length-2;for(P=0;X>P;P+=2)Q=S[P],R=S[P+1],S[P]=Q*T+R*V+F,S[P+1]=Q*U+R*W+G;return S[S.length-2]=k,S[S.length-1]=l,S}},j=function(a){var b,d,e,f,h,j,k,l,m,n,o,p,q,r=(a+"").match(c)||[],s=[],t=0,u=0,v=r.length,w=2,x=0;if(!a||!isNaN(r[0])||isNaN(r[1]))return g("ERROR: malformed path data: "+a),s;for(b=0;v>b;b++)if(q=h,isNaN(r[b])?(h=r[b].toUpperCase(),j=h!==r[b]):b--,e=+r[b+1],f=+r[b+2],j&&(e+=t,f+=u),0===b&&(l=e,m=f),"M"===h)t=l=e,u=m=f,k=[e,f],x+=w,w=2,s.push(k),b+=2;else if("C"===h)k||(k=[0,0]),k[w++]=e,k[w++]=f,j||(t=u=0),k[w++]=t+1*r[b+3],k[w++]=u+1*r[b+4],k[w++]=t+=1*r[b+5],k[w++]=u+=1*r[b+6],b+=6;else if("S"===h)"C"===q||"S"===q?(n=t-k[w-4],o=u-k[w-3],k[w++]=t+n,k[w++]=u+o):(k[w++]=t,k[w++]=u),k[w++]=e,k[w++]=f,j||(t=u=0),k[w++]=t+=1*r[b+3],k[w++]=u+=1*r[b+4],b+=4;else if("Q"===h)n=e-t,o=f-u,k[w++]=t+2*n/3,k[w++]=u+2*o/3,j||(t=u=0),t+=1*r[b+3],u+=1*r[b+4],n=e-t,o=f-u,k[w++]=t+2*n/3,k[w++]=u+2*o/3,k[w++]=t,k[w++]=u,b+=4;else if("T"===h)n=t-k[w-4],o=u-k[w-3],k[w++]=t+n,k[w++]=u+o,n=t+1.5*n-e,o=u+1.5*o-f,k[w++]=e+2*n/3,k[w++]=f+2*o/3,k[w++]=t=e,k[w++]=u=f,b+=2;else if("H"===h)f=u,k[w++]=t+(e-t)/3,k[w++]=u+(f-u)/3,k[w++]=t+2*(e-t)/3,k[w++]=u+2*(f-u)/3,k[w++]=t=e,k[w++]=f,b+=1;else if("V"===h)f=e,e=t,j&&(f+=u-t),k[w++]=e,k[w++]=u+(f-u)/3,k[w++]=e,k[w++]=u+2*(f-u)/3,k[w++]=e,k[w++]=u=f,b+=1;else if("L"===h||"Z"===h)"Z"===h&&(e=l,f=m,k.closed=!0),("L"===h||Math.abs(t-e)>1||Math.abs(u-f)>1)&&(k[w++]=t+(e-t)/3,k[w++]=u+(f-u)/3,k[w++]=t+2*(e-t)/3,k[w++]=u+2*(f-u)/3,k[w++]=e,k[w++]=f,b+=2),t=e,u=f;else 
if("A"===h){for(p=i(t,u,1*r[b+1],1*r[b+2],1*r[b+3],1*r[b+4],1*r[b+5],(j?t:0)+1*r[b+6],(j?u:0)+1*r[b+7]),d=0;do;o+=6)for(q+=t;q>r;)c=a[o-2],d=a[o-1],e=a[o],f=a[o+1],g=a[o+2],h=a[o+3],i=a[o+4],j=a[o+5],p=1/(Math.floor(q)+1),k=c+(e-c)*p,m=e+(g-e)*p,k+=(m-k)*p,m+=(g+(i-g)*p-m)*p,l=d+(f-d)*p,n=f+(h-f)*p,l+=(n-l)*p,n+=(h+(j-h)*p-n)*p,a.splice(o,4,c+(e-c)*p,d+(f-d)*p,k,l,k+(m-k)*p,l+(n-l)*p,m,n,g+(i-g)*p,h+(j-h)*p),o+=6,s+=6,q--;return a},l=function(a){var b,c,d,e,f="",g=a.length,h=100;for(c=0;g>c;c++){for(e=a[c],f+="M"+e[0]+","+e[1]+" C",b=e.length,d=2;b>d;d++)f+=(e[d++]*h|0)/h+","+(e[d++]*h|0)/h+" "+(e[d++]*h|0)/h+","+(e[d++]*h|0)/h+" "+(e[d++]*h|0)/h+","+(e[d]*h|0)/h+" ";e.closed&&(f+="z")}return f},m=function(a){for(var b=[],c=a.length-1,d=0;--c>-1;)b[d++]=a[c],b[d++]=a[c+1],c--;for(c=0;d>c;c++)a[c]=b[c];a.reversed=a.reversed?!1:!0},n=function(a){var b,c=a.length,d=0,e=0;for(b=0;c>b;b++)d+=a[b++],e+=a[b];return[d/(c/2),e/(c/2)]},o=function(a){var b,c,d,e=a.length,f=a[0],g=f,h=a[1],i=h;for(d=6;e>d;d+=6)b=a[d],c=a[d+1],b>f?f=b:g>b&&(g=b),c>h?h=c:i>c&&(i=c);return a.centerX=(f+g)/2,a.centerY=(h-i)/2,a.size=(f-g)*(h-i)},p=function(a,b){return b.length-a.length},q=function(a,b){var c=a.size||o(a),d=b.size||o(b);return Math.abs(d-c)<(c+d)/20?b.centerX-a.centerX||b.centerY-a.centerY:d-c},r=function(a,b){var c,d,e=a.slice(0),f=a.length,g=f-2;for(b=0|b,c=0;f>c;c++)d=(c+b)%g,a[c++]=e[d],a[c]=e[d+1]},s=function(a,b,c,d,e){var f,g,h,i,j=a.length,k=0,l=j-2;for(c*=6,g=0;j>g;g+=6)f=(g+c)%l,i=a[f]-(b[g]-d),h=a[f+1]-(b[g+1]-e),k+=Math.sqrt(h*h+i*i);return k},t=function(a,b,c){var d,e,f,g=a.length,h=n(a),i=n(b),j=i[0]-h[0],k=i[1]-h[1],l=s(a,b,0,j,k),o=0;for(f=6;g>f;f+=6)e=s(a,b,f/6,j,k),l>e&&(l=e,o=f);if(c)for(d=a.slice(0),m(d),f=6;g>f;f+=6)e=s(d,b,f/6,j,k),l>e&&(l=e,o=-f);return o/6},u=function(a,b,c){var d,e,f,g,h=a.length,i=99999999999,j=0;for(g=0;h>g;g+=6)d=a[g]-b,e=a[g+1]-c,f=Math.sqrt(d*d+e*e),i>f&&(i=f,j=g);return j},v=function(a,b,c){var d,e,f,g,h=b.length,i=0,j=.8*(b[c].size||o(b[c])),k=999999999999,l=a.size||o(a),m=a.centerX,n=a.centerY;for(d=c;h>d&&(l=b[d].size||o(b[d]),!(j>l));d++)e=b[d].centerX-m,f=b[d].centerY-n,g=Math.sqrt(e*e+f*f),k>g&&(i=d,k=g);return g=b[i],b.splice(i,1),g},w=function(a,b,c,d){var e,f,g,h,i,j=b.length-a.length,l=j>0?b:a,n=j>0?a:b,o=0,s="complexity"===d?p:q,w=n.length,x="object"==typeof c&&c.push?c.slice(0):[c],y="reverse"===x[0]||x[0]<0;if(n[0]){if(j){if(0>j&&(j=-j),a.sort(s),b.sort(s),s===q)for(w=0;wn[0].length&&k(n[0],(l[0].length-n[0].length)/6|0),w=n.length;j>o;)g=u(n[0],l[w][0],l[w][1]),h=n[0][g],i=n[0][g+1],n[w++]=[h,i,h,i,h,i,h,i],n.totalPoints+=8,o++}for(w=0;wj?k(e,-j/6|0):j>0&&k(f,j/6|0),y&&!f.reversed&&m(f),c=x[w]||0===x[w]?x[w]:"auto",c&&(f.closed||Math.abs(f[0]-f[f.length-2])<.5&&Math.abs(f[1]-f[f.length-1])<.5?"auto"===c?(x[w]=c=t(f,e,0===w),0>c&&(y=!0,m(f),c=-c),r(f,6*c)):"reverse"!==c&&(w&&0>c&&m(f),r(f,6*(0>c?-c:c))):!y&&("auto"===c&&Math.abs(e[0]-f[0])+Math.abs(e[1]-f[1])+Math.abs(e[e.length-2]-f[f.length-2])+Math.abs(e[e.length-1]-f[f.length-1])>Math.abs(e[0]-f[f.length-2])+Math.abs(e[1]-f[f.length-1])+Math.abs(e[e.length-2]-f[0])+Math.abs(e[e.length-1]-f[1])||c%2)?(m(f),x[w]=-1,y=!0):"auto"===c?x[w]=0:"reverse"===c&&(x[w]=-1));return x}},x=function(a,b,c){var d=j(a[0]),e=j(a[1]);w(d,e,b||0===b?b:"auto",c)&&(a[0]=l(d),a[1]=l(e))},y=function(a,b){return b||a||0===a?function(c){x(c,a,b)}:x},z=function(a,b){if(!b)return a;var 
c,e,f,g=a.match(d)||[],h=g.length,i="";for("reverse"===b?(e=h-1,c=-2):(e=(2*(parseInt(b,10)||0)+1+100*h)%h,c=2),f=0;h>f;f+=2)i+=g[e-1]+","+g[e]+" ",e=(e+c)%h;return i},A=function(a,b){var c,d,e,f,g,h,i,j=0,k=parseFloat(a[0]),l=parseFloat(a[1]),m=k+","+l+" ",n=.999999;for(e=a.length,c=.5*b/(.5*e-1),d=0;e-2>d;d+=2){if(j+=c,h=parseFloat(a[d+2]),i=parseFloat(a[d+3]),j>n)for(g=1/(Math.floor(j)+1),f=1;j>n;)m+=(k+(h-k)*g*f).toFixed(2)+","+(l+(i-l)*g*f).toFixed(2)+" ",j--,f++;m+=h+","+i+" ",k=h,l=i}return m},B=function(a){var b=a[0].match(d)||[],c=a[1].match(d)||[],e=c.length-b.length;e>0?a[0]=A(b,e):a[1]=A(c,-e)},C=function(a){return isNaN(a)?B:function(b){B(b),b[1]=z(b[1],parseInt(a,10))}},D=function(a,b){var c=document.createElementNS("http://www.w3.org/2000/svg","path"),d=Array.prototype.slice.call(a.attributes),e=d.length;for(b=","+b+",";--e>-1;)-1===b.indexOf(","+d[e].nodeName+",")&&c.setAttributeNS(null,d[e].nodeName,d[e].nodeValue);return c},E=function(a,b){var c,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y=a.tagName.toLowerCase(),z=.552284749831;return"path"!==y&&a.getBBox?(i=D(a,"x,y,width,height,cx,cy,rx,ry,r,x1,x2,y1,y2,points"),"rect"===y?(g=+a.getAttribute("rx")||0,h=+a.getAttribute("ry")||0,e=+a.getAttribute("x")||0,f=+a.getAttribute("y")||0,m=(+a.getAttribute("width")||0)-2*g,n=(+a.getAttribute("height")||0)-2*h,g||h?(o=e+g*(1-z),p=e+g,q=p+m,r=q+g*z,s=q+g,t=f+h*(1-z),u=f+h,v=u+n,w=v+h*z,x=v+h,c="M"+s+","+u+" V"+v+" C"+[s,w,r,x,q,x,q-(q-p)/3,x,p+(q-p)/3,x,p,x,o,x,e,w,e,v,e,v-(v-u)/3,e,u+(v-u)/3,e,u,e,t,o,f,p,f,p+(q-p)/3,f,q-(q-p)/3,f,q,f,r,f,s,t,s,u].join(",")+"z"):c="M"+(e+m)+","+f+" v"+n+" h"+-m+" v"+-n+" h"+m+"z"):"circle"===y||"ellipse"===y?("circle"===y?(g=h=+a.getAttribute("r")||0,k=g*z):(g=+a.getAttribute("rx")||0,h=+a.getAttribute("ry")||0,k=h*z),e=+a.getAttribute("cx")||0,f=+a.getAttribute("cy")||0,j=g*z,c="M"+(e+g)+","+f+" C"+[e+g,f+k,e+j,f+h,e,f+h,e-j,f+h,e-g,f+k,e-g,f,e-g,f-k,e-j,f-h,e,f-h,e+j,f-h,e+g,f-k,e+g,f].join(",")+"z"):"line"===y?c="M"+a.getAttribute("x1")+","+a.getAttribute("y1")+" L"+a.getAttribute("x2")+","+a.getAttribute("y2"):("polyline"===y||"polygon"===y)&&(l=(a.getAttribute("points")+"").match(d)||[],e=l.shift(),f=l.shift(),c="M"+e+","+f+" L"+l.join(","),"polygon"===y&&(c+=","+e+","+f+"z")),i.setAttribute("d",c),b&&a.parentNode&&(a.parentNode.insertBefore(i,a),a.parentNode.removeChild(a)),i):a},F=function(a,b){var c,e;return("string"!=typeof a||(a.match(d)||[]).length<3)&&(c=f.selector(a),c&&c[0]?(c=c[0],e=c.nodeName.toUpperCase(),b&&"PATH"!==e&&(c=E(c,!1),e="PATH"),a=c.getAttribute("PATH"===e?"d":"points")||""):(g("WARNING: invalid morph to: "+a),a=!1)),a},G="Use MorphSVGPlugin.convertToPath(elementOrSelectorText) to convert to a path before morphing.",H=_gsScope._gsDefine.plugin({propName:"morphSVG",API:2,global:!0,version:"0.3.1",init:function(a,b,c){var d,f,h,i,j;return"function"!=typeof a.setAttribute?!1:(d=a.nodeName.toUpperCase(),j="POLYLINE"===d||"POLYGON"===d,"PATH"===d||j?(f="PATH"===d?"d":"points",("string"==typeof b||b.getBBox)&&(b={shape:b}),i=F(b.shape||b.d||b.points||"","d"===f),j&&e.test(i)?(g("WARNING: a <"+d+"> cannot accept path data. "+G),!1):(i&&(this._target=a,h=this._addTween(a,"setAttribute",a.getAttribute(f)+"",i+"","morphSVG",!1,f,"d"===f?y(b.shapeIndex,b.map):C(b.shapeIndex)),h&&(this._overwriteProps.push("morphSVG"),h.end=i,h.endProp=f)),!0)):(g("WARNING: cannot morph a <"+d+"> SVG element. 
"+G),!1))},set:function(a){var b;if(this._super.setRatio.call(this,a),1===a)for(b=this._firstPT;b;)b.end&&this._target.setAttribute(b.endProp,b.end),b=b._next}});H.pathFilter=x,H.pointsFilter=B,H.subdivideRawBezier=k,H.pathDataToRawBezier=function(a){return j(F(a,!0))},H.equalizeSegmentQuantity=w,H.convertToPath=function(a,b){"string"==typeof a&&(a=f.selector(a));for(var c=a&&0!==a.length?a.length&&a[0]&&a[0].nodeType?Array.prototype.slice.call(a,0):[a]:[],d=c.length;--d>-1;)c[d]=E(c[d],b!==!1);return c},H.pathDataToBezier=function(a,b){var c,d,e,f,g,h,i=j(F(a,!0))[0]||[];if(b=b||{},f=b.matrix,c=[],e=i.length,f)for(b.relative&&(f=f.slice(0),f[4]-=i[0],f[5]-=i[1]),d=0;e>d;d+=2)c.push({x:i[d]*f[0]+i[d+1]*f[2]+f[4],y:i[d]*f[1]+i[d+1]*f[3]+f[5]});else for(g=b.offsetX||0,h=b.offsetY||0,b.relative&&(g-=i[0],h-=i[1]),d=0;e>d;d+=2)c.push({x:i[d]+g,y:i[d+1]+h});return c}}),_gsScope._gsDefine&&_gsScope._gsQueue.pop()(); -------------------------------------------------------------------------------- /testing.py: -------------------------------------------------------------------------------- 1 | 2 | import random, sys, os, glob, pickle 3 | import argparse, tqdm 4 | 5 | import matplotlib as mpl 6 | 7 | mpl.use("Agg") 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | from fire import Fire 11 | 12 | import torch 13 | import torch.nn as nn 14 | import torch.nn.functional as F 15 | import torch.optim as optim 16 | 17 | from utils import * 18 | import transforms 19 | 20 | from encoding import encode_binary 21 | from models import BaseModel, DecodingModel, DataParallelModel 22 | from logger import Logger, VisdomLogger 23 | 24 | import IPython 25 | 26 | 27 | # LOGGING 28 | logger = VisdomLogger("test", server="35.230.67.129", port=8000, env=JOB) 29 | logger.add_hook(lambda x: logger.step(), feature="orig", freq=1) 30 | 31 | 32 | def sweep(images, targets, model, transform, name, samples=10): 33 | 34 | min_val, max_val = transform.plot_range 35 | 36 | results = [] 37 | for val in tqdm.tqdm(np.linspace(min_val, max_val, samples), ncols=30): 38 | transformed = transform(images, val) 39 | predictions = model(transformed).mean(dim=1).cpu().data.numpy() 40 | 41 | mse_loss = np.mean([binary.mse_dist(x, y) for x, y in zip(predictions, targets)]) 42 | binary_loss = np.mean([binary.distance(x, y) for x, y in zip(predictions, targets)]) 43 | results.append((val, binary_loss, mse_loss)) 44 | 45 | x, bits_off, mse = (np.array(x) for x in zip(*results)) 46 | 47 | print(transform.__name__, np.mean(bits_off)) 48 | logger.update(transform.__name__, np.mean(bits_off)) 49 | 50 | np.savez_compressed(f"output/{name}_{transform.__name__}.npz", x=x, bits_off=bits_off, mse=mse) 51 | # logger.viz(f"{name}_{transform_name}", method='line', 52 | # Y=np.column_stack((32*mse, bits_off)), 53 | # X=np.column_stack((x, x)), 54 | # opts=dict(title=f"{name}_{transform_name}", ylim=(0, 16), 55 | # legend=['squared error', 'bits']) 56 | # ) 57 | 58 | fig, ax1 = plt.subplots() 59 | ax1.plot(x, bits_off, "b") 60 | ax1.set_ylim(0, TARGET_SIZE // 2) 61 | ax1.set_ylabel("Number Incorrect Bits") 62 | ax2 = ax1.twinx() 63 | ax2.plot(x, mse, "r") 64 | ax2.set_ylim(0, 0.25) 65 | ax2.set_ylabel("Mean Squared Error") 66 | plt.savefig(f"output/{name}_{transform.__name__}.jpg") 67 | plt.cla() 68 | plt.clf() 69 | plt.close() 70 | return np.mean(bits_off) 71 | 72 | 73 | def test_transforms(model=None, image_files=VAL_FILES, name="test", max_iter=250): 74 | 75 | if not isinstance(model, BaseModel): 76 | print(f"Loading model from {model}") 77 | 
model = DataParallelModel( 78 | DecodingModel.load(distribution=transforms.encoding, n=ENCODING_DIST_SIZE, weights_file=model) 79 | ) 80 | 81 | images = [im.load(image) for image in image_files] 82 | images = im.stack(images) 83 | targets = [binary.random(n=TARGET_SIZE) for _ in range(0, len(images))] 84 | model.eval() 85 | 86 | encoded_images = encode_binary( 87 | images, targets, model, n=ENCODING_DIST_SIZE, verbose=True, max_iter=max_iter, use_weighting=True 88 | ) 89 | 90 | logger.images(images, "original_images", resize=196) 91 | logger.images(encoded_images, "encoded_images", resize=196) 92 | for img, encoded_im, filename, target in zip(images, encoded_images, image_files, targets): 93 | im.save(im.numpy(img), file=f"output/_{binary.str(target)}_original_{filename.split('/')[-1]}") 94 | im.save(im.numpy(encoded_im), file=f"output/_{binary.str(target)}_encoded_{filename.split('/')[-1]}") 95 | 96 | model.set_distribution(transforms.identity, n=1) 97 | predictions = model(encoded_images).mean(dim=1).cpu().data.numpy() 98 | binary_loss = np.mean([binary.distance(x, y) for x, y in zip(predictions, targets)]) 99 | 100 | for transform in [ 101 | transforms.pixilate, 102 | transforms.blur, 103 | transforms.rotate, 104 | transforms.scale, 105 | transforms.translate, 106 | transforms.noise, 107 | transforms.crop, 108 | transforms.gauss, 109 | transforms.whiteout, 110 | transforms.resize_rect, 111 | transforms.color_jitter, 112 | transforms.jpeg_transform, 113 | transforms.elastic, 114 | transforms.brightness, 115 | transforms.contrast, 116 | transforms.flip, 117 | ]: 118 | sweep(encoded_images, targets, model, transform=transform, name=name, samples=60) 119 | 120 | # sweep( 121 | # encoded_images, 122 | # targets, 123 | # model, 124 | # transform=lambda x, val: transforms.rotate(x, rand_val=False, theta=val), 125 | # name=name, 126 | # transform_name="rotate", 127 | # min_val=-0.6, 128 | # max_val=0.6, 129 | # samples=80, 130 | # ) 131 | 132 | # sweep( 133 | # encoded_images, 134 | # targets, 135 | # model, 136 | # transform=lambda x, val: transforms.scale(x, rand_val=False, scale_val=val), 137 | # name=name, 138 | # transform_name="scale", 139 | # min_val=0.6, 140 | # max_val=1.4, 141 | # samples=50, 142 | # ) 143 | 144 | # sweep( 145 | # encoded_images, 146 | # targets, 147 | # model, 148 | # transform=lambda x, val: transforms.translate(x, rand_val=False, radius=val), 149 | # name=name, 150 | # transform_name="translate", 151 | # min_val=0.0, 152 | # max_val=1.0, 153 | # samples=50, 154 | # ) 155 | 156 | # sweep( 157 | # encoded_images, 158 | # targets, 159 | # model, 160 | # transform=lambda x, val: transforms.noise(x, intensity=val), 161 | # name=name, 162 | # transform_name="noise", 163 | # min_val=0.0, 164 | # max_val=0.1, 165 | # samples=30, 166 | # ) 167 | 168 | # sweep( 169 | # encoded_images, 170 | # targets, 171 | # model, 172 | # transform=lambda x, val: transforms.crop(x, p=val), 173 | # name=name, 174 | # transform_name="crop", 175 | # min_val=0.1, 176 | # max_val=1.0, 177 | # samples=50, 178 | # ) 179 | 180 | # sweep( 181 | # encoded_images, 182 | # targets, 183 | # model, 184 | # transform=lambda x, val: transforms.gauss(x, sigma=val, rand_val=False), 185 | # name=name, 186 | # transform_name="gauss", 187 | # min_val=0.3, 188 | # max_val=4, 189 | # samples=50, 190 | # ) 191 | 192 | # sweep( 193 | # encoded_images, 194 | # targets, 195 | # model, 196 | # transform=lambda x, val: transforms.whiteout(x, scale=val, rand_val=False), 197 | # name=name, 198 | #
transform_name="whiteout", 199 | # min_val=0.02, 200 | # max_val=0.2, 201 | # samples=50, 202 | # ) 203 | 204 | # sweep( 205 | # encoded_images, 206 | # targets, 207 | # model, 208 | # transform=lambda x, val: transforms.resize_rect(x, ratio=val, rand_val=False), 209 | # name=name, 210 | # transform_name="resize_rect", 211 | # min_val=0.5, 212 | # max_val=1.5, 213 | # samples=50, 214 | # ) 215 | 216 | # sweep( 217 | # encoded_images, 218 | # targets, 219 | # model, 220 | # transform=lambda x, val: transforms.color_jitter(x, jitter=val), 221 | # name=name, 222 | # transform_name="jitter", 223 | # min_val=0, 224 | # max_val=0.2, 225 | # samples=50, 226 | # ) 227 | 228 | # sweep( 229 | # encoded_images, 230 | # targets, 231 | # model, 232 | # transform=lambda x, val: transforms.convertToJpeg(x, q=val), 233 | # name=name, 234 | # transform_name="jpg", 235 | # min_val=10, 236 | # max_val=100, 237 | # samples=50, 238 | # ) 239 | 240 | logger.update("orig", binary_loss) 241 | model.set_distribution(transforms.training, n=DIST_SIZE) 242 | model.train() 243 | 244 | 245 | def evaluate(model, image, target, test_transforms=False): 246 | 247 | if not isinstance(model, BaseModel): 248 | model = DataParallelModel(DecodingModel.load(distribution=transforms.identity, n=1, weights_file=model)) 249 | 250 | image = im.torch(im.load(image)).unsqueeze(0) 251 | target = binary.parse(str(target)) 252 | prediction = model(image).mean(dim=1).squeeze().cpu().data.numpy() 253 | prediction = binary.get(prediction) 254 | 255 | # print (f"Target: {binary.str(target)}, Prediction: {binary.str(prediction)}, \ 256 | # Distance: {binary.distance(target, prediction)}") 257 | 258 | if test_transforms: 259 | sweep(image, [target], model, transform=transforms.rotate, name="eval", samples=60) 260 | 261 | 262 | def test_transfer(model=None, image_files=VAL_FILES, max_iter=250): 263 | if not isinstance(model, BaseModel): 264 | print(f"Loading model from {model}") 265 | model = DataParallelModel( 266 | DecodingModel.load(distribution=transforms.encoding, n=ENCODING_DIST_SIZE, weights_file=model) 267 | ) 268 | 269 | images = [im.load(image) for image in image_files] 270 | images = im.stack(images) 271 | targets = [binary.random(n=TARGET_SIZE) for _ in range(0, len(images))] 272 | model.eval() 273 | 274 | transform_list = [ 275 | transforms.rotate, 276 | transforms.translate, 277 | transforms.scale, 278 | transforms.resize_rect, 279 | transforms.crop, 280 | transforms.whiteout, 281 | transforms.elastic, 282 | transforms.motion_blur, 283 | transforms.brightness, 284 | transforms.contrast, 285 | transforms.pixilate, 286 | transforms.blur, 287 | transforms.color_jitter, 288 | transforms.gauss, 289 | transforms.noise, 290 | transforms.impulse_noise, 291 | transforms.flip, 292 | ] 293 | 294 | labels = [t.__name__ for t in transform_list] 295 | score_matrix = np.zeros((len(transform_list), len(transform_list))) 296 | 297 | for i, t1 in enumerate(transform_list): 298 | 299 | model.set_distribution(lambda x: t1.random(x), n=ENCODING_DIST_SIZE) 300 | encoded_images = encode_binary( 301 | images, targets, model, n=ENCODING_DIST_SIZE, verbose=True, max_iter=max_iter, use_weighting=True 302 | ) 303 | 304 | model.set_distribution(transforms.identity, n=1) 305 | t1_error = sweep(encoded_images, targets, model, transform=t1, name=f"{t1.__name__}", samples=60) 306 | 307 | for j, t2 in enumerate(transform_list): 308 | if t1.__name__ == t2.__name__: 309 | score_matrix[i,j] = t1_error 310 | continue 311 | t2_error = sweep(encoded_images, targets, 
model, transform=t2, name=f'{t1.__name__}->{t2.__name__}', samples=60) 312 | score_matrix[i,j] = t2_error 313 | 314 | print(f'{t1.__name__} --> {t2.__name__}: {t2_error}') 315 | np.save('labels', labels) 316 | np.save('score_matrix', score_matrix) 317 | create_heatmap(score_matrix, labels) 318 | 319 | 320 | if __name__ == "__main__": 321 | Fire() 322 | -------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import print_function 3 | 4 | import numpy as np 5 | import random, sys, os, json 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | import torch.optim as optim 11 | from torch.autograd import Variable 12 | 13 | from torchvision import models 14 | from utils import * 15 | import transforms 16 | 17 | import IPython 18 | 19 | 20 | """ Base model class. """ 21 | 22 | 23 | class BaseModel(nn.Module): 24 | def __init__(self, distribution=transforms.identity, n=1): 25 | super(BaseModel, self).__init__() 26 | if None not in [distribution, n]: 27 | self.distribution, self.n = distribution, n 28 | 29 | def forward(self, x): 30 | raise NotImplementedError() 31 | 32 | @property 33 | def distribution(self): 34 | return self.__distribution 35 | 36 | @distribution.setter 37 | def distribution(self, x): 38 | self.__distribution = x 39 | 40 | @property 41 | def n(self): 42 | return self.__n 43 | 44 | @n.setter 45 | def n(self, n): 46 | self.__n = n 47 | 48 | def set_distribution(self, distribution=transforms.identity, n=1): 49 | self.distribution, self.n = distribution, n 50 | 51 | @classmethod 52 | def load(cls, weights_file=None, distribution=transforms.identity, n=1): 53 | model = cls(distribution=distribution, n=n) 54 | if weights_file is not None: 55 | model.load_state_dict(torch.load(weights_file)) 56 | return model 57 | 58 | def save(self, weights_file, verbose=False): 59 | if verbose: 60 | print(f"Saving model to {weights_file}") 61 | torch.save(self.state_dict(), weights_file) 62 | 63 | 64 | """ 65 | DataParallel wrapper for BaseModels that exposes the same methods 66 | (including save and distribution variables) without a .module() call. 
67 | """ 68 | 69 | 70 | class DataParallelModel(BaseModel): 71 | def __init__(self, *args, **kwargs): 72 | super().__init__(distribution=None, n=None) 73 | self.parallel_apply = nn.DataParallel(*args, **kwargs) 74 | 75 | def forward(self, x): 76 | return self.parallel_apply(x) 77 | 78 | @property 79 | def distribution(self): 80 | return self.parallel_apply.module.distribution 81 | 82 | @distribution.setter 83 | def distribution(self, x): 84 | self.parallel_apply.module.distribution = x 85 | 86 | @property 87 | def n(self): 88 | return self.parallel_apply.module.n 89 | 90 | @n.setter 91 | def n(self, n): 92 | self.parallel_apply.module.n = n 93 | 94 | @property 95 | def module(self): 96 | return self.parallel_apply.module 97 | 98 | @classmethod 99 | def load(cls, weights_file=None, distribution=transforms.identity, n=1): 100 | model = cls(distribution=distribution, n=n) 101 | if weights_file is not None: 102 | model.parallel_apply.module.load_state_dict(torch.load(weights_file)) 103 | return model 104 | 105 | def save(self, weights_file, verbose=False): 106 | if verbose: 107 | print(f"Saving model to {weights_file}") 108 | torch.save(self.parallel_apply.module.state_dict(), weights_file) 109 | 110 | 111 | """ 112 | Simple decoding network with squeezenet features and a 113 | pooling-based linear bit transform. 114 | """ 115 | 116 | 117 | class DecodingNet(BaseModel): 118 | def __init__(self, *args, **kwargs): 119 | super(DecodingNet, self).__init__(*args, **kwargs) 120 | 121 | self.features = models.squeezenet1_1(pretrained=True).features 122 | self.classifier = nn.Sequential(nn.Linear(512 * 8, TARGET_SIZE * 2)) 123 | # nn.ReLU(inplace=True), 124 | # nn.Linear(4096, TARGET_SIZE*2)) 125 | self.bn = nn.BatchNorm2d(512) 126 | self.to(DEVICE) 127 | 128 | def forward(self, x): 129 | 130 | x = torch.cat([self.distribution(x).unsqueeze(1) for i in range(0, self.n)], dim=1) 131 | B, N, C, H, W = x.shape 132 | 133 | x = torch.cat( 134 | [ 135 | ((x[:, :, 0] - 0.485) / (0.229)).unsqueeze(2), 136 | ((x[:, :, 1] - 0.456) / (0.224)).unsqueeze(2), 137 | ((x[:, :, 2] - 0.406) / (0.225)).unsqueeze(2), 138 | ], 139 | dim=2, 140 | ) 141 | 142 | x = x.view(B * N, C, H, W) 143 | x = self.features(x) 144 | 145 | x = torch.cat([F.avg_pool2d(x, (x.shape[2] // 2)), F.max_pool2d(x, (x.shape[2] // 2))], dim=1) 146 | x = x.view(x.size(0), -1) 147 | x = (x - x.mean(dim=1, keepdim=True)) / (x.std(dim=1, keepdim=True)) 148 | x = self.classifier(x) 149 | x = x.view(B, N, TARGET_SIZE, 2) # .mean(dim=0) # reshape and average 150 | 151 | return F.softmax(x, dim=3)[:, :, :, 0].clamp(min=0, max=1) 152 | 153 | 154 | """ 155 | Decoding network with squeezenet features and a 156 | gram-matrix based output that connects to intermediate layers. 
157 | """ 158 | 159 | 160 | class DecodingGramNet(BaseModel): 161 | def __init__(self, *args, **kwargs): 162 | super(DecodingGramNet, self).__init__(*args, **kwargs) 163 | 164 | self.features = models.squeezenet1_1(pretrained=True).features 165 | # self.gram_classifiers = nn.ModuleList([ 166 | # nn.Linear(256**2, 256), 167 | # nn.Linear(384**2, 256), 168 | # nn.Linear(512**2, 256), 169 | # ]) 170 | self.indices = [6, 8, 10, 12] 171 | self.classifier = nn.Linear(1408, TARGET_SIZE * 2) 172 | self.to(DEVICE) 173 | 174 | def forward(self, x): 175 | 176 | x = torch.cat([self.distribution(x).unsqueeze(1) for i in range(0, self.n)], dim=1) 177 | B, N, C, H, W = x.shape 178 | 179 | x = torch.cat( 180 | [ 181 | ((x[:, :, 0] - 0.485) / (0.229)).unsqueeze(2), 182 | ((x[:, :, 1] - 0.456) / (0.224)).unsqueeze(2), 183 | ((x[:, :, 2] - 0.406) / (0.225)).unsqueeze(2), 184 | ], 185 | dim=2, 186 | ) 187 | 188 | x = x.view(B * N, C, H, W) 189 | 190 | layers = list(self.features._modules.values()) 191 | gram_maps = [] 192 | 193 | for i, layer in enumerate(layers): 194 | x = layer(x) 195 | j = self.indices.index(i) if i in self.indices else None 196 | 197 | if j is not None: 198 | y = F.max_pool2d(x, (x.shape[2], x.shape[3])) 199 | gram_maps.append(y) 200 | 201 | # gram_maps = [] 202 | # for layer, clf in zip(layers[-3:], self.gram_classifiers): 203 | # x = layer(x) 204 | # y = gram(x).view(x.shape[0], -1) 205 | # print (x.shape, y.shape) 206 | # print (clf) 207 | # #gram_maps.append(clf(y)) 208 | 209 | x = torch.cat(gram_maps, dim=1) 210 | x = x.view(x.size(0), -1) 211 | 212 | x = (x - x.mean(dim=1, keepdim=True)) / (x.std(dim=1, keepdim=True)) 213 | x = self.classifier(x) 214 | x = x.view(B, N, TARGET_SIZE, 2) # .mean(dim=0) # reshape and average 215 | 216 | return F.softmax(x, dim=3)[:, :, :, 0].clamp(min=0, max=1) 217 | 218 | 219 | """ 220 | Tiny un-pretrained decoding network. 221 | """ 222 | 223 | 224 | class TinyDecodingNet(BaseModel): 225 | def __init__(self, *args, **kwargs): 226 | super().__init__(*args, **kwargs) 227 | 228 | self.conv1 = nn.Conv2d(3, 128, (3, 3), padding=1) 229 | self.conv2 = nn.Conv2d(128, 128, (3, 3), padding=1) 230 | self.conv3 = nn.Conv2d(128, 128, (3, 3), padding=1) 231 | self.conv4 = nn.Conv2d(128, 2 * TARGET_SIZE, (3, 3), padding=1) 232 | 233 | self.to(DEVICE) 234 | 235 | def forward(self, x): 236 | x = torch.cat([self.distribution(x).unsqueeze(1) for i in range(0, self.n)], dim=1) 237 | B, N, C, H, W = x.shape 238 | 239 | x = torch.cat( 240 | [ 241 | ((x[:, :, 0] - 0.485) / (0.229)).unsqueeze(2), 242 | ((x[:, :, 1] - 0.456) / (0.224)).unsqueeze(2), 243 | ((x[:, :, 2] - 0.406) / (0.225)).unsqueeze(2), 244 | ], 245 | dim=2, 246 | ) 247 | 248 | x = x.view(B * N, C, H, W).contiguous() 249 | # print (x.shape) 250 | 251 | # x = F.relu(self.conv1(x)) 252 | # x = F.max_pool2d(x, 2) 253 | # print (x.shape) 254 | 255 | # x = F.relu(self.conv2(x)) 256 | # x = F.max_pool2d(x, 2) 257 | # print (x.shape) 258 | 259 | # x = F.relu(self.conv3(x)) 260 | # x = F.max_pool2d(x, 2) 261 | # print (x.shape) 262 | 263 | # x = F.relu(self.conv4(x)) 264 | # x = F.max_pool2d(x, 2) 265 | # print (x.shape) 266 | x = F.avg_pool2d(x, (x.shape[2], x.shape[3])) 267 | x = x.view(B, N, TARGET_SIZE, 2) # .mean(dim=0) # reshape and average 268 | 269 | return F.softmax(x, dim=3)[:, :, :, 0].clamp(min=0, max=1) 270 | 271 | 272 | """Decoding network that tries to predict on images using a dilated DCNN, 273 | which should theoretically be invariant to any scale of input. 
""" 274 | 275 | 276 | class DilatedDecodingNet(BaseModel): 277 | def __init__(self, *args, **kwargs): 278 | super(DilatedDecodingNet, self).__init__(*args, **kwargs) 279 | 280 | self.features = models.vgg11(pretrained=True) 281 | self.features.eval() 282 | self.classifier = nn.Linear(512 ** 2, TARGET_SIZE * 2) 283 | self.gram = GramMatrix() 284 | 285 | if USE_CUDA: 286 | self.cuda() 287 | 288 | def forward(self, x, verbose=False, distribution=transforms.identity, n=1, return_variance=False): 289 | 290 | # make sure to center the image and divide by standard deviation 291 | x = torch.cat( 292 | [ 293 | ((x[0] - 0.485) / (0.229)).unsqueeze(0), 294 | ((x[1] - 0.456) / (0.224)).unsqueeze(0), 295 | ((x[2] - 0.406) / (0.225)).unsqueeze(0), 296 | ], 297 | dim=0, 298 | ) 299 | 300 | x = torch.cat([distribution(x).unsqueeze(0) for i in range(0, n)], dim=0) 301 | 302 | # vgg layers 303 | dilation_factor = 1 304 | for layer in list(self.features.features._modules.values()): 305 | if isinstance(layer, nn.Conv2d): 306 | x = F.conv2d( 307 | x, 308 | layer.weight, 309 | bias=layer.bias, 310 | stride=layer.stride, 311 | padding=tuple(layer.padding * np.array(dilation_factor)), 312 | dilation=dilation_factor, 313 | ) 314 | elif isinstance(layer, nn.MaxPool2d): 315 | if dilation_factor == 1: 316 | x = F.max_pool2d(x, 2, stride=1, dilation=1) 317 | x = F.pad(x, (1, 0, 1, 0)) 318 | else: 319 | x = F.max_pool2d(x, 2, stride=1, dilation=dilation_factor) 320 | x = F.pad(x, [dilation_factor // 2] * 4) 321 | dilation_factor *= 2 322 | else: 323 | x = layer(x) 324 | 325 | x = self.gram(x) 326 | x = x.view(x.size(0), -1) 327 | x = (x - x.mean(dim=1, keepdim=True)) / (x.std(dim=1, keepdim=True)) 328 | x = self.classifier(x) 329 | x = x.view(x.size(0), TARGET_SIZE, 2) # .mean(dim=0) # reshape and average 330 | 331 | predictions = F.softmax(x, dim=2)[:, :, 0] 332 | 333 | return predictions 334 | 335 | 336 | DecodingModel = eval(MODEL_TYPE) 337 | 338 | if __name__ == "__main__": 339 | 340 | model = nn.DataParallel(TinyDecodingNet(n=16, distribution=transforms.identity)) 341 | images = torch.randn(4, 3, 224, 224).float().to(DEVICE) 342 | x = model.forward(images) 343 | print(x.shape) 344 | -------------------------------------------------------------------------------- /transforms.py: -------------------------------------------------------------------------------- 1 | 2 | from __future__ import print_function 3 | 4 | import numpy as np 5 | import random, sys, os, timeit, math 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | from torch.autograd import Variable 11 | 12 | from utils import * 13 | 14 | import IPython 15 | 16 | from scipy.ndimage import filters 17 | 18 | import torchvision 19 | from io import BytesIO 20 | from PIL import Image 21 | 22 | 23 | def affine(data, x=[1, 0, 0], y=[0, 1, 0]): 24 | return dtype([x, y], device=data.device).float().repeat(data.shape[0], 1, 1) 25 | 26 | 27 | def sample(min_val=0, max_val=1, plot_range=None, generator=None): 28 | 29 | if plot_range is None: 30 | span = max_val - min_val 31 | min_plot = min_val - span / 2.0 32 | max_plot = max_val + span / 2.0 33 | if min_val >= 0 and max_val >= 0: 34 | min_plot = max(min_plot, 0) 35 | if min_val <= 0 and max_val <= 0: 36 | max_plot = min(max_plot, 0) 37 | plot_range = (min_plot, max_plot) 38 | 39 | if generator is None: 40 | generator = lambda: random.uniform(min_val, max_val) 41 | 42 | def wrapper(transform): 43 | class RandomSampler: 44 | """Wrapper class that turns transforms into dynamic 45 | 
callables.""" 46 | 47 | def __init__(self, transform, plot_range, generator): 48 | self.transform = transform 49 | self.plot_range = plot_range 50 | self.generator = generator 51 | self.__name__ = transform.__name__ 52 | 53 | def __call__(self, x, val=None, **kwargs): 54 | if val == None: 55 | return self.transform(x) 56 | return self.transform(x, val, **kwargs) 57 | 58 | def random(self, x, **kwargs): 59 | val = self.generator() 60 | return self.transform(x, val, **kwargs) 61 | 62 | return RandomSampler(transform, plot_range, generator) 63 | 64 | return wrapper 65 | 66 | 67 | @sample(0, 0) 68 | def identity(x, val=None): 69 | x = resize(x, 224) 70 | return x 71 | 72 | 73 | @sample(100, 300) 74 | def resize(x, val=224): 75 | val = int(val) 76 | grid = F.affine_grid(affine(x), size=torch.Size((x.shape[0], 3, val, val))) 77 | img = F.grid_sample(x, grid, padding_mode="border") 78 | return img 79 | 80 | 81 | @sample(0.6, 1.4) 82 | def resize_rect(x, ratio=0.8): 83 | 84 | x_scale = random.uniform(ratio, 1) 85 | y_scale = x_scale / ratio 86 | 87 | grid = F.affine_grid(affine(x), size=x.size()) 88 | grid = torch.cat([grid[:, :, :, 0].unsqueeze(3) * y_scale, grid[:, :, :, 1].unsqueeze(3) * x_scale], dim=3) 89 | img = F.grid_sample(x, grid, padding_mode="border") 90 | return img 91 | 92 | 93 | @sample(0.05, 0.2) 94 | def color_jitter(x, jitter=0.1): 95 | R, G, B = (random.uniform(1 - jitter, 1 + jitter) for i in range(0, 3)) 96 | x = torch.cat([x[:, 0].unsqueeze(1) * R, x[:, 1].unsqueeze(1) * G, x[:, 2].unsqueeze(1) * B], dim=1) 97 | return x.clamp(min=0, max=1) 98 | 99 | 100 | @sample(0.6, 1.4) 101 | def scale(x, scale_val=1): 102 | grid = F.affine_grid(affine(x), size=x.size()) 103 | img = F.grid_sample(x, grid * scale_val, padding_mode="border") 104 | return img 105 | 106 | 107 | @sample(-60, 60) 108 | def rotate(x, theta=45): 109 | c, s = np.cos(np.radians(theta)), np.sin(np.radians(theta)) 110 | grid = F.affine_grid(affine(x, [c, s, 0], [-s, c, 0]), size=x.size()) 111 | img = F.grid_sample(x, grid, padding_mode="border") 112 | return img 113 | 114 | 115 | @sample(0.9, 1.1, plot_range=(0.8, 1.2)) 116 | def elastic(x, ratio=0.8, n=3, p=0.1): 117 | 118 | N, C, H, W = x.shape 119 | H_c, W_c = int((H * W * p) ** 0.5), int((H * W * p) ** 0.5) 120 | 121 | grid = F.affine_grid(affine(x), size=x.size()) 122 | grid_y = grid[:, :, :, 0].unsqueeze(3) 123 | grid_x = grid[:, :, :, 1].unsqueeze(3) 124 | 125 | # stretch/contract n small image regions 126 | for i in range(0, n): 127 | x_coord = int(random.uniform(0, H - H_c)) 128 | y_coord = int(random.uniform(0, W - W_c)) 129 | 130 | x_scale = random.uniform(0, 1 - ratio) + 1 131 | y_scale = x_scale / ratio 132 | grid_y[:, x_coord : x_coord + H_c, y_coord : y_coord + W_c] = ( 133 | grid_y[:, x_coord : x_coord + H_c, y_coord : y_coord + W_c] * y_scale 134 | ) 135 | grid_x[:, x_coord : x_coord + H_c, y_coord : y_coord + W_c] = ( 136 | grid_x[:, x_coord : x_coord + H_c, y_coord : y_coord + W_c] * x_scale 137 | ) 138 | 139 | grid = torch.cat([grid_y, grid_x], dim=3) 140 | img = F.grid_sample(x, grid, padding_mode="border") 141 | return img 142 | 143 | 144 | @sample(0.1, 0.4) 145 | def translate(x, radius=0.15): 146 | theta = random.uniform(-np.pi, np.pi) 147 | sx, sy = np.cos(theta) * radius, np.sin(theta) * radius 148 | grid = F.affine_grid(affine(x, [1, 0, sx], [0, 1, sy]), size=x.size()) 149 | img = F.grid_sample(x, grid, padding_mode="border") 150 | return img 151 | 152 | 153 | @sample(0.3, 2, plot_range=(0.01, 4)) 154 | def gauss(x, sigma=1): 155 | filter = 
gaussian_filter(kernel_size=7, sigma=sigma) 156 | x = F.conv2d(x, weight=filter.to(x.device), bias=None, groups=3, padding=2) 157 | return x.clamp(min=1e-3, max=1) 158 | 159 | 160 | @sample(5, 9) 161 | def motion_blur(x, val): 162 | filter = motion_blur_filter(kernel_size=int(val)) 163 | x = F.conv2d(x, weight=filter.to(x.device), bias=None, groups=3) 164 | return x.clamp(min=1e-3, max=1) 165 | 166 | 167 | @sample(0.03, 0.06) 168 | def noise(x, intensity=0.05): 169 | noise = dtype(x.size(), device=x.device).normal_().requires_grad_(False) * intensity 170 | img = (x + noise).clamp(min=1e-3, max=1) 171 | return img 172 | 173 | 174 | @sample(0, 1, plot_range=(0, 1)) 175 | def flip(x, val): 176 | if val < 0.5: 177 | return x 178 | grid = F.affine_grid(affine(x, [-1, 0, 0], [0, 1, 0]), size=x.size()) 179 | img = F.grid_sample(x, grid, padding_mode="border") 180 | return img 181 | 182 | 183 | @sample(0, 0.2) 184 | def impulse_noise(x, intensity=0.1): 185 | num = 10000 186 | _, _, H, W = x.shape 187 | x_coords = np.random.randint(low=0, high=H, size=(int(intensity * num),)) 188 | y_coords = np.random.randint(low=0, high=W, size=(int(intensity * num),)) 189 | 190 | R, G, B = (random.uniform(0, 1) for i in range(0, 3)) 191 | mask = torch.ones_like(x) 192 | mask[:, 0, x_coords, y_coords] = R 193 | mask[:, 1, x_coords, y_coords] = G 194 | mask[:, 2, x_coords, y_coords] = B 195 | return x * mask 196 | 197 | 198 | @sample(0.01, 0.2, plot_range=(0.01, 0.3)) 199 | def whiteout(x, scale=0.1, n=6): 200 | 201 | noise = dtype(x.size(), device=x.device).normal_().requires_grad_(False) * 0.5 202 | 203 | for i in range(0, n): 204 | w, h = int(scale * x.shape[2]), int(scale * x.shape[3]) 205 | sx, sy = (random.randrange(0, x.shape[2] - w), random.randrange(0, x.shape[3] - h)) 206 | 207 | mask = torch.ones_like(x) 208 | mask[:, :, sx : (sx + w), sy : (sy + h)] = 0.0 209 | 210 | R, G, B = (random.random() for i in range(0, 3)) 211 | bias = dtype([R, G, B], device=x.device).view(1, 3, 1, 1).expand_as(mask) 212 | 213 | if random.randint(0, 1): 214 | bias = (bias + noise).clamp(min=1e-3, max=1) 215 | x = mask * x + (1.0 - mask) * bias 216 | return x 217 | 218 | 219 | @sample(0.5, 1, plot_range=(0.2, 1)) 220 | def crop(x, p=0.6): 221 | N, C, H, W = x.shape 222 | H_c, W_c = int((H * W * p) ** 0.5), int((H * W * p) ** 0.5) 223 | x_coord = int(random.uniform(0, H - H_c)) 224 | y_coord = int(random.uniform(0, W - W_c)) 225 | 226 | mask = torch.zeros_like(x) 227 | mask[:, :, x_coord : x_coord + H_c, y_coord : y_coord + W_c] = 1.0 228 | return x * mask 229 | 230 | 231 | ## NOT DIFFERENTIABLE ## 232 | @sample(50, 100, plot_range=(10, 100)) 233 | def jpeg_transform(x, q=50): 234 | jpgs = [] 235 | for img in x: 236 | img = img.squeeze() 237 | img = torchvision.transforms.ToPILImage()(img.cpu()) 238 | with BytesIO() as f: 239 | img.save(f, format="JPEG", quality=int(q)) 240 | f.seek(0) 241 | ima_jpg = Image.open(f) 242 | jpgs.append(torchvision.transforms.ToTensor()(ima_jpg)) 243 | return torch.stack(jpgs).to(DEVICE) 244 | 245 | 246 | @sample(-0.4, 0.4) 247 | def brightness(x, brightness_val=0.2): 248 | x = torch.cat( 249 | [ 250 | x[:, 0].unsqueeze(1) + brightness_val, 251 | x[:, 1].unsqueeze(1) + brightness_val, 252 | x[:, 2].unsqueeze(1) + brightness_val, 253 | ], 254 | dim=1, 255 | ) 256 | return x.clamp(min=0, max=1) 257 | 258 | 259 | @sample(0.5, 1.5) 260 | def contrast(x, factor=0.1): 261 | R = (x[:, 0].unsqueeze(1) - 0.5) * factor + 0.5 262 | G = (x[:, 1].unsqueeze(1) - 0.5) * factor + 0.5 263 | B = (x[:, 
246 | @sample(-0.4, 0.4)
247 | def brightness(x, brightness_val=0.2):
248 |     x = torch.cat(
249 |         [
250 |             x[:, 0].unsqueeze(1) + brightness_val,
251 |             x[:, 1].unsqueeze(1) + brightness_val,
252 |             x[:, 2].unsqueeze(1) + brightness_val,
253 |         ],
254 |         dim=1,
255 |     )
256 |     return x.clamp(min=0, max=1)
257 |
258 |
259 | @sample(0.5, 1.5)
260 | def contrast(x, factor=0.1):
261 |     R = (x[:, 0].unsqueeze(1) - 0.5) * factor + 0.5
262 |     G = (x[:, 1].unsqueeze(1) - 0.5) * factor + 0.5
263 |     B = (x[:, 2].unsqueeze(1) - 0.5) * factor + 0.5
264 |     x = torch.cat([R, G, B], dim=1)
265 |     return x.clamp(min=0, max=1)
266 |
267 |
268 | @sample(2, 6)
269 | def blur(x, blur_val=4):
270 |     N, C, H, W = x.shape
271 |
272 |     # downsampling
273 |     out_size_h = H // max(int(blur_val), 2)
274 |     out_size_w = W // max(int(blur_val), 2)
275 |     grid = F.affine_grid(affine(x), size=torch.Size((x.shape[0], 3, out_size_h, out_size_w)))
276 |     x = F.grid_sample(x, grid, padding_mode="border")
277 |
278 |     # upsampling
279 |     grid = F.affine_grid(affine(x), size=torch.Size((x.shape[0], 3, H, W)))
280 |     x = F.grid_sample(x, grid, padding_mode="border")
281 |
282 |     return x
283 |
284 |
285 | @sample(2, 8)
286 | def pixilate(x, res=4):
287 |     res = max(2, min(res, 8))
288 |     res = max(2, min(2 ** (math.ceil(math.log(res, 2))), 8))  # round up to a power of two in [2, 8]
289 |     return F.interpolate(F.avg_pool2d(x, int(res)), scale_factor=int(res))  # F.interpolate replaces the deprecated F.upsample
290 |
291 |
292 | # def training(x):
293 | #     _ = sample(0, 0)(lambda x, val: x)
294 | #     x = random.choice([gauss, noise, color_jitter, whiteout, _, _]).random(x)
295 | #     x = random.choice([rotate, resize_rect, scale, translate, flip, _, _]).random(x)
296 | #     x = random.choice([flip, crop, _]).random(x)
297 | #     x = random.choice([rotate, resize_rect, scale, translate, flip, _]).random(x)
298 | #     x = random.choice([gauss, noise, color_jitter, crop, _, _]).random(x)
299 | #     x = identity(x)
300 | #     return x
301 |
302 |
303 | def training(x):
304 |     _ = sample(0, 0)(lambda x, val: x)
305 |     t_list = [
306 |         identity,
307 |         elastic,
308 |         motion_blur,
309 |         impulse_noise,
310 |         jpeg_transform,
311 |         brightness,
312 |         contrast,
313 |         blur,
314 |         pixilate,
315 |         resize,
316 |         resize_rect,
317 |         color_jitter,
318 |         crop,
319 |         scale,
320 |         rotate,
321 |         translate,
322 |         gauss,
323 |         noise,
324 |         flip,
325 |         whiteout,
326 |         _,
327 |         _,
328 |     ]
329 |     x = random.choice(t_list).random(x)
330 |     x = random.choice(t_list).random(x)
331 |     x = random.choice(t_list).random(x)
332 |     x = identity(x)
333 |     return x
334 |
335 |
336 | def encoding(x):
337 |     return training(x)
338 |
339 |
340 | # def inference(x):
341 | #     x = random.choice([rotate, resize_rect, scale, translate, flip, lambda x: x])(x)
342 | #     x = random.choice([gauss, noise, color_jitter, lambda x: x])(x)
343 | #     x = random.choice([rotate, resize_rect, scale, translate, flip, lambda x: x])(x)
344 | #     x = identity(x)
345 | #     return x
346 |
347 | # def easy(x):
348 | #     x = resize_rect(x)
349 | #     x = rotate(scale(x, 0.6, 1.4), max_angle=30)
350 | #     x = gauss(x, min_sigma=0.8, max_sigma=1.2)
351 | #     x = translate(x)
352 | #     x = identity(x)
353 | #     return x
354 |
355 | if __name__ == "__main__":
356 |     import matplotlib.pyplot as plt
357 |
358 |     img = im.load("images/house.png")
359 |     img = im.torch(img).unsqueeze(0)
360 |
361 |     for transform in [
362 |         identity,
363 |         elastic,
364 |         motion_blur,
365 |         impulse_noise,
366 |         jpeg_transform,
367 |         brightness,
368 |         contrast,
369 |         blur,
370 |         pixilate,
371 |         resize,
372 |         resize_rect,
373 |         color_jitter,
374 |         crop,
375 |         scale,
376 |         rotate,
377 |         translate,
378 |         gauss,
379 |         noise,
380 |         flip,
381 |         whiteout,
382 |     ]:
383 |         transformed = im.numpy(transform.random(img).squeeze())
384 |         plt.imsave(f"output/encoded_{transform.__name__}.jpg", transformed)
385 |         time = timeit.timeit(lambda: im.numpy(transform.random(img).squeeze()), number=40)
386 |         x_min, x_max = transform.plot_range
387 |         print(f"{transform.__name__}: ({x_min} - {x_max}) {time:0.5f}")
388 |
389 |     for i in range(0, 10):
390 |         transformed = im.numpy(encoding(img).squeeze())
391
| plt.imsave(f"output/encoded_{i}.jpg", transformed) 392 | -------------------------------------------------------------------------------- /web/client/src/svg.js: -------------------------------------------------------------------------------- 1 | /*! 2 | * VERSION: 0.8.10 3 | * DATE: 2017-04-29 4 | * UPDATES AND DOCS AT: http://greensock.com 5 | * 6 | * @license Copyright (c) 2008-2017, GreenSock. All rights reserved. 7 | * MorphSVGPlugin is a Club GreenSock membership benefit; You must have a valid membership to use 8 | * this code without violating the terms of use. Visit http://greensock.com/club/ to sign up or get more details. 9 | * This work is subject to the software agreement that was issued with your membership. 10 | * 11 | * @author: Jack Doyle, jack@greensock.com 12 | */ 13 | var _gsScope="undefined"!=typeof module&&module.exports&&"undefined"!=typeof global?global:this||window;(_gsScope._gsQueue||(_gsScope._gsQueue=[])).push(function(){"use strict";var e=Math.PI/180,t=180/Math.PI,r=/[achlmqstvz]|(-?\d*\.?\d*(?:e[\-+]?\d+)?)[0-9]/gi,o=/(?:(-|-=|\+=)?\d*\.?\d*(?:e[\-+]?\d+)?)[0-9]/gi,n=/(^[#\.]|[a-y][a-z])/gi,i=/[achlmqstvz]/gi,a=/[\+\-]?\d*\.?\d+e[\+\-]?\d+/gi,s=_gsScope._gsDefine.globals.TweenLite,h="codepen",f="MorphSVGPlugin",g=String.fromCharCode(103,114,101,101,110,115,111,99,107,46,99,111,109),u=String.fromCharCode(47,114,101,113,117,105,114,101,115,45,109,101,109,98,101,114,115,104,105,112,47),d=function(e){for(var t=-1!==(window?window.location.href:"").indexOf(String.fromCharCode(103,114,101,101,110,115,111,99,107))&&-1!==e.indexOf(String.fromCharCode(108,111,99,97,108,104,111,115,116)),r=[g,String.fromCharCode(99,111,100,101,112,101,110,46,105,111),String.fromCharCode(99,111,100,101,112,101,110,46,100,101,118),String.fromCharCode(99,115,115,45,116,114,105,99,107,115,46,99,111,109),String.fromCharCode(99,100,112,110,46,105,111),String.fromCharCode(103,97,110,110,111,110,46,116,118),String.fromCharCode(99,111,100,101,99,97,110,121,111,110,46,110,101,116),String.fromCharCode(116,104,101,109,101,102,111,114,101,115,116,46,110,101,116),String.fromCharCode(99,101,114,101,98,114,97,120,46,99,111,46,117,107),String.fromCharCode(116,121,109,112,97,110,117,115,46,110,101,116),String.fromCharCode(116,119,101,101,110,109,97,120,46,99,111,109),String.fromCharCode(116,119,101,101,110,108,105,116,101,46,99,111,109),String.fromCharCode(112,108,110,107,114,46,99,111),String.fromCharCode(104,111,116,106,97,114,46,99,111,109),String.fromCharCode(119,101,98,112,97,99,107,98,105,110,46,99,111,109),String.fromCharCode(97,114,99,104,105,118,101,46,111,114,103),String.fromCharCode(106,115,102,105,100,100,108,101,46,110,101,116)],o=r.length;--o>-1;)if(-1!==e.indexOf(r[o]))return!0;return 
t&&window&&window.console&&console.log(String.fromCharCode(87,65,82,78,73,78,71,58,32,97,32,115,112,101,99,105,97,108,32,118,101,114,115,105,111,110,32,111,102,32)+f+String.fromCharCode(32,105,115,32,114,117,110,110,105,110,103,32,108,111,99,97,108,108,121,44,32,98,117,116,32,105,116,32,119,105,108,108,32,110,111,116,32,119,111,114,107,32,111,110,32,97,32,108,105,118,101,32,100,111,109,97,105,110,32,98,101,99,97,117,115,101,32,105,116,32,105,115,32,97,32,109,101,109,98,101,114,115,104,105,112,32,98,101,110,101,102,105,116,32,111,102,32,67,108,117,98,32,71,114,101,101,110,83,111,99,107,46,32,80,108,101,97,115,101,32,115,105,103,110,32,117,112,32,97,116,32,104,116,116,112,58,47,47,103,114,101,101,110,115,111,99,107,46,99,111,109,47,99,108,117,98,47,32,97,110,100,32,116,104,101,110,32,100,111,119,110,108,111,97,100,32,116,104,101,32,39,114,101,97,108,39,32,118,101,114,115,105,111,110,32,102,114,111,109,32,121,111,117,114,32,71,114,101,101,110,83,111,99,107,32,97,99,99,111,117,110,116,32,119,104,105,99,104,32,104,97,115,32,110,111,32,115,117,99,104,32,108,105,109,105,116,97,116,105,111,110,115,46,32,84,104,101,32,102,105,108,101,32,121,111,117,39,114,101,32,117,115,105,110,103,32,119,97,115,32,108,105,107,101,108,121,32,100,111,119,110,108,111,97,100,101,100,32,102,114,111,109,32,101,108,115,101,119,104,101,114,101,32,111,110,32,116,104,101,32,119,101,98,32,97,110,100,32,105,115,32,114,101,115,116,114,105,99,116,101,100,32,116,111,32,108,111,99,97,108,32,117,115,101,32,111,114,32,111,110,32,115,105,116,101,115,32,108,105,107,101,32,99,111,100,101,112,101,110,46,105,111,46)),t}(window?window.location.host:""),l=function(e){_gsScope.console&&console.log(e)},c=function(t,r){var o,n,i,a,s,h,f=Math.ceil(Math.abs(r)/90),g=0,u=[];for(t*=e,r*=e,o=r/f,n=4/3*Math.sin(o/2)/(1+Math.cos(o/2)),h=0;f>h;h++)i=t+h*o,a=Math.cos(i),s=Math.sin(i),u[g++]=a-n*s,u[g++]=s+n*a,i+=o,a=Math.cos(i),s=Math.sin(i),u[g++]=a+n*s,u[g++]=s-n*a,u[g++]=a,u[g++]=s;return u},p=function(r,o,n,i,a,s,h,f,g){if(r!==f||o!==g){n=Math.abs(n),i=Math.abs(i);var u=a%360*e,d=Math.cos(u),l=Math.sin(u),p=(r-f)/2,C=(o-g)/2,m=d*p+l*C,S=-l*p+d*C,w=n*n,x=i*i,y=m*m,_=S*S,v=y/w+_/x;v>1&&(n=Math.sqrt(v)*n,i=Math.sqrt(v)*i,w=n*n,x=i*i);var b=s===h?-1:1,M=(w*x-w*_-x*y)/(w*_+x*y);0>M&&(M=0);var A=b*Math.sqrt(M),T=A*(n*S/i),O=A*-(i*m/n),N=(r+f)/2,L=(o+g)/2,D=N+(d*T-l*O),q=L+(l*T+d*O),z=(m-T)/n,G=(S-O)/i,R=(-m-T)/n,P=(-S-O)/i,k=Math.sqrt(z*z+G*G),B=z;b=0>G?-1:1;var E=b*Math.acos(B/k)*t;k=Math.sqrt((z*z+G*G)*(R*R+P*P)),B=z*R+G*P,b=0>z*P-G*R?-1:1;var V=b*Math.acos(B/k)*t;!h&&V>0?V-=360:h&&0>V&&(V+=360),V%=360,E%=360;var F,j,Q,I=c(E,V),Y=d*n,X=l*n,W=l*-i,H=d*i,U=I.length-2;for(F=0;U>F;F+=2)j=I[F],Q=I[F+1],I[F]=j*Y+Q*W+D,I[F+1]=j*X+Q*H+q;return I[I.length-2]=f,I[I.length-1]=g,I}},C=function(e){var t,o,n,i,s,h,f,g,u,d,c,C,m,S=(e+"").replace(a,function(e){var t=+e;return 1e-4>t&&t>-1e-4?0:t}).match(r)||[],w=[],x=0,y=0,_=S.length,v=2,b=0;if(!e||!isNaN(S[0])||isNaN(S[1]))return l("ERROR: malformed path data: "+e),w;for(t=0;_>t;t++)if(m=s,isNaN(S[t])?(s=S[t].toUpperCase(),h=s!==S[t]):t--,n=+S[t+1],i=+S[t+2],h&&(n+=x,i+=y),0===t&&(g=n,u=i),"M"===s)f&&f.length<8&&(w.length-=1,v=0),x=g=n,y=u=i,f=[n,i],b+=v,v=2,w.push(f),t+=2,s="L";else if("C"===s)f||(f=[0,0]),f[v++]=n,f[v++]=i,h||(x=y=0),f[v++]=x+1*S[t+3],f[v++]=y+1*S[t+4],f[v++]=x+=1*S[t+5],f[v++]=y+=1*S[t+6],t+=6;else if("S"===s)"C"===m||"S"===m?(d=x-f[v-4],c=y-f[v-3],f[v++]=x+d,f[v++]=y+c):(f[v++]=x,f[v++]=y),f[v++]=n,f[v++]=i,h||(x=y=0),f[v++]=x+=1*S[t+3],f[v++]=y+=1*S[t+4],t+=4;else 
if("Q"===s)d=n-x,c=i-y,f[v++]=x+2*d/3,f[v++]=y+2*c/3,h||(x=y=0),x+=1*S[t+3],y+=1*S[t+4],d=n-x,c=i-y,f[v++]=x+2*d/3,f[v++]=y+2*c/3,f[v++]=x,f[v++]=y,t+=4;else if("T"===s)d=x-f[v-4],c=y-f[v-3],f[v++]=x+d,f[v++]=y+c,d=x+1.5*d-n,c=y+1.5*c-i,f[v++]=n+2*d/3,f[v++]=i+2*c/3,f[v++]=x=n,f[v++]=y=i,t+=2;else if("H"===s)i=y,f[v++]=x+(n-x)/3,f[v++]=y+(i-y)/3,f[v++]=x+2*(n-x)/3,f[v++]=y+2*(i-y)/3,f[v++]=x=n,f[v++]=i,t+=1;else if("V"===s)i=n,n=x,h&&(i+=y-x),f[v++]=n,f[v++]=y+(i-y)/3,f[v++]=n,f[v++]=y+2*(i-y)/3,f[v++]=n,f[v++]=y=i,t+=1;else if("L"===s||"Z"===s)"Z"===s&&(n=g,i=u,f.closed=!0),("L"===s||Math.abs(x-n)>.5||Math.abs(y-i)>.5)&&(f[v++]=x+(n-x)/3,f[v++]=y+(i-y)/3,f[v++]=x+2*(n-x)/3,f[v++]=y+2*(i-y)/3,f[v++]=n,f[v++]=i,"L"===s&&(t+=2)),x=n,y=i;else if("A"===s){if(C=p(x,y,1*S[t+1],1*S[t+2],1*S[t+3],1*S[t+4],1*S[t+5],(h?x:0)+1*S[t+6],(h?y:0)+1*S[t+7]))for(o=0;oc;c+=6)for(C+=w;C>m;)r=e[c-2],o=e[c-1],n=e[c],i=e[c+1],a=e[c+2],s=e[c+3],h=e[c+4],f=e[c+5],p=1/(Math.floor(C)+1),g=r+(n-r)*p,d=n+(a-n)*p,g+=(d-g)*p,d+=(a+(h-a)*p-d)*p,u=o+(i-o)*p,l=i+(s-i)*p,u+=(l-u)*p,l+=(s+(f-s)*p-l)*p,e.splice(c,4,r+(n-r)*p,o+(i-o)*p,g,u,g+(d-g)*p,u+(l-u)*p,d,l,a+(h-a)*p,s+(f-s)*p),c+=6,S+=6,C--;return e},S=function(e){var t,r,o,n,i="",a=e.length,s=100;for(r=0;a>r;r++){for(n=e[r],i+="M"+n[0]+","+n[1]+" C",t=n.length,o=2;t>o;o++)i+=(n[o++]*s|0)/s+","+(n[o++]*s|0)/s+" "+(n[o++]*s|0)/s+","+(n[o++]*s|0)/s+" "+(n[o++]*s|0)/s+","+(n[o]*s|0)/s+" ";n.closed&&(i+="z")}return i},w=function(e){for(var t=[],r=e.length-1,o=0;--r>-1;)t[o++]=e[r],t[o++]=e[r+1],r--;for(r=0;o>r;r++)e[r]=t[r];e.reversed=e.reversed?!1:!0},x=function(e){var t,r=e.length,o=0,n=0;for(t=0;r>t;t++)o+=e[t++],n+=e[t];return[o/(r/2),n/(r/2)]},y=function(e){var t,r,o,n=e.length,i=e[0],a=i,s=e[1],h=s;for(o=6;n>o;o+=6)t=e[o],r=e[o+1],t>i?i=t:a>t&&(a=t),r>s?s=r:h>r&&(h=r);return e.centerX=(i+a)/2,e.centerY=(s+h)/2,e.size=(i-a)*(s-h)},_=function(e){for(var t,r,o,n,i,a=e.length,s=e[0][0],h=s,f=e[0][1],g=f;--a>-1;)for(i=e[a],t=i.length,n=6;t>n;n+=6)r=i[n],o=i[n+1],r>s?s=r:h>r&&(h=r),o>f?f=o:g>o&&(g=o);return e.centerX=(s+h)/2,e.centerY=(f+g)/2,e.size=(s-h)*(f-g)},v=function(e,t){return t.length-e.length},b=function(e,t){var r=e.size||y(e),o=t.size||y(t);return Math.abs(o-r)<(r+o)/20?t.centerX-e.centerX||t.centerY-e.centerY:o-r},M=function(e,t){var r,o,n=e.slice(0),i=e.length,a=i-2;for(t=0|t,r=0;i>r;r++)o=(r+t)%a,e[r++]=n[o],e[r]=n[o+1]},A=function(e,t,r,o,n){var i,a,s,h,f=e.length,g=0,u=f-2;for(r*=6,a=0;f>a;a+=6)i=(a+r)%u,h=e[i]-(t[a]-o),s=e[i+1]-(t[a+1]-n),g+=Math.sqrt(s*s+h*h);return g},T=function(e,t,r){var o,n,i,a=e.length,s=x(e),h=x(t),f=h[0]-s[0],g=h[1]-s[1],u=A(e,t,0,f,g),d=0;for(i=6;a>i;i+=6)n=A(e,t,i/6,f,g),u>n&&(u=n,d=i);if(r)for(o=e.slice(0),w(o),i=6;a>i;i+=6)n=A(o,t,i/6,f,g),u>n&&(u=n,d=-i);return d/6},O=function(e,t,r){for(var o,n,i,a,s,h,f=e.length,g=99999999999,u=0,d=0;--f>-1;)for(o=e[f],h=o.length,s=0;h>s;s+=6)n=o[s]-t,i=o[s+1]-r,a=Math.sqrt(n*n+i*i),g>a&&(g=a,u=o[s],d=o[s+1]);return[u,d]},N=function(e,t,r,o,n,i){var a,s,h,f,g,u=t.length,d=0,l=Math.min(e.size||y(e),t[r].size||y(t[r]))*o,c=999999999999,p=e.centerX+n,C=e.centerY+i;for(s=r;u>s&&(a=t[s].size||y(t[s]),l<=a);s++)h=t[s].centerX-p,f=t[s].centerY-C,g=Math.sqrt(h*h+f*f),c>g&&(d=s,c=g);return g=t[d],t.splice(d,1),g},L=function(e,t,r,o){var n,i,a,s,h,f,g,u=t.length-e.length,d=u>0?t:e,c=u>0?e:t,p=0,C="complexity"===o?v:b,S="position"===o?0:"number"==typeof o?o:.8,x=c.length,A="object"==typeof 
r&&r.push?r.slice(0):[r],L="reverse"===A[0]||A[0]<0,D="log"===r;if(c[0]){if(d.length>1&&(e.sort(C),t.sort(C),f=d.size||_(d),f=c.size||_(c),f=d.centerX-c.centerX,g=d.centerY-c.centerY,C===b))for(x=0;xu&&(u=-u),d[0].length>c[0].length&&m(c[0],(d[0].length-c[0].length)/6|0),x=c.length;u>p;)s=d[x].size||y(d[x]),a=O(c,d[x].centerX,d[x].centerY),s=a[0],h=a[1],c[x++]=[s,h,s,h,s,h,s,h],c.totalPoints+=8,p++;for(x=0;xu?m(n,-u/6|0):u>0&&m(i,u/6|0),L&&!i.reversed&&w(i),r=A[x]||0===A[x]?A[x]:"auto",r&&(i.closed||Math.abs(i[0]-i[i.length-2])<.5&&Math.abs(i[1]-i[i.length-1])<.5?"auto"===r||"log"===r?(A[x]=r=T(i,n,0===x),0>r&&(L=!0,w(i),r=-r),M(i,6*r)):"reverse"!==r&&(x&&0>r&&w(i),M(i,6*(0>r?-r:r))):!L&&("auto"===r&&Math.abs(n[0]-i[0])+Math.abs(n[1]-i[1])+Math.abs(n[n.length-2]-i[i.length-2])+Math.abs(n[n.length-1]-i[i.length-1])>Math.abs(n[0]-i[i.length-2])+Math.abs(n[1]-i[i.length-1])+Math.abs(n[n.length-2]-i[0])+Math.abs(n[n.length-1]-i[1])||r%2)?(w(i),A[x]=-1,L=!0):"auto"===r?A[x]=0:"reverse"===r&&(A[x]=-1),i.closed!==n.closed&&(i.closed=n.closed=!1));return D&&l("shapeIndex:["+A.join(",")+"]"),A}},D=function(e,t,r,o){var n=C(e[0]),i=C(e[1]);L(n,i,t||0===t?t:"auto",r)&&(e[0]=S(n),e[1]=S(i),("log"===o||o===!0)&&l('precompile:["'+e[0]+'","'+e[1]+'"]'))},q=function(e,t,r){return t||r||e||0===e?function(o){D(o,e,t,r)}:D},z=function(e,t){if(!t)return e;var r,n,i,a=e.match(o)||[],s=a.length,h="";for("reverse"===t?(n=s-1,r=-2):(n=(2*(parseInt(t,10)||0)+1+100*s)%s,r=2),i=0;s>i;i+=2)h+=a[n-1]+","+a[n]+" ",n=(n+r)%s;return h},G=function(e,t){var r,o,n,i,a,s,h,f=0,g=parseFloat(e[0]),u=parseFloat(e[1]),d=g+","+u+" ",l=.999999;for(n=e.length,r=.5*t/(.5*n-1),o=0;n-2>o;o+=2){if(f+=r,s=parseFloat(e[o+2]),h=parseFloat(e[o+3]),f>l)for(a=1/(Math.floor(f)+1),i=1;f>l;)d+=(g+(s-g)*a*i).toFixed(2)+","+(u+(h-u)*a*i).toFixed(2)+" ",f--,i++;d+=s+","+h+" ",g=s,u=h}return d},R=function(e){var t=e[0].match(o)||[],r=e[1].match(o)||[],n=r.length-t.length;n>0?e[0]=G(t,n):e[1]=G(r,-n)},P=function(e){return isNaN(e)?R:function(t){R(t),t[1]=z(t[1],parseInt(e,10))}},k=function(e,t){var r,o=_gsScope.document.createElementNS("http://www.w3.org/2000/svg","path"),n=Array.prototype.slice.call(e.attributes),i=n.length;for(t=","+t+",";--i>-1;)r=n[i].nodeName.toLowerCase(),-1===t.indexOf(","+r+",")&&o.setAttributeNS(null,r,n[i].nodeValue);return o},B=function(e,t){var r,n,i,a,s,h,f,g,u,d,l,c,p,C,m,S,w,x,y,_,v,b=e.tagName.toLowerCase(),M=.552284749831;return"path"!==b&&e.getBBox?(h=k(e,"x,y,width,height,cx,cy,rx,ry,r,x1,x2,y1,y2,points"),"rect"===b?(a=+e.getAttribute("rx")||0,s=+e.getAttribute("ry")||0,n=+e.getAttribute("x")||0,i=+e.getAttribute("y")||0,d=(+e.getAttribute("width")||0)-2*a,l=(+e.getAttribute("height")||0)-2*s,a||s?(c=n+a*(1-M),p=n+a,C=p+d,m=C+a*M,S=C+a,w=i+s*(1-M),x=i+s,y=x+l,_=y+s*M,v=y+s,r="M"+S+","+x+" V"+y+" C"+[S,_,m,v,C,v,C-(C-p)/3,v,p+(C-p)/3,v,p,v,c,v,n,_,n,y,n,y-(y-x)/3,n,x+(y-x)/3,n,x,n,w,c,i,p,i,p+(C-p)/3,i,C-(C-p)/3,i,C,i,m,i,S,w,S,x].join(",")+"z"):r="M"+(n+d)+","+i+" v"+l+" h"+-d+" v"+-l+" h"+d+"z"):"circle"===b||"ellipse"===b?("circle"===b?(a=s=+e.getAttribute("r")||0,g=a*M):(a=+e.getAttribute("rx")||0,s=+e.getAttribute("ry")||0,g=s*M),n=+e.getAttribute("cx")||0,i=+e.getAttribute("cy")||0,f=a*M,r="M"+(n+a)+","+i+" C"+[n+a,i+g,n+f,i+s,n,i+s,n-f,i+s,n-a,i+g,n-a,i,n-a,i-g,n-f,i-s,n,i-s,n+f,i-s,n+a,i-g,n+a,i].join(",")+"z"):"line"===b?r="M"+(e.getAttribute("x1")||0)+","+(e.getAttribute("y1")||0)+" 
L"+(e.getAttribute("x2")||0)+","+(e.getAttribute("y2")||0):("polyline"===b||"polygon"===b)&&(u=(e.getAttribute("points")+"").match(o)||[],n=u.shift(),i=u.shift(),r="M"+n+","+i+" L"+u.join(","),"polygon"===b&&(r+=","+n+","+i+"z")),h.setAttribute("d",r),t&&e.parentNode&&(e.parentNode.insertBefore(h,e),e.parentNode.removeChild(e)),h):e},E=function(e,t,r){var i,a,h="string"==typeof e;return(!h||n.test(e)||(e.match(o)||[]).length<3)&&(i=h?s.selector(e):e&&e[0]?e:[e],i&&i[0]?(i=i[0],a=i.nodeName.toUpperCase(),t&&"PATH"!==a&&(i=B(i,!1),a="PATH"),e=i.getAttribute("PATH"===a?"d":"points")||"",i===r&&(e=i.getAttributeNS(null,"data-original")||e)):(l("WARNING: invalid morph to: "+e),e=!1)),e},V="Use MorphSVGPlugin.convertToPath(elementOrSelectorText) to convert to a path before morphing.",F=_gsScope._gsDefine.plugin({propName:"morphSVG",API:2,global:!0,version:"0.8.10",init:function(e,t,r,o){var n,a,s,c,p;return"function"!=typeof e.setAttribute?!1:d?("function"==typeof t&&(t=t(o,e)),n=e.nodeName.toUpperCase(),p="POLYLINE"===n||"POLYGON"===n,"PATH"===n||p?(a="PATH"===n?"d":"points",("string"==typeof t||t.getBBox||t[0])&&(t={shape:t}),c=E(t.shape||t.d||t.points||"","d"===a,e),p&&i.test(c)?(l("WARNING: a <"+n+"> cannot accept path data. "+V),!1):(c&&(this._target=e,e.getAttributeNS(null,"data-original")||e.setAttributeNS(null,"data-original",e.getAttribute(a)),s=this._addTween(e,"setAttribute",e.getAttribute(a)+"",c+"","morphSVG",!1,a,"object"==typeof t.precompile?function(e){e[0]=t.precompile[0],e[1]=t.precompile[1]}:"d"===a?q(t.shapeIndex,t.map||F.defaultMap,t.precompile):P(t.shapeIndex)),s&&(this._overwriteProps.push("morphSVG"),s.end=c,s.endProp=a)),d)):(l("WARNING: cannot morph a <"+n+"> SVG element. "+V),!1)):(window.location.href="http://"+g+u+"?plugin="+f+"&source="+h,!1)},set:function(e){var t;if(this._super.setRatio.call(this,e),1===e)for(t=this._firstPT;t;)t.end&&this._target.setAttribute(t.endProp,t.end),t=t._next}});F.pathFilter=D,F.pointsFilter=R,F.subdivideRawBezier=m,F.defaultMap="size",F.pathDataToRawBezier=function(e){return C(E(e,!0))},F.equalizeSegmentQuantity=L,F.convertToPath=function(e,t){"string"==typeof e&&(e=s.selector(e));for(var r=e&&0!==e.length?e.length&&e[0]&&e[0].nodeType?Array.prototype.slice.call(e,0):[e]:[],o=r.length;--o>-1;)r[o]=B(r[o],t!==!1);return r},F.pathDataToBezier=function(e,t){var r,o,n,i,a,h,f,g,u=C(E(e,!0))[0]||[],d=0;if(t=t||{},g=t.align||t.relative,i=t.matrix||[1,0,0,1,0,0],a=t.offsetX||0,h=t.offsetY||0,"relative"===g||g===!0?(a-=u[0]*i[0]+u[1]*i[2],h-=u[0]*i[1]+u[1]*i[3],d="+="):(a+=i[4],h+=i[5],g&&(g="string"==typeof g?s.selector(g):g&&g[0]?g:[g],g&&g[0]&&(f=g[0].getBBox()||{x:0,y:0},a-=f.x,h-=f.y))),r=[],n=u.length,i&&"1,0,0,1,0,0"!==i.join(","))for(o=0;n>o;o+=2)r.push({x:d+(u[o]*i[0]+u[o+1]*i[2]+a),y:d+(u[o]*i[1]+u[o+1]*i[3]+h)});else for(o=0;n>o;o+=2)r.push({x:d+(u[o]+a),y:d+(u[o+1]+h)});return r}}),_gsScope._gsDefine&&_gsScope._gsQueue.pop()(),function(e){"use strict";var t=function(){return(_gsScope.GreenSockGlobals||_gsScope)[e]};"function"==typeof define&&define.amd?define(["TweenLite"],t):"undefined"!=typeof module&&module.exports&&(require("../TweenLite.js"),module.exports=t())}("MorphSVGPlugin"); --------------------------------------------------------------------------------