├── .python-version ├── backend ├── .gitignore ├── machine_learning │ ├── RawNet3 │ │ ├── __init__.py │ │ ├── models │ │ │ ├── __init__.py │ │ │ ├── weights │ │ │ │ ├── README.md │ │ │ │ ├── model.pt │ │ │ │ └── .gitattributes │ │ │ ├── RawNetBasicBlock.py │ │ │ └── RawNet3.py │ │ ├── README.md │ │ ├── utils.py │ │ └── infererence.py │ ├── test.wav │ └── speaker_recognition.py ├── package.json ├── .DS_Store ├── convert.py ├── app.py └── utils.py ├── app ├── public │ ├── robots.txt │ ├── favicon.ico │ ├── logo192.png │ ├── logo512.png │ ├── manifest.json │ └── index.html ├── src │ ├── setupTests.js │ ├── App.test.js │ ├── index.css │ ├── reportWebVitals.js │ ├── util.js │ ├── index.js │ ├── countdown.jsx │ ├── App.css │ ├── RegisterStatus.jsx │ ├── RecordButton.jsx │ ├── logo.svg │ └── App.js ├── .gitignore ├── package.json └── README.md ├── voice_recovery_python ├── .env │ └── pyvenv.cfg ├── pyproject.toml ├── .gitignore ├── Cargo.toml ├── src │ └── lib.rs └── .github │ └── workflows │ └── CI.yml ├── hardhat ├── .gitignore ├── tsconfig.json ├── package.json ├── README.md ├── test_data │ ├── evm_public_input.json │ ├── demo_evm_public_input.json │ ├── evm_proof.hex │ └── demo_evm_proof.hex ├── scripts │ └── deploy.ts ├── hardhat.config.ts ├── contracts │ ├── DemoContractWallet.sol │ ├── VerifierWrapper.sol │ └── VoiceKeyRecover.sol └── test │ ├── DemoContractWallet.ts │ └── VoiceKeyRecover.ts ├── eth_voice_recovery ├── configs │ ├── agg_circuit.config │ └── test1_circuit.config ├── Cargo.toml └── src │ ├── fuzzy.rs │ ├── bin │ └── voice_recovery.rs │ ├── poseidon_circuit.rs │ ├── lib.rs │ └── helper.rs ├── rust-toolchain ├── Cargo.toml ├── .gitignore ├── pyproject.toml └── README.md /.python-version: -------------------------------------------------------------------------------- 1 | 3.10.5 2 | -------------------------------------------------------------------------------- /backend/.gitignore: -------------------------------------------------------------------------------- 1 | # audio save dir 2 | /storage -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /backend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hardhat-project" 3 | } 4 | -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/models/weights/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | license: mit 3 | --- 4 | -------------------------------------------------------------------------------- /app/public/robots.txt: -------------------------------------------------------------------------------- 1 | # https://www.robotstxt.org/robotstxt.html 2 | User-agent: * 3 | Disallow: 4 | -------------------------------------------------------------------------------- /backend/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SoraSuegami/voice_recovery_circuit/HEAD/backend/.DS_Store 
-------------------------------------------------------------------------------- /app/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SoraSuegami/voice_recovery_circuit/HEAD/app/public/favicon.ico -------------------------------------------------------------------------------- /app/public/logo192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SoraSuegami/voice_recovery_circuit/HEAD/app/public/logo192.png -------------------------------------------------------------------------------- /app/public/logo512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SoraSuegami/voice_recovery_circuit/HEAD/app/public/logo512.png -------------------------------------------------------------------------------- /backend/machine_learning/test.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SoraSuegami/voice_recovery_circuit/HEAD/backend/machine_learning/test.wav -------------------------------------------------------------------------------- /voice_recovery_python/.env/pyvenv.cfg: -------------------------------------------------------------------------------- 1 | home = /Users/suegamisora/opt/miniconda3/bin 2 | include-system-site-packages = false 3 | version = 3.9.12 4 | -------------------------------------------------------------------------------- /hardhat/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | .env 3 | coverage 4 | coverage.json 5 | typechain 6 | typechain-types 7 | 8 | # Hardhat files 9 | cache 10 | artifacts 11 | 12 | -------------------------------------------------------------------------------- /eth_voice_recovery/configs/agg_circuit.config: -------------------------------------------------------------------------------- 1 | {"strategy":"Simple","degree":22,"num_advice":[1],"num_lookup_advice":[1],"num_fixed":1,"lookup_bits":20,"limb_bits":88,"num_limbs":3} 2 | -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/models/weights/model.pt: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:1ab283bcdf776bfceceea18240e56a8756835b1911b04f9c44f347d47c09f90c 3 | size 65297133 4 | -------------------------------------------------------------------------------- /app/src/setupTests.js: -------------------------------------------------------------------------------- 1 | // jest-dom adds custom jest matchers for asserting on DOM nodes. 
2 | // allows you to do things like: 3 | // expect(element).toHaveTextContent(/react/i) 4 | // learn more: https://github.com/testing-library/jest-dom 5 | import '@testing-library/jest-dom'; 6 | -------------------------------------------------------------------------------- /eth_voice_recovery/configs/test1_circuit.config: -------------------------------------------------------------------------------- 1 | { 2 | "degree": 20, 3 | "num_advice": 1, 4 | "num_lookup_advice": 1, 5 | "num_fixed": 1, 6 | "lookup_bits": 12, 7 | "error_threshold": 100, 8 | "word_size": 140, 9 | "max_msg_size": 64 10 | } -------------------------------------------------------------------------------- /app/src/App.test.js: -------------------------------------------------------------------------------- 1 | import { render, screen } from '@testing-library/react'; 2 | import App from './App'; 3 | 4 | test('renders learn react link', () => { 5 | render(); 6 | const linkElement = screen.getByText(/learn react/i); 7 | expect(linkElement).toBeInTheDocument(); 8 | }); 9 | -------------------------------------------------------------------------------- /hardhat/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2020", 4 | "module": "commonjs", 5 | "esModuleInterop": true, 6 | "forceConsistentCasingInFileNames": true, 7 | "strict": true, 8 | "skipLibCheck": true, 9 | "resolveJsonModule": true 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | [channel] 2 | nightly = "2022-12-18" 3 | 4 | [target.'cfg(any(wasm32-unknown-unknown))'] 5 | rustc = "nightly-2022-12-18" 6 | 7 | [components] 8 | rustfmt = { version = "nightly-2022-12-18", features = [] } 9 | 10 | [toolchain] 11 | channel = "nightly-2022-12-18" 12 | profile = "minimal" 13 | 14 | [tool.rustup] 15 | version = "1.25.1" -------------------------------------------------------------------------------- /hardhat/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hardhat-project", 3 | "devDependencies": { 4 | "@nomicfoundation/hardhat-toolbox": "^2.0.2", 5 | "@tovarishfin/hardhat-yul": "^3.0.5", 6 | "hardhat": "^2.13.1", 7 | "hardhat-contract-sizer": "^2.8.0", 8 | "ts-node": "^10.9.1", 9 | "typescript": "^5.0.4" 10 | }, 11 | "dependencies": { 12 | "@ensdomains/ens-contracts": "^0.0.8" 13 | } 14 | } -------------------------------------------------------------------------------- /hardhat/README.md: -------------------------------------------------------------------------------- 1 | # Sample Hardhat Project 2 | 3 | This project demonstrates a basic Hardhat use case. It comes with a sample contract, a test for that contract, and a script that deploys that contract. 4 | 5 | Try running some of the following tasks: 6 | 7 | ```shell 8 | npx hardhat help 9 | npx hardhat test 10 | REPORT_GAS=true npx hardhat test 11 | npx hardhat node 12 | npx hardhat run scripts/deploy.ts 13 | ``` 14 | -------------------------------------------------------------------------------- /app/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # production 12 | /build 13 | 14 | # misc 15 | .DS_Store 16 | .env.local 17 | .env.development.local 18 | .env.test.local 19 | .env.production.local 20 | 21 | npm-debug.log* 22 | yarn-debug.log* 23 | yarn-error.log* 24 | -------------------------------------------------------------------------------- /app/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 5 | sans-serif; 6 | -webkit-font-smoothing: antialiased; 7 | -moz-osx-font-smoothing: grayscale; 8 | } 9 | 10 | code { 11 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 12 | monospace; 13 | } 14 | -------------------------------------------------------------------------------- /app/src/reportWebVitals.js: -------------------------------------------------------------------------------- 1 | const reportWebVitals = onPerfEntry => { 2 | if (onPerfEntry && onPerfEntry instanceof Function) { 3 | import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { 4 | getCLS(onPerfEntry); 5 | getFID(onPerfEntry); 6 | getFCP(onPerfEntry); 7 | getLCP(onPerfEntry); 8 | getTTFB(onPerfEntry); 9 | }); 10 | } 11 | }; 12 | 13 | export default reportWebVitals; 14 | -------------------------------------------------------------------------------- /voice_recovery_python/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["maturin>=0.14,<0.15"] 3 | build-backend = "maturin" 4 | 5 | [project] 6 | name = "voice_recovery_python" 7 | requires-python = ">=3.7" 8 | classifiers = [ 9 | "Programming Language :: Rust", 10 | "Programming Language :: Python :: Implementation :: CPython", 11 | "Programming Language :: Python :: Implementation :: PyPy", 12 | ] 13 | 14 | 15 | [tool.maturin] 16 | features = ["pyo3/extension-module"] 17 | -------------------------------------------------------------------------------- /app/src/util.js: -------------------------------------------------------------------------------- 1 | export function countOnes(hexString) { 2 | // 16進数文字列をバイト配列に変換 3 | let byteArray = []; 4 | for (let i = 2; i < hexString.length; i += 2) { 5 | byteArray.push(parseInt(hexString.substr(i, 2), 16)); 6 | } 7 | 8 | // バイト配列をビット列に変換して1の数を数える 9 | let ones = 0; 10 | for (let i = 0; i < byteArray.length; i++) { 11 | let byte = byteArray[i]; 12 | while (byte !== 0) { 13 | ones += byte & 1; 14 | byte >>= 1; 15 | } 16 | } 17 | 18 | return ones; 19 | } 20 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = ["eth_voice_recovery", "voice_recovery_python"] 4 | 5 | [patch."https://github.com/axiom-crypto/halo2-lib.git"] 6 | halo2-base = { version = "0.2.2", git = "https://github.com/SoraSuegami/halo2-lib.git" } 7 | halo2-ecc = { version = "0.2.2", git = "https://github.com/SoraSuegami/halo2-lib.git" } 8 | 9 | [patch."https://github.com/privacy-scaling-explorations/halo2.git"] 10 | halo2_proofs = { git = "https://github.com/privacy-scaling-explorations//halo2.git", tag = "v2023_02_02" } 11 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/ 14 | *.py[cod] 15 | 16 | # Added by cargo 17 | 18 | /target 19 | /Cargo.lock 20 | /build 21 | /hardhat/node_modules -------------------------------------------------------------------------------- /app/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom/client'; 3 | import './index.css'; 4 | import App from './App'; 5 | import reportWebVitals from './reportWebVitals'; 6 | 7 | const root = ReactDOM.createRoot(document.getElementById('root')); 8 | root.render( 9 | 10 | 11 | 12 | ); 13 | 14 | // If you want to start measuring performance in your app, pass a function 15 | // to log results (for example: reportWebVitals(console.log)) 16 | // or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals 17 | reportWebVitals(); 18 | -------------------------------------------------------------------------------- /app/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | }, 10 | { 11 | "src": "logo192.png", 12 | "type": "image/png", 13 | "sizes": "192x192" 14 | }, 15 | { 16 | "src": "logo512.png", 17 | "type": "image/png", 18 | "sizes": "512x512" 19 | } 20 | ], 21 | "start_url": ".", 22 | "display": "standalone", 23 | "theme_color": "#000000", 24 | "background_color": "#ffffff" 25 | } 26 | -------------------------------------------------------------------------------- /hardhat/test_data/evm_public_input.json: -------------------------------------------------------------------------------- 1 | {"commitment":"0xb577b007bd3c08abfb74aa19c01395a00788c7862913953def64406050c7322ab5a393558c081b143a325c1de06856846f80704900df75be381216f53d7f6651d2557dae5c029b3cc0fbb671f53e11a1745466b4b1d4f39cf52c9e389bfe58796013ee4031816e839041cde2dc8d212ddf488834de1de7164e7f87b0e5ac1af210374da374ca572751247a8a","commitment_hash":"0x0129f34e10ec07abdc499f18205e05a931ae6d3d0583a15272ad514f2a7523fe","message":"0xf39fd6e51aad88f6f4ce6ab8827279cfffb9226670997970c51812dc3a010c7d01b50e0d17dc79c8","feature_hash":"0x2e979115027c73c78696d80c384e230b1317f84fca89034267579c1eb9d46db0","message_hash":"0x01256258a58b041e2277ff9b654ca23be60c30d391aed1af324f46cb60e92d61"} -------------------------------------------------------------------------------- /backend/convert.py: -------------------------------------------------------------------------------- 1 | import os 2 | import io 3 | import soundfile 4 | import numpy as np 5 | from machine_learning.speaker_recognition import calc_feat_vec 6 | 7 | 8 | def bytearray_to_hex(ba) : 9 | return '0x' + ''.join(format(x, '02x') for x in ba) 10 | 11 | def hex_to_bytearray(hex_string): 12 | return bytearray.fromhex(hex_string[2:]) 13 | 14 | def feat_bytearray_from_wav_blob(wav_form_file): 15 | file_data = io.BytesIO(wav_form_file.read()) 16 | 
audio, sample_rate = soundfile.read(file_data) 17 | feat_vec = calc_feat_vec(audio, sample_rate) 18 | feat_bytearray = bytearray(np.packbits(feat_vec)) 19 | return feat_bytearray -------------------------------------------------------------------------------- /hardhat/test_data/demo_evm_public_input.json: -------------------------------------------------------------------------------- 1 | {"commitment":"0xb577b007bd3c08abfb74aa19c01395a00788c7862913953def64406050c7322ab5a393558c081b143a325c1de06856846f80704900df75be381216f53d7f6651d2557dae5c029b3cc0fbb671f53e11a1745466b4b1d4f39cf52c9e389bfe58796013ee4031816e839041cde2dc8d212ddf488834de1de7164e7f87b0e5ac1af210374da374ca572751247a8a","commitment_hash":"0x0129f34e10ec07abdc499f18205e05a931ae6d3d0583a15272ad514f2a7523fe","message":"0xf39fd6e51aad88f6f4ce6ab8827279cfffb9226670997970c51812dc3a010c7d01b50e0d17dc79c8","feature_hash":"0x2e979115027c73c78696d80c384e230b1317f84fca89034267579c1eb9d46db0","message_hash":"0x01256258a58b041e2277ff9b654ca23be60c30d391aed1af324f46cb60e92d61"} -------------------------------------------------------------------------------- /app/src/countdown.jsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect } from 'react'; 2 | 3 | export default function Countdown({sec, onCountdownEnd}) { 4 | const [seconds, setSeconds] = useState(null); 5 | 6 | useEffect(() => { 7 | setSeconds(sec) 8 | const interval = setInterval(() => { 9 | setSeconds(seconds => seconds - 1); 10 | }, 1000); 11 | return () => clearInterval(interval); 12 | // eslint-disable-next-line react-hooks/exhaustive-deps 13 | }, []); 14 | 15 | useEffect(() => { 16 | if (seconds === 0 && onCountdownEnd) { 17 | onCountdownEnd(); 18 | } 19 | }, [seconds, onCountdownEnd]); 20 | 21 | return ( 22 | 23 | {seconds} 24 | 25 | ); 26 | } -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "voice-recovery-circuit" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Your Name "] 6 | readme = "README.md" 7 | packages = [{include = "voice_recovery_circuit"}] 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.10" 11 | flask = "^2.2.3" 12 | flask-cors = "^3.0.10" 13 | numpy = "^1.24.2" 14 | torch = "^2.0.0" 15 | tqdm = "^4.65.0" 16 | soundfile = "^0.12.1" 17 | asteroid-filterbanks = "^0.4.0" 18 | scikit-learn = "^1.2.2" 19 | bchlib = "^0.14.0" 20 | 21 | 22 | [tool.poetry.group.dev.dependencies] 23 | maturin = "^0.14.17" 24 | autopep8 = "^2.0.2" 25 | 26 | [build-system] 27 | requires = ["poetry-core"] 28 | build-backend = "poetry.core.masonry.api" 29 | -------------------------------------------------------------------------------- /app/src/App.css: -------------------------------------------------------------------------------- 1 | .App { 2 | text-align: center; 3 | } 4 | 5 | .App-logo { 6 | height: 40vmin; 7 | pointer-events: none; 8 | } 9 | 10 | @media (prefers-reduced-motion: no-preference) { 11 | .App-logo { 12 | animation: App-logo-spin infinite 20s linear; 13 | } 14 | } 15 | 16 | .App-header { 17 | background-color: #282c34; 18 | min-height: 100vh; 19 | display: flex; 20 | flex-direction: column; 21 | align-items: center; 22 | justify-content: center; 23 | font-size: calc(10px + 2vmin); 24 | color: white; 25 | } 26 | 27 | .App-link { 28 | color: #61dafb; 29 | } 30 | 31 | @keyframes App-logo-spin { 32 | from { 33 | transform: rotate(0deg); 
34 | } 35 | to { 36 | transform: rotate(360deg); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /hardhat/scripts/deploy.ts: -------------------------------------------------------------------------------- 1 | import { ethers } from "hardhat"; 2 | 3 | async function main() { 4 | const accounts = await ethers.getSigners(); 5 | console.log('Account:', accounts[0].address); 6 | 7 | // Deploy VoiceKeyRecover 8 | console.log('Deploying VoiceKeyRecover...'); 9 | const voiceKeyRecover = await ethers.getContractFactory('VoiceKeyRecover'); 10 | // await voiceKeyRecover.deploy(dummyVerifierAddress, ensDeployer.address, 32, 64); 11 | const vk = await voiceKeyRecover.deploy(64); 12 | console.log('VoiceKeyRecover deployed to:', vk.address); 13 | 14 | } 15 | 16 | // We recommend this pattern to be able to use async/await everywhere 17 | // and properly handle errors. 18 | main().catch((error) => { 19 | console.error(error); 20 | process.exitCode = 1; 21 | }); -------------------------------------------------------------------------------- /hardhat/hardhat.config.ts: -------------------------------------------------------------------------------- 1 | import { HardhatUserConfig } from "hardhat/config"; 2 | import "@nomicfoundation/hardhat-toolbox"; 3 | import "@tovarishfin/hardhat-yul"; 4 | import "hardhat-contract-sizer"; 5 | 6 | const config: HardhatUserConfig = { 7 | solidity: { 8 | version: "0.8.18", 9 | settings: { 10 | optimizer: { 11 | enabled: true, 12 | runs: 200, 13 | }, 14 | }, 15 | }, 16 | defaultNetwork: "hardhat", 17 | networks: { 18 | hardhat: { 19 | chainId: 1337, 20 | }, 21 | }, 22 | contractSizer: { 23 | alphaSort: true, 24 | disambiguatePaths: false, 25 | runOnCompile: true, 26 | strict: true, 27 | }, 28 | typechain: { 29 | outDir: "typechain", 30 | target: "ethers-v5", 31 | }, 32 | }; 33 | 34 | export default config; -------------------------------------------------------------------------------- /hardhat/contracts/DemoContractWallet.sol: -------------------------------------------------------------------------------- 1 | // // SPDX-License-Identifier: MIT 2 | // pragma solidity >=0.8.4; 3 | 4 | // import "./VoiceKeyRecover.sol"; 5 | // import "hardhat/console.sol"; 6 | 7 | // contract DemoContractWallet { 8 | // VoiceKeyRecover vkr; 9 | 10 | // constructor(address _vkr) { 11 | // vkr = VoiceKeyRecover(_vkr); 12 | // vkr.registerOwner(msg.sender); 13 | // } 14 | 15 | // function getEthBalance() public view returns (uint) { 16 | // require(msg.sender == vkr.getOwner(), "only owner"); 17 | // return address(this).balance; 18 | // } 19 | 20 | // function depositEth() public payable { 21 | // require(msg.sender == vkr.getOwner(), "only owner"); 22 | // } 23 | 24 | // function transferEth(address payable to, uint amount) public { 25 | // require(msg.sender == vkr.getOwner(), "only owner"); 26 | // require(address(this).balance >= amount, "too large amount"); 27 | // to.transfer(amount); 28 | // } 29 | // } 30 | -------------------------------------------------------------------------------- /voice_recovery_python/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | .pytest_cache/ 6 | *.py[cod] 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | .venv/ 14 | env/ 15 | bin/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | 
include/ 26 | man/ 27 | venv/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | pip-selfcheck.json 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | 45 | # Translations 46 | *.mo 47 | 48 | # Mr Developer 49 | .mr.developer.cfg 50 | .project 51 | .pydevproject 52 | 53 | # Rope 54 | .ropeproject 55 | 56 | # Django stuff: 57 | *.log 58 | *.pot 59 | 60 | .DS_Store 61 | 62 | # Sphinx documentation 63 | docs/_build/ 64 | 65 | # PyCharm 66 | .idea/ 67 | 68 | # VSCode 69 | .vscode/ 70 | 71 | # Pyenv 72 | .python-version -------------------------------------------------------------------------------- /voice_recovery_python/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "voice_recovery_python" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | [lib] 8 | name = "voice_recovery_python" 9 | crate-type = ["cdylib"] 10 | 11 | [dependencies] 12 | pyo3-log = { version = "0.8.1" } 13 | pyo3 = "0.18.1" 14 | eth-voice-recovery = { path = "../eth_voice_recovery" } 15 | halo2-base = { version = "0.2.2", default-features = false, features = [ 16 | "halo2-pse", 17 | "display", 18 | ], git = "https://github.com/axiom-crypto/halo2-lib.git" } 19 | halo2-ecc = { version = "0.2.2", default-features = false, features = [ 20 | "halo2-pse", 21 | "display", 22 | ], git = "https://github.com/axiom-crypto/halo2-lib.git" } 23 | hex = "0.4.3" 24 | snark-verifier = { git = "https://github.com/zkemail/snark-verifier.git", version = "0.1.0", branch = "main", default-features = false, features = [ 25 | "loader_evm", 26 | "loader_halo2", 27 | "halo2-pse", 28 | ] } 29 | snark-verifier-sdk = { git = "https://github.com/zkemail/snark-verifier.git", version = "0.0.1", branch = "main", default-features = false, features = [ 30 | "loader_evm", 31 | "loader_halo2", 32 | "halo2-pse", 33 | ] } 34 | -------------------------------------------------------------------------------- /app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "app", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "@emotion/react": "^11.10.6", 7 | "@emotion/styled": "^11.10.6", 8 | "@mui/icons-material": "^5.11.16", 9 | "@mui/material": "^5.12.0", 10 | "@testing-library/jest-dom": "^5.16.5", 11 | "@testing-library/react": "^13.4.0", 12 | "@testing-library/user-event": "^13.5.0", 13 | "ethers": "5.7.2", 14 | "msr": "^1.3.4", 15 | "react": "^18.2.0", 16 | "react-dom": "^18.2.0", 17 | "react-scripts": "5.0.1", 18 | "url-join": "^5.0.0", 19 | "web-vitals": "^2.1.4" 20 | }, 21 | "scripts": { 22 | "start": "react-scripts start", 23 | "build": "react-scripts build", 24 | "test": "react-scripts test", 25 | "eject": "react-scripts eject" 26 | }, 27 | "eslintConfig": { 28 | "extends": [ 29 | "react-app", 30 | "react-app/jest" 31 | ] 32 | }, 33 | "browserslist": { 34 | "production": [ 35 | ">0.2%", 36 | "not dead", 37 | "not op_mini all" 38 | ], 39 | "development": [ 40 | "last 1 chrome version", 41 | "last 1 firefox version", 42 | "last 1 safari version" 43 | ] 44 | }, 45 | "devDependencies": { 46 | "eslint-plugin-react": "^7.32.2" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- 
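The `voice_recovery_python` crate shown above is compiled as a CPython extension module (`crate-type = ["cdylib"]`, `pyo3/extension-module`) and built with maturin, as described in `voice_recovery_python/pyproject.toml` and the root `README.md` (`poetry shell`, then `maturin develop`). As a minimal sketch of how the built module might be driven from Python: the function names below match `voice_recovery_python/src/lib.rs`, and the path arguments mirror the `EvmProve` defaults quoted in the root README, but this call site itself is an illustration and does not exist in the repository.

```python
# Hypothetical usage sketch of the maturin-built voice_recovery_python module.
# Function names come from voice_recovery_python/src/lib.rs; the path arguments
# mirror the EvmProve defaults listed in the root README.
import voice_recovery_python as vrp

# Poseidon hash of a 0x-prefixed, even-length hex string; returns a hex string.
digest = vrp.poseidon_hash("0x0123456789abcdef")
print(digest)

# Generate an EVM-verifiable proof. All seven arguments are file or directory
# paths; the setup artifacts are assumed to exist already (GenParams ->
# GenKeys -> GenEvmVerifier, per the root README).
vrp.evm_prove(
    "./build/params",                                     # params_dir
    "./eth_voice_recovery/configs/test1_circuit.config",  # app_circuit_config
    "./eth_voice_recovery/configs/agg_circuit.config",    # agg_circuit_config
    "./build/pks",                                        # pk_dir
    "./build/input.json",                                 # input_path
    "./build/evm_proof.hex",                              # proof_path
    "./build/evm_public_input.json",                      # public_input_path
)
```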
/backend/machine_learning/RawNet3/README.md: -------------------------------------------------------------------------------- 1 | ## Usage 2 | 3 | RawNet3 is hosted via two repositories. 4 | Inference of any utterance with 16k 16bit mono format and Vox1-O benchmark is 5 | supported in this repository. 6 | 7 | Training recipe, on the other hand, will be supported in 8 | https://github.com/clovaai/voxceleb_trainer. 9 | 10 | Model weight parameters are served by huggingface at 11 | https://huggingface.co/jungjee/RawNet3, which is used as a submodule here 12 | 13 | To download the model, run: 14 | `git submodule update --init --recursive` 15 | 16 | ### Single utterance inference 17 | Run: `python inference.py --inference_utterance --input {YOUR_INPUT_FILE}` 18 | 19 | Optionally, `--out_dir` can be set to direct where to save the extracted speaker embedding. (default: `./out.npy`) 20 | 21 | ### Benchmark on the Vox1-O evaluation protocol 22 | Run: `python inference.py --vox1_o_benchmark --DB_dir` 23 | 24 | Note that `DB_dir` should direct the directory of VoxCeleb1 dataset. 25 | For example, if `DB_dir`="/home/abc/db/VoxCeleb1", 26 | VoxCeleb1 folder is expected to have 1,251 folders inside which corresponds to 1,251 speakers of the VoxCeleb1 dataset. 27 | 28 | If you successfully run the benchmark, you will get: 29 | `Vox1-O benchmark Finished. EER: 0.8932, minDCF:0.06690`. 30 | -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/models/weights/.gitattributes: -------------------------------------------------------------------------------- 1 | *.7z filter=lfs diff=lfs merge=lfs -text 2 | *.arrow filter=lfs diff=lfs merge=lfs -text 3 | *.bin filter=lfs diff=lfs merge=lfs -text 4 | *.bin.* filter=lfs diff=lfs merge=lfs -text 5 | *.bz2 filter=lfs diff=lfs merge=lfs -text 6 | *.ftz filter=lfs diff=lfs merge=lfs -text 7 | *.gz filter=lfs diff=lfs merge=lfs -text 8 | *.h5 filter=lfs diff=lfs merge=lfs -text 9 | *.joblib filter=lfs diff=lfs merge=lfs -text 10 | *.lfs.* filter=lfs diff=lfs merge=lfs -text 11 | *.model filter=lfs diff=lfs merge=lfs -text 12 | *.msgpack filter=lfs diff=lfs merge=lfs -text 13 | *.onnx filter=lfs diff=lfs merge=lfs -text 14 | *.ot filter=lfs diff=lfs merge=lfs -text 15 | *.parquet filter=lfs diff=lfs merge=lfs -text 16 | *.pb filter=lfs diff=lfs merge=lfs -text 17 | *.pt filter=lfs diff=lfs merge=lfs -text 18 | *.pth filter=lfs diff=lfs merge=lfs -text 19 | *.rar filter=lfs diff=lfs merge=lfs -text 20 | saved_model/**/* filter=lfs diff=lfs merge=lfs -text 21 | *.tar.* filter=lfs diff=lfs merge=lfs -text 22 | *.tflite filter=lfs diff=lfs merge=lfs -text 23 | *.tgz filter=lfs diff=lfs merge=lfs -text 24 | *.xz filter=lfs diff=lfs merge=lfs -text 25 | *.zip filter=lfs diff=lfs merge=lfs -text 26 | *.zstandard filter=lfs diff=lfs merge=lfs -text 27 | *tfevents* filter=lfs diff=lfs merge=lfs -text 28 | -------------------------------------------------------------------------------- /hardhat/contracts/VerifierWrapper.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity ^0.8.0; 3 | 4 | import "./Verifier.sol"; 5 | 6 | contract VerifierWrapper is Verifier { 7 | uint maxMsgSize; 8 | 9 | constructor(uint _maxMsgSize) { 10 | maxMsgSize = _maxMsgSize; 11 | } 12 | 13 | function verify( 14 | bytes32 commitmentHash, 15 | bytes32 featureHash, 16 | bytes32 messageHash, 17 | bytes memory message, 18 | bytes memory proof 19 | ) public view 
returns (bool) { 20 | uint256[] memory pubInputs = new uint256[](3 + maxMsgSize); 21 | pubInputs[0] = uint256(commitmentHash); 22 | pubInputs[1] = uint256(featureHash); 23 | pubInputs[2] = uint256(messageHash); 24 | bytes memory messageExt = abi.encodePacked( 25 | message, 26 | new bytes(maxMsgSize - message.length) 27 | ); 28 | for (uint i = 0; i < messageExt.length / 16; i++) { 29 | uint coeff = 1; 30 | for (uint j = 0; j < 16; j++) { 31 | pubInputs[3 + i] += 32 | coeff * 33 | uint256(uint8(messageExt[16 * i + j])); 34 | coeff = coeff << 8; 35 | } 36 | } 37 | return Verifier.verify(pubInputs, proof); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /app/src/RegisterStatus.jsx: -------------------------------------------------------------------------------- 1 | import { Typography } from "@mui/material"; 2 | import React from "react"; 3 | 4 | export default function RegisterStatus({ registerStatus }) { 5 | return ( 6 |
7 | {registerStatus !== null && 0 <= registerStatus && ( 8 |
9 | 10 | Creating commitment... 11 | {registerStatus >= 2 && ( 12 | 13 | Success 14 | 15 | )} 16 | {registerStatus === 1 && ( 17 | 18 | Error 19 | 20 | )} 21 | 22 |
23 | )} 24 | {3 <= registerStatus && ( 25 |
26 | 27 | Sending commitment... 28 | {registerStatus === 5 && ( 29 | 30 | Success 31 | 32 | )} 33 | {registerStatus === 4 && ( 34 | 35 | Error 36 | 37 | )} 38 | 39 |
40 | )} 41 |
42 | ); 43 | } 44 | -------------------------------------------------------------------------------- /backend/machine_learning/speaker_recognition.py: -------------------------------------------------------------------------------- 1 | # 音声入力に対して特徴量を計算する関数を定義する 2 | from .RawNet3.models import RawNet3 3 | 4 | from .RawNet3.infererence import extract_speaker_embd 5 | import torch 6 | import numpy as np 7 | import soundfile 8 | 9 | def calc_feat_vec(audio, sample_rate): 10 | """ 11 | 音声入力に対して特徴量を計算する関数 12 | 13 | Parameters 14 | ---------- 15 | input_wav_path : string 16 | 音声データのファイルパスまたはwavファイルをnumpy.arrayに変換したもの。shapeは(10,48000)。 17 | """ 18 | # 1. 変数の用意 19 | # model.ptのパス 20 | path_pt = "machine_learning/RawNet3/models/weights/model.pt" 21 | 22 | n_segments = 10 23 | gpu = False 24 | 25 | # 2. 音声データを読み込む 26 | torch_model = RawNet3.MainModel( 27 | encoder_type="ECA", 28 | nOut=256, 29 | out_bn=False, 30 | sinc_stride=10, 31 | log_sinc=True, 32 | norm_sinc="mean", 33 | grad_mult=1) 34 | torch_model.load_state_dict(torch.load(path_pt, map_location=lambda storage, loc: storage)["model"]) 35 | torch_model.eval() 36 | 37 | # 3. 音声データを特徴量に変換する 38 | output = extract_speaker_embd( 39 | torch_model, 40 | audio, 41 | sample_rate, 42 | n_samples=48000, 43 | n_segments=n_segments, 44 | gpu=gpu, 45 | ).mean(0) 46 | feat_vec = output 47 | 48 | binary_vec = np.where(feat_vec > 0, 1, 0) 49 | 50 | # 4. 特徴量を返す 51 | return binary_vec -------------------------------------------------------------------------------- /app/src/RecordButton.jsx: -------------------------------------------------------------------------------- 1 | import React, { useRef } from "react"; 2 | import MicIcon from "@mui/icons-material/Mic"; 3 | import IconButton from "@mui/material/IconButton"; 4 | import MediaStreamRecorder from "msr"; 5 | 6 | const record_duration =5000; 7 | 8 | // eslint-disable-next-line react/prop-types 9 | function RecordButton({ sendRecording, disabled, setDisabled, ...props }) { 10 | const mediaRecorder = useRef(null); 11 | 12 | const startRecording = () => { 13 | navigator.mediaDevices 14 | .getUserMedia({ audio: true }) 15 | .then((stream) => { 16 | mediaRecorder.current = new MediaStreamRecorder(stream); 17 | mediaRecorder.current.mimeType = "audio/wav" 18 | mediaRecorder.current.audioChannels = 1; 19 | mediaRecorder.current.sampleRate = 16000; 20 | mediaRecorder.current.start(record_duration); // 5秒ごとにデータを取得する 21 | setDisabled(true); 22 | 23 | mediaRecorder.current.ondataavailable = (blob) => { 24 | // 5秒経過したら録音を停止する 25 | stopRecording(); 26 | sendRecording(blob); 27 | }; 28 | }) 29 | .catch((err) => { 30 | console.log("録音が開始できませんでした: ", err); 31 | }); 32 | }; 33 | 34 | const stopRecording = () => { 35 | if (mediaRecorder.current) { 36 | mediaRecorder.current.stop(); 37 | } 38 | }; 39 | 40 | return ( 41 | 42 | 48 | 49 | ); 50 | } 51 | 52 | export default RecordButton; 53 | -------------------------------------------------------------------------------- /app/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 12 | 13 | 17 | 18 | 27 | React App 28 | 29 | 30 | 31 |
32 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /eth_voice_recovery/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eth-voice-recovery" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | poseidon = { version = "0.2.0", git = "https://github.com/privacy-scaling-explorations/poseidon", rev = "2fb4a23" } 10 | num-bigint = { version = "0.4", features = ["rand"] } 11 | sha2 = "0.10.6" 12 | rand = "0.8.5" 13 | halo2-base = { version = "0.2.2", default-features = false, features = [ 14 | "halo2-pse", 15 | "display", 16 | ], git = "https://github.com/axiom-crypto/halo2-lib.git" } 17 | halo2-ecc = { version = "0.2.2", default-features = false, features = [ 18 | "halo2-pse", 19 | "display", 20 | ], git = "https://github.com/axiom-crypto/halo2-lib.git" } 21 | num-traits = "0.2.15" 22 | snark-verifier = { git = "https://github.com/zkemail/snark-verifier.git", version = "0.1.0", branch = "main", default-features = false, features = [ 23 | "loader_evm", 24 | "loader_halo2", 25 | "halo2-pse", 26 | ] } 27 | snark-verifier-sdk = { git = "https://github.com/zkemail/snark-verifier.git", version = "0.0.1", branch = "main", default-features = false, features = [ 28 | "loader_evm", 29 | "loader_halo2", 30 | "halo2-pse", 31 | ] } 32 | ark-std = { version = "0.4.0", features = ["print-trace"] } 33 | ethereum-types = { version = "0.14", default-features = false, features = [ 34 | "std", 35 | ] } 36 | itertools = "0.10.3" 37 | clap = { version = "4.2.1", features = ["derive"] } 38 | serde_json = "1.0.95" 39 | serde = { version = "1.0.159", features = ["derive"] } 40 | hex = "0.4.3" 41 | regex = "1" 42 | # halo2-dynamic-sha256 = { git = "https://github.com/zkemail/halo2-dynamic-sha256.git", version = "0.1.0", branch = "main" } 43 | 44 | # tokio = { version = "1.16", features = [ 45 | # "net", 46 | # "io-util", 47 | # "time", 48 | # "rt-multi-thread", 49 | # "macros", 50 | # ] } 51 | 52 | [dev-dependencies] 53 | temp-env = "0.3.3" 54 | 55 | [[bin]] 56 | name = "voice_recovery" 57 | test = false 58 | bench = false 59 | -------------------------------------------------------------------------------- /voice_recovery_python/src/lib.rs: -------------------------------------------------------------------------------- 1 | use eth_voice_recovery; 2 | use halo2_base::halo2_proofs::circuit::{AssignedCell, Cell, Region, SimpleFloorPlanner, Value}; 3 | use halo2_base::halo2_proofs::halo2curves::bn256::Fr; 4 | use halo2_base::halo2_proofs::halo2curves::FieldExt; 5 | use halo2_base::halo2_proofs::plonk::{Circuit, Column, ConstraintSystem, Instance}; 6 | use halo2_base::halo2_proofs::{circuit::Layouter, plonk::Error}; 7 | use halo2_base::{ 8 | gates::{flex_gate::FlexGateConfig, range::RangeConfig, GateInstructions}, 9 | utils::PrimeField, 10 | Context, 11 | }; 12 | use halo2_base::{ 13 | gates::{range::RangeStrategy::Vertical, RangeInstructions}, 14 | ContextParams, SKIP_FIRST_PASS, 15 | }; 16 | use halo2_base::{AssignedValue, QuantumCell}; 17 | use hex; 18 | use pyo3::exceptions::{PyIOError, PyRuntimeError, PyValueError}; 19 | use pyo3::prelude::*; 20 | use pyo3::wrap_pyfunction; 21 | use snark_verifier_sdk::evm::encode_calldata; 22 | 23 | #[pyfunction] 24 | pub fn poseidon_hash(input_hex: String) -> PyResult { 25 | let input = hex::decode(&input_hex[2..]).expect("invalid hex input"); 26 
| let out_fr = eth_voice_recovery::poseidon_circuit::poseidon_hash(&input); 27 | let out_hex = format!( 28 | "0x{}", 29 | hex::encode(encode_calldata(&[vec![out_fr]], &[])).as_str(), 30 | ); 31 | Ok(out_hex) 32 | } 33 | 34 | #[pyfunction] 35 | pub fn evm_prove( 36 | params_dir: String, 37 | app_circuit_config: String, 38 | agg_circuit_config: String, 39 | pk_dir: String, 40 | input_path: String, 41 | proof_path: String, 42 | public_input_path: String, 43 | ) -> PyResult<()> { 44 | eth_voice_recovery::helper::evm_prove( 45 | ¶ms_dir, 46 | &app_circuit_config, 47 | &agg_circuit_config, 48 | &pk_dir, 49 | &input_path, 50 | &proof_path, 51 | &public_input_path, 52 | ) 53 | .unwrap(); 54 | Ok(()) 55 | } 56 | 57 | #[pymodule] 58 | fn voice_recovery_python(_py: Python<'_>, m: &PyModule) -> PyResult<()> { 59 | pyo3_log::init(); 60 | m.add_function(wrap_pyfunction!(poseidon_hash, m)?)?; 61 | m.add_function(wrap_pyfunction!(evm_prove, m)?)?; 62 | Ok(()) 63 | } 64 | -------------------------------------------------------------------------------- /app/src/logo.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Follow this document to call rust from python 2 | https://pyo3.rs/v0.18.3/getting_started 3 | 4 | How to rebuild 5 | ``` 6 | poetry install 7 | poetry shell 8 | cd voice_recovery_python 9 | maturin develop 10 | ``` 11 | 12 | Setup 13 | 14 | GenParams -> GenKeys -> GenEvmVerifier 15 | 16 | 見るべきファイルはこちらを参考に 17 | 18 | ``` 19 | EvmProve { 20 | /// setup parameter file 21 | #[arg(short, long, default_value = "./build/params")] 22 | params_dir: String, 23 | /// circuit configure file 24 | #[arg( 25 | short, 26 | long, 27 | default_value = "./eth_voice_recovery/configs/test1_circuit.config" 28 | )] 29 | app_circuit_config: String, 30 | #[arg( 31 | short, 32 | long, 33 | default_value = "./eth_voice_recovery/configs/agg_circuit.config" 34 | )] 35 | agg_circuit_config: String, 36 | /// proving key file path 37 | #[arg(long, default_value = "./build/pks")] 38 | pk_dir: String, 39 | /// input file path 40 | #[arg(long, default_value = "./build/input.json")] 41 | input_path: String, 42 | /// proof file path 43 | #[arg(long, default_value = "./build/evm_proof.hex")] 44 | proof_path: String, 45 | /// public input file path 46 | #[arg(long, default_value = "./build/evm_public_input.json")] 47 | public_input_path: String, 48 | }, 49 | ``` 50 | 51 | Public Inputの中身 52 | ``` 53 | pub struct DefaultVoiceRecoverCircuitPublicInput { 54 | commitment: String, 55 | commitment_hash: String, 56 | message: String, 57 | feature_hash: String, 58 | message_hash: String, 59 | // acc: String, 60 | } 61 | ``` 62 | 63 | Secret Inputの中身 64 | ``` 65 | { 66 | "features": "0x52ad6993e8ed48b87023fa32cb416c49b4e0b87c2c63a8ea8e68818c776d9e7f8efc64a1f3b96e806ec2bc9fb4301ce7c9b47ac29ca143d25ca3b082b8f76c207dcaa671ca4df240d277ffde7d4d37887266e923cc51910039f485823dba94dc02da01bca68bbb7b79695b693341eca4bbd955714e6155d2eb641762a307c2c7e0c021fabb817da4f720f9f9", 67 | "errors": "0x02000401080080007000040030000110000000000090090010024000000c08008000000000040000000080002000000001c044001800000000080100440100000a00000000004000000000001298a400000049800004900080400000000000000031080110004800200801608410040020000000801000018212000022100000002401160380200080010000", 68 | "commitment": 
"0xb577b007bd3c08abfb74aa19c01395a00788c7862913953def64406050c7322ab5a393558c081b143a325c1de06856846f80704900df75be381216f53d7f6651d2557dae5c029b3cc0fbb671f53e11a1745466b4b1d4f39cf52c9e389bfe58796013ee4031816e839041cde2dc8d212ddf488834de1de7164e7f87b0e5ac1af210374da374ca572751247a8a", 69 | "message": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb9226670997970c51812dc3a010c7d01b50e0d17dc79c8" 70 | } 71 | ``` -------------------------------------------------------------------------------- /backend/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, jsonify, request 2 | from flask_cors import CORS 3 | from utils import fuzzy_commitment, recover, my_hash, generate_proof 4 | from convert import bytearray_to_hex, hex_to_bytearray, feat_bytearray_from_wav_blob 5 | import numpy as np 6 | import json 7 | 8 | app = Flask(__name__) 9 | cors = CORS(app, resources={r"/api/*": {"origins": "*"}}) 10 | 11 | @app.route('/api/data') 12 | def get_data(): 13 | data = {'name': 'John', 'age': 30} 14 | return jsonify(data) 15 | 16 | 17 | @app.route('/api/upload-wav', methods=['POST']) 18 | def upload(): 19 | file = request.files['file'] 20 | 21 | file.save('storage/recorded.wav') 22 | 23 | return {'message': 'File uploaded successfully'} 24 | 25 | """ 26 | 特徴量ベクトルを計算し、commitment h(W),cを返す 27 | """ 28 | @app.route('/api/feature-vector', methods=['POST']) 29 | def feat_vec(): 30 | form_file = request.files['file'] 31 | feat = feat_bytearray_from_wav_blob(form_file) 32 | print(bytearray_to_hex(feat)) 33 | 34 | feat_xor_ecc, hash_ecc = fuzzy_commitment(feat) 35 | hash_feat_xor_ecc = my_hash(feat_xor_ecc) 36 | 37 | ret = { 38 | "feat" : bytearray_to_hex(feat), 39 | "hash_ecc" : bytearray_to_hex(hash_ecc), 40 | "hash_feat_xor_ecc" : bytearray_to_hex(hash_feat_xor_ecc), 41 | "feat_xor_ecc": bytearray_to_hex(feat_xor_ecc), 42 | } 43 | print(ret) 44 | 45 | return jsonify(ret) 46 | 47 | """ 48 | 特徴量ベクトルを計算し、commitment h(W),cを返す 49 | """ 50 | @app.route('/api/gen-proof', methods=['POST']) 51 | def gen_proof(): 52 | form_file = request.files['file'] 53 | new_feat = feat_bytearray_from_wav_blob(form_file) 54 | print(bytearray_to_hex(new_feat)) 55 | 56 | """ 57 | Request形式 58 | { 59 | "hash_ecc" : hex, 60 | "feat_xor_ecc": hex, 61 | "msg": hex, 62 | } 63 | """ 64 | json_data = json.loads(request.form['jsonData']) 65 | print(json_data) 66 | 67 | hash_ecc = hex_to_bytearray(json_data["hash_ecc"]) 68 | feat_xor_ecc = hex_to_bytearray(json_data["feat_xor_ecc"]) 69 | msg = hex_to_bytearray(json_data["msg"]) 70 | 71 | code_error, hash_ecc_msg, recovered_hash_ecc = recover(new_feat, feat_xor_ecc, hash_ecc, msg) 72 | proof_succeed, proof_bin, session_id = generate_proof(new_feat, code_error, feat_xor_ecc, msg) 73 | 74 | ret = { 75 | "new_feat": bytearray_to_hex(new_feat), 76 | "recovered_hash_ecc" : bytearray_to_hex(recovered_hash_ecc), 77 | "hash_ecc_msg": bytearray_to_hex(hash_ecc_msg), 78 | "code_error": bytearray_to_hex(code_error), 79 | "proof": bytearray_to_hex(proof_bin), 80 | "session_id": session_id, 81 | } 82 | print(ret) 83 | 84 | return jsonify(ret) 85 | 86 | if __name__ == '__main__': 87 | app.run() -------------------------------------------------------------------------------- /hardhat/test/DemoContractWallet.ts: -------------------------------------------------------------------------------- 1 | // import { ethers } from "hardhat"; 2 | // import { Signer, Contract } from "ethers"; 3 | // import { expect } from "chai"; 4 | // import * as fs from "fs/promises"; 5 
| // import { AbiCoder } from "@ethersproject/abi"; 6 | 7 | 8 | // describe("DemoContractWallet", function () { 9 | // let accounts: Signer[]; 10 | // let VoiceKeyRecover: Contract; 11 | // let DemoContractWallet: Contract; 12 | 13 | // beforeEach(async function () { 14 | // const maxMsgSize = 64; 15 | 16 | // const VoiceFactory = await ethers.getContractFactory("VoiceKeyRecover"); 17 | // VoiceKeyRecover = await VoiceFactory.deploy(maxMsgSize); 18 | // await VoiceKeyRecover.deployed(); 19 | // let vkrAddr = VoiceKeyRecover.address; 20 | // const DemoFactory = await ethers.getContractFactory("DemoContractWallet"); 21 | // DemoContractWallet = await DemoFactory.deploy(vkrAddr); 22 | // await DemoContractWallet.deployed(); 23 | // }); 24 | 25 | // it("should register the voice and recover", async function () { 26 | // const user0 = (await ethers.getSigners())[0]; 27 | // const testValue = 20000; 28 | // await DemoContractWallet.connect(user0).depositEth({ value: testValue }) 29 | // expect(await DemoContractWallet.connect(user0).getEthBalance()).to.equal(testValue); 30 | // expect(await DemoContractWallet.provider.getBalance(DemoContractWallet.address)).to.equal(testValue); 31 | // console.log(await DemoContractWallet.provider.getBalance(DemoContractWallet.address)); 32 | // const transferValue = 100; 33 | // await DemoContractWallet.connect(user0).transferEth(user0.address, transferValue); 34 | // expect(await DemoContractWallet.connect(user0).getEthBalance()).to.equal(testValue - transferValue); 35 | // const input = JSON.parse(await fs.readFile("./test_data/demo_evm_public_input.json", "utf-8")); 36 | // await VoiceKeyRecover.connect(user0).registerVoiceOfWallet(DemoContractWallet.address, input.feature_hash, input.commitment_hash, input.commitment); 37 | // const proof = await fs.readFile("./test_data/demo_evm_proof.hex", "utf-8"); 38 | // const user1 = (await ethers.getSigners())[1]; 39 | // console.log(await VoiceKeyRecover.connect(user1).getMessageOfRecover(DemoContractWallet.address)); 40 | // const tx = await VoiceKeyRecover.connect(user1).recover(DemoContractWallet.address, input.message_hash, proof); 41 | // const receipt = await tx.wait(); 42 | // const gasCost = receipt.gasUsed; 43 | // console.log(`gas ${gasCost}`); 44 | // expect(await DemoContractWallet.connect(user1).getEthBalance()).to.equal(testValue - transferValue); 45 | // await DemoContractWallet.connect(user1).transferEth(user0.address, transferValue); 46 | // expect(await DemoContractWallet.connect(user1).getEthBalance()).to.equal(testValue - 2 * transferValue); 47 | // }); 48 | // }); -------------------------------------------------------------------------------- /hardhat/test_data/evm_proof.hex: -------------------------------------------------------------------------------- 1 | 
0x262f5514f250fbae63c592dcf462218598600e9a422a60e7fbecfbf354929b5805c2bd8c37484996128c6099ad5a3b7aee240b1564f6349b3fa7aeabe0b9fe091ee449d29b7b116c08420b3f905c52dad79efc4dd49152aeeca7f4363fba814c075fdf7f443345060644bb885ff423d35d227a4c20f8b163ffb6a54804cddb8c1c8318251fc5cd1e82ec2f602ec76924cd5647ecabe7f3fdd031973dede8ab640074467a3eeb8b7a841d35276cb23582d9e8bfa54d7116ad0d864a0fa38cfabf065ca46179934e56bc1b0e4976ac093dae32519613609f6fbb4340569ad3be3f2fc3816e092fff487d7013b141b38dc1ffaaa327ddad0800f94432fa86f71d5101aaedfbd7cc917f023659ba8ce6de60cbbfadf55ab1a7cd6d85e1cb0031e5ef12867ebd0fd57a4d7a9eed341de28adeee025d2533bd4a3a0bcda9351c0bf23320532471b24c5df3f5e750283223106e9bd95b03aaad567647b1d6398a84a8572aed73db2a1b2d6529e697aa39a923a7bcc9c62209a431458013367899a917d826094ade799a1c60238b371d3b42a4fab9b79787c84c0ec91730e3f589c0578e13e92913a21c56713e6bcaad2a310fdfc61e3b9eaec0b43b2434301cf8c3816a2e21485f7bc075146e38f42aaf090b65b5f16cd467844644a6f7e13cc3267fd10b2d4256371e0c98fd037429cd3709bb5bde4374a30f1eb98a325516145aaf752b0c670a53339b36b8a0620b516311d69927f550ddb7f2552049504a9693bac8268f1da8944df5ba245c08588c1d8aa002ae575ce5fc8692a5be1535f58b43001a5cfb039779df35990a0d73c91107b76cccb13571caf1a26af124b2692afbab0645410f6654de781ef8b2076ef044e2bfe463a8abb7279a502b834f3ef0656d067bf6df0379003b6bd97042d07dfc9074969fc84a928fccb430d957fa6b47c200f396ba8bb53f1ad7c7bf128b07af8e9a146f2cd47037cc40de6eaf7aab375d2055746faf4996cdb9c433873347482fec7351c9118e5654688ecbdb1300bf55064d3a90edfb7f08cd59a7d950b3333794acd97928030099dcd1f9dd26216385013db23aa8785fd0bfd274ec5f6ba47a5591dcc888719e74917ad5b77f5d92651b464c63da0c3b7fc2993fd1ae6630f49ea7883c31d04240acf3ba1d594eef2c1ca1cb5fc9c2909135e46b8bae6ab76cee8acb49d8e76e07f2694e802da8aba6234a62f0796a5554a36b4f1df631d97bbbd94a8477fd5a910a5bd7c67d31c9fa275f833b9b87db3a6c44b2ed1d780b94bafd34974834d5b7c9f260eed4bad13c2cf6129819934812f4d36ebb541401c20fb6f8954b168ec2c12339c1e6b80db0091299d95b153a65aa1863408f35684256519bc1c89f0c1c1faf7e86d40da04906f74cac0ea21d6b43a58c8280fda292303d7076efce087c8839153839661bf42440c0d8cb231cb7179cd387aa6a570d382990f53643c4c2de929b615f8b95f4117cd75129595033044ce80dee258d2e6984919f6d21535c9995d68e8d2620811e3fec36b445b3923947b8214ea1759437aaf60232c1dfdcca60293cd144c07c19423a3d15445483d63b4a626c8e7f02cb857e22a8d20ac08ebbb6417a2c629c1c81806bb729e5930dd18a9831ac601a23e4c5297d96ea83ddd44d1366f91d281aa45e73b0a10f4e4ab3083bfb9d3e2b2e47543b80c05b69dcdd357414ae2dba287f09a7ae4cfcb35871b572a3da0379389be0a8d90e10fd376adc7788a3f24406910e482e678c7a8bd59a79a6a449d87bcf4b37a76f18b57d7cde0b4e6bf38815f311f10a38f8fdffde69c87704ac6812a22596d7856ef5fef32c792de0f40b281c1dfb54cceaf851eab60ef3a6bdd8acd8a3f0ee992309720d9491960097ea257fbd9b5f7d5b0b61240c650b67c0a249d69b406c4a26aa87f10f07db519f5d27fe42bb61bb957217813450d2f861ff982b03eb9c480582b7810375268d58ad2456c4ebe4f63311c7f2cd4f9f611d730f580558e36eefec5a2ec0fbda184fa91f5b87a3c9d8d677798dc18f177e1bcdc39277fef6b495d219edbb239c2c4f4008ff3ae88f00de955f881d898bb7bedad59c28c2c7ac17c5bb689f7e2728d6411f293e82018b68f7d17dfd9c40315169532acbf748890afbc0177e4b01a8cc3717ce7a6a4b7ce560c06dbb5d8e18795508ef75b7fb89d4b158fa7357a04ace9f -------------------------------------------------------------------------------- /hardhat/test_data/demo_evm_proof.hex: -------------------------------------------------------------------------------- 1 | 
0x262f5514f250fbae63c592dcf462218598600e9a422a60e7fbecfbf354929b5805c2bd8c37484996128c6099ad5a3b7aee240b1564f6349b3fa7aeabe0b9fe091ee449d29b7b116c08420b3f905c52dad79efc4dd49152aeeca7f4363fba814c075fdf7f443345060644bb885ff423d35d227a4c20f8b163ffb6a54804cddb8c1c8318251fc5cd1e82ec2f602ec76924cd5647ecabe7f3fdd031973dede8ab640074467a3eeb8b7a841d35276cb23582d9e8bfa54d7116ad0d864a0fa38cfabf065ca46179934e56bc1b0e4976ac093dae32519613609f6fbb4340569ad3be3f2fc3816e092fff487d7013b141b38dc1ffaaa327ddad0800f94432fa86f71d5101aaedfbd7cc917f023659ba8ce6de60cbbfadf55ab1a7cd6d85e1cb0031e5ef12867ebd0fd57a4d7a9eed341de28adeee025d2533bd4a3a0bcda9351c0bf23320532471b24c5df3f5e750283223106e9bd95b03aaad567647b1d6398a84a8572aed73db2a1b2d6529e697aa39a923a7bcc9c62209a431458013367899a917d826094ade799a1c60238b371d3b42a4fab9b79787c84c0ec91730e3f589c0578e13e92913a21c56713e6bcaad2a310fdfc61e3b9eaec0b43b2434301cf8c3816a2e21485f7bc075146e38f42aaf090b65b5f16cd467844644a6f7e13cc3267fd10b2d4256371e0c98fd037429cd3709bb5bde4374a30f1eb98a325516145aaf752b0c670a53339b36b8a0620b516311d69927f550ddb7f2552049504a9693bac8268f1da8944df5ba245c08588c1d8aa002ae575ce5fc8692a5be1535f58b43001a5cfb039779df35990a0d73c91107b76cccb13571caf1a26af124b2692afbab0645410f6654de781ef8b2076ef044e2bfe463a8abb7279a502b834f3ef0656d067bf6df0379003b6bd97042d07dfc9074969fc84a928fccb430d957fa6b47c200f396ba8bb53f1ad7c7bf128b07af8e9a146f2cd47037cc40de6eaf7aab375d2055746faf4996cdb9c433873347482fec7351c9118e5654688ecbdb1300bf55064d3a90edfb7f08cd59a7d950b3333794acd97928030099dcd1f9dd26216385013db23aa8785fd0bfd274ec5f6ba47a5591dcc888719e74917ad5b77f5d92651b464c63da0c3b7fc2993fd1ae6630f49ea7883c31d04240acf3ba1d594eef2c1ca1cb5fc9c2909135e46b8bae6ab76cee8acb49d8e76e07f2694e802da8aba6234a62f0796a5554a36b4f1df631d97bbbd94a8477fd5a910a5bd7c67d31c9fa275f833b9b87db3a6c44b2ed1d780b94bafd34974834d5b7c9f260eed4bad13c2cf6129819934812f4d36ebb541401c20fb6f8954b168ec2c12339c1e6b80db0091299d95b153a65aa1863408f35684256519bc1c89f0c1c1faf7e86d40da04906f74cac0ea21d6b43a58c8280fda292303d7076efce087c8839153839661bf42440c0d8cb231cb7179cd387aa6a570d382990f53643c4c2de929b615f8b95f4117cd75129595033044ce80dee258d2e6984919f6d21535c9995d68e8d2620811e3fec36b445b3923947b8214ea1759437aaf60232c1dfdcca60293cd144c07c19423a3d15445483d63b4a626c8e7f02cb857e22a8d20ac08ebbb6417a2c629c1c81806bb729e5930dd18a9831ac601a23e4c5297d96ea83ddd44d1366f91d281aa45e73b0a10f4e4ab3083bfb9d3e2b2e47543b80c05b69dcdd357414ae2dba287f09a7ae4cfcb35871b572a3da0379389be0a8d90e10fd376adc7788a3f24406910e482e678c7a8bd59a79a6a449d87bcf4b37a76f18b57d7cde0b4e6bf38815f311f10a38f8fdffde69c87704ac6812a22596d7856ef5fef32c792de0f40b281c1dfb54cceaf851eab60ef3a6bdd8acd8a3f0ee992309720d9491960097ea257fbd9b5f7d5b0b61240c650b67c0a249d69b406c4a26aa87f10f07db519f5d27fe42bb61bb957217813450d2f861ff982b03eb9c480582b7810375268d58ad2456c4ebe4f63311c7f2cd4f9f611d730f580558e36eefec5a2ec0fbda184fa91f5b87a3c9d8d677798dc18f177e1bcdc39277fef6b495d219edbb239c2c4f4008ff3ae88f00de955f881d898bb7bedad59c28c2c7ac17c5bb689f7e2728d6411f293e82018b68f7d17dfd9c40315169532acbf748890afbc0177e4b01a8cc3717ce7a6a4b7ce560c06dbb5d8e18795508ef75b7fb89d4b158fa7357a04ace9f -------------------------------------------------------------------------------- /voice_recovery_python/.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by maturin v0.14.17 2 | # To update, run 3 | # 4 | # maturin generate-ci github 5 | # 6 | name: CI 7 | 8 | on: 9 | push: 10 | branches: 11 | - main 12 | - master 13 | 
tags: 14 | - '*' 15 | pull_request: 16 | workflow_dispatch: 17 | 18 | permissions: 19 | contents: read 20 | 21 | jobs: 22 | linux: 23 | runs-on: ubuntu-latest 24 | strategy: 25 | matrix: 26 | target: [x86_64, x86, aarch64, armv7, s390x, ppc64le] 27 | steps: 28 | - uses: actions/checkout@v3 29 | - uses: actions/setup-python@v4 30 | with: 31 | python-version: '3.10' 32 | - name: Build wheels 33 | uses: PyO3/maturin-action@v1 34 | with: 35 | target: ${{ matrix.target }} 36 | args: --release --out dist --find-interpreter 37 | sccache: 'true' 38 | manylinux: auto 39 | - name: Upload wheels 40 | uses: actions/upload-artifact@v3 41 | with: 42 | name: wheels 43 | path: dist 44 | 45 | windows: 46 | runs-on: windows-latest 47 | strategy: 48 | matrix: 49 | target: [x64, x86] 50 | steps: 51 | - uses: actions/checkout@v3 52 | - uses: actions/setup-python@v4 53 | with: 54 | python-version: '3.10' 55 | architecture: ${{ matrix.target }} 56 | - name: Build wheels 57 | uses: PyO3/maturin-action@v1 58 | with: 59 | target: ${{ matrix.target }} 60 | args: --release --out dist --find-interpreter 61 | sccache: 'true' 62 | - name: Upload wheels 63 | uses: actions/upload-artifact@v3 64 | with: 65 | name: wheels 66 | path: dist 67 | 68 | macos: 69 | runs-on: macos-latest 70 | strategy: 71 | matrix: 72 | target: [x86_64, aarch64] 73 | steps: 74 | - uses: actions/checkout@v3 75 | - uses: actions/setup-python@v4 76 | with: 77 | python-version: '3.10' 78 | - name: Build wheels 79 | uses: PyO3/maturin-action@v1 80 | with: 81 | target: ${{ matrix.target }} 82 | args: --release --out dist --find-interpreter 83 | sccache: 'true' 84 | - name: Upload wheels 85 | uses: actions/upload-artifact@v3 86 | with: 87 | name: wheels 88 | path: dist 89 | 90 | sdist: 91 | runs-on: ubuntu-latest 92 | steps: 93 | - uses: actions/checkout@v3 94 | - name: Build sdist 95 | uses: PyO3/maturin-action@v1 96 | with: 97 | command: sdist 98 | args: --out dist 99 | - name: Upload sdist 100 | uses: actions/upload-artifact@v3 101 | with: 102 | name: wheels 103 | path: dist 104 | 105 | release: 106 | name: Release 107 | runs-on: ubuntu-latest 108 | if: "startsWith(github.ref, 'refs/tags/')" 109 | needs: [linux, windows, macos, sdist] 110 | steps: 111 | - uses: actions/download-artifact@v3 112 | with: 113 | name: wheels 114 | - name: Publish to PyPI 115 | uses: PyO3/maturin-action@v1 116 | env: 117 | MATURIN_PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }} 118 | with: 119 | command: upload 120 | args: --skip-existing * 121 | -------------------------------------------------------------------------------- /app/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started with Create React App 2 | 3 | This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). 4 | 5 | ## Available Scripts 6 | 7 | In the project directory, you can run: 8 | 9 | ### `npm start` 10 | 11 | Runs the app in the development mode.\ 12 | Open [http://localhost:3000](http://localhost:3000) to view it in your browser. 13 | 14 | The page will reload when you make changes.\ 15 | You may also see any lint errors in the console. 16 | 17 | ### `npm test` 18 | 19 | Launches the test runner in the interactive watch mode.\ 20 | See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. 
21 | 22 | ### `npm run build` 23 | 24 | Builds the app for production to the `build` folder.\ 25 | It correctly bundles React in production mode and optimizes the build for the best performance. 26 | 27 | The build is minified and the filenames include the hashes.\ 28 | Your app is ready to be deployed! 29 | 30 | See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. 31 | 32 | ### `npm run eject` 33 | 34 | **Note: this is a one-way operation. Once you `eject`, you can't go back!** 35 | 36 | If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project. 37 | 38 | Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own. 39 | 40 | You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it. 41 | 42 | ## Learn More 43 | 44 | You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started). 45 | 46 | To learn React, check out the [React documentation](https://reactjs.org/). 47 | 48 | ### Code Splitting 49 | 50 | This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting) 51 | 52 | ### Analyzing the Bundle Size 53 | 54 | This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size) 55 | 56 | ### Making a Progressive Web App 57 | 58 | This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app) 59 | 60 | ### Advanced Configuration 61 | 62 | This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration) 63 | 64 | ### Deployment 65 | 66 | This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment) 67 | 68 | ### `npm run build` fails to minify 69 | 70 | This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify) 71 | -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #-*- coding: utf-8 -*- 3 | """ 4 | Source code from: 5 | https://github.com/clovaai/voxceleb_trainer/blob/master/tuneThreshold.py 6 | """ 7 | 8 | import os 9 | import glob 10 | import sys 11 | import time 12 | from sklearn import metrics 13 | import numpy 14 | import pdb 15 | from operator import itemgetter 16 | 17 | def tuneThresholdfromScore(scores, labels, 
target_fa, target_fr = None): 18 | 19 | fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1) 20 | fnr = 1 - tpr 21 | 22 | tunedThreshold = []; 23 | if target_fr: 24 | for tfr in target_fr: 25 | idx = numpy.nanargmin(numpy.absolute((tfr - fnr))) 26 | tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]); 27 | 28 | for tfa in target_fa: 29 | idx = numpy.nanargmin(numpy.absolute((tfa - fpr))) # numpy.where(fpr<=tfa)[0][-1] 30 | tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]); 31 | 32 | idxE = numpy.nanargmin(numpy.absolute((fnr - fpr))) 33 | eer = max(fpr[idxE],fnr[idxE])*100 34 | 35 | return (tunedThreshold, eer, fpr, fnr); 36 | 37 | # Creates a list of false-negative rates, a list of false-positive rates 38 | # and a list of decision thresholds that give those error-rates. 39 | def ComputeErrorRates(scores, labels): 40 | 41 | # Sort the scores from smallest to largest, and also get the corresponding 42 | # indexes of the sorted scores. We will treat the sorted scores as the 43 | # thresholds at which the error-rates are evaluated. 44 | sorted_indexes, thresholds = zip(*sorted( 45 | [(index, threshold) for index, threshold in enumerate(scores)], 46 | key=itemgetter(1))) 47 | sorted_labels = [] 48 | labels = [labels[i] for i in sorted_indexes] 49 | fnrs = [] 50 | fprs = [] 51 | 52 | # At the end of this loop, fnrs[i] is the number of positive-label scores at 53 | # or below thresholds[i] (i.e., false rejections), and fprs[i] 54 | # is the number of negative-label scores at or below thresholds[i] 55 | # (i.e., correct rejections). 56 | for i in range(0, len(labels)): 57 | if i == 0: 58 | fnrs.append(labels[i]) 59 | fprs.append(1 - labels[i]) 60 | else: 61 | fnrs.append(fnrs[i-1] + labels[i]) 62 | fprs.append(fprs[i-1] + 1 - labels[i]) 63 | fnrs_norm = sum(labels) 64 | fprs_norm = len(labels) - fnrs_norm 65 | 66 | # Now divide by the total number of positive labels to 67 | # obtain the false negative rates across all thresholds 68 | fnrs = [x / float(fnrs_norm) for x in fnrs] 69 | 70 | # Divide by the total number of negatives to get the 71 | # true negative rates. Subtract these quantities from 1 to 72 | # get the false positive rates. 73 | fprs = [1 - x / float(fprs_norm) for x in fprs] 74 | return fnrs, fprs, thresholds 75 | 76 | # Computes the minimum of the detection cost function. The comments refer to 77 | # equations in Section 3 of the NIST 2016 Speaker Recognition Evaluation Plan. 78 | def ComputeMinDcf(fnrs, fprs, thresholds, p_target, c_miss, c_fa): 79 | min_c_det = float("inf") 80 | min_c_det_threshold = thresholds[0] 81 | for i in range(0, len(fnrs)): 82 | # See Equation (2). It is a weighted sum of false negative 83 | # and false positive errors. 84 | c_det = c_miss * fnrs[i] * p_target + c_fa * fprs[i] * (1 - p_target) 85 | if c_det < min_c_det: 86 | min_c_det = c_det 87 | min_c_det_threshold = thresholds[i] 88 | # See Equations (3) and (4). Now we normalize the cost.
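    # Worked example (illustrative only, not part of the original source): with the
    # values used in infererence.py (p_target = 0.05, c_miss = c_fa = 1), the
    # normalizer below is c_def = min(1 * 0.05, 1 * (1 - 0.05)) = 0.05, so a raw
    # minimum detection cost of 0.02 would give min_dcf = 0.02 / 0.05 = 0.4.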
89 | c_def = min(c_miss * p_target, c_fa * (1 - p_target)) 90 | min_dcf = min_c_det / c_def 91 | return min_dcf, min_c_det_threshold -------------------------------------------------------------------------------- /hardhat/contracts/VoiceKeyRecover.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT 2 | pragma solidity >=0.8.4; 3 | 4 | import "./VerifierWrapper.sol"; 5 | 6 | // import "./ENS.sol"; 7 | 8 | contract VoiceKeyRecover is VerifierWrapper { 9 | // using ENSNamehash for bytes; 10 | struct VoiceData { 11 | address owner; 12 | bytes32 featureHash; 13 | bytes32 commitmentHash; 14 | bytes commitment; 15 | } 16 | 17 | mapping(address => bool) public isRegistered; 18 | mapping(address => VoiceData) public voiceDataOfWallet; 19 | mapping(bytes32 => bool) public usedMessageHashes; 20 | 21 | // ENS ens; 22 | 23 | constructor(uint _maxMsgSize) VerifierWrapper(_maxMsgSize) { 24 | // ens = ENS(_ens); 25 | } 26 | 27 | function getOwner() public view returns (address) { 28 | require(isRegistered[msg.sender], "not registered"); 29 | return voiceDataOfWallet[msg.sender].owner; 30 | } 31 | 32 | function register( 33 | address walletAddr, 34 | bytes32 featureHash, 35 | bytes32 commitmentHash, 36 | bytes calldata commitment 37 | ) public { 38 | require(!isRegistered[walletAddr], "already registered"); 39 | voiceDataOfWallet[walletAddr] = VoiceData( 40 | msg.sender, 41 | featureHash, 42 | commitmentHash, 43 | commitment 44 | ); 45 | isRegistered[walletAddr] = true; 46 | } 47 | 48 | function recover( 49 | address walletAddr, 50 | bytes32 messageHash, 51 | bytes calldata proof 52 | ) public { 53 | require(isRegistered[walletAddr], "The wallet is not registered"); 54 | require(!usedMessageHashes[messageHash], "Message hash already used"); 55 | VoiceData memory voiceData = voiceDataOfWallet[walletAddr]; 56 | address oldOwner = voiceData.owner; 57 | address newOwner = msg.sender; 58 | // require(oldOwner == resolveENS(oldENS), "Invalid old ENS"); 59 | bytes memory message = abi.encodePacked(oldOwner, newOwner); 60 | require( 61 | VerifierWrapper.verify( 62 | voiceData.commitmentHash, 63 | voiceData.featureHash, 64 | messageHash, 65 | message, 66 | proof 67 | ), 68 | "invalid proof" 69 | ); 70 | usedMessageHashes[messageHash] = true; 71 | // address newOwner = resolveENS(newENS); 72 | // address newOwner = msg.sender; 73 | voiceDataOfWallet[walletAddr].owner = newOwner; 74 | } 75 | 76 | function refreshVoiceData( 77 | address walletAddr, 78 | bytes32 featureHash, 79 | bytes32 commitmentHash, 80 | bytes calldata commitment 81 | ) public { 82 | require(isRegistered[walletAddr], "The wallet is not registered"); 83 | VoiceData memory voiceData = voiceDataOfWallet[walletAddr]; 84 | require( 85 | msg.sender == voiceData.owner, 86 | "The owner can call the refresh" 87 | ); 88 | voiceDataOfWallet[walletAddr].featureHash = featureHash; 89 | voiceDataOfWallet[walletAddr].commitmentHash = commitmentHash; 90 | voiceDataOfWallet[walletAddr].commitment = commitment; 91 | } 92 | 93 | function getMessageOfRecover( 94 | address walletAddr 95 | ) public view returns (bytes memory) { 96 | require(isRegistered[walletAddr], "The wallet is not registered"); 97 | VoiceData memory voiceData = voiceDataOfWallet[walletAddr]; 98 | address oldOwner = voiceData.owner; 99 | address newOwner = msg.sender; 100 | bytes memory message = abi.encodePacked(oldOwner, newOwner); 101 | return message; 102 | } 103 | 104 | // function resolveENS(string calldata ensName) public 
view returns (address) { 105 | // bytes32 node = bytes(ensName).namehash(); 106 | // Resolver resolver = ens.resolver(node); 107 | // return resolver.addr(node); 108 | // } 109 | } 110 | -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/models/RawNetBasicBlock.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | 8 | class PreEmphasis(torch.nn.Module): 9 | def __init__(self, coef: float = 0.97) -> None: 10 | super().__init__() 11 | self.coef = coef 12 | # make kernel 13 | # In pytorch, the convolution operation uses cross-correlation. So, filter is flipped. 14 | self.register_buffer( 15 | "flipped_filter", 16 | torch.FloatTensor([-self.coef, 1.0]).unsqueeze(0).unsqueeze(0), 17 | ) 18 | 19 | def forward(self, input: torch.tensor) -> torch.tensor: 20 | assert ( 21 | len(input.size()) == 2 22 | ), "The number of dimensions of input tensor must be 2!" 23 | # reflect padding to match lengths of in/out 24 | input = input.unsqueeze(1) 25 | input = F.pad(input, (1, 0), "reflect") 26 | return F.conv1d(input, self.flipped_filter) 27 | 28 | 29 | class AFMS(nn.Module): 30 | """ 31 | Alpha-Feature map scaling, added to the output of each residual block[1,2]. 32 | 33 | Reference: 34 | [1] RawNet2 : https://www.isca-speech.org/archive/Interspeech_2020/pdfs/1011.pdf 35 | [2] AMFS : https://www.koreascience.or.kr/article/JAKO202029757857763.page 36 | """ 37 | 38 | def __init__(self, nb_dim: int) -> None: 39 | super().__init__() 40 | self.alpha = nn.Parameter(torch.ones((nb_dim, 1))) 41 | self.fc = nn.Linear(nb_dim, nb_dim) 42 | self.sig = nn.Sigmoid() 43 | 44 | def forward(self, x): 45 | y = F.adaptive_avg_pool1d(x, 1).view(x.size(0), -1) 46 | y = self.sig(self.fc(y)).view(x.size(0), x.size(1), -1) 47 | 48 | x = x + self.alpha 49 | x = x * y 50 | return x 51 | 52 | 53 | class Bottle2neck(nn.Module): 54 | def __init__( 55 | self, 56 | inplanes, 57 | planes, 58 | kernel_size=None, 59 | dilation=None, 60 | scale=4, 61 | pool=False, 62 | ): 63 | 64 | super().__init__() 65 | 66 | width = int(math.floor(planes / scale)) 67 | 68 | self.conv1 = nn.Conv1d(inplanes, width * scale, kernel_size=1) 69 | self.bn1 = nn.BatchNorm1d(width * scale) 70 | 71 | self.nums = scale - 1 72 | 73 | convs = [] 74 | bns = [] 75 | 76 | num_pad = math.floor(kernel_size / 2) * dilation 77 | 78 | for i in range(self.nums): 79 | convs.append( 80 | nn.Conv1d( 81 | width, 82 | width, 83 | kernel_size=kernel_size, 84 | dilation=dilation, 85 | padding=num_pad, 86 | ) 87 | ) 88 | bns.append(nn.BatchNorm1d(width)) 89 | 90 | self.convs = nn.ModuleList(convs) 91 | self.bns = nn.ModuleList(bns) 92 | 93 | self.conv3 = nn.Conv1d(width * scale, planes, kernel_size=1) 94 | self.bn3 = nn.BatchNorm1d(planes) 95 | 96 | self.relu = nn.ReLU() 97 | 98 | self.width = width 99 | 100 | self.mp = nn.MaxPool1d(pool) if pool else False 101 | self.afms = AFMS(planes) 102 | 103 | if inplanes != planes: # if change in number of filters 104 | self.residual = nn.Sequential( 105 | nn.Conv1d(inplanes, planes, kernel_size=1, stride=1, bias=False) 106 | ) 107 | else: 108 | self.residual = nn.Identity() 109 | 110 | def forward(self, x): 111 | residual = self.residual(x) 112 | 113 | out = self.conv1(x) 114 | out = self.relu(out) 115 | out = self.bn1(out) 116 | 117 | spx = torch.split(out, self.width, 1) 118 | for i in range(self.nums): 119 | if i == 0: 120 | sp = spx[i] 121 | 
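            # For splits after the first, the previous branch output is added to the
            # current split before it is convolved (the hierarchical, Res2Net-style
            # connection across scales); the per-split outputs are concatenated below.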
else: 122 | sp = sp + spx[i] 123 | sp = self.convs[i](sp) 124 | sp = self.relu(sp) 125 | sp = self.bns[i](sp) 126 | if i == 0: 127 | out = sp 128 | else: 129 | out = torch.cat((out, sp), 1) 130 | 131 | out = torch.cat((out, spx[self.nums]), 1) 132 | 133 | out = self.conv3(out) 134 | out = self.relu(out) 135 | out = self.bn3(out) 136 | 137 | out += residual 138 | if self.mp: 139 | out = self.mp(out) 140 | out = self.afms(out) 141 | 142 | return out 143 | -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/models/RawNet3.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | 3 | import torch 4 | import torch.nn as nn 5 | from asteroid_filterbanks import Encoder, ParamSincFB 6 | 7 | from .RawNetBasicBlock import Bottle2neck, PreEmphasis 8 | 9 | 10 | class RawNet3(nn.Module): 11 | def __init__(self, block, model_scale, context, summed, C=1024, **kwargs): 12 | super().__init__() 13 | 14 | nOut = kwargs["nOut"] 15 | 16 | self.context = context 17 | self.encoder_type = kwargs["encoder_type"] 18 | self.log_sinc = kwargs["log_sinc"] 19 | self.norm_sinc = kwargs["norm_sinc"] 20 | self.out_bn = kwargs["out_bn"] 21 | self.summed = summed 22 | 23 | self.preprocess = nn.Sequential( 24 | PreEmphasis(), nn.InstanceNorm1d(1, eps=1e-4, affine=True) 25 | ) 26 | self.conv1 = Encoder( 27 | ParamSincFB( 28 | C // 4, 29 | 251, 30 | stride=kwargs["sinc_stride"], 31 | ) 32 | ) 33 | self.relu = nn.ReLU() 34 | self.bn1 = nn.BatchNorm1d(C // 4) 35 | 36 | self.layer1 = block( 37 | C // 4, C, kernel_size=3, dilation=2, scale=model_scale, pool=5 38 | ) 39 | self.layer2 = block( 40 | C, C, kernel_size=3, dilation=3, scale=model_scale, pool=3 41 | ) 42 | self.layer3 = block(C, C, kernel_size=3, dilation=4, scale=model_scale) 43 | self.layer4 = nn.Conv1d(3 * C, 1536, kernel_size=1) 44 | 45 | if self.context: 46 | attn_input = 1536 * 3 47 | else: 48 | attn_input = 1536 49 | print("self.encoder_type", self.encoder_type) 50 | if self.encoder_type == "ECA": 51 | attn_output = 1536 52 | elif self.encoder_type == "ASP": 53 | attn_output = 1 54 | else: 55 | raise ValueError("Undefined encoder") 56 | 57 | self.attention = nn.Sequential( 58 | nn.Conv1d(attn_input, 128, kernel_size=1), 59 | nn.ReLU(), 60 | nn.BatchNorm1d(128), 61 | nn.Conv1d(128, attn_output, kernel_size=1), 62 | nn.Softmax(dim=2), 63 | ) 64 | 65 | self.bn5 = nn.BatchNorm1d(3072) 66 | 67 | self.fc6 = nn.Linear(3072, nOut) 68 | self.bn6 = nn.BatchNorm1d(nOut) 69 | 70 | self.mp3 = nn.MaxPool1d(3) 71 | 72 | def forward(self, x): 73 | """ 74 | :param x: input mini-batch (bs, samp) 75 | """ 76 | 77 | with torch.cuda.amp.autocast(enabled=False): 78 | x = self.preprocess(x) 79 | x = torch.abs(self.conv1(x)) 80 | if self.log_sinc: 81 | x = torch.log(x + 1e-6) 82 | if self.norm_sinc == "mean": 83 | x = x - torch.mean(x, dim=-1, keepdim=True) 84 | elif self.norm_sinc == "mean_std": 85 | m = torch.mean(x, dim=-1, keepdim=True) 86 | s = torch.std(x, dim=-1, keepdim=True) 87 | s[s < 0.001] = 0.001 88 | x = (x - m) / s 89 | 90 | if self.summed: 91 | x1 = self.layer1(x) 92 | x2 = self.layer2(x1) 93 | x3 = self.layer3(self.mp3(x1) + x2) 94 | else: 95 | x1 = self.layer1(x) 96 | x2 = self.layer2(x1) 97 | x3 = self.layer3(x2) 98 | 99 | x = self.layer4(torch.cat((self.mp3(x1), x2, x3), dim=1)) 100 | x = self.relu(x) 101 | 102 | t = x.size()[-1] 103 | 104 | if self.context: 105 | global_x = torch.cat( 106 | ( 107 | x, 108 | torch.mean(x, dim=2, 
keepdim=True).repeat(1, 1, t), 109 | torch.sqrt( 110 | torch.var(x, dim=2, keepdim=True).clamp( 111 | min=1e-4, max=1e4 112 | ) 113 | ).repeat(1, 1, t), 114 | ), 115 | dim=1, 116 | ) 117 | else: 118 | global_x = x 119 | 120 | w = self.attention(global_x) 121 | 122 | mu = torch.sum(x * w, dim=2) 123 | sg = torch.sqrt( 124 | (torch.sum((x**2) * w, dim=2) - mu**2).clamp(min=1e-4, max=1e4) 125 | ) 126 | 127 | x = torch.cat((mu, sg), 1) 128 | 129 | x = self.bn5(x) 130 | 131 | x = self.fc6(x) 132 | 133 | if self.out_bn: 134 | x = self.bn6(x) 135 | 136 | return x 137 | 138 | 139 | def MainModel(**kwargs): 140 | 141 | model = RawNet3( 142 | Bottle2neck, model_scale=8, context=True, summed=True, **kwargs 143 | ) 144 | return model 145 | -------------------------------------------------------------------------------- /hardhat/test/VoiceKeyRecover.ts: -------------------------------------------------------------------------------- 1 | import { ethers } from "hardhat"; 2 | import { Signer, Contract } from "ethers"; 3 | import { expect } from "chai"; 4 | import * as fs from "fs/promises"; 5 | import { AbiCoder } from "@ethersproject/abi"; 6 | 7 | 8 | describe("VoiceKeyRecover", function () { 9 | let accounts: Signer[]; 10 | let VoiceKeyRecover: Contract; 11 | 12 | beforeEach(async function () { 13 | // const signer = (await ethers.getSigners())[0]; 14 | // console.log(await signer.getBalance()); 15 | // const yulVerifier = await fs.readFile("./test_data/verifier_code.txt"); 16 | // const wordSize = 32; 17 | const maxMsgSize = 64; 18 | 19 | // const factory = ethers.ContractFactory.fromSolidity( 20 | // { bytecode: yulVerifier, abi: [] }, 21 | // signer 22 | // ); 23 | // const contract = await factory.deploy({ gasLimit: 900_000_000 }); 24 | // await contract.deployed(); 25 | // console.log(contract.address); 26 | 27 | const VoiceFactory = await ethers.getContractFactory("VoiceKeyRecover"); 28 | VoiceKeyRecover = await VoiceFactory.deploy(maxMsgSize); 29 | await VoiceKeyRecover.deployed(); 30 | console.log(VoiceKeyRecover.address); 31 | }); 32 | 33 | it("should register voice data", async function () { 34 | const signer = (await ethers.getSigners())[0]; 35 | const myAddr = signer.address; 36 | const input = JSON.parse(await fs.readFile("./test_data/evm_public_input.json", "utf-8")); 37 | await VoiceKeyRecover.register(myAddr, input.feature_hash, input.commitment_hash, input.commitment); 38 | const registeredData = await VoiceKeyRecover.voiceDataOfWallet(myAddr); 39 | expect(registeredData.owner).to.equal(myAddr); 40 | expect(registeredData.featureHash).to.equal(input.feature_hash); 41 | expect(registeredData.commitmentHash).to.equal(input.commitment_hash); 42 | expect(ethers.utils.hexlify(registeredData.commitment)).to.equal(ethers.utils.hexlify(input.commitment)); 43 | }); 44 | 45 | it("should recover and update the owner address", async function () { 46 | const signer0 = (await ethers.getSigners())[0]; 47 | const input = JSON.parse(await fs.readFile("./test_data/evm_public_input.json", "utf-8")); 48 | await VoiceKeyRecover.register(signer0.address, input.feature_hash, input.commitment_hash, input.commitment); 49 | const proof = await fs.readFile("./test_data/evm_proof.hex", "utf-8"); 50 | const signer1 = (await ethers.getSigners())[1]; 51 | await VoiceKeyRecover.connect(signer1).recover(signer0.address, input.message_hash, proof); 52 | const registeredData = await VoiceKeyRecover.voiceDataOfWallet(signer0.address); 53 | expect(registeredData.owner).to.equal(signer1.address); 54 | }); 55 | 56 | // 
it("should recover and update ENS", async function () { 57 | // const wallet = await accounts[0].getAddress(); 58 | // const currentENS = "example1.eth"; 59 | // const newENS = "example2.eth"; 60 | // const ZKPCommitment = ethers.utils.keccak256("0x1234"); 61 | // const voiceFeatures = ethers.utils.randomBytes(32); 62 | // const zkProof = ethers.utils.randomBytes(192); // Dummy proof 63 | // const messageHash = ethers.utils.keccak256("0x5678"); 64 | 65 | // await VoiceAuth.register(wallet, currentENS, ZKPCommitment, voiceFeatures); 66 | 67 | // // Set the dummy verifier to return true (proof is valid) 68 | // await DummyZKPVerifier.setVerificationResult(true); 69 | 70 | // await VoiceAuth.recover(wallet, newENS, zkProof, messageHash); 71 | 72 | // const updatedData = await VoiceAuth.voiceDataMapping(wallet); 73 | // expect(updatedData.currentENS).to.equal(newENS); 74 | // expect(updatedData.ZKPCommitment).to.equal(ZKPCommitment); 75 | // expect(ethers.utils.hexlify(updatedData.voiceFeatures)).to.equal(ethers.utils.hexlify(voiceFeatures)); 76 | // }); 77 | 78 | // it("should not allow the same message hash to be used twice", async function () { 79 | // const wallet = await accounts[0].getAddress(); 80 | // const currentENS = "example1.eth"; 81 | // const newENS1 = "example2.eth"; 82 | // const newENS2 = "example3.eth"; 83 | // const ZKPCommitment = ethers.utils.keccak256("0x1234"); 84 | // const voiceFeatures = ethers.utils.randomBytes(32); 85 | // const zkProof = ethers.utils.randomBytes(192); // Dummy proof 86 | // const messageHash = ethers.utils.keccak256("0x5678"); 87 | 88 | // await VoiceAuth.register(wallet, currentENS, ZKPCommitment, voiceFeatures); 89 | 90 | // // Set the dummy verifier to return true (proof is valid) 91 | // await DummyZKPVerifier.setVerificationResult(true); 92 | 93 | // // First recovery using messageHash 94 | // await VoiceAuth.recover(wallet, newENS1, zkProof, messageHash); 95 | 96 | // // Second recovery using the same messageHash should fail 97 | // await expect(VoiceAuth.recover(wallet, newENS2, zkProof, messageHash)).to.be.revertedWith("Message hash already used"); 98 | // }); 99 | }); -------------------------------------------------------------------------------- /backend/machine_learning/RawNet3/infererence.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import itertools 3 | import os 4 | import sys 5 | from typing import Dict 6 | import numpy as np 7 | import torch 8 | import torch.nn.functional as F 9 | from tqdm import tqdm 10 | import soundfile 11 | 12 | from .models.RawNet3 import RawNet3 13 | from .models.RawNetBasicBlock import Bottle2neck 14 | from .utils import tuneThresholdfromScore, ComputeErrorRates, ComputeMinDcf 15 | 16 | 17 | def main(args: Dict) -> None: 18 | model = RawNet3( 19 | Bottle2neck, 20 | model_scale=8, 21 | context=True, 22 | summed=True, 23 | encoder_type="ECA", 24 | nOut=256, 25 | out_bn=False, 26 | sinc_stride=10, 27 | log_sinc=True, 28 | norm_sinc="mean", 29 | grad_mult=1, 30 | ) 31 | gpu = False 32 | 33 | model.load_state_dict( 34 | torch.load( 35 | "./models/weights/model.pt", 36 | map_location=lambda storage, loc: storage, 37 | )["model"] 38 | ) 39 | model.eval() 40 | print("RawNet3 initialised & weights loaded!") 41 | 42 | if torch.cuda.is_available(): 43 | print("Cuda available, conducting inference on GPU") 44 | model = model.to("cuda") 45 | gpu = True 46 | 47 | audio, sample_rate = soundfile.read(args.input) 48 | 49 | if args.inference_utterance: 50 | 
output = extract_speaker_embd( 51 | model, 52 | audio, 53 | sample_rate, 54 | n_samples=48000, 55 | n_segments=args.n_segments, 56 | gpu=gpu, 57 | ).mean(0) 58 | 59 | np.save(args.out_dir, output.detach().cpu().numpy()) 60 | return 61 | 62 | if args.vox1_o_benchmark: 63 | with open("../../trials/cleaned_test_list.txt", "r") as f: 64 | trials = f.readlines() 65 | 66 | ## Get a list of unique file names 67 | files = list(itertools.chain(*[x.strip().split()[-2:] for x in trials])) 68 | 69 | setfiles = list(set(files)) 70 | setfiles.sort() 71 | 72 | embd_dic = {} 73 | for f in tqdm(setfiles): 74 | embd_dic[f] = extract_speaker_embd( 75 | model, os.path.join(args.DB_dir, f), n_samples=64000, gpu=gpu 76 | ) 77 | 78 | labels, scores = [], [] 79 | for line in trials: 80 | data = line.split() 81 | ref_feat = F.normalize(embd_dic[data[1]], p=2, dim=1) 82 | com_feat = F.normalize(embd_dic[data[2]], p=2, dim=1) 83 | 84 | if gpu: 85 | ref_feat = ref_feat.cuda() 86 | com_feat = com_feat.cuda() 87 | 88 | dist = ( 89 | torch.cdist( 90 | ref_feat.reshape((args.n_segments, -1)), 91 | com_feat.reshape((args.n_segments, -1)), 92 | ) 93 | .detach() 94 | .cpu() 95 | .numpy() 96 | ) 97 | score = -1.0 * np.mean(dist) 98 | labels.append(int(data[0])) 99 | scores.append(score) 100 | 101 | result = tuneThresholdfromScore(scores, labels, [1, 0.1]) 102 | 103 | fnrs, fprs, thresholds = ComputeErrorRates(scores, labels) 104 | p_target, c_miss, c_fa = 0.05, 1, 1 105 | mindcf, _ = ComputeMinDcf( 106 | fnrs, fprs, thresholds, p_target, c_miss, c_fa 107 | ) 108 | print( 109 | "Vox1-O benchmark Finished. EER: %2.4f, minDCF:%.5f" 110 | % (result[1], mindcf) 111 | ) 112 | 113 | 114 | def extract_speaker_embd( 115 | model, audio: np.ndarray,sample_rate: int, n_samples: int, n_segments: int = 10, gpu: bool = False 116 | ) -> np.ndarray: 117 | if len(audio.shape) > 1: 118 | raise ValueError( 119 | f"RawNet3 supports mono input only. Input data has a shape of {audio.shape}." 120 | ) 121 | 122 | if sample_rate != 16000: 123 | raise ValueError( 124 | f"RawNet3 supports 16k sampling rate only. Input data's sampling rate is {sample_rate}." 125 | ) 126 | 127 | if ( 128 | len(audio) < n_samples 129 | ): # RawNet3 was trained using utterances of 3 seconds 130 | shortage = n_samples - len(audio) + 1 131 | audio = np.pad(audio, (0, shortage), "wrap") 132 | 133 | audios = [] 134 | startframe = np.linspace(0, len(audio) - n_samples, num=n_segments) 135 | for asf in startframe: 136 | audios.append(audio[int(asf) : int(asf) + n_samples]) 137 | 138 | audios = torch.from_numpy(np.stack(audios, axis=0).astype(np.float32)) 139 | if gpu: 140 | audios = audios.to("cuda") 141 | with torch.no_grad(): 142 | output = model(audios) 143 | 144 | return output 145 | 146 | 147 | if __name__ == "__main__": 148 | parser = argparse.ArgumentParser(description="RawNet3 inference") 149 | 150 | parser.add_argument( 151 | "--inference_utterance", default=False, action="store_true" 152 | ) 153 | parser.add_argument( 154 | "--input", 155 | type=str, 156 | default="", 157 | help="Input file to extract embedding. Required when 'inference_utterance' is True", 158 | ) 159 | parser.add_argument( 160 | "--vox1_o_benchmark", default=False, action="store_true" 161 | ) 162 | parser.add_argument( 163 | "--DB_dir", 164 | type=str, 165 | default="", 166 | help="Directory for VoxCeleb1. 
Required when 'vox1_o_benchmark' is True", 167 | ) 168 | parser.add_argument("--out_dir", type=str, default="./out.npy") 169 | parser.add_argument( 170 | "--n_segments", 171 | type=int, 172 | default=10, 173 | help="number of segments to make using each utterance", 174 | ) 175 | args = parser.parse_args() 176 | 177 | assert args.inference_utterance or args.vox1_o_benchmark 178 | if args.inference_utterance: 179 | assert args.input != "" 180 | 181 | if args.vox1_o_benchmark: 182 | assert args.DB_dir != "" 183 | 184 | sys.exit(main(args)) 185 | -------------------------------------------------------------------------------- /backend/utils.py: -------------------------------------------------------------------------------- 1 | import bchlib 2 | import hashlib 3 | import os 4 | import random 5 | import string 6 | import io 7 | import soundfile 8 | import json 9 | from machine_learning.speaker_recognition import calc_feat_vec 10 | import numpy as np 11 | from voice_recovery_python import poseidon_hash, evm_prove 12 | from convert import bytearray_to_hex, hex_to_bytearray, feat_bytearray_from_wav_blob 13 | 14 | 15 | # create a bch object 16 | BCH_POLYNOMIAL = 8219 17 | BCH_BITS = 64 # number of bits that can be error-corrected 18 | bch = bchlib.BCH(BCH_POLYNOMIAL, BCH_BITS) 19 | CODE_LEN = 140 20 | 21 | 22 | def generate_filename(length): 23 | letters = string.ascii_lowercase 24 | filename = ''.join(random.choice(letters) for i in range(length)) 25 | return filename 26 | 27 | # Error correction with a BCH code 28 | 29 | 30 | def bch_error_correction(packet): 31 | ''' 32 | Error correction with a BCH code. 33 | 34 | Parameters 35 | ---------- 36 | packet : bytearray 37 | 256 bits of data encoded with BCH; the packet itself is longer than 256 bits. 38 | ''' 39 | 40 | # de-packetize 41 | data, ecc = packet[:-bch.ecc_bytes], packet[-bch.ecc_bytes:] 42 | 43 | # correct 44 | bitflips = bch.decode_inplace(data, ecc) 45 | 46 | # packetize 47 | packet = data + ecc 48 | 49 | return packet 50 | 51 | 52 | def bitflip(packet): 53 | byte_num = random.randint(0, len(packet) - 1) 54 | bit_num = random.randint(0, 7) 55 | packet[byte_num] ^= (1 << bit_num) 56 | 57 | 58 | def test_bch(): 59 | data = bytearray(os.urandom(32)) 60 | 61 | ecc = bch.encode(data) 62 | packet = data + ecc 63 | print(type(packet)) 64 | 65 | assert packet == bch_error_correction(packet) 66 | 67 | 68 | def xor(a, b): 69 | ''' 70 | Compute the bitwise exclusive OR of two byte arrays. 71 | 72 | Parameters 73 | ---------- 74 | a : bytearray 75 | b : bytearray 76 | ''' 77 | result = bytearray([x ^ y for x, y in zip(a, b)]) 78 | return result 79 | 80 | 81 | def my_hash(data): 82 | ''' 83 | Apply the Poseidon hash function. 84 | 85 | Parameters 86 | ---------- 87 | data : bytearray 88 | ''' 89 | return hex_to_bytearray(poseidon_hash(bytearray_to_hex(data))) 90 | 91 | 92 | def padding(data, n): 93 | ''' 94 | Append zero bytes so that the data is n bytes long. 95 | 96 | Parameters 97 | ---------- 98 | data : bytearray 99 | n : number of bytes 100 | ''' 101 | padding_data = data.ljust(n, b'\x00') 102 | return padding_data 103 | 104 | 105 | def fuzzy_commitment(feat_vec): 106 | ''' 107 | Generate h(w) and c from the feature vector. 108 | 109 | Parameters 110 | ---------- 111 | feat_vec : bytearray 112 | ''' 113 | 114 | # generate random vector 115 | s = bytearray(os.urandom(32)) 116 | 117 | ecc = bch.encode(s) 118 | packet = s + ecc 119 | print("packet is ", bytearray_to_hex(packet)) 120 | print(len(packet)) 121 | 122 | feat_vec = padding(feat_vec, len(packet)) 123 | 124 | c = xor(feat_vec, packet) 125 | 126 | h_w = my_hash(packet) 127 | 128 | return c, h_w 129 | 130 | 131 | def recover(feat_vec, c, h_w, m): 132 | ''' 133 | Recover w from the feature vector and return e and hash(m, w). 134 | 135 | Parameters
136 | ---------- 137 | feat_vec : bytearray 138 | c : bytearray 139 | h_w : bytearray 140 | m : bytearray 141 | ''' 142 | assert (len(c) >= len(feat_vec)) 143 | l = len(c) 144 | feat_vec = padding(feat_vec, l) 145 | w1 = xor(feat_vec, c) 146 | w = bch_error_correction(w1) 147 | 148 | e = xor(w, w1) 149 | 150 | h_m_w = my_hash(m+w) 151 | 152 | recovered_h_W = my_hash(w) 153 | print(recovered_h_W) 154 | 155 | return e, h_m_w, recovered_h_W 156 | 157 | 158 | def generate_proof(feat_vec, err, feat_xor_ecc, message): 159 | session_id = generate_filename(20) 160 | session_dir = os.path.join("./storage", session_id) 161 | print(session_dir) 162 | # params_dir = "../build/params" 163 | # pk_dir = "../build/pk" 164 | 165 | os.mkdir(session_dir) 166 | input_path = os.path.join(session_dir, "input.json") 167 | input_data = { 168 | "features": bytearray_to_hex(padding(feat_vec, CODE_LEN)), 169 | "errors": bytearray_to_hex(padding(err, CODE_LEN)), 170 | "commitment": bytearray_to_hex(padding(feat_xor_ecc, CODE_LEN)), 171 | "message": bytearray_to_hex(message) 172 | } 173 | input_json = json.dumps(input_data) 174 | with open(input_path, "w") as f: 175 | f.write(input_json) 176 | 177 | proof_path = os.path.join(session_dir, "proof.hex") 178 | public_input_path = os.path.join(session_dir, "public.json") 179 | try: 180 | evm_prove( 181 | params_dir="../build/params", 182 | app_circuit_config="../eth_voice_recovery/configs/test1_circuit.config", 183 | agg_circuit_config="../eth_voice_recovery/configs/agg_circuit.config", 184 | pk_dir="../build/pks", 185 | input_path=input_path, 186 | proof_path=proof_path, 187 | public_input_path=public_input_path 188 | ) 189 | except: 190 | return False, b'', session_id 191 | # 余裕があればpublic inputをアサートする 192 | 193 | with open(proof_path, 'r') as f: 194 | # hex 195 | proof_bin = hex_to_bytearray(f.read()) 196 | return True, proof_bin, session_id 197 | 198 | # shutil.rmtree(session_dir) 199 | 200 | # # 長さが256ビットの特徴ベクトルを生成 201 | # vec = np.random.randint(0, 2, 256) 202 | # print(vec) 203 | # bin_vec = bytearray(np.packbits(vec)) 204 | # print("bin_vec is ",bytearray_to_hex(bin_vec)) 205 | # bin_vec = padding(bin_vec, 64) 206 | # print("padding bin_vec is ",bin_vec) 207 | # h_w, c = fuzzy_commitment(bin_vec) 208 | # print ("h_w is ",h_w), print("c is ",c) 209 | 210 | 211 | def main(): 212 | generate_proof( 213 | hex_to_bytearray( 214 | "0xddeb3779c4515c05a06495c3ec2403655d9b784d7502a064ebf3c093474b23ce"), 215 | hex_to_bytearray("0x00000004410000000010a16008004002028000300200000100025001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), 216 | hex_to_bytearray("0x7d7fbf998b8e8d29756bcea0755e51a2e7208e3d9df90aa741450ced38cddbfcc8a96ccce1daa8bff47472d07907a612a761b2a1ec37d25407a6952020e413ee12f40ca7d81cb0dcab51591c3495c4b63134518969ec7c69b6469f0ab20e3d82ceffe4eda9ed71550f0ac020061eb7907cfd6eb54849fa5c7fc882764d7f815c08f5fee653a47402"), 217 | hex_to_bytearray("0x9a8f43") 218 | ) 219 | 220 | 221 | if __name__ == '__main__': 222 | main() 223 | -------------------------------------------------------------------------------- /eth_voice_recovery/src/fuzzy.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use crate::poseidon_circuit::*; 4 | use halo2_base::halo2_proofs::circuit::{Region, Value}; 5 | use 
halo2_base::halo2_proofs::halo2curves::bn256::Fr; 6 | use halo2_base::halo2_proofs::halo2curves::FieldExt; 7 | use halo2_base::halo2_proofs::plonk::ConstraintSystem; 8 | use halo2_base::halo2_proofs::{circuit::Layouter, plonk::Error}; 9 | use halo2_base::{ 10 | gates::{flex_gate::FlexGateConfig, range::RangeConfig, GateInstructions, RangeInstructions}, 11 | AssignedValue, Context, 12 | }; 13 | use halo2_base::{ContextParams, QuantumCell}; 14 | // use halo2_dynamic_sha256::{ 15 | // Field, Sha256AssignedRows, Sha256CompressionConfig, Sha256DynamicConfig, 16 | // }; 17 | use itertools::Itertools; 18 | 19 | #[derive(Debug, Clone)] 20 | pub struct FuzzyCommitmentResult<'a> { 21 | pub(crate) assigned_commitment: Vec>, 22 | pub(crate) assigned_feature_hash: AssignedValue<'a, Fr>, 23 | pub(crate) assigned_word: Vec>, 24 | pub(crate) word_value: Vec, 25 | } 26 | 27 | #[derive(Debug, Clone)] 28 | pub struct FuzzyCommitmentConfig { 29 | range_config: RangeConfig, 30 | error_threshold: u64, 31 | pub(crate) word_size: usize, 32 | _f: PhantomData, 33 | } 34 | 35 | impl FuzzyCommitmentConfig { 36 | pub fn configure( 37 | meta: &mut ConstraintSystem, 38 | range_config: RangeConfig, 39 | error_threshold: u64, 40 | word_size: usize, 41 | ) -> Self { 42 | Self { 43 | range_config, 44 | error_threshold, 45 | word_size, 46 | _f: PhantomData, 47 | } 48 | } 49 | 50 | pub fn recover_and_hash<'v: 'a, 'a>( 51 | &self, 52 | ctx: &mut Context<'v, Fr>, 53 | poseidon: &'a PoseidonChipBn254_8_58<'v, Fr>, 54 | features: &[u8], 55 | errors: &[u8], 56 | commitment: &[u8], 57 | ) -> Result, Error> { 58 | assert_eq!(features.len(), self.word_size); 59 | assert_eq!(errors.len(), self.word_size); 60 | assert_eq!(commitment.len(), self.word_size); 61 | let range = self.range(); 62 | let gate = self.gate(); 63 | let assigned_features = features 64 | .into_iter() 65 | .map(|val| gate.load_witness(ctx, Value::known(Fr::from(*val as u64)))) 66 | .collect_vec(); 67 | let assigned_errors = errors 68 | .into_iter() 69 | .map(|val| gate.load_witness(ctx, Value::known(Fr::from(*val as u64)))) 70 | .collect_vec(); 71 | let assigned_commitment = commitment 72 | .into_iter() 73 | .map(|val| gate.load_witness(ctx, Value::known(Fr::from(*val as u64)))) 74 | .collect_vec(); 75 | let features_bits = self.bytes2bits(ctx, &assigned_features); 76 | let errors_bits = self.bytes2bits(ctx, &assigned_errors); 77 | let commitment_bits = self.bytes2bits(ctx, &assigned_commitment); 78 | 79 | // 1. word errored = features XOR commitment 80 | let w_e = features_bits 81 | .iter() 82 | .zip(commitment_bits.iter()) 83 | .map(|(f, c)| self.xor(ctx, &f, &c)) 84 | .collect_vec(); 85 | // 2. word = word errored XOR error 86 | let word_bits = w_e 87 | .iter() 88 | .zip(errors_bits.iter()) 89 | .map(|(y, e)| self.xor(ctx, &y, &e)) 90 | .collect_vec(); 91 | let word_bytes = word_bits 92 | .chunks(8) 93 | .map(|bits| { 94 | let mut byte = gate.load_zero(ctx); 95 | for (idx, bit) in bits.into_iter().enumerate() { 96 | byte = gate.mul_add( 97 | ctx, 98 | QuantumCell::Existing(&bit), 99 | QuantumCell::Constant(Fr::from(1u64 << idx)), 100 | QuantumCell::Existing(&byte), 101 | ); 102 | } 103 | byte 104 | }) 105 | .collect_vec(); 106 | // 3. 
|e| < t 107 | let mut e_weight = gate.load_zero(ctx); 108 | for (idx, bit) in errors_bits.iter().enumerate() { 109 | e_weight = gate.add( 110 | ctx, 111 | QuantumCell::Existing(&e_weight), 112 | QuantumCell::Existing(&bit), 113 | ); 114 | } 115 | range.check_less_than_safe(ctx, &e_weight, self.error_threshold); 116 | let word_values = features 117 | .iter() 118 | .zip(errors.iter()) 119 | .zip(commitment.iter()) 120 | .map(|((f, e), c)| f ^ e ^ c) 121 | .collect_vec(); 122 | let assigned_feature_hash = 123 | poseidon.hash_elements(ctx, self.gate(), &word_bytes)?.0[0].clone(); 124 | 125 | Ok(FuzzyCommitmentResult { 126 | assigned_commitment, 127 | assigned_feature_hash, 128 | assigned_word: word_bytes, 129 | word_value: word_values, 130 | }) 131 | } 132 | 133 | pub fn range(&self) -> &RangeConfig { 134 | &self.range_config 135 | } 136 | 137 | pub fn gate(&self) -> &FlexGateConfig { 138 | self.range().gate() 139 | } 140 | 141 | pub fn new_context<'a, 'b>(&'b self, region: Region<'a, Fr>) -> Context<'a, Fr> { 142 | Context::new( 143 | region, 144 | ContextParams { 145 | max_rows: self.gate().max_rows, 146 | num_context_ids: 1, 147 | fixed_columns: self.gate().constants.clone(), 148 | }, 149 | ) 150 | } 151 | pub fn finalize(&self, ctx: &mut Context) { 152 | self.range().finalize(ctx); 153 | } 154 | 155 | fn bytes2bits<'v: 'a, 'a>( 156 | &self, 157 | ctx: &mut Context<'v, Fr>, 158 | assigned_bytes: &[AssignedValue<'a, Fr>], 159 | ) -> Vec> { 160 | let gate = self.gate(); 161 | let bits = assigned_bytes 162 | .into_iter() 163 | .flat_map(|byte| gate.num_to_bits(ctx, byte, 8)) 164 | .collect_vec(); 165 | assert_eq!(assigned_bytes.len() * 8, bits.len()); 166 | bits 167 | } 168 | 169 | fn xor<'v: 'a, 'a>( 170 | &self, 171 | ctx: &mut Context<'v, Fr>, 172 | a: &AssignedValue<'a, Fr>, 173 | b: &AssignedValue<'a, Fr>, 174 | ) -> AssignedValue<'a, Fr> { 175 | let gate = self.gate(); 176 | let a_not = gate.not(ctx, QuantumCell::Existing(&a)); 177 | let b_not = gate.not(ctx, QuantumCell::Existing(&b)); 178 | let ab_not = gate.and( 179 | ctx, 180 | QuantumCell::Existing(&a), 181 | QuantumCell::Existing(&b_not), 182 | ); 183 | let ba_not = gate.and( 184 | ctx, 185 | QuantumCell::Existing(&b), 186 | QuantumCell::Existing(&a_not), 187 | ); 188 | gate.or( 189 | ctx, 190 | QuantumCell::Existing(&ab_not), 191 | QuantumCell::Existing(&ba_not), 192 | ) 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /eth_voice_recovery/src/bin/voice_recovery.rs: -------------------------------------------------------------------------------- 1 | use clap::{Parser, Subcommand}; 2 | use eth_voice_recovery::*; 3 | 4 | #[derive(Parser, Debug, Clone)] 5 | #[command(author, version, about, long_about = None)] 6 | struct Cli { 7 | #[command(subcommand)] 8 | pub command: Commands, 9 | } 10 | 11 | #[derive(Debug, Subcommand, Clone)] 12 | enum Commands { 13 | /// Generate a setup parameter (not for production). 14 | GenParams { 15 | /// k parameter 16 | #[arg(long)] 17 | k: u32, 18 | /// setup parameter path 19 | #[arg(short, long)] 20 | params_path: String, 21 | }, 22 | /// Generate a proving key and a verifying key. 
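    // Example invocations (a sketch: the kebab-case subcommand and flag spellings
    // assume clap's default derive naming for the variants and fields in this enum):
    //   voice_recovery gen-params --k 22 --params-path ./build/params
    //   voice_recovery gen-keys --params-dir ./build/params --pk-dir ./build/pks --vk-path ./build/app.vk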
23 | GenKeys { 24 | /// setup parameter file 25 | #[arg(short, long, default_value = "./build/params")] 26 | params_dir: String, 27 | /// circuit configure file 28 | #[arg( 29 | short, 30 | long, 31 | default_value = "./eth_voice_recovery/configs/test1_circuit.config" 32 | )] 33 | app_circuit_config: String, 34 | #[arg( 35 | short, 36 | long, 37 | default_value = "./eth_voice_recovery/configs/agg_circuit.config" 38 | )] 39 | agg_circuit_config: String, 40 | /// proving key file path 41 | #[arg(long, default_value = "./build/pks")] 42 | pk_dir: String, 43 | /// verifying key file path 44 | #[arg(long, default_value = "./build/app.vk")] 45 | vk_path: String, 46 | }, 47 | Prove { 48 | /// setup parameter file 49 | #[arg(short, long, default_value = "./build/params")] 50 | params_dir: String, 51 | /// circuit configure file 52 | #[arg( 53 | short, 54 | long, 55 | default_value = "./eth_voice_recovery/configs/test1_circuit.config" 56 | )] 57 | app_circuit_config: String, 58 | #[arg( 59 | short, 60 | long, 61 | default_value = "./eth_voice_recovery/configs/agg_circuit.config" 62 | )] 63 | agg_circuit_config: String, 64 | /// proving key file path 65 | #[arg(long, default_value = "./build/pks")] 66 | pk_dir: String, 67 | /// input file path 68 | #[arg(long, default_value = "./build/input.json")] 69 | input_path: String, 70 | /// proof file path 71 | #[arg(long, default_value = "./build/proof.bin")] 72 | proof_path: String, 73 | /// public input file path 74 | #[arg(long, default_value = "./build/public_input.json")] 75 | public_input_path: String, 76 | }, 77 | EvmProve { 78 | /// setup parameter file 79 | #[arg(short, long, default_value = "./build/params")] 80 | params_dir: String, 81 | /// circuit configure file 82 | #[arg( 83 | short, 84 | long, 85 | default_value = "./eth_voice_recovery/configs/test1_circuit.config" 86 | )] 87 | app_circuit_config: String, 88 | #[arg( 89 | short, 90 | long, 91 | default_value = "./eth_voice_recovery/configs/agg_circuit.config" 92 | )] 93 | agg_circuit_config: String, 94 | /// proving key file path 95 | #[arg(long, default_value = "./build/pks")] 96 | pk_dir: String, 97 | /// input file path 98 | #[arg(long, default_value = "./build/input.json")] 99 | input_path: String, 100 | /// proof file path 101 | #[arg(long, default_value = "./build/evm_proof.hex")] 102 | proof_path: String, 103 | /// public input file path 104 | #[arg(long, default_value = "./build/evm_public_input.json")] 105 | public_input_path: String, 106 | }, 107 | Verify { 108 | /// setup parameter file 109 | #[arg(short, long, default_value = "./build/params")] 110 | params_dir: String, 111 | /// circuit configure file 112 | #[arg( 113 | short, 114 | long, 115 | default_value = "./eth_voice_recovery/configs/test1_circuit.config" 116 | )] 117 | app_circuit_config: String, 118 | #[arg( 119 | short, 120 | long, 121 | default_value = "./eth_voice_recovery/configs/agg_circuit.config" 122 | )] 123 | agg_circuit_config: String, 124 | /// verifying key file path 125 | #[arg(long, default_value = "./build/app.vk")] 126 | vk_path: String, 127 | /// public input file path 128 | #[arg(long, default_value = "./build/public_input.json")] 129 | public_input_path: String, 130 | /// proof file path 131 | #[arg(long, default_value = "./build/proof.bin")] 132 | proof_path: String, 133 | }, 134 | GenEvmVerifier { 135 | /// setup parameter file 136 | #[arg(short, long, default_value = "./build/params")] 137 | params_dir: String, 138 | /// circuit configure file 139 | #[arg( 140 | short, 141 | long, 142 | 
default_value = "./eth_voice_recovery/configs/test1_circuit.config" 143 | )] 144 | app_circuit_config: String, 145 | #[arg( 146 | short, 147 | long, 148 | default_value = "./eth_voice_recovery/configs/agg_circuit.config" 149 | )] 150 | agg_circuit_config: String, 151 | /// verifying key file path 152 | #[arg(long, default_value = "./build/app.vk")] 153 | vk_path: String, 154 | /// verifier code path 155 | #[arg(long, default_value = "./build/Verifier.sol")] 156 | code_path: String, 157 | }, 158 | } 159 | 160 | fn main() { 161 | let cli = Cli::parse(); 162 | match cli.command { 163 | Commands::GenParams { k, params_path } => gen_params(¶ms_path, k).unwrap(), 164 | Commands::GenKeys { 165 | params_dir, 166 | app_circuit_config, 167 | agg_circuit_config, 168 | pk_dir, 169 | vk_path, 170 | } => gen_keys( 171 | ¶ms_dir, 172 | &app_circuit_config, 173 | &agg_circuit_config, 174 | &pk_dir, 175 | &vk_path, 176 | ) 177 | .unwrap(), 178 | Commands::Prove { 179 | params_dir, 180 | app_circuit_config, 181 | agg_circuit_config, 182 | pk_dir, 183 | input_path, 184 | proof_path, 185 | public_input_path, 186 | } => prove( 187 | ¶ms_dir, 188 | &app_circuit_config, 189 | &agg_circuit_config, 190 | &pk_dir, 191 | &input_path, 192 | &proof_path, 193 | &public_input_path, 194 | ) 195 | .unwrap(), 196 | Commands::EvmProve { 197 | params_dir, 198 | app_circuit_config, 199 | agg_circuit_config, 200 | pk_dir, 201 | input_path, 202 | proof_path, 203 | public_input_path, 204 | } => evm_prove( 205 | ¶ms_dir, 206 | &app_circuit_config, 207 | &agg_circuit_config, 208 | &pk_dir, 209 | &input_path, 210 | &proof_path, 211 | &public_input_path, 212 | ) 213 | .unwrap(), 214 | Commands::Verify { 215 | params_dir, 216 | app_circuit_config, 217 | agg_circuit_config, 218 | vk_path, 219 | public_input_path, 220 | proof_path, 221 | } => verify( 222 | ¶ms_dir, 223 | &app_circuit_config, 224 | &agg_circuit_config, 225 | &vk_path, 226 | &public_input_path, 227 | &proof_path, 228 | ) 229 | .unwrap(), 230 | Commands::GenEvmVerifier { 231 | params_dir, 232 | app_circuit_config, 233 | agg_circuit_config, 234 | vk_path, 235 | code_path, 236 | } => gen_evm_verifier( 237 | ¶ms_dir, 238 | &app_circuit_config, 239 | &agg_circuit_config, 240 | &vk_path, 241 | &code_path, 242 | ) 243 | .unwrap(), 244 | } 245 | } 246 | -------------------------------------------------------------------------------- /app/src/App.js: -------------------------------------------------------------------------------- 1 | import React, { useEffect, useState } from "react"; 2 | import urlJoin from "url-join"; 3 | import "./App.css"; 4 | import RecordButton from "./RecordButton"; 5 | import VoiceKeyRecovery from "./contracts/VoiceKeyRecover.sol/VoiceKeyRecover.json"; 6 | import { Typography, Box, Card, CardContent, Modal, TextField } from "@mui/material"; 7 | import { ethers } from "ethers"; 8 | import RegisterStatus from "./RegisterStatus"; 9 | import Countdown from "./countdown"; 10 | import { countOnes } from "./util"; 11 | 12 | const contractAddress = "0x5FbDB2315678afecb367f032d93F642f64180aa3"; 13 | const apiUrl = "http://127.0.0.1:5000"; 14 | const threshold = 64; 15 | 16 | function App() { 17 | const [vk, setVk] = useState(null); 18 | const [sender, setSender] = useState(null); 19 | const [isRecording, setIsRecording] = useState(false); 20 | const [hashEcc, setHashRcc] = useState(null); 21 | const [codeErrorCount, setCodeErrorCount] = useState(null); 22 | const [featXorEcc, setFeatXorEcc] = useState(null); 23 | const [recoveredHashEcc, setRecoveredHashEcc] = 
useState(null); 24 | const [proof, setProof] = useState(null); 25 | const [hashFeatXorEcc, setHashFeatXorEcc] = useState(null); 26 | 27 | // registerStatus: 0: generating commitment, 1: commitment generation failed, 2: commitment generated, 3: sending commitment, 4: sending commitment failed, 5: commitment sent 28 | console.log(isRecording); 29 | const [registerStatus, setRegisterStatus] = useState(null); 30 | console.log("hashEcc: ", hashEcc, "featXorEcc: ", featXorEcc); 31 | 32 | const [registered, setRegistered] = useState(null); 33 | console.log("sender: ", sender); 34 | 35 | const checkRegistered = React.useCallback( 36 | async (vk, sender) => { 37 | if (vk) { 38 | const r = await vk.isRegistered(sender); 39 | console.log("registered: ", r); 40 | if (r) { 41 | if (registered === false) { 42 | alert("Your key is registered!"); 43 | } 44 | const commitmentData = await vk.voiceDataOfWallet(sender); 45 | // console.log("commitmentData: ", commitmentData); 46 | setHashRcc(commitmentData.featureHash); 47 | setFeatXorEcc(commitmentData.commitment); 48 | setHashFeatXorEcc(commitmentData.hashCommitment); 49 | } 50 | setRegistered(r); 51 | } else { 52 | console.error("VoiceKeyRecover contract not initialized yet"); 53 | } 54 | }, 55 | [registered] 56 | ); 57 | 58 | const CardContentStyle = { 59 | display: "flex", 60 | flexDirection: "column", 61 | alignItems: "center", 62 | justifyContent: "center", 63 | maxWidth: "60vw", 64 | padding: 5, 65 | backgroundColor: "#f5f5f5", 66 | }; 67 | 68 | const longTextSx = { 69 | marginBottom: 2, 70 | width: "100%", 71 | whiteSpace: "nowrap", 72 | overflow: "hidden", 73 | textOverflow: "ellipsis", 74 | }; 75 | 76 | useEffect(() => { 77 | (async () => { 78 | try { 79 | let signer; 80 | let provider; 81 | if (window.ethereum == null) { 82 | console.log("MetaMask not installed; using read-only defaults"); 83 | provider = ethers.getDefaultProvider(); 84 | } else { 85 | provider = new ethers.providers.Web3Provider(window.ethereum); 86 | await provider.send("eth_requestAccounts", []); 87 | signer = provider.getSigner(); 88 | const vk = new ethers.Contract( 89 | contractAddress, 90 | VoiceKeyRecovery.abi, 91 | signer 92 | ); 93 | console.log(VoiceKeyRecovery.abi); 94 | await checkRegistered(vk, sender); 95 | 96 | setVk(vk); 97 | console.log("initialized!"); 98 | setIsRecording(false); 99 | 100 | // vk.on("Registered", () => { 101 | // checkRegistered(vk, sender); 102 | // }).catch((err) => { 103 | // throw err; 104 | // }); 105 | 106 | return () => { 107 | vk.removeAllListeners(); 108 | }; 109 | } 110 | } catch (err) { 111 | console.error("Error initializing contract or provider:", err); 112 | } 113 | })(); 114 | }, [checkRegistered, sender]); 115 | 116 | const handleKeyRegisterWav = async (blob) => { 117 | setIsRecording(false); 118 | // Start generating the commitment 119 | setRegisterStatus(0); 120 | const url = urlJoin(apiUrl, "/api/feature-vector"); 121 | const formData = new FormData(); 122 | formData.append("file", blob, "recorded_audio.wav"); 123 | const response = await fetch(url, { method: "POST", body: formData }); 124 | const data = await response.json().catch((err) => { 125 | setRegisterStatus(1); 126 | throw err; 127 | }); 128 | console.log(data); 129 | setRegisterStatus(2); 130 | if (vk) { 131 | setRegisterStatus(3); 132 | await vk 133 | .register( 134 | sender, 135 | data.hash_ecc, 136 | data.hash_feat_xor_ecc, 137 | data.feat_xor_ecc 138 | ) 139 | .catch((err) => { 140 | setRegisterStatus(4); 141 | throw err; 142 | }); 143 | setRegisterStatus(5); 144 | } else { 145 | console.error("VoiceKeyRecover contract not initialized
yet"); 146 | } 147 | }; 148 | 149 | const handleKeyRecoverWav = async (blob) => { 150 | setIsRecording(false); 151 | // Commitment生成開始 152 | const url = urlJoin(apiUrl, "/api/gen-proof"); 153 | const formData = new FormData(); 154 | formData.append("file", blob, "recorded_audio.wav"); 155 | 156 | const jsonData = { 157 | hash_ecc: hashEcc, 158 | feat_xor_ecc: featXorEcc, 159 | msg: "0x9a8f43", 160 | }; 161 | formData.append("jsonData", JSON.stringify(jsonData)); 162 | const response = await fetch(url, { method: "POST", body: formData }); 163 | const data = await response.json().catch((err) => { 164 | throw err; 165 | }); 166 | console.log(data); 167 | // console.log(countOnes(data.code_error)); 168 | setCodeErrorCount(countOnes(data.code_error)); 169 | setRecoveredHashEcc(data.recovered_hash_ecc); 170 | setProof(data.proof) 171 | 172 | if (vk) { 173 | await vk 174 | // TODO: not sender, but from form 175 | .recover(sender, data.hash_ecc_msg, data.proof) 176 | .catch((err) => { 177 | throw err; 178 | }); 179 | } else { 180 | console.error("VoiceKeyRecover contract not initialized yet"); 181 | } 182 | }; 183 | 184 | return ( 185 |
195 | {registered ? ( 196 | 197 | 198 | 199 | Recover Your Key 200 | 201 | 206 | {codeErrorCount !== null && ( 207 | <> 208 | error bit count: {codeErrorCount} is less than threshold:{" "} 209 | {threshold} 210 | 211 | )} 212 | 213 | 214 | c =
{featXorEcc} 215 |
216 | 217 | h_W =
{hashEcc} 218 |
219 | {recoveredHashEcc && ( 220 | 225 | recovered h_W =
{recoveredHashEcc} 226 |
227 | )} 228 |
229 | 0x 230 | setSender(e.target.value)} width = "100%" label="Put your (wallet contract) account here" variant="outlined"/> 231 | 232 |
233 |
234 | ) : ( 235 | 236 | 237 | 238 |

You have not Registered!

239 |

Register Your Key

240 |
241 | 246 | 247 | 0x 248 | setSender(e.target.value)} width = "100%" label="Put your (wallet contract) account here" variant="outlined"/> 249 | 250 |
251 |
252 | )} 253 | 262 | 263 | 264 | 265 | Please read the following text.
266 | (recording...{isRecording && } s) 267 |
268 | 269 | The sun is shining and the birds are singing. 270 | 271 |
272 |
273 |
274 |
275 | ); 276 | } 277 | 278 | export default App; 279 | -------------------------------------------------------------------------------- /eth_voice_recovery/src/poseidon_circuit.rs: -------------------------------------------------------------------------------- 1 | use halo2_base::halo2_proofs::halo2curves::{bn256::Fr, group::ff::PrimeField}; 2 | // Orriginal https://github.com/SoraSuegami/halo2-fri-gadget/blob/main/src/hash/poseidon_bn254/chip.rs 3 | use halo2_base::halo2_proofs::{arithmetic::FieldExt, plonk::*}; 4 | use halo2_base::{ 5 | gates::{flex_gate::FlexGateConfig, GateInstructions}, 6 | AssignedValue, Context, 7 | QuantumCell::{Constant, Existing}, 8 | }; 9 | use poseidon::{Poseidon, SparseMDSMatrix, Spec, State}; 10 | use std::marker::PhantomData; 11 | 12 | pub trait HasherChip { 13 | type Digest<'v>: HasherChipDigest; 14 | 15 | fn new(ctx: &mut Context, main_gate: &FlexGateConfig) -> Self; 16 | 17 | fn hash_elements<'v>( 18 | &'v self, 19 | ctx: &mut Context<'_, F>, 20 | main_chip: &FlexGateConfig, 21 | values: &[AssignedValue<'v, F>], 22 | ) -> Result, Error>; 23 | 24 | fn hash_digests<'v>( 25 | &self, 26 | ctx: &mut Context<'_, F>, 27 | main_chip: &FlexGateConfig, 28 | values: Vec>, 29 | ) -> Result, Error> { 30 | todo!(); 31 | //let elements = values 32 | // .iter() 33 | // .flat_map(|x| x.to_assigned().to_vec()) 34 | // .collect::>(); 35 | //self.hash_elements(ctx, main_chip, &elements) 36 | } 37 | } 38 | 39 | // HASHER CHIP DIGEST 40 | // ========================================================================= 41 | 42 | pub trait HasherChipDigest: Clone { 43 | fn to_assigned(&self) -> &[AssignedValue]; 44 | } 45 | 46 | #[derive(Clone)] 47 | pub struct Digest<'v, F: FieldExt, const N: usize>(pub [AssignedValue<'v, F>; N]); 48 | 49 | impl<'a, F: FieldExt, const N: usize> Digest<'a, F, N> { 50 | pub fn new(values: Vec>) -> Digest<'a, F, N> { 51 | Self(values.try_into().unwrap()) 52 | } 53 | } 54 | 55 | impl HasherChipDigest for Digest<'_, F, N> { 56 | fn to_assigned(&self) -> &[AssignedValue] { 57 | self.0[..].as_ref() 58 | } 59 | } 60 | 61 | #[derive(Debug, Clone)] 62 | pub struct PoseidonChipBn254_8_58<'a, F: FieldExt>(PoseidonChip<'a, F, FlexGateConfig, 4, 3>); 63 | 64 | impl HasherChip for PoseidonChipBn254_8_58<'_, F> { 65 | type Digest<'v> = Digest<'v, F, 1>; 66 | 67 | fn new(ctx: &mut Context, flex_gate: &FlexGateConfig) -> Self { 68 | Self(PoseidonChip::, 4, 3>::new(ctx, flex_gate, 8, 58).unwrap()) 69 | } 70 | 71 | fn hash_elements<'v>( 72 | &'v self, 73 | ctx: &mut Context<'_, F>, 74 | main_chip: &FlexGateConfig, 75 | values: &[AssignedValue<'v, F>], 76 | ) -> Result, Error> { 77 | let value = self.0.hash(ctx, main_chip, &values)?; 78 | Ok(Digest([value; 1])) 79 | } 80 | } 81 | 82 | #[derive(Clone)] 83 | struct PoseidonState<'a, F: FieldExt, A: GateInstructions, const T: usize, const RATE: usize> { 84 | s: [AssignedValue<'a, F>; T], 85 | _marker: PhantomData, 86 | } 87 | 88 | impl<'a, F: FieldExt, A: GateInstructions, const T: usize, const RATE: usize> 89 | PoseidonState<'a, F, A, T, RATE> 90 | { 91 | fn x_power5_with_constant<'v>( 92 | ctx: &mut Context<'_, F>, 93 | chip: &A, 94 | x: &AssignedValue<'v, F>, 95 | constant: &F, 96 | ) -> AssignedValue<'v, F> { 97 | let x2 = chip.mul(ctx, Existing(x), Existing(x)); 98 | let x4 = chip.mul(ctx, Existing(&x2), Existing(&x2)); 99 | chip.mul_add(ctx, Existing(x), Existing(&x4), Constant(*constant)) 100 | } 101 | 102 | fn sbox_full( 103 | &mut self, 104 | ctx: &mut Context<'_, F>, 105 | chip: &A, 106 | constants: &[F; T], 
107 | ) -> Result<(), Error> { 108 | for (x, constant) in self.s.iter_mut().zip(constants.iter()) { 109 | *x = Self::x_power5_with_constant(ctx, chip, x, constant); 110 | } 111 | Ok(()) 112 | } 113 | 114 | fn sbox_part(&mut self, ctx: &mut Context<'_, F>, chip: &A, constant: &F) -> Result<(), Error> { 115 | let x = &mut self.s[0]; 116 | *x = Self::x_power5_with_constant(ctx, chip, x, constant); 117 | 118 | Ok(()) 119 | } 120 | 121 | fn absorb_with_pre_constants( 122 | &mut self, 123 | ctx: &mut Context<'_, F>, 124 | chip: &A, 125 | inputs: Vec>, 126 | pre_constants: &[F; T], 127 | ) -> Result<(), Error> { 128 | assert!(inputs.len() < T); 129 | let offset = inputs.len() + 1; 130 | 131 | if let Some(s_0) = self.s.get_mut(offset) { 132 | *s_0 = chip.add(ctx, Existing(&s_0), Constant(F::one())); 133 | } 134 | 135 | for (x, input) in self.s.iter_mut().skip(1).zip(inputs.iter()) { 136 | *x = chip.add(ctx, Existing(x), Existing(input)); 137 | } 138 | 139 | for (i, (x, constant)) in self.s.iter_mut().zip(pre_constants.iter()).enumerate() { 140 | *x = chip.add(ctx, Existing(x), Constant(*constant)); 141 | } 142 | 143 | Ok(()) 144 | } 145 | 146 | fn apply_mds( 147 | &mut self, 148 | ctx: &mut Context<'_, F>, 149 | chip: &A, 150 | mds: &[[F; T]; T], 151 | ) -> Result<(), Error> { 152 | let res = mds 153 | .iter() 154 | .map(|row| { 155 | let sum = chip.inner_product( 156 | ctx, 157 | self.s.iter().map(|a| Existing(a)), 158 | row.iter().map(|c| Constant(*c)), 159 | ); 160 | Ok(sum) 161 | }) 162 | .collect::, Error>>()?; 163 | 164 | self.s = res.try_into().unwrap(); 165 | 166 | Ok(()) 167 | } 168 | 169 | fn apply_sparse_mds( 170 | &mut self, 171 | ctx: &mut Context<'_, F>, 172 | chip: &A, 173 | mds: &SparseMDSMatrix, 174 | ) -> Result<(), Error> { 175 | let sum = chip.inner_product( 176 | ctx, 177 | self.s.iter().map(|a| Existing(a)), 178 | mds.row().iter().map(|c| Constant(*c)), 179 | ); 180 | let mut res = vec![sum]; 181 | 182 | for (e, x) in mds.col_hat().iter().zip(self.s.iter().skip(1)) { 183 | res.push(chip.mul_add(ctx, Existing(&self.s[0]), Constant(*e), Existing(x))); 184 | } 185 | 186 | for (x, new_x) in self.s.iter_mut().zip(res.into_iter()) { 187 | *x = new_x 188 | } 189 | 190 | Ok(()) 191 | } 192 | } 193 | 194 | #[derive(Debug, Clone)] 195 | pub struct PoseidonChip<'a, F: FieldExt, A: GateInstructions, const T: usize, const RATE: usize> 196 | { 197 | init_state: [AssignedValue<'a, F>; T], 198 | spec: Spec, 199 | _marker: PhantomData, 200 | } 201 | 202 | impl, const T: usize, const RATE: usize> 203 | PoseidonChip<'_, F, A, T, RATE> 204 | { 205 | pub fn new(ctx: &mut Context<'_, F>, chip: &A, r_f: usize, r_p: usize) -> Result { 206 | let init_state = State::::default() 207 | .words() 208 | .into_iter() 209 | .map(|x| { 210 | Ok(chip 211 | .assign_region(ctx, vec![Constant(x)], vec![]) 212 | .pop() 213 | .unwrap()) 214 | }) 215 | .collect::>, Error>>()?; 216 | Ok(Self { 217 | spec: Spec::new(r_f, r_p), 218 | init_state: init_state.clone().try_into().unwrap(), 219 | _marker: PhantomData, 220 | }) 221 | } 222 | 223 | pub fn hash<'v>( 224 | &self, 225 | ctx: &mut Context<'_, F>, 226 | chip: &A, 227 | elements: &[AssignedValue<'v, F>], 228 | ) -> Result, Error> { 229 | let init_state = State::::default() 230 | .words() 231 | .into_iter() 232 | .map(|x| { 233 | Ok(chip 234 | .assign_region(ctx, vec![Constant(x)], vec![]) 235 | .pop() 236 | .unwrap()) 237 | }) 238 | .collect::>, Error>>()?; 239 | let mut state = PoseidonState { 240 | s: init_state.try_into().unwrap(), 241 | _marker: PhantomData, 242 
| }; 243 | let mut absorbing = vec![]; 244 | 245 | // Update 246 | absorbing.extend_from_slice(elements); 247 | 248 | // Squeeze 249 | let mut input_elements = vec![]; 250 | input_elements.append(&mut absorbing); 251 | let mut padding_offset = 0; 252 | for chunk in input_elements.chunks(RATE) { 253 | padding_offset = RATE - chunk.len(); 254 | self.permutation(ctx, chip, &mut state, chunk.to_vec())?; 255 | } 256 | if padding_offset == 0 { 257 | self.permutation(ctx, chip, &mut state, vec![])?; 258 | } 259 | let out = state.s[1].clone(); 260 | 261 | Ok(out) 262 | } 263 | 264 | fn permutation<'v>( 265 | &self, 266 | ctx: &mut Context<'_, F>, 267 | chip: &A, 268 | state: &mut PoseidonState<'v, F, A, T, RATE>, 269 | inputs: Vec>, 270 | ) -> Result<(), Error> { 271 | let r_f = self.spec.r_f() / 2; 272 | let mds = &self.spec.mds_matrices().mds().rows(); 273 | 274 | let constants = &self.spec.constants().start(); 275 | state.absorb_with_pre_constants(ctx, chip, inputs, &constants[0])?; 276 | for constants in constants.iter().skip(1).take(r_f - 1) { 277 | state.sbox_full(ctx, chip, constants)?; 278 | state.apply_mds(ctx, chip, mds)?; 279 | } 280 | 281 | let pre_sparse_mds = &self.spec.mds_matrices().pre_sparse_mds().rows(); 282 | state.sbox_full(ctx, chip, constants.last().unwrap())?; 283 | state.apply_mds(ctx, chip, pre_sparse_mds)?; 284 | 285 | let sparse_matrices = &self.spec.mds_matrices().sparse_matrices(); 286 | let constants = &self.spec.constants().partial(); 287 | for (constant, sparse_mds) in constants.iter().zip(sparse_matrices.iter()) { 288 | state.sbox_part(ctx, chip, constant)?; 289 | state.apply_sparse_mds(ctx, chip, sparse_mds)?; 290 | } 291 | 292 | let constants = &self.spec.constants().end(); 293 | for constants in constants.iter() { 294 | state.sbox_full(ctx, chip, constants)?; 295 | state.apply_mds(ctx, chip, mds)?; 296 | } 297 | state.sbox_full(ctx, chip, &[F::zero(); T])?; 298 | state.apply_mds(ctx, chip, mds)?; 299 | 300 | Ok(()) 301 | } 302 | } 303 | 304 | type PoseidonBn254_4_3 = Poseidon; 305 | pub fn poseidon_hash(bytes: &[u8]) -> Fr { 306 | let inputs = bytes 307 | .into_iter() 308 | .map(|byte| Fr::from(*byte as u64)) 309 | .collect::>(); 310 | let mut hasher = PoseidonBn254_4_3::new(8, 58); 311 | hasher.update(&inputs[..]); 312 | hasher.squeeze() 313 | } 314 | -------------------------------------------------------------------------------- /eth_voice_recovery/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod fuzzy; 2 | use std::fs::File; 3 | use std::marker::PhantomData; 4 | pub mod helper; 5 | pub mod poseidon_circuit; 6 | use crate::fuzzy::*; 7 | use crate::poseidon_circuit::*; 8 | use halo2_base::halo2_proofs::circuit::{AssignedCell, Cell, Region, SimpleFloorPlanner, Value}; 9 | use halo2_base::halo2_proofs::halo2curves::bn256::Fr; 10 | use halo2_base::halo2_proofs::halo2curves::FieldExt; 11 | use halo2_base::halo2_proofs::plonk::{Circuit, Column, ConstraintSystem, Instance}; 12 | use halo2_base::halo2_proofs::{circuit::Layouter, plonk::Error}; 13 | use halo2_base::{ 14 | gates::{flex_gate::FlexGateConfig, range::RangeConfig, GateInstructions}, 15 | utils::PrimeField, 16 | Context, 17 | }; 18 | use halo2_base::{ 19 | gates::{range::RangeStrategy::Vertical, RangeInstructions}, 20 | ContextParams, SKIP_FIRST_PASS, 21 | }; 22 | use halo2_base::{AssignedValue, QuantumCell}; 23 | // use halo2_dynamic_sha256::{ 24 | // AssignedHashResult, Field, Sha256CompressionConfig, Sha256DynamicConfig, 25 | // }; 26 | pub use 
crate::helper::*; 27 | use itertools::Itertools; 28 | use serde_json; 29 | // use sha2::{Digest, Sha256}; 30 | use poseidon::Poseidon; 31 | use snark_verifier_sdk::CircuitExt; 32 | 33 | #[derive(Debug, Clone)] 34 | pub struct VoiceRecoverResult<'a> { 35 | pub assigned_commitment: Vec>, 36 | pub assigned_commitment_hash: AssignedValue<'a, Fr>, 37 | pub assigned_feature_hash: AssignedValue<'a, Fr>, 38 | pub assigned_message: Vec>, 39 | pub assigned_message_hash: AssignedValue<'a, Fr>, 40 | } 41 | 42 | #[derive(Debug, Clone)] 43 | pub struct VoiceRecoverConfig { 44 | fuzzy_commitment: FuzzyCommitmentConfig, 45 | max_msg_size: usize, 46 | } 47 | 48 | impl VoiceRecoverConfig { 49 | pub fn configure( 50 | meta: &mut ConstraintSystem, 51 | word_size: usize, 52 | max_msg_size: usize, 53 | range_config: RangeConfig, 54 | error_threshold: u64, 55 | ) -> Self { 56 | let fuzzy_commitment = FuzzyCommitmentConfig::configure( 57 | meta, 58 | range_config.clone(), 59 | error_threshold, 60 | word_size, 61 | ); 62 | // let sha256_comp_configs = (0..num_sha2_compression_per_column) 63 | // .map(|_| Sha256CompressionConfig::configure(meta)) 64 | // .collect(); 65 | // let max_size = word_size + max_msg_size; 66 | // let max_size = max_size + (64 - (max_size % 64)); 67 | // let msg_hash_sha256_config = 68 | // Sha256DynamicConfig::construct(sha256_comp_configs, max_size, range_config); 69 | Self { 70 | fuzzy_commitment, 71 | max_msg_size, 72 | } 73 | } 74 | 75 | pub fn auth_and_sign<'v: 'a, 'a>( 76 | &self, 77 | ctx: &mut Context<'v, Fr>, 78 | poseidon: &'a PoseidonChipBn254_8_58<'v, Fr>, 79 | features: &[u8], 80 | errors: &[u8], 81 | commitment: &[u8], 82 | message: &[u8], 83 | ) -> Result, Error> { 84 | let fuzzy_result = self 85 | .fuzzy_commitment 86 | .recover_and_hash(ctx, poseidon, features, errors, commitment)?; 87 | let mut message_ext = message.to_vec(); 88 | message_ext.append(&mut vec![0; self.max_msg_size - message.len()]); 89 | // let msg_hash_input_bytes = vec![fuzzy_result.word_value, message.to_vec()].concat(); 90 | // let msg_hash_result = self 91 | // .msg_hash_sha256_config 92 | // .digest(ctx, &msg_hash_input_bytes)?; 93 | let gate = self.gate(); 94 | let assigned_message = message_ext 95 | .into_iter() 96 | .map(|val| gate.load_witness(ctx, Value::known(Fr::from(val as u64)))) 97 | .collect_vec(); 98 | let hash_input = vec![fuzzy_result.assigned_word, assigned_message.clone()].concat(); 99 | let assigned_message_hash = poseidon.hash_elements(ctx, &gate, &hash_input)?.0[0].clone(); 100 | let assigned_commitment_hash = poseidon 101 | .hash_elements(ctx, &gate, &fuzzy_result.assigned_commitment)? 
102 | .0[0] 103 | .clone(); 104 | // for idx in 0..self.fuzzy_commitment.word_size { 105 | // gate.assert_equal( 106 | // ctx, 107 | // QuantumCell::Existing(&msg_hash_result.input_bytes[idx]), 108 | // QuantumCell::Existing(&fuzzy_result.assigned_word[idx]), 109 | // ); 110 | // } 111 | // let range = self.range(); 112 | // let msg_len = gate.sub( 113 | // ctx, 114 | // QuantumCell::Existing(&msg_hash_result.input_len), 115 | // QuantumCell::Existing(&fuzzy_result.assigned_word_len), 116 | // ); 117 | // for idx in 0..self.max_msg_size { 118 | // let is_enable = range.is_less_than( 119 | // ctx, 120 | // QuantumCell::Constant(F::from(idx as u64)), 121 | // QuantumCell::Existing(&msg_len), 122 | // 64, 123 | // ); 124 | // let enabled_byte0 = gate.mul( 125 | // ctx, 126 | // QuantumCell::Existing(&is_enable), 127 | // QuantumCell::Existing( 128 | // &msg_hash_result.input_bytes[self.fuzzy_commitment.word_size + idx], 129 | // ), 130 | // ); 131 | // let enabled_byte1 = gate.mul( 132 | // ctx, 133 | // QuantumCell::Existing(&is_enable), 134 | // QuantumCell::Existing(&assigned_message[idx]), 135 | // ); 136 | // gate.assert_equal( 137 | // ctx, 138 | // QuantumCell::Existing(&enabled_byte0), 139 | // QuantumCell::Existing(&enabled_byte1), 140 | // ); 141 | // } 142 | 143 | Ok(VoiceRecoverResult { 144 | assigned_commitment: fuzzy_result.assigned_commitment, 145 | assigned_feature_hash: fuzzy_result.assigned_feature_hash, 146 | assigned_message, 147 | assigned_message_hash, 148 | assigned_commitment_hash, 149 | }) 150 | } 151 | 152 | pub fn range(&self) -> &RangeConfig { 153 | self.fuzzy_commitment.range() 154 | } 155 | 156 | pub fn gate(&self) -> &FlexGateConfig { 157 | self.range().gate() 158 | } 159 | 160 | pub fn new_context<'a, 'b>(&'b self, region: Region<'a, Fr>) -> Context<'a, Fr> { 161 | self.fuzzy_commitment.new_context(region) 162 | } 163 | 164 | pub fn finalize(&self, ctx: &mut Context) { 165 | self.fuzzy_commitment.finalize(ctx); 166 | } 167 | } 168 | 169 | pub const VOICE_RECOVER_CONFIG_ENV: &'static str = "EMAIL_VERIFY_CONFIG"; 170 | #[derive(serde::Serialize, serde::Deserialize)] 171 | pub struct DefaultVoiceRecoverConfigParams { 172 | pub degree: u32, 173 | pub num_advice: usize, 174 | pub num_lookup_advice: usize, 175 | pub num_fixed: usize, 176 | pub lookup_bits: usize, 177 | pub error_threshold: u64, 178 | pub word_size: usize, 179 | pub max_msg_size: usize, 180 | } 181 | 182 | #[derive(Debug, Clone)] 183 | pub struct DefaultVoiceRecoverConfig { 184 | inner: VoiceRecoverConfig, 185 | instance: Column, // 1. commitment hash 2. feature hash 3. message hash 4. 
message 186 | } 187 | 188 | #[derive(Debug, Clone)] 189 | pub struct DefaultVoiceRecoverCircuit { 190 | pub features: Vec, 191 | pub errors: Vec, 192 | pub commitment: Vec, 193 | pub message: Vec, 194 | } 195 | 196 | impl Default for DefaultVoiceRecoverCircuit { 197 | fn default() -> Self { 198 | let params = Self::read_config_params(); 199 | let word_size = params.word_size; 200 | Self { 201 | features: vec![0; word_size], 202 | errors: vec![0; word_size], 203 | commitment: vec![0; word_size], 204 | message: vec![], 205 | } 206 | } 207 | } 208 | 209 | impl Circuit for DefaultVoiceRecoverCircuit { 210 | type Config = DefaultVoiceRecoverConfig; 211 | type FloorPlanner = SimpleFloorPlanner; 212 | 213 | fn without_witnesses(&self) -> Self { 214 | Self::default() 215 | } 216 | 217 | fn configure(meta: &mut ConstraintSystem) -> Self::Config { 218 | let params = Self::read_config_params(); 219 | let range_config = RangeConfig::configure( 220 | meta, 221 | Vertical, 222 | &[params.num_advice], 223 | &[params.num_lookup_advice], 224 | params.num_fixed, 225 | params.lookup_bits, 226 | 0, 227 | params.degree as usize, 228 | ); 229 | let inner = VoiceRecoverConfig::configure( 230 | meta, 231 | params.word_size, 232 | params.max_msg_size, 233 | range_config, 234 | params.error_threshold, 235 | ); 236 | let instance = meta.instance_column(); 237 | meta.enable_equality(instance); 238 | Self::Config { inner, instance } 239 | } 240 | 241 | fn synthesize( 242 | &self, 243 | config: Self::Config, 244 | mut layouter: impl Layouter, 245 | ) -> Result<(), Error> { 246 | config.inner.range().load_lookup_table(&mut layouter)?; 247 | let mut first_pass = SKIP_FIRST_PASS; 248 | let mut instance_cell = vec![]; 249 | layouter.assign_region( 250 | || "voice recover", 251 | |region| { 252 | if first_pass { 253 | first_pass = false; 254 | return Ok(()); 255 | } 256 | let ctx = &mut config.inner.new_context(region); 257 | let poseidon = PoseidonChipBn254_8_58::new(ctx, config.inner.gate()); 258 | let result = config.inner.auth_and_sign( 259 | ctx, 260 | &poseidon, 261 | &self.features, 262 | &self.errors, 263 | &self.commitment, 264 | &self.message, 265 | )?; 266 | let gate = config.inner.gate(); 267 | let packed_msg = result 268 | .assigned_message 269 | .chunks(16) 270 | .map(|bytes| { 271 | let mut sum = gate.load_zero(ctx); 272 | for idx in 0..16 { 273 | sum = gate.mul_add( 274 | ctx, 275 | QuantumCell::Existing(&bytes[idx]), 276 | QuantumCell::Constant(Fr::from_u128(1u128 << (8 * idx))), 277 | QuantumCell::Existing(&sum), 278 | ); 279 | } 280 | sum 281 | }) 282 | .collect_vec(); 283 | debug_assert_eq!(16 * packed_msg.len(), result.assigned_message.len()); 284 | config.inner.finalize(ctx); 285 | instance_cell.push(result.assigned_commitment_hash.cell()); 286 | // result 287 | // .assigned_feature_hash 288 | // .value() 289 | // .map(|v| println!("assigned feature hash {:?}", v)); 290 | instance_cell.push(result.assigned_feature_hash.cell()); 291 | // result 292 | // .assigned_message_hash 293 | // .value() 294 | // .map(|v| println!("assigned message hash {:?}", v)); 295 | instance_cell.push(result.assigned_message_hash.cell()); 296 | instance_cell.append(&mut packed_msg.into_iter().map(|v| v.cell()).collect_vec()); 297 | 298 | Ok(()) 299 | }, 300 | )?; 301 | for (idx, cell) in instance_cell.into_iter().enumerate() { 302 | layouter.constrain_instance(cell, config.instance, idx)?; 303 | } 304 | Ok(()) 305 | } 306 | } 307 | 308 | impl CircuitExt for DefaultVoiceRecoverCircuit { 309 | fn num_instance(&self) -> Vec { 
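        // Three hash instances (commitment hash, feature hash, message hash),
        // plus one field element for every 16 bytes of the zero-padded message.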
310 | let params = Self::read_config_params(); 311 | vec![3 + params.max_msg_size / 16] 312 | } 313 | 314 | fn instances(&self) -> Vec> { 315 | let mut instances = vec![]; 316 | let commitment_hash = poseidon_hash(&self.commitment); 317 | // println!( 318 | // "commitment hash {}", 319 | // hex::encode(&commitment_hash.to_bytes()) 320 | // ); 321 | instances.push(commitment_hash); 322 | let word = self 323 | .features 324 | .iter() 325 | .zip(self.commitment.iter()) 326 | .zip(self.errors.iter()) 327 | .map(|((f, c), e)| f ^ c ^ e) 328 | .collect_vec(); 329 | let feature_hash = poseidon_hash(&word); 330 | // println!("feature hash {}", hex::encode(&feature_hash.to_bytes())); 331 | instances.push(feature_hash); 332 | let mut message_ext = self.message.to_vec(); 333 | let config_params = Self::read_config_params(); 334 | message_ext.append(&mut vec![ 335 | 0; 336 | config_params.max_msg_size - self.message.len() 337 | ]); 338 | let mut packed_message = message_ext 339 | .chunks(16) 340 | .map(|bytes| Fr::from_u128(u128::from_le_bytes(bytes.try_into().unwrap()))) 341 | .collect_vec(); 342 | // let mut message_public = message_ext 343 | // .iter() 344 | // .map(|byte| Fr::from(*byte as u64)) 345 | // .collect_vec(); 346 | let message_hash = poseidon_hash(&[word.to_vec(), message_ext].concat()); 347 | // println!("message hash {}", hex::encode(&message_hash.to_bytes())); 348 | instances.push(message_hash); 349 | instances.append(&mut packed_message); 350 | vec![instances] 351 | } 352 | } 353 | 354 | impl DefaultVoiceRecoverCircuit { 355 | pub fn read_config_params() -> DefaultVoiceRecoverConfigParams { 356 | let path = std::env::var(VOICE_RECOVER_CONFIG_ENV) 357 | .expect("You should set the configure file path to VOICE_RECOVER_CONFIG_ENV."); 358 | let params: DefaultVoiceRecoverConfigParams = serde_json::from_reader( 359 | File::open(path.as_str()).expect(&format!("{} does not exist.", path)), 360 | ) 361 | .expect("File is found but invalid."); 362 | params 363 | } 364 | } 365 | 366 | #[cfg(test)] 367 | mod test { 368 | use super::*; 369 | use halo2_base::halo2_proofs::{dev::MockProver, halo2curves::bn256::Fr}; 370 | use rand::{seq::SliceRandom, thread_rng, Rng}; 371 | use sha2::{Digest, Sha256}; 372 | 373 | #[test] 374 | fn test_correct1() { 375 | temp_env::with_var( 376 | VOICE_RECOVER_CONFIG_ENV, 377 | Some("./configs/test1_circuit.config"), 378 | || { 379 | let vec_len = 140 * 8; 380 | let hamming_weight = 99; 381 | let features_bits = gen_random_vec_bits(vec_len); 382 | let word_bits = gen_random_vec_bits(vec_len); 383 | let error_bits = gen_error_term_bits(hamming_weight, vec_len); 384 | let commitment_bits = features_bits 385 | .iter() 386 | .zip(word_bits.iter()) 387 | .zip(error_bits.iter()) 388 | .map(|((f, w), e)| f ^ w ^ e) 389 | .collect_vec(); 390 | let features_bytes = bool_slice_to_le_bytes(&features_bits); 391 | println!("features_bytes {}", hex::encode(&features_bytes)); 392 | let word_bytes = bool_slice_to_le_bytes(&word_bits); 393 | println!("word_bytes {}", hex::encode(&word_bytes)); 394 | let error_bytes = bool_slice_to_le_bytes(&error_bits); 395 | println!("error_bytes {}", hex::encode(&error_bytes)); 396 | let commitment_bytes = bool_slice_to_le_bytes(&commitment_bits); 397 | println!("commitment_bytes {}", hex::encode(&commitment_bytes)); 398 | let message = b"test".to_vec(); 399 | let circuit = DefaultVoiceRecoverCircuit { 400 | features: features_bytes, 401 | errors: error_bytes, 402 | commitment: commitment_bytes, 403 | message, 404 | }; 405 | let instance = 
circuit.instances(); 406 | let prover = MockProver::run(20, &circuit, instance).unwrap(); 407 | assert_eq!(prover.verify(), Ok(())); 408 | }, 409 | ); 410 | } 411 | 412 | fn gen_random_vec_bits(vec_len: usize) -> Vec { 413 | let mut rng = rand::thread_rng(); 414 | let mut result = vec![false; vec_len]; 415 | for i in 0..vec_len { 416 | result[i] = rng.gen(); 417 | } 418 | result 419 | } 420 | 421 | fn gen_error_term_bits(hamming_weight: usize, vec_len: usize) -> Vec { 422 | let mut rng = rand::thread_rng(); 423 | let mut result = vec![false; vec_len]; 424 | for i in 0..hamming_weight { 425 | result[i] = true; 426 | } 427 | result.shuffle(&mut rng); 428 | result 429 | } 430 | 431 | fn bool_slice_to_le_bytes(bool_slice: &[bool]) -> Vec { 432 | let mut result = vec![]; 433 | for i in (0..bool_slice.len()).step_by(8) { 434 | let mut byte = 0u8; 435 | for j in 0..8 { 436 | if bool_slice[i + j] { 437 | byte |= 1 << j; 438 | } 439 | } 440 | result.push(byte); 441 | } 442 | result 443 | } 444 | } 445 | -------------------------------------------------------------------------------- /eth_voice_recovery/src/helper.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | DefaultVoiceRecoverCircuit, DefaultVoiceRecoverConfig, DefaultVoiceRecoverConfigParams, 3 | VOICE_RECOVER_CONFIG_ENV, 4 | }; 5 | use clap::{Parser, Subcommand}; 6 | use halo2_base::halo2_proofs::circuit::Value; 7 | use halo2_base::halo2_proofs::dev::MockProver; 8 | use halo2_base::halo2_proofs::halo2curves::bn256::{Bn256, Fq, Fr, G1Affine}; 9 | use halo2_base::halo2_proofs::halo2curves::FieldExt; 10 | use halo2_base::halo2_proofs::plonk::{ 11 | create_proof, keygen_pk, keygen_vk, verify_proof, Error, ProvingKey, VerifyingKey, 12 | }; 13 | use halo2_base::halo2_proofs::poly::commitment::{Params, ParamsProver}; 14 | use halo2_base::halo2_proofs::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; 15 | use halo2_base::halo2_proofs::poly::kzg::multiopen::{ProverGWC, VerifierGWC}; 16 | use halo2_base::halo2_proofs::poly::kzg::strategy::SingleStrategy; 17 | use halo2_base::halo2_proofs::poly::VerificationStrategy; 18 | use halo2_base::halo2_proofs::transcript::{ 19 | Blake2bRead, Blake2bWrite, Challenge255, TranscriptReadBuffer, TranscriptWrite, 20 | TranscriptWriterBuffer, 21 | }; 22 | use halo2_base::halo2_proofs::SerdeFormat; 23 | use hex; 24 | use itertools::Itertools; 25 | use num_bigint::BigUint; 26 | use num_traits::Pow; 27 | use rand::rngs::OsRng; 28 | use rand::thread_rng; 29 | use regex::Regex; 30 | use serde::{Deserialize, Serialize}; 31 | use sha2::{Digest, Sha256}; 32 | use snark_verifier::loader::evm::EvmLoader; 33 | use snark_verifier::pcs::kzg::{Gwc19, Kzg}; 34 | use snark_verifier::system::halo2::transcript::evm::EvmTranscript; 35 | use snark_verifier::system::halo2::{compile, Config}; 36 | use snark_verifier::verifier::{Plonk, PlonkVerifier}; 37 | use snark_verifier_sdk::evm::{encode_calldata, gen_evm_proof_gwc, gen_evm_verifier_gwc}; 38 | use snark_verifier_sdk::halo2::aggregation::PublicAggregationCircuit; 39 | use snark_verifier_sdk::halo2::gen_snark_gwc; 40 | use snark_verifier_sdk::{gen_pk, CircuitExt, LIMBS}; 41 | use std::env::set_var; 42 | use std::fs::{self, File}; 43 | use std::io::BufRead; 44 | use std::io::{BufReader, BufWriter, Read, Write}; 45 | use std::marker::PhantomData; 46 | use std::path::{Path, PathBuf}; 47 | use std::rc::Rc; 48 | 49 | #[derive(serde::Serialize, serde::Deserialize)] 50 | pub struct DefaultVoiceRecoverCircuitInput { 51 | features: 
String, 52 | errors: String, 53 | commitment: String, 54 | message: String, 55 | } 56 | 57 | #[derive(serde::Serialize, serde::Deserialize)] 58 | pub struct DefaultVoiceRecoverCircuitPublicInput { 59 | commitment: String, 60 | commitment_hash: String, 61 | message: String, 62 | feature_hash: String, 63 | message_hash: String, 64 | // acc: String, 65 | } 66 | 67 | pub fn gen_params(params_path: &str, k: u32) -> Result<(), Error> { 68 | let rng = thread_rng(); 69 | let params = ParamsKZG::::setup(k, rng); 70 | let f = File::create(params_path).unwrap(); 71 | let mut writer = BufWriter::new(f); 72 | params.write(&mut writer).unwrap(); 73 | writer.flush().unwrap(); 74 | Ok(()) 75 | } 76 | 77 | pub fn gen_keys( 78 | params_dir: &str, 79 | app_circuit_config: &str, 80 | agg_circuit_config: &str, 81 | pk_dir: &str, 82 | vk_path: &str, 83 | ) -> Result<(), Error> { 84 | set_var(VOICE_RECOVER_CONFIG_ENV, app_circuit_config); 85 | set_var("VERIFY_CONFIG", agg_circuit_config); 86 | let app_params = { 87 | let f = File::open(Path::new(params_dir).join("app.bin")).unwrap(); 88 | let mut reader = BufReader::new(f); 89 | ParamsKZG::::read(&mut reader).unwrap() 90 | }; 91 | // let agg_params = { 92 | // let f = File::open(Path::new(params_dir).join("agg.bin")).unwrap(); 93 | // let mut reader = BufReader::new(f); 94 | // ParamsKZG::::read(&mut reader).unwrap() 95 | // }; 96 | let circuit = DefaultVoiceRecoverCircuit::default(); 97 | let app_pk = gen_pk::( 98 | &app_params, 99 | &circuit, 100 | Some(&Path::new(pk_dir).join("app.pk")), 101 | ); 102 | println!("app pk generated"); 103 | // let snark = gen_snark_gwc( 104 | // &app_params, 105 | // &app_pk, 106 | // circuit.clone(), 107 | // &mut OsRng, 108 | // None::<&str>, 109 | // ); 110 | // println!("snark generated"); 111 | // let agg_circuit = PublicAggregationCircuit::new(&agg_params, vec![snark], false, &mut OsRng); 112 | // let agg_pk = gen_pk::( 113 | // &agg_params, 114 | // &agg_circuit, 115 | // Some(&Path::new(pk_dir).join("agg.pk")), 116 | // ); 117 | // println!("agg pk generated"); 118 | 119 | let vk = app_pk.get_vk(); 120 | { 121 | let f = File::create(vk_path).unwrap(); 122 | let mut writer = BufWriter::new(f); 123 | vk.write(&mut writer, SerdeFormat::RawBytesUnchecked) 124 | .unwrap(); 125 | writer.flush().unwrap(); 126 | } 127 | Ok(()) 128 | } 129 | 130 | pub fn prove( 131 | params_dir: &str, 132 | app_circuit_config: &str, 133 | agg_circuit_config: &str, 134 | pk_dir: &str, 135 | input_path: &str, 136 | proof_path: &str, 137 | public_input_path: &str, 138 | ) -> Result<(), Error> { 139 | set_var(VOICE_RECOVER_CONFIG_ENV, app_circuit_config); 140 | set_var("VERIFY_CONFIG", agg_circuit_config); 141 | let app_params = { 142 | let f = File::open(Path::new(params_dir).join("app.bin")).unwrap(); 143 | let mut reader = BufReader::new(f); 144 | ParamsKZG::::read(&mut reader).unwrap() 145 | }; 146 | // let agg_params = { 147 | // let f = File::open(Path::new(params_dir).join("agg.bin")).unwrap(); 148 | // let mut reader = BufReader::new(f); 149 | // ParamsKZG::::read(&mut reader).unwrap() 150 | // }; 151 | let app_pk = { 152 | let f = File::open(Path::new(pk_dir).join("app.pk")).unwrap(); 153 | let mut reader = BufReader::new(f); 154 | ProvingKey::::read::<_, DefaultVoiceRecoverCircuit>( 155 | &mut reader, 156 | SerdeFormat::RawBytesUnchecked, 157 | ) 158 | .unwrap() 159 | }; 160 | let input = serde_json::from_reader::( 161 | File::open(input_path).unwrap(), 162 | ) 163 | .unwrap(); 164 | let features = 
hex::decode(&input.features[2..]).unwrap(); 165 | let errors = hex::decode(&input.errors[2..]).unwrap(); 166 | let commitment = hex::decode(&input.commitment[2..]).unwrap(); 167 | let message = hex::decode(&input.message[2..]).unwrap(); 168 | let circuit = DefaultVoiceRecoverCircuit { 169 | features, 170 | errors, 171 | commitment, 172 | message, 173 | }; 174 | let instances = circuit.instances(); 175 | // let snark = gen_snark_gwc(&app_params, &app_pk, circuit, &mut OsRng, None::<&str>); 176 | // println!("app snark generated"); 177 | // let agg_circuit = PublicAggregationCircuit::new(&agg_params, vec![snark], false, &mut OsRng); 178 | // println!("agg_circuit created"); 179 | // let prover = MockProver::run(agg_params.k(), &agg_circuit, agg_circuit.instances()).unwrap(); 180 | // prover.assert_satisfied(); 181 | // let agg_pk = { 182 | // let f = File::open(Path::new(pk_dir).join("agg.pk")).unwrap(); 183 | // let mut reader = BufReader::new(f); 184 | // ProvingKey::::read::<_, PublicAggregationCircuit>( 185 | // &mut reader, 186 | // SerdeFormat::RawBytesUnchecked, 187 | // ) 188 | // .unwrap() 189 | // }; 190 | // println!("agg_pk extracted"); 191 | // let acc = agg_circuit.instances()[0][0..4 * LIMBS].to_vec(); 192 | // let snark = gen_snark_gwc(&agg_params, &agg_pk, agg_circuit, &mut OsRng, None::<&str>); 193 | // println!("agg snark generated"); 194 | 195 | let proof = { 196 | let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); 197 | create_proof::, ProverGWC<_>, _, _, _, _>( 198 | &app_params, 199 | &app_pk, 200 | &vec![circuit.clone()], 201 | &[&[instances[0].as_slice()]], 202 | OsRng, 203 | &mut transcript, 204 | ) 205 | .unwrap(); 206 | transcript.finalize() 207 | }; 208 | 209 | { 210 | let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); 211 | let verifier_params = app_params.verifier_params(); 212 | let strategy = SingleStrategy::new(&verifier_params); 213 | // let strategy = AccumulatorStrategy::new(verifier_params); 214 | verify_proof::<_, VerifierGWC<_>, _, _, _>( 215 | &app_params, 216 | &app_pk.get_vk(), 217 | strategy, 218 | &[&[instances[0].as_slice()]], 219 | &mut transcript, 220 | ) 221 | .unwrap(); 222 | }; 223 | { 224 | let f = File::create(proof_path).unwrap(); 225 | let mut writer = BufWriter::new(f); 226 | writer.write_all(&proof).unwrap(); 227 | writer.flush().unwrap(); 228 | }; 229 | let public_input = DefaultVoiceRecoverCircuitPublicInput { 230 | commitment: input.commitment, 231 | commitment_hash: format!("0x{}", hex::encode(instances[0][0].to_bytes()).as_str(),), 232 | feature_hash: format!("0x{}", hex::encode(instances[0][1].to_bytes()).as_str(),), 233 | message_hash: format!("0x{}", hex::encode(instances[0][2].to_bytes()).as_str()), 234 | message: input.message, // acc: format!( 235 | // "0x{}", 236 | // hex::encode(acc.iter().map(|v| v.get_lower_128() as u8).collect_vec(),).as_str() 237 | // ), 238 | }; 239 | { 240 | let public_input_str = serde_json::to_string(&public_input).unwrap(); 241 | let mut file = File::create(public_input_path)?; 242 | write!(file, "{}", public_input_str).unwrap(); 243 | file.flush().unwrap(); 244 | } 245 | Ok(()) 246 | } 247 | 248 | pub fn evm_prove( 249 | params_dir: &str, 250 | app_circuit_config: &str, 251 | agg_circuit_config: &str, 252 | pk_dir: &str, 253 | input_path: &str, 254 | proof_path: &str, 255 | public_input_path: &str, 256 | ) -> Result<(), Error> { 257 | set_var(VOICE_RECOVER_CONFIG_ENV, app_circuit_config); 258 | set_var("VERIFY_CONFIG", agg_circuit_config); 
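    // Unlike `prove`, this path generates the proof with `gen_evm_proof_gwc` and
    // writes it as a 0x-prefixed hex string; the hash instances are serialized
    // with `encode_calldata` so they can be passed to the on-chain verifier.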
259 | // let params = { 260 | // let f = File::open(params_path).unwrap(); 261 | // let mut reader = BufReader::new(f); 262 | // ParamsKZG::::read(&mut reader).unwrap() 263 | // }; 264 | let app_params = { 265 | let f = File::open(Path::new(params_dir).join("app.bin")).unwrap(); 266 | let mut reader = BufReader::new(f); 267 | ParamsKZG::::read(&mut reader).unwrap() 268 | }; 269 | // let agg_params = { 270 | // let f = File::open(Path::new(params_dir).join("agg.bin")).unwrap(); 271 | // let mut reader = BufReader::new(f); 272 | // ParamsKZG::::read(&mut reader).unwrap() 273 | // }; 274 | let app_pk = { 275 | let f = File::open(Path::new(pk_dir).join("app.pk")).unwrap(); 276 | let mut reader = BufReader::new(f); 277 | ProvingKey::::read::<_, DefaultVoiceRecoverCircuit>( 278 | &mut reader, 279 | SerdeFormat::RawBytesUnchecked, 280 | ) 281 | .unwrap() 282 | }; 283 | let input = serde_json::from_reader::( 284 | File::open(input_path).unwrap(), 285 | ) 286 | .unwrap(); 287 | let features = hex::decode(&input.features[2..]).unwrap(); 288 | let errors = hex::decode(&input.errors[2..]).unwrap(); 289 | let commitment = hex::decode(&input.commitment[2..]).unwrap(); 290 | let message = hex::decode(&input.message[2..]).unwrap(); 291 | let circuit = DefaultVoiceRecoverCircuit { 292 | features, 293 | errors, 294 | commitment, 295 | message, 296 | }; 297 | let instances = circuit.instances(); 298 | // let snark = gen_snark_gwc(&app_params, &app_pk, circuit, &mut OsRng, None::<&str>); 299 | // let agg_circuit = PublicAggregationCircuit::new(&agg_params, vec![snark], false, &mut OsRng); 300 | // let agg_instances = agg_circuit.instances(); 301 | // let acc = agg_instances[0][0..4 * LIMBS].to_vec(); 302 | // let agg_pk = { 303 | // let f = File::open(Path::new(pk_dir).join("agg.pk")).unwrap(); 304 | // let mut reader = BufReader::new(f); 305 | // ProvingKey::::read::<_, PublicAggregationCircuit>( 306 | // &mut reader, 307 | // SerdeFormat::RawBytesUnchecked, 308 | // ) 309 | // .unwrap() 310 | // }; 311 | let proof = gen_evm_proof_gwc(&app_params, &app_pk, circuit, instances.clone(), &mut OsRng); 312 | 313 | // let proof = { 314 | // let mut transcript = Blake2bWrite::<_, G1Affine, Challenge255<_>>::init(vec![]); 315 | // create_proof::, ProverGWC<_>, _, _, _, _>( 316 | // ¶ms, 317 | // &pk, 318 | // &vec![circuit.clone()], 319 | // &[&instances 320 | // .iter() 321 | // .map(|instances| instances.as_slice()) 322 | // .collect_vec()], 323 | // OsRng, 324 | // &mut transcript, 325 | // ) 326 | // .unwrap(); 327 | // transcript.finalize() 328 | // }; 329 | { 330 | let proof_hex = hex::encode(&proof); 331 | let mut file = File::create(proof_path)?; 332 | write!(file, "0x{}", proof_hex).unwrap(); 333 | file.flush().unwrap(); 334 | }; 335 | // let acc_bytes = encode_calldata(&[acc], &[]); 336 | let public_input = DefaultVoiceRecoverCircuitPublicInput { 337 | commitment: input.commitment, 338 | commitment_hash: format!( 339 | "0x{}", 340 | hex::encode(encode_calldata(&[vec![instances[0][0]]], &[])).as_str(), 341 | ), 342 | feature_hash: format!( 343 | "0x{}", 344 | hex::encode(encode_calldata(&[vec![instances[0][1]]], &[])).as_str(), 345 | ), 346 | message_hash: format!( 347 | "0x{}", 348 | hex::encode(encode_calldata(&[vec![instances[0][2]]], &[])).as_str() 349 | ), 350 | message: input.message, // acc: format!( 351 | // "0x{}", 352 | // hex::encode(acc.iter().map(|v| v.get_lower_128() as u8).collect_vec(),).as_str() 353 | // ), 354 | }; 355 | { 356 | let public_input_str = 
serde_json::to_string(&public_input).unwrap(); 357 | let mut file = File::create(public_input_path)?; 358 | write!(file, "{}", public_input_str).unwrap(); 359 | file.flush().unwrap(); 360 | } 361 | Ok(()) 362 | } 363 | 364 | pub fn verify( 365 | params_dir: &str, 366 | app_circuit_config: &str, 367 | agg_circuit_config: &str, 368 | vk_path: &str, 369 | public_input_path: &str, 370 | proof_path: &str, 371 | ) -> Result<(), Error> { 372 | set_var(VOICE_RECOVER_CONFIG_ENV, app_circuit_config); 373 | set_var("VERIFY_CONFIG", agg_circuit_config); 374 | let app_params = { 375 | let f = File::open(Path::new(params_dir).join("app.bin")).unwrap(); 376 | let mut reader = BufReader::new(f); 377 | ParamsKZG::::read(&mut reader).unwrap() 378 | }; 379 | let vk = { 380 | let f = File::open(vk_path).unwrap(); 381 | let mut reader = BufReader::new(f); 382 | VerifyingKey::::read::<_, DefaultVoiceRecoverCircuit>( 383 | &mut reader, 384 | SerdeFormat::RawBytesUnchecked, 385 | ) 386 | .unwrap() 387 | }; 388 | let public_input = serde_json::from_reader::( 389 | File::open(public_input_path).unwrap(), 390 | ) 391 | .unwrap(); 392 | let proof = { 393 | let f = File::open(proof_path).unwrap(); 394 | let mut reader = BufReader::new(f); 395 | let mut proof = vec![]; 396 | reader.read_to_end(&mut proof).unwrap(); 397 | proof 398 | }; 399 | // let acc = hex::decode(&public_input.acc[2..]).unwrap(); 400 | // let acc_public = acc.iter().map(|byte| Fr::from(*byte as u64)).collect_vec(); 401 | let mut instances = vec![]; 402 | let message = hex::decode(&public_input.message[2..]).unwrap(); 403 | let mut commitment_hash = [0; 32]; 404 | commitment_hash.copy_from_slice(&hex::decode(&public_input.commitment_hash[2..]).unwrap()); 405 | instances.push(Fr::from_bytes(&commitment_hash).unwrap()); 406 | let mut feature_hash = [0; 32]; 407 | feature_hash.copy_from_slice(&hex::decode(&public_input.feature_hash[2..]).unwrap()); 408 | instances.push(Fr::from_bytes(&feature_hash).unwrap()); 409 | let mut message_ext = message.to_vec(); 410 | { 411 | let config_params = DefaultVoiceRecoverCircuit::read_config_params(); 412 | message_ext.append(&mut vec![0; config_params.max_msg_size - message.len()]); 413 | } 414 | let mut packed_message = message_ext 415 | .chunks(16) 416 | .map(|bytes| Fr::from_u128(u128::from_le_bytes(bytes.try_into().unwrap()))) 417 | .collect_vec(); 418 | // let mut message_public = message 419 | // .iter() 420 | // .map(|byte| Fr::from(*byte as u64)) 421 | // .collect_vec(); 422 | 423 | let mut message_hash = [0; 32]; 424 | message_hash.copy_from_slice(&hex::decode(&public_input.message_hash[2..]).unwrap()); 425 | instances.push(Fr::from_bytes(&message_hash).unwrap()); 426 | instances.append(&mut packed_message); 427 | { 428 | let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); 429 | let verifier_params = app_params.verifier_params(); 430 | let strategy = SingleStrategy::new(&verifier_params); 431 | verify_proof::<_, VerifierGWC<_>, _, _, _>( 432 | &app_params, 433 | &vk, 434 | strategy, 435 | &[&[instances.as_slice()]], 436 | &mut transcript, 437 | ) 438 | .unwrap(); 439 | }; 440 | Ok(()) 441 | } 442 | 443 | pub fn gen_evm_verifier( 444 | params_dir: &str, 445 | app_circuit_config: &str, 446 | agg_circuit_config: &str, 447 | vk_path: &str, 448 | code_path: &str, 449 | ) -> Result<(), Error> { 450 | set_var(VOICE_RECOVER_CONFIG_ENV, app_circuit_config); 451 | set_var("VERIFY_CONFIG", agg_circuit_config); 452 | let app_params = { 453 | let f = 
File::open(Path::new(params_dir).join("app.bin")).unwrap(); 454 | let mut reader = BufReader::new(f); 455 | ParamsKZG::::read(&mut reader).unwrap() 456 | }; 457 | let vk = { 458 | let f = File::open(vk_path).unwrap(); 459 | let mut reader = BufReader::new(f); 460 | VerifyingKey::::read::<_, DefaultVoiceRecoverCircuit>( 461 | &mut reader, 462 | SerdeFormat::RawBytesUnchecked, 463 | ) 464 | .unwrap() 465 | }; 466 | let circuit_params = DefaultVoiceRecoverCircuit::read_config_params(); 467 | let num_instances = vec![3 + circuit_params.max_msg_size / 16]; 468 | let verifier_yul = { 469 | let svk = app_params.get_g()[0].into(); 470 | let dk = (app_params.g2(), app_params.s_g2()).into(); 471 | let protocol = compile( 472 | &app_params, 473 | &vk, 474 | Config::kzg() 475 | .with_num_instance(num_instances.clone()) 476 | .with_accumulator_indices(DefaultVoiceRecoverCircuit::accumulator_indices()), 477 | ); 478 | 479 | let loader = EvmLoader::new::(); 480 | let protocol = protocol.loaded(&loader); 481 | let mut transcript = EvmTranscript::<_, Rc, _, _>::new(&loader); 482 | 483 | let instances = transcript.load_instances(num_instances); 484 | let proof = 485 | Plonk::>::read_proof(&svk, &protocol, &instances, &mut transcript); 486 | Plonk::>::verify(&svk, &dk, &protocol, &instances, &proof); 487 | loader.yul_code() 488 | }; 489 | // let verifier = gen_evm_verifier_gwc::( 490 | // &app_params, 491 | // &vk, 492 | // vec![num_instance], 493 | // None, 494 | // ); 495 | // let verifier_str = "0x".to_string() + &hex::encode(verifier); 496 | { 497 | let mut f = File::create(code_path).unwrap(); 498 | let _ = f.write(verifier_yul.as_bytes()); 499 | let output = fix_verifier_sol(Path::new(code_path).to_path_buf()).unwrap(); 500 | 501 | let mut f = File::create(code_path)?; 502 | let _ = f.write(output.as_bytes()); 503 | // write!(f, "{}", verifier_yul).unwrap(); 504 | // let output = f.flush().unwrap(); 505 | }; 506 | Ok(()) 507 | } 508 | 509 | pub fn fix_verifier_sol(input_file: PathBuf) -> Result> { 510 | let file = File::open(input_file.clone())?; 511 | let reader = BufReader::new(file); 512 | 513 | let mut transcript_addrs: Vec = Vec::new(); 514 | let mut modified_lines: Vec = Vec::new(); 515 | 516 | // convert calldataload 0x0 to 0x40 to read from pubInputs, and the rest 517 | // from proof 518 | let calldata_pattern = Regex::new(r"^.*(calldataload\((0x[a-f0-9]+)\)).*$")?; 519 | let mstore_pattern = Regex::new(r"^\s*(mstore\(0x([0-9a-fA-F]+)+),.+\)")?; 520 | let mstore8_pattern = Regex::new(r"^\s*(mstore8\((\d+)+),.+\)")?; 521 | let mstoren_pattern = Regex::new(r"^\s*(mstore\((\d+)+),.+\)")?; 522 | let mload_pattern = Regex::new(r"(mload\((0x[0-9a-fA-F]+))\)")?; 523 | let keccak_pattern = Regex::new(r"(keccak256\((0x[0-9a-fA-F]+))")?; 524 | let modexp_pattern = 525 | Regex::new(r"(staticcall\(gas\(\), 0x5, (0x[0-9a-fA-F]+), 0xc0, (0x[0-9a-fA-F]+), 0x20)")?; 526 | let ecmul_pattern = 527 | Regex::new(r"(staticcall\(gas\(\), 0x7, (0x[0-9a-fA-F]+), 0x60, (0x[0-9a-fA-F]+), 0x40)")?; 528 | let ecadd_pattern = 529 | Regex::new(r"(staticcall\(gas\(\), 0x6, (0x[0-9a-fA-F]+), 0x80, (0x[0-9a-fA-F]+), 0x40)")?; 530 | let ecpairing_pattern = 531 | Regex::new(r"(staticcall\(gas\(\), 0x8, (0x[0-9a-fA-F]+), 0x180, (0x[0-9a-fA-F]+), 0x20)")?; 532 | let bool_pattern = Regex::new(r":bool")?; 533 | 534 | // Count the number of pub inputs 535 | let mut start = None; 536 | let mut end = None; 537 | for (i, line) in reader.lines().enumerate() { 538 | let line = line?; 539 | if line.trim().starts_with("mstore(0x20") { 540 | 
start = Some(i as u32); 541 | } 542 | 543 | if line.trim().starts_with("mstore(0x0") { 544 | end = Some(i as u32); 545 | break; 546 | } 547 | } 548 | 549 | let num_pubinputs = if let Some(s) = start { 550 | end.unwrap() - s 551 | } else { 552 | 0 553 | }; 554 | 555 | let mut max_pubinputs_addr = 0; 556 | if num_pubinputs > 0 { 557 | max_pubinputs_addr = num_pubinputs * 32 - 32; 558 | } 559 | 560 | let file = File::open(input_file)?; 561 | let reader = BufReader::new(file); 562 | 563 | for line in reader.lines() { 564 | let mut line = line?; 565 | let m = bool_pattern.captures(&line); 566 | if m.is_some() { 567 | line = line.replace(":bool", ""); 568 | } 569 | 570 | let m = calldata_pattern.captures(&line); 571 | if let Some(m) = m { 572 | let calldata_and_addr = m.get(1).unwrap().as_str(); 573 | let addr = m.get(2).unwrap().as_str(); 574 | let addr_as_num = u32::from_str_radix(addr.strip_prefix("0x").unwrap(), 16)?; 575 | 576 | if addr_as_num <= max_pubinputs_addr { 577 | let pub_addr = format!("{:#x}", addr_as_num + 32); 578 | line = line.replace( 579 | calldata_and_addr, 580 | &format!("mload(add(pubInputs, {}))", pub_addr), 581 | ); 582 | } else { 583 | let proof_addr = format!("{:#x}", addr_as_num - max_pubinputs_addr); 584 | line = line.replace( 585 | calldata_and_addr, 586 | &format!("mload(add(proof, {}))", proof_addr), 587 | ); 588 | } 589 | } 590 | 591 | let m = mstore8_pattern.captures(&line); 592 | if let Some(m) = m { 593 | let mstore = m.get(1).unwrap().as_str(); 594 | let addr = m.get(2).unwrap().as_str(); 595 | let addr_as_num = u32::from_str_radix(addr, 10)?; 596 | let transcript_addr = format!("{:#x}", addr_as_num); 597 | transcript_addrs.push(addr_as_num); 598 | line = line.replace( 599 | mstore, 600 | &format!("mstore8(add(transcript, {})", transcript_addr), 601 | ); 602 | } 603 | 604 | let m = mstoren_pattern.captures(&line); 605 | if let Some(m) = m { 606 | let mstore = m.get(1).unwrap().as_str(); 607 | let addr = m.get(2).unwrap().as_str(); 608 | let addr_as_num = u32::from_str_radix(addr, 10)?; 609 | let transcript_addr = format!("{:#x}", addr_as_num); 610 | transcript_addrs.push(addr_as_num); 611 | line = line.replace( 612 | mstore, 613 | &format!("mstore(add(transcript, {})", transcript_addr), 614 | ); 615 | } 616 | 617 | let m = modexp_pattern.captures(&line); 618 | if let Some(m) = m { 619 | let modexp = m.get(1).unwrap().as_str(); 620 | let start_addr = m.get(2).unwrap().as_str(); 621 | let result_addr = m.get(3).unwrap().as_str(); 622 | let start_addr_as_num = 623 | u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?; 624 | let result_addr_as_num = 625 | u32::from_str_radix(result_addr.strip_prefix("0x").unwrap(), 16)?; 626 | 627 | let transcript_addr = format!("{:#x}", start_addr_as_num); 628 | transcript_addrs.push(start_addr_as_num); 629 | let result_addr = format!("{:#x}", result_addr_as_num); 630 | line = line.replace( 631 | modexp, 632 | &format!( 633 | "staticcall(gas(), 0x5, add(transcript, {}), 0xc0, add(transcript, {}), 0x20", 634 | transcript_addr, result_addr 635 | ), 636 | ); 637 | } 638 | 639 | let m = ecmul_pattern.captures(&line); 640 | if let Some(m) = m { 641 | let ecmul = m.get(1).unwrap().as_str(); 642 | let start_addr = m.get(2).unwrap().as_str(); 643 | let result_addr = m.get(3).unwrap().as_str(); 644 | let start_addr_as_num = 645 | u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?; 646 | let result_addr_as_num = 647 | u32::from_str_radix(result_addr.strip_prefix("0x").unwrap(), 16)?; 648 | 649 | let 
transcript_addr = format!("{:#x}", start_addr_as_num); 650 | let result_addr = format!("{:#x}", result_addr_as_num); 651 | transcript_addrs.push(start_addr_as_num); 652 | transcript_addrs.push(result_addr_as_num); 653 | line = line.replace( 654 | ecmul, 655 | &format!( 656 | "staticcall(gas(), 0x7, add(transcript, {}), 0x60, add(transcript, {}), 0x40", 657 | transcript_addr, result_addr 658 | ), 659 | ); 660 | } 661 | 662 | let m = ecadd_pattern.captures(&line); 663 | if let Some(m) = m { 664 | let ecadd = m.get(1).unwrap().as_str(); 665 | let start_addr = m.get(2).unwrap().as_str(); 666 | let result_addr = m.get(3).unwrap().as_str(); 667 | let start_addr_as_num = 668 | u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?; 669 | let result_addr_as_num = 670 | u32::from_str_radix(result_addr.strip_prefix("0x").unwrap(), 16)?; 671 | 672 | let transcript_addr = format!("{:#x}", start_addr_as_num); 673 | let result_addr = format!("{:#x}", result_addr_as_num); 674 | transcript_addrs.push(start_addr_as_num); 675 | transcript_addrs.push(result_addr_as_num); 676 | line = line.replace( 677 | ecadd, 678 | &format!( 679 | "staticcall(gas(), 0x6, add(transcript, {}), 0x80, add(transcript, {}), 0x40", 680 | transcript_addr, result_addr 681 | ), 682 | ); 683 | } 684 | 685 | let m = ecpairing_pattern.captures(&line); 686 | if let Some(m) = m { 687 | let ecpairing = m.get(1).unwrap().as_str(); 688 | let start_addr = m.get(2).unwrap().as_str(); 689 | let result_addr = m.get(3).unwrap().as_str(); 690 | let start_addr_as_num = 691 | u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?; 692 | let result_addr_as_num = 693 | u32::from_str_radix(result_addr.strip_prefix("0x").unwrap(), 16)?; 694 | 695 | let transcript_addr = format!("{:#x}", start_addr_as_num); 696 | let result_addr = format!("{:#x}", result_addr_as_num); 697 | transcript_addrs.push(start_addr_as_num); 698 | transcript_addrs.push(result_addr_as_num); 699 | line = line.replace( 700 | ecpairing, 701 | &format!( 702 | "staticcall(gas(), 0x8, add(transcript, {}), 0x180, add(transcript, {}), 0x20", 703 | transcript_addr, result_addr 704 | ), 705 | ); 706 | } 707 | 708 | let m = mstore_pattern.captures(&line); 709 | if let Some(m) = m { 710 | let mstore = m.get(1).unwrap().as_str(); 711 | let addr = m.get(2).unwrap().as_str(); 712 | let addr_as_num = u32::from_str_radix(addr, 16)?; 713 | let transcript_addr = format!("{:#x}", addr_as_num); 714 | transcript_addrs.push(addr_as_num); 715 | line = line.replace( 716 | mstore, 717 | &format!("mstore(add(transcript, {})", transcript_addr), 718 | ); 719 | } 720 | 721 | let m = keccak_pattern.captures(&line); 722 | if let Some(m) = m { 723 | let keccak = m.get(1).unwrap().as_str(); 724 | let addr = m.get(2).unwrap().as_str(); 725 | let addr_as_num = u32::from_str_radix(addr.strip_prefix("0x").unwrap(), 16)?; 726 | let transcript_addr = format!("{:#x}", addr_as_num); 727 | transcript_addrs.push(addr_as_num); 728 | line = line.replace( 729 | keccak, 730 | &format!("keccak256(add(transcript, {})", transcript_addr), 731 | ); 732 | } 733 | 734 | // mload can show up multiple times per line 735 | loop { 736 | let m = mload_pattern.captures(&line); 737 | if m.is_none() { 738 | break; 739 | } 740 | let mload = m.as_ref().unwrap().get(1).unwrap().as_str(); 741 | let addr = m.as_ref().unwrap().get(2).unwrap().as_str(); 742 | 743 | let addr_as_num = u32::from_str_radix(addr.strip_prefix("0x").unwrap(), 16)?; 744 | let transcript_addr = format!("{:#x}", addr_as_num); 745 | 
transcript_addrs.push(addr_as_num); 746 | line = line.replace( 747 | mload, 748 | &format!("mload(add(transcript, {})", transcript_addr), 749 | ); 750 | } 751 | 752 | modified_lines.push(line); 753 | } 754 | 755 | // get the max transcript addr 756 | let max_transcript_addr = transcript_addrs.iter().max().unwrap() / 32; 757 | let mut contract = format!( 758 | "// SPDX-License-Identifier: MIT 759 | pragma solidity ^0.8.17; 760 | 761 | contract Verifier {{ 762 | function verify( 763 | uint256[] memory pubInputs, 764 | bytes memory proof 765 | ) public view returns (bool) {{ 766 | bool success = true; 767 | bytes32[{}] memory transcript; 768 | assembly {{ 769 | ", 770 | max_transcript_addr 771 | ) 772 | .trim() 773 | .to_string(); 774 | 775 | // using a boxed Write trait object here to show it works for any Struct impl'ing Write 776 | // you may also use a std::fs::File here 777 | let mut write: Box<&mut dyn std::fmt::Write> = Box::new(&mut contract); 778 | 779 | for line in modified_lines[16..modified_lines.len() - 7].iter() { 780 | write!(write, "{}", line).unwrap(); 781 | } 782 | writeln!(write, "}} return success; }} }}")?; 783 | Ok(contract) 784 | } 785 | --------------------------------------------------------------------------------
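For reference, the public instances exposed by `DefaultVoiceRecoverCircuit::instances()` can be recomputed off-circuit from the raw byte inputs. The sketch below mirrors that method using the crate's own `poseidon_hash` helper; the crate name `eth_voice_recovery` and the externally supplied `max_msg_size` value are assumptions for illustration, not part of the repository.

// Illustrative sketch only: recompute the circuit's public instances off-circuit.
// Assumed (not in the repo): the crate is importable as `eth_voice_recovery`,
// and the caller already knows `max_msg_size` from the circuit config file.
use eth_voice_recovery::poseidon_circuit::poseidon_hash;
use halo2_base::halo2_proofs::halo2curves::bn256::Fr;

fn expected_instances(
    features: &[u8],
    errors: &[u8],
    commitment: &[u8],
    message: &[u8],
    max_msg_size: usize,
) -> Vec<Fr> {
    // 1. Poseidon hash of the commitment bytes.
    let mut instances = vec![poseidon_hash(commitment)];
    // 2. Recover the word as features XOR commitment XOR errors, then hash it.
    let word: Vec<u8> = features
        .iter()
        .zip(commitment.iter())
        .zip(errors.iter())
        .map(|((f, c), e)| f ^ c ^ e)
        .collect();
    instances.push(poseidon_hash(&word));
    // 3. Hash of word || message, with the message zero-padded to max_msg_size.
    let mut message_ext = message.to_vec();
    message_ext.resize(max_msg_size, 0u8);
    instances.push(poseidon_hash(
        &[word.as_slice(), message_ext.as_slice()].concat(),
    ));
    // 4. The padded message packed into 16-byte little-endian field elements.
    for chunk in message_ext.chunks(16) {
        instances.push(Fr::from_u128(u128::from_le_bytes(chunk.try_into().unwrap())));
    }
    // The circuit wraps this in one outer vector (a single instance column).
    instances
}

These are the same values that `prove` and `evm_prove` in helper.rs expose in the public-input JSON: the first three elements hex-encoded as the commitment, feature, and message hashes, and the remaining elements corresponding to the padded message bytes.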