├── .gitignore ├── .vscode └── launch.json ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── api ├── .env ├── Cargo.toml ├── README.md └── src │ └── main.rs ├── benches └── bench-solve.rs ├── bex.code-workspace ├── bex.leo ├── build.rs ├── doc ├── bex-types.vue ├── dependencies.vue ├── factorbench.org ├── ft-nid-notes.org ├── howto.org ├── ideas.org ├── outdated │ └── bex-trail.org ├── outline.org ├── papers.org ├── recipe.org ├── scaffold.ipynb ├── scaffold_graph_tests.py ├── scrap │ ├── interchange.org │ ├── primes.org │ ├── solve64-log.txt │ ├── three.org │ └── why-anf.org ├── spec.org ├── swap-rc.vue ├── var-orders.org ├── wipbase.org ├── workers.org └── workstate.vue ├── examples ├── README.md ├── shell │ ├── bex-shell.rs │ └── swarm-shell.rs └── solve │ ├── bdd-solve.rs │ └── factor-p.rs ├── plans.org ├── py ├── Cargo.toml ├── README.md ├── and-or.svg ├── bex │ ├── __init__.py │ └── dd.py ├── dd-test.sh ├── dd_bex_tests.py ├── go.sh ├── pyproject.toml ├── src │ ├── lib.rs │ └── py.rs └── test.py ├── run-api.sh ├── src ├── anf.rs ├── apl.rs ├── ast.rs ├── base.rs ├── bdd.rs ├── bdd │ ├── bdd-json.rs │ ├── bdd_sols.rs │ └── bdd_swarm.rs ├── cur.rs ├── fun.rs ├── int.rs ├── io.rs ├── lib.rs ├── naf.rs ├── nid-fun.rs ├── nid.rs ├── ops.rs ├── reg.rs ├── simp.rs ├── solve.rs ├── swap.rs ├── swarm.rs ├── test-bdd.rs ├── test-swap-scaffold.rs ├── test-swap.rs ├── tmp.rs ├── vhl.rs ├── vhl_swarm.rs ├── vid.rs └── wip.rs └── viewbex.html /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # editor/ide junk 13 | .idea 14 | *~ 15 | 16 | doc/__pycache__ 17 
| .~* 18 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | 8 | { 9 | "type": "lldb", 10 | "request": "launch", 11 | "name": "Debug unit tests in library 'bex'", 12 | "cargo": { 13 | "args": [ 14 | "test", 15 | "--no-run", 16 | "--lib", 17 | "--package=bex" 18 | ], 19 | "filter": { 20 | "name": "bex", 21 | "kind": "lib" 22 | } 23 | }, 24 | "args": [], 25 | "cwd": "${workspaceFolder}" 26 | }, 27 | { 28 | "type": "lldb", 29 | "request": "launch", 30 | "name": "Debug executable 'bdd-solve'", 31 | "cargo": { 32 | "args": [ 33 | "build", 34 | "--bin=bdd-solve", 35 | "--package=bex" 36 | ], 37 | "filter": { 38 | "name": "bdd-solve", 39 | "kind": "bin" 40 | } 41 | }, 42 | "args": [], 43 | "cwd": "${workspaceFolder}" 44 | }, 45 | { 46 | "type": "lldb", 47 | "request": "launch", 48 | "name": "Debug unit tests in executable 'bdd-solve'", 49 | "cargo": { 50 | "args": [ 51 | "test", 52 | "--no-run", 53 | "--bin=bdd-solve", 54 | "--package=bex" 55 | ], 56 | "filter": { 57 | "name": "bdd-solve", 58 | "kind": "bin" 59 | } 60 | }, 61 | "args": [], 62 | "cwd": "${workspaceFolder}" 63 | }, 64 | { 65 | "type": "lldb", 66 | "request": "launch", 67 | "name": "Debug benchmark 'bench-solve'", 68 | "cargo": { 69 | "args": [ 70 | "test", 71 | "--no-run", 72 | "--bench=bench-solve", 73 | "--package=bex" 74 | ], 75 | "filter": { 76 | "name": "bench-solve", 77 | "kind": "bench" 78 | } 79 | }, 80 | "args": [], 81 | "cwd": "${workspaceFolder}" 82 | } 83 | ] 84 | } -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "bex" 3 | version = "0.3.0" 4 | edition = "2021" 5 | authors = ["tangentstorm "] 6 | description = "A rust library for working with boolean expressions (syntax trees, decision diagrams, algebraic normal form, etc.)" 7 | documentation = "https://docs.rs/bex/" 8 | repository = "https://github.com/tangentstorm/bex" 9 | keywords = ["bdd", "diagram", "anf"] 10 | categories = ["algorithms", "data-structures"] 11 | license = "MIT" 12 | readme = "README.md" 13 | default-run = "bex-shell" 14 | 15 | [lib] 16 | name = "bex" 17 | path = "src/lib.rs" 18 | 19 | [[bin]] 20 | name = "bex-shell" 21 | path = "examples/shell/bex-shell.rs" 22 | 23 | [[bin]] 24 | name = "swarm-shell" 25 | path = "examples/shell/swarm-shell.rs" 26 | 27 | [[bin]] 28 | name = "bdd-solve" 29 | path = "examples/solve/bdd-solve.rs" 30 | 31 | [[bin]] 32 | name = "factor-p" 33 | path = "examples/solve/factor-p.rs" 34 | 35 | [features] 36 | slowtests = [] 37 | 38 | [workspace] 39 | members = ["py", "api"] 40 | 41 | [dependencies] 42 | log = { version = "0.4", features = ["max_level_debug"]} 43 | simplelog = "0.5" 44 | num_cpus = "1.0" 45 | rand = "0.8.5" 46 | dashmap = { version="5.4.0"} 47 | boxcar = "0.1.0" 48 | fxhash = "0.2.1" 49 | concurrent-queue = "2.1.0" 50 | json = "0.12.4" 51 | 52 | [dev-dependencies] 53 | bencher = "0.1.5" 54 | 55 | [[bench]] 56 | name = "bench-solve" 57 | harness = false 58 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018-2020 tangentstorm 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, 
- **algebraic normal form** 13 | -- an "xor of ands" polynomial form
- `reorder_by_force` for the FORCE algorithm, a fast (but not always as effective) alternative to variable sifting
46 | -------------------------------------------------------------------------------- /api/.env: -------------------------------------------------------------------------------- 1 | HOST=127.0.0.1 2 | PORT=3030 -------------------------------------------------------------------------------- /api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bex-api" 3 | version = "0.3.0" 4 | edition = "2021" 5 | authors = ["tangentstorm "] 6 | description = "A web server for the bex library" 7 | license = "MIT" 8 | 9 | [dependencies] 10 | warp = "0.3" 11 | tokio = { version = "1", features = ["full"] } 12 | dotenv = "0.15" 13 | lazy_static = "1.5.0" 14 | bex = { path = ".." } 15 | -------------------------------------------------------------------------------- /api/README.md: -------------------------------------------------------------------------------- 1 | # Bex API 2 | 3 | This program provides a simple HTTP API for constructing 4 | binary decision diagrams using bex. Below are the steps 5 | to set up and use the API. 6 | 7 | ## Setup 8 | 9 | 0. Install Rust: 10 | 11 | Follow the instructions at [rust-lang.org](https://www.rust-lang.org/tools/install) to install Rust and the `cargo` build system. 12 | 13 | 1. Clone the repository: 14 | ```sh 15 | git clone https://github.com/tangentstorm/bex.git 16 | cd bex 17 | ``` 18 | 19 | 2. Navigate to the API directory: 20 | ```sh 21 | cd api 22 | ``` 23 | 24 | 3. (Optional) Create a `.env` file in the `api` directory with the following content: 25 | ``` 26 | HOST=127.0.0.1 27 | PORT=3030 28 | ``` 29 | 30 | If the `.env` file is not created, the default values will be used (`HOST=127.0.0.1` and `PORT=3030`). 31 | 32 | 4. 
Build and run the API: 33 | ```sh 34 | cargo run 35 | ``` 36 | 37 | ## Usage 38 | 39 | ### Endpoints 40 | 41 | The following endpoints are available to interact with the BDD base: 42 | 43 | | Endpoint | Description | 44 | | --- | --- | 45 | | **GET /ite/{vid}/{nid1}/{nid2}** | Build an ITE (If-Then-Else) node for the given NIDs. | 46 | | **GET /nid/{nid}** | Retrieve the high and low branches of the given NID. | 47 | | **GET /xor/{nid1}/{nid2}** | Perform XOR operation on the given NIDs. | 48 | | **GET /and/{nid1}/{nid2}** | Perform AND operation on the given NIDs. | 49 | | **GET /or/{nid1}/{nid2}** | Perform OR operation on the given NIDs. | 50 | 51 | ### Example Usage 52 | 53 | 1. Build an ITE node: 54 | ``` 55 | http://localhost:3030/ite/x1/x2/x3 56 | ``` 57 | 58 | 2. Inspect the resulting node: 59 | ``` 60 | http://localhost:3030/nid/x3.2 61 | ``` 62 | 63 | 3. Perform an XOR operation: 64 | ``` 65 | http://localhost:3030/xor/x1/x2 66 | ``` 67 | 68 | 4. Perform an AND operation: 69 | ``` 70 | http://localhost:3030/and/x1/x2 71 | ``` 72 | 73 | 5. Perform an OR operation: 74 | ``` 75 | http://localhost:3030/or/x1/x2 76 | ``` 77 | 78 | ## License 79 | 80 | This project is licensed under the MIT License. 81 | -------------------------------------------------------------------------------- /api/src/main.rs: -------------------------------------------------------------------------------- 1 | use warp::Filter; 2 | use dotenv::dotenv; 3 | use std::env; 4 | use lazy_static::lazy_static; 5 | use std::sync::Mutex; 6 | use bex::bdd::BddBase; 7 | use bex::nid::NID; 8 | use bex::base::Base; 9 | 10 | lazy_static! 
{ 11 | pub static ref BDD_BASE: Mutex = Mutex::new(BddBase::new()); 12 | } 13 | 14 | #[tokio::main] 15 | async fn main() { 16 | dotenv().ok(); 17 | let host = env::var("HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); 18 | let port = env::var("PORT").unwrap_or_else(|_| "3030".to_string()).parse().expect("PORT must be a number"); 19 | 20 | let version = env!("CARGO_PKG_VERSION"); 21 | let hello = warp::path::end().map(move || format!("bex-api version: {}", version)); 22 | 23 | let vhl = warp::path!("ite" / NID / NID / NID) 24 | .map(|vid: NID, nid1: NID, nid2: NID| { 25 | let mut bdd_base = BDD_BASE.lock().unwrap(); 26 | let new_nid = bdd_base.ite(vid, nid1, nid2); 27 | format!("{new_nid}")}); 28 | 29 | let xor = warp::path!("xor" / NID / NID) 30 | .map(|nid1: NID, nid2: NID| { 31 | let mut bdd_base = BDD_BASE.lock().unwrap(); 32 | let new_nid = bdd_base.xor(nid1, nid2); 33 | format!("{new_nid}")}); 34 | 35 | let and = warp::path!("and" / NID / NID) 36 | .map(|nid1: NID, nid2: NID| { 37 | let mut bdd_base = BDD_BASE.lock().unwrap(); 38 | let new_nid = bdd_base.and(nid1, nid2); 39 | format!("{new_nid}")}); 40 | 41 | let or = warp::path!("or" / NID / NID) 42 | .map(|nid1: NID, nid2: NID| { 43 | let mut bdd_base = BDD_BASE.lock().unwrap(); 44 | let new_nid = bdd_base.or(nid1, nid2); 45 | format!("{new_nid}")}); 46 | 47 | let nid = warp::path!("nid" / NID) 48 | .map(|nid: NID| { 49 | if nid.is_lit() || nid.is_const() || nid.is_fun() { format!("{nid}") } 50 | else { 51 | let bdd_base = BDD_BASE.lock().unwrap(); 52 | let (v, hi, lo) = bdd_base.get_vhl(nid); 53 | format!("v: {v} hi: {hi} lo: {lo}") }}); 54 | 55 | let routes = hello.or(vhl).or(xor).or(and).or(or).or(nid); 56 | 57 | let addr = (host.parse::().expect("HOST must be a valid IP address"), port); 58 | 59 | println!("Server listening on http://{}:{}", host, port); 60 | 61 | warp::serve(routes).run(addr).await; 62 | } 63 | -------------------------------------------------------------------------------- 
/benches/bench-solve.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate bencher; 3 | use bencher::Bencher; 4 | 5 | extern crate bex; 6 | use bex::{BddBase, solve::find_factors, int::GBASE}; 7 | 8 | pub fn tiny(b: &mut Bencher) { 9 | use bex::int::{X4,X8}; 10 | b.iter(|| { 11 | find_factors::(&mut BddBase::new(), 210, vec![(14,15)]); }); } 12 | 13 | pub fn small(b: &mut Bencher) { 14 | use bex::int::{X8,X16}; 15 | b.iter(|| { 16 | let expected = vec![(1,210), (2,105), ( 3,70), ( 5,42), 17 | (6, 35), (7, 30), (10,21), (14,15)]; 18 | find_factors::(&mut BddBase::new(), 210, expected); 19 | GBASE.with(|gb| gb.replace(bex::ast::ASTBase::empty())); 20 | }); } 21 | 22 | benchmark_group!(both, tiny, small); 23 | benchmark_main!(both); 24 | -------------------------------------------------------------------------------- /bex.code-workspace: -------------------------------------------------------------------------------- 1 | { 2 | "folders": [ 3 | { 4 | "path": "." 5 | } 6 | ], 7 | "settings": { 8 | "workbench.colorTheme": "Red" 9 | } 10 | } -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | //! build script for bex. 2 | //! This generates a small rust file that lets bex 3 | //! report what options were used for compilation. 
4 | use std::env; 5 | use std::fs; 6 | use std::path::Path; 7 | 8 | fn main() { 9 | let out_dir = env::var_os("OUT_DIR").unwrap(); 10 | let opt_level = env::var_os("OPT_LEVEL").unwrap(); 11 | let bex_version = env!("CARGO_PKG_VERSION"); 12 | let dest_path = Path::new(&out_dir).join("bex-build-info.rs"); 13 | fs::write( 14 | dest_path, 15 | format!(" 16 | const BEX_VERSION : &str = {bex_version:?}; 17 | const BEX_OPT_LEVEL : &str = {opt_level:?}; 18 | ") 19 | ).unwrap(); 20 | println!("cargo:rerun-if-changed=build.rs"); 21 | } 22 | -------------------------------------------------------------------------------- /doc/factorbench.org: -------------------------------------------------------------------------------- 1 | #+title: find_factors! benchmark for bex 2 | 3 | * Overview 4 | 5 | =find_factors= is a function in [[../src/solve.rs][solve.rs]] that defines a generic benchmark, in which we try to factor a 6 | primorial into an ordered pair of factors with certain size constraints. 7 | 8 | A primorial is the product of the first n primes: 2, 6=(2*3), 30=(6*5), 210=(30*7), etc. 9 | 10 | For a given primorial /p/, we want to find two numbers, /x/ and /y/, such that /x/ < /y/. 11 | Additionally, both /x/ and /y/ must be expressible in a fixed number of bits. 12 | 13 | For example, the number 30 (2*3*5) can be factored in the following ways: 14 | 15 | | x | y | 16 | |---+----| 17 | | 1 | 30 | 18 | | 2 | 15 | 19 | | 3 | 10 | 20 | | 5 | 6 | 21 | 22 | If we further specify that x and y can only be 3-bit values, then there is only one valid answer: 23 | 24 | | x | y | 25 | |---+----| 26 | | 5 | 6 | 27 | 28 | 29 | There are several instances of this test in [[../examples/solve/bdd-solve.rs][bdd-solve.rs]], ranging from =test_nano_bdd=, which tries to find all 2-bit factorings of the number 6 (one answer: 2 * 3 = 6), to the =main= test, which attempts to factor the 64-bit product of the first 15 primes into two 32-bit numbers. 
However, if you populate them with NIDs corresponding to input variables, each NID will point to whatever function over the input bits produces that output bit.
41 | 42 | 43 | 44 | * Impact of ast-nids, =repack=, =sort_by_cost= 45 | 46 | ** Metrics 47 | 48 | | branch | version | test | nodes | steps | note | 49 | |----------+---------+----------+-------+-------+-------------------------------| 50 | | master | c7b56b7 | tiny-bdd | 120 | 111 | no nids, but repack | 51 | | ast-nids | 434c4f2 | nano-bdd | 26 | 27 | nids for var,i,o; no repack | 52 | | ast-nids | 434c4f2 | tiny-bdd | 118 | 5170 | | 53 | | ast-nids | cf904a4 | nano-bdd | 18 | 22 | INV bit rather than Op::not() | 54 | | ast-nids | cf904a4 | tiny-bdd | 102 | 5188 | | 55 | | ast-nids | 2c72a32 | nano-bdd | 17 | 17 | restore repack / sort-by-cost | 56 | | ast-nids | 2c72a32 | tiny-bdd | 101 | 101 | | 57 | 58 | ** Nids for ASTBase 59 | As I write this, I just finished converting ASTBase to use NIDs instead of simple array indices. 60 | In the process I've temporarily disabled a couple of transformations meant to optimize the ASTBase before converting it to a BDD. 61 | 62 | On the main branch (with indices instead of nids, but using the transformations), =test_tiny_bdd= results in 120 bits and it actually takes 111 steps to compute the answer. (There were 2 constant bits in the structure, plus 8 input bits, so it makes sense that the number of steps should be almost equal to bits.len() - 10). 63 | 64 | On the ast-nids branch, =test_tiny_bdd= results in 118 bits, and it actually takes a whopping 5170 steps to compute the answer. The 118 number contains no variables, but there was no optimization/garbage collection because I didn't call repack(). 65 | 66 | With commit ~cf904a4~, =ASTBase= no longer stores =Op::Not= (using the =INV= bit on the NID instead). This replaces a bunch of explicit "NOT" nodes in the graph with a property on the edge. As a result, =tiny= shrinks from 118 nodes to 102 but its steps increase from 5170 to 5188. Meanwhile =nano= shrinks from 26 nodes to 18, and 27 steps to 22. 
Without these two functions, there are usually more steps than nodes.
The problem is that the topmost virtual variable always refers back to two lesser numbers, the lesser of which winds up at the top of the BDD.
We're basically just doing a depth-first walk of the AST, but we revisit the same shared nodes over and over. 148 | 149 | What we can do instead is renumber the nodes, so that the top node in the AST becomes v0 rather than (in this case) v17. 150 | Now, every node in the AST will refer to two nodes with *higher* numbers, and when we move that definition over to the BDD, the lower of those two high numbers will be at the top. So now we have a guarantee that at each step, the virtual variable at the top will be replaced either by a virtual variable with a higher number, *or* a non-virtual variable (in which case we're done). This will make the number of substitution steps equal to the number of internal nodes in the AST. 151 | 152 | *** results of re-adding these functions 153 | 154 | With repack, we garbage collect the AST. The find-factors thing uses all but one of the generated bits, so it's not that impressive: 18->17 for nano, 102->101 for tiny. 155 | 156 | With =sort_by_cost= we guarantee that #steps = #nodes, so 22->17 for nano, 5188->101 for tiny. 157 | 158 | A huge improvement! 159 | 160 | -------------------------------------------------------------------------------- /doc/howto.org: -------------------------------------------------------------------------------- 1 | #+title: howto: common (development) tasks for bex 2 | 3 | * Group git commits without squashing. :git: 4 | 5 | Make changes, then... 
: git reset --hard origin/master # move the master branch's HEAD back
(For example, while working with a function ~f(g(x0))~, we might set ~v0=g(x0)~, and simply say ~f(v0)~. In that case, ~v0~ is a virtual variable, while ~x0~ is a "real" variable.)
28 | 29 | 30 | * =x32= and the =!xint_type= macro 31 | :PROPERTIES: 32 | :TS: <2019-09-10 10:09AM> 33 | :ID: cj51xgy0ngi0 34 | :END: 35 | 36 | Bex also has the ability to create arrays of NIDs tied to a particular base that look and behave like rust's native integer types. 37 | 38 | If the members of these arrays are set to =O= and =I=, then they behave just like really slow bits in an int register. 39 | 40 | But: if they're set to arbitrary NIDs, then they can be used to build complex expressions. 41 | 42 | 43 | 44 | 45 | * TODO note on permutations 46 | 47 | #+begin_src j 48 | 49 | NB. what happens to the truth table of ($0 xor $1 and ($2 xor $3) 50 | NB. for each permutation of the input variables. 51 | _ * ,/(0&{ ~: 1&{ *. 2&{ ~: 3&{)"_1 (i.24) A. ,:"1 |: #: i.16 52 | 53 | 0 0 0 0 0 _ _ 0 _ _ _ _ _ 0 0 _ 54 | 0 0 0 0 0 _ _ 0 _ _ _ _ _ 0 0 _ 55 | 0 0 0 _ 0 0 _ 0 _ _ _ 0 _ _ 0 _ 56 | 0 0 0 _ 0 0 _ 0 _ _ _ 0 _ _ 0 _ 57 | 0 0 0 _ 0 _ 0 0 _ _ _ 0 _ 0 _ _ 58 | 0 0 0 _ 0 _ 0 0 _ _ _ 0 _ 0 _ _ 59 | 0 0 0 0 _ _ _ _ 0 _ _ 0 _ 0 0 _ 60 | 0 0 0 0 _ _ _ _ 0 _ _ 0 _ 0 0 _ 61 | 0 0 0 _ _ _ _ 0 0 0 _ 0 _ _ 0 _ 62 | 0 0 0 _ _ _ _ 0 0 0 _ 0 _ _ 0 _ 63 | 0 0 0 _ _ _ _ 0 0 _ 0 0 _ 0 _ _ 64 | 0 0 0 _ _ _ _ 0 0 _ 0 0 _ 0 _ _ 65 | 0 0 _ _ 0 0 _ _ 0 _ _ 0 _ 0 0 _ 66 | 0 0 _ _ 0 0 _ _ 0 _ _ 0 _ 0 0 _ 67 | 0 0 _ _ 0 _ _ 0 0 0 _ _ _ 0 0 _ 68 | 0 0 _ _ 0 _ _ 0 0 0 _ _ _ 0 0 _ 69 | 0 0 _ _ 0 _ _ 0 0 _ _ 0 0 0 _ _ 70 | 0 0 _ _ 0 _ _ 0 0 _ _ 0 0 0 _ _ 71 | 0 _ 0 _ 0 _ 0 _ 0 _ _ 0 _ 0 0 _ 72 | 0 _ 0 _ 0 _ 0 _ 0 _ _ 0 _ 0 0 _ 73 | 0 _ 0 _ 0 _ _ 0 0 _ 0 _ _ 0 0 _ 74 | 0 _ 0 _ 0 _ _ 0 0 _ 0 _ _ 0 0 _ 75 | 0 _ 0 _ 0 _ _ 0 0 _ _ 0 0 _ 0 _ 76 | 0 _ 0 _ 0 _ _ 0 0 _ _ 0 0 _ 0 _ 77 | 78 | #+end_src 79 | -------------------------------------------------------------------------------- /doc/outline.org: -------------------------------------------------------------------------------- 1 | #+title: bex outline 2 | 3 | * bex is a toolkit for manipulating boolean expressions. 
4 | - that is, statements in propositional logic 5 | 6 | * there is a tool for converting high-level rust functions to boolean expressions 7 | - write code that appears to operate on integers 8 | - pass in arrays of input bits 9 | - bex records numeric operations (addition, etc) as raw logical operations 10 | 11 | * the "atoms" of the system are NIDs 12 | - like an enum but a bitmapped u64 in practice 13 | - can act like a reference to a node in a database 14 | - expressions of fewer than 5 inputs can be represented directly in the nid 15 | 16 | * there are multiple representations, each optimized for different use cases 17 | - abstract syntax tree (AST) -- general-purpose representation of expressions in logical language. 18 | - various forms of "var-hi-lo" graphs (VHL): 19 | - binary decision diagrams (BDD) -- compressed truth tables 20 | - ITE graph using NIDs (node references contain the variable for faster(??) processing) 21 | - "Scaffold" representation using XIDs (decoupled from the variable for easy swapping) 22 | - algebraic normal form (ANF) -- XOR-sum of AND-products / mod-2 polynomial 23 | - probably others in the future (zdd, biconditional bdd(??)) 24 | - uncompressed truth tables as arrays of raw bits ("registers") 25 | 26 | * there are multiple strategies ("solvers") for converting AST representations into the other forms. 27 | - primitive VHLs can be combined to produce more complicated VHLs. 28 | - an AST can be solved "backwards" by substitution 29 | - subsolver starts with a simple bdd representing the output and recursively substitutes each sub-expression with a BDD of its direct inputs 30 | - swapsolver is a type of subsolver that first sorts BDD to make the substitutions much more efficient 31 | - (planned) simplification solvers repeatedly simplifying the AST (by setting an input variable to 1 or 0) while trying to avoid exponential memory usage.
32 | - "worldsolver" attempts to build a VHL while replacing only the bottom-most layers of the graph 33 | - "clocksolver" avoids work by observing which inputs to each intermediate AST node actually affect the output in "brute force time" 34 | 35 | * bex is also an ongoing exercise in optimization 36 | - the problem space is inherently exponential 37 | - there's a lot of work done in parallel. 38 | - general-purpose "swarm" framework for dividing work 39 | - currently divides work between cpu cores 40 | - (eventually) share with remote machines across network 41 | - "work-in-progress" concept that allows short-circuiting 42 | -------------------------------------------------------------------------------- /doc/papers.org: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | our conversion from cnf to anf: 5 | http://ceur-ws.org/Vol-1974/RP1.pdf 6 | 7 | talks about grobner bases 8 | 9 | introductory grobner bases: 10 | http://www.math.utah.edu/~preszler/research/grobner.pdf 11 | 12 | 13 | cube testers and key recovery attacks on reduced-round md6 and trivium 14 | https://link.springer.com/content/pdf/10.1007%2F978-3-642-03317-9_1.pdf 15 | 16 | -------------------------------------------------------------------------------- /doc/recipe.org: -------------------------------------------------------------------------------- 1 | #+title: Refactoring: Introducing a Type (to untyped code) 2 | 3 | Changing a widely-used type in an established codebase can be daunting, simply because so many lines need to change. I've had this problem a couple times in my rust project, and this is a recipe I've come up with to help. 4 | 5 | The project in question is called [bex](https://github.com/tangentstorm/bex), a crate for working with giant boolean expressions in various forms, such as Binary Decision Diagrams, abstract syntax trees, or algebraic normal form. 
These are all basically graph structures, and all share the same interface through a trait called Base. 6 | 7 | Previously, Base was a generic trait with two member types: one for node identifiers (NIDs), and one for variable identifiers (VIDs - as in input variables to the boolean expressions: var(0) XOR var(3), etc.)... But recently I decided to have all the structures share the same scheme for NIDs and VIDs. 8 | 9 | So... Previously these IDs were both simple integers with magic bitmasks. Now NID is a simple struct, and VID is (becoming) an enum. 10 | 11 | Figuring out how to thread these changes 12 | -------------------------------------------------------------------------------- /doc/scrap/interchange.org: -------------------------------------------------------------------------------- 1 | 2 | You ought to be able to convert between different representations of a function. 3 | 4 | For example, you might want to convert an AST or ANF to a BDD, 5 | so it's easy to iterate through the solutions. (This already works for AST, and now I want it for ANF). 6 | 7 | The basic idea is to walk the source structure, yielding nodes in whatever logical structure it uses natively (individual operations for an AST, AB+C for ANF, ITE for BDD, etc), and then let the target encode it whatever way makes the most sense. 8 | 9 | Also, we want to preserve the ability to work in either direction (translating concrete source nodes to their exact representation in the destination, or skipping to the end and working backwards, hoping that tighter constraints at the end of the process allow many of the source nodes to be ignored). 10 | 11 | Okay, so at the very least, I want a =to_base= function in the =Base= trait, that copies a particular function over to a destination =Base= and returns the new node.
12 | 13 | 14 | -------------------------------------------------------------------------------- /doc/scrap/primes.org: -------------------------------------------------------------------------------- 1 | #+title: bex example: primes 2 | 3 | * bdd-like structure for "lowest factor" 4 | 5 | Here's how the twigs (the branches just above the bottom) look for the first 256 odd integers: 6 | 7 | : 8 16 $ _2 < \ (({.@q: :: ])"0 + _ * 1&p:) s=. >: +:i.n=.256 8 | : ┌────┬────┬───┬────┬────┬────┬────┬────┬────┬───┬────┬────┬───┬────┬────┬────┐ 9 | : │0 _ │_ _ │3 _│_ 3 │_ _ │3 _ │5 3 │_ _ │3 5 │_ 3│_ _ │3 _ │7 3│_ 5 │3 _ │_ 3 │ 10 | : ├────┼────┼───┼────┼────┼────┼────┼────┼────┼───┼────┼────┼───┼────┼────┼────┤ 11 | : │5 _ │3 _ │_ 3│7 _ │3 _ │5 3 │_ 7 │3 5 │_ 3 │_ _│3 _ │_ 3 │_ 5│3 7 │11 3│5 _ │ 12 | : ├────┼────┼───┼────┼────┼────┼────┼────┼────┼───┼────┼────┼───┼────┼────┼────┤ 13 | : │3 _ │7 3 │_ _│3 11│5 3 │_ _ │3 5 │_ 3 │7 _ │3 _│13 3│_ 5 │3 _│_ 3 │5 11│3 _ │ 14 | : ├────┼────┼───┼────┼────┼────┼────┼────┼────┼───┼────┼────┼───┼────┼────┼────┤ 15 | : │_ 3 │_ _ │3 7│5 3 │11 _│3 5 │7 3 │13 _│3 _ │_ 3│_ 5 │3 _ │_ 3│5 13│3 _ │11 3│ 16 | : ├────┼────┼───┼────┼────┼────┼────┼────┼────┼───┼────┼────┼───┼────┼────┼────┤ 17 | : │_ 7 │3 _ │5 3│_ _ │3 5 │_ 3 │_ _ │3 7 │17 3│_ 5│3 13│7 3 │5 _│3 _ │_ 3 │_ 11│ 18 | : ├────┼────┼───┼────┼────┼────┼────┼────┼────┼───┼────┼────┼───┼────┼────┼────┤ 19 | : │3 17│5 3 │7 _│3 5 │_ 3 │11 7│3 _ │_ 3 │_ 5 │3 _│19 3│5 _ │3 7│_ 3 │13 _│3 _ │ 20 | : ├────┼────┼───┼────┼────┼────┼────┼────┼────┼───┼────┼────┼───┼────┼────┼────┤ 21 | : │5 3 │_ 17│3 5│_ 3 │_ 13│3 11│_ 3 │7 5 │3 _ │_ 3│5 7 │3 _ │_ 3│19 _│3 _ │5 3 │ 22 | : ├────┼────┼───┼────┼────┼────┼────┼────┼────┼───┼────┼────┼───┼────┼────┼────┤ 23 | : │_ 11│3 5 │_ 3│_ _ │3 _ │7 3 │11 5│3 _ │13 3│5 _│3 _ │17 3│7 _│3 _ │5 3 │_ 7 │ 24 | : └────┴────┴───┴────┴────┴────┴────┴────┴────┴───┴────┴────┴───┴────┴────┴────┘ 25 | 26 | 27 | an underscore_ indicates the number is a prime to the first power. 
28 | the zero in the upper left is just an artifact of how (q: 1) works in j 29 | (it returns an empty list) 30 | 31 | of course there are 128 such pairs, but only 31 unique ones: 32 | 33 | : # ~. , 8 16 $ _2 < \ (({.@q: :: ])"0 + _ * 1&p:) s=. >: +:i.n=.256 34 | : 31 35 | 36 | if we allow sorting the pairs, that gets us down to 19 unique twigs. 37 | 38 | : # ~. , /:~ each 8 16 $ _2 < \ (({.@q: :: ])"0 + _ * 1&p:) s=. >: +:i.n=.256 39 | : 19 40 | 41 | I get an out of memory error if i try to build the list for 2^32 but for 2^16 it's only 832 unique nodes: 42 | 43 | : # ~. , /:~ each _2 < \ (({.@q: :: ])"0 + _ * 1&p:) s=. >: +:i.n=.2^16 44 | : 832 45 | 46 | pumping it up to 2^24 takes a couple minutes to run: 47 | 48 | : # ~. , /:~ each _2 < \ (({.@q: :: ])"0 + _ * 1&p:) s=. >: +:i.n=.2^24 49 | : 56153 50 | 51 | 52 | * branching factors 53 | A binary tree corresponds to a binary representation of the number. if we looked at base 10, we'd need 10 branches for each node. 54 | 55 | I think if you wanted to incorporate this into a routine to factor big numbers, you'd probably want to start with base 2 to filter out multiples of two (the idea is just to find the lowest factor and then divide by that, recursively). 56 | 57 | It seems like you could start by looking at a really big anti-prime "P", and then taking the result modulo that number. This gives you the "last digit" in base P, and from that you can just look at it to filter out most of the small factors. 58 | 59 | So for example, 10 isn't an "anti prime" (2 * 3 * 5 = 30 would be) but it gives the idea: you konw immediately if a number is divisible by 10, 5, or 2 just by looking at the last digit. (if last digit is in {0,2,4,6,8}, it's divisible by 2, and if the last digit is in {0,5} it's divisible by 5. (obviously, 0 indicates it's divisible by both 2 and 5). So then you only have to really consider numbers ending with {1,3,7,9}. 60 | 61 | Well, so what? 
I guess my thought is that if you did the same thing with anti-prime P, you'd know any time the last digit was some multiple of the factors of P what the smallest factor was. 62 | 63 | It seems like these would cover a lot of ground... 64 | 65 | : */p:i.5 66 | : 2310 67 | 68 | So you could just have a lookup table that does all this. 69 | 70 | -------------------------------------------------------------------------------- /doc/scrap/three.org: -------------------------------------------------------------------------------- 1 | * bdd for "is divisible by 2" is just one node 2 | 3 | * bdd for "is divisible by 3" is trickier 4 | 5 | You are looking at window of 4 numbers, and asking whether they're divisible by 3. 6 | It's a simple repeating pattern: 7 | 8 | : (; 0 = 3 | ])s $ i.*/s=.10 4 9 | : ┌───────────┬───────┐ 10 | : │ 0 1 2 3│1 0 0 1│ 11 | : │ 4 5 6 7│0 0 1 0│ 12 | : │ 8 9 10 11│0 1 0 0│ 13 | : │12 13 14 15│1 0 0 1│ 14 | : │16 17 18 19│0 0 1 0│ 15 | : │20 21 22 23│0 1 0 0│ 16 | : │24 25 26 27│1 0 0 1│ 17 | : │28 29 30 31│0 0 1 0│ 18 | : │32 33 34 35│0 1 0 0│ 19 | : │36 37 38 39│1 0 0 1│ 20 | : └───────────┴───────┘ 21 | 22 | It would be nice if there were some way to say "this pattern repeats". 23 | 24 | Unfortunately, even though it repeats, there will never be a case where one bit branches between two copies of the same pattern. 25 | 26 | But what does it look like? 
27 | 28 | #+begin_src j 29 | (; 0 = 3 | ]) s $ i.*/s=.8 4 30 | ┌───────────┬───────┐ 31 | │ 0 1 2 3│1 0 0 1│ 32 | │ 4 5 6 7│0 0 1 0│ 33 | │ 8 9 10 11│0 1 0 0│ 34 | │12 13 14 15│1 0 0 1│ 35 | │16 17 18 19│0 0 1 0│ 36 | │20 21 22 23│0 1 0 0│ 37 | │24 25 26 27│1 0 0 1│ 38 | │28 29 30 31│0 0 1 0│ 39 | └───────────┴───────┘ 40 | #+end_src 41 | 42 | So here's the part that repeats: 43 | 44 | : 1 0 0 1 | 0 0 1 0 | 0 1 0 0 45 | 46 | And here's how the whole truth table looks for 5 input bits (32=8*4 possible input combinations) 47 | 48 | : 0 1 2 3 | 4 5 6 7 8 9 10 11 12131415 16171819 20212223 24252627 28293031 49 | : 1 0 0 1 | 0 0 1 0 | 0 1 0 0 | 1 0 0 1 | 0 0 1 0 | 0 1 0 0 | 1 0 0 1 | 0 0 1 0 50 | 51 | so let's define the twigs: 52 | 53 | : o: 0 54 | : i: 1 55 | : 1: oi 56 | : 2: io 57 | 58 | and rewrite: 59 | 60 | : i o o i | o o i o | o i o o | i o o i | o o i o | o i o o | i o o i | o o i o 61 | : 2 1 | o 2 | 1 o | 2 1 | o 2 | 1 o | 2 1 | o 2 62 | 63 | give names to the repeating things: 64 | 65 | : 3: 2 1 66 | : 4: o 2 67 | : 5: 1 o 68 | 69 | and now it's clear we have: 70 | 71 | : 2 1 o 2 | 1 o 2 1 | o 2 1 o | 2 1 o 2 72 | : 3 4 | 5 3 | 4 5 | 3 4 73 | 74 | 75 | So... It looks like it's actually pretty easy to generate and extend this sequence. 76 | 77 | At each level above the final leaves at the bottom, we have three types of node, and we just rotate between them to construct the next level up: 78 | 79 | : (o 1 2) 80 | : (3 4 5) 81 | 82 | Now we need new nodes: 83 | 84 | : 6: 3 4 85 | : 7: 5 3 86 | : 8: 4 5 87 | 88 | and then the pattern seems to repeat: 89 | 90 | : 9: 6 7 91 | : 10: 8 6 92 | : 11: 7 8 93 | 94 | : 12: 9 10 95 | : 13: 11 9 96 | : 14: 10 11 97 | 98 | So it seems like when we add a new layer to the bdd, we only wind up adding a few nodes. (three repeating pattern nodes, and then three nodes at the top to choose between the four patterns) 99 | 100 | 101 | 102 | * what about divisible by 5 and 7? 
103 | 104 | Let's look at these truth tables: 105 | 106 | : (; (0 = 5 | ]); (0 = 7 | ])) s $ i.*/s=.8 8 107 | : ┌───────────────────────┬───────────────┬───────────────┐ 108 | : │ 0 1 2 3 4 5 6 7│1 0 0 0 0 1 0 0│1 0 0 0 0 0 0 1│ 109 | : │ 8 9 10 11 12 13 14 15│0 0 1 0 0 0 0 1│0 0 0 0 0 0 1 0│ 110 | : │16 17 18 19 20 21 22 23│0 0 0 0 1 0 0 0│0 0 0 0 0 1 0 0│ 111 | : │24 25 26 27 28 29 30 31│0 1 0 0 0 0 1 0│0 0 0 0 1 0 0 0│ 112 | : │32 33 34 35 36 37 38 39│0 0 0 1 0 0 0 0│0 0 0 1 0 0 0 0│ 113 | : │40 41 42 43 44 45 46 47│1 0 0 0 0 1 0 0│0 0 1 0 0 0 0 0│ 114 | : │48 49 50 51 52 53 54 55│0 0 1 0 0 0 0 1│0 1 0 0 0 0 0 0│ 115 | : │56 57 58 59 60 61 62 63│0 0 0 0 1 0 0 0│1 0 0 0 0 0 0 1│ 116 | : └───────────────────────┴───────────────┴───────────────┘ 117 | 118 | We need to expand our scope to 8 outputs at a time for our view to be wide enough to see the whole repeating pattern. Since we're talking about compressing binary treees, we're always going to be looking at windows whose lengths are some powers of two, and cyclic patterns with periods smaller than that window. 119 | 120 | ** multiples of 5 121 | 122 | So for multiples of 5, let's look at the pattern. (we'll reuse the nodes from earlier): 123 | 124 | The repeating pattern is really this: 125 | 126 | : i o o o o , i o o o o , i o o o o , i o o o o , ... 127 | 128 | But we chop it like this: 129 | 130 | : | i o o o : o i o o | o o i o : o o o i | o o o o ... 131 | 132 | which becomes: 133 | 134 | : 2 o | 1 o | o 2 | o 1 | o o ... 
(and then it repeats) 135 | 136 | ** multiples of 7 137 | 138 | -------------------------------------------------------------------------------- /doc/spec.org: -------------------------------------------------------------------------------- 1 | #+title: towards a formal spec for bex 2 | 3 | * truth tables: 4 | TT(n) means a mapping of [0..2^n-1 |-> {0,1}] 5 | O = λ.0 (the mapping that always returns 0) 6 | I = λ.1 7 | TT(0) ∈ {O,I} 8 | TT(n) where n>0 is an ordered pair of two TT(n-1) 9 | - corresponds to choice between two TT(n-1) based on Var(n-1) 10 | - or simply an ordered pair of two things that map to TT(n-1) 11 | 12 | * operations on truth tables 13 | The boolean operations on TTs are simple and well defined. 14 | 15 | The problem is they're huge. 16 | 17 | Therefore we tend to use boolean expressions or formulas instead. 18 | 19 | In general, it can take a long time to decide whether two expressions are the same. 20 | - Not hard at all. Just very slow! 21 | - In particular we care: does it == O? (SAT) 22 | - Explain how this relates to P vs NP. 23 | 24 | Card(TT(n)) = 2^(2^n), so you need 2^n bits to distinguish between them. 25 | Since that's how many bits are in the truth table, they're self-enumerating. 26 | 27 | * coding schemes 28 | Usually we only care about a few of the possible truth tables, so we can create a coding scheme. 29 | 30 | The dyadic/triadic operations (if/then/else) can be decomposed and reassembled later. 31 | 32 | Now we can decompose operations on TT(n) into two operations on code names. 33 | 34 | This coding scheme allows us to efficiently memoize the sub-transactions. 35 | 36 | * distributed work 37 | Thee next step is to distribute the work across multiple workers. 38 | 39 | * current solution in bex (bddswarm) 40 | 41 | Currently, the main thread stores its cache in two dictionaries: 42 | 43 | - =stable= contains all "collected wisdom" from previous runs, is shared (immutably) by other threads. 
44 | - =recent= contains only new information collected since =stable= was last updated. 45 | 46 | The main function at the moment is ~run_swarm~. 47 | It re-initializes the swarm. 48 | It clears out the channels, copies =recent= to =stable=, and 49 | 50 | 51 | 52 | * in the future: 53 | The problem is: 54 | - each worker is going to generate new names for things in its 'recent' dictionary, 55 | - *AND* we want names to /uniquely/ identify truth tables 56 | - So we need some way for the workers to "compare notes" 57 | 58 | The goal, then, is to have workers periodically merge their states. 59 | -------------------------------------------------------------------------------- /doc/var-orders.org: -------------------------------------------------------------------------------- 1 | #+title: variable orders 2 | 3 | Traditionally, ROBDDs order the variables so that the lowest numbered variable is at the top. This makes sense: to navigate the structure, you consider the input bits "in order", so the "first" bit goes at the top. 4 | 5 | But, internally, it makes sense to number the nodes in the opposite direction. 6 | 7 | Imagine a number line in binary. You want input bit n to map to the bit in the 2^n place of the binary number. 8 | 9 | : | 111111 10 | : | 0123456789012345 ... 11 | : ----+--------------------- 12 | : 2^3 | ░░░░░░░░▓▓▓▓▓▓▓▓ ... 13 | : 2^2 | ░░░░▓▓▓▓░░░░▓▓▓▓ ... 14 | : 2^1 | ░░▓▓░░▓▓░░▓▓░░▓▓ ... 15 | : 2^0 | ░▓░▓░▓░▓░▓░▓░▓░▓ ... 16 | 17 | The advantage for a BDD is that if you see the same node in two different functions, it always corresponds to the same truth table. 18 | 19 | For example, in a "bottom up" scheme like this, the normal node "nv(0)" would always correspond to a truth table of length 2, with a 0 on the left and a 1 on the right. 20 | 21 | But in the traditional "top down" scheme, nv(0) represents a truth table of length 2^n where n is however many input variables there are. 
22 | 23 | Further, if you are program looking at a node that branches on variable n in a top-down scheme, you have no idea how far down the structure goes, nor can you compare the maximum possible depths of the two branches. 24 | 25 | In a bottom-up scheme, a node that branches on variable n has a truth table of size 2^n, and a depth of at most (n). You still can't tell for sure which of the branches has the bigger sub-graph, but you can compare the two branch variables to compare the maximum possible depth. 26 | 27 | Also, if you take the view that every boolean function is just an XOR-sum of infinite repeating patterns, you want those repeating patterns to always have the same names: 28 | 29 | 30 | : ░░░░░░░░░░░░░░░░... -> o 31 | : ▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓... -> i 32 | : ░▓░▓░▓░▓░▓░▓░▓░▓... -> x...0 33 | : ░░▓▓░░▓▓░░▓▓░░▓▓... -> x..1. 34 | : ░░░▓░░░▓░░░▓░░░▓... -> x..10 35 | : ░░░░▓▓▓▓░░░░▓▓▓▓... -> x.2.. 36 | : ░░░░░▓░▓░░░░░▓░▓... -> x.2.0 37 | : ░░░░░░▓▓░░░░░░▓▓... -> x.21. 38 | : ░░░░░░░▓░░░░░░░▓... -> x.210 39 | : ░░░░░░░░▓▓▓▓▓▓▓▓... -> x3... 40 | : ░░░░░░░░░▓░▓░▓░▓... -> x3..0 41 | : ░░░░░░░░░░▓▓░░▓▓... -> x3.1. 42 | : ░░░░░░░░░░░▓░░░▓... -> x..10 43 | : ░░░░░░░░░░░░▓▓▓▓... -> x32.. 44 | : ░░░░░░░░░░░░░▓░▓... -> x32.0 45 | : ░░░░░░░░░░░░░░▓▓... -> x321. 46 | : ░░░░░░░░░░░░░░░▓... -> x3210 47 | 48 | In any case, if we want to maximize the chance of hitting the cache with many different functions of different numbers of variables, we should probably keep lower numbers at the bottom. I haven't actually tested this, but it stands to reason that the most-reused nodes are likely to be the ones closer to the bottom, so it makes sense if they always get the same number. 
49 | 50 | -------------------------------------------------------------------------------- /doc/wipbase.org: -------------------------------------------------------------------------------- 1 | #+title: understanding the (proposed) WIPBase trait 2 | 3 | * concepts 4 | 5 | - == *Queries* are requests to the swarm. ("ITE" in BDDSwarm) 6 | 7 | - QMSG conveys a query. There are two types: 8 | - actual query (asking to construct a new node in the base) 9 | - message to update the cache 10 | 11 | - *Parts* is a struct for collecting the parts necessary to construct a node. It looks basically like the final well-formed node, but members that will take time to compute are wrapped in =Option<>=. 12 | 13 | #+begin_example rust 14 | struct BddParts{ v:VID, hi:Option, lo:Option, invert:bool} 15 | #+end_example 16 | 17 | - *Part* is an enum representing the parts necessary to construct a well-formed node. 18 | 19 | #+begin_example rust 20 | enum BddPart { HiPart, LoPart } 21 | #+end_example 22 | 23 | - *Dep* is used to track dependencies between queries. Generally, a query is issued because it will result in a value that will become a part of some other query. Because the overall graphs we're dealing with tend to have many shared sub-structures, low level queries may have many high-level queries that depend on them. 24 | 25 | 26 | * Runtime structure 27 | 28 | At runtime, we will have one main 'swarm' and many worker threads/subprocesses. 29 | 30 | For BDDSwarm, the top-level entry point is =run_swarm=. It takes arguments i,t,e at the time of this writing, but probably ought to take a 'norm'. 31 | 32 | Internally, that calls =add_task= at the top level, and then receives a constant stream of (qid, rmsg) pairs, some of which result in further calls to =add_task=. 33 | 34 | I think =add_task= and =init_swarm= belong to the generic =wip/swarm= module, and it should also encapsulate all the queue stuff.
35 | 36 | 37 | * Short circuiting 38 | 39 | When working with large boolean expressions, there are often opportunities to short-circuit the evaluation of sub-expressions: for example, if you are calculating AND(X,Y), where X and Y are complicated sub-expressions, it may quickly become apparent that one of the inputs actually evaluates to "0"... In which case, there is no further need to evaluate the other side of the expression. 40 | -------------------------------------------------------------------------------- /doc/workers.org: -------------------------------------------------------------------------------- 1 | #+title: BDDWorkers 2 | 3 | * The BDDWorker trait 4 | ** interface 5 | #+begin_src rust 6 | pub trait BddWorker : Sized + Serialize { 7 | fn new(nvars:usize)->Self; 8 | fn new_with_state(state: S)->Self; 9 | fn nvars(&self)->usize; 10 | fn tup(&self, n:NID)->(NID,NID); 11 | fn ite(&mut self, f:NID, g:NID, h:NID)->NID; 12 | } 13 | #+end_src 14 | 15 | ** implementations 16 | *** SimpleBddWorker 17 | This is just a refactoring of my original implementation of the BDD algorithm -- a straightforward, single-threaded, recursive implementation. The "work to be done" is implicit in the call stack, so the top-level =ite= function doesn't return until the entire BDD is constructed. 18 | 19 | *** BddSwarm 20 | This implementation reifies work-in-progress so that work can be farmed out to multiple threads. 21 | 22 | Each thread is running =swarm_loop=, which just listens for =QMsg= queries. These are either =Ite= messages, containing new work to be done, or =Cache= messages, which update the local thread's cache of known BDD nodes. 23 | 24 | =BddSwarm::swarm=, then, is just a vector of channels for sending =QMsg= structs -- one channel per thread. 25 | 26 | =BddSwarm::ite= calls =BddSwarm::run_swarm=. 
This is something like a trampoline function: it adds the top level task to its internal work queue by calling =BddSwarm::add_task=, then waits for a response to come back over the receiving channel. 27 | 28 | =add_task= checks whether we're already working on an identical =ite= task. If so, it marks the new request as being dependent on completion of the original task, otherwise it adds the request to =.ites= and stores the new index in =.qid= (a hashmap of =ITE= =-> =QID=, where a =QID= is just an index into =.ites=)... Then it adds a fresh =BddWIP= struct to =.wip= to hold the result. 29 | 30 | Each =swarm_loop= thread calls =swarm_ite= for each =QMsg= it receives. =swarm_ite= normalizes the ITE, then calls =swarm_ite_norm=. This either immediately resolves to a =NID=, or else creates a =WIP= node. Either way, it gets sent back as an =RMsg= to the main thread and processed in =run_swarm=. 31 | 32 | When =run_swarm= gets the =RMsg=, it either processes the finished result or adds tasks for whichever sides of the =WIP= need to be processed. 33 | 34 | So... The overall effect is that we're still building the BDD from top to bottom, but the work happens in parallel, spread out across the various threads. 35 | 36 | *** Idea: a new depth-first worker. 37 | 38 | The problem with building a BDD top-down is that it can be really really slow, and you have to wait for the whole thing to finish before you can use it. A bottom up approach would be nicer, since leaf nodes are usable as soon as they're generated. 39 | 40 | Instead of having =run_swarm= add tasks for both the high and low branches of an =RMsg::Wip=, it might only add one to the queue at a time: If =lo= is a fully resolved =NID=, it adds a task for =hi=, and otherwise it adds a task for =lo=. (They're never both resolved, or it wouldn't be WIP). 
41 | 42 | The result should be a WIP BDD whose leftmost (if you visualize =lo= branches on the left) path is a chain of real input variables, in the proper order, with many un-expanded WIP nodes branching off to the right. 43 | 44 | Originally, I had pictured assembling this structure via substitution into an AST. but it seems like this algorithm /ought/ to be fast enough to use for intermediate nodes. In fact, it may actually wind up being faster than substitution, because building the chain is just one long sequence that can't be done in parallel... But since we're working with =BInt= structures, we could perform the operations for different intermediate bits in parallel. 45 | 46 | Either way we build it, our final result tells us at least the leftmost entry in the function's truth table, and possibly much more (if any =lo= branches along the leftmost path resolved to a NID, we know everything to the left of that point in the truth table.) 47 | 48 | The final "WIP" result of this process would incorporate /all/ the work that needs to be done to generate the complete truth table, /and/ the work would be ordered by input variable. 49 | 50 | Whether we used substitution or not, this mechanism would preserve the main /benefit/ of substitution: not having to fully expand a BDD until the maximal number of constraints were applied. 51 | 52 | Using this method for all intermediate nodes seems like it would offer an important benefit for repeated search when some of the input bits are fixed during each run: the fixed bits could be numbered so they appear at the bottom of the BDD, and when we start the bottom-up search process on the final node, we could have two kinds of workers: one group for fixed variables, and one for search variables. 53 | 54 | In other words, the initial process for each node gives us a "BDD spine", with a complete path through all variables on the left, and a bunch of WIP nodes off to the right. 
(Most of these would be compositions of other, intermediate WIP nodes, collectively representing all work to be done to generate the complete truth table.) 55 | 56 | The search process walks the spine from the top until it reaches the first fixed node. We know that everything below this point is made of constant values. There's no "searching" to be done: only on-the-fly calculation and substitution of constants for the fixed variables. Conceptually, these entire sub-graphs are constant. 57 | 58 | So, the search would branch at this point: some workers would start substituting constants into the 'fixed' part. Other workers would walk up the spine and start refining the next node up. 59 | 60 | Eventually over many runs, we would arrive at a situation where we had a complete binary tree of the search variables, leading to many, many partially-defined functions of only the fixed variables. (There would be 2^n of them in the worst case, where n = the number of search variables.) 61 | 62 | What could we do with this? I see several options: 63 | 64 | - Evaluate them all in batch mode on the fly, so we just construct the sub-bdd on the fly, but only the branches that match the actual fixed input values. This essentially is no better than a linear brute force search through the inputs to the original function. 65 | 66 | - We could "or" them all together, giving us a new function of the fixed variables, which told us whether or not a solution existed at all. This could also be constructed on the fly, given the fixed variables we're actually presented with. (Since we know the value of the branching value of each WIP node, it ought to be possible to "instantiate" and evaluate this function in O(n_fixed) time, no matter how messy and complicated it would be to flesh out the whole BDD. This "existential" function either tells us there's no answer, in which case we move on to the next set of fixed inputs, /or/ it tells us that an answer exists, and we do the search. 
But: we don't have to brute force the search, because we can construct the existential for a truth table by "or"-ing the existentials for the two halves of the truth table. So our binary tree of search variables (the upper, expanded part of the full BDD) becomes a blueprint for constructing a binary tree of existentials... A binary tree of existentials means: 67 | 68 | - We know /almost immediately/ whether an answer exists for the run. 69 | - /If/ it exists, we can perform a binary search to quickly find it. 70 | 71 | This second option makes an incredible amount of sense to me. 72 | 73 | 74 | 75 | 76 | 77 | * Test suite 78 | swarm test suite is in bdd.rs: 79 | 80 | - =test_swarm_xor= 81 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | 2 | # bex examples 3 | 4 | **NOTE:** these examples are kind of a mess at the moment. I'm documenting them as-is partially to make the cleanup process a bit easier next time I work on this. 5 | 6 | ## bdd-subst 7 | 8 | This is my main benchmark: 9 | 10 | # run from top level directory 11 | $ cargo run --bin bdd-solve 12 | 13 | **NOTE**: this might take years to run. get ready to press ^C. 14 | 15 | Smaller benchmarks can be run with: 16 | 17 | $ cargo bench --bench bench-solve tiny 18 | $ cargo bench --bench bench-solve small 19 | 20 | Even the "small" benchmark is currently painfully slow, which is sad, because it's just solving to find two u8 values that multiply together to get the u16 value 210... Something you can probably do faster with a pencil and paper. 21 | 22 | The 'bench' commands of course run the same thing over and over many times. There are also 'test' commands, but these currently require `graphviz` and `firefox` to be installed and on your path. 23 | 24 | (I know this is dumb: tests shouldn't have side effects. I intend to clean this up. 
Feel free to comment the relevant lines out, or just install graphviz from http://graphviz.org/ ) 25 | 26 | 27 | ## bex-shell 28 | 29 | This is a rudimentary shell that lets you build up expressions interactively. 30 | 31 | It's *extremely* rough and probably not terribly useful right now. (It only 32 | uses AST expressions... At one point it was using BDD nodes, but this is commented out until I get around to unifying base::TBase with bdd::Base. 33 | 34 | # from top-level directory: 35 | cargo run --bin bex-shell 36 | 37 | The syntax is forth-like, meaning each whitespace delimited token is executed in sequence from left to right, and each word takes and consumes a stack of values. The values are generally treated as NIDs. 38 | 39 | The words are: 40 | 41 | -> push n onto the stack 42 | i -> push I (true) onto the stack 43 | o -> push O (false) onto the stack 44 | q -> quit 45 | . -> print and drop topmost item 46 | dot -> show the dot syntax for the current node 47 | sho -> actually render it (panics if dot/graphviz not installed) 48 | not -> negate the node on the stack 49 | vars -> allocate vars 50 | drop -> drop topmost item from stack 51 | dup -> copy topmost item 52 | swap -> swap top two items 53 | reset -> clear the stack 54 | 55 | 56 | example: 57 | 58 | > 4 vars $0 $1 and $2 or 59 | [ 7 ] 60 | > dot 61 | digraph bdd { 62 | rankdir=BT; 63 | node[shape=circle]; 64 | edge[style=solid]; 65 | 7[label=∨]; 66 | 4->7; 67 | 6->7; 68 | 4[label="$2"]; 69 | 6[label=∧]; 70 | 2->6; 71 | 3->6; 72 | 2[label="$0"]; 73 | 3[label="$1"]; 74 | } 75 | [ ] 76 | -------------------------------------------------------------------------------- /examples/shell/bex-shell.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::io::Write; 3 | use std::collections::HashMap; 4 | use std::str::FromStr; 5 | 6 | extern crate bex; 7 | use bex::*; 8 | use bex::nid::NID; 9 | use bex::ast::ASTBase; 10 | use bex::solve; 11 | use 
bex::anf::ANFBase; 12 | use bex::bdd::BddBase; 13 | use bex::ops; 14 | 15 | // forth-like REPL for the BDD (helper routines) 16 | 17 | fn readln()->String { 18 | let mut buf = String::new(); 19 | print!("> "); 20 | io::stdout().flush() .expect("couldn't flush stdout."); 21 | io::stdin().read_line(&mut buf) .expect("failed to read line."); 22 | buf} 23 | 24 | fn swap(data: &mut [NID]) { 25 | let p = data.len()-1; 26 | if p > 0 { data.swap(p-1,p) }} 27 | 28 | fn pop(data: &mut Vec)->T { 29 | data.pop().expect("underflow")} 30 | 31 | fn pop2(data: &mut Vec)->(T,T){ 32 | let y=pop(data); let x=pop(data); (x,y) } 33 | 34 | /*fn pop3(data: &mut Vec)->(T,T,T){ 35 | let (y,z)=pop2(data); let x=pop(data); (x,y,z) }*/ 36 | 37 | 38 | // forth-like REPL for the BDD (main loop) 39 | 40 | // fn to_io(b:bool)->NID { if b {Op::I} else {Op::O} } 41 | // enum Item { Vid(VID), Nid(NID), Int(u32) } 42 | 43 | fn repl(base:&mut ASTBase) { 44 | let mut scope = HashMap::new(); 45 | let mut data: Vec = Vec::new(); 46 | let mut bdds = BddBase::new(); 47 | let mut anfs = ANFBase::new(); 48 | 49 | 'main: loop { 50 | print!("[ "); for x in &data { print!("{} ", *x); } println!("]"); 51 | let line = readln(); 52 | for word in line.split_whitespace() { 53 | match word { 54 | "~"|"not"|"!" 
=> { let x = pop(&mut data); data.push(!x) } 55 | "and" => { let (x,y)=pop2(&mut data); data.push(base.and(x,y)) } 56 | "xor" => { let (x,y)=pop2(&mut data); data.push(base.xor(x,y)) } 57 | "or" => { let (x,y)=pop2(&mut data); data.push(base.or(x,y)) } 58 | "'and" => { data.push(ops::AND.to_nid()) } 59 | "'or" | 60 | "'vel" => { data.push(ops::VEL.to_nid()) } 61 | "'xor" => { data.push(ops::XOR.to_nid()) } 62 | "'imp" => { data.push(ops::IMP.to_nid()) } 63 | "'nor" => { data.push(ops::NOR.to_nid()) } 64 | //"lt" => { let (x,y)=pop2(&mut data); data.push(base.lt(x,y)) } 65 | // "gt" => { let (x,y)=pop2(&mut data); data.push(base.gt(x,y)) } 66 | //todo "lo" => { let (x,y)=pop2(&mut data); data.push(base.when_lo(y,x)) } 67 | //todo "hi" => { let (x,y)=pop2(&mut data); data.push(base.when_hi(y,x)) } 68 | //todo "cnt" => { let x = pop(&mut data); data.push(base.node_count(x)) } 69 | // "ite" => { let (x,y,z) = pop3(&mut data); data.push(base.ite(x,y,z)); } 70 | //todo "shuf" => { let (n,x,y) = pop3(&mut data); data.push(base.swap(n,x,y)); } 71 | // "norm" => { let (x,y,z) = pop3(&mut data); println!("{:?}", base.norm(x,y,z)) } 72 | // "tup" => { let (v,hi,lo) = base.tup(data.pop().expect("underflow")); println!("({}, {}, {})", v,hi,lo); }, 73 | //todo "rep" => { let (x,y,z)=pop3(&mut data); data.push(base.replace(x,y,z)); } 74 | //"var?" => { let x=pop(&mut data); data.push(to_io(base.is_var(x))); } 75 | //todo "dep?" 
=> { let (x,y)=pop2(&mut data); data.push(to_io(base.might_depend(x,y))); } 76 | // "deep" => { let x = pop(&mut data); data.push(base.deep[x]); } 77 | "dot" => { let mut s=String::new(); base.dot(pop(&mut data),&mut s); print!("{}", s); } 78 | "sho" => base.show(pop(&mut data)), 79 | "bdd" => { let top=pop(&mut data); let n = solve::solve(&mut bdds,base.raw_ast(),top).n; bdds.show(n); data.push(n); } 80 | "bdd-dot" => { let mut s=String::new(); bdds.dot(pop(&mut data),&mut s); print!("{}", s); } 81 | "anf" => { let top=pop(&mut data); let n = solve::solve(&mut anfs,base.raw_ast(),top).n; anfs.show(n); data.push(n); } 82 | "anf-dot" => { let mut s=String::new(); anfs.dot(pop(&mut data),&mut s); print!("{}", s); } 83 | 84 | // generic forth commands 85 | "q" => break 'main, 86 | "." => { let nid = data.pop().expect("underflow"); println!("{}", nid); } 87 | "drop" => { let _ = pop(&mut data); } 88 | "dup" => { let x = pop(&mut data); data.push(x); data.push(x); } 89 | "swap" => swap(&mut data), 90 | "reset" => data = Vec::new(), 91 | //todo "save" => base.save("saved.bdd").expect("failed to save bdd"), 92 | //todo "load" => base.load("saved.bdd").expect("failed to load bdd"), 93 | // bdd commands 94 | "i"|"I" => data.push(nid::I), 95 | "o"|"O" => data.push(nid::O), 96 | _ => { 97 | // define a new binding 98 | if word.starts_with(':') { 99 | let var = word.to_string().split_off(1); 100 | let val = pop(&mut data); 101 | scope.insert(var,val); } 102 | // recall definition 103 | else if let Some(&val) = scope.get(word) { data.push(val); } 104 | // attempt to parse nid 105 | else { match NID::from_str(word) { 106 | Ok(nid) => data.push(nid), 107 | Err(err) => println!("{}", err)}}}}}}} 108 | 109 | include!(concat!(env!("OUT_DIR"), "/bex-build-info.rs")); 110 | fn main() { 111 | println!("bex {BEX_VERSION} | compile flags: -O{BEX_OPT_LEVEL} | type 'q' to quit"); 112 | let mut base = ASTBase::empty(); 113 | // for arg in ::std::env::args().skip(1) { load(arg.as_str()) } 
114 | repl(&mut base) } 115 | -------------------------------------------------------------------------------- /examples/shell/swarm-shell.rs: -------------------------------------------------------------------------------- 1 | /// This is a (completely useless) shell for interacting with the swarm 2 | /// while it's running. I wrote it to debug VhlSwarm and figure out how 3 | /// to send messages to it while it was in a separate thread. (The answer 4 | /// was to expose q_sender and poll that channel in swarm::run()) 5 | 6 | use std::io; 7 | use std::io::Write; 8 | use std::thread; 9 | 10 | use bex::bdd::{NormIteKey, ITE}; 11 | use bex::swarm::SwarmCmd; 12 | use bex::vhl_swarm::VhlQ; 13 | use bex::NID; 14 | 15 | extern crate bex; 16 | 17 | fn readln()->String { 18 | let mut buf = String::new(); 19 | print!("> "); 20 | io::stdout().flush() .expect("couldn't flush stdout."); 21 | io::stdin().read_line(&mut buf) .expect("failed to read line."); 22 | buf} 23 | 24 | include!(concat!(env!("OUT_DIR"), "/bex-build-info.rs")); 25 | fn main() { 26 | println!("bex swarm-shell {BEX_VERSION} | compile flags: -O{BEX_OPT_LEVEL} | type 'q' to quit"); 27 | let mut bdd = bex::bdd::BddBase::new(); 28 | let to_swarm = bdd.swarm.q_sender(); 29 | 30 | // Spawn a new thread for VhlSwarm. `bdd` is moved into the closure. 31 | thread::spawn(move || { 32 | // TODO: give name to VhlQ in bdd modules 33 | // !! also maybe swap the argument order for the types? 
34 | bdd.swarm.run(|_wid, _qid, rmsg|->SwarmCmd,()> { 35 | if let Some(r) = rmsg { 36 | println!("received: {:?}", r); 37 | SwarmCmd::Pass } 38 | else { SwarmCmd::Pass }})}); 39 | 40 | 'main: loop { 41 | for word in readln().split_whitespace() { 42 | match word { 43 | "q" => { break 'main} 44 | "o" => { to_swarm.send(VhlQ::Job(NormIteKey(ITE { 45 | i: NID::var(1), 46 | t: NID::var(2), 47 | e: NID::var(3)}))).unwrap() } 48 | _ => { println!("you typed: {:?}", word) }}}}} 49 | -------------------------------------------------------------------------------- /examples/solve/factor-p.rs: -------------------------------------------------------------------------------- 1 | //! this benchmark programs splits various primorials 2 | //! into two factors x,y where that xVec<(u64,u64)> { 8 | vec![(1,210), (2,105), ( 3,70), ( 5,42), 9 | (6, 35), (7, 30), (10,21), (14,15)]} 10 | 11 | /// Product of the first 5 primes: 2 3 5 7 11 (12 bits, treat as 16-bit) 12 | const P5 : usize = 2_310; 13 | fn p5_factors()->Vec<(u64,u64)> { 14 | vec![(10, 231), (11, 210), (14, 165), (15, 154), (21, 110), 15 | (22, 105), (30, 77), (33, 70), (35,66), (42,55)]} 16 | 17 | /// Product of the first 6 primes: 2 3 5 7 11 13 (15 bits, treat as 16-bit) 18 | const P6 : usize = 30_030; 19 | fn p6_factors()->Vec<(u64,u64)> { 20 | vec![(130,231), (143,210), (154,195), (165,182)]} 21 | 22 | 23 | extern crate bex; 24 | use bex::{solve::find_factors, bdd::BddBase, int::{X8,X16}, swap::SwapSolver}; 25 | 26 | include!(concat!(env!("OUT_DIR"), "/bex-build-info.rs")); 27 | 28 | pub fn main() { 29 | // -- parse arguments ---- 30 | let mut use_swap = false; 31 | let mut get_which = false; let mut which = 4; 32 | let mut get_threads = false; let mut num_threads = 0; 33 | for a in std::env::args() { 34 | if get_threads { num_threads = a.parse().expect("bad -t parameter"); get_threads=false; } 35 | else if get_which { which = a.parse().expect("bad -p parameter"); get_which=false; } 36 | else { match a.as_str() { 37 | 
"-t" => get_threads = true, 38 | "-p" => get_which = true, 39 | "swap" => use_swap = true, 40 | _ => { /* ignore for now */} }}} 41 | 42 | let (k, expected) = match which { 43 | 4 => (P4, p4_factors()), 44 | 5 => (P5, p5_factors()), 45 | 6 => (P6, p6_factors()), 46 | _ => { panic!("the available primorials are: 4,5,6") }}; 47 | 48 | // -- print current configuration --- 49 | println!("[bex {BEX_VERSION} -O{BEX_OPT_LEVEL}] factor-p4 -t {num_threads} -p {which} ({})", 50 | if use_swap { "swap solver" } else { "sub solver" }); 51 | 52 | // ---- run the requested solver 53 | if use_swap { 54 | if num_threads != 0 { println!("note: swap solver ignores -t parameter"); } 55 | find_factors::(&mut SwapSolver::new(), k, expected); } 56 | else { find_factors::(&mut BddBase::new_with_threads(num_threads), k, expected); }} 57 | -------------------------------------------------------------------------------- /plans.org: -------------------------------------------------------------------------------- 1 | #+title: plans for bex 2 | 3 | My highest priority goal is to improve the speed of the solver(s). 
4 | 5 | * TODO [4/6] version 0.1.6 : optimization round 6 | ** DONE remove =nvars= param from =Base= 7 | ** DONE remove redundant ~vindex~ from ~HiLoCache~ 8 | ** DONE port BddSwarm to new swarm::Swarm framework 9 | ** TODO [0/4] solver optimizations 10 | *** TODO use concurrent data structures (dashmap and boxcar) for BddState to avoid copies 11 | *** TODO make debug output optional 12 | *** TODO optimization: nid-level truth tables for bottom 5 variables 13 | *** TODO have swarm workers descend lo branches on their own 14 | This should help fully utilize each CPU 15 | ** TODO [2/3] better graphviz rendering 16 | *** DONE use command line argument instead of macro parameter to toggle solver visualizations 17 | examples: 18 | #+begin_src shell 19 | cargo test nano_bdd -- -- -a # show AST 20 | cargo test nano_bdd -- -- -r # show result 21 | cargo test nano_bdd -- -- -a -r # show both 22 | cargo test nano_bdd -- --nocapture -- -a -r # show both, and include output logs 23 | #+end_src 24 | 25 | *** DONE draw inverted top level with an extra "not" node 26 | *** TODO group bdd/anf nodes by level 27 | some combination of these ideas: 28 | #+begin_src dot 29 | { edge [ style=invis ]; 30 | rankdir=LR; 31 | rank=same; } 32 | #+end_src 33 | ** DONE [4/4] other improvements 34 | *** DONE move =expr= macro to the base module 35 | *** DONE come up with basic decorator pattern scheme for bases 36 | will use for things like: 37 | - toggle bookkeeping for benchmarks 38 | - toggle individual optimizations 39 | - consolidate BASE/anf normalizers 40 | - swap out work coordination strategies (swarm/etc) 41 | - swap out different kinds of normalizer (main vs ITE) 42 | (allow preserving the original expression) 43 | - toggle use of constant truth tables in the nid 44 | - configure larger constant truth tables at other levels 45 | - even toggle caching to see what it gets us 46 | *** DONE remove .i, .o, .var, vir from =Base= 47 | Use the corresponding nid functions instead. 
48 | *** DONE extract =vhl::Walkable= trait, and add =walk_up= 49 | 50 | 51 | * -- backlog (unsorted) -- 52 | ** TODO [0/2] better benchmarking 53 | *** TODO [2/3] collect metrics 54 | **** what to collect 55 | - for each benchmark: 56 | - original AST: 57 | - time to generate 58 | - number of nodes (broken down by type?) 59 | - number of cache tests/fails (not really that important, but might as well?) 60 | - for each step: 61 | - time to generate 62 | - number of nodes at each step 63 | - number of xmemo cache tests/fails (we don't care about hilos) 64 | - number of calculations saved from short circuits 65 | 66 | **** TODO =Base::ord(nid)= for graph order (number of nodes) 67 | **** DONE record timing information at each step 68 | I do this now in seconds. Let's switch to millis. 69 | **** DONE track cache attempts / hits 70 | I can't do this in the base itself because copies are shared and therefore immutable. 71 | 72 | So instead, use thread-local counters: 73 | 74 | - xmemo lookup 75 | - xmemo fail 76 | - hilos lookup 77 | - hilos fail 78 | - hilos create 79 | 80 | Query, sum, and reset the counters after each round. 81 | 82 | *** TODO store the metrics 83 | - write (step#, time, ord, lookups, hits, shorts) to csv after each step 84 | ** TODO [2/3] swarm for ANF 85 | *** DONE extract basic test suite for =trait Base= 86 | Just take the simple tests that exist for =ast= and =bdd= 87 | *** DONE Create anf.rs stub and get the simple tests passing. 88 | *** TODO Extract wip.rs from BDDSwarm 89 | **** WIP = work in progress 90 | The idea is to reify work-in-progress so that the work can be prioritized and distributed across multiple workers. 91 | **** trait WIPBase : Base 92 | - Slow-running bases should be WIP. 93 | - Q: type for queries 94 | - W: type for work-in-progress nodes 95 | - C: type for finished work cache 96 | **** struct FWBase 97 | This is a generic type for finished work. 98 | 99 | *** TODO Finish the ANF implementation as a WIPBase. 
100 | ** add some more benchmarks 101 | *** keep scaling the multiplication problem search space by 1 bit 102 | *** and/xor tables for fns of n bits 103 | *** n queens 104 | https://github.com/chrisying/parabdd/blob/master/src/nqueens.cpp 105 | ** compare benchmarks 106 | *** compare to CUDD 107 | *** compare to BUDDy (has vectorized operations) 108 | http://vlsicad.eecs.umich.edu/BK/Slots/cache/www.itu.dk/research/buddy/ 109 | *** compare to sylvan (MULTI-CORE BDD) 110 | https://github.com/trolando/sylvan 111 | *** =dd= for python offers all three: 112 | https://github.com/tulip-control/dd 113 | ** proper sifting for bdds 114 | ** more new base types 115 | *** ZddBase 116 | *** CnfBase 117 | **** Plain CNF 118 | **** Tseytin Transformation 119 | **** SAT solver 120 | *** Biconditional BDD (a=b decomposition) 121 | *** BmpBase (raw bitmaps) 122 | ** allow swarms to run across the network 123 | ** web browser for bases 124 | ** move tagging to a separate struct 125 | ** implement visitor pattern for dot, count, etc 126 | *** move walk/step to top level 127 | *** linear walk of the nids (for permute/save) 128 | ** implement zdd base 129 | ** implement biconditional bdd base (bic.rs)? 
130 | ** implement cnf base 131 | https://en.wikipedia.org/wiki/Conjunctive_normal_form 132 | ** implement aig (nand) base 133 | https://en.wikipedia.org/wiki/And-inverter_graph 134 | ** integrate with other rust bdd libraries 135 | ** generate vhdl/verilog 136 | ** generate compute shaders 137 | ** visual debugger for solver 138 | *** use labels for vars when generating dot 139 | *** be able to highlight certain nodes 140 | ** more optimization ideas 141 | *** bring relevant variables to top in solver 142 | - maintain top-level vec for variable permutation 143 | - at each step: 144 | - bring highest numbered =Vir= to the top 145 | - fetch relevant inputs to =Vir= in the AST 146 | - raise relevant inputs to 2nd and 3rd layers in BDD 147 | *** periodically sift variables to reduce solution size 148 | *** optionally, return vars to desired ordering in final output 149 | *** nid-level truth tables for /any/ 5 variables 150 | ** mark each AST node with highest input var, so =NoV= can go away (??) 151 | -------------------------------------------------------------------------------- /py/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bex_py" 3 | version = "0.3.0" 4 | authors = ["tangentstorm "] 5 | edition = "2021" 6 | license = "MIT" 7 | description = "Python bindings for bex, a rust crate for working with boolean functions as graphs (BDDs, etc)." 8 | repository = "https://github.com/tangentstorm/bex/tree/main/py" 9 | 10 | [lib] 11 | name = "bex" 12 | crate-type = ["cdylib"] 13 | 14 | [dependencies] 15 | bex = { path = ".." 
} 16 | fxhash = "0.2.1" 17 | 18 | [dependencies.pyo3] 19 | version = "0.23.4" 20 | features = ["extension-module"] 21 | -------------------------------------------------------------------------------- /py/README.md: -------------------------------------------------------------------------------- 1 | # bex-py 2 | 3 | This is a python binding for the `bex` crate, a library for boolean expression manipulation. It is inspired by and intends to be mostly compatible with the `dd` package. 4 | 5 | ## Installation 6 | 7 | ```bash 8 | pip install tangentstorm-bex 9 | ``` 10 | 11 | ## Usage 12 | 13 | Here's a simple example of how to use the `bex` module: 14 | 15 | ```python 16 | import bex 17 | 18 | # Create a BDD base 19 | base = bex.BddBase() 20 | 21 | # Create some variables 22 | a = bex.nvar(0) 23 | b = bex.nvar(1) 24 | 25 | # Perform some operations 26 | c = base.op_and(a, b) 27 | d = base.op_or(a, b) 28 | 29 | # Print the results 30 | print(f"a & b = {c}") 31 | print(f"a | b = {d}") 32 | ``` 33 | 34 | Here's a simple example of how to use the `dd_bex` module: 35 | 36 | ```python 37 | from bex.dd import BDD 38 | 39 | bdd = BDD() 40 | 41 | bdd.declare('x0', 'x1', 'x2') 42 | 43 | n = bdd.add_expr('(x0 & x1) | x2') 44 | 45 | bdd.dump('and-or.svg', [n]) 46 | 47 | import webbrowser 48 | webbrowser.open('and-or.svg') 49 | ``` 50 | 51 | ![(x0 & x1)|x2](./and-or.svg "and-or.svg") 52 | 53 | ## License 54 | 55 | This project is licensed under the MIT License. See the `LICENSE` file for details. 
56 | -------------------------------------------------------------------------------- /py/and-or.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 9 | 10 | bdd 11 | 12 | 13 | h1 14 | 15 | BDD 16 | 17 | 18 | I 19 | 20 | 21 | 22 | 23 | O 24 | 25 | 26 | 27 | 28 | x2.1 29 | 30 | x2 31 | 32 | 33 | x2.1->I 34 | 35 | 36 | 37 | x1.0 38 | 39 | x1 40 | 41 | 42 | x2.1->x1.0 43 | 44 | 45 | 46 | x1.0->O 47 | 48 | 49 | 50 | x0 51 | 52 | x0 53 | 54 | 55 | x1.0->x0 56 | 57 | 58 | 59 | x0->I 60 | 61 | 62 | 63 | x0->O 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /py/bex/__init__.py: -------------------------------------------------------------------------------- 1 | """Python package for binary expressions.""" 2 | 3 | from _bex import ( 4 | NID, VID, ASTBase, BddBase, Reg, Cursor, 5 | var, vir, nvar, nvir, O, I 6 | ) 7 | 8 | __all__ = ['dd', 'NID', 'VID', 'ASTBase', 'BddBase', 'Reg', 'Cursor', 9 | 'var', 'vir', 'nvar', 'nvir', 'O', 'I'] 10 | 11 | from . import dd 12 | -------------------------------------------------------------------------------- /py/dd-test.sh: -------------------------------------------------------------------------------- 1 | PYTHONPATH=/d/src/dd/tests pytest.exe --exitfirst dd_bex_tests.py 2 | -------------------------------------------------------------------------------- /py/dd_bex_tests.py: -------------------------------------------------------------------------------- 1 | import dd_bex as _bdd 2 | 3 | # these are in the 'tests' directory in the dd package. 4 | # (i just added that directory to my PYTHONPATH for now) 5 | import common 6 | import common_bdd 7 | 8 | class Tests(common_bdd.Tests, common.Tests): 9 | """ 10 | The tests in this file override tests in the dd modules, 11 | because whereas most BDD packages number variables from the 12 | top down, bex numbers them from the bottom up. 
Also, bex 13 | does not need to allocate nodes for constants or literals. 14 | """ 15 | 16 | def setup_method(self): 17 | self.DD = _bdd.BDD 18 | 19 | def test_len(self): 20 | """bex does not allocate nodes for constants or literals""" 21 | bdd = self.DD() 22 | u = bdd.true 23 | # -- assert len(bdd) == 1, len(bdd) 24 | assert len(bdd) == 0, len(bdd) 25 | 26 | def test_to_expr(self): 27 | """bex orders variables from the bottom up""" 28 | bdd = self.DD() 29 | bdd.declare('x', 'y') 30 | u = bdd.var('x') 31 | r = bdd.to_expr(u) 32 | r_ = 'x' 33 | assert r == r_, (r, r_) 34 | u = bdd.add_expr(r'x /\ y') 35 | r = bdd.to_expr(u) 36 | # -- r_ = 'ite(x, y, FALSE)' 37 | # !! the branch will be on y in bex because y=1, x=0, and 1>0 38 | r_ = 'ite(y, x, FALSE)' 39 | assert r == r_, (r, r_) 40 | u = bdd.add_expr(r'x \/ y') 41 | r = bdd.to_expr(u) 42 | # r_ = 'ite(x, TRUE, y)' 43 | r_ = 'ite(y, TRUE, x)' 44 | assert r == r_, (r, r_) 45 | 46 | def test_function_properties(self): 47 | bdd = self.DD() 48 | bdd.declare('x', 'y') 49 | order = dict(x=0, y=1) 50 | bdd.reorder(order) 51 | u = bdd.add_expr(r'x \/ y') 52 | # -- y = bdd.add_expr('y') 53 | x = bdd.add_expr('x') 54 | # Assigned first because in presence of a bug 55 | # different property calls could yield 56 | # different values. 57 | level = u.level 58 | # -- assert level == 0, level 59 | # !! 
the level in bex is the variable with the highest number 60 | assert level == 1, level 61 | var = u.var 62 | # -- assert var == 'x', var 63 | assert var == 'y', var 64 | low = u.low 65 | # -- assert low == y, low 66 | assert low == x, low 67 | high = u.high 68 | assert high == bdd.true, high 69 | ref = u.ref 70 | assert ref == 1, ref 71 | assert not u.negated 72 | support = u.support 73 | assert support == {'x', 'y'}, support 74 | # terminal 75 | u = bdd.false 76 | assert u.var is None, u.var 77 | assert u.low is None, u.low 78 | assert u.high is None, u.high 79 | -------------------------------------------------------------------------------- /py/go.sh: -------------------------------------------------------------------------------- 1 | if [ "${PWD##*/}" != "py" ]; then 2 | cd py 3 | fi 4 | export PY=c:/python311 5 | maturin build -i $PY/python.exe \ 6 | && $PY/scripts/pip install --force-reinstall ../target/wheels/tangentstorm_bex-*-cp311-cp311-win_amd64.whl \ 7 | && $PY/python.exe test.py 8 | -------------------------------------------------------------------------------- /py/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["maturin>=1.0,<2.0"] 3 | build-backend = "maturin" 4 | 5 | [project] 6 | name = "tangentstorm-bex" 7 | version = "0.3.0" 8 | authors = [ 9 | {name = "tangentstorm", email = "michal.wallace@gmail.com"} 10 | ] 11 | description = "A brief description of your bex package" 12 | requires-python = ">=3.7" 13 | classifiers = [ 14 | "Programming Language :: Python :: 3", 15 | "License :: OSI Approved :: MIT License", 16 | "Operating System :: OS Independent", 17 | "Topic :: Scientific/Engineering" 18 | ] 19 | 20 | [tool.maturin] 21 | module-name = "_bex" 22 | python-packages = ["bex"] 23 | exclude = ["../README.md"] -------------------------------------------------------------------------------- /py/src/lib.rs: 
-------------------------------------------------------------------------------- 1 | //! wrap bex as a python module 2 | pub mod py; 3 | -------------------------------------------------------------------------------- /py/src/py.rs: -------------------------------------------------------------------------------- 1 | //! wrap bex as a python module 2 | extern crate bex as bex_rs; 3 | extern crate fxhash; 4 | use pyo3::prelude::*; 5 | use pyo3::types::{PyDict, PyList}; 6 | use pyo3::exceptions::PyException; 7 | use bex_rs::{Base, GraphViz, ast::ASTBase, BddBase, nid::{I,O,NID}, vid::VID, cur::Cursor, Reg}; 8 | use std::sync::{Arc, Mutex}; 9 | use std::collections::HashMap; 10 | 11 | #[pyclass(name="NID")] #[derive(Clone)] struct PyNID(NID); 12 | #[pyclass(name="VID")] #[derive(Clone)] struct PyVID(VID); 13 | #[pyclass(name="ASTBase")] struct PyASTBase(ASTBase); 14 | #[pyclass(name="BddBase")] struct PyBddBase(Arc>); 15 | #[pyclass(name="Reg")] struct PyReg(Reg); 16 | #[pyclass(name="Cursor")] struct PyCursor(Option); 17 | 18 | enum BexErr { NegVar, NegVir } 19 | impl std::convert::From for PyErr { 20 | fn from(err: BexErr) -> PyErr { 21 | match err { 22 | BexErr::NegVar => PyException::new_err("var(i) expects i >= 0"), 23 | BexErr::NegVir => PyException::new_err("vir(i) expects i >= 0") }}} 24 | 25 | #[pymethods] 26 | impl PyNID { 27 | fn is_const(&self)->bool { self.0.is_const() } 28 | fn is_lit(&self)->bool { self.0.is_lit() } 29 | fn is_vid(&self)->bool { self.0.is_vid() } 30 | fn is_inv(&self)->bool { self.0.is_inv() } 31 | fn inv_if(&self, bit:bool)->PyNID { PyNID(self.0.inv_if(bit)) } 32 | fn _vid(&self)->PyVID { PyVID(self.0.vid()) } 33 | fn __eq__(&self, other:&PyNID)->bool { self.0 == other.0 } 34 | fn __invert__(&self)->PyNID { PyNID(!self.0) } 35 | fn __str__(&self) -> String { self.0.to_string() } 36 | fn __hash__(&self) -> u64 { fxhash::hash64(&self.0) } 37 | fn __int__(&self) -> u64 { self.0._to_u64() } 38 | #[getter] fn raw(&self)->PyNID { 
PyNID(self.0.raw()) } 39 | #[staticmethod] fn from_int(x:u64)->PyNID { PyNID(NID::_from_u64(x)) } 40 | fn __repr__(&self) -> String { format!("", self.0) }} 41 | 42 | #[pymethods] 43 | impl PyVID { 44 | #[getter] fn ix(&self)->usize { self.0.vid_ix() } 45 | fn to_nid(&self)->PyNID { PyNID(NID::from_vid(self.0)) } 46 | fn __eq__(&self, other:&PyVID)->bool { self.0 == other.0 } 47 | fn __hash__(&self) -> u64 { fxhash::hash64(&self.0) } 48 | fn __str__(&self) -> String { self.0.to_string() } 49 | fn __repr__(&self) -> String { format!("", self.0) }} 50 | 51 | #[pymethods] 52 | impl PyASTBase { 53 | #[new] fn __new__()->Self { Self(ASTBase::empty()) } 54 | fn op_and(&mut self, x:&PyNID, y:&PyNID)->PyNID { PyNID(self.0.and(x.0, y.0)) } 55 | fn op_xor(&mut self, x:&PyNID, y:&PyNID)->PyNID { PyNID(self.0.xor(x.0, y.0)) } 56 | fn op_or(&mut self, x:&PyNID, y:&PyNID)->PyNID { PyNID(self.0.or(x.0, y.0)) } 57 | fn to_dot(&self, x:&PyNID)->String { let mut s = String::new(); self.0.write_dot(x.0, &mut s); s }} 58 | 59 | #[pymethods] 60 | impl PyBddBase { 61 | #[new] fn __new__()->Self { Self(Arc::new(Mutex::new(BddBase::new()))) } 62 | fn op_and(&mut self, x:&PyNID, y:&PyNID)->PyNID { let mut base = self.0.lock().unwrap(); PyNID(base.and(x.0, y.0)) } 63 | fn op_xor(&mut self, x:&PyNID, y:&PyNID)->PyNID { let mut base = self.0.lock().unwrap(); PyNID(base.xor(x.0, y.0)) } 64 | fn op_or(&mut self, x:&PyNID, y:&PyNID)->PyNID { let mut base = self.0.lock().unwrap(); PyNID(base.or(x.0, y.0)) } 65 | fn ite(&mut self, i:&PyNID, t:&PyNID, e:&PyNID)->PyNID { 66 | let mut base = self.0.lock().unwrap(); 67 | PyNID(base.ite(i.0, t.0, e.0)) } 68 | fn when_hi(&self, v:&PyVID, x:&PyNID)->PyNID { PyNID(self.0.lock().unwrap().when_hi(v.0, x.0)) } 69 | fn when_lo(&self, v:&PyVID, x:&PyNID)->PyNID { PyNID(self.0.lock().unwrap().when_lo(v.0, x.0)) } 70 | fn eval(&mut self, x: &PyNID, kv: &Bound<'_, PyDict>) -> PyResult { 71 | let mut base = self.0.lock().unwrap(); 72 | let mut rust_kv = 
HashMap::new(); 73 | for (key, value) in kv.iter() { 74 | let py_vid: PyRef = key.extract().map_err(|_| PyException::new_err("Expected PyVID as key"))?; 75 | let py_nid: PyRef = value.extract().map_err(|_| PyException::new_err("Expected PyNID as value"))?; 76 | rust_kv.insert(py_vid.0, py_nid.0); } 77 | Ok(PyNID(base.eval(x.0, &rust_kv))) } 78 | fn __len__(&self)->usize { self.0.lock().unwrap().len() } 79 | fn node_count(&self, n:&PyNID)->usize { self.0.lock().unwrap().node_count(n.0) } 80 | fn get_vhl(&self, n:&PyNID)->(PyVID, PyNID, PyNID) { 81 | let base = self.0.lock().unwrap(); 82 | let (v, hi, lo) = base.get_vhl(n.0); (PyVID(v), PyNID(hi), PyNID(lo))} 83 | fn to_dot(&self, x:&PyNID)->String { 84 | let base = self.0.lock().unwrap(); 85 | let mut s = String::new(); base.write_dot(x.0, &mut s); s } 86 | fn to_json(&self, x:&PyNID)->String { self.0.lock().unwrap().to_json(&[x.0]) } 87 | fn solution_count(&mut self, x: &PyNID) -> u64 { 88 | let mut base = self.0.lock().unwrap(); 89 | base.solution_count(x.0) } 90 | fn support(&self, x: &PyNID) -> Vec { 91 | let base = self.0.lock().unwrap(); 92 | base.support(x.0).iter().map(|v| PyVID(*v)).collect() } 93 | 94 | fn reorder(&mut self, py_vids: &Bound<'_,PyList>, py_nids: &Bound<'_,PyList>, gc:bool)-> PyResult> { 95 | let mut base = self.0.lock().unwrap(); 96 | let vids: Vec = py_vids.iter().map(|v| 97 | v.extract::().map(|pv| pv.0).map_err(|_| PyException::new_err("reorder(py_vids=[non_VID])"))) 98 | .collect::, _>>()?; 99 | let nids:Vec = py_nids.iter().map(|n| 100 | n.extract::().map(|pn| pn.0).map_err(|_| PyException::new_err("reorder(py_nids=[non_NID])"))) 101 | .collect::>()?; 102 | Ok(base.reorder(&vids, &nids, gc).iter().map(|&nid|PyNID(nid)).collect()) } 103 | 104 | fn reorder_by_force(&mut self, py_nids: &Bound<'_,PyList>, gc:bool)-> PyResult<(Vec, Vec)> { 105 | let mut base = self.0.lock().unwrap(); 106 | let nids:Vec = py_nids.iter().map(|n| 107 | n.extract::().map(|pn| pn.0).map_err(|_| 
PyException::new_err("reorder_by_force(py_nids=[non_NID])"))) 108 | .collect::>()?; 109 | let (new_nids, new_vids) = base.reorder_by_force(&nids, gc); 110 | Ok((new_nids.iter().map(|&nid|PyNID(nid)).collect(), 111 | new_vids.iter().map(|&nid|PyVID(nid)).collect())) } 112 | 113 | /// Make a cursor. Base.next_solution is PyCursor::_advance in python 114 | fn make_dontcare_cursor(&self, n: &PyNID, nvars: usize) -> PyCursor { 115 | let base = self.0.lock().unwrap(); 116 | PyCursor(base.make_dontcare_cursor(n.0, nvars)) }} 117 | 118 | #[pymethods] 119 | impl PyReg { 120 | #[getter] 121 | fn len(&self) -> usize { self.0.len() } 122 | fn as_usize(&self) -> usize { self.0.as_usize() } 123 | fn as_usize_rev(&self) -> usize { self.0.as_usize_rev() } 124 | fn hi_bits(&self) -> Vec { self.0.hi_bits() }} 125 | 126 | #[pymethods] 127 | impl PyCursor { 128 | #[getter] fn scope(&self) -> Option { self.0.as_ref().map(|c| PyReg(c.scope.clone())) } 129 | #[getter] fn at_end(&self) -> bool { self.0.is_none() } 130 | #[getter] fn dontcares(&self) -> Vec { self.0.as_ref().map(|c| c.dontcares()).unwrap_or_default() } 131 | #[getter] fn cube(&self) -> Vec<(PyVID, bool)> { 132 | self.0.as_ref().map(|c| c.cube().iter().map(|(v,b)| (PyVID(*v), *b)).collect()).unwrap_or_default() } 133 | fn _watch(&mut self, v: &PyVID) { if let Some(c) = self.0.as_mut() { c.watch.put(v.0.vid_ix(), true) }} 134 | fn _advance(&mut self, base:&PyBddBase) { 135 | let base = base.0.lock().unwrap(); 136 | if self.0.is_some() { 137 | let cur = self.0.take().unwrap(); 138 | self.0 = base.next_solution(cur) }}} 139 | 140 | #[pyfunction] fn var(i:i32)->PyResult { 141 | if i<0 { Err(BexErr::NegVar.into()) } else { Ok(PyVID(VID::var(i as u32))) }} 142 | #[pyfunction] fn vir(i:i32)->PyResult { 143 | if i<0 { Err(BexErr::NegVir.into()) } else { Ok(PyVID(VID::vir(i as u32))) }} 144 | #[pyfunction] fn nvar(i:i32)->PyResult { var(i).map(|v| v.to_nid()) } 145 | #[pyfunction] fn nvir(i:i32)->PyResult { vir(i).map(|v| 
v.to_nid()) } 146 | 147 | #[pymodule] 148 | fn _bex(m: &Bound<'_, PyModule>)->PyResult<()> { 149 | m.add_class::()?; 150 | m.add_class::()?; 151 | m.add_class::()?; 152 | m.add_class::()?; 153 | m.add_class::()?; 154 | m.add_class::()?; 155 | m.add("O", PyNID(O))?; 156 | m.add("I", PyNID(I))?; 157 | 158 | m.add_function(wrap_pyfunction!(var, m)?)?; 159 | m.add_function(wrap_pyfunction!(vir, m)?)?; 160 | m.add_function(wrap_pyfunction!(nvar, m)?)?; 161 | m.add_function(wrap_pyfunction!(nvir, m)?)?; 162 | 163 | Ok(())} 164 | -------------------------------------------------------------------------------- /py/test.py: -------------------------------------------------------------------------------- 1 | """ 2 | sanity check for python interface to bex. 3 | for a real test suite, see the dd wrapper (dd_bex_tests) 4 | """ 5 | # TODO: make a real test suite here 6 | from bex import I, O, vir, var, nvar, ASTBase 7 | assert str(O)=="O" 8 | assert str(I)=="I" 9 | assert str(vir(0))=="v0" 10 | assert str(var(0))=="x0" 11 | 12 | x0, x1, x2 = [var(x) for x in range(3)] 13 | 14 | nx0 = x0.to_nid() 15 | nx1 = nvar(1) 16 | nx2 = nvar(2) 17 | 18 | base = ASTBase() 19 | n0 = base.op_and(nx0, nx1) 20 | n1 = base.op_or(nx2, n0) 21 | 22 | dot = base.to_dot(n1) 23 | print("(x0 & x1) | x2 :\n\n", dot) 24 | -------------------------------------------------------------------------------- /run-api.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | cd api && cargo run 3 | -------------------------------------------------------------------------------- /src/apl.rs: -------------------------------------------------------------------------------- 1 | //! Helper routines inspired by the APL family of programming languages. 2 | use std; 3 | use std::collections::HashMap; 4 | 5 | /// Return the unique items of `xs` (in order of appearance), 6 | /// and a mapping of those items to their indices. 
/// Return the unique items of `xs` (in order of appearance),
/// and a mapping of those items to their indices.
pub fn group<T>(xs: &[T]) -> (Vec<&T>, HashMap<&T, Vec<usize>>)
  where T: std::hash::Hash, T: std::cmp::Eq {
  let mut map: HashMap<&T, Vec<usize>> = HashMap::new();
  let mut nub = vec![]; // unique xs, in the order in which they appeared
  for (i, k) in xs.iter().enumerate() {
    let kxs = map.entry(k).or_default();
    // bugfix: only the *first* occurrence of a key belongs in the nub;
    // previously every occurrence was pushed, so nub was just a copy of xs.
    if kxs.is_empty() { nub.push(k) }
    kxs.push(i) }
  (nub, map) }

/// Calculate a permutation vector that sorts array `xs`.
/// (stable: equal items keep their relative order)
pub fn gradeup<T>(xs: &[T]) -> Vec<usize>
  where T: std::cmp::Ord {
  let mut ixs: Vec<(usize, &T)> = xs.iter().enumerate().collect();
  ixs.sort_by_key(|ix| ix.1);
  ixs.iter().map(|ix| ix.0).collect() }

/// Map the indices in `ys` to the corresponding values from `xs`.
pub fn at<'a, T: Clone>(xs: &'a [T], ys: &'a [usize]) -> Vec<T> {
  ys.iter().map(|&i| xs[i].clone()).collect() }
  /// recursively evaluate a nid, substituting in the given values
  /// (internal helper function for eval, eval_all)
  /// `kv` maps variables to replacement nids; `cache` memoizes already-evaluated nids.
  fn _eval_aux(&mut self, _n:NID, _kv: &HashMap<VID,NID>, _cache:&mut HashMap<NID,NID>)->NID {
    todo!("_eval_aux not yet implemented for this type") }

  /// evaluate a list of nids, substituting in the given values.
  /// the memo cache is shared across the whole list, so common subexpressions
  /// are only evaluated once.
  fn eval_all(&mut self, nids: &[NID], kv: &HashMap<VID,NID>)->Vec<NID> {
    let mut cache = HashMap::new();
    nids.iter().map(|&n| self._eval_aux(n, kv, &mut cache)).collect() }

  /// evaluate a single nid (substituting in the given values)
  fn eval(&mut self, nid:NID, kv:&HashMap<VID,NID>)->NID {
    self.eval_all(&[nid], kv)[0] }
these are defined here but never overwritten in the trait (used by solver) [fix this] 65 | fn init_stats(&mut self) { } 66 | fn print_stats(&mut self) { }} 67 | 68 | 69 | /// trait for visualization using GraphViz 70 | pub trait GraphViz { 71 | fn write_dot(&self, n:NID, wr: &mut dyn std::fmt::Write); 72 | 73 | /// render to graphviz *.dot file 74 | fn save_dot(&self, n:NID, path:&str) { 75 | let mut s = String::new(); self.write_dot(n, &mut s); 76 | let mut txt = File::create(path).expect("couldn't create dot file"); 77 | txt.write_all(s.as_bytes()).expect("failed to write text to dot file"); } 78 | 79 | /// call save_dot, use graphviz to convert to svg, and open result in firefox 80 | fn show_named(&self, n:NID, s:&str) { 81 | self.save_dot(n, format!("{}.dot", s).as_str()); 82 | let out = Command::new("dot").args(["-Tsvg",format!("{}.dot",s).as_str()]) 83 | .output().expect("failed to run 'dot' command"); 84 | let mut svg = File::create(format!("{}.svg",s).as_str()).expect("couldn't create svg"); 85 | svg.write_all(&out.stdout).expect("couldn't write svg"); 86 | let _ = Command::new("firefox").args([format!("{}.svg",s).as_str()]) 87 | .spawn().expect("failed to launch firefox").wait(); } 88 | 89 | fn show(&self, n:NID) { self.show_named(n, "+bdd") } 90 | } 91 | 92 | impl GraphViz for T { 93 | fn write_dot(&self, n:NID, wr: &mut dyn std::fmt::Write) { 94 | T::dot(self,n, wr)}} 95 | 96 | 97 | /// This macro makes it easy to define decorators for `Base` implementations. 98 | /// Define your decorator as a struct with type parameter `T:Base` and member `base: T`, 99 | /// then use this macro to implement the functions you *don't* want to manually decorate. 
/// This macro makes it easy to define decorators for `Base` implementations.
/// Define your decorator as a struct with type parameter `T:Base` and member `base: T`,
/// then use this macro to implement the functions you *don't* want to manually decorate.
/// Each listed identifier expands (via the internal `@fn` arms) to an `#[inline]`
/// method that simply forwards to `self.base`.
///
/// ```
/// #[macro_use] extern crate bex;
/// use bex::{base::Base, nid::NID, vid::VID};
///
/// // example do-nothing decorator
/// pub struct Decorated<T:Base> { base: T }
/// impl<T:Base> Base for Decorated<T> {
///   inherit![ new, when_hi, when_lo, and, xor, or, def, tag, get, sub, dot ]; }
/// ```
#[macro_export] macro_rules! inherit {
  ( $($i:ident),* ) => { $( inherit!(@fn $i); )* };
  (@fn new) => { #[inline] fn new()->Self where Self:Sized { Self { base: T::new() }} };
  (@fn when_hi) => { #[inline] fn when_hi(&mut self, v:VID, n:NID)->NID { self.base.when_hi(v, n) }};
  (@fn when_lo) => { #[inline] fn when_lo(&mut self, v:VID, n:NID)->NID { self.base.when_lo(v, n) }};
  (@fn and) => { #[inline] fn and(&mut self, x:NID, y:NID)->NID { self.base.and(x, y) }};
  (@fn xor) => { #[inline] fn xor(&mut self, x:NID, y:NID)->NID { self.base.xor(x, y) }};
  (@fn or) => { #[inline] fn or(&mut self, x:NID, y:NID)->NID { self.base.or(x, y) }};
  (@fn def) => { #[inline] fn def(&mut self, s:String, i:VID)->NID { self.base.def(s, i) }};
  (@fn tag) => { #[inline] fn tag(&mut self, n:NID, s:String)->NID { self.base.tag(n, s) }};
  (@fn get) => { #[inline] fn get(&self, s:&str)->Option<NID> { self.base.get(s) }};
  (@fn sub) => { #[inline] fn sub(&mut self, v:VID, n:NID, ctx:NID)->NID { self.base.sub(v, n, ctx) }};
  (@fn dot) => { #[inline] fn dot(&self, n:NID, wr: &mut dyn std::fmt::Write) { self.base.dot(n, wr) }}; }
start on isolating simplification rules (for use in AST, ANF) 127 | pub struct Simplify { pub base: T } 128 | 129 | impl Base for Simplify { 130 | inherit![ new, when_hi, when_lo, xor, or, def, tag, get, sub, dot ]; 131 | fn and(&mut self, x:NID, y:NID)->NID { 132 | if let Some(nid) = simp::and(x,y) { nid } 133 | else { 134 | let (a, b) = if x < y { (x,y) } else { (y,x) }; 135 | self.base.and(a, b) }}} 136 | 137 | 138 | // macros for building and testing expressions 139 | 140 | /// Macro for building complex expressions in a `Base`. 141 | /// example: `expr![base, (x & y) | (y ^ z)]` 142 | #[macro_export] macro_rules! expr { 143 | (@op $b:ident, $x:tt $op:ident $y:tt) => {{ 144 | let x = expr![$b, $x]; 145 | let y = expr![$b, $y]; 146 | $b.$op(x,y) }}; 147 | ($_:ident, $id:ident) => { $id }; 148 | ($b:ident, ($x:tt ^ $y:tt)) => { expr![@op $b, $x xor $y] }; 149 | ($b:ident, ($x:tt | $y:tt)) => { expr![@op $b, $x or $y] }; 150 | ($b:ident, ($x:tt & $y:tt)) => { expr![@op $b, $x and $y] };} 151 | 152 | /// Macro to make a substitution map for eval. 153 | /// example: `use vid::named::{x0, x1}; vid_map![x0:I, x1:O]` 154 | #[macro_export] macro_rules! vid_map { 155 | ($($x:ident : $y:expr),*) => { 156 | vec![$(($x, $y)),*].iter().copied().collect::>() }} 157 | 158 | 159 | /* 160 | /// TODO: Generic tagging support for any base type. 161 | pub struct Tagged { 162 | base: B, 163 | tags: HashMap } 164 | 165 | impl Tagged { 166 | pub fn def(&mut self, s:String, v:B::V)->B::N { self.base.var(v) } 167 | pub fn tag(&mut self, n:B::N, s:String)->B::N { n }} 168 | 169 | */ 170 | 171 | // Meta-macro that generates a macro for testing any base implementation. 172 | macro_rules! base_test { 173 | ($name:ident, $basename:ident, $tt:tt) => { 174 | macro_rules! $name { 175 | ($BaseType:ident) => { 176 | #[test] fn $name() { 177 | use crate::base::Base; 178 | let mut $basename = <$BaseType as Base>::new(); 179 | $tt }}}}} 180 | 181 | 182 | // Test operations on constants. 
183 | base_test!(test_base_consts, b, { 184 | use crate::{O,I}; 185 | 186 | assert!(O(x:T, y:T)->(T,T) { if x < y { (x,y) } else { (y,x) }} 224 | // pub fn order3(x:T, y:T, z:T)->(T,T,T) { 225 | // let mut res = [x,y,z]; 226 | // res.sort(); 227 | // (res[0].clone(), res[1].clone(), res[2].clone())} 228 | -------------------------------------------------------------------------------- /src/bdd/bdd-json.rs: -------------------------------------------------------------------------------- 1 | extern crate json; 2 | 3 | impl BddBase { 4 | pub fn to_json(&self, nids: &[NID]) -> String { 5 | let mut vhls = json::array![json::object!{}]; 6 | let mut mapping: HashMap = HashMap::new(); 7 | 8 | for &n in nids { 9 | if !n.is_const() { 10 | let node = if n.is_inv() { !n } else { n }; 11 | self.walk_up(node, &mut |n, v, hi, lo| { 12 | if mapping.contains_key(&n) || n.is_lit() { return } 13 | let idx = vhls.len(); 14 | mapping.insert(n, idx); 15 | 16 | let process_child = |child: NID| -> json::JsonValue { 17 | if child.is_lit() { json::JsonValue::String(child.to_string()) } 18 | else { 19 | let underlying = child.raw(); 20 | let child_idx = mapping[&underlying]; 21 | if child.is_inv() { json::JsonValue::Number((-1 * child_idx as i32).into()) } 22 | else { json::JsonValue::Number((child_idx as i32).into()) }}}; 23 | 24 | vhls.push(json::array![v.to_string(), process_child(hi), process_child(lo)]) 25 | .expect("failed to push to vhls"); });}} 26 | 27 | let keep: Vec = nids.iter().filter(|&&n| !n.is_const()) 28 | .map(|&n| { let node = n.raw(); mapping[&node] as i32 }).collect(); 29 | 30 | let out = json::object!{ 31 | "format": "bex-bdd-0.01", 32 | "vhls": vhls, 33 | "keep": keep }; 34 | out.dump() 35 | } 36 | 37 | pub fn from_json(s: &str) -> (Self, Vec) { 38 | let data = json::parse(s).unwrap(); 39 | assert_eq!(data["format"].as_str().unwrap(), "bex-bdd-0.01"); 40 | let vhls_arr = data["vhls"].members().collect::>(); 41 | let mut mapping: HashMap = HashMap::new(); 42 | let mut 
#[test] fn test_json() {
  use crate::nid::named::{x0, x1};
  let mut base = BddBase::new();
  let n = base.xor(x0, x1);
  let json = base.to_json(&[n]);
  println!("json: {}", json);
  // spot-check the serialized structure
  assert!(json.contains(r#""format":"bex-bdd-0.01""#));
  assert!(json.contains(r#""vhls":[{},"#));
  assert!(json.contains(r#""keep":[1]"#));
  // round trip: the rebuilt base should have the same size and truth table
  let (mut base2, nids) = BddBase::from_json(&json);
  assert_eq!(nids.len(), 1);
  let n2 = nids[0];
  assert_eq!(base.len(), base2.len());
  assert_eq!(base.tt(n, 3), base2.tt(n2, 3)); }
Solution iterator for BddBase 2 | 3 | use std::collections::HashSet; 4 | use crate::vhl::{HiLo, HiLoBase, Walkable}; 5 | use crate::{vid::VID, nid::{NID,I,O}, bdd::BddBase, reg::Reg}; 6 | use crate::cur::{Cursor, CursorPlan}; 7 | 8 | 9 | /// helpers for solution cursor 10 | impl HiLoBase for BddBase { 11 | fn get_hilo(&self, n:NID)->Option { 12 | let (hi, lo) = self.swarm.tup(n); 13 | Some(HiLo{ hi, lo }) }} 14 | 15 | impl Walkable for BddBase { 16 | /// internal helper: one step in the walk. 17 | fn step(&self, n:NID, f:&mut F, seen:&mut HashSet, topdown:bool) 18 | where F: FnMut(NID,VID,NID,NID) { 19 | if !seen.contains(&n) { 20 | seen.insert(n); let (hi,lo) = self.tup(n); 21 | if topdown { f(n, n.vid(), hi,lo ) } 22 | if !lo.is_const() { self.step(lo, f, seen, topdown) } 23 | if !hi.is_const() { self.step(hi, f, seen, topdown) } 24 | if !topdown { f(n, n.vid(), hi, lo) }}}} 25 | 26 | pub struct BDDSolIterator<'a> { 27 | bdd: &'a BddBase, 28 | next: Option} 29 | 30 | impl<'a> BDDSolIterator<'a> { 31 | pub fn from_bdd(bdd: &'a BddBase, n:NID, nvars:usize)->BDDSolIterator<'a> { 32 | let next = bdd.first_solution(n, nvars); 33 | BDDSolIterator{ bdd, next }}} 34 | 35 | 36 | impl Iterator for BDDSolIterator<'_> { 37 | type Item = Reg; 38 | fn next(&mut self)->Option { 39 | if let Some(cur) = self.next.take() { 40 | assert!(self.bdd.in_solution(&cur)); 41 | let result = cur.scope.clone(); 42 | self.next = self.bdd.next_solution(cur); 43 | Some(result)} 44 | else { None }}} 45 | impl CursorPlan for BddBase {} 46 | 47 | 48 | /// Solution iterators. 49 | impl BddBase { 50 | pub fn solutions(&mut self, n:NID)->BDDSolIterator { 51 | let nvars = if n.is_const() { 1 } else if n.vid().is_var() { n.vid().var_ix() } 52 | else if n.vid().is_vir() { 53 | panic!("It probably doesn't make sense to call solutions(n) when n.vid().is_vir(), but you can try solutions_pad() if you think it makes sense.") } 54 | else { panic!("Don't know how to find solutions({:?}). 
Maybe try solutions_pad()...?", n) }; 55 | self.solutions_pad(n, nvars)} 56 | 57 | pub fn solutions_pad(&self, n:NID, nvars:usize)->BDDSolIterator { 58 | BDDSolIterator::from_bdd(self, n, nvars)} 59 | 60 | 61 | /// base function to make a cursor. if nvars < n.vid().var_ix(), it will be ignored. 62 | /// if it is larger than the var_ix, all variables above the nid will be watched. 63 | pub fn make_cursor(&self, n: NID, watch_vars: &[usize], nvars: usize) -> Option { 64 | if n == O { return None; } 65 | let base_nvars = if n.is_const() { 0 } else { n.vid().var_ix() + 1 }; 66 | let real_nvars = std::cmp::max(base_nvars, nvars); 67 | let mut cur = Cursor::new(real_nvars, n); 68 | for &idx in watch_vars { cur.watch.put(idx, true); } 69 | cur.descend(self); 70 | self.mark_skippable(&mut cur); 71 | debug_assert!(cur.node.is_const()); 72 | debug_assert!(self.in_solution(&cur), "{:?}", cur.scope); 73 | Some(cur)} 74 | 75 | // Construct a "don't care" cursor: effective nvars with all indices watched. 76 | pub fn make_dontcare_cursor(&self, n: NID, nvars: usize) -> Option { 77 | self.make_cursor(n, &[], nvars)} 78 | 79 | // cursor for .solutions: always watch all variables 80 | pub fn make_solution_cursor(&self, n: NID, nvars: usize) -> Option { 81 | let mut cur = self.make_cursor(n, &[], nvars)?; 82 | for i in 0..cur.nvars { cur.watch.put(i, true); } 83 | Some(cur)} 84 | 85 | pub fn first_solution(&self, n: NID, nvars: usize) -> Option { 86 | if n == O || nvars == 0 { None } 87 | else { self.make_solution_cursor(n, nvars)}} 88 | 89 | /// is the cursor currently pointing at a span of 1 or more solutions? 
  /// is the cursor currently pointing at a span of 1 or more solutions?
  pub fn in_solution(&self, cur:&Cursor)->bool {
    self.includes_leaf(cur.node) }


  /// helper function for next_solution
  /// walk depth-first from lo to hi until we arrive at the next solution
  /// returns the leaf we land on, or None when the space is exhausted.
  fn find_next_leaf(&self, cur:&mut Cursor)->Option<NID> {
    // we always start at a leaf and move up, with the one exception of root=I
    assert!(cur.node.is_const(), "find_next_leaf should always start by looking at a leaf");
    if cur.nstack.is_empty() { assert!(cur.node == I); return None }

    // now we are definitely at a leaf node with a branch above us.
    cur.step_up();

    let tv = cur.node.vid(); // branching var for current twig node
    let mut rippled = false;
    // if we've already walked the hi branch...
    if cur.scope.var_get(tv) {
      cur.ascend();
      // if we've cleared the stack and already explored the hi branch...
      { let iv = cur.node.vid();
        if cur.nstack.is_empty() && cur.scope.var_get(iv) {
          // ... then first check if there are any variables above us on which
          // the node doesn't actually depend. ifso: ripple add. else: done.
          let top = cur.nvars-1;
          if cur.scope.ripple(iv.var_ix(), top).is_some() { rippled = true; }
          else { return None }}} }

    if rippled { cur.clear_trailing_bits() }
    else if cur.var_get() { return None }  // hi branch already explored here too
    else { cur.put_step(self, true); }     // take the hi branch this time
    cur.descend(self);
    Some(cur.node) }
  /// mark the "don't care" variables for the cursor's current position:
  /// levels the bdd path skips over have no effect on the solution.
  fn mark_skippable(&self, cur: &mut Cursor) {
    let mut can_skip = Reg::new(cur.nvars);
    // iterate through the cursor's nid stack, checking each nid.vid_ix() to get its level.
    // any time there's a gap between the levels, mark that level as "don't care" by setting can_skip[i]=true.
    // We also need to include all the bits BELOW the current level and any bits above the top level.
    let mut prev = 0;
    // path from the top
    let path: Vec<usize> = cur.nstack.iter().map(|nid|nid.vid().var_ix()).collect();
    for (i,&level) in path.iter().rev().enumerate() {
      // everything below the bottom-most branch is skippable
      if i == 0 { for j in 0..level { can_skip.put(j, true); }}
      // as is any gap between consecutive branches on the path
      else if level > prev + 1 {
        for j in (prev + 1)..level { can_skip.put(j, true); }}
      prev = level; }
    // skippable variables above the top level
    if !cur.nstack.is_empty() {
      for i in path[0]+1..cur.nvars { can_skip.put(i, true); }}
    cur.can_skip = can_skip; }
{} 7 | 8 | #[derive(Debug, Default)] 9 | pub struct BddJobHandler {} 10 | 11 | impl VhlJobHandler for BddJobHandler { 12 | type W = VhlWorker; 13 | 14 | fn work_job(&mut self, w: &mut Self::W, q:NormIteKey) { 15 | let res = match self.ite_norm(w, q) { 16 | ResStep::Nid(n) => w.resolve_nid(&q, n), 17 | ResStep::Wip { v, hi, lo, invert } => { 18 | let mut res = w.add_wip(&q, v, invert); 19 | if res.is_none() { 20 | for &(xx, part) in &[(hi,HiLoPart::HiPart), (lo,HiLoPart::LoPart)] { 21 | match xx { 22 | Norm::Nid(nid) => { res = w.resolve_part(&q, part, nid, false) }, 23 | Norm::Ite(ite) | 24 | Norm::Not(ite) => { 25 | let (was_new, answer) = w.add_dep(&ite, Dep::new(q, part, xx.is_inv())); 26 | if was_new { w.delegate(ite) } 27 | res = answer }}}} 28 | res }}; 29 | if let Some(Answer(nid)) = res { 30 | w.send_answer(&q, nid) }}} 31 | 32 | 33 | type BddWorker = VhlWorker; 34 | 35 | impl BddJobHandler { 36 | 37 | fn vhl_norm(&self, w:&BddWorker, ite:NormIteKey)->ResStep { 38 | let ITE{i:vv,t:hi,e:lo} = ite.0; let v = vv.vid(); 39 | ResStep::Nid(w.vhl_to_nid(v, hi, lo)) } 40 | 41 | fn ite_norm(&self, w: &BddWorker, ite:NormIteKey)->ResStep { 42 | let ITE { i, t, e } = ite.0; 43 | let (vi, vt, ve) = (i.vid(), t.vid(), e.vid()); 44 | let v = ite.0.top_vid(); 45 | match w.get_done(&ite) { 46 | Some(n) => ResStep::Nid(n), 47 | None => { 48 | let (hi_i, lo_i) = if v == vi {w.tup(i)} else {(i,i)}; 49 | let (hi_t, lo_t) = if v == vt {w.tup(t)} else {(t,t)}; 50 | let (hi_e, lo_e) = if v == ve {w.tup(e)} else {(e,e)}; 51 | // now construct and normalize the queries for the hi/lo branches: 52 | let hi = ITE::norm(hi_i, hi_t, hi_e); 53 | let lo = ITE::norm(lo_i, lo_t, lo_e); 54 | // if they're both simple nids, we're guaranteed to have a vhl, so check cache 55 | if let (Norm::Nid(hn), Norm::Nid(ln)) = (hi,lo) { 56 | match ITE::norm(NID::from_vid(v), hn, ln) { 57 | // first, it might normalize to a nid directly: 58 | // !! but wait. how is this possible? 
i.is_const() and v == fake variable "T"? 59 | Norm::Nid(n) => { ResStep::Nid(n) } 60 | // otherwise, the normalized triple might already be in cache: 61 | Norm::Ite(ite) => self.vhl_norm(w, ite), 62 | Norm::Not(ite) => !self.vhl_norm(w, ite)}} 63 | // otherwise at least one side is not a simple nid yet, and we have to defer 64 | else { ResStep::Wip{ v, hi, lo, invert:false } }}}} } 65 | 66 | 67 | // ---------------------------------------------------------------- 68 | /// BddSwarm: a multi-threaded swarm implementation 69 | // ---------------------------------------------------------------- 70 | pub type BddSwarm = VhlSwarm; 71 | 72 | impl BddSwarm { 73 | /// all-purpose if-then-else node constructor. For the swarm implementation, 74 | /// we push all the normalization and tree traversal work into the threads, 75 | /// while this function puts all the parts together. 76 | pub fn ite(&mut self, i:NID, t:NID, e:NID)->NID { 77 | match ITE::norm(i,t,e) { 78 | Norm::Nid(n) => n, 79 | Norm::Ite(ite) => { self.run_swarm_job(ite) } 80 | Norm::Not(ite) => { !self.run_swarm_job(ite) }}}} 81 | 82 | 83 | #[test] fn test_swarm_cache() { 84 | // run a query for ite(x1,x2,x3) twice and make sure it retrieves the cached value without crashing 85 | let mut swarm = BddSwarm::new_with_threads(2); 86 | let ite = NormIteKey(ITE{i:NID::var(1), t:NID::var(2), e:NID::var(3)}); 87 | let n1 = swarm.ite(ite.0.i, ite.0.t, ite.0.e); 88 | let n2 = swarm.ite(ite.0.i, ite.0.t, ite.0.e); 89 | assert_eq!(n1, n2); } 90 | -------------------------------------------------------------------------------- /src/cur.rs: -------------------------------------------------------------------------------- 1 | //! 
pub trait CursorPlan : HiLoBase {
  /// is the given (leaf) node a solution, given the current inversion state?
  fn includes_leaf(&self, n:NID)->bool { n == nid::I }
  /// can the branch under the given node still contain solutions?
  /// (anything but the O leaf)
  fn includes_lo(&self, n:NID)->bool { n != nid::O }
}

/// A cursor records a position in a vhl-graph: the path of nodes taken
/// from the root, plus the variable assignments made along the way.
#[derive(Debug)]
pub struct Cursor {
  /// number of input variables in context
  pub nvars: usize,
  /// the current node.
  pub node: NID,
  /// whether to invert the results
  pub invert: bool,
  /// the path of nodes we have traversed
  pub nstack: Vec<NID>,
  /// the stack of node inversion states
  pub istack: Vec<bool>,
  /// the current variable assignments
  pub scope: Reg,
  /// can_skip[i]=1 means the variable is a "don't care" and can be skipped over.
  /// this is set on each step by the next_solution method of whatever data structure
  /// we're iterating through.
  pub can_skip: Reg,
  /// watch[i]=1 is an indication from the caller that they wants us to force
  /// iteration over this variable regardless of can_skip[i]
  pub watch: Reg }
66 | pub fn step_up(&mut self)->NID { 67 | self.pop_node(); 68 | self.node } 69 | 70 | pub fn at_top(&self)->bool { self.nstack.is_empty() } 71 | 72 | fn step_down(&mut self, base: &dyn CursorPlan, which:HiLoPart) { 73 | let hl = base.get_hilo(self.node).expect("node not found for step_down"); 74 | self.push_node(hl.get_part(which)); } 75 | 76 | pub fn put_step(&mut self, base:&dyn CursorPlan, val:bool) { 77 | self.scope.var_put(self.node.vid(), val); 78 | if val { self.step_down(base, HiLoPart::HiPart) } 79 | else { self.step_down(base, HiLoPart::LoPart) }} 80 | 81 | pub fn dontcares(&self)->Vec { 82 | println!("self.can_skip = {:?}", self.can_skip); 83 | println!("self.watch.= {:?}", self.watch); 84 | let mut res = vec![]; 85 | for i in self.can_skip.hi_bits() { 86 | if !self.watch.get(i) { res.push(i) }} 87 | res } 88 | 89 | pub fn cube(&self)->Vec<(VID,bool)> { 90 | let mut res = vec![]; 91 | for i in 0..self.nvars { 92 | if self.watch.get(i) || !self.can_skip.get(i) { 93 | res.push((VID::var(i as u32), self.scope.get(i))) }} 94 | res } 95 | 96 | /// walk down to next included term while setting the scope. 97 | /// this finds the leftmost leaf beneath the current node that contains a solution. 98 | /// it does NOT backtrack up higher in the graph, so once we reach the bottom we have 99 | /// to call ascend() to get back to the next branch point. 100 | pub fn descend(&mut self, base: &dyn CursorPlan) { 101 | while !self.node.is_const() { 102 | let hl = base.get_hilo(self.node).expect("couldn't get_hilo"); 103 | let choice = !base.includes_lo(hl.lo); 104 | self.put_step(base, choice) }} 105 | 106 | pub fn var_get(&self)->bool { 107 | self.scope.var_get(self.node.vid()) } 108 | 109 | /// starting at a leaf, climb the stack until we reach 110 | /// a branch whose variable is still set to lo. 
  /// starting at a leaf, climb the stack until we reach
  /// a branch whose variable is still set to lo.
  pub fn ascend(&mut self) {
    let mut bv = self.node.vid();
    while self.scope.var_get(bv) && !self.nstack.is_empty() {
      bv = self.step_up().vid(); }}

  /// clear every scope bit below the current node's branch variable.
  pub fn clear_trailing_bits(&mut self) {
    let bi = self.node.vid().var_ix();
    for i in 0..bi { self.scope.put(i, false) }}

  /// decorate the increment() method on the scope register.
  /// returns Some index of first 0 or None on overflow.
  pub fn increment(&mut self) -> Option<usize> {
    // directly compose bits from the three registers to handle the "don't care" situation:
    // unwatched skippable bits are forced to 1 so the increment carries past them.
    let len = self.scope.data.len();
    for i in 0..len { self.scope.data[i] |= self.can_skip.data[i] & (!self.watch.data[i]); }
    // then increment as usual, and capture the bottom 0 index
    if let Some(zpos) = self.scope.increment() {
      let vz = VID::var(zpos as u32);
      // climb the bdd until we find the layer where the lmz would be.
      while !self.nstack.is_empty()
        && !vz.is_below(&self.nstack[self.nstack.len()-1].vid()) {
          self.pop_node(); }
      Some(zpos) }
    else { None }}}
// TBit : for use outside the Base, by types such as X32, below.
/// a clonable boolean-valued type closed under NOT, AND, and XOR.
pub trait TBit
  : Sized + Clone
  + std::ops::Not<Output=Self>
  + std::ops::BitAnd<Output=Self>
  + std::ops::BitXor<Output=Self> { }
20 | fn bitmaj(x:T, y:T, z:T) -> T { 21 | (x.clone()&y.clone()) ^ (x&z.clone()) ^ (y&z) } 22 | 23 | 24 | // BaseBit implementation (u32 references into a Base) 25 | pub type BaseRef = Rc>; 26 | 27 | // -- basebit -- 28 | #[derive(Clone)] 29 | pub struct BaseBit {pub base:BaseRef, pub n:NID} 30 | 31 | impl BaseBit { 32 | /// perform an arbitrary operation using the base 33 | fn opNID>(&self, mut op:F)->BaseBit { 34 | let r = op(&mut self.base.borrow_mut()); 35 | BaseBit{base:self.base.clone(), n:r} }} 36 | 37 | impl std::cmp::PartialEq for BaseBit { 38 | fn eq(&self, other:&Self)->bool { 39 | self.base.as_ptr() == other.base.as_ptr() && self.n==other.n }} 40 | 41 | impl TBit for BaseBit {} 42 | 43 | impl std::ops::Not for BaseBit { 44 | type Output = Self; 45 | fn not(self) -> Self { 46 | self.op(|_| !self.n) }} 47 | 48 | impl std::ops::BitAnd for BaseBit { 49 | type Output = Self; 50 | fn bitand(self, other:Self) -> Self { 51 | self.op(|base| base.and(self.n, other.n)) }} 52 | 53 | impl std::ops::BitXor for BaseBit { 54 | type Output = Self; 55 | fn bitxor(self, other:Self) -> Self { 56 | self.op(|base| base.xor(self.n, other.n))}} 57 | 58 | impl std::ops::BitOr for BaseBit { 59 | type Output = Self; 60 | fn bitor(self, other:Self) -> Self { 61 | self.op(|base| base.or(self.n, other.n)) }} 62 | 63 | impl std::fmt::Debug for BaseBit { 64 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 65 | write!(f, "{:?}", self.n) }} 66 | 67 | // -- thread - global base -- 68 | 69 | thread_local!{ pub static GBASE:BaseRef = Rc::new(RefCell::new(ASTBase::empty())); } 70 | pub fn gbase_ref()->BaseRef { 71 | GBASE.with(|gb| gb.clone()) } 72 | 73 | pub fn gbase_var(v:u32)->BaseBit { 74 | GBASE.with(|gb| { BaseBit{base:gb.clone(), n:NID::var(v) }}) } 75 | 76 | pub fn gbase_tag(n:NID, s:String)->NID { 77 | GBASE.with(|gb| { gb.borrow_mut().tag(n,s) })} 78 | 79 | pub fn gbase_def(s:String, i:VID)->BaseBit { 80 | GBASE.with(|gb| { let vn=gb.borrow_mut().def(s,i); 
BaseBit{base:gb.clone(), n:vn }}) } 81 | 82 | pub fn gbase_o()->BaseBit { BaseBit{base:gbase_ref(), n:nid::O} } 83 | pub fn gbase_i()->BaseBit { BaseBit{base:gbase_ref(), n:nid::I} } 84 | 85 | 86 | // --- lifted u32 type ----------------------------------------- 87 | 88 | // TODO: implement iterators on the bits to simplify all these loops!! 89 | 90 | pub trait BInt : Sized { 91 | fn new(u:usize)->Self; 92 | /// the number of bits 93 | fn n() -> u32; 94 | fn i(&self) -> BaseBit; 95 | fn o(&self) -> BaseBit; 96 | fn zero() -> Self; 97 | fn get(&self, i:u32) -> BaseBit; 98 | fn set(&mut self, i:u32, v:BaseBit); 99 | fn rotate_right(&self, y:u32) -> Self { 100 | let mut res = Self::zero(); 101 | for i in 0..Self::n() { res.set(i, self.get((i+y) % Self::n())) } 102 | res} 103 | 104 | fn def(s:&str, start:u32)->Self; 105 | 106 | // TODO: this doesn't actually wrap! (should it??) 107 | fn wrapping_add(&self, y:Self) -> Self { 108 | let mut res = Self::zero(); let mut carry = self.o(); 109 | for i in 0..Self::n() { 110 | let (a,b,c) = (self.get(i), y.get(i), carry); 111 | res.set(i, a.clone() ^ b.clone() ^ c.clone()); 112 | carry = bitmaj(a, b, c);} 113 | res} 114 | 115 | fn from(other:&B) -> Self { 116 | let mut res = Self::zero(); 117 | for i in 0..min(Self::n(),B::n()) { res.set(i, other.get(i).clone()) } 118 | res } 119 | 120 | fn eq(&self, other:&Self)-> BaseBit; 121 | fn lt(&self, other:&Self)-> BaseBit; 122 | 123 | fn times(&self, y0:&Self) -> B { 124 | let mut sum = B::zero(); 125 | let x = B::from(self); 126 | let y = B::from(y0); 127 | for i in 0..B::n() { 128 | let mut xi = x.rotate_right(0); // poor man's copy 129 | for j in 0..B::n() { 130 | let xij = xi.get(j) & y.get(i); 131 | xi.set(j, xij) } 132 | sum = sum.wrapping_add(xi.rotate_right(B::n() -i)); } 133 | sum } 134 | 135 | fn u(self) -> usize; } 136 | 137 | 138 | macro_rules! 
xint_type { 139 | ($n:expr, $T:ident) => { 140 | 141 | #[derive(Clone,PartialEq)] 142 | pub struct $T{pub bits:Vec} 143 | 144 | impl $T { 145 | 146 | pub fn from_vec(v:Vec)->$T { 147 | $T{bits: if v.len() >= $n { v.iter().take($n).map(|x|x.clone()).collect() } 148 | else { 149 | let zs = (0..($n-v.len())).map(|_| gbase_o()); 150 | v.iter().map(|x|x.clone()).chain(zs.into_iter()).collect() }}} 151 | 152 | } 153 | 154 | impl std::fmt::Debug for $T { 155 | fn fmt(&self, f: &mut std::fmt::Formatter)->std::fmt::Result { 156 | write!(f, "[").expect("!"); 157 | for x in self.bits.iter() { write!(f, "{:?}", x).expect("!?") } 158 | write!(f, "]")}} 159 | 160 | // TODO: just inline BInt here, so people don't have to import it. 161 | 162 | impl BInt for $T { 163 | 164 | fn new(u:usize)->$T { 165 | $T{bits:(0..$n) 166 | .map(|i| if (u&1<u32 { $n } 170 | fn zero()->Self { $T::new(0) } 171 | fn o(&self)->BaseBit { gbase_o() } 172 | fn i(&self)->BaseBit { gbase_i() } 173 | fn get(&self, i:u32)->BaseBit { self.bits[i as usize].clone() } 174 | fn set(&mut self, i:u32, v:BaseBit) { self.bits[i as usize]=v } 175 | 176 | /// define an entire set of variables at once. 177 | fn def(s:&str, start:u32)->$T { 178 | $T::from_vec((0..$n).map(|i|{ gbase_def(s.to_string(), VID::var(start+i)) }).collect()) } 179 | 180 | fn eq(&self, other:&Self)-> BaseBit { 181 | let mut res = gbase_i(); 182 | for (x, y) in self.bits.iter().zip(other.bits.iter()) { 183 | // TODO: implement EQL (XNOR) nodes in base 184 | let eq = !(x.clone()^y.clone()); 185 | // println!("{} eq {} ? 
{}", x.n, y.n, eq.n); 186 | res = res & eq} 187 | res} 188 | 189 | fn lt(&self, other:&Self)-> BaseBit { 190 | let mut res = gbase_o(); 191 | for (x, y) in self.bits.iter().zip(other.bits.iter()) { 192 | // TODO: implement EQ, LT nodes in base 193 | let eq = !(x.clone() ^ y.clone()); 194 | let lt = (!x.clone()) & y.clone(); 195 | res = lt | (eq & res); } 196 | res} 197 | 198 | fn u(self)->usize { 199 | let mut u = 0; let mut i = 0; 200 | #[allow(clippy::toplevel_ref_arg)] 201 | for ref bit in self.bits.iter() { 202 | if bit.clone() == &self.i() { u|=1< 211 | std::result::Result<(), std::fmt::Error> { 212 | self.clone().u().fmt(formatter) } } 213 | 214 | impl std::ops::BitAnd for $T { 215 | type Output = Self; 216 | fn bitand(self, rhs:Self) -> Self { 217 | $T{bits: self.bits.iter().zip(rhs.bits.iter()) 218 | .map(|(x,y)| x.clone() & y.clone()) 219 | .collect() }}} 220 | 221 | impl std::ops::BitXor for $T { 222 | type Output = Self; 223 | fn bitxor(self, rhs:Self) -> Self { 224 | $T{bits: self.bits.iter().zip(rhs.bits.iter()) 225 | .map(|(x,y)| x.clone() ^ y.clone()) 226 | .collect() }}} 227 | 228 | impl std::ops::Shr for $T { 229 | type Output = Self; 230 | fn shr(self, y:u32) -> Self { 231 | let mut res = Self::zero(); 232 | for i in 0..($n-y) { res.bits[i as usize] = self.bits[(i+y) as usize].clone() } 233 | res }} 234 | 235 | impl std::ops::Not for $T { 236 | type Output = Self; 237 | fn not(self) -> Self { 238 | $T{bits: self.bits.iter().map(|x| !x.clone()).collect()} }} 239 | 240 | }} // end xint_type macro 241 | 242 | // actual type implementations: 243 | 244 | xint_type!( 2, X2); pub fn x2(u:usize)->X2 { X2::new(u) } 245 | xint_type!( 4, X4); pub fn x4(u:usize)->X4 { X4::new(u) } 246 | xint_type!( 8, X8); pub fn x8(u:usize)->X8 { X8::new(u) } 247 | xint_type!(16, X16); pub fn x16(u:usize)->X16 { X16::new(u) } 248 | xint_type!(32, X32); pub fn x32(u:usize)->X32 { X32::new(u) } 249 | xint_type!(64, X64); pub fn x64(u:usize)->X64 { X64::new(u) } 250 | 251 | 252 
| 253 | // -- test suite for x32 254 | 255 | #[test] fn test_roundtrip() { 256 | let k = 1234567890; 257 | assert_eq!(x32(k).u(), k) } 258 | 259 | #[test] fn test_add() { 260 | assert_eq!((x32(2).wrapping_add(x32(3))).u(), 5) } 261 | 262 | #[test] fn test_mul32() { 263 | assert_eq!((x32(2).times::(&x32(3))).u(), 6); 264 | assert_eq!((x32(3).times::(&x32(5))).u(), 15) } 265 | 266 | #[test] fn test_mul64() { 267 | assert_eq!((x64(2).times::(&x64(3))).u(), 6); 268 | assert_eq!((x64(3).times::(&x64(5))).u(), 15) } 269 | 270 | #[test] fn test_ror() { 271 | assert_eq!((x32(10).rotate_right(1)).u(), 5) } 272 | 273 | #[test] fn test_lt() { 274 | assert_eq!(x4(1).lt(&x4(2)), gbase_i()); 275 | assert_eq!(x4(2).lt(&x4(1)), gbase_o()); 276 | assert_eq!(x32(10).lt(&x32(11)), gbase_i()); 277 | assert_eq!(x32(11).lt(&x32(10)), gbase_o()); 278 | assert_eq!(x32(10).lt(&x32(10)), gbase_o()); } 279 | 280 | #[test] fn test_eq() { 281 | use crate::int::BInt; 282 | assert_eq!(BInt::eq(&x32(10), &x32(10)), gbase_i()); 283 | assert_eq!(BInt::eq(&x32(11), &x32(10)), gbase_o()); 284 | assert_eq!(BInt::eq(&x32(10), &x32(11)), gbase_o()); } 285 | -------------------------------------------------------------------------------- /src/io.rs: -------------------------------------------------------------------------------- 1 | //! 
binary io for hashmap and typed vectors 2 | use std::fs::File; 3 | use std::io::BufReader; 4 | use std::io::prelude::*; 5 | use std::{collections::HashMap, hash::BuildHasher}; 6 | 7 | 8 | // these functions treat typed slices as raw bytes, making them easier to read/write 9 | // https://stackoverflow.com/questions/28127165/how-to-convert-struct-to-u8 10 | 11 | // adapted from the above, to deal with a slice: 12 | unsafe fn slice_to_u8s(p: &[T]) -> &[u8] { 13 | ::std::slice::from_raw_parts((p.as_ptr()) as *const u8, std::mem::size_of_val(p)) } 14 | 15 | unsafe fn u8s_to_slice(p: &[u8]) -> &[T] { 16 | ::std::slice::from_raw_parts( 17 | (p.as_ptr()) as *const T, 18 | p.len() / ::std::mem::size_of::()) } 19 | 20 | 21 | /// write the vector, as bytes, to a file at the specified path. 22 | pub fn put(path:&str, v:&[T]) -> ::std::io::Result<()> { 23 | let mut f = File::create(path)?; 24 | f.write_all( unsafe{ slice_to_u8s(v) }) } 25 | 26 | /// attempt to parse the file at the specified path as a binary `Vec`. 27 | pub fn get(path:&str) -> ::std::io::Result> { 28 | let mut f = File::open(path)?; 29 | let mut uv:Vec = Vec::new(); 30 | f.read_to_end(&mut uv).expect("couldn't read file"); 31 | let s:&[T] = unsafe { u8s_to_slice(uv.as_slice())}; 32 | Ok(s.to_vec()) } 33 | 34 | 35 | /// save a hashmap 36 | pub fn put_map(path:&str, m:&HashMap) -> ::std::io::Result<()> { 37 | let mut f = File::create(path)?; 38 | for (k,v) in m.iter() { writeln!(&mut f, "{},{}", k, v)? 
} 39 | Ok(())} 40 | 41 | /// load a hashmap 42 | pub fn get_map(path:&str) -> ::std::io::Result> { 43 | let mut m = HashMap::new(); 44 | let f = File::open(path)?; let r = BufReader::new(&f); 45 | for line in r.lines() { 46 | let line = line.unwrap(); 47 | let v:Vec<&str> = line.split(',').collect(); 48 | m.insert(v[0].to_string(), v[1].parse::().unwrap()); } 49 | Ok(m)} 50 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A crate for working with boolean expressions. 2 | 3 | #![allow(clippy::many_single_char_names)] 4 | 5 | #[macro_use] extern crate log; 6 | extern crate simplelog; 7 | extern crate rand; 8 | extern crate dashmap; 9 | extern crate boxcar; 10 | extern crate fxhash; 11 | extern crate concurrent_queue; 12 | 13 | pub mod base; pub use crate::base::{Base, GraphViz}; 14 | pub mod vid; 15 | pub mod nid; pub use crate::nid::{NID,I,O}; 16 | pub mod fun; pub use crate::fun::Fun; 17 | pub mod reg; pub use crate::reg::Reg; 18 | pub mod vhl; 19 | pub mod wip; 20 | pub mod ops; 21 | pub mod cur; 22 | pub mod simp; 23 | pub mod ast; 24 | pub mod bdd; pub use crate::bdd::BddBase; 25 | pub mod solve; 26 | pub mod apl; 27 | pub mod int; 28 | pub mod io; 29 | pub mod anf; 30 | pub mod swap; 31 | pub mod swarm; 32 | pub mod vhl_swarm; 33 | pub mod naf; -------------------------------------------------------------------------------- /src/ops.rs: -------------------------------------------------------------------------------- 1 | //! Tools for constructing boolean expressions using NIDs as logical operations. 2 | use crate::{NID, Fun, nid::NidFun, vid::VID}; 3 | use std::slice::Iter; 4 | 5 | /// A sequence of operations. 6 | /// Currently, RPN is the only format, but I made this an enum 7 | /// to provide a little future-proofing. 
8 | #[derive(PartialOrd, PartialEq, Eq, Hash, Debug, Clone)] 9 | pub enum Ops { RPN(Vec) } 10 | impl Ops { 11 | /// Again, just for future proofing. 12 | pub fn to_rpn(&self)->Iter<'_, NID> { 13 | match self { 14 | Ops::RPN(vec) => vec.iter() }} 15 | 16 | /// return as a function application, where the first item is the function 17 | pub fn to_app(&self)->(NID, Vec) { 18 | match self { 19 | Ops::RPN(vec) => { 20 | let mut v = vec.clone(); 21 | let f = v.pop().expect("to_app() expects at least one f-nid"); 22 | assert!(f.is_fun()); 23 | (f,v) }}} 24 | 25 | /// ensure that last item is a function of n inputs, 26 | /// len is n+1, and first n inputs are not inverted. 27 | pub fn norm(&self)->Ops { 28 | let mut rpn:Vec = self.to_rpn().cloned().collect(); 29 | let f0 = rpn.pop().expect("norm() expects at least one f-nid").to_fun().unwrap(); 30 | let ar = f0.arity(); 31 | assert_eq!(ar, rpn.len() as u8); 32 | 33 | // if any of the input vars are negated, update the function to 34 | // negate the corresponding argument. this way we can just always 35 | // branch on the raw variable. 36 | let mut bits:u8 = 0; 37 | for (i,nid) in rpn.iter_mut().enumerate() { if nid.is_inv() { bits |= 1 << i; *nid = !*nid; }} 38 | let f = f0.when_flipped(bits); 39 | rpn.push(f.to_nid()); 40 | Ops::RPN(rpn)}} 41 | 42 | /// constructor for rpn 43 | pub fn rpn(xs:&[NID])->Ops { Ops::RPN(xs.to_vec()) } 44 | 45 | pub mod sig { 46 | 47 | macro_rules! signals { 48 | ($($ids:ident : $exs:expr),+ $(,)?) => { signals![@ $($ids : $exs),+]; }; 49 | (@) => {}; 50 | (@ $id:ident : $ex:expr $(, $ids:ident : $exs:expr)*) => { 51 | pub const $id:u32 = $ex; 52 | signals![@ $($ids : $exs),*]; };} 53 | 54 | signals! 
{ 55 | // constant signals 56 | K0:0b00000000000000000000000000000000, 57 | K1:0b11111111111111111111111111111111, 58 | 59 | // input bit signals (raw and inverted) 60 | // note that these are oriented for printing aesthetics, so that 61 | // the least significant bit in the signal is actually the high 62 | // bit on the left 63 | A:0b01010101010101010101010101010101, RX0:A, NX0:!A, 64 | B:0b00110011001100110011001100110011, RX1:B, NX1:!B, 65 | C:0b00001111000011110000111100001111, RX2:C, NX2:!C, 66 | D:0b00000000111111110000000011111111, RX3:D, NX3:!D, 67 | E:0b00000000000000001111111111111111, RX4:E, NX4:!E, 68 | 69 | // NB. all possible 5-bit anf terms (except the singleton terms above) 70 | // ] (,': ',([,'&',])/,','"_)S:0 terms=:(#~1<#S:0)/:~'ABCDE'{~L:0<@I.#:i.32 71 | AB: A&B, ABC: A&B&C, ABCD: A&B&C&D, ABCDE: A&B&C&D&E, 72 | ABCE: A&B&C&E, ABD: A&B&D, ABDE: A&B&D&E, ABE: A&B&E, 73 | AC: A&C, ACD: A&C&D, ACDE: A&C&D&E, ACE: A&C&E, 74 | AD: A&D, ADE: A&D&E, AE: A&E, BC: B&C, 75 | BCD: B&C&D, BCDE: B&C&D&E, BCE: B&C&E, BD: B&D, 76 | BDE: B&D&E, BE: B&E, CD: C&D, CDE: C&D&E, 77 | CE: C&E, DE: D&E, 78 | 79 | // same ANF terms but now written in "big endian" style 80 | // ([,': ',],','"_)/@:>"1(\:~L:0 terms),.terms 81 | BA: AB, CBA: ABC, DCBA: ABCD, EDCBA: ABCDE, 82 | ECBA: ABCE, DBA: ABD, EDBA: ABDE, EBA: ABE, 83 | CA: AC, DCA: ACD, EDCA: ACDE, ECA: ACE, 84 | DA: AD, EDA: ADE, EA: AE, CB: BC, 85 | DCB: BCD, EDCB: BCDE, ECB: BCE, DB: BD, 86 | EDB: BDE, EB: BE, DC: CD, EDC: CDE, 87 | EC: CE, ED: DE, 88 | 89 | // the (non-degenerate) dyadic boolean functions 90 | AND: A&B, NAND: !(A&B), 91 | XOR: A^B, IFF: !XOR, 92 | OR: A|B, NOR: !OR, 93 | LT: !A&B, GT: A&!B, 94 | LTE: !GT, GTE: !LT, 95 | 96 | // some triadic functions 97 | ITE: A&B|!A&C, 98 | ANF: (A&B)^C, 99 | XOR3: A^B^C, 100 | MAJ: (A&B)|(A&C)|(B&C), 101 | 102 | // and some aliases: 103 | VEL:OR, IMP: LTE, 104 | NE: XOR, EQ: IFF }} 105 | 106 | /// x0 & x1 107 | pub const AND:NidFun = NID::fun(2,sig::AND); 108 | 
109 | /// x0 ^ x1 110 | pub const XOR:NidFun = NID::fun(2,sig::XOR); 111 | 112 | pub const EQL:NidFun = NID::fun(2,sig::EQ); 113 | pub const NXOR:NidFun = EQL; 114 | pub const NAND:NidFun = NID::fun(2,sig::NAND); 115 | 116 | /// x0 | x1 (vel is the latin word for 'inclusive or', and the origin of the "∨" symbol in logic) 117 | pub const VEL:NidFun = NID::fun(2,sig::VEL); 118 | 119 | /// !(x0 | x1) 120 | pub const NOR:NidFun = NID::fun(2,sig::NOR); 121 | 122 | /// x0 implies x1 (x0 <= x1) 123 | pub const IMP:NidFun = NID::fun(2,sig::IMP); 124 | 125 | pub const ITE:NidFun = NID::fun(3,sig::ITE); 126 | pub const ANF:NidFun = NID::fun(3,sig::ANF); 127 | 128 | /// convenience trait that allows us to mix vids and nids 129 | /// freely when constructing expressions. 130 | pub trait ToNID { fn to_nid(&self)->NID; } 131 | impl ToNID for NID { fn to_nid(&self)->NID { *self }} 132 | impl ToNID for VID { fn to_nid(&self)->NID { NID::from_vid(*self) }} 133 | 134 | /// construct the expression `x AND y` 135 | pub fn and(x:X,y:Y)->Ops { rpn(&[x.to_nid(), y.to_nid(), AND.to_nid()]) } 136 | 137 | /// construct the expression `x XOR y` 138 | pub fn xor(x:X,y:Y)->Ops { rpn(&[x.to_nid(), y.to_nid(), XOR.to_nid()]) } 139 | 140 | /// construct the expression `x VEL y` ("x or y") 141 | pub fn vel(x:X,y:Y)->Ops { rpn(&[x.to_nid(), y.to_nid(), VEL.to_nid()]) } 142 | 143 | /// construct the expression `x IMP y` ("x implies y") 144 | pub fn imp(x:X,y:Y)->Ops { rpn(&[x.to_nid(), y.to_nid(), IMP.to_nid()]) } 145 | 146 | #[test] fn test_flip_and() { 147 | assert_eq!(AND.tbl() & 0b1111, 0b0001 ); 148 | assert_eq!(AND.when_flipped(1).tbl() & 0b1111, 0b0010 ); 149 | assert_eq!(AND.when_flipped(2).tbl() & 0b1111, 0b0100 ); 150 | assert_eq!(AND.when_flipped(3).tbl() & 0b1111, 0b1000 );} 151 | 152 | #[test] fn test_flip_vel() { 153 | assert_eq!(VEL.tbl() & 0b1111, 0b0111 ); 154 | assert_eq!(VEL.when_flipped(1).tbl() & 0b1111, 0b1011 ); 155 | assert_eq!(VEL.when_flipped(2).tbl() & 0b1111, 0b1101 ); 
156 | assert_eq!(VEL.when_flipped(3).tbl() & 0b1111, 0b1110 );} 157 | 158 | #[test] fn test_flip_xor() { 159 | assert_eq!(XOR.tbl() & 0b1111, 0b0110 ); 160 | assert_eq!(XOR.when_flipped(1).tbl() & 0b1111, 0b1001 ); 161 | assert_eq!(XOR.when_flipped(2).tbl() & 0b1111, 0b1001 ); 162 | assert_eq!(XOR.when_flipped(3).tbl() & 0b1111, 0b0110 );} 163 | 164 | #[test] fn test_norm() { 165 | assert_eq!(AND.tbl() & 0b1111, 0b0001 ); 166 | let ops = Ops::RPN(vec![NID::var(0), !NID::var(1), AND.to_nid()]); 167 | let mut rpn:Vec = ops.norm().to_rpn().cloned().collect(); 168 | let f = rpn.pop().unwrap().to_fun().unwrap(); 169 | assert_eq!(2, f.arity()); 170 | assert_eq!(f.tbl() & 0b1111, 0b0100); 171 | assert_eq!(rpn, vec![NID::var(0), NID::var(1)]);} 172 | -------------------------------------------------------------------------------- /src/reg.rs: -------------------------------------------------------------------------------- 1 | //! Registers -- arbitrarily large arrays of bits. 2 | use std::fmt; 3 | use crate::vid::VID; 4 | use std::ops::{BitAnd, BitOr, BitXor, Not}; 5 | 6 | 7 | #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 8 | pub struct Reg { nbits: usize, pub(crate) data: Vec } 9 | 10 | const USIZE:usize = usize::BITS as usize; 11 | 12 | 13 | impl Reg { 14 | 15 | /// create a new register with the given number of bits 16 | pub fn new( nbits: usize )-> Self { 17 | Reg { nbits, data: vec![0; (nbits as f64 / USIZE as f64).ceil() as usize ]}} 18 | 19 | /// constructor that takes the indices of the high bits 20 | pub fn from_bits( nbits:usize, hi_bits: &[usize] )->Self { 21 | let mut res = Reg::new(nbits); 22 | for &bit in hi_bits { 23 | if bit >= nbits { panic!("called from_bits({nbits:?},...) with out-of-bounds bit {bit}") } 24 | else { res.put(bit, true) }} 25 | res} 26 | 27 | /// return the high bits of the register as a vector of indices. 
28 | pub fn hi_bits(&self)->Vec { 29 | let mut res = vec![]; 30 | for (j, &raw) in self.data.iter().enumerate() { 31 | let mut bits = raw; 32 | let offset = j * USIZE; 33 | for i in 0..USIZE { 34 | if (bits & 1) == 1 { res.push(offset + i) } 35 | bits >>= 1 }} 36 | res} 37 | 38 | 39 | /// fetch value of a bit by index 40 | pub fn get(&self, ix: usize )->bool { 41 | 0 < (self.data[ix/USIZE] & 1 << (ix%USIZE)) } 42 | 43 | /// assign value of a bit by index 44 | pub fn put(&mut self, ix:usize, v:bool) { 45 | let i = ix/USIZE; let x = self.data[i]; 46 | self.data[i] = 47 | if v { x | (1 << (ix%USIZE)) } 48 | else { x & !(1 << (ix%USIZE)) }} 49 | 50 | /// fetch value of bit with the given variable's index 51 | pub fn var_get(&self, v:VID)->bool { 52 | let ix = v.var_ix(); 53 | self.get(ix) } 54 | 55 | /// assign value of bit with the given variable's index 56 | pub fn var_put(&mut self, v:VID, val:bool) { 57 | let ix = v.var_ix(); 58 | self.put(ix, val) } 59 | 60 | /// return the number of bits in the register. 61 | pub fn len(&self)->usize { self.nbits } 62 | 63 | /// true when the number of bits is 0. 64 | /// (mostly because clippy complains about len() without is_empty()) 65 | pub fn is_empty(&self)->bool { self.nbits == 0 } 66 | 67 | 68 | 69 | /// build a usize from the least significant bits of the register. 70 | pub fn as_usize(&self)->usize { self.data[0] } 71 | 72 | /// build a usize from the least significant bits of the register, in reverse order. 73 | pub fn as_usize_rev(&self)->usize { 74 | assert!(self.nbits <= 64, "usize_rev only works for <= 64 bits!"); 75 | let mut tmp = self.as_usize(); let mut res = 0; 76 | for _ in 0..self.nbits { 77 | res <<= 1; 78 | res += tmp & 1; 79 | tmp >>= 1;} 80 | res } 81 | 82 | // permute the bits according to the given permutation vector. 83 | // b=pv[i] means to grab bit b from x and move to position i in the result. 
84 | pub fn permute_bits(&self, pv:&[usize])->Self { 85 | let mut res = self.clone(); 86 | for (i,b) in pv.iter().enumerate() { res.put(i, self.get(*b)); } 87 | res} 88 | 89 | 90 | /// ripple add with carry within the region specified by start and end 91 | /// (inclusive), returning Some position where a 0 became a 1, or None on overflow. 92 | pub fn ripple(&mut self, start:usize, end:usize)->Option { 93 | let mut j = start as i64; let end = end as i64; 94 | if j == end { return None } 95 | let dj:i64 = if j > end { -1 } else { 1 }; 96 | loop { 97 | let u = j as usize; 98 | let old = self.get(u); 99 | self.put(u, !old); 100 | if !old { break } // we flipped a 0 to a 1. return the position. 101 | else if j == end { return None } 102 | else { j+=dj }} 103 | Some(j as usize)} 104 | 105 | /// increment the register as if adding 1. 106 | /// return position where the ripple-carry stopped. 107 | pub fn increment(&mut self)->Option { self.ripple(0, self.nbits-1) } 108 | 109 | } // impl Reg 110 | 111 | 112 | // -- bitwise operations -------------------------------------------- 113 | 114 | impl Reg { 115 | fn mask_last_cell(&mut self) { 116 | let rem = self.nbits % USIZE; 117 | let mask = if rem == 0 { !0 } else { (1 << rem) - 1 }; 118 | if let Some(last) = self.data.last_mut() { *last &= mask; }}} 119 | 120 | impl<'b> BitAnd<&'b Reg> for &Reg { 121 | type Output = Reg; 122 | fn bitand(self, rhs: &'b Reg) -> Self::Output { 123 | let mut res = self.clone(); 124 | for (i, &val) in rhs.data.iter().enumerate() { 125 | if i < res.data.len() { res.data[i] &= val; } 126 | else { res.data.push(val); }} 127 | res }} 128 | 129 | impl<'b> BitOr<&'b Reg> for &Reg { 130 | type Output = Reg; 131 | fn bitor(self, rhs: &'b Reg) -> Self::Output { 132 | let mut res = self.clone(); 133 | for (i, &val) in rhs.data.iter().enumerate() { 134 | if i < res.data.len() { res.data[i] |= val; } 135 | else { res.data.push(val); }} 136 | res}} 137 | 138 | impl<'b> BitXor<&'b Reg> for &Reg { 139 | type 
Output = Reg; 140 | fn bitxor(self, rhs: &'b Reg) -> Self::Output { 141 | let mut res = self.clone(); 142 | for (i, &val) in rhs.data.iter().enumerate() { 143 | if i < res.data.len() { res.data[i] ^= val; } 144 | else { res.data.push(val); }} 145 | res }} 146 | 147 | impl Not for &Reg { 148 | type Output = Reg; 149 | fn not(self) -> Self::Output { 150 | let mut res = self.clone(); 151 | for val in &mut res.data { *val = !*val; } 152 | res.mask_last_cell(); 153 | res }} 154 | 155 | 156 | /// display the bits of the register and the usize 157 | /// e.g. reg[11o=06] 158 | impl fmt::Display for Reg { 159 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 160 | write!(f, "reg[")?; 161 | let mut write_bit = |i| { write!(f, "{}", if self.get(i) {'i'} else {'o'}) }; 162 | for i in (0..self.nbits).rev() { write_bit(i)? }; 163 | write!(f, "={:02x}]", self.as_usize()) }} 164 | 165 | /// Same as fmt::Display. 166 | impl fmt::Debug for Reg { // for test suite output 167 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self) }} 168 | 169 | #[test] #[allow(clippy::bool_assert_comparison)] 170 | fn test_reg_mut() { 171 | let mut reg = Reg::new(66); 172 | assert_eq!(reg.data.len(), 2); 173 | assert_eq!(reg.data[0], 0); 174 | assert_eq!(reg.get(0), false); 175 | reg.put(0, true); 176 | assert_eq!(reg.data[0], 1); // bit '0' is the least significant bit 177 | assert_eq!(reg.data[1], 0); 178 | assert_eq!(reg.get(0), true); 179 | assert_eq!(reg.get(1), false); 180 | // now 181 | assert_eq!(reg.as_usize(), 1, "{:?}=1", reg); 182 | reg.put(1, true); 183 | assert_eq!(reg.data[0], 3); 184 | assert_eq!(reg.get(1), true); } 185 | 186 | #[test] fn test_reg_inc_hitop() { 187 | let mut reg = Reg::new(2); 188 | assert_eq!(0, reg.as_usize()); 189 | assert_eq!(Some(0), reg.increment(), "00 -> 01"); 190 | assert_eq!(1, reg.as_usize()); 191 | assert_eq!(Some(1), reg.increment(), "01 -> 10"); 192 | assert_eq!(2, reg.as_usize()); 193 | assert_eq!(Some(0), reg.increment(), 
"10 -> 11"); 194 | assert_eq!(3, reg.as_usize()); 195 | assert_eq!(None, reg.increment(), "11 -> 00"); } 196 | 197 | 198 | #[test] fn test_bits() { 199 | let ten = Reg::from_bits(4, &[3,1]); 200 | assert_eq!(ten.as_usize(), 0b1010, "reg with bits 3 and 1 set should equal 10"); 201 | assert_eq!(ten.hi_bits(), [1,3], "bits for 'ten' should come back in order"); 202 | let big = Reg::from_bits(65, &[64,63]); 203 | assert_eq!(big.hi_bits(), [63,64], "bits for 'big' should come back in order"); } 204 | 205 | #[test] 206 | fn test_reg_and() { 207 | let reg1 = Reg::from_bits(70, &[0, 1, 2, 3, 64, 65]); 208 | let reg2 = Reg::from_bits(70, &[1, 2, 66, 67]); 209 | let and_result = ®1 & ®2; 210 | assert_eq!(and_result.hi_bits(), vec![1, 2]);} 211 | 212 | #[test] 213 | fn test_reg_or() { 214 | let reg1 = Reg::from_bits(70, &[0, 1, 2, 3, 64, 65]); 215 | let reg2 = Reg::from_bits(70, &[1, 2, 66, 67]); 216 | let or_result = ®1 | ®2; 217 | assert_eq!(or_result.hi_bits(), vec![0, 1, 2, 3, 64, 65, 66, 67]);} 218 | 219 | #[test] 220 | fn test_reg_xor() { 221 | let reg1 = Reg::from_bits(70, &[0, 1, 2, 3, 64, 65, 68]); 222 | let reg2 = Reg::from_bits(70, &[1, 2, 66, 67, 68]); 223 | let xor_result = ®1 ^ ®2; 224 | assert_eq!(xor_result.hi_bits(), vec![0, 3, 64, 65, 66, 67]);} 225 | 226 | #[test] 227 | fn test_reg_not() { 228 | let reg1 = Reg::from_bits(70, &[0, 1, 2, 3, 64, 65]); 229 | let not_result = !®1; 230 | let expected_not_bits: Vec = (0..70).filter(|&i| ![0, 1, 2, 3, 64, 65].contains(&i)).collect(); 231 | assert_eq!(not_result.hi_bits(), expected_not_bits);} 232 | 233 | #[test] 234 | fn test_reg_mask() { 235 | let mut reg = Reg::from_bits(5, &[0, 1, 2, 3]); 236 | assert_eq!(®.as_usize(), &0b1111); 237 | reg.mask_last_cell(); 238 | assert_eq!(®.as_usize(), &0b1111); } 239 | -------------------------------------------------------------------------------- /src/simp.rs: -------------------------------------------------------------------------------- 1 | //! 
Simplification rules for simple boolean operations. 2 | use crate::nid::{NID, I, O}; 3 | 4 | pub fn xor(x:NID, y:NID)->Option { 5 | if x == y { Some(O) } 6 | else if x == O { Some(y) } 7 | else if x == I { Some(!y) } 8 | else if y == O { Some(x) } 9 | else if y == I { Some(!x) } 10 | else if x == !y { Some(I) } 11 | else { None }} 12 | 13 | pub fn and(x:NID, y:NID)->Option { 14 | if x == O || y == O { Some(O) } 15 | else if x == I || x == y { Some(y) } 16 | else if y == I { Some(x) } 17 | else if x == !y { Some(O) } 18 | else { None }} 19 | 20 | pub fn or(x:NID, y:NID)->Option { 21 | if x == O { Some(y) } 22 | else if y == O { Some(x) } 23 | else if x == I || y == I { Some(I) } 24 | else if x == y { Some(x) } 25 | else if x == !y { Some(I) } 26 | else { None }} 27 | -------------------------------------------------------------------------------- /src/swarm.rs: -------------------------------------------------------------------------------- 1 | //! mini-framework for multicore programming. 2 | use std::{marker::PhantomData, thread}; 3 | use std::sync::mpsc::{Sender, Receiver, channel, SendError, RecvError}; 4 | use std::fmt::Debug; 5 | use std::collections::HashMap; 6 | use rand::seq::SliceRandom; 7 | 8 | /// query id 9 | #[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)] 10 | pub enum QID { #[default] INIT, STEP(usize), DONE } 11 | 12 | pub struct QMsg { qid:QID, q: Q } 13 | #[derive(Debug)] 14 | pub struct RMsg { pub wid: WID, pub qid:QID, pub r:Option } 15 | 16 | /// worker id 17 | #[derive(Debug,Default,PartialEq,Eq,Hash,Clone,Copy)] 18 | pub struct WID { pub n:usize } 19 | 20 | pub trait Worker where R:Debug, Q:Clone { 21 | 22 | fn new(_wid:WID)->Self; 23 | fn get_wid(&self)->WID; 24 | 25 | // swarm will call this method so a worker implementation 26 | // can clone the sender and send messages back to the swarm 27 | // outside of the work_xxx methods. 
28 | fn set_tx(&mut self, _tx:&Sender>) {} 29 | 30 | /// send a message from the worker back to the swarm's main thread. 31 | /// to call this, you need your own copy of the Sender (which you) 32 | /// can obtain by implementing `set_tx` and keeping a reference to 33 | /// a clone of the parameter. (You probably also need the qid, which 34 | /// you can capture in `work_step`) 35 | fn send_msg(&self, tx:&Sender>, qid:QID, r:Option) { 36 | // println!("\x1b[32mSENDING msg: qid:{:?} for wid: {:?} -> r:{:?}\x1b[0m", &qid, wid, &r); 37 | let res = tx.send(RMsg{ wid:self.get_wid(), qid, r }); 38 | if res.is_err() { self.on_work_send_err(res.err().unwrap()) }} 39 | 40 | /// allow workers to push items into a shared (or private) queue. 41 | fn queue_push(&mut self, _item:I) { panic!("no queue defined"); } 42 | /// allow workers to pop items from a shared (or private) queue. 43 | fn queue_pop(&mut self)->Option { None } 44 | 45 | /// Generic worker lifecycle implementation. 46 | /// Hopefully, you won't need to override this. 47 | /// The worker receives a stream of Option(Q) structs (queries), 48 | /// and returns an R (result) for each one. 49 | fn work_loop(&mut self, wid:WID, rx:&Receiver>>, tx:&Sender>) { 50 | self.set_tx(tx); 51 | // and now the actual worker lifecycle: 52 | let msg = self.work_init(wid); self.send_msg(tx, QID::INIT, msg); 53 | loop { 54 | if let Some(item) = self.queue_pop() { self.work_item(item) } 55 | match rx.try_recv() { 56 | Ok(None) => break, 57 | Ok(Some(QMsg{qid, q})) => { 58 | if let QID::STEP(_) = qid { 59 | let msg = self.work_step(&qid, q); self.send_msg(tx, qid, msg); } 60 | else { panic!("Worker {:?} got unexpected qid instead of STEP: {:?}", wid, qid)}} 61 | Err(e) => match e { 62 | std::sync::mpsc::TryRecvError::Empty => {} // no problem! 63 | std::sync::mpsc::TryRecvError::Disconnected => break }}} 64 | let msg = self.work_done(); self.send_msg(tx, QID::DONE, msg); } 65 | 66 | /// What to do if a message send fails. 
By default, just print to stdout. 67 | fn on_work_send_err(&self, err:SendError>) { 68 | println!("failed to send response: {:?}", err.to_string()); } 69 | 70 | /// Override this to implement logic for working on queue items 71 | fn work_item(&mut self, _item:I) { } 72 | 73 | /// Override this to implement your worker's query-handling logic. 74 | fn work_step(&mut self, _qid:&QID, _q:Q)->Option { None } 75 | 76 | /// Override this if you need to send a message to the swarm before the worker starts. 77 | fn work_init(&mut self, _wid:WID)->Option { None } 78 | 79 | /// Override this if you need to send a message to the swarm after the work loop finishes. 80 | fn work_done(&mut self)->Option { None }} 81 | 82 | #[derive(Debug)] 83 | pub enum SwarmCmd { 84 | Pass, 85 | Halt, 86 | // send a new query to a worker 87 | Send(Q), 88 | Batch(Vec<(WID, Q)>), 89 | Panic(String), 90 | Return(V), 91 | // kill the worker 92 | Kill(WID)} 93 | 94 | #[derive(Debug)] 95 | pub struct Swarm where W:Worker, Q:Debug+Clone, R:Debug { 96 | /// next QID 97 | nq: usize, 98 | //// sender that newly spawned workers can clone to talk to me. 99 | me: Sender>, 100 | /// receives result (and other intermediate) messages from the workers. 101 | rx: Receiver>, 102 | /// sender for queries. clone with self.q_sender() 103 | qtx: Sender, 104 | qrx: Receiver, 105 | // /// worker queue. workers queue up to handle the queries. 106 | // wq: VecDeque, 107 | /// handles for sending messages to the workers 108 | whs: HashMap>>>, 109 | /// next unique id for new worker 110 | nw: usize, 111 | /// phantom reference to the Worker class. In practice, the workers are owned 112 | /// by their threads, so we don't actually touch them directly. 
113 | _w: PhantomData, 114 | _i: PhantomData, 115 | /// handles to the actual threads 116 | threads: Vec> } 117 | 118 | impl Default for Swarm where Q:'static+Send+Debug+Clone, R:'static+Send+Debug, W:Worker { 119 | fn default()->Self { Self::new_with_threads(4) }} 120 | 121 | impl Drop for Swarm where Q:Debug+Clone, R:Debug, W:Worker { 122 | fn drop(&mut self) { self.kill_swarm() }} 123 | 124 | impl Swarm where Q:Debug+Clone, R:Debug, W:Worker { 125 | 126 | pub fn kill_swarm(&mut self) { 127 | while let Some(&w) = self.whs.keys().take(1).next() { self.kill(w); } 128 | while !self.threads.is_empty() { self.threads.pop().unwrap().join().unwrap() }} 129 | 130 | pub fn num_workers(&self)->usize { self.whs.len() } 131 | 132 | pub fn kill(&mut self, w:WID) { 133 | if let Some(h) = self.whs.remove(&w) { 134 | if h.send(None).is_err() { panic!("couldn't kill worker") }} 135 | else { panic!("worker was already gone") }}} 136 | 137 | impl Swarm where Q:'static+Send+Debug+Clone, R:'static+Send+Debug, W:Worker { 138 | 139 | pub fn new()->Self { Self::default() } 140 | 141 | pub fn new_with_threads(n:usize)->Self { 142 | let (tx, rx) = channel(); 143 | let (qtx, qrx) = channel(); 144 | let mut me = Self { nq: 0, me:tx, rx, qtx, qrx, whs:HashMap::new(), nw:0, 145 | _w:PhantomData, _i:PhantomData, threads:vec![]}; 146 | me.start(n); me } 147 | 148 | pub fn start(&mut self, num_workers:usize) { 149 | let n = if num_workers==0 { num_cpus::get() } else { num_workers }; 150 | for _ in 0..n { self.spawn(); }} 151 | 152 | fn spawn(&mut self)->WID { 153 | let wid = WID{ n: self.nw }; self.nw+=1; 154 | let me2 = self.me.clone(); 155 | let (wtx, wrx) = channel(); 156 | self.threads.push(thread::spawn(move || { W::new(wid).work_loop(wid, &wrx, &me2) })); 157 | self.whs.insert(wid, wtx); 158 | wid } 159 | 160 | /// send query to an arbitrary worker. 
161 | pub fn add_query(&mut self, q:Q)->QID { 162 | // let wid = self.whs.keys().collect::>()[0]; 163 | let &wid = self.whs.keys().collect::>() 164 | .choose(&mut rand::thread_rng()).unwrap(); 165 | self.send(*wid, q)} 166 | 167 | pub fn send(&mut self, wid:WID, q:Q)->QID { 168 | let qid = QID::STEP(self.nq); self.nq+=1; 169 | let w = self.whs.get(&wid).unwrap_or_else(|| 170 | panic!("requested non-existent worker {:?}", wid)); 171 | if w.send(Some(QMsg{ qid, q })).is_err() { 172 | panic!("couldn't send message to worker {:?}", wid) } 173 | qid} 174 | 175 | pub fn recv(&self)->Result, RecvError> { self.rx.recv() } 176 | 177 | pub fn send_to_all(&mut self, q:&Q) { 178 | let wids: Vec = self.whs.keys().cloned().collect(); 179 | for wid in wids { self.send(wid, q.clone()); }} 180 | 181 | /// returns a channel to which you can send a Q, rather than calling 182 | /// add_query. (useful when the swarm is running in a separate thread) 183 | pub fn q_sender(&self)->Sender { self.qtx.clone() } 184 | 185 | pub fn send_to_self(&self, r:R) { 186 | self.me.send(RMsg{ wid:WID::default(), qid:QID::default(), r:Some(r)}) 187 | .expect("failed to sent_self"); } 188 | 189 | /// pass in the swarm dispatch loop 190 | pub fn run(&mut self, mut on_msg:F)->Option 191 | where V:Debug, F:FnMut(WID, &QID, Option)->SwarmCmd { 192 | let mut res = None; 193 | loop { 194 | if let Ok(q) = self.qrx.try_recv() { self.add_query(q); } 195 | if let Ok(rmsg) = self.rx.try_recv() { 196 | let RMsg { wid, qid, r } = rmsg; 197 | let cmd = on_msg(wid, &qid, r); 198 | match cmd { 199 | SwarmCmd::Pass => {}, 200 | SwarmCmd::Halt => break, 201 | SwarmCmd::Kill(w) => { self.kill(w); if self.whs.is_empty() { break }}, 202 | SwarmCmd::Send(q) => { self.send(wid, q); }, 203 | SwarmCmd::Batch(wqs) => for (wid, q) in wqs { self.send(wid, q); }, 204 | SwarmCmd::Panic(msg) => panic!("{}", msg), 205 | SwarmCmd::Return(v) => { res = Some(v); break }}}} 206 | res}} 207 | 
-------------------------------------------------------------------------------- /src/test-bdd.rs: -------------------------------------------------------------------------------- 1 | // generic Base test suite 2 | test_base_consts!(BddBase); 3 | test_base_when!(BddBase); 4 | 5 | #[cfg(test)] 6 | use std::{iter::FromIterator, hash::Hash}; 7 | 8 | #[cfg(test)] 9 | fn hs(xs: Vec)->HashSet { >::from_iter(xs) } 10 | 11 | // basic test suite 12 | 13 | #[test] fn test_base() { 14 | use crate::nid::named::{x0, x1, x2}; 15 | let mut base = BddBase::new(); 16 | assert_eq!((I,O), base.tup(I)); 17 | assert_eq!((O,I), base.tup(O)); 18 | assert_eq!((I,O), base.tup(x0)); 19 | assert_eq!((I,O), base.tup(x1)); 20 | assert_eq!((I,O), base.tup(x2)); 21 | assert_eq!(I, base.when_hi(VID::var(2),x2)); 22 | assert_eq!(O, base.when_lo(VID::var(2),x2))} 23 | 24 | #[test] fn test_and() { 25 | use crate::nid::named::{x0, x1, x2}; 26 | let mut base = BddBase::new(); 27 | let a = base.and(x1, x2); 28 | assert_eq!(O, base.when_lo(x1.vid(),a)); 29 | assert_eq!(x2, base.when_hi(x1.vid(),a)); 30 | assert_eq!(O, base.when_lo(x2.vid(),a)); 31 | assert_eq!(x1, base.when_hi(x2.vid(),a)); 32 | assert_eq!(a, base.when_hi(x0.vid(),a)); 33 | assert_eq!(a, base.when_lo(x0.vid(),a))} 34 | 35 | #[test] fn test_xor() { 36 | use crate::nid::named::{x0, x1}; 37 | let mut base = BddBase::new(); 38 | let x = base.xor(x0, x1); 39 | assert_eq!(x1, base.when_lo(x0.vid(),x)); 40 | assert_eq!(!x1, base.when_hi(x0.vid(),x)); 41 | assert_eq!(x0, base.when_lo(x1.vid(),x)); 42 | assert_eq!(!x0, base.when_hi(x1.vid(),x)); 43 | assert_eq!(x, base.when_lo(VID::var(2),x)); 44 | assert_eq!(x, base.when_hi(VID::var(2),x))} 45 | 46 | // swarm test suite 47 | #[test] fn test_swarm_xor() { 48 | use crate::nid::named::{x0, x1}; 49 | let mut base = BddBase::new(); 50 | let x = expr![base, (x0 ^ x1)]; 51 | assert_eq!(x1, base.when_lo(x0.vid(),x)); 52 | assert_eq!(!x1, base.when_hi(x0.vid(),x)); 53 | assert_eq!(x0, 
base.when_lo(x1.vid(),x)); 54 | assert_eq!(!x0, base.when_hi(x1.vid(),x)); 55 | assert_eq!(x, base.when_lo(VID::var(2),x)); 56 | assert_eq!(x, base.when_hi(VID::var(2),x))} 57 | 58 | #[test] fn test_swarm_and() { 59 | use crate::nid::named::{x0, x1}; 60 | let mut base = BddBase::new(); 61 | let a = expr![base, (x0 & x1)]; 62 | assert_eq!(O, base.when_lo(x0.vid(),a)); 63 | assert_eq!(x1, base.when_hi(x0.vid(),a)); 64 | assert_eq!(O, base.when_lo(x1.vid(),a)); 65 | assert_eq!(x0, base.when_hi(x1.vid(),a)); 66 | assert_eq!(a, base.when_hi(VID::var(2),a)); 67 | assert_eq!(a, base.when_lo(VID::var(2),a))} 68 | 69 | /// slightly harder test case that requires ite() to recurse 70 | #[test] fn test_swarm_ite() { 71 | use crate::nid::named::{x0, x1, x2}; 72 | let mut base = BddBase::new(); 73 | assert_eq!(vec![0,0,0,0,1,1,1,1], base.tt(x2, 3)); 74 | assert_eq!(vec![0,0,1,1,0,0,1,1], base.tt(x1, 3)); 75 | assert_eq!(vec![0,1,0,1,0,1,0,1], base.tt(x0, 3)); 76 | let x = expr![base, (x2 ^ x1)]; 77 | assert_eq!(vec![0,0,1,1,1,1,0,0], base.tt(x, 3)); 78 | let a = expr![base, (x1 & x0)]; 79 | assert_eq!(vec![0,0,0,1,0,0,0,1], base.tt(a, 3)); 80 | let i = base.ite(x, a, !a); 81 | assert_eq!(vec![1,1,0,1,0,0,1,0], base.tt(i, 3))} 82 | 83 | 84 | /// slightly harder test case that requires ite() to recurse 85 | #[test] fn test_swarm_another() { 86 | let (b, a) = (NID::var(2), NID::var(3)); 87 | let mut base = BddBase::new(); 88 | let anb = base.and(a,!b); 89 | assert_eq!(vec![0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0], base.tt(anb, 4)); 90 | 91 | let anb_nb = base.xor(anb,!b); 92 | assert_eq!(vec![1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0], base.tt(anb_nb, 4)); 93 | let anb2 = base.xor(!b, anb_nb); 94 | assert_eq!(vec![0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0], base.tt(anb2, 4)); 95 | assert_eq!(anb, anb2)} 96 | 97 | /// Test cases for SolutionIterator 98 | #[test] fn test_bdd_solutions_o() { 99 | let mut base = BddBase::new(); let mut it = base.solutions(O); 100 | assert_eq!(it.next(), None, "const O should 
yield no solutions.") } 101 | 102 | #[test] fn test_bdd_solutions_i() { 103 | let base = BddBase::new(); 104 | let actual:HashSet = base.solutions_pad(I, 2).map(|r| r.as_usize()).collect(); 105 | assert_eq!(actual, hs(vec![0b00, 0b01, 0b10, 0b11]), 106 | "const true should yield all solutions"); } 107 | 108 | #[test] fn test_bdd_solutions_simple() { 109 | use crate::nid::named::x0 as a; 110 | let base = BddBase::new(); 111 | let mut it = base.solutions_pad(a, 1); 112 | // it should be sitting on first solution, which is a=1 113 | assert_eq!(it.next().expect("expected solution!").as_usize(), 0b1); 114 | assert_eq!(it.next(), None);} 115 | 116 | 117 | #[test] fn test_bdd_solutions_xor() { 118 | use crate::nid::named::{x0, x1}; 119 | let mut base = BddBase::new(); 120 | let (a, b) = (x0, x1); 121 | let n = expr![base, (a ^ b)]; 122 | // base.show(n); 123 | let actual:Vec = base.solutions_pad(n, 3).map(|x|x.as_usize()).collect(); 124 | let expect = vec![0b001, 0b010, 0b101, 0b110 ]; // bits cba 125 | assert_eq!(actual, expect); } 126 | 127 | #[test] fn test_hilocache_simple() { 128 | use crate::vhl; // TODO: probably move this to test-vhl.rs? 
129 | let cache = vhl::HiLoCache::default(); 130 | let hl = vhl::HiLo::new(NID::var(5), NID::var(6)); 131 | let x0 = VID::var(0); 132 | let v0 = VID::vir(0); 133 | let v1 = VID::vir(1); 134 | assert!(cache.get_node(v0, hl).is_none()); 135 | let nv0 = cache.insert(v0, hl); 136 | assert_eq!(nv0, NID::from_vid_idx(v0, 0)); 137 | 138 | // I want the following to just work, but it doesn't: 139 | // let nv1 = state.get_simple_node(v1, hl).expect("nv1"); 140 | 141 | let nv1 = cache.insert(v1, hl); 142 | assert_eq!(nv1, NID::from_vid_idx(v1, 0)); 143 | 144 | // this node is "malformed" because the lower number is on top, 145 | // but the concept should still work: 146 | let nx0 = cache.insert(x0, hl); 147 | assert_eq!(nx0, NID::from_vid_idx(x0, 0));} 148 | 149 | #[test] fn test_solution_count_simple() { 150 | use crate::nid::named::{x0, x1, x2}; 151 | let mut base = BddBase::new(); 152 | let n = expr![base, (x0 & (x1 | x2))]; 153 | assert_eq!(base.solution_count(n), 3);} 154 | 155 | #[test] fn test_solution_count_complex() { 156 | use crate::nid::named::{x0, x1, x2}; 157 | let mut base = BddBase::new(); 158 | let n = expr![base, ((x0 & x1) ^ x2)]; 159 | assert_eq!(base.solution_count(n), 4);} 160 | 161 | #[test] fn test_bdd_solutions_dontcare() { 162 | use crate::nid::named::{x1, x3}; 163 | let mut base = BddBase::new(); 164 | // the idea here is that we have "don't care" above, below, and between the used vars: 165 | let n = base.and(x1,x3); 166 | // by default, we ignore the "don't cares" above: 167 | let actual:Vec<_> = base.solutions(n).map(|r| r.as_usize()).collect(); 168 | assert_eq!(actual, vec![0b1010, 0b1011, 0b1110, 0b1111]); 169 | 170 | // but we can pad this if we prefer: 171 | let actual:Vec<_> = base.solutions_pad(n, 5).map(|r| r.as_usize()).collect(); 172 | assert_eq!(actual, vec![0b01010, 0b01011, 0b01110, 0b01111, 173 | 0b11010, 0b11011, 0b11110, 0b11111])} 174 | 175 | #[test] fn test_cursor_dontcare() { 176 | use crate::nid::named::{x1, x3}; 177 | use 
crate::vid::named::{x1 as X1, x3 as X3}; 178 | let mut base = BddBase::new(); 179 | let n = base.and(x1, x3); 180 | let cur = base.make_dontcare_cursor(n, 0).unwrap(); 181 | assert_eq!(cur.dontcares(), vec![0, 2], "Variables x0 and x2 should be skipped (don't care)"); 182 | assert_eq!(cur.cube(), vec![(X1, true), (X3, true)], "Variables x1 and x3 should be set to true"); 183 | assert!(base.next_solution(cur).is_none(), "Should has only one solution"); } 184 | 185 | #[test] fn test_cursor_watch() { 186 | use crate::nid::named::{x1, x3}; 187 | use crate::vid::named::{x1 as X1, x2 as X2, x3 as X3}; 188 | let mut base = BddBase::new(); 189 | let n = base.and(x1, x3); 190 | let mut cur = base.make_dontcare_cursor(n, 0).unwrap(); 191 | cur.watch.put(2, true); 192 | assert_eq!(cur.dontcares(), &[0], "X2 should no longer be skipped"); 193 | assert_eq!(cur.cube(), vec![(X1, true), (X2, false), (X3, true)], "now we should include x2=0"); 194 | let next = base.next_solution(cur); 195 | assert!(next.is_some(), "Should have another solution now"); 196 | cur = next.unwrap(); 197 | assert_eq!(cur.cube(), vec![(X1, true), (X2, true), (X3, true)], "we should get both solutions for x2"); 198 | assert!(base.next_solution(cur).is_none(), "Should have only two solutions"); } 199 | -------------------------------------------------------------------------------- /src/tmp.rs: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | row swapping algorithm (1/20/2021) 5 | --------------------------------------------------- 6 | 7 | Swap works on a single pair of rows at once. 8 | 9 | It rewrites nodes in place, so that the branch variable of node may change. 10 | (Which is why we use XIDs for reference instead of NIDs.) 11 | 12 | 0. Definitions and Observations 13 | w, rw: old top var, and its row 14 | v, rv: new top var, and its row 15 | mut gc: Vec of reclaimed xids. 16 | 17 | n0. Note that v nodes can never initially refer to w nodes. 18 | n1. 
Nodes in rw may or may not refer to rv. 19 | n2. Nodes in rv that have no references outside rw will be garbage collected. 20 | 21 | 22 | ## Constrained Resources: cpu time, ram, xids 23 | - swapping should be the most expensive task in the swap solver. 24 | so we don't want to stop work just to wait for an allocation of new xids. 25 | - but rows may become so large that ram is also a constraint. 26 | this means we don't want to allocate huge WIP if we can avoid it. 27 | - assume we are working in isolation from the main thread, perhaps even across 28 | the network. 29 | - so: try to keep a queue of fresh xids to use, proportional to the 30 | size of the row we're swapping. 31 | 32 | 33 | #. Move nodes on rw with references to rv up to rv: 34 | 35 | We can think of two different groups of nodes on rw: 36 | 37 | - group I (independent): 38 | These are nodes that do not reference row v. 39 | These remain on rw, unchanged. 40 | 41 | This group becomes the new rw. 42 | 43 | - group D (dependent): 44 | These are nodes with at least one child on row v. 45 | These must be rewritten in place to branch on v (and moved to rv). 46 | "In place" means that their XIDs must be preserved. 47 | The moved nodes will have children on row w: 48 | These may be new nodes, or may already exist in group I. 49 | The old children (on row v) may see their refcounts drop to 0. 50 | 51 | #. let mut rw' = new row 52 | #. drain rw -> rw': 53 | - if group I: add to rw' 54 | - if group D: 55 | For each node@(nv,hi,lo): dec_ref(hi) dec_ref(lo) if node.rv == 0 56 | 57 | #. garbage collect rv, including rv nodes whose rc was already 0. 58 | We are never going to throw away a node that we'll need to recreate 59 | here, because the only nodes added to rv will move from rw and have new 60 | children on rw. By observation n0, they must be completely fresh nodes. 61 | 62 | #. 
63 | if node.rc==0 { rw.delete(node); gc.push(node.xid) } 64 | else { for child:XID in vec![node.hi, node.lo] {}} 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | --------------------- 75 | 76 | Modify the substitution solver: 77 | init(vid) (just change nid->vid) 78 | subst(vid, &base, nid) 79 | 80 | 81 | 82 | other ideas (1/17/2021) 83 | ------ 84 | the starting AST has thousands of nodes. currently, it's a binary tree labeled with boolean operators. 85 | 86 | let's mark each AST node with the highest var on which it depends, and add a second bit for whether the 87 | node directly references that var. 88 | 89 | I'm picturing something a little like the "World" concept from ometa: a world is a chained dictionary, 90 | much like an inheritance chain. 91 | 92 | But now imagine that the AST is stored in a single dictionary of nid->def, and use that as the start of 93 | our world. 94 | 95 | Most nodes do not depend directly on vars, but rather on intermediate results (virs). 96 | 97 | id->def (for every item in the AST), and then at each step, we introduce a pair of worlds (new dictionaries, 98 | but with lookup chaining to the previous dictionary) 99 | 100 | ... the idea is a little fuzzy in my head... but what if the worlds themselves could be arranged in 101 | a hi lo scaffold? 102 | 103 | let's say we have the AST floating off in space somewhere, and we observe that there are a handful of entries 104 | that depend only on variable 0. well, there should be exactly 1 - 0 itself. 105 | 106 | Okay, scratch that: let's say we've defined the AST such that expressions of the bottom 5 inputs are 107 | represented by truth tables. 108 | 109 | So now let's say the "basement" of the scaffold (the leaves) are composed of constant NIDS that directly 110 | represent 32-bit truth tables. This row is actually virtual since any references we'd keep to the entries 111 | have enough space to store the entire truth table. These nodes have cost=0. 
112 | 113 | There are also a bunch of nodes that represent individual input variables, and these have cost=1. 114 | Since we're not talking about a swap solver here, these are encoded directly in the nid and so also 115 | go in the virtual "basement." 116 | 117 | So we'll sort the AST by cost. 118 | 119 | We can represent each node by its definition: id -> (fn:id, inputs:vec[id]) 120 | Note that the function is expressed in terms of an ID, so AND becomes the level2 const nid with And's truth table. 121 | (0b0001 repeated 8 times to make a 32-bit table, so 0x11111111). 122 | 123 | This also means that we have instant access to 2^32 combinators out of context, as well as any number of combinators 124 | we define ourselves by switching from a const nid to a reference to some context. 125 | 126 | Now we start to construct the scaffold. Loop through the entries of the cost-ordered AST, 127 | and just move them one at a time over to the scaffold whenever the number of inputs is 0. 128 | 129 | We can rewrite a function to have fewer inputs whenever: 130 | 131 | 1. one or more of the inputs is constant 132 | 2. two or more of the inputs are the same (ignoring the invert flag) 133 | 3. the function itself ignores one or more of the inputs 134 | 135 | So as we visit each node in the AST (from the bottom up), we will check for these reduction scenarios, 136 | and reduce as appropriate. This will probably not get us very far. 137 | 138 | Storing the AST in (fn, xs) format is interesting, because it allows us to deal with all nodes that share 139 | the same function at once. 140 | 141 | Shannon decomposition basically doubles the size of the AST at each step, so the idea is you'd probably 142 | not actually double, but work one branch and then queue the other for later. But in solving one branch 143 | recursively like this, you'll wind up doing a lot of work that you don't want to forget when you do the 144 | other branch. 
145 | 146 | I started envisioning this scaffold concept as a way to avoid this work and not have to make two copies of 147 | the whole AST. Instead, we introduce worlds that patch over the nodes we can directly simplify. 148 | 149 | But this (fn,xs) idea opens up some other possibilities: we may start with simple functions of 2 or 3 values 150 | (ands, ors, xors, maj, ite, etc.), but we can also meld these functions together to produce new functions of 151 | more inputs. This melding step is intriguing because it does not necessarily double the size of the representation. 152 | In other words, whenever two inputs to a node (f a b) have definitions which use overlapping vars 153 | [for example, if (a = g x y , b = h y z) then a and b overlap on var y], then we can rewrite (f a b) as some 154 | new function (f' x y z). 155 | 156 | We can also easily query sets of nodes that use the same function, and modify them all in place. 157 | So for example, we could build our AST such that all sums of n 32-bit values were just written 158 | as abstract functions like (sum-n-bit-28). Then we could build the AST for that function exactly 159 | once, and apply it to all instances simultaneously. Maybe even decompose it further, and have 160 | a basic function like (parity-n) and (carry-n-0, carry-n-1, etc (for however many carry bits there are)). 161 | 162 | This is nice because those functions aren't really order-dependent, and we might not need to represent 163 | them with a BDD at all. 164 | 165 | Another nice thing about this (fn,xs) representation is that we can permute the inputs of an individual 166 | AST node however we like, without permuting the inputs globally, so we can rewrite it in whatever way 167 | makes sense. 168 | 169 | The whole time, we will have both AST nodes and scaffold nodes, and these can point to each other. 170 | 171 | Is there a value to the chained-scope/worlds concept? Yes, it's a space saving device. 
Suppose we have 172 | an extremely complicated AST to solve, and we want the shannon decomposition on x0. There may only be 173 | a handful of leaf nodes that depend directly on x0, but modifying them would force us to rewrite the 174 | entire AST. That's what we want to do eventually, but there's no point doing all that work if it's just 175 | going to generate thousands of new nodes that don't actually get us closer to the result. 176 | 177 | So instead, the "final BDD" that we want, will actually be a node that decides between two "worlds", 178 | branching on x5 (since branching on x0..x4 already produces constant NIDs). 179 | 180 | So we have x5 ? World0 : World1. Both of these are themselves chained to the underlying AST, and 181 | they contain rewritten copies of *only* the nodes that rely directly on x5. This is a move-and-split 182 | operation: the original definitions are removed from the AST, and so we can now only ever observe 183 | them from some particular world. 184 | 185 | This gives us a way to work bottom up while the substitution solver is working top-down to 186 | convert the AST to a BDD/ANF. 187 | 188 | This scaffold can also be used to construct an actual complete BDD with nothing but input 189 | variables, by doing brute force (lo-to-hi) and/or monte-carlo evaluation. Basically, each node 190 | in the "universe" branches between two worlds, and each world represents an AST, but once you've 191 | dug deep enough along a path that all variables have been substituted, you will reach a world whose 192 | AST is a constant. Once all the leaf nodes under a branch are 'constant worlds' like this, then 193 | that branch is essentially pointing at a BDD. 194 | 195 | As long as we are talking about AST nodes, we can imagine that each node is slowly being 196 | transformed into a BDD, but also *consumed* by its downstream nodes, so we don't necessarily 197 | need to keep the intermediate BDDs around. 
I've probably written about this idea before, but 198 | basically it involves giving every node two "cursors". Everything to the left (lo side) of the 199 | leftmost cursor has already been consumed, so we never need to evaluate it. Basically, once all 200 | the directly-downstream neighbors of a node have moved their cursor past configuration x, there 201 | is no need to ever evaluate anything to the left of x, so it might as well be erased. (This should 202 | be attached to the AST nodes, not the BDD) 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | */ 211 | -------------------------------------------------------------------------------- /src/vhl.rs: -------------------------------------------------------------------------------- 1 | //! (Var, Hi, Lo) triples 2 | use std::collections::BinaryHeap; 3 | use std::collections::HashSet; 4 | use dashmap::DashMap; 5 | use crate::nid::NID; 6 | use crate::vid::VID; 7 | 8 | type VhlHashMap = DashMap; 9 | 10 | #[derive(Debug,Default,Clone)] 11 | struct VhlVec{ pub vec: boxcar::Vec } 12 | 13 | 14 | /// Simple Hi/Lo pair stored internally when representing nodes. 15 | /// All nodes with the same branching variable go in the same array, so there's 16 | /// no point duplicating it. 
17 | #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Default)] 18 | pub struct HiLo {pub hi:NID, pub lo:NID} 19 | 20 | impl HiLo { 21 | /// constructor 22 | pub fn new(hi:NID, lo:NID)->HiLo { HiLo { hi, lo } } 23 | 24 | /// apply the not() operator to both branches 25 | #[inline] pub fn invert(self)-> HiLo { HiLo{ hi: !self.hi, lo: !self.lo }} 26 | 27 | pub fn get_part(&self, which:HiLoPart)->NID { 28 | if which == HiLoPart::HiPart { self.hi } else { self.lo }} } 29 | 30 | impl std::ops::Not for HiLo { 31 | type Output = HiLo; 32 | fn not(self)-> HiLo {HiLo { hi:!self.hi, lo: !self.lo }}} 33 | 34 | 35 | /// Vhl (for when we really do need the variable) 36 | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)] 37 | pub struct Vhl {pub v:VID, pub hi:NID, pub lo:NID} 38 | 39 | impl Vhl { 40 | pub fn new(v: VID, hi:NID, lo:NID)->Vhl { Vhl{ v, hi, lo } } 41 | pub fn hilo(&self)->HiLo { HiLo{ hi:self.hi, lo: self.lo } }} 42 | 43 | impl std::ops::Not for Vhl { 44 | type Output = Vhl; 45 | fn not(self)->Vhl { Vhl { v:self.v, hi:!self.hi, lo: !self.lo }}} 46 | 47 | 48 | /// Enum for referring to the parts of a HiLo (for WIP). 
49 | #[derive(PartialEq,Debug,Copy,Clone)] 50 | pub enum HiLoPart { HiPart, LoPart } 51 | 52 | /// a deconstructed Vhl (for WIP) 53 | #[derive(Default,PartialEq,Debug,Copy,Clone)] 54 | pub struct VhlParts{ 55 | pub v:VID, 56 | pub hi:Option, 57 | pub lo:Option, 58 | pub invert:bool} 59 | 60 | impl VhlParts { 61 | pub fn hilo(&self)->Option { 62 | if let (Some(hi), Some(lo)) = (self.hi, self.lo) { Some(HiLo{hi,lo}) } 63 | else { None }} 64 | pub fn set_part(&mut self, part:HiLoPart, v:Option) { 65 | if part == HiLoPart::HiPart { self.hi = v } 66 | else { self.lo = v }}} 67 | 68 | 69 | pub trait Walkable { 70 | 71 | /// walk nodes in graph for nid n recursively, without revisiting shared nodes 72 | fn step(&self, n:NID, f:&mut F, seen:&mut HashSet, topdown:bool) 73 | where F: FnMut(NID,VID,NID,NID); 74 | 75 | /// iterate through (nid, vid, hi:nid, lo:nid) tuples in the graph. 76 | /// visit the parent before visiting the children. 77 | fn walk_dn(&self, n:NID, f:&mut F) where F: FnMut(NID,VID,NID,NID) { 78 | let mut seen = HashSet::new(); 79 | self.step(n, f, &mut seen, true)} 80 | 81 | /// same as walk_dn, but visit children before firing the function. 82 | /// note that this walks from "left to right" ("lo' to "hi") 83 | /// and bottom to top, starting from the leftmost node. 84 | /// if you want the bottommost nodes to come first, use self.as_heap(n) 85 | fn walk_up(&self, n:NID, f:&mut F) where F: FnMut(NID,VID,NID,NID) { 86 | let mut seen = HashSet::new(); 87 | self.step(n, f, &mut seen, false)} 88 | 89 | /// iterate through (nid, vid, hi:nid, lo:nid) tuples in the graph 90 | /// for each nid. visit children before visiting the parent. 91 | fn walk_up_each(&self, nids:&[NID], f:&mut F) where F: FnMut(NID,VID,NID,NID) { 92 | let mut seen = HashSet::new(); 93 | for &n in nids { self.step(n, f, &mut seen, false) }} 94 | 95 | /// this is meant for walking nodes ordered by variables from bottom to top. 
96 | /// it's deprecated because the whole thing ought to be replaced by a nice iterator 97 | /// (also, it's not clear to me why the derived Ord for Vhl doesn't require Reverse() here) 98 | #[deprecated] 99 | fn as_heap(&self, n:NID)->BinaryHeap<(Vhl, NID)> { 100 | let mut result = BinaryHeap::new(); 101 | self.walk_up(n, &mut |nid, v, hi, lo| result.push((Vhl{ v, hi, lo }, nid))); 102 | result }} 103 | 104 | 105 | pub trait HiLoBase { 106 | fn get_hilo(&self, n:NID)->Option; } 107 | 108 | 109 | #[derive(Debug, Default, Clone)] 110 | pub struct HiLoCache { 111 | /// variable-agnostic hi/lo pairs for individual bdd nodes. 112 | hilos: VhlVec, 113 | /// reverse map for hilos. 114 | index: VhlHashMap} 115 | 116 | 117 | impl HiLoCache { 118 | 119 | pub fn new()->Self { Self::default() } 120 | 121 | pub fn len(&self)->usize { self.hilos.vec.len() } 122 | #[must_use] pub fn is_empty(&self) -> bool { self.len() == 0 } 123 | 124 | // TODO: ->Option, and then impl HiLoBase 125 | #[inline] pub fn get_hilo(&self, n:NID)->HiLo { 126 | assert!(!n.is_lit()); 127 | let res = self.hilos.vec[n.idx()]; 128 | if n.is_inv() { res.invert() } else { res }} 129 | 130 | #[inline] pub fn get_node(&self, v:VID, hl0:HiLo)-> Option { 131 | let inv = hl0.lo.is_inv(); 132 | let hl1 = if inv { hl0.invert() } else { hl0 }; 133 | if let Some(x) = self.index.get(&hl1) { 134 | // !! maybe this should be an assertion, and callers 135 | // should be adjusted to avoid asking for ill-formed Vhl triples? 
136 | // (without this check, we potentially break the contract of always 137 | // returning a NID that represents a valid Bdd) 138 | if hl1.hi.vid().is_below(&v) && hl1.lo.vid().is_below(&v) { 139 | let nid = NID::from_vid_idx(v, *x); 140 | return Some(if inv { !nid } else { nid }) }} 141 | None } 142 | 143 | #[inline] pub fn insert(&self, v:VID, hl0:HiLo)->NID { 144 | let inv = hl0.lo.is_inv(); 145 | let hilo = if inv { hl0.invert() } else { hl0 }; 146 | let ix:usize = 147 | if let Some(ix) = self.index.get(&hilo) { *ix } 148 | else { 149 | let ix = self.hilos.vec.push(hilo); 150 | self.index.insert(hilo, ix); 151 | ix }; 152 | let res = NID::from_vid_idx(v, ix); 153 | if inv { !res } else { res } }} 154 | -------------------------------------------------------------------------------- /src/vhl_swarm.rs: -------------------------------------------------------------------------------- 1 | //! # VHL Swarm 2 | //! 3 | //! Combines notions from the [`swarm`](crate::swarm), [`wip`], and [`vhl`](crate::vhl) 4 | //! modules to create a distributed system for solving VHL queries. The main idea is 5 | //! the swarm workers share a common [`WorkState`] and can delegate tasks to each 6 | //! other by pushing new jobs onto a shared queue. 7 | //! 8 | //! For a complete example, see [`bdd_swarm`](crate::bdd::bdd_swarm). 9 | 10 | use std::sync::mpsc::Sender; 11 | use std::{fmt, hash::Hash}; 12 | use std::sync::Arc; 13 | use concurrent_queue::{ConcurrentQueue,PopError}; 14 | use crate::vhl::HiLoPart; 15 | use crate::vid::VID; 16 | use crate::wip::Answer; 17 | use crate::NID; 18 | use crate::{wip, wip::{WorkState, COUNT_CACHE_HITS, COUNT_CACHE_TESTS}}; 19 | use crate::swarm::{RMsg, Swarm, SwarmCmd, Worker, QID, WID}; 20 | 21 | type R = wip::RMsg; 22 | 23 | pub trait JobKey : 'static + Copy+Clone+Default+std::fmt::Debug+Eq+Hash+Send+Sync {} 24 | 25 | /// wrapper struct for concurrent queue. This exists mostly so we 26 | /// can implement Default for it. 
The J parameter indicates a "Job", 27 | /// which is some kind of message indicating a request to (eventually) 28 | /// construct a VHL. 29 | #[derive(Debug)] 30 | pub struct JobQueue { q: ConcurrentQueue } 31 | impl Default for JobQueue { 32 | fn default()->Self { JobQueue{ q: ConcurrentQueue::unbounded() }}} 33 | impl JobQueue where J:std::fmt::Debug { 34 | pub fn push(&self, job:J) { self.q.push(job).unwrap() } 35 | pub fn pop(&self)->Option { 36 | match self.q.pop() { 37 | Ok(k) => Some(k), 38 | Err(PopError::Empty) => None, 39 | Err(PopError::Closed) => panic!("JobQueue was closed!") }}} 40 | 41 | /// Query messages used by the swarm. There are several general 42 | /// messages (Init, Stats) that we want for all implementations. 43 | /// Each implementation has a different kind of "Job" message, though, 44 | /// so we introduce type parameter J to represent that. 45 | #[derive(Clone)] 46 | pub enum VhlQ where J:JobKey { 47 | /// The main recursive operation: convert ITE triple to a BDD. 48 | Job(J), 49 | /// Initialize worker with its "hive mind". 50 | Init(Arc>, Arc>), 51 | /// ask for stats about cache 52 | Stats } 53 | 54 | // Q::Cache() message could potentially be huge to print, so don't. 
55 | impl fmt::Debug for VhlQ where J:JobKey { 56 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 57 | match self { 58 | VhlQ::Job(j) => { write!(f, "Q::Job({:?})", j) } 59 | VhlQ::Init(_cache, _queue) => { write!(f, "Q::Init(...)") } 60 | VhlQ::Stats => { write!(f, "Q::Stats")} } }} 61 | 62 | pub trait VhlJobHandler : Default where J: JobKey { 63 | type W : Worker, R, J>; 64 | fn work_job(&mut self, w: &mut Self::W, job:J); } 65 | 66 | #[derive(Debug, Default)] 67 | pub struct VhlWorker where J:JobKey, H:VhlJobHandler { 68 | /// worker id 69 | wid: WID, 70 | /// channel for sending back to the swarm 71 | tx:Option>>, 72 | /// quick access to the next job in the queue 73 | next: Option, 74 | /// shared state for all workers 75 | state:Option>>, 76 | queue:Option>>, 77 | handler: H } 78 | 79 | /// These methods expose the WorkState methods on the worker itself. 80 | impl VhlWorker where J:JobKey, H:VhlJobHandler { 81 | pub fn vhl_to_nid(&self, v:VID, hi:NID, lo:NID)->NID { 82 | self.state.as_ref().unwrap().vhl_to_nid(v, hi, lo) } 83 | pub fn resolve_nid(&mut self, q:&J, n:NID)->Option> { 84 | self.state.as_ref().unwrap().resolve_nid(q, n) } 85 | pub fn add_wip(&mut self, q:&J, vid:VID, invert:bool)->Option> { 86 | self.state.as_ref().unwrap().add_wip(q, vid, invert) } 87 | pub fn resolve_part(&mut self, q:&J, part:HiLoPart, nid:NID, invert:bool)->Option> { 88 | self.state.as_ref().unwrap().resolve_part(q, part, nid, invert) } 89 | pub fn add_dep(&mut self, q:&J, idep:wip::Dep)->(bool, Option>) { 90 | self.state.as_ref().unwrap().add_dep(q, idep) } 91 | pub fn get_done(&self, q:&J)->Option { 92 | self.state.as_ref().unwrap().get_done(q) } 93 | pub fn tup(&self, n:NID)->(NID,NID) { 94 | self.state.as_ref().unwrap().tup(n) }} 95 | 96 | /// this lets a JobHandler send answers and sub-tasks to the swarm. 97 | impl VhlWorker where J:JobKey, H:VhlJobHandler { 98 | pub fn send_answer(&self, _q:&J, nid:NID) { 99 | // println!("!! 
final answer: {:?} !!", nid); 100 | let qid = { 101 | let mut mx = self.state.as_ref().unwrap().qid.lock().unwrap(); 102 | let q0 = (*mx).expect("no qid found in the mutex!"); 103 | *mx = None; // unblock the next query! 104 | q0}; 105 | self.send_msg(qid, Some(R::Ret(nid))) } 106 | pub fn delegate(&mut self, job:J) { 107 | self.queue_push(job)} 108 | pub fn send_msg(&self, qid:QID, r:Option) { 109 | self.tx.as_ref().unwrap().send(RMsg{wid:self.wid, qid, r}).unwrap() }} 110 | 111 | impl Worker, R, J> for VhlWorker where J:JobKey, H:VhlJobHandler { 112 | fn new(wid:WID)->Self { VhlWorker{ wid, ..Default::default() }} 113 | fn get_wid(&self)->WID { self.wid } 114 | fn set_tx(&mut self, tx:&Sender>) { self.tx = Some(tx.clone()) } 115 | fn queue_pop(&mut self)->Option { 116 | if self.next.is_some() { self.next.take() } 117 | else if let Some(ref q) = self.queue { q.pop() } 118 | else { None }} 119 | fn queue_push(&mut self, job:J) { 120 | if self.next.is_none() { self.next = Some(job) } 121 | else { self.queue.as_ref().unwrap().push(job) }} 122 | fn work_item(&mut self, job:J) { 123 | // swap the handler out of self so it can borrow us mutably 124 | let mut h = std::mem::take(&mut self.handler); 125 | h.work_job(self, job); 126 | self.handler = h; } 127 | fn work_step(&mut self, qid:&QID, q:VhlQ)->Option { 128 | match q { 129 | VhlQ::Init(s, q) => { self.state = Some(s); self.queue=Some(q); None } 130 | VhlQ::Job(job) => { 131 | let s = self.state.as_mut().unwrap(); 132 | if let Some(cached) = s.get_done(&job) { return Some(R::Ret(cached)) } 133 | s.cache.entry(job).or_default(); 134 | { let mut m = s.qid.lock().unwrap(); 135 | assert!((*m).is_none(), "already working on a top-level query"); 136 | *m = Some(*qid); } 137 | self.queue_push(job); None } 138 | VhlQ::Stats => { 139 | let tests = COUNT_CACHE_TESTS.with(|c| c.replace(0)); 140 | let hits = COUNT_CACHE_HITS.with(|c| c.replace(0)); 141 | Some(R::CacheStats{ tests, hits }) } }}} 142 | 143 | 144 | #[derive(Debug, 
Default)] 145 | pub struct VhlSwarm where J:JobKey, H:VhlJobHandler>{ 146 | swarm: Swarm, R, VhlWorker, J>, 147 | state: Arc>, 148 | queue: Arc>} 149 | 150 | impl VhlSwarm where J:JobKey, H:VhlJobHandler> { 151 | 152 | pub fn new()->Self { let mut me = Self::default(); me.reset(); me } 153 | 154 | pub fn new_with_threads(n:usize)->Self { 155 | let mut me = Self { 156 | swarm: Swarm::new_with_threads(n), 157 | ..Default::default()}; 158 | me.reset(); me } 159 | 160 | pub fn run(&mut self, on_msg:F)->Option 161 | where V:fmt::Debug, F:FnMut(WID, &QID, Option)->SwarmCmd, V> { 162 | self.swarm.run(on_msg)} 163 | 164 | pub fn q_sender(&self)->Sender> { self.swarm.q_sender() } 165 | 166 | // reset internal state without the cost of destroying and recreating 167 | // all the worker threads. 168 | pub fn reset(&mut self) { 169 | self.state = Default::default(); 170 | self.queue = Default::default(); 171 | self.swarm.send_to_all(&VhlQ::Init(self.state.clone(), self.queue.clone())); } 172 | 173 | pub fn tup(&self, n:NID)->(NID,NID) { self.state.tup(n) } 174 | 175 | pub fn len(&self)->usize { self.state.len() } 176 | #[must_use] pub fn is_empty(&self) -> bool { self.len() == 0 } 177 | 178 | pub fn run_swarm_job(&mut self, job:J)->NID { 179 | let mut result: Option = None; 180 | self.swarm.add_query(VhlQ::Job(job)); 181 | // each response can lead to up to two new ITE queries, and we'll relay those to 182 | // other workers too, until we get back enough info to solve the original query. 
    // Pump the swarm's response channel until the top-level query resolves.
    while result.is_none() {
      let RMsg{wid:_,qid:_,r} = self.swarm.recv().expect("failed to recieve rmsg");
      if let Some(rmsg) = r { match rmsg {
        R::Ret(n) => { result = Some(n) }
        // Stats replies are only valid in get_stats(); seeing one here is a protocol bug.
        R::CacheStats{ tests:_, hits:_ }
          => { panic!("got R::CacheStats before sending Q::Stats"); } }}}
    result.unwrap() }

  /// Ask every worker for its cache-counter totals, then fold the sums
  /// into this thread's COUNT_CACHE_TESTS / COUNT_CACHE_HITS counters.
  pub fn get_stats(&mut self) {
    self.swarm.send_to_all(&VhlQ::Stats);
    let (mut tests, mut hits, mut reports) = (0, 0, 0);
    // each worker sends back exactly one CacheStats reply:
    while reports < self.swarm.num_workers() {
      let RMsg{wid:_, qid:_, r} = self.swarm.recv().expect("still expecting an Rmsg::CacheStats");
      if let Some(wip::RMsg::CacheStats{ tests:t, hits: h }) = r { reports += 1; tests+=t; hits += h }
      else { println!("extraneous rmsg from swarm after Q::Stats: {:?}", r) }}
    COUNT_CACHE_TESTS.with(|c| *c.borrow_mut() += tests);
    COUNT_CACHE_HITS.with(|c| *c.borrow_mut() += hits); }}
--------------------------------------------------------------------------------
/src/vid.rs:
--------------------------------------------------------------------------------
//! Variable IDs (used internally by Base implementations)
use std::cmp::Ordering;
use std::fmt;

/// this will probably go away in favor of a bitmask at some point
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
enum VidEnum {
  // How I (eventually) want the ordering, to be (once biggest vars go on top:)
  T,        // Special meta-constant on which I and O branch.
  NoV,      // Special case for AST nodes not tied to a variable
  Var(u32), // Real Vars go in the middle, with biggest u32 on top.
  Vir(u32), // Virtual are "biggest", so go to the top.
}

/// Result of comparing two [`VID`]s by their position in the variable order.
#[derive(Eq, PartialEq)]
pub enum VidOrdering {
  /// closer to the top of the diagram than the other vid
  Above,
  /// at the same level as the other vid
  Level,
  /// farther from the top than the other vid
  Below }

use self::VidEnum::*;

/// A variable identifier, wrapping the private `VidEnum`.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct VID { v:VidEnum }

/// the "no variable" constant (for AST nodes not tied to a variable)
pub const NOV:VID = VID::nov();
/// the special meta-constant on which I and O branch
pub const TOP:VID = VID::top();

impl Default for VID { fn default()->Self { NOV }}

/// Compare two raw indices: a bigger index sits higher in the order.
fn cmp_depth_idx(x:u32, y:&u32)->VidOrdering {
  match x.cmp(y) {
    Ordering::Less => VidOrdering::Below,
    Ordering::Equal => VidOrdering::Level,
    Ordering::Greater => VidOrdering::Above }}

impl VID {
  /// Compare positions in the variable order.
  /// Ordering (bottom to top): T, NoV, Var(0..), Vir(0..),
  /// with the bigger index above the smaller within Var/Vir.
  pub fn cmp_depth(&self, other: &Self) -> VidOrdering {
    use self::VidOrdering::*;
    match self.v {
      T => if other.v == T { Level } else { Below },
      NoV => match other.v {
        T => Above,
        NoV => Level,
        _ => Below },
      Var(x) => match other.v {
        Vir(_) => Below,
        Var(y) => cmp_depth_idx(x,&y),
        NoV|T => Above },
      Vir(x) => match other.v {
        Var(_) => Above,
        Vir(y) => cmp_depth_idx(x,&y),
        NoV|T => Above }}}}

/// Return whichever of the two vids sits higher in the variable order.
pub fn topmost(x:VID, y:VID)->VID { if x.is_above(&y) { x } else { y }}
/// Return whichever of the two vids sits lower in the variable order.
pub fn botmost(x:VID, y:VID)->VID { if x.is_below(&y) { x } else { y }}
/// Topmost of three vids.
pub fn topmost_of3(x:VID, y:VID, z:VID)->VID { topmost(x, topmost(y, z)) }

impl VID {
  /// the special meta-constant on which I and O branch
  pub const fn top()->VID { VID { v:T }}
  /// the "no variable" vid
  pub const fn nov()->VID { VID { v:NoV }}
  /// a real variable with the given index
  pub const fn var(i:u32)->VID { VID { v: Var(i) }}
  /// a virtual variable with the given index
  pub const fn vir(i:u32)->VID { VID { v: Vir(i) }}
  pub fn is_top(&self)->bool { VID{ v:T } == *self }
  pub fn is_nov(&self)->bool { matches!(self, VID{ v:NoV }) }
  pub fn is_var(&self)->bool { matches!(self, VID{v:Var(_)}) }
  pub fn is_vir(&self)->bool { matches!(self, VID{v:Vir(_)}) }

  pub fn is_above(&self, other:&VID)->bool { self.cmp_depth(other) == VidOrdering::Above }
  pub fn is_below(&self, other:&VID)->bool { self.cmp_depth(other) == VidOrdering::Below }

  /// Move one step up the variable order by incrementing the index.
  /// Panics on T and NoV (and on index overflow).
  pub fn shift_up(&self)->VID {
    match self.v {
      NoV => panic!("VID::nov().shift_up() is undefined"),
      T => panic!("VID::top().shift_up() is undefined"), //VID::var(0),
      // these two might panic on over/underflow:
      Var(x) => VID::var(x+1),
      Vir(x) => VID::vir(x+1)}}

  /// Index of a real variable. Panics unless self is a Var.
  pub fn var_ix(&self)->usize {
    if let Var(x) = self.v { x as usize } else { panic!("var_ix({:?})", self) }}

  /// Index of a virtual variable. Panics unless self is a Vir.
  pub fn vir_ix(&self)->usize {
    if let Vir(x) = self.v { x as usize } else { panic!("vir_ix({:?})", self) }}

  /// Index of either a real or a virtual variable. Panics on T and NoV.
  pub fn vid_ix(&self)->usize { match self.v {
    T => panic!("x.vid_ix() makes no sense when x==T. Test with nid::is_const first."),
    NoV => panic!("x.vid_ix() makes no sense when x==VID::NoV. Test with x.is_nov first."),
    Var(x) | Vir(x) => x as usize }}

  /// One-hot bitmask for the variable's index.
  /// Returns 0 for T/NoV and for indices that don't fit in 64 bits.
  pub fn bitmask(&self)->u64 { match self.v {
    NoV|T => 0,
    Var(x) | Vir(x) => if x < 64 { 1 << x as u64 } else { 0 }}}}

/// Pretty-printer for NIDS that reveal some of their internal data.
impl fmt::Display for VID {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    match self.v {
      T => write!(f, "T"),
      NoV => write!(f, "NoV"),
      Var(x) => write!(f, "x{:X}", x),
      Vir(x) => write!(f, "v{:X}", x) }}}

/// Same as fmt::Display. Mostly so it's easier to see the problem when an assertion fails.
impl fmt::Debug for VID { // for test suite output
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self) }}

impl VID {
  #[deprecated(note="VID scaffolding")]
  pub fn u(&self)->usize { match self.v {
    T => 536870912, // 1<<29, same as nid::T,
    NoV => panic!("can't turn NoV into a number"),
    Var(x) => x as usize,
    Vir(x) => x as usize }}}

/// this is only so I can order ops. VID should otherwise always be
/// compared with is_above / is_below or cmp_depth, for clarity.
120 | impl Ord for VID { 121 | fn cmp(&self, other: &Self)-> Ordering { 122 | match self.cmp_depth(other) { 123 | VidOrdering::Above => Ordering::Less, 124 | VidOrdering::Level => Ordering::Equal, 125 | VidOrdering::Below => Ordering::Greater}}} 126 | 127 | impl PartialOrd for VID { 128 | fn partial_cmp(&self, other: &Self) -> Option { 129 | Some(self.cmp(other))}} 130 | 131 | /// predefined consts for VIDS (mostly for tests) 132 | #[allow(non_upper_case_globals)] 133 | pub mod named { 134 | use super::VID; 135 | pub const x0:VID = VID::var(0); 136 | pub const x1:VID = VID::var(1); 137 | pub const x2:VID = VID::var(2); 138 | pub const x3:VID = VID::var(3); 139 | pub const x4:VID = VID::var(4); 140 | pub const v0:VID = VID::vir(0); 141 | pub const v1:VID = VID::vir(1); 142 | pub const v2:VID = VID::vir(2); 143 | pub const v3:VID = VID::vir(3); 144 | pub const v4:VID = VID::vir(4); 145 | } 146 | -------------------------------------------------------------------------------- /src/wip.rs: -------------------------------------------------------------------------------- 1 | //! Generic Work-in-progress support, used by e.g. [`VhlSwarm`]. 2 | //! 3 | //! In this module, the main structure is [`WorkState`]. 4 | //! 5 | //! - `K` is the hashable key for some query issued to the system. 6 | //! - `V` is the type of values produced by the system. 7 | //! - `P` is the type of parts that are being assembled to produce the final value. 8 | //! 9 | //! Inside the `WorkState`, each `K` is mapped to a `Work>`. 10 | //! - [`Work`] is either `Todo(WipRef)` or `Done(V)`. 11 | //! - [`WipRef`] is really just `Wip`. 12 | //! - [`Wip`] has `parts: P` and `deps: Vec>`. 13 | //! - [`Dep`] tracks which other queries are dependent on this one. It has 14 | //! a `HiLoPart` and an `invert` flag. (TODO: explicit use of invert and 15 | //! HiloPart should probably be in a `VhlDep` struct.) 16 | //! 17 | //! With this framework, we can track the progress of a distributed computation. 18 | //! 
19 | //! The main change I forsee making here is making `Dep` an enum, that also 20 | //! includes an option to return a top-level result for a numbered query, as 21 | //! currently only one query is allowed. 22 | //! 23 | //! It would also be quite nice if dependencies could be "released" when a query 24 | //! "short circuits". Ex: if a constant 0 bubbles up to one side of an "AND" expression, 25 | //! we ought to be able to cancel the other side recursively (without necessarily throwing 26 | //! away the work that's been done so far). 27 | //! 28 | use std::borrow::BorrowMut; 29 | use std::cell::RefCell; 30 | use std::default::Default; 31 | use std::fmt::Debug; 32 | use std::marker::PhantomData; 33 | use std::collections::HashMap; 34 | use std::hash::Hash; 35 | use std::sync::Mutex; 36 | use crate::nid::NID; 37 | use crate::vid::VID; 38 | use crate::vhl::{HiLo, HiLoPart, VhlParts, HiLoCache}; 39 | use crate::bdd::{Norm, NormIteKey}; 40 | use dashmap::DashMap; 41 | 42 | // cache lookup counters: 43 | thread_local!{ 44 | pub static COUNT_CACHE_TESTS: RefCell = const { RefCell::new(0) }; 45 | pub static COUNT_CACHE_HITS: RefCell = const { RefCell::new(0) }; } 46 | 47 | 48 | 49 | pub type WIPHashMap = HashMap; 50 | 51 | #[derive(Debug,Copy,Clone)] 52 | pub struct Dep { pub dep: K, pub part: HiLoPart, pub invert: bool } 53 | impl Dep{ 54 | pub fn new(dep: K, part: HiLoPart, invert: bool)->Dep { Dep{dep, part, invert} }} 55 | 56 | #[derive(Debug, Default)] 57 | pub struct Wip { pub parts : P, pub deps : Vec> } 58 | 59 | // TODO: wrap this with a smart pointer so Work::Done and Work::Todo are both usizes. 
60 | type WipRef = Wip; 61 | 62 | #[derive(Debug)] 63 | pub enum Work { Todo(W), Done(V) } 64 | 65 | impl Default for Work where W:Default { 66 | fn default() -> Self { Work::Todo(W::default()) }} 67 | 68 | impl Work { 69 | 70 | pub fn is_todo(&self)->bool { matches!(self, Self::Todo(_))} 71 | 72 | pub fn is_done(&self)->bool { matches!(self, Self::Done(_))} 73 | 74 | pub fn unwrap(&self)->&V { 75 | if let Self::Done(v) = self { v } else { 76 | panic!("cannot unwrap() a Work::Todo") }} 77 | 78 | pub fn wip_mut(&mut self)->&mut W { 79 | if let Self::Todo(w) = self { w } else { 80 | panic!("cannot get wip() from a Work::Done") }} 81 | 82 | pub fn wip(&self)->&W { 83 | if let Self::Todo(w) = self { w } else { 84 | panic!("cannot get wip() from a Work::Done") }}} 85 | 86 | 87 | /// Wrapper class to indicate a value is the final result 88 | /// to the distributed problem we're solving. 89 | pub struct Answer(pub T); // TODO: nopub 90 | 91 | /// Thread-safe map of queries->results, including results 92 | /// that are currently under construction. 93 | #[derive(Debug, Default)] 94 | pub struct WorkState where K:Eq+Hash+Debug { 95 | _kvp: PhantomData<(K,V,P)>, 96 | /// this is a kludge. it locks entire swarm from taking in new 97 | /// queries until an answer is found, because it's the only place 98 | /// we currently have to remember the query id. (since there's only 99 | /// one slot, we can only have one top level query at a time) 100 | pub qid:Mutex>, // pub so BddWorker can see it 101 | /// cache of hi,lo pairs. 102 | hilos: HiLoCache, 103 | // TODO: make .cache private 104 | pub cache: DashMap>, fxhash::FxBuildHasher> } 105 | 106 | impl WorkState { 107 | 108 | pub fn len(&self)->usize { self.hilos.len() } 109 | #[must_use] pub fn is_empty(&self) -> bool { self.len() == 0 } 110 | 111 | /// If the key exists in the cache AND the work is 112 | /// done, return the completed value, otherwise 113 | /// return None. 
114 | pub fn get_done(&self, k:&K)->Option { 115 | COUNT_CACHE_TESTS.with(|c| *c.borrow_mut() += 1); 116 | if let Some(w) = self.cache.get(k) { 117 | match w.value() { 118 | Work::Todo(_) => None, 119 | Work::Done(v) => { 120 | COUNT_CACHE_HITS.with(|c| *c.borrow_mut() += 1); 121 | Some(v.clone())}}} 122 | else { None }} 123 | 124 | pub fn get_cached_nid(&self, v:VID, hi:NID, lo:NID)->Option { 125 | self.hilos.get_node(v, HiLo{hi,lo})} 126 | 127 | pub fn vhl_to_nid(&self, v:VID, hi:NID, lo:NID)->NID { 128 | match self.hilos.get_node(v, HiLo{hi,lo}) { 129 | Some(n) => n, 130 | None => { self.hilos.insert(v, HiLo{hi, lo}) }}} 131 | 132 | pub fn get_hilo(&self, n:NID)->HiLo { self.hilos.get_hilo(n) } 133 | 134 | /// return (hi, lo) pair for the given nid. used internally 135 | #[inline] pub fn tup(&self, n:NID)-> (NID, NID) { 136 | use crate::nid::{I,O}; 137 | if n.is_const() { if n==I { (I, O) } else { (O, I) } } 138 | else if n.is_vid() { if n.is_inv() { (O, I) } else { (I, O) }} 139 | else { let hilo = self.get_hilo(n); (hilo.hi, hilo.lo) }} } 140 | 141 | // TODO: nopub these methods 142 | impl WorkState { 143 | pub fn resolve_nid(&self, q:&K, nid:NID)->Option> { 144 | let mut ideps = vec![]; 145 | { // update work_cache and extract the ideps 146 | let mut v = self.cache.get_mut(q).unwrap(); 147 | if let Work::Done(old) = v.value() { 148 | warn!("resolving an already resolved nid for {:?}", q); 149 | assert_eq!(*old, nid, "old and new resolutions didn't match!") } 150 | else { 151 | ideps = std::mem::take(&mut v.value_mut().wip_mut().deps); 152 | *v = Work::Done(nid) }} 153 | if ideps.is_empty() { Some(Answer(nid)) } 154 | else { 155 | let mut res = None; 156 | for d in ideps { 157 | if let Some(Answer(a)) = self.resolve_part(&d.dep, d.part, nid, d.invert) { 158 | res =Some(Answer(a)) }} 159 | res }} 160 | 161 | pub fn resolve_vhl(&self, q:&K, v:VID, h0:NID, l0:NID, invert:bool)->Option> { 162 | use crate::bdd::ITE; // TODO: normalization strategy might need to be 
generic 163 | // we apply invert first so it normalizes correctly. 164 | let (h1,l1) = if invert { (!h0, !l0) } else { (h0, l0) }; 165 | let nid = match ITE::norm(NID::from_vid(v), h1, l1) { 166 | Norm::Nid(n) => n, 167 | Norm::Ite(NormIteKey(ITE{i:vv,t:hi,e:lo})) => 168 | self.vhl_to_nid(vv.vid(), hi, lo), 169 | Norm::Not(NormIteKey(ITE{i:vv,t:hi,e:lo})) => 170 | !self.vhl_to_nid(vv.vid(), hi, lo)}; 171 | self.resolve_nid(q, nid) } 172 | 173 | pub fn resolve_part(&self, q:&K, part:HiLoPart, nid:NID, invert:bool)->Option> { 174 | let mut parts = VhlParts::default(); 175 | { // -- new way -- 176 | let mut v = self.cache.get_mut(q).unwrap(); 177 | match v.value_mut() { 178 | Work::Todo(w) => { 179 | let n = if invert { !nid } else { nid }; 180 | w.borrow_mut().parts.set_part(part, Some(n)); 181 | parts = w.borrow_mut().parts } 182 | Work::Done(x) => { warn!("got part for K:{:?} ->Work::Done({:?})", q, x) } }} 183 | 184 | if let Some(HiLo{hi, lo}) = parts.hilo() { 185 | self.resolve_vhl(q, parts.v, hi, lo, parts.invert) } 186 | else { None}} 187 | 188 | /// set the branch variable and invert flag on the work in progress value 189 | pub fn add_wip(&self, q:&K, vid:VID, invert:bool)->Option> { 190 | let mut res = None; 191 | if self.cache.contains_key(q) { 192 | self.cache.alter(q, |_k, v| match v { 193 | Work::Todo(Wip{parts,deps}) => { 194 | let mut p = parts; p.v = vid; p.invert = invert; 195 | Work::Todo(Wip{parts:p,deps})}, 196 | Work::Done(nid) => { 197 | res = Some(Answer(nid)); 198 | Work::Done(nid) }});} 199 | else { panic!("got wip for unknown task");} 200 | res } 201 | 202 | // returns true if the query is new to the system 203 | pub fn add_dep(&self, q:&K, idep:Dep)->(bool, Option>) { 204 | COUNT_CACHE_TESTS.with(|c| *c.borrow_mut() += 1); 205 | let mut old_done = None; let mut was_empty = false; let mut answer = None; 206 | { // -- new way -- add_sub_task 207 | // this handles both the occupied and vacant cases: 208 | let mut v = 
self.cache.entry(*q).or_insert_with(|| { 209 | was_empty = true; 210 | Work::default()}); 211 | if !was_empty { COUNT_CACHE_HITS.with(|c| *c.borrow_mut() += 1) } 212 | match v.value_mut() { 213 | Work::Todo(w) => w.borrow_mut().deps.push(idep), 214 | Work::Done(n) => old_done=Some(*n) }} 215 | if let Some(nid)=old_done { 216 | answer = self.resolve_part(&idep.dep, idep.part, nid, idep.invert); } 217 | (was_empty, answer) }} 218 | 219 | 220 | 221 | // one step in the resolution of a query. 222 | // !! to be replaced by direct calls to 223 | // work.cache.resolve_nid, resolve_vhl, resolve_part 224 | #[derive(PartialEq,Debug)] 225 | pub enum ResStep { 226 | /// resolved to a nid 227 | Nid(NID), 228 | /// other work in progress 229 | Wip{v:VID, hi:Norm, lo:Norm, invert:bool}} 230 | 231 | impl std::ops::Not for ResStep { 232 | type Output = ResStep; 233 | fn not(self)->ResStep { 234 | match self { 235 | ResStep::Nid(n) => ResStep::Nid(!n), 236 | ResStep::Wip{v,hi,lo,invert} => ResStep::Wip{v,hi,lo,invert:!invert} }}} 237 | 238 | /// Response message. 239 | #[derive(PartialEq,Debug)] 240 | pub enum RMsg { 241 | /// We've solved the whole problem, so exit the loop and return this nid. 242 | Ret(NID), 243 | /// return stats about the memo cache 244 | CacheStats { tests: u64, hits: u64 }} 245 | -------------------------------------------------------------------------------- /viewbex.html: -------------------------------------------------------------------------------- 1 | 2 | 9 | 10 | 11 | 12 | viewbex 13 | 14 | 15 | 22 | 23 | 24 | 25 |
26 |
{{text}}
27 |
28 | 29 |
30 |
31 | 32 | 100 | 101 | 102 | 103 | --------------------------------------------------------------------------------