├── README.md
├── lambda_calculus.js
├── optlam.js
└── test.js

/README.md:
--------------------------------------------------------------------------------
## Optlam.js

An optimal function evaluator written in JavaScript.

**Note: this is deprecated, see a much cleaner version [here](https://github.com/maiavictor/abstract-algorithm).**

## About

Optlam.js is a simple, optimal (in Lévy's sense) λ-calculus evaluator using interaction nets. It is, currently, as far as I know, the fastest implementation of functions in the world. It uses Lamping's Abstract Algorithm - that is, the so-called (and problematic) "oracle" is avoided altogether. As such, it is only capable of computing λ-terms that are typeable in Elementary Affine Logic. This includes most functions that you'd use in practice, but it isn't powerful enough to process, for example, a non-halting Turing machine. Notice that being optimal doesn't mean it is efficient - it is implemented in JavaScript, after all. Nevertheless, it is still asymptotically faster than most evaluators, being able to quickly normalize functions that [even Haskell would take years](http://stackoverflow.com/questions/31707614/why-are-%CE%BB-calculus-optimal-evaluators-able-to-compute-big-modular-exponentiation). Improved implementations would be great, and there is a lot of potential to explore parallel (GPU/ASIC?) processing. The API is very simple, consisting of one function, `reduce`, which receives a De Bruijn-indexed, JSON-encoded λ-calculus term and returns its normal form. See [this image](http://i.imgur.com/CSjrhsX.jpg) for an overall idea of how the magic works.

## Example

What is the result of:

    (function (a){ return function(b){ return a; } })(1)(2);

Using node.js, you can find it is `1`. Now, what is the result of:

    expMod = (function (v0) { return (function (v1) { return (function (v2) { return (((((((function(n){return(function(f){return(function(a){ for (var i=0;i

--------------------------------------------------------------------------------
/lambda_calculus.js:
--------------------------------------------------------------------------------
module.exports = (function(){
  // Type tags for terms (any distinct values work).
  var LAM = 0, APP = 1, VAR = 2;

  // Lam :: Term -> Term
  // Creates an abstraction.
  function Lam(body){
    return {type:LAM, body:body};
  };

  // App :: Term -> Term -> Term
  // The application of two terms.
  function App(left,right){
    return {type:APP, left:left, right:right};
  };

  // Var :: Int -> Term
  // A De Bruijn-indexed variable.
  function Var(index){
    return {type:VAR, index:index};
  };

  // reduce :: Term -> Term
  // Reduces a term to normal form. Will fail to terminate if the term isn't
  // strongly normalizing - that is, the Y-combinator and similar absurdities are banned.
  function reduce(term){
    switch (term.type){
      case VAR: return term;
      case LAM: return Lam(reduce(term.body));
      case APP:
        var left  = reduce(term.left);
        var right = reduce(term.right);
        switch (left.type){
          case LAM : return reduce(substitute(right, true, 0, -1, left.body));
          case APP : return App(left, right);
          default  : return App(left, right);
        };
    };
    function substitute(value, subs, depth, wrap, term){
      switch (term.type){
        case VAR: return subs && term.index === depth
          ? substitute(Var(0), false, -1, depth, value)
          : Var(term.index + (term.index > depth ? wrap : 0));
        case LAM: return Lam(substitute(value, subs, depth+1, wrap, term.body));
        case APP: return App(
          substitute(value, subs, depth, wrap, term.left),
          substitute(value, subs, depth, wrap, term.right));
      };
    };
  };
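
  // For example (a quick illustration of the De Bruijn encoding): applying the
  // identity to the K combinator gives K back. Under two lambdas, Var(1) refers
  // to the outer binder:
  //   reduce(App(Lam(Var(0)), Lam(Lam(Var(1)))))   // ==> Lam(Lam(Var(1)))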

  // fold :: (Int -> a) -> (a -> a) -> (a -> a -> a) -> Term -> a
  function fold(var_,lam,app){
    return function R(term){
      switch (term.type){
        case VAR: return var_(term.index);
        case LAM: return lam(R(term.body));
        case APP: return app(R(term.left),R(term.right));
      };
    };
  };

  // nat :: Number -> Term
  // Converts a JavaScript number to a λ-calculus Church number.
  function nat(x){
    return Lam(Lam((function go(x){return x===0?Var(0):App(Var(1),go(x-1))})(x)));
  };

  // nat_ :: Term -> Number
  // Converts a λ-calculus Church number to a JavaScript number.
  // TODO: do this decently.
  function nat_(x){
    return size(x)-1;
  };

  // print :: Term -> IO ()
  function print(x){
    console.log(pretty(x));
    return x;
  };

  // size :: Term -> Int
  // Number of variables on a λ-term.
  // TODO: that isn't the usual definition of size, dumb.
  var size = fold(
    function(idx){ return 1; },
    function(body){ return body; },
    function(left,right){ return left+right; });

  // pretty :: Term -> String
  var pretty = fold(
    function(index){ return index; },
    function(body){ return "λ" + body; },
    function(left,right){ return "(" + left + " " + right + ")"; });

  // show :: Term -> String
  var show = fold(
    function(index){ return "Var(" + index + ")"; },
    function(body){ return "Lam(" + body + ")"; },
    function(left,right){ return "App(" + left + "," + right + ")"; });

  // Export a pattern-matching function instead.
  // TODO: APP/LAM/VAR are internal tags and shouldn't be exported.
  return {
    APP    : APP,
    LAM    : LAM,
    VAR    : VAR,
    Lam    : Lam,
    App    : App,
    Var    : Var,
    reduce : reduce,
    fold   : fold,
    nat    : nat,
    nat_   : nat_,
    print  : print,
    size   : size,
    pretty : pretty,
    show   : show};
})();

var lc = module.exports;
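
// A small usage sketch, using the `lc` binding above (outputs shown as comments):
//   var two = lc.nat(2);
//   lc.pretty(two);  // "λλ(1 (1 0))"
//   lc.nat_(two);    // 2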

--------------------------------------------------------------------------------
/optlam.js:
--------------------------------------------------------------------------------
// ~~~~~~~~~~~~~~~~~~~~~~ Optlam.js ~~~~~~~~~~~~~~~~~~~~~~
// An optimal λ-calculus normalizer written in JavaScript.
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Optlam.js is a simple, optimal (in Lévy's sense) λ-calculus evaluator using
// interaction nets. It is, currently, as far as I know, the fastest
// implementation of functions in the world. It uses Lamping's Abstract
// Algorithm - that is, the so-called (and problematic) "oracle" is avoided
// altogether. As such, it is only capable of computing λ-terms that are
// typeable in Elementary Affine Logic. This includes most functions that you'd
// use in practice, but it isn't powerful enough to process, for example, a
// non-halting Turing machine. Notice that being optimal doesn't mean it is
// efficient - it is implemented in JavaScript, after all. Nevertheless, it is
// still asymptotically faster than most evaluators, being able to quickly
// normalize functions that even Haskell would take years to compute.
// Improved implementations would be great, and there is a lot of potential to
// explore parallel (GPU/ASIC?) processing. The API is very simple, consisting
// of one function, `reduce`, which receives a De Bruijn-indexed, JSON-encoded
// λ-calculus term and returns its normal form. See this image for an overall
// idea of how the magic works:
// http://i.imgur.com/CSjrhsX.jpg
// REPO    : https://github.com/maiavictor/optlam
// EXAMPLE : optlam.reduce(App(Lam(Var(0)),Lam(Var(0)))) // ((λ x . x) (λ x . x))
// RESULT  : Lam(Var(0))                                 // (λ x . x)

module.exports = (function(){
  var lambda = require("./lambda_calculus.js");

  // Node types. Each node has 3 ports, where the first
  // port is the active one, with the following semantics:
  // type  port0    port1  port2
  // ROT   down     n/a    n/a
  // APP   func     arg    up
  // LAM   up       var    body
  // DUP   target   left   right
  // ERA   up       n/a    n/a
  var ROT = 0, LAM = 1, APP = 2, DUP = 4, ERA = 5;

  // The app's state:
  // `next_id` : next id to be allocated.
  // `memory`  : the memory buffer.
  // `garbage` : index of collected nodes (to be reclaimed).
  var next_id = 0;
  var memory  = [];
  var garbage = [];

  // Statistics: updated whenever `net_reduce` is called.
  // `iterations`   : number of times a node was visited on the reduction.
  // `applications` : how many times a rule was applied (commute or annihilate).
  // `used_memory`  : number of allocated memory cells (on JS, a cell is a `double`).
  var stats = {
    iterations   : 0,
    applications : 0,
    betas        : 0,
    used_memory  : 0};

  // Node :: Type -> Tag -> IO Node
  // Allocates space for a node of a given type (ROT/LAM/APP/DUP/ERA)
  // and an integer tag. The type is used on the readback, and the tag is
  // used for reductions: when active ports meet, a pair will commute if
  // their tags are different and annihilate if their tags are identical.
  // Returns the index on memory on which the node was allocated.
  function Node(type,tag){
    var idx = garbage.pop() || memory.length;
    memory[idx+0] = type;      // Node type (used on readback)
    memory[idx+1] = 0;         // Port 0 target port
    memory[idx+2] = 0;         // Port 1 target port
    memory[idx+3] = 0;         // Port 2 target port
    memory[idx+4] = 0;         // Port 0 target node
    memory[idx+5] = 0;         // Port 1 target node
    memory[idx+6] = 0;         // Port 2 target node
    memory[idx+7] = ++next_id; // Unique id
    memory[idx+8] = tag;       // Tag (used to decide which reduction rule to apply)
    stats.used_memory = memory.length;
    return idx;
  };

  // get_target_port :: Node -> Port -> Port
  // Returns which port on the target node this port is connected to.
  function get_target_port(node,port){
    return memory[node+1+port];
  };

  // get_target :: Node -> Port -> Node
  // Returns the target node that this port is connected to.
  function get_target(node,port){
    return memory[node+4+port];
  };

  // half_link :: Node -> Port -> Node -> Port -> IO ()
  // Links (one-way) `node`'s port `port` to `target`'s port `target_port`.
  function half_link(node,port,target,target_port){
    memory[node+1+port] = target_port;
    memory[node+4+port] = target;
  };
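
  // A small illustration of the memory layout (the nodes `app` and `lam` here
  // are hypothetical, just for the sketch): wiring both half-links makes the
  // two accessors agree on each side.
  //   var app = Node(APP, 0);
  //   var lam = Node(LAM, 0);
  //   half_link(app, 0, lam, 2);
  //   half_link(lam, 2, app, 0);
  //   get_target(app, 0)       // == lam  (stored at memory[app+4+0])
  //   get_target_port(app, 0)  // == 2    (stored at memory[app+1+0])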

  // get_id :: Node -> Int
  // Returns a node's id.
  function get_id(node){
    return memory[node+7];
  };

  // get_tag :: Node -> Tag
  // Returns a node's tag (used to decide which rule to apply on active pairs).
  function get_tag(node){
    return memory[node+8];
  };

  // get_type :: Node -> Type
  // Returns a node's type (used for decoding - possibly redundant).
  function get_type(node){
    return memory[node];
  };

  // link :: Node -> Port -> Node -> Port -> IO ()
  // Two-way link between two nodes' ports.
  function link(a_target,a_port,b_target,b_port){
    half_link(a_target,a_port,b_target,b_port);
    half_link(b_target,b_port,a_target,a_port);
  };

  // annihilate :: Node -> Node -> IO ()
  // Annihilates two nodes. This rule is used when
  // two nodes of identical tags collide.
  //  a   b            a   b
  //   \ /              \ /
  //    A -- B   -->     X
  //   / \              / \
  //  c   d            c   d
  function annihilate(a,b){
    link(get_target(a,1),get_target_port(a,1),get_target(b,1),get_target_port(b,1));
    link(get_target(a,2),get_target_port(a,2),get_target(b,2),get_target_port(b,2));
    garbage.push(a,b);
  };

  // commute :: Node -> Node -> IO ()
  // Commutes two nodes. This rule is used when
  // two nodes of different tags collide.
  //  a   d            a - B --- A - d
  //   \ /                   \ /
  //    A -- B   -->          X
  //   / \                   / \
  //  b   c            b - B --- A - c
  function commute(a,b){
    var a2 = Node(get_type(a), get_tag(a));
    var b2 = Node(get_type(b), get_tag(b));
    link(b  , 0 , get_target(a,1) , get_target_port(a,1));
    link(a  , 0 , get_target(b,1) , get_target_port(b,1));
    link(b  , 1 , a               , 1);
    link(b2 , 0 , get_target(a,2) , get_target_port(a,2));
    link(a2 , 0 , get_target(b,2) , get_target_port(b,2));
    link(a  , 2 , b2              , 1);
    link(b  , 2 , a2              , 1);
    link(b2 , 2 , a2              , 2);
  };

  // erase :: Node -> Node -> IO ()
  // The erase node's main role is guiding garbage collection,
  // but this isn't present on Optlam yet.
  //       d           e - d
  //      /
  //  e -- B    -->
  //      \
  //       c           e - c
  function erase(a,b){
    var e2 = Node(ERA, -1);
    link(a,  0, get_target(b,1), get_target_port(b,1));
    link(e2, 0, get_target(b,2), get_target_port(b,2));
    garbage.push(b);
  };
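
  // In short, when an active pair is found during reduction (see `net_reduce`
  // below): if one of the nodes is an eraser (tag -1), `erase` is applied; if
  // both nodes carry the same tag, they `annihilate`; otherwise they `commute`,
  // duplicating each other.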

  // net_reduce :: Node -> Node
  // Reduces an interaction net to normal form.
  // Instead of applying rules in parallel with no ordering, we walk through
  // the net from the root through its circuit, only traversing visible
  // branches. That is done in order to avoid unnecessary computation. For
  // example, the term `(bool.true 1 (nat.div 10000 10000))` has many active
  // pairs that aren't necessary for the final result at all, since they are
  // in an unreachable branch. This strategy allows us to skip those pairs.
  function net_reduce(net){
    stats.applications = 0;
    stats.iterations   = 0;
    var solid = {};
    var exit  = {};
    var visit = [[net,0]];

    visit_a_node:
    while (visit.length > 0){
      // While the must-visit queue is occupied, we pick a node from there
      // and start walking through the graph following its semantic path.

      var next        = visit.pop();
      var next_target = get_target(next[0],next[1]);
      var next_port   = get_target_port(next[0],next[1]);

      while (next_target!==undefined){
        ++stats.iterations;
        var exit_target, exit_port;
        var prev_target = get_target(next_target,next_port);
        var prev_port   = get_target_port(next_target,next_port);

        // A solid node is already part of the canonical graph.
        // If we meet one, there is no point in continuing this walk.
        if (solid[get_id(next_target)])
          continue visit_a_node;

        // At this point, we're walking between two nodes.
        if (next_port === 0){
          if (prev_port === 0 && get_tag(prev_target) !== -2 && get_tag(next_target) !== -2){
            // In case this is an active link (i.e., the next and
            // previous ports are both 0), we need to apply some
            // graph-rewrite rule and move on.

            ++stats.applications;
            if (get_tag(prev_target) === 0 && get_tag(next_target) === 0) ++stats.betas;

            exit_target = get_target(prev_target,exit[get_id(prev_target)]);
            exit_port   = get_target_port(prev_target,exit[get_id(prev_target)]);

            // If one of the nodes is an "erase", we apply its rule.
            // If the two nodes have the same tag, we annihilate them.
            // If the two nodes have different tags, we commute them.
            if (get_tag(next_target) === -1)
              erase(next_target,prev_target);
            else if (get_tag(prev_target) === get_tag(next_target))
              annihilate(prev_target,next_target);
            else
              commute(prev_target,next_target);

            next_target = get_target(exit_target,exit_port);
            next_port   = get_target_port(exit_target,exit_port);
          } else {
            // If we arrived at port 0 but this isn't an active pair, the
            // target node will be part of the canonical graph.
            solid[get_id(next_target)] = true;
            visit.push([next_target,2],[next_target,1]);
            continue visit_a_node;
          };
        } else {
          // If the port we arrived at isn't 0, we record it as this node's
          // exit and move on through its port 0.
          exit[get_id(next_target)] = next_port;
          next_port   = get_target_port(next_target,0);
          next_target = get_target(next_target,0);
        };
      };
    };
    return net;
  };

  // net_encode :: Term -> Node
  // Converts a λ-calculus term to an interaction net.
  // Receives the λ-calculus term and returns the pointer to the root node
  // of the created interaction net.
  function net_encode(root){
    function Link(target, port){
      return {target : target, port : port};
    };
    var next_tag = 0;
    var net_root = Node(ROT, -2);
    function net_encode(node,scope,up_link){

      switch (node.type){

        // To encode a lambda, we use a node with tag 0, such that
        // port 1 points to the bound variable, port 2 points to
        // the abstraction body, and port 0 points to the return location.
        case lambda.LAM:
          var del = Node(ERA, -1);
          var lam = Node(LAM, 0);
          half_link(lam,0,up_link.target,up_link.port);
          link(lam,1,del,0);
          link(del,1,del,2);
          var bod = net_encode(node.body,[lam].concat(scope),Link(lam,2));
          half_link(lam,2,bod.target,bod.port);
          return Link(lam,0);

        // To encode an application, we also use a node with tag 0.
        // That is, APP and LAM nodes are isomorphic and need no
        // distinction on the sharing graph. The difference, then, is
        // that APP is upside-down. Its port 0 points to the first
        // argument, its port 1 points to the second argument, and its
        // port 2 points to the return location. This, albeit
        // unintuitive, is the only way it works and makes sense once
        // you observe the graph visually. Each line here is in the
        // exact order it must be.
        case lambda.APP:
          var app = Node(APP, 0);
          half_link(app,2,up_link.target,up_link.port);
          var left = net_encode(node.left,scope,Link(app,0));
          half_link(app,0,left.target,left.port);
          var right = net_encode(node.right,scope,Link(app,1));
          half_link(app,1,right.target,right.port);
          return Link(app,2);

        // A variable connects to its binding lambda node. If there is
        // already another variable connected to it, then a "DUP" node
        // must be created and wired to the lambda.
        case lambda.VAR:
          var idx = node.index;
          var lam = scope[idx];
          if (get_type(get_target(lam,1)) === ERA){
            half_link(lam,1,up_link.target,up_link.port);
            return Link(lam,1);
          } else {
            var dup = Node(DUP, ++next_tag);
            half_link(dup,0,lam,1);
            half_link(dup,1,up_link.target,up_link.port);
            half_link(dup,2,get_target(lam,1),get_target_port(lam,1));
            half_link(get_target(lam,1),get_target_port(lam,1),dup,2);
            half_link(lam,1,dup,0);
            return Link(dup,1);
          };
      };
    };
    var encoded_link = net_encode(root,[],Link(net_root,0));
    half_link(net_root,0,encoded_link.target,encoded_link.port);
    return net_root;
  };
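
  // As a tiny example of the encoding, the identity term Lam(Var(0)) becomes a
  // single LAM node whose port 1 (the variable) and port 2 (the body) are wired
  // to each other, with port 0 left for the caller - plus an unused ERA node
  // that was allocated while the variable was still unbound.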

  // net_decode :: Node -> Term
  // Converts an interaction net back to a λ-calculus term.
  // Receives the pointer to the root node of the net and returns the λ-term.
  // This function uses a manual stack for recursion in order to avoid stack
  // overflows and to enable tail call optimization for one of its branches.
  function net_decode(root){
    var stack = [];
    var retur = null;
    var index = -1;

    // Execute a recursive call.
    function CALL(node,port,depth,exit){
      stack[index+1] = {cont:0, node:node, port:port, depth:depth, exit:exit, left:null};
      ++index;
    };

    // Execute a recursive tail-call.
    function TAIL_CALL(node,port,depth,exit){
      var s = stack[index];
      s.cont=0, s.node=node, s.port=port, s.depth=depth, s.exit=exit, s.left=null;
    };

    // Return from a recursive call.
    function RETURN(val){
      retur = val;
      --index;
    };

    // First constructor of the usual List datatype.
    function Cons(head,tail){
      return {head:head, tail:tail};
    };

    var go_link;
    var node_depth = {};

    // We start by calling the recursive procedure on the root node.
    CALL(get_target(root,0), get_target_port(root,0), 0, null);
    while (index>=0){
      var st = stack[index];

      // This implements the pattern matching.
      switch(st.cont){
        case 0:
          if (node_depth[get_id(st.node)] === undefined)
            node_depth[get_id(st.node)] = st.depth;
          switch(get_type(st.node)){

            // Reads back a DUP node.
            case DUP:
              go_link = st.port>0?0:st.exit.head;
              st.exit = st.port ? Cons(st.port,st.exit) : st.exit.tail;
              TAIL_CALL(get_target(st.node,go_link), get_target_port(st.node,go_link), st.depth, st.exit);
              continue;

            // Reads back a LAM node.
            case LAM:
              if (st.port === 1){
                RETURN(lambda.Var(st.depth - node_depth[get_id(st.node)] - 1));
              } else {
                CALL(get_target(st.node,2), get_target_port(st.node,2), st.depth+1, st.exit);
                st.cont = 1;
              };
              continue;

            // Reads back an APP node.
            case APP:
              CALL(get_target(st.node,0), get_target_port(st.node,0), st.depth, st.exit);
              st.cont = 2;
              continue;
          }
          continue;

        // This continues the "LAM" case after we regain
        // control from the manual recursive call.
        case 1:
          RETURN(lambda.Lam(retur));
          continue;

        // This continues the "APP" case after we regain
        // control from the manual recursive call.
        case 2:
          st.left = retur;
          CALL(get_target(st.node,1), get_target_port(st.node,1), st.depth, st.exit);
          st.cont = 3;
          continue;

        // This continues the continuation of the "APP"
        // case after we regain control from the second
        // manual recursive call.
        case 3:
          RETURN(lambda.App(st.left, retur));
          continue;
      };
    };
    return retur;
  };
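
  // For instance (an illustrative check): decoding the un-reduced encoding of
  // the identity gives the identity back,
  //   net_decode(net_encode(lambda.Lam(lambda.Var(0))))   // => Lam(Var(0))
  // and each variable is read back as the depth difference between its use
  // site and its binding LAM node, which is exactly its De Bruijn index.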

  // clean :: IO ()
  // Completely wipes all local data.
  // Previously returned pointers/nets are now invalid.
  function clean(){
    next_id = 0;
    memory  = [];
    garbage = [];
    stats.iterations   = 0;
    stats.applications = 0;
    stats.betas        = 0;
    stats.used_memory  = 0;
  };

  // reduce :: Term -> Term
  // The main API. Receives a λ-calculus term and returns its normal form.
  function reduce(term){
    clean();
    return net_decode(net_reduce(net_encode(term)));
  };

  return {
    reduce     : reduce,
    net_encode : net_encode,
    net_decode : net_decode,
    net_reduce : net_reduce,
    stats      : stats,

    // net_reduce_2 : net_reduce_2
  };
})();

--------------------------------------------------------------------------------
/test.js:
--------------------------------------------------------------------------------
// Require the needed libraries.
optlam = require("./optlam.js");
lambda = require("./lambda_calculus.js");

// Shortcuts (A = apply, L = lambda, V = De Bruijn-indexed variable).
var A = lambda.App, L = lambda.Lam, V = lambda.Var, n = lambda.nat, n_ = lambda.nat_;

// An exponential modulus implementation for Church numbers is:
// (λabc.(c(λde.(d(λf.(e(λgh.(g(fgh)))f))))(λd.(d(λef.f)))(λd.(ba(c(λefg.(e(λh.(fhg))))(λe.e)(λef.(fe)))(c(λef.e)(λe.e)(λe.e))))))
// It receives three Church numbers, `a`, `b`, `c`, and returns `a^b%c`.
// I don't have a parser yet, so let's create this term manually:
var exp_mod = L(L(L(A(A(A(V(0),L(L(A(V(1),L(A(A(V(1),L(L(A(V(1),A(A(V(2),V(1)),V(0)))))),V(0))))))),L(A(V(0),L(L(V(0)))))),L(A(A(A(V(2),V(3)),A(A(A(V(1),L(L(L(A(V(2),L(A(A(V(2),V(0)),V(1)))))))),L(V(0))),L(L(A(V(0),V(1)))))),A(A(A(V(1),L(L(V(1)))),L(V(0))),L(V(0)))))))))

// With that, this term now computes `10 ^ 10 % 3`.
var term = A(A(A(exp_mod,n(10)),n(10)),n(3));

// We use optlam to reduce the term, not lambda (which has a naive evaluator
// that wouldn't finish any time soon). lambda.nat_ reads a Church number back
// to a JS number. You could use lambda.pretty instead if you feel like today
// is a good day for counting.
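
// If everything is wired up correctly, this should print 1 (since 10^10 mod 3 = 1),
// followed by the reduction statistics.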
console.log(lambda.nat_(optlam.reduce(term)));
console.log(optlam.stats);

--------------------------------------------------------------------------------