├── .gitattributes ├── .github └── workflows │ └── ci-workflow.yaml ├── .gitignore ├── README.md ├── build.zig └── src ├── alltests.zig ├── compose.zig ├── errors.zig ├── main.zig ├── parse.zig ├── prove.zig ├── read.zig ├── tokenize.zig └── verify.zig /.gitattributes: -------------------------------------------------------------------------------- 1 | *.zig text eol=lf 2 | -------------------------------------------------------------------------------- /.github/workflows/ci-workflow.yaml: -------------------------------------------------------------------------------- 1 | name: Build with zig master 2 | 3 | on: 4 | push: 5 | pull_request: 6 | schedule: 7 | - cron: '23 3 * * *' 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - uses: goto-bus-stop/setup-zig@v1 15 | with: 16 | version: master 17 | - run: wget --quiet https://github.com/metamath/set.mm/raw/develop/set.mm --output-document set.mm 18 | - run: zig build 19 | - run: zig build -Drelease-fast=true 20 | # the previous step brings down memory use in the next step, from 10.7 GB to 6.4 GB 21 | # TODO: fix that exorbitant memory use for a 42 MB set.mm file... 
22 | - run: /usr/bin/time -v zig-out/bin/zigmmverify set.mm 23 | lint: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v2 27 | - uses: goto-bus-stop/setup-zig@v1 28 | with: 29 | version: master 30 | - run: zig fmt --check build.zig src/*.zig 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | zig-cache/ 2 | zig-out/ 3 | *.o 4 | *.exe 5 | 6 | # to allow extracting a zig master download 7 | /zig-*+* 8 | 9 | .*.swp 10 | 11 | set.mm 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | _Note that this branch is built using nightly 'master' zig. 2 | The more actively developed branch is 3 | [zig-0.9.x](https://github.com/marnix/zigmmverify/tree/zig-0.9.x). 4 | Changes are periodically merged from there to this branch._ 5 | 6 | [![Build with zig master](https://github.com/marnix/zigmmverify/workflows/Build%20with%20zig%20master/badge.svg?branch=zig-master)](https://github.com/marnix/zigmmverify/actions?query=branch%3Azig-master) 7 | 8 | # A [Metamath](http://metamath.org) proof verifier in [Zig](https://ziglang.org/) 9 | 10 | At least, the beginnings of one. 11 | 12 | This is partly to learn Zig, 13 | partly to have a fast verifier that I can hack 14 | (Python is way too slow, Rust is too weird for me still, Nim seems less clean, C is out), 15 | so that I can perhaps one day try to do experiments around 16 | parsing (where parse tree of ` X... ` is proof of ` |- TOPLEVEL X... ` ), 17 | abbreviations, 18 | and modules. 19 | 20 | # How to build and run 21 | 22 | Build using Zig master, then just run the resulting binary, 23 | passing a single .mm file on the command line. 
24 | 25 | For example, use set.mm, which you can get from metamath.org, 26 | or download the most recent version directly from GitHub: 27 | https://github.com/metamath/set.mm/raw/develop/set.mm. 28 | 29 | The version of the Metamath specification that was implemented, is 30 | the one from the 2nd edition Metamath book. 31 | (Norman D. Megill, David A. Wheeler, 32 | "Metamath: A Computer Language for Mathematical Proofs". 33 | Lulu Press, 2019. 34 | http://us.metamath.org/downloads/metamath.pdf .) 35 | 36 | # Next tasks 37 | 38 | Verifier completeness: 39 | 40 | - Verify that 'normal' tokens don't start with `$`, 41 | label tokens use only allowed characters, etc. 42 | 43 | - Support `?` in proofs. 44 | 45 | Clean up: 46 | 47 | - Clean-up / refactor RuleIterator + ScopeDiff: 48 | Add methods, move functionality between these structs. 49 | Also encapsulate some parts. 50 | Also try to avoid duplication in statement handling. 51 | 52 | - Work towards a library that can also be used for $a/$p/$e parse trees. 53 | 54 | Functionality: 55 | 56 | - Generate a parse tree for every $a/$p/$e statement. 57 | If possible/feasible, check that it is the only possible parse tree. 58 | Try to also support 'conditional' syntax rules, 59 | so e.g. `${ $e -. B = 0 $. $a class ( A / B ) $. $}` which expresses that 60 | `A / B` is only a syntactically valid expression if `B` is not zero. 61 | 62 | Verifier performance: 63 | 64 | - Optimize performance by reducing memory use: 65 | A token is currently represented by a Zig slice (= pointer and a length), 66 | and this could be replaced by a single small(ish) integer 67 | by 'interning' all tokens. 68 | 69 | - Optimize performance by parallelizing using Zig `async`. 70 | 71 | - Optimize performance by reducing memory use: 72 | I suspect ArrayList is not the right data structure for some of the lists. 
73 | 74 | Verifier tests: 75 | 76 | - Run the test suite from https://github.com/david-a-wheeler/metamath-test 77 | probably by checking it out as a Git submodule 78 | and using the zig-mm-verify binary as the approved-or-not judge. 79 | 80 | - (Separate project.) Extend that test suite, to capture as many 81 | deviations from the specification as possible. 82 | 83 | Verifier usability: 84 | 85 | - Identify the location (line/column) of at least the first error. 86 | 87 | - Don't use 'error union' for Metamath verification errors. 88 | 89 | Language dialects: 90 | 91 | - Optional modes where $c/$v is allowed to be duplicated 92 | (useful to create set-parsed.mm which declares stuff before `$[ set.mm $]`); 93 | where $f implicitly does $c/$v; 94 | and perhaps more. 95 | -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const Builder = @import("std").build.Builder; 2 | 3 | pub fn build(b: *Builder) void { 4 | const mode = b.standardReleaseOptions(); 5 | const target = b.standardTargetOptions(.{}); 6 | 7 | var exe = b.addExecutable("zigmmverify", "src/main.zig"); 8 | exe.setBuildMode(mode); 9 | exe.setTarget(target); 10 | 11 | exe.step.dependOn(&(b.addTest("src/alltests.zig")).step); 12 | 13 | b.default_step.dependOn(&exe.step); 14 | 15 | exe.install(); 16 | } 17 | -------------------------------------------------------------------------------- /src/alltests.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Allocator = std.mem.Allocator; 3 | const assert = std.debug.assert; 4 | 5 | pub const TestFS = struct { 6 | tmpDir: std.testing.TmpDir = undefined, 7 | 8 | pub fn init() TestFS { 9 | return TestFS{ .tmpDir = std.testing.tmpDir(.{}) }; 10 | } 11 | 12 | pub fn deinit(self: *TestFS) void { 13 | self.tmpDir.cleanup(); 14 | } 15 | 16 | pub fn 
writeFile(self: *TestFS, name: []const u8, buffer: []const u8) !void { 17 | const file = try self.tmpDir.dir.createFile(name, .{}); 18 | defer file.close(); 19 | try file.writeAll(buffer); 20 | } 21 | }; 22 | 23 | test "" { 24 | std.testing.refAllDecls(@import("main.zig")); 25 | } 26 | -------------------------------------------------------------------------------- /src/compose.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Allocator = std.mem.Allocator; 3 | const assert = std.debug.assert; 4 | 5 | const errors = @import("errors.zig"); 6 | const Error = errors.Error; 7 | 8 | const tokenize = @import("tokenize.zig"); 9 | const Token = tokenize.Token; 10 | const TokenList = tokenize.TokenList; 11 | const TokenSet = tokenize.TokenSet; 12 | const TokenMap = tokenize.TokenMap; 13 | const eq = tokenize.eq; 14 | 15 | const parse = @import("parse.zig"); 16 | const Statement = parse.Statement; 17 | const StatementIterator = parse.StatementIterator; 18 | 19 | pub fn copyExpression(allocator: Allocator, original: Expression) !Expression { 20 | return try sliceCopy(CVToken, allocator, original); 21 | } 22 | 23 | // TODO: move to new utils.zig? 
24 | fn sliceCopy(comptime T: type, allocator: Allocator, original: []const T) ![]const T { 25 | var copy = try allocator.alloc(T, original.len); 26 | errdefer allocator.free(copy); 27 | std.mem.copy(T, copy, original); 28 | return copy; 29 | } 30 | 31 | const SinglyLinkedList = std.SinglyLinkedList; 32 | const FELabel = struct { label: Token, fe: enum { F, E } }; 33 | const FELabelList = std.ArrayList(FELabel); 34 | pub const DVPair = struct { var1: Token, var2: Token }; 35 | const DVPairList = std.ArrayList(DVPair); 36 | 37 | pub const CVToken = struct { token: Token, cv: enum { C, V } }; 38 | pub const Expression = []const CVToken; 39 | 40 | pub fn eqExpr(a: Expression, b: Expression) bool { 41 | const result = brk: { 42 | if (a.len != b.len) break :brk false; 43 | for (a) |ai, i| { 44 | if (!(eq(ai.token, b[i].token) and ai.cv == b[i].cv)) break :brk false; 45 | } 46 | break :brk true; 47 | }; 48 | if (!result) { 49 | std.debug.print("\neqExpr: actual = ", .{}); 50 | debugPrintExpr(a); 51 | std.debug.print(", expected = ", .{}); 52 | debugPrintExpr(b); 53 | std.debug.print(".\n", .{}); 54 | } 55 | return result; 56 | } 57 | 58 | fn debugPrintExpr(expr: Expression) void { 59 | var sep: []const u8 = ""; 60 | for (expr) |cvToken| { 61 | switch (cvToken.cv) { 62 | .C => { 63 | std.debug.print("{s}{s}", .{ sep, cvToken.token }); 64 | }, 65 | .V => { 66 | std.debug.print("{s}${s}", .{ sep, cvToken.token }); 67 | }, 68 | } 69 | sep = " "; 70 | } 71 | } 72 | 73 | pub const Hypothesis = struct { 74 | const Self = @This(); 75 | 76 | expression: Expression, 77 | isF: bool, 78 | 79 | fn deinit(self: *Self, allocator: Allocator) void { 80 | allocator.free(self.expression); 81 | } 82 | }; 83 | 84 | pub const InferenceRule = struct { 85 | const Self = @This(); 86 | 87 | activeDVPairs: []DVPair, 88 | hypotheses: []Hypothesis, 89 | conclusion: Expression, 90 | 91 | fn deinit(self: *Self, allocator: Allocator) void { 92 | allocator.free(self.activeDVPairs); 93 | for 
(self.hypotheses) |*hyp| { 94 | hyp.deinit(allocator); 95 | } 96 | allocator.free(self.hypotheses); 97 | allocator.free(self.conclusion); 98 | } 99 | }; 100 | 101 | const ProofState = std.SinglyLinkedList(Expression); 102 | 103 | /// TODO: Find a better name; {Token,Label,Symbol}Interpretation? 104 | const MeaningType = enum { Constant, Variable, Rule }; 105 | const Meaning = union(MeaningType) { 106 | const Self = @This(); 107 | 108 | Constant: void, 109 | Variable: struct { usedInFStatement: bool }, 110 | Rule: InferenceRule, 111 | 112 | fn deinit(self: *Self, allocator: Allocator) void { 113 | switch (self.*) { 114 | .Constant, .Variable => {}, 115 | .Rule => self.*.Rule.deinit(allocator), 116 | } 117 | } 118 | }; 119 | 120 | const RuleIteratorItem = struct { 121 | const Self = @This(); 122 | 123 | label: Token, 124 | rule: InferenceRule, 125 | proof: ?TokenList = null, 126 | 127 | pub fn deinit(self: *Self) void { 128 | if (self.proof) |*p| p.deinit(); 129 | } 130 | }; 131 | 132 | pub const RuleIterator = struct { 133 | const Self = @This(); 134 | 135 | allocator: Allocator, 136 | 137 | statements: ?parse.StatementIterator, 138 | 139 | /// what each active token means 140 | meanings: TokenMap(Meaning), 141 | /// the active hypotheses, and whether they are $f or $e 142 | activeHypotheses: FELabelList, 143 | /// the active distinct variable pairs 144 | activeDVPairs: DVPairList, 145 | /// the difference between the current nested scope 146 | /// and its immediately surrounding scope 147 | currentScopeDiff: ?*ScopeDiff, 148 | 149 | pub fn init(allocator: Allocator) !Self { 150 | return Self{ 151 | .allocator = allocator, 152 | .statements = null, 153 | .meanings = TokenMap(Meaning).init(allocator), 154 | .activeHypotheses = FELabelList.init(allocator), 155 | .activeDVPairs = DVPairList.init(allocator), 156 | .currentScopeDiff = null, 157 | }; 158 | } 159 | 160 | pub fn deinit(self: *Self) void { 161 | while (self.currentScopeDiff) |scopeDiff| { 162 | 
scopeDiff.pop(); 163 | } 164 | self.activeDVPairs.deinit(); 165 | self.activeHypotheses.deinit(); 166 | var it = self.meanings.iterator(); 167 | while (it.next()) |kv| { 168 | kv.value_ptr.deinit(self.allocator); 169 | } 170 | self.meanings.deinit(); 171 | } 172 | 173 | pub fn addStatementsFrom(self: *Self, dir: std.fs.Dir, buffer: []const u8) !void { 174 | assert(self.statements == null); 175 | self.statements = StatementIterator.init(self.allocator, dir, buffer); 176 | } 177 | 178 | pub fn next(self: *Self) !?RuleIteratorItem { 179 | if (self.statements == null) return null; 180 | var nextItem: ?RuleIteratorItem = null; 181 | while (nextItem == null) { 182 | if (try self.statements.?.next()) |statement| { 183 | nextItem = try self.add(statement); 184 | } else { 185 | self.statements = null; 186 | break; 187 | } 188 | } 189 | return nextItem; 190 | } 191 | 192 | /// Consumes statement. 193 | fn add(self: *Self, statement: *Statement) !?RuleIteratorItem { 194 | var deinit_statement = true; 195 | defer if (deinit_statement) statement.deinit(self.allocator); 196 | 197 | var nextItem: ?RuleIteratorItem = null; 198 | switch (statement.*) { 199 | .C => |cStatement| { 200 | if (self.currentScopeDiff) |_| return Error.UnexpectedToken; // $c inside ${ $} 201 | for (cStatement.constants.items) |constant| { 202 | if (self.meanings.get(constant)) |_| return Error.Duplicate; 203 | _ = try self.meanings.put(constant, MeaningType.Constant); 204 | } 205 | }, 206 | .V => |vStatement| { 207 | for (vStatement.variables.items) |variable| { 208 | if (self.meanings.get(variable)) |_| return Error.Duplicate; 209 | _ = try self.meanings.put(variable, Meaning{ .Variable = .{ .usedInFStatement = false } }); 210 | if (self.currentScopeDiff) |scopeDiff| { 211 | _ = try scopeDiff.activeTokens.add(variable); // this $v will become inactive at the next $} 212 | } 213 | } 214 | }, 215 | .F => |fStatement| { 216 | if (self.meanings.get(fStatement.label)) |_| return Error.Duplicate; 217 | try 
self.meanings.put(fStatement.label, Meaning{ .Rule = try self.fromHypothesis(fStatement.tokens) }); 218 | const variable = fStatement.tokens.items[1]; 219 | if (self.meanings.get(variable)) |meaning| { 220 | if (meaning != .Variable) return Error.UnexpectedToken; // $f k l $. where l is something else than variable, 221 | if (meaning.Variable.usedInFStatement) return Error.Duplicate; 222 | } else unreachable; // $f k x $. without $v x $. is already detected in fromHypothesis() call above 223 | try self.meanings.put(variable, .{ .Variable = .{ .usedInFStatement = true } }); 224 | _ = try self.activeHypotheses.append(.{ .label = fStatement.label, .fe = .F }); 225 | if (self.currentScopeDiff) |scopeDiff| { 226 | scopeDiff.nrActiveHypotheses += 1; 227 | _ = try scopeDiff.activeTokens.add(fStatement.label); // this $f will become inactive at the next $} 228 | assert(!scopeDiff.variablesInFStatements.contains(variable)); 229 | _ = try scopeDiff.variablesInFStatements.add(variable); 230 | } 231 | }, 232 | .E => |eStatement| { 233 | if (self.meanings.get(eStatement.label)) |_| return Error.Duplicate; 234 | try self.meanings.put(eStatement.label, Meaning{ .Rule = try self.fromHypothesis(eStatement.tokens) }); 235 | _ = try self.activeHypotheses.append(.{ .label = eStatement.label, .fe = .E }); 236 | if (self.currentScopeDiff) |scopeDiff| { 237 | scopeDiff.nrActiveHypotheses += 1; 238 | _ = try scopeDiff.activeTokens.add(eStatement.label); // this $e will become inactive at the next $} 239 | } 240 | }, 241 | .A => |aStatement| { 242 | if (self.meanings.get(aStatement.label)) |_| return Error.Duplicate; 243 | const rule = try self.inferenceRuleOf(aStatement.tokens); 244 | try self.meanings.put(aStatement.label, Meaning{ .Rule = rule }); 245 | nextItem = .{ .label = aStatement.label, .rule = rule }; 246 | }, 247 | .P => |pStatement| { 248 | if (self.meanings.get(pStatement.label)) |_| return Error.Duplicate; 249 | const rule = try self.inferenceRuleOf(pStatement.tokens); 250 | 
_ = try self.meanings.put(pStatement.label, Meaning{ .Rule = rule }); 251 | const label = pStatement.label; 252 | const proof = statement.deinitLeavingProof(self.allocator); 253 | deinit_statement = false; 254 | nextItem = .{ .label = label, .rule = rule, .proof = proof }; 255 | }, 256 | .D => |dStatement| { 257 | for (dStatement.variables.items) |var1, i| { 258 | if (self.meanings.get(var1)) |meaning| { 259 | if (meaning != .Variable) return Error.UnexpectedToken; // TODO: test 260 | } else return Error.UnexpectedToken; //TODO: test 261 | // ... 262 | for (dStatement.variables.items[i + 1 ..]) |var2| { 263 | // TODO: error if var1 == var2 264 | _ = try self.activeDVPairs.append(.{ .var1 = var1, .var2 = var2 }); 265 | if (self.currentScopeDiff) |scopeDiff| { 266 | scopeDiff.nrActiveDVPairs += 1; 267 | } 268 | } 269 | // ... 270 | } 271 | }, 272 | .BlockOpen => { 273 | try ScopeDiff.push(self); 274 | }, 275 | .BlockClose => { 276 | if (self.currentScopeDiff) |scopeDiff| { 277 | scopeDiff.pop(); 278 | } else return Error.UnexpectedToken; 279 | }, 280 | } 281 | return nextItem; 282 | } 283 | 284 | /// caller does not get ownership 285 | pub fn getRuleMeaningOf(self: *Self, token: Token) !InferenceRule { 286 | switch (self.meanings.get(token) orelse return Error.UnexpectedToken) { //TODO: test 287 | .Rule => |rule| return rule, 288 | else => return Error.UnexpectedToken, // TODO: test 289 | } 290 | } 291 | 292 | /// caller gets ownership of result, needs to hand back to us to be freed by our allocator 293 | fn inferenceRuleOf(self: *Self, tokens: TokenList) !InferenceRule { 294 | const conclusion = try self.expressionOf(tokens); 295 | var it = try self.mandatoryHypothesesOf(conclusion); 296 | defer it.deinit(); 297 | 298 | // TODO: copy in a simpler way? 
299 | var dvPairs = try self.allocator.alloc(DVPair, self.activeDVPairs.items.len); 300 | for (self.activeDVPairs.items) |dvPair, j| { 301 | dvPairs[j] = dvPair; 302 | } 303 | 304 | var hypotheses = try self.allocator.alloc(Hypothesis, it.count()); 305 | var i: usize = 0; 306 | while (it.next()) |feLabel| : (i += 1) { 307 | const hypExpression = self.meanings.get(feLabel.label).?.Rule.conclusion; 308 | hypotheses[i] = .{ 309 | .expression = try copyExpression(self.allocator, hypExpression), 310 | .isF = (feLabel.fe == .F), 311 | }; 312 | } 313 | 314 | return InferenceRule{ 315 | .activeDVPairs = dvPairs, 316 | .hypotheses = hypotheses, 317 | .conclusion = conclusion, 318 | }; 319 | } 320 | 321 | /// caller keeps owning the passed expression 322 | fn mandatoryHypothesesOf(self: *Self, expression: Expression) !MHIterator { 323 | return try MHIterator.init(self, self.allocator, expression); 324 | } 325 | 326 | /// caller gets ownership of result, needs to hand back to us to be freed by our allocator 327 | fn fromHypothesis(self: *Self, tokens: TokenList) !InferenceRule { 328 | const expression = try self.expressionOf(tokens); 329 | return InferenceRule{ 330 | .activeDVPairs = try self.allocator.alloc(DVPair, 0), // TODO: It seems we just can use &[_]DVPair{} ?? 331 | .hypotheses = try self.allocator.alloc(Hypothesis, 0), // TODO: It seems we just can use &[_]Hypothesis{} ?? 
332 | .conclusion = expression, 333 | }; 334 | } 335 | 336 | /// caller gets ownership of result, needs to hand back to us to be freed by our allocator 337 | fn expressionOf(self: *Self, tokens: TokenList) !Expression { 338 | var result = try self.allocator.alloc(CVToken, tokens.items.len); 339 | errdefer self.allocator.free(result); 340 | for (tokens.items) |token, i| { 341 | const cv = self.meanings.get(token) orelse return Error.UnexpectedToken; 342 | result[i] = .{ 343 | .token = token, 344 | .cv = switch (cv) { 345 | .Constant => .C, 346 | .Variable => .V, 347 | else => return Error.UnexpectedToken, 348 | }, 349 | }; 350 | } 351 | return result; 352 | } 353 | }; 354 | 355 | /// A ScopeDiff represents how a nested scope differs from its immediately surrounding scope: 356 | /// which tokens and labels will become inactive again at its ` $} ` statement. 357 | const ScopeDiff = struct { 358 | const Self = @This(); 359 | 360 | iter: *RuleIterator, 361 | optOuter: ?*ScopeDiff, 362 | activeTokens: TokenSet, 363 | variablesInFStatements: TokenSet, 364 | nrActiveHypotheses: usize, 365 | nrActiveDVPairs: usize, 366 | 367 | fn push(iter: *RuleIterator) !void { 368 | const newScopeDiff = try iter.allocator.create(ScopeDiff); 369 | errdefer iter.allocator.destroy(newScopeDiff); 370 | newScopeDiff.* = Self{ 371 | .iter = iter, 372 | .optOuter = iter.currentScopeDiff, 373 | .activeTokens = TokenSet.init(iter.allocator), 374 | .variablesInFStatements = TokenSet.init(iter.allocator), 375 | .nrActiveHypotheses = 0, 376 | .nrActiveDVPairs = 0, 377 | }; 378 | 379 | iter.currentScopeDiff = newScopeDiff; 380 | } 381 | 382 | fn pop(self: *Self) void { 383 | self.iter.currentScopeDiff = self.optOuter; 384 | self.deinitNrActiveDVPairs(); 385 | self.deinitNrActiveHypotheses(); 386 | self.deinitVariableInFStatements(); 387 | self.deinitActiveTokens(); 388 | self.iter.allocator.destroy(self); 389 | } 390 | 391 | fn deinitNrActiveHypotheses(self: *Self) void { 392 | while 
(self.nrActiveHypotheses > 0) : (self.nrActiveHypotheses -= 1) { 393 | _ = self.iter.activeHypotheses.pop(); 394 | } 395 | } 396 | 397 | fn deinitNrActiveDVPairs(self: *Self) void { 398 | while (self.nrActiveDVPairs > 0) : (self.nrActiveDVPairs -= 1) { 399 | _ = self.iter.activeDVPairs.pop(); 400 | } 401 | } 402 | 403 | fn deinitVariableInFStatements(self: *Self) void { 404 | var it = self.variablesInFStatements.iterator(); 405 | while (it.next()) |kv| { 406 | const variable = kv.key_ptr.*; 407 | if (self.iter.meanings.get(variable)) |meaning| { 408 | assert(meaning == .Variable); 409 | assert(meaning.Variable.usedInFStatement == true); 410 | } else unreachable; 411 | _ = self.iter.meanings.put(variable, .{ .Variable = .{ .usedInFStatement = false } }) catch unreachable; // in-place update can't fail? 412 | } 413 | self.variablesInFStatements.deinit(); 414 | } 415 | 416 | fn deinitActiveTokens(self: *Self) void { 417 | var it = self.activeTokens.iterator(); 418 | while (it.next()) |kv| { 419 | if (self.iter.meanings.fetchRemove(kv.key_ptr.*)) |*kv2| { 420 | kv2.value.deinit(self.iter.allocator); 421 | } else unreachable; 422 | } 423 | self.activeTokens.deinit(); 424 | } 425 | }; 426 | 427 | // ---------------------------------------- 428 | // ITERATOR OVER MANDATORY HYPOTHESES 429 | 430 | const MHIterator = struct { 431 | const Self = @This(); 432 | 433 | allocator: Allocator, 434 | mandatoryVariables: TokenSet, 435 | mhs: SinglyLinkedList(FELabel), 436 | len: usize, 437 | 438 | /// expression remains owned by the caller 439 | fn init(iter: *RuleIterator, allocator: Allocator, expression: Expression) !MHIterator { 440 | // initially mandatory variables: those from the given expression 441 | var mandatoryVariables = TokenSet.init(allocator); 442 | for (expression) |cvToken| if (cvToken.cv == .V) { 443 | _ = try mandatoryVariables.add(cvToken.token); 444 | }; 445 | 446 | var mhs = SinglyLinkedList(FELabel){}; 447 | var len: usize = 0; 448 | // loop over 
iter.activeHypotheses, in reverse order 449 | var i: usize = iter.activeHypotheses.items.len; 450 | while (i > 0) { 451 | i -= 1; 452 | const activeHypothesis = iter.activeHypotheses.items[i]; 453 | 454 | switch (activeHypothesis.fe) { 455 | .F => { 456 | const fRule = iter.meanings.get(activeHypothesis.label).?.Rule; 457 | assert(fRule.conclusion.len == 2); 458 | assert(fRule.conclusion[1].cv == .V); 459 | const fVariable = fRule.conclusion[1].token; 460 | if (mandatoryVariables.contains(fVariable)) { 461 | // include every $f for every mandatory variable 462 | var node = try allocator.create(@TypeOf(mhs).Node); 463 | node.* = @TypeOf(mhs).Node{ .data = .{ .label = activeHypothesis.label, .fe = .F } }; 464 | mhs.prepend(node); 465 | len += 1; 466 | } 467 | }, 468 | .E => { 469 | // include every $e 470 | var node = try allocator.create(@TypeOf(mhs).Node); 471 | node.* = @TypeOf(mhs).Node{ .data = .{ .label = activeHypothesis.label, .fe = .E } }; 472 | mhs.prepend(node); 473 | len += 1; 474 | // the variables of the $e hypothesis are also mandatory 475 | const eRule = iter.meanings.get(activeHypothesis.label).?.Rule; 476 | const eExpression = eRule.conclusion; 477 | for (eExpression) |cvToken| { 478 | if (iter.meanings.get(cvToken.token)) |meaning| switch (meaning) { 479 | .Variable => _ = try mandatoryVariables.add(cvToken.token), 480 | else => {}, 481 | }; 482 | } 483 | for (eExpression) |cvToken| if (cvToken.cv == .V) { 484 | _ = try mandatoryVariables.add(cvToken.token); 485 | }; 486 | }, 487 | } 488 | } 489 | 490 | return MHIterator{ .allocator = allocator, .mandatoryVariables = mandatoryVariables, .mhs = mhs, .len = len }; 491 | } 492 | 493 | fn deinit(self: *Self) void { 494 | // loop over all nodes so that all get freed 495 | while (self.next()) |_| {} 496 | self.mandatoryVariables.deinit(); 497 | } 498 | 499 | fn count(self: *Self) usize { 500 | return self.len; 501 | } 502 | 503 | fn next(self: *Self) ?FELabel { 504 | if (self.mhs.popFirst()) |node| { 505 
| defer self.allocator.destroy(node); 506 | self.len -= 1; 507 | return node.data; 508 | } else return null; 509 | } 510 | }; 511 | 512 | const expect = std.testing.expect; 513 | const expectError = std.testing.expectError; 514 | const eqs = tokenize.eqs; 515 | 516 | fn runRuleIterator(buffer: []const u8, allocator: Allocator) !void { 517 | errdefer |err| std.log.info("\nError {} happened...\n", .{err}); 518 | var iter = try RuleIterator.init(allocator); 519 | defer iter.deinit(); 520 | try runRuleIteratorPart(&iter, buffer); 521 | if (iter.currentScopeDiff) |_| return Error.Incomplete; // unclosed $} 522 | } 523 | 524 | fn runRuleIteratorPart(iter: *RuleIterator, buffer: []const u8) !void { 525 | const unused_root_dir = undefined; // no file access done in this test 526 | try iter.addStatementsFrom(unused_root_dir, buffer); 527 | while (try iter.next()) |*_| {} 528 | } 529 | 530 | test "active DVR (found a memory leak)" { 531 | try runRuleIterator( 532 | \\$c wff $. 533 | \\$v P Q $. 534 | \\wp $f wff P $. 535 | \\wq $f wff Q $. 536 | \\$d P Q $. pq.1 $e wff P $. pq $a wff Q $. 
537 | , std.testing.allocator); 538 | } 539 | 540 | test "count number of active $d pairs" { 541 | var iter = try RuleIterator.init(std.testing.allocator); 542 | defer iter.deinit(); 543 | var n: usize = 0; 544 | try runRuleIteratorPart(&iter, "$v a b c d e $."); 545 | 546 | try runRuleIteratorPart(&iter, "$d $."); 547 | try expect(iter.activeDVPairs.items.len == n + 0); 548 | n = iter.activeDVPairs.items.len; 549 | 550 | try runRuleIteratorPart(&iter, "$d a $."); 551 | try expect(iter.activeDVPairs.items.len == n + 0); 552 | n = iter.activeDVPairs.items.len; 553 | 554 | try runRuleIteratorPart(&iter, "$d a a $."); 555 | try expect(iter.activeDVPairs.items.len == n + 1); 556 | n = iter.activeDVPairs.items.len; 557 | 558 | try runRuleIteratorPart(&iter, "$d a b $."); 559 | try expect(iter.activeDVPairs.items.len == n + 1); 560 | n = iter.activeDVPairs.items.len; 561 | 562 | try runRuleIteratorPart(&iter, "$d a b c d e $."); 563 | try expect(iter.activeDVPairs.items.len == n + 10); 564 | n = iter.activeDVPairs.items.len; 565 | } 566 | 567 | test "$d with constant" { 568 | try expectError(Error.UnexpectedToken, runRuleIterator("$c class $. $d class $.", std.testing.allocator)); 569 | } 570 | 571 | test "$d with undeclared token" { 572 | try expectError(Error.UnexpectedToken, runRuleIterator("$d class $.", std.testing.allocator)); 573 | } 574 | 575 | test "use undeclared variable" { 576 | try expectError(Error.UnexpectedToken, runRuleIterator("$c wff $. wph $f wff ph $.", std.testing.allocator)); 577 | } 578 | 579 | test "use undeclared variable" { 580 | try expectError(Error.UnexpectedToken, runRuleIterator("$c wff $. wph $f wff ph $.", std.testing.allocator)); 581 | } 582 | 583 | test "use statement label as a token" { 584 | try expectError(Error.UnexpectedToken, runRuleIterator("$c wff ph $. wph $f wff ph $. wxx $e wph $.", std.testing.allocator)); 585 | } 586 | 587 | test "simplest correct $f" { 588 | try runRuleIterator("$c wff $. $v ph $. 
wph $f wff ph $.", std.testing.allocator); 589 | } 590 | 591 | test "tokenlist to expression" { 592 | var iter = try RuleIterator.init(std.testing.allocator); 593 | defer iter.deinit(); 594 | try iter.meanings.put("wff", MeaningType.Constant); 595 | try iter.meanings.put("ph", Meaning{ .Variable = .{ .usedInFStatement = false } }); 596 | var tokens = TokenList.init(std.testing.allocator); 597 | defer tokens.deinit(); 598 | try tokens.append("wff"); 599 | try tokens.append("ph"); 600 | try expect(eqs(tokens, &[_]Token{ "wff", "ph" })); 601 | 602 | const expression = try iter.expressionOf(tokens); 603 | defer std.testing.allocator.free(expression); 604 | 605 | try expect(expression.len == 2); 606 | try expect(eq(expression[0].token, "wff")); 607 | try expect(expression[0].cv == .C); 608 | try expect(eq(expression[1].token, "ph")); 609 | try expect(expression[1].cv == .V); 610 | } 611 | 612 | test "no duplicate variable declarations" { 613 | try expectError(Error.Duplicate, runRuleIterator("$c ca cb $. $v v $. cav $f ca v $. cbv $f cb v $.", std.testing.allocator)); 614 | } 615 | 616 | test "no duplicate variable declarations, in nested scope (2)" { 617 | try expectError(Error.Duplicate, runRuleIterator("$c ca cb $. ${ $v v $. cav $f ca v $. cbv $f cb v $. $}", std.testing.allocator)); 618 | } 619 | 620 | test "duplicate variable declarations, in nested scope (2)" { 621 | try runRuleIterator("$c ca cb $. $v v $. ${ cav $f ca v $. $} cbv $f cb v $.", std.testing.allocator); 622 | } 623 | 624 | test "$v in nested scope" { 625 | try runRuleIterator("$c ca $. ${ $v v $. $}", std.testing.allocator); 626 | } 627 | 628 | test "$v in nested scope, used in $a (use-after-free reproduction)" { 629 | var iter = try RuleIterator.init(std.testing.allocator); 630 | defer iter.deinit(); 631 | 632 | try runRuleIteratorPart(&iter, "$c class setvar $. ${ $v x $. vx.cv $f setvar x $. 
cv $a class x $."); 633 | const cv: InferenceRule = iter.meanings.get("cv").?.Rule; 634 | const vx_cv: Hypothesis = cv.hypotheses[0]; 635 | const x: Token = vx_cv.expression[1].token; 636 | try expect(eq(x, "x")); 637 | 638 | try runRuleIteratorPart(&iter, "$}"); 639 | _ = iter.meanings.get("cv").?.Rule; 640 | _ = cv.hypotheses[0]; 641 | const x2: Token = vx_cv.expression[1].token; 642 | try expect(eq(x2, "x")); 643 | } 644 | 645 | test "$f in nested scope (1)" { 646 | try runRuleIterator("$c ca $. ${ $v v $. cav $f ca v $. $}", std.testing.allocator); 647 | } 648 | 649 | test "$f in nested scope (2)" { 650 | try runRuleIterator("$c ca $. $v v $. ${ cav $f ca v $. $}", std.testing.allocator); 651 | } 652 | 653 | test "$f using two constants" { 654 | try expectError(Error.UnexpectedToken, runRuleIterator("$c wff $. wwff $f wff wff $.", std.testing.allocator)); 655 | } 656 | 657 | test "$f using undeclared variable" { 658 | try expectError(Error.UnexpectedToken, runRuleIterator("$c wff $. wps $f wff ps $.", std.testing.allocator)); 659 | } 660 | 661 | test "token is either constant or variable, not both" { 662 | try expectError(Error.Duplicate, runRuleIterator("$c wff $. $v wff $.", std.testing.allocator)); 663 | } 664 | 665 | test "no constant allowed in nested scope" { 666 | try expectError(Error.UnexpectedToken, runRuleIterator("${ $c wff $. $}", std.testing.allocator)); 667 | } 668 | 669 | test "nested variable" { 670 | try runRuleIterator("$v ph $. ${ $v ps $. $} $v ps $.", std.testing.allocator); 671 | } 672 | 673 | test "nested duplicate variable" { 674 | try expectError(Error.Duplicate, runRuleIterator("$v ph $. ${ $v ph $. 
$}", std.testing.allocator)); 675 | } 676 | 677 | test "unopened block" { 678 | try expectError(Error.UnexpectedToken, runRuleIterator("$}", std.testing.allocator)); 679 | } 680 | 681 | test "unclosed block" { 682 | try expectError(Error.Incomplete, runRuleIterator("${", std.testing.allocator)); 683 | } 684 | 685 | test "multiple blocks" { 686 | try runRuleIterator("${ $} ${ ${ $} $}", std.testing.allocator); 687 | } 688 | 689 | test "duplicate variable" { 690 | try expectError(Error.Duplicate, runRuleIterator("$v ph ps ph $.", std.testing.allocator)); 691 | } 692 | 693 | test "single variable" { 694 | try runRuleIterator("$v ph $.", std.testing.allocator); 695 | } 696 | 697 | test "duplicate constant" { 698 | try expectError(Error.Duplicate, runRuleIterator("$c wff wff $.", std.testing.allocator)); 699 | } 700 | 701 | fn tokenListOf(buffer: []const u8) !TokenList { 702 | var it = @import("parse.zig")._StatementIterator_init(buffer); 703 | var result = TokenList.init(std.testing.allocator); 704 | while (true) { 705 | if (try it.nextToken()) |token| { 706 | _ = try result.append(token); 707 | } else break; 708 | } 709 | return result; 710 | } 711 | 712 | fn expressionOf(iter: *RuleIterator, buffer: []const u8) !Expression { 713 | var t = try tokenListOf(buffer); 714 | defer t.deinit(); 715 | return try iter.expressionOf(t); 716 | } 717 | 718 | test "iterate over no mandatory hypotheses" { 719 | var iter = try RuleIterator.init(std.testing.allocator); 720 | defer iter.deinit(); 721 | try runRuleIteratorPart(&iter, "$c T $."); 722 | 723 | var expression = try expressionOf(&iter, "T"); 724 | defer std.testing.allocator.free(expression); 725 | var it = try iter.mandatoryHypothesesOf(expression); 726 | defer it.deinit(); 727 | assert(it.count() == 0); 728 | try expect(it.next() == null); 729 | } 730 | 731 | test "iterate over single $f hypothesis" { 732 | var iter = try RuleIterator.init(std.testing.allocator); 733 | defer iter.deinit(); 734 | try 
runRuleIteratorPart(&iter, "$c wff |- $. $v ph ps $. wph $f wff ph $. wps $f wff ps $."); 735 | 736 | var expression = try expressionOf(&iter, "|- ph"); 737 | defer std.testing.allocator.free(expression); 738 | var it = try iter.mandatoryHypothesesOf(expression); 739 | defer it.deinit(); 740 | var item: ?FELabel = null; 741 | assert(it.count() == 1); 742 | 743 | item = it.next(); 744 | try expect(eq(item.?.label, "wph")); 745 | try expect(item.?.fe == .F); 746 | 747 | try expect(it.next() == null); 748 | } 749 | 750 | test "iterate with $e hypothesis" { 751 | var iter = try RuleIterator.init(std.testing.allocator); 752 | defer iter.deinit(); 753 | try runRuleIteratorPart(&iter, "$c wff |- $. $v ph ps ta $. wta $f wff ta $. wph $f wff ph $. hyp $e wff ta $."); 754 | 755 | var expression = try expressionOf(&iter, "|- ph"); 756 | defer std.testing.allocator.free(expression); 757 | var it = try iter.mandatoryHypothesesOf(expression); 758 | defer it.deinit(); 759 | var item: ?FELabel = null; 760 | assert(it.count() == 3); 761 | 762 | item = it.next(); 763 | try expect(eq(item.?.label, "wta")); 764 | try expect(item.?.fe == .F); 765 | 766 | item = it.next(); 767 | try expect(eq(item.?.label, "wph")); 768 | try expect(item.?.fe == .F); 769 | assert(it.count() == 1); 770 | 771 | item = it.next(); 772 | try expect(eq(item.?.label, "hyp")); 773 | try expect(item.?.fe == .E); 774 | 775 | try expect(it.next() == null); 776 | } 777 | 778 | test "inference rule with $f and $e mandatory hypotheses" { 779 | var iter = try RuleIterator.init(std.testing.allocator); 780 | defer iter.deinit(); 781 | try runRuleIteratorPart(&iter, "$c wff |- $. $v ph ps ta $. wta $f wff ta $. wph $f wff ph $. 
/// Program entry point: verifies the single .mm file named on the command line.
/// Exits with a non-zero status (via the returned error) on any failure.
pub fn main() !void {
    // Back all allocations by an arena so everything is released in one sweep
    // at process exit; individual free() calls become cheap no-ops.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    // BUG FIX: hand out the arena's own allocator. The previous code used
    // `arena.child_allocator`, which is the *backing* allocator (here the raw
    // page allocator): every allocation bypassed the arena, `arena.deinit()`
    // tracked nothing, and each small allocation cost at least a whole page.
    const allocator = arena.allocator();

    const fileName = fileName: {
        var argIter = try std.process.argsWithAllocator(allocator);
        _ = argIter.next().?; // skip command name, is always present
        break :fileName (argIter.next() orelse return error.SingleCommandLineArgumentExpected);
    };

    _ = verify.verifyFile(allocator, std.fs.cwd(), fileName) catch |err| {
        // ...some nice error reporting
        return err;
    };
}
/// Pull-based iterator that turns a raw .mm buffer into `Statement` values.
/// Handles `$( ... $)` comments (skipped) and `$[ file $]` includes (expanded
/// via a nested iterator reading from `dir`).
pub const StatementIterator = struct {
    allocator: Allocator,
    // Directory that include-file names are resolved against.
    dir: std.fs.Dir,
    tokens: TokenIterator,
    // One-statement pushback buffer: set when a statement was fully parsed but
    // an error (UnexpectedLabel) must be reported first; returned on the next call.
    optStatement: ?*Statement = null,
    // Active iterator over an included file's buffer, or null when reading `tokens`.
    nestedIterator: ?*StatementIterator = null,

    /// Creates an iterator over `buffer`; `dir` is used only for `$[ ... $]` includes.
    pub fn init(allocator: Allocator, dir: std.fs.Dir, buffer: []const u8) StatementIterator {
        return StatementIterator{ .allocator = allocator, .dir = dir, .tokens = TokenIterator{ .buffer = buffer } };
    }

    /// Returns the next parsed statement, or null at end of input.
    /// On `Error.UnexpectedLabel` the offending statement is stashed in
    /// `optStatement` and handed out by the *following* call, so iteration can continue.
    /// Caller owns the returned `*Statement` (free with `Statement.deinit`).
    pub fn next(self: *StatementIterator) (Error || Allocator.Error || std.os.ReadError)!?*Statement {
        // return any statement detected in the previous call
        if (self.optStatement) |s| {
            self.optStatement = null;
            return s;
        }
        // get next token
        var token: Token = undefined;
        while (true) {
            // find token in nested iterator
            if (self.nestedIterator) |it| {
                const optStatement = try it.next();
                if (optStatement) |stat| return stat;
                // end of nested iterator, clean up
                self.allocator.free(it.tokens.buffer);
                self.allocator.destroy(it);
                self.nestedIterator = null;
            }

            // find token in myself
            token = (try self.nextToken()) orelse {
                // we have seen the last Token, so we have seen the last Statement, end of iteration
                return null;
            };
            if (!eq(token, "$[")) break;

            // create nested iterator for include file
            const f = try self.nextUntil("$]");
            // NOTE(review): this early return happens before the `defer f.deinit()`
            // below is registered, so `f` appears to leak on this path — confirm.
            if (f.items.len != 1) return Error.IncorrectFileName; // TODO: test
            defer f.deinit();
            const include_file_name = f.items[0];

            const buffer = try readBuffer(self.allocator, self.dir, include_file_name);
            self.nestedIterator = try self.allocator.create(StatementIterator);
            self.nestedIterator.?.* = StatementIterator.init(self.allocator, self.dir, buffer);
            // ...and go on to immediately use this new nested iterator
        }
        // if the token is a label, read one more token
        var optLabel: ?Token = null;
        while (optLabel == null) {
            if (token[0] == '$') break;
            optLabel = token;
            token = (try self.nextToken()) orelse return Error.Incomplete;
        }
        if (token[0] != '$') return Error.UnexpectedToken;
        if (token.len != 2) return Error.IllegalToken;
        var result: *Statement = undefined;
        switch (token[1]) { // handle the $x command
            'c' => {
                result = try self.statement(.{
                    .C = .{ .constants = try self.nextUntil("$.") },
                });
            },
            'v' => {
                result = try self.statement(.{
                    .V = .{ .variables = try self.nextUntil("$.") },
                });
            },
            'f' => {
                // `defer optLabel = null` marks the label as consumed, so the
                // UnexpectedLabel check after the switch does not fire.
                defer optLabel = null;
                const label = optLabel orelse return Error.MissingLabel;
                var tokens = try self.nextUntil("$.");
                errdefer tokens.deinit();
                // a $f statement is exactly "<constant> <variable>"
                if (tokens.items.len < 2) return Error.Incomplete;
                if (tokens.items.len > 2) return Error.UnexpectedToken;
                result = try self.statement(.{
                    .F = .{
                        .label = label,
                        .tokens = tokens,
                    },
                });
            },
            'e' => {
                defer optLabel = null;
                result = try self.statement(.{
                    .E = .{
                        .label = optLabel orelse return Error.MissingLabel,
                        .tokens = try self.nextUntil("$."),
                    },
                });
            },
            'd' => {
                result = try self.statement(.{
                    .D = .{ .variables = try self.nextUntil("$.") },
                });
            },
            'a' => {
                defer optLabel = null;
                result = try self.statement(.{
                    .A = .{
                        .label = optLabel orelse return Error.MissingLabel,
                        .tokens = try self.nextUntil("$."),
                    },
                });
            },
            'p' => {
                defer optLabel = null;
                result = try self.statement(.{
                    .P = .{
                        .label = optLabel orelse return Error.MissingLabel,
                        .tokens = try self.nextUntil("$="),
                        .proof = try self.nextUntil("$."),
                    },
                });
            },
            '{' => result = try self.statement(.{ .BlockOpen = .{} }),
            '}' => result = try self.statement(.{ .BlockClose = .{} }),
            else => return Error.IllegalToken,
        }
        // a label that no branch consumed (e.g. before $c, $d, ${) is an error;
        // stash the parsed statement so the next call can still return it
        if (optLabel) |_| {
            self.optStatement = result;
            return Error.UnexpectedLabel;
        }
        return result;
    }

    /// Returns the next token, transparently skipping `$( ... $)` comments.
    pub fn nextToken(self: *StatementIterator) !?Token {
        while (true) {
            const result = try self.tokens.next();
            if (result) |token| {
                if (eq(token, "$(")) {
                    (try self.nextUntil("$)")).deinit();
                    continue;
                }
            }
            return result;
        }
    }

    /// Collects tokens up to (but excluding) `terminator`.
    /// Returns Error.Incomplete if input ends before the terminator is seen.
    /// Caller owns the returned list.
    fn nextUntil(self: *StatementIterator, terminator: Token) (Error || Allocator.Error)!TokenList {
        var result = TokenList.init(self.allocator);
        while (true) {
            const token = (try self.nextToken()) orelse return Error.Incomplete;
            if (eq(token, terminator)) break;
            _ = try result.append(token);
        }
        return result;
    }

    /// Copies `s` to a freshly allocated Statement; caller owns the result.
    fn statement(self: *StatementIterator, s: Statement) !*Statement {
        const result = try self.allocator.create(Statement);
        result.* = s;
        return result;
    }
};
"wff", "|-" })); 236 | } 237 | }); 238 | try expect((try statements.next()) == null); 239 | try expect((try statements.next()) == null); 240 | } 241 | 242 | test "parse file only including empty include file" { 243 | var testFS = TestFS.init(); 244 | defer testFS.deinit(); 245 | try testFS.writeFile("empty.mm", ""); 246 | 247 | var statements = StatementIterator.init(std.testing.allocator, testFS.tmpDir.dir, "$[ empty.mm $]"); 248 | try expect((try statements.next()) == null); 249 | try expect((try statements.next()) == null); 250 | } 251 | 252 | test "include non-existing file" { 253 | var testFS = TestFS.init(); 254 | defer testFS.deinit(); 255 | 256 | var statements = StatementIterator.init(std.testing.allocator, testFS.tmpDir.dir, "$[ nonexisting.mm $]"); 257 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.IncorrectFileName); 258 | try expect((try statements.next()) == null); 259 | try expect((try statements.next()) == null); 260 | } 261 | 262 | test "include without file name" { 263 | var statements = _StatementIterator_init("$[ $]"); 264 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.IncorrectFileName); 265 | try expect((try statements.next()) == null); 266 | try expect((try statements.next()) == null); 267 | } 268 | 269 | test "$c with label" { 270 | var statements = _StatementIterator_init("c $c T $."); 271 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.UnexpectedLabel); 272 | _ = try forNext(&statements, struct { 273 | fn do(s: anytype) !void { 274 | try expect(s != null); 275 | } 276 | }); 277 | try expect((try statements.next()) == null); 278 | try expect((try statements.next()) == null); 279 | } 280 | 281 | test "$v with label" { 282 | var statements = _StatementIterator_init("v $v a $."); 283 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.UnexpectedLabel); 284 | _ = try forNext(&statements, struct { 285 | fn do(s: anytype) !void { 286 | 
try expect(s != null); 287 | } 288 | }); 289 | try expect((try statements.next()) == null); 290 | try expect((try statements.next()) == null); 291 | } 292 | 293 | test "$f without label" { 294 | var statements = _StatementIterator_init("$f"); 295 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.MissingLabel); 296 | try expect((try statements.next()) == null); 297 | try expect((try statements.next()) == null); 298 | } 299 | 300 | test "$e without label" { 301 | var statements = _StatementIterator_init("$e"); 302 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.MissingLabel); 303 | try expect((try statements.next()) == null); 304 | try expect((try statements.next()) == null); 305 | } 306 | 307 | test "$d with label" { 308 | var statements = _StatementIterator_init("dxy $d x y $."); 309 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.UnexpectedLabel); 310 | _ = try forNext(&statements, struct { 311 | fn do(s: anytype) !void { 312 | try expect(s != null); 313 | } 314 | }); 315 | try expect((try statements.next()) == null); 316 | try expect((try statements.next()) == null); 317 | } 318 | 319 | test "$a without label" { 320 | var statements = _StatementIterator_init("$a"); 321 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.MissingLabel); 322 | try expect((try statements.next()) == null); 323 | try expect((try statements.next()) == null); 324 | } 325 | 326 | test "$p without label" { 327 | var statements = _StatementIterator_init("$p"); 328 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.MissingLabel); 329 | try expect((try statements.next()) == null); 330 | try expect((try statements.next()) == null); 331 | } 332 | 333 | test "${ with label" { 334 | var statements = _StatementIterator_init("block ${"); 335 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.UnexpectedLabel); 336 | _ = try forNext(&statements, 
struct { 337 | fn do(s: anytype) !void { 338 | try expect(s != null); 339 | } 340 | }); 341 | try expect((try statements.next()) == null); 342 | try expect((try statements.next()) == null); 343 | } 344 | 345 | test "$} with label" { 346 | var statements = _StatementIterator_init("endblock $}"); 347 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.UnexpectedLabel); 348 | _ = try forNext(&statements, struct { 349 | fn do(s: anytype) !void { 350 | try expect(s != null); 351 | } 352 | }); 353 | try expect((try statements.next()) == null); 354 | try expect((try statements.next()) == null); 355 | } 356 | 357 | test "unknown statement type (token skipped)" { 358 | var statements = _StatementIterator_init("x $x"); 359 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.IllegalToken); 360 | try expect((try statements.next()) == null); 361 | try expect((try statements.next()) == null); 362 | } 363 | 364 | test "too short $f statement" { 365 | var statements = _StatementIterator_init("w $f wff $."); 366 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.Incomplete); 367 | try expect((try statements.next()) == null); 368 | try expect((try statements.next()) == null); 369 | } 370 | 371 | test "`$xy` token after label" { 372 | var statements = _StatementIterator_init("$xy"); 373 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.IllegalToken); 374 | try expect((try statements.next()) == null); 375 | try expect((try statements.next()) == null); 376 | } 377 | 378 | test "`$xy` token after label" { 379 | var statements = _StatementIterator_init("aLabel $xy"); 380 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.IllegalToken); 381 | try expect((try statements.next()) == null); 382 | try expect((try statements.next()) == null); 383 | } 384 | 385 | test "`$` token without label" { 386 | var statements = _StatementIterator_init("$"); 387 | if 
(statements.next()) |_| unreachable else |err| try expect(err == Error.IllegalToken); 388 | try expect((try statements.next()) == null); 389 | try expect((try statements.next()) == null); 390 | } 391 | 392 | test "`$` token after label" { 393 | var statements = _StatementIterator_init("aLabel $"); 394 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.IllegalToken); 395 | try expect((try statements.next()) == null); 396 | try expect((try statements.next()) == null); 397 | } 398 | 399 | test "non-$ token after label" { 400 | var statements = _StatementIterator_init("aLabel nonCommand"); 401 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.UnexpectedToken); 402 | try expect((try statements.next()) == null); 403 | try expect((try statements.next()) == null); 404 | } 405 | 406 | test "parse $p declaration" { 407 | var statements = _StatementIterator_init("idwffph $p wff ph $= ? $."); 408 | _ = try forNext(&statements, struct { 409 | fn do(s: anytype) !void { 410 | try expect(eqs(s.?.P.tokens, &[_]Token{ "wff", "ph" })); 411 | try expect(eqs(s.?.P.proof, &[_]Token{"?"})); 412 | } 413 | }); 414 | try expect((try statements.next()) == null); 415 | try expect((try statements.next()) == null); 416 | } 417 | 418 | test "parse $f declaration" { 419 | var statements = _StatementIterator_init("wph $f wff ph $."); 420 | _ = try forNext(&statements, struct { 421 | fn do(s: anytype) !void { 422 | try expect(eq(s.?.F.label, "wph")); 423 | try expect(eqs(s.?.F.tokens, &[_]Token{ "wff", "ph" })); 424 | } 425 | }); 426 | try expect((try statements.next()) == null); 427 | try expect((try statements.next()) == null); 428 | } 429 | 430 | test "check error for label on $d" { 431 | var statements = _StatementIterator_init("xfreeinA $d A x $."); 432 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.UnexpectedLabel); 433 | _ = try forNext(&statements, struct { 434 | fn do(s: anytype) !void { 435 | try 
expect(eqs(s.?.D.variables, &[_]Token{ "A", "x" })); 436 | } 437 | }); 438 | try expect((try statements.next()) == null); 439 | try expect((try statements.next()) == null); 440 | } 441 | 442 | test "check error for unknown command" { 443 | var statements = _StatementIterator_init("$Q"); 444 | if (statements.next()) |_| unreachable else |err| try expect(err == Error.IllegalToken); 445 | try expect((try statements.next()) == null); 446 | try expect((try statements.next()) == null); 447 | } 448 | 449 | test "parse constant declaration" { 450 | var statements = _StatementIterator_init("$c wff |- $."); 451 | _ = try forNext(&statements, struct { 452 | fn do(s: anytype) !void { 453 | try expect(eqs(s.?.C.constants, &[_]Token{ "wff", "|-" })); 454 | } 455 | }); 456 | try expect((try statements.next()) == null); 457 | try expect((try statements.next()) == null); 458 | } 459 | 460 | test "parse comment, also including $[" { 461 | var statements = _StatementIterator_init("$( a $[ b.mm $] c $)"); 462 | try expect((try statements.next()) == null); 463 | try expect((try statements.next()) == null); 464 | } 465 | 466 | test "parse empty file" { 467 | var statements = _StatementIterator_init(""); 468 | try expect((try statements.next()) == null); 469 | try expect((try statements.next()) == null); 470 | } 471 | -------------------------------------------------------------------------------- /src/prove.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Allocator = std.mem.Allocator; 3 | const assert = std.debug.assert; 4 | 5 | const ArrayList = std.ArrayList; 6 | 7 | const errors = @import("errors.zig"); 8 | const Error = errors.Error; 9 | 10 | const tokenize = @import("tokenize.zig"); 11 | const Token = tokenize.Token; 12 | const TokenList = tokenize.TokenList; 13 | const TokenMap = tokenize.TokenMap; 14 | const eq = tokenize.eq; 15 | 16 | const compose = @import("compose.zig"); 17 | const Expression = 
/// Maps a variable token to the expression substituted for it.
const Substitution = TokenMap(Expression);

/// RPN evaluation stack for Metamath proofs: hypotheses and previously proven
/// expressions are pushed, inference rules pop their hypotheses and push their
/// substituted conclusion.
const ProofStack = struct {
    const Self = @This();
    allocator: Allocator,
    // The stack proper; `top()` is the last item.
    expressions: std.ArrayList(Expression),
    /// we collect these and clean them up at the very end
    arena: std.heap.ArenaAllocator,
    // Distinct-variable restrictions accumulated while applying rules;
    // ownership passes to the client (see deinit).
    dvPairs: std.ArrayList(DVPair),

    fn init(allocator: Allocator) Self {
        return ProofStack{
            .allocator = allocator,
            .expressions = std.ArrayList(Expression).init(allocator),
            .arena = std.heap.ArenaAllocator.init(allocator),
            .dvPairs = std.ArrayList(DVPair).init(allocator),
        };
    }
    fn deinit(self: *Self) void {
        self.expressions.deinit();
        self.arena.deinit();
        // no self.dvPairs.deinit(), because client keeps ownership
    }

    fn isEmpty(self: *Self) bool {
        return self.expressions.items.len == 0;
    }
    fn isSingle(self: *Self) bool {
        return self.expressions.items.len == 1;
    }
    /// Returns (does not pop) the top expression; asserts a non-empty stack via indexing.
    fn top(self: *Self) Expression {
        return self.expressions.items[self.expressions.items.len - 1];
    }

    fn pushExpression(self: *Self, expression: Expression) !void {
        try self.expressions.append(expression);
    }

    /// Applies `rule`: pops its hypotheses off the stack, builds the $f
    /// substitution, checks every $e hypothesis under that substitution,
    /// records the rule's DVRs, and pushes the substituted conclusion
    /// (allocated in `self.arena`, freed only at deinit).
    fn pushInferenceRule(self: *Self, rule: InferenceRule) !void {
        const nrHyp = rule.hypotheses.len;

        // pop hypotheses (in reverse, so hypotheses[0] was pushed first)
        var hypotheses = try self.allocator.alloc(Expression, nrHyp);
        defer self.allocator.free(hypotheses);
        {
            var j: usize = nrHyp;
            while (j > 0) : (j -= 1) {
                hypotheses[j - 1] = self.expressions.popOrNull() orelse return Error.Incomplete; // TODO: test
            }
        }

        // build substitution based on $f
        var substitution = Substitution.init(self.allocator);
        defer substitution.deinit();
        {
            var i: usize = 0;
            while (i < nrHyp) : (i += 1) {
                const hyp = rule.hypotheses[i];
                if (hyp.isF) {
                    // a $f hypothesis is exactly "<constant> <variable>"
                    std.debug.assert(hyp.expression.len == 2);
                    if (hypotheses[i].len == 0) return Error.HypothesisMismatch; // TODO: test
                    // the popped expression must start with the same constant (its type)
                    if (!eq(hyp.expression[0].token, hypotheses[i][0].token)) return Error.HypothesisMismatch; // TODO: test
                    // variable maps to the remainder of the popped expression
                    _ = try substitution.put(hyp.expression[1].token, hypotheses[i][1..]);
                }
            }
        }

        // check substitution for $e
        {
            var i: usize = 0;
            while (i < nrHyp) : (i += 1) {
                const hyp = rule.hypotheses[i];
                if (!hyp.isF) {
                    const substitutedHyp = try substitute(hyp.expression, substitution, self.allocator);
                    defer self.allocator.free(substitutedHyp);
                    if (!eqExpr(substitutedHyp, hypotheses[i])) return Error.HypothesisMismatch;
                }
            }
        }

        // add distinct variable restrictions imposed by this inference rule
        for (rule.activeDVPairs) |dvPair| {
            // get the 'distinct expressions', skipping any optional DVRs
            const expr1 = if (substitution.get(dvPair.var1)) |e| e else continue;
            const expr2 = if (substitution.get(dvPair.var2)) |e| e else continue;
            // create DVRs for every pair of variables in the two expressions
            for (expr1) |cvToken1| if (cvToken1.cv == .V) {
                for (expr2) |cvToken2| if (cvToken2.cv == .V) {
                    // note: don't try to check for duplicates, that is probably not worth it
                    try self.dvPairs.append(.{ .var1 = cvToken1.token, .var2 = cvToken2.token });
                };
            };
        }

        const expression = try substitute(rule.conclusion, substitution, self.arena.allocator());
        try self.pushExpression(expression);
    }
};
/// Result of running a proof: the proven expression plus the distinct-variable
/// restrictions the proof relies on. Free with `deinit`.
pub const RunProofResult = struct {
    expression: Expression,
    dvPairs: std.ArrayList(DVPair),

    pub fn deinit(self: *@This(), allocator: Allocator) void {
        allocator.free(self.expression);
        self.dvPairs.deinit();
    }
};

/// Executes `proof` (either uncompressed label-by-label, or the compressed
/// `( labels ) AB...Z` format) against the given mandatory `hypotheses`,
/// resolving labels through `ruleIterator`.
/// caller becomes owner of allocated result
pub fn runProof(proof: TokenList, hypotheses: []Hypothesis, ruleIterator: *RuleIterator, allocator: Allocator) !RunProofResult {
    // Parser state: Initial until the first token decides the proof format;
    // CompressedPart1 collects the labels between "(" and ")",
    // CompressedPart2 decodes the letter-encoded numbers.
    const Modes = enum { Initial, Uncompressed, CompressedPart1, CompressedPart2 };
    var mode = Modes.Initial;

    var proofStack = ProofStack.init(allocator);
    defer proofStack.deinit();
    // Number being decoded from 'A'..'Y' characters; 0 means "no digits yet".
    var compressedNumber: usize = 0;
    var compressedLabels = TokenList.init(allocator);
    defer compressedLabels.deinit();
    // Expressions marked with 'Z' for later back-reference.
    var markedExpressions = std.ArrayList(Expression).init(allocator);
    defer markedExpressions.deinit();

    for (proof.items) |t| {
        // The Initial state only *classifies* the first token; the inner loop
        // lets the same token be handled again by the state it selected.
        var reprocessCurrentToken = false;
        while (true) {
            switch (mode) {
                .Initial => {
                    if (eq(t, "(")) {
                        mode = .CompressedPart1;
                    } else {
                        mode = .Uncompressed;
                        reprocessCurrentToken = true;
                    }
                },
                .Uncompressed => {
                    try proofStack.pushInferenceRule(try ruleIterator.getRuleMeaningOf(t));
                },
                .CompressedPart1 => {
                    if (eq(t, ")")) {
                        mode = .CompressedPart2;
                    } else {
                        try compressedLabels.append(t);
                    }
                },
                .CompressedPart2 => {
                    for (t) |c| {
                        // handle every character of t, building numbers:
                        // 'U'..'Y' are high digits (base 5), 'A'..'T' is the
                        // final digit (base 20) that completes a number.
                        if ('U' <= c and c <= 'Y') {
                            compressedNumber = compressedNumber * 5 + (c - 'U' + 1);
                            // number is still incomplete
                        } else if ('A' <= c and c <= 'T') {
                            compressedNumber = compressedNumber * 20 + (c - 'A' + 1);
                            // we have a complete number now; it indexes, in
                            // order: mandatory hypotheses, the labels between
                            // parentheses, then 'Z'-marked expressions.
                            brk: {
                                var i = compressedNumber;
                                std.debug.assert(i > 0);
                                i -= 1;
                                // hypotheses...
                                if (i < hypotheses.len) {
                                    break :brk try proofStack.pushExpression(hypotheses[i].expression);
                                }
                                i -= hypotheses.len;
                                // ...labels between parentheses...
                                if (i < compressedLabels.items.len) {
                                    break :brk try proofStack.pushInferenceRule(try ruleIterator.getRuleMeaningOf(compressedLabels.items[i]));
                                }
                                i -= compressedLabels.items.len;
                                // ...expressions marked with 'Z'...
                                if (i < markedExpressions.items.len) {
                                    break :brk try proofStack.pushExpression(markedExpressions.items[i]);
                                }
                                // ...or larger than expected.
                                return Error.NumberTooLarge; // TODO: test
                            }
                            compressedNumber = 0;
                        } else if (c == 'Z') {
                            // special case: not a number, but a back reference
                            if (compressedNumber != 0) return Error.NumberIncomplete; // 'Z' in the middle of a number, TODO: test
                            if (proofStack.isEmpty()) return Error.NumberZEarly; // 'Z' with empty proof stack, at the very beginning, TODO: test
                            try markedExpressions.append(proofStack.top());
                        }
                    }
                },
            }
            if (!reprocessCurrentToken) break;
            reprocessCurrentToken = false;
        }
    }
    // proof ended inside "( ... )" without the closing parenthesis
    if (mode == .CompressedPart1) return Error.Incomplete; // TODO: test

    // a valid proof leaves exactly one expression: the proven statement
    if (proofStack.isEmpty()) return Error.Incomplete; // TODO: test
    if (!proofStack.isSingle()) return Error.UnexpectedToken; // TODO: test; better error code?

    // copy the conclusion out of the stack's arena (which deinit destroys);
    // dvPairs ownership transfers to the result (ProofStack.deinit leaves it alone)
    return RunProofResult{
        .expression = try copyExpression(allocator, proofStack.top()),
        .dvPairs = proofStack.dvPairs,
    };
}
/// Reads the entire contents of `file_name` (resolved against `dir`) into a
/// freshly allocated buffer, sized from the file's stat() size.
/// Caller owns the result, which was allocated with the provided allocator.
/// Returns Error.IncorrectFileName when the file cannot be opened; other
/// stat/read failures propagate unchanged.
pub fn readBuffer(allocator: Allocator, dir: std.fs.Dir, file_name: []const u8) ![]const u8 {
    const file = dir.openFile(file_name, .{}) catch return Error.IncorrectFileName;
    defer file.close();
    const size = (try file.stat()).size;
    const buffer = try allocator.alloc(u8, size);
    // BUG FIX: without this errdefer, the buffer leaked when readAll failed.
    errdefer allocator.free(buffer);
    _ = try file.readAll(buffer);
    return buffer;
}
Self { 15 | return Self{ .map = TokenMap(void).init(allocator) }; 16 | } 17 | 18 | pub fn deinit(self: *Self) void { 19 | self.map.deinit(); 20 | } 21 | 22 | pub fn add(self: *Self, token: Token) !bool { 23 | if (self.map.contains(token)) return true; // already present 24 | try self.map.put(token, undefined); 25 | return false; 26 | } 27 | 28 | pub fn remove(self: *Self, token: Token) void { 29 | _ = self.map.remove(token); 30 | } 31 | 32 | pub fn contains(self: *Self, token: Token) bool { 33 | return self.map.contains(token); 34 | } 35 | 36 | /// The iterator's next() returns a ?TokenMap(void).KV, 37 | /// of which only the .key field must be used. 38 | pub fn iterator(self: *Self) TokenMap(void).Iterator { 39 | return self.map.iterator(); 40 | } 41 | }; 42 | pub fn TokenMap(comptime T: type) type { 43 | return std.StringHashMap(T); // key is Token == []const u8 44 | } 45 | 46 | pub const TokenIterator = struct { 47 | buffer: Token, 48 | index: u64 = 0, 49 | optError: ?Error = null, 50 | 51 | pub fn next(self: *TokenIterator) !?Token { 52 | // return any error detected in the previous call 53 | if (self.optError) |err| { 54 | self.optError = null; 55 | return err; 56 | } 57 | var i = self.index; 58 | var j = i; 59 | // invariant: self.buffer[i..j] is the current token so far 60 | while (true) { 61 | // where are we? 
62 | const atEnd = (j >= self.buffer.len); 63 | var afterToken = true; 64 | if (!atEnd) { 65 | const c = self.buffer[j]; 66 | afterToken = c <= 32; 67 | if ((c < 32 and !(c == 9 or c == 10 or c == 12 or c == 13)) or c >= 127) { 68 | // treat as whitespace now, return the error next time 69 | self.optError = Error.IllegalCharacter; 70 | afterToken = true; 71 | } 72 | } 73 | // return the next token, if any 74 | if (afterToken and i < j) { 75 | self.index = j + 1; 76 | return self.buffer[i..j]; 77 | } 78 | // update iterator state 79 | if (atEnd) { 80 | return null; 81 | } 82 | j += 1; // extend token with current character 83 | if (afterToken) { 84 | assert(j - i == 1); 85 | i += 1; // remove current character again 86 | } 87 | } 88 | } 89 | }; 90 | 91 | pub fn eq(a: Token, b: Token) bool { 92 | return std.mem.eql(u8, a, b); 93 | } 94 | 95 | pub fn eqs(a: TokenList, b: []const Token) bool { 96 | // TODO: compare in a simpler way? 97 | if (a.items.len != b.len) return false; 98 | var i: usize = 0; 99 | while (i < b.len) : (i += 1) { 100 | if (!eq(a.items[i], b[i])) return false; 101 | } 102 | return true; 103 | } 104 | 105 | const expect = std.testing.expect; 106 | 107 | test "tokenizer on empty buffer" { 108 | var tokens = TokenIterator{ .buffer = "" }; 109 | try expect((try tokens.next()) == null); 110 | try expect((try tokens.next()) == null); 111 | } 112 | 113 | test "tokenizer on whitespace buffer" { 114 | var tokens = TokenIterator{ .buffer = " \t " }; 115 | try expect((try tokens.next()) == null); 116 | try expect((try tokens.next()) == null); 117 | } 118 | 119 | test "tokenizer with whitespace at start" { 120 | var tokens = TokenIterator{ .buffer = " $d $." 
}; 121 | try expect(eq((try tokens.next()).?, "$d")); 122 | try expect(eq((try tokens.next()).?, "$.")); 123 | try expect((try tokens.next()) == null); 124 | try expect((try tokens.next()) == null); 125 | } 126 | 127 | test "tokenizer with whitespace at end" { 128 | var tokens = TokenIterator{ .buffer = "$d $. " }; 129 | try expect(eq((try tokens.next()).?, "$d")); 130 | try expect(eq((try tokens.next()).?, "$.")); 131 | try expect((try tokens.next()) == null); 132 | try expect((try tokens.next()) == null); 133 | } 134 | 135 | test "tokenizer with skipped illegal 'low' character" { 136 | var tokens = TokenIterator{ .buffer = "$d\x03$." }; 137 | try expect(eq((try tokens.next()).?, "$d")); 138 | if (tokens.next()) |_| unreachable else |err| try expect(err == Error.IllegalCharacter); 139 | try expect(eq((try tokens.next()).?, "$.")); 140 | try expect((try tokens.next()) == null); 141 | try expect((try tokens.next()) == null); 142 | } 143 | 144 | test "tokenizer with skipped illegal 'high' character" { 145 | var tokens = TokenIterator{ .buffer = "$( a\x7fc $)" }; 146 | try expect(eq((try tokens.next()).?, "$(")); 147 | try expect(eq((try tokens.next()).?, "a")); 148 | if (tokens.next()) |_| unreachable else |err| try expect(err == Error.IllegalCharacter); 149 | try expect(eq((try tokens.next()).?, "c")); 150 | try expect(eq((try tokens.next()).?, "$)")); 151 | try expect((try tokens.next()) == null); 152 | try expect((try tokens.next()) == null); 153 | } 154 | 155 | test "tokenizer" { 156 | var tokens = TokenIterator{ .buffer = "$c wff $." }; 157 | try expect(eq((try tokens.next()).?, "$c")); 158 | try expect(eq((try tokens.next()).?, "wff")); 159 | try expect(eq((try tokens.next()).?, "$.")); 160 | try expect((try tokens.next()) == null); 161 | try expect((try tokens.next()) == null); 162 | } 163 | 164 | test "tokenizer comment without newline" { 165 | var tokens = TokenIterator{ .buffer = "$( a b c $)\n$c $." 
}; 166 | try expect(eq((try tokens.next()).?, "$(")); 167 | try expect(eq((try tokens.next()).?, "a")); 168 | try expect(eq((try tokens.next()).?, "b")); 169 | try expect(eq((try tokens.next()).?, "c")); 170 | try expect(eq((try tokens.next()).?, "$)")); 171 | try expect(eq((try tokens.next()).?, "$c")); 172 | try expect(eq((try tokens.next()).?, "$.")); 173 | try expect((try tokens.next()) == null); 174 | try expect((try tokens.next()) == null); 175 | } 176 | -------------------------------------------------------------------------------- /src/verify.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Allocator = std.mem.Allocator; 3 | const assert = std.debug.assert; 4 | 5 | const errors = @import("errors.zig"); 6 | const Error = errors.Error; 7 | 8 | const read = @import("read.zig"); 9 | const readBuffer = read.readBuffer; 10 | 11 | const tokenize = @import("tokenize.zig"); 12 | const TokenList = tokenize.TokenList; 13 | 14 | const compose = @import("compose.zig"); 15 | const Expression = compose.Expression; 16 | const eqExpr = compose.eqExpr; 17 | const Hypothesis = compose.Hypothesis; 18 | const DVPair = compose.DVPair; 19 | const InferenceRule = compose.InferenceRule; 20 | const RuleIterator = compose.RuleIterator; 21 | 22 | const prove = @import("prove.zig"); 23 | 24 | pub fn verifyFile(allocator: Allocator, dir: std.fs.Dir, mm_file_name: []const u8) !void { 25 | const buffer = try readBuffer(allocator, dir, mm_file_name); 26 | defer allocator.free(buffer); 27 | 28 | errdefer |err| std.log.warn("\nError {} happened...\n", .{err}); 29 | var iter = try RuleIterator.init(allocator); 30 | defer iter.deinit(); 31 | 32 | var nr_proofs: u64 = 0; 33 | defer std.log.info("\nFound {} $p statements so far.\n", .{nr_proofs}); 34 | 35 | try iter.addStatementsFrom(dir, buffer); 36 | while (try iter.next()) |*item| { 37 | defer item.deinit(); 38 | if (item.proof) |proof| { 39 | nr_proofs += 1; 40 | 
const rule = item.rule;
            // NOTE(review): std.event was removed from the standard library on
            // newer zig master; confirm this call still exists for the
            // compiler version this branch targets.
            std.event.Loop.startCpuBoundOperation();
            try verifyProofConclusion(&iter, item.label, proof, rule.hypotheses, .{
                .expression = rule.conclusion,
                .dvPairs = rule.activeDVPairs,
            });
        }
    }

    if (iter.currentScopeDiff) |_| return Error.Incomplete; // unclosed $}
}

/// Run `proof` and check it against `conclusion`:
/// the proof's resulting expression must equal conclusion.expression
/// (else Error.ResultMismatch), and every disjoint-variable restriction the
/// proof relies on must be declared in conclusion.dvPairs
/// (else Error.DVRMissing, after logging the missing $d pair).
fn verifyProofConclusion(iter: *RuleIterator, label: []const u8, proof: TokenList, hypotheses: []Hypothesis, conclusion: struct {
    expression: Expression,
    dvPairs: []DVPair,
}) anyerror!void {
    std.log.debug("starting to verify proof of {s}.", .{label});
    defer std.log.debug("end of verify proof of {s}.", .{label});
    var result = try prove.runProof(proof, hypotheses, iter, iter.allocator);
    defer result.deinit(iter.allocator);

    if (!eqExpr(result.expression, conclusion.expression)) return Error.ResultMismatch;

    // if not(every result.dvPairs is in conclusion.dvPairs) return Error.DVRMissing;
    for (result.dvPairs.items) |proofDVPair| {
        for (conclusion.dvPairs) |ruleDVPair| {
            // a $d restriction is an unordered pair, so match both orientations
            if ((eq(proofDVPair.var1, ruleDVPair.var1) and eq(proofDVPair.var2, ruleDVPair.var2)) or
                (eq(proofDVPair.var1, ruleDVPair.var2) and eq(proofDVPair.var2, ruleDVPair.var1)))
            {
                // proofDVPair is declared in an active $d statement
                break;
            }
        } else {
            // for..else: the loop ran to completion without `break`, so
            // proofDVPair is not declared in any active $d statement
            std.log.warn("$d {s} {s} $. expected but not found in the following list:\n", .{ proofDVPair.var1, proofDVPair.var2 });
            for (conclusion.dvPairs) |ruleDVPair| {
                std.log.warn(" $d {s} {s} $.\n", .{ ruleDVPair.var1, ruleDVPair.var2 });
            }
            std.log.warn("(end of list)\n", .{});
            return Error.DVRMissing; // TODO: Test
        }
    }
}

const expect = std.testing.expect;
const expectError = std.testing.expectError;
const eq = tokenize.eq;
const eqs = tokenize.eqs;
const Token = tokenize.Token;
const alltests = @import("alltests.zig");
const TestFS = alltests.TestFS;

/// Test helper: write `buffer` to a temporary .mm file and verify it.
fn _verifyBuffer(buffer: []const u8) !void {
    const test_file_name = "test_file.mm";

    var testFS = TestFS.init();
    defer testFS.deinit();
    try testFS.writeFile(test_file_name, buffer);

    try verifyFile(std.testing.allocator, testFS.tmpDir.dir, test_file_name);
}

/// Like _verifyBuffer, but wraps the testing allocator in a LoggingAllocator
/// so every allocation is logged (useful when chasing memory use).
fn _verifyBufferWithLogging(buffer: []const u8) !void {
    const test_file_name = "test_file.mm";

    var testFS = TestFS.init();
    defer testFS.deinit();
    try testFS.writeFile(test_file_name, buffer);

    const allocator = std.heap.LoggingAllocator(.warn, .err).init(std.testing.allocator).allocator();
    try verifyFile(allocator, testFS.tmpDir.dir, test_file_name);
}

test "proof with $d violation" {
    // pq requires $d P Q, but the proof of qr provides no $d Q R / $d P Q
    // in its own scope, so verification must fail with DVRMissing
    try expectError(Error.DVRMissing, _verifyBuffer(
        \\$c wff |- $.
        \\$v P Q R $.
        \\wp $f wff P $.
        \\wq $f wff Q $.
        \\wr $f wff R $.
        \\${ $d P Q $. pq.1 $e |- P $. pq $a |- Q $. $}
        \\
        \\${
        \\ qr.1 $e |- Q $. qr $p |- R $= wq wr qr.1 pq $.
        \\$}
    ));
}

test "proof with correct $d" {
    // identical to the previous test, except the inner scope declares
    // the $d Q R restriction that the use of pq requires
    try _verifyBuffer(
        \\$c wff |- $.
        \\$v P Q R $.
        \\wp $f wff P $.
        \\wq $f wff Q $.
        \\wr $f wff R $.
        \\${ $d P Q $. pq.1 $e |- P $. pq $a |- Q $. $}
        \\
        \\${
        \\ $d Q R $.
        \\ qr.1 $e |- Q $. qr $p |- R $= wq wr qr.1 pq $.
        \\$}
    );
}

test "multiple proofs" {
    // two $p statements in one scope, both using compressed proof notation
    try _verifyBuffer(
        \\$c T $.
        \\${
        \\ h $e T $.
        \\ p $p T $= ( ) A $.
        \\ q $p T $= ( ) A $.
        \\$}
    );
}