├── version_guard.py ├── LICENSE ├── .gitignore ├── renderJustification.py ├── rmBitmasks.py ├── databaseToJSON.py ├── README.md ├── rmzoo.py ├── rmupdater.py └── results.txt /version_guard.py: -------------------------------------------------------------------------------- 1 | import sys 2 | if sys.version_info >= (3,): 3 | def isString(value): 4 | return isinstance(value, str) 5 | else: 6 | def isString(value): 7 | return isinstance(value, basestring) 8 | 9 | # Mock "lru_cache"; actually just a pass-through decorator 10 | def lru_cache(*args, **kwargs): 11 | def empty_decorator(f): 12 | f.__wrapped__ = f 13 | return f 14 | return empty_decorator 15 | try: 16 | from functools import lru_cache 17 | except ImportError: 18 | try: 19 | from repoze.lru import lru_cache 20 | except ImportError: 21 | pass 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Eric Astor and Damir Dzhafarov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Database files produced 2 | *.dat 3 | *.bak 4 | *.dir 5 | *.db 6 | 7 | # Images produced 8 | *.dot 9 | *.gif 10 | *.gv 11 | *.png 12 | *.ps 13 | *.svg 14 | *.pdf 15 | 16 | # Temp files 17 | temp/ 18 | 19 | # Byte-compiled / optimized / DLL files 20 | __pycache__/ 21 | *.py[cod] 22 | *$py.class 23 | 24 | # C extensions 25 | *.so 26 | 27 | # Profiling output files 28 | *.prof 29 | *.lprof 30 | 31 | # Distribution / packaging 32 | .Python 33 | env/ 34 | build/ 35 | develop-eggs/ 36 | dist/ 37 | downloads/ 38 | eggs/ 39 | .eggs/ 40 | lib/ 41 | lib64/ 42 | parts/ 43 | sdist/ 44 | var/ 45 | *.egg-info/ 46 | .installed.cfg 47 | *.egg 48 | *.zip 49 | 50 | # PyInstaller 51 | # Usually these files are written by a python script from a template 52 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
53 | *.manifest
54 | *.spec
55 | 
56 | # Installer logs
57 | pip-log.txt
58 | pip-delete-this-directory.txt
59 | 
60 | # Unit test / coverage reports
61 | htmlcov/
62 | .tox/
63 | .coverage
64 | .coverage.*
65 | .cache
66 | nosetests.xml
67 | coverage.xml
68 | *,cover
69 | .hypothesis/
70 | 
71 | # Translations
72 | *.mo
73 | *.pot
74 | 
75 | # Django stuff:
76 | *.log
77 | 
78 | # Sphinx documentation
79 | docs/_build/
80 | 
81 | # PyBuilder
82 | target/
83 | 
84 | #Ipython Notebook
85 | .ipynb_checkpoints
86 | 
--------------------------------------------------------------------------------
/renderJustification.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, unicode_literals
2 | 
3 | from rmBitmasks import Form, Reduction
4 | 
5 | from version_guard import lru_cache, isString
6 | 
7 | _justLineMarker = u'*'
8 | _justIndentMarker = u'@'
9 | justMarker = _justLineMarker + _justIndentMarker
10 | _justIndented = justMarker + _justIndentMarker
11 | _justFormat = justMarker + u'{0}: '
12 | def indentJust(jst):
13 |     return jst.replace(justMarker, _justIndented)
14 | 
15 | @lru_cache(maxsize=1024)
16 | def printOp(op):
17 |     if isString(op):
18 |         return op
19 | 
20 |     opCtx, opCore = op
21 |     try:
22 |         opCtx = opCtx.name
23 |     except AttributeError:
24 |         pass
25 | 
26 |     if opCore == u'nc':
27 |         return u'n{0}c'.format(opCtx)
28 |     elif opCore in (u'=>', u'=/>', u'<=', u'</=', u'<=>'):
29 |         return u'{1}_{0}'.format(opCtx, opCore)
30 |     else:
31 |         return u'{0}{1}'.format(opCtx, opCore)
32 | 
33 | def printFact(a, op, b):
34 |     if op == u'form':
35 |         b = b.name
36 |     elif op[0] in (Reduction.sW, Reduction.W, Reduction.gW, Reduction.sc, Reduction.c): # Reducibility fact, not implication fact
37 |         if op[1] == u'->':
38 |             op = (op[0], u'<=')
39 |             a,b = b,a
40 |         elif op[1] == u'-|>':
41 |             op = (op[0], u'</=')
42 |             a,b = b,a
43 |         elif op[1] == u'<->':
44 |             op = (op[0], u'<=>')
45 |     return u'{0} {1} {2}'.format(a, printOp(op), b)
46 | 
47 | printedJustify = {}
48 | def printJustification(fact, justify, formatted=True):
49 |     a,op,b = fact
50 | 
51 |     r = ''
52 |     try:
53 |         r = printedJustify[fact]
54 |     except KeyError:
55 |         if op == u'form':
56 |             r = justMarker + printFact(*fact)
57 |         else:
58 |             try:
59 |                 jst = justify[fact]
60 |             except KeyError:
61 |                 raise Exception(u'ERROR: Referenced fact "{0}" not justified!'.format(printFact(*fact)))
62 | 
63 |             if isString(jst):
64 |                 r = _justFormat.format(printFact(*fact)) + jst
65 |             else:
66 |                 r = _justFormat.format(printFact(*fact)) \
67 |                     + u''.join((_justIndented+f if isString(f) else indentJust(printJustification(f, justify, formatted=False))) for f in jst)
68 |         printedJustify[fact] = r
69 | 
70 |     if formatted:
71 |         return r.replace(_justLineMarker, u'\n').replace(_justIndentMarker, u' ')
72 |     else:
73 |         return r
--------------------------------------------------------------------------------
/rmBitmasks.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, unicode_literals
2 | 
3 | from enum import Enum
4 | 
5 | from version_guard import lru_cache
6 | 
7 | class BitmaskEnum(int, Enum):
8 |     def __new__(cls, value=None):
9 |         if value is None:
10 |             if len(cls.__members__) == 0:
11 |                 value = 0
12 |             else:
13 |                 value = 1 << (len(cls.__members__) - 1)
14 | 
15 |         obj = int.__new__(cls, value)
16 |         obj._value_ = value
17 |         return obj
18 | 
19 |     @staticmethod
20 |     def isPresent(x,magic_num):
21 |         return (x & magic_num) != 0
22 | 
23 |     @classmethod
24 |     @lru_cache(maxsize=256)
25 |     def 
strongest(cls,magic_num): 26 | if magic_num == 0: 27 | return cls(0) 28 | else: 29 | return cls(1 << (magic_num.bit_length() - 1)) 30 | 31 | @classmethod 32 | @lru_cache(maxsize=256) 33 | def weakest(cls,magic_num): 34 | if magic_num == 0: 35 | return cls.none 36 | else: 37 | return cls(magic_num & -magic_num) 38 | 39 | @classmethod 40 | @lru_cache(maxsize=256) 41 | def list(cls,magic_num): 42 | return [x for x in cls if cls.isPresent(x, magic_num)] 43 | 44 | @classmethod 45 | def fromString(cls,s): 46 | try: 47 | return getattr(cls, s) 48 | except AttributeError: 49 | raise NotImplementedError("The {0} `{1}` is not implemented.".format(cls.__name__, s)) 50 | 51 | class Reduction(BitmaskEnum): 52 | none = 0 53 | w = 1 << 0 54 | RCA = 1 << 1 55 | c = 1 << 2 56 | sc = 1 << 3 57 | gW = 1 << 4 58 | W = 1 << 5 59 | sW = 1 << 6 60 | 61 | @classmethod 62 | def fromString(cls,s): 63 | try: 64 | return cls.alias[s] 65 | except KeyError: 66 | try: 67 | return getattr(cls, s) 68 | except AttributeError: 69 | raise NotImplementedError("The reduction `{}` is not implemented.".format(s)) 70 | 71 | Reduction.alias = {u'': Reduction.RCA, 72 | u'gc': Reduction.w} 73 | 74 | def noReduction(): 75 | return Reduction.none 76 | 77 | class Form(BitmaskEnum): 78 | none = 0 79 | Sig02 = 1 << 10 80 | Pi02 = 1 << 9 81 | Sig03 = 1 << 8 82 | Pi03 = 1 << 7 83 | uPi03 = 1 << 6 84 | Sig04 = 1 << 5 85 | Pi04 = 1 << 4 86 | Pi11 = 1 << 3 87 | rPi12 = 1 << 2 88 | Pi12 = 1 << 1 89 | Pi13 = 1 << 0 90 | 91 | def noForm(): 92 | return Form.none 93 | 94 | def _completeImplications(enum, forward): 95 | for c in enum: 96 | if c == enum.none: continue 97 | for a in enum: 98 | if a == enum.none: continue 99 | 100 | if enum.isPresent(c, forward[a]): 101 | forward[a] |= forward[c] 102 | 103 | def _reverseImplications(enum, forward): 104 | reverse = {enum.none: enum.none} 105 | for p0 in enum: 106 | if p0 == enum.none: continue 107 | 108 | reverse[p0] = enum.none 109 | for p1 in enum: 110 | if p1 == enum.none: continue 111 | 112 | if enum.isPresent(p0, forward[p1]): 113 | reverse[p0] |= p1 114 | return reverse 115 | 116 | _R_WEAKER = {r:r for r in Reduction} 117 | 118 | _R_WEAKER[Reduction.RCA] |= Reduction.w # RCA -> w 119 | 120 | _R_WEAKER[Reduction.sc] |= Reduction.c # sc -> c 121 | _R_WEAKER[Reduction.c] |= Reduction.w # c -> w 122 | 123 | _R_WEAKER[Reduction.sW] |= Reduction.W | Reduction.sc # sW -> W, sc 124 | _R_WEAKER[Reduction.W] |= Reduction.gW | Reduction.c # W -> gW, c 125 | _R_WEAKER[Reduction.gW] |= Reduction.w # gW -> w 126 | 127 | _completeImplications(Reduction, _R_WEAKER) 128 | 129 | _R_STRONGER = _reverseImplications(Reduction, _R_WEAKER) 130 | 131 | Reduction.weaker = lambda r: _R_WEAKER[r] 132 | Reduction.stronger = lambda r: _R_STRONGER[r] 133 | 134 | _F_STRONGER = {f:f for f in Form} 135 | 136 | _F_STRONGER[Form.Pi13] |= Form.Pi12 # Pi12 implies Pi13 137 | _F_STRONGER[Form.Pi12] |= Form.rPi12 # rPi12 implies Pi12 138 | _F_STRONGER[Form.rPi12] |= Form.Pi11 # Pi11 implies rPi12 139 | _F_STRONGER[Form.Pi11] |= Form.Sig04 # Sig04 implies Pi11 140 | _F_STRONGER[Form.Pi11] |= Form.Pi04 # Pi04 implies Pi11 141 | _F_STRONGER[Form.Pi11] |= Form.uPi03 # uPi03 implies Pi11 142 | _F_STRONGER[Form.Sig04] |= Form.Sig03 # Sig03 implies Sig04 143 | _F_STRONGER[Form.Sig04] |= Form.Pi03 # Pi03 implies Sig04 144 | _F_STRONGER[Form.Pi04] |= Form.Sig03 # Sig03 implies Pi04 145 | _F_STRONGER[Form.Pi04] |= Form.Pi03 # Pi03 implies Pi04 146 | _F_STRONGER[Form.uPi03] |= Form.Pi03 # Pi03 implies uPi03 147 | _F_STRONGER[Form.Sig03] |= 
Form.Sig02 # Sig02 implies Sig03 148 | _F_STRONGER[Form.Sig03] |= Form.Pi02 # Pi02 implies Sig03 149 | _F_STRONGER[Form.Pi03] |= Form.Sig02 # Sig02 implies Pi03 150 | _F_STRONGER[Form.Pi03] |= Form.Pi02 # Pi02 implies Pi03 151 | 152 | _completeImplications(Form, _F_STRONGER) 153 | 154 | _F_WEAKER = _reverseImplications(Form, _F_STRONGER) 155 | 156 | Form.weaker = lambda f: _F_WEAKER[f] 157 | Form.stronger = lambda f: _F_STRONGER[f] -------------------------------------------------------------------------------- /databaseToJSON.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | from __future__ import print_function 4 | 5 | import os, sys, uuid 6 | 7 | import itertools 8 | from io import open 9 | from collections import defaultdict 10 | 11 | from version_guard import isString 12 | 13 | import zlib 14 | try: 15 | import cPickle as pickle 16 | except: 17 | import pickle 18 | 19 | try: 20 | import ujson as json 21 | except: 22 | print('UltraJSON not available; falling back to Python library.') 23 | import json 24 | 25 | from rmBitmasks import * 26 | from renderJustification import printOp 27 | 28 | Version = u'5.1' 29 | DatabaseVersion = u'5.1' 30 | 31 | class VersionError(Exception): 32 | def __init__(self, targetVersion, actualVersion): 33 | super(VersionError, self).__init__(u'Version mismatch: found v{0}, targeting v{1}'.format(actualVersion, targetVersion)) 34 | 35 | class Zoo: 36 | _nextUID = 0 37 | 38 | nodes = {} 39 | meta = {'edgeKinds': [], 40 | 'colorings': [], 41 | 'graphviz': {}} 42 | 43 | def addNode(label, definition='', key=None, uid=None, edges={}, properties={}, tags=[]): 44 | if uid is None: 45 | uid = _nextUID 46 | _nextUID += 1 47 | 48 | n = Node(uid, label, definition, key, edges, properties, tags) 49 | if key is None: 50 | nodes[label] = n 51 | else: 52 | nodes[key] = n 53 | 54 | def addEdgeKind(label, functionBody): 55 | self.meta['edgeKinds'].append({'label': label, 'functionBody': functionBody}) 56 | 57 | def addColoring(self, name, colors, labels, coloringFunction): 58 | self.meta['colorings'].append(Coloring(name, colors, labels, coloringFunction)) 59 | 60 | def __getitem__(self, key): 61 | return self.nodes[key] 62 | 63 | def __setitem__(self, key, item): 64 | self.nodes[key] = item 65 | 66 | def __contains__(self, key): 67 | return (key in self.nodes) 68 | 69 | def __init__(self, edgeKinds=[], rankdir='TB'): 70 | self.meta['edgeKinds'] = edgeKinds 71 | self.meta['graphviz'] = {'rankdir': rankdir} 72 | 73 | class Coloring: 74 | def __init__(self, name, colors, labels, coloringFunction): 75 | self.label = name 76 | self.colors = [{'color': color, 'label': label} for color,label in zip(colors,labels)] 77 | self.coloring = coloringFunction 78 | 79 | class Node: 80 | def __init__(self, uid, label, definition='', key=None, edges={}, properties={}, tags=[]): 81 | if key is None: 82 | key = label 83 | 84 | self.uid = uid 85 | self.label = label 86 | self.definition = definition 87 | self.key = key 88 | self.edges = edges 89 | self.properties = properties 90 | self.tags = tags 91 | 92 | def addEdge(self, dstKey, properties={}): 93 | self.edges[dstKey] = Edge(self.key, dstKey, properties) 94 | 95 | def addProperty(self, name, justification, value=None, description='', uid=None): 96 | self.properties[name] = Property(justification, value, description, uid) 97 | 98 | class Edge: 99 | def __init__(self, srcKey, dstKey, properties={}): 100 | self.srcKey = srcKey 101 | self.dstKey = dstKey 102 | self.properties 
= properties 103 | 104 | def addProperty(self, name, justification, value=None, description='', uid=None): 105 | self.properties[name] = Property(justification, value, description, uid) 106 | 107 | class Property: 108 | def __init__(self, justification, value=None, description='', uid=None): 109 | if uid is None: 110 | uid = uuid.uuid4() 111 | 112 | self.value = value 113 | self.justification = justification 114 | self.description = description 115 | self.uid = uid 116 | 117 | class Justification: 118 | weight = 0 119 | direct = None 120 | composite = None 121 | 122 | def __init__(self, direct=None, composite=None, weight=None): 123 | if direct is None and composite is None: 124 | raise ValueError('Justifications must contain some justification.') 125 | if direct is not None and composite is not None: 126 | raise ValueError('Justifications are either direct or composite, not both.') 127 | 128 | if direct is not None: 129 | self.weight = 1 130 | self.direct = direct 131 | 132 | if composite is not None: 133 | if weight is None: 134 | raise ValueError('Composite justifications must specify their weights.') 135 | 136 | self.weight = weight 137 | self.composite = composite 138 | 139 | def loadDatabase(databaseName, quiet=False): 140 | with open(databaseName, mode='rb') as databaseFile: 141 | compressedDatabase = databaseFile.read() 142 | pickledDatabase = zlib.decompress(compressedDatabase) 143 | setDatabase(pickle.loads(pickledDatabase)) 144 | 145 | def getDatabase(): 146 | return {'version': DatabaseVersion, 147 | 'principles': principles, 148 | 'implication': (implies, notImplies), 149 | 'conservation': (conservative, nonConservative), 150 | 'form': form, 151 | 'primary': (primary, primaryIndex), 152 | 'justify': justify} 153 | def setDatabase(database): 154 | if database['version'] != DatabaseVersion: 155 | raise VersionError(DatabaseVersion, database['version']) 156 | 157 | global principles 158 | principles = database['principles'] 159 | 160 | global implies, notImplies 161 | implies, notImplies = database['implication'] 162 | 163 | global conservative, nonConservative 164 | conservative, nonConservative = database['conservation'] 165 | 166 | global form 167 | form = database['form'] 168 | 169 | global primary, primaryIndex 170 | primary, primaryIndex = database['primary'] 171 | 172 | global justify 173 | justify = database['justify'] 174 | 175 | if __name__ == '__main__': 176 | databaseTitle = 'database' 177 | if os.path.splitext(databaseTitle)[1] == '': 178 | databaseName = databaseTitle + os.extsep + 'dat' 179 | else: 180 | databaseName = databaseTitle 181 | loadDatabase(databaseName) 182 | 183 | primaryIndex += sorted(principles - primary) 184 | 185 | meta = { 186 | 'tags': [], 187 | 'edgeKinds': [], 188 | 'colorings': [], 189 | 'about': {'description': 'The RM Zoo is a program to ' 190 | 'help organize reverse-mathematical relations between mathematical ' 191 | 'principles, particularly those that fail to be equivalent to any of the ' 192 | 'big five subsystems of second-order arithmetic. Its primary goal is to ' 193 | 'make it easier to see known results and open questions, and thus ' 194 | 'hopefully to serve as a useful tool to researchers in the field. 
As a ' 195 | 'secondary goal, the Zoo provides an interactive annotated bibliography ' 196 | 'of the field, collecting results in a standard machine-readable format.'}, 197 | 'graphviz': {} 198 | } 199 | 200 | translationFunction = "if('{0}' in edge.properties) return 1; " \ 201 | "if('{1}' in edge.properties) return 0; " \ 202 | "return 2;" 203 | for red in Reduction: 204 | if red == Reduction.none: continue 205 | 206 | posName = red.name + u'i' 207 | negName = red.name + u'ni' 208 | 209 | kindNode = {'label': '$\rightarrow_{\rm ' + red.name + '}$', 210 | 'key': posName, 211 | 'edges': [r.name + u'i' for r in Reduction.list(Reduction.weaker(red) & ~red)]} 212 | kind = {'label': '$\rightarrow_{\rm ' + red.name + '}$', 213 | 'functionBody': translationFunction.format(posName, negName), 214 | 'node': kindNode} 215 | meta['edgeKinds'].append(kind) 216 | for f in Form: 217 | if f == Form.none: continue 218 | 219 | posName = f.name + u'c' 220 | negName = f.name + u'nc' 221 | 222 | #TODO: Implement better labels for conservation results 223 | kindNode = {'label': posName, 224 | 'key': posName, 225 | 'edges': [r.name + u'i' for r in Reduction.list(Reduction.weaker(red) & ~red)]} 226 | kind = {'label': posName, 227 | 'functionBody': translationFunction.format(posName, negName), 228 | 'node': kindNode} 229 | 230 | nodes = {} 231 | for i,p in enumerate(primaryIndex): 232 | nodes[p] = dict() 233 | nodes[p]['uid'] = i 234 | 235 | #TODO: Implement labels 236 | nodes[p]['label'] = p 237 | 238 | #TODO: Implement definitions 239 | nodes[p]['definition'] = '' 240 | 241 | nodes[p]['edges'] = {dst:{'srcKey':p, 242 | 'dstKey':dst, 243 | 'properties':dict()} for dst in principles} 244 | nodes[p]['properties'] = {} 245 | nodes[p]['tags'] = [] 246 | uid = i+1 247 | 248 | properties = {} 249 | for p in sorted(principles): 250 | for f in Form.list(form[p]): 251 | #TODO: Implement justifications for forms 252 | properties[(p, u'form', f)] = {'uid': uid, 253 | 'value': f.name, 254 | 'justification': {'weight': 1, 255 | 'direct': 'Observed'}} 256 | nodes[p]['properties'][f.name] = properties[(p, u'form', f)] 257 | uid += 1 258 | 259 | for f,j in justify.items(): 260 | if f in properties: 261 | continue 262 | 263 | toJustify = [(f, j, {'uid': uid})] 264 | uid += 1 265 | while toJustify: 266 | fact,jst,prop = toJustify.pop() 267 | done = True 268 | if isString(jst): 269 | prop['justification'] = {'weight': 1, 'direct': jst} 270 | elif all(ref in properties for ref in jst): 271 | prop['justification'] = {'weight': 1 + sum(properties[ref]['justification']['weight'] for ref in jst), 272 | 'composite': [properties[ref]['uid'] for ref in jst]} 273 | else: 274 | done = False 275 | toJustify.append((fact, jst, prop)) 276 | for ref in jst: 277 | if ref in properties: 278 | continue 279 | toJustify.append((ref, justify[ref], {'uid': uid})) 280 | uid += 1 281 | if done: 282 | properties[fact] = prop 283 | 284 | a,op,b = fact 285 | opCtx,opCore = op 286 | if opCore in u'->': 287 | coreName = u'i' 288 | elif opCore == u'-|>': 289 | coreName = u'ni' 290 | else: 291 | coreName = opCore 292 | opName = opCtx.name + coreName 293 | nodes[a]['edges'][b]['properties'][opName] = prop 294 | 295 | with open('rmzoo.json', 'w') as f: 296 | json.dump({'nodes': nodes, 'meta': meta}, f, sort_keys=True, indent=4) 297 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RM Zoo 2 | 3 | The Reverse Mathematics Zoo is a 
program to help organize relations among various mathematical principles, particularly those that fail to be equivalent to any of the big five subsystems of second-order arithmetic. Its primary goal is to make it easier to see known results and open questions, and thus hopefully to serve as a useful tool to researchers in the field. As a secondary goal, the Zoo should provide an interactive annotated bibliography of the field, collecting results in a standard machine-readable format. 4 | 5 | The present version of the RM Zoo is a complete rewrite of the original, and features many improvements. The program has been heavily optimized and extended; the run time should generally be faster, and more true facts should be inferred from most starting results files. In addition, the RM Zoo can now handle implications, reducibilities (including both Weihrauch reducibility and computable reducibility), and conservation facts. 6 | 7 | The program is divided into two parts: a database updater/compiler, which derives all inferences from the provided results file, and a database query system, which can answer specific questions about reverse-mathematical relations or produce diagrams on request. 8 | 9 | Under the reverse-mathematical interface, the Zoo is actually a specialized inference engine, designed to reason with facts of the form "a implies b in context Q" (implication facts), "if a implies p, and p has form F, then b implies p" (conservation facts), or the negations thereof. 10 | 11 | ## Installation 12 | 13 | To run the RM Zoo, you will need to install a distribution of Python, version 2.7 or later. (The Zoo will perform best if run in either [PyPy2.7](http://pypy.org/index.html) or [Python 3.4+](https://www.python.org/).) 14 | 15 | You will also need the [Pyparsing](http://pyparsing.wikispaces.com/) module. 16 | 17 | If not using Python 3.4+, you will need to install the [enum34](https://bitbucket.org/stoneleaf/enum34) module, and if not using Python 3.2+, you will also need the [repoze.lru](https://github.com/repoze/repoze.lru) module. 18 | 19 | To install each of these modules, run the appropriate commands below: 20 | ``` 21 | pip install pyparsing 22 | pip install enum34 23 | pip install repoze.lru 24 | ``` 25 | 26 | To view/render the diagrams produced by the Zoo, you will need to install [Graphviz](http://www.graphviz.org/), or another program capable of reading DOT files. 27 | 28 | ## Usage 29 | 30 | The RM Zoo consists of two Python scripts, `rmupdater.py` and `rmzoo.py`. 31 | 32 | ### rmupdater 33 | 34 | `rmupdater.py` compiles results files into databases of known facts, and is typically run as follows: 35 | 36 | - `python rmupdater.py [results file]`, 37 | 38 | where `[results file]` is a text file containing facts; the results file included in this distribution is `byPaper.txt`. If using multiple results files (for testing purposes), you may keep them in separate databases by adding a database title: 39 | 40 | - `python rmupdater.py [results file] [database title]` 41 | 42 | For example, one would typically run 43 | 44 | - `python rmupdater.py byPaper.txt`; 45 | 46 | If maintaining an alternate results file in `test.txt`, one might separately run the command 47 | 48 | - `python rmupdater.py test.txt testDatabase`. 49 | 50 | ### rmzoo 51 | 52 | `rmzoo.py` then takes the database built by `rmupdater.py`, and carries out various tasks as controlled by its options. 
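Under the hood, the compiled database is nothing more than a zlib-compressed pickle, so it can also be inspected directly from Python. The snippet below is a minimal sketch mirroring `loadDatabase` from `rmzoo.py`; it assumes you have already run `rmupdater.py` (so that the default `database.dat` exists), and that you run it from the RM Zoo directory, since unpickling the stored bitmasks requires `rmBitmasks` to be importable.

```
import zlib
try:
    import cPickle as pickle   # Python 2
except ImportError:
    import pickle

# Read the compiled database (zlib-compressed pickle), as loadDatabase() does.
with open('database.dat', mode='rb') as databaseFile:
    database = pickle.loads(zlib.decompress(databaseFile.read()))

print(database['version'])             # should match the tools' DatabaseVersion, e.g. u'5.1'
print(len(database['principles']))     # number of principles known to the Zoo

# Each of these is keyed by (principle, principle) pairs and holds a bitmask
# of Reduction or Form flags, as defined in rmBitmasks.py.
implies, notImplies = database['implication']
conservative, nonConservative = database['conservation']
```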
The basic command is 53 | 54 | - `python rmzoo.py [options]`; 55 | 56 | however, if you need to specify a database title, add it to the command as follows: 57 | 58 | - `python rmzoo.py [database title] [options]` 59 | 60 | --- 61 | 62 | To query the database for a fact (which will determine whether it is known or contradicted, and give the justification in either case), run the command 63 | 64 | - `python rmzoo.py -q "[fact]"`. 65 | 66 | For example, 67 | 68 | - `python rmzoo.py -q "RT22 -> CRT22"` 69 | 70 | will print a justification of the fact that **RT22** implies **CRT22** over **RCA0**. 71 | 72 | --- 73 | 74 | To generate a diagram from the database, instead run 75 | 76 | - `python rmzoo.py [diagram options] > [destination]`, 77 | 78 | where `[destination]` is a DOT file. The `[diagram options]` **must** include one or more of the following: 79 | 80 | - `-i`: show implications as black arrows; 81 | - `-n`: show non-implications as red arrows; 82 | - `-f`: color-codes principles by their syntactic form; currently, this uses a pink box for Π11 principles, and a cyan box for restricted Π12 principles. Other forms do not yet have a color code. 83 | - `-c`: show conservation facts, using color-coded arrows (as for the forms) to represent each form of conservation; 84 | - `-w`: show the weakest open implications as green arrows; 85 | - `-s`: show the strongest open implications as green arrows. 86 | In addition, the options may include any of the following: 87 | - `-o`: show facts that hold in ω-models; 88 | - `-t [REDUCIBILITY]`: show facts relative to implications over the given REDUCIBILITY (options include sW, W, gW, sc, c, w, and RCA); 89 | - `-p`: show only one primary principle from each group of equivalent principles; 90 | - `-r "[CLASS]"`: restrict the diagram to just the principles contained between the quotation marks (and any sub-principles of conjunctions in the list). For example, the option `-r "RT22 COH+WKL SRT22 RCA"` will show only relations between the principles **RT22**, **COH+WKL**, **SRT22**, **RCA0**, **COH**, and **WKL**. 91 | 92 | For instance, 93 | 94 | - `python rmzoo.py -i -o -w > diagram.dot` 95 | 96 | will produce a diagram of all implications between principles that hold in ω-models, along with the weakest open implications (in ω-models). Generally speaking, the more options that are selected, the more information is shown on the diagram; this tends to make it harder to read. 97 | 98 | It would probably be of very limited use to select *all* the options, for instance. 99 | 100 | ## Credits 101 | 102 | The RM Zoo was originally developed by Damir Dzhafarov, inspired by Joseph S. Miller's command-line version of the Computability Menagerie. Recently, the Zoo has been largely rewritten by Eric Astor to improve performance, expand the library of available inference rules, and move to a more maintainable/upgradeable architecture. 103 | 104 | Many people have helped with the RM Zoo, by commenting on the code, contributing facts, suggesting new features, or just expressing their interest. Thanks in particular to David Belanger, Peter Cholak, Stephen Flood, Denis Hirschfeldt, Steffen Lempp, Joe Miller, Antonio Montalbán, Carl Mummert, Ludovic Patey, Sam Sanders, and Ted Slaman. 105 | 106 | ## Contributing 107 | 108 | Contributions and/or feedback are, of course, welcome! If you are comfortable working with GitHub, the best way to contribute is as follows: 109 | 110 | 1. Fork the code. 111 | 2. Create your feature branch: `git checkout -b my-new-feature` 112 | 3. 
Commit your changes: `git commit -am 'Add some feature'` 113 | 4. Push to the branch: `git push origin my-new-feature` 114 | 5. Submit a pull request. 115 | 116 | Otherwise, don't hesitate to send an e-mail or other message. 117 | 118 | ### Results 119 | 120 | The results file is a simple text file containing relations between reverse-mathematical principles. This is then compiled by the Zoo's updater to create its database, which is then used by the Zoo to generate its various outputs (whether DOT files or text responses). 121 | 122 | #### Names 123 | 124 | Principles should be named by **simple plaintext alphanumeric strings** that resemble their normal acronyms in the literature; for example, we use `RT22` for Ramsey's theorem for pairs (and 2 colors). Do not use TeX in the names of principles (as in `RT^2_2` or `\mathsf{RT}^2_2`); this will make the diagrams harder to read, as DOT files have no TeX support, and can sometimes cause errors. 125 | 126 | #### Relations 127 | 128 | Relations between principles are given by using various operators. For instance: 129 | 130 | - `RT22 -> COH` 131 | 132 | indicates an **implication** provable in **RCA0**. By itself, however, this would generate an error; all facts must include a **justification**. To justify this fact, one would instead write: 133 | 134 | - `RT22 -> COH "Mileti (2004) [Claim A.1.3], and independently Jockusch and Lempp"` 135 | 136 | These justifications are used by the Zoo to keep track of why the facts it derives are true, and as such are important for maintaining a usable database. For simplicity, justifications should also be plaintext; if a principle needs to be mentioned, the same acronyms as for relations should be used. To keep the results file clean, please use the justification format: "Author 1, Author 2, and Author 3 (year) \[result citation\]". If possible, citations should be to the authoritative published version of the paper, falling back to an arXiv citation only when the authoritative version is not yet available. 137 | 138 | **Non-implications** (i.e., implications known *not* to be provable in **RCA0**), can be entered similarly, using the operator `-|>`; for example, 139 | 140 | - `RT22+WKL -|> ACA "Seetapun and Slaman (1995) [Theorem 3.1]"` 141 | 142 | However, this result said more than this; Seetapun and Slaman specifically constructed an omega-model of **RT22+WKL** in which **ACA** failed. In general, one can represent implications and non-implications over omega-models by prepending `w` before an operator. Thus, one can more accurately write the previous result as 143 | 144 | - `RT22+WKL w-|> ACA "Seetapun and Slaman (1995) [Theorem 3.1]"` 145 | 146 | and might also write 147 | 148 | - `COH w-> StCOH "Hirschfeldt and Shore (2007) [Proposition 4.4]"` 149 | 150 | to represent this implication which, while not necessarily true in all models of **RCA0**, holds over all omega models. The Zoo is programmed to understand that `->` is stronger than `w->`, and thus that `w-|>` is stronger than `-|>`. 151 | 152 | Furthermore, the Zoo now supports results from the study of computable and Weihrauch reducibilities, using the operators `<=` and ` DNR` and that `COH StCADS` 173 | - `StCADS -> StCOH` 174 | 175 | together indicate that the principles **StCOH** and **StCADS** are **equivalent** over **RCA0**, and will act accordingly. 
For instance, if rendering a diagram, the Zoo will pick one of the two principles to treat as 'primary', in the sense that implications and non-implications will only be shown going to and from the primary principle; this reduces the mess, and keeps the diagram more readable. Of course, the Zoo may occasionally pick the "wrong" primary principle; for instance, we probably want **StCOH** to be considered primary over **StCADS**. Since the Zoo has no way of knowing that on its own, we can include the fact
176 | 
177 | - `StCOH is primary`
178 | 
179 | in our results file, and ensure that the Zoo considers **StCOH** to be the primary principle. (Note that our choice of primary principles is given no justification; in fact, by the standards of the results file, it *cannot* be justified.) The order in which this is done matters. For example, if we switch to thinking about omega models, **StCOH** will be equivalent to **COH**, but we probably want **COH** to be considered primary in this case. Entering
180 | 
181 | - `COH is primary`
182 | 
183 | **earlier** (i.e., "higher up") in the results file will achieve the desired result.
184 | 
185 | Principles can also be declared equivalent by use of dedicated operators, included for convenience. Writing
186 | 
187 | - `StCOH <-> StCADS`
188 | 
189 | will produce the same result as including both of the two separate implications. (**Warning:** prepending a `w` to `<->` does work, but does not merely indicate an equivalence that holds over omega models; it in fact asserts that both halves of the implication hold in omega models. One can use the operator `<=>` in a similar way, subject to the same caveat.)
190 | 
191 | #### Syntactic Forms and Conservation Facts
192 | 
193 | The Zoo also understands **syntactic forms** and **conservation facts** relating reverse-mathematical principles. Specifically, it understands the syntactic forms
194 | 
195 | - `Sig02`, `Pi02`, `Sig03`, `Pi03`, `Sig04`, and `Pi04`: three levels of the arithmetic hierarchy
196 | - `Pi11`, `Pi12`, and `Pi13`: the first three universal levels of the analytic hierarchy
197 | - `uPi03`: Pi03 with a single universally-quantified set parameter; defined as "twiddle-Pi03" in Patey and Yokoyama (preprint)
198 | - `rPi12`: restricted Pi12 statements, as defined in Hirschfeldt and Shore (2007) \[Corollary 2.21\]
199 | 
200 | We can thus enter
201 | 
202 | - `RT22 form rPi12`
203 | - `BSig2 form Pi11`
204 | 
205 | to indicate that the given principles have the given forms. (Note that these statements are **unjustified**.)
206 | 
207 | To indicate that one principle is conservative over another for consequences of a given form (that is to say, the first proves no more consequences of that form than the second), we can add results such as:
208 | 
209 | - `AMT+BSig2 Pi11c BSig2 "Hirschfeldt, Shore, and Slaman (2009) [Corollary 4.5]"`
210 | - `AMT rPi12c RCA "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.15]"`
211 | 
212 | To indicate that one principle is **not** conservative over another, prepend an `n` before the conservation operator. For instance, we might add the result
213 | 
214 | - `RT22 nPi04c RCA "Seetapun and Slaman (1995) [Theorem 3.6]"`
215 | 
216 | Conservation and non-conservation facts must, again, be justified. The Zoo understands the connections between conservation facts and implications, and will use them to extract more relations between the known principles.
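Internally, both the reducibility markers and the syntactic forms listed above are stored as bit flags (see `rmBitmasks.py` in this repository), so that "every weaker form" or "every weaker reducibility" is just a single bitmask. The short sketch below illustrates this; it only uses the helpers defined in `rmBitmasks.py`, and should be run from the RM Zoo directory so that module can be imported.

```
from rmBitmasks import Form, Reduction

# A Pi11 statement is in particular rPi12, Pi12, and Pi13, so all of those
# flags appear in the "weaker than Pi11" mask:
print([f.name for f in Form.list(Form.weaker(Form.Pi11))])
# -> ['Pi11', 'rPi12', 'Pi12', 'Pi13'] (in declaration order of the Form flags)

# An implication provable over RCA0 also holds in every omega-model, so the
# w flag is included among the reductions weaker than RCA:
print(Reduction.isPresent(Reduction.w, Reduction.weaker(Reduction.RCA)))  # True

# fromString is how the parsers turn the textual operators into flags:
print(Reduction.fromString(u'W') is Reduction.W)  # True
print(Form.fromString(u'rPi12') is Form.rPi12)    # True
```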
217 | 218 | #### Compound Principles (i.e., Conjunctions) 219 | 220 | As the reader may have noted above, the Zoo also understands compound principles; that is, principles that are conjunctions of other principles. For instance, if we add 221 | 222 | - `SRT22+COH <-> RT22` 223 | 224 | as a fact in the results file, the Zoo will know that `COH+SRT22` is a compound principle, denoting the conjunction of `COH` and `SRT22`. It will add any component principles to its internal list, and automatically understands the relations between the compound principle and its components. 225 | 226 | #### Organization and Formatting 227 | 228 | Please note that any line in the results file starting with a `#` symbol is ignored, and considered to be a comment for human readers. 229 | 230 | If contributing to the results file, please take note of the organization formatting used therein; we have organized the results by publication, arranged by publication year when possible (with the noted exception of Simpson's "Subsystems of Second-Order Arithmetic" \[also known as SOSOA\], which is listed first). Each publication's results should be preceded by a comment containing a full authoritative citation, including (if at all possible) a URL and DOI for the authoritative published version. 231 | 232 | Contributions to the results file are extremely welcome. For example, if anyone wants to transcribe the relevant results of Simpson's SOSOA into our format, the maintainers would be eternally grateful! (For context, please note that this textbook is over 450 pages long.) 233 | 234 | ## License 235 | 236 | The RM Zoo has been placed under the MIT license; in plain English, you can do whatever you want with it, including redistribution and creation of derivative works, as long as attribution and the appropriate license information remain. For details, please see the LICENSE file. 237 | -------------------------------------------------------------------------------- /rmzoo.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | ################################################################################## 4 | # 5 | # The Reverse Mathematics Zoo 6 | # by Damir Dzhafarov 7 | # - Version 1.0 started August, 2010 8 | # - Version 2.0 started August, 2013 9 | # Revised by Eric Astor 10 | # - Version 3.0 - 29 May 2016 11 | # - Version 4.0 - started 30 May 2016 12 | # - Version 4.1 - optimizations & refactoring, started 2 July 2016 13 | # - Version 4.2 - new forms and reasoning, started 12 July 2016 14 | # - Version 4.3 - changed internal representations, started 21 July 2016 15 | # - Version 4.4 - moved to a shelf database, started 25 July 2016 16 | # - Version 5.0 - clean implementation of inference rules, started 1 August 2016 17 | # - Version 5.1 - reverted from shelf database for cross-platform compatibility, started 16 August 2016 18 | # Documentation and support: http://rmzoo.uconn.edu 19 | # 20 | ################################################################################## 21 | 22 | from __future__ import print_function 23 | 24 | import os, sys 25 | 26 | import itertools 27 | from io import open 28 | from collections import defaultdict 29 | 30 | from version_guard import isString 31 | 32 | import zlib 33 | try: 34 | import cPickle as pickle 35 | except: 36 | import pickle 37 | 38 | from rmupdater import standardizeFact 39 | 40 | def eprint(*args, **kwargs): 41 | print(*args, file=sys.stderr, **kwargs) 42 | 43 | Error = False 44 | def warning(s): 45 | global Error 46 | Error = True 47 | eprint(s) 48 | 49 | def error(s): # Throw exception 50 | raise Exception(s) 51 | 52 | Date = u'16 August 2016' 53 | Version = u'5.1' 54 | DatabaseVersion = u'5.1' 55 | 56 | from rmBitmasks import * 57 | from renderJustification import * 58 | 59 | _FORM_COLOR = {Form.none: "white", 60 | Form.weaker(Form.Pi11): "pink", 61 | Form.weaker(Form.rPi12): "cyan"} 62 | _CONS_COLOR = {Form.none: "white", 63 | Form.weaker(Form.Pi11): "pink", 64 | Form.weaker(Form.rPi12): "cyan"} 65 | 66 | ################################################################################## 67 | # 68 | # GET OPTIONS 69 | # 70 | ################################################################################## 71 | 72 | eprint(u'\nRM Zoo (v{0})'.format(Version)) 73 | 74 | from optparse import OptionParser, OptionGroup 75 | 76 | parser = OptionParser(u'Usage: %prog [options] [database]', version=u'%prog {0} ({1})'.format(Version, Date)) 77 | 78 | parser.set_defaults(implications=False,nonimplications=False,omega=False,onlyprimary=False,weak=False,strong=False,showform=False,conservation=False,add_principles=False) 79 | 80 | parser.add_option('-i', action='store_true', dest='implications', 81 | help=u'Display implications between principles.') 82 | parser.add_option('-n', action='store_true', dest='nonimplications', 83 | help=u'Display non-implications between principles.') 84 | parser.add_option('-w', action='store_true', dest='weak', 85 | help=u'Display weakest non-redundant open implications.') 86 | parser.add_option('-s', action='store_true', dest='strong', 87 | help=u'Display strongest non-redundant open implications.') 88 | parser.add_option('-t', dest='reducibility', default='RCA', 89 | help=u'Display facts relative to REDUCIBILITY-implications.') 90 | parser.add_option('-o', action='store_const', dest='reducibility', const='w', 91 | help=u'Display only facts that hold in omega models.') 92 | parser.add_option('-p', action='store_true', dest='onlyprimary', 93 | help=u'Display only facts about primary principles.') 94 | 95 | 
parser.add_option('-f', action='store_true', dest='showform', 96 | help=u'Indicate syntactic forms of principles.') 97 | parser.add_option('-c', action='store_true', dest='conservation', 98 | help=u'Display known conservation results.') 99 | 100 | parser.add_option('-r', dest='restrict_string', metavar='CLASS', 101 | help=u'Restrict to only the principles in CLASS.') 102 | parser.add_option('--omit', dest='omit_string', metavar='CLASS', 103 | help=u'Omit all principles in CLASS.') 104 | 105 | parser.add_option('-q', dest='query_string', metavar='FACT', 106 | help=u'Show whether FACT is known, and if so, its justification.') 107 | parser.add_option('-F', dest='query_file', metavar='FILE', 108 | help=u'Query whether all facts in FILE are known, and return a list of all unknown facts.') 109 | 110 | parser.add_option('--force', action='store_true', dest='add_principles', 111 | help=u'Allow queries involving novel conjunctions from the database. (WARNING: slow)') 112 | 113 | (options, args) = parser.parse_args() 114 | 115 | Implications = options.implications 116 | NonImplications = options.nonimplications 117 | Weak = options.weak 118 | Strong = options.strong 119 | Reducibility = Reduction.fromString(options.reducibility) 120 | OnlyPrimary = options.onlyprimary 121 | ShowForm = options.showform 122 | Conservation = options.conservation 123 | Restrict = options.restrict_string 124 | Omissions = options.omit_string 125 | if Restrict: 126 | rSet = set() 127 | for p in Restrict.split(): 128 | splitP = p.split(u'+') 129 | setP = set(splitP) 130 | p = u'+'.join(sorted(setP)) 131 | 132 | rSet.add(p) 133 | rSet.update(splitP) 134 | Restrict = rSet 135 | if Omissions: 136 | Omissions = set(Omissions.split()) 137 | Query = options.query_string 138 | QueryFile = options.query_file 139 | AddPrinciples = options.add_principles 140 | 141 | # Give errors if bad options chosen 142 | 143 | if not Implications and not NonImplications and not OnlyPrimary and not Restrict and not Weak and not Strong and not ShowForm and not Conservation and not Query and not QueryFile: 144 | parser.error(u'No options selected.') 145 | if OnlyPrimary: 146 | if not Implications and not NonImplications and not Weak and not Strong and not ShowForm and not Conservation: 147 | parser.error(u'Option -p only works if one of -i, -n, -w, -s, -f, or -c is selected.') 148 | if Restrict or Omissions: 149 | if Restrict and Omissions: 150 | parser.error(u'Options -r and --omit are incompatible.') 151 | if not Implications and not NonImplications and not Weak and not Strong and not ShowForm and not Conservation: 152 | parser.error(u'Options -r and --omit only work if one of -i, -n, -w, -s, -f, or -c is selected.') 153 | if Query: 154 | if Implications or NonImplications or Weak or Strong or ShowForm or Conservation or Restrict or OnlyPrimary or QueryFile: 155 | parser.error(u'Option -q does not work with any other option (except --force).') 156 | if QueryFile: 157 | if Implications or NonImplications or Weak or Strong or ShowForm or Conservation or Restrict or OnlyPrimary or Query: 158 | parser.error(u'Option -F does not work with any other option (except --force).') 159 | 160 | if len(args) > 1: 161 | parser.error(u'Too many arguments.') 162 | if len(args) > 0: 163 | databaseTitle = args[0] 164 | else: 165 | eprint(u'No database title specified; defaulting to "database".') 166 | databaseTitle = 'database' 167 | 168 | if os.path.splitext(databaseTitle)[1] == '': 169 | databaseName = databaseTitle + os.extsep + 'dat' 170 | else: 171 | 
databaseName = databaseTitle 172 | 173 | 174 | ################################################################################## 175 | # 176 | # IMPORT AND ORGANIZE DATA 177 | # 178 | ################################################################################## 179 | 180 | eprint(u'Importing and organizing data...') 181 | 182 | class VersionError(Exception): 183 | def __init__(self, targetVersion, actualVersion): 184 | super(VersionError, self).__init__(u'Version mismatch: found v{0}, targeting v{1}'.format(actualVersion, targetVersion)) 185 | 186 | principles = {} 187 | implies, notImplies = {}, {} 188 | conservative, nonConservative = {}, {} 189 | form = {} 190 | primary, primaryIndex = {}, {} 191 | justify = {} 192 | def getDatabase(): 193 | return {'version': DatabaseVersion, 194 | 'principles': principles, 195 | 'implication': (implies, notImplies), 196 | 'conservation': (conservative, nonConservative), 197 | 'form': form, 198 | 'primary': (primary, primaryIndex), 199 | 'justify': justify} 200 | 201 | equivalent = defaultdict(set) 202 | def setDatabase(database): 203 | if database['version'] != DatabaseVersion: 204 | raise VersionError(DatabaseVersion, database['version']) 205 | 206 | global principles 207 | principles = database['principles'] 208 | 209 | global implies, notImplies 210 | implies, notImplies = database['implication'] 211 | 212 | global equivalent 213 | for a in principles: 214 | for b in principles: 215 | for r in Reduction.list(implies[(a,b)] & implies[(b,a)]): 216 | equivalent[(a, r)].add(b) 217 | 218 | global conservative, nonConservative 219 | conservative, nonConservative = database['conservation'] 220 | 221 | global form 222 | form = database['form'] 223 | 224 | global primary, primaryIndex 225 | primary, primaryIndex = database['primary'] 226 | 227 | global justify 228 | justify = database['justify'] 229 | 230 | def loadDatabase(databaseName, quiet=False): 231 | with open(databaseName, mode='rb') as databaseFile: 232 | compressedDatabase = databaseFile.read() 233 | pickledDatabase = zlib.decompress(compressedDatabase) 234 | setDatabase(pickle.loads(pickledDatabase)) 235 | loadDatabase(databaseName) 236 | 237 | def knownEquivalent(a, reduction, justification=True): 238 | if a in principles: 239 | if justification: 240 | return (a, None) 241 | else: 242 | return a 243 | 244 | splitA = a.split(u'+') 245 | if any((p not in principles) for p in splitA): 246 | if justification: 247 | return (None, None) 248 | else: 249 | return None 250 | 251 | aPrime = None 252 | for equiv in itertools.product(*(equivalent[(p, reduction)] for p in splitA)): 253 | aPrime = u'+'.join(sorted(set(equiv))) 254 | if aPrime in principles: 255 | if justification: 256 | equivJst = tuple((p, (reduction, u'<->'), q) for (p,q) in zip(splitA, equiv) if p != q) 257 | return (aPrime, equivJst) 258 | else: 259 | return aPrime 260 | 261 | if justification: 262 | return (None, None) 263 | else: 264 | return None 265 | 266 | def queryDatabase(a, op, b, justification=True): 267 | if op[1] in (u'c', u'nc'): 268 | reduction = Reduction.RCA 269 | else: 270 | reduction = op[0] 271 | 272 | if justification: 273 | aPrime, aJst = knownEquivalent(a, reduction, justification) 274 | bPrime, bJst = knownEquivalent(b, reduction, justification) 275 | if aJst is not None: 276 | justify[(a, (reduction, u'<->'), aPrime)] = aJst 277 | if bJst is not None: 278 | justify[(b, (reduction, u'<->'), bPrime)] = bJst 279 | else: 280 | aPrime = knownEquivalent(a, reduction, justification) 281 | bPrime = knownEquivalent(b, 
reduction, justification) 282 | 283 | aKnown = aPrime is not None 284 | bKnown = bPrime is not None 285 | 286 | aConjunct = (not aKnown) and all((p in principles) for p in a.split(u'+')) 287 | bConjunct = (not bKnown) and all((p in principles) for p in b.split(u'+')) 288 | 289 | s = u'' 290 | if not aKnown and not bKnown: 291 | s += u'{0} and {1} are unknown principles.'.format(a, b) 292 | elif not aKnown: 293 | s += u'{0} is an unknown principle.'.format(a) 294 | elif not bKnown: 295 | s += u'{0} is an unknown principle.'.format(b) 296 | if aConjunct and bConjunct: 297 | s += u'\n\tHOWEVER: {0} and {1} are conjunctions of known principles; try running with --force.'.format(a, b) 298 | elif aConjunct and bKnown: 299 | s += u'\n\tHOWEVER: {0} is a conjunction of known principles; try running with --force.'.format(a) 300 | elif bConjunct and aKnown: 301 | s += u'\n\tHOWEVER: {0} is a conjunction of known principles; try running with --force.'.format(b) 302 | if len(s) > 0: error(s) 303 | 304 | if (aPrime, op, bPrime) in justify: 305 | if not justification: 306 | return True 307 | else: 308 | r = [] 309 | if a != aPrime or b != bPrime: 310 | r.append(u'\n') 311 | if a != aPrime: 312 | r.append(u'NOTE: {0} is not a known principle, but is equivalent to {1}\n'.format(a, aPrime)) 313 | if b != bPrime: 314 | r.append(u'NOTE: {0} is not a known principle, but is equivalent to {1}\n'.format(b, bPrime)) 315 | 316 | if a != aPrime: 317 | r.append(printJustification((a, (reduction, u'<->'), aPrime), justify)) 318 | if b != bPrime: 319 | r.append(printJustification((b, (reduction, u'<->'), bPrime), justify)) 320 | r.append(printJustification((aPrime, op, bPrime), justify)) 321 | return u''.join(r) 322 | else: 323 | return False 324 | 325 | ################################################################################## 326 | # 327 | # IF RESTRICT OR QUERY: VALIDATE CLASS 328 | # 329 | ################################################################################## 330 | 331 | if Restrict: 332 | 333 | for a in Restrict: # Give warnings if CLASS is not a subset of principles 334 | if a not in principles: 335 | error(+a+u' is not in the database.') 336 | 337 | if Omissions: 338 | Restrict = principles - Omissions 339 | 340 | ################################################################################## 341 | # 342 | # IF QUERY: GIVE ANSWER 343 | # 344 | ################################################################################## 345 | 346 | from pyparsing import * 347 | 348 | name = Word( alphas+"_+^{}\\$", alphanums+"_+^{}$\\") 349 | 350 | _reductionName = NoMatch() 351 | for r in Reduction: 352 | if r != Reduction.none: 353 | _reductionName |= Literal(r.name) 354 | for r in Reduction.alias: 355 | if r != u'': 356 | _reductionName |= Literal(r) 357 | _reductionType = _reductionName.setParseAction(lambda s,l,t: [Reduction.fromString(t[0])]) 358 | reductionType = Optional(_reductionType, default=Reduction.RCA) 359 | postfixReductionType = Optional(Suppress(Literal("_")) + _reductionType, default=Reduction.RCA) 360 | 361 | implication = (reductionType + Literal("->")) | (Literal("=>") + postfixReductionType).setParseAction(lambda s,l,t: [t[1], "->"]) 362 | nonImplication = (reductionType + Literal("-|>")) | (Literal("=/>") + postfixReductionType).setParseAction(lambda s,l,t: [t[1], "-|>"]) 363 | equivalence = (reductionType + Literal("<->")) | (Literal("<=>") + postfixReductionType).setParseAction(lambda s,l,t: [t[1], "<->"]) 364 | 365 | reduction = (Literal("<=") + 
postfixReductionType).setParseAction(lambda s,l,t: [t[1], "<="]) 366 | nonReduction = (Literal("': 417 | opp = (op[0], u'-|>') 418 | elif op[1] == u'-|>': 419 | opp = (op[0], u'->') 420 | elif op[1] == u'c': 421 | opp = (op[0], u'nc') 422 | elif op[1] == u'nc': 423 | opp = (op[0], u'c') 424 | 425 | if opp is not None: 426 | jst = queryDatabase(a, opp, b) 427 | if jst: 428 | print(u'CONTRADICTING fact known! Justification for the fact "{0}":\n{1}'.format(printFact(a, opp, b), jst)) 429 | if op[1] == u'<->': 430 | opp = (op[0], u'-|>') 431 | jst = queryDatabase(a, opp, b) 432 | if jst: 433 | print(u'CONTRADICTING fact known! Justification for the fact "{0}":\n{1}'.format(printFact(a, opp, b), jst)) 434 | else: 435 | jst = queryDatabase(b, opp, a) 436 | if jst: 437 | print(u'CONTRADICTING fact known! Justification for the fact "{0}":\n{1}'.format(printFact(b, opp, a), jst)) 438 | 439 | if QueryFile: 440 | parenth = Literal('"') 441 | justification = QuotedString('"""',multiline=True) | quotedString.setParseAction(removeQuotes) 442 | 443 | fact = name + ((Group(operator) + name + Suppress(Optional(justification))) | (Literal('form') + formType) | (Literal('is') + Literal('primary'))) 444 | 445 | queries = [] 446 | with open(QueryFile, encoding='utf-8') as f: 447 | for q in f.readlines(): 448 | q = q.strip() 449 | if len(q) == 0 or q[0] == u'#': continue 450 | 451 | Q = fact.parseString(q) 452 | if Q[1] == u'is' and Q[2] == u'primary': continue 453 | 454 | a,op,b = Q 455 | if not isString(op): 456 | op = tuple(op) 457 | a,op,b = standardizeFact(a, op, b) 458 | 459 | queries.append((a, op, b, q)) 460 | 461 | if AddPrinciples: 462 | newPrinciples = set() 463 | unknownPrinciples = set() 464 | for (a, op, b, q) in queries: 465 | unknown = False 466 | 467 | Q = a.split(u'+') 468 | if op != u'form': 469 | Q.extend(b.split(u'+')) 470 | for p in Q: 471 | if p not in principles: 472 | unknownPrinciples.add(p) 473 | unknown = True 474 | if not unknown: 475 | if a not in principles: newPrinciples.add(a) 476 | if op != u'form' and b not in principles: newPrinciples.add(b) 477 | 478 | if len(unknownPrinciples) > 0: 479 | warning(u'Unknown principles: {0}\n'.format(u', '.join(sorted(unknownPrinciples)))) 480 | if len(newPrinciples) > 0: 481 | eprint(u'Adding {0:,d} new principles...'.format(len(newPrinciples))) 482 | import rmupdater 483 | rmupdater.setDatabase(getDatabase()) 484 | for p in newPrinciples: 485 | rmupdater.addPrinciple(p) 486 | rmupdater.principlesList = sorted(rmupdater.principles) 487 | rmupdater.deriveInferences(quiet=False) 488 | setDatabase(rmupdater.getDatabase()) 489 | 490 | for (a, op, b, q) in queries: 491 | s = u'' 492 | known = False 493 | if op == u'form': 494 | known = Form.isPresent(b, form[a]) 495 | else: 496 | try: 497 | known = queryDatabase(a, op, b, justification=False) 498 | except Exception as e: 499 | s += u'\n' + str(e) 500 | 501 | if not known: 502 | s += u'\nUnknown fact: ' + q 503 | 504 | if len(s) > 0: 505 | warning(s) 506 | eprint(u'\nFinished.') 507 | 508 | ################################################################################## 509 | # 510 | # IF RESTRICT: DELETE PRINCIPLES NOT IN CLASS 511 | # 512 | ################################################################################## 513 | 514 | if Restrict: 515 | principles &= Restrict 516 | 517 | ################################################################################## 518 | # 519 | # IF DIAGRAM: REMOVE REDUNDANT IMPLICATIONS AND NON-IMPLICATIONS 520 | # 521 | 
################################################################################## 522 | 523 | if Implications or NonImplications or Weak or Strong: 524 | 525 | eprint(u'Removing redundant facts for clarity...') 526 | 527 | # Create print versions of functions 528 | 529 | simpleImplies = defaultdict(bool) 530 | printImplies = defaultdict(bool) 531 | 532 | simpleNotImplies = defaultdict(bool) 533 | printNotImplies = defaultdict(bool) 534 | 535 | equivalent = defaultdict(bool) 536 | 537 | simpleConservative = defaultdict(noForm) 538 | printConservative = defaultdict(noForm) 539 | 540 | printWeakOpen = defaultdict(bool) 541 | printStrongOpen = defaultdict(bool) 542 | 543 | for a in principles: 544 | for b in principles: 545 | if a == b: # Remove self-relations to not confuse DOT reader 546 | continue 547 | 548 | simpleImplies[(a,b)] = Reduction.isPresent(Reducibility, implies[(a,b)]) 549 | printImplies[(a,b)] = simpleImplies[(a,b)] 550 | 551 | simpleNotImplies[(a,b)] = Reduction.isPresent(Reducibility, notImplies[(a,b)]) 552 | printNotImplies[(a,b)] = simpleNotImplies[(a,b)] 553 | 554 | if simpleImplies[(a,b)] and simpleImplies[(b,a)]: 555 | equivalent[(a,b)] = True 556 | equivalent[(b,a)] = True 557 | 558 | simpleConservative[(a,b)] = conservative[(a,b)] 559 | printConservative[(a,b)] = simpleConservative[(a,b)] 560 | 561 | # Assign primaries and make them unique 562 | 563 | for a in sorted(principles): 564 | currentPrimary = a 565 | found = False 566 | toRemove = set() 567 | for b in primaryIndex: 568 | if currentPrimary == b: 569 | found = True 570 | continue 571 | if equivalent[(currentPrimary,b)]: 572 | if found: 573 | toRemove.add(b) 574 | else: 575 | if currentPrimary in primary: 576 | toRemove.add(currentPrimary) 577 | currentPrimary = b 578 | found = True 579 | if currentPrimary not in primary: 580 | primary.add(currentPrimary) 581 | primaryIndex.append(currentPrimary) 582 | for x in toRemove: 583 | primaryIndex.remove(x) 584 | primary.difference_update(toRemove) 585 | 586 | for a in principles: # Remove facts involving non-primary principles 587 | if a not in primary: 588 | for b in principles: 589 | printImplies[(a,b)] = False 590 | printImplies[(b,a)] = False 591 | 592 | printNotImplies[(a,b)] = False 593 | printNotImplies[(b,a)] = False 594 | 595 | printConservative[(a,b)] = Form.none 596 | 597 | # Remove redundant implications 598 | 599 | for a in primary: 600 | for b in primary: 601 | for c in primary: # Remove implications obtained by transitivity 602 | if simpleImplies[(b,a)] and simpleImplies[(a,c)]: 603 | printImplies[(b,c)] = False 604 | 605 | # Remove redundant non-implications 606 | 607 | for a in primary: 608 | for b in primary: 609 | if b == a: continue 610 | for c in primary: 611 | if c == a or c == b: continue 612 | 613 | if simpleNotImplies[(a,c)] and simpleImplies[(b,c)]: # If a -|> c, but b -> c, then a -|> b. 614 | printNotImplies[(a,b)] = False 615 | if simpleImplies[(c,a)] and simpleNotImplies[(c,b)]: # If c -> a, but c -|> b, then a -|> b. 
616 | printNotImplies[(a,b)] = False 617 | 618 | # Remove redundant conservation facts 619 | 620 | for a in primary: # Remove conservation results obtained by transitivity 621 | for b in primary: 622 | if b == a: continue 623 | for c in primary: 624 | if c == a or c == b: continue 625 | 626 | if simpleImplies[(a,b)]: 627 | printConservative[(b,c)] &= ~simpleConservative[(a,c)] 628 | if simpleImplies[(b,c)]: 629 | printConservative[(a,b)] &= ~simpleConservative[(a,c)] 630 | 631 | # Generate open implications 632 | 633 | for a in primary: 634 | for b in primary: 635 | if b == a: continue 636 | 637 | if not simpleImplies[(a,b)] and not simpleNotImplies[(a,b)]: 638 | printWeakOpen[(a,b)] = True 639 | printStrongOpen[(a,b)] = True 640 | 641 | for a in primary: 642 | for b in primary: 643 | if b == a: continue 644 | for c in primary: 645 | if c == a or c == b: continue 646 | 647 | if simpleImplies[(c,a)] and not simpleImplies[(c,b)] and not simpleNotImplies[(c,b)]: # c -> a, c ? b 648 | printWeakOpen[(a,b)] = False 649 | if simpleImplies[(c,a)] and not simpleImplies[(b,a)] and not simpleNotImplies[(b,a)]: # c -> a, b ? a 650 | printWeakOpen[(b,c)] = False 651 | 652 | if simpleImplies[(a,c)] and not simpleImplies[(c,b)] and not simpleNotImplies[(c,b)]: # a -> c, c ? b 653 | printStrongOpen[(a,b)] = False 654 | if simpleImplies[(a,c)] and not simpleImplies[(b,a)] and not simpleNotImplies[(b,a)]: # a -> c, b ? a 655 | printStrongOpen[(b,c)] = False 656 | 657 | # Find all equivalent principles 658 | 659 | equivSet = defaultdict(set) 660 | for a in primary: 661 | for b in principles: 662 | if equivalent[(a,b)]: 663 | equivSet[a].add(b) 664 | 665 | ################################################################################## 666 | # 667 | # IF DIAGRAM: PRINT OUT THE DOT FILE 668 | # 669 | ################################################################################## 670 | 671 | if Implications or NonImplications or Weak or Strong or ShowForm or Conservation: 672 | 673 | eprint(u'Printing DOT file...') 674 | eprint("\tDiagram contains {0} non-equivalent principles.".format(len(primary))) 675 | 676 | print("""// 677 | // RM Zoo (v""" + Version + """) 678 | // 679 | 680 | digraph G { 681 | 682 | graph [ 683 | rankdir = TB // put stronger principles higher up 684 | ranksep = 1.5 685 | ] 686 | 687 | // 688 | // Node Styles 689 | // 690 | 691 | node [shape=none,color=white]; 692 | 693 | // 694 | // Data 695 | //""") 696 | 697 | if Implications: 698 | 699 | for a in primary: 700 | for b in primary: 701 | if printImplies[(a,b)]: 702 | style = [] 703 | if printNotImplies[(b,a)] and not NonImplications: 704 | style.append(u'color = "black:white:black"') 705 | if len(equivSet[a]) > 0 and not OnlyPrimary: 706 | style.append(u'minlen = 2') 707 | s = u'' 708 | if len(style) > 0: 709 | s = u' [{0}]'.format(u', '.join(style)) 710 | print(u'" {0} " -> " {1} "{2}'.format(a,b,s)) 711 | 712 | if NonImplications: 713 | 714 | for a in primary: 715 | for b in primary: 716 | if printNotImplies[(a,b)]: 717 | print(u'" {0} " -> " {1} " [color = "red"]'.format(a,b)) 718 | 719 | if not OnlyPrimary: 720 | for a in primary: 721 | for b in equivSet[a]: 722 | print(u'" {0} " -> " {1} " [dir = both]'.format(a,b)) 723 | 724 | if Weak: 725 | for a in primary: 726 | for b in primary: 727 | if printWeakOpen[(a,b)]: 728 | print(u'" {0} " -> " {1} " [color = "green"]'.format(a,b)) 729 | 730 | if Strong: 731 | for a in primary: 732 | for b in primary: 733 | if printStrongOpen[(a,b)]: 734 | print(u'" {0} " -> " {1} " [color = 
"orange"]'.format(a,b)) 735 | 736 | if ShowForm: 737 | for a in principles: 738 | if a in form: 739 | if form[a] != Form.none: 740 | print(u'" {0} " [shape=box, style=filled, fillcolor={1}]'.format(a, _FORM_COLOR[form[a]])) 741 | 742 | 743 | if Conservation: 744 | for a in primary: 745 | for b in primary: 746 | if a == b: continue 747 | 748 | if printConservative[(a,b)] != Form.none: 749 | print(u'" {0} " -> " {1} " [color = "{2}"]'.format(a,b, _CONS_COLOR[printConservative[(a,b)]])) 750 | 751 | print(u'}') 752 | eprint(u'Finished.') 753 | 754 | -------------------------------------------------------------------------------- /rmupdater.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | ################################################################################## 4 | # 5 | # The Reverse Mathematics Zoo Updater 6 | # by Damir Dzhafarov 7 | # - Version 1.0 started August, 2010 8 | # - Version 2.0 started August, 2013 9 | # Revised by Eric Astor 10 | # - Version 3.0 - 29 May 2016 11 | # - Version 4.0 - started 30 May 2016 12 | # - Version 4.1 - optimizations & refactoring, started 2 July 2016 13 | # - Version 4.2 - new forms and reasoning, started 12 July 2016 14 | # - Version 4.3 - changed internal representations, started 21 July 2016 15 | # - Version 4.4 - moved to a shelf database, started 25 July 2016 16 | # - Version 5.0 - clean implementation of inference rules, started 1 August 2016 17 | # - Version 5.1 - reverted from shelf database for cross-platform compatibility, started 16 August 2016 18 | # Documentation and support: http://rmzoo.uconn.edu 19 | # 20 | ################################################################################## 21 | 22 | from __future__ import print_function 23 | 24 | import itertools 25 | import sys 26 | import 27 | 28 | from io import open 29 | from collections import defaultdict 30 | 31 | from version_guard import isString, lru_cache 32 | 33 | import zlib 34 | try: 35 | import cPickle as pickle 36 | except: 37 | import pickle 38 | 39 | def eprint(*args, **kwargs): 40 | print(*args, file=sys.stderr, **kwargs) 41 | 42 | Date = u'16 August 2016' 43 | Version = u'5.1' 44 | DatabaseVersion = u'5.1' 45 | 46 | version, versionPoint = sys.version_info[0:2] 47 | if version >= 3 and versionPoint >= 3: 48 | timekeeper = time.perf_counter 49 | else: 50 | timekeeper = time.clock 51 | 52 | from rmBitmasks import * 53 | from renderJustification import * 54 | 55 | RCAprinciple = u'RCA' 56 | 57 | principlesList = [RCAprinciple] 58 | principles = set(principlesList) 59 | 60 | def addPrinciple(a): 61 | setA = set(a.split(u'+')) 62 | a = u'+'.join(sorted(setA)) 63 | principles.add(a) 64 | principles.update(setA) 65 | return a 66 | 67 | conjunction = {} 68 | def joinPrinciples(a, b): 69 | try: 70 | return conjunction[a,b] 71 | except KeyError: 72 | p = u'+'.join(sorted(set(a.split(u'+')) | set(b.split(u'+')))) 73 | if p not in principles: 74 | p = None 75 | conjunction[a,b] = p 76 | conjunction[b,a] = p 77 | return p 78 | 79 | equivalent = defaultdict(noReduction) 80 | implies = defaultdict(noReduction) 81 | notImplies = defaultdict(noReduction) 82 | 83 | def addEquivalent(a,reduction,b): 84 | equivalent[a,b] |= Reduction.weaker(reduction) 85 | 86 | def addReduction(a,reduction,b): 87 | implies[a,b] |= Reduction.weaker(reduction) 88 | 89 | def addNonReduction(a,reduction,b): 90 | notImplies[a,b] |= Reduction.stronger(reduction) 91 | 92 | conservative = defaultdict(noForm) 93 | nonConservative = 
defaultdict(noForm) 94 | 95 | def addConservative(a,frm,b): 96 | conservative[a,b] |= Form.stronger(frm) 97 | 98 | def addNonConservative(a,frm,b): 99 | nonConservative[a,b] |= Form.weaker(frm) 100 | 101 | form = defaultdict(noForm) 102 | 103 | primary = set() 104 | primaryIndex = [] 105 | 106 | def addForm(a, frm): 107 | form[a] |= Form.weaker(frm) 108 | 109 | def addPrimary(a): 110 | primary.add(a) 111 | primaryIndex.append(a) 112 | 113 | justify = {} 114 | justComplexity = {} 115 | 116 | def updateJustification(fact, jst, cplx): 117 | try: 118 | if cplx >= justComplexity[fact]: 119 | return False 120 | except KeyError: 121 | pass 122 | justify[fact] = jst 123 | justComplexity[fact] = cplx 124 | return True 125 | 126 | def unoptimizedJustification(fact, jst, cplx): 127 | if fact in justify: 128 | return False 129 | else: 130 | justify[fact] = jst 131 | return True 132 | 133 | class UnjustifiedFactError(Exception): 134 | def __init__(self, a, op, b): 135 | super(UnjustifiedFactError, self).__init__(u'The fact "{0}" is not justified.'.format(printFact(a, op, b))) 136 | 137 | def addUnjustified(a, op, b): 138 | raise UnjustifiedFactError(a, op, b) 139 | 140 | class ContradictionError(Exception): 141 | def __init__(self, fact1, fact2): 142 | super(ContradictionError, self).__init__(u'The following facts are contradictory:\n\n' + 143 | printJustification(fact1, justify) + u'\n\n' + 144 | printJustification(fact2, justify)) 145 | 146 | # Noted side-effects: 147 | # Changing '<->' can affect '->' 148 | # Changing '->' can affect 'c' and '<->' 149 | # Changing 'c' can affect '->' 150 | # Changing '-|>' can affect 'nc' 151 | # Changing 'nc' can affect '-|>' 152 | def addFact(a, op, b, jst, cplx): 153 | fact = (a, op, b) 154 | if not updateJustification(fact, jst, cplx): 155 | return False 156 | opCtx,opCore = op 157 | 158 | ref = (fact,) 159 | refCplx = 1 + cplx 160 | 161 | if opCore == u'<->': # equivalence 162 | # Symmetry: 163 | # IF (a X<-> b), THEN (b X<-> a). 164 | updateJustification((b, op, a), jst, cplx) 165 | 166 | for x in Reduction.list(Reduction.weaker(opCtx)): 167 | newOp = (x, u'<->') 168 | 169 | addEquivalent(a, x, b) 170 | updateJustification((a, newOp, b), ref, refCplx) 171 | 172 | # Symmetry: 173 | # IF (a X<-> b), THEN (b X<-> a). 174 | addEquivalent(b, x, a) 175 | updateJustification((b, newOp, a), ref, refCplx) 176 | 177 | # Definition of equivalence: 178 | # IF (a X<-> b), THEN (a X-> b) AND (b X-> a). 179 | impliesOp = (opCtx, u'->') 180 | addFact(a, impliesOp, b, (fact,), refCplx) 181 | addFact(b, impliesOp, a, (fact,), refCplx) 182 | elif opCore == u'->': # implication 183 | for x in Reduction.list(Reduction.weaker(opCtx)): 184 | addReduction(a, x, b) 185 | updateJustification((a, (x, u'->'), b), ref, refCplx) 186 | 187 | if Reduction.isPresent(x, notImplies[a,b]): 188 | raise ContradictionError((a, (x, u'->'), b), (a, (x, u'-|>'), b)) 189 | 190 | if x == Reduction.RCA: 191 | if x == opCtx: 192 | newRef = ref 193 | newRefCplx = refCplx 194 | else: 195 | newRef = ((a, (x, u'->'), b),) 196 | newRefCplx = 1 + refCplx 197 | 198 | # Trivial conservation: 199 | # IF (a RCA-> b), THEN (b Fc a). 200 | for f in Form: 201 | if f != Form.none: 202 | addFact(b, (f, u'c'), a, newRef, newRefCplx) 203 | 204 | # Definition of conjunction (special case): 205 | # IF (a X-> b), THEN (a X<-> a+b). 
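# (Illustrative gloss on the rule above; the principle names are hypothetical.)
# joinPrinciples(a, b) returns the conjunction "a+b" only when that conjunction is
# already a principle in the database, and None otherwise.  So if "A X-> B" holds
# and "A+B" is a known principle, this step records "A X<-> A+B": A implies each
# conjunct of A+B, and A+B implies A by definition of conjunction.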
206 | ab = joinPrinciples(a,b) 207 | if ab is not None: 208 | addFact(a, (opCtx, u'<->'), ab, ref, refCplx) 209 | elif opCore == u'-|>': # non-implication 210 | for x in Reduction.list(Reduction.stronger(opCtx)): 211 | addNonReduction(a, x, b) 212 | updateJustification((a, (x, u'-|>'), b), ref, refCplx) 213 | 214 | if Reduction.isPresent(x, implies[a,b]): 215 | raise ContradictionError((a, (x, u'-|>'), b), (a, (x, u'->'), b)) 216 | 217 | if x == Reduction.RCA: 218 | if x == opCtx: 219 | newFact = fact 220 | newCplx = 1 + refCplx 221 | else: 222 | newFact = (a, (x, u'-|>'), b) 223 | newCplx = 2 + refCplx 224 | 225 | # Definition of non-conservation (special case): 226 | # IF (a RCA-|> b) AND (b form F), THEN (b nFc a). 227 | for f in Form.list(form[b]): 228 | addFact(b, (f, u'nc'), a, (newFact, (b, u'form', f)), newCplx) 229 | elif opCore == u'c': # conservation 230 | for f in Form.list(Form.stronger(opCtx)): 231 | newFact = (a, (f, u'c'), b) 232 | 233 | addConservative(a, f, b) 234 | updateJustification(newFact, ref, refCplx) 235 | 236 | if Form.isPresent(f, nonConservative[a,b]): 237 | raise ContradictionError((a, (f, u'c'), b), (a, (f, u'nc'), b)) 238 | 239 | # Definition of conservation (special case): 240 | # IF (a Fc b) AND (a form F), THEN (b RCA-> a). 241 | if Form.isPresent(f, form[a]): 242 | if f == opCtx: 243 | newCplx = 1 + refCplx 244 | else: 245 | newCplx = 2 + refCplx 246 | 247 | addFact(b, (Reduction.RCA, u'->'), a, (newFact, (a, u'form', f)), newCplx) 248 | elif opCore == u'nc': # non-conservation 249 | for f in Form.list(Form.weaker(opCtx)): 250 | addNonConservative(a, f, b) 251 | updateJustification((a, (f, u'nc'), b), ref, refCplx) 252 | 253 | if Form.isPresent(f, conservative[a,b]): 254 | raise ContradictionError((a, (f, u'nc'), b), (a, (f, u'c'), b)) 255 | 256 | # Trivial conservation (contrapositive): 257 | # IF (a nFc b), THEN (b RCA-|> a). 
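# (Illustrative gloss: this is the contrapositive of the "trivial conservation"
# rule in the '->' branch above -- if (b RCA-> a) held, then a would be
# F-conservative over b, contradicting (a nFc b); hence b cannot RCA-imply a.)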
258 | addFact(b, (Reduction.RCA, u'-|>'), a, ref, refCplx) 259 | else: 260 | raise ValueError(u'Unrecognized operator: ' + opCore) 261 | 262 | return True 263 | 264 | def standardizePrinciple(a): 265 | return u'+'.join(sorted(set(a.split(u'+')))) 266 | def standardizeFact(a, op, b): 267 | a = standardizePrinciple(a) 268 | if op != u'form': 269 | b = standardizePrinciple(b) 270 | if op[1] == u'<=': 271 | op = (op[0], u'->') 272 | a,b = b,a 273 | elif op[1] == u'') 275 | a,b = b,a 276 | return a, op, b 277 | 278 | from pyparsing import * 279 | def parseResults(resultsString, quiet=False): 280 | start = timekeeper() 281 | if not quiet: eprint(u'Parsing results...') 282 | # Name parsed strings 283 | name = Word( alphas+"_+^{}\\$", alphanums+"_+^{}$\\").setParseAction(lambda s,l,t: addPrinciple(t[0])) 284 | 285 | parenth = Literal('"') 286 | justification = QuotedString('"""',multiline=True) | quotedString.setParseAction(removeQuotes) 287 | 288 | _reductionName = NoMatch() 289 | for r in Reduction: 290 | if r != Reduction.none: 291 | _reductionName |= Literal(r.name) 292 | for r in Reduction.alias: 293 | if r != u'': 294 | _reductionName |= Literal(r) 295 | _reductionType = _reductionName.setParseAction(lambda s,l,t: [Reduction.fromString(t[0])]) 296 | reductionType = Optional(_reductionType, default=Reduction.RCA) 297 | postfixReductionType = Optional(Suppress(Literal("_")) + _reductionType, default=Reduction.RCA) 298 | 299 | implication = (reductionType + Literal("->")) | (Literal("=>") + postfixReductionType).setParseAction(lambda s,l,t: [t[1], "->"]) 300 | nonImplication = (reductionType + Literal("-|>")) | (Literal("=/>") + postfixReductionType).setParseAction(lambda s,l,t: [t[1], "-|>"]) 301 | equivalence = (reductionType + Literal("<->")) | (Literal("<=>") + postfixReductionType).setParseAction(lambda s,l,t: [t[1], "<->"]) 302 | 303 | reduction = (Literal("<=") + postfixReductionType).setParseAction(lambda s,l,t: [t[1], "<="]) 304 | nonReduction = (Literal("', '->', and 'c' 347 | def addReflexivities(): 348 | for a in principlesList: 349 | for x in Reduction: 350 | if x == Reduction.none: continue 351 | 352 | # (a X-> a) 353 | addFact(a, (x, u'->'), a, u'reflexivity', 1) 354 | 355 | # (a X<-> a) 356 | addFact(a, (x, u'<->'), a, u'reflexivity', 1) 357 | 358 | for f in Form: 359 | if f == Form.none: continue 360 | 361 | # (a Fc a) 362 | addFact(a, (f, u'c'), a, u'reflexivity', 1) 363 | 364 | # General fact; uses nothing, affects '->' 365 | def addRCABottom(): 366 | # (a X-> RCA) 367 | for a in principlesList: 368 | for x in Reduction: 369 | if x == Reduction.none: continue 370 | 371 | addFact(a, (x, u'->'), RCAprinciple, u'', 1) 372 | 373 | # General fact; uses nothing, affects '->' 374 | def definitionOfConjunction(): 375 | # IF (a == b+...), THEN (a X-> b). 
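# (Illustrative gloss; principle names are hypothetical.)  A compound principle
# implies every known principle assembled from a subset of its conjuncts, under
# every reducibility: e.g. "A+B+C X-> A+B" and "A+B+C X-> C" for each X, because
# split("A+B") and split("C") are subsets of split("A+B+C").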
376 | for a in principlesList: 377 | splitA = set(a.split(u'+')) 378 | if len(splitA) == 1: continue 379 | 380 | for b in principlesList: 381 | if b == a: continue 382 | 383 | splitB = set(b.split(u'+')) 384 | if splitB <= splitA: 385 | for x in Reduction: 386 | if x == Reduction.none: continue 387 | 388 | addFact(a, (x, u'->'), b, u'', 1) 389 | 390 | # Uses '->', affects '<->' 391 | def definitionOfEquivalence(): 392 | #a X<-> b 393 | #WHEN 394 | # (a X-> b) AND (b X-> a) 395 | 396 | r = False 397 | for a,b in itertools.combinations(principlesList, 2): 398 | equiv = implies[a,b] & implies[b,a] 399 | 400 | if equiv != Reduction.none: 401 | for x in Reduction.list(equiv): 402 | imp = (x, u'->') 403 | aImpB = (a, imp, b) 404 | bImpA = (b, imp, a) 405 | 406 | r |= addFact(a, (x, u'<->'), b, 407 | (aImpB, bImpA), 1 + justComplexity[aImpB] + justComplexity[bImpA]) 408 | return r 409 | 410 | # Uses array, affects array 411 | def transitiveClosure(array, opName, clsCtx): 412 | # Complete (current) transitive closure of array, using Floyd-Warshall 413 | 414 | r = False 415 | for c in principlesList: 416 | for a in principlesList: 417 | if a == c: continue 418 | 419 | acRelation = array[a,c] 420 | if acRelation == clsCtx.none: continue 421 | 422 | for b in principlesList: 423 | if b == a or b == c: continue 424 | 425 | transitive = acRelation & array[c,b] 426 | if transitive == clsCtx.none: continue 427 | 428 | for x in clsCtx.list(transitive): 429 | op = (x, opName) 430 | aOpC = (a, op, c) 431 | cOpB = (c, op, b) 432 | 433 | r |= addFact(a, op, b, 434 | (aOpC, cOpB), 1 + justComplexity[aOpC] + justComplexity[cOpB]) 435 | return r 436 | 437 | # Uses '->', affects '->' 438 | def unifyOverConjunctions(): 439 | #a X-> b 440 | #WHEN 441 | # (b == c+d) AND (a X-> c) AND (a X-> d) "Definition of conjunction" 442 | 443 | r = False 444 | for b in principlesList: 445 | splitB = b.split(u'+') 446 | if len(splitB) == 1: continue # b is not a conjunction 447 | 448 | for a in principlesList: 449 | aImpliesAll = ~Reduction.none 450 | for p in splitB: 451 | aImpliesAll &= implies[a,p] 452 | if aImpliesAll == Reduction.none: continue 453 | 454 | for x in Reduction.list(aImpliesAll): 455 | aImpConjuncts = tuple([(a, (x, u'->'), t) for t in splitB]) 456 | r |= addFact(a, (x, u'->'), b, 457 | aImpConjuncts, 1 + sum(justComplexity[aImpX] for aImpX in aImpConjuncts)) 458 | return r 459 | 460 | #REDUNDANT 461 | # Uses 'c' and '->', affects '->' 462 | def definitionOfConservation(): 463 | #a RCA-> b 464 | #WHEN 465 | # (c Fc a) AND (c RCA-> b) AND (b has form F) "Definition of conservation" 466 | 467 | r = False 468 | for c in principlesList: 469 | for b in principlesList: 470 | if b == c: continue 471 | 472 | if Reduction.isPresent(Reduction.RCA, implies[c,b]): 473 | formB = form[b] 474 | if formB == Form.none: continue 475 | 476 | cImpB = (c, (Reduction.RCA, u'->'), b) 477 | refCplxCB = 2 + justComplexity[cImpB] 478 | 479 | for a in principlesList: 480 | if a == b or a == c: continue 481 | 482 | frms = formB & conservative[c,a] 483 | if frms == Form.none: continue 484 | 485 | for f in Form.list(frms): 486 | cConsA = (c, (f, u'c'), a) 487 | 488 | r |= addFact(a, (Reduction.RCA, u'->'), b, 489 | (cConsA, cImpB, (b, u'form', f)), refCplxCB + justComplexity[cConsA]) 490 | return r 491 | 492 | # Uses posArray and negArray, affects negArray 493 | def contrapositiveTransitivity(posArray, posOpName, negArray, negOpName, clsCtx): 494 | r = False 495 | for c in principlesList: 496 | for a in principlesList: 497 | if a == c: 
continue 498 | 499 | #a nop b 500 | #WHEN 501 | # (c op a) AND (c nop b) 502 | caRelation = posArray[c,a] 503 | if caRelation != clsCtx.none: 504 | for b in principlesList: 505 | if b == a or b == c: continue 506 | 507 | contexts = caRelation & negArray[c,b] 508 | if contexts == clsCtx.none: continue 509 | 510 | for ctx in clsCtx.list(contexts): 511 | nop = (ctx, negOpName) 512 | 513 | cOpA = (c, (ctx, posOpName), a) 514 | cNOpB = (c, nop, b) 515 | 516 | r |= addFact(a, nop, b, 517 | (cOpA, cNOpB), 1 + justComplexity[cOpA] + justComplexity[cNOpB]) 518 | 519 | #a nop b 520 | #WHEN 521 | # (a nop c) AND (b op c) 522 | acNRelation = negArray[a,c] 523 | if acNRelation != clsCtx.none: 524 | for b in principlesList: 525 | if b == a or b == c: continue 526 | 527 | contexts = acNRelation & posArray[b,c] 528 | if contexts == clsCtx.none: continue 529 | 530 | for ctx in clsCtx.list(contexts): 531 | nop = (ctx, negOpName) 532 | 533 | aNOpC = (a, nop, c) 534 | bOpC = (b, (ctx, posOpName), c) 535 | 536 | r |= addFact(a, nop, b, 537 | (aNOpC, bOpC), 1 + justComplexity[aNOpC] + justComplexity[bOpC]) 538 | return r 539 | 540 | # Uses '->' and '-|>', affects '-|>' 541 | def contrapositiveConjunction(): 542 | #a X-|> b 543 | #WHEN 544 | # (a X-> c) AND (a X-|> b+c) 545 | 546 | r = False 547 | for c in principlesList: 548 | for b in principlesList: 549 | if b == c: continue 550 | 551 | bc = joinPrinciples(b,c) 552 | if bc is None: continue 553 | 554 | for a in principlesList: 555 | if a == b: continue 556 | 557 | if a == c: # Special-case 558 | reds = notImplies[a,bc] 559 | if reds == Reduction.none: continue 560 | 561 | for x in Reduction.list(reds): 562 | notImp = (x, u'-|>') 563 | 564 | aNotImpBC = (a, notImp, bc) 565 | 566 | r |= addFact(a, notImp, b, 567 | (aNotImpBC,), 1 + justComplexity[aNotImpBC]) 568 | else: 569 | reds = implies[a,c] & notImplies[a,bc] 570 | if reds == Reduction.none: continue 571 | 572 | for x in Reduction.list(reds): 573 | notImp = (x, u'-|>') 574 | 575 | aImpC = (a, (x, u'->'), c) 576 | aNotImpBC = (a, notImp, bc) 577 | 578 | r |= addFact(a, notImp, b, 579 | (aImpC, aNotImpBC), 1 + justComplexity[aImpC] + justComplexity[aNotImpBC]) 580 | return r 581 | 582 | #REDUNDANT 583 | # Uses 'c' and '-|>', affects '-|>' 584 | def contrapositiveConservation(): 585 | #a RCA-|> b 586 | #WHEN 587 | # (a Fc c) AND (c RCA-|> b) AND (b has form F) 588 | notImp = (Reduction.RCA, u'-|>') 589 | 590 | r = False 591 | for c in principlesList: 592 | for b in principlesList: 593 | if b == c: continue 594 | 595 | if Reduction.isPresent(Reduction.RCA, notImplies[c,b]): 596 | formB = form[b] 597 | if formB == Form.none: continue 598 | 599 | cNotImpB = (c, notImp, b) 600 | refCplxCB = 2 + justComplexity[cNotImpB] 601 | 602 | for a in principlesList: 603 | if a == b or a == c: continue 604 | 605 | frms = conservative[a,c] & formB 606 | if frms == Form.none: continue 607 | 608 | for f in Form.list(frms): 609 | aConsC = (a, (f, u'c'), c) 610 | 611 | r |= addFact(a, notImp, b, 612 | (aConsC, cNotImpB, (b, u'form', f)), justComplexity[aConsC] + refCplxCB) 613 | return r 614 | 615 | #REDUNDANT 616 | # Uses 'c' and '->', affects 'c' 617 | def liftConservation(): 618 | r = False 619 | for c in principlesList: 620 | #a Fc b 621 | #WHEN 622 | # (c RCA-> a) AND (c Fc b) [aka "Weaker principles prove less"] 623 | for a in principlesList: 624 | if a == c: continue 625 | 626 | if Reduction.isPresent(Reduction.RCA, implies[c,a]): 627 | cImpA = (c, (Reduction.RCA, u'->'), a) 628 | refCplxCA = 1 + justComplexity[cImpA] 629 
| 630 | for b in principlesList: 631 | if b == a or b == c: continue 632 | 633 | for f in Form.list(conservative[c,b]): 634 | fc = (f, u'c') 635 | cConsB = (c, fc, b) 636 | 637 | r |= addFact(a, fc, b, 638 | (cImpA, cConsB), refCplxCA + justComplexity[cConsB]) 639 | 640 | #a Fc b 641 | #WHEN 642 | # (a Fc c) AND (b RCA-> c) [aka "Stronger principles prove more"] 643 | for b in principlesList: 644 | if b == c: continue 645 | 646 | if Reduction.isPresent(Reduction.RCA, implies[b,c]): 647 | bImpC = (b, (Reduction.RCA, u'->'), c) 648 | refCplxBC = 1 + justComplexity[bImpC] 649 | 650 | for a in principlesList: 651 | if a == b or a == c: continue 652 | 653 | for f in Form.list(conservative[a,c]): 654 | fc = (f, u'c') 655 | aConsC = (a, fc, c) 656 | 657 | r |= addFact(a, fc, b, 658 | (aConsC, bImpC), justComplexity[aConsC] + refCplxBC) 659 | return r 660 | 661 | #REDUNDANT 662 | # Uses '->' and '-|>', affects 'nc' 663 | def definitionOfNonConservation(): 664 | #a nFc b 665 | #WHEN 666 | # (a RCA-> c) AND (b RCA-|> c) AND (c has form F) 667 | r = False 668 | for c in principlesList: 669 | formC = form[c] 670 | if formC == Form.none: continue 671 | cForms = Form.list(formC) 672 | 673 | for a in principlesList: 674 | if a == c: continue 675 | 676 | if Reduction.isPresent(Reduction.RCA, implies[a,c]): 677 | aImpC = (a, (Reduction.RCA, u'->'), c) 678 | refCplxAC = 2 + justComplexity[aImpC] 679 | 680 | for b in principlesList: 681 | if b == a or b == c: continue 682 | 683 | if Reduction.isPresent(Reduction.RCA, notImplies[b,c]): 684 | bNotImpC = (b, (Reduction.RCA, u'-|>'), c) 685 | 686 | cplx = refCplxAC + justComplexity[bNotImpC] 687 | 688 | for f in cForms: 689 | r |= addFact(a, (f, u'nc'), b, 690 | (aImpC, bNotImpC, (c, u'form', f)), cplx) 691 | return r 692 | 693 | #REDUNDANT 694 | # Uses 'nc' and '->', affects 'nc' 695 | def liftNonConservation(): 696 | imp = (Reduction.RCA, u'->') 697 | 698 | r = False 699 | for c in principlesList: 700 | #a nFc b 701 | #WHEN 702 | # (a nFc c) AND (c RCA-> b) [aka "Weaker principles prove less (contrapositive)"] 703 | for b in principlesList: 704 | if b == c: continue 705 | 706 | if Reduction.isPresent(Reduction.RCA, implies[c,b]): 707 | cImpB = (c, imp, b) 708 | refCplxCB = 1 + justComplexity[cImpB] 709 | 710 | for a in principlesList: 711 | if a == b or a == c: continue 712 | 713 | for f in Form.list(nonConservative[a,c]): 714 | nFc = (f, u'nc') 715 | aNonConsC = (a, nFc, c) 716 | 717 | r |= addFact(a, nFc, b, 718 | (aNonConsC, cImpB), justComplexity[aNonConsC] + refCplxCB) 719 | 720 | #a nFc b 721 | #WHEN 722 | # (a RCA-> c) AND (c nFc b) [aka "Stronger principles prove more (contrapositive)"] 723 | for a in principlesList: 724 | if a == c: continue 725 | 726 | if Reduction.isPresent(Reduction.RCA, implies[a,c]): 727 | aImpC = (a, imp, c) 728 | refCplxAC = 1 + justComplexity[aImpC] 729 | 730 | for b in principlesList: 731 | if b == a or b == c: continue 732 | 733 | for f in Form.list(nonConservative[c,b]): 734 | nFc = (f, u'nc') 735 | cNonConsB = (c, (f, u'nc'), b) 736 | 737 | r |= addFact(a, nFc, b, 738 | (aImpC, cNonConsB), refCplxAC + justComplexity[cNonConsB]) 739 | return r 740 | 741 | def deriveInferences(quiet=False, verbose=False): 742 | start = timekeeper() 743 | if not quiet: eprint(u'Adding reflexivity facts..') 744 | addReflexivities() 745 | if not quiet: eprint(u'Making RCA trivial..') 746 | addRCABottom() 747 | if not quiet: eprint(u'Recording conjunctions...') 748 | definitionOfConjunction() 749 | if not quiet: eprint(u'Elapsed: {0:.6f} 
s\n'.format(timekeeper() - start)) 750 | 751 | start = timekeeper() 752 | if not quiet: eprint(u'Deriving positive facts:') 753 | n = 0 754 | eUpdated, iUpdated, cUpdated = True, True, True 755 | while eUpdated or iUpdated or cUpdated: 756 | n += 1 757 | eChanged, iChanged, cChanged = False, False, False 758 | 759 | if iUpdated or iChanged: 760 | if not quiet: eprint(u'\tExtracting equivalences...') 761 | eChanged |= definitionOfEquivalence() # Uses '->', affects '<->' 762 | if eUpdated or eChanged: 763 | if not quiet: eprint(u'\tTaking the transitive closure of equivalence...') 764 | eChanged |= transitiveClosure(equivalent, u'<->', Reduction) # Uses '<->', affects '<->' 765 | 766 | if iUpdated or iChanged: 767 | if not quiet: eprint(u'\tTaking the transitive closure of implication...') 768 | iChanged |= transitiveClosure(implies, u'->', Reduction) # Uses '->', affects '->' 769 | if not quiet: eprint(u'\tReverse-engineering implications of conjunctions...') 770 | iChanged |= unifyOverConjunctions() # Uses '->', affects '->' 771 | if (cUpdated or cChanged) or (iUpdated or iChanged): 772 | if not quiet: eprint(u'\tImplementing conservativity for implication...') 773 | iChanged |= definitionOfConservation() # Uses 'c' and '->', affects '->' 774 | 775 | if cUpdated or cChanged: 776 | if not quiet: eprint(u'\tTaking the transitive closure of conservation facts...') 777 | cChanged |= transitiveClosure(conservative, u'c', Form) # Uses 'c', affects 'c' 778 | if (cUpdated or cChanged) or (iUpdated or iChanged): 779 | if not quiet: eprint(u'\tLifting conservation facts over implications...') 780 | cChanged |= liftConservation() # Uses 'c' and '->', affects 'c' 781 | 782 | if verbose: 783 | eprint(u'\t\tDuring iteration {0}:'.format(n)) 784 | if eChanged: eprint(u'\t\t\tEquivalences updated.') 785 | if iChanged: eprint(u'\t\t\tImplications updated.') 786 | if cChanged: eprint(u'\t\t\tConservation facts updated.') 787 | if not eChanged and not iChanged and not cChanged: eprint(u'\t\t\tNothing updated.') 788 | 789 | eUpdated = eChanged 790 | iUpdated = iChanged 791 | cUpdated = cChanged 792 | if not quiet: 793 | eprint(u'Finished with positive facts.') 794 | eprint(u'Elapsed: {0:.6f} s (with {1} repeats)\n'.format(timekeeper() - start, n)) 795 | 796 | start = timekeeper() 797 | if not quiet: eprint(u'Deriving negative facts:') 798 | n = 0 799 | niUpdated, ncUpdated = True, True 800 | while niUpdated or ncUpdated: 801 | n += 1 802 | niChanged, ncChanged = False, False 803 | 804 | if niUpdated or niChanged: 805 | if not quiet: eprint(u'\tApplying transivitity to non-implications...') 806 | niChanged |= contrapositiveTransitivity(implies, u'->', notImplies, u'-|>', Reduction) # Uses '->' and '-|>', affects '-|>' 807 | if not quiet: eprint(u'\tSplitting non-implications over conjunctions...') 808 | niChanged |= contrapositiveConjunction() # Uses '->' and '-|>', affects '-|>' 809 | if not quiet: eprint(u'\tImplementing conservativity for non-implication...') 810 | niChanged |= contrapositiveConservation() # Uses 'c' and '-|>', affects '-|>' 811 | 812 | if ncUpdated or ncChanged: 813 | if not quiet: eprint(u'\tApplying transivitity to non-conservation facts...') 814 | ncChanged |= contrapositiveTransitivity(conservative, u'c', nonConservative, u'nc', Form) # Uses 'c' and 'nc', affects 'nc' 815 | if niUpdated or niChanged: 816 | if not quiet: eprint(u'\tExtracting non-conservation facts from non-implications...') 817 | ncChanged |= definitionOfNonConservation() # Uses '->' and '-|>', affects 'nc' 818 | if 
ncUpdated or ncChanged: 819 | if not quiet: eprint(u'\tLifting non-conservation facts over implications...') 820 | ncChanged |= liftNonConservation() # Uses 'nc' and '->', affects 'nc' 821 | 822 | if verbose: 823 | eprint(u'\t\tDuring iteration {0}:'.format(n)) 824 | if niChanged: eprint(u'\t\t\tNon-implications updated.') 825 | if ncChanged: eprint(u'\t\t\tNon-conservation facts updated.') 826 | if not niChanged and not ncChanged: eprint(u'\t\t\tNothing updated.') 827 | 828 | niUpdated = niChanged 829 | ncUpdated = ncChanged 830 | if not quiet: 831 | eprint(u'Finished with negative facts.') 832 | eprint(u'Elapsed: {0:.6f} s (with {1} repeats)\n'.format(timekeeper() - start, n)) 833 | 834 | def getDatabase(): 835 | return {'version': DatabaseVersion, 836 | 'principles': principles, 837 | 'implication': (implies, notImplies), 838 | 'conservation': (conservative, nonConservative), 839 | 'form': form, 840 | 'primary': (primary, primaryIndex), 841 | 'justify': justify} 842 | 843 | def setDatabase(database): 844 | if database['version'] != DatabaseVersion: 845 | raise VersionError(database['version'], DatabaseVersion) 846 | 847 | global principles, principlesList 848 | principles = database['principles'] 849 | principlesList = sorted(principles) 850 | 851 | global implies, notImplies 852 | implies, notImplies = database['implication'] 853 | 854 | global conservative, nonConservative 855 | conservative, nonConservative = database['conservation'] 856 | 857 | global form 858 | form = database['form'] 859 | 860 | global primary, primaryIndex 861 | primary, primaryIndex = database['primary'] 862 | 863 | global justify 864 | justify = database['justify'] 865 | 866 | global justComplexity 867 | justComplexity = {} 868 | def rebuildComplexity(fact): 869 | try: 870 | return justComplexity[fact] 871 | except KeyError: 872 | r = 1 873 | 874 | a,op,b = fact 875 | if op != u'form': 876 | jst = justify[fact] 877 | if not isString(jst): 878 | r += sum(rebuildComplexity(f) for f in jst) 879 | 880 | justComplexity[fact] = r 881 | return r 882 | for fact in justify: 883 | rebuildComplexity(fact) 884 | 885 | def dumpDatabase(databaseName, quiet=False): 886 | if not quiet: eprint(u'Facts known: {0:,d}\n'.format(len(justify))) 887 | 888 | start = timekeeper() 889 | if not quiet: eprint(u'Dumping updated database to binary file...') 890 | with open(databaseName, mode='wb') as databaseFile: 891 | pickledDatabase = pickle.dumps(getDatabase(), protocol=2) 892 | compressedDatabase = zlib.compress(pickledDatabase) 893 | databaseFile.write(compressedDatabase) 894 | 895 | if not quiet: eprint(u'Elapsed: {0:.6f} s\n'.format(timekeeper() - start)) 896 | 897 | def loadDatabase(databaseName, quiet=False): 898 | with open(databaseName, mode='rb') as databaseFile: 899 | compressedDatabase = databaseFile.read() 900 | pickledDatabase = zlib.decompress(compressedDatabase) 901 | setDatabase(pickle.loads(pickledDatabase)) 902 | 903 | from optparse import OptionParser, OptionGroup 904 | def main(): 905 | absoluteStart = timekeeper() 906 | eprint(u'\nRM Zoo (v{0})\n'.format(Version)) 907 | 908 | parser = OptionParser(u'Usage: %prog [options] results [database_title]', version=u'%prog {0} ({1})'.format(Version, Date)) 909 | 910 | parser.set_defaults(quiet=False, verbose=False) 911 | 912 | parser.add_option('-q', action='store_true', dest='quiet', 913 | help = u'Suppress progress/timing indicators.') 914 | parser.add_option('-v', action='store_true', dest='verbose', 915 | help = u'Report additional execution information.') 916 | 917 | 
(options, args) = parser.parse_args() 918 | if len(args)>2: 919 | parser.error(u'Too many arguments provided.') 920 | if len(args)<1: 921 | parser.error(u'No results file specified.') 922 | 923 | if options.quiet and options.verbose: 924 | parser.error(u'Options -q and -v are incompatible.') 925 | 926 | import os 927 | resultsFile = args[0] 928 | if len(args) > 1: 929 | databaseTitle = args[1] 930 | else: 931 | eprint(u'No database title specified; defaulting to "database".') 932 | databaseTitle = 'database.dat' 933 | 934 | if os.path.splitext(databaseTitle)[1] == '': 935 | databaseName = databaseTitle + os.extsep + 'dat' 936 | else: 937 | databaseName = databaseTitle 938 | 939 | if not os.path.exists(resultsFile): 940 | parser.error(u'Results file "{0}" does not exist.'.format(resultsFile)) 941 | 942 | with open(resultsFile, encoding='utf-8') as f: 943 | parseResults(f.read(), options.quiet) 944 | deriveInferences(quiet=options.quiet, verbose=options.verbose) 945 | dumpDatabase(databaseName, options.quiet) 946 | if not options.quiet: eprint(u'Total elapsed time: {0:.6f} s'.format(timekeeper() - absoluteStart)) 947 | 948 | if options.verbose: 949 | try: 950 | report = [] 951 | report.append(u'\tReduction.list: {0}'.format(Reduction.list.cache_info())) 952 | report.append(u'\tForm.list: {0}'.format(Form.list.cache_info())) 953 | eprint(u'\nCache report: ') 954 | eprint('\n'.join(report)) 955 | except AttributeError: 956 | pass 957 | if __name__ == '__main__': 958 | main() 959 | -------------------------------------------------------------------------------- /results.txt: -------------------------------------------------------------------------------- 1 | ##### Primary principles ##### 2 | 3 | ACA is primary 4 | WKL is primary 5 | WWKL is primary 6 | RCA is primary 7 | 8 | BSig3 is primary 9 | BSig2 is primary 10 | 11 | RT is primary 12 | RT2 is primary 13 | RT22 is primary 14 | SRT2 is primary 15 | SRT22 is primary 16 | SFS2 is primary 17 | STS2 is primary 18 | 19 | EM is primary 20 | 21 | AMT is primary 22 | AMT+ISig2 is primary 23 | 24 | CAC is primary 25 | ADS is primary 26 | SADS is primary 27 | OPT is primary 28 | 29 | DNR0 is primary 30 | DNR is primary 31 | 32 | WWKL2 is primary 33 | FIP is primary 34 | RWKL is primary 35 | 36 | COH is primary 37 | StCOH is primary 38 | 39 | GEN1+WKL is primary 40 | COH+WKL is primary 41 | RT22+WKL is primary 42 | SRT22+WKL is primary 43 | 44 | POS+WWKL is primary 45 | 46 | ##### Results ##### 47 | 48 | ### Assorted results 49 | 50 | RT -> RT3 "by definition" 51 | RT3 -> RT2 "by definition" 52 | 53 | RRT -> RRT42 "by definition" 54 | 55 | RT3 -> RT32 "by definition" 56 | 57 | FS3 -> SFS3 "by definition" 58 | FS2 -> SFS2 "by definition" 59 | 60 | RT2 -> RT22 "by definition" 61 | SRT2 -> SRT22 "by definition" 62 | D2 -> D22 "by definition" 63 | 64 | RT1 -> RT12 "by definition" 65 | 66 | RT2 -> SRT2 "by definition" 67 | RT22 -> SRT22 "by definition" 68 | EM -> SEM "by definition" 69 | 70 | RT1 -> RT12 "by definition" 71 | 72 | KL -> WKL "by definition" 73 | 74 | ISig3 -> ISig2 "by definition" 75 | BSig3 -> BSig2 "by definition" 76 | 77 | RCA w-> PA "Full induction holds in any omega-model." 78 | RCA w-> ISig3 "Full induction holds in any omega-model." 79 | RCA w-> BSig3 "Full induction holds in any omega-model." 80 | RCA w-> ISig2 "Full induction holds in any omega-model." 81 | RCA w-> BSig2 "Full induction holds in any omega-model." 
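# Notation (following the fact grammar in rmupdater.py): each non-comment line
# states a fact between principles, optionally followed by a quoted justification
# (""" ... """ for multi-line justifications).  "->", "-|>", and "<->" denote
# implication, non-implication, and equivalence, and may carry a reducibility
# marker such as "w"; a bare arrow defaults to provability over RCA.  "Xc" and
# "nXc" (e.g. "Pi11c", "nSig03c") assert conservation and non-conservation for
# formulas of class X.  "P form F" records the syntactic form of P, and
# "P is primary" marks P as the preferred representative of its equivalence
# class in generated diagrams.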
82 | 83 | ### Results by source 84 | 85 | #Simpson (2009) - Subsystems of Second-Order Arithmetic 86 | 87 | PA -> ISig3 "by definition" 88 | 89 | ACA -> PA "due to Friedman and Harrington, see Simpson's 'Subsystems of Second Order Arithmetic' (2009) [Theorem IX.1.5]" 90 | WKL Pi11c RCA "due to Harrington, see Simpson's 'Subsystems of Second Order Arithmetic' (2009) [Corollary IX.2.6]" 91 | 92 | #Friedman (1975) - "Some Systems of Second Order Arithmetic and Their Use" 93 | # http://www.mathunion.org/ICM/ICM1974.1/Main/icm1974.1.0235.0242.ocr.pdf 94 | # Proceedings of the International Congress of Mathematicians, Vancouver, 1974 (Vol. 1) 95 | 96 | ACA <-> KL "Friedman (1975) [Theorem 1.1]" 97 | WKL w-|> ACA "Friedman (1975) [remark following Theorem 1.3]" 98 | RCA w-|> WKL "Friedman (1975) [Theorem 1.4]" 99 | 100 | #Paris and Kirby (1978) - "$\Sigma_n$-Collection Schemas in Arithmetic" 101 | # http://www.sciencedirect.com/science/article/pii/S0049237X08720032 102 | # in Logic Colloquium '77, Studies in Logic and the Foundations of Mathematics 96, doi:10.1016/S0049-237X(08)72003-2 103 | 104 | BSig2 <-> BPi1 "Paris and Kirby (1978) [Proposition 1]" 105 | BSig3 <-> BPi2 "Paris and Kirby (1978) [Proposition 1]" 106 | ISig2 -> BSig2 "Paris and Kirby (1978) [Proposition 4]" 107 | ISig3 -> BSig3 "Paris and Kirby (1978) [Proposition 4]" 108 | BSig3 -> ISig2 "Paris and Kirby (1978) [Proposition 5]" 109 | ISig2 -|> BSig3 "Paris and Kirby (1978) [Proposition 7]" 110 | RCA -|> BSig2 "Paris and Kirby (1978) [Proposition 7]" 111 | BSig2 -|> ISig2 "Paris and Kirby (1978) [Proposition 8]" 112 | BSig3 -|> ISig3 "Paris and Kirby (1978) [Proposition 8]" 113 | 114 | #Generic: 115 | # BSig(n+1) <-> BPin "Paris and Kirby (1978) [Proposition 1]" 116 | # ISign -> BSign "Paris and Kirby (1978) [Proposition 4]" 117 | # BSig(n+1) -> ISign "Paris and Kirby (1978) [Proposition 5]" 118 | # ISign -|> BSig(n+1) "Paris and Kirby (1978) [Proposition 7]" 119 | # BSign -|> ISign "Paris and Kirby (1978) [Proposition 8]" 120 | 121 | #Paris (1980) - "A hierarchy of cuts in models of arithmetic" 122 | # http://link.springer.com/chapter/10.1007/BFb0090171 123 | # in: Model Theory of Algebra and Arithmetic (ed: Pacholski, Wierzejewski, & Wilkie) 124 | # ISBN: 978-3-540-10269-4 125 | 126 | BSig2 Pi03c RCA "Paris (1980) [Theorem 33, and independently by Friedman]" 127 | BSig3 Pi04c ISig2 "Paris (1980) [Theorem 33, and independently by Friedman]" 128 | BSig2 nSig03c RCA "Paris (1980) [remark following proof of Theorem 33, p. 331]" 129 | BSig3 nSig04c ISig2 "Paris (1980) [remark following proof of Theorem 33, p. 331]" 130 | 131 | #Generic: 132 | # BSig(n+1) Pi0(n+2)c ISign "Paris (1980) [Theorem 33, and independently by Friedman]" 133 | # BSig(n+1) nSig0(n+2)c ISign "Paris (1980) [remark following proof of Theorem 33, p. 331]" 134 | 135 | #Simpson (1985) - "Reverse Mathematics" 136 | # in: Recursion Theory (ed. Anil Nerode and Richard A. Shore), pp. 461-471, ISBN-10: 0-8218-1447-8 137 | 138 | RT32 <-> ACA "follows from Jockusch (1972) [Theorem 5.7]" 139 | 140 | #References: 141 | # Reverse mathematics examples: 142 | # Friedman (1976) - "Systems of second order arithmetic with restricted induction" 143 | # Steel (1977) - "Determinateness and subsystems of analysis" (Ph.D. 
thesis, Berkeley) 144 | # Simpson (1978) - "Sets which do not have subsets of every higher degree" (in JSL 43, 135-138) 145 | # Friedman, McAloon, and Simpson (1982) - "A finite combinatorial principle which is equivalent to the 1-consistency of predicative analysis" 146 | # Simpson (1984) - "Which set existence axioms are needed to prove the Cauchy/Peano theorem for ordinary differential equations?" (in JSL 49, 783-802) 147 | # Friedman, Simpson, and Smith (1985) - "Countable algebra and set existence axioms" (in APAL 25, 141-181) 148 | 149 | #Hirst (1987) - "Combinatorics in Subsystems of Second Order Arithmetic" (Ph.D. thesis, Pennsylvania State University) 150 | # http://search.proquest.com/docview/303611646 151 | 152 | BPi2 <-> BSig3 "Hirst (1987) [Theorem 5.1]" 153 | RCA -> RT12 "Hirst (1987) [Theorem 6.3]" 154 | RT1 <-> BPi1 "Hirst (1987) [Theorem 6.4]" 155 | WKL -|> RT1 "Hirst (1987) [Corollary 6.5]" 156 | RT1 -|> WKL "Hirst (1987) [remark after Corollary 6.5]" 157 | RT22 -> RT1 "Hirst (1987) [Theorem 6.8]" 158 | WKL w-|> RT22 "Hirst (1987) [Theorem 6.10]" 159 | RT2 -> BPi2 "Hirst (1987) [Theorem 6.11]" 160 | 161 | #Unsupported: 162 | # WKL has low solutions "Hirst (1987) [Theorem 1.4]" 163 | # RT22 does not have low solutions "Jockusch (1972) [Theorem 3.1]" 164 | 165 | #Cited: 166 | # ACA -> WKL "Friedman (1975)" 167 | 168 | #Yu and Simpson (1990) - "Measure theory and weak König's lemma" 169 | # http://link.springer.com/article/10.1007/BF01621469 170 | # Arch. Math. Logic 30(3), doi:10.1007/BF01621469 171 | 172 | WKL -> WWKL "by definition" 173 | 174 | RCA w-|> WWKL "Yu and Simpson (1990) [Section 2, paragraph 2]" 175 | WWKL w-|> WKL "Yu and Simpson (1990) [Section 2, theorems]" 176 | 177 | #Brown and Simpson (1993) - "The Baire category theorem in weak subsystems of second-order arithmetic" 178 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9075709&fileId=S0022481200021320 179 | # J. Symbolic Logic 58(2), doi:10.2307/2275219 180 | 181 | #WKLplus -> WKL "by definition" 182 | 183 | #WKL w-|> BCTPi01 "Brown and Simpson (1993) [Theorem 3.2]" 184 | #RCAplus -> BCTPi01 "Brown and Simpson (1993) [Theorem 4.2]" 185 | #WKLplus Pi11c RCA "Brown and Simpson (1993) [Corollary 6.5]" 186 | 187 | GEN1+WKL Pi11c RCA "Brown and Simpson (1993) [follows from Corollary 6.5]" 188 | 189 | #WKLplus w-|> ACA "Brown and Simpson (1993) [Corollary 6.15]" 190 | 191 | #Hájek (1993) - "Interpretability and fragments of arithmetic" 192 | # in: Arithmetic, Proof Theory, and Computational Complexity (ed. Clote and Krajícek) 193 | # ISBN: 9780198536901 194 | 195 | BSig2+WKL Pi11c BSig2 "Hájek (1993) [Corollary 3.14]" 196 | BSig3+WKL Pi11c BSig3 "Hájek (1993) [Corollary 3.14]" 197 | 198 | ISig2+WKL Pi11c ISig2 "Hájek (1993) [Corollary 3.14]" 199 | ISig3+WKL Pi11c ISig3 "Hájek (1993) [Corollary 3.14]" 200 | 201 | #Generic: 202 | # BSig(n+1)+WKL Pi11c BSig(n+1) "Hájek (1993) [Corollary 3.14]" 203 | # ISig(n+1)+WKL Pi11c ISig(n+1) "Hájek (1993) [Corollary 3.14]" 204 | 205 | #Seetapun and Slaman (1995) - "On the Strength of Ramsey's Theorem" 206 | # http://projecteuclid.org/euclid.ndjfl/1040136917 207 | # Notre Dame J. Formal Logic 36(4), doi:10.1305/ndjfl/1040136917 208 | 209 | RT22+WKL w-|> ACA "Seetapun and Slaman (1995) [Theorem 3.1]" 210 | RT22 nPi04c RCA "Seetapun and Slaman (1995) [Theorem 3.6]" 211 | 212 | ACA -> RT22 "follows from Specker (1971) [Remark 6, RT22 has $\Delta^0_3$ solutions]" 213 | 214 | #Unclear: 215 | # RT22 -> C(Gamma?) 
"Seetapun and Slaman (1995) [Remark 3.8]" 216 | 217 | #Mytilinaios and Slaman (1996) - "On a Question of Brown and Simpson" 218 | # in: Computability, Enumerability, Unsolvability: Directions in Recursion Theory (ed. Cooper, Slaman, Wainer) 219 | 220 | RT2 -> ISig2 "Mytilinaios and Slaman (1996) [Proposition 5.2]" 221 | #ISig2+RT22 -> BCTPi01 "Mytilinaios and Slaman (1996) [Proposition 5.3]" 222 | #BCTPi01 -|> RT22 "Mytilinaios and Slaman (1996) [Corollary 5.1]" 223 | 224 | #Unsupported: 225 | # BCTPi01 has low solutions 226 | # RCAplus does not have \Delta^0_2 solutions 227 | 228 | #Hájek and Pudlák (1998) - Metamathematics of first-order arithmetic 229 | # http://projecteuclid.org/euclid.pl/1235421926 230 | # ISBN-10: 3-540-63648-X 231 | 232 | RCA -|> BSig2 "Hájek and Pudlák (1998) [Theorem IV.1.29]" 233 | 234 | #Friedman (FOM 53-54) - "FOM: 53:Free Sets/Reverse Math" and "FOM: 54:Recursion Theory/Dynamics" 235 | # http://www.cs.nyu.edu/pipermail/fom/1999-July/003257.html 236 | # http://www.cs.nyu.edu/pipermail/fom/1999-July/003263.html 237 | 238 | ACA -> FS3 "Friedman (FOM 53) [Theorem 2]" 239 | WKL w-|> FS2 "Friedman (FOM 53) [Theorem 4]" 240 | WKL w-|> TS2 "Friedman (FOM 53), proven in private communication" 241 | 242 | #Giusto and Simpson (2000) - "Located sets and reverse mathematics" 243 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9069915&fileId=S0022481200011993 244 | # J. Symbolic Logic 65(3), doi:10.2307/2586708 245 | 246 | WWKL -> DNR "Giusto and Simpson (2000) [Lemma 6.18]" 247 | 248 | #Cholak, Jockusch, and Slaman (2001) - "On the strength of Ramsey's theorem for pairs" 249 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9069578&fileId=S0022481200011208 250 | # J. Symbolic Logic 66(1), doi:10.2307/2694910 251 | 252 | PA form Pi11 253 | ISig2 form Pi11 254 | BSig2 form Pi11 255 | #BSig2 form Sig03 256 | 257 | ISig2+RT22 Pi11c ISig2 "Cholak, Jockusch, and Slaman (2001) [Theorem 10.2]" 258 | ISig3+RT2+WKL Pi11c ISig3 "Cholak, Jockusch, and Slaman (2001) [Theorem 11.1]" 259 | RT22 -|> RT2 "Cholak, Jockusch, and Slaman (2001) [corollary to Theorem 11.1 and Corollary 11.5]" 260 | SRT22 -> RT1 "Cholak, Jockusch, and Slaman (2001) [Lemma 10.6]" 261 | SRT22 -> D22 "Cholak, Jockusch, and Slaman (2001) [Lemma 7.10]" 262 | BSig2+D22 -> SRT22 "Cholak, Jockusch, and Slaman (2001) [Lemma 7.10]" 263 | COH+SRT22 -> RT22 "Cholak, Jockusch, and Slaman (2001) [Lemma 7.11]" 264 | SRT2 <-> D2 "Cholak, Jockusch, and Slaman (2001) [Lemma 7.12]" 265 | RT2 <-> COH+SRT2 "Cholak, Jockusch, and Slaman (2001) [Lemma 7.13]" 266 | COH Pi11c RCA "Cholak, Jockusch, and Slaman (2001) [Theorem 9.1]" 267 | COH+ISig2+WKL Pi11c ISig2 "Cholak, Jockusch, and Slaman (2001) [Lemma 9.5 and Lemma 6.6]" 268 | COH+WKL Pi11c RCA "Cholak, Jockusch, and Slaman (2001) [Lemma 9.6 and Lemma 6.6]" 269 | WKL w-|> COH "Cholak, Jockusch, and Slaman (2001) [Lemma 9.14]" 270 | COH w-|> WKL "Cholak, Jockusch, and Slaman (2001) [Lemma 9.15]" 271 | ISig2+RT22+WKL Pi11c ISig2 "Cholak, Jockusch, and Slaman (2001) [Theorem 10.2 and remark]" 272 | SRT2 -> BSig3 "Cholak, Jockusch, and Slaman (2001) [Theorem 11.4]" 273 | RT22 -|> RT2 "Cholak, Jockusch, and Slaman (2001) [remark following Corollary 11.5]" 274 | SRT22 -|> SRT2 "Cholak, Jockusch, and Slaman (2001) [remark following Corollary 11.5]" 275 | 276 | RCA w-|> RT22 "corollary to Specker (1971)" 277 | 278 | RT2 w-|> ACA """follows from an elaboration on Seetapun's theorem; first explicit and simplified argument due to Jockusch 
(see Hummel (1994) [Theorem 2.1]). A clearer argument can be made from Cholak, Jockusch, and Slaman (2001) [Theorem 3.1], from which one can see that RT2 has a low_2 omega model.""" 279 | 280 | #Uncited: 281 | # ISig3 -|> PA 282 | 283 | #Cited: 284 | # RT22 nSig03c RCA "Hirst (1987)" 285 | # RT22 w-|> ACA "Seetapun's theorem" 286 | 287 | #Downey, Hirschfeldt, Lempp, and Solomon (2001) - "A $\Delta^0_2$ set with no infinite low subset in either it or its complement" 288 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9069546&fileId=S0022481200010690 289 | # J. Symbolic Logic 66(3), doi:10.2307/2695113 290 | 291 | #Unsupported: 292 | # SRT22 does not have low solutions [i.e., no $\omega$-models containing only low sets] 293 | 294 | #Ambos-Spies, Kjos-Hanssen, Lempp, and Slaman (2004) - "Comparing DNR and WWKL" 295 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9066747&fulltextType=RA&fileId=S0022481200007404 296 | # J. Symbolic Logic 69(4), doi:10.2178/jsl/1102022212 297 | 298 | DNR w-|> WWKL "Ambos-Spies, Kjos-Hanssen, Lempp, and Slaman (2004) [Theorem 1.7]" 299 | 300 | #Dobrinen and Simpson (2004) - "Almost everywhere domination" 301 | # http://projecteuclid.org/euclid.jsl/1096901775 302 | # J. Symbolic Logic 69(3), doi:10.2178/jsl/1096901775 303 | 304 | ACA -> GdREG "Dobrinen and Simpson (2004) [remark following Conjecture 3.1]" 305 | GdREG -> POS "Dobrinen and Simpson (2004) [remark following Conjecture 3.1]" 306 | WKL+RAN1+GEN1 -|> GdREG "Dobrinen and Simpson (2004) [Remark 3.5]" 307 | 308 | #Mileti (2004) - "Partition theorems and computability theory" (Ph.D. thesis, UIUC) 309 | # Personal archive: http://www.math.grinnell.edu/~miletijo/research/thesis.pdf 310 | # ProQuest: http://search.proquest.com/docview/305200043 311 | 312 | RT22 -> COH "Mileti (2004) [Claim A.1.3], and independently Jockusch and Lempp" 313 | #RT22 <-> COH+SRT22 "Mileti (2004) [Corollary A.1.4]" 314 | 315 | #Slaman (2004) - "$\Sigma_n$-bounding and $\Delta_n$-induction" 316 | # http://www.ams.org/journals/proc/2004-132-08/S0002-9939-04-07294-6/ 317 | # Proc. Amer. Math. Soc. 132, doi:10.1090/S0002-9939-04-07294-6 318 | 319 | BSig2 <-> IDelta2 "Slaman (2004) [Theorem 2.1 and proof preceding]" 320 | 321 | #Bovykin and Weiermann (2005) - "The strength of infinitary ramseyan principles can be accessed by their densities." 322 | # https://www.researchgate.net/publication/228457756_The_strength_of_infinitary_ramseyan_principles_can_be_accessed_by_their_densities 323 | # Ann. Pure Appl. Logic (to appear?) 
324 | 325 | ADS+EM <-> RT22 "Bovykin and Weiermann (2005) [adaptation of Theorem 8]" 326 | 327 | #Cholak, Giusto, Hirst, and Jockusch (2005) - "Free sets and reverse mathematics" (in Reverse Mathematics 2001) 328 | # http://www.math.uiuc.edu/~jockusch/free7.pdf 329 | # Published version: http://www.aslonline.org/books-lnl_21.html 330 | 331 | FS3 -> FS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 2.3]" 332 | FS3 -> TS3 "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 3.2]" 333 | FS2 -> TS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 3.2]" 334 | TS3 -> TS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 3.4]" 335 | WKL w-|> TS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 4.6]" 336 | RT22 -> FS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 5.2]" 337 | RT32 -> FS3 "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 5.5 and remark]" 338 | RT -> FS "Cholak, Giusto, Hirst, and Jockusch (2005) [Theorem 5.5 and remark]" 339 | 340 | #Generic: 341 | # FS(k+1) -> FSk [Theorem 2.3] 342 | # FSk -> TSk [Theorem 3.2] 343 | # TS(k+1) -> TSk [Theorem 3.4] 344 | # RTk2 -> FSk [Theorem 5.5 and remark] 345 | 346 | #Cited: 347 | # ACA -> FSk "Friedman (FOM 53)" 348 | 349 | #Cholak, Greenberg, and Miller (2006) - "Uniform almost everywhere domination" 350 | # http://www.jstor.org/stable/27588497 351 | # J. Symbolic Logic 71(3), doi:10.2178/jsl/1154698592 352 | 353 | GdREG w-|> DNR "Cholak, Greenberg, and Miller (2006) [Theorem 1.4]" 354 | GdREG+WKL w-|> ACA "Cholak, Greenberg, and Miller (2006) [Theorem 1.6]" 355 | GdREG+WWKL w-|> WKL "Cholak, Greenberg, and Miller (2006) [Theorem 1.6]" 356 | 357 | #Hirschfeldt, Jockusch, Kjos-Hanssen, Lempp, and Slaman (2006) - "The strength of some combinatorial principles related to Ramsey's theorem for pairs" 358 | # http://math.uchicago.edu/~drh/Papers/Papers/comb.pdf 359 | # Published in Computational Prospects of Infinity, Part II: Presented Talks (http://dx.doi.org/10.1142/6786) 360 | 361 | SRT22 -> DNR "Hirschfeldt, Jockusch, Kjos-Hanssen, Lempp, and Slaman (2006) [Theorem 2.4]" 362 | COH w-|> DNR "Hirschfeldt, Jockusch, Kjos-Hanssen, Lempp, and Slaman (2006) [Theorem 3.7]" 363 | 364 | WKL -|> RT22 "follows from Jockusch (1972) [Theorem 3.1]" 365 | 366 | #Cited: 367 | # RT22 -|> ACA "Seetapun's theorem" 368 | # COH -|> DNR "independently and simultaneously obtained by Hirschfeldt and Shore (2007)" 369 | 370 | #Hirschfeldt and Shore (2007) - "Combinatorial principles weaker than Ramsey's theorem for pairs" 371 | # http://www.jstor.org/stable/27588536 372 | # J. 
Symbolic Logic 72(1), doi:10.2178/jsl/1174668391 373 | 374 | RCA w-|> ADS "Hirschfeldt and Shore (2007) [Corollary 2.5]" 375 | RCA w-|> SADS "Hirschfeldt and Shore (2007) [Corollary 2.6]" 376 | RCA w-|> CADS "Hirschfeldt and Shore (2007) [Corollary 2.6]" 377 | ADS <-> CADS+SADS "Hirschfeldt and Shore (2007) [Proposition 2.7]" 378 | SRT22 -> SADS "Hirschfeldt and Shore (2007) [Proposition 2.8]" 379 | CRT22 -> CADS "Hirschfeldt and Shore (2007) [Proposition 2.9]" 380 | ADS -> COH "Hirschfeldt and Shore (2007) [Proposition 2.10]" 381 | SADS w-|> SRT22 "Hirschfeldt and Shore (2007) [Corollary 2.13]" 382 | SADS w-|> CADS "Hirschfeldt and Shore (2007) [Corollary 2.16]" 383 | WKL w-|> CADS "Hirschfeldt and Shore (2007) [Corollary 2.16]" 384 | WKL w-|> SADS "Hirschfeldt and Shore (2007) [Corollary 2.19]" 385 | COH rPi12c RCA "Hirschfeldt and Shore (2007) [Corollary 2.21]" 386 | COH+WKL w-|> SADS "Hirschfeldt and Shore (2007) [Corollary 2.25]" 387 | COH+SADS w-|> DNR "Hirschfeldt and Shore (2007) [Corollary 2.28]" 388 | CAC -> ADS "Hirschfeldt and Shore (2007) [Proposition 3.1]" 389 | SRT22 -> SCAC "Hirschfeldt and Shore (2007) [Proposition 3.3]" 390 | SCAC -> SADS "Hirschfeldt and Shore (2007) [Proposition 3.3]" 391 | SCAC w-|> SRT22 "Hirschfeldt and Shore (2007) [Corollary 3.6]" 392 | SCAC w-|> CADS "Hirschfeldt and Shore (2007) [Corollary 3.6]" 393 | SCAC w-|> ADS "Hirschfeldt and Shore (2007) [Corollary 3.6]" 394 | CCAC <-> ADS "Hirschfeldt and Shore (2007) [Proposition 3.7]" 395 | CAC <-> CADS+SCAC "Hirschfeldt and Shore (2007) [Proposition 3.8]" 396 | CAC <-> ADS+SCAC "Hirschfeldt and Shore (2007) [Proposition 3.8]" 397 | CAC <-> COH+SCAC "Hirschfeldt and Shore (2007) [Proposition 3.8]" 398 | SCAC w-|> DNR "Hirschfeldt and Shore (2007) [Corollary 3.10]" 399 | CAC w-|> DNR "Hirschfeldt and Shore (2007) [Corollary 3.11]" 400 | SCAC -> RT1 "Hirschfeldt and Shore (2007) [Proposition 4.1]" 401 | StCADS <-> BSig2+CADS "Hirschfeldt and Shore (2007) [Proposition 4.4]" 402 | StCOH <-> BSig2+COH "Hirschfeldt and Shore (2007) [Proposition 4.4]" 403 | StCADS <-> StCOH "Hirschfeldt and Shore (2007) [Proposition 4.4]" 404 | StCOH -> RT1 "Hirschfeldt and Shore (2007) [Proposition 4.4]" 405 | BSig2 -> PART "Hirschfeldt and Shore (2007) [proof of Proposition 4.4]" 406 | ADS -> StCOH "Hirschfeldt and Shore (2007) [Proposition 4.5]" 407 | SADS -> PART "Hirschfeldt and Shore (2007) [Proposition 4.6]" 408 | CADS+PART -> StCADS "Hirschfeldt and Shore (2007) [proof of Proposition 4.4]" 409 | RCA -|> PART "Hirschfeldt and Shore (2007) [Corollary 4.7]" 410 | StCRT22 <-> BSig2+CRT22 "Hirschfeldt and Shore (2007) [Proposition 4.8]" 411 | BSig2+CRT22 <-> StCOH "Hirschfeldt and Shore (2007) [Proposition 4.8]" 412 | 413 | COH -> CRT22 "implicit in Cholak, Jockusch, and Slaman (2001) [Lemma 7.11]; highlighted in Hirschfeldt and Shore (2007) [Proposition 1.4]" 414 | RT22 -> CAC "remarked in Cholak, Jockusch, and Slaman (2001) [Question 13.8], explained in Hirschfeldt and Shore (2007) [comment following definition of CAC]" 415 | 416 | RT22 form rPi12 417 | SRT22 form rPi12 418 | WKL form rPi12 419 | DNR form rPi12 420 | CAC form rPi12 421 | ADS form rPi12 422 | SADS form rPi12 423 | PART form Pi11 424 | 425 | #Not supported: 426 | # SADS has low solutions 427 | # SCAC has low solutions 428 | 429 | #Cited: 430 | # RT22 -> BSig2 "Hirst (1987)" 431 | # COH -> CRT22 "Cholak, Jockusch, and Slaman (2001)" 432 | # CRT22+SRT22 -> RT22 "Cholak, Jockusch, and Slaman (2001)" 433 | 434 | #Conidis (2008) - "Classifying model-theoretic 
properties" 435 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9062648&fileId=S0022481200004230 436 | # J. Symbolic Logic 73(3), doi:10.2178/jsl/1230396753 437 | 438 | AMT w-> Pi01G "Conidis (2008) [Corollary 3.11]" 439 | 440 | #Cited: 441 | # AMT+BSig2 -|> ISig2 "Hirschfeldt, Soare, and Slaman (2009)" 442 | 443 | #Chubb, Hirst, and McNicholl (2009) - "Reverse mathematics, computability, and partitions of trees" 444 | # https://www.cambridge.org/core/journals/journal-of-symbolic-logic/article/reverse-mathematics-computability-and-partitions-of-trees/896FD1F7A3EF72C9F16EAC479C61FD1C 445 | # J. Symbolic Logic 74(1), doi:10.2178/jsl/1231082309 446 | 447 | TT2 -> TT22 "by definition" 448 | TT1 -> TT12 "by definition" 449 | 450 | RCA -> TT12 "Chubb, Hirst, and McNicholl (2009) [Lemma 1.1]" 451 | ISig2 -> TT1 "Chubb, Hirst, and McNicholl (2009) [Theorem 1.2]" 452 | TT1 -> BPi1 "Chubb, Hirst, and McNicholl (2009) [remark following Theorem 1.2]" 453 | ACA -> TT2 "Chubb, Hirst, and McNicholl (2009) [Theorem 1.3]" 454 | ACA <-> TT32 "Chubb, Hirst, and McNicholl (2009) [Theorem 1.5]" 455 | TT32 -> RT32 "Chubb, Hirst, and McNicholl (2009) [proof of Theorem 1.5]" 456 | TT22 -> RT22 "Chubb, Hirst, and McNicholl (2009) [proof of Theorem 1.5]" 457 | TT2 -> RT2 "Chubb, Hirst, and McNicholl (2009) [remark following Theorem 1.5]" 458 | 459 | #Csima and Mileti (2009) - "The strength of the rainbow Ramsey theorem" 460 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9061548&fileId=S0022481200003170 461 | # J. Symbolic Logic 74(4), doi:10.2178/jsl/1254748693 462 | 463 | RRT22 -> HYP "Csima and Mileti (2009) [Theorem 4.1]" 464 | RT22 -> RRT22 "Csima and Mileti (2009) [Theorem 5.2]" 465 | RT32 -> RRT32 "Csima and Mileti (2009) [Theorem 5.2]" 466 | RRT32 -> RRT22 "Csima and Mileti (2009) [Theorem 5.3]" 467 | WKL w-|> RRT22 "Csima and Mileti (2009) [Theorem 5.4]" 468 | RRT22 w-|> WKL "Csima and Mileti (2009) [Corollary 5.6]" 469 | RRT22 w-|> RT22 "Csima and Mileti (2009) [Corollary 5.9]" 470 | RRT22 w-|> SADS "Csima and Mileti (2009) [Theorem 5.11]" 471 | RRT22 w-|> RRT32 "Csima and Mileti (2009) [Corollary 5.12]" 472 | COH -|> RRT22 "Csima and Mileti (2009) [Proposition 5.14]" 473 | 474 | RRT22 form rPi12 475 | 476 | #Dzhafarov and Hirst (2009) - "The polarized Ramsey's theorem" 477 | # http://link.springer.com/article/10.1007/s00153-008-0108-0 478 | # Arch. Math. 
Logic 48(2), doi:10.1007/s00153-008-0108-0 479 | 480 | PT -> PT2 "by definition" 481 | PT -> PT3 "by definition" 482 | PT2 -> PT22 "by definition" 483 | PT3 -> PT32 "by definition" 484 | IPT -> IPT2 "by definition" 485 | IPT -> IPT3 "by definition" 486 | IPT2 -> IPT22 "by definition" 487 | IPT3 -> IPT32 "by definition" 488 | 489 | PT -> IPT "by definition" 490 | PT2 -> IPT2 "by definition" 491 | PT3 -> IPT3 "by definition" 492 | 493 | RT22 -> PT22 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 494 | RT32 -> PT32 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 495 | RT2 -> PT2 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 496 | RT3 -> PT3 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 497 | PT22 -> IPT22 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 498 | PT22 -> SPT22 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 499 | IPT22 -> SIPT22 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 500 | SRT22 -> SPT22 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 501 | SPT22 -> SIPT22 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 502 | SRT2 -> SPT2 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 503 | SPT2 -> SIPT2 "Dzhafarov and Hirst (2009) [Proposition 3.1]" 504 | BSig2+SIPT22 -> SRT22 "Dzhafarov and Hirst (2009) [Theorem 3.3]" 505 | SIPT2 -> RT1 "Dzhafarov and Hirst (2009) [Theorem 3.3]" 506 | SIPT2 -> D2 "Dzhafarov and Hirst (2009) [Theorem 3.3]" 507 | SIPT22 -> D22 "Dzhafarov and Hirst (2009) [Proposition 3.5]" 508 | SIPT22 -> SADS "Dzhafarov and Hirst (2009) [Proposition 3.6]" 509 | PT22 -> ADS "Dzhafarov and Hirst (2009) [Theorem 3.8]" 510 | PT22 -> RT22 "Dzhafarov and Hirst (2009) [Theorem 3.8]" 511 | RT2 <-> PT2 "Dzhafarov and Hirst (2009) [Theorem 3.8]" 512 | ACA -> PT3 "Dzhafarov and Hirst (2009) [Theorem 4.1]" 513 | PT32 -> IPT3 "Dzhafarov and Hirst (2009) [Theorem 4.1]" 514 | IPT32 -> ACA "Dzhafarov and Hirst (2009) [Theorem 4.1]" 515 | PT32 <-> RT32 "Dzhafarov and Hirst (2009) [Corollary 4.2]" 516 | RT -> PT "Dzhafarov and Hirst (2009) [Remark 1.4]" 517 | PT -> IPT "Dzhafarov and Hirst (2009) [Remark 1.4]" 518 | IPT -> RT "Dzhafarov and Hirst (2009) [Remark 1.4]" 519 | 520 | BSig2+WKL w-|> SRT22 "Dzhafarov and Hirst (2009) [proof of Proposition 3.4]" 521 | D22 -> DNR "inspection of Hirschfeldt, Jockusch, Kjos-Hanssen, Lempp, and Slaman (2006) [Theorem 2.4]" 522 | 523 | #Uncited: 524 | # RCA w-|> DNR 525 | 526 | #Cited: 527 | # CAC w-|> SRT22 "Hirschfeldt and Shore (2007)" 528 | # WKL -> DNR "Giusto and Simpson (2000) [Lemma 6.18]" 529 | # ADS -> BSig2+COH "Hirschfeldt and Shore (2007) [Propositions 4.4 and 4.5]" 530 | # SCAC -> BSig2 "Hirschfeldt and Shore (2007) [Proposition 4.1]" 531 | 532 | #Hirschfeldt, Shore, and Slaman (2009) - "The atomic model theorem and type omitting" 533 | # http://www.ams.org/journals/tran/2009-361-11/S0002-9947-09-04847-8/ 534 | # Trans. Amer. Math. Soc. 
361(11), doi:10.1090/S0002-9947-09-04847-8 535 | 536 | WKL w-|> AMT "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.4]" 537 | AMT w-|> WKL "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.9]" 538 | AMT w-|> CADS "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.10]" 539 | AMT w-|> SRT22 "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.10]" 540 | AMT w-|> SADS "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.12]" 541 | AMT rPi12c RCA "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.15]" 542 | AMT+COH rPi12c RCA "Hirschfeldt, Shore, and Slaman (2009) [remark following Corollary 3.16]" 543 | SADS -> AMT "Hirschfeldt, Shore, and Slaman (2009) [Theorem 4.1]" 544 | BSig2+Pi01G -> ISig2 "Hirschfeldt, Shore, and Slaman (2009) [Theorem 4.3]" 545 | ISig2+Pi01G rPi12c ISig2 "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 4.3]" 546 | Pi01G rPi12c RCA "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 4.3]" 547 | AMT+ISig2 rPi12c ISig2 "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 4.3]" 548 | AMT rPi12c RCA "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 4.3]" 549 | AMT+BSig2 Pi11c BSig2 "Hirschfeldt, Shore, and Slaman (2009) [Corollary 4.5]" 550 | Pi01G -> AMT "Hirschfeldt, Shore, and Slaman (2009) [remark following Corollary 4.5]" 551 | AMT -|> Pi01G "Hirschfeldt, Shore, and Slaman (2009) [Theorem 4.3 and Corollary 4.5]" 552 | Pi01G w-|> CADS "Hirschfeldt, Shore, and Slaman (2009) [Corollary 3.10, and remark following Corollary 4.5]" 553 | WKL w-|> HYP "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 5.6]" 554 | HYP <-> OPT "Hirschfeldt, Shore, and Slaman (2009) [Theorem 5.7]" 555 | AMT -> HYP "Hirschfeldt, Shore, and Slaman (2009) [Corollary 5.8]" 556 | COH -> HYP "Hirschfeldt, Shore, and Slaman (2009) [Corollary 5.9]" 557 | RCA w-|> AST "Hirschfeldt, Shore, and Slaman (2009) [Theorem 6.3]" 558 | OPT -> AST "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 6.3]" 559 | DNR -> AST "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 6.3]" 560 | CADS -> AST "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 6.3]" 561 | AST w-|> OPT "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 6.3]" 562 | AST w-|> DNR "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 6.3]" 563 | AST w-|> CADS "Hirschfeldt, Shore, and Slaman (2009) [remark following Theorem 6.3]" 564 | 565 | AMT+ISig2 -> Pi01G "straightforward verification of Conidis (2008) [Corollary 3.11]" 566 | 567 | RT22 form rPi12 568 | SRT22 form rPi12 569 | WKL form rPi12 570 | DNR form rPi12 571 | CAC form rPi12 572 | ADS form rPi12 573 | SADS form rPi12 574 | ISig2 form Pi11 575 | 576 | #Not supported: 577 | # AMT has low solutions 578 | # Pi01G has low solutions 579 | # OPT has solutions in all non-zero c.e. degrees 580 | # AST has solutions in all non-recursive degrees 581 | 582 | #Cited: 583 | # CAC -|> WKL "Hirschfeldt and Shore (2007)" 584 | 585 | #Chong, Lempp, and Yang (2010) - "On the role of the collection principle for $\Sigma^0_2$-formulas in second-order reverse mathematics" 586 | # http://www.ams.org/journals/proc/2010-138-03/S0002-9939-09-10115-6/ 587 | # Proc. Amer. Math. Soc. 
138(3), doi:10.1090/S0002-9939-09-10115-6 588 | 589 | PART -> BSig2 "Chong, Lempp, and Yang (2010) [Theorem 1.2]" 590 | D22 -> BSig2 "Chong, Lempp, and Yang (2010) [Theorem 1.4]" 591 | SRT22 <-> SPT22 "Chong, Lempp, and Yang (2010) [Theorem 1.7]" 592 | SRT22 <-> SIPT22 "Chong, Lempp, and Yang (2010) [Theorem 1.7]" 593 | IPT22 -> SPT22 "Chong, Lempp, and Yang (2010) [Theorem 1.7]" 594 | 595 | #Cited: 596 | # PT22 -> BSig2 "Dzhafarov and Hirst (2009)" 597 | # SIPT22 -> D22 "Dzhafarov and Hirst (2009)" 598 | # BSig2+IPT22 -> SPT22 "Dzhafarov and Hirst (2009)" 599 | 600 | #Dzhafarov, Hirst, and Lakins (2010) - "Ramsey’s theorem for trees: the polarized tree theorem and notions of stability" 601 | # http://link.springer.com/article/10.1007/s00153-010-0179-6 602 | # Arch. Math. Logic 49(3), doi:10.1007/s00153-010-0179-6 603 | 604 | PTT3 -> PTT32 "by definition" 605 | PTT2 -> PTT22 "by definition" 606 | IPTT3 -> IPTT32 "by definition" 607 | IPTT2 -> IPTT22 "by definition" 608 | 609 | STT2 -> STT22 "by definition" 610 | SPTT2 -> SPTT22 "by definition" 611 | SIPTT2 -> SIPTT22 "by definition" 612 | 613 | TT2 -> STT2 "by definition" 614 | TT22 -> STT22 "by definition" 615 | 616 | PTT32 -> IPTT32 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 1.6]" 617 | PTT22 -> IPTT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 1.6]" 618 | TT32 -> PTT32 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 2.1]" 619 | TT22 -> PTT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 2.1]" 620 | PTT22 -> SPTT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.10]" 621 | IPTT22 -> SIPTT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.10]" 622 | STT22 -> SPTT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.10]" 623 | SPTT22 -> SIPTT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.10]" 624 | SPTT22 -> SPT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.11]" 625 | SIPTT22 -> SIPT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.11]" 626 | STT22 <-> SPTT22 "Dzhafarov, Hirst, and Lakins (2010) [Theorem 3.14]" 627 | SPTT22 <-> SIPTT22 "Dzhafarov, Hirst, and Lakins (2010) [Theorem 3.14]" 628 | STT2 <-> SPTT2 "Dzhafarov, Hirst, and Lakins (2010) [Theorem 3.14]" 629 | SPTT2 <-> SIPTT2 "Dzhafarov, Hirst, and Lakins (2010) [Theorem 3.14]" 630 | TT22 <-> CTT22+STT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.18]" 631 | #TADS -> C4TT22 "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.21]" 632 | #PTT22 -> TADS "Dzhafarov, Hirst, and Lakins (2010) [Proposition 3.22]" 633 | #PTT22 <-> C4TT22+S4PTT22 "Dzhafarov, Hirst, and Lakins (2010) [Corollary 3.23]" 634 | 635 | #Dzhafarov (2011) - "Stable Ramsey's Theorem and Measure" 636 | # http://projecteuclid.org/euclid.ndjfl/1292249613 637 | # Notre Dame J. 
Formal Logic 52(1), doi:10.1215/00294527-2010-039 638 | 639 | ACA -> SRAM "Dzhafarov (2011) [Proposition 5.5]" 640 | SRAM -> SRT22 "Dzhafarov (2011) [Proposition 5.5]" 641 | SRT22 -> ASRT22 "Dzhafarov (2011) [Proposition 5.5]" 642 | SRAM -> ASRAM "Dzhafarov (2011) [Proposition 5.5]" 643 | ASRAM -> ASRT22 "Dzhafarov (2011) [Proposition 5.5]" 644 | SRAM w-|> ACA "Dzhafarov (2011) [Proposition 5.5]" 645 | ASRT22 -> DNR "Dzhafarov (2011) [Proposition 5.6]" 646 | WKL w-|> ASRT22 "Dzhafarov (2011) [Proposition 5.6]" 647 | ASRT22 w-|> WKL "Dzhafarov (2011) [Proposition 5.7]" 648 | ASRT22 w-|> SRT22 "Dzhafarov (2011) [Corollary 5.8]" 649 | ASRT22 w-|> ASRAM "Dzhafarov (2011) [Corollary 5.8]" 650 | ASRT22 w-|> COH "Dzhafarov (2011) [Corollary 5.8]" 651 | 652 | RT22 w-|> SRAM "follows from Mileti (2004) [Corollary 5.4.6]" 653 | 654 | #Cited: 655 | # WKL -|> SRT22 "Cholak, Jockusch, and Slaman (2001)" 656 | # SRT22 -> BSig2 "Cholak, Jockusch, and Slaman (2001)" 657 | 658 | #Slaman CiE11 659 | # Slides: http://cie2011.fmi.uni-sofia.bg/files/slides/Slaman.pdf 660 | 661 | RAN2 -> PHPM "Slaman CiE11 [Theorem (69/75)]" 662 | RCA -|> PHPM "Slaman CiE11 [Theorem (69/75)]" 663 | PHPM -|> BSig2 "Slaman CiE11 [Theorem (73/75)]" 664 | RAN2 -|> BSig2 "Slaman CiE11 [Theorem (69/75) and Theorem (73/75)]" 665 | 666 | PHPM form Pi11 667 | 668 | #Unsupported: 669 | # RAN2 Pi0kc PHPM 670 | # PHPM Pi0kc RAN2 671 | # PHPM form Pi0k 672 | 673 | #Uncited: 674 | # TS2 -> CSig2 675 | 676 | #Avigad, Dean, and Rute (2012) - "Algorithmic randomness, reverse mathematics, and the dominated convergence theorem" 677 | # http://www.sciencedirect.com/science/article/pii/S0168007212000863 678 | # Ann. Pure Appl. Logic 163(12), doi:10.1016/j.apal.2012.05.010 679 | 680 | WWKL2 -> WWKL "by definition" 681 | 682 | POS+WWKL -> POS2 "Avigad, Dean, and Rute (2012) [remark on p. 2, end of Section 1]" 683 | POS1 <-> RAN1 "Avigad, Dean, and Rute (2012) [Theorem 3.1]" 684 | WWKL2 -> BSig2 "Avigad, Dean, and Rute (2012) [Proposition 3.2]" 685 | POS -> BSig2 "Avigad, Dean, and Rute (2012) [remark following Proposition 3.2]" 686 | POS2 <-> WWKL2 "Avigad, Dean, and Rute (2012) [Proposition 3.5]" 687 | WWKL2 <-> BSig2+RAN2 "Avigad, Dean, and Rute (2012) [Proposition 3.6]" 688 | WKL -|> WWKL2 "Avigad, Dean, and Rute (2012) [Theorem 3.8]" 689 | WWKL2 -|> WKL "Avigad, Dean, and Rute (2012) [Theorem 3.8]" 690 | WWKL2 <-> DCTp "Avigad, Dean, and Rute (2012) [Theorem 4.3]" 691 | ACA -> POS2 "Avigad, Dean, and Rute (2012) [Corollary 4.6]" 692 | POS2 -|> ACA "Avigad, Dean, and Rute (2012) [Corollary 4.6]" 693 | POS2 -> WWKL "Avigad, Dean, and Rute (2012) [Corollary 4.6]" 694 | WWKL -|> POS2 "Avigad, Dean, and Rute (2012) [Corollary 4.6]" 695 | POS2 -|> WKL "Avigad, Dean, and Rute (2012) [Corollary 4.6]" 696 | WKL -|> POS2 "Avigad, Dean, and Rute (2012) [Corollary 4.6]" 697 | 698 | POS1 <-> WWKL "follows from Yu and Simpson (1990) [Theorem 1]" 699 | 700 | #Generic: 701 | # WWKLn <-> POSn <-> BSign+RANn 702 | 703 | #Chong, Slaman, and Yang (2012) - "$\Pi^1_1$-conservation of combinatorial principles weaker than Ramsey's theorem for pairs" 704 | # http://www.sciencedirect.com/science/article/pii/S0001870812000965 705 | # Adv. Math. 
230(3), doi:10.1016/j.aim.2012.02.025 706 | 707 | BSig2+COH Pi11c BSig2 "Chong, Slaman, and Yang (2012) [Corollary 3.1]" 708 | BSig2+SADS Pi11c BSig2 "Chong, Slaman, and Yang (2012) [Corollary 4.1]" 709 | ADS Pi11c BSig2 "Chong, Slaman, and Yang (2012) [Corollary 4.4]" 710 | SCAC Pi11c BSig2 "Chong, Slaman, and Yang (2012) [Corollary 5.1]" 711 | CAC Pi11c BSig2 "Chong, Slaman, and Yang (2012) [Corollary 5.2]" 712 | 713 | ISig2 form Pi11 714 | 715 | #Cited: 716 | # SRT22 -> BSig2 "Cholak, Jockusch, and Slaman (2001)" 717 | # CAC -|> RT22 "Hirschfeldt and Shore (2007)" 718 | # SCAC -|> CAC "Hirschfeldt and Shore (2007)" 719 | # SADS -|> ADS "Hirschfeldt and Shore (2007)" 720 | 721 | #Flood (2012) - "Reverse mathematics and a Ramsey-type König's Lemma" 722 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9059924&fileId=S0022481200001225 723 | # J. Symbolic Logic 77(4), doi:10.2178/jsl.7704120 724 | 725 | RWKL2 -> RWKL "by definition" 726 | P22 -> D22 "by definition" 727 | 728 | WKL -> RWKL "Flood (2012) [Theorem 3]" 729 | SRT22 -> RWKL "Flood (2012) [Theorem 5]" 730 | RWKL -|> SRT22 "Flood (2012) [Corollary 6]" 731 | RCA w-|> RWKL "Flood (2012) [remark following Lemma 7]" 732 | RWKL -> DNR "Flood (2012) [Theorem 8]" 733 | RT22 -> RWKL2 "Flood (2012) [Theorem 13]" 734 | RWKL2 -> P22 "Flood (2012) [Theorem 18, citing Yokoyama (personal communication)]" 735 | ACA -> RWKLw "Flood (2012) [Theorem 24]" 736 | RWKLw -> RWKL2 "Flood (2012) [Theorem 24]" 737 | RWKLw w-|> WKL "Flood (2012) [Corollary 26]" 738 | RT22 w-|> RWKLw "Flood (2012) [Proposition 29]" 739 | RWKLw -> Dw2 "Flood (2012) [Proposition 32]" 740 | Dw2 w-> RWKLw "Flood (2012) [Remark 33]" 741 | 742 | #Cited: 743 | # D22 <-> SRT22 "Cholak, Jockusch, and Slaman (2001) and Chong, Lempp, and Yang (2010)" 744 | 745 | #Kjos-Hanssen, Miller, and Solomon (2012) - "Lowness notions, measure, and domination" 746 | # http://jlms.oxfordjournals.org/content/85/3/869.abstract 747 | # J. London Math. Soc. 85(3), doi:10.1112/jlms/jdr072 748 | 749 | POS w-|> GdREG "Kjos-Hanssen, Miller, and Solomon (2012) [Corollary 5.5]" 750 | POS+WWKL -> GdREG "Kjos-Hanssen, Miller, and Solomon (2012) [Section 7]" 751 | 752 | #Kreuzer (2012) - "Primitive Recursion and the Chain Antichain Principle" 753 | # http://projecteuclid.org/euclid.ndjfl/1336588253 754 | # Notre Dame J. Formal Logic 53(2), doi:10.1215/00294527-1715716 755 | 756 | CAC+WKL -|> ISig2 "Kreuzer (2012) [Corollary 4.3]" 757 | SEM -> RT1 "Kreuzer (2012) [Proposition A.1]" 758 | 759 | ISig2 form Pi11 760 | 761 | #Cited: 762 | # CAC -|> ISig2 "Chong, Slaman, and Yang (2012)" 763 | 764 | #Kreuzer (2012) - "Proof mining and combinatorics: Program extraction for Ramsey's theorem for pairs" (Ph.D. thesis, Technische Universität, Darmstadt) 765 | # http://tuprints.ulb.tu-darmstadt.de/2972/ 766 | 767 | #Unsupported: 768 | # EM does not have low solutions "Kreuzer (2012) [Proposition 5.2]" 769 | 770 | #Cited: 771 | #BPi1 Pi03c RCA "Paris (1980) [Theorem 33, and independently by Friedman]" 772 | 773 | #Liu (2012) - "$\mathsf{RT}^2_2$ does not imply $\mathsf{WKL}_0$" 774 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9058600&fileId=S0022481200000761 775 | # J. 
Symbolic Logic 77(2), doi:10.2178/jsl/1333566640 776 | 777 | RT22 w-|> WKL "Liu (2012) [Corollary 1.6]" 778 | 779 | #Conidis and Slaman (2013) - "Random reals, the rainbow Ramsey theorem, and arithmetic conservation" 780 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9058478&fileId=S0022481200000359 781 | # J. Symbolic Logic 78(1), doi:10.2178/jsl.7801130 782 | 783 | CSig -> CSig2 "by definition" 784 | 785 | ISig2 form Pi11 786 | 787 | RAN2 -> RRT22 "Conidis and Slaman (2013) [Theorem 2.1]" 788 | CSig -|> BSig2 "Conidis and Slaman (2013) [Theorem 3.2]" 789 | ISig2+CSig -|> BSig3 "Conidis and Slaman (2013) [Theorem 3.2]" 790 | RRT22 -> CSig2 "Conidis and Slaman (2013) [Theorem 3.3]" 791 | RAN2+BSig2 Pi11c BSig2 "Conidis and Slaman (2013) [Corollary 4.2]" 792 | 793 | #Dzhafarov and Mummert (2013) - "On the strength of the finite intersection principle" 794 | # http://link.springer.com/article/10.1007/s11856-012-0150-9 795 | # Israel J. Math. 196(1), doi:10.1007/s11856-012-0150-9 796 | 797 | ACA -> FIP "Dzhafarov and Mummert (2013) [Proposition 2.1]" 798 | FIP -> nD2IP "Dzhafarov and Mummert (2013) [Proposition 2.2]" 799 | D2IP <-> ACA "Dzhafarov and Mummert (2013) [Proposition 2.4]" 800 | FIP w-|> ACA "Dzhafarov and Mummert (2013) [Corollary 2.6]" 801 | WKL w-|> nD2IP "Dzhafarov and Mummert (2013) [Corollary 3.2]" 802 | nD2IP -> HYP "Dzhafarov and Mummert (2013) [Corollary 4.1]" 803 | Pi01G -> FIP "Dzhafarov and Mummert (2013) [Proposition 4.2]" 804 | FIP w-|> AMT "Dzhafarov and Mummert (2013) [Corollary 4.5]" 805 | 806 | #Lerman, Solomon, and Towsner (2013) - "Separating principles below Ramsey's theorem for pairs" 807 | # http://www.worldscientific.com/doi/abs/10.1142/S0219061313500074 808 | # J. Math. Logic 13(2), doi:10.1142/S0219061313500074 809 | 810 | ADS w-|> SCAC "Lerman, Solomon, and Towsner (2013) [Theorem 1.7]" 811 | RT22 -> EM "Lerman, Solomon, and Towsner (2013) [remark before Theorem 1.9]" 812 | BSig2+EM -> HYP "Lerman, Solomon, and Towsner (2013) [Theorem 1.10 and following remark]" 813 | CAC -|> EM "Lerman, Solomon, and Towsner (2013) [Corollary 1.12]" 814 | SADS+SEM -> SRT22 "Lerman, Solomon, and Towsner (2013) [Corollary 1.14]" 815 | EM w-|> SRT22 "Lerman, Solomon, and Towsner (2013) [Theorem 1.15]" 816 | 817 | #Cited: 818 | # SRT22 -|> RT22 "Chong, Slaman, and Yang (2014)" 819 | # CAC -|> SRT22 "Hirschfeldt and Shore (2007)" 820 | # SCAC -|> CAC "Hirschfeldt and Shore (2007)" 821 | # SADS -|> ADS "Hirschfeldt and Shore (2007)" 822 | # EM -> BSig2 "Kreuzer (2012)" 823 | 824 | #Wang (2013) - "Rainbow Ramsey Theorem for Triples is Strictly Weaker than the Arithmetical Comprehension Axiom" 825 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=9189589&fileId=S0022481200126623 826 | # J. Symbolic Logic 78(3), doi:10.2178/jsl/1067620187 827 | 828 | RRT32 w-|> ACA "Wang (2013) [Theorem 3.1]" 829 | RRT32+RT22+WKL w-|> ACA "Wang (2013) [Corollary 3.4]" 830 | 831 | #Chong, Slaman, and Yang (2014) - "The metamathematics of Stable Ramsey’s Theorem for Pairs" 832 | # http://www.ams.org/journals/jams/2014-27-03/S0894-0347-2014-00789-X/ 833 | # J. Amer. Math. Soc. 
27(3), doi:10.1090/S0894-0347-2014-00789-X 834 | 835 | D22 -|> ISig2 "Chong, Slaman, and Yang (2014) [Theorem 2.2]" 836 | D22 -|> RT22 "Chong, Slaman, and Yang (2014) [Corollary 2.5]" 837 | BSig2+D22+WKL -|> RT22 "Chong, Slaman, and Yang (2014) [Theorem 2.7]" 838 | BSig2+D22+WKL -|> ISig2 "Chong, Slaman, and Yang (2014) [Theorem 2.7 and closing remark of Section 5]" 839 | 840 | #Cited: 841 | # SRT22 -> BSig2 "Cholak, Jockusch, and Slaman (2001)" 842 | 843 | #Kang (2014) - "Combinatorial principles between $\mathsf{RRT}^2_2$ and $\mathsf{RT}^2_2$" 844 | # http://link.springer.com/article/10.1007/s11464-014-0390-6 845 | # Front. Math. China 9(6), doi:10.1007/s11464-014-0390-6 846 | 847 | RRT22 w-|> TS2 "Kang (2014) [Theorem 9]" 848 | EM -> RRT22 "Kang (2014) [Theorem 10, due to Wang]" 849 | RRT22 w-|> EM "Kang (2014) [Theorem 11]" 850 | 851 | #Wang (2014, APAL) - "Cohesive sets and rainbows" 852 | # http://www.sciencedirect.com/science/article/pii/S0168007213000651 853 | # Ann. Pure Appl. Logic 165(2), doi:10.1016/j.apal.2013.06.002 854 | 855 | RRT42 -> RRT32 "by definition" 856 | 857 | RT22 -|> RRT32 "follows from Csima and Mileti (2009) and Cholak, Jockusch, and Slaman (2001)" 858 | ACA -> RRT42 "follows from Jockusch (1972)" 859 | 860 | RRT32 w-|> WKL "Wang (2014, APAL) [Theorem 5.2]" 861 | RRT32 w-|> RRT42 "Wang (2014, APAL) [Theorem 5.4]" 862 | 863 | #Cited: 864 | # RRT22 -|> COH "Csima and Mileti (2009)" 865 | 866 | #Wang (2014, AIM) - "Some logically weak Ramseyan theorems" 867 | # http://www.sciencedirect.com/science/article/pii/S0001870814001674 868 | # Adv. Math. 261, doi:10.1016/j.aim.2014.05.003 869 | 870 | FS -> FS2 "by definition" 871 | FS -> FS3 "by definition" 872 | 873 | FS -|> ACA "Wang (2014, AIM) [Theorem 4.1]" 874 | #???? Question: does this show FS w-|> ACA? 875 | FS2 -> RRT22 "Wang (2014, AIM) [Theorem 4.2]" 876 | FS3 -> RRT32 "Wang (2014, AIM) [Theorem 4.2]" 877 | FS -> RRT "Wang (2014, AIM) [Theorem 4.2]" 878 | 879 | #Cited: 880 | # RRT22 -|> ADS "Csima and Mileti (2009)" 881 | 882 | #Liu (2015) - "Cone avoiding closed sets" 883 | # http://www.ams.org/journals/tran/2015-367-03/S0002-9947-2014-06049-2 884 | # Trans. Amer. Math. Soc. 367(3), doi:10.1090/S0002-9947-2014-06049-2 885 | 886 | RT22 w-|> WWKL "Liu (2015) [Corollary 5.1]" 887 | 888 | #Rice (2015) - "The Thin Set Theorem for Pairs Implies DNR" 889 | # http://projecteuclid.org/euclid.ndjfl/1443620509 890 | # Notre Dame J. Formal Logic 56(4), doi:10.1215/00294527-3153606 891 | 892 | TS3 -> STS3 "by definition" 893 | TS2 -> STS2 "by definition" 894 | 895 | STS2 -> DNR "Rice (2015) [Main Theorem]" 896 | 897 | #Diamondstone, Downey, Greenberg, and Turetsky (2016) - "The finite intersection principle and genericity" 898 | # http://journals.cambridge.org/action/displayAbstract?fromPage=online&aid=10180819&fileId=S0305004115000651 899 | # Math. Proc. Cambridge Philos. Soc. 160(2), doi:10.1017/S0305004115000651 900 | 901 | #Cited: 902 | # FIP -|> ACA "Dzhafarov and Mummert (2013)" 903 | # WKL -|> FIP "Dzhafarov and Mummert (2013)" 904 | # FIP -|> WKL "Dzhafarov and Mummert (2013)" 905 | 906 | #Bienvenu, Patey, and Shafer (2017) - "On the logical strengths of partial solutions to mathematical problems" (Transactions of the London Mathematical Society vol. 4, iss. 
1, 2017) 907 | # https://londmathsoc.onlinelibrary.wiley.com/doi/10.1112/tlm3.12001 908 | 909 | WKL -> RWKL "by definition" 910 | WWKL -> RWWKL "by definition" 911 | 912 | RWKL -> RWWKL "by definition" 913 | RWKL2 -> RWWKL2 "by definition" 914 | 915 | RWWKL2 -> RWWKL "by definition" 916 | 917 | RCOLOR3 -> RCOLOR2 "by definition" 918 | 919 | RT22 -> EM "easy to see, remarked in Bienvenu, Patey, and Shafer (2017)" 920 | SRT22 -> SEM "easy to see, remarked in Bienvenu, Patey, and Shafer (2017)" 921 | 922 | RCOLOR2 form rPi12 923 | 924 | RAN2 -> DNR0 "Bienvenu, Patey, and Shafer (2017) [Theorem 2.8]" 925 | SEM -> RWKL "Bienvenu, Patey, and Shafer (2017) [Theorem 2.11]" 926 | DNR <-> RWWKL "Bienvenu, Patey, and Shafer (2017) [Theorem 3.4]" 927 | BSig2+RWWKL2 -> DNR0 "Bienvenu, Patey, and Shafer (2017) [Lemma 3.6]" 928 | DNR0+ISig2 -> RWWKL2 "Bienvenu, Patey, and Shafer (2017) [Lemma 3.7]" 929 | SRT22 -|> RWKL2 "Bienvenu, Patey, and Shafer (2017) [Theorem 3.10]" 930 | BSig2+SRT22 -|> DNR0 "Bienvenu, Patey, and Shafer (2017) [proof of Theorem 3.10]" 931 | RSAT <-> RWKL "Bienvenu, Patey, and Shafer (2017) [Theorem 4.7]" 932 | RWKL -> RCOLOR3 "Bienvenu, Patey, and Shafer (2017) [Lemma 5.3]" 933 | RCOLOR3 -> RWKL "Bienvenu, Patey, and Shafer (2017) [Theorem 5.13]" 934 | #RCA w-|> RCOLOR2 "Bienvenu, Patey, and Shafer (2017) [Proposition 6.3]" 935 | RCOLOR2 -> AST "Bienvenu, Patey, and Shafer (2017) [Proposition 6.3]" 936 | CAC w-|> RCOLOR2 "Bienvenu, Patey, and Shafer (2017) [Theorem 6.9]" 937 | WWKL w-|> RCOLOR2 "Bienvenu, Patey, and Shafer (2017) [Theorem 6.11]" 938 | 939 | RAN1 -> DNR "relativization and formalization of Kučera (1985)" 940 | 941 | #Generic: 942 | # BSign+RWWKLn -> DNRn [Lemma 3.6] 943 | # ISign+DNRn -> RWWKLn [Lemma 3.7] 944 | # RWKL <-> RCOLORk for k >= 3 [Corollary 5.14] 945 | 946 | #Cited: 947 | # SRT22 -|> RT22 "Chong, Slaman, and Yang (2014)" 948 | # SCAC -|> CAC "Hirschfeldt and Shore (2007)" 949 | # CAC -|> RT22 "Hirschfeldt and Shore (2007)" 950 | #???? SRT22 -|> DNR0 "Chong, Slaman, and Yang (2014), explained in Patey (preprint) - Somewhere over the rainbow Ramsey theorem for pairs" 951 | # EM -|> RT22 "Bovykin and Weiermann (2005)" 952 | # EM -|> SCAC "Lerman, Solomon, and Towsner (2013)" 953 | # CAC -|> SEM "Lerman, Solomon, and Towsner (2013)" 954 | # RAN2 -|> BSig2 "Slaman CiE11" 955 | # WKL <-> COLORk "Hirst (1990) - Marriage theorems and reverse mathematics" 956 | # COH rPi12c RCA "Hirschfeldt, Shore, and Slaman (2009)" 957 | 958 | #Cholak, Downey, and Igusa (to appear) - "Any FIP real computes a 1-generic" 959 | # http://arxiv.org/abs/1502.03785 960 | 961 | FIP <-> GEN1 "Cholak, Downey, and Igusa (to appear) [Propositions 5.4 and 5.5]" 962 | nD2IP+ISig2 -> GEN1 "Cholak, Downey, and Igusa (to appear) [Proposition 5.7]" 963 | 964 | HYP w-|> GEN1 "W. Miller and Martin (1968) [Every non-trivial $\Delta^0_2$ degree is hyperimmune], Sacks (1963) [There is a $\Delta^0_2$ minimal degree], and sets that join to a 1-generic are Turing-incomparable." 
965 | 966 | #Frittaion and Patey (to appear) - "Coloring the rationals in reverse mathematics" 967 | # http://arxiv.org/abs/1508.00752v2 968 | 969 | ER22 -> RT22 "Frittaion and Patey (to appear) [Lemma 2.1]" 970 | ACA -> ER22 "Frittaion and Patey (to appear) [Theorem 2.4]" 971 | ER22 -> ER1 "Frittaion and Patey (to appear) [Lemma 3.1]" 972 | ISig2 -> ER1 "Frittaion and Patey (to appear) [Lemma 3.1]" 973 | ER1 -> RT1 "Frittaion and Patey (to appear) [Lemma 3.1]" 974 | BSig2 -|> ER1 "Frittaion and Patey (to appear) [Theorem 3.5]" 975 | ER22 ER1 iff P -> ISig2 [Theorem 3.5] 979 | 980 | #Patey (to appear) - "Iterative forcing and hyperimmunity in reverse mathematics" 981 | # http://www.ludovicpatey.com/media/research/iterative-forcing-extended.pdf 982 | # to appear in Computability 983 | 984 | #Unsupported: 985 | # SADS does not admit hyperimmunity preservation "Patey (to appear, iterative forcing) [Theorem 6]" 986 | # SADS does not admit preservation of 2 hyperimmunities "Patey (to appear, iterative forcing) [Theorem 6]" 987 | # STS2 does not admit hyperimmunity preservation "Patey (to appear, iterative forcing) [Theorem 9]" 988 | # AMT admits hyperimmunity preservation "Patey (to appear, iterative forcing) [remark following Theorem 11]" 989 | # Pi01G admits hyperimmunity preservation "Patey (to appear, iterative forcing) [remark following Theorem 11]" 990 | # RRT22 admits hyperimmunity preservation "Patey (to appear, iterative forcing) [remark following Theorem 11]" 991 | # COH admits hyperimmunity preservation "Patey (to appear, iterative forcing) [Theorem 12]" 992 | # WKL admits hyperimmunity preservation "Patey (to appear, iterative forcing) [Theorem 14]" 993 | # EM admits hyperimmunity preservation "Patey (to appear, iterative forcing) [Theorem 17]" 994 | # STS2 admits preservation of k hyperimmunities "Patey (to appear, iterative forcing) [Theorem 23]" 995 | # STS2 does not admit hyperimmunity preservation "Patey (to appear, iterative forcing) [Theorem 23]" 996 | 997 | #Cited: 998 | # COH+WKL+RRT22+Pi01G+EM+TS2(k+1) -|> SADS "Patey (submitted) - The weakness of being cohesive, thin or free in reverse mathematics" 999 | # COH+WKL+RRT22+Pi01G+EM+TS2(k+1) -|> STS2(k) "Patey (submitted) - The weakness of being cohesive, thin or free in reverse mathematics" 1000 | 1001 | #Patey (to appear) - "The strength of the tree theorem for pairs in reverse mathematics" 1002 | # http://arxiv.org/abs/1505.01057v2 1003 | 1004 | TT12 TT22 "Patey (to appear, tree theorem) [Corollary 4.12]" 1006 | 1007 | #Unsupported: 1008 | # TT22 does not admit fairness preservation [Theorem 3.9] 1009 | # WKL admits n-fairness preservation [Theorem 4.2] 1010 | # COH admits n-fairness preservation [Theorem 4.5] 1011 | # RT12 admits strong fairness preservation [Theorem 4.8] 1012 | # RT22 admits fairness preservation [Theorem 4.11] 1013 | 1014 | #Wang (to appear) - "The Definability Strength of Combinatorial Principles" 1015 | # http://arxiv.org/abs/1408.1465 1016 | 1017 | COH+EM+Pi01G+RRT22+WKL w-|> SADS "Wang (to appear) [Theorem 5.1]" 1018 | COH+EM+Pi01G+RRT22+WKL w-|> TS2 "Wang (to appear) [Theorem 5.1]" 1019 | RRT32 w-|> TS3 "Wang (to appear) [Theorem 5.2]" 1020 | 1021 | #Unsupported: 1022 | # Pi01G admits arithmetic hierarchy preservation 1023 | # WKL admits arithmetic hierarchy preservation 1024 | # COH admits Delta02-definition preservation 1025 | # EM admits Delta02-definition preservation 1026 | # RRT22 admits arithmetic hierarchy preservation 1027 | # RRT32 admits Delta03-definition preservation 1028 | # RT2 
admits Sigma01- and Pi01-definition preservation 1029 | # FS admits Sigma01- and Pi01-definition preservation 1030 | # RT32 does not admit Sigma01-definition preservation 1031 | # Pi12 consequences of RT2 admit simultaneous preservation of (Sigma0(n+1), Pi0(n+1), Delta0(n+2)) definitions [Corollary 3.29] 1032 | # SADS does not admit Delta02-definition preservation [Theorem 4.1] 1033 | # ADS and RT22 do not admit Delta02-definition preservation [Corollary 4.2] 1034 | # TS2 does not admit Delta02-definition preservation [Theorem 4.3] 1035 | # TS3 does not admit Delta02- or Delta03-definition preservation [Theorem 4.3] 1036 | # RRT32 does not admit Delta02-definition preservation [Corollary 4.10] 1037 | 1038 | #Flood and Towsner (submitted) - "Separating principles below $\mathsf{WKL}_0$" 1039 | # http://arxiv.org/abs/1410.4068 1040 | 1041 | DNR w-|> WWKL "Flood and Towsner (submitted) [Corollary 3.35]" 1042 | DNR w-|> RWKL "Flood and Towsner (submitted) [Theorem 4.18]" 1043 | SEM -> RWKL "Flood and Towsner (submitted) [Theorem 5.2]" 1044 | DNR -> RWWKL "Flood and Towsner (submitted) [Theorem 5.3]" 1045 | 1046 | #Cited: 1047 | # DNR -|> RWKL "Bienvenu, Patey, and Shafer (to appear)" 1048 | 1049 | #Chong, Slaman, and Yang (preprint) - "The inductive strength of Ramsey's theorem for pairs" 1050 | # http://www.math.nus.edu.sg/~chongct/ISigma2.pdf 1051 | 1052 | RT22 -|> ISig2 "Chong, Slaman, and Yang (preprint) [Corollary 4.2]" 1053 | 1054 | #Patey (preprint) - "Somewhere over the rainbow Ramsey theorem for pairs" 1055 | # http://arxiv.org/abs/1501.07424v2 1056 | 1057 | RRT22 -> WSRRT22 "by definition" 1058 | WSRRT22 -> SRRT22 "by definition" 1059 | 1060 | RRT32 -> SRRT32 "by definition" 1061 | 1062 | SRT22 -|> RRT22 "Patey (preprint) [Corollary 2.12]" 1063 | DNR <=_c SEM "Patey (preprint) [Corollary 3.6]" 1064 | CAC w-|> SEM "Patey (preprint) [Corollary 3.7]" 1065 | EM -> DNR0 "Patey (preprint) [Theorem 3.10]" 1066 | SEM -> DNR "Patey (preprint) [Theorem 3.13]" 1067 | SRT22 -> SFS2 "Patey (preprint) [Lemma 4.3]" 1068 | RRT32 -> TS2 "Patey (preprint) [Theorem 4.5]" 1069 | RRT w-> FS "Patey (preprint) [Corollary 4.7]" 1070 | TS2 -> DNR0 "Patey (preprint) [Theorem 4.8]" 1071 | IPT22 -> DNR0 "Patey (preprint) [Theorem 4.12]" 1072 | RT22 w-|> TS3 "Patey (preprint) [Corollary 4.15]" 1073 | STS2 -> AMT "Patey (preprint) [Theorem 4.19]" 1074 | SRRT22 -> DNR "Patey (preprint) [Theorem 5.11]" 1075 | SRRT22 w-|> SEM "Patey (preprint) [Corollary 5.23]" 1076 | SRRT22 w-|> STS2 "Patey (preprint) [Corollary 5.23]" 1077 | SRRT22 -> HYP "Patey (preprint) [Theorem 5.24]" 1078 | SRRT32 -> STS2 "Patey (preprint) [Theorem 5.26]" 1079 | ISig2+STS2 -> SRRT22 "Patey (preprint) [Corollary 5.30]" 1080 | SEM -> SRRT22 "Patey (preprint) [Corollary 5.32]" 1081 | WSRRT22 w-|> RRT22 "Patey (preprint) [Corollary 6.11]" 1082 | BSig2+SFS2 -> WSRRT22 "Patey (preprint) [Theorem 6.14]" 1083 | 1084 | RRT22 <-> DNR0 "J. 
Miller (personal communication)" 1085 | 1086 | SFS3 -> SFS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [follows from proof of Theorem 2.3]" 1087 | 1088 | SFS3 -> STS3 "Cholak, Giusto, Hirst, and Jockusch (2005) [follows from proof of Theorem 3.2]" 1089 | SFS2 -> STS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [follows from proof of Theorem 3.2]" 1090 | 1091 | STS3 -> TS2 "Cholak, Giusto, Hirst, and Jockusch (2005) [follows from proof of Theorem 3.4]" 1092 | 1093 | DNR <=_c SRT22 "Hirschfeldt, Jockusch, Kjos-Hanssen, Lempp, and Slaman (2008) [follows from proof of Theorem 2.3]" 1094 | 1095 | #Generic: 1096 | # SFSk -> STSk "Cholak, Giusto, Hirst, and Jockusch (2005) [follows from proof of Theorem 3.2]" 1097 | 1098 | #Unsupported: 1099 | # SRRT22 has low solutions 1100 | # WSRRT22 has low solutions 1101 | 1102 | #Claimed, unproven: 1103 | # SRT22 -> SRRT22 [] 1104 | # SRRT22 -|> WSRRT22 [] 1105 | 1106 | #Uncited: 1107 | # SEM+SADS <-> SRT22 1108 | # RRT22 w-|> SEM 1109 | 1110 | #Cited: 1111 | # SRT22 -|> RT22 "Chong, Slaman, and Yang (2014)" 1112 | # RAN2 -|> BSig2 "Slaman (2011)" 1113 | # BSig2+EM -> OPT "Lerman, Solomon, and Towsner (2013)" 1114 | # RCA -|> FS2 "Cholak, Giusto, Hirst, and Jockusch (2005)" 1115 | # WKL w-|> TS2 "Friedman (FOM 53)" 1116 | #???? WWKL2 w-|> AMT "Bienvenu, Patey, and Shafer (in preparation) - The role of randomness in reverse mathematics" 1117 | #???? COH+WSRRT22 -> RRT22 "Wang (2014, APAL)" 1118 | 1119 | # Patey and Yokoyama (preprint) - "The proof-theoretic strength of Ramsey's theorem for pairs and two colors" 1120 | # http://arxiv.org/abs/1601.00050 1121 | 1122 | # NOTE: uPi03 represents twiddle-Pi03 from Patey and Yokoyama... for all sets X, a Pi03 formula with parameter X 1123 | # Pi03 -> uPi03 -> Pi11 1124 | 1125 | GP22 -> SGP22 "by definition" 1126 | 1127 | RT22 -> GP22 "Infinite sets contain infinite families of large finite sets" 1128 | SRT22 -> SGP22 "Infinite sets contain infinite families of large finite sets" 1129 | 1130 | BSig2 uPi03c RCA "Patey and Yokoyama (preprint) [Theorem 1.5]; noted to be a parameterized version of the Parsons/Paris/Friedman conservation theorem" 1131 | 1132 | COH+SGP22 -> GP22 "Patey and Yokoyama (preprint) [remark preceding Definition 5.3]" 1133 | SADS+SGP22+WKL w-|> SRT22 "Patey and Yokoyama (preprint) [Corollary 5.4]" 1134 | SADS+SGP22+WKL w-|> SEM "Patey and Yokoyama (preprint) [Corollary 5.4]" 1135 | COH+EM+GP22+WKL -|> ADS "Patey and Yokoyama (preprint) [Corollary 5.6]" 1136 | SRT22 -|> GP22 "Patey and Yokoyama (preprint) [remark following proof of Theorem 5.7]" 1137 | GP22 -> DNR0 "Patey and Yokoyama (preprint) [Theorem 5.7]" 1138 | SGP22 uPi03c RCA "Patey and Yokoyama (preprint) [Theorem 6.4]" 1139 | ADS+GP22+WKL uPi03c RCA "Patey and Yokoyama (preprint) [Theorem 6.5]" 1140 | EM+WKL uPi03c RCA "Patey and Yokoyama (preprint) [Theorem 7.3 and reasoning similar to Theorem 8.2]" 1141 | RT22+WKL uPi03c RCA "Patey and Yokoyama (preprint) [Theorems 7.4 and 8.2]" 1142 | SGP22 -> BSig2 "Patey and Yokoyama (preprint) [Theorem 9.1, due to Kreuzer]" 1143 | 1144 | CSig2 form Pi11 1145 | 1146 | #Unsupported: 1147 | # SADS+SGP22+WKL has low solutions "Patey and Yokoyama (preprint) [Corollary 5.3]" 1148 | # SGP22 admits hyperimmunity preservation "Patey and Yokoyama (preprint) [Theorem 5.5]" 1149 | 1150 | #Generic: 1151 | # RTnk -> GPnk "Infinite sets contain infinite families of large finite sets" 1152 | # SRTnk -> SGPnk "Infinite sets contain infinite families of large finite sets" 1153 | 1154 | #Cited: 1155 | #RT22 -> BSig2 
"Hirst (1987)" 1156 | #SRT22+WKL -|> ISig2 "Chong, Slaman, and Yang (2014)" 1157 | 1158 | #Patey (preprint) - "Partial orders and immunity in reverse mathematics" 1159 | # http://arxiv.org/abs/1607.04506v1 1160 | 1161 | CAC -> WSCAC "by definition" 1162 | WSCAC -> SCAC "by definition" 1163 | 1164 | #CAC w-|> DNR "Patey (preprint, Partial orders) [Corollary 17]" 1165 | psRT22 <-> ADS "Patey (preprint, Partial orders) [Theorem 20]" 1166 | ADS+WWKL w-|> SCAC "Patey (preprint, Partial orders) [Corollary 32]" 1167 | WSCAC SCAC (for every k >= 2) "Patey (preprint, Partial orders) [Corollary 31]" 1171 | 1172 | #Unsupported: 1173 | # NOTE: "Preservation of co-c.e. hyperimmunity" might be better called "preservation of hypersimplicity" 1174 | # RT22 admits co-ce hyperimmunity preservation "Patey (preprint, Partial orders) [Corollary 8]" 1175 | # CAC admits c.b.-immunity preservation "Patey (preprint, Partial orders) [Theorem 13]" 1176 | # DNR does not admit c.b.-immunity preservation "Patey (preprint, Partial orders) [Theorem 16]" 1177 | # SCAC does not admit dependent hyperimmunity preservation "Patey (preprint, Partial orders) [Corollary 24]" 1178 | # psRT2k admits dependent hyperimmunity preservation "Patey (preprint, Partial orders) [Theorem 25]" 1179 | # WWKL admits dependent hyperimmunity preservation "Patey (preprint, Partial orders) [Corollary 30]" 1180 | 1181 | #Dzhafarov and Patey (preprint) - "Coloring trees in reverse mathematics" 1182 | # http://arxiv.org/abs/1609.02627v1 1183 | 1184 | TT22 -> TT12 "easy to see" 1185 | 1186 | STT22 w-|> ACA "Dzhafarov and Patey (preprint) [Corollary 3.10]" 1187 | TT22 w-|> ACA "Dzhafarov and Patey (preprint) [Theorem 4.1]" 1188 | CTT22 w-|> ACA "Dzhafarov and Patey (preprint) [follows from Theorem 4.6]" 1189 | RT22+WKL w-|> SER22 "Dzhafarov and Patey (preprint) [Theorem 5.5]" 1190 | STT22 -> SER22 "Dzhafarov and Patey (preprint) [proof of Corollary 5.6]" 1191 | 1192 | #Unsupported: 1193 | # TT12 admits strong cone avoidance "Dzhafarov and Patey (preprint) [Theorem 3.7]" 1194 | # STT22 admits cone avoidance "Dzhafarov and Patey (preprint) [Corollary 3.10]" 1195 | # TT22 admits cone avoidance "Dzhafarov and Patey (preprint) [Theorem 4.1]" 1196 | # CTT22 admits cone avoidance "Dzhafarov and Patey (preprint) [Theorem 4.6]" 1197 | # ER22 does not admit ER-fairness preservation "Dzhafarov and Patey (preprint) [Theorem 5.17]" 1198 | # WKL admits n-ER-fairness preservation for every n "Dzhafarov and Patey (preprint) [Theorem 5.18]" 1199 | # COH admits n-ER-fairness preservation for every n "Dzhafarov and Patey (preprint) [Theorem 5.20]" 1200 | # RT12 admits strong ER-fairness preservation "Dzhafarov and Patey (preprint) [Theorem 5.23]" 1201 | # RT22 admits ER-fairness preservation "Dzhafarov and Patey (preprint) [Theorem 5.26]" 1202 | 1203 | #Cited: 1204 | #ACA -> TT22 "Chubb, Hirst, and McNicholl (2009) [Theorem 1.3]" 1205 | #RT22 -|> TT22 "Patey (to appear, tree theorem) [Corollary 4.12]" 1206 | #COH+SRT22 <-> RT22 "Cholak, Jockusch, and Slaman (2001) [Lemma 7.11] and Mileti (27) [Claim A.1.3]" 1207 | #BSig2+COH <-> BSig2+CRT22 "Hirschfeldt and Shore (2007) [Propositions 4.4 and 4.8]" 1208 | 1209 | #Towsner (preprint) - "Constructing sequences one step at a time" 1210 | # http://arxiv.org/abs/1609.05509v1 1211 | 1212 | ProdWQO -> ADS "Towsner (preprint) [Lemma 1.15]" 1213 | CAC+WKL w-|> STS2 "Towsner (preprint) [Theorem 2.1]" 1214 | ADS+WKL w-|> ProdWQO "Towsner (preprint) [Theorem 3.7]" 1215 | ProdWQO+WKL w-|> SCAC "Towsner (preprint) [Theorem 4.6]" 1216 | 
#ADS+WKL w-|> SCAC "Towsner (preprint) [follows from Theorem 4.6]" 1217 | 1218 | #Cited: 1219 | CAC -> ProdWQO "Cholak, Marcone, and Solomon (2004) [follows from Corollaries 3.4 and 4.6]" 1220 | --------------------------------------------------------------------------------