├── .gitignore ├── Makefile ├── README ├── ast-rewrite.py ├── bitmap.pxd ├── bitmap.py ├── cloc.sh ├── dirinode.pxd ├── dirinode.py ├── dirspec.py ├── disk.py ├── diskimpl.pxd ├── diskimpl.pyx ├── inodepack.pxd ├── inodepack.py ├── kvimpl.py ├── kvspec.py ├── lfs.pxd ├── lfs.py ├── lfs_fuse.pyx ├── lfs_fuse_main.py ├── llfuse.pxd ├── logspec.py ├── partition.py ├── symbolicmap.py ├── test_bitmap.py ├── test_cp.py ├── test_dirspec.py ├── test_diskspec.py ├── test_fsck.pyx ├── test_fsck_run.py ├── test_inode.py ├── test_inodepack.py ├── test_kv.py ├── test_lfs.py ├── test_partition.py ├── test_tenaciousd.py ├── test_ufarray.py ├── test_waldisk.py ├── test_xv6inode.py ├── verify.py ├── waldisk.pxd ├── waldisk.py ├── xv6inode.pxd ├── xv6inode.py ├── yav_dirimpl_fuse.pyx ├── yav_xv6_main.py └── yggdrasil ├── __init__.py ├── diskspec.py ├── server.py ├── solver.py ├── solver_utils.py ├── test.py ├── ufarray.py └── util.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.c 2 | *.o 3 | *.so 4 | *.pyc 5 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | OS := $(shell uname) 2 | 3 | PROFILE=False 4 | 5 | CFLAGS=-DFUSE_USE_VERSION=26 `pkg-config --cflags python2` `pkg-config --cflags fuse` 6 | LDFLAGS=`pkg-config --libs python2` `pkg-config --libs fuse` 7 | 8 | ifeq ($(OS),Linux) 9 | LDFLAGS += -shared 10 | endif 11 | 12 | ifeq ($(OS),Darwin) 13 | LDFLAGS += -dynamiclib -Qunused-arguments 14 | endif 15 | 16 | 17 | all: diskimpl.so yav_dirimpl_fuse.so 18 | 19 | prod: bitmap.so inodepack.so waldisk.so xv6inode.so yav_xv6_main.so dirinode.so 20 | 21 | .PHONY: verify 22 | verify: diskimpl.so 23 | python2 verify.py 24 | 25 | %.so: %.o 26 | gcc -march=native -o $@ $< $(LDFLAGS) 27 | 28 | %.o: %.c 29 | gcc -march=native -O2 -c -fPIC $(CFLAGS) $< 30 | 31 | %.c: %.pyx 32 | cython2 -X profile=$(PROFILE) $< 33 | 34 | %.c: %.py 35 | cythonize2 -X profile=$(PROFILE) $< 36 | 37 | .SECONDARY: 38 | 39 | .PHONY: clean 40 | clean: 41 | rm -f *.so *.o *.c *.pyc 42 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | See recent updates and contact information at: 2 | 3 | http://locore.cs.washington.edu/yggdrasil/ 4 | 5 | # How to run the Yxv6 file system 6 | 7 | We have tested it using the following setup: 8 | 9 | - Cython 0.25.2 10 | - Python 2 11 | - Z3 4.4.2 (git commit e3f0aff318b5873cfe858191b8e73ed716405b59) 12 | - Linux (with FUSE) 13 | 14 | Install these packages before proceeding. Other platforms or 15 | versions may not work. 16 | 17 | To compile: 18 | 19 | $ make all prod 20 | 21 | To mount: 22 | 23 | $ python2 yav_xv6_main.py -o max_read=4096 -o max_write=4096 -s a -- /dev/sXX 24 | 25 | To run verification: 26 | 27 | $ make verify 28 | 29 | If your system doesn't have `cython2`, you may want to change it 30 | to `cython` in the makefile (similarly for `python2`). 31 | 32 | # What are the guarantees of a verified file system in Yggdrasil like Yxv6 33 | 34 | The proof is that a file system implementation is a crash refinement 35 | of its specification. See the OSDI'16 paper for details. 36 | 37 | Note that this does not mean that a verified file system in Yggdrasil 38 | has zero bugs. 
There can be bugs in the specification (or things 39 | not modeled by the specification, like error code), the verification 40 | toolchain, and the unverified part (e.g., the glue code to FUSE, 41 | FUSE itself, and the Linux kernel). 42 | 43 | # What's new in this version of Yxv6 44 | 45 | This implementation of Yxv6 is a clean-up version. It mostly follows 46 | the design described in the OSDI'16 paper, with a few differences: 47 | 48 | - the log size is doubled; 49 | - the garbage collector (for orphan inodes) is more complete; 50 | - ported to a new version of Cython; 51 | - moved more code out of the unverified FUSE layer into the verified part. 52 | 53 | You may notice changes in runtime performance and verification time 54 | depending on your platform and tools (e.g., Z3). 55 | 56 | # What file system features are missing from Yxv6 57 | 58 | Yxv6 is a research prototype. The implementation has the following 59 | limitations: 60 | 61 | - based on FUSE in user space than in the kernel 62 | - Python runtime required (even after compiled by Cython) 63 | - mtime only, no ctime/atime 64 | - file size is limited 65 | - verification time may vary depending on the Z3 version 66 | - no ACL support 67 | - no fallocate support 68 | - no hardlinks 69 | 70 | We don't think they are necessarily fundamental limitations of the 71 | toolkit---feel free to send us pull requests if you add some of 72 | these features. 73 | -------------------------------------------------------------------------------- /ast-rewrite.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import compiler 3 | import astor 4 | import ast 5 | from astor.codegen import to_source 6 | 7 | import lfs 8 | 9 | 10 | def foo(disk): 11 | print 'test' 12 | disk.write() 13 | disk.flush() 14 | disk.write() 15 | disk.flush() 16 | disk.write() 17 | disk.flush() 18 | 19 | class RemoveFlush(ast.NodeTransformer): 20 | def __init__(self, root, *args, **kwargs): 21 | self.__root = root 22 | self.__objs = [] 23 | super(RemoveFlush, self).__init__(*args, **kwargs) 24 | 25 | def get_all(self): 26 | return self.__objs 27 | 28 | def visit_Call(self, node): 29 | # from ipdb import set_trace; set_trace() 30 | # print to_source(node, add_line_information=False) 31 | if not hasattr(node, 'func'): 32 | return node 33 | 34 | if not hasattr(node.func, 'attr'): 35 | return node 36 | 37 | if not node.func.attr == 'flush': 38 | return node 39 | 40 | l = {} 41 | g = {} 42 | 43 | s = to_source(self.__root, add_line_information=False) 44 | eval(compile(ast.parse(s), '', 'exec'), l, g) 45 | 46 | g['__source'] = s 47 | self.__objs.append(g) 48 | 49 | def remove_flush_opt(obj): 50 | code = inspect.getsource(obj) 51 | cnode = ast.parse(code) 52 | rf = RemoveFlush(cnode) 53 | cnode = rf.visit(cnode) 54 | 55 | s = to_source(cnode, add_line_information=False) 56 | l = {} 57 | g = {} 58 | eval(compile(ast.parse(s), '', 'exec'), l, g) 59 | g['__source'] = s 60 | 61 | objs = rf.get_all() 62 | 63 | objs.append(g) 64 | 65 | return objs 66 | 67 | print remove_flush_opt(lfs.LFS) 68 | for k in remove_flush_opt(lfs.LFS): 69 | print k['__source'] 70 | print "" 71 | -------------------------------------------------------------------------------- /bitmap.pxd: -------------------------------------------------------------------------------- 1 | from diskimpl cimport * 2 | from libc.stdint cimport uint64_t 3 | 4 | cdef class BitmapDisk: 5 | cdef object _disk 6 | 7 | cpdef bint is_set(self, uint64_t bit) 8 | cpdef void 
set_bit(self, uint64_t bit) 9 | cpdef void unset_bit(self, uint64_t bit) 10 | -------------------------------------------------------------------------------- /bitmap.py: -------------------------------------------------------------------------------- 1 | import cython 2 | if not cython.compiled: 3 | from disk import * 4 | 5 | __all__ = ['BitmapDisk'] 6 | 7 | 8 | # Implementation of a bitmap on disk 9 | class BitmapDisk(object): 10 | def __init__(self, disk): 11 | self._disk = disk 12 | 13 | # Check if a bit is set 14 | @cython.locals(mapbit='uint64_t') 15 | @cython.locals(mapfield='uint64_t') 16 | @cython.locals(mapbid='uint64_t') 17 | @cython.locals(block='Block') 18 | @cython.locals(field='uint64_t') 19 | def is_set(self, bit): 20 | # Define bit as Concat(.., ..) 21 | mapbit = Extract(6 - 1, 0, bit) 22 | mapfield = Extract(6 + 9 - 1, 6, bit) 23 | mapbid = Extract(64 - 1, 6 + 9, bit) 24 | 25 | block = self._disk.read(mapbid) 26 | field = block[mapfield] 27 | 28 | return Extract(0, 0, field >> Extend(mapbit, 64)) == 1 29 | 30 | @cython.locals(mapbit='uint64_t') 31 | @cython.locals(mapfield='uint64_t') 32 | @cython.locals(mapbid='uint64_t') 33 | @cython.locals(block='Block') 34 | @cython.locals(field='uint64_t') 35 | @cython.locals(new_field='uint64_t') 36 | def set_bit(self, bit): 37 | mapbit = Extract(6 - 1, 0, bit) 38 | mapfield = Extract(6 + 9 - 1, 6, bit) 39 | mapbid = Extract(64 - 1, 6 + 9, bit) 40 | 41 | block = self._disk.read(mapbid) 42 | 43 | field = block[mapfield] 44 | new_field = field | BitVecVal(1, 64) << Extend(mapbit, 64) 45 | 46 | block[mapfield] = new_field 47 | self._disk.write(mapbid, block) 48 | 49 | @cython.locals(mapbit='uint64_t') 50 | @cython.locals(mapfield='uint64_t') 51 | @cython.locals(mapbid='uint64_t') 52 | @cython.locals(block='Block') 53 | @cython.locals(field='uint64_t') 54 | @cython.locals(new_field='uint64_t') 55 | def unset_bit(self, bit): 56 | mapbit = Extract(6 - 1, 0, bit) 57 | mapfield = Extract(6 + 9 - 1, 6, bit) 58 | mapbid = Extract(64 - 1, 6 + 9, bit) 59 | 60 | block = self._disk.read(mapbid) 61 | 62 | field = block[mapfield] 63 | new_field = field & (~(BitVecVal(1, 64) << Extend(mapbit, 64))) 64 | 65 | block[mapfield] = new_field 66 | self._disk.write(mapbid, block) 67 | 68 | def crash(self, mach): 69 | return self.__class__(self._disk.crash(mach)) 70 | -------------------------------------------------------------------------------- /cloc.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | num=$(cat $@ | wc -l) 4 | blanknum=$(egrep "^[[:space:]]*(#.*)?$" $@ | wc -l) 5 | 6 | echo $((num - blanknum)) 7 | -------------------------------------------------------------------------------- /dirinode.pxd: -------------------------------------------------------------------------------- 1 | from libc.stdint cimport uint64_t 2 | 3 | from diskimpl cimport * 4 | from waldisk cimport WALDisk 5 | from xv6inode cimport IndirectInodeDisk 6 | 7 | 8 | cdef class DirImpl: 9 | cdef WALDisk _txndisk 10 | cdef IndirectInodeDisk _inode 11 | 12 | cdef object _Allocator 13 | cdef object _iallocator 14 | cdef object _ifree 15 | 16 | cdef object _Bitmap 17 | cdef object _ibitmap 18 | 19 | cdef object _DirLookup 20 | cdef DentryLookup _dirlook 21 | 22 | cdef object _orphans 23 | 24 | cdef object locate_dentry_ino(self, uint64_t ino, uint64_t[15] name) 25 | cdef object locate_empty_dentry_slot_ino(self, uint64_t ino) 26 | cdef object locate_empty_dentry_slot_err_ino(self, uint64_t ino) 27 | 28 | cdef void 
write_dentry(self, Block block, uint64_t off, uint64_t ino, uint64_t[15] name) 29 | cdef void clear_dentry(self, Block block, uint64_t off) 30 | 31 | cdef uint64_t lookup(self, uint64_t parent, uint64_t[15] name) 32 | cdef uint64_t unlink(self, uint64_t parent, uint64_t[15] name) 33 | cdef tuple rmdir(self, uint64_t parent, uint64_t[15] name) 34 | cdef uint64_t rename(self, uint64_t oparent, uint64_t[15] oname, uint64_t nparent, uint64_t[15] nname) 35 | cdef tuple mknod(self, uint64_t parent, uint64_t[16] name, uint64_t mode, uint64_t mtime) 36 | -------------------------------------------------------------------------------- /dirinode.py: -------------------------------------------------------------------------------- 1 | import cython 2 | if not cython.compiled: 3 | import z3 4 | from disk import * 5 | 6 | import errno 7 | from stat import S_IFDIR 8 | from collections import namedtuple 9 | 10 | 11 | Disk = namedtuple('Disk', ['read', 'write']) 12 | 13 | 14 | class Orphans(object): 15 | def __init__(self, orphandisk): 16 | self._orphandisk = orphandisk 17 | 18 | def size(self): 19 | return self._orphandisk.read(0)[0] 20 | 21 | def index(self, idx): 22 | orphanblock = self._orphandisk.read(0) 23 | n = orphanblock[0] 24 | 25 | assertion(0 <= n, "orphan index: n is negative") 26 | assertion(n < 511, "orphan index: n >= 511") 27 | 28 | np = Extract(8, 0, idx) 29 | 30 | return orphanblock[np + 1] 31 | 32 | def reset(self): 33 | self._orphandisk.write(0, ConstBlock(0)) 34 | 35 | def clear(self, idx): 36 | orphanblock = self._orphandisk.read(0) 37 | np = Extract(8, 0, idx) 38 | orphanblock[np] = 0 39 | self._orphandisk.write(0, orphanblock) 40 | 41 | def append(self, value): 42 | orphanblock = self._orphandisk.read(0) 43 | n = orphanblock[0] 44 | 45 | assertion(0 <= n, "orphan index: n is negative") 46 | assertion(n < 511, "orphan index: n >= 511") 47 | 48 | np = Extract(8, 0, n) 49 | 50 | orphanblock[np + 1] = value 51 | orphanblock[0] = n + 1 52 | 53 | self._orphandisk.write(0, orphanblock) 54 | 55 | 56 | 57 | class DirImpl(object): 58 | NBLOCKS = 522 59 | 60 | IFREEDISK = 4 61 | ORPHANS = 5 62 | 63 | @cython.locals(inode='IndirectInodeDisk') 64 | def __init__(self, txndisk, inode, Allocator, Bitmap, DirLookup): 65 | self._txndisk = txndisk 66 | self._inode = inode 67 | 68 | self._Allocator = Allocator 69 | self._Bitmap = Bitmap 70 | self._DirLookup = DirLookup 71 | 72 | PIno = namedtuple('Inode', ['is_mapped', 'mappingi', 'read', 'bmap']) 73 | 74 | self._dirlook = DirLookup(PIno( 75 | is_mapped=lambda vbn, inode=inode: inode.is_mapped(vbn), 76 | mappingi=lambda vbn, inode=inode: inode.mappingi(vbn), 77 | read=lambda bid, inode=inode: inode.read(bid), 78 | bmap=lambda bid, inode=inode: inode.bmap(bid), 79 | )) 80 | 81 | self._ifree = Disk( 82 | write=lambda bid, data: self._txndisk.write_tx(self.IFREEDISK, bid, data), 83 | read=lambda bid: self._txndisk._read(self.IFREEDISK, bid)) 84 | 85 | orphandisk = Disk( 86 | write=lambda bid, data: self._txndisk.write_tx(self.ORPHANS, bid, data), 87 | read=lambda bid: self._txndisk._read(self.ORPHANS, bid)) 88 | 89 | self._iallocator = Allocator( 90 | lambda n: self._ifree.read(n), 91 | 0, 1024) 92 | 93 | self._ibitmap = Bitmap(self._ifree) 94 | self._orphans = Orphans(orphandisk) 95 | 96 | def locate_dentry_ino(self, ino, name): 97 | ioff, off = self._dirlook.locate_dentry_ino(ino, name) 98 | assertion(ULT(ioff, 522), "locate_dentry_ino: invalid ioff") 99 | assertion(ioff != 10, "locate_dentry_ino: invalid ioff") 100 | bid = 
self._inode.bmap(Concat32(ino, ioff)) 101 | block = self._inode.read(bid) 102 | valid = And(bid != 0, off % 16 == 0, Extract(31, 0, block[off]) != 0) 103 | for i in range(15): 104 | valid = And(valid, block[off + i + 1] == name[i]) 105 | return block, bid, off, valid 106 | 107 | def locate_empty_dentry_slot_ino(self, ino): 108 | ioff, off = self._dirlook.locate_empty_slot_ino(ino) 109 | assertion(ULT(ioff, 522), "locate_empty_dentry_slot: invalid ioff") 110 | assertion(ioff != 10, "locate_empty_dentry_slot: invalid ioff") 111 | bid = self._inode.bmap(Concat32(ino, ioff)) 112 | block = self._inode.read(bid) 113 | assertion(bid != 0, "locate_empty_dentry_slot: invalid bid") 114 | assertion(off % 16 == 0, "locate_empty_dentry_slot: invalid offset") 115 | assertion(block[off] == 0, "locate_empty_dentry_slot: slot not empty") 116 | return block, bid, off 117 | 118 | def locate_empty_dentry_slot_err_ino(self, ino): 119 | ioff, off = self._dirlook.locate_empty_slot_ino(ino) 120 | assertion(ULT(ioff, 522), "locate_dentry_ino: invalid ioff") 121 | assertion(ioff != 10, "locate_dentry_ino: invalid ioff") 122 | bid = self._inode.bmap(Concat32(ino, ioff)) 123 | block = self._inode.read(bid) 124 | return block, bid, off, And(bid != 0, off % 16 == 0, block[off] == 0) 125 | 126 | def write_dentry(self, block, off, ino, name): 127 | block[off] = ino 128 | for i in range(15): 129 | block[off + i + 1] = name[i] 130 | 131 | def clear_dentry(self, block, off): 132 | for i in range(16): 133 | block[off + i] = 0 134 | 135 | def ialloc(self): 136 | # black box allocator returns a vbn 137 | ino = self._iallocator.alloc() 138 | # Validation 139 | assertion(ino != 0, "ialloc: inode is 0") 140 | assertion(self.is_ifree(ino), "ialloc: ino is not free") 141 | self._ibitmap.set_bit(ino) 142 | return ino 143 | 144 | def is_ifree(self, ino): 145 | return Not(self._ibitmap.is_set(ino)) 146 | 147 | def is_valid(self, ino): 148 | return And(ino != 0, self._ibitmap.is_set(ino), UGT(self.get_iattr(ino).nlink, 0)) 149 | 150 | def is_gcable(self, ino): 151 | return And(ino != 0, self._ibitmap.is_set(ino), self.get_iattr(ino).nlink == 0) 152 | 153 | def is_dir(self, ino): 154 | attr = self._inode.get_iattr(ino) 155 | return And(self.is_valid(ino), 156 | attr.mode & S_IFDIR != 0) 157 | 158 | def is_regular(self, ino): 159 | attr = self._inode.get_iattr(ino) 160 | return And(self.is_valid(ino), 161 | attr.mode & S_IFDIR == 0) 162 | 163 | ### 164 | 165 | def get_iattr(self, ino): 166 | return self._inode.get_iattr(ino) 167 | 168 | def set_iattr(self, ino, attr): 169 | self._inode.begin_tx() 170 | self._inode.set_iattr(ino, attr) 171 | self._inode.commit_tx() 172 | 173 | def read(self, ino, blocknum): 174 | attr = self.get_iattr(ino) 175 | bsize = attr.bsize 176 | 177 | is_mapped = self._inode.is_mapped(Concat32(ino, blocknum)) 178 | lbn = self._inode.mappingi(Concat32(ino, blocknum)) 179 | res = self._inode.read(lbn) 180 | res = If(And(is_mapped, ULT(blocknum, bsize)), res, ConstBlock(0)) 181 | return res 182 | 183 | def truncate(self, ino, fsize): 184 | 185 | target_bsize = fsize / 4096 + (fsize % 4096 != 0) 186 | 187 | # Update the size 188 | 189 | attr = self._inode.get_iattr(ino) 190 | 191 | while attr.bsize > target_bsize: 192 | self._inode.begin_tx() 193 | self._inode.bunmap(Concat32(ino, attr.bsize - 1)) 194 | attr.size = Concat32(attr.bsize - 1, fsize) 195 | self._inode.set_iattr(ino, attr) 196 | self._inode.commit_tx() 197 | 198 | if attr.fsize > fsize: 199 | self._inode.begin_tx() 200 | attr.size = Concat32(attr.bsize, 
fsize) 201 | self._inode.set_iattr(ino, attr) 202 | self._inode.commit_tx() 203 | 204 | def write(self, ino, blocknum, v, size=BitVecVal(4096, 32)): 205 | # Implementation support only a small number of blocknums. 206 | assertion(ULT(blocknum, 522), "write: blocknum to large") 207 | assertion(ULT(BitVecVal(0, 32), size), "write: size is 0") 208 | assertion(ULE(size, BitVecVal(4096, 32)), "write: size to large") 209 | assertion(self.is_regular(ino), "write: writing to a non-regular inode") 210 | 211 | self._inode.begin_tx() 212 | 213 | bid = self._inode.bmap(Concat32(ino, blocknum)) 214 | self._inode.write(bid, v) 215 | 216 | attr = self._inode.get_iattr(ino) 217 | 218 | nsize = Concat32(blocknum + 1, blocknum * 4096 + size) 219 | update = ULE(attr.fsize, blocknum * 4096 + size) 220 | attr.size = If(update, nsize, attr.size) 221 | 222 | self._inode.set_iattr(ino, attr) 223 | 224 | self._inode.commit_tx() 225 | 226 | return size 227 | 228 | def lookup(self, parent, name): 229 | assertion(self.is_dir(parent), "lookup: parent is not dir") 230 | 231 | self._inode.begin_tx() 232 | parent_block, _, off, valid = self.locate_dentry_ino(parent, name) 233 | self._inode.commit_tx() 234 | 235 | return If(valid, Extract(31, 0, parent_block[off]), 0) 236 | 237 | def mknod(self, parent, name, mode, mtime): 238 | assertion(self.is_dir(parent), "mknod: parent is not a directory") 239 | assertion(name[0] != 0, "mknod: name is null") 240 | 241 | self._inode.begin_tx() 242 | 243 | parent_block, parent_bid, off, valid = self.locate_empty_dentry_slot_err_ino(parent) 244 | if Not(valid): 245 | self._inode.commit_tx() 246 | return 0, errno.ENOSPC 247 | 248 | ino = self.ialloc() 249 | 250 | attr = Stat(size=0, mtime=mtime, mode=mode, nlink=2) 251 | 252 | self._inode.set_iattr(ino, attr) 253 | 254 | attr = self._inode.get_iattr(parent) 255 | assertion(ULE(attr.bsize, 522), "mknod: bsize is larger than 522") 256 | attr.size = Concat32(BitVecVal(522, 32), BitVecVal(4096 * 522, 32)) 257 | assertion(ULT(attr.nlink, attr.nlink + 1), "mknod: nlink overflow") 258 | attr.nlink += 1 259 | 260 | self._inode.set_iattr(parent, attr) 261 | 262 | self.write_dentry(parent_block, off, ino, name) 263 | parent_block[off] = ino 264 | 265 | self._inode.write(parent_bid, parent_block) 266 | 267 | self._inode.commit_tx() 268 | 269 | return ino, 0 270 | 271 | def unlink(self, parent, name): 272 | assertion(self.is_dir(parent), "unlink: not a dir") 273 | assertion(name[0] != 0, "unlink: name is null") 274 | 275 | self._inode.begin_tx() 276 | 277 | parent_block, parent_bid, off, valid = self.locate_dentry_ino(parent, name) 278 | 279 | assertion(valid, "unlink: not valid") 280 | 281 | attr = self._inode.get_iattr(parent) 282 | assertion(UGE(attr.nlink, 2), "unlink: nlink is not greater than 1: " + str(attr.nlink)) 283 | attr.nlink -= 1 284 | self._inode.set_iattr(parent, attr) 285 | 286 | ino = Extract(31, 0, parent_block[off]) 287 | 288 | attr = self._inode.get_iattr(ino) 289 | attr.nlink = 1 290 | self._inode.set_iattr(ino, attr) 291 | 292 | self.clear_dentry(parent_block, off) 293 | 294 | self._inode.write(parent_bid, parent_block) 295 | 296 | # append the inode to the orphan list 297 | self._orphans.append(Extend(ino, 64)) 298 | 299 | self._inode.commit_tx() 300 | 301 | return ino 302 | 303 | def rmdir(self, parent, name): 304 | assertion(self.is_dir(parent), "rmdir: parent is not a directory") 305 | assertion(name[0] != 0, "rmdir: name is null") 306 | 307 | self._inode.begin_tx() 308 | parent_block, parent_bid, off, valid = 
self.locate_dentry_ino(parent, name) 309 | if Not(valid): 310 | self._inode.commit_tx() 311 | return 0, errno.ENOENT 312 | 313 | assertion(valid, "rmdir: dentry off not valid") 314 | 315 | ino = Extract(31, 0, parent_block[off]) 316 | if Not(self.is_dir(ino)): 317 | self._inode.commit_tx() 318 | return 0, errno.ENOTDIR 319 | 320 | assertion(self.is_dir(ino), "rmdir: ino is not dir") 321 | 322 | attr = self._inode.get_iattr(ino) 323 | if UGT(attr.nlink, 2): 324 | self._inode.commit_tx() 325 | return BitVecVal(0, 32), errno.ENOTEMPTY 326 | 327 | attr = self._inode.get_iattr(parent) 328 | assertion(UGE(attr.nlink, 2), "rmdir: nlink is not greater than 1: " + str(attr.nlink)) 329 | attr.nlink -= 1 330 | self._inode.set_iattr(parent, attr) 331 | 332 | self.clear_dentry(parent_block, off) 333 | self._inode.write(parent_bid, parent_block) 334 | 335 | attr = self._inode.get_iattr(ino) 336 | attr.nlink = 1 337 | self._inode.set_iattr(ino, attr) 338 | 339 | # append the inode to the orphan list 340 | self._orphans.append(Extend(ino, 64)) 341 | 342 | self._inode.commit_tx() 343 | 344 | return ino, 0 345 | 346 | def forget(self, ino): 347 | if Or(self.get_iattr(ino).mode & S_IFDIR != 0, self.get_iattr(ino).nlink != 1): 348 | return 349 | 350 | assertion(self.is_regular(ino), "forget: ino is not regular") 351 | 352 | self._inode.begin_tx() 353 | attr = self._inode.get_iattr(ino) 354 | attr.nlink = 0 355 | self._inode.set_iattr(ino, attr) 356 | self._inode.commit_tx() 357 | 358 | def rename(self, oparent, oname, nparent, nname): 359 | assertion(self.is_dir(oparent), "rename: oparent is not dir") 360 | assertion(self.is_dir(nparent), "rename: nparent is not dir") 361 | 362 | assertion(oname[0] != 0, "rename: oname is null") 363 | assertion(nname[0] != 0, "rename: nname is null") 364 | 365 | self._inode.begin_tx() 366 | 367 | attr = self._inode.get_iattr(oparent) 368 | assertion(UGE(attr.nlink, 2), "rename: nlink is not greater than 1: " + str(attr.nlink)) 369 | attr.nlink -= 1 370 | self._inode.set_iattr(oparent, attr) 371 | 372 | attr = self._inode.get_iattr(nparent) 373 | assertion(ULE(attr.bsize, 522), "rename: bsize is larger than 522") 374 | attr.size = Concat32(BitVecVal(522, 32), BitVecVal(4096 * 522, 32)) 375 | assertion(ULT(attr.nlink, attr.nlink + 1), "rename: nlink overflow") 376 | attr.nlink += 1 377 | self._inode.set_iattr(nparent, attr) 378 | 379 | # Find target and wipe from parent block 380 | oparent_block, oparent_bid, ooff, ovalid = self.locate_dentry_ino(oparent, oname) 381 | assertion(ovalid, "rename: ooff is not valid") 382 | ino = oparent_block[ooff] 383 | self.clear_dentry(oparent_block, ooff) 384 | self._inode.write(oparent_bid, oparent_block) 385 | 386 | # Check if target exists 387 | nparent_block, nparent_bid, noff, nvalid = self.locate_dentry_ino(nparent, nname) 388 | 389 | if nvalid: 390 | # append the dst inode to the orphan list 391 | self._orphans.append(nparent_block[noff]) 392 | self.clear_dentry(nparent_block, noff) 393 | 394 | nparent_block, nparent_bid, noff = self.locate_empty_dentry_slot_ino(nparent) 395 | self.write_dentry(nparent_block, noff, ino, nname) 396 | 397 | self._inode.write(nparent_bid, nparent_block) 398 | 399 | self._inode.commit_tx() 400 | 401 | return 0 402 | 403 | def fsync(self): 404 | self._txndisk.flush() 405 | 406 | def gc1(self, orph_index, off): 407 | ino = Extract(31, 0, self._orphans.index(orph_index)) 408 | if not self.is_gcable(ino): 409 | return 410 | # Wipe data 411 | 412 | self._inode.begin_tx() 413 | self._inode.bunmap(Concat32(ino, 
off)) 414 | 415 | nsize = off 416 | 417 | attr = self._inode.get_iattr(ino) 418 | if attr.bsize == nsize + 1: 419 | attr.size = Concat32(nsize, nsize * 4096) 420 | self._inode.set_iattr(ino, attr) 421 | 422 | self._inode.commit_tx() 423 | 424 | # If the inode is in the orphan list, is gc-able *and* 425 | # its size is 0 we can safely mark it as 'free' 426 | def gc2(self, orph_index): 427 | ino = Extract(31, 0, self._orphans.index(orph_index)) 428 | if not self.is_gcable(ino): 429 | return 430 | 431 | if self._inode.get_iattr(ino).size == 0: 432 | self._inode.begin_tx() 433 | self._orphans.clear(orph_index) 434 | self._ibitmap.unset_bit(ino) 435 | self._inode.commit_tx() 436 | 437 | def gc3(self): 438 | self._inode.begin_tx() 439 | self._orphans.reset() 440 | self._inode.commit_tx() 441 | 442 | def crash(self, mach): 443 | return self.__class__(self._txndisk.crash(mach), self._inode.crash(mach), self._Allocator, self._Bitmap, self._DirLookup) 444 | -------------------------------------------------------------------------------- /dirspec.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import stat 3 | 4 | from yggdrasil.util import * 5 | from yggdrasil.diskspec import * 6 | from disk import * 7 | 8 | 9 | InoSort = BitVecSort(32) 10 | NameSort = SizeSort # todo 11 | 12 | 13 | def FreshIno(name): 14 | return FreshBitVec(name, InoSort.size()) 15 | 16 | def FreshName(name): 17 | return FreshBitVec(name, NameSort.size()) 18 | 19 | 20 | class DirLook(object): 21 | def __init__(self, inode): 22 | pass 23 | 24 | def locate_empty_slot(self, block): 25 | return FreshBitVec('off', 9) 26 | 27 | def locate_dentry(self, block, name): 28 | return FreshBitVec('off', 9) 29 | 30 | def locate_empty_slot_ino(self, ino): 31 | return FreshBitVec('ioff', 32), FreshBitVec('off', 9) 32 | 33 | def locate_dentry_ino(self, ino, name): 34 | return FreshBitVec('ioff', 32), FreshBitVec('off', 9) 35 | 36 | 37 | class Attributes(object): 38 | def __init__(self, bsizefn, fsizefn, mtimefn, modefn, nlinkfn): 39 | self._bsizefn = bsizefn 40 | self._fsizefn = fsizefn 41 | self._mtimefn = mtimefn 42 | self._modefn = modefn 43 | self._nlinkfn = nlinkfn 44 | 45 | def set_bsize(self, ino, bsize, guard=BoolVal(True)): 46 | self._bsizefn = self._bsizefn.update(ino, bsize, guard) 47 | 48 | def set_fsize(self, ino, fsize, guard=BoolVal(True)): 49 | self._fsizefn = self._fsizefn.update(ino, fsize, guard) 50 | 51 | def set_mtime(self, ino, mtime, guard=BoolVal(True)): 52 | self._mtimefn = self._mtimefn.update(ino, mtime, guard) 53 | 54 | def set_mode(self, ino, mode, guard=BoolVal(True)): 55 | self._modefn = self._modefn.update(ino, mode, guard) 56 | 57 | def set_nlink(self, ino, nlink, guard=BoolVal(True)): 58 | self._nlinkfn = self._nlinkfn.update(ino, nlink, guard) 59 | 60 | def bsize(self, ino): 61 | return self._bsizefn(ino) 62 | 63 | def fsize(self, ino): 64 | return self._fsizefn(ino) 65 | 66 | def mtime(self, ino): 67 | return self._mtimefn(ino) 68 | 69 | def mode(self, ino): 70 | return self._modefn(ino) 71 | 72 | def nlink(self, ino): 73 | return self._nlinkfn(ino) 74 | 75 | def to_stat(self, ino): 76 | return Stat(size=Concat(self.bsize(ino), self.fsize(ino)), 77 | mtime=self.mtime(ino), 78 | mode=self.mode(ino), 79 | nlink=self.nlink(ino)) 80 | 81 | 82 | def FreshAttributes(): 83 | bsizefn = FreshUFunction('bsizefn', InoSort, BitVecSort(32)) 84 | fsizefn = FreshUFunction('fsizefn', InoSort, BitVecSort(32)) 85 | mtimefn = FreshUFunction('mtimefn', InoSort, SizeSort) 86 | 
modefn = FreshUFunction('modefn', InoSort, SizeSort) 87 | nlinkfn = FreshUFunction('nlinkfn', InoSort, SizeSort) 88 | return Attributes(bsizefn, fsizefn, mtimefn, modefn, nlinkfn) 89 | 90 | 91 | class DirSpec(object): 92 | def __init__(self, mach, dirfn, direxists, datafn, ifreefn, attrs): 93 | self._mach = mach 94 | 95 | self._dirfn = dirfn 96 | self._direxists = direxists 97 | 98 | self._datafn = datafn 99 | self._ifreefn = ifreefn 100 | 101 | self._attrs = attrs 102 | 103 | def ialloc(self, on): 104 | ino = FreshIno('spec-alloc-ino') 105 | # allocator assumptions 106 | assertion(self.is_ifree(ino)) 107 | assertion(ino != 0) 108 | 109 | self._attrs.set_bsize(ino, 0, guard=on) 110 | self._attrs.set_fsize(ino, 0, guard=on) 111 | self._attrs.set_mtime(ino, 0, guard=on) 112 | self._attrs.set_mode(ino, 0, guard=on) 113 | self._attrs.set_nlink(ino, 1, guard=on) 114 | 115 | # mark the new inode as being used 116 | self._ifreefn = self._ifreefn.update(ino, BoolVal(False), guard=on) 117 | 118 | return ino 119 | 120 | def is_ifree(self, ino): 121 | return self._ifreefn(ino) 122 | 123 | def is_valid(self, ino): 124 | return And(ino != 0, Not(self._ifreefn(ino)), UGT(self._attrs.nlink(ino), 0)) 125 | 126 | def is_dir(self, ino): 127 | return And(self.is_valid(ino), 128 | self._attrs.mode(ino) & stat.S_IFDIR != 0) 129 | 130 | def is_regular(self, ino): 131 | return And(self.is_valid(ino), 132 | self._attrs.mode(ino) & stat.S_IFDIR == 0) 133 | 134 | ## 135 | 136 | def read(self, ino, blocknum, off): 137 | res = self._datafn(ino, blocknum, off) 138 | return res 139 | 140 | # Truncate a file down to block `bnum`. 141 | # truncate(ino, 0) will wipe the entire file. 142 | def truncate(self, ino, size): 143 | assertion(ULT(BitVecVal(0, 32), size)) 144 | assertion(ULE(size, BitVecVal(4096, 32))) 145 | assertion(self.is_regular(ino)) 146 | 147 | on = self._mach.create_on([]) 148 | 149 | self._attrs.set_fsize(ino, size, on) 150 | 151 | 152 | def lookup(self, parent, name): 153 | return If(self._direxists(parent, name[0]), self._dirfn(parent, name[0]), 0) 154 | 155 | def get_iattr(self, ino): 156 | return self._attrs.to_stat(ino) 157 | 158 | def write(self, ino, blocknum, v, size=4096): 159 | assertion(self.is_regular(ino)) 160 | 161 | _fn = self._datafn._fn 162 | _bfn = v._fn 163 | 164 | on = self._mach.create_on([]) 165 | 166 | fn = lambda i, b, o: If( 167 | And(on, ino == i, blocknum == b), _bfn(o), _fn(i, b, o)) 168 | self._datafn._fn = fn 169 | 170 | assertion(ULE(blocknum, 1048574)) 171 | 172 | update = ULE(self._attrs.fsize(ino), blocknum * 4096 + size) 173 | self._attrs.set_bsize(ino, blocknum + 1, And(update, on)) 174 | self._attrs.set_fsize(ino, blocknum * 4096 + size, And(update, on)) 175 | 176 | def rename(self, oparent, oname, nparent, nname): 177 | # old parent and new parent are directories 178 | assertion(self.is_dir(oparent)) 179 | assertion(self.is_dir(nparent)) 180 | 181 | # destination does not exist 182 | assertion(Not(self._direxists(nparent, nname[0]))) 183 | 184 | on = self._mach.create_on([]) 185 | 186 | ino = self._dirfn(oparent, oname[0]) 187 | 188 | assertion(ULT(self._attrs.nlink(nparent), self._attrs.nlink(nparent) + 1)) 189 | self._attrs.set_nlink(nparent, self._attrs.nlink(nparent) + 1, guard=on) 190 | self._attrs.set_bsize(nparent, 522, guard=on) 191 | self._attrs.set_fsize(nparent, 4096 * 522, guard=on) 192 | 193 | assertion(ULT(self._attrs.nlink(nparent) - 1, self._attrs.nlink(nparent))) 194 | self._attrs.set_nlink(oparent, self._attrs.nlink(oparent) - 1, guard=on) 195 | 196 
| # remove oname from oparent 197 | self._direxists = self._direxists.update((oparent, oname[0]), 198 | BoolVal(False), guard=on) 199 | 200 | # associate newparent x newname -> ino 201 | self._dirfn = self._dirfn.update((nparent, nname[0]), ino, guard=on) 202 | self._direxists = self._direxists.update((nparent, nname[0]), 203 | BoolVal(True), guard=on) 204 | 205 | def mknod(self, parent, name, mode, mtime): 206 | on = self._mach.create_on([]) 207 | 208 | # Parent is a directory 209 | assertion(self.is_dir(parent)) 210 | 211 | # A file with this name does not exist in the directory 212 | assertion(Not(self._direxists(parent, name[0]))) 213 | 214 | # Get a fresh inode number.. It must be non-zero and unique 215 | ino = self.ialloc(on) 216 | 217 | # Set attribute for parent directory 218 | self._attrs.set_nlink(parent, self._attrs.nlink(parent) + 1, guard=on) 219 | self._attrs.set_bsize(parent, 522, guard=on) 220 | self._attrs.set_fsize(parent, 4096 * 522, guard=on) 221 | 222 | # Set attributes for new inode 223 | self._attrs.set_bsize(ino, 0, guard=on) 224 | self._attrs.set_fsize(ino, 0, guard=on) 225 | self._attrs.set_mtime(ino, mtime, guard=on) 226 | self._attrs.set_mode(ino, mode, guard=on) 227 | self._attrs.set_nlink(ino, 2, guard=on) 228 | 229 | # associate parent x name -> ino 230 | self._dirfn = self._dirfn.update((parent, name[0]), ino, guard=on) 231 | self._direxists = self._direxists.update((parent, name[0]), BoolVal(True), guard=on) 232 | 233 | return ino 234 | 235 | def forget(self, ino): 236 | if Or(self._attrs.nlink(ino) != 1, self.is_dir(ino)): 237 | return 238 | 239 | assertion(self.is_valid(ino)) 240 | on = self._mach.create_on([]) 241 | 242 | # Wipe data 243 | _fn = self._datafn._fn 244 | fn = lambda i, b, o: If( And(on, ino == i), BitVecVal(0, 64), _fn(i, b, o)) 245 | self._datafn._fn = fn 246 | 247 | self._attrs.set_bsize(ino, 0, guard=on) 248 | self._attrs.set_fsize(ino, 0, guard=on) 249 | self._attrs.set_nlink(ino, 0, guard=on) 250 | self._ifreefn = self._ifreefn.update(ino, BoolVal(True), guard=on) 251 | 252 | def unlink(self, parent, name): 253 | # parent is a directory 254 | assertion(self.is_dir(parent)) 255 | 256 | on = self._mach.create_on([]) 257 | 258 | # TODO: Failure? 
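        # Every spec-level mutation below is guarded by `on` (created via
        # self._mach.create_on above): the new value is only visible when the
        # guard holds. A sketch of what such a guarded update amounts to,
        # mirroring the lambda used in write() rather than the actual update()
        # implementation:
        #
        #   new_fn = lambda x, old_fn=fn: If(And(on, x == key), value, old_fn(x))
        #
        # With the guard false (for instance, if these effects are discarded at
        # a crash point), lookups still return the old mapping.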
259 | # Decrement nlink 260 | assertion(UGT(self._attrs.nlink(parent), 1)) 261 | self._attrs.set_nlink(parent, self._attrs.nlink(parent) - 1, guard=on) 262 | 263 | ino = self._dirfn(parent, name[0]) 264 | 265 | assertion(UGT(self._attrs.nlink(ino), 1)) 266 | 267 | self._attrs.set_nlink(ino, 1, guard=on) 268 | 269 | # remove ino from parent 270 | self._direxists = self._direxists.update((parent, name[0]), BoolVal(False), guard=on) 271 | 272 | return ino 273 | 274 | def rmdir(self, parent, name): 275 | # parent is a directory 276 | if Not(self.is_dir(parent)): 277 | return 0, errno.ENOTDIR 278 | 279 | on = self._mach.create_on([]) 280 | 281 | ino = self._dirfn(parent, name[0]) 282 | assertion(self.is_dir(ino)) 283 | 284 | if UGT(self._attrs.nlink(ino), 2): 285 | return BitVecVal(0, 32), errno.ENOTEMPTY 286 | 287 | # Wipe data 288 | _fn = self._datafn._fn 289 | fn = lambda i, b, o: If( And(on, ino == i), BitVecVal(0, 64), _fn(i, b, o)) 290 | self._datafn._fn = fn 291 | 292 | self._attrs.set_nlink(parent, self._attrs.nlink(parent) - 1, guard=on) 293 | 294 | self._attrs.set_nlink(ino, 1, guard=on) 295 | 296 | return ino, BitVecVal(0, 32) 297 | 298 | def crash(self, mach): 299 | return self.__class__(mach, self._dirfn, self._direxists, self._datafn, self._ifreefn, self._attrs) 300 | -------------------------------------------------------------------------------- /disk.py: -------------------------------------------------------------------------------- 1 | import z3 2 | import diskimpl 3 | import traceback 4 | import symbolicmap 5 | 6 | from yggdrasil import diskspec 7 | from yggdrasil import ufarray 8 | from yggdrasil import util 9 | 10 | 11 | native = True 12 | 13 | 14 | def _native(a, b): 15 | def inner(*args, **kwargs): 16 | if native: 17 | return a(*args, **kwargs) 18 | return b(*args, **kwargs) 19 | return inner 20 | 21 | 22 | def AsyncDiskWrap(*args, **kwargs): 23 | mach = diskspec.Machine() 24 | array = ufarray.ConstDiskArray(ConstBlock(0)) 25 | return diskspec.AsyncDisk(mach, array) 26 | 27 | 28 | def ZConcat32(a, b): 29 | # Z3 concat of 32 bit numbers 30 | # Since the implementation only uses uint64's 31 | # we have to know the the sizes for the numbers being concated. 
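    # For instance (illustrative only), ZConcat32(z3.BitVecVal(1, 32),
    # z3.BitVecVal(2, 32)) yields the 64-bit value (1 << 32) | 2, i.e.
    # 0x100000002, with `a` in the high half; this matches the native
    # Concat32 in diskimpl.pxd, which computes a << 32 | b.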
32 | assert a.size() == 32 33 | assert b.size() == 32 34 | return z3.Concat(a, b) 35 | 36 | Block = diskimpl.Block 37 | 38 | # TODO: More efficient native stat 39 | Stat = _native(diskspec.Stat, diskspec.Stat) 40 | AsyncDisk = _native(diskimpl.AsyncDisk, AsyncDiskWrap) 41 | PartitionAsyncDisk = diskimpl.PartitionAsyncDisk 42 | ConstBlock = _native(diskimpl.ConstBlock, ufarray.ConstBlock) 43 | Extract = _native(diskimpl.Extract, z3.Extract) 44 | ULE = _native(diskimpl.ULE, z3.ULE) 45 | ULT = _native(diskimpl.ULT, z3.ULT) 46 | UGT = _native(diskimpl.UGT, z3.UGT) 47 | UGE = _native(diskimpl.UGE, z3.UGE) 48 | If = _native(diskimpl.If, util.If) 49 | Allocator32 = _native(diskimpl.Allocator, diskspec.Allocator32) 50 | Allocator64 = _native(diskimpl.Allocator, diskspec.Allocator64) 51 | Extend = _native(diskimpl.Extend, util.Extend) 52 | BitVecVal = _native(diskimpl.BitVecVal, z3.BitVecVal) 53 | Concat32 = _native(diskimpl.Concat32, ZConcat32) 54 | Not = _native(diskimpl.Not, z3.Not) 55 | And = _native(diskimpl.And, z3.And) 56 | Or = _native(diskimpl.Or, z3.Or) 57 | USub = _native(diskimpl.USub, lambda a, b: a - b) 58 | URem = _native(diskimpl.URem, z3.URem) 59 | UDiv = _native(diskimpl.UDiv, z3.UDiv) 60 | LShR = _native(diskimpl.LShR, z3.LShR) 61 | Dict = _native(diskimpl.Dict, symbolicmap.SymbolicMap) 62 | UMax = _native(None, util.UMax) 63 | 64 | def debug(msg, *var): 65 | if not native: 66 | if not hasattr(debug, 'debugs'): 67 | debug.debugs = [] 68 | debug.debugs.append((msg, map(z3.simplify, var))) 69 | 70 | def assertion(cond, msg=None): 71 | if native: 72 | if not cond: 73 | if msg: 74 | print "Assertion failure:", msg 75 | traceback.print_stack() 76 | assert cond 77 | else: 78 | if not hasattr(assertion, 'assertions'): 79 | assertion.assertions = [] 80 | assertion.assertions.append(cond) 81 | -------------------------------------------------------------------------------- /diskimpl.pxd: -------------------------------------------------------------------------------- 1 | from libc.stdint cimport uint64_t 2 | from libc.string cimport memcpy 3 | 4 | # Extract returns the same size type.. 
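# A couple of worked examples of the formula below (illustrative only):
#   Extract(7, 0, 0x1234)  == 0x34   # low byte: 0x1234 & 0xff
#   Extract(11, 8, 0x1234) == 0x2    # bits 8..11: (0x1234 >> 8) & 0xf
# Unlike z3.Extract, which returns a (hi - lo + 1)-bit vector, this native
# version keeps the result as a uint64_t.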
5 | cpdef inline uint64_t Extract(int hi, int lo, uint64_t val): 6 | return val >> lo & ((1 << (hi - lo + 1)) - 1) 7 | 8 | cpdef inline uint64_t USub(uint64_t a, uint64_t b): 9 | return a - b 10 | 11 | cpdef inline uint64_t Concat32(uint64_t a, uint64_t b): 12 | return a << 32 | b 13 | 14 | cpdef inline bint ULE(uint64_t a, uint64_t b): 15 | return a <= b 16 | 17 | cpdef inline bint ULT(uint64_t a, uint64_t b): 18 | return a < b 19 | 20 | cpdef inline bint UGE(uint64_t a, uint64_t b): 21 | return a >= b 22 | 23 | cpdef inline bint UGT(uint64_t a, uint64_t b): 24 | return a > b 25 | 26 | cpdef inline uint64_t URem(uint64_t a, uint64_t b): 27 | return a % b 28 | 29 | cpdef inline uint64_t UDiv(uint64_t a, uint64_t b): 30 | return a / b 31 | 32 | cpdef inline uint64_t LShR(uint64_t a, uint64_t b): 33 | return a >> b 34 | 35 | cpdef inline uint64_t Extend(uint64_t val, uint64_t size): 36 | assert size == 64 37 | return val 38 | 39 | cpdef inline uint64_t BitVecVal(uint64_t val, uint64_t size): 40 | assert size <= 64 41 | return val 42 | 43 | cpdef inline bint Not(bint cond): 44 | return not cond 45 | 46 | cpdef inline bint Or(bint a, bint b): 47 | return (a or b) 48 | 49 | cpdef bint And(bint a=*, bint b=*, bint c=*) 50 | cpdef Block ConstBlock(uint64_t c) 51 | 52 | cpdef inline If(bint cond, a, b): 53 | if cond: 54 | return a 55 | return b 56 | 57 | 58 | cpdef void assertion(bint b, object msg=*) 59 | 60 | cdef class Stat: 61 | cdef public uint64_t size 62 | cdef public uint64_t mtime 63 | cdef public uint64_t mode 64 | cdef public uint64_t nlink 65 | 66 | 67 | cdef class Block: 68 | cdef uint64_t *buf 69 | cdef readonly int size # buf size 70 | 71 | cpdef Block copy(self) 72 | cdef void set(self, uint64_t, uint64_t) nogil 73 | cdef uint64_t get(self, uint64_t) nogil 74 | 75 | 76 | cdef class AsyncDisk: 77 | cdef char* fn 78 | cdef int fd 79 | 80 | cpdef void write(self, uint64_t blknum, Block block, bint cond=*) 81 | cpdef Block read(self, uint64_t blknum) 82 | cpdef void flush(self) 83 | 84 | 85 | cdef class Dict(object): 86 | cdef dict _map 87 | 88 | cpdef get(self, gkey, dresult) 89 | cpdef has_key(self, gkey) 90 | 91 | 92 | cdef class PartitionAsyncDisk: 93 | cdef AsyncDisk adisk 94 | cdef uint64_t start 95 | cdef uint64_t end 96 | cdef bint debug 97 | 98 | cpdef void write(self, uint64_t blknum, Block block, bint cond=*) 99 | cpdef Block read(self, uint64_t blknum) 100 | cpdef void flush(self) 101 | 102 | 103 | cdef class Allocator: 104 | cdef readfn 105 | cdef uint64_t start 106 | cdef uint64_t end 107 | 108 | cdef uint64_t _alloc(self, uint64_t i, Block block) nogil 109 | cpdef uint64_t alloc(self) 110 | 111 | 112 | cdef class DentryLookup: 113 | cdef object _inode 114 | 115 | cdef int locate_dentry(self, Block block, uint64_t[15] name) nogil 116 | cdef int locate_empty_slot(self, Block block) nogil 117 | 118 | cdef tuple locate_dentry_ino(self, uint64_t ino, uint64_t[15] name) 119 | cdef tuple locate_empty_slot_ino(self, uint64_t ino) 120 | -------------------------------------------------------------------------------- /diskimpl.pyx: -------------------------------------------------------------------------------- 1 | # cython: cdivision=True 2 | from libc.stdint cimport uint64_t 3 | from libc.stdlib cimport calloc, free 4 | from libc.string cimport memcpy 5 | from posix.fcntl cimport open, O_RDWR 6 | from posix.stat cimport mode_t 7 | from posix.unistd cimport close, pread, pwrite, fsync, _exit 8 | 9 | import traceback 10 | 11 | 12 | cdef int BLOCKSIZE = 4096 13 | 14 | 15 | cpdef 
inline bint And(bint a=1, bint b=1, bint c=1): 16 | return (a and b) and c 17 | 18 | 19 | cpdef inline Block ConstBlock(uint64_t c): 20 | block = Block(BLOCKSIZE) 21 | if c == 0: 22 | return block 23 | for i in range(block.size): 24 | block.buf[i] = c 25 | return block 26 | 27 | 28 | cdef class Stat: 29 | def __init__(self, uint64_t size, uint64_t mtime, uint64_t mode, uint64_t nlink): 30 | self.size = size 31 | self.mtime = mtime 32 | self.mode = mode 33 | self.nlink = nlink 34 | 35 | @property 36 | def bsize(self): 37 | return Extract(63, 32, self.size) 38 | 39 | @property 40 | def fsize(self): 41 | return Extract(31, 0, self.size) 42 | 43 | def __str__(self): 44 | return "size={}, mtime={}, mode={}, nlink={}".format(self.size, self.mtime, self.mode, self.nlink) 45 | 46 | 47 | cpdef void assertion(bint b, object msg=None): 48 | if not b: 49 | traceback.print_stack() 50 | print "Assertion failure:", msg 51 | _exit(1) 52 | 53 | 54 | # mutable block 55 | # manages a buffer 56 | cdef class Block: 57 | # size in bytes 58 | def __cinit__(self, int size): 59 | assert size % 8 == 0, "block size is not a multiple of 8" 60 | 61 | self.buf = calloc(size, sizeof(char)); 62 | if self.buf == NULL: 63 | raise MemoryError('could not calloc') 64 | 65 | self.size = size / sizeof(uint64_t) 66 | 67 | cpdef Block copy(self): 68 | cdef Block other = Block(self.size * sizeof(uint64_t)) 69 | memcpy(other.buf, self.buf, self.size * sizeof(uint64_t)) 70 | return other 71 | 72 | def __dealloc__(self): 73 | if self.buf != NULL: 74 | free(self.buf) 75 | self.buf = NULL 76 | 77 | # This is a python function.. 78 | # Remove this indirection by calling 79 | # block.buf[x] instead of block[x] 80 | def __getitem__(self, int v): 81 | if v >= self.size: 82 | # print "WARN", v, self.size 83 | v = v % self.size 84 | # assert v < self.size 85 | return self.buf[v] 86 | 87 | def __setitem__(self, int v, uint64_t k): 88 | assert v < self.size, "block index out of range" 89 | self.buf[v] = k 90 | 91 | cdef void set(self, uint64_t v, uint64_t k) nogil: 92 | self.buf[v] = k 93 | 94 | cdef uint64_t get(self, uint64_t v) nogil: 95 | return self.buf[v] 96 | 97 | def _print(self): 98 | for i in range(self.size): 99 | print self[i], 100 | if (i + 1) % 64 == 0: 101 | print 102 | 103 | 104 | cdef class AsyncDisk: 105 | def __cinit__(self, char* fn, mode_t mode=O_RDWR): 106 | self.fn = fn 107 | self.fd = open(fn, mode, 0666) 108 | 109 | def __dealloc__(self): 110 | close(self.fd) 111 | self.fd = 0 112 | 113 | cpdef void write(self, uint64_t blknum, Block block, bint cond=1): 114 | if not cond: 115 | return 116 | assert block.size == BLOCKSIZE / sizeof(uint64_t), "async disk: writing a block with invalid size" 117 | cdef ssize_t nbytes = pwrite(self.fd, block.buf, BLOCKSIZE, blknum * BLOCKSIZE) 118 | assert nbytes == BLOCKSIZE, "async disk: could not write entire nbytes" 119 | 120 | cpdef Block read(self, uint64_t blknum): 121 | cdef Block block = Block(BLOCKSIZE) 122 | cdef char* buf = block.buf 123 | cdef ssize_t nbytes = pread(self.fd, buf, BLOCKSIZE, blknum * BLOCKSIZE) 124 | assert nbytes == BLOCKSIZE, "async disk: could not read entire blocksize" 125 | return block 126 | 127 | cpdef void flush(self): 128 | fsync(self.fd) 129 | 130 | 131 | cdef class Dict(object): 132 | def __init__(self): 133 | self._map = dict() 134 | 135 | cpdef get(self, gkey, dresult): 136 | if self._map.has_key(gkey): 137 | return self._map[gkey] 138 | return dresult 139 | 140 | cpdef has_key(self, gkey): 141 | return self._map.has_key(gkey) 142 | 143 | def 
__setitem__(self, key, value): 144 | self._map[key] = value 145 | 146 | 147 | cdef class PartitionAsyncDisk: 148 | def __cinit__(self, AsyncDisk adisk, uint64_t start, uint64_t end, bint debug): 149 | self.start = start 150 | self.end = end 151 | self.adisk = adisk 152 | self.debug = debug 153 | 154 | cpdef void write(self, uint64_t blknum, Block block, bint cond=1): 155 | if not cond: 156 | return 157 | if self.debug: 158 | print 'write(%d)' % (blknum + self.start,) 159 | assert blknum < self.end - self.start, "partition async disk: write blknum out of range" 160 | self.adisk.write(blknum + self.start, block, cond) 161 | 162 | cpdef Block read(self, uint64_t blknum): 163 | assert blknum < self.end - self.start, "partition async disk: read blknum out of range: %s %s" % (self.start, self.end) 164 | return self.adisk.read(blknum + self.start) 165 | 166 | cpdef void flush(self): 167 | self.adisk.flush() 168 | 169 | # Unverified bitmap allocator. 170 | # It searches in the range [startblock, endblock] for an 'unset' bit. 171 | # and returns it's index (uint64_t) 172 | cdef class Allocator: 173 | def __init__(self, readfn, startblock, endblock): 174 | self.readfn = readfn 175 | self.start = startblock 176 | self.end = endblock # inclusive 177 | 178 | cdef uint64_t _alloc(self, uint64_t i, Block block) nogil: 179 | cdef uint64_t j, k 180 | 181 | for j in range(block.size): 182 | for k in range(sizeof(uint64_t) * 8): 183 | if block.get(j) & ((1) << k) == 0: 184 | return i * BLOCKSIZE * 8 + j * sizeof(uint64_t) * 8 + k 185 | return 0 186 | 187 | cpdef uint64_t alloc(self): 188 | cdef uint64_t i 189 | cdef uint64_t a 190 | cdef Block block 191 | for i in range(self.start, self.end + 1): 192 | block = self.readfn(i) 193 | a = self._alloc(i, block) 194 | if a != 0: 195 | return a 196 | return 0 197 | 198 | 199 | cdef class DentryLookup: 200 | def __init__(self, inode): 201 | self._inode = inode 202 | 203 | cdef int locate_dentry(self, Block block, uint64_t[15] name) nogil: 204 | cdef int i 205 | cdef uint64_t ino, n, v 206 | for i in range(0, 512, 16): 207 | ino = block.get(i) 208 | if ino == 0: 209 | continue 210 | for n in range(15): 211 | v = name[n] 212 | if block.get(i + n + 1) != v: 213 | break 214 | else: 215 | return i 216 | return -1 217 | 218 | cdef int locate_empty_slot(self, Block block) nogil: 219 | cdef int i 220 | cdef uint64_t ino 221 | for i in range(0, 512, 16): 222 | ino = block.get(i) 223 | if ino == 0: 224 | return i 225 | return -1 226 | 227 | cdef tuple locate_dentry_ino(self, uint64_t ino, uint64_t[15] name): 228 | cdef Block block 229 | cdef uint64_t i, bid 230 | cdef int res 231 | 232 | for i in range(522): 233 | if i == 10: 234 | continue 235 | if not self._inode.is_mapped(Concat32(ino, i)): 236 | continue 237 | bid = self._inode.mappingi(Concat32(ino, i)) 238 | block = self._inode.read(bid) 239 | res = self.locate_dentry(block, name) 240 | if res >= 0: 241 | return i, res 242 | return 0, -1 243 | 244 | cdef tuple locate_empty_slot_ino(self, uint64_t ino): 245 | cdef Block block 246 | cdef uint64_t i, bid 247 | cdef int res 248 | 249 | for i in range(522): 250 | if i == 10: 251 | continue 252 | bid = self._inode.bmap(Concat32(ino, i)) 253 | block = self._inode.read(bid) 254 | res = self.locate_empty_slot(block) 255 | if res >= 0: 256 | return i, res 257 | return 0, -1 258 | -------------------------------------------------------------------------------- /inodepack.pxd: -------------------------------------------------------------------------------- 1 | from diskimpl cimport 
* 2 | from libc.stdint cimport uint64_t 3 | from diskimpl cimport Block 4 | 5 | cdef class InodePackDisk: 6 | cdef object _disk 7 | 8 | cpdef Block read(self, uint64_t ino) 9 | cpdef void set_iattr(self, uint64_t ino, Stat attr, Block block=*) 10 | cpdef Stat get_iattr(self, uint64_t ino, Block block=*) 11 | cpdef set_mapping(self, uint64_t ino, uint64_t off, uint64_t ptr, Block block=*) 12 | cpdef get_mapping(self, uint64_t ino, uint64_t off, Block block=*) 13 | -------------------------------------------------------------------------------- /inodepack.py: -------------------------------------------------------------------------------- 1 | import cython 2 | if not cython.compiled: 3 | from disk import * 4 | from collections import namedtuple 5 | 6 | 7 | # A class that packs multiple inodes together into a single block 8 | 9 | 10 | class InodePackDisk(object): 11 | # Field index for meta-data 12 | SIZE = 0 13 | MTIME = 1 14 | MODE = 2 15 | NLINK = 3 16 | _UNUSED = 4 17 | OFF = 5 18 | 19 | def __init__(self, metadisk, datadisk): 20 | self._disk = metadisk 21 | 22 | def read(self, ino): 23 | return self._disk.read(LShR(ino, 5)) 24 | 25 | @cython.locals(off='uint64_t') 26 | @cython.locals(bid='uint64_t') 27 | def set_iattr(self, ino, attr, block=None): 28 | off = Extract(8, 0, ino * 16) 29 | bid = LShR(ino, 5) # UDiv(ino, 32) 30 | 31 | if block is None: 32 | inode = self._disk.read(bid) 33 | else: 34 | inode = block 35 | 36 | inode[self.SIZE + off] = attr.size 37 | inode[self.MTIME + off] = attr.mtime 38 | inode[self.MODE + off] = attr.mode 39 | inode[self.NLINK + off] = attr.nlink 40 | self._disk.write(bid, inode) 41 | 42 | @cython.locals(off='uint64_t') 43 | @cython.locals(bid='uint64_t') 44 | def get_iattr(self, ino, block=None): 45 | off = Extract(8, 0, ino * 16) 46 | bid = LShR(ino, 5) # UDiv(ino, 32) 47 | 48 | if block is None: 49 | inode = self._disk.read(bid) 50 | else: 51 | inode = block 52 | return Stat( 53 | inode[off + self.SIZE], 54 | inode[off + self.MTIME], 55 | inode[off + self.MODE], 56 | inode[off + self.NLINK]) 57 | 58 | @cython.locals(ioff='uint64_t') 59 | @cython.locals(bid='uint64_t') 60 | def set_mapping(self, ino, off, ptr, block=None): 61 | assertion(ULT(off, 11)) 62 | 63 | ioff = Extract(8, 0, ino * 16) 64 | bid = LShR(ino, 5) # UDiv(ino, 32) 65 | 66 | if block is None: 67 | old = self._disk.read(bid) 68 | else: 69 | old = block 70 | 71 | old[off + ioff + self.OFF] = ptr 72 | self._disk.write(bid, old) 73 | 74 | def get_mapping(self, ino, off, block=None): 75 | if off >= 11: 76 | return 0 77 | return self._get_mapping(ino, off, block) 78 | 79 | @cython.locals(ioff='uint64_t') 80 | @cython.locals(bid='uint64_t') 81 | def _get_mapping(self, ino, off, block=None): 82 | ioff = Extract(8, 0, ino * 16) 83 | bid = LShR(ino, 5) # UDiv(ino, 32) 84 | 85 | if block is None: 86 | block = self._disk.read(bid) 87 | 88 | return block[off + ioff + self.OFF] 89 | 90 | def crash(self, mach): 91 | return self.__class__(self._disk.crash(mach), 92 | self._disk.crash(mach)) 93 | -------------------------------------------------------------------------------- /kvimpl.py: -------------------------------------------------------------------------------- 1 | from disk import * 2 | from yggdrasil.util import * 3 | from yggdrasil.ufarray import * 4 | 5 | 6 | # TODO: implement the FNV-1a hash as the default scheme 7 | # TODO: add support for transactions 8 | class KVImpl(object): 9 | KEY_SIZE = 8 10 | OFF_VALUE_SIZE = KEY_SIZE 11 | OFF_VALUE_DATA = OFF_VALUE_SIZE + 1 12 | VALUE_SIZE_LIMIT = 
(BlockSize / 64) - OFF_VALUE_DATA 13 | 14 | def __init__(self, disk, hashfn): 15 | self._disk = disk 16 | self._hashfn = hashfn 17 | 18 | # TODO: linear probing for 8 locations if collision 19 | def get(self, key): 20 | addr = self._hashfn(key) 21 | data = self._disk.read(addr) 22 | # check if key matches 23 | eq = And([key[i] == data[i] for i in range(self.KEY_SIZE)]) 24 | size = data[self.OFF_VALUE_SIZE] 25 | # check value size 26 | cond = And(eq, ULE(size, self.VALUE_SIZE_LIMIT)) 27 | end = Extract(BlockOffsetSort.size() - 1, 0, self.OFF_VALUE_DATA + size) 28 | return If(cond, data.getrange(self.OFF_VALUE_DATA, end), EmptyString()) 29 | 30 | # TODO: should check if the addr is usable (unmapped) 31 | # this might get tricky with delete 32 | def put(self, key, value): 33 | addr = self._hashfn(key) 34 | data = ConstBlock(0) 35 | for i in range(self.KEY_SIZE): 36 | data[i] = key[i] 37 | data[self.OFF_VALUE_SIZE] = value.size() 38 | data.setrange(self.OFF_VALUE_DATA, value) 39 | self._disk.write(addr, data) 40 | 41 | # TODO: bumap the key; maybe need to try to move another key 42 | # to the slot first 43 | def delete(self, key): 44 | pass 45 | 46 | def crash(self, mach): 47 | return self.__class__(self._disk.crash(mach)) 48 | -------------------------------------------------------------------------------- /kvspec.py: -------------------------------------------------------------------------------- 1 | from disk import * 2 | 3 | from yggdrasil.util import * 4 | from yggdrasil.ufarray import * 5 | 6 | 7 | # key: 64 bytes (512 bits) 8 | KeySort = BitVecSort(64 * 8) 9 | 10 | def FreshKey(prefix): 11 | return Const(fresh_name('key'), KeySort) 12 | 13 | class KVSpec(object): 14 | def __init__(self, mach, fn_size, fn_data): 15 | self._mach = mach 16 | self._fn_size = fn_size 17 | self._fn_data = fn_data 18 | 19 | def get(self, key): 20 | m = (self._mach.on, BoolVal(True)) 21 | size = substitute(self._fn_size(key), m) 22 | data = lambda off, fn=self._fn_data: substitute(fn(key, off), m) 23 | return String(size, data) 24 | 25 | def put(self, key, value): 26 | on = self._mach.create_on([]) 27 | self._fn_size = lambda x, size=value.size(), oldfn=self._fn_size: If(And(on, x == key), size, oldfn(x)) 28 | self._fn_data = lambda x, off, oldfn=self._fn_data: If(And(on, x == key), value[off], oldfn(x, off)) 29 | 30 | def crash(self, mach): 31 | return self.__class__(mach, self._fn_size, self._fn_data) 32 | -------------------------------------------------------------------------------- /lfs.pxd: -------------------------------------------------------------------------------- 1 | from diskimpl cimport Block, AsyncDisk 2 | from libc.stdint cimport uint64_t 3 | 4 | cdef class LFS: 5 | cdef AsyncDisk _disk 6 | cdef Block _sb 7 | cdef Block _imap 8 | -------------------------------------------------------------------------------- /lfs.py: -------------------------------------------------------------------------------- 1 | import cython 2 | from disk import * 3 | import errno 4 | import time 5 | import z3 6 | 7 | 8 | class LFS(object): 9 | SUPERBLOCK = 0 10 | 11 | SB_OFF_BALLOC = 0 12 | SB_OFF_IALLOC = 1 13 | SB_OFF_IMAP = 2 14 | 15 | I_OFF_MTIME = 0 16 | I_OFF_MODE = 1 17 | I_OFF_DATA = 4 18 | 19 | def __init__(self, disk): 20 | self._disk = disk 21 | 22 | self._sb = None 23 | self._imap = None 24 | 25 | def read(self, ino, block): 26 | self._begin() 27 | bid = self._imap[ino] 28 | r = self._disk.read(bid) 29 | self._commit(False) 30 | return r 31 | 32 | def _begin(self): 33 | assert self._sb is None 34 | assert 
self._imap is None 35 | 36 | self._sb = self._disk.read(self.SUPERBLOCK) 37 | self._imap = self._disk.read(self._sb[self.SB_OFF_IMAP]) 38 | 39 | def _balloc(self): 40 | a = self._sb[self.SB_OFF_BALLOC] 41 | self._sb[self.SB_OFF_BALLOC] += 1 42 | 43 | # Allocator returned a new block 44 | assertion(0 < (a + 1)) 45 | 46 | return a 47 | 48 | def _ialloc(self): 49 | a = self._sb[self.SB_OFF_IALLOC] 50 | self._sb[self.SB_OFF_IALLOC] += 1 51 | 52 | # we have a free inode.. 53 | assertion(a < 512) 54 | 55 | return a 56 | 57 | def _commit(self, write=True): 58 | assert self._sb is not None 59 | assert self._imap is not None 60 | 61 | if write: 62 | a = self._balloc() 63 | self._disk.write(a, self._imap) 64 | self._disk.flush() 65 | self._sb[self.SB_OFF_IMAP] = a 66 | self._disk.write(self.SUPERBLOCK, self._sb) 67 | self._disk.flush() 68 | 69 | self._sb = None 70 | self._imap = None 71 | 72 | def _set_map(self, ino, bid): 73 | self._imap[Extract(8, 0, ino)] = bid 74 | 75 | def _get_map(self, ino): 76 | return self._imap[Extract(8, 0, ino)] 77 | 78 | ######## 79 | 80 | def dir_lookup(self, blk, name): 81 | res = -errno.ENOENT 82 | for i in range(2): 83 | oname = blk[self.I_OFF_DATA + i * 2] 84 | oino = blk[self.I_OFF_DATA + i * 2 + 1] 85 | 86 | res = If(And(oname == name, 0 < oino), oino, res) 87 | return res 88 | 89 | def dir_find_empty(self, blk): 90 | res = BitVecVal(-errno.ENOSPC, 64) 91 | for i in range(2): 92 | res = If(blk[self.I_OFF_DATA + i * 2] == 0, i, res) 93 | return res 94 | 95 | def get_attr(self, ino): 96 | s = Stat(0, 0, 0) 97 | 98 | self._begin() 99 | 100 | blk = self._get_map(ino) 101 | blk = self._disk.read(blk) 102 | s.bsize = 0 103 | s.mode = blk[self.I_OFF_MODE] 104 | s.mtime = blk[self.I_OFF_MTIME] 105 | 106 | self._commit(False) 107 | 108 | return s 109 | 110 | def lookup(self, parent, name): 111 | self._begin() 112 | 113 | parent_blkno = self._get_map(parent) 114 | parent_blk = self._disk.read(parent_blkno) 115 | 116 | ino = self.dir_lookup(parent_blk, name) 117 | self._commit(False) 118 | return ino 119 | 120 | def exists(self, parent, name): 121 | return 0 < self.lookup(parent, name) 122 | 123 | def mknod(self, parent, name, mode, mtime): 124 | # check if the file exists 125 | if self.exists(parent, name): 126 | assertion(False) 127 | return BitVecVal(-errno.EEXIST, 64) 128 | 129 | self._begin() 130 | 131 | parent_blkno = self._get_map(parent) 132 | parent_blk = self._disk.read(parent_blkno) 133 | 134 | ino = self._ialloc() 135 | blkno = self._balloc() 136 | 137 | eoff = self.dir_find_empty(parent_blk) 138 | 139 | if eoff < 0: 140 | self._commit(False) 141 | return eoff 142 | 143 | # write new inode 144 | inodeblk = ConstBlock(0) 145 | 146 | inodeblk[self.I_OFF_MTIME] = mtime 147 | inodeblk[self.I_OFF_MODE] = mode 148 | self._disk.write(blkno, inodeblk) 149 | 150 | # update parent directory 151 | parent_blk[self.I_OFF_DATA + 2 * Extract(8, 0, eoff)] = name 152 | parent_blk[self.I_OFF_DATA + 2 * Extract(8, 0, eoff) + 1] = ino 153 | 154 | new_parent_blkno = self._balloc() 155 | 156 | self._disk.write(new_parent_blkno, parent_blk) 157 | 158 | # update the imap 159 | self._set_map(ino, blkno) 160 | self._set_map(parent, new_parent_blkno) 161 | 162 | self._commit() 163 | 164 | return ino 165 | 166 | def crash(self, mach): 167 | return self.__class__(self._disk.crash(mach)) 168 | 169 | 170 | def mkfs(disk): 171 | sb = disk._disk.read(0) 172 | if sb[0] == 0: 173 | sb[LFS.SB_OFF_BALLOC] = 3 174 | sb[LFS.SB_OFF_IALLOC] = 2 175 | sb[LFS.SB_OFF_IMAP] = 1 176 | disk._disk.write(0, 
sb) 177 | 178 | imap = ConstBlock(0) 179 | imap[1] = 2 180 | disk._disk.write(1, imap) 181 | 182 | 183 | def create_lfs(*args): 184 | disk = AsyncDisk('/tmp/foo.img') 185 | lfs = LFS(disk) 186 | mkfs(lfs) 187 | 188 | return lfs 189 | 190 | if __name__ == '__main__': 191 | lfs = create_lfs() 192 | 193 | print lfs.lookup(1, 16) 194 | print lfs.get_attr(4) 195 | print lfs.mknod(1, 20, 2000, 2000) 196 | print lfs.lookup(1, 20) 197 | print lfs.get_attr(4) 198 | -------------------------------------------------------------------------------- /lfs_fuse.pyx: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import traceback 4 | 5 | from lfs import create_lfs 6 | 7 | from llfuse cimport * 8 | from diskimpl cimport Block 9 | 10 | from libc.stdlib cimport malloc, calloc, free, abort, realloc 11 | from libc.string cimport memcpy, memset, strlen, strcmp 12 | from posix.fcntl cimport S_IFREG, S_IFDIR, S_IFLNK 13 | from posix.types cimport ino_t 14 | 15 | from libc.errno cimport ENOENT, ENOTDIR, EISDIR, EACCES, ENOMEM, ENAMETOOLONG 16 | 17 | 18 | inode_obj = None 19 | MAX_NAME_LENGTH = 8 20 | 21 | ################# 22 | 23 | 24 | cdef int mkstat(fuse_ino_t ino, struct_stat *stbuf): 25 | print 'mkstat ino={}'.format(ino) 26 | stat = inode_obj.get_attr(ino) 27 | 28 | if ino == 1: 29 | stbuf.st_mode = 0755 | S_IFDIR 30 | else: 31 | stbuf.st_mode = stat.mode 32 | 33 | stbuf.st_ino = ino 34 | stbuf.st_nlink = 1 35 | stbuf.st_size = 0 36 | stbuf.st_mtime = stat.mtime 37 | 38 | return 0 39 | 40 | 41 | cdef void ll_getattr(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi): 42 | print "getattr ino={}".format(ino) 43 | 44 | cdef struct_stat stbuf 45 | memset(&stbuf, 0, sizeof(stbuf)) 46 | if mkstat(ino, &stbuf) == -1: 47 | fuse_reply_err(req, ENOENT) 48 | else: 49 | fuse_reply_attr(req, &stbuf, 1.0) 50 | 51 | 52 | cdef void ll_lookup(fuse_req_t req, fuse_ino_t parent, const char *name): 53 | print "lookup parent={}, name={}".format(parent, name) 54 | 55 | cdef size_t namelen = strlen(name) 56 | 57 | if namelen > MAX_NAME_LENGTH: 58 | fuse_reply_err(req, ENOENT) 59 | return 60 | 61 | cdef fuse_entry_param e 62 | memset(&e, 0, sizeof(e)) 63 | 64 | cdef uint64_t pname = 0 65 | memcpy(&pname, name, namelen) 66 | 67 | t = inode_obj.lookup(parent, pname) 68 | 69 | if t < 0: 70 | fuse_reply_err(req, ENOENT) 71 | return 72 | e.ino = t 73 | e.attr_timeout = 1.0 74 | e.entry_timeout = 1.0 75 | mkstat(e.ino, &e.attr) 76 | fuse_reply_entry(req, &e) 77 | 78 | 79 | cdef struct dirbuf: 80 | char *p; 81 | size_t size 82 | 83 | 84 | cdef void dirbuf_add(fuse_req_t req, dirbuf *b, const char *name, fuse_ino_t ino): 85 | cdef struct_stat stbuf 86 | cdef size_t oldsize = b.size 87 | b.size += fuse_add_direntry(req, NULL, 0, name, NULL, 0) 88 | cdef char *newp = realloc(b.p, b.size) 89 | if newp == NULL: 90 | print "memory error, could not realloc buffer" 91 | abort() 92 | 93 | b.p = newp 94 | memset(&stbuf, 0, sizeof(stbuf)) 95 | stbuf.st_ino = ino 96 | fuse_add_direntry(req, b.p + oldsize, b.size - oldsize, name, &stbuf, b.size) 97 | 98 | 99 | cdef int reply_buf_limited(fuse_req_t req, const char *buf, size_t bufsize, off_t off, size_t maxsize): 100 | if off < bufsize: 101 | return fuse_reply_buf(req, buf + off, min(bufsize - off, maxsize)) 102 | else: 103 | fuse_reply_buf(req, NULL, 0) 104 | 105 | 106 | cdef void ll_readdir(fuse_req_t req, fuse_ino_t ino, 107 | size_t size, off_t off, fuse_file_info* fi): 108 | print "readdir ino={} size={} off={}".format(ino, 
size, off) 109 | 110 | cdef dirbuf b 111 | cdef Block block 112 | cdef uint64_t i 113 | cdef uint64_t name = 0 114 | 115 | if ino != 1: 116 | fuse_reply_err(req, ENOTDIR) 117 | else: 118 | memset(&b, 0, sizeof(b)) 119 | dirbuf_add(req, &b, ".", ino) 120 | dirbuf_add(req, &b, "..", ino) 121 | 122 | block = inode_obj.read(ino, 0) 123 | 124 | for i in range(32): 125 | fileino = block[4 + i * 2 + 1] 126 | if fileino != 0: 127 | name = block[4 + i * 2] 128 | dirbuf_add(req, &b, &name, fileino) 129 | 130 | if b.size > size + off: 131 | break 132 | 133 | reply_buf_limited(req, b.p, b.size, off, size) 134 | free(b.p) 135 | 136 | cdef mknod(fuse_ino_t parent, const char *name, mode_t mode, dev_t rdev): 137 | cdef size_t namelen = strlen(name) 138 | 139 | if namelen > MAX_NAME_LENGTH: 140 | return -ENAMETOOLONG 141 | 142 | cdef uint64_t pname = 0 143 | memcpy(&pname, name, namelen) 144 | 145 | return inode_obj.mknod(parent, pname, mode, int(time.time())) 146 | 147 | 148 | cdef void ll_mknod(fuse_req_t req, fuse_ino_t parent, const char *name, 149 | mode_t mode, dev_t rdev): 150 | print 'mknod parent={}, name={}, mode={}, rdev={}'.format(parent, name, mode, rdev) 151 | 152 | ino = mknod(parent, name, mode, rdev) 153 | 154 | if ino < 0: 155 | fuse_reply_err(req, -1 * ino) 156 | return 157 | 158 | cdef fuse_entry_param e 159 | memset(&e, 0, sizeof(e)) 160 | e.ino = ino 161 | e.attr_timeout = 1.0 162 | e.entry_timeout = 1.0 163 | mkstat(e.ino, &e.attr) 164 | fuse_reply_entry(req, &e) 165 | 166 | 167 | 168 | def main(): 169 | global inode_obj 170 | 171 | if '--' in sys.argv: 172 | fargs = sys.argv[sys.argv.index('--') + 1:] 173 | sys.argv = sys.argv[:sys.argv.index('--')] 174 | else: 175 | fargs = [] 176 | 177 | inode_obj = create_lfs(fargs) 178 | 179 | cdef int argc = len(sys.argv) 180 | cdef char** argv = malloc(argc * sizeof(char**)) 181 | if argv == NULL: 182 | print "Malloc: Memory error. 
Could not allocate" 183 | abort() 184 | 185 | for i in range(argc): 186 | argv[i] = sys.argv[i] 187 | 188 | cmain(argc, argv) 189 | 190 | 191 | cdef int cmain(int argc, char **argv): 192 | cdef fuse_args args 193 | args.argc = argc 194 | args.argv = argv 195 | args.allocated = 0 196 | 197 | cdef fuse_chan *ch 198 | cdef char* mountpoint = NULL 199 | cdef int err = -1 200 | 201 | cdef fuse_session *se 202 | 203 | cdef fuse_lowlevel_ops ops 204 | memset(&ops, 0, sizeof(ops)) 205 | 206 | ops.getattr = &ll_getattr 207 | ops.lookup = &ll_lookup 208 | ops.readdir = &ll_readdir 209 | ops.mknod = &ll_mknod 210 | 211 | if fuse_parse_cmdline(&args, &mountpoint, NULL, NULL) != -1: 212 | ch = fuse_mount(mountpoint, &args) 213 | if ch != NULL: 214 | se = fuse_lowlevel_new(&args, &ops, sizeof(ops), NULL) 215 | 216 | if se != NULL: 217 | if fuse_set_signal_handlers(se) != -1: 218 | fuse_session_add_chan(se, ch) 219 | err = fuse_session_loop(se) 220 | fuse_remove_signal_handlers(se) 221 | fuse_session_remove_chan(ch) 222 | fuse_session_destroy(se) 223 | fuse_unmount(mountpoint, ch) 224 | 225 | fuse_opt_free_args(&args) 226 | 227 | return err 228 | -------------------------------------------------------------------------------- /lfs_fuse_main.py: -------------------------------------------------------------------------------- 1 | import lfs_fuse 2 | 3 | 4 | def run(): 5 | lfs_fuse.main() 6 | 7 | 8 | if __name__ == '__main__': 9 | run() 10 | 11 | -------------------------------------------------------------------------------- /logspec.py: -------------------------------------------------------------------------------- 1 | from disk import * 2 | from yggdrasil.util import * 3 | 4 | class LogSpecEntry(object): 5 | def __init__(self, data, size): 6 | self._data = data 7 | self._size = size 8 | 9 | def __getitem__(self, off): 10 | return self._data(off) 11 | 12 | @property 13 | def size(self): 14 | return self._size 15 | 16 | 17 | class LogSpec(object): 18 | def __init__(self, mach, datafn, sizefn, nextfn, tail): 19 | self._mach = mach 20 | self._datafn = datafn 21 | self._sizefn = sizefn 22 | self._nextfn = nextfn 23 | self._tail = tail 24 | 25 | def append(self, data, size): 26 | on = self._mach.create_on([]) 27 | tail = self._tail 28 | self._datafn = lambda x, off, oldfn=self._datafn: If(And(on, x == tail), data(off), oldfn(x, off)) 29 | self._sizefn = lambda x, oldfn=self._sizefn: If(And(on, x == tail), size, oldfn(x)) 30 | newtail = Const(fresh_name("tail"), SizeSort) 31 | self._mach.add_control(newtail) 32 | assertion(ULT(tail, newtail)) 33 | self._nextfn = lambda x, oldfn=self._nextfn: If(And(on, x == tail), newtail, oldfn(x)) 34 | self._tail = If(on, newtail, tail) 35 | 36 | def __getitem__(self, k): 37 | m = [(c, BoolVal(True)) for c in self._mach.control] 38 | data = lambda off, datafn=self._datafn: substitute(datafn(k, off), *m) 39 | size = substitute(self._sizefn(k), *m) 40 | return LogSpecEntry(data, size) 41 | 42 | @property 43 | def tail(self): 44 | m = [(c, BoolVal(True)) for c in self._mach.control] 45 | return substitute(self._tail, *m) 46 | 47 | def crash(self, mach): 48 | return self.__class__(mach, self._datafn, self._sizefn, self._nextfn, self._tail) 49 | -------------------------------------------------------------------------------- /partition.py: -------------------------------------------------------------------------------- 1 | from disk import * 2 | import disk 3 | 4 | 5 | class Partition(object): 6 | def __init__(self, disk, start, end): 7 | self._start = start 8 | self._end = end 9 
| self._disk = disk 10 | 11 | def valid(self, bid): 12 | return ULT(bid, self._end - self._start) 13 | 14 | def read(self, bid): 15 | return self._disk.read(bid + self._start) 16 | 17 | def write(self, bid, block): 18 | disk.assertion(self.valid(bid)) 19 | self._disk.write(bid + self._start, block) 20 | 21 | def flush(self): 22 | self._disk.flush() 23 | 24 | def crash(self, mach): 25 | return self.__class__(self._disk.crash(mach), self._start, self._end) 26 | -------------------------------------------------------------------------------- /symbolicmap.py: -------------------------------------------------------------------------------- 1 | from yggdrasil.util import BoolVal, If, And, Or 2 | 3 | 4 | # A symbolic map that can be 'extracted' to a dict for execution 5 | 6 | 7 | class SymbolicMap(object): 8 | def __init__(self): 9 | self._map = [] 10 | 11 | def get(self, gkey, dresult): 12 | for key, value in self._map: 13 | try: 14 | cond = And(*[a == b for a, b in zip(gkey, key)]) 15 | except: 16 | cond = gkey == key 17 | 18 | dresult = If(cond, value, dresult) 19 | return dresult 20 | 21 | def has_key(self, gkey): 22 | res = BoolVal(False) 23 | for key, _ in self._map: 24 | try: 25 | cond = And(*[a == b for a, b in zip(gkey, key)]) 26 | except: 27 | cond = gkey == key 28 | 29 | res = Or(cond, res) 30 | 31 | return res 32 | 33 | def __setitem__(self, key, value): 34 | self._map.append((key, value)) 35 | -------------------------------------------------------------------------------- /test_bitmap.py: -------------------------------------------------------------------------------- 1 | from z3 import * 2 | from bitmap import * 3 | import disk 4 | 5 | from yggdrasil.util import * 6 | from yggdrasil.diskspec import * 7 | from yggdrasil import test 8 | 9 | 10 | class TestBitmapDisk(test.RefinementTest): 11 | def create_impl(self, mach): 12 | array = FreshDiskArray('impl', 13 | domain=BitVecSort(SizeSort.size() - LogBlockSize)) 14 | return BitmapDisk(AsyncDisk(mach, array)) 15 | 16 | def create_spec(self, mach): 17 | array = FreshDiskArray('spec') 18 | return BitmapSpec(AsyncDisk(mach, array)) 19 | 20 | def equivalence(self, spec, impl, *args, **kwargs): 21 | i = BitVec(fresh_name('i'), SizeSort.size()) 22 | return ForAll([i], spec.is_set(i) == impl.is_set(i)) 23 | 24 | @test.z3_option(AUTO_CONFIG=True) 25 | def match_set_bit(self): 26 | bid = BitVec(fresh_name('bid'), SizeSort.size()) 27 | yield (bid,) 28 | 29 | @test.z3_option(AUTO_CONFIG=True) 30 | def match_unset_bit(self): 31 | bid = BitVec(fresh_name('bid'), SizeSort.size()) 32 | yield (bid,) 33 | 34 | 35 | if __name__ == '__main__': 36 | test.main() 37 | -------------------------------------------------------------------------------- /test_cp.py: -------------------------------------------------------------------------------- 1 | from disk import * 2 | 3 | from yggdrasil.util import * 4 | from yggdrasil.diskspec import * 5 | from yggdrasil import test 6 | 7 | 8 | class CPSpec(object): 9 | def __init__(self, ds): 10 | self._ds = ds 11 | 12 | def cp(self, sparent, sname, dparent, dname): 13 | # sparent/sname exists 14 | assertion(self._ds._direxists(sparent, sname)) 15 | 16 | # dparent/dname does not exist 17 | assertion(Not(self._ds._direxists(dparent, dname))) 18 | 19 | # source inode 20 | sino = self._ds._dirfn(sparent, sname) 21 | 22 | assertion(self._ds.is_regular(sino)) 23 | 24 | # destination inode 25 | dino = self._ds.ialloc(True) 26 | 27 | debug('sino', sino) 28 | debug('sino valid?', self._ds.is_valid(sino)) 29 | debug('sino-mode', 
self._ds._attrs.mode(sino)) 30 | 31 | # Set attributes for new inode 32 | self._ds._attrs.set_bsize(dino, 0) 33 | self._ds._attrs.set_mtime(dino, 0) #self._ds._attrs.mtime(sino)) 34 | self._ds._attrs.set_mode(dino, 0) #self._ds._attrs.mode(sino)) 35 | # 36 | # # associate parent x name -> ino 37 | self._ds._dirfn = self._ds._dirfn.update((dparent, dname), dino) 38 | self._ds._direxists = self._ds._direxists.update((dparent, dname), BoolVal(True)) 39 | 40 | # self.write(dino, 0, 0, self._datafn(sino, 0, 0)) 41 | 42 | return dino 43 | 44 | 45 | class CPImpl(object): 46 | def __init__(self, di): 47 | self._di = di 48 | 49 | def cp(self, sparent, sname, dparent, dname): 50 | # sparent/sname exists 51 | assertion(self._di._direxists(sparent, sname)) 52 | 53 | # dparent/dname does not exist 54 | assertion(Not(self._di._direxists(dparent, dname))) 55 | 56 | sino = self._di.lookup(sparent, sname) 57 | if sino == 0: 58 | return sino 59 | 60 | assertion(self._di.is_regular(sino)) 61 | 62 | sattr = self._di.get_iattr(sino) 63 | 64 | dino = self._di.mknod(dparent, dname, 0, 0) #, sattr.mode, sattr.mtime) 65 | if dino < 0: 66 | return dino 67 | 68 | fsize = self._di.get_iattr(sino).bsize 69 | if fsize == 0: 70 | return dino 71 | else: 72 | assertion(False) 73 | return dino 74 | # dat = self.read(sino, 0, 0) 75 | # 76 | # b = ConstBlock(0) 77 | # b[0] = dat 78 | 79 | # self.write(dino, 0, 0, b) 80 | 81 | 82 | class CPRefinement(test.RefinementTest): 83 | def _create_dir(self, mach): 84 | dirfn = FreshUFunction('dirfn', InoSort, NameSort, InoSort) 85 | direxists = FreshUFunction('direxists', InoSort, NameSort, BoolSort()) 86 | datafn = FreshUFunction('datafn', InoSort, BitVecSort(32), BlockOffsetSort, SizeSort) 87 | ifree = FreshUFunction('ifreefn', InoSort, BoolSort()) 88 | attrs = FreshAttributes() 89 | return DirSpec(mach, dirfn, direxists, datafn, ifree, attrs) 90 | 91 | def create_spec(self, mach): 92 | dspec = self._create_dir(mach) 93 | return dspec, CPSpec(dspec) 94 | 95 | def create_impl(self, mach): 96 | dspec = self._create_dir(mach) 97 | return dspec, CPImpl(dspec) 98 | 99 | def test_cp(self): 100 | spec_mach = Machine() 101 | spec, cp_spec = self.create_spec(spec_mach) 102 | 103 | impl_mach = Machine() 104 | impl, cp_impl = self.create_impl(impl_mach) 105 | 106 | ## 107 | 108 | ino = FreshIno('ino-pre') 109 | 110 | blocknum = FreshBitVec('blocknum-pre', 32) 111 | off = FreshBitVec('off-pre', BlockOffsetSort.size()) 112 | 113 | pre = ForAll([ino], And( 114 | spec.is_valid(ino) == impl.is_valid(ino), 115 | 116 | # General inode properties 117 | Implies(spec.is_valid(ino), And( 118 | spec.get_iattr(ino) == impl.get_iattr(ino))), 119 | 120 | # Non-directory properties 121 | Implies(Not(spec.is_dir(ino)), And( 122 | spec.read(ino, blocknum, off) == impl.read(ino, blocknum, off), 123 | )), 124 | 125 | # Directory properties 126 | Implies(spec.is_dir(ino), And( 127 | # spec.lookup(ino, name) == impl.lookup(ino, name))), 128 | )) 129 | )) 130 | 131 | self.show(And(*assertion.assertions), pre) 132 | 133 | ######################## 134 | 135 | sparent0 = FreshIno('sparent0') 136 | sname0 = FreshName('sname0') 137 | 138 | dparent0 = FreshIno('dparent0') 139 | dname0 = FreshName('dname0') 140 | 141 | sino = cp_spec.cp(sparent0, sname0, dparent0, dname0) 142 | iino = cp_impl.cp(sparent0, sname0, dparent0, dname0) 143 | 144 | assertion(sino == iino) 145 | 146 | ######################### 147 | 148 | ino = FreshIno('ino-post') 149 | 150 | post = ForAll([ino], And( 151 | spec.is_valid(ino) == 
impl.is_valid(ino), 152 | 153 | # General inode properties 154 | Implies(spec.is_valid(ino), And( 155 | spec.get_iattr(ino) == impl.get_iattr(ino))), 156 | 157 | Implies(Not(spec.is_dir(ino)), And( 158 | spec.read(ino, blocknum, off) == impl.read(ino, blocknum, off), 159 | )), 160 | 161 | # Directory properties 162 | Implies(spec.is_dir(ino), And( 163 | # spec.lookup(ino, name) == impl.lookup(ino, name) 164 | )) 165 | )) 166 | 167 | self.psolve(Not(Implies(pre, post))) 168 | 169 | 170 | if __name__ == '__main__': 171 | test.main() 172 | -------------------------------------------------------------------------------- /test_dirspec.py: -------------------------------------------------------------------------------- 1 | from dirinode import DirImpl 2 | from dirspec import * 3 | 4 | from yggdrasil import test 5 | 6 | 7 | class DirRefinementTest(test.RefinementTest): 8 | def create_spec(self, mach): 9 | dirfn = FreshUFunction('dirfn', InoSort, NameSort, InoSort) 10 | direxists = FreshUFunction('direxists', InoSort, NameSort, BoolSort()) 11 | datafn = FreshUFunction('datafn', InoSort, BitVecSort(32), BlockOffsetSort, SizeSort) 12 | ifree = FreshUFunction('ifreefn', InoSort, BoolSort()) 13 | attrs = FreshAttributes() 14 | 15 | return DirSpec(mach, dirfn, direxists, datafn, ifree, attrs) 16 | 17 | def create_impl(self, mach): 18 | mappedarray = FreshDiskArray('mappedarray') 19 | attr_array = FreshDiskArray('attrs', domain=BitVecSort(32)) 20 | diskarray = FreshDiskArray('diskarray') 21 | 22 | ifreearray = FreshDiskArray('ifreearray', domain=BitVecSort(32)) 23 | orphans = FreshDiskArray('orphans') 24 | 25 | inode = InodeSpec(mach, [mappedarray, attr_array, diskarray, None, ifreearray, orphans]) 26 | return DirImpl(inode, inode, Allocator32, BitmapSpec, DirLook) 27 | 28 | def equivalence(self, spec, impl, **kwargs): 29 | ino = FreshIno('ino-pre') 30 | blocknum = FreshBitVec('blocknum-pre', 32) 31 | off = FreshBitVec('off-pre', BlockOffsetSort.size()) 32 | mapped_block = FreshBitVec('mapped-block-pre', 32) 33 | 34 | return ForAll([ino, mapped_block, blocknum, off], And( 35 | # The bsize of an inode should not overflow when multiplied by 4k 36 | ULT(spec._attrs.bsize(ino), 1048576), 37 | 38 | ULE(spec._attrs.fsize(ino), spec._attrs.bsize(ino) * 4096), 39 | 40 | Implies(ULT(0, spec._attrs.bsize(ino)), 41 | ULT(spec._attrs.bsize(ino) * 4096 - 4096, spec._attrs.fsize(ino))), 42 | 43 | impl.is_valid(ino) == spec.is_valid(ino), 44 | 45 | # GCable 46 | Implies(impl.is_gcable(ino), 47 | And( 48 | spec._ifreefn(ino), 49 | spec._attrs.bsize(ino) == 0, 50 | spec._attrs.nlink(ino) == 0, 51 | spec.read(ino, blocknum, off) == 0)), 52 | 53 | # General inode properties 54 | Implies(spec.is_valid(ino), And( 55 | spec.get_iattr(ino) == impl.get_iattr(ino))), 56 | 57 | Implies( 58 | impl.is_ifree(ino), 59 | Not(impl._inode.is_mapped(Concat(ino, mapped_block)))), 60 | 61 | Implies(ULE(impl.get_iattr(ino).bsize, mapped_block), 62 | Not(impl._inode.is_mapped(Concat(ino, mapped_block)))), 63 | 64 | # Non-directory properties 65 | Implies(Not(spec.is_dir(ino)), And( 66 | Implies(Not(impl.is_gcable(ino)), 67 | spec.read(ino, blocknum, off) == impl.read(ino, blocknum)[off]), 68 | )), 69 | )) 70 | 71 | def call_mknod(self, spec, impl, args): 72 | i1 = spec.mknod(*args) 73 | i2, err2 = impl.mknod(*args) 74 | assertion(i1 != 0) 75 | assertion(i1 == i2) 76 | 77 | @test.z3_option(AUTO_CONFIG=False) 78 | def match_mknod(self): 79 | parent0 = FreshIno('parent') 80 | name0 = [FreshName('name-pre') for _ in range(16)] 81 | 82 | mode = 
FreshSize('mode') 83 | mtime = FreshSize('mtime') 84 | 85 | yield (parent0, name0, mode, mtime) 86 | 87 | # @test.z3_option(AUTO_CONFIG=True) 88 | # def match_write(self): 89 | # ino0 = FreshIno('ino0-write') 90 | # blocknum0 = BitVec('blocknum0-write', 32) 91 | # v0 = FreshBlock('v0-write') 92 | # 93 | # yield (ino0, blocknum0, v0) 94 | 95 | @test.z3_option(AUTO_CONFIG=False) 96 | def match_rename(self): 97 | oparent0 = FreshIno('oparent0-write') 98 | nparent0 = FreshIno('nparent0-write') 99 | 100 | oname0 = [FreshName('oname0-write') for _ in range(16)] 101 | nname0 = [FreshName('nname0-write') for _ in range(16)] 102 | 103 | yield (oparent0, oname0, nparent0, nname0) 104 | 105 | def call_unlink(self, spec, impl, args): 106 | i1 = spec.unlink(*args) 107 | i2 = impl.unlink(*args) 108 | assertion(i1 != 0) 109 | assertion(i1 == i2) 110 | 111 | @test.z3_option(AUTO_CONFIG=False) 112 | def match_unlink(self): 113 | parent0 = FreshIno('parent0') 114 | name0 = [FreshName('name0') for _ in range(16)] 115 | 116 | yield (parent0, name0) 117 | 118 | def call_rmdir(self, spec, impl, args): 119 | i1, err1 = spec.rmdir(*args) 120 | i2, err2 = impl.rmdir(*args) 121 | assertion(err1 == err2) 122 | assertion(Implies(err1 == 0, i1 != 0)) 123 | assertion(i1 == i2) 124 | 125 | @test.z3_option(AUTO_CONFIG=False) 126 | def match_rmdir(self): 127 | parent0 = FreshIno('parent0') 128 | name0 = [FreshName('name0') for _ in range(16)] 129 | 130 | yield (parent0, name0) 131 | 132 | @test.z3_option(AUTO_CONFIG=False) 133 | def match_forget(self): 134 | ino0 = FreshIno('ino0') 135 | yield (ino0,) 136 | 137 | @test.z3_option(AUTO_CONFIG=False) 138 | def nop_gc1(self): 139 | orph_index0 = FreshIno('orph_index0') 140 | block_off0 = FreshIno('block_off') 141 | yield(orph_index0, block_off0) 142 | 143 | @test.z3_option(AUTO_CONFIG=False) 144 | def nop_gc2(self): 145 | orph_index0 = FreshIno('orph_index0') 146 | yield(orph_index0,) 147 | 148 | @test.z3_option(AUTO_CONFIG=False) 149 | def nop_gc3(self): 150 | yield () 151 | 152 | 153 | class DirRefinement(test.RefinementTest): 154 | def create_spec(self, mach): 155 | dirfn = FreshUFunction('dirfn', InoSort, NameSort, InoSort) 156 | direxists = FreshUFunction('direxists', InoSort, NameSort, BoolSort()) 157 | datafn = FreshUFunction('datafn', InoSort, BitVecSort(32), BlockOffsetSort, SizeSort) 158 | ifree = FreshUFunction('ifreefn', InoSort, BoolSort()) 159 | attrs = FreshAttributes() 160 | 161 | return DirSpec(mach, dirfn, direxists, datafn, ifree, attrs) 162 | 163 | def create_impl(self, mach): 164 | mappedarray = FreshDiskArray('mappedarray') 165 | attr_array = FreshDiskArray('attrs', domain=BitVecSort(32)) 166 | diskarray = FreshDiskArray('diskarray') 167 | 168 | ifreearray = FreshDiskArray('ifreearray', domain=BitVecSort(32)) 169 | orphans = FreshDiskArray('orphans') 170 | 171 | inode = InodeSpec(mach, [mappedarray, attr_array, diskarray, None, ifreearray, orphans]) 172 | return DirImpl(inode, inode, Allocator32, BitmapSpec, DirLook) 173 | 174 | def test_match_write(self): 175 | spec_mach = Machine() 176 | spec = self.create_spec(spec_mach) 177 | 178 | impl_mach = Machine() 179 | impl = self.create_impl(impl_mach) 180 | 181 | ino = FreshIno('ino-pre') 182 | 183 | blocknum = FreshBitVec('blocknum-pre', 32) 184 | off = FreshBitVec('off-pre', BlockOffsetSort.size()) 185 | 186 | mapped_block = FreshBitVec('mapped-block-pre', 32) 187 | 188 | pre = ForAll([ino], And( 189 | # The bsize of an inode should not overflow when multiplied by 4k 190 | ULT(spec._attrs.bsize(ino), 
1048576), 191 | 192 | ULE(spec._attrs.fsize(ino), spec._attrs.bsize(ino) * 4096), 193 | 194 | Implies(ULT(0, spec._attrs.bsize(ino)), 195 | ULT(spec._attrs.bsize(ino) * 4096 - 4096, spec._attrs.fsize(ino))), 196 | 197 | spec.is_valid(ino) == impl.is_valid(ino), 198 | 199 | # GCable 200 | Implies(impl.is_gcable(ino), 201 | And( 202 | spec._ifreefn(ino), 203 | spec._attrs.bsize(ino) == 0, 204 | spec._attrs.nlink(ino) == 0, 205 | spec.read(ino, blocknum, off) == 0)), 206 | 207 | # General inode properties 208 | Implies(spec.is_valid(ino), And( 209 | spec.get_iattr(ino) == impl.get_iattr(ino))), 210 | 211 | ForAll([mapped_block], Implies( 212 | impl.is_ifree(ino), 213 | Not(impl._inode.is_mapped(Concat(ino, mapped_block))))), 214 | 215 | # Non-directory properties 216 | Implies(Not(spec.is_dir(ino)), And( 217 | Implies(Not(impl.is_gcable(ino)), spec.read(ino, blocknum, off) == impl.read(ino, blocknum)[off]), 218 | 219 | ForAll([mapped_block], 220 | Implies(ULE(impl.get_iattr(ino).bsize, mapped_block), 221 | Not(impl._inode.is_mapped(Concat(ino, mapped_block))))), 222 | )), 223 | 224 | # Directory properties 225 | Implies(spec.is_dir(ino), And( 226 | # spec.lookup(ino, name) == impl.lookup(ino, name))), 227 | )) 228 | )) 229 | 230 | # self.show(And(*assertion.assertions), pre, AUTO_CONFIG=False) 231 | 232 | ######################### 233 | 234 | ino0 = FreshIno('ino0-write') 235 | blocknum0 = BitVec('blocknum0-write', 32) 236 | size0 = BitVec('size0-write', 32) 237 | v0 = FreshBlock('v0-write') 238 | 239 | spec.write(ino0, blocknum0, v0, size0) 240 | impl.write(ino0, blocknum0, v0, size0) 241 | 242 | ################ 243 | 244 | spec = spec.crash(Machine()) 245 | impl = impl.crash(Machine()) 246 | 247 | ############### 248 | 249 | # ino = FreshIno('ino-post') 250 | 251 | post = ForAll([ino], And( 252 | # The bsize of an inode should not overflow when multiplied by 4k 253 | ULT(spec._attrs.bsize(ino), 1048576), 254 | 255 | ULE(spec._attrs.fsize(ino), spec._attrs.bsize(ino) * 4096), 256 | 257 | Implies(ULT(0, spec._attrs.bsize(ino)), 258 | ULT(spec._attrs.bsize(ino) * 4096 - 4096, spec._attrs.fsize(ino))), 259 | 260 | spec.is_valid(ino) == impl.is_valid(ino), 261 | 262 | # GCable 263 | Implies(impl.is_gcable(ino), 264 | And( 265 | spec._ifreefn(ino), 266 | spec._attrs.bsize(ino) == 0, 267 | spec._attrs.nlink(ino) == 0, 268 | spec.read(ino, blocknum, off) == 0)), 269 | 270 | # General inode properties 271 | Implies(spec.is_valid(ino), And( 272 | spec.get_iattr(ino) == impl.get_iattr(ino))), 273 | 274 | ForAll([mapped_block], Implies( 275 | impl.is_ifree(ino), 276 | Not(impl._inode.is_mapped(Concat(ino, mapped_block))))), 277 | 278 | # Non-directory properties 279 | Implies(Not(spec.is_dir(ino)), And( 280 | Implies(Not(impl.is_gcable(ino)), spec.read(ino, blocknum, off) == impl.read(ino, blocknum)[off]), 281 | 282 | ForAll([mapped_block], 283 | Implies(ULE(impl.get_iattr(ino).bsize, mapped_block), 284 | Not(impl._inode.is_mapped(Concat(ino, mapped_block))))), 285 | )), 286 | )) 287 | 288 | opt = { 289 | 'AUTO_CONFIG': False, 290 | } 291 | 292 | # self.psolve(Not(Implies(pre, post)), **opt) 293 | 294 | assumption = And(spec_mach.assumption, impl_mach.assumption, *assertion.assertions) 295 | 296 | # print z3.simplify(assumption) 297 | 298 | model = self._solve(assumption, 299 | ForAll(spec_mach.control, Not(Implies(pre, post))), **opt) 300 | 301 | if model: 302 | print 'Spec read', model.eval(spec.read(ino, blocknum, off)) 303 | print 'Impl read', model.eval(impl.read(ino, blocknum)[off]) 304 | 305 | 
self.assertIsNone(model) 306 | 307 | 308 | if __name__ == '__main__': 309 | test.main() 310 | -------------------------------------------------------------------------------- /test_diskspec.py: -------------------------------------------------------------------------------- 1 | from z3 import * 2 | import disk 3 | 4 | import unittest 5 | 6 | from yggdrasil.util import * 7 | from yggdrasil.diskspec import * 8 | from yggdrasil import test 9 | 10 | 11 | class AsyncDiskTest(test.DiskTest): 12 | def setUp(self): 13 | self._array = FreshDiskArray('array') 14 | 15 | def test_prefix(self): 16 | mach = Machine() 17 | d = AsyncDisk(mach, self._array) 18 | 19 | a = FreshBlock('block') 20 | b = FreshBlock('block') 21 | c = FreshBlock('block') 22 | 23 | d.write(5, a) 24 | d.write(6, b) 25 | d.write(7, c) 26 | 27 | d = d.crash(Machine()) 28 | 29 | self.show(Implies(mach.assumption, 30 | And(d.read(5) == a, d.read(6) != b), d.read(7) != c)) 31 | 32 | def test_reorder(self): 33 | mach = Machine() 34 | d = AsyncDisk(mach, self._array) 35 | 36 | a = FreshBlock('block') 37 | b = FreshBlock('block') 38 | c = FreshBlock('block') 39 | 40 | d.write(5, a) 41 | d.write(6, b) 42 | d.write(7, c) 43 | 44 | d = d.crash(Machine()) 45 | 46 | self.show(Implies(mach.assumption, 47 | And(d.read(6) == a, d.read(5) != b))) 48 | 49 | def test_nocrash(self): 50 | mach = Machine() 51 | d = AsyncDisk(mach, self._array) 52 | 53 | a = FreshBlock('block') 54 | b = FreshBlock('block') 55 | c = FreshBlock('block') 56 | 57 | d.write(5, a) 58 | d.write(6, b) 59 | d.write(7, c) 60 | 61 | d = d.crash(Machine()) 62 | 63 | self.show(Implies(mach.assumption, 64 | And(d.read(5) == a, d.read(6) == b, d.read(7) == c))) 65 | 66 | 67 | def test_write_two(self): 68 | mach = Machine() 69 | d = AsyncDisk(mach, self._array) 70 | i0 = FreshSize('i') 71 | i1 = FreshSize('i') 72 | x0 = FreshBlock('x') 73 | x1 = FreshBlock('x') 74 | assumption = i0 != i1 75 | 76 | oldv0 = d.read(i0) 77 | oldv1 = d.read(i1) 78 | 79 | d.write(i0, x0) 80 | d.write(i1, x1) 81 | 82 | newv0 = d.read(i0) 83 | newv1 = d.read(i1) 84 | self.prove(Implies(assumption, newv0 == x0)) 85 | self.prove(Implies(assumption, newv1 == x1)) 86 | 87 | d = d.crash(Machine()) 88 | anyv0 = d.read(i0) 89 | anyv1 = d.read(i1) 90 | 91 | self.prove(Implies(assumption, And( 92 | Or(anyv0 == oldv0, anyv0 == x0), 93 | Or(anyv1 == oldv1, anyv1 == x1)))) 94 | 95 | def test_flush(self): 96 | mach = Machine() 97 | d = AsyncDisk(mach, self._array) 98 | i0 = FreshSize('i') 99 | i1 = FreshSize('i') 100 | x0 = FreshBlock('x') 101 | x1 = FreshBlock('x') 102 | 103 | oldv0 = d.read(i0) 104 | oldv1 = d.read(i1) 105 | 106 | d.write(i0, x0) 107 | d.flush() 108 | d.write(i1, x1) 109 | 110 | d = d.crash(Machine()) 111 | anyv0 = d.read(i0) 112 | anyv1 = d.read(i1) 113 | 114 | assumption = And(i0 != i1, x1 != oldv1, mach.assumption) 115 | self.prove(Implies(assumption, Implies(anyv1 == x1, anyv0 == x0))) 116 | 117 | 118 | # implement SyncDisk on top of AsyncDisk 119 | class FlushingDisk: 120 | def __init__(self, disk): 121 | self._disk = disk 122 | 123 | def write(self, bid, data): 124 | self._disk.write(bid, data) 125 | self._disk.flush() 126 | 127 | def flush(self): 128 | pass 129 | 130 | def read(self, bid): 131 | return self._disk.read(bid) 132 | 133 | def crash(self, mach): 134 | return self.__class__(self._disk.crash(mach)) 135 | 136 | 137 | class SyncDiskTest(test.DiskTest): 138 | # SyncDisk is an AsyncDisk 139 | def test_syncdisk_is_async(self): 140 | array = DiskSort(fresh_name('array')) 141 | bid = 
FreshSize('bid') 142 | data = FreshBlock('data') 143 | 144 | spec_mach = Machine() 145 | impl_mach = Machine() 146 | 147 | ad = AsyncDisk(spec_mach, array) 148 | ad.write(bid, data) 149 | 150 | sd = SyncDisk(impl_mach, array) 151 | sd.write(bid, data) 152 | 153 | self.prove(ForAll([array] + impl_mach.control, 154 | Exists(spec_mach.control, 155 | ForAll([bid], sd.read(bid) == ad.read(bid))))) 156 | 157 | # AsyncDisk+flush is a SyncDisk 158 | def test_flushingdisk_is_sync(self): 159 | array = DiskSort(fresh_name('array')) 160 | bid = FreshSize('bid') 161 | data = FreshBlock('data') 162 | 163 | spec_mach = Machine() 164 | impl_mach = Machine() 165 | 166 | sd = SyncDisk(spec_mach, array) 167 | sd.write(bid, data) 168 | 169 | fd = FlushingDisk(AsyncDisk(impl_mach, array)) 170 | fd.write(bid, data) 171 | 172 | self.prove(ForAll([array] + impl_mach.control, 173 | Exists(spec_mach.control, 174 | ForAll([bid], sd.read(bid) == fd.read(bid))))) 175 | 176 | 177 | # implement SyncDisk on top of AsyncDisk 178 | class FlushingDisk: 179 | def __init__(self, disk): 180 | self._disk = disk 181 | 182 | def write(self, bid, data): 183 | self._disk.write(bid, data) 184 | self._disk.flush() 185 | 186 | def flush(self): 187 | pass 188 | 189 | def read(self, bid): 190 | return self._disk.read(bid) 191 | 192 | def crash(self, mach): 193 | return self.__class__(self._disk.crash(mach)) 194 | 195 | 196 | class SyncDiskTest(test.DiskTest): 197 | # SyncDisk is an AsyncDisk 198 | def test_syncdisk_is_async(self): 199 | array = FreshDiskArray('array') 200 | bid = FreshSize('bid') 201 | data = FreshBlock('data') 202 | 203 | spec_mach = Machine() 204 | impl_mach = Machine() 205 | 206 | ad = AsyncDisk(spec_mach, array) 207 | ad.write(bid, data) 208 | 209 | sd = SyncDisk(impl_mach, array) 210 | sd.write(bid, data) 211 | 212 | self.solve(ForAll(spec_mach.control, Not( 213 | ForAll([bid], sd.read(bid) == ad.read(bid))))) 214 | 215 | # AsyncDisk+flush is a SyncDisk 216 | def test_flushingdisk_is_sync(self): 217 | array = FreshDiskArray('array') 218 | bid = FreshSize('bid') 219 | data = FreshBlock('data') 220 | 221 | spec_mach = Machine() 222 | impl_mach = Machine() 223 | 224 | sd = SyncDisk(spec_mach, array) 225 | sd.write(bid, data) 226 | 227 | fd = FlushingDisk(AsyncDisk(impl_mach, array)) 228 | fd.write(bid, data) 229 | 230 | self.solve(ForAll(spec_mach.control, Not( 231 | ForAll([bid], sd.read(bid) == fd.read(bid))))) 232 | 233 | 234 | class VirtualAsyncDiskTest(test.RefinementTest): 235 | def create_spec(self, mach): 236 | array = FreshDiskArray('array') 237 | return AsyncDisk(mach, array) 238 | 239 | def create_impl(self, mach): 240 | array = FreshDiskArray('array') 241 | mapped = FreshDiskArray('mapped') 242 | return VirtualAsyncDisk(mach, mapped, array) 243 | 244 | def equivalence(self, spec, impl, **kwargs): 245 | vbn = Const(fresh_name('i'), spec.domain()) 246 | return ForAll([vbn], 247 | Implies(impl.is_mapped(vbn), 248 | impl.read(vbn) == spec.read(vbn))) 249 | 250 | def match_write(self, spec=None, impl=None): 251 | i0 = FreshSize('i') 252 | x0 = FreshBlock('x') 253 | yield (i0, x0) 254 | 255 | 256 | class InodeSpecTest(test.DiskTest): 257 | def setUp(self): 258 | disk.native = False 259 | disk.assertion.assertions = [] 260 | self._array = FreshDiskArray('array') 261 | self._attr_array = FreshDiskArray('attrs') 262 | self._mapped = FreshDiskArray('mapped') 263 | 264 | def test_compare_to_async(self): 265 | d1_mach = Machine() 266 | d1 = AsyncDisk(d1_mach, self._array) 267 | 268 | d2_mach = Machine() 269 | d2 = 
InodeSpec(d2_mach, [self._mapped, self._attr_array, self._array]) 270 | 271 | vbn = Const(fresh_name('i'), d1.domain()) 272 | 273 | i0 = Const(fresh_name('i'), d1.domain()) 274 | x0 = FreshBlock('x') 275 | 276 | pre = ForAll([vbn], Implies(d2.is_mapped(vbn), d1.read(vbn) == d2.read(vbn))) 277 | 278 | d1.write(i0, x0) 279 | d2.begin_tx() 280 | d2.write(i0, x0) 281 | d2.commit_tx() 282 | 283 | d1 = d1.crash(Machine()) 284 | d2 = d2.crash(Machine()) 285 | 286 | assumption = And(d1_mach.assumption, d2_mach.assumption) 287 | 288 | post = ForAll([vbn], Implies(d2.is_mapped(vbn), d1.read(vbn) == d2.read(vbn))) 289 | 290 | C1 = d1_mach.control 291 | 292 | self.solve( 293 | ForAll(C1, Not( 294 | Implies(assumption, Implies(pre, post))))) 295 | 296 | def test_bmap(self): 297 | self.enable_symbolic_execution() 298 | 299 | mach = Machine() 300 | d2 = InodeSpec(mach, [self._mapped, self._attr_array, self._array]) 301 | 302 | vbn = FreshSize('vbn') 303 | 304 | mapped = d2.is_mapped(vbn) 305 | 306 | d2.begin_tx() 307 | d2.bmap(vbn) 308 | d2.commit_tx() 309 | 310 | self.prove(Implies(And(*disk.assertion.assertions), Implies(Not(mapped), d2.is_mapped(vbn)))) 311 | 312 | def test_bunmap(self): 313 | mach = Machine() 314 | d2 = InodeSpec(mach, [self._mapped, self._attr_array, self._array]) 315 | 316 | vbn = FreshSize('vbn') 317 | 318 | mapped = d2.is_mapped(vbn) 319 | 320 | d2.begin_tx() 321 | d2.bunmap(vbn) 322 | d2.commit_tx() 323 | 324 | self.prove(Implies(mapped, Not(d2.is_mapped(vbn)))) 325 | 326 | 327 | if __name__ == '__main__': 328 | test.main() 329 | -------------------------------------------------------------------------------- /test_fsck.pyx: -------------------------------------------------------------------------------- 1 | import cython 2 | import sys 3 | 4 | from z3 import * 5 | # from disk import * 6 | 7 | from diskimpl cimport Concat32 8 | 9 | from xv6inode cimport IndirectInodeDisk 10 | from xv6inode import create_fuse_inode 11 | 12 | def evalexp(v, *args, **kwargs): 13 | if callable(v): 14 | return evalexp(v(*args, **kwargs)) 15 | elif hasattr(v, 'eval'): 16 | return evalexp(v.eval(*args, **kwargs)) 17 | elif isinstance(v, bool): 18 | return v 19 | else: 20 | print v 21 | assert False 22 | 23 | 24 | class Node(object): 25 | pass 26 | 27 | 28 | class Lambda(Node): 29 | def __init__(self, fn): 30 | self._fn = fn 31 | 32 | def eval(self, *args, **kwargs): 33 | kwargs.pop('ranges', None) 34 | res = self._fn(*args, **kwargs) 35 | return evalexp(res) 36 | 37 | 38 | class Quantifier(Node): 39 | def __init__(self, bindings, expression): 40 | if callable(expression): 41 | expression = Lambda(expression) 42 | 43 | self._bindings = bindings 44 | self._expression = expression 45 | 46 | def eval(self, *args, **kwargs): 47 | if len(self._bindings) == 1: 48 | return self.eval1(*args, **kwargs) 49 | else: 50 | assert(False) 51 | 52 | def eval1(self, *args, **kwargs): 53 | ranges = kwargs.get('ranges', {}) 54 | 55 | v1 = self._bindings[0] 56 | r1 = ranges.get(v1, [2 ** v1.size()]) 57 | if len(r1) == 1: 58 | r1 = [0, r1[0]] 59 | 60 | model = {} 61 | 62 | vv1 = r1[0] 63 | while vv1 < r1[1]: 64 | fargs = args + (vv1,) 65 | res = self.handle(fargs, evalexp(self._expression, *fargs, **kwargs)) 66 | if res is not None: 67 | # if isinstance(res, dict): 68 | # model.update(res) 69 | # res = False 70 | if not res: 71 | # model.update(kwargs) 72 | # model[v1] = vv1 73 | # return model 74 | return res 75 | return res 76 | vv1 += 1 77 | 78 | return self.done() 79 | 80 | def z3(self, *args, **kwargs): 81 | fargs = 
args + tuple(self._bindings) 82 | return self._z3(self._bindings, self._expression.z3(*fargs, **kwargs)) 83 | 84 | 85 | class ForAll(Quantifier): 86 | def _z3(self, *args, **kwargs): 87 | return z3.ForAll(*args, **kwargs) 88 | 89 | def handle(self, arg, res): 90 | if not res: 91 | return False 92 | 93 | def done(self): 94 | return True 95 | 96 | 97 | class Exists(Quantifier): 98 | def _z3(self, *args, **kwargs): 99 | return z3.Exists(*args, **kwargs) 100 | 101 | def handle(self, arg, res): 102 | if res: 103 | return True 104 | 105 | def done(self): 106 | return False 107 | 108 | 109 | class Implies(Node): 110 | def __init__(self, A, B): 111 | self.A = A 112 | self.B = B 113 | 114 | def z3(self, *args, **kwargs): 115 | return z3.Implies(self.A, self.B) 116 | 117 | def eval(self, *args, **kwargs): 118 | # print "%s => %s" % (self.A, self.B) 119 | return not evalexp(self.A, *args, **kwargs) or evalexp(self.B, *args, **kwargs) 120 | 121 | 122 | class And(Node): 123 | def __init__(self, *children): 124 | self._children = children 125 | 126 | def eval(self, *args, **kwargs): 127 | for i in self._children: 128 | if not evalexp(i, *args, **kwargs): 129 | return False 130 | return True 131 | 132 | 133 | class Not(Node): 134 | def __init__(self, child): 135 | self._child = child 136 | 137 | def z3(self, disk): 138 | return z3.Not(*[ f(disk) for f in self._children ]) 139 | 140 | def eval(self, *args, **kwargs): 141 | if isinstance(self._child, bool): 142 | return not self._child 143 | else: 144 | return not self._child.eval(*args, **kwargs) 145 | 146 | 147 | 148 | class SAMap(object): 149 | def __init__(self): 150 | self._map = {} 151 | 152 | def __call__(self, arg): 153 | return self[arg] 154 | 155 | def __getitem__(self, arg): 156 | return SAEQ(self._map, arg) 157 | 158 | def __setitem__(self, arg, value): 159 | assert arg not in self._map or self._map[arg] == value 160 | self._map[arg] = value 161 | 162 | def __str__(self): 163 | return str(self._map) 164 | 165 | 166 | class SAEQ(object): 167 | def __init__(self, map, arg): 168 | self._map = map 169 | self._arg = arg 170 | 171 | def __eq__(self, value): 172 | if self._arg in self._map: 173 | return self._map[self._arg] == value 174 | else: 175 | self._map[self._arg] = value 176 | return True 177 | 178 | 179 | @cython.locals(impl=IndirectInodeDisk) 180 | def run(): 181 | predicate = (lambda impl, ino, off, pre_reverse_map=None: 182 | And( 183 | # Mapped blocks should be allocated 184 | Implies(impl.is_mapped(Concat32(ino, off)), 185 | Not(impl.is_free(impl.mappingi(Concat32(ino, off))))), 186 | # Mapping should be injective 187 | Implies(impl.is_mapped(Concat32(ino, off)), 188 | pre_reverse_map(impl.mappingi(Concat32(ino, off))) == Concat32(ino, off)))) 189 | 190 | 191 | ino = BitVec('ino', 32) 192 | off = BitVec('off', 32) 193 | 194 | pre = ForAll([ino], ForAll([off], predicate)) 195 | 196 | func = SAMap() 197 | impl = create_fuse_inode(sys.argv[1:])._idisk 198 | 199 | # Corrupt the image 200 | # impl.begin_tx() 201 | # impl._idisk.free(impl.mappingi(Concat32(2, 0))) 202 | # print impl.is_mapped(Concat32(2, 0)) 203 | # impl.commit_tx() 204 | 205 | print pre.eval(impl, pre_reverse_map=func, ranges={ino: [1, 100], off: [0, 512]}) 206 | print func 207 | -------------------------------------------------------------------------------- /test_fsck_run.py: -------------------------------------------------------------------------------- 1 | import test_fsck 2 | test_fsck.run() 3 | 
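The fsck test above reuses the verification predicates as runnable checks: instead of handing the ForAll/Implies formulas to Z3, it evaluates them over concrete ranges of inodes and offsets taken from a real file-system image, and uses a single-assignment map (SAMap) to check that the block mapping is injective. Below is a minimal, self-contained sketch of that idea in pure Python 2; the names `mapping`, `seen`, and `injective` are made up for illustration, with a toy mapping standing in for the real IndirectInodeDisk.

    # Bounded quantifier evaluation, mirroring Quantifier.eval1() in test_fsck.pyx.
    def forall(rng, pred):
        return all(pred(v) for v in rng)

    def implies(a, b):
        return (not a) or b

    # Toy virtual-to-physical block mapping; vbns 0 and 2 collide, so it is not injective.
    mapping = {0: 10, 1: 11, 2: 10}

    seen = {}   # plays the role of SAMap / pre_reverse_map
    def injective(vbn):
        pbn = mapping.get(vbn)
        if pbn is None:
            return True                    # unmapped blocks impose no constraint
        if pbn in seen and seen[pbn] != vbn:
            return False                   # one physical block reached from two vbns
        seen[pbn] = vbn
        return True

    print forall(range(3), injective)      # prints False: block 10 is mapped twice

As in the real checker, a failing run pinpoints a corrupted image by concrete evaluation alone, without invoking Z3.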
-------------------------------------------------------------------------------- /test_inode.py: -------------------------------------------------------------------------------- 1 | from dirinode import DirImpl 2 | 3 | from yggdrasil import test 4 | from yggdrasil.dirspec import * 5 | 6 | 7 | OffsetSort = BitVecSort(32) 8 | 9 | 10 | class ISpec(object): 11 | def __init__(self, start, end): 12 | self._map = FreshUFunction('map', SizeSort, SizeSort) 13 | self._revmap = FreshUFunction('revmap', SizeSort, SizeSort) 14 | self._freemap = FreshUFunction('free', SizeSort, BoolSort()) 15 | 16 | self._datafn = FreshDiskArray('datafn') 17 | 18 | self._start = BitVecVal(start, 32) 19 | self._end = BitVecVal(end, 32) 20 | 21 | def alloc(self): 22 | block = FreshSize('alloc') 23 | assertion(self.is_free(block)) 24 | self._freemap = self._freemap.update(block, BoolVal(False)) 25 | return block 26 | 27 | def free(self, block, guard=BoolVal(True)): 28 | self._freemap = self._freemap.update(block, BoolVal(True), guard=guard) 29 | 30 | def is_free(self, block): 31 | return self._freemap(block) 32 | 33 | def inrange(self, off): 34 | return And(ULE(self._start, off), ULE(off, self._end)) 35 | 36 | ############# 37 | 38 | def is_mapped(self, ino, off): 39 | vbn = Concat(ino, off) 40 | block = self._map(vbn) 41 | return And(self.inrange(off), Not(self.is_free(block)), self._revmap(block) == vbn) 42 | 43 | ############# 44 | 45 | def bmap(self, ino, off): 46 | if Or(self.is_mapped(ino, off), Not(self.inrange(off))): 47 | return 48 | 49 | vbn = Concat(ino, off) 50 | 51 | block = self.alloc() 52 | 53 | self._map = self._map.update(vbn, block) 54 | self._revmap = self._revmap.update(block, vbn) 55 | 56 | self._datafn = self._datafn.update(block, ConstBlock(0)) 57 | 58 | def bunmap(self, ino, off): 59 | unmap = self.is_mapped(ino, off) 60 | block = self._map(Concat(ino, off)) 61 | self.free(block, guard=unmap) 62 | 63 | def _read(self, block): 64 | return self._datafn(block) 65 | 66 | def read(self, ino, off): 67 | return If(self.is_mapped(ino, off), 68 | self._read(self._map(Concat(ino, off))), ConstBlock(0)) 69 | 70 | def _write(self, block, value): 71 | self._datafn = self._datafn.update(block, value) 72 | 73 | def write(self, ino, off, value): 74 | if not self.is_mapped(ino, off): 75 | return 76 | self._write(self._map(Concat(ino, off)), value) 77 | 78 | 79 | class IExtSpec(object): 80 | def __init__(self, dat, ext): 81 | self._dat = dat 82 | self._ext = ext 83 | 84 | def alloc(self, *args, **kwargs): 85 | return self._dat.alloc(*args, **kwargs) 86 | 87 | def free(self, *args, **kwargs): 88 | return self._dat.free(*args, **kwargs) 89 | 90 | def is_free(self, *args, **kwargs): 91 | return self._dat.is_free(*args, **kwargs) 92 | 93 | ############ 94 | 95 | def bmap(self, *args, **kwargs): 96 | return self._dat.bmap(*args, **kwargs) 97 | 98 | def bunmap(self, *args, **kwargs): 99 | return self._dat.bunmap(*args, **kwargs) 100 | 101 | def _read(self, *args, **kwargs): 102 | return self._dat._read(*args, **kwargs) 103 | 104 | def _write(self, *args, **kwargs): 105 | return self._dat._write(*args, **kwargs) 106 | 107 | def read(self, *args, **kwargs): 108 | return self._dat.read(*args, **kwargs) 109 | 110 | def write(self, *args, **kwargs): 111 | return self._dat.write(*args, **kwargs) 112 | 113 | ### 114 | 115 | def ebmap(self, *args, **kwargs): 116 | return self._ext.bmap(*args, **kwargs) 117 | 118 | def ebunmap(self, *args, **kwargs): 119 | return self._ext.bunmap(*args, **kwargs) 120 | 121 | def eread(self, *args, 
**kwargs): 122 | return self._ext.read(*args, **kwargs) 123 | 124 | def ewrite(self, *args, **kwargs): 125 | return self._ext.write(*args, **kwargs) 126 | 127 | def crash(self, *args, **kwargs): 128 | return self 129 | 130 | 131 | class BlockImpl(object): 132 | def __init__(self, idisk, freedisk, Bitmap, Allocator): 133 | self._idisk = idisk 134 | 135 | ######################### 136 | 137 | def _read(self, block): 138 | return self._idisk._read(block) 139 | 140 | def _write(self, block, data): 141 | self._idisk._write(block, data) 142 | 143 | def read(self, ino, off): 144 | eoff = Extract(9 - 1, 0, off) 145 | 146 | # Off is direct 147 | if UGE(off, 512): 148 | return ConstBlock(0) 149 | 150 | imap = self._idisk.eread(ino, BitVecVal(0, 32)) 151 | block = imap[eoff] 152 | if self._idisk.is_free(block): 153 | return ConstBlock(0) 154 | return self._read(block) 155 | 156 | def write(self, ino, off, value): 157 | eoff = Extract(9 - 1, 0, off) 158 | 159 | # Off is direct 160 | if UGE(off, 512): 161 | return ConstBlock(0) 162 | 163 | imap = self._idisk.eread(ino, BitVecVal(0, 32)) 164 | block = imap[eoff] 165 | if self._idisk.is_free(block): 166 | return ConstBlock(0) 167 | self._write(block, value) 168 | 169 | 170 | def bmap(self, ino, off): 171 | eoff = Extract(9 - 1, 0, off) 172 | 173 | # Off is direct 174 | if UGE(off, 512): 175 | return 176 | 177 | imap = self._idisk.eread(ino, BitVecVal(0, 32)) 178 | 179 | old_lbn = imap[eoff] 180 | 181 | # Off is not-mapped 182 | if old_lbn == 0: 183 | 184 | lbn = self._idisk.alloc() 185 | 186 | self.write(ino, off, ConstBlock(0)) 187 | 188 | imap[eoff] = lbn 189 | 190 | self._idisk.ewrite(ino, BitVecVal(0, 32), imap) 191 | 192 | def crash(self, *args, **kwargs): 193 | return self 194 | 195 | 196 | class ISpecTest(test.RefinementTest): 197 | def create_spec(self, mach): 198 | dat = ISpec(0, 511) 199 | ext = ISpec(0, 0) 200 | return IExtSpec(dat, ext) 201 | 202 | def create_impl(self, mach): 203 | dat = ISpec(0, 10) 204 | ext = ISpec(0, 2) 205 | inner = IExtSpec(dat, ext) 206 | freemaparray = FreshDiskArray('freemaparray') 207 | return BlockImpl(inner, AsyncDisk(mach, freemaparray), BitmapSpec, Allocator64) 208 | 209 | def equivalence(self, spec, impl, **kwargs): 210 | ino = FreshBitVec('ino', 32) 211 | off = FreshBitVec('off', 32) 212 | 213 | return ForAll([ino, off], 214 | spec.read(ino, off) == impl.read(ino, off)) 215 | 216 | 217 | @test.z3_option(AUTO_CONFIG=False) 218 | def match_bmap(self): 219 | ino = FreshBitVec('ino', 32) 220 | off = FreshBitVec('off', 32) 221 | yield (ino, off) 222 | 223 | def test_create_spec(self): 224 | ISpec(0, 511) 225 | 226 | def test_spec_alloc(self): 227 | spec = ISpec(0, 511) 228 | 229 | block = spec.alloc() 230 | self.prove(Not(spec.is_free(block))) 231 | 232 | def test_spec_alloc_unalloc(self): 233 | spec = ISpec(0, 511) 234 | 235 | block = spec.alloc() 236 | spec.free(block) 237 | self.prove(spec.is_free(block)) 238 | 239 | def test_spec_bmap_simple(self): 240 | spec = ISpec(0, 511) 241 | 242 | ino = FreshIno('ino') 243 | off = FreshBitVec('off', 32) 244 | 245 | spec.bunmap(ino, off) 246 | self.prove(Not(spec.is_mapped(ino, off))) 247 | 248 | def test_spec_bmap_simple2(self): 249 | spec = ISpec(0, 511) 250 | 251 | ino = FreshIno('ino') 252 | off = BitVecVal(0, 32) 253 | 254 | spec.bmap(ino, off) 255 | self.pprove(spec.is_mapped(ino, off)) 256 | 257 | def test_spec_bmap(self): 258 | spec = ISpec(0, 511) 259 | 260 | ino = FreshIno('ino') 261 | off = FreshBitVec('off', 32) 262 | block = FreshBlock('block') 263 | 264 | 
assertion(ULT(off, 512)) 265 | 266 | spec.bmap(ino, off) 267 | spec.write(ino, off, block) 268 | self.pprove(spec.read(ino, off) == block) 269 | 270 | 271 | if __name__ == '__main__': 272 | test.main() 273 | -------------------------------------------------------------------------------- /test_inodepack.py: -------------------------------------------------------------------------------- 1 | from z3 import * 2 | import disk 3 | from inodepack import InodePackDisk 4 | 5 | from yggdrasil.ufarray import * 6 | from yggdrasil.util import * 7 | from yggdrasil.diskspec import * 8 | from yggdrasil import test 9 | from yggdrasil.test import * 10 | 11 | 12 | def FreshAttr(): 13 | size = FreshSize('bsize') 14 | mtime = FreshSize('mtime') 15 | mode = FreshSize('mode') 16 | nlink = FreshSize('nlink') 17 | return Stat(size, mtime, mode, nlink) 18 | 19 | 20 | class InodePackDiskRefinement(RefinementTest): 21 | def create_spec(self, mach): 22 | dataarray = FreshDiskArray('dataarray', domain=BitVecSort(32)) 23 | datadisk = AsyncDisk(mach, dataarray) 24 | metaarray = FreshDiskArray('metaarray', domain=BitVecSort(32)) 25 | metadisk = AsyncDisk(mach, metaarray) 26 | return InodePackSpec(metadisk, datadisk) 27 | 28 | def create_impl(self, mach): 29 | array = FreshDiskArray('array', domain=BitVecSort(32)) 30 | disk = AsyncDisk(mach, array) 31 | return InodePackDisk(disk, disk) 32 | 33 | def equivalence(self, spec, impl, **args): 34 | ino = FreshBitVec('ino', 32) 35 | ioff = FreshBitVec('ioff', 9) 36 | return ForAll([ino, ioff], Implies(ULT(ioff, 11), And( 37 | spec.get_mapping(ino, ioff) == impl._get_mapping(ino, ioff), 38 | spec.get_iattr(ino) == impl.get_iattr(ino) 39 | ))) 40 | 41 | def match_set_iattr(self): 42 | i0 = FreshBitVec('ino', 32) 43 | x0 = FreshAttr() 44 | yield (i0, x0) 45 | 46 | def match_set_mapping(self): 47 | i0 = FreshBitVec('ino', 32) 48 | off0 = FreshBitVec('off', 9) 49 | ptr0 = FreshBitVec('ptr', 64) 50 | yield (i0, off0, ptr0) 51 | 52 | 53 | if __name__ == '__main__': 54 | test.main() 55 | -------------------------------------------------------------------------------- /test_kv.py: -------------------------------------------------------------------------------- 1 | import disk 2 | 3 | from kvspec import * 4 | from kvimpl import * 5 | 6 | from yggdrasil.diskspec import * 7 | from yggdrasil import test 8 | 9 | 10 | class KVTest(test.DiskTest): 11 | def setUp(self): 12 | disk.native = False 13 | disk.assertion.assertions = [] 14 | 15 | def test_kvspec(self): 16 | mach = Machine() 17 | fn_size = Function(fresh_name('kvspec.size'), KeySort, StringOffsetSort) 18 | fn_data = Function(fresh_name('kvspec.data'), KeySort, StringOffsetSort, StringElementSort) 19 | db = KVSpec(mach, fn_size, fn_data) 20 | k = FreshKey('k') 21 | v = FreshString('v') 22 | db.put(k, v) 23 | self.prove(db.get(k) == v) 24 | 25 | def test_kvimpl(self): 26 | mach = Machine() 27 | disk = AsyncDisk(mach, FreshDiskArray('array')) 28 | sig = [StringElementSort] * KVImpl.KEY_SIZE + [SizeSort] 29 | f = Function(fresh_name('hash'), *sig) 30 | hashfn = lambda key: f(*[key[i] for i in range(KVImpl.KEY_SIZE)]) 31 | db = KVImpl(disk, hashfn) 32 | k = FreshString('k', size=KVImpl.KEY_SIZE) 33 | v = FreshString('v') 34 | assumption = ULE(v.size(), KVImpl.VALUE_SIZE_LIMIT) 35 | assumption = And(mach.assumption, assumption) 36 | db.put(k, v) 37 | self.prove(Implies(assumption, db.get(k) == v)) 38 | 39 | if __name__ == '__main__': 40 | test.main() 41 | -------------------------------------------------------------------------------- 
/test_lfs.py: -------------------------------------------------------------------------------- 1 | import errno 2 | from lfs import LFS 3 | from disk import assertion, debug, Stat 4 | 5 | from yggdrasil.diskspec import * 6 | from yggdrasil import test 7 | 8 | 9 | class LFSSpec(object): 10 | def __init__(self, mach, dirfn, parentfn, modefn, mtimefn): 11 | self._mach = mach 12 | self._dirfn = dirfn 13 | self._modefn = modefn 14 | self._mtimefn = mtimefn 15 | self._parentfn = parentfn 16 | 17 | def lookup(self, parent, name): 18 | ino = self._dirfn(parent, name) 19 | return If(0 < ino, ino, -errno.ENOENT) 20 | 21 | def get_attr(self, ino): 22 | return Stat(bsize=0, 23 | mode=self._modefn(ino), 24 | mtime=self._mtimefn(ino)) 25 | 26 | def mknod(self, parent, name, mode, mtime): 27 | if 0 < self.lookup(parent, name): 28 | return BitVecVal(-errno.EEXIST, 64) 29 | 30 | on = self._mach.create_on([]) 31 | 32 | ino = FreshBitVec('ino', 64) 33 | assertion(0 < ino) 34 | assertion(Not(0 < self._parentfn(ino))) 35 | 36 | self._dirfn = self._dirfn.update((parent, name), ino, guard=on) 37 | self._modefn = self._modefn.update(ino, mode, guard=on) 38 | self._mtimefn = self._mtimefn.update(ino, mtime, guard=on) 39 | self._parentfn = self._parentfn.update(ino, parent, guard=on) 40 | 41 | return ino 42 | 43 | def crash(self, mach): 44 | return self.__class__(mach, self._dirfn, self._parentfn, self._modefn, self._mtimefn) 45 | 46 | 47 | class LFSRefinement(test.RefinementTest): 48 | def create_spec(self, mach): 49 | dirfn = FreshUFunction('dirfn', SizeSort, SizeSort, SizeSort) 50 | parentfn = FreshUFunction('parentfn', SizeSort, SizeSort) 51 | modefn = FreshUFunction('modefn', SizeSort, SizeSort) 52 | mtimefn = FreshUFunction('mtimefn', SizeSort, SizeSort) 53 | return LFSSpec(mach, dirfn, parentfn, modefn, mtimefn) 54 | 55 | def create_impl(self, mach): 56 | array = FreshDiskArray('disk') 57 | disk = AsyncDisk(mach, array) 58 | return LFS(disk) 59 | 60 | def pre_post(self, spec, impl, **kwargs): 61 | name = FreshBitVec('name.pre', 64) 62 | parent = BitVecVal(1, 64) 63 | 64 | sb = impl._disk.read(0) 65 | imap = impl._disk.read(sb[2]) 66 | off = FreshBitVec('off', 9) 67 | 68 | pre = ForAll([name], Implies(name != 0, And( 69 | Implies(0 < spec._dirfn(parent, name), 70 | parent == spec._parentfn(spec._dirfn(parent, name))), 71 | 72 | Implies(0 < impl.lookup(parent, name), 73 | And(impl.lookup(parent, name) < sb[1], 74 | spec.get_attr(spec.lookup(parent, name)) == impl.get_attr(impl.lookup(parent, name)))), 75 | spec.lookup(parent, name) == impl.lookup(parent, name)))) 76 | 77 | pre = And(pre, 78 | ForAll([off], Implies(ZeroExt(64 - off.size(), off) < sb[1], And(0 < imap[off], imap[off] < sb[0])))) 79 | 80 | pre = And(pre, 81 | # allocated blocks are in range ]0..allocator[ 82 | 0 < sb[2], sb[2] < sb[0], 83 | 0 < imap[1], imap[1] < sb[0], 84 | 85 | # root dir inode has been allocated 86 | 1 < sb[1], 87 | ) 88 | 89 | (spec, impl, (_, name0, _, _), (sino, iino)) = yield pre 90 | 91 | self.show(pre) 92 | 93 | if iino < 0: 94 | iino = impl.lookup(parent, name0) 95 | 96 | if self._solve(sino == iino): 97 | assertion(sino == iino) 98 | 99 | sb = impl._disk.read(0) 100 | imap = impl._disk.read(sb[2]) 101 | 102 | post = ForAll([name], Implies(name != 0, And( 103 | Implies(0 < spec._dirfn(parent, name), 104 | parent == spec._parentfn(spec._dirfn(parent, name))), 105 | 106 | Implies(0 < impl.lookup(parent, name), 107 | And(impl.lookup(parent, name) < sb[1], 108 | spec.get_attr(spec.lookup(parent, name)) == 
impl.get_attr(impl.lookup(parent, name)))), 109 | spec.lookup(parent, name) == impl.lookup(parent, name)))) 110 | 111 | post = And(post, 112 | ForAll([off], Implies(ZeroExt(64 - off.size(), off) < sb[1], And(0 < imap[off], imap[off] < sb[0])))) 113 | 114 | post = And(post, 115 | # allocated blocks are in range ]0..allocator[ 116 | 0 < sb[2], sb[2] < sb[0], 117 | 0 < imap[1], imap[1] < sb[0], 118 | 119 | # root dir inode has been allocated 120 | 1 < sb[1], 121 | ) 122 | 123 | yield post 124 | 125 | def match_mknod(self): 126 | parent = BitVecVal(1, 64) 127 | name = FreshBitVec('name', 64) 128 | mode = FreshBitVec('mode', 64) 129 | mtime = FreshBitVec('mtime', 64) 130 | assertion(name != 0) 131 | yield (parent, name, mode, mtime) 132 | 133 | # test 134 | 135 | # def test_foo(self): 136 | # mach = Machine() 137 | # impl = self.create_impl(mach) 138 | # 139 | # parent = BitVecVal(1, 64) 140 | # name = BitVecVal(0xdeadbeef, 64) 141 | # mode = BitVecVal(0x1337, 64) 142 | # 143 | # sb = impl._disk.read(0) 144 | # imap = impl._disk.read(sb[2]) 145 | # 146 | # name0 = FreshSize('name') 147 | # 148 | # pre = And( 149 | # # inode alloc 150 | # 0 < sb[1], sb[1] < 512, 151 | # 152 | # # allocated blocks are in range ]0..allocator[ 153 | # 0 < sb[2], sb[2] < sb[0], 154 | # 0 < imap[1], imap[1] < sb[0], 155 | # 0 < imap[1], sb[2] < imap[1], 156 | # 157 | # # root dir inode has been allocated 158 | # 1 < sb[1], 159 | # 160 | # ForAll([name0], 161 | # Implies(0 < impl.lookup(parent, name0), 162 | # And( 163 | # impl.lookup(parent, name0) < sb[1], 164 | # imap[Extract(8, 0, impl.lookup(parent, name0))] < sb[0])))) 165 | # 166 | # res = impl.mknod(parent, name, mode) 167 | # if res < 0: 168 | # pass 169 | # else: 170 | # ino = impl.lookup(parent, name) 171 | # v = impl.get_attr(ino) 172 | # self.psolve(pre, v != mode) 173 | 174 | 175 | 176 | 177 | if __name__ == '__main__': 178 | test.main() 179 | -------------------------------------------------------------------------------- /test_partition.py: -------------------------------------------------------------------------------- 1 | from partition import Partition 2 | import disk 3 | 4 | from yggdrasil.diskspec import * 5 | from yggdrasil import test 6 | 7 | 8 | class PartitionRefinement(test.RefinementTest): 9 | def equivalence(self, spec, impl, **kwargs): 10 | bid = FreshSize('eq-bid') 11 | return ForAll([bid], And( 12 | Implies(impl[0].valid(bid), spec[0].read(bid) == impl[0].read(bid)), 13 | Implies(impl[1].valid(bid), spec[1].read(bid) == impl[1].read(bid)), 14 | )) 15 | 16 | def crash(self, p): 17 | return [p[0].crash(Machine()), p[1].crash(Machine())] 18 | 19 | crash_impl = crash 20 | crash_spec = crash 21 | 22 | def create_spec(self, mach): 23 | a1 = FreshDiskArray('spec-array-1') 24 | a2 = FreshDiskArray('spec-array-2') 25 | d1 = AsyncDisk(mach, a1) 26 | d2 = AsyncDisk(mach, a2) 27 | return [d1, d2] 28 | 29 | def create_impl(self, mach): 30 | a1 = FreshDiskArray('impl-array') 31 | d1 = AsyncDisk(mach, a1) 32 | a = FreshSize('start') 33 | b = FreshSize('mid') 34 | c = FreshSize('end') 35 | 36 | disk.assertion(ULT(a, b)) 37 | disk.assertion(ULT(b, c)) 38 | 39 | p1 = Partition(d1, a, b) 40 | p2 = Partition(d1, b, c) 41 | 42 | return [p1, p2] 43 | 44 | def call_write(self, spec, impl, args): 45 | dev, bid, block = args 46 | 47 | if dev == 0: 48 | spec[0].write(bid, block) 49 | impl[0].write(bid, block) 50 | else: 51 | spec[1].write(bid, block) 52 | impl[1].write(bid, block) 53 | 54 | @test.z3_option(AUTO_CONFIG=False) 55 | def match_write(self): 56 | dev = 
FreshSize('dev') 57 | bid = FreshSize('bid') 58 | block = FreshBlock('block') 59 | 60 | disk.assertion(ULT(dev, 2)) 61 | 62 | yield (dev, bid, block) 63 | match_write.debug = True 64 | 65 | 66 | if __name__ == '__main__': 67 | test.main() 68 | -------------------------------------------------------------------------------- /test_tenaciousd.py: -------------------------------------------------------------------------------- 1 | import disk 2 | from logspec import * 3 | from tenaciousd import * 4 | 5 | from yggdrasil.diskspec import * 6 | from yggdrasil import test 7 | 8 | class TenaciousdLogTest(test.DiskTest): 9 | def setUp(self): 10 | disk.native = False 11 | disk.assertion.assertions = [] 12 | 13 | def test_append(self): 14 | spec_mach = Machine() 15 | datafn = Function(fresh_name('datafn'), SizeSort, SizeSort, BlockElementSort) 16 | sizefn = Function(fresh_name('sizefn'), SizeSort, SizeSort) 17 | nextfn = Function(fresh_name('nextfn'), SizeSort, SizeSort) 18 | tail = FreshSize('tailx') 19 | spec = LogSpec(spec_mach, datafn, sizefn, nextfn, tail) 20 | 21 | impl_mach = Machine() 22 | dataarray = FreshDiskArray('dataarray') 23 | impl = TenaciousdLog(SyncDisk(impl_mach, dataarray)) 24 | 25 | i = FreshSize('i') 26 | off = FreshSize('off') 27 | 28 | spec_entry = spec[i] 29 | impl_entry = impl[i] 30 | 31 | pre = And(spec.tail == impl.tail, spec_entry[off] == impl_entry[off]) 32 | 33 | spec_data = Function(fresh_name('spec_blk'), SizeSort, BlockElementSort) 34 | impl_data = [FreshBlock('impl_blk') for c in range(TenaciousdLog.LOG_MAX_ENTRY_BLOCKS)] 35 | n = FreshSize('n') 36 | 37 | x = FreshSize('x') 38 | inp = ForAll([x], LogSpecEntry(spec_data, n)[x] == LogImplEntry(impl_data)[x]) 39 | 40 | spec.append(spec_data, n) 41 | impl.append(impl_data, n) 42 | 43 | spec = spec.crash(Machine()) 44 | impl = impl.crash(Machine()) 45 | 46 | spec_entry = spec[i] 47 | impl_entry = impl[i] 48 | 49 | post = And(spec.tail == impl.tail, spec_entry[off] == impl_entry[off]) 50 | 51 | assumption = And(inp, ULT(0, i), ULE(off, 509), spec_mach.assumption, impl_mach.assumption, *disk.assertion.assertions) 52 | 53 | self.solve(assumption, ForAll(spec_mach.control, Not(Implies(pre, post)))) 54 | 55 | if __name__ == '__main__': 56 | test.main() 57 | -------------------------------------------------------------------------------- /test_ufarray.py: -------------------------------------------------------------------------------- 1 | from z3 import * 2 | import unittest 3 | 4 | from yggdrasil.util import * 5 | from yggdrasil.ufarray import * 6 | from yggdrasil import test 7 | 8 | 9 | def StoreIn(arr, bid, off, field): 10 | i = arr[bid] 11 | i = Store(i, off, field) 12 | return Store(arr, bid, i) 13 | 14 | 15 | def UpdateIn(arr, bid, off, field): 16 | block = arr(bid) 17 | block[off] = field 18 | return arr.update(bid, block) 19 | 20 | 21 | class ArrayTest(test.DiskTest): 22 | def setUp(self): 23 | pass 24 | 25 | # Update a whole fresh block 26 | def test_array_update(self): 27 | spec = Array('spec', BitVecSort(64), ArraySort(BitVecSort(9), BitVecSort(64))) 28 | impl = FreshDiskArray('impl') 29 | 30 | 31 | bid = BitVec('bid', 64) 32 | off = BitVec('off', 9) 33 | 34 | bid0 = BitVec('bid0', 64) 35 | 36 | dataimpl = FreshBlock('blockimpl') 37 | dataspec = Array('dataspec', BitVecSort(9), BitVecSort(64)) 38 | 39 | dataoff = BitVec('off0', 9) 40 | 41 | pre = And( 42 | ForAll([dataoff], dataimpl[dataoff] == dataspec[dataoff]), 43 | ForAll([bid, off], spec[bid][off] == impl(bid)[off])) 44 | 45 | impl = impl.update(bid0, dataimpl) 
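        # The impl side has just written the whole fresh block through the
        # uninterpreted-function array; the spec side mirrors it below with a
        # plain Z3 Store, and the proof obligation is that both views agree
        # on every (bid, off) pair.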
46 | 47 | spec = Store(spec, bid0, dataspec) 48 | 49 | post = ForAll([bid, off], spec[bid][off] == impl(bid)[off]) 50 | self.prove(Implies(pre, post)) 51 | 52 | # Update a field within a block, write block back 53 | def test_block_update(self): 54 | spec = Array('spec', BitVecSort(64), ArraySort(BitVecSort(9), BitVecSort(64))) 55 | impl = FreshDiskArray('impl') 56 | 57 | bid = BitVec('bid', 64) 58 | off = BitVec('off', 9) 59 | 60 | bid0 = BitVec('bid0', 64) 61 | off0 = BitVec('off0', 9) 62 | field0 = BitVec('field', 64) 63 | 64 | pre = ForAll([bid, off], spec[bid][off] == impl(bid)[off]) 65 | 66 | spec = StoreIn(spec, bid0, off0, field0) 67 | impl = UpdateIn(impl, bid0, off0, field0) 68 | 69 | post = ForAll([bid, off], spec[bid][off] == impl(bid)[off]) 70 | 71 | self.prove(Implies(pre, post)) 72 | 73 | 74 | if __name__ == '__main__': 75 | test.main() 76 | -------------------------------------------------------------------------------- /test_waldisk.py: -------------------------------------------------------------------------------- 1 | from z3 import * 2 | import disk 3 | from waldisk import * 4 | import unittest 5 | import itertools 6 | 7 | from yggdrasil.ufarray import * 8 | from yggdrasil.util import * 9 | from yggdrasil.diskspec import * 10 | from yggdrasil import test 11 | 12 | 13 | class WALDiskTestRefinement(test.RefinementTest): 14 | def create_spec(self, mach): 15 | dataarray1 = FreshDiskArray('dataarray') 16 | dataarray2 = FreshDiskArray('dataarray') 17 | return MultiTxnDisk(mach, [dataarray1, dataarray2]) 18 | 19 | def create_impl(self, mach, logarray=None): 20 | if logarray is None: 21 | logarray = ConstDiskArray(ConstBlock(0)) 22 | dataarray1 = FreshDiskArray('dataarray') 23 | dataarray2 = FreshDiskArray('dataarray') 24 | logdisk = AsyncDisk(mach, logarray) 25 | datadisk1 = AsyncDisk(mach, dataarray1) 26 | datadisk2 = AsyncDisk(mach, dataarray2) 27 | return WALDisk(logdisk, [datadisk1, datadisk2], osync=False) 28 | 29 | def equivalence_volatile(self, spec, impl, **kwargs): 30 | bid = FreshSize('bid') 31 | return ForAll([bid], And( 32 | spec.read(0, bid) == impl.read(0, bid), 33 | spec.read(1, bid) == impl.read(1, bid))) 34 | 35 | def equivalence_durable(self, spec, impl, **kwargs): 36 | bid = FreshSize('bid') 37 | return ForAll([bid], And( 38 | spec._disks[0](bid) == impl._datadisks[0].read(bid), 39 | spec._disks[1](bid) == impl._datadisks[1].read(bid))) 40 | 41 | equivalence = equivalence_volatile 42 | 43 | def call_write_tx_nocommit(self, spec, impl, args): 44 | impl.begin_tx() 45 | spec.begin_tx() 46 | 47 | for arg in args[0]: 48 | impl.write_tx(*arg) 49 | spec.write_tx(*arg) 50 | 51 | def call_write_tx(self, spec, impl, args): 52 | impl.begin_tx() 53 | spec.begin_tx() 54 | 55 | for arg in args[0]: 56 | impl.write_tx(*arg) 57 | spec.write_tx(*arg) 58 | 59 | impl.commit_tx() 60 | spec.commit_tx() 61 | 62 | def _gen_iov(self, *args, **kwargs): 63 | for n in range(WALDisk.LOG_MAX_ENTRIES + 1): 64 | iov = [] 65 | for i in range(n): 66 | iov.append((1, FreshSize('i'), FreshBlock('x'))) 67 | yield (iov,) 68 | 69 | # Verify writev 70 | match_writev = _gen_iov 71 | match_write_tx = _gen_iov 72 | match_write_tx_nocommit = lambda self, *args, **kwargs: self._gen_iov(*args, **kwargs) 73 | match_write_tx_nocommit.nocrash = True 74 | 75 | # recover-full(recover-partial(d)) = recover-full(d) 76 | def test_idempotent_recovery(self): 77 | mach = Machine() 78 | logarray = FreshDiskArray('logarray') 79 | d = self.create_impl(mach, logarray=logarray) 80 | assumption = mach.assumption 81 | 82 
| # Recovery post-condition.. 83 | self.solve(assumption, mach._on, 84 | d._logdisk._disk(0)[0] != 0) 85 | 86 | # block i after a full recovery 87 | i = FreshSize('i') 88 | x = d.read(0, i) 89 | 90 | # block i after a partial recovery followed by a full recovery 91 | mach = Machine() 92 | y = d.crash(mach).read(0, i) 93 | assumption = And(assumption, mach.assumption) 94 | 95 | self.solve(assumption, Not(x == y)) 96 | 97 | def test_atomic(self): 98 | for i in range(2): 99 | self.__test_atomic(i + 1) 100 | 101 | def __test_atomic(self, n): 102 | mach = Machine() 103 | d = self.create_impl(mach) 104 | 105 | bids = [FreshSize('i') for i in range(n)] 106 | xs = [FreshBlock('x') for i in range(n)] 107 | iov = zip(itertools.repeat(0), bids, xs) 108 | 109 | oldvs = [d.read(0, bid) for bid in bids] 110 | d.writev(iov) 111 | 112 | assumption = And(Distinct(*bids), mach.assumption) 113 | 114 | # reboot & recovery 115 | mach = Machine() 116 | d = d.crash(mach) 117 | anyvs = [d.read(0, bid) for bid in bids] 118 | 119 | self.prove(Implies(assumption, Or( 120 | And(*[anyv == oldv for anyv, oldv in zip(anyvs, oldvs)]), 121 | And(*[anyv == x for anyv, x in zip(anyvs, xs)])))) 122 | 123 | 124 | if __name__ == '__main__': 125 | test.main() 126 | -------------------------------------------------------------------------------- /test_xv6inode.py: -------------------------------------------------------------------------------- 1 | import disk 2 | from xv6inode import * 3 | import unittest 4 | 5 | from yggdrasil.diskspec import * 6 | from yggdrasil.util import * 7 | from yggdrasil import test 8 | 9 | 10 | def FreshAttr(): 11 | bsize = FreshSize('bsize') 12 | mtime = FreshSize('mtime') 13 | mode = FreshSize('mode') 14 | nlink = FreshSize('nlink') 15 | return Stat(bsize, mtime, mode, nlink) 16 | 17 | 18 | def attr_eq(a, b): 19 | return And(a.size == b.size, 20 | a.mtime == b.mtime, 21 | a.mode == b.mode) 22 | 23 | 24 | class InodeDiskRefinement(test.RefinementTest): 25 | def create_spec(self, mach): 26 | mappedarray = FreshDiskArray('mappedarray') 27 | attr_array = FreshDiskArray('spec_attr', domain=BitVecSort(32)) 28 | diskarray = FreshDiskArray('diskarray') 29 | return InodeSpec(mach, [mappedarray, attr_array, diskarray]) 30 | 31 | def create_impl(self, mach): 32 | freemaparray = FreshDiskArray('freemaparray') 33 | inodemetaarray = FreshDiskArray('inodemetaarray', domain=BitVecSort(32)) 34 | inodedataarray = FreshDiskArray('inodedataarray', domain=BitVecSort(32)) 35 | diskarray = FreshDiskArray('diskarray') 36 | txndisk = MultiTxnDisk(mach, [freemaparray, inodemetaarray, inodedataarray, diskarray]) 37 | return InodeDisk(txndisk, Allocator64, BitmapSpec, InodePackSpec) 38 | 39 | def pre_post(self, spec, impl, fnargs, *args, **kwargs): 40 | pre_reverse_map = Function(fresh_name('reverse_map'), SizeSort, SizeSort) 41 | 42 | bid = BitVec(fresh_name('bid'), 64) 43 | 44 | ino1 = BitVec(fresh_name('ino'), 32) 45 | off1 = BitVec(fresh_name('off'), 32) 46 | 47 | pre = ForAll([bid], 48 | And( 49 | Or(impl.is_free(bid), Exists([ino1, off1], impl.mappingi(Concat(ino1, off1)) == bid)), 50 | 51 | # mappings should be the same 52 | spec.is_mapped(bid) == impl.is_mapped(bid), 53 | 54 | # Contents of mapped blocks should be the same 55 | Implies(impl.is_mapped(bid), 56 | impl.read(impl.mappingi(bid)) == spec.read(spec.mappingi(bid))), 57 | 58 | attr_eq(impl.get_iattr(Extract(63, 32, bid)), spec.get_iattr(Extract(63, 32, bid))), 59 | 60 | # Mapping should be injective 61 | Implies(impl.is_mapped(bid), 62 | 
pre_reverse_map(impl.mappingi(bid)) == bid), 63 | 64 | # Mapped blocks should be allocated 65 | Implies(impl.is_mapped(bid), Not(impl.is_free(impl.mappingi(bid)))))) 66 | 67 | pre = And(pre, ULT(fnargs[0], InodeDisk.NDIRECT)) 68 | 69 | (spec, impl, args, (_, lbn)) = yield pre 70 | 71 | if len(args) == 1 and lbn is not None: 72 | post_reverse_map = lambda x: If(x == lbn, args[0], pre_reverse_map(x)) 73 | else: 74 | post_reverse_map = pre_reverse_map 75 | 76 | post = ForAll([bid], 77 | And( 78 | Or(impl.is_free(bid), Exists([ino1, off1], impl.mappingi(Concat(ino1, off1)) == bid)), 79 | 80 | # mappings should be the same 81 | spec.is_mapped(bid) == impl.is_mapped(bid), 82 | 83 | # Contents of mapped blocks should be the same 84 | Implies(impl.is_mapped(bid), 85 | impl.read(impl.mappingi(bid)) == spec.read(spec.mappingi(bid))), 86 | 87 | attr_eq(impl.get_iattr(Extract(63, 32, bid)), spec.get_iattr(Extract(63, 32, bid))), 88 | 89 | # Mapping should be injective 90 | Implies(impl.is_mapped(bid), 91 | post_reverse_map(impl.mappingi(bid)) == bid), 92 | 93 | # Mapped blocks should be allocated 94 | Implies(impl.is_mapped(bid), Not(impl.is_free(impl.mappingi(bid)))))) 95 | 96 | yield post 97 | 98 | def match_set_iattr(self): 99 | ino = BitVec(fresh_name('ino'), 32) 100 | attr = FreshAttr() 101 | yield (ino, attr) 102 | 103 | def _create_bid(self): 104 | bid = BitVec(fresh_name('ino'), 64) 105 | yield (bid,) 106 | 107 | match_bunmap = _create_bid 108 | match_bmap = _create_bid 109 | 110 | # Some regular tests below 111 | 112 | def test_bmap(self): 113 | mach = Machine() 114 | impl = self.create_impl(mach) 115 | 116 | ino = BitVec(fresh_name('ino'), 32) 117 | off = BitVec(fresh_name('off'), 32) 118 | vbn = Concat(ino, off) 119 | 120 | mapped = impl.is_mapped(vbn) 121 | 122 | pre = Not(mapped) 123 | pre = And(pre, ULT(off, InodeDisk.NDIRECT)) 124 | 125 | impl.begin_tx() 126 | impl.bmap(vbn) 127 | impl.commit_tx() 128 | 129 | post = impl.is_mapped(vbn) 130 | 131 | pre = And(pre, *disk.assertion.assertions) 132 | 133 | self.prove(Implies(pre, post)) 134 | 135 | def test_bunmap(self): 136 | mach = Machine() 137 | 138 | impl = self.create_impl(mach) 139 | 140 | ino = BitVec(fresh_name('ino'), 32) 141 | off = BitVec(fresh_name('off'), 32) 142 | vbn = Concat(ino, off) 143 | 144 | mapped = impl.is_mapped(vbn) 145 | 146 | pre = mapped 147 | pre = And(pre, ULT(off, InodeDisk.NDIRECT)) 148 | 149 | impl.begin_tx() 150 | impl.bunmap(vbn) 151 | impl.commit_tx() 152 | 153 | post = Not(impl.is_mapped(vbn)) 154 | 155 | pre = And(pre, *disk.assertion.assertions) 156 | 157 | self.prove(Implies(pre, post)) 158 | 159 | 160 | class IndirectInodeDiskRefinement(test.RefinementTest): 161 | def create_spec(self, mach): 162 | mappedarray = FreshDiskArray('mappedarray') 163 | attr_array = FreshDiskArray('spec_attr', domain=BitVecSort(32)) 164 | diskarray = FreshDiskArray('diskarray') 165 | return InodeSpec(mach, [mappedarray, attr_array, diskarray]) 166 | 167 | def create_impl(self, mach): 168 | freemaparray = FreshDiskArray('freemaparray') 169 | inodemetaarray = FreshDiskArray('inodemetaarray', domain=BitVecSort(32)) 170 | inodedataarray = FreshDiskArray('inodedataarray', domain=BitVecSort(32)) 171 | diskarray = FreshDiskArray('diskarray') 172 | txndisk = MultiTxnDisk(mach, [freemaparray, inodemetaarray, inodedataarray, diskarray]) 173 | inodedisk = InodeDisk(txndisk, Allocator64, BitmapSpec, InodePackSpec) 174 | return IndirectInodeDisk(inodedisk) 175 | 176 | def pre_post(self, spec, impl, fnargs, *args, **kwargs): 177 | 
pre_reverse_map = Function(fresh_name('reverse_map'), SizeSort, SizeSort) 178 | 179 | # bid = BitVec(fresh_name('bid'), 64) 180 | # ino = Extract(63, 32, bid) 181 | # off = Extract(31, 0, bid) 182 | ino = FreshBitVec('ino', 32) 183 | off = FreshBitVec('off', 32) 184 | bid = Concat(ino, off) 185 | 186 | pre = ForAll([ino, off], 187 | And( 188 | # Mapped blocks should be allocated 189 | Implies(impl.is_mapped(bid), Not(impl.is_free(impl.mappingi(bid)))), 190 | # Mapping should be injective 191 | Implies(impl.is_mapped(bid), pre_reverse_map(impl.mappingi(bid)) == bid), 192 | 193 | attr_eq(impl.get_iattr(ino), spec.get_iattr(ino)), 194 | 195 | # mappings should be the same 196 | Implies(off != InodeDisk.NDIRECT - 1, 197 | And( 198 | Implies(impl.is_mapped(bid), 199 | impl.read(impl.mappingi(bid)) == spec.read(spec.mappingi(bid))), 200 | 201 | # mapping should be the same 202 | spec.is_mapped(bid) == impl.is_mapped(bid))))) 203 | 204 | pre = And(pre, ULT(fnargs[0], InodeDisk.NDIRECT + IndirectInodeDisk.NINDIRECT)) 205 | # pre = And(pre, ULT(fnargs[0], InodeDisk.NDIRECT)) 206 | pre = And(pre, fnargs[0] != InodeDisk.NDIRECT - 1) 207 | 208 | if kwargs['fname'] == 'bmap' and kwargs['crash']: 209 | self.show(pre) 210 | elif kwargs['fname'] == 'bunmap': 211 | self.show(pre) 212 | 213 | (spec, impl, args, (_, lbn)) = yield pre 214 | 215 | lbn_indirect = impl.mappingi(Concat(ino, BitVecVal(InodeDisk.NDIRECT - 1, 32))) 216 | 217 | if len(args) == 1 and lbn is not None: 218 | post_reverse_map = lambda x: If(x == lbn, 219 | args[0], 220 | If(x == lbn_indirect, 221 | Concat(ino, BitVecVal(InodeDisk.NDIRECT - 1, 32)), 222 | pre_reverse_map(x))) 223 | else: 224 | post_reverse_map = pre_reverse_map 225 | 226 | post = ForAll([ino, off], 227 | And( 228 | # Mapped blocks should be allocated 229 | Implies(impl.is_mapped(bid), Not(impl.is_free(impl.mappingi(bid)))), 230 | # # Mapping should be injective 231 | Implies(impl.is_mapped(bid), post_reverse_map(impl.mappingi(bid)) == bid), 232 | 233 | attr_eq(impl.get_iattr(ino), spec.get_iattr(ino)), 234 | 235 | # mappings should be the same 236 | Implies(off != InodeDisk.NDIRECT - 1, 237 | And( 238 | Implies(impl.is_mapped(bid), 239 | impl.read(impl.mappingi(bid)) == spec.read(spec.mappingi(bid))), 240 | 241 | # mapping should be the same 242 | spec.is_mapped(bid) == impl.is_mapped(bid) 243 | )))) 244 | 245 | yield post 246 | 247 | @test.z3_option(AUTO_CONFIG=False) 248 | def match_set_iattr(self): 249 | ino = BitVec(fresh_name('ino'), 32) 250 | attr = FreshAttr() 251 | yield (ino, attr) 252 | 253 | @test.z3_option(AUTO_CONFIG=False) 254 | def _create_bid(self): 255 | # bid = BitVec(fresh_name('ino'), 64) 256 | ino = FreshBitVec('ino', 32) 257 | off = FreshBitVec('off', 32) 258 | bid = Concat(ino, off) 259 | yield (bid,) 260 | 261 | match_bunmap = _create_bid 262 | # match_bunmap.debug = True 263 | match_bmap = _create_bid 264 | # match_bmap.debug = True 265 | 266 | 267 | if __name__ == '__main__': 268 | test.main() 269 | -------------------------------------------------------------------------------- /verify.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | import time 4 | 5 | files = [ 6 | ('test_waldisk.py ', 'WAL Layer'), 7 | ('test_xv6inode.py', 'Inode layer'), 8 | ('test_dirspec.py', 'Directory layer'), 9 | ('test_bitmap.py', 'Bitmap disk refinement'), 10 | ('test_inodepack.py', 'Inode disk refinement'), 11 | ('test_partition.py', 'Multi disk partition refinement'), 12 | ] 13 | 14 | n = 
time.time() 15 | 16 | for i, pt in files: 17 | sys.stdout.write('Verifying %s.' % pt) 18 | sys.stdout.flush() 19 | outp = "" 20 | lastp = time.time() 21 | w = subprocess.Popen('python2 %s' % i, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE) 22 | np = 0 23 | pn = time.time() 24 | while True: 25 | out = w.stderr.read(1) 26 | outp += out 27 | if not out: 28 | t = time.time() - pn 29 | sys.stdout.write("%s%f seconds\n" % ('.' * (50 - np - len(pt) - len(str(int(t)))), t)) 30 | w.wait() 31 | if w.returncode != 0: 32 | print 33 | print 'Failure.' 34 | print outp 35 | sys.exit(1) 36 | break 37 | if out == '.': 38 | if time.time() - lastp > 1: 39 | np += 1 40 | sys.stdout.write(out) 41 | sys.stdout.flush() 42 | lastp = time.time() 43 | 44 | 45 | print 46 | print 'Success. Verified Yxv6 in %fs' % (time.time() - n) 47 | -------------------------------------------------------------------------------- /waldisk.pxd: -------------------------------------------------------------------------------- 1 | from diskimpl cimport * 2 | 3 | 4 | cdef class WALDisk: 5 | cdef readonly uint64_t LOG_BID_HEADER_BLOCK 6 | cdef readonly uint64_t LOG_DEV_HEADER_BLOCK 7 | cdef readonly uint64_t LOG_HEADER_BLOCK 8 | cdef readonly uint64_t PER_BLOCK 9 | 10 | cdef bint _osync 11 | cdef PartitionAsyncDisk _logdisk 12 | cdef list _datadisks 13 | cdef list _txn 14 | cdef Dict _cache 15 | 16 | cpdef void begin_tx(self) 17 | cpdef void write_tx(self, uint64_t dev, uint64_t bid, Block data) 18 | cpdef void flush(self) 19 | cpdef void commit_tx(self, bint force=*) 20 | cpdef void writev(self, list) 21 | cpdef void __commit(self) 22 | cpdef Block read(self, uint64_t dev, uint64_t bid) 23 | -------------------------------------------------------------------------------- /waldisk.py: -------------------------------------------------------------------------------- 1 | import cython 2 | if not cython.compiled: 3 | from disk import * 4 | 5 | __all__ = ['WALDisk'] 6 | 7 | 8 | # This class implements TxnDisk using write-head logging. 9 | # commit() is both atomic and persistent. 
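# Commit protocol (as implemented in writev/__commit below): write the data
# blocks and the secondary header blocks to the log, flush, then write and
# flush the primary header block whose first word is the entry count, apply
# the logged writes to the data disks, flush them, and finally clear the
# header to truncate the log.
#
# A rough usage sketch (illustrative only; logdisk and datadisk stand for
# whatever partitions the caller supplies, PartitionAsyncDisk objects in the
# shipped code, see create_fuse_inode() in xv6inode.py):
#
#   txndisk = WALDisk(logdisk, [datadisk], osync=True)
#   txndisk.begin_tx()
#   txndisk.write_tx(0, bid, block)   # appended to the txn, staged in the log
#   txndisk.commit_tx()               # logged, flushed, applied, truncated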
10 | class WALDisk(object): 11 | LOG_MAX_ENTRIES = 10 12 | 13 | def __init__(self, logdisk, datadisks, osync=True): 14 | self.LOG_BID_HEADER_BLOCK = 0 15 | self.LOG_DEV_HEADER_BLOCK = 2 16 | self.LOG_HEADER_BLOCK = 3 17 | 18 | # Number of pointers in the first header blocks 19 | self.PER_BLOCK = 511 20 | 21 | self._osync = osync 22 | self._logdisk = logdisk 23 | self._datadisks = datadisks[:] 24 | self.__recover() 25 | self._txn = None 26 | self._cache = Dict() 27 | 28 | def begin_tx(self): 29 | if not self._osync and self._txn is not None: 30 | return 31 | 32 | assert self._txn is None 33 | 34 | self._txn = [] 35 | self._cache = Dict() 36 | 37 | def write_tx(self, dev, bid, data): 38 | self._txn.append((dev, bid, data)) 39 | self._logdisk.write(self.LOG_HEADER_BLOCK + len(self._txn), data) 40 | self._cache[(dev, bid)] = data 41 | 42 | def write(self, dev, bid, data): 43 | self._datadisks[dev].write(bid, data) 44 | 45 | def flush(self): 46 | self.commit_tx(True) 47 | 48 | def commit_tx(self, force=False): 49 | if self._txn is None: 50 | return 51 | 52 | if not self._osync and not force and len(self._txn) <= self.LOG_MAX_ENTRIES - 10: 53 | return 54 | 55 | assert len(self._txn) <= self.LOG_MAX_ENTRIES, "txn size larger than log" 56 | 57 | txn = self._txn 58 | 59 | self.writev(txn) 60 | self._txn = None 61 | 62 | # pre: log header empty 63 | # len(iov) <= LOG_MAX_ENTRIES 64 | @cython.locals(iov_len='uint64_t') 65 | @cython.locals(hdr_bid='Block') 66 | @cython.locals(hdr_dev='Block') 67 | @cython.locals(dev='uint64_t') 68 | @cython.locals(bid='uint64_t') 69 | @cython.locals(block='Block') 70 | @cython.locals(i='uint64_t') 71 | @cython.locals(dd='PartitionAsyncDisk') 72 | def writev(self, iov): 73 | iov_len = len(iov) 74 | 75 | if iov_len == 0: 76 | return 77 | if iov_len == 1: 78 | dev, bid, data = iov[0] 79 | dd = self._datadisks[dev] 80 | dd.write(bid, data) 81 | # self._datadisks[dev].flush() 82 | return 83 | 84 | # write log data & build up the header 85 | 86 | hdr_bid1 = ConstBlock(0) 87 | hdr_dev1 = ConstBlock(0) 88 | hdr_bid2 = ConstBlock(0) 89 | hdr_dev2 = ConstBlock(0) 90 | 91 | hdr_bid1[0] = iov_len 92 | 93 | for i in range(iov_len): 94 | (dev, bid, data) = iov[i] 95 | if not self._txn: 96 | self._logdisk.write(self.LOG_HEADER_BLOCK + 1 + i, data) 97 | 98 | if i < self.PER_BLOCK: 99 | hdr_bid1.set(i + 1, bid) 100 | hdr_dev1.set(i + 1, dev) 101 | else: 102 | hdr_bid2.set(i - self.PER_BLOCK, bid) 103 | hdr_dev2.set(i - self.PER_BLOCK, dev) 104 | 105 | self._logdisk.write(self.LOG_DEV_HEADER_BLOCK, hdr_dev1) 106 | self._logdisk.write(self.LOG_DEV_HEADER_BLOCK + 1, hdr_dev2) 107 | self._logdisk.write(self.LOG_BID_HEADER_BLOCK + 1, hdr_bid2) 108 | 109 | # make ensure log data reach disk 110 | self._logdisk.flush() 111 | # write & flush log header 112 | self._logdisk.write(self.LOG_BID_HEADER_BLOCK, hdr_bid1) 113 | self._logdisk.flush() 114 | 115 | # apply log to data disk 116 | for i in range(iov_len): 117 | dev, bid, data = iov[i] 118 | # for k in range(len(self._datadisks)): 119 | # self._datadisks[dev].write(bid, data, And(dev == k)) 120 | self._datadisks[dev].write(bid, data) 121 | self.__commit() 122 | 123 | @cython.locals(hdr='Block') 124 | def __commit(self): 125 | # make sure data reach disk 126 | for k in range(len(self._datadisks)): 127 | self._datadisks[k].flush() 128 | # delete log 129 | hdr = ConstBlock(0) 130 | self._logdisk.write(self.LOG_BID_HEADER_BLOCK, hdr) 131 | self._logdisk.flush() 132 | 133 | def __recover(self): 134 | hdr_bid1 = 
self._logdisk.read(self.LOG_BID_HEADER_BLOCK) 135 | hdr_dev1 = self._logdisk.read(self.LOG_DEV_HEADER_BLOCK) 136 | 137 | hdr_bid2 = self._logdisk.read(self.LOG_BID_HEADER_BLOCK + 1) 138 | hdr_dev2 = self._logdisk.read(self.LOG_DEV_HEADER_BLOCK + 1) 139 | 140 | n = hdr_bid1[0] 141 | # n is symbolic; instead of looping over n, loop over a constant 142 | for i in range(self.LOG_MAX_ENTRIES): 143 | if i < self.PER_BLOCK: 144 | dev = hdr_dev1[1 + i] 145 | bid = hdr_bid1[1 + i] 146 | else: 147 | dev = hdr_dev2[i - self.PER_BLOCK] 148 | bid = hdr_bid2[i - self.PER_BLOCK] 149 | 150 | data = self._logdisk.read(self.LOG_HEADER_BLOCK + i + 1) 151 | for k in range(len(self._datadisks)): 152 | self._datadisks[k].write(bid, data, And(dev == k, ULT(i, n))) 153 | self.__commit() 154 | 155 | @cython.locals(rdata='Block') 156 | def read(self, dev, bid): 157 | rdata = self._datadisks[dev].read(bid) 158 | return self._cache.get((dev, bid), rdata) 159 | 160 | def _read(self, dev, bid): 161 | return self.read(dev, bid) 162 | 163 | def crash(self, mach): 164 | return self.__class__(self._logdisk.crash(mach), 165 | map(lambda x: x.crash(mach), self._datadisks)) 166 | -------------------------------------------------------------------------------- /xv6inode.pxd: -------------------------------------------------------------------------------- 1 | from diskimpl cimport * 2 | from bitmap cimport BitmapDisk 3 | from waldisk cimport WALDisk 4 | from inodepack cimport InodePackDisk 5 | from dirinode cimport DirImpl 6 | 7 | cdef class InodeDisk: 8 | cdef public uint64_t _INODEDATADISK 9 | 10 | cdef readonly uint64_t _NDIRECT 11 | 12 | cdef WALDisk _txndisk 13 | cdef object _Bitmap 14 | cdef object _Allocator 15 | cdef object _Inode 16 | cdef Allocator _allocator 17 | cdef readonly BitmapDisk _bitmap 18 | cdef InodePackDisk _inode 19 | 20 | cdef void begin_tx(self) 21 | cdef void commit_tx(self) 22 | 23 | cdef Stat get_iattr(self, uint64_t ino) 24 | cdef void set_iattr(self, uint64_t ino, Stat attr) 25 | 26 | cdef Block read(self, uint64_t lbn) 27 | cdef void write_tx(self, uint64_t lbn, Block data) 28 | 29 | cdef uint64_t mappingi(self, uint64_t vbn) 30 | cdef bint is_mapped(self, uint64_t vbn) 31 | cdef bint is_free(self, uint64_t vbn) 32 | cdef uint64_t alloc(self) 33 | cdef void free(self, uint64_t lbn) 34 | cdef uint64_t bmap(self, uint64_t vbn) 35 | cdef void bunmap(self, uint64_t vbn) 36 | 37 | cdef class IndirectInodeDisk: 38 | cdef readonly uint64_t _NINDIRECT 39 | 40 | cdef readonly InodeDisk _idisk 41 | 42 | cdef void begin_tx(self) 43 | cdef void commit_tx(self) 44 | 45 | cdef Stat get_iattr(self, uint64_t ino) 46 | cdef void set_iattr(self, uint64_t ino, Stat attr) 47 | 48 | cdef Block read(self, uint64_t lbn) 49 | cdef void write_tx(self, uint64_t lbn, Block data) 50 | 51 | cdef uint64_t mappingi(self, uint64_t vbn) 52 | cdef bint is_mapped(self, uint64_t vbn) 53 | cdef bint is_free(self, uint64_t vbn) 54 | cdef uint64_t bmap(self, uint64_t vbn) 55 | cdef void bunmap(self, uint64_t vbn) 56 | -------------------------------------------------------------------------------- /xv6inode.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import sys 3 | import time 4 | import argparse 5 | from collections import namedtuple 6 | from stat import S_IFDIR 7 | 8 | import cython 9 | if not cython.compiled: 10 | from diskimpl import Allocator, DentryLookup 11 | from waldisk import WALDisk 12 | from disk import * 13 | from bitmap import BitmapDisk 14 | from inodepack 
import InodePackDisk 15 | from dirinode import DirImpl 16 | 17 | 18 | __all__ = ['InodeDisk', 'IndirectInodeDisk'] 19 | 20 | 21 | Disk = namedtuple('Disk', ['read', 'write']) 22 | 23 | 24 | class InodeDisk(object): 25 | FREEDISK = 0 26 | INODEMETADISK = 1 27 | INODEDATADISK = 2 28 | DATADISK = 3 29 | 30 | # Number of direct blocks 31 | NDIRECT = 11 32 | 33 | def __init__(self, txndisk, Allocator, Bitmap, Inode): 34 | self._INODEDATADISK = InodeDisk.INODEDATADISK 35 | self._NDIRECT = InodeDisk.NDIRECT 36 | 37 | self._txndisk = txndisk 38 | 39 | self._Bitmap = Bitmap 40 | self._Allocator = Allocator 41 | self._Inode = Inode 42 | 43 | self._allocator = Allocator( 44 | lambda n: self._txndisk.read(self.FREEDISK, n), 45 | 0, 1024) 46 | 47 | freedisk = Disk(write=lambda bid, data: self._txndisk.write_tx(self.FREEDISK, bid, data), 48 | read=lambda bid: self._txndisk.read(self.FREEDISK, bid)) 49 | 50 | inodemeta = Disk(write=lambda bid, data: self._txndisk.write_tx(self.INODEMETADISK, bid, data), 51 | read=lambda bid: self._txndisk.read(self.INODEMETADISK, bid)) 52 | 53 | inodedata = Disk(write=lambda bid, data: self._txndisk.write_tx(self._INODEDATADISK, bid, data), 54 | read=lambda bid: self._txndisk.read(self._INODEDATADISK, bid)) 55 | 56 | self._bitmap = Bitmap(freedisk) 57 | self._inode = Inode(inodemeta, inodedata) 58 | 59 | def set_iattr(self, ino, attr): 60 | self._inode.set_iattr(ino, attr) 61 | 62 | def get_iattr(self, ino): 63 | return self._inode.get_iattr(ino) 64 | 65 | def begin_tx(self): 66 | self._txndisk.begin_tx() 67 | 68 | def commit_tx(self): 69 | self._txndisk.commit_tx() 70 | 71 | def read(self, lbn): 72 | return self._txndisk.read(self.DATADISK, lbn) 73 | 74 | def write_tx(self, lbn, data): 75 | self._txndisk.write_tx(self.DATADISK, lbn, data) 76 | 77 | def write(self, lbn, data): 78 | self._txndisk.write_tx(self.DATADISK, lbn, data) 79 | 80 | @cython.locals(ino='uint64_t') 81 | @cython.locals(off='uint64_t') 82 | @cython.locals(eoff='uint64_t') 83 | def mappingi(self, vbn): 84 | ino = Extract(64 - 1, 32, vbn) 85 | # ino = cython.type('uint64_t') 86 | # assertion ino.size() / 8 == sizeof(uint64_t) 87 | off = Extract(32 - 1, 0, vbn) 88 | eoff = Extract(9 - 1, 0, vbn) 89 | return If(ULT(off, self._NDIRECT), 90 | self._inode.get_mapping(ino, eoff), 0) 91 | 92 | def is_mapped(self, vbn): 93 | return self.mappingi(vbn) != 0 94 | 95 | def is_free(self, lbn): 96 | return Not(self._bitmap.is_set(lbn)) 97 | 98 | @cython.locals(lbn='uint64_t') 99 | def alloc(self): 100 | # black box allocator returns a vbn 101 | lbn = self._allocator.alloc() 102 | # Validation 103 | assertion(lbn != 0, "inode.alloc: lbn is 0") 104 | assertion(self.is_free(lbn), "inode alloc: lbn not free") 105 | self._bitmap.set_bit(lbn) 106 | return lbn 107 | 108 | def free(self, lbn): 109 | self._bitmap.unset_bit(lbn) 110 | 111 | @cython.locals(ino='uint64_t') 112 | @cython.locals(off='uint64_t') 113 | @cython.locals(eoff='uint64_t') 114 | @cython.locals(iblock='Block') 115 | @cython.locals(old_lbn='uint64_t') 116 | @cython.locals(valid='bint') 117 | @cython.locals(lbn='uint64_t') 118 | def bmap(self, vbn): 119 | ino = Extract(64 - 1, 32, vbn) 120 | off = Extract(32 - 1, 0, vbn) 121 | eoff = Extract(9 - 1, 0, vbn) 122 | 123 | iblock = self._inode.read(ino) 124 | 125 | old_lbn = self._inode.get_mapping(ino, eoff, block=iblock) 126 | 127 | valid = And(old_lbn == 0, ULT(off, self._NDIRECT)) 128 | 129 | if valid: 130 | lbn = self.alloc() 131 | 132 | # Data write 133 | self.write_tx(lbn, ConstBlock(0)) 134 | 135 | 
self._inode.set_mapping(ino, eoff, lbn, block=iblock) 136 | 137 | return lbn 138 | 139 | if ULT(off, self._NDIRECT): 140 | return old_lbn 141 | return 0 142 | 143 | @cython.locals(ino='uint64_t') 144 | @cython.locals(off='uint64_t') 145 | @cython.locals(eoff='uint64_t') 146 | @cython.locals(iblock='Block') 147 | @cython.locals(lbn='uint64_t') 148 | def bunmap(self, vbn): 149 | ino = Extract(64 - 1, 32, vbn) 150 | off = Extract(32 - 1, 0, vbn) 151 | eoff = Extract(9 - 1, 0, vbn) 152 | 153 | if Not(ULT(off, self._NDIRECT)): 154 | return 155 | 156 | iblock = self._inode.read(ino) 157 | 158 | lbn = self._inode.get_mapping(ino, eoff, block=iblock) 159 | 160 | if lbn != 0: 161 | self.free(lbn) 162 | self._inode.set_mapping(ino, eoff, 0, block=iblock) 163 | 164 | def crash(self, mach): 165 | return self.__class__(self._txndisk.crash(mach), 166 | self._Allocator, 167 | self._Bitmap, 168 | self._Inode) 169 | 170 | def mkfs(self): 171 | self._bitmap.mkfs() 172 | self._inode.mkfs() 173 | 174 | class IndirectInodeDisk(object): 175 | NINDIRECT = 512 176 | 177 | def __init__(self, idisk): 178 | self._NINDIRECT = IndirectInodeDisk.NINDIRECT 179 | self._idisk = idisk 180 | 181 | def get_iattr(self, ino): 182 | return self._idisk.get_iattr(ino) 183 | 184 | def set_iattr(self, ino, attr): 185 | self._idisk.set_iattr(ino, attr) 186 | 187 | def read(self, lbn): 188 | return self._idisk.read(lbn) 189 | 190 | def write_tx(self, lbn, data): 191 | self._idisk.write_tx(lbn, data) 192 | 193 | def write(self, lbn, data): 194 | self._idisk.write_tx(lbn, data) 195 | 196 | def begin_tx(self): 197 | self._idisk.begin_tx() 198 | 199 | def commit_tx(self): 200 | self._idisk.commit_tx() 201 | 202 | @cython.locals(ndir='uint64_t') 203 | @cython.locals(ino='uint64_t') 204 | @cython.locals(off='uint64_t') 205 | @cython.locals(is_direct='bint') 206 | @cython.locals(off='uint64_t') 207 | @cython.locals(off='uint64_t') 208 | @cython.locals(vbnm='uint64_t') 209 | @cython.locals(ind_mapped='bint') 210 | @cython.locals(ind_mapping='uint64_t') 211 | @cython.locals(ind_block='Block') 212 | def mappingi(self, vbn): 213 | ndir = self._idisk._NDIRECT 214 | 215 | ino = Extract(64 - 1, 32, vbn) 216 | off = Extract(32 - 1, 0, vbn) 217 | 218 | is_direct = ULT(off, ndir) 219 | 220 | off = USub(off, ndir) 221 | 222 | vbnm = Concat32(ino, BitVecVal(ndir - 1, 32)) 223 | 224 | ind_mapped = self._idisk.is_mapped(vbnm) 225 | ind_mapping = self._idisk.mappingi(vbnm) 226 | ind_block = self._idisk.read(ind_mapping) 227 | 228 | return If(is_direct, self._idisk.mappingi(vbn), If(And(ULT(off, self._NINDIRECT), ind_mapped), ind_block.get(Extract(8, 0, off)), 0)) 229 | 230 | def is_mapped(self, vbn): 231 | return self.mappingi(vbn) != 0 232 | 233 | def is_free(self, lbn): 234 | return self._idisk.is_free(lbn) 235 | 236 | @cython.locals(ino='uint64_t') 237 | @cython.locals(off='uint64_t') 238 | @cython.locals(eoff='uint64_t') 239 | @cython.locals(mapping='uint64_t') 240 | @cython.locals(imap='Block') 241 | @cython.locals(old_lbn='uint64_t') 242 | @cython.locals(lbn='uint64_t') 243 | def bmap(self, vbn): 244 | ino = Extract(64 - 1, 32, vbn) 245 | off = Extract(32 - 1, 0, vbn) 246 | eoff = Extract(9 - 1, 0, USub(off, self._idisk._NDIRECT)) 247 | 248 | # Off is direct 249 | if ULT(off, self._idisk._NDIRECT): 250 | return self._idisk.bmap(vbn) 251 | 252 | # Off is not within bounds 253 | if Not(ULT(off, self._idisk._NDIRECT + self._NINDIRECT)): 254 | return 0 255 | 256 | mapping = self._idisk.bmap( 257 | Concat32(ino, BitVecVal(self._idisk._NDIRECT - 1, 32))) 
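        # The last direct slot (NDIRECT - 1) is reserved for the indirect
        # block: the bmap call above allocates that block on demand and
        # returns its physical block number; the entry for this offset lives
        # at index eoff inside it.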
258 | 259 | imap = self._idisk.read(mapping) 260 | 261 | old_lbn = imap[eoff] 262 | 263 | # Off is not-mapped 264 | if old_lbn == 0: 265 | 266 | lbn = self._idisk.alloc() 267 | 268 | self.write_tx(lbn, ConstBlock(0)) 269 | 270 | imap[eoff] = lbn 271 | self.write_tx(mapping, imap) 272 | 273 | return lbn 274 | return old_lbn 275 | 276 | 277 | @cython.locals(ino='uint64_t') 278 | @cython.locals(off='uint64_t') 279 | @cython.locals(eoff='uint64_t') 280 | @cython.locals(mapping='uint64_t') 281 | @cython.locals(imap='Block') 282 | @cython.locals(lbn='uint64_t') 283 | def bunmap(self, vbn): 284 | ino = Extract(64 - 1, 32, vbn) 285 | off = Extract(32 - 1, 0, vbn) 286 | eoff = Extract(9 - 1, 0, USub(vbn, self._idisk._NDIRECT)) 287 | 288 | # Off is out of bounds 289 | if Not(ULT(off, self._idisk._NDIRECT + self._NINDIRECT)): 290 | return 291 | 292 | # Off is direct 293 | if ULT(off, self._idisk._NDIRECT): 294 | self._idisk.bunmap(vbn) 295 | return 296 | 297 | mapping = self._idisk.mappingi( 298 | Concat32(ino, BitVecVal(self._idisk._NDIRECT - 1, 32))) 299 | 300 | imap = self._idisk.read(mapping) 301 | 302 | # Off is not mapped 303 | if Or(mapping == 0, imap[eoff] == 0): 304 | return 305 | 306 | lbn = imap[eoff] 307 | 308 | imap[eoff] = 0 309 | 310 | self._idisk.free(lbn) 311 | self.write_tx(mapping, imap) 312 | 313 | 314 | def crash(self, mach): 315 | return self.__class__(self._idisk.crash(mach)) 316 | 317 | 318 | 319 | @cython.locals(fdisk='DirImpl') 320 | @cython.locals(inode='IndirectInodeDisk') 321 | @cython.locals(root_attr='Stat') 322 | def mkfs(fdisk): 323 | inode = fdisk._inode 324 | 325 | inode.begin_tx() 326 | 327 | attr = inode.get_iattr(1) 328 | 329 | if attr.nlink == 0: 330 | # Initialize the root directory 331 | attr.mode = 0755 | S_IFDIR 332 | attr.mtime = int(time.time()) 333 | attr.nlink = 2 334 | 335 | inode.set_iattr(1, attr) 336 | 337 | # Reserve datablock 0 338 | inode._idisk._bitmap.set_bit(0) 339 | 340 | # Reserve inodes 0 and 1 341 | fdisk._ibitmap.set_bit(0) 342 | fdisk._ibitmap.set_bit(1) 343 | 344 | fdisk._txndisk.commit_tx(True) 345 | 346 | 347 | _curr = 0 348 | def create_partition(disk, size, debug=False): 349 | global _curr 350 | start = _curr 351 | _curr += size 352 | end = start + size 353 | print "Creating partition:", start, end 354 | return PartitionAsyncDisk(disk, start, end, debug) 355 | 356 | 357 | def create_fuse_inode(args): 358 | parser = argparse.ArgumentParser(description='Yxv6') 359 | 360 | parser.add_argument('imgpath', metavar='PATH', type=str, help='Path to fs image') 361 | parser.add_argument('--isize', metavar='NUM', default=1, type=int, help='Number of inode blocks (multiple of 32k)') 362 | parser.add_argument('--dsize', metavar='NUM', default=1, type=int, help='Number data blocks (multiple of 32k)') 363 | parser.add_argument('--sync', help='Synchronous mode', action='store_true') 364 | parser.set_defaults(sync=False) 365 | 366 | args = parser.parse_args(args) 367 | 368 | disk = AsyncDisk(args.imgpath) 369 | 370 | isize = args.isize 371 | dsize = args.dsize 372 | 373 | print ">>", args.imgpath 374 | print ">> number of inodes = %d" % (2 ** 15 * isize) 375 | print ">> fs size = %d MB" % (2 ** 15 * dsize * 4096 / 2 ** 20) 376 | print ">> sync = %r" % args.sync 377 | 378 | ifreedisk = create_partition(disk, isize) 379 | freedisk = create_partition(disk, dsize) 380 | inodedisk = create_partition(disk, 2 ** 15 * isize / 32) 381 | datadisk = create_partition(disk, 2 ** 15 * dsize) 382 | logdisk = create_partition(disk, 1027) 383 | orphandisk = 
create_partition(disk, 1) 384 | 385 | txndisk = WALDisk(logdisk, [freedisk, inodedisk, inodedisk, datadisk, ifreedisk, orphandisk], osync=args.sync) 386 | idisk = InodeDisk(txndisk, Allocator, BitmapDisk, InodePackDisk) 387 | idisk._INODEDATADISK = idisk.INODEMETADISK 388 | idisk = IndirectInodeDisk(idisk) 389 | 390 | fdisk = DirImpl(txndisk, idisk, Allocator, BitmapDisk, DentryLookup) 391 | mkfs(fdisk) 392 | 393 | return fdisk 394 | -------------------------------------------------------------------------------- /yav_dirimpl_fuse.pyx: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | import traceback 4 | from xv6inode import create_fuse_inode 5 | 6 | from llfuse cimport * 7 | from diskimpl cimport Block, Stat, Concat32 8 | from dirinode cimport DirImpl 9 | 10 | from libc.stdint cimport uint64_t, int64_t 11 | from libc.stdlib cimport malloc, calloc, free, abort, realloc 12 | from libc.string cimport memcpy, memset, strlen, strcmp 13 | from posix.fcntl cimport S_IFREG, S_IFDIR, S_IFLNK 14 | from posix.types cimport ino_t 15 | 16 | from libc.errno cimport ENOENT, ENOTDIR, EISDIR, EACCES, ENOMEM, ENAMETOOLONG, EFBIG 17 | 18 | 19 | cdef DirImpl inode_obj = None 20 | 21 | 22 | cdef uint64_t NUM_BLOCKS = 522 23 | cdef uint64_t MAX_NAME_LENGTH = 120 24 | 25 | 26 | cdef void update_block(Block b, size_t off, const char *buf, size_t size) nogil: 27 | cdef char *bbuf = b.buf 28 | 29 | off = off % 4096 30 | 31 | cdef size_t i = 0 32 | for i in range(size): 33 | bbuf[off + i] = buf[i] 34 | 35 | 36 | ################# 37 | 38 | cdef void gc(): 39 | for oidx in range(inode_obj._orphans.size()): 40 | ino = inode_obj._orphans.index(oidx) 41 | for i in range(inode_obj.get_iattr(ino).bsize + 1, -1, -1): 42 | # print 'gc-ing orphan', i, oidx, ino, inode_obj.get_iattr(ino).bsize 43 | inode_obj.gc1(oidx, i) 44 | inode_obj.gc2(oidx) 45 | inode_obj.gc3() 46 | 47 | 48 | cdef int mkstat(fuse_ino_t ino, struct_stat *stbuf): 49 | cdef Stat attr = inode_obj.get_iattr(ino) 50 | # print 'mkstat ino={}, attr={}'.format(ino, attr) 51 | 52 | if attr.mode == 0: 53 | return -1 54 | 55 | stbuf.st_ino = ino 56 | if attr.mode & S_IFDIR != 0: 57 | stbuf.st_nlink = 2 58 | else: 59 | stbuf.st_nlink = 1 60 | stbuf.st_mode = attr.mode 61 | stbuf.st_size = attr.fsize 62 | if stbuf.st_size >= (10 * 4096): 63 | stbuf.st_size -= 4096 64 | stbuf.st_mtime = attr.mtime 65 | 66 | return 0 67 | 68 | 69 | cdef void ll_getattr(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi): 70 | # print "getattr ino={}".format(ino) 71 | 72 | cdef struct_stat stbuf 73 | memset(&stbuf, 0, sizeof(stbuf)) 74 | if mkstat(ino, &stbuf) == -1: 75 | fuse_reply_err(req, ENOENT) 76 | else: 77 | fuse_reply_attr(req, &stbuf, 1.0) 78 | 79 | 80 | cdef void ll_setattr(fuse_req_t req, fuse_ino_t ino, struct_stat *attr, 81 | int to_set, fuse_file_info *fi): 82 | # print "setattr ino={}, st_mode={}, st_size={}, st_mtime={}, oldattr={}".format(ino, 83 | # attr[0].st_mode, 84 | # attr[0].st_size, 85 | # attr[0].st_mtime, 86 | # iattr) 87 | 88 | cdef Stat iattr = inode_obj.get_iattr(ino) 89 | cdef uint64_t off, startoff, endoff, oldsize, newsize 90 | 91 | if to_set & FUSE_SET_ATTR_SIZE: 92 | oldsize = iattr.fsize 93 | newsize = attr[0].st_size 94 | if newsize >= 4096 * 10: 95 | newsize += 4096 96 | if oldsize > newsize: 97 | inode_obj.truncate(ino, newsize) 98 | 99 | iattr = inode_obj.get_iattr(ino) 100 | 101 | if to_set & FUSE_SET_ATTR_MTIME: 102 | iattr.mtime = attr[0].st_mtime 103 | 104 | if to_set & 
FUSE_SET_ATTR_MODE: 105 | iattr.mode = attr[0].st_mode 106 | 107 | inode_obj.set_iattr(ino, iattr) 108 | 109 | cdef struct_stat stbuf 110 | memset(&stbuf, 0, sizeof(stbuf)) 111 | mkstat(ino, &stbuf) 112 | fuse_reply_attr(req, &stbuf, 1.0) 113 | 114 | 115 | cdef void ll_lookup(fuse_req_t req, fuse_ino_t parent, const char *name): 116 | # print "lookup parent={}, name={}".format(parent, name) 117 | 118 | cdef size_t namelen = strlen(name) 119 | 120 | if namelen > MAX_NAME_LENGTH: 121 | fuse_reply_err(req, ENOENT) 122 | return 123 | 124 | cdef fuse_entry_param e 125 | memset(&e, 0, sizeof(e)) 126 | 127 | cdef uint64_t[15] pname 128 | memset(&pname, 0, sizeof(uint64_t) * 15) 129 | memcpy(&pname, name, namelen) 130 | 131 | t = inode_obj.lookup(parent, pname) 132 | 133 | if t is None: 134 | fuse_reply_err(req, ENOENT) 135 | return 136 | e.ino = t 137 | e.attr_timeout = 1.0 138 | e.entry_timeout = 1.0 139 | mkstat(e.ino, &e.attr) 140 | fuse_reply_entry(req, &e) 141 | 142 | cdef struct dirbuf: 143 | char *p; 144 | size_t size 145 | 146 | 147 | cdef void dirbuf_add(fuse_req_t req, dirbuf *b, const char *name, fuse_ino_t ino): 148 | cdef struct_stat stbuf 149 | cdef size_t oldsize = b.size 150 | b.size += fuse_add_direntry(req, NULL, 0, name, NULL, 0) 151 | cdef char *newp = realloc(b.p, b.size) 152 | if newp == NULL: 153 | print "memory error, could not realloc buffer" 154 | abort() 155 | 156 | b.p = newp 157 | memset(&stbuf, 0, sizeof(stbuf)) 158 | stbuf.st_ino = ino 159 | fuse_add_direntry(req, b.p + oldsize, b.size - oldsize, name, &stbuf, b.size) 160 | 161 | 162 | cdef int reply_buf_limited(fuse_req_t req, const char *buf, size_t bufsize, off_t off, size_t maxsize): 163 | if off < bufsize: 164 | return fuse_reply_buf(req, buf + off, min(bufsize - off, maxsize)) 165 | else: 166 | fuse_reply_buf(req, NULL, 0) 167 | 168 | 169 | cdef void ll_readdir(fuse_req_t req, fuse_ino_t ino, 170 | size_t size, off_t off, fuse_file_info* fi): 171 | # print "readdir ino={} size={} off={}".format(ino, size, off) 172 | 173 | cdef dirbuf b 174 | cdef Block block 175 | cdef uint64_t i, ioff, boff, fileino 176 | cdef uint64_t[16] name 177 | cdef bint mapped 178 | memset(&name, 0, sizeof(uint64_t) * 16) 179 | 180 | boff = 0 181 | 182 | iattr = inode_obj.get_iattr(ino) 183 | 184 | if ino != 1 and iattr.mode & S_IFDIR == 0: 185 | fuse_reply_err(req, ENOTDIR) 186 | else: 187 | memset(&b, 0, sizeof(b)) 188 | dirbuf_add(req, &b, ".", ino) 189 | dirbuf_add(req, &b, "..", ino) 190 | 191 | for ioff in range(522): 192 | if ioff == 10: 193 | continue 194 | is_mapped = inode_obj._inode.is_mapped(Concat32(ino, ioff)) 195 | if not is_mapped: 196 | break 197 | block = inode_obj.read(ino, ioff) 198 | 199 | for i in range(0, 512, 16): 200 | fileino = block.get(i) 201 | if fileino != 0: 202 | memcpy(&name, (block.buf) + (i + 1) * 8, MAX_NAME_LENGTH) 203 | 204 | dirbuf_add(req, &b, &name, fileino) 205 | 206 | if b.size > size + off: 207 | break 208 | 209 | reply_buf_limited(req, b.p, b.size, off, size) 210 | free(b.p) 211 | 212 | 213 | cdef void ll_open(fuse_req_t req, fuse_ino_t ino, fuse_file_info *fi): 214 | iattr = inode_obj.get_iattr(ino) 215 | # print "open ino={} attr={}".format(ino, iattr) 216 | 217 | if iattr.mode & S_IFDIR != 0: 218 | fuse_reply_err(req, EISDIR) 219 | # TODO: Check permission? 
220 | # else ...: 221 | # fuse_reply_err(req, EACCES) 222 | else: 223 | fuse_reply_open(req, fi) 224 | 225 | 226 | cdef void ll_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, fuse_file_info *fi): 227 | print 'read ino={}, size={}, off={}'.format(ino, size, off) 228 | 229 | cdef Stat iattr = inode_obj.get_iattr(ino) 230 | cdef uint64_t bufsize, fsize = iattr.fsize 231 | if fsize / 4096 == off / 4096: 232 | bufsize = fsize % 4096 233 | else: 234 | bufsize = 4096 235 | 236 | bufsize = min(bufsize, size) 237 | 238 | cdef uint64_t boff = off / 4096 239 | if boff >= 10: 240 | boff += 1 241 | 242 | cdef Block block = inode_obj.read(ino, boff) 243 | reply_buf_limited(req, block.buf, bufsize, 0, bufsize) 244 | 245 | 246 | cdef (uint64_t, uint64_t) mknod(fuse_ino_t parent, const char *name, mode_t mode, dev_t rdev): 247 | cdef size_t namelen = strlen(name) 248 | 249 | if namelen > MAX_NAME_LENGTH: 250 | return (0, ENAMETOOLONG) 251 | 252 | cdef uint64_t[15] pname 253 | memset(&pname, 0, sizeof(uint64_t) * 15) 254 | memcpy(&pname, name, namelen) 255 | 256 | return inode_obj.mknod(parent, pname, mode, int(time.time())) 257 | 258 | 259 | cdef void ll_mknod(fuse_req_t req, fuse_ino_t parent, const char *name, 260 | mode_t mode, dev_t rdev): 261 | # print 'mknod parent={}, name={}, mode={}, rdev={}'.format(parent, name, mode, rdev) 262 | 263 | v = mknod(parent, name, mode, rdev) 264 | cdef int64_t ino, err 265 | ino = v[0] 266 | err = v[1] 267 | 268 | if err != 0: 269 | fuse_reply_err(req, err) 270 | return 271 | 272 | cdef fuse_entry_param e 273 | memset(&e, 0, sizeof(e)) 274 | e.ino = ino 275 | e.attr_timeout = 1.0 276 | e.entry_timeout = 1.0 277 | mkstat(e.ino, &e.attr) 278 | fuse_reply_entry(req, &e) 279 | 280 | 281 | cdef void ll_rename(fuse_req_t req, fuse_ino_t oldparent, const char *oldname, 282 | fuse_ino_t newparent, const char *newname): 283 | # print 'rename oldparent={}, oldname={}, newparent={}, newname={}'.format( 284 | # oldparent, oldname, newparent, newname) 285 | 286 | cdef size_t oldnamelen = strlen(oldname) 287 | cdef size_t newnamelen = strlen(newname) 288 | 289 | if oldnamelen > MAX_NAME_LENGTH: 290 | fuse_reply_err(req, ENOENT) 291 | return 292 | 293 | if newnamelen > MAX_NAME_LENGTH: 294 | fuse_reply_err(req, ENAMETOOLONG) 295 | return 296 | 297 | cdef uint64_t[15] poldname 298 | cdef uint64_t[15] pnewname 299 | 300 | memset(&poldname, 0, sizeof(uint64_t) * 15) 301 | memcpy(&poldname, oldname, oldnamelen) 302 | 303 | memset(&pnewname, 0, sizeof(uint64_t) * 15) 304 | memcpy(&pnewname, newname, newnamelen) 305 | 306 | ret = inode_obj.rename(oldparent, poldname, newparent, pnewname) 307 | fuse_reply_err(req, ret) 308 | 309 | 310 | cdef void ll_write(fuse_req_t req, fuse_ino_t ino, const char *buf, 311 | size_t size, off_t off, fuse_file_info *fi): 312 | # print "write off={}, size={}".format(off, size) 313 | # print (off % 4096) + size 314 | 315 | cdef uint64_t boff = off / 4096; 316 | if boff >= 10: 317 | boff += 1 318 | 319 | cdef Block b = inode_obj.read(ino, boff) 320 | update_block(b, off, buf, size) 321 | 322 | if boff >= NUM_BLOCKS: 323 | fuse_reply_err(req, EFBIG) 324 | return 325 | 326 | cdef size_t written = inode_obj.write(ino, boff, b, off % 4096 + size) 327 | 328 | if written == 0: 329 | fuse_reply_write(req, 0) 330 | return 331 | else: 332 | fuse_reply_write(req, size) 333 | return 334 | 335 | 336 | cdef void ll_mkdir(fuse_req_t req, fuse_ino_t parent, 337 | const char *name, mode_t mode): 338 | # print 'mkdir parent={}, name={}, mode={}'.format(parent, 
name, parent) 339 | ll_mknod(req, parent, name, S_IFDIR | mode, 0) 340 | 341 | 342 | cdef void ll_create(fuse_req_t req, fuse_ino_t parent, 343 | const char *name, mode_t mode, fuse_file_info *fi): 344 | # print 'create parent={}, name={}, mode={}'.format(parent, name, parent) 345 | 346 | cdef (uint64_t, uint64_t) res = mknod(parent, name, mode, 0) 347 | 348 | if res[1] != 0: 349 | fuse_reply_err(req, res[1]) 350 | return 351 | 352 | cdef fuse_entry_param e 353 | memset(&e, 0, sizeof(e)) 354 | e.ino = res[0] 355 | e.attr_timeout = 1.0 356 | e.entry_timeout = 1.0 357 | mkstat(e.ino, &e.attr) 358 | fuse_reply_create(req, &e, fi) 359 | 360 | 361 | cdef void ll_unlink(fuse_req_t req, fuse_ino_t parent, const char *name): 362 | # print 'unlink parent={}, name={}'.format(parent, name) 363 | 364 | cdef size_t namelen = strlen(name) 365 | 366 | if namelen > MAX_NAME_LENGTH: 367 | fuse_reply_err(req, ENOENT) 368 | return 369 | 370 | cdef uint64_t[15] pname 371 | 372 | memset(&pname, 0, sizeof(uint64_t) * 15) 373 | memcpy(&pname, name, namelen) 374 | 375 | ret = inode_obj.unlink(parent, pname) 376 | 377 | fuse_reply_err(req, 0) 378 | 379 | 380 | cdef void ll_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name): 381 | # print 'rmdir parent={}, name={}'.format(parent, name) 382 | 383 | cdef size_t namelen = strlen(name) 384 | 385 | if namelen > MAX_NAME_LENGTH: 386 | fuse_reply_err(req, ENOENT) 387 | return 388 | 389 | cdef uint64_t[15] pname 390 | 391 | memset(&pname, 0, sizeof(uint64_t) * 15) 392 | memcpy(&pname, name, namelen) 393 | 394 | cdef (uint64_t, uint64_t) res = inode_obj.rmdir(parent, pname) 395 | fuse_reply_err(req, res[1]) 396 | 397 | cdef void ll_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup): 398 | print 'forget ino={}, nlookup={}'.format(ino, nlookup) 399 | 400 | inode_obj.forget(ino) 401 | gc() 402 | fuse_reply_none(req) 403 | 404 | 405 | cdef void ll_symlink(fuse_req_t req, const char *link, fuse_ino_t parent, 406 | const char *name): 407 | # print 'symlink link={}, parent={}, name={}'.format(link, parent, name) 408 | 409 | v = mknod(parent, name, 0777 | S_IFLNK, 0) 410 | 411 | cdef uint64_t ino = v[0] 412 | cdef uint64_t err = v[1] 413 | 414 | if err != 0: 415 | fuse_reply_err(req, err) 416 | return 417 | 418 | # inode_obj.bunmap(ino, 0) 419 | cdef Block b = inode_obj.read(ino, 0) 420 | update_block(b, 0, link, strlen(link)) 421 | inode_obj.write(ino, 0, b, strlen(link)) 422 | 423 | cdef fuse_entry_param e 424 | memset(&e, 0, sizeof(e)) 425 | e.ino = ino 426 | e.attr_timeout = 1.0 427 | e.entry_timeout = 1.0 428 | mkstat(e.ino, &e.attr) 429 | fuse_reply_entry(req, &e) 430 | 431 | 432 | cdef void ll_readlink(fuse_req_t req, fuse_ino_t ino): 433 | # print 'readlink ino={}'.format(ino) 434 | 435 | cdef Block b = inode_obj.read(ino, 0) 436 | fuse_reply_readlink(req, b.buf) 437 | 438 | cdef void ll_fsync(fuse_req_t req, fuse_ino_t ino, 439 | int datasync, fuse_file_info *fi): 440 | # print "fsync ino={} datasync={}".format(ino, datasync) 441 | inode_obj.fsync() 442 | fuse_reply_err(req, 0) 443 | 444 | cdef void ll_fsyncdir(fuse_req_t req, fuse_ino_t ino, 445 | int datasync, fuse_file_info *fi): 446 | # print "fsyncdir ino={} datasync={}".format(ino, datasync) 447 | inode_obj.fsync() 448 | fuse_reply_err(req, 0) 449 | 450 | 451 | def main(): 452 | global inode_obj 453 | 454 | if '--' in sys.argv: 455 | fargs = sys.argv[sys.argv.index('--') + 1:] 456 | sys.argv = sys.argv[:sys.argv.index('--')] 457 | else: 458 | fargs = [] 459 | 460 | inode_obj = create_fuse_inode(fargs) 
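    # create_fuse_inode() (xv6inode.py) builds the whole file system stack
    # (WAL disk, inode layers, DirImpl) from the image path and size options
    # that follow '--' on the command line; the rest of main() is Cython glue
    # that registers the low-level FUSE callbacks and enters the session loop
    # in cmain().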
461 | 462 | cdef int argc = len(sys.argv) 463 | cdef char** argv = malloc(argc * sizeof(char**)) 464 | if argv == NULL: 465 | print "Malloc: Memory error. Could not allocate" 466 | abort() 467 | 468 | for i in range(argc): 469 | argv[i] = sys.argv[i] 470 | 471 | cmain(argc, argv) 472 | 473 | 474 | cdef void ll_init(void *userdata, fuse_conn_info *conn): 475 | pass 476 | 477 | 478 | cdef int cmain(int argc, char **argv): 479 | cdef fuse_args args 480 | args.argc = argc 481 | args.argv = argv 482 | args.allocated = 0 483 | 484 | cdef fuse_chan *ch 485 | cdef char* mountpoint = NULL 486 | cdef int err = -1 487 | 488 | cdef fuse_session *se 489 | 490 | cdef fuse_lowlevel_ops ops 491 | memset(&ops, 0, sizeof(ops)) 492 | 493 | # ops.init = &ll_init 494 | ops.lookup = &ll_lookup 495 | ops.getattr = &ll_getattr 496 | ops.setattr = &ll_setattr 497 | ops.readdir = &ll_readdir 498 | ops.open = &ll_open 499 | ops.read = &ll_read 500 | ops.write = &ll_write 501 | ops.mknod = &ll_mknod 502 | ops.create = &ll_create 503 | ops.mkdir = &ll_mkdir 504 | ops.rename = &ll_rename 505 | ops.unlink = &ll_unlink 506 | ops.rmdir = &ll_rmdir 507 | ops.forget = &ll_forget 508 | 509 | ops.fsync = &ll_fsync 510 | ops.fsyncdir = &ll_fsyncdir 511 | 512 | ops.symlink = &ll_symlink 513 | ops.readlink = &ll_readlink 514 | 515 | if fuse_parse_cmdline(&args, &mountpoint, NULL, NULL) != -1: 516 | ch = fuse_mount(mountpoint, &args) 517 | if ch != NULL: 518 | se = fuse_lowlevel_new(&args, &ops, sizeof(ops), NULL) 519 | 520 | if se != NULL: 521 | if fuse_set_signal_handlers(se) != -1: 522 | fuse_session_add_chan(se, ch) 523 | err = fuse_session_loop(se) 524 | fuse_remove_signal_handlers(se) 525 | fuse_session_remove_chan(ch) 526 | fuse_session_destroy(se) 527 | fuse_unmount(mountpoint, ch) 528 | 529 | fuse_opt_free_args(&args) 530 | 531 | return err 532 | -------------------------------------------------------------------------------- /yav_xv6_main.py: -------------------------------------------------------------------------------- 1 | import pstats, cProfile 2 | import yav_dirimpl_fuse 3 | 4 | def run(): 5 | yav_dirimpl_fuse.main() 6 | 7 | def profile(): 8 | cProfile.runctx("yav_dirimpl_fuse.main()", globals(), locals(), "Profile.prof") 9 | s = pstats.Stats("Profile.prof") 10 | s.strip_dirs().sort_stats("time").print_stats() 11 | 12 | 13 | if __name__ == '__main__': 14 | run() 15 | # profile() 16 | 17 | -------------------------------------------------------------------------------- /yggdrasil/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/uw-unsat/yggdrasil/e1de61d798acafb9ae2d8387d176c4b091773623/yggdrasil/__init__.py -------------------------------------------------------------------------------- /yggdrasil/diskspec.py: -------------------------------------------------------------------------------- 1 | from z3 import * 2 | from util import * 3 | from ufarray import * 4 | import traceback 5 | from collections import namedtuple 6 | 7 | 8 | # Abstract machine model: multiple disks will share the same 9 | # ordering constraints. It captures two types of constraints: 10 | # - a synced being true implies the current "on" being true; 11 | # - the current "on" being true implies that all the previous 12 | # "on"s being true. 
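# Informally: every buffered write is tagged with a fresh "synced" boolean
# and every flush introduces a fresh "on" boolean; leaving some of these
# booleans false models writes that never reached the disk across a crash.
#
# A minimal sketch of how a disk layer drives the model (using the classes
# defined below; purely illustrative, bid and blk are placeholders):
#
#   mach = Machine()
#   d = AsyncDisk(mach, FreshDiskArray('d'))
#   d.write(bid, blk)        # records a fresh "synced" for this write
#   d.flush()                # creates an "on" that forces pending synceds
#   d2 = d.crash(Machine())  # non-deterministic post-crash disk state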
13 | class Machine(object): 14 | def __init__(self): 15 | self._on = None 16 | self._control = [] 17 | self._ordering = [] 18 | self._stacks = {} 19 | 20 | self._ons = [] 21 | self._flushes = [] 22 | 23 | def __fresh_bool(self, name): 24 | b = Bool(fresh_name(name)) 25 | 26 | self._stacks[b.sexpr()] = traceback.extract_stack()[:-2] 27 | 28 | self._control.append(b) 29 | if self._on is not None: 30 | self._ordering.append(Implies(b, self._on)) 31 | return b 32 | 33 | # Create a sync bool variable, which is conditioned on "on". 34 | def create_synced(self): 35 | synced = self.__fresh_bool("synced") 36 | self._flushes.append(synced) 37 | return synced 38 | 39 | @property 40 | def on(self): 41 | if self._on: 42 | return self._on 43 | return BoolVal(True) 44 | 45 | # Create a new "on", which is conditioned on previous "on". 46 | # It also implies that pending synced's must be true. 47 | def create_on(self, syncs): 48 | self._on = self.__fresh_bool("on") 49 | self._ons.append(self._on) 50 | for s in syncs: 51 | self._ordering.append(Implies(self._on, s)) 52 | return self._on 53 | 54 | def add_control(self, c): 55 | self._control.append(c) 56 | 57 | @property 58 | def control(self): 59 | return self._control 60 | 61 | @property 62 | def assumption(self): 63 | if len(self._ordering) == 0: 64 | return True 65 | return And(*self._ordering) 66 | 67 | def explain(self, model): 68 | print "<<<<<<<<<<<<<<<<<<" 69 | 70 | print "# Outstanding writes" 71 | for i in self._flushes: 72 | if not model.evaluate(i).eq(BoolVal(True)): 73 | print ' '.join(map(str, self._stacks[i.sexpr()][-2])) 74 | 75 | for i in self._ons: 76 | if not model.evaluate(i).eq(BoolVal(True)): 77 | print "# Crash point" 78 | for i in self._stacks[i.sexpr()]: 79 | i = ' '.join(map(str, i)) 80 | if '/usr/lib/' in i: 81 | continue 82 | print i 83 | break 84 | print ">>>>>>>>>>>>>>>>>>" 85 | 86 | 87 | class AsyncDisk(object): 88 | def __init__(self, mach, disk, cache=None): 89 | self._mach = mach 90 | # cache is a partial evaluation of _disk without any ites 91 | # Not required for correctness 92 | self._disk = disk 93 | self._dirty = [] 94 | if cache is None: 95 | self._cache = disk 96 | self._docache = False 97 | else: 98 | self._cache = cache 99 | self._docache = True 100 | 101 | def _write(self, synced, bid, data, guard=BoolVal(True)): 102 | self._cache = self._cache.update(bid, data, guard) 103 | self._disk = self._disk.update(bid, data, synced, guard) 104 | self._dirty.append(synced) 105 | 106 | # cache is updated; disk is either updated or unchanged 107 | def write(self, bid, data, guard=BoolVal(True)): 108 | synced = self._mach.create_synced() 109 | self._write(synced, bid, data, guard) 110 | 111 | # flush is a full barrier 112 | def flush(self): 113 | on = self._mach.create_on(self._dirty) 114 | if self._docache: 115 | self._disk = If(on, self._disk, self._cache) 116 | self._dirty = [] 117 | 118 | def read(self, bid): 119 | return self._cache(bid) 120 | 121 | # create a new copy of non-deterministic disk state 122 | # note that this captures all the previous states 123 | def crash(self, mach): 124 | return self.__class__(mach, self._disk) 125 | 126 | def domain(self): 127 | return self._disk.domain() 128 | 129 | 130 | class VirtualAsyncDisk(AsyncDisk): 131 | def __init__(self, mach, mapped, disk): 132 | super(VirtualAsyncDisk, self).__init__(mach, disk) 133 | self._mapped = mapped 134 | self._mapped_cache = mapped 135 | 136 | def read(self, vbn): 137 | datablock = super(VirtualAsyncDisk, self).read(vbn) 138 | return 
If(self.is_mapped(vbn), datablock, ConstBlock(0)) 139 | 140 | def is_mapped(self, vbn): 141 | return self._mapped_cache(vbn)[0] == 1 142 | 143 | def bmap(self, vbn, val=1): 144 | on = self._mach.create_on([]) 145 | self._write(on, vbn, ConstBlock(0)) 146 | self._mapped = self._mapped.update(vbn, ConstBlock(val), on) 147 | self._mapped_cache = self._mapped_cache.update(vbn, ConstBlock(val)) 148 | 149 | def bunmap(self, bid): 150 | self.bmap(bid, 0) 151 | 152 | # create a new copy of non-deterministic disk state 153 | # note that this captures all the previous states 154 | def crash(self, mach): 155 | return self.__class__(mach, self._mapped, self._disk) 156 | 157 | 158 | # Transactional disk - writev is atomic and persistent (sequential). 159 | class TxnDisk(object): 160 | def __init__(self, mach, disk): 161 | self._mach = mach 162 | self._cache = disk 163 | self._disk = disk 164 | self._txn = None 165 | 166 | def begin_tx(self): 167 | assert self._txn is None 168 | self._txn = [] 169 | 170 | def write_tx(self, lba, data): 171 | # write_tx updates the cache 172 | self._cache = self._cache.update(lba, data) 173 | self._txn.append((lba, data)) 174 | 175 | def commit_tx(self): 176 | self.writev(self._txn) 177 | self._txn = None 178 | 179 | def writev(self, iov): 180 | on = self._mach.create_on([]) 181 | disk = self._disk 182 | for lba, data in iov: 183 | self._cache = self._cache.update(lba, data) 184 | self._disk = self._disk.update(lba, data, on) 185 | 186 | def read(self, bid): 187 | return self._cache(bid) 188 | 189 | def crash(self, mach): 190 | return self.__class__(mach, self._disk) 191 | 192 | def domain(self): 193 | return self._disk.domain() 194 | 195 | 196 | class MultiTxnDisk(object): 197 | def __init__(self, mach, disks): 198 | self._mach = mach 199 | self._caches = disks[::] 200 | self._disks = disks[::] 201 | self._txn = None 202 | 203 | def begin_tx(self): 204 | assert self._txn is None 205 | self._txn = [] 206 | 207 | def write_tx(self, dev, lba, data): 208 | self._caches[dev] = self._caches[dev].update(lba, data) 209 | self._txn.append((dev, lba, data)) 210 | 211 | def commit_tx(self): 212 | self.writev(self._txn) 213 | self._txn = None 214 | 215 | def writev(self, iov): 216 | on = self._mach.create_on([]) 217 | for dev, lba, data in iov: 218 | if isinstance(dev, int): 219 | self._caches[dev] = self._caches[dev].update(lba, data) 220 | self._disks[dev] = self._disks[dev].update(lba, data, on) 221 | else: 222 | for idev in range(len(self._disks)): 223 | self._caches[idev] = self._caches[idev].update(lba, data, dev==idev) 224 | self._disks[idev] = self._disks[idev].update(lba, data, on, dev==idev) 225 | 226 | def read(self, dev, bid): 227 | return self._caches[dev](bid) 228 | 229 | def crash(self, mach): 230 | return self.__class__(mach, self._disks) 231 | 232 | def domain(self, dev): 233 | return self._disks[dev].domain() 234 | 235 | def range(self, dev): 236 | return self._disks[dev].range() 237 | 238 | 239 | class Stat(object): 240 | def __init__(self, size, mtime, mode, nlink=0): 241 | self.size = size 242 | self.mtime = mtime 243 | self.mode = mode 244 | self.nlink = nlink 245 | 246 | @property 247 | def bsize(self): 248 | return Extract(63, 32, self.size) 249 | 250 | @property 251 | def fsize(self): 252 | return Extract(31, 0, self.size) 253 | 254 | def __str__(self): 255 | return "Stat(size=%s, mtime=%s, mode=%s, nlink=%s)" % (self.size, self.mtime, self.mode, self.nlink) 256 | 257 | def __getitem__(self, idx): 258 | if idx == 0: 259 | return self.size 260 | if idx == 1: 
261 | return self.mtime 262 | if idx == 2: 263 | return self.mode 264 | if idx == 3: 265 | return self.nlink 266 | raise IndexError("") 267 | 268 | def __eq__(self, other): 269 | return And(self.size == other.size, 270 | self.mtime == other.mtime, 271 | self.mode == other.mode, 272 | self.nlink == other.nlink) 273 | 274 | 275 | class InodeSpec(MultiTxnDisk): 276 | MAPPED = 0 277 | ATTRS = 1 278 | DATA = 2 279 | 280 | def set_iattr(self, ino, attr): 281 | data = ConstBlock(0) 282 | data[0] = attr[0] 283 | data[1] = attr[1] 284 | data[2] = attr[2] 285 | data[3] = attr[3] 286 | self.write_tx(self.ATTRS, ino, data) 287 | 288 | def get_iattr(self, ino): 289 | data = self._read(self.ATTRS, ino) 290 | return Stat(data[0], data[1], data[2], data[3]) 291 | 292 | def is_mapped(self, vbn): 293 | return self._read(self.MAPPED, vbn)[0] == 1 294 | 295 | def mappingi(self, vbn): 296 | return vbn 297 | 298 | def _read(self, *args, **kwargs): 299 | return super(InodeSpec, self).read(*args, **kwargs) 300 | 301 | def read(self, lbn): 302 | return self._read(self.DATA, lbn) 303 | 304 | def write(self, lbn, data): 305 | return self.write_tx(self.DATA, lbn, data) 306 | 307 | def bmap(self, vbn): 308 | if self.is_mapped(vbn): 309 | return vbn 310 | 311 | self.write(vbn, ConstBlock(0)) 312 | self.write_tx(self.MAPPED, vbn, ConstBlock(1)) 313 | 314 | return vbn 315 | 316 | def bunmap(self, vbn): 317 | self.write_tx(self.MAPPED, vbn, ConstBlock(0)) 318 | 319 | 320 | class RangeVirtualTxnDisk(MultiTxnDisk): 321 | MAPPED = 0 322 | DATA = 1 323 | 324 | def __init__(self, start, end, *args, **kwargs): 325 | self._start = start 326 | self._end = end 327 | super(RangeVirtualTxnDisk, self).__init__(*args, **kwargs) 328 | 329 | def is_mapped(self, vbn): 330 | # start <= vbn < end 331 | return And(ULE(self._start, vbn), ULT(vbn, self._end), self.read(self.MAPPED, vbn)[0] == 1) 332 | 333 | def readm(self, vbn): 334 | return If(self.is_mapped(vbn), self.read(self.DATA, vbn), ConstBlock(0)) 335 | 336 | def writem(self, vbn, data): 337 | self.write_tx(self.MAPPED, vbn, ConstBlock(1)) 338 | self.write_tx(self.DATA, vbn, data) 339 | 340 | def bmap(self, vbn): 341 | if self.is_mapped(vbn): 342 | return vbn 343 | 344 | self.write_tx(self.MAPPED, vbn, ConstBlock(1)) 345 | self.write_tx(self.DATA, vbn, ConstBlock(0)) 346 | 347 | def bunmap(self, vbn): 348 | self.write_tx(self.MAPPED, vbn, ConstBlock(0)) 349 | 350 | def crash(self, mach): 351 | return self.__class__(self._start, self._end, mach, self._disks) 352 | 353 | 354 | class SyncDisk: 355 | def __init__(self, mach, disk): 356 | self._mach = mach 357 | self._cache = disk 358 | self._disk = disk 359 | 360 | def write(self, bid, data): 361 | self._cache = self._cache.update(bid, data) 362 | on = self._mach.create_on([]) 363 | self._disk = self._disk.update(bid, data, on) 364 | 365 | def flush(self): 366 | pass 367 | 368 | def read(self, bid): 369 | return self._cache(bid) 370 | 371 | def crash(self, mach): 372 | return self.__class__(mach, self._disk) 373 | 374 | 375 | class BitmapSpec(object): 376 | def __init__(self, disk): 377 | self._disk = disk 378 | 379 | def is_set(self, bit): 380 | return self._disk.read(bit)[0] == 1 381 | 382 | def set_bit(self, bit): 383 | return self._disk.write(bit, ConstBlock(1)) 384 | 385 | def unset_bit(self, bit): 386 | return self._disk.write(bit, ConstBlock(0)) 387 | 388 | def crash(self, mach): 389 | return BitmapSpec(self._disk.crash(mach)) 390 | 391 | 392 | class InodePackSpec(object): 393 | def __init__(self, metadisk, datadisk): 394 | 
self._metadisk = metadisk 395 | self._datadisk = datadisk 396 | 397 | def read(self, bid): 398 | return None 399 | 400 | def set_mapping(self, ino, off, ptr, block=None): 401 | block = self._datadisk.read(ino) 402 | block[off] = ptr 403 | self._datadisk.write(ino, block) 404 | 405 | def get_mapping(self, ino, off, block=None): 406 | return self._datadisk.read(ino)[off] 407 | 408 | def set_iattr(self, ino, attr, block=None): 409 | data = ConstBlock(0) 410 | data[0] = attr[0] 411 | data[1] = attr[1] 412 | data[2] = attr[2] 413 | data[3] = attr[3] 414 | self._metadisk.write(ino, data) 415 | 416 | def get_iattr(self, ino, block=None): 417 | data = self._metadisk.read(ino) 418 | return Stat(data[0], data[1], data[2], data[3]) 419 | 420 | def crash(self, mach): 421 | return self.__class__(self._metadisk.crash(mach), self._datadisk.crash(mach)) 422 | 423 | 424 | class Allocator64(object): 425 | def __init__(self, _readfn, _start, _end): 426 | pass 427 | 428 | def alloc(self): 429 | return BitVec(fresh_name('alloc'), 64) 430 | 431 | 432 | class Allocator32(object): 433 | def __init__(self, _readfn, _start, _end): 434 | pass 435 | 436 | def alloc(self): 437 | return BitVec(fresh_name('alloc'), 32) 438 | 439 | -------------------------------------------------------------------------------- /yggdrasil/server.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import util 3 | import z3 4 | 5 | from solver_utils import write_cmd, read_cmd 6 | 7 | 8 | class Server(object): 9 | def __init__(self): 10 | self._s = z3.Solver() 11 | 12 | def _write(self, command): 13 | return write_cmd(sys.stdout, command) 14 | 15 | def _read(self): 16 | return read_cmd(sys.stdin) 17 | 18 | def run(self): 19 | while True: 20 | self.handle_cmd() 21 | 22 | def handle_cmd(self): 23 | cmd = self._read() 24 | if not cmd: 25 | sys.exit(0) 26 | try: 27 | self._write({'return': getattr(self, cmd['name'])(*cmd['args'], **cmd['kwargs'])}) 28 | except Exception, e: 29 | self._write({'exc': repr(e)}) 30 | 31 | def add(self, term): 32 | self._s.add(z3.parse_smt2_string(term)) 33 | 34 | def set(self, **kwargs): 35 | self._s.set(**{str(k): v for k, v in kwargs.items()}) 36 | 37 | def check(self): 38 | return str(self._s.check()) 39 | 40 | def push(self): 41 | return str(self._s.push()) 42 | 43 | def pop(self): 44 | return str(self._s.pop()) 45 | 46 | def model(self): 47 | return str(self._s.model()) 48 | 49 | def model_evaluate(self, term): 50 | model = self._s.model() 51 | 52 | for t in model.decls(): 53 | if str(t) == term: 54 | term = t() 55 | break 56 | return str(self._s.model().evaluate(term)) 57 | 58 | 59 | if __name__ == '__main__': 60 | Server().run() 61 | -------------------------------------------------------------------------------- /yggdrasil/solver.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | 4 | import util 5 | import z3 6 | import solver_utils as sutils 7 | 8 | 9 | CURRENT = os.path.dirname(os.path.realpath(__file__)) 10 | Z3_SERVER_FILE = os.path.join(CURRENT, "server.py") 11 | 12 | 13 | def to_smt2(*terms): 14 | s = z3.Solver() 15 | s.add(*terms) 16 | return s.to_smt2() 17 | 18 | 19 | class ModelProxy(object): 20 | def __init__(self, model_str, solver): 21 | self._model = model_str 22 | self._solver = solver 23 | 24 | def __str__(self): 25 | return self._model 26 | 27 | def __repr__(self): 28 | return self._model 29 | 30 | def evaluate(self, term): 31 | term = term.sexpr() 32 | return 
self._solver._call('model_evaluate', term) 33 | 34 | def eval(self, term): 35 | return self.evaluate(term) 36 | 37 | 38 | class Solver(object): 39 | def __init__(self): 40 | self._proc = subprocess.Popen(['python2', Z3_SERVER_FILE], 41 | stdout=subprocess.PIPE, 42 | stdin=subprocess.PIPE, 43 | # stderr=subprocess.PIPE 44 | ) 45 | 46 | def _write(self, command): 47 | sutils.write_cmd(self._proc.stdin, command) 48 | 49 | def _call(self, name, *args, **kwargs): 50 | self._write({'name': name, 'args': args, 'kwargs': kwargs}) 51 | res = self._read() 52 | if 'return' in res: 53 | return res['return'] 54 | if 'exc' in res: 55 | raise RuntimeError(res['exc']) 56 | 57 | def _read(self, **kwargs): 58 | return sutils.read_cmd(self._proc.stdout) 59 | 60 | def add(self, *terms): 61 | term = to_smt2(*terms) 62 | return self._call('add', term) 63 | 64 | def set(self, *args, **kwargs): 65 | return self._call('set', *args, **kwargs) 66 | 67 | def check(self): 68 | vals = {'sat': z3.sat, 'unsat': z3.unsat, 'unknown': z3.unknown} 69 | return vals.get(self._call('check')) 70 | 71 | def model(self): 72 | return ModelProxy(self._call('model'), self) 73 | 74 | def push(self): 75 | return self._call('push') 76 | 77 | def pop(self): 78 | return self._call('pop') 79 | 80 | 81 | if __name__ == '__main__': 82 | x = util.FreshBitVec('x', 32) 83 | y = util.FreshBitVec('y', 32) 84 | s = Solver() 85 | s.add(x == y) 86 | print s.check() 87 | model = s.model() 88 | print model 89 | print model.evaluate(x) 90 | # print s.model().evaluate(x) 91 | -------------------------------------------------------------------------------- /yggdrasil/solver_utils.py: -------------------------------------------------------------------------------- 1 | import math 2 | import json 3 | 4 | LEN_LEN = 8 5 | 6 | def write_cmd(stream, command): 7 | payload = json.dumps(command) 8 | assert math.log(len(payload), 10) < LEN_LEN, "payload length = {} to large".format(len(payload)) 9 | stream.write(str(len(payload)).rjust(LEN_LEN, '0')) 10 | stream.write(payload) 11 | stream.flush() 12 | 13 | 14 | def read(stream, count): 15 | v = stream.read(count) 16 | return v 17 | 18 | 19 | def read_cmd(stream): 20 | cmdlen = read(stream, LEN_LEN) 21 | if not cmdlen: 22 | return None 23 | data = read(stream, int(cmdlen)) 24 | try: 25 | return json.loads(data) 26 | except Exception, e: 27 | print data 28 | raise e 29 | 30 | -------------------------------------------------------------------------------- /yggdrasil/ufarray.py: -------------------------------------------------------------------------------- 1 | from z3 import * 2 | from util import * 3 | import copy 4 | import warnings 5 | 6 | 7 | # immutable string: an array of u64s (represented using UF) 8 | StringOffsetSort = SizeSort 9 | StringElementSort = BlockElementSort 10 | 11 | 12 | class String(object): 13 | def __init__(self, size, data): 14 | self._size = size 15 | self._data = data 16 | 17 | def __eq__(self, other): 18 | off = FreshSize('off', domain=StringOffsetSort) 19 | size = self.size() 20 | return And(size == other.size(), 21 | ForAll([off], Implies(ULT(off, size), self[off] == other[off]))) 22 | 23 | def __ne__(self, other): 24 | return Not(self == other) 25 | 26 | def size(self): 27 | return self._size 28 | 29 | def __getitem__(self, off): 30 | return self._data(off) 31 | 32 | def ite(self, other, cond): 33 | data = lambda off: If(cond, self[off], other[off]) 34 | return String(self._size, data) 35 | 36 | 37 | # mutable - easy to extract to C 38 | class Block(object): 39 | def __init__(self, 
fn): 40 | self._fn = fn 41 | 42 | def __getitem__(self, key): 43 | if type(key) is int: 44 | key = BitVecVal(key, BlockOffsetSort) 45 | 46 | assert key.size() == BlockOffsetSort.size() 47 | 48 | return self._fn(key) 49 | 50 | def get(self, bid): 51 | return self[bid] 52 | 53 | def set(self, bid, val): 54 | self[bid] = val 55 | 56 | def ite(self, other, cond): 57 | # If cond then self else other 58 | fn = lambda off, oldfn=self._fn: If(cond, 59 | oldfn(off), 60 | other._fn(off)) 61 | return Block(fn) 62 | 63 | def __setitem__(self, key, val): 64 | if type(key) is int: 65 | key = BitVecVal(key, BlockOffsetSort) 66 | if type(val) is int: 67 | val = BitVecVal(val, BlockElementSort) 68 | 69 | if val.size() < BlockElementSort.size(): 70 | warnings.warn("Block element %s (%s) smaller then expected (%s)." % 71 | (val, val.size(), BlockElementSort.size())) 72 | val = ZeroExt(BlockElementSort.size() - val.size(), val) 73 | 74 | assert key.size() == BlockOffsetSort.size() 75 | assert val.size() == BlockElementSort.size() 76 | 77 | self._fn = lambda off, oldfn=self._fn: If(off == key, val, oldfn(off)) 78 | 79 | def set(self, key, val): 80 | self[key] = val 81 | 82 | def getrange(self, start, end=None): 83 | if type(start) is int: 84 | start = BitVecVal(start, BlockOffsetSort.size()) 85 | else: 86 | assert start.size() == BlockOffsetSort.size() 87 | if end is None: 88 | end = BlockSize.size() / BlockElementSort.size() 89 | else: 90 | assert end.size() == BlockOffsetSort.size() 91 | size = If(ULE(start, end - 1), end - start, 0) 92 | zdiff = StringOffsetSort.size() - BlockOffsetSort.size() 93 | size = ZeroExt(zdiff, size) 94 | start = ZeroExt(zdiff, start) 95 | data = lambda off, fn=self._fn: If(ULT(off, size), fn(Extract(BlockOffsetSort.size() - 1, 0, off + start)), BitVecVal(0, StringElementSort.size())) 96 | return String(size, data) 97 | 98 | def setrange(self, start, s): 99 | if type(start) is int: 100 | start = BitVecVal(start, BlockOffsetSort.size()) 101 | else: 102 | assert start.size() == BlockOffsetSort.size() 103 | zdiff = StringOffsetSort.size() - BlockOffsetSort.size() 104 | size = s.size() 105 | self._fn = lambda off, oldfn=self._fn: If(And(ULE(start, off), ULT(ZeroExt(zdiff, off - start), size)), s[ZeroExt(zdiff, off - start)], oldfn(off)) 106 | 107 | def __eq__(self, other): 108 | off = Const(fresh_name('off'), BlockOffsetSort) 109 | return ForAll([off], self[off] == other[off]) 110 | 111 | def __ne__(self, other): 112 | return Not(self.__eq__(other)) 113 | 114 | def _print(self, num=None): 115 | if num is None: 116 | num = self.size 117 | for i in range(num): 118 | print simplify(self[i]) 119 | 120 | @property 121 | def size(self): 122 | return 2 ** BlockOffsetSort.size() 123 | 124 | 125 | # immutable 126 | class DiskArray(object): 127 | def __init__(self, fn, domain=SizeSort): 128 | self._domain = domain 129 | self._fn = fn 130 | 131 | def __call__(self, key): 132 | if type(key) is int: 133 | key = BitVecVal(key, self._domain) 134 | 135 | if key.size() > self._domain.size(): 136 | warnings.warn("Disk key %s (%s) larger then expected (%s)." 
% 137 | (key, key.size(), self._domain.size())) 138 | key = Extract(self._domain.size() - 1, 0, key) 139 | 140 | if key.size() != self._domain.size(): 141 | print "key.size() = {} != self._domain.size() = {}".format(key.size(), 142 | self._domain.size()) 143 | assert key.size() == self._domain.size() 144 | 145 | return Block(lambda off, fn=self._fn: fn(key, off)) 146 | 147 | def update(self, key, val, *guard): 148 | if type(key) is int: 149 | key = BitVecVal(key, self._domain) 150 | 151 | if key.size() > self._domain.size(): 152 | warnings.warn("Disk key %s (%s) larger then expected (%s)." % 153 | (key, key.size(), self._domain.size())) 154 | key = Extract(self._domain.size() - 1, 0, key) 155 | 156 | if key.size() != self._domain.size(): 157 | print "key.size() = {} != self._domain.size() = {}".format(key.size(), 158 | self._domain.size()) 159 | assert key.size() == self._domain.size() 160 | assert isinstance(val, Block) 161 | 162 | # filter out None 163 | guard = [g for g in guard if g is not None] 164 | # make a copy of the block; a shallow copy is fine 165 | # as _fn is the only field 166 | val = copy.copy(val) 167 | fn = lambda bid, off, oldfn=self._fn: If(And(bid == key, *guard), val[off], oldfn(bid, off)) 168 | return DiskArray(fn, domain=self._domain) 169 | 170 | def ite(self, other, cond): 171 | # If cond then self else other 172 | # NB: don't eval here to match our C code - the result is _mutable_ 173 | fn = lambda bid, off: If(cond, self._fn(bid, off), other._fn(bid, off)) 174 | return DiskArray(fn, domain=self._domain) 175 | 176 | def domain(self): 177 | return self._domain 178 | 179 | 180 | def FreshSize(prefix, domain=SizeSort): 181 | return Const(fresh_name(prefix), domain) 182 | 183 | 184 | def FreshDiskArray(prefix, domain=SizeSort): 185 | return DiskArray(Function(fresh_name(prefix), domain, BlockOffsetSort, BlockElementSort), domain=domain) 186 | 187 | 188 | def ConstDiskArray(blk, domain=SizeSort): 189 | return DiskArray(lambda bid, off: blk[off], domain=domain) 190 | 191 | 192 | def FreshBlock(prefix): 193 | return Block(Function(fresh_name(prefix), BlockOffsetSort, BlockElementSort)) 194 | 195 | 196 | def ConstBlock(v): 197 | if type(v) is int: 198 | v = BitVecVal(v, BlockElementSort.size()) 199 | 200 | assert v.size() == BlockElementSort.size() 201 | 202 | return Block(lambda off: v) 203 | 204 | 205 | def FreshString(prefix, size=None): 206 | if size is None: 207 | size = FreshSize(prefix + '.size', domain=StringOffsetSort) 208 | elif type(size) is int: 209 | size = BitVecVal(size, SizeSort.size()) 210 | data = Function(fresh_name(prefix + '.data'), StringOffsetSort, StringElementSort) 211 | return String(size, data) 212 | 213 | 214 | def ConstString(size, v): 215 | return String(size, lambda off: v) 216 | 217 | 218 | def EmptyString(): 219 | return ConstString(BitVecVal(0, StringOffsetSort.size()), 0) 220 | 221 | 222 | def tup_eq(t1, t2): 223 | res = BoolVal(True) 224 | for a, b in zip(t1, t2): 225 | res = And(a == b, res) 226 | return res 227 | 228 | 229 | # A wrapper around an uninterpreted function that supports easily updating it 230 | # at a particular point. 
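# Editor's illustrative sketch (not part of the original module), assuming the
# UFunction/FreshUFunction definitions that follow:
#
#   f = FreshUFunction('f', BitVecSort(64), BitVecSort(64))
#   g = f.update(3, BitVecVal(7, 64))   # g(3) == 7; g(k) == f(k) for every k != 3
#
# update() is functional: it leaves f untouched and returns a new wrapper whose
# backing lambda guards the old uninterpreted function with an If(...) on the
# updated key, which is what makes point-wise updates cheap to express.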
231 | class UFunction(object): 232 | def __init__(self, name, *args, **kwargs): 233 | self._name = name 234 | fn = kwargs.pop('fn', None) 235 | 236 | self._domain = args[:-1] 237 | self._range = args[-1] 238 | if fn: 239 | self._fn = fn 240 | else: 241 | self._fn = Function(name, *args) 242 | 243 | def __call__(self, *key): 244 | assert len(key) == len(self._domain) 245 | 246 | key = list(key) 247 | 248 | for n in range(len(key)): 249 | if isinstance(key[n], int): 250 | key[n] = BitVecVal(key[n], self._domain[n]) 251 | 252 | assert key[n].size() == self._domain[n].size() 253 | 254 | return self._fn(*key) 255 | 256 | def update(self, key, val, guard=True): 257 | if not isinstance(key, tuple) and not isinstance(key, list): 258 | key = [key] 259 | 260 | assert len(key) == len(self._domain) 261 | 262 | key = list(key) 263 | 264 | for n in range(len(key)): 265 | if isinstance(key[n], int): 266 | key[n] = BitVecVal(key[n], self._domain[n]) 267 | 268 | assert key[n].size() == self._domain[n].size() 269 | 270 | pfn = self._fn 271 | 272 | fn = lambda *args: If(And(guard, tup_eq(args, key)), val, pfn(*args)) 273 | 274 | args = list(self._domain) + [self._range] 275 | 276 | return UFunction(self._name, *args, fn=fn) 277 | 278 | 279 | def FreshUFunction(name, *args, **kwargs): 280 | return UFunction(fresh_name(name), *args, **kwargs) 281 | -------------------------------------------------------------------------------- /yggdrasil/util.py: -------------------------------------------------------------------------------- 1 | import z3 2 | 3 | from z3 import * 4 | import collections 5 | import os, sys, subprocess 6 | import types 7 | 8 | # Stupid thing required by a recent-ish z3 update 9 | Z3_LIBRARY_PATH = os.environ.get('Z3_LIBRARY_PATH') 10 | if Z3_LIBRARY_PATH: 11 | z3.init(Z3_LIBRARY_PATH) 12 | 13 | sys.setrecursionlimit(9999) 14 | 15 | 16 | def nop(*args, **kwargs): 17 | pass 18 | setattr(z3.AstRef, '__del__', nop) 19 | 20 | 21 | # block number (64 bits) 22 | SizeSort = BitVecSort(64) 23 | 24 | # block data 32Kib 25 | BlockSize = 32 * 1024 26 | LogBlockSize = BlockSize.bit_length() - 1 27 | BlockElementSort = BitVecSort(64) 28 | BlockOffsetSort = BitVecSort(LogBlockSize - (BlockElementSort.size().bit_length() - 1)) 29 | BlockSort = ArraySort(BitVecSort(LogBlockSize - 6), BitVecSort(64)) 30 | 31 | # disk: block number -> block data 32 | DevSort = BitVecSort(64) 33 | 34 | PartitionedSizeSort = BitVecSort(SizeSort.size() + DevSort.size()) 35 | 36 | # Zero = BitVecVal(0, BlockSort.size()) 37 | #zero = K(BitVecSort(9), BitVecVal(0, 64)) 38 | 39 | def FreshBitVec(name, size): 40 | return BitVec(fresh_name(name), size) 41 | 42 | 43 | def FreshBool(b): 44 | return Bool(fresh_name(b)) 45 | 46 | 47 | def If(cond, a, b): 48 | if hasattr(a, 'ite'): 49 | assert type(a) == type(b) 50 | return a.ite(b, cond) 51 | return z3.If(cond, a, b) 52 | 53 | 54 | def bbf(p, start, end): 55 | return Extract(end - 1, start, p) 56 | 57 | 58 | def Extend(x, size): 59 | return ZeroExt(size - x.size(), x) 60 | 61 | 62 | def BitwiseBitField(p, start, end): 63 | start = Extend(start, p.size()) 64 | end = Extend(end, p.size()) 65 | return p >> start & ((1 << (end - start)) - 1) 66 | 67 | 68 | def block_extend(d): 69 | return Store(zero, 0, d) 70 | # return ZeroExt(BlockSort.size() - d.size(), d) 71 | 72 | 73 | # Unsigned max 74 | def UMax(a, *args): 75 | if len(args) == 0: 76 | return a 77 | if len(args) == 1: 78 | b = args[0] 79 | return If(ULE(a, b), b, a) 80 | 81 | 82 | def UMin(a, *args): 83 | if len(args) == 0: 84 | return a 85 
| if len(args) == 1: 86 | b = args[0] 87 | return If(ULE(b, a), b, a) 88 | 89 | 90 | def fresh_name(name): 91 | if not hasattr(fresh_name, "idx"): 92 | fresh_name.idx = {} 93 | n = fresh_name.idx.get(name, 0) 94 | fresh_name.idx[name] = n + 1 95 | return name + "." + str(n) 96 | 97 | 98 | # def prove(claim, **keywords): 99 | # return solve(Not(claim), **keywords) 100 | 101 | 102 | def solve(*args, **keywords): 103 | s = Solver() 104 | s.set(**keywords) 105 | s.add(*args) 106 | if keywords.get('show', False): 107 | print(s) 108 | smt = os.getenv('SMT') 109 | if smt: 110 | PIPE = subprocess.PIPE 111 | p = subprocess.Popen(smt.split(), stdin=PIPE, stdout=PIPE, stderr=PIPE) 112 | stdout, stderr = p.communicate(s.to_smt2()) 113 | stdout = stdout.strip() 114 | if stdout == 'unsat': 115 | r = unsat 116 | else: 117 | r = unknown 118 | else: 119 | r = s.check() 120 | if r == unsat: 121 | print("no solution") 122 | elif r == unknown: 123 | print("failed to solve") 124 | try: 125 | print(s.model()) 126 | except Z3Exception: 127 | return 128 | else: 129 | return s.model() 130 | 131 | def pack_list(*p): 132 | return Concat(*p) 133 | 134 | 135 | def unpack_list(p, size): 136 | out = [] 137 | k = p.size() / size 138 | while k > 0: 139 | out.append(Extract(size * k - 1, size * (k - 1), p)) 140 | k -= 1 141 | 142 | return tuple(out) 143 | --------------------------------------------------------------------------------
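Editor's addendum (not part of the original tree): util.py's pack_list/unpack_list
form a simple round trip over Z3 bit-vectors of equal element size. A minimal
sketch, assuming the package is importable as yggdrasil.util on the Python path:

    from z3 import BitVec, prove
    from yggdrasil.util import pack_list, unpack_list

    a, b = BitVec('a', 32), BitVec('b', 32)
    p = pack_list(a, b)         # Concat(a, b): a 64-bit value, a in the high bits
    x, y = unpack_list(p, 32)   # chunks come back high-to-low, so x == a, y == b
    prove(x == a)               # z3 reports "proved"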