├── config_helper
│   └── __init__.py
├── pyreuse
│   ├── apputils
│   │   ├── __init__.py
│   │   ├── parseleveldboutput.py
│   │   └── fio.py
│   ├── sysutils
│   │   ├── __init__.py
│   │   ├── cgroup.py
│   │   ├── ftrace.py
│   │   ├── filefragparser.py
│   │   ├── blockclassifiers.py
│   │   ├── dumpe2fsparser.py
│   │   └── blocktrace.py
│   ├── __init__.py
│   ├── general
│   │   ├── __init__.py
│   │   ├── maketree.py
│   │   └── zipf.py
│   ├── fsutils
│   │   ├── __init__.py
│   │   ├── formatfs.py
│   │   └── ext4dumpextents.py
│   ├── macros.py
│   └── helpers.py
├── .gitignore
├── utilities
│   └── __init__.py
├── workrunner
│   ├── __init__.py
│   ├── nonblockingreader.py
│   ├── cpuhandler.py
│   ├── multiwriters.py
│   ├── workload.py
│   ├── filesystem.py
│   └── fshelper.py
├── media
│   └── zombie-curve.png
├── run_testclass.sh
├── tests
│   ├── testdata
│   │   ├── 64mbfile
│   │   │   └── subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772
│   │   │       ├── dumpe2fs.out
│   │   │       ├── extents.json
│   │   │       ├── lpn.count
│   │   │       ├── lpn_sem.out
│   │   │       ├── recorder.log
│   │   │       ├── stats.json
│   │   │       ├── app_duration.txt
│   │   │       ├── recorder.json
│   │   │       ├── accumulator_table.txt
│   │   │       ├── blkparse-output.txt
│   │   │       ├── extents.json.table
│   │   │       ├── ncq_depth_timeline.txt
│   │   │       ├── blkparse-output-mkfs.txt
│   │   │       ├── blkparse-events-for-ftlsim.txt
│   │   │       ├── blkparse-events-for-ftlsim-mkfs.txt
│   │   │       └── config.json
│   │   ├── blkparse-events-for-ftlsim.txt
│   │   └── sqlitewal-update
│   │       └── subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803
│   │           ├── config.json
│   │           └── blkparse-events-for-ftlsim-mkfs.txt
│   ├── __init__.py
│   ├── simulator_test.py
│   ├── config_test.py
│   ├── test_bitmap.py
│   ├── recorder_test.py
│   ├── test_workflow.py
│   ├── lrulist_test.py
│   ├── misc_test.py
│   └── test_demo.py
├── wiscsim
│   ├── __init__.py
│   ├── host.py
│   ├── ftlbuilder.py
│   ├── flash.py
│   ├── gc_analysis.py
│   ├── ftlsim_commons.py
│   ├── hostevent.py
│   ├── bitmap.py
│   ├── blkpool.py
│   ├── tagblockpool.py
│   ├── recorder.py
│   ├── devblockpool.py
│   └── simulator.py
├── sides
│   └── plot-zombie-curve
│       ├── README.md
│       └── plot-zombie.r
├── Makefile
├── setup.env.sh
├── commons.py
├── foreign
│   └── forcef2fsgc.c
├── workflow.py
└── README.md
/config_helper/__init__.py: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------------------------
/pyreuse/apputils/__init__.py: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------------------------
/pyreuse/sysutils/__init__.py: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------------------------
/.gitignore: -------------------------------------------------------------------------------- 1 | tmp 2 | *.pyc 3 | tags --------------------------------------------------------------------------------
/pyreuse/__init__.py: -------------------------------------------------------------------------------- 1 | from . import * 2 | --------------------------------------------------------------------------------
/pyreuse/general/__init__.py: -------------------------------------------------------------------------------- 1 | from .
import * 2 | -------------------------------------------------------------------------------- /utilities/__init__.py: -------------------------------------------------------------------------------- 1 | from utilities import utils 2 | -------------------------------------------------------------------------------- /workrunner/__init__.py: -------------------------------------------------------------------------------- 1 | import wlrunner 2 | import lbaworkloadgenerator 3 | -------------------------------------------------------------------------------- /pyreuse/fsutils/__init__.py: -------------------------------------------------------------------------------- 1 | from ext4dumpextents import dump_extents_of_a_file 2 | -------------------------------------------------------------------------------- /media/zombie-curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/junhe/wiscsee/HEAD/media/zombie-curve.png -------------------------------------------------------------------------------- /run_testclass.sh: -------------------------------------------------------------------------------- 1 | echo sudo python -m unittest -q $1 2 | sudo python -m unittest -q $1 3 | 4 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/dumpe2fs.out: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/extents.json: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/lpn.count: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/lpn_sem.out: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/recorder.log: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/stats.json: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/app_duration.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/recorder.json: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
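As a side note, the one-line pyreuse/fsutils/__init__.py above re-exports dump_extents_of_a_file, whose full definition appears later in this listing (pyreuse/fsutils/ext4dumpextents.py). A minimal usage sketch, assuming Python 2 (like the rest of the repo), root privileges, debugfs installed, and an ext4 device /dev/loop0 holding a file named datafile (the same placeholders used in ext4dumpextents.py's own example):

```python
# Sketch: dump and parse the ext4 extents of one file with pyreuse.
from pyreuse.fsutils import dump_extents_of_a_file
from pyreuse.fsutils.ext4dumpextents import parse_dump_extents_output

text = dump_extents_of_a_file('/dev/loop0', 'datafile')
for extent in parse_dump_extents_output(text):
    print extent['Logical_start'], extent['Physical_start'], extent['Length']
```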
/tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/accumulator_table.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/blkparse-output.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/extents.json.table: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/ncq_depth_timeline.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/blkparse-output-mkfs.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /wiscsim/__init__.py: -------------------------------------------------------------------------------- 1 | import simulator 2 | from utilities import utils 3 | import ssdframework 4 | import hostevent 5 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/blkparse-events-for-ftlsim.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/blkparse-events-for-ftlsim-mkfs.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/testdata/blkparse-events-for-ftlsim.txt: -------------------------------------------------------------------------------- 1 | 29162 D read 299008 4096 0.000000000 0 False 2 | 29162 C read 299008 4096 0.000223461 0.000223461 False 3 | -------------------------------------------------------------------------------- /pyreuse/macros.py: -------------------------------------------------------------------------------- 1 | BYTE, KB, MB, GB, TB = [2**(10*i) for i in range(5)] 2 | 3 | # unit is based on nanoseconds 4 | SEC, MILISEC, MICROSEC, NANOSEC = [ 1000**3, 1000**2, 1000, 1 ] 5 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import config_test 2 | import flashcontroller_test 3 | import lrulist_test 4 | import misc_test 5 | import recorder_test 6 | import simulator_test 7 | import test_dftldes 8 | import test_bitmap 9 | -------------------------------------------------------------------------------- /sides/plot-zombie-curve/README.md: -------------------------------------------------------------------------------- 1 | To run, you need to have R installed first. 
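The script also loads the ggplot2, jsonlite, reshape2, and plyr R packages, so install them first, e.g. with `install.packages(c("ggplot2", "jsonlite", "reshape2", "plyr"))` in an R session. It reads its input from a recorder.json file in this directory.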
2 | 3 | Then do the following in this directory: 4 | 5 | ``` 6 | $ Rscript ./plot-zombie.r 7 | ``` 8 | 9 | The step above will generate a file "plot.pdf", which is the zombie curve. 10 | --------------------------------------------------------------------------------
/Makefile: -------------------------------------------------------------------------------- 1 | test_all: 2 | sudo python -m unittest discover -s tests -v -p '*test*.py' > test-log 3 | 4 | run_demo: 5 | ./run_testclass.sh tests.test_demo 6 | 7 | setup: 8 | ./setup.env.sh 9 | 10 | f2fsgc: 11 | mkdir -p bin 12 | cd ./foreign && gcc -o forcef2fsgc forcef2fsgc.c && mv forcef2fsgc ../bin/ 13 | 14 | --------------------------------------------------------------------------------
/setup.env.sh: -------------------------------------------------------------------------------- 1 | # Run this script from the repository root directory 2 | sudo apt-get update 3 | sudo apt-get install -y btrfs-tools f2fs-tools 4 | sudo apt-get install -y python-bitarray 5 | sudo apt-get install -y blktrace 6 | sudo apt-get install -y xfsprogs 7 | sudo apt-get install -y python-dev 8 | sudo apt-get install -y python-pip 9 | 10 | sudo pip install bidict 11 | sudo pip install simpy 12 | 13 | make f2fsgc 14 | 15 | --------------------------------------------------------------------------------
/pyreuse/general/maketree.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def mklevel(cur_level, max_level, dir_width, prefix): 5 | if cur_level == max_level: 6 | return 7 | 8 | # Create directory 9 | os.makedirs(prefix) 10 | # print prefix 11 | 12 | for i in range(dir_width): 13 | mklevel(cur_level = cur_level + 1, 14 | max_level = max_level, 15 | dir_width = dir_width, 16 | prefix = os.path.join(prefix, str(i))) 17 | 18 | 19 | def main(): 20 | mklevel(cur_level=0, max_level=3, dir_width=3, prefix='./new4') 21 | 22 | if __name__ == '__main__': 23 | main() 24 | 25 | 26 | --------------------------------------------------------------------------------
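The next file, zipf.py, supplies a skewed random-number generator. A quick usage sketch (Python 2, since the class relies on the reduce builtin; the number of values and the skew parameter here are illustrative):

```python
# Sketch: draw Zipf-distributed indexes in [0, n-1]; small indexes dominate.
from pyreuse.general.zipf import ZipfGenerator

gen = ZipfGenerator(n=1000, alpha=1.0)
samples = [gen.next() for _ in range(5)]
print samples
```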
/pyreuse/general/zipf.py: -------------------------------------------------------------------------------- 1 | import random 2 | import bisect 3 | import math 4 | 5 | 6 | class ZipfGenerator: 7 | """ 8 | from 9 | http://stackoverflow.com/questions/1366984/generate-random-numbers-distributed-by-zipf 10 | """ 11 | def __init__(self, n, alpha): 12 | """ 13 | Generate numbers up to n 14 | alpha can be 0.x, or larger. Smaller -> more uniform 15 | """ 16 | # Calculate Zeta values from 1 to n: 17 | tmp = [1. / (math.pow(float(i), alpha)) for i in range(1, n+1)] 18 | zeta = reduce(lambda sums, x: sums + [sums[-1] + x], tmp, [0]) 19 | 20 | # Store the translation map: 21 | self.distMap = [x / zeta[-1] for x in zeta] 22 | 23 | def next(self): 24 | # Take a uniform 0-1 pseudo-random value: 25 | u = random.random() 26 | 27 | # Translate the Zipf variable: 28 | return bisect.bisect(self.distMap, u) - 1 29 | 30 | --------------------------------------------------------------------------------
/wiscsim/host.py: -------------------------------------------------------------------------------- 1 | from commons import * 2 | from ftlsim_commons import * 3 | import hostevent 4 | 5 | 6 | class Host(object): 7 | def __init__(self, conf, simpy_env, event_iter): 8 | self.conf = conf 9 | self.env = simpy_env 10 | self.event_iter = event_iter 11 | 12 | self._ncq = NCQSingleQueue( 13 | ncq_depth = self.conf['SSDFramework']['ncq_depth'], 14 | simpy_env = self.env) 15 | 16 | def get_ncq(self): 17 | return self._ncq 18 | 19 | def _process(self): 20 | for event in self.event_iter: 21 | if isinstance(event, hostevent.Event) and event.offset < 0: 22 | # due to padding, offsets at the disk head can be negative; skip them 23 | continue 24 | 25 | if event.action == 'D': 26 | yield self._ncq.queue.put(event) 27 | 28 | def run(self): 29 | yield self.env.process(self._process()) 30 | yield self._ncq.queue.put(hostevent.ControlEvent(OP_SHUT_SSD)) 31 | 32 | 33 | --------------------------------------------------------------------------------
/workrunner/nonblockingreader.py: -------------------------------------------------------------------------------- 1 | import sys, threading, Queue 2 | 3 | def enqueue_lines(f, line_queue): 4 | for line in iter(f.readline, b''): 5 | line_queue.put(line) 6 | 7 | class NonBlockingReader(object): 8 | def __init__(self, file_path): 9 | """Note that the thread is started once the instance is created""" 10 | self.file_path = file_path 11 | self.f = open(file_path, 'r') 12 | self.q = Queue.Queue() 13 | self.t = threading.Thread(target=enqueue_lines, 14 | args=(self.f, self.q)) 15 | self.t.daemon = True # thread dies with the program 16 | self.t.start() 17 | 18 | def readline(self): 19 | """Return None if there is nothing to read""" 20 | try: 21 | line = self.q.get_nowait() 22 | except Queue.Empty: 23 | return None 24 | else: 25 | return line 26 | 27 | 28 | if __name__ == '__main__': 29 | nb_reader = NonBlockingReader("/sys/kernel/debug/tracing/trace_pipe") 30 | 31 | while True: 32 | line = nb_reader.readline() 33 | print line, 34 | 35 | 36 | --------------------------------------------------------------------------------
/pyreuse/fsutils/formatfs.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from pyreuse.helpers import shcmd, prepare_dir 4 | 5 | def create_fs(dev, mntpoint, fstype): 6 | shcmd("sudo chmod 777 -R {}".format(mntpoint)) 7 | 8 | if fstype == 'ext4': 9 | shcmd("sudo mkfs.ext4 {}".format(dev)) 10 | elif fstype == 'ext3': 11 | shcmd("sudo mkfs.ext3 {}".format(dev)) 12 | else: 13 | raise NotImplementedError('{} not supported yet'.format(fstype)) 14 | 15 | shcmd("sudo mount {dev} {mnt}".format(dev = dev, mnt = mntpoint)) 16 | 17 | def register_fstab(dev, mntpoint, fstype): 18 | line = "{dev} {mntpoint} {fstype} defaults 0 0"\ 19 | .format(dev = dev, mntpoint = mntpoint, fstype = fstype) 20 | 21 | with open('/etc/fstab', 'a') as f: 22 | f.write(line) 23 | 24 | def format_fs(dev, mntpoint, fstype): 25 | """ 26 | It will format, mount the file system.
Then it will register the file 27 | system in the /etc/fstab. 28 | """ 29 | prepare_dir(mntpoint) 30 | create_fs(dev, mntpoint, fstype) 31 | register_fstab(dev, mntpoint, fstype) 32 | 33 | 34 | -------------------------------------------------------------------------------- /tests/simulator_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import wiscsim 3 | from wiscsim.ftlsim_commons import * 4 | 5 | 6 | class TestNCQSingleQueue(unittest.TestCase): 7 | def test_holding_slots(self): 8 | env = simpy.Environment() 9 | ncq = NCQSingleQueue(2, env) 10 | 11 | env.process(self.process(env, ncq)) 12 | env.run() 13 | 14 | def process(self, env, ncq): 15 | held_slot_reqs = yield env.process(ncq.hold_all_slots()) 16 | ncq.release_all_slots(held_slot_reqs) 17 | 18 | 19 | class TestNCQSingleQueueWithWaitTime(unittest.TestCase): 20 | def test_holding_slots(self): 21 | env = simpy.Environment() 22 | ncq = NCQSingleQueue(2, env) 23 | 24 | env.process(self.main_proc(env, ncq)) 25 | env.run() 26 | 27 | def main_proc(self, env, ncq): 28 | env.process(self.use_one_slot(env, ncq)) 29 | yield env.process(self.wait_all(env, ncq)) 30 | 31 | self.assertEqual(env.now, 5) 32 | 33 | def wait_all(self, env, ncq): 34 | held_slot_reqs = yield env.process(ncq.hold_all_slots()) 35 | self.assertEqual(env.now, 5) 36 | ncq.release_all_slots(held_slot_reqs) 37 | 38 | def use_one_slot(self, env, ncq): 39 | req = ncq.slots.request() 40 | yield req 41 | 42 | yield env.timeout(5) 43 | 44 | ncq.slots.release(req) 45 | 46 | 47 | def main(): 48 | unittest.main() 49 | 50 | if __name__ == '__main__': 51 | main() 52 | 53 | 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /pyreuse/sysutils/cgroup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | from pyreuse.helpers import * 5 | 6 | class Cgroup(object): 7 | def __init__(self, name, subs): 8 | self.name = name 9 | self.subs = subs 10 | 11 | shcmd('cgcreate -g {subs}:{name}'.format(subs=subs, name=name)) 12 | 13 | def set_item(self, sub, item, value): 14 | """ 15 | Example: sub='memory', item='memory.limit_in_bytes' 16 | echo 3221225472 > memory/confine/memory.limit_in_bytes 17 | """ 18 | self._write(sub, item, value) 19 | 20 | ret_value = self._read(sub, item) 21 | 22 | if ret_value != str(value): 23 | print 'Warning:', ret_value, '!=', value 24 | 25 | def get_item(self, sub, item): 26 | return self._read(sub, item) 27 | 28 | def execute(self, cmd): 29 | cg_cmd = ['cgexec', 30 | '-g', '{subs}:{name}'.format(subs=self.subs, name=self.name), 31 | '--sticky'] 32 | cg_cmd += cmd 33 | print cg_cmd 34 | p = subprocess.Popen(cg_cmd) 35 | 36 | return p 37 | 38 | def _write(self, sub, item, value): 39 | path = self._path(sub, item) 40 | with open(path, 'w') as f: 41 | f.write(str(value)) 42 | 43 | def _read(self, sub, item): 44 | path = self._path(sub, item) 45 | with open(path, 'r') as f: 46 | value = f.read() 47 | 48 | return value.strip() 49 | 50 | def _path(self, sub, item): 51 | path = os.path.join('/sys/fs/cgroup', sub, self.name, item) 52 | return path 53 | 54 | 55 | -------------------------------------------------------------------------------- /tests/config_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import config 4 | 5 | class TestConfig(unittest.TestCase): 6 | def test_basic(self): 7 | conf = 
config.Config({"para1": "value1"}) 8 | 9 | self.assertDictEqual({"para1": "value1"}, conf) 10 | 11 | def test_default(self): 12 | conf = config.Config() 13 | 14 | self.assertIn('workload_class', conf) 15 | self.assertIn('expname', conf) 16 | 17 | def test_sec_translation(self): 18 | conf = config.Config() 19 | 20 | pagesize = conf['flash_page_size'] 21 | secsize = conf['sector_size'] 22 | page, cnt = conf.sec_ext_to_page_ext(pagesize*3/secsize, 23 | pagesize*2/secsize) 24 | 25 | self.assertEqual(page, 3) 26 | self.assertEqual(cnt, 2) 27 | 28 | def test_offset_size_translation(self): 29 | conf = config.Config() 30 | 31 | secsize = conf['sector_size'] 32 | sec, count = conf.off_size_to_sec_count( 33 | offset = secsize * 10, 34 | size = secsize * 31) 35 | 36 | self.assertEqual(sec, 10) 37 | self.assertEqual(count, 31) 38 | 39 | class TestConfigNewFlash(unittest.TestCase): 40 | def test_npages(self): 41 | conf = config.ConfigNewFlash() 42 | 43 | npages1 = conf.total_num_pages() 44 | npages2 = conf['flash_config']['n_pages_per_block'] \ 45 | * conf.n_blocks_per_dev 46 | print 'npages1', npages1 47 | print 'npages2', npages2 48 | self.assertEqual(npages1, npages2) 49 | 50 | 51 | if __name__ == '__main__': 52 | unittest.main() 53 | 54 | -------------------------------------------------------------------------------- /sides/plot-zombie-curve/plot-zombie.r: -------------------------------------------------------------------------------- 1 | require("ggplot2") 2 | require("jsonlite") 3 | require("reshape2") 4 | require("plyr") 5 | 6 | GB = 2^30 7 | blocksize = 2^20 8 | 9 | load_data <- function(file_path) { 10 | json_data = fromJSON(txt=file_path) 11 | return(json_data[['ftl_func_valid_ratios']]) 12 | } 13 | 14 | extract_one_snapshot <- function(snapshots) { 15 | # get only the last snapshot 16 | dd = tail(snapshots, 1) 17 | 18 | dd = melt(as.matrix(dd)) 19 | names(dd) = c('snapshot_id', 'valid_ratio', 'count') 20 | dd = subset(dd, is.na(count) == FALSE) 21 | dd$snapshot_id = NULL 22 | 23 | return(dd) 24 | } 25 | 26 | organize_data <- function(d) { 27 | d = arrange(d, desc(valid_ratio)) 28 | d = transform(d, seg_end = cumsum(count)) 29 | d = transform(d, seg_start = seg_end - count) 30 | d = melt(d, 31 | measure = c('seg_start', 'seg_end'), value.name = 'blocknum') 32 | d = arrange(d, desc(valid_ratio)) 33 | 34 | d = transform(d, block_location = (as.numeric(blocknum)/GB) * as.numeric(blocksize)) 35 | d = subset(d, valid_ratio != 0) 36 | 37 | return(d) 38 | } 39 | 40 | plot <- function(d) { 41 | p = ggplot(d, aes(x = block_location, y = valid_ratio)) + 42 | geom_line() + 43 | ylab('Valid Ratio') + 44 | xlab('Cumulative Block Size (GB)') 45 | print(p) 46 | 47 | ggsave("plot.pdf", plot = p, height = 4, width = 4) 48 | } 49 | 50 | 51 | main <- function() { 52 | print("Hello") 53 | 54 | snapshots = load_data("./recorder.json") 55 | d = extract_one_snapshot(snapshots) 56 | d = organize_data(d) 57 | plot(d) 58 | } 59 | 60 | main() 61 | 62 | -------------------------------------------------------------------------------- /commons.py: -------------------------------------------------------------------------------- 1 | # All the constants here are meant to be used by 2 | # from commons import * 3 | 4 | PAGE_PROGRAMMED, PAGE_ERASED = 'PAGE_PROGRAMMED', 'PAGE_ERASED' 5 | 6 | BYTE, KB, MB, GB, TB = [2**(10*i) for i in range(5)] 7 | 8 | MILLION = 10**6 9 | 10 | # unit is based on nanoseconds 11 | SEC, MILISEC, MICROSEC, NANOSEC = [ 1000**3, 1000**2, 1000, 1 ] 12 | # SEC, MILISEC, MICROSEC, NANOSEC = [ 1.0, 
0.001, 0.000001, 0.000000001 ] 13 | 14 | OP_READ, OP_WRITE, OP_ERASE = 'OP_READ', 'OP_WRITE', 'OP_ERASE' 15 | OP_DISCARD = 'OP_DISCARD' 16 | OP_REC_TIMESTAMP = 'OP_REC_TIMESTAMP' 17 | OP_BARRIER = 'OP_BARRIER' 18 | OP_CLEAN = 'OP_CLEAN' 19 | OP_ENABLE_RECORDER = 'OP_ENABLE_RECORDER' 20 | OP_DISABLE_RECORDER = 'OP_DISABLE_RECORDER' 21 | OP_SHUT_SSD = 'OP_SHUT_SSD' 22 | OP_END_SSD_PROCESS = 'OP_END_SSD_PROCESS' 23 | OP_WORKLOADSTART = 'OP_WORKLOADSTART' 24 | OP_DROPCACHE = 'OP_DROPCACHE' 25 | OP_FALLOCATE = 'OP_FALLOCATE' 26 | OP_ARG_KEEPSIZE = 'OP_ARG_KEEPSIZE' 27 | OP_ARG_NOTKEEPSIZE = 'OP_ARG_NOTKEEPSIZE' 28 | OP_FSYNC = 'OP_FSYNC' 29 | OP_FDATASYNC = 'OP_FDATASYNC' 30 | OP_FLUSH_TRANS_CACHE = 'OP_FLUSH_TRANS_CACHE' 31 | OP_DROP_TRANS_CACHE = 'OP_DROP_TRANS_CACHE' 32 | OP_PURGE_TRANS_CACHE = 'OP_PURGE_TRANS_CACHE' 33 | OP_NOOP = 'OP_NOOP' 34 | OP_CALC_GC_DURATION = 'OP_CALC_GC_DURATION' 35 | OP_REC_FLASH_OP_CNT = 'OP_REC_FLASH_OP_CNT' 36 | OP_REC_FOREGROUND_OP_CNT = 'OP_REC_FOREGROUND_OP_CNT' 37 | OP_REC_CACHE_HITMISS = 'OP_REC_CACHE_HITMISS' 38 | OP_NON_MERGE_CLEAN = 'OP_NON_MERGE_CLEAN' 39 | OP_CALC_NON_MERGE_GC_DURATION = 'OP_CALC_NON_MERGE_GC_DURATION' 40 | OP_REC_BW = 'OP_REC_BW' 41 | 42 | TAG_BACKGROUND = "BACKGROUND" 43 | TAG_FOREGROUND = "FOREGROUND" 44 | 45 | TESTALL = False 46 | 47 | F2FS_IPU_DISABLE = 0 48 | F2FS_IPU_FORCE = 1 << 0 49 | F2FS_IPU_SSR = 1 << 1 50 | F2FS_IPU_UTIL = 1 << 2 51 | F2FS_IPU_SSR_UTIL = 1 << 3 52 | F2FS_IPU_FSYNC = 1 << 4 53 | 54 | --------------------------------------------------------------------------------
/foreign/forcef2fsgc.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <stdlib.h> 3 | #include <string.h> 4 | #include <fcntl.h> 5 | #include <unistd.h> 6 | #include <sys/types.h> 7 | #include <sys/stat.h> 8 | 9 | #include <sys/ioctl.h> 10 | 11 | #define F2FS_IOCTL_MAGIC 0xf5 12 | #define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1) 13 | #define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2) 14 | #define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3) 15 | #define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4) 16 | #define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5) 17 | #define F2FS_IOC_GARBAGE_COLLECT _IO(F2FS_IOCTL_MAGIC, 6) 18 | #define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7) 19 | #define F2FS_IOC_DEFRAGMENT _IO(F2FS_IOCTL_MAGIC, 8) 20 | 21 | int main(int argc, char** argv) 22 | { 23 | char *devpath; 24 | int fd; 25 | int ret; 26 | int status; 27 | int arg; 28 | int n, i; 29 | int bypass_cnt_check; 30 | 31 | if (argc != 4) { 32 | printf("Usage: %s mount-point sync n\n", argv[0]); 33 | printf("sync is the third arg passed to ioctl. " 34 | "Generally, sync=1 implies foreground gc. " 35 | "sync=0 implies background gc.\n"); 36 | exit(1); 37 | } 38 | 39 | devpath = argv[1]; 40 | arg = atoi(argv[2]); 41 | n = atoi(argv[3]); 42 | printf("arg:%d\n", arg); 43 | 44 | if (n == -1) { 45 | bypass_cnt_check = 1; 46 | } else { 47 | bypass_cnt_check = 0; 48 | } 49 | 50 | fd = open(devpath, 0); 51 | if (fd == -1) { 52 | perror("open file error"); 53 | exit(1); 54 | } 55 | 56 | for (i = 0; bypass_cnt_check || i < n; i++) { 57 | ret = ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &arg); 58 | if (ret == -1) { 59 | perror("ioctl error"); 60 | break; 61 | } 62 | } 63 | 64 | printf("ioctl ret: %d. finished: %d\n", ret, i); 65 | 66 | close(fd); 67 | 68 | if (ret == -1) { 69 | return(1); 70 | } else { 71 | return(0); 72 | } 73 | } 74 | 75 | --------------------------------------------------------------------------------
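The Makefile target f2fsgc shown earlier installs the tool above as bin/forcef2fsgc, which takes mount-point, sync, and n arguments per its usage message. A hedged sketch of driving it from Python with the repo's shcmd helper (the mount point and the count of 8 GC rounds are placeholders):

```python
# Sketch: force 8 rounds of foreground F2FS GC (sync=1) on a mounted volume.
from pyreuse.helpers import shcmd

shcmd('sudo ./bin/forcef2fsgc /mnt/fsonloop 1 8')
```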
finished: %d\n", ret, i); 65 | 66 | close(fd); 67 | 68 | if (ret == -1) { 69 | return(1); 70 | } else { 71 | return(0); 72 | } 73 | } 74 | 75 | -------------------------------------------------------------------------------- /pyreuse/apputils/parseleveldboutput.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | 4 | from pyreuse.helpers import table_to_str 5 | 6 | """ 7 | output: 8 | 9 | benchname op_duration bw keysize valuesize entries rawsize filesize 10 | """ 11 | 12 | def parse_metadata(lines): 13 | meta = {} 14 | for line in lines: 15 | if line.startswith('CPUCache'): 16 | meta['CPUCache'] = line.split()[1] 17 | elif line.startswith('Keys'): 18 | meta['Keys'] = line.split()[1] 19 | elif line.startswith('Values'): 20 | meta['Values'] = line.split()[1] 21 | meta['ValuesCompressed'] = line.split()[4].strip('(') 22 | elif line.startswith('Entries'): 23 | meta['Entries'] = line.split()[1] 24 | elif line.startswith('RawSize'): 25 | meta['RawSize'] = line.split()[1] 26 | elif line.startswith('FileSize'): 27 | meta['FileSize'] = line.split()[1] 28 | 29 | return meta 30 | 31 | 32 | def parse_benchresult_line(line): 33 | if not 'micros/op' in line: 34 | return None 35 | 36 | d = {} 37 | if 'MB/s' in line: 38 | mo = re.search(r'(\w+)\s*:\s*(\S+) micros/op;\s*(\S+) MB/s', line) 39 | d['bw'] = mo.group(3) 40 | else: 41 | mo = re.search(r'(\w+)\s*:\s*(\S+) micros/op;', line) 42 | d['bw'] = 'NA' 43 | 44 | d['benchname'] = mo.group(1) 45 | d['op_duration'] = mo.group(2) 46 | 47 | return d 48 | 49 | 50 | def parse_benchresults(lines): 51 | table = [] 52 | for line in lines: 53 | d = parse_benchresult_line(line) 54 | if d is not None: 55 | table.append(d) 56 | return table 57 | 58 | 59 | def parse_file_text(text): 60 | parts = text.split('------------------------------------------------') 61 | meta = parse_metadata(parts[0].split('\n')) 62 | table = parse_benchresults(parts[1].split('\n')) 63 | tablestr = table_to_str(table, adddic=meta, width=12) 64 | return tablestr 65 | 66 | def parse_file(filepath): 67 | with open(filepath, 'r') as f: 68 | text = f.read() 69 | tablestr = parse_file_text(text) 70 | return tablestr 71 | 72 | 73 | -------------------------------------------------------------------------------- /workflow.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | from config import WLRUNNER, LBAGENERATOR, LBAMULTIPROC 5 | from commons import * 6 | from utilities.utils import * 7 | import wiscsim 8 | from wiscsim.simulator import create_simulator 9 | import workrunner 10 | 11 | 12 | def run_workflow(conf): 13 | wf = Workflow(conf) 14 | wf.run() 15 | 16 | 17 | class Workflow(object): 18 | def __init__(self, conf): 19 | self.conf = conf 20 | 21 | def run(self): 22 | self._save_conf() 23 | event_iter = self._run_workload() 24 | self._run_simulator(event_iter) 25 | 26 | def run_simulator(self, event_iter): 27 | self._save_conf() 28 | return self._run_simulator(event_iter) 29 | 30 | def run_workload(self): 31 | self._save_conf() 32 | return self._run_workload() 33 | 34 | def _save_conf(self): 35 | confpath = os.path.join(self.conf['result_dir'], 'config.json') 36 | prepare_dir_for_path(confpath) 37 | self.conf.dump_to_file(confpath) 38 | 39 | def _run_workload(self): 40 | workload_src = self.conf['workload_src'] 41 | if workload_src == WLRUNNER: 42 | runner = workrunner.wlrunner.WorkloadRunner(self.conf) 43 | event_iter = runner.run() 44 | elif workload_src == LBAGENERATOR: 45 
/workflow.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | from config import WLRUNNER, LBAGENERATOR, LBAMULTIPROC 5 | from commons import * 6 | from utilities.utils import * 7 | import wiscsim 8 | from wiscsim.simulator import create_simulator 9 | import workrunner 10 | 11 | 12 | def run_workflow(conf): 13 | wf = Workflow(conf) 14 | wf.run() 15 | 16 | 17 | class Workflow(object): 18 | def __init__(self, conf): 19 | self.conf = conf 20 | 21 | def run(self): 22 | self._save_conf() 23 | event_iter = self._run_workload() 24 | self._run_simulator(event_iter) 25 | 26 | def run_simulator(self, event_iter): 27 | self._save_conf() 28 | return self._run_simulator(event_iter) 29 | 30 | def run_workload(self): 31 | self._save_conf() 32 | return self._run_workload() 33 | 34 | def _save_conf(self): 35 | confpath = os.path.join(self.conf['result_dir'], 'config.json') 36 | prepare_dir_for_path(confpath) 37 | self.conf.dump_to_file(confpath) 38 | 39 | def _run_workload(self): 40 | workload_src = self.conf['workload_src'] 41 | if workload_src == WLRUNNER: 42 | runner = workrunner.wlrunner.WorkloadRunner(self.conf) 43 | event_iter = runner.run() 44 | elif workload_src == LBAGENERATOR: 45 | classname = self.conf['lba_workload_class'] 46 | cls = eval("workrunner.lbaworkloadgenerator.{}".format(classname)) 47 | lbagen = cls(self.conf) 48 | event_iter = lbagen 49 | elif workload_src == LBAMULTIPROC: 50 | classname = self.conf['lba_workload_class'] 51 | cls = eval("workrunner.lbaworkloadgenerator.{}".format(classname)) 52 | lbagen = cls(self.conf) 53 | event_iter = lbagen.get_iter_list() 54 | else: 55 | raise RuntimeError("{} is not a valid workload source"\ 56 | .format(workload_src)) 57 | 58 | return event_iter 59 | 60 | def _run_simulator(self, event_iter): 61 | if self.conf['enable_simulation'] is not True: 62 | return 63 | 64 | simulator = create_simulator(self.conf['simulator_class'], self.conf, 65 | event_iter ) 66 | simulator.run() 67 | 68 | 69 | --------------------------------------------------------------------------------
/wiscsim/ftlbuilder.py: -------------------------------------------------------------------------------- 1 | import config 2 | import flash 3 | import recorder 4 | 5 | class FtlBuilder(object): 6 | def __init__(self, confobj, recorderobj, flashobj): 7 | if not isinstance(confobj, config.Config): 8 | raise TypeError('confobj is not of type config.Config, it is {}'. 9 | format(type(confobj).__name__)) 10 | if not isinstance(recorderobj, recorder.Recorder): 11 | raise TypeError('recorder is not of type recorder.Recorder, ' 12 | 'it is {}'.format(type(recorderobj).__name__)) 13 | if not isinstance(flashobj, flash.Flash): 14 | raise TypeError('flash is not of type flash.Flash, it is {}'. 15 | format(type(flashobj).__name__)) 16 | 17 | self.conf = confobj 18 | self.recorder = recorderobj 19 | self.flash = flashobj 20 | 21 | if self.conf['workload_src'] == config.LBAGENERATOR: 22 | self.recorder.enable() 23 | elif self.conf['workload_src'] == config.WLRUNNER: 24 | self.recorder.disable() 25 | else: 26 | raise RuntimeError("workload_src:{} is not supported".format( 27 | self.conf['workload_src'])) 28 | 29 | def lba_read(self, page_num): 30 | raise NotImplementedError 31 | 32 | def lba_write(self, page_num): 33 | raise NotImplementedError 34 | 35 | def lba_discard(self, page_num): 36 | raise NotImplementedError 37 | 38 | def sec_read(self, sector, count): 39 | raise NotImplementedError 40 | 41 | def sec_write(self, sector, count, data): 42 | raise NotImplementedError 43 | 44 | def sec_discard(self, sector, count): 45 | raise NotImplementedError 46 | 47 | def debug_info(self): 48 | raise NotImplementedError 49 | 50 | def enable_recording(self): 51 | self.recorder.enable() 52 | 53 | def disable_recording(self): 54 | self.recorder.disable() 55 | 56 | def pre_workload(self): 57 | """ 58 | This will be called right before the workload to be tested. 59 | It is after mounting and aging.
60 | """ 61 | raise NotImplementedError 62 | 63 | def post_processing(self): 64 | raise NotImplementedError 65 | 66 | def get_type(self): 67 | return "FtlBuilder" 68 | 69 | 70 | -------------------------------------------------------------------------------- /pyreuse/sysutils/ftrace.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import multiprocessing 3 | import os 4 | import time 5 | 6 | from pyreuse.helpers import shcmd, cd 7 | 8 | class Ftrace(object): 9 | def __init__(self): 10 | self.rootdir = "/sys/kernel/debug/tracing" 11 | 12 | def write_file(self, filename, msg): 13 | with cd(self.rootdir): 14 | with open(filename, 'w') as f: 15 | print 'writing "{}" to {}'.format(msg, filename) 16 | f.write(msg) 17 | f.flush() 18 | 19 | def append_file(self, filename, msg): 20 | with cd(self.rootdir): 21 | with open(filename, 'a') as f: 22 | print 'appending "{}" to {}'.format(msg, filename) 23 | f.write(msg) 24 | f.flush() 25 | 26 | def read_file(self, filename): 27 | with cd(self.rootdir): 28 | with open(filename, 'r') as f: 29 | text = f.read() 30 | return text 31 | 32 | def get_trace(self): 33 | text = self.read_file('trace') 34 | return text 35 | 36 | def set_tracer(self, tracer): 37 | self.write_file('current_tracer', tracer) 38 | 39 | def start_tracing(self): 40 | self.write_file('tracing_on', '1') 41 | 42 | def stop_tracing(self): 43 | self.write_file('tracing_on', '0') 44 | 45 | def clean_trace(self): 46 | self.write_file('trace', '') 47 | 48 | def write_marker(self, msg): 49 | self.write_file('trace_marker', msg) 50 | 51 | def set_filter(self, filter_str): 52 | self.write_file('set_ftrace_filter', filter_str) 53 | 54 | def add_filter(self, filter_str): 55 | self.append_file('set_ftrace_filter', filter_str) 56 | 57 | def copy_trace(self, target_path): 58 | with cd(self.rootdir): 59 | shcmd("cp trace {}".format(target_path)) 60 | 61 | 62 | def trace_cmd(cmd, tracer, ffilter): 63 | """ 64 | tracer: function or function_graph 65 | """ 66 | ftr = Ftrace() 67 | ftr.clean_trace() 68 | ftr.set_tracer(tracer) 69 | ftr.start_tracing() 70 | 71 | ftr.set_filter(ffilter) 72 | 73 | shcmd(cmd) 74 | 75 | ftr.stop_tracing() 76 | text = ftr.get_trace() 77 | 78 | return text 79 | 80 | 81 | 82 | if __name__ == '__main__': 83 | # An example 84 | ftr = Ftrace() 85 | ftr.clean_trace() 86 | -------------------------------------------------------------------------------- /pyreuse/fsutils/ext4dumpextents.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import os 3 | import re 4 | import itertools 5 | 6 | """ 7 | Example: 8 | dump_extents_of_a_file('/dev/loop0', 'datafile') 9 | """ 10 | 11 | def dump_extents_of_a_file(devname, filepath): 12 | """ 13 | this is only for ext4 14 | """ 15 | cmd = ['debugfs', devname, '-R', 'dump_extents "' + filepath + '"'] 16 | proc = subprocess.Popen(cmd, stdout = subprocess.PIPE) 17 | proc.wait() 18 | 19 | lines = proc.stdout.readlines() 20 | return ''.join(lines) 21 | 22 | 23 | def parse_dump_extents_output(output_text): 24 | ret_list = [] 25 | header = ["Level_index", "Max_level", 26 | "Entry_index", "N_Entry", 27 | "Logical_start", "Logical_end", 28 | "Physical_start", "Physical_end", 29 | "Length", "Flags"] 30 | 31 | lines = output_text.split('\n') 32 | for line in lines: 33 | if "Level" in line or "debugfs" in line or len(line.strip()) == 0: 34 | continue 35 | 36 | line = re.sub(r'[/\-]', " ", line) 37 | tokens = line.split() 38 | if len(tokens) == 8: 
39 | # there is no physical end 40 | tokens.insert(7, tokens[6]) #TODO: this is dangerous 41 | 42 | d = {} 43 | for i in range(9): 44 | d[ header[i] ] = int(tokens[i]) 45 | 46 | if len(tokens) == 10: 47 | d["Flags"] = tokens[9] 48 | else: 49 | d["Flags"] = "NA" 50 | 51 | ret_list.append(d) 52 | 53 | return ret_list 54 | 55 | 56 | def _add_file_path(extents, file_path): 57 | for extent in extents: 58 | extent['file_path'] = file_path 59 | 60 | return extents 61 | 62 | 63 | def get_extents_of_dir(dirpath, dev_path): 64 | """ 65 | Example: 66 | extents = get_extents_of_dir(dirpath = '/mnt/fsonloop', dev_path = '/dev/sdc1') 67 | """ 68 | all_extents = [] 69 | for root, dirs, files in os.walk(dirpath, topdown=False): 70 | for name in itertools.chain(files, dirs): 71 | rel_dir_path = os.path.relpath(root, dirpath) 72 | rel_path = os.path.join(rel_dir_path, name) 73 | 74 | out_txt = dump_extents_of_a_file(dev_path, rel_path) 75 | file_extents = parse_dump_extents_output(out_txt) 76 | _add_file_path(file_extents, rel_path) 77 | all_extents.extend(file_extents) 78 | 79 | return all_extents 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /wiscsim/flash.py: -------------------------------------------------------------------------------- 1 | import simpy 2 | 3 | 4 | class SimpleFlash(object): 5 | def __init__(self, recorder, confobj = None): 6 | self.recorder = recorder 7 | self.conf = confobj 8 | 9 | self.data = {} # ppn -> contents stored in a flash page 10 | 11 | def page_read(self, pagenum, cat): 12 | self.recorder.put('physical_read', pagenum, cat) 13 | 14 | content = self.data.get(pagenum, None) 15 | return content 16 | 17 | def page_write(self, pagenum, cat, data = None): 18 | self.recorder.put('physical_write', pagenum, cat) 19 | 20 | if data != None: 21 | self.data[pagenum] = data 22 | 23 | def block_erase(self, blocknum, cat): 24 | # print 'block_erase', blocknum, cat 25 | self.recorder.put('phy_block_erase', blocknum, cat) 26 | 27 | ppn_start, ppn_end = self.conf.block_to_page_range(blocknum) 28 | for ppn in range(ppn_start, ppn_end): 29 | try: 30 | del self.data[ppn] 31 | except KeyError: 32 | # ignore key error 33 | pass 34 | 35 | class Flash(object): 36 | def __init__(self, recorder, confobj = None, globalhelper = None): 37 | self.recorder = recorder 38 | 39 | # If you enable store data, you must provide confobj 40 | self.store_data = True # whether store data to self.data[] 41 | self.data = {} # ppn -> contents stored in a flash page 42 | self.conf = confobj 43 | 44 | def page_read(self, pagenum, cat): 45 | self.recorder.count_me(cat, 'physical_read') 46 | 47 | if self.store_data == True: 48 | content = self.data.get(pagenum, None) 49 | return content 50 | 51 | def page_write(self, pagenum, cat, data = None): 52 | self.recorder.count_me(cat, 'physical_write') 53 | 54 | # we only put data to self.data when the caller specify data 55 | if self.store_data == True: 56 | if data != None: 57 | self.data[pagenum] = data 58 | 59 | def block_erase(self, blocknum, cat): 60 | # print 'block_erase', blocknum, cat 61 | self.recorder.count_me(cat, 'phy_block_erase') 62 | 63 | if self.store_data == True: 64 | ppn_start, ppn_end = self.conf.block_to_page_range(blocknum) 65 | for ppn in range(ppn_start, ppn_end): 66 | try: 67 | del self.data[ppn] 68 | except KeyError: 69 | # ignore key error 70 | pass 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /tests/test_bitmap.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import wiscsim 4 | from utilities import utils 5 | from wiscsim.bitmap import FlashBitmap2 6 | 7 | def create_config(): 8 | conf = wiscsim.dftldes.Config() 9 | conf['SSDFramework']['ncq_depth'] = 1 10 | 11 | conf['flash_config']['n_pages_per_block'] = 64 12 | conf['flash_config']['n_blocks_per_plane'] = 2 13 | conf['flash_config']['n_planes_per_chip'] = 1 14 | conf['flash_config']['n_chips_per_package'] = 1 15 | conf['flash_config']['n_packages_per_channel'] = 1 16 | conf['flash_config']['n_channels_per_dev'] = 4 17 | 18 | utils.set_exp_metadata(conf, save_data = False, 19 | expname = 'test_expname', 20 | subexpname = 'test_subexpname') 21 | 22 | logicsize_mb = 64 23 | conf.n_cache_entries = conf.n_mapping_entries_per_page 24 | conf.set_flash_num_blocks_by_bytes(int(logicsize_mb * 2**20 * 1.28)) 25 | 26 | utils.runtime_update(conf) 27 | 28 | return conf 29 | 30 | def create_bitmap(conf): 31 | bitmap = FlashBitmap2(conf) 32 | return bitmap 33 | 34 | 35 | class TestBitmap(unittest.TestCase): 36 | def test_create(self): 37 | conf = create_config() 38 | bitmap = create_bitmap(conf) 39 | 40 | def test_init_states(self): 41 | conf = create_config() 42 | bitmap = create_bitmap(conf) 43 | 44 | for ppn in range(conf.total_num_pages()): 45 | self.assertEqual(bitmap.is_page_valid(ppn), False) 46 | self.assertEqual(bitmap.is_page_invalid(ppn), False) 47 | self.assertEqual(bitmap.is_page_erased(ppn), True) 48 | 49 | def test_validating(self): 50 | conf = create_config() 51 | bitmap = create_bitmap(conf) 52 | 53 | self.assertEqual(bitmap.block_valid_ratio(0), 0) 54 | self.assertEqual(bitmap.block_invalid_ratio(0), 1) 55 | bitmap.validate_block(0) 56 | self.assertEqual(bitmap.block_valid_ratio(0), 1) 57 | self.assertEqual(bitmap.block_invalid_ratio(0), 0) 58 | 59 | def test_invalidating(self): 60 | conf = create_config() 61 | bitmap = create_bitmap(conf) 62 | 63 | bitmap.validate_block(0) 64 | bitmap.invalidate_page(0) 65 | self.assertEqual(bitmap.is_page_invalid(0), True) 66 | self.assertEqual(bitmap.page_state(0), bitmap.INVALID) 67 | self.assertEqual(bitmap.block_valid_ratio(0), 68 | 1 - 1.0/conf.n_pages_per_block) 69 | 70 | 71 | def main(): 72 | unittest.main() 73 | 74 | if __name__ == '__main__': 75 | main() 76 | 77 | 78 | 79 | 80 | -------------------------------------------------------------------------------- /tests/recorder_test.py: -------------------------------------------------------------------------------- 1 | import config 2 | import unittest 3 | 4 | import wiscsim 5 | from utilities.utils import * 6 | 7 | class TestFTLwithDFTL(unittest.TestCase): 8 | def setup_config(self): 9 | self.conf = config.ConfigNCQFTL() 10 | 11 | def setup_environment(self): 12 | metadata_dic = choose_exp_metadata(self.conf, interactive = False) 13 | self.conf.update(metadata_dic) 14 | 15 | self.conf['enable_blktrace'] = True 16 | self.conf['enable_simulation'] = True 17 | 18 | def setup_workload(self): 19 | pass 20 | 21 | def setup_ftl(self): 22 | pass 23 | 24 | def my_run(self): 25 | runtime_update(self.conf) 26 | 27 | recorder = wiscsim.recorder.Recorder( 28 | output_target = wiscsim.recorder.FILE_TARGET, 29 | output_directory = "/tmp" 30 | ) 31 | recorder.enable() 32 | 33 | recorder.add_to_general_accumulater("counter_set_1", "counter1", 3) 34 | recorder.add_to_general_accumulater("counter_set_1", "counter1", 4) 35 | self.assertEqual( 36 | recorder.general_accumulator["counter_set_1"]["counter1"], 7) 
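# Illustrative lines, not in the original test: each counter-set name appears
# to get its own entry in general_accumulator, so a second set should
# accumulate independently of counter_set_1 above.
recorder.add_to_general_accumulater("counter_set_2", "counter2", 1)
self.assertEqual(
    recorder.general_accumulator["counter_set_2"]["counter2"], 1)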
37 | 38 | def test_main(self): 39 | self.setup_config() 40 | self.setup_environment() 41 | self.setup_workload() 42 | self.setup_ftl() 43 | self.my_run() 44 | 45 | class TestCountMe(unittest.TestCase): 46 | def setup_config(self): 47 | self.conf = config.ConfigNCQFTL() 48 | 49 | def setup_environment(self): 50 | metadata_dic = choose_exp_metadata(self.conf, interactive = False) 51 | self.conf.update(metadata_dic) 52 | 53 | self.conf['enable_blktrace'] = True 54 | self.conf['enable_simulation'] = True 55 | 56 | def setup_workload(self): 57 | pass 58 | 59 | def setup_ftl(self): 60 | pass 61 | 62 | def my_run(self): 63 | runtime_update(self.conf) 64 | 65 | recorder = wiscsim.recorder.Recorder( 66 | output_target = wiscsim.recorder.FILE_TARGET, 67 | output_directory = '/tmp' 68 | ) 69 | recorder.enable() 70 | 71 | recorder.count_me("counter_name_1", "item1") 72 | recorder.count_me("counter_name_1", "item1") 73 | self.assertEqual(recorder.get_count_me("counter_name_1", "item1"), 2) 74 | 75 | def test_main(self): 76 | self.setup_config() 77 | self.setup_environment() 78 | self.setup_workload() 79 | self.setup_ftl() 80 | self.my_run() 81 | 82 | 83 | 84 | def main(): 85 | unittest.main() 86 | 87 | if __name__ == '__main__': 88 | main() 89 | 90 | 91 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /pyreuse/sysutils/filefragparser.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pprint 3 | 4 | from pyreuse.helpers import run_and_get_output 5 | 6 | def parse_file_frag_text(text): 7 | "text generated by filefrag -v" 8 | lines = text.split('\n') 9 | 10 | table = [] 11 | for line in lines[3:-2]: 12 | d = _parse_line(line) 13 | table.append(d) 14 | 15 | return table 16 | 17 | def _parse_line(line): 18 | items = line.split() 19 | items = [_clean_item(item) for item in items] 20 | 21 | keys = ['ext', 'logical_start', 'logical_end', 22 | 'physical_start', 'physical_end', 'length', 23 | 'expected', 'flags'] 24 | 25 | if len(items) == 6: 26 | items.extend(['NA', 'NA']) 27 | elif len(items) == 7: 28 | items.extend(['NA']) 29 | 30 | d = dict(zip(keys, items)) 31 | 32 | return d 33 | 34 | def _clean_item(item): 35 | item = item.replace('.', '') 36 | item = item.replace(':', '') 37 | item = item.strip() 38 | 39 | # convert if we can 40 | try: 41 | item = int(item) 42 | except ValueError: 43 | pass 44 | 45 | return item 46 | 47 | def parse_file_frag_file(path): 48 | with open(path, 'r') as f: 49 | text = f.read() 50 | 51 | return parse_file_frag_text(text) 52 | 53 | def filefrag(filepath): 54 | lines = run_and_get_output('filefrag -v {}'.format(filepath)) 55 | text = ''.join(lines) 56 | 57 | return parse_file_frag_text(text) 58 | 59 | # TODO: this function should be moved somewhere else 60 | def get_file_range_table(dirpath, BLOCKSIZE=4096): 61 | """ 62 | Get a table: 63 | [ 64 | {'path': '/mnt/fsonloop/appmix/0-Sqlite/data.db', 65 | 'start_byte': 88237, 66 | 'size': 4096}, 67 | {'path': '/mnt/fsonloop/appmix/0-Sqlite/data.db', 68 | 'start_byte': 882379, 69 | 'size': 51200}, 70 | ... 
71 | ] 72 | """ 73 | ret_table = [] 74 | for root, dirs, files in os.walk(dirpath, topdown=False): 75 | for name in files: 76 | filepath = os.path.join(root, name) 77 | table = filefrag(filepath) 78 | file_range_table = file_range(table, filepath) 79 | ret_table.extend(file_range_table) 80 | 81 | return ret_table 82 | 83 | def file_range(table, path, BLOCKSIZE=4096): 84 | """ 85 | table is the thing you get from filefrag() 86 | """ 87 | range_table = [] 88 | 89 | for row in table: 90 | start_byte = row['physical_start'] * BLOCKSIZE 91 | size = row['length'] * BLOCKSIZE 92 | range_row = {'start_byte': start_byte, 93 | 'size': size, 94 | 'path': path} 95 | range_table.append(range_row) 96 | 97 | return range_table 98 | 99 | 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /pyreuse/sysutils/blockclassifiers.py: -------------------------------------------------------------------------------- 1 | class BlockClassifierBase(object): 2 | def classify(self, offset): 3 | """ 4 | Given an offset set, tell the semantics of data stored in it. 5 | """ 6 | raise NotImplementedError() 7 | 8 | class Ext4BlockClassifier(BlockClassifierBase): 9 | def __init__(self, range_table, blocksize=4096): 10 | """ 11 | range_table is 12 | [ 13 | {'inode': (startblock, endblock)}, e.g. (34, 34) 14 | {'journal': (startblock, endblock)}, 15 | ... 16 | ] 17 | """ 18 | self._range_table = range_table 19 | self._blocksize = blocksize 20 | 21 | def classify(self, offset): 22 | blocknum = offset / self._blocksize 23 | 24 | for row in self._range_table: 25 | for category, (start, end) in row.items(): 26 | if blocknum >= start and blocknum <= end: 27 | return category 28 | 29 | return 'UNKNOWN' 30 | 31 | 32 | class OffsetClassifier(BlockClassifierBase): 33 | def __init__(self, range_table): 34 | """ 35 | range_table is 36 | [ 37 | {'inode': (start offset, end offset)}, e.g. (0, 4096) 38 | {'journal': (start offset, end offst)}, 39 | ... 
40 | ] 41 | """ 42 | self._range_table = range_table 43 | 44 | def classify(self, offset): 45 | for row in self._range_table: 46 | for category, (start, end) in row.items(): 47 | if offset >= start and offset < end: 48 | return category 49 | 50 | return 'UNKNOWN' 51 | 52 | 53 | class Ext4FileClassifier(BlockClassifierBase): 54 | def __init__(self, extents, blocksize=4096): 55 | """ 56 | extents is a list of extents from get_extents_of_dir() 57 | """ 58 | self._extents = extents 59 | self._blocksize = blocksize 60 | 61 | self._add_offsets(self._extents) 62 | 63 | def _add_offsets(self, extents): 64 | blocksize = self._blocksize 65 | for extent in extents: 66 | extent['physical_range'] = (extent['Physical_start'] * blocksize, 67 | (extent['Physical_end'] + 1) * blocksize) 68 | extent['logical_range'] = (extent['Logical_start'] * blocksize, 69 | (extent['Logical_end'] + 1) * blocksize) 70 | 71 | return extents 72 | 73 | def classify(self, offset): 74 | return self._find_file_of_offset(offset, self._extents) 75 | 76 | def _find_file_of_offset(self, offset, extents): 77 | for extent in extents: 78 | if self._is_physical_in_extent(offset, extent) is True: 79 | return extent['file_path'] 80 | 81 | return None 82 | 83 | def _is_physical_in_extent(self, offset, extent): 84 | return offset >= extent['physical_range'][0] and \ 85 | offset < extent['physical_range'][1] 86 | 87 | 88 | 89 | -------------------------------------------------------------------------------- /workrunner/cpuhandler.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | 4 | def get_possible_cpus(): 5 | f = open("/sys/devices/system/cpu/possible", 'r') 6 | line = f.readline() 7 | f.close() 8 | 9 | # assuming format of 0-2,4,6-63 10 | items = line.split(',') 11 | cpus = [] 12 | for item in items: 13 | if '-' in item: 14 | a,b = item.split('-') 15 | a = int(a) 16 | b = int(b) 17 | cpus.extend(range(a, b+1)) 18 | else: 19 | cpus.append(int(item)) 20 | 21 | return cpus 22 | 23 | def get_available_cpu_dirs(): 24 | "Counting dirs is more accurate than */cpu/possible, at least on emulab" 25 | cpudirs = [name for name in glob.glob("/sys/devices/system/cpu/cpu[0-9]*") \ 26 | if os.path.isdir(name)] 27 | return cpudirs 28 | 29 | def get_online_cpuids(): 30 | with open('/sys/devices/system/cpu/online', 'r') as f: 31 | line = f.readline().strip() 32 | 33 | # assuming format of 0-2,4,6-63 34 | items = line.split(',') 35 | cpus = [] 36 | for item in items: 37 | if '-' in item: 38 | a,b = item.split('-') 39 | a = int(a) 40 | b = int(b) 41 | cpus.extend(range(a, b+1)) 42 | else: 43 | cpus.append(int(item)) 44 | 45 | return cpus 46 | 47 | def switch_cpu(cpuid, mode): 48 | path = "/sys/devices/system/cpu/cpu{cpuid}/online" 49 | path = path.format(cpuid=cpuid) 50 | 51 | modedict = {'ON':'1', 'OFF':'0'} 52 | 53 | f = open(path, 'w') 54 | f.write(modedict[mode]) 55 | f.flush() 56 | f.close() 57 | 58 | return 59 | 60 | def enable_all_cpus(): 61 | possible_cpus = get_possible_cpus() 62 | enable_n_cpus(len(possible_cpus)) 63 | 64 | def set_cpus(n): 65 | if n == 'NOOP' or n == None: 66 | return 67 | 68 | if n == 'all': 69 | enable_all_cpus() 70 | return 71 | 72 | enable_n_cpus(n) 73 | 74 | def enable_n_cpus(n): 75 | """ 76 | Enable n CPUs 77 | """ 78 | online_cpus = get_online_cpuids() 79 | 80 | n_online = len(online_cpus) 81 | if n_online == n: 82 | return 83 | elif n_online > n: 84 | # more than wanted is online, disable some 85 | for cpuid in online_cpus[n:]: 86 | switch_cpu(cpuid, 
'OFF') 87 | else: 88 | # we need some more CPU to be online 89 | need = n - n_online 90 | possible_cpus = get_possible_cpus() 91 | for cpuid in possible_cpus: 92 | if not cpuid in online_cpus: 93 | switch_cpu(cpuid, 'ON') 94 | need -= 1 95 | 96 | if need == 0: 97 | break 98 | if need > 0: 99 | raise RuntimeError("Need {} CPUS, but only got {}".format( 100 | n, n - need)) 101 | 102 | online_cpus = get_online_cpuids() 103 | assert len(online_cpus) == n 104 | 105 | 106 | -------------------------------------------------------------------------------- /wiscsim/gc_analysis.py: -------------------------------------------------------------------------------- 1 | import os 2 | import csv 3 | 4 | from commons import * 5 | from ftlsim_commons import * 6 | from .host import Host 7 | from utilities import utils 8 | 9 | from pyreuse.sysutils import blocktrace, blockclassifiers, dumpe2fsparser 10 | from pyreuse.fsutils import ext4dumpextents 11 | 12 | 13 | 14 | class GcLog(object): 15 | def __init__(self, device_path, result_dir, flash_page_size): 16 | self.device_path = device_path 17 | self.result_dir = result_dir 18 | self.flash_page_size = flash_page_size 19 | 20 | self.gclog_path = os.path.join(self.result_dir, 'gc.log') 21 | self.dumpe2fs_out_path = os.path.join(self.result_dir, 'dumpe2fs.out') 22 | self.extents_path = os.path.join(self.result_dir, 'extents.json') 23 | self.fs_block_size = 4096 24 | 25 | def classify_lpn_in_gclog(self): 26 | extents = self._get_extents() 27 | filepath_classifier = blockclassifiers.Ext4FileClassifier(extents, 28 | self.fs_block_size) 29 | 30 | range_table = self._get_range_table() 31 | classifier = blockclassifiers.Ext4BlockClassifier(range_table, 32 | self.fs_block_size) 33 | 34 | new_table = [] 35 | with open(self.gclog_path , 'rb') as f: 36 | reader = csv.DictReader(f, skipinitialspace=True) 37 | for row in reader: 38 | newrow = dict(zip(row.keys()[0].split(), row.values()[0].split())) 39 | if newrow['lpn'] != 'NA': 40 | offset = int(newrow['lpn']) * self.flash_page_size 41 | sem = classifier.classify(offset) 42 | if sem == 'UNKNOWN': 43 | sem = filepath_classifier.classify(offset) 44 | else: 45 | sem = 'NA' 46 | newrow['semantics'] = sem 47 | new_table.append(newrow) 48 | 49 | with open(self.gclog_path+'.parsed', 'w') as f: 50 | f.write(utils.table_to_str(new_table)) 51 | 52 | def _get_extents(self): 53 | d = utils.load_json(self.extents_path) 54 | extents = d['extents'] 55 | 56 | return extents 57 | 58 | def _get_range_table(self): 59 | with open(self.dumpe2fs_out_path, 'r') as f: 60 | text = f.read() 61 | 62 | header_text, bg_text = text.split("\n\n\n") 63 | 64 | range_table = dumpe2fsparser.parse_bg_text(bg_text) 65 | 66 | j_start, j_end = self._get_journal_block_ext(header_text) 67 | if j_start != -1: 68 | range_table.append( {'journal': (j_start, j_end)} ) 69 | 70 | return range_table 71 | 72 | def _get_journal_block_ext(self, header_text): 73 | header_dict = dumpe2fsparser.parse_header_text(header_text) 74 | 75 | if header_dict.has_key('journal-inode') is not True: 76 | return -1, -1 77 | 78 | journal_inum = header_dict['journal-inode'] 79 | journal_len = header_dict['journal-length'] 80 | 81 | ext_text = ext4dumpextents.dump_extents_of_a_file(self.device_path, 82 | '<{}>'.format(journal_inum)) 83 | table = ext4dumpextents.parse_dump_extents_output(ext_text) 84 | return table[0]['Physical_start'], table[0]['Physical_end'] 85 | 86 | 87 | -------------------------------------------------------------------------------- /workrunner/multiwriters.py: 
-------------------------------------------------------------------------------- 1 | import subprocess 2 | import pprint 3 | 4 | KB = 2**10 5 | MB = 2**20 6 | GB = 2**30 7 | 8 | translator = { 'file_size': '-f', 9 | 'write_size': '-w', 10 | 'n_writes': '-n', 11 | 'pattern': '-p', 12 | 'fsync': '-y', 13 | 'sync': '-s', 14 | 'file_path': '-l', 15 | 'tag': '-t', 16 | 'markerfile': '-m' 17 | } 18 | 19 | 20 | def parse_player_runtime_out(lines): 21 | d = {} 22 | for line in lines: 23 | items = line.split() 24 | if len(items) == 2: 25 | d[items[0]] = items[1] 26 | 27 | return d 28 | 29 | 30 | class MultiWriters(object): 31 | def __init__(self, player_path, parameters): 32 | """ 33 | parameters is a list of dictionaries 34 | [ 35 | { 'file_size': 36 | 'write_size': 37 | 'n_writes': 38 | 'pattern': 39 | 'fsync': 40 | 'sync': 41 | 'file_path': 42 | 'tag': 43 | 'markerfile': 44 | }, 45 | ... 46 | ] 47 | """ 48 | args = [] 49 | for para in parameters: 50 | arg = [player_path, ] 51 | for k, v in para.items(): 52 | arg.append(translator[k]) 53 | arg.append(str(v)) 54 | args.append(arg) 55 | 56 | # each row is the args for one player instance 57 | self.args_table = args 58 | 59 | def run(self): 60 | procs = [] 61 | 62 | for args in self.args_table: 63 | print ' '.join(args) 64 | p = subprocess.Popen(args, stdout = subprocess.PIPE) 65 | procs.append(p) 66 | 67 | for p in procs: 68 | p.wait() 69 | 70 | results = [] 71 | for p in procs: 72 | if p.returncode != 0: 73 | raise RuntimeError("multiwriter process failed. PID={}".format( 74 | p.pid)) 75 | 76 | lines = p.communicate()[0].split('\n') 77 | d = parse_player_runtime_out(lines) 78 | d['pid.python'] = p.pid 79 | results.append(d) 80 | 81 | # pprint.pprint( results ) 82 | return results 83 | 84 | 85 | def main(): 86 | parameters = [ 87 | { 'file_size': 256 * MB, 88 | 'write_size': 64 * KB, 89 | 'n_writes': 4 * 256 * MB / (64 * KB), 90 | 'pattern': 'random', 91 | 'fsync': 1, 92 | 'sync': 0, 93 | 'file_path': '/mnt/fsonloop/file01', 94 | 'tag': 'mytag001' 95 | }, 96 | { 'file_size': 256 * MB, 97 | 'write_size': 64 * KB, 98 | 'n_writes': 4 * 256 * MB / (64 * KB), 99 | 'pattern': 'random', 100 | 'fsync': 1, 101 | 'sync': 0, 102 | 'file_path': '/mnt/fsonloop/file01', 103 | 'tag': 'mytag002' 104 | } 105 | ] 106 | 107 | mw = MultiWriters('./player-runtime', parameters) 108 | pprint.pprint( mw.run() ) 109 | 110 | 111 | if __name__ == '__main__': 112 | main() 113 | 114 | --------------------------------------------------------------------------------
/workrunner/workload.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import os 3 | import pprint 4 | import random 5 | import time 6 | import sys 7 | import subprocess 8 | 9 | from commons import * 10 | import config 11 | import multiwriters 12 | from utilities import utils 13 | 14 | from pyreuse.helpers import * 15 | from pyreuse.apputils.parseleveldboutput import parse_file 16 | 17 | class Workload(object): 18 | def __init__(self, confobj, workload_conf_key = None): 19 | """ 20 | workload_conf is part of confobj. But we may need to run 21 | multiple workloads with different configurations in our 22 | experiments. So we need workload_conf to specify which 23 | configuration we will use in a Workload instance. 24 | 25 | Since workload_conf has a default value, it should be 26 | compatible with previous code. However, new classes based 27 | on Workload should use this new __init__() with two parameters.
-------------------------------------------------------------------------------- /workrunner/workload.py: --------------------------------------------------------------------------------
1 | import collections
2 | import os
3 | import pprint
4 | import random
5 | import time
6 | import sys
7 | import subprocess
8 |
9 | from commons import *
10 | import config
11 | import multiwriters
12 | from utilities import utils
13 |
14 | from pyreuse.helpers import *
15 | from pyreuse.apputils.parseleveldboutput import parse_file
16 |
17 | class Workload(object):
18 |     def __init__(self, confobj, workload_conf_key = None):
19 |         """
20 |         workload_conf is part of confobj. But we may need to run
21 |         multiple workloads with different configurations in our
22 |         experiments. So we need workload_conf_key to specify which
23 |         configuration we will use in a Workload instance.
24 |
25 |         Since workload_conf_key has a default value, this stays
26 |         compatible with previous code. However, new classes based
27 |         on Workload should use this new __init__() with two parameters.
28 |         """
29 |         if not isinstance(confobj, config.Config):
30 |             raise TypeError("confobj must be a config.Config, got {}".format(
31 |                 type(confobj).__name__))
32 |
33 |         self.conf = confobj
34 |         if workload_conf_key is not None and workload_conf_key != 'None':
35 |             self.workload_conf = confobj[workload_conf_key]
36 |
37 |     def run(self):
38 |         raise NotImplementedError
39 |
40 |     def stop(self):
41 |         raise NotImplementedError
42 |
43 | class NoOp(Workload):
44 |     """
45 |     This is a workload class that does nothing. It may be used to skip
46 |     the file system aging stage. To skip the aging workload, set
47 |     conf['age_workload_class'] = "NoOp"
48 |     """
49 |     def run(self):
50 |         pass
51 |
52 |     def stop(self):
53 |         pass
54 |
55 |
56 | class SimpleRandReadWrite(Workload):
57 |     def __init__(self, confobj, workload_conf_key = None):
58 |         super(SimpleRandReadWrite, self).__init__(confobj, workload_conf_key)
59 |
60 |     def run(self):
61 |         mnt = self.conf["fs_mount_point"]
62 |         datafile = os.path.join(mnt, "datafile")
63 |         region = 2 * MB
64 |         chunksize = 64 * KB
65 |         n_chunks = region / chunksize
66 |         chunkids = range(n_chunks)
67 |
68 |         buf = "a" * chunksize
69 |         f = open(datafile, "w+")
70 |         random.shuffle(chunkids)
71 |         for chunkid in chunkids:
72 |             offset = chunkid * chunksize
73 |             f.seek(offset)
74 |             f.write(buf)
75 |             f.flush()  # flush the user-space buffer so fsync pushes real data
76 |             os.fsync(f)
77 |
78 |         random.shuffle(chunkids)
79 |         for chunkid in chunkids:
80 |             offset = chunkid * chunksize
81 |             f.seek(offset)
82 |             buf = f.read(chunksize)  # read back one chunk at this offset
83 |
84 |         f.close()
85 |
86 |     def stop(self):
87 |         pass
88 |
89 |
90 | # class LinuxDD(Workload):
91 | #     def __init__(self, confobj, workload_conf_key = None):
92 | #         super(LinuxDD, self).__init__(confobj, workload_conf_key)
93 |
94 | #     def run(self):
95 | #         mnt = self.conf["fs_mount_point"]
96 | #         cmd = "dd if=/dev/zero of={}/datafile bs=64k count=128".format(mnt)
97 | #         print cmd
98 | #         subprocess.call(cmd, shell=True)
99 | #         subprocess.call("sync")
100 |
101 | #     def stop(self):
102 | #         pass
103 |
104 |
105 |
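A new on-file-system workload plugs in by subclassing Workload and implementing run() and stop(), as SimpleRandReadWrite does above. A minimal sketch (the class below is illustrative, not part of the repo; it assumes the usual conf['fs_mount_point'] key):

class AppendOnlyWrite(Workload):
    """Append 1 MB to one file, then fsync it once."""
    def run(self):
        mnt = self.conf["fs_mount_point"]
        f = open(os.path.join(mnt, "appendfile"), "w")
        f.write("a" * MB)
        f.flush()                # flush user-space buffers
        os.fsync(f.fileno())     # force the data to the device
        f.close()

    def stop(self):
        pass

It would then be selected with conf['workload_class'] = 'AppendOnlyWrite', the same way tests/test_workflow.py selects 'SimpleRandReadWrite'.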
-------------------------------------------------------------------------------- /pyreuse/apputils/fio.py: --------------------------------------------------------------------------------
1 | import collections
2 | from pyreuse.helpers import *
3 |
4 | NOVALUE, HIDE_ATTR = 'NOVALUE', 'HIDE_ATTR'
5 |
6 | class JobConfig(collections.OrderedDict):
7 |     """
8 |     It manages an in-memory representation of an FIO job file. The core
9 |     is an ordered dict.
10 |     Format:
11 |     { "global": {
12 |           "size": xxx,
13 |           "xxx" : xxx,
14 |       },
15 |       "job1": {
16 |           "xxx" : xxx,
17 |           "xxx" : xxx
18 |       }
19 |     }
20 |     """
21 |     def append_section(self, section_name, section_dict):
22 |         self[section_name] = section_dict
23 |
24 |     def remove_section(self, section_name):
25 |         del self[section_name]
26 |
27 |     def update(self, section_name, attr_name, attr_value):  # shadows OrderedDict.update() with a section-aware version
28 |         self[section_name][attr_name] = attr_value
29 |
30 |     def get(self, section_name, attr_name):
31 |         return self[section_name][attr_name]
32 |
33 |     def as_ordered_dict(self):
34 |         return self
35 |
36 |     def __str__(self):
37 |         lines = []
38 |         for section_name, section_dict in self.items():
39 |             lines.append("[{}]".format(section_name))
40 |
41 |             for attr_name, attr_value in section_dict.items():
42 |                 if attr_value == NOVALUE:
43 |                     lines.append(attr_name)
44 |                 elif attr_value == HIDE_ATTR:
45 |                     continue
46 |                 else:
47 |                     lines.append("{}={}".format(attr_name, attr_value))
48 |
49 |         return '\n'.join(lines)
50 |
51 |     def save(self, filepath):
52 |         prepare_dir_for_path(filepath)
53 |         with open(filepath, 'w') as f:
54 |             f.write(str(self))
55 |
56 |
57 | class Fio(object):
58 |     def __init__(self, conf_path, result_dir, to_json = True):
59 |         self.conf_path = conf_path
60 |         self.result_dir = result_dir
61 |         self.result_path = os.path.join(result_dir, 'fio.result.json')
62 |         self.to_json = to_json
63 |
64 |     def parse_results(self):
65 |         d = load_json(self.result_path)
66 |         table = parse_json_results(d)
67 |         table_to_file(table, self.result_path + '.parsed')
68 |
69 |     def run(self):
70 |         if self.to_json:
71 |             prepare_dir_for_path(self.result_path)
72 |             fio_cmd = "fio {} --output-format=json --output {}".format(
73 |                 self.conf_path, self.result_path)
74 |         else:
75 |             fio_cmd = "fio {}".format(self.conf_path)
76 |
77 |         with cd(self.result_dir):
78 |             shcmd(fio_cmd)
79 |
80 |         if self.to_json:
81 |             self.parse_results()
82 |
83 |
84 | def parse_json_results(d):
85 |     """
86 |     The input d is the JSON dictionary produced by FIO.
87 |     Perf numbers of all jobs are collected into a table (a list of dicts) and returned.
88 | """ 89 | table = [] 90 | for job in d['jobs']: 91 | my_dict = { 92 | 'jobname': job['jobname'], 93 | 'read_bw': job['read']['bw'], 94 | 'read_iops': job['read']['iops'], 95 | 'read_iobytes': job['read']['io_bytes'], 96 | 'read_runtime': job['read']['runtime'], 97 | 98 | 'write_bw': job['write']['bw'], 99 | 'write_iops': job['write']['iops'], 100 | 'write_iobytes': job['write']['io_bytes'], 101 | 'write_runtime': job['write']['runtime'], 102 | } 103 | table.append(my_dict) 104 | 105 | return table 106 | 107 | 108 | 109 | 110 | 111 | -------------------------------------------------------------------------------- /tests/test_workflow.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import collections 3 | import shutil 4 | import os 5 | 6 | import config 7 | from workflow import * 8 | import wiscsim 9 | from utilities import utils 10 | from wiscsim.hostevent import Event, ControlEvent 11 | from config_helper import rule_parameter 12 | from pyreuse.helpers import shcmd 13 | from config_helper import experiment 14 | 15 | 16 | def create_config(): 17 | conf = wiscsim.dftldes.Config() 18 | conf['SSDFramework']['ncq_depth'] = 1 19 | 20 | conf['flash_config']['n_pages_per_block'] = 64 21 | conf['flash_config']['n_blocks_per_plane'] = 2 22 | conf['flash_config']['n_planes_per_chip'] = 1 23 | conf['flash_config']['n_chips_per_package'] = 1 24 | conf['flash_config']['n_packages_per_channel'] = 1 25 | conf['flash_config']['n_channels_per_dev'] = 4 26 | 27 | # set ftl 28 | conf['do_not_check_gc_setting'] = True 29 | conf.GC_high_threshold_ratio = 0.96 30 | conf.GC_low_threshold_ratio = 0 31 | 32 | conf['enable_simulation'] = True 33 | 34 | utils.set_exp_metadata(conf, save_data = False, 35 | expname = 'test_expname', 36 | subexpname = 'test_subexpname') 37 | 38 | conf['ftl_type'] = 'dftldes' 39 | conf['simulator_class'] = 'SimulatorDESNew' 40 | 41 | logicsize_mb = 16 42 | conf.n_cache_entries = conf.n_mapping_entries_per_page * 16 43 | conf.set_flash_num_blocks_by_bytes(int(logicsize_mb * 2**20 * 1.28)) 44 | 45 | utils.runtime_update(conf) 46 | 47 | return conf 48 | 49 | 50 | def on_fs_config(conf): 51 | # environment 52 | conf['device_path'] = "/dev/loop0" 53 | conf['dev_size_mb'] = 16 54 | conf['filesystem'] = "ext4" 55 | conf["n_online_cpus"] = 'all' 56 | 57 | conf['linux_ncq_depth'] = 31 58 | 59 | # workload 60 | conf['workload_class'] = 'SimpleRandReadWrite' 61 | 62 | class TestWorkflow(unittest.TestCase): 63 | def test_init(self): 64 | conf = create_config() 65 | wf = Workflow(conf) 66 | 67 | def test_save_conf(self): 68 | conf = create_config() 69 | conf['result_dir'] = '/tmp/' 70 | jsonpath = os.path.join(conf['result_dir'], 'config.json') 71 | 72 | if os.path.exists(jsonpath): 73 | os.remove(jsonpath) 74 | 75 | wf = Workflow(conf) 76 | wf._save_conf() 77 | 78 | self.assertTrue(os.path.exists(jsonpath)) 79 | 80 | 81 | def test_onfs_workload(self): 82 | conf = create_config() 83 | on_fs_config(conf) 84 | 85 | datapath = os.path.join(conf["fs_mount_point"], 'datafile') 86 | if os.path.exists(datapath): 87 | os.remove(datapath) 88 | 89 | wf = Workflow(conf) 90 | wf.run_workload() 91 | 92 | self.assertTrue(os.path.exists(datapath)) 93 | 94 | def test_simulation(self): 95 | conf = create_config() 96 | 97 | ctrl_event = ControlEvent(OP_ENABLE_RECORDER) 98 | event = Event(512, 0, OP_WRITE, 0, 4096) 99 | 100 | wf = Workflow(conf) 101 | wf.run_simulator([ctrl_event, event]) 102 | 103 | def test_on_fs_run_and_sim(self): 104 | conf = 
create_config() 105 | on_fs_config(conf) 106 | conf['enable_blktrace'] = True 107 | 108 | datapath = os.path.join(conf["fs_mount_point"], 'datafile') 109 | if os.path.exists(datapath): 110 | os.remove(datapath) 111 | 112 | wf = Workflow(conf) 113 | wf.run() 114 | 115 | self.assertTrue(os.path.exists(datapath)) 116 | 117 | 118 | 119 | if __name__ == '__main__': 120 | unittest.main() 121 | 122 | -------------------------------------------------------------------------------- /pyreuse/sysutils/dumpe2fsparser.py: -------------------------------------------------------------------------------- 1 | import pprint 2 | import re 3 | import sys 4 | 5 | """ 6 | output: 7 | inode table: start, end 8 | inode table: start, end 9 | inode table: start, end 10 | blockbitmap: start, end 11 | blockbitmap: start, end 12 | superblock: start, end 13 | """ 14 | 15 | def is_bg_start_line(line): 16 | return line.startswith("Group") 17 | 18 | def convert_to_range(s): 19 | if '-' in s: 20 | tup = s.split('-') 21 | tup = [int(x) for x in tup] 22 | start, end = tup 23 | else: 24 | start = int(s) 25 | end = start 26 | return start, end 27 | 28 | def parse_superblock(line): 29 | mo = re.search(r'superblock at (\S+), Group descriptors at (\S+)', line) 30 | superblock_addr = convert_to_range(mo.group(1)) 31 | groupdesc = convert_to_range(mo.group(2)) 32 | return {'superblock': superblock_addr, 33 | 'groupdesc': groupdesc} 34 | 35 | def parse_gdt(line): 36 | mo = re.search(r'Reserved GDT blocks at (\S+)', line) 37 | return {'reserved-gdt': convert_to_range(mo.group(1))} 38 | 39 | def parse_bitmaps(line): 40 | mo = re.search(r'Block bitmap at (\S+) .*, Inode bitmap at (\S+) .*', line) 41 | return {'block-bitmap': convert_to_range(mo.group(1)), 42 | 'inode-bitmap': convert_to_range(mo.group(2))} 43 | 44 | def parse_inodetable(line): 45 | mo = re.search(r'Inode table at (\S+) .*', line) 46 | return {'inode-table': convert_to_range(mo.group(1))} 47 | 48 | def parse_bg_lines(bg_lines): 49 | results = [] 50 | for line in bg_lines: 51 | line = line.strip() 52 | if 'superblock' in line: 53 | d = parse_superblock(line) 54 | results.append(d) 55 | elif 'Reserved GDT' in line: 56 | d = parse_gdt(line) 57 | results.append(d) 58 | elif 'Block bitmap' in line: 59 | d = parse_bitmaps(line) 60 | results.append(d) 61 | elif 'Inode table' in line: 62 | d = parse_inodetable(line) 63 | results.append(d) 64 | return results 65 | 66 | def parse_bg_text(text): 67 | bgs = [] 68 | for line in text.split('\n'): 69 | if is_bg_start_line(line): 70 | cur_bg_lines = [] 71 | bgs.append(cur_bg_lines) 72 | cur_bg_lines.append(line) 73 | 74 | parsed_dicts = [] 75 | for bg in bgs: 76 | parsed_dicts.extend(parse_bg_lines(bg)) 77 | return parsed_dicts 78 | 79 | def as_table(parsed_dicts): 80 | rows = ["type start end"] 81 | for dic in parsed_dicts: 82 | for k, v in dic.items(): 83 | vstr = ' '.join([str(x) for x in v]) 84 | line = ' '.join([k, vstr]) 85 | rows.append(line) 86 | 87 | return rows 88 | 89 | def parse_header_text(text): 90 | """ 91 | header is the first part, before \n\n\n 92 | """ 93 | lines = text.split('\n') 94 | 95 | d = {} 96 | for line in lines: 97 | items = line.split(':') 98 | if line.startswith("Journal inode:"): 99 | d['journal-inode'] = int(items[1]) 100 | elif line.startswith("Journal length:"): 101 | d['journal-length'] = int(items[1]) 102 | 103 | return d 104 | 105 | def parse_file_text(text): 106 | # get the second part of dumpe2fs output 107 | text = text.split("\n\n\n")[1] 108 | 109 | range_table = parse_bg_text(text) 110 | 
return range_table
111 |
112 | def parse_file(fpath):
113 |     with open(fpath, 'r') as f:
114 |         text = f.read()
115 |
116 |     range_table = parse_file_text(text)
117 |     rows = as_table(range_table)
118 |     return '\n'.join(rows)
119 |
120 |
-------------------------------------------------------------------------------- /wiscsim/ftlsim_commons.py: --------------------------------------------------------------------------------
1 | import simpy
2 | import random
3 |
4 | class Extent(object):
5 |     def __init__(self, lpn_start, lpn_count):
6 |         assert lpn_count > 0
7 |         self.lpn_start = lpn_start
8 |         self.lpn_count = lpn_count
9 |
10 |     @property
11 |     def next_lpn(self):
12 |         return self.lpn_start + self.lpn_count
13 |
14 |     def last_lpn(self):
15 |         return self.end_lpn() - 1
16 |
17 |     def end_lpn(self):
18 |         return self.lpn_start + self.lpn_count
19 |
20 |     def lpn_iter(self):
21 |         return range(self.lpn_start, self.end_lpn())
22 |
23 |     def __str__(self):
24 |         return "lpn_start: {}, lpn_count: {}".format(
25 |             self.lpn_start, self.lpn_count)
26 |
27 |     def __contains__(self, lpn):
28 |         return lpn >= self.lpn_start and lpn < self.end_lpn()
29 |
30 |     def __copy__(self):
31 |         return Extent(self.lpn_start, self.lpn_count)
32 |
33 |
34 | class CacheExtent(Extent):
35 |     def __init__(self, lpn_start, lpn_count, in_cache):
36 |         super(CacheExtent, self).__init__(lpn_start, lpn_count)
37 |         self.in_cache = in_cache
38 |
39 |     def __str__(self):
40 |         return "{}, in_cache: {}".format(
41 |             super(CacheExtent, self).__str__(), self.in_cache)
42 |
43 |
44 | def display_extents(extent_list):
45 |     for ext in extent_list:
46 |         print str(ext)
47 |
48 |
49 | class SSDRequest(CacheExtent):
50 |     def __init__(self, lpn_start, lpn_count, in_cache, operation):
51 |         super(SSDRequest, self).__init__(lpn_start, lpn_count, in_cache)
52 |         self.operation = operation
53 |
54 |     def __str__(self):
55 |         return "{}, operation: {}".format(
56 |             super(SSDRequest, self).__str__(), self.operation)
57 |
58 |
59 | def create_ssd_request(conf, event):
60 |     lpn_start, lpn_count = conf.sec_ext_to_page_ext(
61 |         event.sector, event.sector_count)
62 |     return SSDRequest(
63 |         lpn_start,
64 |         lpn_count,
65 |         None,
66 |         event.operation)
67 |
68 |
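An Extent describes the half-open run of logical pages [lpn_start, lpn_start + lpn_count). A quick worked example of the accessors above:

e = Extent(lpn_start=100, lpn_count=4)
list(e.lpn_iter())    # [100, 101, 102, 103]
e.end_lpn()           # 104, the first lpn past the extent
e.last_lpn()          # 103
102 in e              # True, via __contains__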
69 | class NCQSingleQueue(object):
70 |     """
71 |     Users of the queue can take up to ncq_depth requests without
72 |     returning them
73 |     """
74 |     def __init__(self, ncq_depth, simpy_env):
75 |         self.ncq_depth = ncq_depth
76 |         self.env = simpy_env
77 |         self.queue = simpy.Store(self.env)
78 |         # the SSD needs to grab a slot before getting an item from the queue
79 |         self.slots = simpy.Resource(self.env, capacity=ncq_depth)
80 |
81 |     def hold_all_slots(self):
82 |         held_slot_reqs = []
83 |         for i in range(self.ncq_depth):
84 |             slot_req = self.slots.request()
85 |             held_slot_reqs.append(slot_req)
86 |
87 |         yield simpy.events.AllOf(self.env, held_slot_reqs)
88 |
89 |         self.env.exit(held_slot_reqs)
90 |
91 |     def release_all_slots(self, held_slot_reqs):
92 |         """Must be used in pair with hold_all_slots()"""
93 |         assert len(held_slot_reqs) > 0
94 |         for req in held_slot_reqs:
95 |             self.slots.release(req)
96 |
97 |
98 | def split_ext_by_segment(n_pages_per_segment, extent):
99 |     if extent.lpn_count == 0:
100 |         return None
101 |
102 |     last_seg_id = -1
103 |     cur_ext = None
104 |     exts = {}
105 |     for lpn in extent.lpn_iter():
106 |         seg_id = lpn / n_pages_per_segment
107 |         if seg_id == last_seg_id:
108 |             cur_ext.lpn_count += 1
109 |         else:
110 |             if cur_ext is not None:
111 |                 exts[last_seg_id] = cur_ext
112 |             cur_ext = Extent(lpn_start=lpn, lpn_count=1)
113 |         last_seg_id = seg_id
114 |
115 |     if cur_ext is not None:
116 |         exts[seg_id] = cur_ext
117 |
118 |     return exts
119 |
120 |
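To make split_ext_by_segment() concrete: with 4 pages per segment, an extent covering lpns 6-9 straddles segments 1 and 2, so it is split at the segment boundary and returned as a dict keyed by segment id:

exts = split_ext_by_segment(4, Extent(lpn_start=6, lpn_count=4))
# exts[1] covers lpns 6-7:  str(exts[1]) == 'lpn_start: 6, lpn_count: 2'
# exts[2] covers lpns 8-9:  str(exts[2]) == 'lpn_start: 8, lpn_count: 2'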
121 | class LockPool(object):
122 |     def __init__(self, simpy_env):
123 |         self.resources = {}  # addr: lock
124 |         self.env = simpy_env
125 |         self.locked_addrs = set()
126 |
127 |     def get_request(self, addr):
128 |         res = self.resources.setdefault(addr,
129 |             simpy.Resource(self.env, capacity = 1))
130 |         return res.request()
131 |
132 |     def release_request(self, addr, request):
133 |         res = self.resources[addr]
134 |         res.release(request)
135 |
136 |
137 | random.seed(1)
138 |
139 | def random_channel_id(n_channels_per_dev):
140 |     return random.randint(0, n_channels_per_dev - 1)
141 |
142 |
-------------------------------------------------------------------------------- /wiscsim/hostevent.py: --------------------------------------------------------------------------------
1 | from ftlsim_commons import Extent
2 | from commons import *
3 |
4 | class HostEventBase(object):
5 |     def get_operation(self):
6 |         raise NotImplementedError
7 |
8 |     def get_type(self):
9 |         raise NotImplementedError
10 |
11 |
12 | class ControlEvent(HostEventBase):
13 |     def __init__(self, operation, arg1=None, arg2=None, arg3=None):
14 |         self.operation = operation
15 |         self.arg1 = arg1
16 |         self.arg2 = arg2
17 |         self.arg3 = arg3
18 |         self.action = 'D'  # following the format of data events
19 |
20 |     def get_operation(self):
21 |         return self.operation
22 |
23 |     def get_type(self):
24 |         return 'ControlEvent'
25 |
26 |     def __str__(self):
27 |         return "ControlEvent: {}: {}, {}, {}".format(self.operation,
28 |             self.arg1, self.arg2, self.arg3)
29 |
30 |
31 | class Event(HostEventBase):
32 |     def __init__(self, sector_size, pid, operation, offset, size,
33 |             timestamp = None, pre_wait_time = None, sync = True, action = 'D'):
34 |         self.pid = int(pid)
35 |         self.operation = operation
36 |         self.offset = int(offset)
37 |         self.size = int(size)
38 |         self.sync = sync
39 |         self.timestamp = timestamp
40 |         self.pre_wait_time = pre_wait_time
41 |         self.action = action
42 |         assert action in ('D', 'C'), "action:{}".format(action)
43 |
44 |         assert self.offset % sector_size == 0,\
45 |             "offset {} is not aligned with sector size {}.".format(
46 |             self.offset, sector_size)
47 |         self.sector = self.offset / sector_size
48 |
49 |         assert self.size % sector_size == 0, \
50 |             "size {} is not a multiple of sector size {}".format(
51 |             self.size, sector_size)
52 |
53 |         self.sector_count = self.size / sector_size
54 |
55 |     def get_operation(self):
56 |         return self.operation
57 |
58 |     def get_type(self):
59 |         return 'Event'
60 |
61 |     def get_lpn_extent(self, conf):
62 |         lpn_start, lpn_count = conf.off_size_to_page_range(
63 |             self.offset, self.size, force_alignment=False)
64 |         return Extent(lpn_start = lpn_start, lpn_count = lpn_count)
65 |
66 |     def __str__(self):
67 |         return "Event pid:{pid}, operation:{operation}, offset:{offset}, "\
68 |             "size:{size}, sector:{sector}, sector_count:{sector_count}, "\
69 |             "sync:{sync}, timestamp:{timestamp}, action:{action}"\
70 |             .format(pid = self.pid, operation = self.operation,
71 |                 offset = self.offset, size = self.size,
72 |                 sector = self.sector, sector_count = self.sector_count,
73 |                 sync = self.sync, timestamp = self.timestamp,
74 |                 action = self.action)
75 |
76 |
77 | class FileLineIterator(object):
78 |     def __init__(self, file_path):
79 |         self.file_path = file_path
80 |
81 |     def __iter__(self):
82 |         with open(self.file_path, 'r') as f:
83 |             for line in f:
84 |                 line = line.strip()
85 |                 yield line
86 |
87 |
88 | class EventIterator(object):
89 |     """
90 |     Convert each text line to an Event and iterate over them
91 |     """
92 |     def __init__(self, conf, filelineiter):
93 |         self.conf = conf
94 |         self.sector_size = self.conf['sector_size']
95 |         self.filelineiter = filelineiter
96 |         self.event_file_column_names = self.conf['event_file_column_names']
97 |
98 |         self._translation = {'read': OP_READ, 'write': OP_WRITE,
99 |                 'discard':OP_DISCARD}
100 |
101 |     def _convert(self, op_in_file):
102 |         return self._translation[op_in_file]
103 |
104 |     def str_to_event(self, line):
105 |         items = line.split()
106 |         if len(self.event_file_column_names) != len(items):
107 |             raise RuntimeError("Lengths not equal: {} {}".format(
108 |                 self.event_file_column_names, items))
109 |         dic = dict(zip(self.event_file_column_names, items))
110 |         dic['sector_size'] = self.sector_size
111 |         if dic['pre_wait_time'] != 'NA':
112 |             dic['pre_wait_time'] = float(dic['pre_wait_time'])
113 |
114 |         dic['operation'] = self._convert(dic['operation'])
115 |
116 |         return Event(**dic)
117 |
118 |     def __iter__(self):
119 |         for line in self.filelineiter:
120 |             yield self.str_to_event(line)
121 |
122 |
-------------------------------------------------------------------------------- /wiscsim/bitmap.py: --------------------------------------------------------------------------------
1 | import bitarray
2 | import config
3 |
4 | class FlashBitmap2(object):
5 |     "Use two bits to represent the state of a page"
6 |     ERASED, VALID, INVALID = (bitarray.bitarray('00'),
7 |         bitarray.bitarray('01'), bitarray.bitarray('10'))
8 |
9 |     def __init__(self, conf):
10 |         if not isinstance(conf, config.Config):
11 |             raise TypeError("conf is not config.Config, it is {}".
12 |                 format(type(conf).__name__))
13 |
14 |         self.conf = conf
15 |
16 |         # We use two bits to record the state of a page so that
17 |         # we are able to record the ERASED state
18 |         self.bitmap = bitarray.bitarray(2 * conf.total_num_pages())
19 |         self.bitmap.setall(0)
20 |
21 |     def pagenum_to_slice_range(self, pagenum):
22 |         "2 is the number of bits representing the state of a page"
23 |         return 2 * pagenum, 2 * (pagenum + 1)
24 |
25 |     def blocknum_to_slice_range(self, blocknum):
26 |         start, end = self.conf.block_to_page_range(blocknum)
27 |         s, _ = self.pagenum_to_slice_range(start)
28 |         # note that end is the first page after the block, so
29 |         # the first bit of page end is the first bit after the block,
30 |         # not the second
31 |         e, _ = self.pagenum_to_slice_range(end)
32 |
33 |         return s, e
34 |
35 |     def validate_page(self, pagenum):
36 |         s, e = self.pagenum_to_slice_range(pagenum)
37 |         self.bitmap[s:e] = self.VALID
38 |
39 |     def invalidate_page(self, pagenum):
40 |         s, e = self.pagenum_to_slice_range(pagenum)
41 |         self.bitmap[s:e] = self.INVALID
42 |
43 |     def validate_block(self, blocknum):
44 |         start, end = self.conf.block_to_page_range(blocknum)
45 |         for pg in range(start, end):
46 |             self.validate_page(pg)
47 |
48 |     def invalidate_block(self, blocknum):
49 |         start, end = self.conf.block_to_page_range(blocknum)
50 |         for pg in range(start, end):
51 |             self.invalidate_page(pg)
52 |
53 |     def erase_block(self, blocknum):
54 |         s, e = self.blocknum_to_slice_range(blocknum)
55 |         self.bitmap[s:e] = 0
56 |
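FlashBitmap2 spends two bits per page so that ERASED ('00') is distinguishable from VALID ('01') and INVALID ('10'). A short sketch of the state transitions, assuming `conf` is a fully populated wiscsim config.Config:

bmap = FlashBitmap2(conf)
bmap.is_page_erased(7)       # True, everything starts ERASED
bmap.validate_page(7)        # page 7 now holds live data
bmap.invalidate_page(7)      # data overwritten elsewhere; page 7 is garbage
bmap.page_state_human(7)     # 'INVALID'
bmap.erase_block(0)          # every page of block 0 back to ERASED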
57 |     def block_invalid_ratio(self, blocknum):
58 |         start, end = self.conf.block_to_page_range(blocknum)
59 |         cnt = 0
60 |         for pg in range(start, end):
61 |             if not self.is_page_valid(pg):
62 |                 cnt += 1
63 |
64 |         return cnt / float(self.conf.n_pages_per_block)
65 |
66 |     def block_valid_ratio(self, blocknum):
67 |         start, end = self.conf.block_to_page_range(blocknum)
68 |         cnt = 0
69 |         for pg in range(start, end):
70 |             if self.is_page_valid(pg):
71 |                 cnt += 1
72 |
73 |         ret = cnt / float(self.conf.n_pages_per_block)
74 |         return ret
75 |
76 |     def block_erased_ratio(self, blocknum):
77 |         start, end = self.conf.block_to_page_range(blocknum)
78 |         cnt = 0
79 |         for pg in range(start, end):
80 |             if self.is_page_erased(pg):
81 |                 cnt += 1
82 |
83 |         ret = cnt / float(self.conf.n_pages_per_block)
84 |         return ret
85 |
86 |     def is_page_valid(self, pagenum):
87 |         s, e = self.pagenum_to_slice_range(pagenum)
88 |         return self.bitmap[s:e] == self.VALID
89 |
90 |     def is_page_invalid(self, pagenum):
91 |         s, e = self.pagenum_to_slice_range(pagenum)
92 |         return self.bitmap[s:e] == self.INVALID
93 |
94 |     def is_page_erased(self, pagenum):
95 |         s, e = self.pagenum_to_slice_range(pagenum)
96 |         return self.bitmap[s:e] == self.ERASED
97 |
98 |     def page_bits(self, pagenum):
99 |         s, e = self.pagenum_to_slice_range(pagenum)
100 |         return self.bitmap[s:e]
101 |
102 |     def block_bits(self, blocknum):
103 |         s, e = self.blocknum_to_slice_range(blocknum)
104 |         return self.bitmap[s:e]
105 |
106 |     def page_state(self, pagenum):
107 |         """
108 |         This is usually for usage:
109 |             if bmap.page_state(333) == bmap.VALID:
110 |                 do something
111 |         """
112 |         s, e = self.pagenum_to_slice_range(pagenum)
113 |         return self.bitmap[s:e]
114 |
115 |     def page_state_human(self, pagenum):
116 |         state = self.page_state(pagenum)
117 |         if state == self.VALID:
118 |             return "VALID"
119 |         elif state == self.INVALID:
120 |             return "INVALID"
121 |         elif state == self.ERASED:
122 |             return "ERASED"
123 |         else:
124 |             raise RuntimeError("page {} state is not recognized: {}".format(
125 |                 pagenum, state))
126 |
127 |     def initialize(self):
128 |         """ this method should be called in the FTL """
129 |         # set the state of all pages to ERASED
130 |         self.bitmap.setall(0)
131 |
132 |
-------------------------------------------------------------------------------- /wiscsim/blkpool.py: --------------------------------------------------------------------------------
1 | from collections import deque
2 | import sys
3 | from wiscsim.devblockpool import *
4 | from ftlsim_commons import random_channel_id
5 |
6 | TDATA = 'TDATA'
7 | TTRANS = 'TTRANS'
8 |
9 | class OutOfSpaceError(RuntimeError):
10 |     pass
11 |
12 | class BlockPool(object):
13 |     def __init__(self, confobj):
14 |         self.conf = confobj
15 |         self.n_channels = self.conf['flash_config']['n_channels_per_dev']
16 |         self.stripe_size = self.conf['stripe_size']
17 |
18 |         self.pool = MultiChannelBlockPool(
19 |                 n_channels=self.n_channels,
20 |                 n_blocks_per_channel=self.conf.n_blocks_per_channel,
21 |                 n_pages_per_block=self.conf.n_pages_per_block,
22 |                 tags=[TDATA, TTRANS],
23 |                 leveling_factor = self.conf['wear_leveling_factor'],
24 |                 leveling_diff = self.conf['wear_leveling_diff']
25 |                 )
26 |
27 |     @property
28 |     def freeblocks(self):
29 |         blocks = self.pool.get_blocks_of_tag(tag=TFREE)
30 |         return blocks
31 |
32 |     @property
33 |     def data_usedblocks(self):
34 |         blocks = self.pool.get_blocks_of_tag(tag=TDATA)
35 |         return blocks
36 |
37 |     @property
38 |     def trans_usedblocks(self):
39 |         blocks = self.pool.get_blocks_of_tag(tag=TTRANS)
40 |         return blocks
41 |
42 |     @property
43 |     def used_blocks(self):
44 |         blocks1 = self.pool.get_blocks_of_tag(tag=TDATA)
45 |         blocks2 = self.pool.get_blocks_of_tag(tag=TTRANS)
46 |
47 |         return blocks1 + blocks2
48 |
49 |     def get_wear_status(self):
50 |         return self.pool.get_wear_status()
51 |
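BlockPool tags every physical block as free (TFREE), data (TDATA), or translation (TTRANS), and allocation is just moving blocks between tags. A hedged sketch of the life cycle, again assuming a fully populated wiscsim config object `conf`:

pool = BlockPool(conf)
blk = pool.pop_a_free_block_to_data()    # TFREE -> TDATA, least-erased block first
ppn = pool.next_data_page_to_program()   # next programmable data page (a ppn)
pool.move_used_data_block_to_free(blk)   # TDATA -> TFREE once GC has erased it
print pool.used_ratio()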
52 |     def need_wear_leveling(self):
53 |         return self.pool.need_wear_leveling()
54 |
55 |     def get_erasure_count(self):
56 |         return self.pool.get_erasure_count()
57 |
58 |     def get_erasure_count_dist(self):
59 |         return self.pool.get_erasure_count_dist()
60 |
61 |     def count_blocks(self, tag, channels=None):
62 |         return self.pool.count_blocks(tag, channels)
63 |
64 |     def pop_a_free_block_to_trans(self, choice=LEAST_ERASED):
65 |         try:
66 |             blocknum = self.pool.pick_and_move(src=TFREE, dst=TTRANS,
67 |                     choice=choice)
68 |         except TagOutOfSpaceError:
69 |             raise OutOfSpaceError
70 |         return blocknum
71 |
72 |     def pop_a_free_block_to_data(self, choice=LEAST_ERASED):
73 |         try:
74 |             blocknum = self.pool.pick_and_move(src=TFREE, dst=TDATA,
75 |                     choice=choice)
76 |         except TagOutOfSpaceError:
77 |             raise OutOfSpaceError
78 |         return blocknum
79 |
80 |     def move_used_data_block_to_free(self, blocknum):
81 |         self.pool.change_tag(blocknum, src=TDATA, dst=TFREE)
82 |
83 |     def move_used_trans_block_to_free(self, blocknum):
84 |         self.pool.change_tag(blocknum, src=TTRANS, dst=TFREE)
85 |
86 |     def move_used_trans_block_to_data(self, blocknum):
87 |         self.pool.change_tag(blocknum, src=TTRANS, dst=TDATA)
88 |
89 |     def next_n_data_pages_to_program_striped(self, n, seg_id=0, choice=LEAST_ERASED):
90 |         try:
91 |             ppns = self.pool.next_ppns(n=n, tag=TDATA, block_index=seg_id,
92 |                     stripe_size=self.conf['stripe_size'], choice=choice)
93 |         except TagOutOfSpaceError:
94 |             raise OutOfSpaceError
95 |         return ppns
96 |
97 |     def next_data_page_to_program(self, seg_id=0):
98 |         ppns = self.pool.next_ppns(n=1, tag=TDATA, block_index=seg_id,
99 |                 stripe_size=1)
100 |         return ppns[0]
101 |
102 |     def next_translation_page_to_program(self):
103 |         ppns = self.pool.next_ppns(n=1, tag=TTRANS, block_index=0,
104 |                 stripe_size=1)
105 |         return ppns[0]
106 |
107 |     def next_gc_data_page_to_program(self, choice=LEAST_ERASED):
108 |         ppns = self.pool.next_ppns(n=1, tag=TDATA, block_index=0,
109 |                 stripe_size=1, choice=choice)
110 |         return ppns[0]
111 |
112 |     def next_gc_translation_page_to_program(self, choice=LEAST_ERASED):
113 |         ppns = self.pool.next_ppns(n=1, tag=TTRANS, block_index=0,
114 |                 stripe_size=1, choice=choice)
115 |         return ppns[0]
116 |
117 |     def current_blocks(self):
118 |         return self.pool.current_blocks()
119 |
120 |     def used_ratio(self):
121 |         nfree = self.pool.count_blocks(tag=TFREE)
122 |         return (self.conf.n_blocks_per_dev - nfree) / float(self.conf.n_blocks_per_dev)
123 |
124 |     def total_used_blocks(self):
125 |         nfree = self.pool.count_blocks(tag=TFREE)
126 |         return self.conf.n_blocks_per_dev - nfree
127 |
128 |     def num_freeblocks(self):
129 |         nfree = self.pool.count_blocks(tag=TFREE)
130 |         return nfree
131 |
132 |     def remove_full_cur_blocks(self):
133 |         self.pool.remove_full_cur_blocks()
134 |
135 |
-------------------------------------------------------------------------------- /wiscsim/tagblockpool.py: --------------------------------------------------------------------------------
1 | from collections import Counter
2 |
3 | TFREE = 'TAGFREE'
4 |
5 | LEAST_ERASED = 'least'
6 | MOST_ERASED = 'most'
7 |
8 |
9 | class TagBlockPool(object):
10 |     def __init__(self, n, tags):
11 |         self._tag_subpool = {tag:[] for tag in tags}
12 |         self._tag_subpool[TFREE] = range(n)
13 |
14 |         # {blocknum: count}
15 |         self._erasure_cnt = Counter()
16 |         # we have to put every block number in the counter;
17 |         # otherwise, a free block that is never used would not
18 |         # appear in the counter.
19 | for block in range(n): 20 | self._erasure_cnt[block] = 0 21 | 22 | def get_blocks_of_tag(self, tag): 23 | return self._tag_subpool[tag] 24 | 25 | def change_tag(self, blocknum, src, dst): 26 | self._tag_subpool[src].remove(blocknum) 27 | self._tag_subpool[dst].append(blocknum) 28 | 29 | if dst == TFREE: 30 | self._erasure_cnt[blocknum] += 1 31 | 32 | def count_blocks(self, tag): 33 | return len(self._tag_subpool[tag]) 34 | 35 | def pick(self, tag, choice=LEAST_ERASED): 36 | return self.get_least_or_most_erased_block(tag, choice) 37 | 38 | def pick_and_move(self, src, dst, choice=LEAST_ERASED): 39 | block = self.pick(src, choice=choice) 40 | 41 | if block is None: 42 | return None 43 | else: 44 | self.change_tag(block, src, dst) 45 | return block 46 | 47 | def get_erasure_count(self, blocknum=None): 48 | if blocknum is None: 49 | return self._erasure_cnt 50 | else: 51 | return self._erasure_cnt[blocknum] 52 | 53 | def get_least_or_most_erased_block(self, tag, choice=LEAST_ERASED): 54 | blocks = self.get_least_or_most_erased_blocks(tag, choice, nblocks=1) 55 | 56 | assert len(blocks) <= 1 57 | if len(blocks) == 1: 58 | return blocks[0] 59 | else: 60 | return None 61 | 62 | def get_least_or_most_erased_blocks(self, tag, choice, nblocks): 63 | if choice == LEAST_ERASED: 64 | blocks_by_cnt = reversed(self._erasure_cnt.most_common()) 65 | elif choice == MOST_ERASED: 66 | blocks_by_cnt = self._erasure_cnt.most_common() 67 | else: 68 | raise NotImplementedError 69 | 70 | tag_blocks = self.get_blocks_of_tag(tag) 71 | 72 | # iterate from least used to most used 73 | blocks = [] 74 | for blocknum, count in blocks_by_cnt: 75 | if blocknum in tag_blocks: 76 | blocks.append(blocknum) 77 | if len(blocks) == nblocks: 78 | break 79 | 80 | return blocks 81 | 82 | def get_erasure_count_dist(self): 83 | return Counter(self._erasure_cnt.values()) 84 | 85 | 86 | class CurrentBlock(object): 87 | def __init__(self, n_pages_per_block, blocknum): 88 | self.n_pages_per_block = n_pages_per_block 89 | self.blocknum = blocknum 90 | self.next_page_offset = 0 91 | 92 | def next_ppns(self, n): 93 | end_offset = min(self.next_page_offset + n, self.n_pages_per_block) 94 | ppns = [] 95 | for offset in range(self.next_page_offset, end_offset): 96 | ppns.append(self.n_pages_per_block * self.blocknum + offset) 97 | 98 | self.next_page_offset = end_offset 99 | return ppns 100 | 101 | def num_free_pages(self): 102 | return self.n_pages_per_block - self.next_page_offset 103 | 104 | def is_full(self): 105 | assert self.next_page_offset <= self.n_pages_per_block 106 | return self.next_page_offset == self.n_pages_per_block 107 | 108 | 109 | class BlockPoolWithCurBlocks(TagBlockPool): 110 | def __init__(self, n, tags, n_pages_per_block): 111 | super(BlockPoolWithCurBlocks, self).__init__(n, tags) 112 | self._n_pages_per_block = n_pages_per_block 113 | 114 | # {TAG1: {0: CurrentBlock obj, 1: CurrentBlock obj}, 115 | # TAG2: {0: CurrentBlock obj, 1: CurrentBlock obj}} 116 | self._cur_blocks = {tag:{} for tag in tags} 117 | 118 | def get_cur_block_obj(self, tag, block_index=None): 119 | """ 120 | There can be several 'current blocks', use block_index to 121 | choose. 122 | """ 123 | if block_index is None: 124 | # return all cur block objs of a tag if block_index is not 125 | # specified. 
126 |             return [obj for obj in self._cur_blocks[tag].values()]
127 |         else:
128 |             return self._cur_blocks[tag].get(block_index, None)
129 |
130 |     def next_ppns_from_cur_block(self, n, tag, block_index):
131 |         """
132 |         Return only what the current block has. If the current block
133 |         is full or is None, return an empty list.
134 |         """
135 |         cur_block_obj = self.get_cur_block_obj(tag, block_index)
136 |         if cur_block_obj is None:
137 |             return []
138 |         else:
139 |             ppns = cur_block_obj.next_ppns(n)
140 |             return ppns
141 |
142 |     def remove_full_cur_blocks(self):
143 |         """
144 |         If a cur block is full, we mark it as NON cur block, so the garbage
145 |         collector can clean it.
146 |         """
147 |         for tag, cur_obj_dict in self._cur_blocks.items():
148 |             to_del_block_index = [
149 |                     block_index for block_index, obj in cur_obj_dict.items()
150 |                     if obj.is_full()]
151 |             for block_index in to_del_block_index:
152 |                 del cur_obj_dict[block_index]
153 |
154 |     def set_new_cur_block(self, tag, block_index, blocknum):
155 |         """
156 |         Set block blocknum to be the current block of tag and block_index.
157 |         blocknum must be a freshly tagged (tag) block, usually obtained by
158 |         pick_and_move().
159 |         blocknum must have been tagged $tag before calling this function.
160 |         """
161 |         block_obj = CurrentBlock(self._n_pages_per_block, blocknum=blocknum)
162 |         self._cur_blocks[tag][block_index] = block_obj
163 |         return block_obj
164 |
165 |
-------------------------------------------------------------------------------- /pyreuse/helpers.py: --------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import itertools
4 | import shlex
5 | import json
6 |
7 | def shcmd(cmd, ignore_error=False):
8 |     print 'Doing:', cmd
9 |     ret = subprocess.call(cmd, shell=True)
10 |     print 'Returned', ret, cmd
11 |     if ignore_error == False and ret != 0:
12 |         exit(ret)
13 |     return ret
14 |
15 |
16 | class cd:
17 |     """Context manager for changing the current working directory"""
18 |     def __init__(self, newPath):
19 |         self.newPath = newPath
20 |
21 |     def __enter__(self):
22 |         self.savedPath = os.getcwd()
23 |         os.chdir(self.newPath)
24 |
25 |     def __exit__(self, etype, value, traceback):
26 |         os.chdir(self.savedPath)
27 |
28 |
29 | def parameter_combinations(parameter_dict):
30 |     """
31 |     Get all combinations of the values from each key
32 |     http://tinyurl.com/nnglcs9
33 |     Input: parameter_dict={
34 |                     p0:[x, y, z, ..],
35 |                     p1:[a, b, c, ..],
36 |                     ...}
37 |     Output: [
38 |              {p0:x, p1:a, ..},
39 |              {..},
40 |              ...
41 |             ]
42 |     """
43 |     d = parameter_dict
44 |     return [dict(zip(d, v)) for v in itertools.product(*d.values())]
45 |
46 |
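A quick example of parameter_combinations() with WiscSee-flavored inputs:

d = {'filesystem': ['ext4', 'f2fs'], 'ncq_depth': [1, 31]}
print parameter_combinations(d)
# [{'filesystem': 'ext4', 'ncq_depth': 1},
#  {'filesystem': 'ext4', 'ncq_depth': 31},
#  {'filesystem': 'f2fs', 'ncq_depth': 1},
#  {'filesystem': 'f2fs', 'ncq_depth': 31}]   (dict ordering may vary)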
47 | def run_and_get_output(cmd, shell = False):
48 |     # split the command for exec-style invocation (shell=False)
49 |     cmd = shlex.split(cmd)
50 |     p = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE)
51 |     # read stdout before waiting, so a full pipe cannot deadlock the child
52 |     out = p.communicate()[0]
53 |     return out.splitlines(True)
54 |
55 |
56 | def load_json(fpath):
57 |     with open(fpath, 'r') as f:
58 |         return json.load(f)
59 |
60 |
61 | def dump_json(dic, file_path):
62 |     with open(file_path, "w") as f:
63 |         json.dump(dic, f, indent=4)
64 |
65 |
66 | def prepare_dir_for_path(path):
67 |     "create parent dirs for path if necessary"
68 |     dirpath = os.path.dirname(path)
69 |     if not os.path.exists(dirpath):
70 |         os.makedirs(dirpath)
71 |
72 |
73 | def prepare_dir(dirpath):
74 |     "create dirpath itself if necessary"
75 |     if not os.path.exists(dirpath):
76 |         os.makedirs(dirpath)
77 |
78 |
79 | def linux_kernel_version():
80 |     kernel_ver = run_and_get_output('uname -r')[0].strip()
81 |     return kernel_ver
82 |
83 |
84 | def adjust_width(s, width = 32):
85 |     return s.rjust(width)
86 |
87 |
88 | def table_to_str(table, adddic=None, sep=';', width=32):
89 |     """
90 |     table is of format: [
91 |             {'col1':data, 'col2':data, ..},
92 |             {'col1':data, 'col2':data, ..},
93 |             {'col1':data, 'col2':data, ..},
94 |     ]
95 |     output is:
96 |         col1 col2 col3 ..
97 |         data data data ..
98 |
99 |     """
100 |     if len(table) == 0:
101 |         return ""
102 |
103 |     tablestr = ''
104 |     colnames = table[0].keys()
105 |     if adddic is not None:
106 |         colnames += adddic.keys()
107 |     colnamestr = sep.join([adjust_width(s, width=width) for s in colnames]) + '\n'
108 |     tablestr += colnamestr
109 |     for row in table:
110 |         if adddic is not None:
111 |             rowcopy = dict(row.items() + adddic.items())
112 |         else:
113 |             rowcopy = row
114 |         rowstr = [rowcopy[k] for k in colnames]
115 |         rowstr = [adjust_width(str(x), width=width) for x in rowstr]
116 |         rowstr = sep.join(rowstr) + '\n'
117 |         tablestr += rowstr
118 |
119 |     return tablestr
120 |
121 |
122 | def _tarfilename(tarname):
123 |     return '{}.tar.xz'.format(tarname)
124 |
125 |
126 | def download_kernel(dirpath, tarname):
127 |     """
128 |     e.g.
tarname='linux-4.5.4' 129 | """ 130 | with cd(dirpath): 131 | tarfile = _tarfilename(tarname) 132 | shcmd("wget https://www.kernel.org/pub/linux/kernel/v4.x/{}"\ 133 | .format(tarfile)) 134 | shcmd("tar xf {}".format(tarfile)) 135 | 136 | 137 | def read_byte_range(filepath, start, size): 138 | f = open(filepath, 'rb') 139 | f.seek(start) 140 | 141 | data = [] 142 | for i in range(size): 143 | byte = f.read(1) 144 | value = ord(byte) 145 | data.append(value) 146 | 147 | f.close() 148 | 149 | return data 150 | 151 | def display_binary(data): 152 | for v in data: 153 | print '{v}({h})'.format(v=v, h=hex(v)), 154 | 155 | 156 | def run_cmd_on_nodes(cmd, nodes, sync, id_map, do_not_run=False): 157 | """ 158 | example id_map: 159 | {0:'node-0', 1:'node-1'} 160 | 161 | example usage: 162 | run_cmd_on_nodes(cmd='hostname', nodes=[25], sync=True, id_map=table) 163 | """ 164 | procs = {} 165 | for node_id in nodes: 166 | print '----------', node_id, '----------' 167 | p = run_cmd_on_node(cmd, node_id, sync, id_map, do_not_run) 168 | if not p is None: 169 | procs[node_id] = p 170 | 171 | # wait 172 | for node_id, p in procs.items(): 173 | ret = p.wait() 174 | print 'Node', node_id, 'returned', ret 175 | 176 | 177 | def run_cmd_on_node(cmd, node_id, sync, id_map, do_not_run): 178 | cmd = "ssh {host} '{cmd}'".format(host=id_map[node_id], cmd=cmd) 179 | 180 | if do_not_run is True: 181 | print cmd 182 | return None 183 | 184 | if sync is True: 185 | print 'sync', cmd 186 | shcmd(cmd) 187 | return None 188 | else: 189 | print 'async', cmd 190 | p = subprocess.Popen(cmd, shell=True) 191 | return p 192 | 193 | 194 | ######################################################## 195 | # table = [ 196 | # {'col1':data, 'col2':data, ..}, 197 | # {'col1':data, 'col2':data, ..}, 198 | # ... 
199 | #     ]
200 | def table_to_file(table, filepath, adddic=None, width=32):
201 |     'save table to a file, with additional columns from adddic'
202 |     with open(filepath, 'w') as f:
203 |         if len(table) == 0:
204 |             return
205 |         f.write( table_to_str(table, adddic=adddic, width=width) )
206 |
207 | def drop_caches():
208 |     cmd = "echo 3 > /proc/sys/vm/drop_caches"
209 |     subprocess.call(cmd, shell=True)
210 |
211 |
212 |
-------------------------------------------------------------------------------- /workrunner/filesystem.py: --------------------------------------------------------------------------------
1 | import abc
2 | import os
3 |
4 | import fshelper
5 | from utilities import utils
6 |
7 | class LoopDevice(object):
8 |     def __init__(self, dev_path, tmpfs_mount_point, size_mb, img_file=None):
9 |         self.dev_path = dev_path
10 |         self.tmpfs_mount_point = tmpfs_mount_point
11 |         self.size_mb = size_mb
12 |         self.img_file = img_file
13 |
14 |     def create(self):
15 |         fshelper.make_loop_device(self.dev_path, self.tmpfs_mount_point,
16 |             self.size_mb, self.img_file)
17 |
18 |     def delete(self):
19 |         fshelper.delLoopDev(self.dev_path)
20 |
21 | class FileSystemBase(object):
22 |     __metaclass__ = abc.ABCMeta
23 |
24 |     def __init__(self, device, mount_point):
25 |         self.dev = device
26 |         self.mount_point = mount_point
27 |
28 |     @abc.abstractmethod
29 |     def make(self):
30 |         "will never be here"
31 |         raise NotImplementedError
32 |
33 |     def mount(self, opt_list=None):
34 |         opt_str = mountoption_to_str(opt_list)
35 |
36 |         utils.prepare_dir(self.mount_point)
37 |         ret = utils.shcmd('mount {opt} {dev} {mp}'.format(
38 |             opt = opt_str, dev = self.dev, mp = self.mount_point),
39 |             ignore_error = True)
40 |         if ret != 0:
41 |             raise RuntimeError("Failed to mount dev:{} to dir:{}".format(
42 |                 self.dev, self.mount_point))
43 |
44 |     def umount(self):
45 |         ret = fshelper.umountFS(self.mount_point)
46 |         if ret != 0:
47 |             raise RuntimeError("Failed to umount {}".format(self.mount_point))
48 |
49 |     def sync(self):
50 |         utils.shcmd("sync")
51 |
52 | def opts_to_str(opt_dic):
53 |     """
54 |     This function translates opt_dic into a string that fits the command line
55 |
56 |     opt_dic is in the format of:
57 |         {'-O':['has_journal', '^uninit_bg'], '-X':['xx']}
58 |
59 |     We will turn it to string:
60 |         "-O has_journal,^uninit_bg -X xx"
61 |     """
62 |     if opt_dic == None or len(opt_dic) == 0:
63 |         return ''
64 |
65 |     opt_list = []
66 |     for opt, values in opt_dic.items():
67 |         values = [str(s) for s in values]
68 |         value_str = ','.join(values)
69 |         tmp = ' '.join((opt, value_str))
70 |         opt_list.append(tmp)
71 |
72 |     opt_str = ' '.join(opt_list)
73 |
74 |     return opt_str
75 |
76 | def mountoption_to_str(options):
77 |     """
78 |     options is a dict of dicts:
79 |     for example:
80 |         { 'data':     {'opt_name':'data',
81 |                        'value': 'data',
82 |                        'include_name': True},
83 |           'delalloc': {'opt_name':'dealloc',
84 |                        'value': 'dealloc',
85 |                        'include_name': False},
86 |           ...
87 |         }
88 |
89 |     If you want to override mount options from /etc/fstab you have
90 |     to use the -o option:
91 |
92 |         mount device|dir -o options
93 |
94 |     and then the mount options from the command line will be
95 |     appended to the list of options from /etc/fstab. The usual
96 |     behavior is that the last option wins if there are conflicting
97 |     ones.
98 | 99 | """ 100 | if options == None: 101 | return '' 102 | 103 | strs = [] 104 | for _, opt in options.items(): 105 | if opt['value'] != None: 106 | if opt['include_name'] == True: 107 | itemstr = opt['opt_name'] + '=' + str(opt['value']) 108 | else: 109 | itemstr = str(opt['value']) 110 | strs.append(itemstr) 111 | 112 | if len(options) > 0: 113 | opt_str = '-o ' + ','.join(strs) 114 | else: 115 | opt_str = '' 116 | 117 | return opt_str 118 | 119 | class Ext4(FileSystemBase): 120 | def make(self, opt_dic=None): 121 | opt_str = opts_to_str(opt_dic) 122 | 123 | ret = utils.shcmd('mkfs.ext4 {opt_str} -E nodiscard {dev}'.format( 124 | opt_str = opt_str, dev = self.dev), ignore_error = True) 125 | if ret != 0: 126 | raise RuntimeError("Failed to make dev:{}".format(self.dev)) 127 | 128 | class F2fs(FileSystemBase): 129 | def make(self, opt_dic=None): 130 | opt_str = opts_to_str(opt_dic) 131 | 132 | ret = utils.shcmd('mkfs.f2fs -t 0 {opt} {dev}'.format( 133 | opt=opt_str, dev = self.dev), ignore_error = True) 134 | if ret != 0: 135 | raise RuntimeError("Failed to make dev:{}".format(self.dev)) 136 | 137 | def mount(self, opt_list=None): 138 | """ 139 | Overriding mount() in parent since you need to have '-t f2fs' to 140 | mount f2fs, somehow. 141 | """ 142 | opt_str = mountoption_to_str(opt_list) 143 | 144 | utils.prepare_dir(self.mount_point) 145 | ret = utils.shcmd('mount -t f2fs {opt} {dev} {mp}'.format( 146 | opt = opt_str, dev = self.dev, mp = self.mount_point), ignore_error = True) 147 | if ret != 0: 148 | raise RuntimeError("Failed to mount dev:{} to dir:{}".format( 149 | self.dev, self.mount_point)) 150 | def sysfs_setup(self, option, value): 151 | """ 152 | This function sets up the parameters in sysfs. 153 | Option is the file name in sysfs. 
154 | """ 155 | devname = os.path.basename(self.dev) 156 | folder = '/sys/fs/f2fs/{dev}'.format(dev = devname) 157 | path = os.path.join(folder, option) 158 | with open(path, 'w') as f: 159 | f.write(str(value)) 160 | 161 | class Btrfs(FileSystemBase): 162 | def make(self, opt_dic=None): 163 | opt_str = opts_to_str(opt_dic) 164 | 165 | ret = utils.shcmd('mkfs.btrfs -f {opt} --nodiscard {dev}'.format( 166 | opt=opt_str, dev = self.dev), ignore_error = True) 167 | if ret != 0: 168 | raise RuntimeError("Failed to make dev:{}".format(self.dev)) 169 | 170 | class Xfs(FileSystemBase): 171 | def make(self, opt_dic=None): 172 | if opt_dic == None: 173 | opt_str = '' 174 | else: 175 | items = [ ' '.join([k,v]) for k,v in opt_dic.items() ] 176 | opt_str = ' '.join(items) 177 | 178 | ret = utils.shcmd("mkfs.xfs {opt} -K -f -s size=4096 -b size=4096 {dev}"\ 179 | .format(opt = opt_str, dev = self.dev), ignore_error = True) 180 | 181 | if ret != 0: 182 | raise RuntimeError("Failed to make dev:{}".format(self.dev)) 183 | 184 | # loopdev = LoopDevice(dev_path = '/dev/loop0', tmpfs_mount_point = '/mnt/tmpfs', 185 | # size_mb = 4096) 186 | # loopdev.create() 187 | 188 | # ext4 = Ext4(device='/dev/loop0', mount_point='/mnt/fsonloop') 189 | # ext4.make() 190 | # ext4.mount() 191 | 192 | # f2fs = F2fs(device='/dev/loop0', mount_point='/mnt/fsonloop') 193 | # f2fs.make() 194 | # f2fs.mount() 195 | 196 | 197 | -------------------------------------------------------------------------------- /tests/lrulist_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import wiscsim 4 | from wiscsim.lrulist import LinkedList, Node, LruDict, LruCache 5 | import profile 6 | 7 | 8 | class Test_lrucache(unittest.TestCase): 9 | def test1(self): 10 | lrucache = wiscsim.lrulist.LruCache() 11 | lrucache[1] = 11 12 | lrucache[2] = 22 13 | lrucache[3] = 33 14 | 15 | self.assertEqual(lrucache.least_recently_used_key(), 1) 16 | self.assertEqual(lrucache.most_recently_used_key(), 3) 17 | lrucache[2] = 222 18 | self.assertEqual(lrucache.most_recently_used_key(), 2) 19 | self.assertEqual(lrucache.peek(1), 11) 20 | # peek should not change order 21 | self.assertEqual(lrucache.most_recently_used_key(), 2) 22 | 23 | lrucache.orderless_update(1, 111) 24 | self.assertEqual(lrucache.most_recently_used_key(), 2) 25 | self.assertEqual(lrucache.peek(1), 111) 26 | 27 | del lrucache[1] 28 | self.assertEqual(lrucache.has_key(1), False) 29 | 30 | class Test_LruCache(unittest.TestCase): 31 | def get_lrucache(self): 32 | d = LruCache() 33 | for i in range(10): 34 | d[i] = i*10 35 | return d 36 | 37 | def test_init(self): 38 | d = LruCache() 39 | d = LruCache({1:2}) 40 | d = LruCache(((1, 2), (2, 3))) 41 | d = LruCache(a = 1, b = 2) 42 | 43 | def test1(self): 44 | d = self.get_lrucache() 45 | 46 | def test_size(self): 47 | d = self.get_lrucache() 48 | self.assertEqual(len(d), 10) 49 | 50 | def test_del(self): 51 | d = self.get_lrucache() 52 | 53 | del d[2] 54 | self.assertEqual(len(d), 9) 55 | self.assertEqual(d.has_key(2), False) 56 | 57 | def test_iter(self): 58 | d = self.get_lrucache() 59 | self.assertListEqual(list(d), list(reversed(range(10)))) 60 | 61 | def test_reversed(self): 62 | d = self.get_lrucache() 63 | self.assertListEqual(list(reversed(d)), list(range(10))) 64 | 65 | def test_items(self): 66 | d = self.get_lrucache() 67 | lk = [] 68 | lv = [] 69 | for k, v in d.items(): 70 | # suppose to go from least to most recently 71 | lk.append(k) 72 | lv.append(v) 73 | 
self.assertListEqual(lk, list(range(10))) 74 | self.assertListEqual(lv, list(range(0, 100, 10))) 75 | 76 | def test_recency_iter(self): 77 | d = LruDict() 78 | d[1] = 11 79 | d[2] = 22 80 | 81 | l = [] 82 | for k in d.least_to_most_iter(): 83 | l.append(k) 84 | self.assertListEqual(l, [1, 2]) 85 | 86 | l = [] 87 | for k in d.most_to_least_iter(): 88 | l.append(k) 89 | self.assertListEqual(l, [2, 1]) 90 | 91 | def test_hits(self): 92 | d = self.get_lrucache() 93 | a = d[2] 94 | 95 | self.assertEqual(d.victim_key(), 0) 96 | self.assertEqual(d.most_recently_used_key(), 2) 97 | 98 | d[2] = 22 99 | self.assertEqual(d.victim_key(), 0) 100 | self.assertEqual(d.most_recently_used_key(), 2) 101 | 102 | d[3] = 333 103 | self.assertEqual(d.victim_key(), 0) 104 | self.assertEqual(d.most_recently_used_key(), 3) 105 | 106 | def test_peek(self): 107 | d = self.get_lrucache() 108 | 109 | a = d.peek(2) 110 | self.assertEqual(a, 20) 111 | self.assertEqual(d.victim_key(), 0) 112 | self.assertEqual(d.most_recently_used_key(), 9) 113 | 114 | def test_add_to_least_used(self): 115 | d = self.get_lrucache() 116 | 117 | d.add_as_least_used(10, 100) 118 | self.assertEqual(d.victim_key(), 10) 119 | self.assertEqual(d.most_recently_used_key(), 9) 120 | 121 | def _test_performance(self): 122 | d = LruDict() 123 | for i in range(2048): 124 | d[i] = i+1 125 | 126 | 127 | def go_through(self, d): 128 | for k, v in d.least_to_most_items(): 129 | v = 1 130 | 131 | 132 | 133 | 134 | class Test_LruDict(unittest.TestCase): 135 | def get_lrudict(self): 136 | d = LruDict() 137 | for i in range(10): 138 | d[i] = i*10 139 | return d 140 | 141 | def test_init(self): 142 | d = LruDict() 143 | d = LruDict({1:2}) 144 | d = LruDict(((1, 2), (2, 3))) 145 | d = LruDict(a = 1, b = 2) 146 | 147 | def test1(self): 148 | d = self.get_lrudict() 149 | 150 | def test_size(self): 151 | d = self.get_lrudict() 152 | self.assertEqual(len(d), 10) 153 | 154 | def test_del(self): 155 | d = self.get_lrudict() 156 | 157 | del d[2] 158 | self.assertEqual(len(d), 9) 159 | self.assertEqual(d.has_key(2), False) 160 | 161 | def test_iter(self): 162 | d = self.get_lrudict() 163 | self.assertListEqual(list(d), list(range(10))) 164 | 165 | def test_revsersed(self): 166 | d = self.get_lrudict() 167 | self.assertListEqual(list(reversed(d)), list(reversed(range(10)))) 168 | 169 | def test_items(self): 170 | d = self.get_lrudict() 171 | lk = [] 172 | lv = [] 173 | for k, v in d.items(): 174 | lk.append(k) 175 | lv.append(v) 176 | self.assertListEqual(lk, list(range(10))) 177 | self.assertListEqual(lv, list(range(0, 100, 10))) 178 | 179 | def test_recency_iter(self): 180 | d = LruDict() 181 | d[1] = 11 182 | d[2] = 22 183 | 184 | l = [] 185 | for k in d.least_to_most_iter(): 186 | l.append(k) 187 | self.assertListEqual(l, [1, 2]) 188 | 189 | l = [] 190 | for k in d.most_to_least_iter(): 191 | l.append(k) 192 | self.assertListEqual(l, [2, 1]) 193 | 194 | def test_hits(self): 195 | d = self.get_lrudict() 196 | a = d[2] 197 | 198 | self.assertEqual(d.victim_key(), 0) 199 | self.assertEqual(d.most_recent(), 2) 200 | 201 | d[2] = 22 202 | self.assertEqual(d.victim_key(), 0) 203 | self.assertEqual(d.most_recent(), 2) 204 | 205 | d[3] = 333 206 | self.assertEqual(d.victim_key(), 0) 207 | self.assertEqual(d.most_recent(), 3) 208 | 209 | def test_peek(self): 210 | d = self.get_lrudict() 211 | 212 | a = d.peek(2) 213 | self.assertEqual(a, 20) 214 | self.assertEqual(d.victim_key(), 0) 215 | self.assertEqual(d.most_recent(), 9) 216 | 217 | def _test_performance(self): 218 | d 
= LruDict() 219 | for i in range(2048): 220 | d[i] = i+1 221 | 222 | self.go_through(d) 223 | 224 | def go_through(self, d): 225 | for k, v in d.least_to_most_items(): 226 | v != 1 227 | 228 | 229 | def has_key(d, key): 230 | return d.has_key(key) 231 | 232 | def compare_dict_performance(): 233 | # d = LruCache() 234 | d = dict() 235 | for i in range(40000): 236 | d[i] = i 237 | 238 | for i in range(200000): 239 | # d.has_key(i) 240 | has_key(d, i) 241 | 242 | def main(): 243 | unittest.main() 244 | 245 | 246 | 247 | if __name__ == '__main__': 248 | main() 249 | 250 | 251 | -------------------------------------------------------------------------------- /workrunner/fshelper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import json 3 | import os 4 | import re 5 | import subprocess 6 | import time 7 | 8 | from utilities import utils 9 | from commons import * 10 | 11 | def umountFS(mountpoint): 12 | cmd = ["umount", mountpoint] 13 | p = subprocess.Popen(cmd) 14 | p.wait() 15 | return p.returncode 16 | 17 | def ext4_make(devname, blocksize=4096, makeopts=None): 18 | 19 | if makeopts == None: 20 | cmd = ["mkfs.ext4", 21 | "-b", blocksize, 22 | "-O", "^has_journal,extent,huge_file,flex_bg,uninit_bg,dir_nlink,extra_isize", 23 | devname] 24 | else: 25 | cmd = ["mkfs.ext4", 26 | "-b", blocksize] 27 | cmd.extend(makeopts) 28 | cmd.extend([devname]) 29 | 30 | cmd = [str(x) for x in cmd] 31 | p = subprocess.Popen(cmd) 32 | p.wait() 33 | print "makeExt4:", p.returncode 34 | return p.returncode 35 | 36 | def ext4_mount(devname, mountpoint): 37 | if not os.path.exists(mountpoint): 38 | os.makedirs(mountpoint) 39 | 40 | cmd = ["mount", "-t", "ext4", "-o", "discard", devname, mountpoint] 41 | p = subprocess.Popen(cmd) 42 | p.wait() 43 | print "mountExt4:", p.returncode 44 | return p.returncode 45 | 46 | # def ext4_create_on_loop(): 47 | # makeLoopDevice(config["loop_path"], config["tmpfs_mount_point"], 4096, img_file=None) 48 | # ext4_make(config["loop_path"], blocksize=4096, makeopts=None) 49 | # ext4_mount(devname=config["loop_path"], mountpoint=config["fs_mount_point"]) 50 | 51 | def ext4_make_simple(): 52 | ret = ext4_make(config["loop_path"], blocksize=4096, makeopts=None) 53 | if ret != 0: 54 | print 'error in ext4_make_simple()' 55 | exit(1) 56 | 57 | def ext4_mount_simple(): 58 | ret = ext4_mount(devname=config["loop_path"], mountpoint=config["fs_mount_point"]) 59 | if ret != 0: 60 | print 'error in ext4_mount_simple()' 61 | exit(1) 62 | 63 | def mkLoopDevOnFile(devname, filepath): 64 | cmd = ['losetup', devname, filepath] 65 | cmd = [str(x) for x in cmd] 66 | print " ".join(cmd), "......" 67 | proc = subprocess.Popen(cmd) 68 | proc.wait() 69 | 70 | return proc.returncode 71 | 72 | def delLoopDev(devname): 73 | cmd = ['losetup', '-d', devname] 74 | cmd = [str(x) for x in cmd] 75 | print " ".join(cmd), "......" 
76 |     proc = subprocess.Popen(cmd)
77 |     proc.wait()
78 |
79 |     return proc.returncode
80 |
81 | def isMounted(name):
82 |     "only check whether a name is in the mounted list"
83 |     name = name.rstrip('/')
84 |     print "isMounted: name:", name
85 |     with open('/etc/mtab', 'r') as f:
86 |         for line in f:
87 |             #print "line:", line,
88 |             line = " " + line + " " # a hack
89 |             if re.search(r'\s'+name+r'\s', line):
90 |                 #print " YES"
91 |                 return True
92 |     #print " NO"
93 |     return False
94 |
95 | def isLoopDevUsed(path):
96 |     cmd = ['losetup','-f']
97 |     proc = subprocess.Popen(cmd,
98 |         stdout=subprocess.PIPE)
99 |     proc.wait()
100 |
101 |     outstr = proc.communicate()[0]
102 |     outstr = outstr.strip()
103 |     if outstr > path:  # the first FREE loop device is past `path`, so `path` is in use
104 |         return True
105 |     else:
106 |         return False
107 |
108 |
109 | def make_loop_device(devname, tmpfs_mountpoint, sizeMB, img_file=None):
110 |     "size is in MB. The tmpfs for this device might be bigger than sizeMB"
111 |     if not devname.startswith('/dev/loop'):
112 |         raise RuntimeError('you are requesting to create a loop device on a non-loop device path')
113 |
114 |     if not os.path.exists(tmpfs_mountpoint):
115 |         os.makedirs(tmpfs_mountpoint)
116 |
117 |     # umount the FS mounted on the loop dev
118 |     if isMounted(devname):
119 |         if umountFS(devname) != 0:
120 |             raise RuntimeError("unable to umount {}".format(devname))
121 |         else:
122 |             print devname, 'umounted'
123 |     else:
124 |         print devname, "is not mounted"
125 |
126 |     # delete the loop device
127 |     if isLoopDevUsed(devname):
128 |         if delLoopDev(devname) != 0:
129 |             raise RuntimeError("!!!!!!!!!!!!! Failed to delete loop device")
130 |         else:
131 |             print devname, 'is deleted'
132 |     else:
133 |         print devname, "is not in use"
134 |
135 |
136 |     # umount the tmpfs the loop device is on
137 |     if isMounted(tmpfs_mountpoint):
138 |         if umountFS(tmpfs_mountpoint) != 0:
139 |             raise RuntimeError("unable to umount tmpfs at {}".format(tmpfs_mountpoint))
140 |         print tmpfs_mountpoint, "umounted"
141 |     else:
142 |         print tmpfs_mountpoint, "is not mounted"
143 |
144 |
145 |     mountTmpfs(tmpfs_mountpoint, int(sizeMB*1024*1024*1.1))
146 |     imgpath = os.path.join(tmpfs_mountpoint, "disk.img")
147 |     if img_file == None:
148 |         mkImageFile(imgpath, sizeMB)
149 |     else:
150 |         cmd = ['cp', img_file, imgpath]
151 |         subprocess.call(cmd)
152 |
153 |     ret = mkLoopDevOnFile(devname, imgpath)
154 |     if ret != 0:
155 |         raise RuntimeError("Failed at losetup")
156 |
157 | def mkImageFile(filepath, size):
158 |     "size is in MB"
159 |     cmd = ['truncate', '-s', str(size*1024*1024), filepath]
160 |     print " ".join(cmd), "......"
161 |     proc = subprocess.Popen(cmd)
162 |     proc.wait()
163 |     return proc.returncode
164 |
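make_loop_device() above tears down any previous mount, remounts a tmpfs, creates a disk image on it, and binds the loop device to that image. A one-line usage sketch with illustrative paths (mirroring the commented-out prepare_loop() below):

make_loop_device('/dev/loop0', '/mnt/tmpfs', sizeMB=1024)  # a 1 GB tmpfs-backed /dev/loop0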
177 | proc = subprocess.Popen(cmd) 178 | proc.wait() 179 | 180 | return proc.returncode 181 | 182 | 183 | # def prepare_loop(): 184 | # make_loop_device(config["loop_path"], config["tmpfs_mount_point"], 4096, img_file=None) 185 | 186 | def partition_disk(dev, part_sizes, padding): 187 | """ 188 | Example: 189 | dev = '/dev/sdc' 190 | part_sizes = [1 * GB, 4 * GB, 8 * GB] 191 | """ 192 | create_layout_file(part_sizes, padding) 193 | 194 | n_tries = 3 195 | success = False 196 | while n_tries > 0: 197 | n_tries -= 1 198 | ret = utils.shcmd("sudo sfdisk {} < /tmp/my.layout".format(dev), 199 | ignore_error = True) 200 | if ret == 0: 201 | success = True 202 | break 203 | print 'partition failed,', n_tries, 'tries left' 204 | time.sleep(1) 205 | 206 | if not success: 207 | raise RuntimeError("Failed when running sfdisk") 208 | 209 | utils.shcmd("sudo partprobe -s {}".format(dev)) 210 | 211 | 212 | # partition table of /dev/sdb 213 | #unit: sectors 214 | # 215 | #/dev/sdb1 : start= 4096, size=125829120, Id=a5 216 | #/dev/sdb2 : start=125833216, size=125829120, Id=83 217 | #/dev/sdb3 : start= 0, size= 0, Id= 0 218 | #/dev/sdb4 : start= 0, size= 0, Id= 0 219 | 220 | def create_layout_file(part_sizes, padding=8*MB): 221 | sector_size = 512 222 | 223 | lines = ["unit: sectors", ''] 224 | # Id is to specify Linux/FreeBSD/swap... 225 | line_temp = "/dev/sdb{id} : start= {start}, size={size}, Id=83" 226 | 227 | cur_sectors = padding / sector_size # skip 'padding' bytes at the start of the disk 228 | for i, partsize in enumerate(part_sizes): 229 | size_in_sector = partsize / sector_size 230 | line = line_temp.format(id=i+1, start = cur_sectors,  # partition numbers start at 1 (/dev/sdb1, ...) 231 | size = size_in_sector) 232 | lines.append(line) 233 | cur_sectors += size_in_sector 234 | 235 | with open('/tmp/my.layout', 'w') as f: 236 | f.write('\n'.join(lines)) 237 | f.write('\n') 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > Disclaimer: use this software at your own risk. We are not responsible for any data loss. 2 | 3 | > If you have any questions, please open an issue at https://github.com/junhe/wiscsee/issues. I'll be happy to help. 4 | 5 | WiscSee is an I/O workload analyzer that helps you understand your application 6 | performance on SSDs. WiscSee comes with a fully functioning trace-driven SSD simulator, 7 | WiscSim, which supports enhanced versions of multiple well-known FTLs, NCQ, multiple 8 | channels, garbage collection, wear-leveling, page allocation policies, and more. 9 | WiscSim is implemented as a Discrete-Event Simulator. 10 | 11 | WiscSee runs your application, collects its block I/O trace, and later feeds the trace 12 | to WiscSim. 13 | 14 | WiscSee was developed for our paper "The Unwritten Contract of Solid State 15 | Drives" (EuroSys'17) http://pages.cs.wisc.edu/~jhe/eurosys17-he.pdf. You may 16 | learn more about WiscSee from the paper. 17 | 18 | In this README file, you will learn 19 | 20 | - How to download and set up WiscSee 21 | - How to run helpful examples of WiscSee 22 | - How to quickly start running your application on an SSD simulator 23 | - How to produce zombie curves (a useful way of studying garbage collection overhead of your applications) 24 | 25 | # Download and Setup 26 | 27 | ### Option 1: VM Image 28 | 29 | We made a VirtualBox VM Image that has the complete environment ready (Ubuntu 30 | 16.04 + WiscSee + dependencies). 
You do not need to do any configuration. It is the easiest 31 | option to set up. It is guaranteed to run. But the WiscSee in the 32 | image may be outdated. You probably want to replace it with the latest one. 33 | 34 | In order to use this option, you need to have VirtualBox (https://www.virtualbox.org/) installed before starting the following steps. 35 | 36 | 1. Download the VirtualBox image from the following address: 37 | 38 | ``` 39 | http://pages.cs.wisc.edu/~jhe/wiscsee-vm.tar.gz 40 | ``` 41 | 42 | The SHA256 sum of the file is: 43 | 44 | ``` 45 | 80c5f586d525e0fa54266984065b2b727f71c45de8940aafd7247d49db8e0070 46 | ``` 47 | 48 | 2. Untar the downloaded file. 49 | 50 | 3. Open the VM image with VirtualBox. 51 | 52 | This VM image may also work with other VM managers. 53 | 54 | 4. Log in to the guest OS 55 | 56 | ``` 57 | Username: wsee 58 | Password: abcabc 59 | ``` 60 | 61 | 5. Run tests 62 | 63 | ``` 64 | cd /home/wsee/workdir/wiscsee 65 | make test_all 66 | ``` 67 | 68 | The root password is: 69 | 70 | ``` 71 | abcabc 72 | ``` 73 | 74 | ### Option 2: Git clone 75 | 76 | WiscSee was developed in Ubuntu 14.04 with kernel 4.5.4. Other variants of Linux 77 | should also work. But you may need to modify `setup.env.sh` to use different 78 | Linux package managers. 79 | 80 | **I strongly recommend using Ubuntu 14.04 and kernel 4.5.4 as a starting point. Other versions have known issues (from blktrace and `/proc/sys/`). If you are on Cloudlab.us, you can use profile ubuntu-14.04-linux-4.5.4, which is compatible with WiscSee.** 81 | 82 | 1. Clone 83 | 84 | ``` 85 | git clone https://github.com/junhe/wiscsee.git 86 | ``` 87 | 88 | 2. Setup 89 | 90 | ``` 91 | cd wiscsee 92 | make setup 93 | ``` 94 | 95 | `make setup` will execute `setup.env.sh`, which installs the dependencies of 96 | WiscSee. 97 | 98 | 3. Run tests 99 | 100 | ``` 101 | make test_all 102 | ``` 103 | 104 | # Run Examples 105 | 106 | Running and reading the examples is a great way to learn WiscSee. The code of 107 | the examples is in `tests/test_demo.py`. (You can also start learning WiscSee 108 | with the Tutorial below.) 109 | 110 | To run the examples, run the following command in the WiscSee directory. 111 | 112 | ``` 113 | make run_demo 114 | ``` 115 | 116 | The examples include: 117 | 118 | 1. Collect I/O traces of running an application on a file system. You can use R 119 | to directly read the refined trace file for analysis. 120 | 2. Collect I/O traces and feed the traces to the SSD simulator with DFTLDES (DFTLDES 121 | is an FTL based on DFTL. It is implemented as discrete-event simulation and 122 | supports multiple channels, NCQ, multiple page allocation strategies, logical 123 | space segmentation, ...) The results show various statistics about the 124 | internal states of the SSD. 125 | 3. Collect I/O traces and feed the traces to the SSD simulator with NKFTL (NKFTL is 126 | a configurable hybrid mapping FTL based on "A reconfigurable FTL (flash 127 | translation layer) architecture for NAND flash-based applications" by Chanik 128 | Park et al. I call it "NKFTL" because the authors name the two most 129 | important parameters N and K. NKFTL can be configured to act like other FTLs 130 | such as FAST. NKFTL is implemented as discrete-event simulation and 131 | supports multiple channels, NCQ, multiple page allocation strategies, ...) 132 | 4. Feed a synthetic trace to the SSD simulator. This is useful if you want to test 133 | customized access patterns on the LBA space. 134 | 5. 
Feed existing I/O traces to the SSD simulator. Doing so avoids running the 135 | application for each simulation. 136 | 6. Analyze Request Scale (request size, NCQ depth) of existing traces. 137 | 7. Run an application and analyze its request scale. 138 | 8. Analyze Locality of existing traces. 139 | 9. Analyze Aligned Sequentiality of existing traces. 140 | 10. Analyze Grouping by Death Time of existing traces. From the results, you 141 | will be able to plot zombie curves. 142 | 11. Analyze Uniform Data Lifetime of existing traces. 143 | 144 | 145 | # Tutorial: Run your application with WiscSee 146 | 147 | After walking through this tutorial, you will learn the following. 148 | 149 | - How to run your application with WiscSee and get results for all the rules 150 | - Where the results are located 151 | - What is in the results and how to interpret them 152 | - How to run a preparation workload before your workload 153 | 154 | The tutorial is located here: https://github.com/junhe/wiscsee/blob/master/TUTORIAL.md 155 | 156 | # Producing zombie curves 157 | 158 | A zombie curve is a way of characterizing the GC overhead of a workload. A zombie curve 159 | shows the sorted valid ratios (# of valid pages in a block / # of pages in a 160 | block) of the flash blocks that hold live data. It looks like the one below. 161 | 162 | ![Zombie Curve](media/zombie-curve.png) 163 | 164 | Many of the examples in `tests/test_demo.py` produce data for zombie curves. 165 | The tutorial above also teaches you how to generate data for plotting zombie 166 | curves. The data is stored in `recorder.json`. Here is an example. 167 | 168 | 169 | ``` 170 | "ftl_func_valid_ratios": [ 171 | { 172 | "1.00": 128 173 | }, 174 | { 175 | "1.00": 240, 176 | "0.91": 4, 177 | "0.92": 12 178 | }, 179 | { 180 | "0.69": 6, 181 | "1.00": 368, 182 | "0.67": 10 183 | }, 184 | { 185 | "1.00": 496, 186 | "0.17": 10, 187 | "0.19": 6 188 | }, 189 | ... 190 | ] 191 | ``` 192 | 193 | Each `{...}` is a snapshot of the valid ratio counts. For example, `"0.91": 4` 194 | indicates that there are `4` flash blocks with valid ratio `0.91`. 195 | 196 | Using the data in `ftl_func_valid_ratios`, you can create an animation of how 197 | the valid ratios change over time. 198 | 199 | 200 | # Notes 201 | 202 | The simulator is written with SimPy (https://simpy.readthedocs.io/en/latest/). 203 | You may want to learn some SimPy before modifying the core simulation code. 204 | 205 | # Citation 206 | 207 | Please use the following bib to cite WiscSee: 208 | 209 | ``` 210 | @InProceedings{He17-Eurosys, 211 | title = "{The Unwritten Contract of Solid State Drives}", 212 | author = "{Jun He, Sudarsun Kannan, Andrea C. Arpaci-Dusseau, Remzi H. 
Arpaci-Dusseau}", 213 | booktitle = "EuroSys '17", 214 | month = "April", 215 | year = "2017", 216 | address = "Belgrade, Serbia", 217 | } 218 | ``` 219 | 220 | 221 | -------------------------------------------------------------------------------- /tests/misc_test.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import unittest 3 | import time 4 | import copy 5 | import pprint 6 | 7 | import workrunner 8 | import wiscsim 9 | from utilities import utils 10 | from config import MountOption as MOpt 11 | from config import ConfigNCQFTL 12 | from workflow import run_workflow 13 | from wiscsim.simulator import GcLog 14 | from wiscsim.ftlsim_commons import Extent, random_channel_id 15 | from wiscsim.ftlcounter import LpnClassification, get_file_range_table, EventNCQParser 16 | from wiscsim import hostevent 17 | from config_helper.rule_parameter import EventFileSets 18 | from commons import * 19 | 20 | class TestCpuhandler(unittest.TestCase): 21 | def test_cpu(self): 22 | possible_cpus = workrunner.cpuhandler.get_possible_cpus() 23 | workrunner.cpuhandler.enable_all_cpus() 24 | 25 | online_cpus = workrunner.cpuhandler.get_online_cpuids() 26 | self.assertListEqual(possible_cpus, online_cpus) 27 | 28 | class TestRandomChannelID(unittest.TestCase): 29 | def test(self): 30 | n = 16 31 | 32 | channels = set() 33 | for i in range(10000): 34 | channel_id = random_channel_id(n) 35 | channels.add(channel_id) 36 | self.assertTrue(channel_id >= 0) 37 | self.assertTrue(channel_id < n) 38 | 39 | self.assertTrue(len(channels) > n/3) 40 | 41 | @unittest.skip("Need real device that supports NCQ") 42 | class TestLinuxNCQDepth(unittest.TestCase): 43 | def test_ncq_depth_setting(self): 44 | if not 'wisc.cloudlab.us' in socket.gethostname(): 45 | return 46 | 47 | depth = 2 48 | utils.set_linux_ncq_depth("sdc", depth) 49 | read_depth = utils.get_linux_ncq_depth("sdc") 50 | self.assertEqual(depth, read_depth) 51 | 52 | @unittest.skip("Need real device that supports setting scheduler") 53 | class TestSettingScheduler(unittest.TestCase): 54 | def test_setting(self): 55 | scheduler = 'noop' 56 | utils.set_linux_io_scheduler("sdc", scheduler) 57 | 58 | read_scheduler = utils.get_linux_io_scheduler("sdc") 59 | self.assertEqual(scheduler, read_scheduler) 60 | 61 | 62 | class Experiment(object): 63 | def __init__(self): 64 | self.conf = wiscsim.dftldes.Config() 65 | 66 | def setup_environment(self): 67 | self.conf['device_path'] = '/dev/loop0' 68 | self.conf['dev_size_mb'] = 256 69 | self.conf['filesystem'] = 'f2fs' 70 | self.conf["n_online_cpus"] = 'all' 71 | 72 | self.conf['linux_ncq_depth'] = 31 73 | self.conf['sort_block_trace'] = True 74 | 75 | def setup_workload(self): 76 | self.conf['workload_class'] = 'NoOp' 77 | self.conf['NoOp'] = {} 78 | self.conf['workload_conf_key'] = 'NoOp' 79 | 80 | def setup_fs(self): 81 | pass 82 | # self.conf['mnt_opts'].update({ 83 | # "f2fs": { 84 | # 'discard': MOpt(opt_name = 'discard', 85 | # value = 'discard', 86 | # include_name = False), 87 | # 'background_gc': MOpt(opt_name = 'background_gc', 88 | # value = 'off', 89 | # include_name = True) 90 | # } 91 | # } 92 | # ) 93 | 94 | def setup_flash(self): 95 | pass 96 | 97 | def setup_ftl(self): 98 | self.conf['enable_blktrace'] = False 99 | self.conf['enable_simulation'] = False 100 | 101 | def run(self): 102 | utils.set_exp_metadata(self.conf, save_data = False, 103 | expname = 'tmp', 104 | subexpname = 'subtmp') 105 | utils.runtime_update(self.conf) 106 | 
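# run_workflow() drives the pipeline configured above: it prepares the device and filesystem, runs the workload, and (when enable_blktrace/enable_simulation are set) collects the block trace and feeds it to the simulator. Both flags are False here, so only the NoOp workload runs; this summary is inferred from the config keys set in setup_ftl() above.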
run_workflow(self.conf) 107 | 108 | utils.shcmd("fio -name hello -rw=randwrite -size=16mb -fsync=1 -filename {}/data2"\ 109 | .format(self.conf['fs_mount_point'])) 110 | time.sleep(1) 111 | ret = utils.invoke_f2fs_gc(self.conf['fs_mount_point'], 1) 112 | assert ret == 0 113 | 114 | def main(self): 115 | self.setup_environment() 116 | self.setup_fs() 117 | self.setup_workload() 118 | self.setup_flash() 119 | self.setup_ftl() 120 | self.run() 121 | 122 | @unittest.skip("Need FIO to create some random workload to create F2FS garbage") 123 | class TestF2FSGCCall(unittest.TestCase): 124 | def test(self): 125 | obj = Experiment() 126 | obj.main() 127 | 128 | class TestImportPyreuse(unittest.TestCase): 129 | def test(self): 130 | import pyreuse 131 | pyreuse.helpers.shcmd("echo 33333") 132 | 133 | class TestClassifyGcLOG(unittest.TestCase): 134 | @unittest.skip("Need real device mounted") 135 | def test(self): 136 | gclog = GcLog( 137 | device_path='/dev/sdc1', 138 | result_dir='/tmp/results/1gbnojournalok/Leveldb.ext4.1gbnojournalok.4294967296.devsdc1.128.31.1073741824.1073741824.True.ordered.False.overwrite.1000000.1.True.64.4.4.2000000.dftldes.1-ext4-06-29-08-06-52--7574556694461561217', 139 | flash_page_size=2048 140 | ) 141 | print gclog._get_range_table() 142 | gclog.classify_lpn_in_gclog() 143 | 144 | class TestExtent(unittest.TestCase): 145 | def test_copy(self): 146 | ext1 = Extent(lpn_start=3, lpn_count=8) 147 | ext2 = copy.copy(ext1) 148 | 149 | self.assertEqual(ext1.lpn_start, ext2.lpn_start) 150 | self.assertEqual(ext1.lpn_count, ext2.lpn_count) 151 | self.assertNotEqual(ext1, ext2) 152 | 153 | ext2.lpn_start = 100 154 | self.assertEqual(ext1.lpn_start, 3) 155 | 156 | 157 | class TestGroupToBatches(unittest.TestCase): 158 | def test(self): 159 | a = [0, 1, 2] 160 | batches = utils.group_to_batches(a, 1) 161 | self.assertListEqual(batches, [[0], [1], [2]]) 162 | 163 | def test_larger(self): 164 | a = range(7) 165 | batches = utils.group_to_batches(a, 3) 166 | self.assertListEqual(batches, [[0, 1, 2], 167 | [3, 4, 5], 168 | [6]]) 169 | 170 | @unittest.skip('need real device') 171 | class TestLpnClassification(unittest.TestCase): 172 | def test(self): 173 | classifier = LpnClassification( 174 | lpns = [1, 8], 175 | device_path = '/dev/sdc1', 176 | result_dir = '/tmp/results/test002/subexp--6155052293192590053-ext4-09-16-09-32-22-1439596482389025085', 177 | flash_page_size = 2048) 178 | classifier.classify() 179 | 180 | 181 | class TestNCQParser(unittest.TestCase): 182 | def test(self): 183 | # blkparse-events-for-ftlsim.txt 184 | conf = ConfigNCQFTL() 185 | 186 | workload_line_iter = hostevent.FileLineIterator( 187 | "tests/testdata/blkparse-events-for-ftlsim.txt") 188 | event_workload_iter = hostevent.EventIterator(conf, workload_line_iter) 189 | 190 | # for event in event_workload_iter: 191 | # print str(event) 192 | 193 | parser = EventNCQParser(event_workload_iter) 194 | table = parser.parse() 195 | 196 | self.assertEqual(table[0]['pre_depth'], 0) 197 | self.assertEqual(table[0]['post_depth'], 1) 198 | 199 | self.assertEqual(table[1]['pre_depth'], 1) 200 | self.assertEqual(table[1]['post_depth'], 0) 201 | 202 | 203 | class TestEventFileSets(unittest.TestCase): 204 | def test(self): 205 | filesets = EventFileSets('tests/testdata/64mbfile') 206 | sets = filesets.get_sets() 207 | self.assertEqual(len(sets), 1) 208 | self.assertEqual(sets[0]['mkfs_path'], 209 | 
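# judging from this assertion, EventFileSets scans an experiment directory for subexp-* result folders and reports, for each one, the paths of its mkfs and workload trace files: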
'tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/blkparse-events-for-ftlsim-mkfs.txt') 210 | 211 | 212 | 213 | def main(): 214 | unittest.main() 215 | 216 | if __name__ == '__main__': 217 | main() 218 | 219 | 220 | -------------------------------------------------------------------------------- /wiscsim/recorder.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import os 3 | import pprint 4 | import sys 5 | 6 | from utilities import utils 7 | 8 | FILE_TARGET, STDOUT_TARGET = ('file', 'stdout') 9 | 10 | 11 | def switchable(function): 12 | "decorator for class Recorder's methods, so they can be switched on/off" 13 | def wrapper(self, *args, **kwargs): 14 | if self.enabled is None: 15 | raise RuntimeError("You need to explicitly enable/disable Recorder." 16 | " We raise an exception here because an unset flag tends to create" 17 | " unexpected behaviors that are hard to debug.") 18 | if self.enabled is False: 19 | return 20 | else: 21 | return function(self, *args, **kwargs) 22 | return wrapper 23 | 24 | 25 | class Recorder(object): 26 | def __init__(self, output_target, 27 | output_directory = None, 28 | verbose_level = 1, 29 | print_when_finished = False): 30 | self.output_target = output_target 31 | self.output_directory = output_directory 32 | self.verbose_level = verbose_level 33 | self.print_when_finished = print_when_finished 34 | 35 | assert len(self.output_target) > 0 36 | 37 | self.file_pool = {} # {filename:descriptor} 38 | self.file_colnames = {} # {filename:[colname1, 2, ...]} 39 | 40 | # {set name: collections.counter} 41 | self.general_accumulator = {} 42 | self.result_dict = {'general_accumulator': self.general_accumulator} 43 | 44 | self.enabled = None 45 | 46 | self.__open_log_file() 47 | 48 | self._unique_num = 0 49 | 50 | self._tag_groups = { 51 | 'read_user': 'foreground', 52 | 'write_user': 'foreground', 53 | 'read_trans': 'background', 54 | 'prog_trans': 'background'} 55 | 56 | def close(self): 57 | self.__close_log_file() 58 | self.__save_accumulator() 59 | self.__save_result_dict() 60 | self._close_file_pool() 61 | 62 | def enable(self): 63 | print "....Recorder is enabled...." 64 | self.enabled = True 65 | 66 | def disable(self): 67 | "Note that this will not clear the previous records" 68 | print "....Recorder is DIS-abled. Now not counting anything." 
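# A minimal usage sketch (the directory is hypothetical): rec = Recorder(output_target=FILE_TARGET, output_directory='/tmp/rec'); rec.enable(); rec.count_me('gc', 'victim_picked') is now counted; after rec.disable() the same call becomes a silent no-op; rec.close() then writes recorder.log, accumulator_table.txt, and recorder.json to the output directory.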
69 | self.enabled = False 70 | 71 | def _close_file_pool(self): 72 | for _, file_handle in self.file_pool.items(): 73 | os.fsync(file_handle) 74 | file_handle.close() 75 | 76 | def __save_result_dict(self): 77 | result_path = os.path.join(self.output_directory, 'recorder.json') 78 | utils.dump_json(self.result_dict, result_path) 79 | 80 | def __close_log_file(self): 81 | self.log_handle.flush() 82 | os.fsync(self.log_handle) 83 | self.log_handle.close() 84 | 85 | def __open_log_file(self): 86 | # open log file 87 | log_path = os.path.join(self.output_directory, 'recorder.log') 88 | utils.prepare_dir_for_path(log_path) 89 | self.log_handle = open(log_path, 'w') 90 | 91 | def __save_accumulator(self): 92 | counter_set_path = os.path.join(self.output_directory, 93 | 'accumulator_table.txt') 94 | utils.prepare_dir_for_path(counter_set_path) 95 | general_accumulator_table = self._parse_accumulator( 96 | self.general_accumulator) 97 | utils.table_to_file(general_accumulator_table, counter_set_path) 98 | 99 | def __write_log(self, *args): 100 | line = ' '.join( str(x) for x in args) 101 | line += '\n' 102 | if self.output_target == FILE_TARGET: 103 | self.log_handle.write(line) 104 | else: 105 | sys.stdout.write(line) 106 | 107 | def get_result_summary(self): 108 | return self.result_dict 109 | 110 | def set_result_by_one_key(self, key, value): 111 | self.result_dict[key] = value 112 | 113 | def get_result_by_one_key(self, key): 114 | return self.result_dict[key] 115 | 116 | def append_to_value_list(self, key, addon): 117 | """ 118 | Append addon to key's value 119 | {'key': [addon1, addon2]} 120 | """ 121 | valuelist = self.result_dict.setdefault(key, []) 122 | valuelist.append(addon) 123 | 124 | @switchable 125 | def count_me(self, counter_name, item): 126 | """ 127 | use the counter named counter_name to count the appearances of item 128 | """ 129 | self.add_to_general_accumulater(counter_name, item, 1) 130 | 131 | def get_count_me(self, counter_name, item): 132 | return self.get_general_accumulater_cnt(counter_name, item) 133 | 134 | def get_general_accumulater_cnt(self, 135 | counter_set_name, item_name): 136 | counter_dict = self.general_accumulator.setdefault(counter_set_name, 137 | collections.Counter()) 138 | return counter_dict[item_name] 139 | 140 | @switchable 141 | def add_to_general_accumulater(self, 142 | counter_set_name, item_name, addition): 143 | """ 144 | {counter set 1: 145 | {counter 1: ##, 146 | counter 2: #}, 147 | counter set 2: 148 | {counter 1: ##, 149 | counter 2: #}, 150 | } 151 | """ 152 | counter_dict = self.general_accumulator.setdefault(counter_set_name, 153 | collections.Counter()) 154 | counter_dict[item_name] += addition 155 | 156 | @switchable 157 | def add_to_timer(self, counter_set_name, item_name, addition): 158 | self.add_to_general_accumulater(counter_set_name, item_name, addition) 159 | 160 | def get_unique_num(self): 161 | num = self._unique_num 162 | self._unique_num += 1 163 | return num 164 | 165 | def get_tag(self, op, op_id): 166 | # return '-'.join([op, str(op_id)]) 167 | return {'op': op, 'op_id':op_id} 168 | 169 | def tag_group(self, tag): 170 | try: 171 | return self._tag_groups[tag['op']] 172 | except (KeyError, TypeError): 173 | return tag 174 | # return 'TagGroupUnknown' 175 | 176 | def _parse_accumulator(self, counter_sets): 177 | """ 178 | counter sets 179 | {counter set 1: 180 | {counter 1: ##, 181 | counter 2: #}, 182 | counter set 2: 183 | {counter 1: ##, 184 | counter 2: #}, 185 | } 186 | 187 | 188 | table columns 189 | counter.name 
item.name count 190 | """ 191 | table = [] 192 | for counter_set_name, counter_set in counter_sets.items(): 193 | for counter_name, count in counter_set.items(): 194 | d = {'counter.set.name': counter_set_name, 195 | 'counter.name' : counter_name, 196 | 'count' : count} 197 | table.append(d) 198 | 199 | return table 200 | 201 | def write_file(self, filename, **kwargs): 202 | """ 203 | Write kwargs to filename as one line 204 | 205 | Every call for the same file must pass exactly the same keys; the 206 | keys become the column names of the file. 207 | """ 208 | width = 20 209 | if not self.file_pool.has_key(filename): 210 | fd = open( os.path.join( self.output_directory, filename ), 'w+') 211 | self.file_pool[filename] = fd 212 | self.file_colnames[filename] = kwargs.keys() 213 | colnames = [str(colname).rjust(width) for colname in kwargs.keys()] 214 | fd.write(' '.join(colnames) + '\n') 215 | else: 216 | fd = self.file_pool[filename] 217 | 218 | colnames = self.file_colnames[filename] 219 | args = [str(kwargs[colname]).rjust(width) for colname in colnames] 220 | fd.write(' '.join(args) + '\n') 221 | 222 | def debug(self, *args): 223 | if self.verbose_level >= 3: 224 | self.__write_log('DEBUG', *args) 225 | 226 | @switchable 227 | def put(self, operation, page_num, category): 228 | # do statistics 229 | item = '.'.join((operation, category)) 230 | self.add_to_general_accumulater("put", item, 1) 231 | 232 | def warning(self, *args): 233 | if self.verbose_level >= 2: 234 | self.__write_log('WARNING', *args) 235 | 236 | def error(self, *args): 237 | if self.verbose_level >= 0: 238 | self.__write_log('ERROR', *args) 239 | 240 | 241 | -------------------------------------------------------------------------------- /tests/testdata/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "hash": 3141981191822244772, 3 | "snapshot_interval": 100000000.0, 4 | "high_log_block_ratio": 0.4, 5 | "wear_leveling_diff": 10, 6 | "high_data_block_ratio": 0.4, 7 | "workload_conf": { 8 | "filename": "test.file", 9 | "generating_func": "self.generate_random_workload", 10 | "iterations": 1, 11 | "chunk_size": 524288, 12 | "n_col": 5, 13 | "chunk_count": 8 14 | }, 15 | "expname": "64mbfile", 16 | "do_not_check_gc_setting": true, 17 | "verbose_level": -1, 18 | "cache_entry_bytes": 8, 19 | "record_bad_victim_block": false, 20 | "translation_page_entry_bytes": 4, 21 | "exp_parameters": { 22 | "gc_high_ratio": 0.9, 23 | "f2fs_ipu_policy": 16, 24 | "aging_appconfs": null, 25 | "cache_mapped_data_bytes": 2147483648, 26 | "snapshot_interval": 100000000.0, 27 | "do_ncq_depth_time_line": true, 28 | "wear_leveling_diff": 10, 29 | "ssd_ncq_depth": 1, 30 | "dirty_bytes": 4294967296, 31 | "run_seconds": null, 32 | "expname": "64mbfile", 33 | "segment_bytes": 2097152, 34 | "gc_low_ratio": 0.0, 35 | "n_pages_per_block": 64, 36 | "stripe_size": 1, 37 | "enable_blktrace": true, 38 | "over_provisioning": 32, 39 | "ext4hasjournal": true, 40 | "workload_class": "AppMix", 41 | "not_check_gc_setting": true, 42 | "age_workload_class": "NoOp", 43 | "do_wear_leveling": false, 44 | "ftl": "ftlcounter", 45 | "appconfs": [ 46 | { 47 | "name": "RocksDB", 48 | "mem_limit_in_bytes": 10737418240, 49 | "do_strace": false, 50 | "num": 10000000, 51 | "benchmarks": "overwrite,overwrite,overwrite", 52 | "use_existing_db": 0 53 | } 54 | ], 55 | "f2fs_min_fsync_blocks": 8, 56 | "device_path": "/dev/sdc1", 57 | 
"enable_simulation": true, 58 | "do_gc_after_workload": false, 59 | "linux_ncq_depth": 31, 60 | "n_online_cpus": "all", 61 | "lbabytes": 2147483648, 62 | "fs_discard": true, 63 | "ext4datamode": "ordered", 64 | "n_channels_per_dev": 16, 65 | "only_get_traffic": false, 66 | "snapshot_erasure_count_dist": true, 67 | "f2fs_gc_after_workload": false, 68 | "wear_leveling_factor": 2, 69 | "snapshot_valid_ratios": true, 70 | "write_gc_log": true, 71 | "dump_ext4_after_workload": true, 72 | "filesystem": "ext4", 73 | "max_log_blocks_ratio": 100, 74 | "wear_leveling_check_interval": 10000000000 75 | }, 76 | "segment_bytes": 2097152, 77 | "do_gc_after_workload": true, 78 | "sector_size": 512, 79 | "stripe_size": 1, 80 | "ext4": { 81 | "make_opts": { 82 | "-O": [ 83 | "^uninit_bg", 84 | "has_journal" 85 | ], 86 | "-b": [ 87 | 4096 88 | ] 89 | } 90 | }, 91 | "max_victim_valid_ratio": 0.9, 92 | "perf": { 93 | "perf_path": "perf", 94 | "flamegraph_dir": null 95 | }, 96 | "dump_ext4_after_workload": true, 97 | "linux_version": "4.5.4", 98 | "flash_config": { 99 | "n_chips_per_package": 1, 100 | "n_packages_per_channel": 1, 101 | "n_planes_per_chip": 1, 102 | "t_PROG": 200000, 103 | "n_channels_per_dev": 16, 104 | "t_RC": 0, 105 | "t_R": 20000, 106 | "page_read_time": 20000, 107 | "t_WC": 0, 108 | "n_pages_per_block": 64, 109 | "n_blocks_per_plane": 32768, 110 | "page_size": 2048, 111 | "t_BERS": 1500000.0, 112 | "block_erase_time": 1600000.0, 113 | "page_prog_time": 200000 114 | }, 115 | "simulation_processor": "e2e", 116 | "btrfs": { 117 | "make_opts": {} 118 | }, 119 | "do_fstrim": false, 120 | "dev_padding": 8388608, 121 | "ftl_type": "ftlcounter", 122 | "lba_workload_configs": {}, 123 | "age_workload_class": "NoOp", 124 | "result_dir": "/tmp/results/64mbfile/subexp-3563455040949707047-ext4-10-05-16-29-19-3141981191822244772", 125 | "hybridmapftl": { 126 | "low_log_block_ratio": 0.32 127 | }, 128 | "workload_src": "WLRUNNER", 129 | "do_wear_leveling": false, 130 | "device_path": "/dev/sdc1", 131 | "enable_simulation": true, 132 | "f2fs": { 133 | "sysfs": { 134 | "ipu_policy": 16, 135 | "min_fsync_blocks": 8 136 | }, 137 | "make_opts": {} 138 | }, 139 | "wrap_by_perf": false, 140 | "linux_io_scheduler": "noop", 141 | "linux_ncq_depth": 31, 142 | "fs_mount_point": "/mnt/fsonloop", 143 | "time": "10-05-16-29-19", 144 | "SSDFramework": { 145 | "ncq_depth": 1, 146 | "data_cache_max_n_entries": 4096 147 | }, 148 | "GC_threshold_ratio": 0.95, 149 | "output_target": "file", 150 | "targetdir": "/tmp/results", 151 | "workload_class": "AppMix", 152 | "mnt_opts": { 153 | "ext4": { 154 | "discard": { 155 | "include_name": false, 156 | "opt_name": "discard", 157 | "value": "discard" 158 | } 159 | }, 160 | "f2fs": { 161 | "discard": { 162 | "include_name": false, 163 | "opt_name": "discard", 164 | "value": "discard" 165 | } 166 | }, 167 | "btrfs": { 168 | "discard": { 169 | "include_name": false, 170 | "opt_name": "discard", 171 | "value": "discard" 172 | }, 173 | "autodefrag": { 174 | "include_name": false, 175 | "opt_name": "autodefrag", 176 | "value": "autodefrag" 177 | }, 178 | "ssd": { 179 | "include_name": false, 180 | "opt_name": "ssd", 181 | "value": "ssd" 182 | } 183 | }, 184 | "xfs": { 185 | "discard": { 186 | "include_name": false, 187 | "opt_name": "discard", 188 | "value": "discard" 189 | } 190 | } 191 | }, 192 | "print_when_finished": false, 193 | "aging_workload_config": { 194 | "run_seconds": null, 195 | "appconfs": null 196 | }, 197 | "simulator_class": "SimulatorNonDESSpeed", 198 | 
"enable_blktrace": true, 199 | "only_get_traffic": false, 200 | "snapshot_erasure_count_dist": false, 201 | "f2fs_gc_after_workload": false, 202 | "mapping_cache_bytes": 8388608, 203 | "filesystem": "ext4", 204 | "wear_leveling_factor": 2, 205 | "aging_config_key": "aging_workload_config", 206 | "lba_workload_class": "Manual", 207 | "snapshot_valid_ratios": false, 208 | "do_ncq_depth_time_line": true, 209 | "over_provisioning": 1.28, 210 | "workload_conf_key": "workload_config", 211 | "simulator_enable_interval": false, 212 | "workload_config": { 213 | "run_seconds": null, 214 | "appconfs": [ 215 | { 216 | "name": "RocksDB", 217 | "mem_limit_in_bytes": 10737418240, 218 | "do_strace": false, 219 | "num": 10000000, 220 | "benchmarks": "overwrite,overwrite,overwrite", 221 | "use_existing_db": 0 222 | } 223 | ] 224 | }, 225 | "event_file_column_names": [ 226 | "pid", 227 | "action", 228 | "operation", 229 | "offset", 230 | "size", 231 | "timestamp", 232 | "pre_wait_time", 233 | "sync" 234 | ], 235 | "aging_config": { 236 | "filename": "aging.file", 237 | "generating_func": "self.generate_random_workload", 238 | "iterations": 1, 239 | "chunk_size": 524288, 240 | "n_col": 5, 241 | "chunk_count": 8 242 | }, 243 | "n_online_cpus": "all", 244 | "subexpname": "subexp-3563455040949707047", 245 | "dev_size_mb": 2048, 246 | "tmpfs_mount_point": "/mnt/tmpfs", 247 | "GC_low_threshold_ratio": 0.9, 248 | "wear_leveling_check_interval": 10000000000, 249 | "process_queue_depth": 32 250 | } -------------------------------------------------------------------------------- /tests/testdata/sqlitewal-update/subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "stop_sim_on_bytes": "Infinit", 3 | "hash": -2726320246496492803, 4 | "snapshot_interval": 100000000.0, 5 | "high_log_block_ratio": 0.4, 6 | "wear_leveling_diff": 10, 7 | "high_data_block_ratio": 0.4, 8 | "workload_conf": { 9 | "filename": "test.file", 10 | "generating_func": "self.generate_random_workload", 11 | "iterations": 1, 12 | "chunk_size": 524288, 13 | "n_col": 5, 14 | "chunk_count": 8 15 | }, 16 | "print_when_finished": false, 17 | "expname": "sqlitewal-reqscale-240000-inserts-3", 18 | "verbose_level": -1, 19 | "cache_entry_bytes": 8, 20 | "record_bad_victim_block": false, 21 | "translation_page_entry_bytes": 4, 22 | "exp_parameters": { 23 | "gc_high_ratio": 0.9, 24 | "stop_sim_on_bytes": "Infinit", 25 | "f2fs_ipu_policy": 16, 26 | "aging_appconfs": null, 27 | "cache_mapped_data_bytes": 1073741824, 28 | "snapshot_interval": 100000000.0, 29 | "do_ncq_depth_time_line": true, 30 | "wear_leveling_diff": 10, 31 | "ssd_ncq_depth": 1, 32 | "dirty_bytes": 4294967296, 33 | "run_seconds": null, 34 | "expname": "sqlitewal-reqscale-240000-inserts-3", 35 | "segment_bytes": 2097152, 36 | "do_gc_after_workload": false, 37 | "n_pages_per_block": 64, 38 | "stripe_size": 1, 39 | "dump_ext4_after_workload": true, 40 | "ext4hasjournal": true, 41 | "workload_class": "AppMix", 42 | "not_check_gc_setting": true, 43 | "age_workload_class": "NoOp", 44 | "do_wear_leveling": false, 45 | "ftl": "ftlcounter", 46 | "appconfs": [ 47 | { 48 | "name": "Sqlite", 49 | "n_insertions": 240000, 50 | "pattern": "random_put", 51 | "mem_limit_in_bytes": 33554432, 52 | "do_strace": false, 53 | "max_key": 240000, 54 | "commit_period": 10, 55 | "journal_mode": "WAL" 56 | } 57 | ], 58 | "f2fs_min_fsync_blocks": 8, 59 | "device_path": "/dev/sdc1", 60 | "enable_simulation": 
true, 61 | "gc_low_ratio": 0.0, 62 | "linux_ncq_depth": 31, 63 | "n_online_cpus": "all", 64 | "testname": "sqliteWAL_reqscale_w_rand", 65 | "lbabytes": 1073741824, 66 | "fs_discard": true, 67 | "ext4datamode": "ordered", 68 | "enable_blktrace": true, 69 | "only_get_traffic": false, 70 | "snapshot_erasure_count_dist": true, 71 | "f2fs_gc_after_workload": false, 72 | "wear_leveling_factor": 2, 73 | "snapshot_valid_ratios": true, 74 | "over_provisioning": 32, 75 | "write_gc_log": true, 76 | "filesystem": "ext4", 77 | "max_log_blocks_ratio": 100, 78 | "n_channels_per_dev": 16, 79 | "wear_leveling_check_interval": 10000000000 80 | }, 81 | "segment_bytes": 2097152, 82 | "do_gc_after_workload": true, 83 | "sector_size": 512, 84 | "stripe_size": 1, 85 | "ext4": { 86 | "make_opts": { 87 | "-O": [ 88 | "^uninit_bg", 89 | "has_journal" 90 | ], 91 | "-b": [ 92 | 4096 93 | ] 94 | } 95 | }, 96 | "max_victim_valid_ratio": 0.9, 97 | "perf": { 98 | "perf_path": "perf", 99 | "flamegraph_dir": null 100 | }, 101 | "dump_ext4_after_workload": true, 102 | "workload_config": { 103 | "run_seconds": null, 104 | "appconfs": [ 105 | { 106 | "name": "Sqlite", 107 | "n_insertions": 240000, 108 | "pattern": "random_put", 109 | "mem_limit_in_bytes": 33554432, 110 | "do_strace": false, 111 | "max_key": 240000, 112 | "commit_period": 10, 113 | "journal_mode": "WAL" 114 | } 115 | ] 116 | }, 117 | "linux_version": "4.5.4", 118 | "result_dir": "/tmp/results/sqlitewal-reqscale-240000-inserts-3/subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803", 119 | "workload_class": "AppMix", 120 | "btrfs": { 121 | "make_opts": {} 122 | }, 123 | "do_fstrim": false, 124 | "GC_threshold_ratio": 0.95, 125 | "ftl_type": "ftlcounter", 126 | "do_ncq_depth_time_line": true, 127 | "age_workload_class": "NoOp", 128 | "dev_padding": 8388608, 129 | "hybridmapftl": { 130 | "low_log_block_ratio": 0.32 131 | }, 132 | "workload_src": "WLRUNNER", 133 | "do_wear_leveling": false, 134 | "device_path": "/dev/sdc1", 135 | "enable_simulation": true, 136 | "f2fs": { 137 | "sysfs": { 138 | "ipu_policy": 16, 139 | "min_fsync_blocks": 8 140 | }, 141 | "make_opts": {} 142 | }, 143 | "wrap_by_perf": false, 144 | "linux_io_scheduler": "noop", 145 | "linux_ncq_depth": 31, 146 | "fs_mount_point": "/mnt/fsonloop", 147 | "time": "10-07-23-50-10", 148 | "SSDFramework": { 149 | "ncq_depth": 1, 150 | "data_cache_max_n_entries": 4096 151 | }, 152 | "output_target": "file", 153 | "targetdir": "/tmp/results", 154 | "flash_config": { 155 | "n_chips_per_package": 1, 156 | "n_packages_per_channel": 1, 157 | "n_planes_per_chip": 1, 158 | "n_blocks_per_plane": 16384, 159 | "t_PROG": 200000, 160 | "page_size": 2048, 161 | "n_channels_per_dev": 16, 162 | "block_erase_time": 1600000.0, 163 | "t_RC": 0, 164 | "t_R": 20000, 165 | "t_WC": 0, 166 | "page_prog_time": 200000, 167 | "t_BERS": 1500000.0, 168 | "page_read_time": 20000, 169 | "n_pages_per_block": 64 170 | }, 171 | "mnt_opts": { 172 | "btrfs": { 173 | "discard": { 174 | "include_name": false, 175 | "opt_name": "discard", 176 | "value": "discard" 177 | }, 178 | "autodefrag": { 179 | "include_name": false, 180 | "opt_name": "autodefrag", 181 | "value": "autodefrag" 182 | }, 183 | "ssd": { 184 | "include_name": false, 185 | "opt_name": "ssd", 186 | "value": "ssd" 187 | } 188 | }, 189 | "xfs": { 190 | "discard": { 191 | "include_name": false, 192 | "opt_name": "discard", 193 | "value": "discard" 194 | } 195 | }, 196 | "ext4": { 197 | "discard": { 198 | "include_name": false, 199 | "opt_name": "discard", 200 | 
"value": "discard" 201 | } 202 | }, 203 | "f2fs": { 204 | "discard": { 205 | "include_name": false, 206 | "opt_name": "discard", 207 | "value": "discard" 208 | } 209 | } 210 | }, 211 | "simulation_processor": "e2e", 212 | "aging_workload_config": { 213 | "run_seconds": null, 214 | "appconfs": null 215 | }, 216 | "simulator_class": "SimulatorNonDESSpeed", 217 | "enable_blktrace": true, 218 | "only_get_traffic": false, 219 | "snapshot_erasure_count_dist": false, 220 | "f2fs_gc_after_workload": false, 221 | "mapping_cache_bytes": 4194304, 222 | "filesystem": "ext4", 223 | "wear_leveling_factor": 2, 224 | "lba_workload_configs": {}, 225 | "lba_workload_class": "Manual", 226 | "snapshot_valid_ratios": false, 227 | "over_provisioning": 1.28, 228 | "workload_conf_key": "workload_config", 229 | "simulator_enable_interval": false, 230 | "do_not_check_gc_setting": true, 231 | "event_file_column_names": [ 232 | "pid", 233 | "action", 234 | "operation", 235 | "offset", 236 | "size", 237 | "timestamp", 238 | "pre_wait_time", 239 | "sync" 240 | ], 241 | "aging_config": { 242 | "filename": "aging.file", 243 | "generating_func": "self.generate_random_workload", 244 | "iterations": 1, 245 | "chunk_size": 524288, 246 | "n_col": 5, 247 | "chunk_count": 8 248 | }, 249 | "aging_config_key": "aging_workload_config", 250 | "n_online_cpus": "all", 251 | "subexpname": "subexp-7928737328932659543", 252 | "dev_size_mb": 1024, 253 | "tmpfs_mount_point": "/mnt/tmpfs", 254 | "GC_low_threshold_ratio": 0.9, 255 | "wear_leveling_check_interval": 10000000000, 256 | "process_queue_depth": 32 257 | } -------------------------------------------------------------------------------- /tests/testdata/sqlitewal-update/subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803/blkparse-events-for-ftlsim-mkfs.txt: -------------------------------------------------------------------------------- 1 | 699 D read 0 4096 0.000000000 0 False 2 | 699 D read 134217728 4096 0.024041688 0.024041688 False 3 | 699 D read 402653184 4096 0.024204240 0.000162552 False 4 | 699 D read 671088640 4096 0.024334682 0.000130442 False 5 | 699 D read 939524096 4096 0.024465257 0.000130575 False 6 | 1383 D write 0 319488 0.025853329 0.001388072 True 7 | 1383 D write 331776 32768 0.025859698 6.369e-06 True 8 | 1383 D write 397312 524288 0.025861656 1.958e-06 True 9 | 1383 D write 921600 524288 0.025873274 1.1618e-05 True 10 | 1383 D write 1445888 524288 0.025885119 1.1845e-05 True 11 | 1383 D write 1970176 524288 0.025892429 7.31e-06 True 12 | 1383 D write 2494464 524288 0.025899015 6.586e-06 True 13 | 1383 D write 3018752 524288 0.025905505 6.49e-06 True 14 | 1383 D write 3543040 524288 0.025911852 6.347e-06 True 15 | 1383 D write 4067328 524288 0.025918214 6.362e-06 True 16 | 1383 D write 4591616 524288 0.025924244 6.03e-06 True 17 | 1383 D write 5115904 524288 0.025931039 6.795e-06 True 18 | 1383 D write 5640192 524288 0.025935814 4.775e-06 True 19 | 1383 D write 6164480 524288 0.025941496 5.682e-06 True 20 | 1383 D write 6688768 524288 0.025945985 4.489e-06 True 21 | 1383 D write 7213056 524288 0.025950106 4.121e-06 True 22 | 1383 D write 7737344 524288 0.027030375 0.001080269 True 23 | 1383 D write 8261632 524288 0.027034285 3.91e-06 True 24 | 1383 D write 8785920 524288 0.027037685 3.4e-06 True 25 | 1383 D write 9310208 524288 0.027042868 5.183e-06 True 26 | 1383 D write 9834496 524288 0.027047690 4.822e-06 True 27 | 1383 D write 10358784 524288 0.027052260 4.57e-06 True 28 | 1383 D write 10883072 524288 0.027056958 4.698e-06 True 29 | 1383 D 
write 11407360 524288 0.027061369 4.411e-06 True 30 | 1383 D write 11931648 524288 0.027065244 3.875e-06 True 31 | 1383 D write 12455936 524288 0.027069055 3.811e-06 True 32 | 1383 D write 12980224 524288 0.027073577 4.522e-06 True 33 | 1383 D write 13504512 524288 0.027078342 4.765e-06 True 34 | 1383 D write 14028800 524288 0.027082923 4.581e-06 True 35 | 1383 D write 14553088 524288 0.027087576 4.653e-06 True 36 | 1383 D write 15077376 524288 0.027091864 4.288e-06 True 37 | 1383 D write 15601664 524288 0.027095921 4.057e-06 True 38 | 1383 D write 16125952 524288 0.027963517 0.000867596 True 39 | 1383 D write 16650240 524288 0.027967657 4.14e-06 True 40 | 1383 D write 17174528 4096 0.027971451 3.794e-06 True 41 | 1383 D write 134217728 8192 0.028785673 0.000814222 True 42 | 1383 D write 402653184 8192 0.029835407 0.001049734 True 43 | 1383 D write 536870912 524288 0.030885555 0.001050148 True 44 | 0 D write 537395200 524288 0.031963902 0.001078347 True 45 | 0 D write 537919488 524288 0.033009155 0.001045253 True 46 | 0 D write 538443776 524288 0.034059541 0.001050386 True 47 | 0 D write 538968064 524288 0.035172381 0.00111284 True 48 | 0 D write 539492352 524288 0.036270081 0.0010977 True 49 | 0 D write 540016640 524288 0.037361256 0.001091175 True 50 | 0 D write 540540928 524288 0.038490788 0.001129532 True 51 | 0 D write 541065216 524288 0.039535120 0.001044332 True 52 | 0 D write 541589504 524288 0.040584733 0.001049613 True 53 | 0 D write 542113792 524288 0.041638441 0.001053708 True 54 | 0 D write 542638080 524288 0.042692562 0.001054121 True 55 | 0 D write 543162368 524288 0.043807951 0.001115389 True 56 | 0 D write 543686656 524288 0.044902444 0.001094493 True 57 | 0 D write 544210944 524288 0.045951019 0.001048575 True 58 | 0 D write 544735232 524288 0.047139504 0.001188485 True 59 | 0 D write 545259520 524288 0.048186161 0.001046657 True 60 | 0 D write 545783808 524288 0.049237462 0.001051301 True 61 | 0 D write 546308096 524288 0.050283208 0.001045746 True 62 | 0 D write 546832384 524288 0.051347824 0.001064616 True 63 | 0 D write 547356672 524288 0.052448041 0.001100217 True 64 | 0 D write 547880960 524288 0.053542278 0.001094237 True 65 | 0 D write 548405248 524288 0.054636687 0.001094409 True 66 | 0 D write 548929536 524288 0.055756417 0.00111973 True 67 | 0 D write 549453824 524288 0.056801546 0.001045129 True 68 | 0 D write 549978112 524288 0.057849462 0.001047916 True 69 | 0 D write 550502400 524288 0.058899878 0.001050416 True 70 | 0 D write 551026688 524288 0.059956421 0.001056543 True 71 | 0 D write 551550976 524288 0.061063725 0.001107304 True 72 | 0 D write 552075264 524288 0.061067294 3.569e-06 True 73 | 234 D write 552599552 524288 0.061074825 7.531e-06 True 74 | 234 D write 553123840 524288 0.061078750 3.925e-06 True 75 | 0 D write 553648128 524288 0.062247930 0.00116918 True 76 | 0 D write 554172416 524288 0.063317644 0.001069714 True 77 | 0 D write 554696704 524288 0.064444235 0.001126591 True 78 | 0 D write 555220992 524288 0.065550352 0.001106117 True 79 | 0 D write 555745280 524288 0.066606409 0.001056057 True 80 | 0 D write 556269568 524288 0.067654374 0.001047965 True 81 | 0 D write 556793856 524288 0.068703494 0.00104912 True 82 | 0 D write 557318144 524288 0.069789511 0.001086017 True 83 | 0 D write 557842432 524288 0.070883580 0.001094069 True 84 | 0 D write 558366720 524288 0.071962034 0.001078454 True 85 | 0 D write 558891008 524288 0.073004399 0.001042365 True 86 | 0 D write 559415296 524288 0.074053248 0.001048849 True 87 | 0 D write 559939584 524288 
0.075106663 0.001053415 True 88 | 0 D write 560463872 524288 0.076167520 0.001060857 True 89 | 0 D write 560988160 524288 0.077203573 0.001036053 True 90 | 0 D write 561512448 524288 0.078343061 0.001139488 True 91 | 0 D write 562036736 524288 0.079425842 0.001082781 True 92 | 0 D write 562561024 524288 0.080559394 0.001133552 True 93 | 0 D write 563085312 524288 0.081607389 0.001047995 True 94 | 0 D write 563609600 524288 0.082662738 0.001055349 True 95 | 0 D write 564133888 524288 0.083709797 0.001047059 True 96 | 0 D write 564658176 524288 0.084740237 0.00103044 True 97 | 0 D write 565182464 524288 0.085792063 0.001051826 True 98 | 0 D write 565706752 524288 0.086863521 0.001071458 True 99 | 0 D write 566231040 524288 0.087893262 0.001029741 True 100 | 0 D write 566755328 524288 0.088940174 0.001046912 True 101 | 0 D write 567279616 524288 0.090047875 0.001107701 True 102 | 0 D write 567803904 524288 0.091096669 0.001048794 True 103 | 0 D write 568328192 524288 0.092155135 0.001058466 True 104 | 0 D write 568852480 524288 0.093201069 0.001045934 True 105 | 0 D write 569376768 524288 0.094249888 0.001048819 True 106 | 0 D write 569901056 524288 0.095267027 0.001017139 True 107 | 0 D write 671088640 8192 0.100237835 0.004970808 True 108 | 0 D write 939524096 8192 0.101281005 0.00104317 True 109 | 0 D write 1073676288 65536 0.102333447 0.001052442 True 110 | 1383 D write 0 4096 0.133876645 0.031543198 True 111 | 1326 D read 1073606656 4096 0.134600282 0.000723637 False 112 | 1326 D read 393216 4096 0.134740030 0.000139748 False 113 | 1326 D read -8388608 4096 0.134961957 0.000221927 False 114 | 1385 D read 1024 1024 0.174158429 0.039196472 False 115 | 1385 D read 0 4096 0.175564955 0.001406526 False 116 | 1385 D read 4096 4096 0.175729065 0.00016411 False 117 | 1385 D read 401408 4096 0.175880678 0.000151613 False 118 | 1385 D read 405504 4096 0.175882701 2.02299999999e-06 False 119 | 1385 D read 409600 4096 0.175884837 2.13599999999e-06 False 120 | 1385 D read 413696 4096 0.175887641 2.80400000002e-06 False 121 | 1385 D read 417792 4096 0.175889786 2.14499999998e-06 False 122 | 1385 D read 421888 4096 0.175892012 2.22599999999e-06 False 123 | 1385 D read 425984 4096 0.175894344 2.33200000002e-06 False 124 | 1385 D read 430080 4096 0.175896666 2.322e-06 False 125 | 1385 D read 434176 4096 0.175898730 2.064e-06 False 126 | 1385 D read 438272 4096 0.175900907 2.17699999999e-06 False 127 | 1385 D read 442368 4096 0.175903048 2.14100000001e-06 False 128 | 1385 D read 446464 4096 0.175906164 3.116e-06 False 129 | 1385 D read 450560 4096 0.175908395 2.23099999999e-06 False 130 | 1385 D read 454656 4096 0.175910803 2.40800000001e-06 False 131 | 1385 D read 458752 4096 0.175913003 2.20000000001e-06 False 132 | 1385 D read 462848 4096 0.175915183 2.17999999999e-06 False 133 | 1385 D read 466944 4096 0.175917893 2.70999999999e-06 False 134 | 1385 D read 471040 4096 0.175920066 2.17300000002e-06 False 135 | 1385 D read 475136 4096 0.175922354 2.28799999999e-06 False 136 | 1385 D read 479232 4096 0.175924590 2.23599999999e-06 False 137 | 1385 D read 483328 4096 0.175929246 4.65600000002e-06 False 138 | 1385 D read 487424 4096 0.175931426 2.17999999999e-06 False 139 | 1385 D read 491520 4096 0.175933637 2.211e-06 False 140 | 1385 D read 495616 4096 0.175935731 2.09400000001e-06 False 141 | 1385 D read 499712 4096 0.175937842 2.111e-06 False 142 | 1385 D read 503808 4096 0.175940421 2.579e-06 False 143 | 1385 D read 507904 4096 0.175942964 2.54299999999e-06 False 144 | 1385 D read 512000 4096 
0.175945177 2.213e-06 False 145 | 1385 D read 516096 4096 0.175947731 2.55399999999e-06 False 146 | 1385 D read 520192 4096 0.175949910 2.17899999999e-06 False 147 | 1385 D read 524288 4096 0.175952392 2.48200000003e-06 False 148 | 1385 D read 528384 4096 0.175955113 2.72099999998e-06 False 149 | 0 D read 397312 4096 0.176064552 0.000109439 False 150 | 1385 D read 536870912 4096 0.176399999 0.000335447 False 151 | 1385 D write 0 4096 0.176722326 0.000322327 True 152 | -------------------------------------------------------------------------------- /wiscsim/devblockpool.py: -------------------------------------------------------------------------------- 1 | from tagblockpool import * 2 | from ftlsim_commons import random_channel_id 3 | from utilities import utils 4 | 5 | class MultiChannelBlockPoolBase(object): 6 | def __init__(self, n_channels, n_blocks_per_channel, n_pages_per_block, tags, 7 | leveling_factor=2, leveling_diff=10): 8 | self.n_channels = n_channels 9 | self.n_blocks_per_channel = n_blocks_per_channel 10 | self.n_blocks_per_dev = n_blocks_per_channel * n_channels 11 | self.n_pages_per_block = n_pages_per_block 12 | self.n_pages_per_channel = n_pages_per_block * n_blocks_per_channel 13 | self.total_blocks = n_blocks_per_channel * n_channels 14 | self.tags = tags 15 | 16 | self._channel_pool = [ 17 | ChannelBlockPool(n_blocks_per_channel, tags, n_pages_per_block, i) \ 18 | for i in range(n_channels)] 19 | 20 | # TODO: each tag has its own _next_channel 21 | self._next_channel = random_channel_id(self.n_channels) 22 | # self._next_channel = 0 23 | 24 | self.leveling_factor = leveling_factor 25 | self.leveling_diff = leveling_diff 26 | 27 | def _incr_next_channel(self): 28 | self._next_channel = (self._next_channel + 1) % self.n_channels 29 | return self._next_channel 30 | 31 | def count_blocks(self, tag, channels=None): 32 | total = 0 33 | 34 | if channels is None: 35 | # count on all channels 36 | for pool in self._channel_pool: 37 | total += pool.count_blocks(tag) 38 | else: 39 | for i in channels: 40 | total += self._channel_pool[i].count_blocks(tag) 41 | 42 | return total 43 | 44 | def get_blocks_of_tag(self, tag, channel_id=None): 45 | ret = [] 46 | 47 | if channel_id is None: 48 | channels = self._channel_pool 49 | else: 50 | channels = [self._channel_pool[channel_id]] 51 | 52 | for pool in channels: 53 | blocks = pool.get_blocks_of_tag(tag) 54 | ret.extend(self._blocks_channel_to_global(pool.channel_id, blocks)) 55 | 56 | return ret 57 | 58 | def get_erasure_count_dist(self): 59 | aggregated_dist = Counter() 60 | for pool in self._channel_pool: 61 | dist = pool.get_erasure_count_dist() 62 | aggregated_dist += dist 63 | 64 | return aggregated_dist 65 | 66 | def get_top_or_bottom_erasure_total(self, choice, need_nblocks): 67 | dist = self.get_erasure_count_dist() 68 | erase_cnt, block_cnt = utils.top_or_bottom_total(dist, need_nblocks, choice) 69 | 70 | return erase_cnt, block_cnt 71 | 72 | def get_wear_status(self): 73 | """ 74 | Return wear factor and diff 75 | """ 76 | nblocks = self.total_blocks * 0.1 77 | 78 | top_total, top_count = self.get_top_or_bottom_erasure_total( 79 | 'top', nblocks) 80 | top_average = float(top_total) / top_count 81 | bottom_total, bottom_count = self.get_top_or_bottom_erasure_total( 82 | 'bottom', nblocks) 83 | bottom_average = float(bottom_total) / bottom_count 84 | 85 | diff = top_average - bottom_average 86 | if bottom_total == 0: 87 | factor = float('inf') 88 | else: 89 | factor = float(top_total) / bottom_total 90 | 91 | return factor, 
diff 92 | 93 | def need_wear_leveling(self): 94 | factor, diff = self.get_wear_status() 95 | 96 | print factor, self.leveling_factor 97 | print diff, self.leveling_diff 98 | 99 | if factor > self.leveling_factor and diff > self.leveling_diff: 100 | return True 101 | else: 102 | return False 103 | 104 | def get_least_or_most_erased_blocks(self, tag, choice, nblocks): 105 | global_counter = self.get_erasure_count() 106 | if choice == LEAST_ERASED: 107 | blocks_by_cnt = reversed(global_counter.most_common()) 108 | elif choice == MOST_ERASED: 109 | blocks_by_cnt = global_counter.most_common() 110 | else: 111 | raise NotImplementedError 112 | 113 | tag_blocks = self.get_blocks_of_tag(tag) 114 | 115 | # iterate from least used to most used 116 | blocks = [] 117 | for blocknum, count in blocks_by_cnt: 118 | if blocknum in tag_blocks: 119 | blocks.append(blocknum) 120 | if len(blocks) == nblocks: 121 | break 122 | 123 | return blocks 124 | 125 | def get_erasure_count(self): 126 | global_counter = Counter() 127 | for pool in self._channel_pool: 128 | erasure_cnt = pool.get_erasure_count() 129 | for blocknum, erase_cnt in erasure_cnt.items(): 130 | global_blocknum = self._channel_to_global( 131 | pool.channel_id, blocknum) 132 | global_counter[global_blocknum] = erasure_cnt[blocknum] 133 | 134 | return global_counter 135 | 136 | def pick_and_move(self, src, dst, choice=LEAST_ERASED): 137 | "This function will advance self._next_channel" 138 | blocknum = self.pick(tag=src, choice=choice) 139 | 140 | if blocknum is None: 141 | return None 142 | else: 143 | self.change_tag(blocknum, src, dst) 144 | return blocknum 145 | 146 | def pick(self, tag, channel_id=None, choice=LEAST_ERASED): 147 | if channel_id is None: 148 | cur_channel = self._next_channel 149 | self._incr_next_channel() 150 | else: 151 | cur_channel = channel_id 152 | 153 | block_off = self._channel_pool[cur_channel].pick(tag=tag, 154 | choice=choice) 155 | 156 | if block_off is None: 157 | return None 158 | else: 159 | return self._channel_to_global(cur_channel, block_off) 160 | 161 | def change_tag(self, blocknum, src, dst): 162 | channel_id, block_off = self._global_to_channel(blocknum) 163 | self._channel_pool[channel_id].change_tag(block_off, src, dst) 164 | 165 | def _channel_to_global(self, channel_id, blocknum): 166 | ret = channel_id * self.n_blocks_per_channel + blocknum 167 | assert ret < self.total_blocks 168 | return ret 169 | 170 | def _global_to_channel(self, blocknum): 171 | "return channel_id, block_offset" 172 | assert blocknum < self.total_blocks 173 | return blocknum / self.n_blocks_per_channel, \ 174 | blocknum % self.n_blocks_per_channel 175 | 176 | def _blocks_channel_to_global(self, channel_id, blocks): 177 | return [self._channel_to_global(channel_id, block) 178 | for block in blocks] 179 | 180 | def _ppn_channel_to_global(self, channel_id, ppn): 181 | return channel_id * self.n_pages_per_channel + ppn 182 | 183 | def _ppns_channel_to_global(self, channel_id, ppns): 184 | return [self._ppn_channel_to_global(channel_id, ppn) for ppn in ppns] 185 | 186 | 187 | class MultiChannelBlockPool(MultiChannelBlockPoolBase): 188 | """ 189 | This is for DFTL 190 | """ 191 | def current_blocks(self): 192 | "return all current block numbers" 193 | blocknums = [] 194 | for pool in self._channel_pool: 195 | for tag in self.tags: 196 | cur_blk_objs = pool.get_cur_block_obj(tag) 197 | for obj in cur_blk_objs: 198 | global_blk = self._channel_to_global(pool.channel_id, 199 | obj.blocknum) 200 | blocknums.append(global_blk) 201 | 202 | 
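# each channel keeps one 'current' (actively programmed) block per tag and block index; the loops above collect them and translate the per-channel block numbers into device-global ones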
return blocknums 203 | 204 | def remove_full_cur_blocks(self): 205 | for pool in self._channel_pool: 206 | pool.remove_full_cur_blocks() 207 | 208 | def next_ppns(self, n, tag, block_index, stripe_size, choice=LEAST_ERASED): 209 | """ 210 | We will try to use all the available pages in the one channels' 211 | current block before going to the next. 212 | """ 213 | remaining = n 214 | if stripe_size == 'infinity': 215 | stripe_size = float('inf') 216 | 217 | ret_ppns = [] 218 | empty_channels = set() 219 | while remaining > 0 and len(empty_channels) < self.n_channels: 220 | cur_channel_id = self._next_channel 221 | req = min(remaining, stripe_size) 222 | ppns = self._next_ppns_in_channel( 223 | channel_id=cur_channel_id, 224 | n=req, tag=tag, block_index=block_index, 225 | choice=choice) 226 | if len(ppns) == 0: 227 | # channel out of space 228 | empty_channels.add(cur_channel_id) 229 | 230 | ppns = self._ppns_channel_to_global(cur_channel_id, ppns) 231 | ret_ppns.extend(ppns) 232 | self._incr_next_channel() 233 | remaining -= len(ppns) 234 | 235 | if remaining > 0: 236 | # out of space 237 | raise TagOutOfSpaceError 238 | 239 | return ret_ppns 240 | 241 | def _next_ppns_in_channel(self, channel_id, n, tag, block_index, 242 | choice=LEAST_ERASED): 243 | """ 244 | Return ppns we can find. If returning [], it means this channel 245 | is out of space. 246 | """ 247 | channel_pool = self._channel_pool[channel_id] 248 | remaining = n 249 | 250 | ret_ppns = [] 251 | while remaining > 0: 252 | ppnlist = channel_pool.next_ppns_from_cur_block(n=remaining, 253 | tag=tag, block_index=block_index) 254 | 255 | if len(ppnlist) == 0: 256 | new_block = channel_pool.pick_and_move(src=TFREE, dst=tag, 257 | choice=choice) 258 | if new_block == None: 259 | # this channel is out of space of this tag 260 | break 261 | channel_pool.set_new_cur_block(tag, block_index, new_block) 262 | 263 | ret_ppns.extend(ppnlist) 264 | remaining -= len(ppnlist) 265 | return ret_ppns 266 | 267 | 268 | class ChannelBlockPool(BlockPoolWithCurBlocks): 269 | def __init__(self, n, tags, n_pages_per_block, channel_id): 270 | super(ChannelBlockPool, self).__init__(n, tags, n_pages_per_block) 271 | self.channel_id = channel_id 272 | 273 | 274 | class TagOutOfSpaceError(RuntimeError): 275 | pass 276 | 277 | 278 | -------------------------------------------------------------------------------- /tests/test_demo.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import collections 3 | import shutil 4 | import os 5 | 6 | import config 7 | from workflow import * 8 | import wiscsim 9 | from utilities import utils 10 | from wiscsim.hostevent import Event, ControlEvent 11 | from config_helper import rule_parameter 12 | from pyreuse.helpers import shcmd 13 | from config_helper import experiment 14 | 15 | 16 | class Test_TraceOnly(unittest.TestCase): 17 | def test_run(self): 18 | class LocalExperiment(experiment.Experiment): 19 | def setup_workload(self): 20 | self.conf['workload_class'] = "SimpleRandReadWrite" 21 | 22 | para = experiment.get_shared_nolist_para_dict("test_exp_TraceOnly2", 16*MB) 23 | para['device_path'] = "/dev/loop0" 24 | para['enable_simulation'] = False 25 | para['enable_blktrace'] = True 26 | 27 | Parameters = collections.namedtuple("Parameters", ','.join(para.keys())) 28 | obj = LocalExperiment( Parameters(**para) ) 29 | obj.main() 30 | 31 | 32 | class Test_TraceAndSimulateDFTLDES(unittest.TestCase): 33 | def test_run(self): 34 | class 
--------------------------------------------------------------------------------
/tests/test_demo.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import collections
3 | import shutil
4 | import os
5 | 
6 | import config
7 | from workflow import *
8 | import wiscsim
9 | from utilities import utils
10 | from wiscsim.hostevent import Event, ControlEvent
11 | from config_helper import rule_parameter
12 | from pyreuse.helpers import shcmd
13 | from config_helper import experiment
14 | 
15 | 
16 | class Test_TraceOnly(unittest.TestCase):
17 |     def test_run(self):
18 |         class LocalExperiment(experiment.Experiment):
19 |             def setup_workload(self):
20 |                 self.conf['workload_class'] = "SimpleRandReadWrite"
21 | 
22 |         para = experiment.get_shared_nolist_para_dict("test_exp_TraceOnly2", 16*MB)
23 |         para['device_path'] = "/dev/loop0"
24 |         para['enable_simulation'] = False
25 |         para['enable_blktrace'] = True
26 | 
27 |         Parameters = collections.namedtuple("Parameters", ','.join(para.keys()))
28 |         obj = LocalExperiment( Parameters(**para) )
29 |         obj.main()
30 | 
31 | 
32 | class Test_TraceAndSimulateDFTLDES(unittest.TestCase):
33 |     def test_run(self):
34 |         class LocalExperiment(experiment.Experiment):
35 |             def setup_workload(self):
36 |                 self.conf['workload_class'] = "SimpleRandReadWrite"
37 | 
38 |         para = experiment.get_shared_nolist_para_dict("test_exp_TraceAndSimulateDFTLDES_xjjj", 16*MB)
39 |         para['device_path'] = "/dev/loop0"
40 |         para['ftl'] = "dftldes"
41 |         Parameters = collections.namedtuple("Parameters", ','.join(para.keys()))
42 |         obj = LocalExperiment( Parameters(**para) )
43 |         obj.main()
44 | 
45 | 
46 | class Test_TraceAndSimulateNKFTL(unittest.TestCase):
47 |     def test_run(self):
48 |         class LocalExperiment(experiment.Experiment):
49 |             def setup_workload(self):
50 |                 self.conf['workload_class'] = "SimpleRandReadWrite"
51 | 
52 |         para = experiment.get_shared_nolist_para_dict("test_exp_TraceAndSimulateNKFTL_xjjj", 16*MB)
53 |         para['device_path'] = "/dev/loop0"
54 |         para['ftl'] = "nkftl2"
55 |         Parameters = collections.namedtuple("Parameters", ','.join(para.keys()))
56 |         obj = LocalExperiment( Parameters(**para) )
57 |         obj.main()
58 | 
59 | 
60 | class Test_SimulateForSyntheticWorkload(unittest.TestCase):
61 |     def test_run(self):
62 |         class LocalExperiment(experiment.Experiment):
63 |             def setup_workload(self):
64 |                 self.conf['workload_src'] = config.LBAGENERATOR
65 |                 self.conf['lba_workload_class'] = "AccessesWithDist"
66 |                 self.conf['AccessesWithDist'] = {
67 |                     'lba_access_dist': 'uniform',
68 |                     'traffic_size': 8*MB,
69 |                     'chunk_size': 64*KB,
70 |                     'space_size': 8*MB,
71 |                     'skew_factor': None,
72 |                     'zipf_alpha': None,
73 |                 }
74 | 
75 |         para = experiment.get_shared_nolist_para_dict("test_exp_SimulateForSyntheticWorkload", 16*MB)
76 |         para['ftl'] = "nkftl2"
77 |         Parameters = collections.namedtuple("Parameters", ','.join(para.keys()))
78 |         obj = LocalExperiment( Parameters(**para) )
79 |         obj.main()
80 | 
81 | class TestUsingExistingTraceToSimulate(unittest.TestCase):
82 |     def test_run(self):
83 |         class LocalExperiment(experiment.Experiment):
84 |             def setup_workload(self):
85 |                 self.conf["workload_src"] = config.LBAGENERATOR
86 |                 self.conf["lba_workload_class"] = "BlktraceEvents"
87 |                 self.conf['lba_workload_configs']['mkfs_event_path'] = \
88 |                     self.para.mkfs_path
89 |                 self.conf['lba_workload_configs']['ftlsim_event_path'] = \
90 |                     self.para.ftlsim_path
91 | 
92 |         para = experiment.get_shared_nolist_para_dict("test_exp_TestUsingExistingTraceToSimulate_jj23hx", 1*GB)
93 |         para.update({
94 |             'ftl': "dftldes",
95 |             "mkfs_path": "./tests/testdata/sqlitewal-update/subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803/blkparse-events-for-ftlsim-mkfs.txt",
96 |             "ftlsim_path": "./tests/testdata/sqlitewal-update/subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803/blkparse-events-for-ftlsim.txt",
97 |         })
98 | 
99 |         Parameters = collections.namedtuple("Parameters", ','.join(para.keys()))
100 |         obj = LocalExperiment( Parameters(**para) )
101 |         obj.main()
102 | 
103 | 
104 | 
105 | 
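# All of the test classes in this file follow the same recipe:
#
#   1. subclass experiment.Experiment and override setup_workload() --
#      either to pick a workload_class, or to point lba_workload_configs
#      at existing trace files;
#   2. start from experiment.get_shared_nolist_para_dict(name, size) and
#      override entries such as 'device_path' or 'ftl';
#   3. freeze the dict into a namedtuple and call LocalExperiment(...).main().
#
# The namedtuple step makes the parameters attribute-accessible (e.g.
# self.para.mkfs_path inside setup_workload) and read-only for the run.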
"./tests/testdata/sqlitewal-update/subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803/blkparse-events-for-ftlsim-mkfs.txt", 121 | "ftlsim_path": "./tests/testdata/sqlitewal-update/subexp-7928737328932659543-ext4-10-07-23-50-10--2726320246496492803/blkparse-events-for-ftlsim.txt", 122 | 'ftl' : 'ftlcounter', 123 | 'enable_simulation': True, 124 | 'dump_ext4_after_workload': True, 125 | 'only_get_traffic': False, 126 | 'trace_issue_and_complete': True, 127 | 'do_dump_lpn_sem': False, 128 | }) 129 | 130 | Parameters = collections.namedtuple("Parameters", ','.join(para.keys())) 131 | obj = LocalExperiment( Parameters(**para) ) 132 | obj.main() 133 | 134 | 135 | ################################################################### 136 | # Experiments setting similar to SSD Contract paper 137 | ################################################################### 138 | 139 | class TestRunningWorkloadAndOutputRequestScale(unittest.TestCase): 140 | def test_run(self): 141 | class LocalExperiment(experiment.Experiment): 142 | def setup_workload(self): 143 | self.conf['workload_class'] = "SimpleRandReadWrite" 144 | 145 | para = experiment.get_shared_nolist_para_dict("test_exp_TestRequestScale_jjj3nx", 16*MB) 146 | para['device_path'] = "/dev/loop0" 147 | para.update( 148 | { 149 | 'device_path': "/dev/loop0", 150 | 'ftl' : 'ftlcounter', 151 | 'enable_simulation': True, 152 | 'dump_ext4_after_workload': True, 153 | 'only_get_traffic': False, 154 | 'trace_issue_and_complete': True, 155 | }) 156 | 157 | Parameters = collections.namedtuple("Parameters", ','.join(para.keys())) 158 | obj = LocalExperiment( Parameters(**para) ) 159 | obj.main() 160 | 161 | 162 | class TestLocality(unittest.TestCase): 163 | def test(self): 164 | old_dir = "/tmp/results/sqlitewal-update" 165 | if os.path.exists(old_dir): 166 | shutil.rmtree(old_dir) 167 | 168 | # copy the data to 169 | shcmd("cp -r ./tests/testdata/sqlitewal-update /tmp/results/") 170 | 171 | for para in rule_parameter.ParaDict("testexpname", ['sqlitewal-update'], "locality"): 172 | experiment.execute_simulation(para) 173 | 174 | class TestAlignment(unittest.TestCase): 175 | def test(self): 176 | old_dir = "/tmp/results/sqlitewal-update" 177 | if os.path.exists(old_dir): 178 | shutil.rmtree(old_dir) 179 | 180 | # copy the data to 181 | shcmd("cp -r ./tests/testdata/sqlitewal-update /tmp/results/") 182 | 183 | for para in rule_parameter.ParaDict("testexpname", ['sqlitewal-update'], "alignment"): 184 | experiment.execute_simulation(para) 185 | 186 | 187 | class TestGrouping(unittest.TestCase): 188 | def test(self): 189 | old_dir = "/tmp/results/sqlitewal-update" 190 | if os.path.exists(old_dir): 191 | shutil.rmtree(old_dir) 192 | 193 | # copy the data to 194 | shcmd("cp -r ./tests/testdata/sqlitewal-update /tmp/results/") 195 | 196 | for para in rule_parameter.ParaDict("testexpname", ['sqlitewal-update'], "grouping"): 197 | experiment.execute_simulation(para) 198 | 199 | 200 | class TestUniformDataLifetime(unittest.TestCase): 201 | def test_run(self): 202 | class LocalExperiment(experiment.Experiment): 203 | def setup_workload(self): 204 | self.conf['workload_class'] = "SimpleRandReadWrite" 205 | 206 | para = experiment.get_shared_nolist_para_dict("test_exp_TestUniformDataLifetime", 16*MB) 207 | para.update( 208 | { 209 | 'ftl' : 'ftlcounter', 210 | 'device_path' : '/dev/loop0', 211 | 'enable_simulation': True, 212 | 'dump_ext4_after_workload': True, 213 | 'only_get_traffic': False, 214 | 'trace_issue_and_complete': False, 215 | 
'gen_ncq_depth_table': False, 216 | 'do_dump_lpn_sem': False, 217 | 'rm_blkparse_events': True, 218 | 'sort_block_trace': False, 219 | }) 220 | 221 | Parameters = collections.namedtuple("Parameters", ','.join(para.keys())) 222 | obj = LocalExperiment( Parameters(**para) ) 223 | obj.main() 224 | 225 | 226 | # class Test_TraceAndSimulateLinuxDD(unittest.TestCase): 227 | # def test_run(self): 228 | # class LocalExperiment(experiment.Experiment): 229 | # def setup_workload(self): 230 | # self.conf['workload_class'] = "LinuxDD" 231 | 232 | # para = experiment.get_shared_nolist_para_dict("test_exp_LinuxDD", 16*MB) 233 | # para['device_path'] = "/dev/loop0" 234 | # para['filesystem'] = "ext4" 235 | # para['ftl'] = "dftldes" 236 | # Parameters = collections.namedtuple("Parameters", ','.join(para.keys())) 237 | # obj = LocalExperiment( Parameters(**para) ) 238 | # obj.main() 239 | 240 | 241 | if __name__ == '__main__': 242 | unittest.main() 243 | 244 | -------------------------------------------------------------------------------- /wiscsim/simulator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import abc 3 | import argparse 4 | import random 5 | import simpy 6 | import sys 7 | import os 8 | import csv 9 | import pprint 10 | 11 | import config 12 | import ssdframework 13 | import dftlext 14 | import flash 15 | import nkftl2 16 | import recorder 17 | import hostevent 18 | import dftldes 19 | import ftlcounter 20 | 21 | from commons import * 22 | from ftlsim_commons import * 23 | from .host import Host 24 | from utilities import utils 25 | 26 | from pyreuse.sysutils import blocktrace, blockclassifiers, dumpe2fsparser 27 | from pyreuse.fsutils import ext4dumpextents 28 | from .gc_analysis import GcLog 29 | 30 | class Simulator(object): 31 | __metaclass__ = abc.ABCMeta 32 | 33 | @abc.abstractmethod 34 | def run(self): 35 | return 36 | 37 | @abc.abstractmethod 38 | def get_sim_type(self): 39 | return 40 | 41 | def __init__(self, conf, event_iter): 42 | "conf is class Config" 43 | if not isinstance(conf, config.Config): 44 | raise TypeError("conf is not config.Config, it is {}". 
45 |                 format(type(conf).__name__))
46 | 
47 |         self.conf = conf
48 |         self.event_iter = event_iter
49 | 
50 |         # initialize recorder
51 |         self.recorder = recorder.Recorder(output_target = self.conf['output_target'],
52 |             output_directory = self.conf['result_dir'],
53 |             verbose_level = self.conf['verbose_level'],
54 |             print_when_finished = self.conf['print_when_finished']
55 |             )
56 | 
57 |         if self.conf.has_key('enable_e2e_test'):
58 |             raise RuntimeError("enable_e2e_test is deprecated")
59 | 
60 | 
61 | class SimulatorDESNew(Simulator):
62 |     def __init__(self, conf, event_iter):
63 |         super(SimulatorDESNew, self).__init__(conf, event_iter)
64 | 
65 |         self.env = simpy.Environment()
66 |         self.host = Host(self.conf, self.env, event_iter)
67 |         self.ssd = ssdframework.Ssd(self.conf, self.env,
68 |             self.host.get_ncq(), self.recorder)
69 | 
70 |     def run(self):
71 |         self.env.process(self.host.run())
72 |         self.env.process(self.ssd.run())
73 | 
74 |         self.env.run()
75 | 
76 |         self.record_post_run_stats()
77 | 
78 |     def get_sim_type(self):
79 |         return "SimulatorDESNew"
80 | 
81 |     def record_post_run_stats(self):
82 |         self.recorder.set_result_by_one_key(
83 |             'simulation_duration', self.env.now)
84 |         pprint.pprint(self.recorder.get_result_summary())
85 | 
86 |         self.recorder.close()
87 | 
88 |         gclog = GcLog(device_path=self.conf['device_path'],
89 |             result_dir=self.conf['result_dir'],
90 |             flash_page_size=self.conf.page_size
91 |             )
92 |         if self.conf['filesystem'] == 'ext4' and \
93 |             os.path.exists(gclog.gclog_path) and \
94 |             os.path.exists(gclog.extents_path):
95 |             gclog.classify_lpn_in_gclog()
96 | 
97 | 
98 | def create_simulator(simulator_class, conf, event_iter):
99 |     cls = eval(simulator_class)
100 |     return cls(conf, event_iter)
101 | 
102 | 
103 | def random_data(addr):
104 |     randnum = random.randint(0, 10000)
105 |     content = "{}.{}".format(addr, randnum)
106 |     return content
107 | 
108 | 
109 | class SimulatorNonDES(Simulator):
110 |     __metaclass__ = abc.ABCMeta
111 | 
112 |     def __init__(self, conf, event_iter):
113 |         super(SimulatorNonDES, self).__init__(conf, event_iter)
114 | 
115 |         if self.conf['ftl_type'] == 'dftlext':
116 |             ftl_class = dftlext.Dftl
117 |         elif self.conf['ftl_type'] == 'nkftl2':
118 |             ftl_class = nkftl2.Ftl
119 |         elif self.conf['ftl_type'] == 'ftlcounter':
120 |             ftl_class = ftlcounter.Ftl
121 |         else:
122 |             raise ValueError("ftl_type {} is not defined"\
123 |                 .format(self.conf['ftl_type']))
124 | 
125 |         self.ftl = ftl_class(self.conf, self.recorder,
126 |             flash.Flash(recorder = self.recorder, confobj = self.conf))
127 | 
128 |     def run(self):
129 |         """
130 |         You must guarantee that each item in event_iter is an instance of class Event
131 |         """
132 |         cnt = 0
133 |         for event in self.event_iter:
134 |             self.process_event(event)
135 |             cnt += 1
136 |             if cnt % 5000 == 0:
137 |                 print '|',
138 |                 sys.stdout.flush()
139 | 
140 |         self.ftl.post_processing()
141 | 
142 |         self.recorder.close()
143 | 
144 |     def process_event(self, event):
145 |         if event.action != 'D':
146 |             return
147 | 
148 |         if event.operation == OP_READ:
149 |             self.read(event)
150 |         elif event.operation == OP_WRITE:
151 |             self.write(event)
152 |         elif event.operation == OP_DISCARD:
153 |             self.discard(event)
154 |         elif event.operation == OP_ENABLE_RECORDER:
155 |             self.ftl.enable_recording()
156 |         elif event.operation == OP_DISABLE_RECORDER:
157 |             self.ftl.disable_recording()
158 |         elif event.operation == OP_WORKLOADSTART:
159 |             self.ftl.pre_workload()
160 |         elif event.operation in ['finish', OP_BARRIER, OP_REC_TIMESTAMP, OP_CLEAN,
161 |             OP_NOOP]:
162 |             # control/bookkeeping events -- nothing to simulate
163 |             pass
164 |         else:
165 |             pass
166 |             # print event
167 |             # raise RuntimeError("operation '{}' is not supported".format(
168 |             #     event.operation))
169 | 
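# A summary of the process_event() contract above: only events whose
# blktrace action is 'D' (issued to the driver) reach the FTL. OP_READ,
# OP_WRITE and OP_DISCARD map to the FTL's sec_read/sec_write/sec_discard;
# the recorder events toggle result recording (e.g. to exclude warm-up
# traffic); OP_WORKLOADSTART lets the FTL run pre_workload() before the
# measured phase. Unrecognized operations are deliberately ignored (note
# the commented-out RuntimeError) so stray trace records do not abort a
# long simulation run.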
170 | 
171 | class SimulatorNonDESSpeed(SimulatorNonDES):
172 |     """
173 |     This one does NOT do the end-to-end data check; it just replays
174 |     extents through the FTL for speed.
175 |     """
176 |     def get_sim_type(self):
177 |         return "NonDESSpeed"
178 | 
179 |     def write(self, event):
180 |         self.ftl.sec_write(
181 |             sector = event.sector,
182 |             count = event.sector_count,
183 |             data = None)
184 | 
185 |     def read(self, event):
186 |         """
187 |         read extent from flash; this class does not verify the data.
188 |         """
189 |         self.ftl.sec_read(
190 |             sector = event.sector,
191 |             count = event.sector_count)
192 | 
193 |     def discard(self, event):
194 |         self.ftl.sec_discard(
195 |             sector = event.sector,
196 |             count = event.sector_count)
197 | 
198 | 
199 | class SimulatorNonDESe2e(SimulatorNonDES):
200 |     """
201 |     This one DOES the end-to-end test: every sector written is
202 |     remembered in lsn_to_data and verified on read.
203 |     """
204 |     def __init__(self, conf, event_iter):
205 |         super(SimulatorNonDESe2e, self).__init__(conf, event_iter)
206 | 
207 |         self.lsn_to_data = {}
208 | 
209 |     def get_sim_type(self):
210 |         return "NonDESe2e"
211 | 
212 |     def write(self, event):
213 |         """
214 |         1. Generate random data
215 |         2. Copy random data to lsn_to_data
216 |         3. Write data by ftl
217 |         """
218 |         data = []
219 |         for sec in range(event.sector, event.sector + event.sector_count):
220 |             content = random_data(sec)
221 |             self.lsn_to_data[sec] = content
222 |             data.append(content)
223 | 
224 |         self.ftl.sec_write(
225 |             sector = event.sector,
226 |             count = event.sector_count,
227 |             data = data)
228 | 
229 |     def read(self, event):
230 |         """
231 |         read extent from flash and check if the data is correct.
232 |         """
233 |         data = self.ftl.sec_read(
234 |             sector = event.sector,
235 |             count = event.sector_count)
236 | 
237 |         self.check_read(event, data)
238 | 
239 |     def check_read(self, event, data):
240 |         for sec, sec_data in zip(
241 |                 range(event.sector, event.sector + event.sector_count), data):
242 |             if self.lsn_to_data.get(sec, None) != sec_data:
243 |                 msg = "Data is not correct. Got: {read}, "\
244 |                     "Correct: {correct}. sector={sec}".format(
245 |                         read = sec_data,
246 |                         correct = self.lsn_to_data.get(sec, None),
247 |                         sec = sec)
248 |                 print msg
249 |                 # raise RuntimeError(msg)
250 | 
251 |     def discard(self, event):
252 |         self.ftl.sec_discard(
253 |             sector = event.sector,
254 |             count = event.sector_count)
255 | 
256 |         for sec in range(event.sector, event.sector + event.sector_count):
257 |             try:
258 |                 del self.lsn_to_data[sec]
259 |             except KeyError:
260 |                 pass
261 | 
262 | 
263 | class SimulatorDESSync(Simulator):
264 |     def __init__(self, conf, event_iters):
265 |         """
266 |         event_iters is a list of event iterators
267 |         """
268 |         super(SimulatorDESSync, self).__init__(conf, None)
269 | 
270 |         if not isinstance(event_iters, list):
271 |             raise RuntimeError("event_iters must be a list of iterators.")
272 | 
273 |         self.event_iters = event_iters
274 | 
275 |         self.env = simpy.Environment()
276 |         self.ssdframework = ssdframework.SSDFramework(self.conf, self.recorder, self.env)
277 | 
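    # host_proc() below is a bounded producer: each event must first win a
    # slot from a simpy.Resource with capacity process_queue_depth before
    # it enters the NCQ, so at most that many requests from one process
    # are ever in flight. The token request is attached to the event
    # (event.token_req), presumably so the SSD side can release it on
    # completion and wake the next yield here (the consumer is not in this
    # file). The trailing OP_SHUT_SSD control events -- one per NCQ slot --
    # tell the SSD to drain and stop.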
278 |     def host_proc(self, pid, event_iter):
279 |         """
280 |         This process acts like a producer, putting requests into the NCQ
281 |         """
282 | 
283 |         # a token must be acquired before we issue a request to the queue;
284 |         # it effectively controls the queue depth of this process
285 |         token = simpy.Resource(self.env,
286 |             capacity = self.conf['process_queue_depth'])
287 | 
288 |         for event in event_iter:
289 |             event.token = token
290 |             event.token_req = event.token.request()
291 | 
292 |             yield event.token_req
293 | 
294 |             yield self.ssdframework.ncq.queue.put(event)
295 | 
296 |         for i in range(self.conf['SSDFramework']['ncq_depth']):
297 |             event = hostevent.ControlEvent(OP_SHUT_SSD)
298 | 
299 |             event.token = token
300 |             event.token_req = event.token.request()
301 | 
302 |             yield event.token_req
303 | 
304 |             yield self.ssdframework.ncq.queue.put(event)
305 | 
306 |     def run(self):
307 |         for i, event_iter in enumerate(self.event_iters):
308 |             self.env.process(self.host_proc(i, event_iter))
309 |         self.env.process(self.ssdframework.run())
310 | 
311 |         self.env.run()
312 | 
313 |     def get_sim_type(self):
314 |         return "SimulatorDES"
315 | 
316 |     def write(self):
317 |         raise NotImplementedError()
318 | 
319 |     def read(self):
320 |         raise NotImplementedError()
321 | 
322 |     def discard(self):
323 |         raise NotImplementedError()
324 | 
325 | 
326 | 
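# create_simulator() above resolves the class name with eval(), which works
# because every simulator class lives in this module's namespace. A minimal
# eval-free equivalent is sketched below; it is illustrative only (the
# _SIMULATORS name is made up here) and nothing else in the codebase uses it.
if __name__ == '__main__':
    _SIMULATORS = {
        'SimulatorDESNew': SimulatorDESNew,
        'SimulatorDESSync': SimulatorDESSync,
        'SimulatorNonDESSpeed': SimulatorNonDESSpeed,
        'SimulatorNonDESe2e': SimulatorNonDESe2e,
    }
    # a dict lookup would replace eval(simulator_class) in create_simulator()
    print sorted(_SIMULATORS.keys())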
--------------------------------------------------------------------------------
/pyreuse/sysutils/blocktrace.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import subprocess
4 | import time
5 | 
6 | from pyreuse.helpers import *
7 | from pyreuse.macros import *
8 | 
9 | class BlktraceResult(object):
10 |     """
11 |     Parse blkparse output
12 |     """
13 |     def __init__(self, sector_size, event_file_column_names,
14 |             raw_blkparse_file_path, parsed_output_path,
15 |             padding_bytes=0, do_sort=True):
16 |         self.raw_blkparse_file_path = raw_blkparse_file_path
17 |         self.parsed_output_path = parsed_output_path
18 |         self.sector_size = sector_size
19 |         self.event_file_column_names = event_file_column_names
20 |         self.do_sort = do_sort
21 | 
22 |         # event offset + padding_bytes = blktrace addr
23 |         #
24 |         # |--x-- FS address -------
25 |         # Dev:
26 |         # |--8MB---|--y---------------------
27 |         # y - 8MB = x
28 |         # blktrace address - 8MB = event address
29 |         self.padding_bytes = padding_bytes
30 | 
31 |     def create_event_file(self):
32 |         prepare_dir_for_path(self.parsed_output_path)
33 | 
34 |         out_file = open(self.parsed_output_path, 'w')
35 |         in_file = open(self.raw_blkparse_file_path, 'r')
36 | 
37 |         for line in in_file:
38 |             line = line.strip()
39 |             if not is_data_line(line):
40 |                 continue
41 | 
42 |             # get row dict
43 |             row_dict = self.__line_to_dic(line)
44 |             row_dict['type'] = 'blkparse'
45 | 
46 |             line = self.__create_event_line(row_dict)
47 |             out_file.write( line + '\n' )
48 | 
49 |         out_file.flush()
50 |         os.fsync(out_file)
51 |         out_file.close()
52 | 
53 | 
54 |     def __line_to_dic(self, line):
55 |         """
56 |         is_data_line() must be true for `line`. An example line, split:
57 |         ['8,0', '0', '1', '0.000000000', '440', 'A', 'W', '12912077', '+', '8', '<-', '(8,2)', '606224']
58 |         """
59 |         names = ['devid', 'cpuid', 'seqid', 'timestamp', 'pid', 'action', 'RWBS', 'sector_start', 'ignore1', 'sector_count']
60 |         #         0        1        2        3            4      5         6       7               8          9
61 |         items = line.split()
62 | 
63 |         dic = dict(zip(names, items))
64 |         assert len(items) >= len(names)
65 | 
66 |         self.__parse_and_add_operation(dic)
67 |         self.__parse_and_add_offset_size(dic)
68 | 
69 |         return dic
70 | 
71 |     def __parse_and_add_operation(self, row):
72 |         if 'D' in row['RWBS']:
73 |             operation = 'discard'
74 |         elif 'W' in row['RWBS']:
75 |             operation = 'write'
76 |         elif 'R' in row['RWBS']:
77 |             operation = 'read'
78 |         else:
79 |             raise RuntimeError('unknown operation ' + row['RWBS'])
80 | 
81 |         row['operation'] = operation
82 | 
83 |         if 'S' in row['RWBS']:
84 |             row['sync'] = 'True'
85 |         else:
86 |             row['sync'] = 'False'
87 | 
88 |     def __parse_and_add_offset_size(self, row):
89 |         sec_start = int(row['sector_start'])
90 |         sec_count = int(row['sector_count'])
91 |         byte_offset = sec_start * self.sector_size - self.padding_bytes
92 |         byte_size = sec_count * self.sector_size
93 | 
94 |         row['offset'] = byte_offset
95 |         row['size'] = byte_size
96 | 
97 |     def __create_event_line(self, line_dict):
98 |         columns = [str(line_dict.get(colname, 'NA'))
99 |             for colname in self.event_file_column_names]
100 |         line = ' '.join(columns)
101 |         return line
102 | 
103 | 
104 | class BlktraceResultInMem(object):
105 |     """
106 |     Parse blkparse output
107 |     """
108 |     def __init__(self, sector_size, event_file_column_names,
109 |             raw_blkparse_file_path, parsed_output_path,
110 |             padding_bytes=0, do_sort=True):
111 |         self.raw_blkparse_file_path = raw_blkparse_file_path
112 |         self.parsed_output_path = parsed_output_path
113 |         self.sector_size = sector_size
114 |         self.event_file_column_names = event_file_column_names
115 |         self.do_sort = do_sort
116 | 
117 |         # event offset + padding_bytes = blktrace addr
118 |         #
119 |         # |--x-- FS address -------
120 |         # Dev:
121 |         # |--8MB---|--y---------------------
122 |         # y - 8MB = x
123 |         # blktrace address - 8MB = event address
124 |         self.padding_bytes = padding_bytes
125 | 
126 |         self.__parse_rawfile()
127 | 
128 |     def __line_to_dic(self, line):
129 |         """
130 |         is_data_line() must be true for `line`. An example line, split:
131 |         ['8,0', '0', '1', '0.000000000', '440', 'A', 'W', '12912077', '+', '8', '<-', '(8,2)', '606224']
132 |         """
133 |         names = ['devid', 'cpuid', 'seqid', 'timestamp', 'pid', 'action', 'RWBS', 'sector_start', 'ignore1', 'sector_count']
134 |         #         0        1        2        3            4      5         6       7               8          9
135 |         items = line.split()
136 | 
137 |         dic = dict(zip(names, items))
138 |         assert len(items) >= len(names)
139 | 
140 |         self.__parse_and_add_operation(dic)
141 |         self.__parse_and_add_offset_size(dic)
142 | 
143 |         return dic
144 | 
145 |     def __parse_and_add_operation(self, row):
146 |         if 'D' in row['RWBS']:
147 |             operation = 'discard'
148 |         elif 'W' in row['RWBS']:
149 |             operation = 'write'
150 |         elif 'R' in row['RWBS']:
151 |             operation = 'read'
152 |         else:
153 |             raise RuntimeError('unknown operation ' + row['RWBS'])
154 | 
155 |         row['operation'] = operation
156 | 
157 |         if 'S' in row['RWBS']:
158 |             row['sync'] = 'True'
159 |         else:
160 |             row['sync'] = 'False'
161 | 
162 |     def __parse_and_add_offset_size(self, row):
163 |         sec_start = int(row['sector_start'])
164 |         sec_count = int(row['sector_count'])
165 |         byte_offset = sec_start * self.sector_size - self.padding_bytes
166 |         byte_size = sec_count * self.sector_size
167 | 
168 |         row['offset'] = byte_offset
169 |         row['size'] = byte_size
170 | 
171 |     def __calculate_pre_wait_time(self, event_table):
172 |         if self.do_sort is True:
173 |             event_table.sort(key = lambda k: float(k['timestamp']))
174 | 
175 |         for i, row in enumerate(event_table):
176 |             if i == 0:
177 |                 row['pre_wait_time'] = 0
178 |                 continue
179 |             row['pre_wait_time'] = float(event_table[i]['timestamp']) - \
180 |                 float(event_table[i-1]['timestamp'])
181 |             if self.do_sort is True:
182 |                 assert row['pre_wait_time'] >= 0, "data is {}".format(row['pre_wait_time'])
183 | 
184 |         return event_table
185 | 
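    # A worked example of __calculate_pre_wait_time() with illustrative
    # timestamps: rows stamped 0.10, 0.25, 0.25, 0.40 (already sorted)
    # get pre_wait_time values 0, 0.15, 0.00, 0.15 -- each row records the
    # gap since the previous event, which a replayer can treat as the
    # inter-arrival time. Note the assert above only applies when do_sort
    # is on; an unsorted trace may legitimately contain negative gaps.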
186 |     def __parse_rawfile(self):
187 |         with open(self.raw_blkparse_file_path, 'r') as line_iter:
188 |             table = []
189 |             for line in line_iter:
190 |                 line = line.strip()
191 |                 # print is_data_line(line), line
192 |                 if is_data_line(line):
193 |                     ret = self.__line_to_dic(line)
194 |                     ret['type'] = 'blkparse'
195 |                 else:
196 |                     ret = None
197 | 
198 |                 if ret is not None:
199 |                     table.append(ret)
200 | 
201 |             table = self.__calculate_pre_wait_time(table)
202 | 
203 |         self.__parsed_table = table
204 | 
205 |     def __create_event_line(self, line_dict):
206 |         columns = [str(line_dict[colname])
207 |             for colname in self.event_file_column_names]
208 |         line = ' '.join(columns)
209 |         return line
210 | 
211 |     def create_event_file(self):
212 |         prepare_dir_for_path(self.parsed_output_path)
213 |         out = open(self.parsed_output_path, 'w')
214 |         for row_dict in self.__parsed_table:
215 |             if row_dict['type'] == 'blkparse':
216 |                 line = self.__create_event_line(row_dict)
217 |             else:
218 |                 raise NotImplementedError()
219 | 
220 |             out.write( line + '\n' )
221 | 
222 |         out.flush()
223 |         os.fsync(out)
224 |         out.close()
225 | 
226 |     def get_duration(self):
227 |         return float(self.__parsed_table[-1]['timestamp']) - \
228 |             float(self.__parsed_table[0]['timestamp'])
229 | 
230 |     def count_sectors(self, operation):
231 |         sectors_cnt = 0
232 |         for row in self.__parsed_table:
233 |             if row['operation'] == operation:
234 |                 sectors_cnt += int(row['sector_count'])
235 | 
236 |         return sectors_cnt
237 | 
238 |     def get_bandwidth_mb(self, operation):
239 |         sec_cnt = self.count_sectors(operation)
240 |         size_mb = sec_cnt * self.sector_size / float(MB)
241 |         duration = self.get_duration()
242 | 
243 |         return size_mb / duration
244 | 
245 | 
246 | class BlockTraceManager(object):
247 |     "This class provides interfaces to interact with blktrace"
248 |     def __init__(self, dev, event_file_column_names,
249 |             resultpath, to_ftlsim_path, sector_size, padding_bytes=0,
250 |             do_sort=True):
251 |         self.dev = dev
252 |         self.sector_size = sector_size
253 |         self.event_file_column_names = event_file_column_names
254 |         self.resultpath = resultpath
255 |         self.to_ftlsim_path = to_ftlsim_path
256 | 
257 |         self.padding_bytes = padding_bytes
258 |         self.do_sort = do_sort
259 | 
260 |     def start_tracing_and_collecting(self, trace_filter=None):
261 |         self.proc = start_blktrace_on_bg(self.dev, self.resultpath, trace_filter)
262 | 
263 |     def stop_tracing_and_collecting(self):
264 |         stop_blktrace_on_bg()
265 | 
266 |     def create_event_file_from_blkparse(self):
267 |         if self.do_sort is True:
268 |             rawparser = BlktraceResultInMem(self.sector_size,
269 |                 self.event_file_column_names,
270 |                 self.resultpath, self.to_ftlsim_path,
271 |                 padding_bytes=self.padding_bytes,
272 |                 do_sort=self.do_sort
273 |                 )
274 |             rawparser.create_event_file()
275 | 
276 |         else:
277 |             rawparser = BlktraceResult(self.sector_size,
278 |                 self.event_file_column_names,
279 |                 self.resultpath, self.to_ftlsim_path,
280 |                 padding_bytes=self.padding_bytes,
281 |                 do_sort=self.do_sort
282 |                 )
283 |             rawparser.create_event_file()
284 | 
285 | 
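# A typical BlockTraceManager session, reconstructed from the methods above.
# The paths and column names here are illustrative, not taken from any repo
# config:
#
#   mgr = BlockTraceManager(
#       dev='/dev/loop0',
#       event_file_column_names=['pid', 'operation', 'offset', 'size',
#                                'timestamp', 'pre_wait_time', 'sync'],
#       resultpath='/tmp/blkparse-output.txt',
#       to_ftlsim_path='/tmp/blkparse-events-for-ftlsim.txt',
#       sector_size=512)
#   mgr.start_tracing_and_collecting()
#   # ... run the workload under test ...
#   mgr.stop_tracing_and_collecting()
#   mgr.create_event_file_from_blkparse()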
286 | def start_blktrace_on_bg(dev, resultpath, trace_filter=None):
287 |     prepare_dir_for_path(resultpath)
288 |     # cmd = "sudo blktrace -a write -a read -d {dev} -o - | blkparse -i - > "\
289 |     # cmd = "sudo blktrace -a queue -d {dev} -o - | blkparse -a queue -i - > "\
290 | 
291 |     if trace_filter is None:
292 |         # trace_filter = '-a issue'
293 |         trace_filter = ''
294 |     else:
295 |         # trace_filter is a list of blktrace mask names, turned into -a flags
296 |         trace_filter = ' '.join(['-a ' + mask for mask in trace_filter])
297 | 
297 |     cmd = "sudo blktrace {filtermask} -d {dev} -o - | "\
298 |         "blkparse {filtermask} -i - >> "\
299 |         "{resultpath}".format(dev = dev, resultpath = resultpath,
300 |             filtermask = trace_filter)
301 |     print cmd
302 |     p = subprocess.Popen(cmd, shell=True)
303 |     time.sleep(0.3) # wait to see if there's any immediate error.
304 | 
305 |     if p.poll() is not None:
306 |         raise RuntimeError("tracing failed to start")
307 | 
308 |     return p
309 | 
310 | def stop_blktrace_on_bg():
311 |     shcmd('pkill blkparse', ignore_error=True)
312 |     shcmd('pkill blktrace', ignore_error=True)
313 |     shcmd('sync')
314 | 
315 | def is_data_line(line):
316 |     # matches "devid ... sector_start + nblocks", e.g. "8,0 0 1 0.000000000 440 A W 12912077 + 8"
317 |     match_obj = re.match( r'\d+,\d+.*\d+\s+\+\s+\d+', line)
318 |     if match_obj is None:
319 |         return False
320 |     else:
321 |         return True
322 | 
323 | 
324 | 
--------------------------------------------------------------------------------