├── examples └── models │ ├── config.yml │ ├── mm3.yml │ ├── mm1.yml │ └── arrv.yml ├── src ├── misc.pxd ├── Makefile ├── distributions.pxd ├── tests │ ├── check_lnpdf.py │ ├── check_arrv_dup.py │ ├── check_mcint.py │ ├── check_brent_speed.py │ ├── find_path.py │ └── check_difflist_time.py ├── sampling.pxd ├── pwfun.pxd ├── utils.py ├── qnet.pxd ├── setup.py ├── queues.pxd ├── test_mixture_template.py ├── arrivals.pxd ├── misc.pyx ├── cninq.pxd ├── test_arrivals.py ├── hmm.py ├── convert.py ├── pyglpk.pyx ├── model_search.py ├── simulate.py ├── test_stupidlp.py ├── test_delay_station.py ├── test_hist.py ├── lrt.py ├── subset.py ├── test_likdelta.py ├── test_ninqueue.py ├── test_ivltreap.py ├── batch_arrival_queue.py ├── test_modelmgmt.py ├── test_qnetu.py ├── randomkit.h ├── test_qstats.py ├── test_cninq.py ├── test_sem1.py ├── netutils.py ├── ninqueue.py ├── cdist.h ├── test_distributions.py ├── qstats.py ├── cninq.pyx ├── test_ps.py ├── gibbs.py ├── modelmgmt.py ├── stupidlp.py └── randomkit.c ├── LICENSE ├── .gitignore └── README.md /examples/models/config.yml: -------------------------------------------------------------------------------- 1 | debug_integrate: 1 2 | -------------------------------------------------------------------------------- /src/misc.pxd: -------------------------------------------------------------------------------- 1 | cdef double _integrate (fn, double a, double b) 2 | cdef double _mcint (fn, double a, double b, int max_n) 3 | -------------------------------------------------------------------------------- /src/Makefile: -------------------------------------------------------------------------------- 1 | SOURCES= pwfun.pyx sampling.pyx qnet.pyx distributions.pyx queues.pyx arrivals.pyx 2 | 3 | all: gqnet.so 4 | 5 | 6 | gqnet.so: $(SOURCES) 7 | python setup.py build_ext --inplace 8 | 9 | clean: 10 | rm $(SOURCES:.pyx=.c) $(SOURCES:.pyx=.so) 11 | -------------------------------------------------------------------------------- 
/src/distributions.pxd: -------------------------------------------------------------------------------- 1 | cdef class Distribution: 2 | cdef double _lpdf (self, double x) 3 | cdef double _mean (self) 4 | cdef object _sample (self, int N) 5 | cdef _estimate (self, data) 6 | cdef double _dx_lpdf (self, double x) 7 | cdef double _quantile (self, double p) 8 | 9 | -------------------------------------------------------------------------------- /src/tests/check_lnpdf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import distributions 4 | import misc 5 | import mytime 6 | 7 | ln = distributions.LogNormal (0, 1) 8 | 9 | N = 10000000 10 | tmr = mytime.timeit() 11 | allint = [] 12 | 13 | sample = ln.sample(N) 14 | for i in xrange(N): 15 | ln.lpdf (sample[i]) 16 | 17 | elapsed = tmr.total("Time for %d evaluations" % N) 18 | print "Evaluations per second = %.4f" % (N / elapsed) 19 | -------------------------------------------------------------------------------- /examples/models/mm3.yml: -------------------------------------------------------------------------------- 1 | states: 2 | - name: INITIAL 3 | queues: [ INITIAL ] 4 | successors: [ TIER1 ] 5 | initial: TRUE 6 | - name: TIER1 7 | queues: [ WEB1 ] 8 | successors: [ TIER2 ] 9 | - name: TIER2 10 | queues: [ APP1 ] 11 | queues: 12 | - { name: INITIAL, service: [M, 10.0] } 13 | - { name: WEB1, processors: 3, service: [M, 30.0] } 14 | - { name: APP1, processors: 4, service: [M, 30.0] } 15 | -------------------------------------------------------------------------------- /examples/models/mm1.yml: -------------------------------------------------------------------------------- 1 | states: 2 | - name: INITIAL 3 | queues: [ INITIAL ] 4 | successors: [ TIER1 ] 5 | initial: TRUE 6 | - name: TIER1 7 | queues: [ WEB1, WEB2 ] 8 | successors: [ TIER2 ] 9 | - name: TIER2 10 | queues: [ APP1 ] 11 | queues: 12 | - { name: INITIAL, service: [M, 10.0] } 13 | - { name: 
WEB1, service: [M, 3.0] } 14 | - { name: WEB2, service: [M, 3.0] } 15 | - { name: APP1, service: [M, 8.0] } 16 | -------------------------------------------------------------------------------- /src/sampling.pxd: -------------------------------------------------------------------------------- 1 | from pwfun cimport Pwfun 2 | 3 | cdef extern from "randomkit.h": 4 | ctypedef struct rk_state 5 | 6 | cdef double _slice (double x0, g, double lower, double upper) except * 7 | cdef double _slice_pwfun (Pwfun fn, double x_initial) except * 8 | 9 | cdef int _roll_die_unnorm (p) 10 | 11 | cdef rk_state *_state() 12 | 13 | cdef double rand () 14 | cdef double uniform (double L, double U) 15 | cdef double exponential (double scale) 16 | 17 | -------------------------------------------------------------------------------- /src/tests/check_arrv_dup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import qnet 4 | import qnetu 5 | import sys 6 | import mytime 7 | 8 | f = open (sys.argv[1]) 9 | net = qnetu.qnet_from_text(f) 10 | 11 | arrv = net.sample(1000) 12 | 13 | N = 250 14 | tmr = mytime.timeit() 15 | for i in xrange(N): 16 | dup = arrv.duplicate() 17 | 18 | elapsed = tmr.total("Time for %d duplications" % N) 19 | print "Dups per second = %.4f" % (N / elapsed) 20 | print "Event dups per second = %.4f" % (N*arrv.num_events() / elapsed) 21 | 22 | -------------------------------------------------------------------------------- /src/pwfun.pxd: -------------------------------------------------------------------------------- 1 | cdef class Pwfun: 2 | cdef readonly object xs 3 | cdef readonly object derivs 4 | cdef readonly object fns 5 | cdef int N 6 | 7 | cdef object OOR 8 | 9 | cdef double value (self, double x) 10 | cdef int _find_knot (self, double x) 11 | cdef double L (self) 12 | cdef double U (self) 13 | 14 | 15 | 16 | cdef class Pwlin: 17 | cdef object xs # Points at which slope changes xs[0] is lower bound 
xs[-1] upper 18 | cdef object x_tan # Points at which the tangent is taken. 19 | cdef object heights # f(x_tan) 20 | cdef object derivs # df(x_tan) 21 | 22 | cdef object f 23 | cdef object fprime 24 | 25 | cdef double value (self, double x) 26 | cdef c_argmin (self, double x0) 27 | -------------------------------------------------------------------------------- /src/utils.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | def as_str (l): 4 | return ' '.join (map(str, l)) 5 | 6 | def randelt (l): 7 | return l[numpy.random.randint(len(l))] 8 | 9 | def append (*l): 10 | result = [] 11 | for x in l: result.append (x) 12 | return result 13 | 14 | def roll_die (p): 15 | if sum(p) > 1 + 1e-12: 16 | raise "Huh? p: %s" % (p,) 17 | # Weird numpy thing 18 | if sum(p) > 1 - 1e-10: 19 | for i in range(len(p)): 20 | if p[i] > 1e-10: 21 | p[i] -= 1e-10 22 | break 23 | a = numpy.random.multinomial (1, p) 24 | return int(numpy.where(a==1)[0]) 25 | 26 | def delete_all (l, elt): 27 | ndel = 0 28 | for x in l: 29 | if x == elt: ndel+=1 30 | nrem = len(l) - ndel 31 | newl = [None] * nrem 32 | i = 0 33 | for x in l: 34 | if x != elt: 35 | newl[i] = x 36 | i += 1 37 | return newl 38 | -------------------------------------------------------------------------------- /src/qnet.pxd: -------------------------------------------------------------------------------- 1 | cdef class Qnet 2 | 3 | from arrivals cimport Arrivals 4 | from arrivals cimport Event 5 | 6 | cdef class Qnet: 7 | 8 | cdef readonly object queues # List of Queues 9 | cdef readonly object templates # List of all object that have parameters 10 | cdef readonly object fsm # HMM object 11 | 12 | cdef readonly object universe # for auxiliary variables :: maps names to domains 13 | 14 | cdef readonly object qname2id 15 | cdef readonly object sname2id 16 | cdef readonly int eidx 17 | 18 | # private 19 | cdef int gibbs_resample_final (self, Arrivals arrv, Event evt) except -1 
20 | cdef int gibbs_resample_pair (self, Arrivals arrv, Event e0, Event e1) except -1 21 | 22 | # private, used by slice sampler 23 | cdef double determine_upper (self, Event evt) 24 | cdef object dfn_range (self, Arrivals arrv, Event evt) 25 | cpdef double log_prob (self, Arrivals arrv) except * 26 | 27 | -------------------------------------------------------------------------------- /src/tests/check_mcint.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from scipy.optimize import * 4 | from scipy.integrate import * 5 | import distributions 6 | import misc 7 | import mytime 8 | import numpy 9 | 10 | ln = distributions.LogNormal (0, 1) 11 | 12 | N = 100 13 | tmr = mytime.timeit() 14 | allint = [] 15 | 16 | for i in xrange(N): 17 | allint.append (misc.mcint (ln.lpdf, 0, 3)) 18 | 19 | elapsed = tmr.total("Time for %d integrations" % N) 20 | print "Integrations per second = %.4f" % (N / elapsed) 21 | 22 | print "Integrals: mean %.4f sd %.4f" % (numpy.mean(allint), numpy.std(allint)) 23 | print "From quad:", quad(ln.lpdf, 0, 3) 24 | 25 | 26 | ln2 = distributions.LogNormal (-3.75, 1) 27 | 28 | N = 10000 29 | tmr = mytime.timeit() 30 | for i in xrange(N): 31 | integral = quad (ln2.lpdf, 0, 3) 32 | 33 | print "integral was %.10f" % integral[0] 34 | 35 | elapsed = tmr.total("Time for %d integrations" % N) 36 | print "Integrations per second = %.4f" % (N / elapsed) 37 | -------------------------------------------------------------------------------- /src/tests/check_brent_speed.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from scipy.optimize import * 4 | from scipy.integrate import * 5 | import distributions 6 | import mytime 7 | 8 | ln = distributions.LogNormal (0, 1) 9 | 10 | N = 100000 11 | tmr = mytime.timeit() 12 | for i in xrange(N): 13 | root = brenth (ln.dx_lpdf, 0.1, 25) 14 | 15 | print "root was %.10f" % root 16 | 17 | elapsed = 
tmr.total("Time for %d root-findings" % N) 18 | print "Roots per second = %.4f" % (N / elapsed) 19 | 20 | N = 10000 21 | tmr = mytime.timeit() 22 | for i in xrange(N): 23 | the_max = bisect (ln.dx_lpdf, 0.1,25) 24 | 25 | print "max was %.10f" % the_max 26 | 27 | elapsed = tmr.total("Time for %d maxes" % N) 28 | print "Maxes per second = %.4f" % (N / elapsed) 29 | 30 | 31 | N = 10000 32 | tmr = mytime.timeit() 33 | for i in xrange(N): 34 | integral = quad (ln.lpdf, 0.1, 3) 35 | 36 | print "integral was %.10f" % integral[0] 37 | 38 | elapsed = tmr.total("Time for %d integrations" % N) 39 | print "Integrations per second = %.4f" % (N / elapsed) 40 | 41 | -------------------------------------------------------------------------------- /src/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | import os 5 | 6 | incdirs=["/opt/local/include/", "/home/eecs/casutton/include", "/exports/home/csutton/usr/include"] 7 | libdirs=["/opt/local/lib/", "/home/eecs/casutton/lib", "/exports/home/csutton/usr/lib"] 8 | 9 | incdirs = [ l for l in incdirs if os.path.exists(l) ] 10 | libdirs = [ l for l in libdirs if os.path.exists(l) ] 11 | 12 | cflags = [ ] 13 | 14 | setup( 15 | name = "dist", 16 | ext_modules=[ 17 | Extension("cninq", ["cninq.pyx"], extra_compile_args=cflags), 18 | Extension("misc", ["misc.pyx"], extra_compile_args=cflags), 19 | Extension("pwfun", ["pwfun.pyx"], extra_compile_args=cflags), 20 | Extension("sampling", ["sampling.pyx", "randomkit.c", "cdist.c"], extra_compile_args=cflags), 21 | Extension("arrivals", ["arrivals.pyx"], extra_compile_args=cflags), 22 | Extension("queues", ["queues.pyx"], extra_compile_args=cflags), 23 | Extension("qnet", ["qnet.pyx"], extra_compile_args=cflags), 24 | Extension("distributions", ["distributions.pyx"], extra_compile_args=cflags), 25 | ], 26 | cmdclass = 
{'build_ext': build_ext} 27 | ) 28 | -------------------------------------------------------------------------------- /src/tests/find_path.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import sys 4 | import re 5 | 6 | def main(): 7 | dotf,start,to = sys.argv[1:] 8 | 9 | graph = {} 10 | 11 | exp = re.compile ("(\w+) -> (\w+)") 12 | f = open (dotf) 13 | for line in f: 14 | match = exp.search(line) 15 | if match: 16 | add_arc (graph, match.group(1), match.group(2)) 17 | f.close() 18 | 19 | path = find_path (graph, start, to, [], {}) 20 | if path: 21 | path.reverse() 22 | path.insert (0, start) 23 | path.insert (-1, to) 24 | print path 25 | 26 | def add_arc (graph, start, to): 27 | if not start in graph: 28 | graph[start] = [] 29 | if not to in graph: 30 | graph[to] = [] 31 | graph[start].append (to) 32 | 33 | def find_path (graph, start, to, the_path, traversed): 34 | for child in graph[start]: 35 | if child == to: 36 | return the_path 37 | if not child in traversed: 38 | new_path = [child] 39 | new_path.extend (the_path) 40 | new_traversed = dict(traversed) 41 | new_traversed[start] = True 42 | path = find_path (graph, child, to, new_path, new_traversed) 43 | if path: return path 44 | return None 45 | 46 | main() 47 | -------------------------------------------------------------------------------- /src/queues.pxd: -------------------------------------------------------------------------------- 1 | cdef class Queue 2 | 3 | from distributions cimport Distribution 4 | from pwfun cimport Pwfun 5 | from arrivals cimport Arrivals 6 | from arrivals cimport Event 7 | 8 | 9 | cdef Pwfun _pair_proposal (Arrivals arrv, Event e0, Event e1) 10 | cdef Pwfun _departure_proposal (Arrivals arrv, Event e0) 11 | 12 | cdef class FactorTemplate: 13 | cdef object estimate (self, evtl) 14 | cdef object serviceLikelihood (self, Event evt) 15 | cdef object serviceDerivative (self, Event evt) 16 | cdef void 
resample_auxiliaries (self, Event evt) 17 | cdef void initialize_auxiliaries (self, Event evt) 18 | 19 | cdef class Queue: 20 | cdef readonly object name 21 | cdef readonly FactorTemplate service 22 | 23 | cdef Pwfun arrivalLik (self, Arrivals arrv, Event evt) 24 | 25 | cdef Pwfun departureLik (self, Arrivals arrv, Event evt) 26 | 27 | cdef double allEventLik (self, Arrivals arrv) except * 28 | 29 | cdef diffListForArrival (self, Event evt, double d) 30 | cdef diffListForDeparture (self, Event evt, double d) 31 | cpdef double likelihoodDelta (self, Arrivals arrv, diffList) except * 32 | 33 | cpdef recompute_service (self, Event e) 34 | cpdef recompute_departure (self, Arrivals arrv, Event e) 35 | cdef double departure_of_service (self, Event e, double s) 36 | 37 | cpdef double service_lpdf (self, Event e) 38 | -------------------------------------------------------------------------------- /src/test_mixture_template.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import qnet 3 | import qnetu 4 | import numpy 5 | import numpy.random 6 | import mytime 7 | import netutils 8 | import estimation 9 | import yaml 10 | import arrivals 11 | 12 | import qstats 13 | 14 | import sys 15 | 16 | class TestMixtureTemplate (unittest.TestCase): 17 | 18 | def test_net_output (self): 19 | print self.net1.as_yaml() 20 | 21 | def test_mean (self): 22 | q1 = self.net1.queue_by_name ("Q") 23 | self.assertAlmostEqual(2.662180, q1.service.mean(), 5) 24 | 25 | def test_std (self): 26 | q1 = self.net1.queue_by_name ("Q") 27 | self.assertAlmostEqual(2.813840, q1.service.std(), 5) 28 | 29 | def setUp (self): 30 | self.net1 = qnetu.qnet_from_text (TestMixtureTemplate.net1_text) 31 | 32 | net1_text = """ 33 | states: 34 | - name: INITIAL 35 | queues: [ INITIAL ] 36 | successors: [ TIER1 ] 37 | - name: TIER1 38 | queues: [ Q ] 39 | queues: 40 | - { name: INITIAL, service: [M, 10.0] } 41 | - { name: Q, service: [ MIX, 0.75, [M, 3.0], 
0.25, [LN, 0.0, 1.0] ] } 42 | """ 43 | 44 | 45 | def main(): 46 | if len(sys.argv) > 1: 47 | for test_name in sys.argv[1:]: 48 | suite = unittest.TestLoader().loadTestsFromName("test_mixture_template.TestMixtureTemplate.%s" % (test_name,)) 49 | unittest.TextTestRunner(verbosity=2).run(suite) 50 | else: 51 | unittest.main() 52 | 53 | if __name__ == "__main__": 54 | main() 55 | 56 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, Charles Sutton 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /src/tests/check_difflist_time.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Timing test about how long it takes to generate a diff list 3 | 4 | import qnetu 5 | import sampling 6 | import mytime 7 | 8 | net_yaml = """ 9 | states: 10 | - name: INITIAL 11 | queues: [INITIAL] 12 | successors: [TIER1] 13 | initial: TRUE 14 | - name: TIER1 15 | queues: [ TIER1_0 , TIER1_1 ] 16 | successors: [TIER2] 17 | - name: TIER2 18 | queues: [ TIER2_0 , TIER2_1 ] 19 | queues: 20 | - { name: INITIAL, service: [G, 2, 0.5 ] } 21 | - { name: TIER1_0, processors: 3, service: [LN, 2.25, 0.2] } 22 | - { name: TIER1_1, processors: 3, service: [LN, 2.25, 0.2] } 23 | - { name: TIER2_0, processors: 2, service: [LN, 1.75, 0.2] } 24 | - { name: TIER2_1, processors: 2, service: [LN, 1.75, 0.2] } 25 | """ 26 | 27 | def main (): 28 | sampling.set_seed (13372) 29 | 30 | net = qnetu.qnet_from_text (net_yaml) 31 | arrv = net.sample (1000) 32 | subset = arrv.subset_by_task (0.0) 33 | ne = arrv.num_events() 34 | 35 | tmr = mytime.timeit() 36 | N = 1000 37 | evts = arrv.all_events() 38 | 39 | tot_size = 0 40 | nl = 0 41 | 42 | for rep in range(N): 43 | evt = sampling.random_elt (evts) 44 | q = evt.queue() 45 | if evt.a != 0: 46 | dl = q.pyDiffListForArrival (evt, evt.a + 0.01) 47 | tot_size += len(dl) 48 | nl += 1 49 
| 50 | dl = q.pyDiffListForDeparture (evt, evt.d + 0.01) 51 | tot_size += len(dl) 52 | nl += 1 53 | 54 | elapsed = tmr.total ("Time for %d diff lists" % N) 55 | 56 | print "Diff lists per second = %.4f" % (N / elapsed) 57 | print "Average size = %.4f" % (tot_size / float(nl)) 58 | 59 | if __name__ == "__main__": 60 | main() 61 | 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | 103 | # cython stuff 104 | *.o 105 | -------------------------------------------------------------------------------- /examples/models/arrv.yml: -------------------------------------------------------------------------------- 1 | !Arrivals 2 | events: 3 | - !Event {arrival: 0.0, departure: 5.0573782176061455, obs: 1, queue: INITIAL, state: 0, 4 | task: 0} 5 | - !Event {arrival: 0.0, departure: 9.5433894717630476, obs: 1, queue: INITIAL, state: 0, 6 | task: 1} 7 | - !Event {arrival: 0.0, departure: 10.100174658936853, obs: 1, queue: INITIAL, state: 0, 8 | task: 2} 9 | - !Event {arrival: 0.0, departure: 15.399120173836607, obs: 1, queue: INITIAL, state: 0, 10 | task: 3} 11 | - !Event {arrival: 0.0, departure: 20.827496579245874, obs: 1, queue: INITIAL, state: 0, 12 | task: 4} 13 | - !Event {arrival: 5.0573782176061455, departure: 6.1519936034830538, obs: 1, queue: WEB2, 14 | state: 1, task: 0} 15 | - !Event 
{arrival: 6.1519936034830538, departure: 12.941934313997622, obs: 1, queue: APP1, 16 | state: 2, task: 0} 17 | - !Event {arrival: 9.5433894717630476, departure: 16.177979244871636, obs: 1, queue: WEB2, 18 | state: 1, task: 1} 19 | - !Event {arrival: 10.100174658936853, departure: 17.966515520657275, obs: 1, queue: WEB2, 20 | state: 1, task: 2} 21 | - !Event {arrival: 15.399120173836607, departure: 19.427648828212721, obs: 1, queue: WEB2, 22 | state: 1, task: 3} 23 | - !Event {arrival: 16.177979244871636, departure: 22.911136513937258, obs: 1, queue: APP1, 24 | state: 2, task: 1} 25 | - !Event {arrival: 17.966515520657275, departure: 23.089052981871603, obs: 1, queue: APP1, 26 | state: 2, task: 2} 27 | - !Event {arrival: 19.427648828212721, departure: 28.781177557554468, obs: 1, queue: APP1, 28 | state: 2, task: 3} 29 | - !Event {arrival: 20.827496579245874, departure: 24.478559020698714, obs: 1, queue: WEB1, 30 | state: 1, task: 4} 31 | - !Event {arrival: 24.478559020698714, departure: 33.626615314865091, obs: 1, queue: APP1, 32 | state: 2, task: 4} 33 | -------------------------------------------------------------------------------- /src/arrivals.pxd: -------------------------------------------------------------------------------- 1 | cdef class Event 2 | cdef class Arrivals 3 | 4 | from queues cimport Queue 5 | from qnet cimport Qnet 6 | cimport cython 7 | 8 | cdef class Vars: 9 | cdef object map 10 | 11 | cdef class Event: 12 | cdef readonly int eid 13 | cdef readonly int tid 14 | cdef Queue q 15 | cdef readonly int qid 16 | cdef readonly int state 17 | cdef public double a 18 | cdef public double s 19 | cdef public double c 20 | cdef public double d 21 | cdef public int obs_a 22 | cdef public int obs_d 23 | cdef object prev_byt 24 | cdef object next_byt 25 | cdef object prev_byq 26 | cdef object next_byq 27 | cdef object arrv 28 | 29 | # For G/G/k queues 30 | cdef public int proc 31 | cdef int num_proc 32 | cdef double d_prev 33 | cdef double* d_proc_prev 34 | 
35 | # used by mixture distributions, etc. 36 | cdef readonly Vars auxiliaries 37 | 38 | cdef set_service (self, double v) 39 | cdef set_departure (self, double d) 40 | 41 | cpdef Event duplicate (self) 42 | cdef Event dup_with_structure (self) 43 | cdef int update_from_solution (self, lp_soln) except -1 44 | cdef copy_dtimes (self, Event other) 45 | 46 | #private 47 | cdef init_dtimes (self) 48 | cdef Event _dup_inner (self, int dup_aux) 49 | 50 | @cython.final 51 | cdef class Arrivals: 52 | 53 | cdef Qnet net 54 | cdef object byq 55 | cdef object final_byq 56 | cdef object byt 57 | cdef object events 58 | cdef object _initial_events 59 | cdef object queue_orders 60 | cdef object ninq 61 | cdef object inq 62 | cdef object a2e_cache 63 | cdef object c2e_cache 64 | cdef int cap 65 | 66 | cpdef inline Event event (self, int eid) 67 | cpdef inline int num_events (self) 68 | 69 | cdef _is_initial (self, Event evt) 70 | cdef _is_final (self, Event evt) 71 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # bayes-qnet 2 | 3 | Code for Bayesian inference for queueing networks with incomplete data. 4 | 5 | The statistical methods are described in this paper: 6 | 7 | C. Sutton and M. I. Jordan. 8 | Bayesian inference in queueing networks. 9 | Annals of Applied Statistics, 5(1):254–282, 2011. 10 | https://arxiv.org/abs/1001.3355 11 | 12 | ## System Requirements 13 | 14 | This code requires Python 2 and scipy. Also requires 15 | yaml.py 16 | 17 | Significant portions 18 | of the time critical portions of the code 19 | are written in Cython. The C translations 20 | of the Cython modules are included in the git repo. 21 | 22 | ## Installation 23 | 24 | You will need to create the C shared libraries 25 | to use the Python code. 
If you have Cython properly 26 | installed, this can be done using 27 | 28 | ``` 29 | cd src/ 30 | make 31 | ``` 32 | 33 | ## Getting Started 34 | 35 | Of course, there is no real documentation, 36 | but I have created a Jupyter notebook that walks 37 | through an example usage. This is in the `examples/` 38 | directory. 39 | 40 | I've saved the notebook both in native Jupyter format, 41 | so you can run the code on your machine, and as a static 42 | HTML page, in case you do not have Jupyter installed. 43 | 44 | Hopefully this is enough to get started. 45 | 46 | ## Test Suite 47 | 48 | (unfortunately this part is from memory) 49 | 50 | To test if you have compiled correctly, you can run the unit 51 | tests. These are all in the `src/` directory and their 52 | names begin with `test_`. Why I decided not to put them 53 | in a separate directory, who knows. 54 | 55 | The most important one is test_qnet.py. So just 56 | 57 | ``` 58 | cd src/ 59 | python2 test_qnet.py 60 | ``` 61 | 62 | should work. A dozen of the tests will fail because I just 63 | removed some unused (but moderately tested) functionality 64 | to make the package a lot easier to install.
65 | 66 | 67 | -------------------------------------------------------------------------------- /src/misc.pyx: -------------------------------------------------------------------------------- 1 | from numpy import random 2 | import numpy 3 | 4 | from scipy import integrate 5 | 6 | DEBUG = 0 7 | cdef int counter = 0 8 | 9 | def mcint (fn, a, b, iter=100): 10 | return _mcint (fn, a, b, iter) 11 | 12 | def integrate_by_points (fn, points): 13 | total = 0 14 | for l,u in zip(points, points[1:]): 15 | total += integrate.quad (fn, l, u, full_output=DEBUG)[0] 16 | return total 17 | 18 | # Wrapper over scipy quadratue method 19 | cdef double _integrate (fn, double a, double b): 20 | qtup = integrate.quad (fn, a, b, limit=1000, full_output=DEBUG) 21 | value = qtup[0] 22 | 23 | if DEBUG: 24 | if len(qtup) > 3: 25 | print "Integration error:" 26 | print qtup[2:] 27 | debug_write_function (fn, a, b) 28 | 29 | if not numpy.isnan(value): 30 | print qtup 31 | return value 32 | else: 33 | # last resort 34 | print "Warning: Using last-resort Monte Carlo integral" 35 | return _mcint (fn, a, b, 100) 36 | 37 | cdef double _mcint (fn, double a, double b, int max_n): 38 | cdef double the_sum = 0 39 | cdef int N 40 | 41 | for N from 1 <= N < max_n: 42 | x = random.uniform(a,b) 43 | the_sum += fn(x) 44 | 45 | return (b-a)*the_sum/N 46 | 47 | cdef void debug_write_function (fn, double a, double b): 48 | global counter 49 | 50 | if numpy.isinf(a) or numpy.isinf(b): 51 | print "Warning: can't write debugging info for range %f ... 
%f" % (a,b) 52 | return 53 | cdef int N = 1000 54 | x = a 55 | eps = (b-a)/N 56 | 57 | print counter 58 | f = open ("misc_debug_%d.txt" % counter, "w") 59 | for i from 0 <= i < N: 60 | f.write ("%.17f %.17f\n" % (x, fn(x))) 61 | x += eps 62 | f.write ("%.17f %.17f\n" % (b, fn(b))) 63 | f.close () 64 | 65 | counter += 1 66 | -------------------------------------------------------------------------------- /src/cninq.pxd: -------------------------------------------------------------------------------- 1 | 2 | cdef class SortedDouble 3 | cdef class OverlayIterator 4 | cdef class Overlay 5 | 6 | cdef class Ninq: 7 | 8 | cdef SortedDouble a 9 | cdef SortedDouble d 10 | cdef int Nmax 11 | 12 | cpdef add_birth_death (self, double t_a, double t_d) 13 | cpdef move_arrival (self, double a0, double a1) 14 | cpdef move_departure (self, double a0, double a1) 15 | cpdef int N (self, double t) 16 | cpdef OverlayIterator interval_iterator (self, double l, double r) 17 | cpdef int contains_arrival (self, double v) 18 | cpdef int contains_departure (self, double v) 19 | 20 | 21 | cdef class Overlay: 22 | 23 | cdef Ninq inner 24 | cdef SortedDouble plus 25 | cdef SortedDouble minus 26 | 27 | cpdef move_arrival (self, double a0, double a1) 28 | cpdef move_departure (self, double a0, double a1) 29 | cpdef OverlayIterator interval_iterator (self, double l, double r) 30 | cdef OverlayIterator _interval_iterator (self, double l, double r) 31 | cpdef int N (self, double t) 32 | 33 | 34 | cdef class SortedDouble: 35 | cdef double *val 36 | cdef int N 37 | cdef int capacity 38 | 39 | cpdef double item (self, int i) 40 | cpdef add_time (self, double x) 41 | cpdef move_time (self, double x0, double x1) 42 | cpdef int num_lte (self, v) 43 | cdef int bisect (self, double v) 44 | 45 | 46 | 47 | cdef class OverlayIterator: 48 | 49 | cdef SortedDouble a 50 | cdef SortedDouble d 51 | cdef SortedDouble plus 52 | cdef SortedDouble minus 53 | 54 | cdef double r 55 | 56 | cdef int i_a 57 | cdef int i_d 
58 | cdef int i_plus 59 | cdef int i_minus 60 | cdef int is_a 61 | cdef int is_d 62 | cdef int is_plus 63 | cdef int is_minus 64 | 65 | cdef double t0 66 | cdef double t1 67 | 68 | cpdef int has_next (self) 69 | cpdef double T0 (self) 70 | cpdef double T1 (self) 71 | cpdef int N (self) 72 | cpdef advance (self) 73 | -------------------------------------------------------------------------------- /src/test_arrivals.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import qnet 3 | import qnetu 4 | import numpy 5 | import numpy.random 6 | import mytime 7 | import netutils 8 | import estimation 9 | import yaml 10 | import arrivals 11 | import sampling 12 | 13 | import qstats 14 | 15 | import sys 16 | 17 | class TestArrivals (unittest.TestCase): 18 | 19 | def test_subset_by_time (self): 20 | net = self.mm1 21 | sampling.set_seed (2324) 22 | arrv = net.sample (10) 23 | maxTime = 50 24 | 25 | def task_in_time (arrv, tid): 26 | evtl = arrv.events_of_task (tid) 27 | return min(e.d for e in evtl) < maxTime 28 | 29 | subset = arrv.subset_by_task_fn (task_in_time) 30 | 31 | for e in subset.initial_events(): 32 | self.assertTrue (e.d < maxTime) 33 | old_e = arrv.event (e.eid) 34 | self.assertTrue (old_e is not None) 35 | self.assertTrue (e.a == old_e.a) 36 | self.assertTrue (e.d == old_e.d) 37 | for e in arrv.initial_events(): 38 | if e.d < maxTime: 39 | self.assertTrue (subset.event(e.eid) is not None) 40 | 41 | 42 | mm1_text = """ 43 | states: 44 | - name: INITIAL 45 | queues: [ INITIAL ] 46 | successors: [ TIER1 ] 47 | initial: TRUE 48 | - name: TIER1 49 | queues: [ WEB1, WEB2 ] 50 | queues: 51 | - { name: INITIAL, service: [M, 10.0] } 52 | - { name: WEB1, service: [M, 3.0] } 53 | - { name: WEB2, service: [M, 3.0] } 54 | """ 55 | 56 | 57 | def setUp (self): 58 | self.mm1 = qnetu.qnet_from_text (TestArrivals.mm1_text) 59 | 60 | def main(): 61 | if len(sys.argv) > 1: 62 | for test_name in sys.argv[1:]: 63 | suite = 
from utils import *

import numpy


class HMM:
    """Simple HMM class. States and observations all integers.
    State 0 is the initial state; a state with (numerically) no outgoing
    transition mass is treated as final."""

    def __init__ (self, a, o):
        """A: (ns x ns) transition matrix.  O: (ns x n_obs) emission matrix."""
        self.a = a
        self.o = o
        self.ns = a.shape[0]
        self.n_obs = o.shape[1]
        assert a.ndim == 2
        assert o.ndim == 2
        assert a.shape[0] == a.shape[1]
        assert a.shape[0] == o.shape[0]

    def initial_state (self):
        return 0

    def is_final (self, state):
        # Final == no outgoing probability (within numerical tolerance).
        return sum(self.a[state,:]) < 1e-10

    def num_states (self): return self.ns

    def successors (self, sid):
        return set([ i for i in xrange(self.ns) if self.a[sid,i] > 0 ])

    def possible_observations (self, sid):
        return set([ i for i in xrange(self.n_obs) if self.o[sid,i] > 0 ])

    def sample_sequence (self):
        """Sample one state/observation sequence as a list of
        (state, observation) pairs.  The initial state is not included."""
        obs = []
        s = self.initial_state()
        # BUG FIX: the condition was previously "while self.is_final(s)",
        # which is inverted -- the loop never executed.  Walk until a
        # final state is reached.
        while not self.is_final(s):
            s_next = roll_die (self.a[s,:])
            obs_next = self.sample_one_obs (s_next)
            obs.append ((s_next, obs_next))
            s = s_next
        return obs

    def sample (self, n):
        """Sample N independent sequences (see sample_sequence)."""
        # BUG FIX: this definition previously shadowed a zero-argument
        # sample(), which it then tried to call recursively with no
        # argument (a TypeError).  The single-sequence sampler is now
        # named sample_sequence; the effective public interface
        # sample(n) is unchanged.
        return [ self.sample_sequence() for i in xrange(n) ]

    def sample_one_obs (self, state):
        return roll_die (self.o[state,:])

    def sample_state (self, state):
        """Sample a successor of STATE, or None if STATE is final."""
        if self.is_final(state):
            return None
        else:
            return roll_die (self.a[state,:])

    def conditional_state_dist (self, s_prev, s_next):
        """Distribution over the current state given the previous and next
        states.  Pass a negative sid to leave either side unconstrained."""
        if s_prev >= 0:
            d1 = self.a[s_prev,:]
        else:
            # BUG FIX: the unconstrained fallbacks were Python lists
            # ([1] * self.ns); list * list raises TypeError below when
            # both sids are negative.  Use numpy arrays instead.
            d1 = numpy.ones(self.ns)

        if s_next >= 0:
            d2 = self.a[:,s_next]
        else:
            d2 = numpy.ones(self.ns)

        prod = d1*d2
        return prod / sum(prod)
SINGLE and qnetu.read_from_table (net, options.inf))) 58 | 59 | if out_format == MULTI: qnetu.write_multif_to_prefix (options.outf, arrv) 60 | elif out_format == SINGLE: qnetu.write_to_table (options.outf, arrv) 61 | elif out_format == TASK: qnetu.write_tasks (options.outf, arrv) 62 | elif out_format == NQ: write_nq (options.outf, options.m2nq, arrv) 63 | 64 | def write_nq (outf, qi, arrv): 65 | f = open (outf,"w") 66 | f.write ("T EVT N\n") 67 | for t,N,evt in qstats.all_qsize_for_qid (arrv, qi): 68 | f.write ("%.4f %d %d\n" % (t,evt.eid,N)) 69 | f.close() 70 | 71 | if __name__ == "__main__": 72 | main() 73 | -------------------------------------------------------------------------------- /src/pyglpk.pyx: -------------------------------------------------------------------------------- 1 | # Pyrex binding to the GLPK library 2 | # Designed to be simple and efficient 3 | 4 | cdef extern from "glpk.h": 5 | 6 | ctypedef struct glp_prob 7 | 8 | glp_prob *lpx_read_model(char *model, char *data, char *output) 9 | int lpx_simplex(glp_prob *lp) 10 | 11 | int lpx_get_status(glp_prob *lp) 12 | 13 | int lpx_get_num_cols(glp_prob *lp) 14 | char *lpx_get_col_name(glp_prob *lp, int j) 15 | double lpx_get_col_prim(glp_prob *lp, int j) 16 | 17 | enum: 18 | LPX_E_OK = 200 # /* success */ 19 | LPX_E_EMPTY = 201 # /* empty problem */ 20 | LPX_E_BADB = 202 # /* invalid initial basis */ 21 | LPX_E_INFEAS = 203 # /* infeasible initial solution */ 22 | LPX_E_FAULT = 204 # /* unable to start the search */ 23 | LPX_E_OBJLL = 205 # /* objective lower limit reached */ 24 | LPX_E_OBJUL = 206 # /* objective upper limit reached */ 25 | LPX_E_ITLIM = 207 # /* iterations limit exhausted */ 26 | LPX_E_TMLIM = 208 # /* time limit exhausted */ 27 | LPX_E_NOFEAS = 209 # /* no feasible solution */ 28 | LPX_E_INSTAB = 210 # /* numerical instability */ 29 | LPX_E_SING = 211 # /* problems with basis matrix */ 30 | LPX_E_NOCONV = 212 # /* no convergence (interior) */ 31 | LPX_E_NOPFS = 213 # /* no primal 
def solve_lp (fname):
    """Returns a solution to a problem in the GNU MathProb language.
    Uses the simplex method
    FNAME is the name of the problem file.
    Returns a hash where the key is the name of the variable in the file,
    and the value is the value in the solution.
    If an optimal solution was not found, returns None."""
    print "Reading model from %s ... " % fname

    cdef glp_prob *lp
    lp = lpx_read_model (fname, NULL, NULL)
    # BUG FIX: lpx_read_model returns NULL when the model cannot be
    # parsed; calling lpx_simplex on a NULL problem would crash the
    # interpreter.
    if lp == NULL:
        print "Error: could not read model from %s" % fname
        return None

    result = lpx_simplex (lp)
    if result != LPX_E_OK:
        print "Error in solver: %d" % result
        return None

    status = lpx_get_status (lp)
    if status != LPX_OPT and status != LPX_FEAS:
        # BUG FIX: the message previously interpolated RESULT, which is
        # known to be LPX_E_OK on this path; STATUS is the useful code.
        raise Exception ("GLPK error: Solution not good %d" % status)

    print "GLPK solution: result %d status %d" % (result, status)

    sol = dict()

    # Collect the primal value of every column, keyed by its model name.
    nvars = lpx_get_num_cols(lp)
    for i from 1 <= i <= nvars:
        name = lpx_get_col_name (lp, i)
        val = lpx_get_col_prim (lp, i)
        sol[name] = val

    return sol
bottleneck finding.") 21 | parser.add_option("--network", "-n", dest="net0f", 22 | help="YAML file describing network describing expected behavior", metavar="FILE") 23 | parser.add_option("--arrivals", dest="arrv0f", 24 | help="Prefix for multif files describing arrivals", metavar="FILE_PREFIX") 25 | parser.add_option("--queue-text", dest="qtext", help="YAML description of the type of queue you want to try adding to the network.") 26 | parser.add_option("--state", dest="state", help="Name of the state you want to add bottlenecks to.") 27 | parser.add_option("--config", dest="config", help="YAML file containing configuration") 28 | parser.add_option("--seed", dest="seed", type="int", default=12241, help="Integer random seed.") 29 | parser.add_option ("--gibbs-iter", dest="niter", type="int", default=10, 30 | help="Number of Gibbs iteration for each model") 31 | parser.add_option ("--stats-prefix", dest="statsPrefix", 32 | help="Prefix of stats files output.") 33 | parser.add_option ("--output-arrv", dest="outputArrv", action="store_true", 34 | help="If supplied, write arrivals after every interation") 35 | parser.add_option ("--mdlidx", dest="mdlIdx", default=-1, type="int", 36 | help="If supplied, try only alternative model # MDLIDX (0 == base network)") 37 | 38 | (options, args) = parser.parse_args() 39 | 40 | sampling.set_seed (options.seed) 41 | 42 | tmr = mytime.timeit() 43 | 44 | if options.config: 45 | f = open(options.config) 46 | qnetu.read_configuration (f) 47 | f.close() 48 | 49 | if not options.net0f: 50 | parser.error ("No networks specified (use -n)") 51 | sys.exit(1) 52 | 53 | if not options.arrv0f: 54 | parser.error ("No data specified (use --arrivals)") 55 | sys.exit(1) 56 | 57 | if not options.state: 58 | parser.error ("No state specified (use --state)") 59 | sys.exit(1) 60 | 61 | if not options.qtext: 62 | parser.error ("No queue text specified (use --queue-text)") 63 | sys.exit(1) 64 | 65 | f = open(options.net0f) 66 | net0 = qnetu.qnet_from_text 
(f) 67 | f.close() 68 | 69 | arrv0 = qnetu.read_multif_of_prefix (options.arrv0f, net0) 70 | arrv0.validate() 71 | tmr.tick ("Reading time") 72 | 73 | print "Evaluating model", options.mdlIdx 74 | modelmgmt.bottleneck_model_selection ( 75 | net0, options.state, arrv0, options.qtext, 76 | gibbs_iter=options.niter, 77 | mdlIx=options.mdlIdx 78 | ) 79 | 80 | tmr.total ("Total time") 81 | 82 | if __name__ == "__main__": 83 | main() 84 | 85 | -------------------------------------------------------------------------------- /src/simulate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Front end for running the Gibbs sampler 3 | 4 | 5 | import getopt, sys 6 | import numpy.random 7 | 8 | import sampling 9 | import qnetu 10 | import yaml 11 | 12 | def usage(): 13 | print """Usage: 14 | simulate.py 15 | Front-end for simulating a general queueing network. 16 | Options: 17 | --network YAML file containing network description (required) 18 | --num-tasks Number of tasks to simulate (default: 10) 19 | --output-file File to write YAML arrivals to (default: arrv.txt) 20 | --subset-pct If supplied, generate a file containing a sample of this percentage of tasks 21 | --subset-file File to write task sumbset to (default: arrv-sampled.txt). Must supply subset-pct as well. 
22 | --seed Integer random seed""" 23 | 24 | 25 | def main(): 26 | 27 | subsetPct = None 28 | outf = "arrv.yml" 29 | subf = "arrv-sampled.yml" 30 | numTasks = 10 31 | useMultif = False 32 | 33 | try: 34 | opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "network=", "num-tasks=", "output-file=", "subset-pct=", "subset-file=", "seed=", "use-multif=" ]) 35 | except getopt.GetoptError, err: 36 | # print help information and exit: 37 | print str(err) # will print something like "option -a not recognized" 38 | usage() 39 | sys.exit(2) 40 | output = None 41 | verbose = False 42 | for o, a in opts: 43 | if o in ("-h", "--help"): 44 | usage() 45 | sys.exit() 46 | elif o in ("--network"): 47 | netf = a 48 | elif o in ("--num-tasks"): 49 | numTasks = int(a) 50 | elif o in ("--output-file"): 51 | outf = a 52 | elif o in ("--use-multif"): 53 | useMultif = True 54 | elif o in ("--subset-file"): 55 | subf = a 56 | elif o in ("--subset-pct"): 57 | subsetPct = float(a) 58 | if subsetPct <= 0 or subsetPct > 1: 59 | raise Exception ("Invalid --subset-pct %s Must be in 0..1" % a) 60 | elif o in ("--seed"): 61 | sampling.set_seed (int(a)) 62 | else: 63 | print "Error: Can't understand ", o 64 | sys.exit (1) 65 | 66 | f = open(netf) 67 | net = qnetu.qnet_from_text (f) 68 | f.close() 69 | 70 | arrv = net.sample (numTasks) 71 | 72 | write_arrivals (arrv, outf, useMultif) 73 | 74 | if subsetPct: 75 | subset = arrv.subset_by_task (subsetPct, adapt_fn=copy_evt) 76 | write_arrivals (subset, subf, useMultif) 77 | 78 | def write_arrivals (arrv, outf, useMultif): 79 | if useMultif: 80 | statef = open ("%sstate.txt" % outf, "w") 81 | af = open ("%sa.txt" % outf, "w") 82 | df = open ("%sd.txt" % outf, "w") 83 | arrv = qnetu.write_multifile_arrv (arrv, statef, af, df, obs_only=False) 84 | statef.close() 85 | af.close() 86 | df.close() 87 | else: 88 | arrv = net.sample (numTasks) 89 | f = open (outf, 'w') 90 | yaml.dump (arrv, stream=f) 91 | f.close () 92 | 93 | def copy_evt (e_old, e): 94 | 
e.a = e_old.a 95 | e.d = e_old.d 96 | e.s = e_old.s 97 | 98 | if __name__ == "__main__": 99 | main() 100 | 101 | -------------------------------------------------------------------------------- /src/test_stupidlp.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import stupidlp 3 | import cvxopt.solvers 4 | 5 | class TestStupidLP (unittest.TestCase): 6 | 7 | def test_qp1 (self): 8 | qp = stupidlp.QP() 9 | 10 | qp.add_objsq (1.0, "V1") 11 | qp.add_objsq (1.5, "V2") 12 | 13 | qp.add_objlin (3., "V1") 14 | qp.add_objlin (-4., "V2") 15 | 16 | soln = qp.solve() 17 | 18 | self.assertAlmostEquals (soln["V1"], -3., 5) 19 | self.assertAlmostEquals (soln["V2"], 2.6667, 3) 20 | 21 | def test_qp2 (self): 22 | qp = stupidlp.QP() 23 | 24 | # minimize variance of a 25 | qp.add_objsq (-1.0, "P2") 26 | qp.add_objsq_cross (-4.0, "P2", "P3") 27 | qp.add_objlin (1., "P2") 28 | 29 | qp.add_le0 ([-1.0], ["P1"]) 30 | qp.add_le0 ([-1.0], ["P2"]) 31 | qp.add_le0 ([-1.0], ["P3"]) 32 | 33 | qp.add_eq ([1.0, 1.0, 1.0 ], ["P1", "P2", "P3" ], 1.0) 34 | qp.add_eq ([0., 1., 2.], ["P1","P2","P3"], 1./3) 35 | 36 | print qp 37 | 38 | # cvxopt.solvers.options['debug'] = 1 39 | 40 | soln = qp.solve() 41 | 42 | self.assertAlmostEquals (soln['P1'], 0.83333327337120411, 5) 43 | self.assertAlmostEquals (soln['P2'], 1.19924258508573e-07, 5) 44 | self.assertAlmostEquals (soln['P3'], 0.1666666067045374, 5) 45 | 46 | 47 | def test_qp3 (self): 48 | qp = stupidlp.QP() 49 | 50 | # minimize variance of a 51 | qp.add_objsq (-1.0, "P2") 52 | qp.add_objsq_cross (-4.0, "P2", "P3") 53 | qp.add_objlin (1., "P2") 54 | 55 | qp.add_le0 ([-1.0], ["P1"]) 56 | qp.add_le0 ([-1.0], ["P2"]) 57 | qp.add_le0 ([-1.0], ["P3"]) 58 | 59 | qp.add_eq ([1.0, 1.0, 1.0 ], ["P1", "P2", "P3" ], 1.0) 60 | qp.add_eq ([0., 1., 2.], ["P1","P2","P3"], 1./3) 61 | qp.add_eq ([1.], ["P2"], 0.1) 62 | soln = qp.solve() 63 | 64 | expected = {'P2': 0.10000000000000001, 'P3': 
0.11666666666666667, 'P1': 0.78333333333333321} 65 | for vname in soln: 66 | self.assertAlmostEquals (expected[vname], soln[vname], 5) 67 | 68 | def test_qp4 (self): 69 | qp = stupidlp.QP() 70 | 71 | qp.add_objsq (1., "X") 72 | qp.add_objsq (1., "Y") 73 | 74 | qp.add_eq ([1., 1.], ["X", "Y"], 1.0) 75 | 76 | qp.add_var_le ("X", 0.2) 77 | 78 | soln = qp.solve() 79 | print soln 80 | 81 | expected = {'Y': 0.80000000642238134, 'X': 0.19999999357761869} 82 | for vname in soln: 83 | self.assertAlmostEquals (expected[vname], soln[vname], 5) 84 | 85 | # Below fails because CVXOPT doesn't like the fact that the equality constraint matrix is singular 86 | def ignore_test_qp5 (self): 87 | qp = stupidlp.QP() 88 | 89 | # minimize variance of a, constraining P2 == 0.1 90 | qp.add_objsq (-1.0, "P2") 91 | qp.add_objsq_cross (-4.0, "P2", "P3") 92 | qp.add_objlin (1., "P2") 93 | 94 | qp.add_le0 ([-1.0], ["P1"]) 95 | qp.add_le0 ([-1.0], ["P2"]) 96 | qp.add_le0 ([-1.0], ["P3"]) 97 | 98 | qp.add_eq ([1.0, 1.0, 1.0 ], ["P1", "P2", "P3" ], 1.0) 99 | qp.add_eq ([0., 1., 2.], ["P1","P2","P3"], 1./3) 100 | qp.add_identity ("P2", 0.1) 101 | 102 | soln = qp.solve() 103 | 104 | expected = {'P3': 0.11666666666666667, 'P1': 0.78333333333333321} 105 | for vname in soln: 106 | self.assertAlmostEquals (expected[vname], soln[vname], 5) 107 | 108 | 109 | 110 | if __name__ == "__main__": 111 | unittest.main() 112 | # test_name = "test_qp5" 113 | # suite = unittest.TestLoader().loadTestsFromName("test_stupidlp.TestStupidLP.%s" % (test_name,)) 114 | # unittest.TextTestRunner(verbosity=2).run(suite) 115 | 116 | -------------------------------------------------------------------------------- /src/test_delay_station.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import qnet 3 | import qnetu 4 | import numpy 5 | import mytime 6 | import netutils 7 | import estimation 8 | import yaml 9 | 10 | import pwfun 11 | import sampling 12 | import arrivals 
13 | import qstats 14 | import queues 15 | import test_qnet 16 | 17 | from scipy import integrate 18 | from numpy import random 19 | 20 | import gc 21 | import arrivals 22 | 23 | import sys 24 | from math import sqrt 25 | 26 | class TestDelayStation (unittest.TestCase): 27 | 28 | def test_delay_read (self): 29 | self.assertEquals (2, self.del1q.num_queues()) 30 | 31 | q1 = self.del1q.queue_by_name ("I0") 32 | q2 = self.del1q.queue_by_name ("Q") 33 | self.assertTrue (q1 != None) 34 | self.assertTrue (q2 != None) 35 | 36 | self.assertTrue (isinstance (q1, queues.QueueGGk)) 37 | self.assertTrue (isinstance (q2, queues.DelayStation)) 38 | 39 | def test_sample (self): 40 | N = 100 41 | arrv = self.del1q.sample (N) 42 | 43 | mu_task = qstats.mean_task_time (arrv) 44 | mus = qstats.mean_service (arrv) 45 | mur = qstats.mean_response_time (arrv) 46 | 47 | self.assertAlmostEquals (mus[1], mur[1], 10) 48 | self.assertTrue (abs(mur[1] - 2.0) * sqrt(N) * 0.5 < 1.96, "MUR looks wrrong, expected 2.0 was %.5f" % mur[1]) 49 | 50 | def test_arrival_deriv (self): 51 | N = 10 52 | arrv = self.del1q.sample(N) 53 | task5 = arrv.events_of_task (5) 54 | for e in task5: e.obs_a = e.obs_d = 0 55 | for e in task5: print e 56 | 57 | e = task5[1] 58 | fn = e.queue().pyArrivalLik (arrv, e) 59 | 60 | L,U = fn.range() # N.B. 
EPS = 1e-10
def check_derivative (testcase, fx, dfx, xs):
    """Verify DFX (the claimed analytic derivative of FX) against a
    forward-difference estimate of FX at each point of XS, reporting any
    mismatch through TESTCASE.assertTrue."""
    for x0 in xs:
        analytic = dfx (x0)
        numeric = (fx (x0 + EPS) - fx (x0)) / EPS
        matches = abs (analytic - numeric) < 1e-5
        testcase.assertTrue (matches, "Derivative mismatch: X %.5f DFX: %.5f f(X): %.15f f(X+eps) %.15f df_numerical %.15f" % (x0, analytic, fx(x0), fx(x0+EPS), numeric))
len(sys.argv) > 1: 131 | for test_name in sys.argv[1:]: 132 | suite = unittest.TestLoader().loadTestsFromName("test_delay_station.TestDelayStation.%s" % (test_name,)) 133 | unittest.TextTestRunner(verbosity=2).run(suite) 134 | else: 135 | unittest.main() 136 | 137 | if __name__ == "__main__": 138 | main() 139 | 140 | -------------------------------------------------------------------------------- /src/test_hist.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import sys 4 | import unittest 5 | import qnetu 6 | import sampling 7 | import numpy 8 | import netutils 9 | 10 | class TestHistogram (unittest.TestCase): 11 | 12 | def test_1proc_conditional (self): 13 | sampling.set_seed(2334) 14 | N = 5000 15 | gi = 2500 16 | eps = 0.01 17 | nevts = 2 18 | self.do_test_conditional (self.net_small, N, gi, eps, nevts) 19 | 20 | def test_2proc_conditional (self): 21 | sampling.set_seed(2334) 22 | N = 5000 23 | gi = 5000 24 | eps = 0.01 25 | nevts = 4 26 | self.do_test_conditional (self.net2, N, gi, eps, nevts, True) 27 | 28 | def do_test_conditional (self, net, N, gi, eps, nevts, print_all=False): 29 | q = net.queue_by_name ("Q") 30 | pick_evt = lambda arrv,i: arrv.events_of_queue(q)[i] 31 | 32 | a2 = [] 33 | all_arrv = [] 34 | for i in range(N): 35 | arrv = net.sample (nevts) 36 | evt = pick_evt (arrv,1) 37 | all_arrv.append (arrv) 38 | a2.append (evt.a) 39 | a_mean = numpy.mean (a2) 40 | 41 | eid_to_fix = nevts / 2 42 | 43 | # collect arrv close to mean 44 | 45 | arrv_eps = [ arrv for arrv in all_arrv if abs(pick_evt(arrv, eid_to_fix).a - a_mean) < eps ] 46 | print "Mean of queue event a_%d = %.4f" % (eid_to_fix, a_mean) 47 | print "Number of selected histories = ", len(arrv_eps) 48 | a0_sampled = [ pick_evt (arrv,0).a for arrv in arrv_eps ] 49 | netutils.print_hist (a0_sampled) 50 | 51 | if print_all: 52 | i = 0 53 | for arrv in arrv_eps: 54 | f = open ("th_arrv_sampled_%d" % i, "w") 55 | f.write (str (arrv)) 
56 | f.close() 57 | i += 1 58 | 59 | # now run the gibbs sampling 60 | a0_gibbs = [] 61 | for arrv in arrv_eps: 62 | evts = arrv.events_of_queue(q) 63 | for i in range(nevts/2): 64 | evt0 = evts[i] 65 | evt0.obs_a = 0 66 | evt0.previous_by_task().obs_d = 0 67 | arrv_out = net.gibbs_resample (arrv, 0, gi, return_arrv = False) 68 | a0_gibbs.append (pick_evt(arrv_out[-1],0).a) 69 | print "=====" 70 | netutils.print_hist (a0_gibbs) 71 | 72 | if print_all: 73 | i = 0 74 | for arrv in arrv_eps: 75 | f = open ("th_arrv_gibbs_%d" % i, "w") 76 | f.write (str (arrv)) 77 | f.close() 78 | i += 1 79 | 80 | f = open("test_hist_sampled.txt", "w") 81 | f.write ("\n".join(map (str, a0_sampled))) 82 | f.close() 83 | f = open("test_hist_gibbs.txt", "w") 84 | f.write ("\n".join(map (str, a0_gibbs))) 85 | f.close() 86 | 87 | net_small_text = """ 88 | states: 89 | - name: INITIAL 90 | queues: [INITIAL] 91 | successors: [TIER1] 92 | initial: TRUE 93 | - name: STATE 94 | queues: [ Q ] 95 | queues: 96 | - { name: INITIAL, service: [M, 0.5] } 97 | - { name: Q, service: [M, 0.5] } 98 | """ 99 | net2_text = """ 100 | states: 101 | - name: INITIAL 102 | queues: [INITIAL] 103 | successors: [TIER1] 104 | initial: TRUE 105 | - name: STATE 106 | queues: [ Q ] 107 | queues: 108 | - { name: INITIAL, service: [M, 0.5] } 109 | - { name: Q, service: [M, 1.0], processors: 2 } 110 | """ 111 | 112 | net3_text = """ 113 | states: 114 | - name: INITIAL 115 | queues: [INITIAL] 116 | successors: [TIER1] 117 | initial: TRUE 118 | - name: STATE 119 | queues: [ Q ] 120 | queues: 121 | - { name: INITIAL, service: [M, 0.5] } 122 | - { name: Q, service: [M, 1.5], processors: 3 } 123 | """ 124 | 125 | def setUp (self): 126 | self.net_small = qnetu.qnet_from_text (TestHistogram.net_small_text) 127 | self.net2 = qnetu.qnet_from_text (TestHistogram.net2_text) 128 | self.net3 = qnetu.qnet_from_text (TestHistogram.net3_text) 129 | 130 | 131 | def main(): 132 | if len(sys.argv) > 1: 133 | for test_name in sys.argv[1:]: 
134 | suite = unittest.TestLoader().loadTestsFromName("test_hist.TestHistogram.%s" % (test_name,)) 135 | unittest.TextTestRunner(verbosity=2).run(suite) 136 | else: 137 | unittest.main() 138 | 139 | if __name__ == "__main__": 140 | main() 141 | -------------------------------------------------------------------------------- /src/lrt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Front end for model selection 3 | 4 | 5 | import getopt, sys 6 | from optparse import OptionParser 7 | 8 | import numpy 9 | import numpy.random 10 | from scipy.stats import distributions 11 | import yaml 12 | 13 | import expt 14 | import mytime 15 | import estimation, qnetu 16 | import sampling 17 | 18 | import qstats 19 | 20 | def main(): 21 | parser = OptionParser(description="Front-end for model selection using LRT.") 22 | parser.add_option("--network", "-n", dest="allNetf", action="append", default=[], 23 | help="YAML file describing network for each hypothesis (required; use once for each network to compare)\nFirst network supplied is used as the null hypothesis", metavar="FILE") 24 | parser.add_option("--arrivals", dest="allArrvf", action="append", default=list(), 25 | help="Prefix for multif files describing arrivals", metavar="FILE") 26 | parser.add_option("--config", dest="config", help="YAML file containing configuration") 27 | parser.add_option("--seed", dest="seed", type="int", default=12241, help="Integer random seed.") 28 | parser.add_option("--use-multif", dest="isMultif", action="store_true", 29 | help="If supplied, input and output are in multif format.") 30 | parser.add_option ("--gibbs-iter", dest="niter", type="int", default=10, 31 | help="Number of Gibbs iteration for each model") 32 | parser.add_option ("--stats-prefix", dest="statsPrefix", 33 | help="Prefix of stats files output.") 34 | parser.add_option ("--output-arrv", dest="outputArrv", action="store_true", 35 | help="If supplied, write arrivals 
after every interation") 36 | 37 | (options, args) = parser.parse_args() 38 | 39 | sampling.set_seed (options.seed) 40 | 41 | tmr = mytime.timeit() 42 | 43 | if options.config: 44 | f = open(options.config) 45 | qnetu.read_configuration (f) 46 | f.close() 47 | 48 | if len(options.allNetf) == 0: 49 | parser.error ("No networks specified (use -n)") 50 | sys.exit(1) 51 | 52 | if len(options.allNetf) != len(options.allArrvf): 53 | parser.error ("Need to specify same # of arrivals as networks.\nNets: %s\nArrivals %s" % (options.allNetf, options.allArrvf)) 54 | 55 | allNet = [] 56 | for netf in options.allNetf: 57 | f = open(netf) 58 | allNet.append (qnetu.qnet_from_text (f)) 59 | f.close() 60 | net0 = allNet[0] 61 | 62 | likf = [] 63 | for i in range(len(allNet)): 64 | maxLik = computeLik (i, allNet[i], options.allArrvf[i], options, tmr) 65 | likf.append (maxLik) 66 | 67 | print "Model 0 Lik %.5f" % likf[0] 68 | for i in range(1, len(likf)): 69 | print "Model %d Lik %.10f" % (i, likf[i]) 70 | print "MDL LOG.LR DF p" 71 | for i in range(1, len(likf)): 72 | llr = likf[i] - likf[0] 73 | df = len(allNet[i].parameters) - len(net0.parameters) 74 | if df > 0: 75 | p = 1 - distributions.chi2.cdf (-2*llr, df) 76 | else: 77 | p = -1 78 | print "%d %.5f %d %.5f" % (i, llr, df, p) 79 | 80 | tmr.total ("Total time") 81 | 82 | 83 | def computeLik (mdlIx, net, arrvf, options, tmr): 84 | 85 | statef = open ("%sstate.txt" % arrvf) 86 | af = open ("%sa.txt" % arrvf) 87 | df = open ("%sd.txt" % arrvf) 88 | arrv = qnetu.read_multifile_arrv (net, statef, af, df) 89 | statef.close() 90 | af.close() 91 | df.close() 92 | 93 | print "Number of events = ", arrv.num_events () 94 | print "Number hidden events = ", arrv.num_hidden_events() 95 | 96 | tmr.tick ("MDL %d : Loading arrivals" % mdlIx) 97 | 98 | arrv = net.gibbs_initialize (arrv.duplicate ()) 99 | tmr.tick ("MDL %d : Initialization" % mdlIx) 100 | arrv.validate() 101 | tmr.tick ("MDL %d : Validation" % mdlIx) 102 | 103 | arrv.display() 
104 | 105 | allStats = qstats.STATS[:] 106 | train_stats_f = [ open("train_%d_%s.txt" % (mdlIx, fn.__name__), "w") for fn in allStats ] 107 | # reporter = Reporter (allStats, train_stats_f, options) 108 | 109 | lp = estimation.chib_evidence (net, arrv, options.niter, options.niter) 110 | tmr.tick ("Running evidence log prob (Model %d)" % mdlIx) 111 | 112 | for f in train_stats_f: f.close() 113 | 114 | print "MDL %d: Evidence prob %.5f" % (mdlIx, lp) 115 | 116 | return lp 117 | 118 | 119 | if __name__ == "__main__": 120 | main() 121 | 122 | -------------------------------------------------------------------------------- /src/subset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Front end for running the Gibbs sampler 3 | 4 | 5 | import sys 6 | from optparse import OptionParser 7 | 8 | import sampling 9 | import qnetu 10 | import yaml 11 | 12 | 13 | def main(): 14 | 15 | subsetPct = None 16 | retainAll = True 17 | 18 | parser = OptionParser(description="Take a random subset of a given set of arrivals.") 19 | parser.add_option("--network", dest="netf", 20 | help="YAML file containing network description", metavar="FILE") 21 | parser.add_option("--arrivals", dest="arrvf", 22 | help="YAML file describing arrivals", metavar="FILE") 23 | parser.add_option("--output-file", dest="outf", default="arrivals.yml", 24 | help="File to write YAML arrivals to") 25 | parser.add_option("--subset-pct", dest="subsetPct", type="float", default=0.5, 26 | help="Percentage of tasks in subset.") 27 | parser.add_option("--max-time", dest="maxTime", default=-1, type="int") 28 | parser.add_option("--verbose", dest="verbose", action="store_true", default=False, 29 | help="If true, ouptut lots of debugging information.") 30 | parser.add_option("--retain-all", dest="retain_all", action="store_true", default=True, 31 | help="If supplied, keep all arrivals in the output file, marking the ones that aren't observed.") 32 | 
parser.add_option("--no-retain-all", dest="retain_all", action="store_false", 33 | help="If true, keep all arrivals in the output file, marking the ones that aren't observed.") 34 | parser.add_option("--seed", dest="seed", type="int", default=12241, help="Integer random seed.") 35 | parser.add_option("--use-multif", dest="isMultif", action="store_true", 36 | help="If supplied, input and output are in multif format.") 37 | parser.add_option("--input-multif", dest="inputMultif", 38 | help="Prefix for input multif.") 39 | parser.add_option("--output-multif", dest="outputMultif", 40 | help="Prefix for input multif.") 41 | 42 | (options, args) = parser.parse_args() 43 | 44 | sampling.set_seed (options.seed) 45 | 46 | f = open(options.netf) 47 | net = qnetu.qnet_from_text (f) 48 | f.close() 49 | 50 | if options.isMultif: 51 | statef = open ("%sstate.txt" % options.inputMultif) 52 | af = open ("%sa.txt" % options.inputMultif) 53 | df = open ("%sd.txt" % options.inputMultif) 54 | arrv = qnetu.read_multifile_arrv (net, statef, af, df) 55 | statef.close() 56 | af.close() 57 | df.close() 58 | else: 59 | f = open (options.arrvf) 60 | arrv = qnetu.load_arrivals (net, f) 61 | f.close () 62 | 63 | if options.verbose: 64 | print "Original arrivals", arrv 65 | arrv.validate() 66 | 67 | if options.maxTime > 0: 68 | if options.verbose: print "Subset up to time ", options.maxTime 69 | def task_in_time (arrv, tid): 70 | evtl = arrv.events_of_task (tid) 71 | task_arrival = min(e.d for e in evtl) 72 | ret = task_arrival < options.maxTime 73 | return ret 74 | subset = arrv.subset_by_task_fn (task_in_time) 75 | elif options.retain_all: 76 | subset = arrv.subset_by_task (options.subsetPct, adapt_fn=copy_evt) 77 | else: 78 | subset = arrv.subset_by_task (options.subsetPct) 79 | delete_unobserved_tasks (subset) 80 | 81 | if options.verbose: 82 | subset.validate () 83 | print "Subsetted arrivals", subset 84 | 85 | if options.isMultif: 86 | statef = open ("%sstate.txt" % options.outputMultif, 
"w") 87 | af = open ("%sa.txt" % options.outputMultif, "w") 88 | df = open ("%sd.txt" % options.outputMultif, "w") 89 | arrv = qnetu.write_multifile_arrv (subset, statef, af, df, obs_only=False) 90 | statef.close() 91 | af.close() 92 | df.close() 93 | else: 94 | f = open (options.outf, 'w') 95 | yaml.dump (subset, stream=f) 96 | f.close() 97 | 98 | def copy_evt (e_old, e): 99 | e.a = e_old.a 100 | e.d = e_old.d 101 | e.s = e_old.s 102 | 103 | def delete_unobserved_tasks (subset): 104 | tids = dict() 105 | for e in subset: 106 | if not e.obs: 107 | tids[e.tid] = True 108 | for tid in tids: 109 | subset.delete_task (tid) 110 | 111 | if __name__ == "__main__": 112 | main() 113 | 114 | -------------------------------------------------------------------------------- /src/test_likdelta.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import qnet 3 | import qnetu 4 | import numpy 5 | import numpy.random 6 | import mytime 7 | import netutils 8 | import estimation 9 | import yaml 10 | import arrivals 11 | import sampling 12 | 13 | import qstats 14 | 15 | import sys 16 | 17 | class TestLikDelta (unittest.TestCase): 18 | 19 | def test_mm1_delta (self): 20 | sampling.set_seed(68310) 21 | nreps = 10 22 | ntasks = 10 23 | pct = 0.5 24 | net = self.mm1 25 | self.do_test_delta_internal (net, nreps, ntasks, pct) 26 | 27 | def test_mmk_delta (self): 28 | sampling.set_seed(68310) 29 | nreps = 10 30 | ntasks = 10 31 | pct = 0.5 32 | net = self.mmk 33 | self.do_test_delta_internal (net, nreps, ntasks, pct) 34 | 35 | def test_mmrss_delta (self): 36 | sampling.set_seed(68310) 37 | nreps = 10 38 | ntasks = 100 39 | pct = 0.25 40 | net = self.mmrss 41 | self.do_test_delta_internal (net, nreps, ntasks, pct) 42 | 43 | def do_test_delta_internal (self, net, nreps, ntasks, pct): 44 | for ri in range(nreps): 45 | arrv = net.sample (ntasks) 46 | obs = arrv.subset_by_task (pct) 47 | samples = net.slice_resample (obs, 0, 5) 48 | arrv_from 
= samples[len(samples)-1] 49 | print "Computing LIK0" 50 | lik0 = net.log_prob (arrv_from) 51 | for e in arrv_from: 52 | if not e.obs_d: 53 | # print "Testing evt ", e 54 | dfn = qnet.GGkGibbs(net, arrv_from, e, lik0).dfn() 55 | d0 = e.d 56 | d_test = [ d0+delta for delta in [ -0.5, -0.1, 0.1, 0.5, 1.0, 1.5, 3.0 ] ] 57 | for d1 in d_test: 58 | # print "Testing departure ", d1 59 | lik_incremental = dfn(d1) 60 | if numpy.isinf (lik_incremental): continue # probably right 61 | lik_true = self.compute_full_lik (net, arrv_from, e, d1) 62 | print "%d %.4f %.4f %.4f %.4f" % (e.eid, d0, d1, lik_incremental, lik_true) 63 | if numpy.isinf(lik_true): 64 | self.assertTrue (numpy.isinf(lik_incremental)) 65 | else: 66 | self.assertAlmostEquals (lik_true, lik_incremental, 5) 67 | 68 | def compute_full_lik (self, net, arrv0, evt, d): 69 | arrv = arrv0.duplicate() 70 | dl0 = evt.queue().pyDiffListForDeparture (evt, d) 71 | evt_next = evt.next_by_task() 72 | if evt_next: 73 | dl0.extend (evt_next.queue().pyDiffListForArrival(evt_next, d)) 74 | arrv.applyDiffList (dl0, 0) 75 | return net.log_prob (arrv) 76 | 77 | 78 | mm1_text = """ 79 | states: 80 | - name: INITIAL 81 | queues: [ INITIAL ] 82 | successors: [ TIER1 ] 83 | initial: TRUE 84 | - name: TIER1 85 | queues: [ WEB1, WEB2 ] 86 | successors: [ TIER2 ] 87 | - name: TIER2 88 | queues: [ APP1 ] 89 | queues: 90 | - { name: INITIAL, service: [M, 10.0] } 91 | - { name: WEB1, service: [M, 3.0] } 92 | - { name: WEB2, service: [M, 3.0] } 93 | - { name: APP1, service: [M, 8.0] } 94 | """ 95 | 96 | 97 | mmk_text = """ 98 | states: 99 | - name: INITIAL 100 | queues: [ INITIAL ] 101 | successors: [ TIER1 ] 102 | initial: TRUE 103 | - name: TIER1 104 | queues: [ WEB1, WEB2 ] 105 | successors: [ TIER2 ] 106 | - name: TIER2 107 | queues: [ APP1 ] 108 | queues: 109 | - { name: INITIAL, service: [M, 5.0] } 110 | - { name: WEB1, service: [M, 3.0], processors: 3 } 111 | - { name: WEB2, service: [M, 3.0], processors: 4 } 112 | - { name: APP1, 
service: [M, 8.0], processors: 2 } 113 | """ 114 | 115 | mmrss_text = """ 116 | states: 117 | - name: INITIAL 118 | queues: [ INITIAL ] 119 | successors: [ TIER1 ] 120 | initial: TRUE 121 | - name: TIER1 122 | queues: [ WEB1, WEB2 ] 123 | successors: [ TIER2 ] 124 | - name: TIER2 125 | queues: [ APP1, APP2 ] 126 | successors: [ TIER3 ] 127 | - name: TIER3 128 | queues: [ DB1, DB2 ] 129 | queues: 130 | - { name: INITIAL, service: [M, 10.0] } 131 | - { name: WEB1, service: [M, 15.0], type: GG1R } 132 | - { name: WEB2, service: [M, 17.0], type: GG1R } 133 | - { name: APP1, service: [M, 10.0], type: GG1R } 134 | - { name: APP2, service: [M, 10.0], type: GG1R } 135 | - { name: DB1, service: [M, 7.0], type: GG1R } 136 | - { name: DB2, service: [M, 7.0], type: GG1R } 137 | """ 138 | 139 | def setUp (self): 140 | self.mm1 = qnetu.qnet_from_text (TestLikDelta.mm1_text) 141 | self.mmk = qnetu.qnet_from_text (TestLikDelta.mmk_text) 142 | self.mmrss = qnetu.qnet_from_text (TestLikDelta.mmrss_text) 143 | 144 | def main(): 145 | if len(sys.argv) > 1: 146 | for test_name in sys.argv[1:]: 147 | suite = unittest.TestLoader().loadTestsFromName("test_likdelta.TestLikDelta.%s" % (test_name,)) 148 | unittest.TextTestRunner(verbosity=2).run(suite) 149 | else: 150 | unittest.main() 151 | 152 | if __name__ == "__main__": 153 | main() 154 | 155 | -------------------------------------------------------------------------------- /src/test_ninqueue.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import ninqueue 3 | import sys 4 | 5 | class TestNinqueue (unittest.TestCase): 6 | 7 | def test_add (self): 8 | ninq = ninqueue.NinQueue() 9 | ninq.add_birth_death (1.0, 2.0) 10 | ninq.add_birth_death (1.75, 2.25) 11 | ninq.add_birth_death (1.8, 1.81) 12 | self.assertEquals (ninq.knots(), 13 | zip ([1.0, 1.75, 1.8, 1.81, 2.0, 2.25], 14 | [1, 2, 3, 2, 1, 0])) 15 | ninq.add_birth_death (0.5, 2.5) 16 | self.assertEquals (ninq.knots(), 17 | zip 
([0.5, 1.0, 1.75, 1.8, 1.81, 2.0, 2.25, 2.5], 18 | [1, 2, 3, 4, 3, 2, 1, 0])) 19 | ninq.add_birth_death (0.25, 2.1) 20 | self.assertEquals (ninq.knots(), 21 | zip ([0.25, 0.5, 1.0, 1.75, 1.8, 1.81, 2.0, 2.1, 2.25, 2.5], 22 | [1, 2, 3, 4, 5, 4, 3, 2, 1, 0])) 23 | print ninq 24 | 25 | def test_add2 (self): 26 | ninq = ninqueue.NinQueue() 27 | ninq.add_birth_death (1.0, 2.0) 28 | ninq.add_birth_death (1.75, 2.25) 29 | ninq.add_birth_death (2.1, 2.15) 30 | self.assertEquals (ninq.knots(), 31 | zip([1.0, 1.75, 2.0, 2.1, 2.15, 2.25], 32 | [ 1, 2, 1, 2, 1, 0])) 33 | 34 | def test_N (self): 35 | self.assertEquals (0, self.ninq.N(0.1)) 36 | self.assertEquals (2, self.ninq.N(0.75)) 37 | self.assertEquals (2, self.ninq.N(2.1)) 38 | self.assertEquals (0, self.ninq.N(3)) 39 | 40 | def test_move_arrival1 (self): 41 | ninq = ninqueue.NinQueue() 42 | ninq.add_birth_death (1.0, 2.0) 43 | ninq.add_birth_death (1.75, 2.25) 44 | print ninq 45 | ninq.move_arrival (1.0, 0.5) 46 | self.assertEquals (ninq.knots(), 47 | zip ([0.5, 1.75, 2.0, 2.25 ], 48 | [1, 2, 1, 0])) 49 | 50 | def test_move_arrival2 (self): 51 | ninq = ninqueue.NinQueue() 52 | ninq.add_birth_death (1.0, 2.0) 53 | ninq.add_birth_death (1.75, 2.25) 54 | ninq.add_birth_death (2.1, 2.15) 55 | print ninq 56 | ninq.move_arrival (1.0, 0.5) 57 | self.assertEquals (ninq.knots(), 58 | zip([0.5, 1.75, 2.0, 2.1, 2.15, 2.25 ], 59 | [1, 2, 1, 2, 1, 0])) 60 | 61 | 62 | def test_overlay1 (self): 63 | overlay = ninqueue.Overlay(self.ninq) 64 | overlay.move_arrival (0.25, 0.1) 65 | overlay.move_arrival (0.5, 0.7) 66 | overlay.move_departure (1.81, 3.1) 67 | self.assertEquals (0, self.ninq.N(0.1)) 68 | self.assertEquals (2, self.ninq.N(0.75)) 69 | self.assertEquals (2, self.ninq.N(2.1)) 70 | self.assertEquals (0, self.ninq.N(3)) 71 | self.assertEquals (0, overlay.N(0.01)) 72 | self.assertEquals (1, overlay.N(0.1)) 73 | self.assertEquals (1, overlay.N(0.25)) 74 | self.assertEquals (1, overlay.N(0.5)) 75 | self.assertEquals (2, 
overlay.N(0.7)) 76 | self.assertEquals (2, overlay.N(0.75)) 77 | self.assertEquals (5, overlay.N(1.81)) 78 | self.assertEquals (1, overlay.N(3.0)) 79 | 80 | def test_move_arrival2 (self): 81 | ninq = ninqueue.NinQueue() 82 | ninq.add_birth_death (1.0, 2.0) 83 | ninq.add_birth_death (1.5, 3.5) 84 | print ninq 85 | ninq.move_arrival (1.5, 2.5) 86 | print ninq 87 | self.assertEquals (1, ninq.N(1.0)) 88 | self.assertEquals (1, ninq.N(1.5)) 89 | self.assertEquals (0, ninq.N(2.25)) 90 | self.assertEquals (1, ninq.N(3.0)) 91 | 92 | def test_zero (self): 93 | ninq = ninqueue.NinQueue() 94 | ninq.add_birth_death (1.0, 1.0) 95 | eps = 1e-10 96 | # self.assertEquals (1, ninq.N(1.0), ninq) 97 | self.assertEquals (0, ninq.N(1.0 - eps)) 98 | self.assertEquals (0, ninq.N(1.0 + eps)) 99 | 100 | # tests fix to a bug where knots_in_range couldn't handle simultaneous arrivals 101 | def test_knots_in_range (self): 102 | ninq0 = ninqueue.NinQueue() 103 | ninq0.add_birth_death (1.0, 2.0) 104 | ninq0.add_birth_death (1.25, 1.75) 105 | ninq0.add_birth_death (1.25, 1.3) 106 | 107 | knots_expected = [(1.0, 1), (1.25, 2), (1.25, 3), (1.3, 2), (1.75, 1), (2.0, 0)] 108 | self.assertEquals (ninq0.knots(), knots_expected) 109 | 110 | knots_nonredundant = [(0,0), (1.0, 1), (1.25, 3), (1.3, 2), (1.75, 1), (2.0, 0), (5.0, 0)] 111 | self.assertEquals (ninq0.knots_in_range(0, 5.0), knots_nonredundant) 112 | 113 | knots_small = [(1.1,1), (1.25, 3), (1.3, 2), (1.5, 2)] 114 | self.assertEquals (ninq0.knots_in_range (1.1, 1.5), knots_small) 115 | 116 | def setUp (self): 117 | self.ninq = ninqueue.NinQueue() 118 | self.ninq.add_birth_death (1.0, 2.0) 119 | self.ninq.add_birth_death (1.75, 2.25) 120 | self.ninq.add_birth_death (1.8, 1.81) 121 | self.ninq.add_birth_death (0.5, 2.5) 122 | self.ninq.add_birth_death (0.25, 2.1) 123 | 124 | def main(): 125 | if len(sys.argv) > 1: 126 | for test_name in sys.argv[1:]: 127 | suite = unittest.TestLoader().loadTestsFromName("test_ninqueue.TestNinqueue.%s" % 
(test_name,)) 128 | unittest.TextTestRunner(verbosity=2).run(suite) 129 | else: 130 | unittest.main() 131 | 132 | if __name__ == "__main__": 133 | main() 134 | 135 | -------------------------------------------------------------------------------- /src/test_ivltreap.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import ivltreap 3 | import sys 4 | import random 5 | 6 | class TestIvltreap (unittest.TestCase): 7 | 8 | def test_intersect (self): 9 | keys = dict() 10 | def adder(n): keys[n.other] = 1 11 | self.treap1.intersect (4,4, adder) 12 | self.assertEquals (2, len(keys), "Found: %s" % keys) 13 | for k in ["A", "E"]: 14 | self.assertTrue (k in keys) 15 | 16 | def test_random_intersect (self): 17 | nrep = 100 18 | ninterval = 100 19 | for ri in range(nrep): 20 | treap = ivltreap.IntervalTreap() 21 | brutef= [] 22 | for ii in range(ninterval): 23 | s = random.uniform (0, 10*ninterval) 24 | e = s + random.uniform (0, 10) 25 | treap.insert (s,e,"") 26 | brutef.append ((s,e)) 27 | self.assertEquals (ninterval, len(treap)) 28 | treap.validate() 29 | s0 = random.uniform (0, 10*ninterval) 30 | e0 = random.uniform (0, 10) 31 | nfound = [0] 32 | def inner (node): nfound[0] += 1 33 | treap.intersect (s0, e0, inner) 34 | n1 = 0 35 | for s1,e1 in brutef: 36 | if s1 < e0 and s0 < e1: n1 += 1 37 | self.assertEquals (n1, nfound[0]) 38 | 39 | def test_random_find (self): 40 | nrep = 100 41 | ninterval = 100 42 | for ri in range(nrep): 43 | treap = ivltreap.IntervalTreap() 44 | brutef= [] 45 | for ii in range(ninterval): 46 | s = random.uniform (0, 10*ninterval) 47 | e = s + random.uniform (0, 10) 48 | treap.insert (s,e,"") 49 | brutef.append ((s,e)) 50 | s0,e0 = random.choice (brutef) 51 | node = treap.find (s0, e0, "") 52 | self.assertTrue (node != None) 53 | self.assertTrue (node.start == s0) 54 | self.assertTrue (node.end == e0) 55 | 56 | def test_remove (self): 57 | def adder(n): keys[n.other] = 1 58 | 59 | # first 
delete irrelevant interval 60 | keys = dict() 61 | self.treap1.remove (1,2,"B") 62 | self.treap1.intersect (4,4, adder) 63 | self.assertEquals (2, len(keys), "Found: %s" % keys) 64 | for k in ["A", "E"]: 65 | self.assertTrue (k in keys) 66 | 67 | # first delete relevant interval 68 | keys = dict() 69 | self.treap1.remove (3,5,"A") 70 | self.treap1.intersect (4,4, adder) 71 | self.assertEquals (1, len(keys), "Found: %s" % keys) 72 | self.assertTrue ("E" in keys) 73 | 74 | 75 | def test_random_remove (self): 76 | nrep = 100 77 | ninterval = 10 78 | for ri in range(nrep): 79 | # add random intervals 80 | treap = ivltreap.IntervalTreap() 81 | for ii in range(ninterval): 82 | s = random.uniform (0, 5*ninterval) 83 | e = s + random.uniform (0, 10) 84 | treap.insert (s,e,"") 85 | self.assertEquals (len(treap), ninterval) 86 | 87 | # remove random intervals 88 | killme = [] 89 | def do_select (foo): 90 | r = random.uniform (0.0, 1.0) 91 | if r < 0.25: 92 | killme.append(foo) 93 | treap.traverse (do_select) 94 | print "#KILL ", len(killme) 95 | print treap 96 | Nk = 0 97 | for node in killme: 98 | print "KILL [%s,%s)" % (node.start, node.end) 99 | treap.remove (node.start, node.end, node.other) 100 | print treap 101 | Nk += 1 102 | self.assertEquals (len(treap), ninterval - Nk) 103 | 104 | # compute bf list 105 | brutef = treap.all_intersect(0, 5*ninterval) 106 | print "#BRUTEF", len(brutef) 107 | print brutef[0] 108 | self.assertEquals (len(brutef) + len(killme), ninterval, 109 | "Huh? 
L(KILL) %d L(BRUTEF) %d expected sum %d" % (len(killme), len(brutef), ninterval)) 110 | 111 | # check that a random intersection agrees 112 | s0 = random.uniform (0, 10*ninterval) 113 | e0 = random.uniform (0, 10) 114 | nfound = [0] 115 | def inner (node): nfound[0] += 1 116 | treap.intersect (s0, e0, inner) 117 | n1 = 0 118 | for other in brutef: 119 | if other.start < e0 and s0 < other.end: n1 += 1 120 | self.assertEquals (n1, nfound[0]) 121 | 122 | def test_remove2 (self): 123 | for i in range(100): 124 | treap1 = ivltreap.IntervalTreap() 125 | treap1.insert (1,5,"A") 126 | treap1.insert (1,2,"B") 127 | treap1.insert (1,8,"C") 128 | treap1.remove (1,2,"B") 129 | self.assertEquals (None, treap1.find (1,2,"B")) 130 | treap1.validate() 131 | 132 | def setUp (self): 133 | self.treap1 = ivltreap.IntervalTreap() 134 | self.treap1.insert (3,5,"A") 135 | self.treap1.insert (1,2,"B") 136 | self.treap1.insert (6,8,"C") 137 | self.treap1.insert (4.5,6.5,"D") 138 | self.treap1.insert (2,7,"E") 139 | 140 | def main(): 141 | if len(sys.argv) > 1: 142 | for test_name in sys.argv[1:]: 143 | suite = unittest.TestLoader().loadTestsFromName("test_ivltreap.TestIvltreap.%s" % (test_name,)) 144 | unittest.TextTestRunner(verbosity=2).run(suite) 145 | else: 146 | unittest.main() 147 | 148 | if __name__ == "__main__": 149 | main() 150 | 151 | -------------------------------------------------------------------------------- /src/batch_arrival_queue.py: -------------------------------------------------------------------------------- 1 | import queues 2 | 3 | 4 | class BatchArrivalQueue (Queue): 5 | 6 | def __repr__ (self): 7 | return "\n%s%s\n" % (self.name, self.service, self.batch_size) 8 | 9 | def as_yaml (self): 10 | return " - { name: %s, type: BATCH, service: %s, size: %s }" % (self.name, self.service.as_yaml(), self.size.as_yaml()) 11 | 12 | def when_clear (self, Arrivals arrv): 13 | cdef Event last_evt = arrv.final_arrival_by_queue (self) 14 | return last_evt.d if last_evt else 0.0 
    def previous_departing (self, Event e):
        # Batch queues serve in arrival order, so the previous departure is
        # simply the previous event by queue.
        return e.prev_byq # previous in arrival order

    cpdef recompute_service (self, Event e1):
        # Recompute e1's service time from its neighbors' departures, and fix
        # up the successor's service time, which also depends on e1.d.
        cdef Event e0, e2
        e0 = e1.prev_byq
        e2 = e1.next_byq

        if e0:
            # service starts when both e1 has arrived and e0 has departed
            e1.s = e1.d - max(e1.a, e0.d)
        else:
            e1.s = e1.d - e1.a

        if e2:
            e2.s = e2.d - max(e2.a, e1.d)

    cdef double departure_of_service (self, Event e, double s):
        # Departure implied by service time s: service begins at
        # max(arrival, previous departure).
        cdef Event byq = e.prev_byq
        cdef double d_prev = byq.d if byq else 0.0
        return s + max (e.a, d_prev)

    cpdef recompute_departure (self, Arrivals arrv, Event e1):
        # Set e1's departure from its (fixed) service time and the departure
        # of its predecessor in the queue.
        cdef Event e_prev = e1.prev_byq
        cdef double d_prev = e_prev.d if e_prev else 0
        e1.set_departure (e1.s + max(e1.a, d_prev))

    def validate_service (self, evt):
        # Consistency check: service time must equal d - max(a, prev.d), and
        # arrivals must be non-decreasing along the queue.
        cdef Event e = evt
        d_prev = (e.prev_byq.d if e.prev_byq else 0)
        expected = e.d - max(e.a, d_prev)
        assert abs(e.s - expected) < 1e-10, "Invalid service time (expected %s was %s)\n %s\n %s" % (expected, e.s, e.prev_byq, e)
        if e.prev_byq:
            assert e.a >= e.prev_byq.a, "Invalid arrival times for events PREV: %s CURR: %s\n %s\n %s" % (e.prev_byq.a, e.a, e.prev_byq, e)

    cdef Pwfun arrivalLik (self, Arrivals arrv, Event evt):
        # Piecewise likelihood of evt's arrival time over [L, U], where
        # L is bounded below by the previous event's arrival and U above by
        # the next event's arrival and evt's own departure.  The function has
        # a breakpoint at d_prev (the predecessor's departure), where the
        # service-start regime changes; f1/f2 are the two pieces, df1/df2
        # their derivatives.  NOTE(review): ArrivalFn semantics are defined
        # elsewhere -- confirm f1 applies below d_prev and f2 above.
        cdef Event e_prev, e_next
        e_prev = evt.prev_byq
        e_next = evt.next_byq

        if e_prev:
            d_prev = e_prev.d
            L = e_prev.a
        else:
            L = 0
            d_prev = 0

        if e_next:
            U = min(e_next.a, evt.d)
        else:
            U = evt.d

        cdef ArrivalFn afn
        afn = ArrivalFn (self.service, evt, evt.d, d_prev)
        if DEBUG:
            print afn
            print "@ L(=%.17g) (diff=%.17g) is %s or %s" % (L, L-afn.L(), afn.f1(L), afn.f2(L))

        # Drop whichever piece lies outside [L, U].
        if d_prev < L:
            return Pwfun ([L, U], [ afn.f2 ], [ afn.df2 ])
        elif U < d_prev:
            return Pwfun ([L, U], [ afn.f1 ], [ afn.df1 ])
        else:
            return Pwfun ([L, d_prev, U], [ afn.f1, afn.f2 ], [ afn.df1, afn.df2 ])

    cdef Pwfun departureLik (self, Arrivals arrv, Event evt):
        # Piecewise likelihood of evt's departure time.  When a successor
        # exists, the domain is [L, U] with a breakpoint at the successor's
        # arrival A; otherwise the departure is unbounded above and a single
        # final-departure piece is used.
        cdef Event e_prev, e_next
        cdef double L, A, U

        e_prev = evt.prev_byq
        e_next = evt.next_byq

        if e_prev:
            L = max(evt.a, e_prev.d)
        else:
            L = evt.a

        cdef DepartureFn dfun
        cdef FinalDepartureFn fdfun

        if e_next:
            A = e_next.a
            U = e_next.d
            dfun = DepartureFn (self.service, evt, e_next, L, A, U)
            if DEBUG:
                print dfun
                print "@ L(=%.17g) (diff=%.17g) is %s or %s" % (L, L-dfun.L(), dfun.f1(L), dfun.f2(L))

#            print "Creating departure fn:\n %s\nL: %8.4f A: %8.4f U: %8.4f\ndfun: %s\n" % (format_six(evt), L, A, U, dfun)
            # Drop whichever piece lies outside [L, U].
            if A < L:
                return Pwfun([L, U], [dfun.f2], [dfun.df2])
            elif U < A:
                return Pwfun([L, U], [dfun.f1], [dfun.df1])
            else:
                return Pwfun([L, A, U], [ dfun.f1, dfun.f2 ], [ dfun.df1, dfun.df2 ])

        else:
            # last event in the queue: departure unbounded above
            fdfun = FinalDepartureFn (self.service, evt, L)
            max_x = numpy.inf
            return Pwfun ([L, max_x], [ fdfun.f ], [ fdfun.df ])

    cdef diffListForArrival (self, Event evt, double a):
        # Build the event diff implied by moving evt's arrival to a.
        cdef Event e_new = evt.dup_with_structure()
        e_new.a = a
        self.recompute_service (e_new)
        return [e_new]

    cdef diffListForDeparture (self, Event evt, double d):
        # Build the event diffs implied by moving evt's departure to d:
        # evt's own service changes, and so does the successor's.
        cdef double d_prev = evt.prev_byq.d if evt.prev_byq else 0.0
        cdef Event e_new = evt.dup_with_structure()
        e_new.d = d
        e_new.s = d - max(e_new.a, d_prev)
        lst = [e_new]

        cdef Event byq = e_new.next_byq
        if byq:
            byq = byq.dup_with_structure()
            byq.prev_byq = e_new
            e_new.next_byq = byq
            byq.s = byq.d - max(byq.a, d)
            lst.append (byq)

        return lst

    def select_job_for_service (self, evt_list):
        # FIFO: always serve the first waiting job.
        return evt_list[0]
import unittest 2 | import qnetu 3 | import modelmgmt 4 | import sys 5 | 6 | import numpy 7 | 8 | class TestModelMgmt (unittest.TestCase): 9 | 10 | def test_split_state (self): 11 | net1c, converter = modelmgmt.split_state (self.net1, "S1", "COPY0", "COPY1", [ "Q1" ], ["Q2", "Q3"]) 12 | print net1c 13 | self.assertEquals (5, net1c.num_states()) 14 | self.assertEquals (6, net1c.num_queues()) 15 | 16 | print net1c 17 | print net1c.fsm.a 18 | print net1c.fsm.o 19 | 20 | nt = 1200 21 | arrv = net1c.sample (nt) 22 | self.assertEquals (arrv.num_events(), nt*3) 23 | 24 | nbyq,nbys = self.queue_state_counts (net1c, arrv) 25 | s_expected = [1200, 400, 600, 600, 800] 26 | q_expected = [1200, 400, 400, 400, 600, 600] 27 | self.assertArraysAlmostEquals (nbys, s_expected) 28 | self.assertArraysAlmostEquals (nbyq, q_expected) 29 | 30 | arrv_old = self.net1.sample(1200) 31 | nbyq,nbys = self.queue_state_counts (self.net1, arrv_old) 32 | s_expected = [1200, 1200, 600, 600] 33 | q_expected = [1200, 400, 400, 400, 600, 600] 34 | self.assertArraysAlmostEquals (nbys, s_expected) 35 | self.assertArraysAlmostEquals (nbyq, q_expected) 36 | 37 | arrv_new = converter(arrv_old) 38 | nbyq,nbys = self.queue_state_counts (net1c, arrv_new) 39 | s_expected = [1200, 1200, 600, 600] 40 | q_expected = [1200, 400, 400, 400, 600, 600] 41 | self.assertArraysAlmostEquals (nbys, s_expected) 42 | self.assertArraysAlmostEquals (nbyq, q_expected) 43 | 44 | def test_add_hidden_queue (self): 45 | nt = 100 46 | qtext = "{ name: HIDDEN, service: [M, 2.0], type: GG1R }" 47 | arrv = self.net1.sample (nt) 48 | net2,arrv2 = modelmgmt.add_hidden_queue (self.net1, arrv, "S1", "S1_HIDDEN", qtext) 49 | 50 | self.assertTrue (5, net2.num_states()) 51 | self.assertTrue (7, net2.num_queues()) 52 | arrv3 = net2.sample (1000) 53 | nbyq,nbys = self.queue_state_counts (net2, arrv3) 54 | s_expected = [1000, 1000, 500, 500, 1000] 55 | q_expected = [1000, 333, 333, 333, 500, 500, 1000] 56 | self.assertArraysAlmostEquals (nbys, 
s_expected) 57 | self.assertArraysAlmostEquals (nbyq, q_expected) 58 | 59 | # check arrivals object 60 | print arrv2 61 | self.assertEquals (4*nt, arrv2.num_events()) 62 | arrv2.validate() 63 | 64 | def queue_state_counts (self, net1c, arrv): 65 | nbyq = [0] * net1c.num_queues() 66 | nbys = [0] * net1c.num_states() 67 | for evt in arrv: 68 | nbyq[evt.qid] += 1 69 | nbys[evt.state] += 1 70 | return nbyq,nbys 71 | 72 | def assertArraysAlmostEquals (self, nbys, s_expected): 73 | for act,exp in zip(nbys, s_expected): 74 | self.assertTrue (abs(exp-act) <= 0.1*exp, "Missed! exp %d act %d" % (exp,act)) 75 | 76 | def test_for_model_search (self): 77 | net0 = self.net1 78 | arrv0 = net0.sample (15) 79 | 80 | sname = "S1" 81 | qs = net0.queues_of_state (1) 82 | q0 = qs[0] 83 | q1 = qs[1] 84 | 85 | qlist = map(lambda q: q.name, qs) 86 | qlist.remove(q0.name) 87 | qlist.remove(q1.name) 88 | 89 | sbad = "_%s_ABNORMAL" % sname 90 | sgood = "_%s_OK" % sname 91 | qtext = "{ name: HIDDEN, service: [M, 2.0], type: GG1R }" 92 | 93 | net1, converter = modelmgmt.split_state (net0, sname, sbad, sgood, [q0.name, q1.name], qlist) 94 | arrv1 = converter(arrv0) 95 | arrv1.validate() 96 | 97 | net2, arrv2 = modelmgmt.add_hidden_queue (net1, arrv1, sbad, "HIDDEN", qtext) 98 | 99 | print net0.as_yaml() 100 | print net1.as_yaml() 101 | print net2.as_yaml() 102 | print arrv1 103 | 104 | self.assertEquals (6, net2.num_states()) 105 | 106 | # check that states have been changed in ARRV1 107 | q3 = net1.queue_by_name ("Q3") 108 | si = net1.sid_by_name ("_S1_OK") 109 | evts3 = arrv1.events_of_queue (q3) 110 | for e in evts3: 111 | self.assertEquals (si, e.state) 112 | 113 | # the way I've set things up, if it goes through Q3, 114 | # it shouldn't go through hidden 115 | q3 = net2.queue_by_name ("Q3") 116 | qhid = net2.queue_by_name ("HIDDEN") 117 | evts3 = arrv2.events_of_queue (q3) 118 | for e in evts3: 119 | next = e.next_by_task() 120 | self.assertTrue (next.queue() is not qhid) 121 | 122 | def 
setUp (self): 123 | self.net1 = qnetu.qnet_from_text (self.net1_text) 124 | 125 | net1_text = """ 126 | states: 127 | - name: I0 128 | queues: [I0] 129 | successors: [S1] 130 | - name: S1 131 | queues: [Q1, Q2, Q3] 132 | successors: [S2,S3] 133 | - name: S2 134 | queues: [Q20] 135 | - name: S3 136 | queues: [Q30] 137 | queues: 138 | - { name: I0, service: [M, 1.0] } 139 | - { name: Q1, service: [M, 2.0], type: GG1R } 140 | - { name: Q2, service: [M, 2.0], type: GG1R } 141 | - { name: Q3, service: [M, 2.0], type: GG1R } 142 | - { name: Q20, service: [M, 2.0], type: GG1R } 143 | - { name: Q30, service: [M, 2.0], type: GG1R } 144 | """ 145 | 146 | def main(): 147 | if len(sys.argv) > 1: 148 | for test_name in sys.argv[1:]: 149 | suite = unittest.TestLoader().loadTestsFromName("test_modelmgmt.TestModelMgmt.%s" % (test_name,)) 150 | unittest.TextTestRunner(verbosity=2).run(suite) 151 | else: 152 | unittest.main() 153 | 154 | if __name__ == "__main__": 155 | main() 156 | 157 | -------------------------------------------------------------------------------- /src/test_qnetu.py: -------------------------------------------------------------------------------- 1 | import unittest, sys 2 | import qnet 3 | import qnetu 4 | import yaml 5 | import sampling 6 | 7 | oneq_text = """ 8 | states: 9 | - name: INITIAL 10 | queues: [ INITIAL ] 11 | successors: [ STATE1 ] 12 | initial: TRUE 13 | - name: STATE1 14 | queues: [ THE_Q ] 15 | queues: 16 | - { name: INITIAL, service: [M, 0.1 ] } 17 | - { name: THE_Q, service: M } 18 | """ 19 | 20 | non_fifo_arrv_text = """ 21 | !Arrivals 22 | events: 23 | - !Event { arrival: 0, departure: 0.75, queue: INITIAL, state: 0, task: 123 } 24 | - !Event { arrival: 0, departure: 1.25, queue: INITIAL, state: 0, task: 555 } 25 | - !Event { arrival: 0, departure: 1.4, queue: INITIAL, state: 0, task: 7 } 26 | - !Event { arrival: 0, departure: 2.0, queue: INITIAL, state: 0, task: 14 } 27 | - !Event { arrival: 0.75, departure: 1.05, queue: THE_Q, state: 1, 
task: 123 }
- !Event { arrival: 1.25, departure: 1.60, queue: THE_Q, state: 1, task: 555 }
- !Event { arrival: 1.4, departure: 1.55, queue: THE_Q, state: 1, task: 7 }
- !Event { arrival: 2.0, departure: 2.3, queue: THE_Q, state: 1, task: 14 }
"""

# Multifile ("multif") fixtures: three parallel files keyed by event id.
# State rows are: task-id, event-id, queue, state, next-event-id (-1 = none);
# a/d rows are: event-id, time.
multif_state = """
0 0 INITIAL 0 1
0 1 THE_Q 1 -1
1 2 INITIAL 0 3
1 3 THE_Q 1 -1
2 4 INITIAL 0 5
2 5 THE_Q 1 -1
"""

multif_a = """
0 0
1 10
2 0
3 20
4 0
5 15
"""

multif_d = """
0 10
1 17
2 20
3 22
4 15
5 19
"""


# Two-queue tandem network used by the multifile and as_yaml tests.
twoq_text = """
states:
- name: INITIAL
  queues: [ INITIAL ]
  successors: [ S1 ]
  initial: TRUE
- name: S1
  queues: [ Q1 ]
  successors: [ S2 ]
- name: S2
  queues: [ Q2 ]
queues:
- { name: INITIAL, service: [M, 0.1 ] }
- { name: Q1, service: M }
- { name: Q2, service: M }
"""

hidden_multif_state = """
0 0 INITIAL 0 1
0 1 Q1 1 2
0 2 Q2 2 -1
1 3 INITIAL 0 4
1 4 Q1 1 5
1 5 Q2 2 -1
2 6 INITIAL 0 7
2 7 Q1 1 8
2 8 Q2 2 -1
"""

# EVT 4 --> 5 hidden
hidden_multif_a = """
0 0
1 10
2 15
3 0
4 15
6 0
7 30
8 32
"""

hidden_multif_d = """
0 10
1 15
2 17
3 15
5 23
6 30
7 32
8 35
"""

import StringIO

class TestQnetu (unittest.TestCase):

    def test_dump_arrv (self):
        """Round trip: sampled arrivals -> YAML -> arrivals preserves all times."""
        sampling.set_seed (343)
        net = self.oneq
        arrv = net.sample (5)
        arrv.validate()

        yml = yaml.dump (arrv)
        print yml

        arrv2 = qnetu.load_arrivals (net, yml)
        arrv2.validate()

        print arrv
        print arrv2

        for evt in arrv2:
            evt_old = arrv.event (evt.eid)
            self.assertEquals (evt.a, evt_old.a)
            self.assertEquals (evt.d, evt_old.d)
            self.assertAlmostEqual (evt.s, evt_old.s)

#    def test_non_fifo (self):
#        print self.non_fifo_arrv
#        self.assertEquals (6, self.non_fifo_arrv.num_events ())
#        self.non_fifo_arrv.validate()

    def test_multif (self):
        """Reading the three-file format reconstructs events, times, and services."""
        statef = StringIO.StringIO (multif_state)
        af = StringIO.StringIO (multif_a)
        df = StringIO.StringIO (multif_d)

        arrv = qnetu.read_multifile_arrv (self.oneq, statef, af, df)
        print arrv

        self.assertEquals (6, arrv.num_events())
        self.assertEquals (22.0, arrv.event(3).d)
        self.assertEquals (2.0, arrv.event(3).s)
        arrv.validate()

    # Did this ever work?
    def fixme_test_hidden_multif (self):
        """Disabled: multifile input with a hidden (missing) event in a task chain."""
        statef = StringIO.StringIO (hidden_multif_state)
        af = StringIO.StringIO (hidden_multif_a)
        df = StringIO.StringIO (hidden_multif_d)

        arrv = qnetu.read_multifile_arrv (self.twoq, statef, af, df)
        print arrv
        self.assertEquals (3, arrv.num_tasks())
        self.assertEquals (9, arrv.num_events())

        e4 = arrv.event (4)
        e5 = arrv.event (5)
        self.assertTrue (e4.next_by_task() == e5)
        self.assertTrue (e5.previous_by_task() == e4)

        for evt in arrv:
            prev_byt = evt.previous_by_task()
            print "::::::::::\n %s\n %s" % (prev_byt, evt)
            if prev_byt:
                assert prev_byt.next_by_task() == evt

        qnet.gibbs_initialize_via_minilp (self.twoq, arrv)
        arrv.validate()

    # shouldn't technically go here
    def test_as_yaml1 (self):
        """as_yaml output for the two-queue net matches the expected canonical text."""
        text = self.twoq.as_yaml()
        expected = """states:
- name: INITIAL
  queues: [ INITIAL ]
  successors: [ S1 ]
- name: S1
  queues: [ Q1 ]
  successors: [ S2 ]
- name: S2
  queues: [ Q2 ]
queues:
- { name: INITIAL, type: GGk, processors: 1, service: [ M, 0.1 ] }
- { name: Q1, type: GGk, processors: 1, service: [ M, 1.0 ] }
- { name: Q2, type: GGk, processors: 1, service: [ M, 1.0 ] }
"""
        print text
        self.assertEquals (expected, text)


    def setUp (self):
        self.oneq = qnetu.qnet_from_text (oneq_text)
        self.twoq = qnetu.qnet_from_text (twoq_text)
#        self.non_fifo_arrv = qnetu.load_arrivals (self.oneq, non_fifo_arrv_text)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        test_name = sys.argv[1]
        suite = unittest.TestLoader().loadTestsFromName("test_qnetu.TestQnetu.%s" % (test_name,))
        unittest.TextTestRunner(verbosity=2).run(suite)
    else:
        unittest.main()
20 | * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 21 | * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 | */ 25 | 26 | /* @(#) $Jeannot: randomkit.h,v 1.24 2005/07/21 22:14:09 js Exp $ */ 27 | 28 | /* 29 | * Typical use: 30 | * 31 | * { 32 | * rk_state state; 33 | * unsigned long seed = 1, random_value; 34 | * 35 | * rk_seed(seed, &state); // Initialize the RNG 36 | * ... 37 | * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] 38 | * } 39 | * 40 | * Instead of rk_seed, you can use rk_randomseed which will get a random seed 41 | * from /dev/urandom (or the clock, if /dev/urandom is unavailable): 42 | * 43 | * { 44 | * rk_state state; 45 | * unsigned long random_value; 46 | * 47 | * rk_randomseed(&state); // Initialize the RNG with a random seed 48 | * ... 49 | * random_value = rk_random(&state); // Generate random values in [0..RK_MAX] 50 | * } 51 | */ 52 | 53 | /* 54 | * Useful macro: 55 | * RK_DEV_RANDOM: the device used for random seeding. 56 | * defaults to "/dev/urandom" 57 | */ 58 | 59 | #include 60 | 61 | #ifndef _RANDOMKIT_ 62 | #define _RANDOMKIT_ 63 | 64 | #define RK_STATE_LEN 624 65 | 66 | typedef struct rk_state_ 67 | { 68 | unsigned long key[RK_STATE_LEN]; 69 | int pos; 70 | int has_gauss; /* !=0: gauss contains a gaussian deviate */ 71 | double gauss; 72 | 73 | /* The rk_state structure has been extended to store the following 74 | * information for the binomial generator. If the input values of n or p 75 | * are different than nsave and psave, then the other parameters will be 76 | * recomputed. 
RTK 2005-09-02 */ 77 | 78 | int has_binomial; /* !=0: following parameters initialized for 79 | binomial */ 80 | double psave; 81 | long nsave; 82 | double r; 83 | double q; 84 | double fm; 85 | long m; 86 | double p1; 87 | double xm; 88 | double xl; 89 | double xr; 90 | double c; 91 | double laml; 92 | double lamr; 93 | double p2; 94 | double p3; 95 | double p4; 96 | 97 | } 98 | rk_state; 99 | 100 | typedef enum { 101 | RK_NOERR = 0, /* no error */ 102 | RK_ENODEV = 1, /* no RK_DEV_RANDOM device */ 103 | RK_ERR_MAX = 2 104 | } rk_error; 105 | 106 | /* error strings */ 107 | extern char *rk_strerror[RK_ERR_MAX]; 108 | 109 | /* Maximum generated random value */ 110 | #define RK_MAX 0xFFFFFFFFUL 111 | 112 | #ifdef __cplusplus 113 | extern "C" { 114 | #endif 115 | 116 | /* 117 | * Initialize the RNG state using the given seed. 118 | */ 119 | extern void rk_seed(unsigned long seed, rk_state *state); 120 | 121 | /* 122 | * Initialize the RNG state using a random seed. 123 | * Uses /dev/random or, when unavailable, the clock (see randomkit.c). 124 | * Returns RK_NOERR when no errors occurs. 125 | * Returns RK_ENODEV when the use of RK_DEV_RANDOM failed (for example because 126 | * there is no such device). In this case, the RNG was initialized using the 127 | * clock. 128 | */ 129 | extern rk_error rk_randomseed(rk_state *state); 130 | 131 | /* 132 | * Returns a random unsigned long between 0 and RK_MAX inclusive 133 | */ 134 | extern unsigned long rk_random(rk_state *state); 135 | 136 | /* 137 | * Returns a random long between 0 and LONG_MAX inclusive 138 | */ 139 | extern long rk_long(rk_state *state); 140 | 141 | /* 142 | * Returns a random unsigned long between 0 and ULONG_MAX inclusive 143 | */ 144 | extern unsigned long rk_ulong(rk_state *state); 145 | 146 | /* 147 | * Returns a random unsigned long between 0 and max inclusive. 
148 | */ 149 | extern unsigned long rk_interval(unsigned long max, rk_state *state); 150 | 151 | /* 152 | * Returns a random double between 0.0 and 1.0, 1.0 excluded. 153 | */ 154 | extern double rk_double(rk_state *state); 155 | 156 | /* 157 | * fill the buffer with size random bytes 158 | */ 159 | extern void rk_fill(void *buffer, size_t size, rk_state *state); 160 | 161 | /* 162 | * fill the buffer with randombytes from the random device 163 | * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is 164 | * On Unix, if strong is defined, RK_DEV_RANDOM is used. If not, RK_DEV_URANDOM 165 | * is used instead. This parameter has no effect on Windows. 166 | * Warning: on most unixes RK_DEV_RANDOM will wait for enough entropy to answer 167 | * which can take a very long time on quiet systems. 168 | */ 169 | extern rk_error rk_devfill(void *buffer, size_t size, int strong); 170 | 171 | /* 172 | * fill the buffer using rk_devfill if the random device is available and using 173 | * rk_fill if is is not 174 | * parameters have the same meaning as rk_fill and rk_devfill 175 | * Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is 176 | */ 177 | extern rk_error rk_altfill(void *buffer, size_t size, int strong, 178 | rk_state *state); 179 | 180 | /* 181 | * return a random gaussian deviate with variance unity and zero mean. 
182 | */ 183 | extern double rk_gauss(rk_state *state); 184 | 185 | #ifdef __cplusplus 186 | } 187 | #endif 188 | 189 | #endif /* _RANDOMKIT_ */ 190 | -------------------------------------------------------------------------------- /src/test_qstats.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import qnet 3 | import qnetu 4 | import numpy 5 | import numpy.random 6 | import mytime 7 | import netutils 8 | import estimation 9 | import yaml 10 | import arrivals 11 | 12 | import qstats 13 | 14 | import sys 15 | 16 | class TestQstats (unittest.TestCase): 17 | 18 | def test_utilization1 (self): 19 | u = qstats.utilization (self.arrv_mm1) 20 | expected = [0.61937, 0.10858, 0.32649, 0.72082] 21 | self.check_lists (u, expected) 22 | 23 | def test_mean_wait (self): 24 | u = qstats.mean_wait (self.arrv_mm1) 25 | expected = [8.020012, 0.0, 2.16130, 2.58173] 26 | self.check_lists (u, expected) 27 | 28 | def test_mean_response (self): 29 | u = qstats.mean_response_time (self.arrv_mm1) 30 | expected = [12.185512, 3.65106, 4.90602, 7.4294440] 31 | self.check_lists (u, expected) 32 | 33 | def test_mean_wait_pct (self): 34 | u = qstats.mean_wait_percentage (self.arrv_mm1) 35 | expected = [ 0.57401, 0.0, 0.352484, 0.3654092 ] 36 | self.check_lists (u, expected) 37 | 38 | def test_mean_qstats_arriving (self): 39 | u = qstats.mean_qsize_arriving (self.arrv_mm1) 40 | expected = [ 2.0, 0, 0.75, 0.8 ] 41 | self.check_lists (u, expected) 42 | 43 | def test_mean_task_time (self): 44 | u = qstats.mean_task_time (self.arrv_mm1) 45 | self.assertAlmostEquals (12.084472, u, 5) 46 | 47 | def test_mean_task_time_in_range (self): 48 | u = qstats.mean_task_time_in_range (self.arrv_mm1, 5, 10.5) 49 | self.assertAlmostEquals (11.4137, u, 3) 50 | u = qstats.mean_task_time_in_range (self.arrv_mm1, 11, 25) 51 | self.assertAlmostEquals (13.09059, u, 3) 52 | 53 | def test_num_tasks_in_range (self): 54 | print self.arrv_mm1 55 | self.assertEquals 
(3, qstats.num_tasks_in_arrival_range (self.arrv_mm1, 5, 10.5)) 56 | self.assertEquals (2, qstats.num_tasks_in_arrival_range (self.arrv_mm1, 11, 25)) 57 | 58 | def test_mean_obs_response (self): 59 | u = qstats.mean_obs_response (self.arrv_mm1) 60 | print u 61 | print self.arrv_mm1 62 | expected = [12.185512, 3.65106, 5.198517, 8.43051] 63 | self.check_lists (u, expected) 64 | 65 | def test_mean_obs_service (self): 66 | u = qstats.mean_obs_service (self.arrv_mm1) 67 | expected = [4.165502, 3.65106, 3.172583, 5.775833] 68 | self.check_lists (u, expected) 69 | 70 | def test_write_arrv (self): 71 | qstats.write_arrv (sys.stdout, self.arrv_mm1) 72 | 73 | 74 | 75 | def check_lists (self, u, expected): 76 | self.assertEquals (len(u), len(expected)) 77 | for a,b in zip(u,expected): 78 | self.assertAlmostEquals (a, b, 4) 79 | 80 | mm1_text = """ 81 | states: 82 | - name: INITIAL 83 | queues: [ INITIAL ] 84 | successors: [ TIER1 ] 85 | initial: TRUE 86 | - name: TIER1 87 | queues: [ WEB1, WEB2 ] 88 | successors: [ TIER2 ] 89 | - name: TIER2 90 | queues: [ APP1 ] 91 | queues: 92 | - { name: INITIAL, service: [M, 10.0] } 93 | - { name: WEB1, service: [M, 3.0] } 94 | - { name: WEB2, service: [M, 3.0] } 95 | - { name: APP1, service: [M, 8.0] } 96 | 97 | """ 98 | 99 | mm3_text = """ 100 | states: 101 | - name: INITIAL 102 | queues: [ INITIAL ] 103 | successors: [ TIER1 ] 104 | initial: TRUE 105 | - name: TIER1 106 | queues: [ WEB1 ] 107 | successors: [ TIER2 ] 108 | - name: TIER2 109 | queues: [ APP1 ] 110 | queues: 111 | - { name: INITIAL, service: [M, 10.0] } 112 | - { name: WEB1, processors: 3, service: [M, 30.0] } 113 | - { name: APP1, processors: 4, service: [M, 30.0] } 114 | """ 115 | 116 | mm1_arrvls = """ 117 | !Arrivals 118 | events: 119 | - !Event {arrival: 0.0, departure: 5.0573782176061455, obs: 1, queue: INITIAL, state: 0, 120 | task: 0} 121 | - !Event {arrival: 0.0, departure: 9.5433894717630476, obs: 1, queue: INITIAL, state: 0, 122 | task: 1} 123 | - !Event 
def main():
    # Run only the test cases named on the command line; with no
    # arguments, fall back to unittest's own command-line runner.
    names = sys.argv[1:]
    if not names:
        unittest.main()
    else:
        loader = unittest.TestLoader()
        runner = unittest.TextTestRunner(verbosity=2)
        for test_name in names:
            suite = loader.loadTestsFromName("test_qstats.TestQstats.%s" % (test_name,))
            runner.run(suite)
self.assertEquals (ninq.knots(), 58 | zip ([0.5, 1.0, 1.75, 1.8, 1.81, 2.0, 2.25, 2.5], 59 | [1, 2, 3, 4, 3, 2, 1, 0])) 60 | ninq.add_birth_death (0.25, 2.1) 61 | self.assertEquals (ninq.knots(), 62 | zip ([0.25, 0.5, 1.0, 1.75, 1.8, 1.81, 2.0, 2.1, 2.25, 2.5], 63 | [1, 2, 3, 4, 5, 4, 3, 2, 1, 0])) 64 | 65 | def test_add2 (self): 66 | ninq = cninq.Ninq(100) 67 | ninq.add_birth_death (1.0, 2.0) 68 | ninq.add_birth_death (1.75, 2.25) 69 | ninq.add_birth_death (2.1, 2.15) 70 | self.assertEquals (ninq.knots(), 71 | zip([1.0, 1.75, 2.0, 2.1, 2.15, 2.25], 72 | [ 1, 2, 1, 2, 1, 0])) 73 | 74 | def test_N (self): 75 | self.assertEquals (0, self.ninq.N(0.1)) 76 | self.assertEquals (2, self.ninq.N(0.75)) 77 | self.assertEquals (2, self.ninq.N(2.1)) 78 | self.assertEquals (0, self.ninq.N(3)) 79 | 80 | 81 | 82 | def test_overlay1 (self): 83 | overlay = cninq.Overlay(self.ninq) 84 | overlay.move_arrival (0.25, 0.1) 85 | overlay.move_arrival (0.5, 0.7) 86 | overlay.move_departure (1.81, 3.1) 87 | self.assertEquals (0, self.ninq.N(0.1)) 88 | self.assertEquals (2, self.ninq.N(0.75)) 89 | self.assertEquals (2, self.ninq.N(2.1)) 90 | self.assertEquals (0, self.ninq.N(3)) 91 | self.assertEquals (0, overlay.N(0.01)) 92 | self.assertEquals (1, overlay.N(0.1)) 93 | self.assertEquals (1, overlay.N(0.25)) 94 | self.assertEquals (1, overlay.N(0.5)) 95 | self.assertEquals (2, overlay.N(0.7)) 96 | self.assertEquals (2, overlay.N(0.75)) 97 | self.assertEquals (5, overlay.N(1.81)) 98 | self.assertEquals (1, overlay.N(3.0)) 99 | 100 | def test_iterator (self): 101 | iterator = self.ninq.interval_iterator(0,3) 102 | expected = [ (0,0.25,0), (0.25, 0.5, 1), (0.5, 1., 2), (1., 1.75, 3), (1.75, 1.8, 4), (1.80, 1.81, 5), (1.81, 2, 4), (2., 2.1, 3), (2.1, 2.25, 2), (2.25, 2.5, 1), (2.5, 3., 0) ] 103 | kts = self.iterator2knots (iterator) 104 | for t1,t2 in zip (expected, kts): 105 | self.assertEquals (t1,t2) 106 | 107 | def test_overlay_iterator (self): 108 | overlay = cninq.Overlay (self.ninq) 109 | 
overlay.move_arrival (0.25, 0.33) 110 | overlay.move_departure (2.5, 2.75) 111 | iterator = overlay.interval_iterator(0,3) 112 | 113 | expected = [ (0,0.33,0), (0.33, 0.5, 1), (0.5, 1., 2), (1., 1.75, 3), (1.75, 1.8, 4), (1.80, 1.81, 5), (1.81, 2, 4), (2., 2.1, 3), (2.1, 2.25, 2), (2.25, 2.75, 1), (2.75, 3., 0) ] 114 | 115 | kts = self.iterator2knots (iterator) 116 | self.assertEquals (0, kts[0][0]) 117 | self.assertEquals (3, kts[-1][1]) 118 | 119 | N_expected = numpy.sum ( (b-a)*N for a,b,N in expected ) 120 | N_actual = numpy.sum ( (b-a)*N for a,b,N in kts ) 121 | self.assertEquals (N_expected, N_actual) 122 | 123 | def test_overlay_iterator2 (self): 124 | ninq = cninq.Ninq(100) 125 | ninq.add_birth_death (50.0, 55.0) 126 | ninq.add_birth_death (52.0, 53.0) 127 | overlay = cninq.Overlay (ninq) 128 | overlay.move_departure(55.0, 50.0) 129 | iterator = overlay.interval_iterator (50.0, 55.0) 130 | expected = [(50.0, 52.0, 0), (52.0, 53.0, 1), (53.0, 55.0, 0)] 131 | self.assertEquals (expected, self.iterator2knots (iterator)) 132 | 133 | def iterator2knots (self, i): 134 | ret = [] 135 | while i.has_next(): 136 | ret.append ( (i.T0(), i.T1(), i.N()) ) 137 | i.advance() 138 | return ret 139 | 140 | def test_move_arrival2 (self): 141 | ninq = cninq.Ninq(10) 142 | ninq.add_birth_death (1.0, 2.0) 143 | ninq.add_birth_death (1.5, 3.5) 144 | print ninq 145 | ninq.move_arrival (1.5, 2.5) 146 | print ninq 147 | self.assertEquals (1, ninq.N(1.0)) 148 | self.assertEquals (1, ninq.N(1.5)) 149 | self.assertEquals (0, ninq.N(2.25)) 150 | self.assertEquals (1, ninq.N(3.0)) 151 | 152 | def setUp (self): 153 | self.ninq = cninq.Ninq(100) 154 | self.ninq.add_birth_death (1.0, 2.0) 155 | self.ninq.add_birth_death (1.75, 2.25) 156 | self.ninq.add_birth_death (1.8, 1.81) 157 | self.ninq.add_birth_death (0.5, 2.5) 158 | self.ninq.add_birth_death (0.25, 2.1) 159 | 160 | def check_lst (self, lst, expected): 161 | self.assertEquals (len(expected), len(lst)) 162 | for i in 
range(len(expected)): 163 | self.assertEquals (expected[i], lst.item(i)) 164 | 165 | def main(): 166 | if len(sys.argv) > 1: 167 | for test_name in sys.argv[1:]: 168 | suite = unittest.TestLoader().loadTestsFromName("test_cninq.TestCninq.%s" % (test_name,)) 169 | unittest.TextTestRunner(verbosity=2).run(suite) 170 | else: 171 | unittest.main() 172 | 173 | if __name__ == "__main__": 174 | main() 175 | 176 | -------------------------------------------------------------------------------- /src/test_sem1.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import qnet 3 | import qnetu 4 | import queues 5 | import numpy 6 | import numpy.random 7 | import mytime 8 | import netutils 9 | import estimation 10 | import yaml 11 | import arrivals 12 | import qstats 13 | import math 14 | 15 | import test_qnet 16 | 17 | import sys 18 | import sampling 19 | from math import sqrt 20 | 21 | # Test class for stochastic EM on singe-processor queues 22 | # Main tests compare the M-H sampler to a full slice sampler 23 | 24 | class TestSem1 (unittest.TestCase): 25 | 26 | def test_mm1_sem (self): 27 | net = self.mm1 28 | nt = 1000 29 | pct = 0.05 30 | sampling.set_seed (324121) 31 | mu_orig = net.parameters[:] 32 | 33 | queues.set_proposal (0) 34 | estimation.set_sampler ("SLICE") 35 | 36 | arrv = net.sample (nt) 37 | subset = arrv.subset_by_task (pct) 38 | initial = net.gibbs_initialize (subset) 39 | 40 | print "GOLD SERVICE (ALL) ", qstats.mean_service (arrv) 41 | print "GOLD SERVICE (OBS) ", qstats.mean_obs_service (arrv) 42 | 43 | mus, arrvl = estimation.sem (net, initial, 0, 1000, report_fn=estimation.mean_service_fn) 44 | mu_avg = map(numpy.mean, zip(*mus)) 45 | 46 | for mu_star, mu_hat in zip(mu_orig, mu_avg): 47 | print "%.5f %.5f" % (mu_star, mu_hat) 48 | 49 | print "SERVICE_OBS", qstats.mean_obs_service (subset) 50 | 51 | for mu_star, mu_hat in zip(mu_orig, mu_avg): 52 | self.assertTrue (abs (mu_star - mu_hat) < 0.1) 53 | 
    def do_test_stationary (self, net, nt, nrep, niter):
        """Shared driver for the *_stationary tests: check that slice
        resampling leaves the model's stationary distribution unchanged.

        For each of `nrep` replications: sample `nt` tasks from `net`, hide
        all task data (subset fraction 0.0), run `niter` slice-resampling
        sweeps, and re-estimate the parameters.  The estimates, averaged
        over replications, must stay within 0.1 of the generating values.

        net   -- the queueing network under test
        nt    -- number of tasks to simulate per replication
        nrep  -- number of independent replications to average over
        niter -- number of slice-resampling sweeps per replication
        """

        # running sum of the re-estimated parameter vectors
        mu_tot = numpy.zeros(len (net.parameters))
        # remember the generating parameters; net.estimate mutates net.parameters
        mu_orig = net.parameters[:]

        for ri in range(nrep):
            arrv = net.sample (nt)
            # keep 0% of tasks observed; copy events via test_qnet.copy_evt
            # so the subset does not share state with the original sample
            subset = arrv.subset_by_task (0.0, adapt_fn=test_qnet.copy_evt)

            resampled = net.slice_resample (subset, niter)
            net.estimate (resampled)

            # resampled = estimation.slice_resample (net, subset, niter, report_fn=estimation.gen_graphing_report_fn(1.0, "stat%d_" % ri))

            mu_avg = net.parameters[:]
            print "SUBSET\n", subset
            print "RESAMPLED\n", resampled[-1]
            print "THETA ", mu_avg
            mu_tot += numpy.array(mu_avg)

            # restore the generating parameters before the next replication
            net.parameters = mu_orig[:]

        mu_tot = mu_tot / nrep

        for mu_star, mu_hat in zip(net.parameters, mu_tot):
            print "%.5f %.5f" % (mu_star, mu_hat)

        # each averaged estimate must be close to the true parameter
        for mu_star, mu_hat in zip(net.parameters, mu_tot):
            self.assertTrue (abs (mu_star - mu_hat) < 0.1)
def main():
    # With a command-line argument, run just that one named test;
    # otherwise defer to the standard unittest runner.
    if len(sys.argv) <= 1:
        unittest.main()
        return
    test_name = sys.argv[1]
    suite = unittest.TestLoader().loadTestsFromName("test_sem1.TestSem1.%s" % (test_name,))
    unittest.TextTestRunner(verbosity=2).run(suite)
# uses Miller(1980) conjugate prior, with p=1.0, q=r=s=0.1
# NOTE(review): the comment above says q=r=s=0.1, but the code below adds
# 1.0 to each sufficient statistic -- confirm which hyperparameters are
# actually intended.
def sample_gamma_posterior (data, shape0, scale0):
    """Draw one (shape, scale) sample from the posterior of Gamma
    parameters given `data`, using univariate slice sampling started from
    the current values (shape0, scale0).

    data   -- sequence of positive observations
    shape0 -- current shape parameter (slice-sampler starting point)
    scale0 -- current scale parameter (slice-sampler starting point)
    Returns [shape_new, scale_new].
    """
    # sufficient statistics of the Gamma likelihood
    S = numpy.sum(data)
    Slog = numpy.sum(map(numpy.log, data))
    N = len(data)

    # prior-updated hyperparameters
    logP = 1.0+Slog
    q = 1.0 + S
    r = 1.0 + N
    s = 1.0 + N
#    lnf_shape = lambda al: (al-1)*logP + special.gammaln(s*al+1) - (1-al*s)*numpy.log(q) -r*special.gammaln(al)

    # log conditional of the shape given the current scale,
    # sampled by slice sampling on (0, inf)
    lnf_shape = lambda al: (al-1)*logP - q/scale0 - r*special.gammaln(al) - (al*s)*numpy.log(scale0)
    shape_new = sampling.slice (lnf_shape, shape0, thin=5, N=1, lower=0, upper=numpy.inf)[0]
#    print "Sfoo", shape0, lnf_shape(shape0)
#    print "....", shape_new, lnf_shape(shape_new)

#    lnf_rate = lambda beta: -beta*q + s*shape_new*numpy.log(beta)
#    rate_new = sampling.slice (lnf_rate, 1.0/scale0, thin=5, N=1, lower=0, upper=numpy.inf)[0]
    # log conditional of the scale given the freshly sampled shape
    lnf_scale = lambda beta: (shape_new-1)*logP - q/beta - r*special.gammaln(shape_new) - (shape_new*s)*numpy.log(beta)
    scale_new = sampling.slice (lnf_scale, scale0, thin=5, N=1, lower=0, upper=numpy.inf)[0]
#    print "Rfoo", 1.0/scale0, lnf_scale(scale0)
#    print "...", scale_new, lnf_scale(scale_new)

    return [shape_new, scale_new]
# ---------------------------------------------------------------------------
# Proposal distributions for the event-time samplers.  Each builder returns
# a pwfun.Pwfun that approximates (in log space) the local conditional
# density of one event time, for use as an M-H proposal.
# ---------------------------------------------------------------------------

def uniform_proposal (L, U):
    """Flat proposal on [L, U]; when U is infinite, use the log-density -x
    (an Exp(1) tail) so the proposal stays normalizable."""
    if U < numpy.inf:
        return pwfun.Pwfun ([L, U], [lambda x: 1.0], [lambda x: 0])
    else:
        return pwfun.Pwfun ([L, U], [lambda x: -x], [lambda x: -1])

# number of segments used by the piecewise proposals below
NBARS=10

def _const_fn (v):
    """Return a function that ignores its argument and yields v.

    Binding v here avoids the late-binding-closure bug: lambdas created in
    a loop all share the loop variable and would return only its final
    value."""
    return lambda d: v

def _linear_fn (v, x, dx):
    """Return the linearization d -> v + dx*(d - x) with v, x, dx bound now."""
    return lambda d: v + dx*(d-x)

def bar_pair_proposal (arrv, e0, e1):
    """Histogram ("bar") proposal for the shared time between e0 and e1:
    NBARS constant segments over [e0.a, e1.d], each at the summed
    departure+arrival log-likelihood evaluated at the segment's left knot."""
    dfn = e0.queue().pyDepartureLik (arrv, e0)
    afn = e1.queue().pyArrivalLik (arrv, e1)

    L = e0.a
    U = e1.d
    eps = (U-L)/NBARS

    knots = [ L + i*eps for i in range(NBARS) ]
    vals = [ dfn(x) + afn(x) for x in knots ]
    # BUG FIX: the original used [lambda d: v for v in vals]; those closures
    # all captured the loop variable, so every segment returned vals[-1].
    fns = [ _const_fn(v) for v in vals ]
    derivs = [lambda d: 0] * NBARS
    knots.append(U)

    return pwfun.Pwfun (knots, fns, derivs)

def bar_final_proposal (arrv, e0):
    """Proposal for a final departure: uniform with exponential tail above e0.a."""
    return uniform_proposal (e0.a, numpy.inf)

def zoid_pair_proposal (arrv, e0, e1):
    """Trapezoid ("zoid") proposal: piecewise-linear (tangent) approximation
    of the summed departure+arrival log-likelihood over the intersection of
    the two functions' supports."""
    dfn = e0.queue().pyDepartureLik (arrv, e0)
    afn = e1.queue().pyArrivalLik (arrv, e1)

    L0, U0 = afn.range()
    L1, U1 = dfn.range()
    L = max(L0,L1)
    U = min(U0,U1)

    eps = (U-L)/NBARS

    knots = [ L + i*eps for i in range(1,NBARS) ]
    vals = [ dfn(x) + afn(x) for x in knots ]
    # undefined points are treated as log(0)
    for i, v in enumerate(vals):
        if numpy.isnan(v):
            vals[i] = -numpy.inf

    dxs = [ dfn.fprime(x) + afn.fprime(x) for x in knots ]
    # BUG FIX: bind each slope now; [lambda d: dx for dx in dxs] made every
    # derivative segment report dxs[-1].
    derivs = [ _const_fn(dx) for dx in dxs ]
    fns = [ _linear_fn(v, x, dx) for v, x, dx in zip(vals, knots, dxs) ]
    knots.append(U)

    return pwfun.Pwfun (knots, fns, derivs)

def zoid_final_proposal (arrv, e0):
    """Trapezoid proposal for a final departure: follow the departure
    log-likelihood with tangent segments (doubling the step each time)
    until its slope turns negative, then close the range at U."""
    dfn = e0.queue().pyDepartureLik (arrv, e0)
    L, U = dfn.range()

    eps = 0.1
    x = L+eps

    knots = []
    vals = []
    dxs = []
    last_dx = 0

    # walk right until the log-density starts decreasing
    while last_dx >= 0:
        knots.append(x)
        vals.append(dfn(x))
        dxs.append(dfn.fprime(x))
        last_dx = dxs[-1]
        x += eps
        eps *= 2

    # BUG FIX: per-value binding, as in zoid_pair_proposal
    derivs = [ _const_fn(dx) for dx in dxs ]
    fns = [ _linear_fn(v, x, dx) for v, x, dx in zip(vals, knots, dxs) ]
    knots.append(U)

    return pwfun.Pwfun (knots, fns, derivs)
# for unit tests
def compute_hist (lst):
    """Return an 11-point summary of lst: the 0%,10%,...,90% order
    statistics followed by the maximum.  Returns [] for empty input.

    Floor division is used for the decile indexes so the function behaves
    identically under Python 2 and Python 3 (the original `/` produced
    float indexes on Python 3 and raised TypeError); the empty-input guard
    prevents an IndexError on len(lst)-1 == -1.
    """
    vals = sorted (lst)          # leaves the caller's list untouched
    if not vals:
        return []
    idxs = [ i*len(vals)//10 for i in range(10) ]
    idxs.append (len(vals)-1)
    return [ vals[i] for i in idxs ]
# works just like NinQueue
class Overlay:
    """A copy-on-write view of a NinQueue (number-in-queue process).

    Instead of mutating the underlying NinQueue, moved arrival/departure
    times are recorded in two auxiliary ArrivalProcess objects (`plus` and
    `minus`) and N(t) is corrected on the fly.  This makes speculative
    moves cheap to apply and to discard.
    """

    def __init__(self, ninq):
        # the wrapped NinQueue; never modified through this class
        self.inner = ninq
        # times whose contribution is added on top of `inner`
        self.plus = ArrivalProcess()
        # times whose contribution is subtracted from `inner`
        self.minus = ArrivalProcess()

    def __repr__(self):
        # dump N(t) at every knot of the base process and every overlay edit
        xs = [ tup[0] for tup in self.inner.knots() ]
        xs.extend (self.plus.times)
        xs.extend (self.minus.times)
        xs.sort()

        foo = ""
        for x in xs: foo += "%.5f %d\n" % (x, self.N(x))

        return "OVERLAY\n%s\n---\n%s\n---\n%s\n---\n%s/////////" % (foo, self.inner, self.plus, self.minus)

    def move_arrival (self, a0, a1):
        # moving an arrival a0 -> a1: the new time raises N after a1 (plus)
        # and the old time's contribution is cancelled (minus)
        self.plus.add_time (a1)
        self.minus.add_time (a0)

    def move_departure (self, a0, a1):
        # departures *decrease* N, so the bookkeeping is mirrored:
        # cancel the old departure (plus) and apply the new one (minus)
        self.minus.add_time (a1)
        self.plus.add_time (a0)

    def N (self, t):
        # number in queue at time t = base process + overlay correction
        delta = self.plus.nprev(t) - self.minus.nprev(t)
        return self.inner.N(t) + delta

    def knots_in_range (self, l, r):
        # Build a map time -> delta(N) from all four event sources, then
        # turn the sorted deltas into cumulative N values over [l, r].
        # NOTE(review): the first loop overwrites with 1 where the other
        # three loops accumulate -- confirm that coincident base arrivals
        # cannot occur, otherwise this undercounts.
        knots = dict()
        for a in self.inner.a.in_range (l,r):
            knots[a] = 1
        for d in self.inner.d.in_range (l,r):
            v = knots.get(d, 0)
            knots[d] = v - 1
        for a in self.plus.in_range (l,r):
            v = knots.get(a, 0)
            knots[a] = v + 1
        for d in self.minus.in_range (l,r):
            v = knots.get(d, 0)
            knots[d] = v - 1
        if r not in knots: knots[r] = 0
        # Python 2: items() returns a list, sorted in place by time
        lst = knots.items()
        lst.sort()
        # N just before l, excluding any delta sitting exactly at l
        n0 = self.N(l) - knots.get(l,0)
        # deltas_to_cumsum is defined elsewhere in this module (not visible
        # here); presumably it converts (time, delta) pairs into (time, N)
        # knots starting from n0 -- verify against its definition.
        return deltas_to_cumsum (lst, l, n0)
def move_time (self, t0, t1): 128 | # works no matter t0 < t1 or reverse: first delete, then insert 129 | # first delete 130 | ix = bisect.bisect (self.times, t0) 131 | assert abs(self.times[ix-1] - t0) < 1e-13, "Off moving %.10f --> %.10f\n%s" % (t0, t1, self) 132 | del self.times[ix-1] 133 | # then insert 134 | self.add_time (t1) 135 | 136 | def contains (self, t): 137 | ix = bisect.bisect (self.times, t) 138 | return (ix > 0) and (self.times[ix-1] == t) 139 | 140 | def nprev (self, t): 141 | return bisect.bisect_right (self.times, t) 142 | 143 | def in_range (self, l, r): 144 | ix0 = bisect.bisect_left (self.times, l) 145 | ix1 = bisect.bisect_right (self.times, r) 146 | return self.times[ix0:ix1] 147 | 148 | class SortedList: 149 | 150 | def __init__ (self): 151 | self.k = [] 152 | self.k2e = dict() 153 | 154 | def __repr__ (self): 155 | return "SORTED LIST\n%s\n%s\n//////////\n" % ("\n".join(map(str, self.k)), self.k2e) 156 | 157 | def add (self, k, evt): 158 | if k in self.k2e: 159 | self.k2e[k].append (evt) 160 | else: 161 | self.k2e[k] = [evt] 162 | bisect.insort (self.k, k) 163 | 164 | def in_range (self, l, r): 165 | i0 = bisect.bisect_left (self.k, l) 166 | i1 = bisect.bisect_right (self.k, r) 167 | lst = [] 168 | for k0 in self.k[i0:i1]: 169 | lst.extend (self.k2e[k0]) 170 | return lst 171 | 172 | def remove (self, k, eid): 173 | assert isinstance(eid, int), "Bad EID %s" % eid 174 | ix = bisect.bisect (self.k, k) 175 | assert self.k[ix-1] == k, "Tried to remove C %.15f from\n%s" % (k, self) 176 | eixl = [ e.eid for e in self.k2e[k] ] 177 | if eid in eixl: 178 | eix = eixl.index (eid) 179 | del self.k2e[k][eix] 180 | del self.k[ix-1] 181 | if len(self.k2e[k]) == 0: del self.k2e[k] 182 | # for k0 in self.k: 183 | # assert k0 in self.k2e, "Mismatch deleting %s (ix was %d)\n %s\n %s" % (k, ix, self.k, self.k2e) 184 | 185 | def contains_key (self, k): 186 | return k in self.k2e 187 | 188 | def contains_kv (self, k, v): 189 | return (k in self.k2e) and (v in 
self.k2e[k]) 190 | 191 | def __len__ (self): return len(self.k) 192 | 193 | class EventSet: 194 | def __init__ (self): 195 | self.eids = dict() 196 | self.evts = [] 197 | def add (self, evt): 198 | self.eids[evt.eid] = 1 199 | self.evts.append (evt) 200 | def contains(self, evt): 201 | return evt.eid in self.eids 202 | def items (self): return self.evts 203 | 204 | 205 | 206 | # utilitites 207 | 208 | def deltas_to_cumsum (lst, t0, N0): 209 | Ncur = N0 210 | result = [ (t0, N0) ] 211 | for t,delta in lst: 212 | Ncur += delta 213 | result.append ( (t,Ncur) ) 214 | return result 215 | 216 | def cmp_knots (k0, k1): 217 | return cmp(k0[0], k1[0]) or - cmp(k0[1], k1[1]) 218 | -------------------------------------------------------------------------------- /src/cdist.h: -------------------------------------------------------------------------------- 1 | /* Copyright 2005 Robert Kern (robert.kern@gmail.com) 2 | * 3 | * Permission is hereby granted, free of charge, to any person obtaining a 4 | * copy of this software and associated documentation files (the 5 | * "Software"), to deal in the Software without restriction, including 6 | * without limitation the rights to use, copy, modify, merge, publish, 7 | * distribute, sublicense, and/or sell copies of the Software, and to 8 | * permit persons to whom the Software is furnished to do so, subject to 9 | * the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included 12 | * in all copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
17 | * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 18 | * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 19 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 20 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | */ 22 | 23 | #ifndef _RK_DISTR_ 24 | #define _RK_DISTR_ 25 | 26 | #include "randomkit.h" 27 | 28 | #ifdef __cplusplus 29 | extern "C" { 30 | #endif 31 | 32 | /* References: 33 | * 34 | * Devroye, Luc. _Non-Uniform Random Variate Generation_. 35 | * Springer-Verlag, New York, 1986. 36 | * http://cgm.cs.mcgill.ca/~luc/rnbookindex.html 37 | * 38 | * Kachitvichyanukul, V. and Schmeiser, B. W. Binomial Random Variate 39 | * Generation. Communications of the ACM, 31, 2 (February, 1988) 216. 40 | * 41 | * Hoermann, W. The Transformed Rejection Method for Generating Poisson Random 42 | * Variables. Insurance: Mathematics and Economics, (to appear) 43 | * http://citeseer.csail.mit.edu/151115.html 44 | * 45 | * Marsaglia, G. and Tsang, W. W. A Simple Method for Generating Gamma 46 | * Variables. ACM Transactions on Mathematical Software, Vol. 26, No. 3, 47 | * September 2000, Pages 363–372. 48 | */ 49 | 50 | /* Normal distribution with mean=loc and standard deviation=scale. */ 51 | extern double rk_normal(rk_state *state, double loc, double scale); 52 | 53 | /* Standard exponential distribution (mean=1) computed by inversion of the 54 | * CDF. */ 55 | extern double rk_standard_exponential(rk_state *state); 56 | 57 | /* Exponential distribution with mean=scale. */ 58 | extern double rk_exponential(rk_state *state, double scale); 59 | 60 | /* Uniform distribution on interval [loc, loc+scale). */ 61 | extern double rk_uniform(rk_state *state, double loc, double scale); 62 | 63 | /* Standard gamma distribution with shape parameter. 64 | * When shape < 1, the algorithm given by (Devroye p. 304) is used. 65 | * When shape == 1, a Exponential variate is generated. 
66 | * When shape > 1, the small and fast method of (Marsaglia and Tsang 2000) 67 | * is used. 68 | */ 69 | extern double rk_standard_gamma(rk_state *state, double shape); 70 | 71 | /* Gamma distribution with shape and scale. */ 72 | extern double rk_gamma(rk_state *state, double shape, double scale); 73 | 74 | /* Beta distribution computed by combining two gamma variates (Devroye p. 432). 75 | */ 76 | extern double rk_beta(rk_state *state, double a, double b); 77 | 78 | /* Chi^2 distribution computed by transforming a gamma variate (it being a 79 | * special case Gamma(df/2, 2)). */ 80 | extern double rk_chisquare(rk_state *state, double df); 81 | 82 | /* Noncentral Chi^2 distribution computed by modifying a Chi^2 variate. */ 83 | extern double rk_noncentral_chisquare(rk_state *state, double df, double nonc); 84 | 85 | /* F distribution computed by taking the ratio of two Chi^2 variates. */ 86 | extern double rk_f(rk_state *state, double dfnum, double dfden); 87 | 88 | /* Noncentral F distribution computed by taking the ratio of a noncentral Chi^2 89 | * and a Chi^2 variate. */ 90 | extern double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc); 91 | 92 | /* Binomial distribution with n Bernoulli trials with success probability p. 93 | * When n*p <= 30, the "Second waiting time method" given by (Devroye p. 525) is 94 | * used. Otherwise, the BTPE algorithm of (Kachitvichyanukul and Schmeiser 1988) 95 | * is used. */ 96 | extern long rk_binomial(rk_state *state, long n, double p); 97 | 98 | /* Binomial distribution using BTPE. */ 99 | extern long rk_binomial_btpe(rk_state *state, long n, double p); 100 | 101 | /* Binomial distribution using inversion and chop-down */ 102 | extern long rk_binomial_inversion(rk_state *state, long n, double p); 103 | 104 | /* Negative binomial distribution computed by generating a Gamma(n, (1-p)/p) 105 | * variate Y and returning a Poisson(Y) variate (Devroye p. 543). 
*/ 106 | extern long rk_negative_binomial(rk_state *state, long n, double p); 107 | 108 | /* Poisson distribution with mean=lam. 109 | * When lam < 10, a basic algorithm using repeated multiplications of uniform 110 | * variates is used (Devroye p. 504). 111 | * When lam >= 10, algorithm PTRS from (Hoermann 1992) is used. 112 | */ 113 | extern long rk_poisson(rk_state *state, double lam); 114 | 115 | /* Poisson distribution computed by repeated multiplication of uniform variates. 116 | */ 117 | extern long rk_poisson_mult(rk_state *state, double lam); 118 | 119 | /* Poisson distribution computed by the PTRS algorithm. */ 120 | extern long rk_poisson_ptrs(rk_state *state, double lam); 121 | 122 | /* Standard Cauchy distribution computed by dividing standard gaussians 123 | * (Devroye p. 451). */ 124 | extern double rk_standard_cauchy(rk_state *state); 125 | 126 | /* Standard t-distribution with df degrees of freedom (Devroye p. 445 as 127 | * corrected in the Errata). */ 128 | extern double rk_standard_t(rk_state *state, double df); 129 | 130 | /* von Mises circular distribution with center mu and shape kappa on [-pi,pi] 131 | * (Devroye p. 476 as corrected in the Errata). */ 132 | extern double rk_vonmises(rk_state *state, double mu, double kappa); 133 | 134 | /* Pareto distribution via inversion (Devroye p. 262) */ 135 | extern double rk_pareto(rk_state *state, double a); 136 | 137 | /* Weibull distribution via inversion (Devroye p. 262) */ 138 | extern double rk_weibull(rk_state *state, double a); 139 | 140 | /* Power distribution via inversion (Devroye p.
262) */ 141 | extern double rk_power(rk_state *state, double a); 142 | 143 | /* Laplace distribution */ 144 | extern double rk_laplace(rk_state *state, double loc, double scale); 145 | 146 | /* Gumbel distribution */ 147 | extern double rk_gumbel(rk_state *state, double loc, double scale); 148 | 149 | /* Logistic distribution */ 150 | extern double rk_logistic(rk_state *state, double loc, double scale); 151 | 152 | /* Log-normal distribution */ 153 | extern double rk_lognormal(rk_state *state, double mean, double sigma); 154 | 155 | /* Rayleigh distribution */ 156 | extern double rk_rayleigh(rk_state *state, double mode); 157 | 158 | /* Wald distribution */ 159 | extern double rk_wald(rk_state *state, double mean, double scale); 160 | 161 | /* Zipf distribution */ 162 | extern long rk_zipf(rk_state *state, double a); 163 | 164 | /* Geometric distribution */ 165 | extern long rk_geometric(rk_state *state, double p); 166 | extern long rk_geometric_search(rk_state *state, double p); 167 | extern long rk_geometric_inversion(rk_state *state, double p); 168 | 169 | /* Hypergeometric distribution */ 170 | extern long rk_hypergeometric(rk_state *state, long good, long bad, long sample); 171 | extern long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample); 172 | extern long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample); 173 | 174 | /* Triangular distribution */ 175 | extern double rk_triangular(rk_state *state, double left, double mode, double right); 176 | 177 | /* Logarithmic series distribution */ 178 | extern long rk_logseries(rk_state *state, double p); 179 | 180 | #ifdef __cplusplus 181 | } 182 | #endif 183 | 184 | 185 | #endif /* _RK_DISTR_ */ 186 | -------------------------------------------------------------------------------- /src/test_distributions.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import distributions 3 | import numpy 4 | from numpy import 
random 5 | import pwfun 6 | 7 | import netutils 8 | import sampling 9 | 10 | class TestDistributions (unittest.TestCase): 11 | 12 | def test_exponential (self): 13 | exp = distributions.Exponential (scale=0.5) 14 | xs = exp.sample (100000) 15 | mu = numpy.mean (xs) 16 | self.assertTrue (abs(mu - 0.5) < 0.01, "Mean off: was %.4f expected 0.5" % mu) 17 | 18 | def test_exponential_estimate (self): 19 | xs = [ 1.25, 1.5, 1.75 ] 20 | exp = distributions.Exponential () 21 | exp.estimate (xs) 22 | self.assertEquals (1.5, exp.mean(), 5) 23 | 24 | def test_gamma_sample (self): 25 | exp1 = distributions.Exponential (1.5) 26 | gamm = distributions.Gamma (shape=2, scale=1.5) 27 | N = 1000 28 | smp1 = gamm.sample (N) 29 | smp2a = exp1.sample (N) 30 | smp2b = exp1.sample (N) 31 | smp2 = numpy.array(smp2a) + numpy.array(smp2b) 32 | smp1.sort() 33 | smp2.sort() 34 | netutils.check_quantiles (self, smp1, smp2, N) 35 | 36 | def test_gamma_estimate (self): 37 | gamm = distributions.Gamma (shape=3.5, scale=0.75) 38 | N = 100000 39 | smp = gamm.sample (N) 40 | gamm.estimate (smp) 41 | self.assertTrue (abs(3.5 - gamm.shape) < 0.1, "Estimation wrong: expected %.4f was %.4f" % (3.5, gamm.shape)) 42 | self.assertTrue (abs(0.75 - gamm.scale) < 0.1, "Estimation wrong: expected %.4f was %.4f" % (0.75, gamm.shape)) 43 | 44 | def test_gamma_pdf (self): 45 | gamm = distributions.Gamma (shape=3, scale=0.5) 46 | self.assertAlmostEqual (-1.227411, gamm.lpdf(2.0), 5) 47 | 48 | def test_lognormal_estimate (self): 49 | ln = distributions.LogNormal() 50 | smp = ln.sample (1000) 51 | ln.estimate (smp) 52 | self.assertTrue (abs(ln.meanlog) < 0.1) 53 | self.assertTrue (abs(1 - ln.sdlog) < 0.1) 54 | 55 | def test_lognormal_estimate2 (self): 56 | ln = distributions.LogNormal(meanlog=-1, sdlog=2) 57 | smp = ln.sample (1000) 58 | ln.estimate (smp) 59 | self.assertTrue (abs(-1 - ln.meanlog) < 0.1, "Error in mean: Expected %s, got %s" % (-1, ln.meanlog)) 60 | self.assertTrue (abs(2 - ln.sdlog) < 0.1, "Error in 
sd: expected %s, got %s" % (2, ln.sdlog)) 61 | 62 | def test_lognormal_pdf (self): 63 | ln = distributions.LogNormal(meanlog=1, sdlog=0.5) 64 | self.assertAlmostEqual (ln.lpdf(1), -2.225791, 5) 65 | 66 | ln2 = distributions.LogNormal(meanlog=0.9, sdlog=0.5) 67 | ps = [ -18.436309, -6.779582, -3.695824, -2.321165, -1.616153, -1.245645, -1.065585, -1.003215 ] 68 | xs = [ 0.10, 0.35, 0.60, 0.85, 1.10, 1.35, 1.60, 1.85 ] 69 | for p,x in zip(ps,xs): 70 | self.assertAlmostEqual (ln2.lpdf(x), p, 4) 71 | 72 | def test_lognormal_boundary (self): 73 | ln = distributions.LogNormal(meanlog=1, sdlog=0.5) 74 | print ln.lpdf(0) 75 | self.assertTrue (numpy.isinf (ln.lpdf(0))) 76 | 77 | def test_exponential_sample_parameters (self): 78 | sampling.set_seed(3242) 79 | for rep in range(10): 80 | mu = random.exponential(1.0) 81 | f = distributions.Exponential (mu) 82 | x = f.sample(10000) 83 | params = [ f.sample_parameters(x)[0] for i in xrange(10000) ] 84 | self.assertTrue (abs(mu - numpy.mean(params)) < 0.03, "Mismatch: MU %s params %s" % (mu, numpy.mean(params))) 85 | 86 | def test_ln_sample_parameters (self): 87 | sampling.set_seed(3242) 88 | for rep in range(10): 89 | mu = random.uniform(-1, 1) 90 | sd = random.uniform(0.5, 1.25) 91 | print rep, mu, sd 92 | f = distributions.LogNormal (mu, sd) 93 | x = f.sample(1000) 94 | print numpy.mean(map(numpy.log, x)) 95 | params = [ f.sample_parameters(x) for i in xrange(10000) ] 96 | mu1 = [ p[0] for p in params ] 97 | sd1 = [ p[1] for p in params ] 98 | print numpy.mean(mu1) 99 | print numpy.mean(sd1) 100 | self.assertTrue (abs(mu - numpy.mean(mu1)) < 0.1, "Mismatch: MU %s params %s" % (mu, numpy.mean(mu1))) 101 | self.assertTrue (abs(sd - numpy.mean(sd1)) < 0.1, "Mismatch: std %s params %s" % (sd, numpy.mean(sd1))) 102 | 103 | 104 | def test_gamma_sample_parameters (self): 105 | sampling.set_seed(3242) 106 | for rep in range(1): 107 | shape = random.uniform(0.5, 3.0) 108 | scale = random.uniform(0.0, 10.0) 109 | print "REP", rep, shape, 
scale 110 | 111 | f = distributions.Gamma (shape, scale) 112 | x = f.sample(1000) 113 | params = [ f.sample_parameters(x) for i in xrange(1000) ] 114 | shape1 = [ p[0] for p in params ] 115 | scale1 = [ p[1] for p in params ] 116 | for p in params: print "P", " ".join (map(str,p)), p[0]*p[1] 117 | 118 | self.assertTrue (abs(scale - numpy.mean(scale1)) < 0.03, "Mismatch: MU %s params %s" % (scale, numpy.mean(scale1))) 119 | self.assertTrue (abs(shape - numpy.mean(shape1)) < 0.03, "Mismatch: SHAPE %s params %s" % (shape, numpy.mean(shape1))) 120 | 121 | def test_exponential_kernel (self): 122 | sampling.set_seed(3242) 123 | N = 100 124 | for rep in range(10): 125 | # pick an Exponential distribution, arbitrarily 126 | f = distributions.Exponential (1.0) 127 | self.do_test_kernel (f, N) 128 | 129 | def test_gamma_kernel (self): 130 | sampling.set_seed(3242) 131 | N = 100 132 | for rep in range(10): 133 | f = distributions.Gamma (2.0, 5.0) 134 | self.do_test_kernel (f, N) 135 | f = distributions.Gamma (5.0, 0.75) 136 | self.do_test_kernel (f, N) 137 | 138 | def test_ln_kernel (self): 139 | sampling.set_seed(3242) 140 | N = 100 141 | for rep in range(10): 142 | f = distributions.LogNormal (0.5, 0.25) 143 | self.do_test_kernel (f, N) 144 | f = distributions.LogNormal (0.0, 1.0) 145 | self.do_test_kernel (f, N) 146 | 147 | def do_test_kernel (self, f, N): 148 | data = f.sample (10) 149 | 150 | # find special point mu1, check E_mu T(mu1 <== mu) = p(mu1) 151 | np = len(f.parameters) 152 | s_mu = numpy.zeros((np, N)) 153 | for i in range(N): 154 | s_mu[:,i] = f.sample_parameters(data) 155 | theta_mean = s_mu.sum(axis=1) / N 156 | theta_std = 0.2 * theta_mean 157 | print "Data MEAN %.5f SD: %.5f" % (numpy.mean(data), numpy.std(data)) 158 | print "Bayes sample MEAN %s SD: %s" % (theta_mean, theta_std) 159 | 160 | # assumes some kind of CLT-like concentration has happend 161 | k01 = f.parameter_kernel(theta_mean - 2*theta_std, theta_mean, data) 162 | k10 = 
f.parameter_kernel(theta_mean, theta_mean - 2*theta_std, data) 163 | print "K(mu-1SD: %s ==> mu: %s) %.5f" % (theta_mean - 2*theta_std, theta_mean, k01) 164 | print "K(mu ==> mu-1SD)", k10 165 | self.assertTrue (k01 >= k10) 166 | 167 | k21 = f.parameter_kernel(theta_mean + 2*theta_std, theta_mean, data) 168 | k12 = f.parameter_kernel(theta_mean, theta_mean + 2*theta_std, data) 169 | print "K(mu+1SD ==> mu)", k21 170 | print "K(mu ==> mu+1SD)", k12 171 | self.assertTrue (k21 >= k12) 172 | # self.assertTrue (abs(k01 - k21) <= 0.01) 173 | 174 | def test_ig_pdf (self): 175 | ig = distributions.InverseGamma (shape=5.0, scale=2.0) 176 | self.assertAlmostEquals (-5.896807, ig.lpdf(0.1), 5) 177 | 178 | import sys 179 | def main(): 180 | if len(sys.argv) > 1: 181 | for test_name in sys.argv[1:]: 182 | suite = unittest.TestLoader().loadTestsFromName("test_distributions.TestDistributions.%s" % (test_name,)) 183 | unittest.TextTestRunner(verbosity=2).run(suite) 184 | else: 185 | unittest.main() 186 | 187 | if __name__ == "__main__": 188 | main() 189 | 190 | 191 | -------------------------------------------------------------------------------- /src/qstats.py: -------------------------------------------------------------------------------- 1 | # 2 | # List of useful statistics that could be caluclated from an Arrivals object. 
#

import numpy
import arrivals

## utilities

def averaging (stat):
    """Converts a statistic for a single arrival to one that
    averages over a list of arrivals."""
    return lambda arrvl: numpy.mean ([ stat(a) for a in arrvl ])

def aggregate_by_q (stat, arrv, aggregator=numpy.mean, filter=lambda evt: True):
    """Converts a statistic that returns a value for a single event
    to a function that maps an Arrivals to a list of the mean statistic
    value for each queue."""
    X = [ list() for i in xrange(arrv.num_queues()) ]
    for evt in arrv:
        if filter(evt):
            X[evt.qid].append (stat(evt))
    return [ aggregator(xs) for xs in X ]

def aggregate_by_task (stat, arrv, aggregator=numpy.mean, filter=lambda tid: True):
    """Aggregate a per-task statistic over all tasks in arrv that pass
    filter (filter receives the task's event list).  Returns 0, not the
    aggregator's empty-input value, when no task qualifies."""
    X = []
    for tid in arrv.all_tasks():
        if filter(arrv.events_of_task (tid)):
            X.append (stat(arrv.events_of_task(tid)))
    if len(X) == 0: return 0
    return aggregator (X)


# response time of one event: departure minus arrival
def response_time (evt): return evt.d - evt.a

# an event is fully observed iff both its arrival and departure are observed
def is_obs (e):
    return e.obs_a and e.obs_d

## statistics

def utilization (arrv):
    """Returns observed utilization of all queues in arrv."""
    # total service time per queue, divided by the last departure time
    max_d = 0
    tot_time = [0] * arrv.num_queues()
    for evt in arrv:
        max_d = max (evt.d, max_d)
        tot_time[evt.qid] += evt.s
    return [ x / max_d for x in tot_time ]

def mean_wait (arrv):
    """Returns mean waiting time for each queue in arrv."""
    return aggregate_by_q (arrivals.Event.wait, arrv)

def mean_response_time (arrv):
    return aggregate_by_q (response_time, arrv)

def std_response_time (arrv):
    return aggregate_by_q (response_time, arrv, aggregator=numpy.std)

def mean_service (arrv):
    return aggregate_by_q (lambda evt: evt.s, arrv)

def std_service (arrv):
    return aggregate_by_q (lambda evt: evt.s, arrv, aggregator=numpy.std)

def max_service (arrv):
    return aggregate_by_q (lambda evt: evt.s, arrv, aggregator=max)

def mean_obs_response (arrv):
    return aggregate_by_q (response_time, arrv, filter=is_obs)

def std_obs_response (arrv):
    return aggregate_by_q (response_time, arrv, aggregator=numpy.std, filter=is_obs)

def mean_obs_service (arrv):
    return aggregate_by_q (lambda evt: evt.s, arrv, filter=is_obs)

def mean_latent_service (arrv):
    return aggregate_by_q (lambda evt: evt.s, arrv, filter=lambda evt: not is_obs(evt))

def nobs_by_queue (arrv):
    # count (not average) of fully-observed events per queue
    return aggregate_by_q (lambda evt: 1, arrv, filter=is_obs, aggregator=numpy.sum)

def std_obs_service (arrv):
    return aggregate_by_q (lambda evt: evt.s, arrv, aggregator=numpy.std, filter=is_obs)

def mean_wait_percentage (arrv):
    """Returns list of, for each Q, average over events of wait time / response time."""
    def wait_pct (evt):
        r = evt.d - evt.a
        # guard against zero response time
        if r < 1e-50:
            return 0.0
        else:
            return 1.0 - (evt.s / (evt.d - evt.a))
    return aggregate_by_q (wait_pct, arrv)


def _inner_qsize_for_qid (arrv, qi):
    """Build the sorted event-time list for queue qi, as mutable tuples
    [t, +/-1, evt, N-before-event]; +1 marks an arrival, -1 a departure."""
    evts = arrv.events_of_qid (qi)
    times = [ [e.a, 1, e] for e in evts ]
    times.extend ( [e.d, -1, e] for e in evts )
    times.sort()

    # append the queue size seen *before* each event is applied
    cum = 0
    for tup in times:
        tup.append (cum)
        cum += tup[1]

    return times

def all_qsize_for_qid (arrv, qi):
    return [ (t,N,evt) for t,foo,evt,N in _inner_qsize_for_qid(arrv,qi) ]

def mean_qsize_for_qid (arrv, qi):
    # average queue length over arrival events only (tup[1] == 1)
    times = _inner_qsize_for_qid (arrv, qi)
    return numpy.mean([ tup[3] for tup in times if tup[1] == 1 ])

def mean_qsize_arriving (arrv):
    """Mean queue length as seen by an arriving customer.
    Return list with one value for each q in network."""
    return [ mean_qsize_for_qid(arrv,qi) for qi in xrange(arrv.num_queues()) ]

def mean_task_time (arrv):
    # NOTE(review): A is min over departures (e.d), not arrivals (e.a).
    # The same pattern recurs in the two functions below, so it may be
    # intentional, but it reads like a bug — confirm against callers.
    def time_of_evtl (evtl):
        A = min(e.d for e in evtl)
        D = max(e.d for e in evtl)
        return D-A
    return aggregate_by_task (time_of_evtl, arrv)

def mean_task_time_in_range (arrv, l, r):
    def time_of_evtl (evtl):
        A = min(e.d for e in evtl)
        D = max(e.d for e in evtl)
        return D-A
    def task_arrival_in_range (evtl):
        A = min(e.d for e in evtl)
        return ((l <= A) and (A < r))
    return aggregate_by_task (time_of_evtl, arrv, filter=task_arrival_in_range)

def num_tasks_in_arrival_range (arrv, l, r):
    def task_arrival_in_range (evtl):
        A = min(e.d for e in evtl)
        return ((l <= A) and (A < r))
    return aggregate_by_task (lambda x: 1, arrv, aggregator=len, filter=task_arrival_in_range)

def mean_task_service (arrv):
    def service_of_task (evtl):
        return numpy.sum (e.s for e in evtl)
    return aggregate_by_task (service_of_task, arrv)

def mean_task_length (arrv):
    # average number of events per task
    return aggregate_by_task (len, arrv)

def total_service (arrv):
    return numpy.sum (evt.s for evt in arrv)

## output

def write_arrv (f, arrv):
    """Write every event as CSV plus the queue length it observed on arrival."""
    evt2n = dict()
    for qi in range(arrv.num_queues()):
        times = _inner_qsize_for_qid (arrv, qi)
        for tup in times:
            if tup[1] == 1:
                evt2n[tup[2]] = tup[3]
    for evt in arrv:
        f.write (evt.as_csv())
        f.write (" ")
        f.write (str(evt2n[evt]))
        f.write ("\n")


## "exported" stats

STATS = [ \
    utilization, mean_wait, mean_response_time, std_response_time, \
    mean_service, std_service,
    mean_obs_response, std_obs_response,
    mean_obs_service, std_obs_service,
    mean_latent_service, nobs_by_queue,
    mean_wait_percentage, mean_qsize_arriving, mean_task_time \
    ]

def all_stats_string ():
    return ' '.join([ fn.__name__ for fn in STATS ])

def stringify (val):
    # per-queue stats are lists; scalar stats are printed directly
    if isinstance(val, list):
        return " ".join (map (str, val))
    else:
        return str(val)


## main method: Run given stats on a given arrivals object

import sys
import re
from optparse import OptionParser

import qnetu

def main():

    parser = OptionParser(description="Compute usage statistics from tasks in a queueing network.")
    parser.add_option("--network", dest="netf",
                      help="YAML file containing network description", metavar="FILE")
    parser.add_option("--arrivals", dest="arrvf",
                      help="YAML file containing job arrivals", metavar="FILE")
    parser.add_option("--use-multif", dest="useMultif", type="int",
                      help="If true, use multif format.")
    parser.add_option("--stats", "--statistics", dest="statsl",
                      help="""List of statistics to report after every iteration.
  Example: --stat "mean_wait, mean_qsize_arriving"
  Available statistics: %s""" % all_stats_string(), metavar="LIST")
    parser.add_option ("--prefix", dest="prefix",
                       help="Prefix for constructing output files.")

    (opts, args) = parser.parse_args()
    if len(args) > 0:
        parser.error ("Invalid option string %s" % args[0])
        sys.exit(1)

    f = open(opts.netf)
    net = qnetu.qnet_from_text (f)
    f.close()

    if opts.useMultif:
        arrv = qnetu.read_multif_of_prefix (opts.arrvf, net)
    else:
        f = open(opts.arrvf)
        arrv = qnetu.load_arrivals (net, f)
        f.close()

    if opts.statsl == "ALL":
        stat_fns = STATS[:]
    else:
        # statistic names may be separated by commas and/or spaces
        comma = re.compile ("[, ]+")
        stat_fns = [ globals()[fn_name] for fn_name in comma.split(opts.statsl) ]

    # one output file per statistic: <prefix>_<statname>.txt
    for fn in stat_fns:
        f = open ("%s_%s.txt" % (opts.prefix, fn.__name__), "w")
        f.write (stringify (fn (arrv)))
        f.write ("\n")
        f.close()

if __name__ == "__main__": main()
# Faster version of ninqueue.py
#
# NOTE(review): the Ninq class begins on the previous dump line; it is
# reproduced here in full (its attribute declarations live in cninq.pxd,
# which is not visible in this view).

cdef extern from "math.h":
    enum: INFINITY

cdef extern from "Python.h":
    void *PyMem_Malloc(unsigned int n)
    void PyMem_Free(void *p)

cdef extern from "string.h":
    void *memmove(void *s1, void *s2, size_t n)



cdef class SortedDouble
cdef class OverlayIterator
cdef class Overlay

cdef class Ninq:
    # Number-in-queue tracker over fixed-capacity sorted arrays of
    # arrival (self.a) and departure (self.d) times.

    def __init__ (self, Nmax):
        self.a = SortedDouble (Nmax)
        self.d = SortedDouble (Nmax)
        self.Nmax = Nmax

    def __repr__ (self):
        ret = "NINQ:\n"
        kts = self.knots()
        for t,N in kts:
            ret += "%.5f %d\n" % (t,N)
        return ret

    def knots (self):
        # Return [(t, N)] for every event time, with ties ordered by
        # cmp_knots (+1 arrival before -1 departure at equal t).
        N = 0
        lst = []
        ret = []
        for 0 <= i < len(self.a): lst.append (( self.a.item(i), 1 ))
        for 0 <= i < len(self.d): lst.append (( self.d.item(i), -1 ))
        lst.sort(cmp_knots)
        for t,delta in lst:
            N += delta
            ret.append ((t, N))
        return ret

    cpdef add_birth_death (self, double t_a, double t_d):
        # record one job arriving at t_a and departing at t_d
        self.a.add_time (t_a)
        self.d.add_time (t_d)

    cpdef move_arrival (self, double a0, double a1):
        self.a.move_time (a0, a1)

    cpdef move_departure (self, double a0, double a1):
        self.d.move_time (a0, a1)

    cpdef int N (self, double t):
        # number in queue at t: arrivals <= t minus departures <= t
        cdef int na = self.a.num_lte (t)
        cdef int nd = self.d.num_lte (t)
        return na - nd

    cpdef OverlayIterator interval_iterator (self, double l, double r):
        # iterate the piecewise-constant N over [l, r] via a fresh Overlay
        cdef Overlay ovl = Overlay(self)
        return ovl.interval_iterator(l, r)

    cpdef int contains_arrival (self, double v):
        cdef int idx = self.a.bisect (v)-1
        return self.a.item (idx) == v

    cpdef int contains_departure (self, double v):
        cdef int idx = self.d.bisect (v)-1
        return self.d.item (idx) == v

# works just like NinQueue
cdef class Overlay:
    # Copy-on-write view over a Ninq: proposed moves are recorded in
    # plus/minus arrays without modifying the inner structure.

    def __init__(self, Ninq ninq):
        self.inner = ninq
        self.plus = SortedDouble(ninq.Nmax)
        self.minus = SortedDouble(ninq.Nmax)

    def knots (self):
        # Like Ninq.knots, but merges the overlay's +/- corrections.
        N = 0
        lst = []
        ret = []
        for 0 <= i < len(self.inner.a): lst.append (( self.inner.a.item(i), 1 ))
        for 0 <= i < len(self.inner.d): lst.append (( self.inner.d.item(i), -1 ))
        for 0 <= i < len(self.plus): lst.append (( self.plus.item(i), 1 ))
        for 0 <= i < len(self.minus): lst.append (( self.minus.item(i), -1 ))
        lst.sort(cmp_knots)
        for t,delta in lst:
            N += delta
            ret.append ((t, N))
        return ret

    def __repr__(self):
        return "OVERLAY\n"+ "\n".join(map(str, self.knots()))

    cpdef move_arrival (self, double a0, double a1):
        # moving an arrival a0 -> a1: +1 at a1, cancel the +1 at a0
        self.plus.add_time (a1)
        self.minus.add_time (a0)

    cpdef move_departure (self, double a0, double a1):
        # moving a departure a0 -> a1: -1 at a1, cancel the -1 at a0
        self.minus.add_time (a1)
        self.plus.add_time (a0)

    cdef OverlayIterator _interval_iterator (self, double l, double r):
        return OverlayIterator (self, l, r)

    cpdef OverlayIterator interval_iterator (self, double l, double r):
        return OverlayIterator (self, l, r)

    cpdef int N (self, double t):
        # inner count corrected by the recorded moves
        cdef int n0 = self.inner.N (t)
        cdef int plus0 = self.plus.num_lte (t)
        cdef int minus0 = self.minus.num_lte (t)
        return n0 + plus0 - minus0

cdef class SortedDouble:
    # Fixed-capacity sorted array of doubles backed by a raw C buffer.
    # (self.val, self.N, self.capacity are declared in cninq.pxd.)

    def __init__ (self, cap):
        # NOTE(review): no <double*> cast on the PyMem_Malloc result —
        # presumably accepted by the Pyrex/Cython version in use; verify.
        self.val = PyMem_Malloc (cap * sizeof(double))
        self.capacity = cap

    def __dealloc__ (self):
        if self.val != NULL:
            PyMem_Free (self.val)
            self.val = NULL

    def __repr__ (self):
        ret = "[SL: "
        for i in range(self.N):
            ret += str(self.val[i])
            ret += ", "
        ret += "]"
        return ret

    def __len__ (self): return self.N

    cpdef double item (self, int i):
        # out-of-range reads return +inf (used as a sentinel by callers)
        if i < self.N:
            return self.val[i]
        else:
            return INFINITY

    cpdef add_time (self, double x):
        # Insert x, shifting greater elements right; O(N).
        cdef int i
        if self.N >= self.capacity:
            raise Exception ("Can't increment SortedDouble past capacity %d" % self.capacity)
        cdef int idx = self.bisect (x)
        for i in range(self.N, idx, -1):
            self.val[i] = self.val[i-1]
        self.N += 1
        self.val[idx] = x

    cpdef move_time (self, double x0, double x1):
        # Replace the element equal to x0 with x1, sliding the elements
        # between the two positions to keep the array sorted.
        cdef int i
        cdef int idx0 = self.bisect (x0) - 1
        cdef int idx1 = self.bisect (x1)
        if self.val[idx0] != x0: raise Exception ("Can't find %.5f in list\n%s" % (x0, self))
        if idx0 < idx1:
            # need to move stuff backward
            for i in range(idx0, idx1):
                self.val[i] = self.val[i+1]
            self.val[idx1-1] = x1
        elif idx1 < idx0:
            # move other stuff forward;
            for i from idx0 >= i > idx1:
                self.val[i] = self.val[i-1]
            self.val[idx1] = x1
        else:
            # else equal, don't do memmove
            self.val[idx1] = x1

    cpdef int num_lte (self, v):
        # count of stored values <= v (bisect returns an upper index)
        return self.bisect (v)

    # Returns index i so that self.val[i-1] <= v < self.val[i]
    cdef int bisect (self, double v):
        cdef int lo = 0
        cdef int hi = self.N
        cdef int mid
        cdef double midval
        while lo < hi:
            mid = (lo+hi)//2
            midval = self.val[mid]
            if midval < v:
                lo = mid+1
            elif midval > v:
                hi = mid
            else:
                lo = mid
                break
        # invariant of above: lo <= correct_answer < hi
        # special case if values in self.val equal
        while (lo < self.N) and (self.val[lo] <= v): lo += 1
        return lo


cdef class OverlayIterator:
    # Walks the intervals [T0, T1) of constant N over [l, r] for an
    # Overlay, merging its four sorted arrays without materializing knots.

    def __init__ (self, Overlay ovl, double l, double r):
        self.r = r
        self.t1 = l

        self.a = ovl.inner.a
        self.d = ovl.inner.d
        self.plus = ovl.plus
        self.minus = ovl.minus

        # index of the last element <= l in each array
        self.i_a = self.a.bisect (l)-1
        self.i_d = self.d.bisect (l)-1
        self.i_plus = self.plus.bisect (l)-1
        self.i_minus = self.minus.bisect (l)-1
        # is_* flag which array supplies the next knot; consumed by advance()
        self.is_a = 0
        self.is_d = 0
        self.is_plus = 0
        self.is_minus = 0

        self.advance()

    cpdef int has_next (self):
        return (self.t0 < self.r)

    cpdef double T0 (self): return self.t0
    cpdef double T1 (self): return self.t1

    cpdef int N (self):
        # count on the current interval: arrivals-so-far minus
        # departures-so-far, corrected by the overlay indices
        return self.i_a - self.i_d + self.i_plus - self.i_minus

    cpdef advance (self):
        # new t1 == t0
        self.t0 = self.t1
        # advance i
        self.i_a += self.is_a
        self.i_d += self.is_d
        self.i_plus += self.is_plus
        self.i_minus += self.is_minus
        self.is_plus = self.is_minus = self.is_a = self.is_d = 0
        # new t1 :: either a_next, d_next or r
        # NOTE(review): exact ties among a_next/plus_next/minus_next fall
        # through to later branches; the departure branch is the default.
        cdef double a_next = self.a.item(self.i_a + 1)
        cdef double d_next = self.d.item(self.i_d + 1)
        cdef double plus_next = self.plus.item(self.i_plus + 1)
        cdef double minus_next = self.minus.item(self.i_minus + 1)
        if (self.r < a_next) and (self.r < d_next) and (self.r < plus_next) and (self.r < minus_next):
            self.t1 = self.r
        elif (a_next < d_next) and (a_next < plus_next) and (a_next < minus_next):
            self.is_a = 1
            self.t1 = a_next
        elif (plus_next < minus_next) and (plus_next < d_next):
            self.is_plus = 1
            self.t1 = plus_next
        elif (minus_next < d_next):
            self.is_minus = 1
            self.t1 = minus_next
        else:
            self.is_d = 1
            self.t1 = d_next

# utilities
def cmp_knots (k0, k1):
    # time ascending; on a tie, larger delta (+1 arrival) first
    return cmp(k0[0], k1[0]) or - cmp(k0[1], k1[1])
"Mismatch (SD: %.5f)\nTRU %s\nEXP %s" % (sd,mu,expected)) 49 | 50 | def test_read_multif (self): 51 | sampling.set_seed (2341243) 52 | net = self.twoq 53 | nr = 10 54 | nt = 100 55 | for ri in range(nr): 56 | arrv = net.sample (nt) 57 | print "ORIG", arrv 58 | arrv.validate() 59 | qnetu.write_multif_to_prefix ("ps_test_sample_validate", arrv) 60 | arrv2 = qnetu.read_multif_of_prefix ("ps_test_sample_validate", net) 61 | # print "COPY", arrv2 62 | arrv2.validate() 63 | 64 | def test_read_multif2 (self): 65 | sampling.set_seed (2341243) 66 | net = self.twoq 67 | nr = 10 68 | nt = 100 69 | for ri in range(nr): 70 | arrv = net.sample (nt) 71 | arrv.validate() 72 | obs = arrv.subset_by_task (0.5) 73 | # print "ORIG", arrv 74 | qnetu.write_multif_to_prefix ("ps_test_sample_validate2", obs) 75 | arrv2 = qnetu.read_multif_of_prefix ("ps_test_sample_validate2", net) 76 | # print "COPY", arrv2 77 | arrv2.validate() 78 | 79 | def test_initialize (self): 80 | sampling.set_seed (2341243) 81 | net = self.twoq 82 | nr = 10 83 | nt = 100 84 | for ri in range(nr): 85 | arrv = net.sample (nt) 86 | obs = arrv.subset_by_task (0.5) 87 | ini = net.gibbs_initialize (obs) 88 | print "TRUE", arrv 89 | print "INI", ini 90 | ini.validate() 91 | 92 | def test_initialize_for_ps (self): 93 | sampling.set_seed (2341243) 94 | net = self.twoq 95 | nr = 10 96 | nt = 100 97 | for ri in range(nr): 98 | arrv = net.sample (nt) 99 | obs = arrv.subset_by_task (0.5) 100 | ini = qnet.gibbs_initialize_for_ps (net, obs) 101 | ini.validate() 102 | 103 | def test_sem (self): 104 | sampling.set_seed (67826) 105 | # net = self.twoq 106 | net = self.oneq 107 | nr = 1 108 | nt = 50 109 | theta0 = net.parameters[:] 110 | for ri in range(nr): 111 | arrv = net.sample (nt) 112 | obs = arrv.subset_by_task (0.25) 113 | ini = net.gibbs_initialize (obs) 114 | estimation.sem (net, ini, 0, 100) 115 | print "MU ", net.parameters 116 | net.parameters = theta0[:] 117 | print "TRU ", theta0 118 | 119 | def test_bayes (self): 
120 | sampling.set_seed (67826) 121 | # net = self.twoq 122 | net = self.oneq 123 | nr = 1 124 | nt = 100 125 | theta0 = net.parameters[:] 126 | 127 | def reporter (net, arrv, iter, lp): 128 | lp_scratch = net.log_prob(arrv) 129 | assert abs(lp - lp_scratch) < 1e-10, \ 130 | "Mismatch LP. Running total %.10f from scratch %.10f" % (lp, lp_scratch) 131 | if 0 == (iter % 10): 132 | f = open ("ps_test_bayes_%d.txt" % iter, "w") 133 | arrv.write_csv(f) 134 | f.close() 135 | 136 | for ri in range(nr): 137 | arrv = net.sample (nt) 138 | obs = arrv.subset_by_task (0.25) 139 | ini = net.gibbs_initialize (obs) 140 | estimation.bayes (net, ini, 100, report_fn=reporter) 141 | print "MU ", net.parameters 142 | net.parameters = theta0[:] 143 | print "TRUE ", theta0 144 | 145 | def test_ps_stationary (self): 146 | nr = 50 147 | nt = 50 148 | net = self.twoq 149 | allmu = numpy.zeros (len(net.parameters)) 150 | allmax = numpy.zeros (len(net.parameters)) 151 | for i in range (nr): 152 | arrv = net.sample (nt) 153 | obs = arrv.subset_by_task (0.0) 154 | net.slice_resample (arrv, 10) 155 | mu = numpy.array (qstats.mean_service (arrv)) 156 | this_max = numpy.array (qstats.max_service (arrv)) 157 | print "MU", mu 158 | allmu += mu 159 | allmax += this_max 160 | avg = allmu / nr 161 | print "AVG", avg 162 | print "TRU", net.parameters 163 | print "MAX", allmax / nr 164 | 165 | def test_ps_likelihood (self): 166 | sampling.set_seed (23134) 167 | net = self.oneq 168 | net.parameters = [ 10.0, 0.1 ] 169 | nt = 10 170 | arrv = net.sample(nt) 171 | tid = 3 172 | evts = arrv.events_of_task (tid) 173 | e1 = evts[1] 174 | e1.obs_d = 0 175 | lp0 = net.log_prob (arrv) 176 | print arrv 177 | print "LP0", lp0 178 | 179 | dexp = distributions.Exponential (net.parameters[1]) 180 | gibbs = qnet.GGkGibbs (net, arrv, e1, lp0) 181 | l = e1.a 182 | u = e1.a + 3.0 183 | diff = gibbs.inner_dfun(l) - dexp.lpdf(0) 184 | for i in range(10): 185 | x = l + 0.1*i*(u-l) 186 | gval = gibbs.inner_dfun(x) 187 | print 
"%.10f %.10f %.10f %.10f" % (x, gval, gval - diff, dexp.lpdf(x-l)) 188 | 189 | def test_zero_s (self): 190 | sampling.set_seed (23134) 191 | net = self.oneq 192 | 193 | arrv = net.sample (1) 194 | e1 = arrv.event (1) 195 | q1 = e1.queue() 196 | 197 | print net.parameters 198 | print arrv 199 | 200 | e1.d = e1.a 201 | e1.s = 0 202 | lp0 = net.log_prob (arrv) 203 | self.assertAlmostEquals (-1.47313356106, lp0, 5) 204 | 205 | e1.d = e1.a + 1. 206 | e1.s = 1. 207 | dl = q1.pyDiffListForDeparture (e1, e1.a) 208 | lp1 = net.log_prob (arrv) 209 | dlik = q1.likelihoodDelta(arrv, dl) 210 | print arrv 211 | print lp1, dlik 212 | self.assertAlmostEquals (-1.47313356106, lp1 + dlik, 5) 213 | 214 | 215 | def test_likelihood_delta (self): 216 | sampling.set_seed (23134) 217 | net = self.oneq 218 | net.parameters = [ 10.0, 5.0 ] 219 | nt = 10 220 | tid = 3 221 | 222 | arrv = net.sample(nt) 223 | evts = arrv.events_of_task (tid) 224 | e1 = evts[1] 225 | e1.obs_d = 0 226 | q1 = e1.queue() 227 | 228 | lp0 = net.log_prob (arrv) 229 | print arrv 230 | 231 | d0 = e1.d 232 | deltas = [-0.005, 0.0, 0.1, 0.5, 1.0] 233 | for delta in deltas: 234 | d_new = d0 + delta 235 | dl = q1.pyDiffListForDeparture (e1, d_new) 236 | dlik = q1.likelihoodDelta(arrv, dl) 237 | lik_a = lp0 + dlik 238 | 239 | a2 = arrv.duplicate() 240 | dl = q1.pyDiffListForDeparture (e1, d_new) 241 | a2.applyDiffList (dl) 242 | lik_b = net.log_prob (a2) 243 | 244 | print a2 245 | print lik_a, lik_b 246 | 247 | self.assertTrue (abs(lik_b - lik_a) < 1e-5) 248 | 249 | 250 | # mainly for profiling atm 251 | def test_ps_em (self): 252 | nt = 600 253 | niter = 2 254 | net = self.twoq 255 | 256 | arrv = net.sample (nt) 257 | obs = arrv.subset_by_task (0.5) 258 | estimation.sem (net, obs, 0, niter) 259 | 260 | print net.parameters 261 | 262 | def setUp (self): 263 | self.oneq = qnetu.qnet_from_text (oneq_text) 264 | self.twoq = qnetu.qnet_from_text (twoq_text) 265 | 266 | oneq_text = """ 267 | states: 268 | - name: I0 269 | 
queues: [I0] 270 | successors: [S1] 271 | - name: S1 272 | queues: [Q0] 273 | queues: 274 | - { name: I0, service: [M, 1.0] } 275 | - { name: Q0, service: [M, 0.5], type: PS } 276 | """ 277 | 278 | twoq_text = """ 279 | states: 280 | - name: I0 281 | queues: [I0] 282 | successors: [S1] 283 | - name: S1 284 | queues: [Q1] 285 | successors: [S2] 286 | - name: S2 287 | queues: [Q2] 288 | queues: 289 | - { name: I0, service: [M, 1.0] } 290 | - { name: Q1, service: [M, 0.5], type: PS } 291 | - { name: Q2, service: [M, 0.5], type: PS } 292 | """ 293 | 294 | def main(): 295 | if len(sys.argv) > 1: 296 | for test_name in sys.argv[1:]: 297 | suite = unittest.TestLoader().loadTestsFromName("test_ps.TestPS.%s" % (test_name,)) 298 | unittest.TextTestRunner(verbosity=2).run(suite) 299 | else: 300 | unittest.main() 301 | 302 | if __name__ == "__main__": 303 | main() 304 | 305 | -------------------------------------------------------------------------------- /src/gibbs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Front end for running the Gibbs sampler 3 | 4 | 5 | import getopt, sys 6 | import numpy 7 | import numpy.random 8 | import yaml 9 | 10 | import expt 11 | import mytime 12 | import estimation, qnetu 13 | import sampling 14 | 15 | import qstats 16 | 17 | def usage(): 18 | print """Usage: 19 | gibbs.py 20 | Front-end for running the queueing network Gibbs sampler. 
21 | --network (-n) YAML file describing network (required) 22 | --arrivals (-a) YAML file describing arrivals (required) 23 | --multiarr (-A) Three-file prefix describing arrivals (alternative to -a) 24 | --arrvtbl Arrivals file in table format 25 | --burn Number of iterations to burn in (default 100) 26 | --bayes If true, use Bayes instead of StEM 27 | --departures-only If supplied, hold params constant; only sample departure times 28 | --gibbs-iter Number of iterations to sample (default 100) 29 | --output-mu If supplied, write series of average response times to file 30 | --output-arrv If supplied, write all arrivals in YAML to files with given prefix 31 | --arrv-iter Number of iterations at which to output arrivals 32 | --thin Number of iterations to thin in stochastic EM 33 | --statistics List of statistics to report after every iteration. 34 | Example: --stat "mean_wait, mean_qsize_arriving" 35 | --do-init Whether to run initialization (True by default) 36 | Use 0 if you're restarting from a previous sample 37 | --params FILE Read in parameters from file 38 | Available statistics: %s 39 | """ % qstats.all_stats_string() 40 | 41 | def main(): 42 | 43 | nburn = 100 44 | niter = 100 45 | netf = None 46 | arrf = None 47 | multif = None 48 | arrvtbl = None 49 | samplePct = None 50 | outputMu = outputArrv = None 51 | arrvIter = 100 52 | configFile = None 53 | allStats = [] 54 | statsIter = 0 55 | thin = 1 56 | doBayes=0 57 | doInit=1 58 | params=None 59 | departuresOnly=False 60 | sweeten=0 61 | 62 | try: 63 | opts, args = getopt.getopt(sys.argv[1:], "hn:a:A:", ["help", "seed=", "network=", "arrivals=", "gibbs-iter=", "burn=", "output-mu=", "output-arrv=", "config=", "statistics=", "stats=", "thin=", "stats-iter=", "multiarr=", "arrvtbl=", "bayes=", "departures-only", "arrv-iter=", "do-init=", "sweeten=", "params=" ]) 64 | except getopt.GetoptError, err: 65 | # print help information and exit: 66 | print str(err) # will print something like "option -a not 
recognized" 67 | usage() 68 | sys.exit(2) 69 | output = None 70 | verbose = False 71 | for o, a in opts: 72 | if o == "-v": 73 | verbose = True 74 | elif o in ("-h", "--help"): 75 | usage() 76 | sys.exit() 77 | elif o in ("-n", "--network"): 78 | netf = a 79 | elif o in ("-a", "--arrivals"): 80 | arrf = a 81 | elif o in ("-A", "--multiarr"): 82 | multif = a 83 | elif o == "--arrvtbl": 84 | arrvtbl = a 85 | elif o == "--do-init": 86 | doInit = int(a) 87 | elif o == "--burn": 88 | nburn = int(a) 89 | elif o == "--sweeten": 90 | sweeten = int(a) 91 | elif o == "--gibbs-iter": 92 | niter = int(a) 93 | elif o == "--output-mu": 94 | outputMu = a 95 | elif o == "--output-arrv": 96 | outputArrv = a 97 | elif o == "--arrv-iter": 98 | arrvIter = int(a) 99 | elif o == "--thin": 100 | thin = int(a) 101 | elif o == "--seed": 102 | sampling.set_seed (int(a)) 103 | elif o == "--config": 104 | configFile = a 105 | elif o == "--departures-only": 106 | departuresOnly = True 107 | elif o == "--statistics" or o == "--stats": 108 | if a == "ALL": 109 | allStats = qstats.STATS[:] 110 | else: 111 | statl = a.split (", ") 112 | allStats = [ qstats.__dict__[fn_name] for fn_name in statl ] 113 | elif o == "--stats-iter": 114 | statsIter = int(a) 115 | elif o == "--bayes": 116 | doBayes = int(a) 117 | elif o == "--params": 118 | params = a 119 | else: 120 | assert False, "unhandled option" 121 | 122 | tmr = mytime.timeit() 123 | 124 | if configFile: 125 | f = open(configFile) 126 | qnetu.read_configuration (f) 127 | f.close() 128 | 129 | if netf is None: 130 | print "ERROR: Specify --network" 131 | sys.exit(1) 132 | 133 | f = open(netf) 134 | net = qnetu.qnet_from_text (f) 135 | f.close() 136 | 137 | if params: 138 | f = open(params) 139 | line = f.readlines()[0] 140 | mu = map(float, line.split()) 141 | net.parameters = mu 142 | 143 | if arrf is not None: 144 | f = open(arrf) 145 | arrv = qnetu.load_arrivals (net, f) 146 | f.close() 147 | elif multif is not None: 148 | statef = open 
("%sstate.txt" % multif) 149 | af = open ("%sa.txt" % multif) 150 | df = open ("%sd.txt" % multif) 151 | arrv = qnetu.read_multifile_arrv (net, statef, af, df) 152 | statef.close() 153 | af.close() 154 | df.close() 155 | elif arrvtbl is not None: 156 | arrv = qnetu.read_from_table (net, arrvtbl) 157 | # hack 158 | print "WARNING: Hackily resampling mixture components" 159 | for evt in arrv: 160 | evt.queue().resample_auxiliaries(evt) 161 | else: 162 | raise Exception ("Must specify either -a or -A") 163 | 164 | tmr.tick ("Loading arrivals") 165 | # print arrv.dump() 166 | 167 | print net 168 | print "Number of events = ", arrv.num_events () 169 | print "Number hidden events = ", arrv.num_hidden_events() 170 | print "Thin = ", thin 171 | 172 | if doInit: 173 | arrv = net.gibbs_initialize (arrv) 174 | if outputArrv: 175 | inif = open ("initial.txt", 'w') 176 | arrv.write_csv (inif) 177 | inif.write ("\n") 178 | inif.close () 179 | # print "INITIALIZATION" 180 | # print arrv 181 | tmr.tick ("Initialization") 182 | 183 | arrv.validate() 184 | tmr.tick ("Validation") 185 | 186 | train_stats_f = [] 187 | if outputMu: mu_f = open (outputMu, 'w') 188 | for fn in allStats: train_stats_f.append (open ("train_%s.txt" % fn.__name__, "w")) 189 | 190 | def output_train_stats (net, arrv, iter, lp): 191 | if outputMu: 192 | mu_f.write (" ".join (map (str, net.parameters))) 193 | mu_f.write ("\n") 194 | 195 | if outputArrv: 196 | if iter % arrvIter == 0: 197 | arrv_out_f = open ("%s%d.txt" % (outputArrv, iter), 'w') 198 | arrv.write_csv (arrv_out_f) 199 | arrv_out_f.write ("\n") 200 | arrv_out_f.close () 201 | 202 | # output statistics for training arrvials 203 | for i in range(len(allStats)): 204 | f = train_stats_f[i] 205 | fn = allStats[i] 206 | f.write ("%d " % iter) 207 | f.write (qstats.stringify(fn(arrv))) 208 | f.write ("\n") 209 | f.flush () 210 | 211 | if doBayes: 212 | mul,arrvl = estimation.bayes (net, arrv, niter, report_fn=output_train_stats, sweeten=sweeten) 213 | 
elif departuresOnly: 214 | mul,arrvl = estimation.sample_departures (net, arrv, niter, report_fn=output_train_stats) 215 | else: 216 | mul,arrvl = estimation.sem (net, arrv, nburn, niter, gibbs_iter=thin, report_fn=output_train_stats) 217 | 218 | tmr.tick ("Running EM") 219 | 220 | if outputMu: mu_f.close() 221 | for f in train_stats_f: f.close() 222 | 223 | # output statistics from a fresh sample at the ML soln, if desired 224 | if statsIter > 0: 225 | test_stats_f = [] 226 | for fn in allStats: test_stats_f.append (open ("%s.txt" % fn.__name__, "w")) 227 | 228 | def output_test_stats (net, arrv, iter): 229 | for i in range(len(allStats)): 230 | f = test_stats_f[i] 231 | fn = allStats[i] 232 | f.write (qstats.stringify (fn (arrv))) 233 | f.write ("\n") 234 | 235 | net.parameters = mul[-1] 236 | arrv = arrvl[-1] 237 | arrvl2 = net.gibbs_resample(arrv, 0, statsIter, return_arrv= False, report_fn = output_test_stats) 238 | tmr.tick ("Sampling wait times") 239 | 240 | tmr.tick ("Outputing estimated statistics") 241 | 242 | lp = net.log_prob (arrvl[-1]) 243 | n = numpy.sum (qstats.nobs_by_queue (arrvl[-1])) 244 | print "LOG_PROB ", lp 245 | print "AIC ", 2*len(net.parameters) - 2*lp 246 | print "BIC ", numpy.log(n)*len(net.parameters) -2 * lp 247 | 248 | tmr.total ("Total time") 249 | 250 | if __name__ == "__main__": 251 | main() 252 | 253 | -------------------------------------------------------------------------------- /src/modelmgmt.py: -------------------------------------------------------------------------------- 1 | # Functions for adding and removing queues and states to models, 2 | # keeping an associated Arrivals object up to date 3 | 4 | import qnet 5 | import arrivals 6 | import qnetu 7 | from numpy import linalg 8 | import numpy 9 | import hmm 10 | import estimation 11 | import sys 12 | import mytime 13 | 14 | # TODO: Arrivals conversion (should be easy) 15 | def split_state (net, sname_old, sname_new1, sname_new2, qnames0, qnames1): 16 | """Splits a state 
in a Qnet. 17 | NET is a qnet object, and ARRV an associated arrivals object. 18 | SNAME_OLD is the state to split, into SNAME_NEW1 and SNAME_NEW2. 19 | (SNAME_OLD, SNAME_NEW1, and SNAME_NEW2 are string state names). 20 | Q0L and Q1L are a partition of the queues that SNAME_OLD 21 | can generate. These are lists of names. 22 | Returns tuple (NEW_NET, FUNC). FUNC is a function 23 | that will convert arrivals to the new NETWORK. 24 | Non-destructive.""" 25 | 26 | q0l = map(lambda n: net.qid_of_queue (net.queue_by_name (n)), qnames0) 27 | q1l = map(lambda n: net.qid_of_queue (net.queue_by_name (n)), qnames1) 28 | 29 | sname_map_new = dict() 30 | 31 | # update the FSM. This is the tricky part 32 | fsm_old = net.fsm 33 | A,O,sid2 = _split_state (fsm_old.a, fsm_old.o, net.sname2id[sname_old], q0l, q1l) 34 | fsm_new = hmm.HMM (A,O) 35 | 36 | sname_map_new = dict(net.sname2id) 37 | sid_old = sname_map_new [sname_old] 38 | del sname_map_new[sname_old] 39 | sname_map_new[sname_new1] = sid_old 40 | sname_map_new[sname_new2] = sid2 41 | 42 | net_new = qnet.Qnet (net.all_queues(), net.all_templates(), net.universe, dict(net.qname2id), sname_map_new, fsm_new) 43 | 44 | converter_fn = gen_arrv_converter (sid_old, sid_old, sid2, q0l, q1l) 45 | 46 | return net_new, converter_fn 47 | 48 | def _split_state (a_old, o_old, state, q0l, q1l): 49 | # use least-squares to find new parameters 50 | # add new state to end 51 | ns = a_old.shape[0] 52 | no = o_old.shape[1] 53 | A = numpy.zeros((ns+1,ns+1)) 54 | O = numpy.zeros((ns+1,no)) 55 | A[0:ns,0:ns] = a_old 56 | O[0:ns,:] = o_old 57 | # fill in p ( * | snew2) 58 | A[ns,:] = A[state,:] 59 | # fill in p (q | snew2) 60 | O[ns,:] = O[state,:] # order of these three lines matters 61 | O[ns,q0l] = 0 62 | w2 = numpy.sum (O[ns,:]) 63 | O[ns,:] = O[ns,:] / w2 64 | # fill in p (q | snew1) 65 | O[state,q1l] = 0 66 | w1 = numpy.sum(O[state,:]) 67 | O[state,:] = O[state,:] / w1 68 | # finally, fill in p( snew1 | *) p ( snew2 | *) 69 | A[:,ns] = 
A[:,state] 70 | A[:,state] = w1 / (w1+w2) * A[:,state] 71 | A[:,ns] = w2 / (w1++w2) * A[:,ns] 72 | # done 73 | return A,O,ns 74 | 75 | # tricky part is the incoming state reallocate by solving linear system 76 | # (we'll call the coefficients M) 77 | # M = numpy.zeros(( ns + no*ns, 2*(ns+1) )) 78 | # b = numpy.zeros( M.shape[0] ) 79 | # ri = 0 80 | # def idx (s0, s1): return s0 if s1 == state else ns + s0 81 | # for si in range(ns): 82 | # M[ri,idx(si,state)] = M[ri,idx(si,ns)] = 1 83 | # b[ri] = a_old[si,state] 84 | # ri += 1 85 | # # q1 obs constraint 86 | # print M.shape 87 | # for si in range(ns): 88 | # for qi in range(no): 89 | # print "O[state,qi]", O[state,qi] 90 | # M[ri,idx(si,state)] = O[state,qi] 91 | # M[ri,idx(si,ns)] = O[si,qi] 92 | # print ri, "o_old", state, qi, o_old[state,qi] 93 | # print ri, b[ri] 94 | # b[ri] = o_old[state,qi] 95 | # ri += 1 96 | # # solve 97 | # x,resids,rank,s = linalg.lstsq(M,b) 98 | # print "X", x 99 | # print "RESIDS", resids 100 | # A[:,state] = x[0:ns+1] 101 | # A[:,state] = A[:,state] / numpy.sum(A[:,state]) 102 | # A[:,ns] = x[ns+1:] 103 | # A[:,ns] = A[:,ns] / numpy.sum(A[:,ns]) 104 | # # finally 105 | # return A,O,ns 106 | 107 | def gen_arrv_converter (state_old, state1, state2, q0l, q1l): 108 | def converter (arrv): 109 | arrv_new = arrivals.Arrivals (arrv.qnet()) 110 | for evt in arrv: 111 | e_new = evt.duplicate() 112 | if e_new.state == state_old: 113 | if e_new.qid in q0l: 114 | e_new.set_state (state1) 115 | elif e_new.qid in q1l: 116 | e_new.set_state (state2) 117 | else: 118 | raise Exception ("Can't find %s in %s or %s" % evt, q0l, q1l) 119 | arrv_new.insert(e_new) 120 | # below is necessary for things like, events that have 121 | # equal depature times whose orders get flipped. 
122 | arrv_new.recompute_all_service() 123 | return arrv_new 124 | return converter 125 | 126 | def add_hidden_queue (net, arrv, sname_from, sname_to, qtext): 127 | # add state 128 | s1 = net.sname2id[sname_from] 129 | s2 = net.num_states() 130 | sname_map_new = dict(net.sname2id) 131 | sname_map_new[sname_to] = s2 132 | 133 | # add queue 134 | q2 = qnetu.queue_from_text (qtext, net.universe) 135 | qname_map_new = dict(net.qname2id) 136 | qname_map_new[q2.name] = len(net.qname2id) 137 | qlist = net.all_queues()[:] 138 | qlist.append (q2) 139 | tmpllist = net.all_templates()[:] 140 | tmpllist.append (q2.service) 141 | 142 | # update the FSM. Only slightly tricky 143 | fsm_old = net.fsm 144 | ns = fsm_old.a.shape[0] 145 | no = fsm_old.o.shape[1] 146 | A = numpy.zeros((ns+1, ns+1)) 147 | O = numpy.zeros((ns+1, no+1)) 148 | A[0:ns,0:ns] = fsm_old.a 149 | A[s2,:] = A[s1,:] # hidden state goes to same places as old state 150 | A[s1,:] = 0; A[s1,s2] = 1 # old state goes only to hidden 151 | O[0:ns,0:no] = fsm_old.o 152 | O[s2,no] = 1 # hidden state must output hidden queue 153 | fsm_new = hmm.HMM (A,O) 154 | 155 | net_new = qnet.Qnet (qlist, tmpllist, net.universe, qname_map_new, sname_map_new, fsm_new) 156 | 157 | # now... 
update the arrivals 158 | maxeid = arrv.max_eidx()+1 159 | eps = 1e-10 # hack; initialization will pick a better value 160 | arrv_new = arrivals.Arrivals (net_new) 161 | for evt in arrv: 162 | qnew = net_new.queue_by_name (evt.queue().name) # currently unnecessary 163 | if evt.state == s1: 164 | first_evt = arrivals.Event (evt.eid, evt.tid, evt.qid, qnew, evt.a, evt.s, evt.d - eps, evt.state, obs_a=evt.obs_a, obs_d=0, proc=evt.proc, aux=evt.auxiliaries.duplicate()) 165 | arrv_new.insert (first_evt) 166 | # insert hidden state 167 | qid_new = net_new.qid_of_queue(q2) 168 | arrv_new.insert (arrivals.Event (maxeid, evt.tid, qid_new, q2, evt.d - eps, eps, evt.d, s2, obs_a=0, obs_d=evt.obs_d)) 169 | maxeid += 1 170 | else: 171 | arrv_new.insert (arrivals.Event (evt.eid, evt.tid, evt.qid, qnew, evt.a, evt.s, evt.d, evt.state, evt.obs_a, evt.obs_d, evt.proc, evt.auxiliaries.duplicate())) 172 | # recompute services b/c of substracting eps 173 | arrv_new.recompute_all_service() 174 | 175 | # done! 176 | return net_new, arrv_new 177 | 178 | 179 | import pdb 180 | 181 | # Use the above for a unified model selection procedure. 
182 | # IDEA: Report 183 | def bottleneck_model_selection (net0, sname, arrv, qtext, outf=sys.stdout, gibbs_iter = 100, cluster_size=2, do_init=True, mdlIx=-1): 184 | tmr = mytime.timeit() 185 | 186 | if cluster_size != 2: raise NotImplementedError() # need to add a choose() function here 187 | arrv0 = arrv.duplicate() 188 | if do_init: 189 | arrv.recompute_all_service() 190 | net0.gibbs_initialize (arrv) 191 | arrv.validate() 192 | sid = net0.sid_by_name (sname) 193 | 194 | ix = 0 195 | 196 | 197 | qs = net0.queues_of_state (sid) 198 | for ix0 in range(len(qs)): 199 | for ix1 in range(ix0+1, len(qs)): 200 | ix += 1 201 | if mdlIx >= 0 and mdlIx != ix: 202 | print "MODEL_SELECTION :: SKIPPING %d (running %d)" % (ix, mdlIx) 203 | continue 204 | 205 | q0 = qs[ix0] 206 | q1 = qs[ix1] 207 | 208 | qlist = map(lambda q: q.name, qs) 209 | qlist.remove(q0.name) 210 | qlist.remove(q1.name) 211 | 212 | print "MODEL_SELECTION :: SPLITTING ", q0.name, q1.name 213 | 214 | sbad = "_%s_ABNORMAL" % sname 215 | sgood = "_%s_OK" % sname 216 | 217 | net1, converter = split_state (net0, sname, sbad, sgood, [q0.name, q1.name], qlist) 218 | print "INTERMEDIATE NET ", net1.as_yaml() 219 | arrv1 = converter(arrv) 220 | arrv1.validate() 221 | 222 | net2, arrv2 = add_hidden_queue (net1, arrv1, sbad, "HIDDEN", qtext) 223 | initial = net2.gibbs_initialize (arrv2) 224 | tmr.tick ("Initialization time [%d %d] " % (ix0, ix1)) 225 | arrv2.validate() 226 | 227 | print "NET ", ix, net2.as_yaml() 228 | 229 | allmu = [] 230 | def reporter (net, arrv, iter, lp): 231 | hdnq = net.queue_by_name ("HIDDEN") 232 | evts = arrv.events_of_queue (hdnq) 233 | allmu.append (numpy.mean ([ e.s for e in evts ])) 234 | 235 | estimation.bayes (net2, initial, gibbs_iter, report_fn = reporter) 236 | 237 | # output statistics 238 | mu_this = numpy.mean(allmu) 239 | std_this = numpy.std (allmu) 240 | print "HDN_Z %s__%s %.15f %.15f %.15f" % (q0.name, q1.name, mu_this, std_this, mu_this/std_this) 241 | 242 | f = open 
("mmgmt_%s_%s.txt" % (q0.name, q1.name), "w") 243 | f.write ("\n".join(map(str, allmu))) 244 | f.close () 245 | 246 | 247 | -------------------------------------------------------------------------------- /src/stupidlp.py: -------------------------------------------------------------------------------- 1 | # Simple linear programming wrapper. Not really general 2 | 3 | # Completely stubbing everything out to reduce dependences 4 | # that aren't needed to reproduce paper. July 2017 5 | 6 | class LP: pass 7 | 8 | class QP: pass 9 | 10 | # from cvxopt.base import matrix, spmatrix 11 | # from cvxopt import solvers, lapack 12 | # import mytime 13 | 14 | # solvers.options['maxiters'] = 500 15 | 16 | # import pyglpk 17 | # import os 18 | 19 | # DEBUG = 0 20 | 21 | # class LP: 22 | 23 | # def __init__ (self): 24 | # self.vars = [] 25 | # self.constraints = [] 26 | # self.fname = None 27 | 28 | # def add_vars (self, text): 29 | # self.vars.append (text) 30 | 31 | # def set_objective (self, text): 32 | # self.objective = text 33 | 34 | # def add_constraint (self, text): 35 | # self.constraints.append (text) 36 | 37 | # def solve (self): 38 | # self.fname = os.tempnam (os.getcwd(), "lp-model") 39 | # f = open (self.fname, 'w') 40 | # for var in self.vars: 41 | # f.write (var) 42 | # f.write ("\n") 43 | # f.write (self.objective) 44 | # f.write ("\n") 45 | # for ctr in self.constraints: 46 | # f.write (ctr) 47 | # f.write ("\n") 48 | # f.close() 49 | 50 | # print "Attempting to solve LP: ", self.fname 51 | # return pyglpk.solve_lp (self.fname) 52 | 53 | # def cleanup (self): 54 | # if self.fname: 55 | # os.remove (self.fname) 56 | # self.fname = None 57 | 58 | # class QP: 59 | 60 | # def __init__ (self): 61 | # self.identities = dict() 62 | # self.ctrs_eq = [] 63 | # self.b = [] 64 | # self.ctrs = [] 65 | # self.le_rhs = [] 66 | # self.objsq = [] 67 | # self.objlin = [] 68 | 69 | # def lookup_var (self, vname): 70 | # if vname in self.vars: 71 | # return self.vars[vname] 72 | 
# else: 73 | # vi = len(self.vars) 74 | # self.vars[vname] = vi 75 | # return vi 76 | 77 | # def add_objsq (self, coeff, var): 78 | # self.objsq.append ( ( coeff, var, var ) ) 79 | 80 | # def add_objsq_cross (self, coeff, v1, v2): 81 | # self.objsq.append ( ( coeff, v1, v2 ) ) 82 | 83 | # def add_objlin (self, coeff, var): 84 | # self.objlin.append ( ( coeff, var ) ) 85 | 86 | # def add_le0 (self, coeff, vars): 87 | # assert len(coeff) == len(vars) 88 | # eqn = ( coeff, vars[:] ) 89 | 90 | # if eqn in self.ctrs: 91 | # raise Exception ("Equation %s already in QP: %s" % (eqn, self.ctrs)) 92 | 93 | # self.ctrs.append (eqn) 94 | # self.le_rhs.append (0.0) 95 | 96 | # def add_var_le (self, var, value): 97 | # self.ctrs.append ( ([1.], [var]) ) 98 | # self.le_rhs.append (value) 99 | 100 | # def add_var_ge (self, var, value): 101 | # self.ctrs.append ( ([-1.], [var]) ) 102 | # self.le_rhs.append (-value) 103 | 104 | # def add_le (self, v1, v2): 105 | # self.ctrs.append ( ([1., -1.], [v1, v2]) ) 106 | # self.le_rhs.append (0) 107 | 108 | # def add_eq (self, coeff, vars, rhs): 109 | # assert len(coeff) == len(vars) 110 | # self.ctrs_eq.append (( coeff, vars[:] )) 111 | # self.b.append (rhs) 112 | 113 | # def add_identity (self, var, value): 114 | # self.identities[var] = value 115 | 116 | # def collapse_identities (self): 117 | # self.collapse_identities_for (self.ctrs, self.le_rhs) 118 | # self.collapse_identities_for (self.ctrs_eq, self.b) 119 | 120 | # lin_new = dict() 121 | # sqnew = [] 122 | # for c,v in self.objlin: 123 | # if not v in self.identities: 124 | # lin_new[v] = c 125 | 126 | # for c, v1, v2 in self.objsq: 127 | # if (v1 in self.identities) and (v2 in self.identities): 128 | # # drop it 129 | # pass 130 | # elif (v1 in self.identities): 131 | # c_old = lin_new.get (v2, 0) 132 | # lin_new[v2] = c_old + c 133 | # elif (v2 in self.identities): 134 | # c_old = lin_new.get (v1, 0) 135 | # lin_new[v1] = c_old + c 136 | # else: 137 | # # neither fixed 138 | # 
sqnew.append ( (c,v1,v2) ) 139 | 140 | # self.objsq = sqnew 141 | # self.objlin = [ (c,v) for v,c in lin_new.iteritems() ] 142 | 143 | # print self 144 | 145 | # def collapse_identities_for (self, all_lhs, all_rhs): 146 | # for i in xrange(len(all_lhs)): 147 | # w,lhs = all_lhs[i] 148 | # for j in xrange(len(lhs)): 149 | # vi = lhs[j] 150 | # if vi in self.identities: 151 | # lhs[j] = None 152 | # all_rhs[i] -= self.identities[vi] 153 | # j = 0 154 | # while j < len(all_lhs): 155 | # w,lhs = all_lhs[j] 156 | # if all(map(lambda x: x is None, lhs)): 157 | # del all_lhs[j] 158 | # del all_rhs[j] 159 | # j = j + 1 160 | 161 | # def construct_var_map (self): 162 | # self.vars = dict() 163 | # for w,lhs in self.ctrs: 164 | # for v in lhs: 165 | # self.lookup_var (v) 166 | # for w,lhs in self.ctrs_eq: 167 | # for v in lhs: 168 | # self.lookup_var (v) 169 | # for w,v1,v2 in self.objsq: 170 | # self.lookup_var(v1) 171 | # self.lookup_var(v2) 172 | # for w,v1 in self.objlin: 173 | # self.lookup_var(v1) 174 | 175 | # def construct_problem (self): 176 | # self.collapse_identities() 177 | # self.construct_var_map() 178 | 179 | # nv = len(self.vars) 180 | # neq = len(self.ctrs_eq) 181 | # n_ineq = len(self.ctrs) 182 | 183 | # print "QP: Num variables = %d Equality constraints = %d Inequality constraints %d" % (nv, neq, n_ineq) 184 | 185 | # # build P 186 | # x = [] 187 | # I = [] 188 | # J = [] 189 | # for c,v1,v2 in self.objsq: 190 | # idx1 = self.vars[v1] 191 | # idx2 = self.vars[v2] 192 | # idx_min = min(idx1,idx2) 193 | # idx_max = max(idx1,idx2) 194 | # x.append (c) 195 | # I.append (idx_max) 196 | # J.append (idx_min) 197 | # P = spmatrix(x, I, J, (nv, nv)) 198 | 199 | # q = matrix(0.0, (nv,1)) 200 | # for c,v in self.objlin: 201 | # idx = self.vars[v] 202 | # q[idx] = c 203 | 204 | # # build A (inequality constraints) 205 | # G = self.matrix_from_hash (self.ctrs, n_ineq, nv) 206 | # A = self.matrix_from_hash (self.ctrs_eq, neq, nv) 207 | 208 | # b = matrix (0.0, 
(A.size[0],1)) 209 | # for i in xrange(len(self.b)): 210 | # b[i] = self.b[i] 211 | 212 | # h = matrix (0.0, (G.size[0],1)) 213 | # for i in xrange(len(self.le_rhs)): 214 | # h[i] = self.le_rhs[i] 215 | 216 | # if DEBUG: 217 | # # S = matrix(0.0, (min(A.size),1)) 218 | # # lapack.gesvd(A,S) 219 | # # r = 0 220 | # # for i in S: 221 | # # if abs(i) > 1e-10: 222 | # # r = r+1 223 | # # print "Rank(A) approx ",r 224 | 225 | # print "P: ", P 226 | # print "q: ", q 227 | # print "A: ", A 228 | # print "b: ", b 229 | # print "G: ", G 230 | 231 | # # A = matrix(0.0, (0,nv)) 232 | # # G = matrix(0.0, (0,nv)) 233 | # # b = matrix(0.0, (0,1)) 234 | 235 | # return P,q,A,b,G,h 236 | 237 | # def __repr__ (self): 238 | # s = "min " 239 | # for c,n1,n2 in self.objsq: 240 | # s = s + " + (%.10f * %s * %s)" % (c, n1, n2) 241 | # for c,n1 in self.objlin: 242 | # s = s + " + (%.10f * %s)" % (c, n1) 243 | # s = s + "\nsubject to\n " 244 | # s = s + "%d\n " % len(self.ctrs) 245 | # for ((c_list,v_list), rhs) in zip(self.ctrs, self.le_rhs): 246 | # for i in xrange(len(c_list)): 247 | # v = v_list[i] 248 | # c = c_list[i] 249 | # s = s + " + (%.10f * %s)" % (c,v) 250 | # s = s + " <= %s\n " % rhs 251 | # ri = 0 252 | # for c_list,v_list in self.ctrs_eq: 253 | # for i in xrange(len(c_list)): 254 | # if v_list[i]: 255 | # v = v_list[i] 256 | # c = c_list[i] 257 | # s = s + " + (%.10f * %s)" % (c,v) 258 | # b = self.b[ri] 259 | # s = s + " == %.10f" % b 260 | # s = s + "\n " 261 | # ri = ri + 1 262 | # return s 263 | 264 | # def unpack_soln (self, vec): 265 | # soln = dict() 266 | # for name,vi in self.vars.iteritems(): 267 | # soln[name] = vec[vi] 268 | # return soln 269 | 270 | # def solve (self): 271 | # tmr = mytime.timeit() 272 | 273 | # P,q,A,b,G,h = self.construct_problem() 274 | # tmr.tick ("QP: Constructing problem") 275 | 276 | # soln = solvers.coneqp(P, q, G, h, None, A, b, None, kktsolver="chol2") 277 | # tmr.tick ("QP: solving problem") 278 | 279 | # if soln['status'] != 
'optimal': 280 | # print "Error in QP: ", soln['status'] 281 | # return self.unpack_soln (soln['x']) 282 | 283 | 284 | # def matrix_from_hash (self, hash, nr, nc): 285 | # x = [] 286 | # I = [] 287 | # J = [] 288 | 289 | # ri = 0 290 | # for c_list,v_list in hash: 291 | # for i in xrange(len(c_list)): 292 | # vname = v_list[i] 293 | # if vname: 294 | # vidx = self.vars [vname] 295 | # c = c_list[i] 296 | # print "(%d, %s, %s)" % (ri, vidx, c) 297 | # I.append (ri) 298 | # J.append (vidx) 299 | # x.append (c) 300 | # ri = ri + 1 301 | 302 | # print "I:", I 303 | # print "J:", J 304 | # print (nr,nc) 305 | # A = spmatrix (x, I, J, (nr,nc)) 306 | # return A 307 | 308 | -------------------------------------------------------------------------------- /src/randomkit.c: -------------------------------------------------------------------------------- 1 | /* Random kit 1.3 */ 2 | 3 | /* 4 | * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) 5 | * 6 | * The rk_random and rk_seed functions algorithms and the original design of 7 | * the Mersenne Twister RNG: 8 | * 9 | * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, 10 | * All rights reserved. 11 | * 12 | * Redistribution and use in source and binary forms, with or without 13 | * modification, are permitted provided that the following conditions 14 | * are met: 15 | * 16 | * 1. Redistributions of source code must retain the above copyright 17 | * notice, this list of conditions and the following disclaimer. 18 | * 19 | * 2. Redistributions in binary form must reproduce the above copyright 20 | * notice, this list of conditions and the following disclaimer in the 21 | * documentation and/or other materials provided with the distribution. 22 | * 23 | * 3. The names of its contributors may not be used to endorse or promote 24 | * products derived from this software without specific prior written 25 | * permission. 
26 | * 27 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 28 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 29 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 30 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 31 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 32 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 33 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 34 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 35 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 36 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 37 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 | * 39 | * Original algorithm for the implementation of rk_interval function from 40 | * Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by 41 | * Magnus Jonsson. 42 | * 43 | * Constants used in the rk_double implementation by Isaku Wada. 44 | * 45 | * Permission is hereby granted, free of charge, to any person obtaining a 46 | * copy of this software and associated documentation files (the 47 | * "Software"), to deal in the Software without restriction, including 48 | * without limitation the rights to use, copy, modify, merge, publish, 49 | * distribute, sublicense, and/or sell copies of the Software, and to 50 | * permit persons to whom the Software is furnished to do so, subject to 51 | * the following conditions: 52 | * 53 | * The above copyright notice and this permission notice shall be included 54 | * in all copies or substantial portions of the Software. 55 | * 56 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 57 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 58 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
59 | * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 60 | * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 61 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 62 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 63 | */ 64 | 65 | /* static char const rcsid[] = 66 | "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */ 67 | 68 | #include 69 | #include 70 | #include 71 | #include 72 | #include 73 | #include 74 | #include 75 | 76 | #ifdef _WIN32 77 | /* Windows */ 78 | #include 79 | #ifndef RK_NO_WINCRYPT 80 | /* Windows crypto */ 81 | #ifndef _WIN32_WINNT 82 | #define _WIN32_WINNT 0x0400 83 | #endif 84 | #include 85 | #include 86 | #endif 87 | #else 88 | /* Unix */ 89 | #include 90 | #include 91 | #endif 92 | 93 | #include "randomkit.h" 94 | 95 | #ifndef RK_DEV_URANDOM 96 | #define RK_DEV_URANDOM "/dev/urandom" 97 | #endif 98 | 99 | #ifndef RK_DEV_RANDOM 100 | #define RK_DEV_RANDOM "/dev/random" 101 | #endif 102 | 103 | char *rk_strerror[RK_ERR_MAX] = 104 | { 105 | "no error", 106 | "random device unvavailable" 107 | }; 108 | 109 | /* static functions */ 110 | static unsigned long rk_hash(unsigned long key); 111 | 112 | void rk_seed(unsigned long seed, rk_state *state) 113 | { 114 | int pos; 115 | seed &= 0xffffffffUL; 116 | 117 | /* Knuth's PRNG as used in the Mersenne Twister reference implementation */ 118 | for (pos=0; poskey[pos] = seed; 121 | seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL; 122 | } 123 | 124 | state->pos = RK_STATE_LEN; 125 | state->has_gauss = 0; 126 | state->has_binomial = 0; 127 | } 128 | 129 | /* Thomas Wang 32 bits integer hash function */ 130 | unsigned long rk_hash(unsigned long key) 131 | { 132 | key += ~(key << 15); 133 | key ^= (key >> 10); 134 | key += (key << 3); 135 | key ^= (key >> 6); 136 | key += ~(key << 11); 137 | key ^= (key >> 16); 138 | return key; 139 | } 140 | 141 | rk_error rk_randomseed(rk_state *state) 142 | 
{ 143 | #ifndef _WIN32 144 | struct timeval tv; 145 | #else 146 | struct _timeb tv; 147 | #endif 148 | int i; 149 | 150 | if(rk_devfill(state->key, sizeof(state->key), 0) == RK_NOERR) 151 | { 152 | state->key[0] |= 0x80000000UL; /* ensures non-zero key */ 153 | state->pos = RK_STATE_LEN; 154 | state->has_gauss = 0; 155 | state->has_binomial = 0; 156 | 157 | for (i=0; i<624; i++) 158 | { 159 | state->key[i] &= 0xffffffffUL; 160 | } 161 | 162 | return RK_NOERR; 163 | } 164 | 165 | #ifndef _WIN32 166 | gettimeofday(&tv, NULL); 167 | rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec) 168 | ^ rk_hash(clock()), state); 169 | #else 170 | _ftime(&tv); 171 | rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state); 172 | #endif 173 | 174 | return RK_ENODEV; 175 | } 176 | 177 | /* Magic Mersenne Twister constants */ 178 | #define N 624 179 | #define M 397 180 | #define MATRIX_A 0x9908b0dfUL 181 | #define UPPER_MASK 0x80000000UL 182 | #define LOWER_MASK 0x7fffffffUL 183 | 184 | /* Slightly optimised reference implementation of the Mersenne Twister */ 185 | unsigned long rk_random(rk_state *state) 186 | { 187 | unsigned long y; 188 | 189 | if (state->pos == RK_STATE_LEN) 190 | { 191 | int i; 192 | 193 | for (i=0;ikey[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); 196 | state->key[i] = state->key[i+M] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); 197 | } 198 | for (;ikey[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); 201 | state->key[i] = state->key[i+(M-N)] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); 202 | } 203 | y = (state->key[N-1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); 204 | state->key[N-1] = state->key[M-1] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); 205 | 206 | state->pos = 0; 207 | } 208 | 209 | y = state->key[state->pos++]; 210 | 211 | /* Tempering */ 212 | y ^= (y >> 11); 213 | y ^= (y << 7) & 0x9d2c5680UL; 214 | y ^= (y << 15) & 0xefc60000UL; 215 | y ^= (y >> 18); 216 | 217 | return y; 218 | } 219 | 220 | long rk_long(rk_state *state) 221 | { 222 | 
return rk_ulong(state) >> 1; 223 | } 224 | 225 | unsigned long rk_ulong(rk_state *state) 226 | { 227 | #if ULONG_MAX <= 0xffffffffUL 228 | return rk_random(state); 229 | #else 230 | return (rk_random(state) << 32) | (rk_random(state)); 231 | #endif 232 | } 233 | 234 | unsigned long rk_interval(unsigned long max, rk_state *state) 235 | { 236 | unsigned long mask = max, value; 237 | 238 | if (max == 0) return 0; 239 | 240 | /* Smallest bit mask >= max */ 241 | mask |= mask >> 1; 242 | mask |= mask >> 2; 243 | mask |= mask >> 4; 244 | mask |= mask >> 8; 245 | mask |= mask >> 16; 246 | #if ULONG_MAX > 0xffffffffUL 247 | mask |= mask >> 32; 248 | #endif 249 | 250 | /* Search a random value in [0..mask] <= max */ 251 | #if ULONG_MAX > 0xffffffffUL 252 | if (max <= 0xffffffffUL) { 253 | while ((value = (rk_random(state) & mask)) > max); 254 | } else { 255 | while ((value = (rk_ulong(state) & mask)) > max); 256 | } 257 | #else 258 | while ((value = (rk_ulong(state) & mask)) > max); 259 | #endif 260 | 261 | return value; 262 | } 263 | 264 | double rk_double(rk_state *state) 265 | { 266 | /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */ 267 | long a = rk_random(state) >> 5, b = rk_random(state) >> 6; 268 | return (a * 67108864.0 + b) / 9007199254740992.0; 269 | } 270 | 271 | void rk_fill(void *buffer, size_t size, rk_state *state) 272 | { 273 | unsigned long r; 274 | unsigned char *buf = buffer; 275 | 276 | for (; size >= 4; size -= 4) 277 | { 278 | r = rk_random(state); 279 | *(buf++) = r & 0xFF; 280 | *(buf++) = (r >> 8) & 0xFF; 281 | *(buf++) = (r >> 16) & 0xFF; 282 | *(buf++) = (r >> 24) & 0xFF; 283 | } 284 | 285 | if (!size) return; 286 | 287 | r = rk_random(state); 288 | 289 | for (; size; r >>= 8, size --) 290 | *(buf++) = (unsigned char)(r & 0xFF); 291 | } 292 | 293 | rk_error rk_devfill(void *buffer, size_t size, int strong) 294 | { 295 | #ifndef _WIN32 296 | FILE *rfile; 297 | int done; 298 | 299 | if (strong) 300 | rfile = 
fopen(RK_DEV_RANDOM, "rb"); 301 | else 302 | rfile = fopen(RK_DEV_URANDOM, "rb"); 303 | if (rfile == NULL) 304 | return RK_ENODEV; 305 | done = fread(buffer, size, 1, rfile); 306 | fclose(rfile); 307 | if (done) 308 | return RK_NOERR; 309 | #else 310 | 311 | #ifndef RK_NO_WINCRYPT 312 | HCRYPTPROV hCryptProv; 313 | BOOL done; 314 | 315 | if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 316 | CRYPT_VERIFYCONTEXT) || !hCryptProv) 317 | return RK_ENODEV; 318 | done = CryptGenRandom(hCryptProv, size, (unsigned char *)buffer); 319 | CryptReleaseContext(hCryptProv, 0); 320 | if (done) 321 | return RK_NOERR; 322 | #endif 323 | 324 | #endif 325 | 326 | return RK_ENODEV; 327 | } 328 | 329 | rk_error rk_altfill(void *buffer, size_t size, int strong, rk_state *state) 330 | { 331 | rk_error err; 332 | 333 | err = rk_devfill(buffer, size, strong); 334 | if (err) 335 | rk_fill(buffer, size, state); 336 | 337 | return err; 338 | } 339 | 340 | double rk_gauss(rk_state *state) 341 | { 342 | if (state->has_gauss) 343 | { 344 | state->has_gauss = 0; 345 | return state->gauss; 346 | } 347 | else 348 | { 349 | double f, x1, x2, r2; 350 | do 351 | { 352 | x1 = 2.0*rk_double(state) - 1.0; 353 | x2 = 2.0*rk_double(state) - 1.0; 354 | r2 = x1*x1 + x2*x2; 355 | } 356 | while (r2 >= 1.0 || r2 == 0.0); 357 | 358 | f = sqrt(-2.0*log(r2)/r2); /* Box-Muller transform */ 359 | state->has_gauss = 1; 360 | state->gauss = f*x1; /* Keep for next call */ 361 | return f*x2; 362 | } 363 | } 364 | 365 | 366 | --------------------------------------------------------------------------------