├── .gitignore ├── LICENSE ├── README.md ├── conf ├── ex1.dot ├── ex2.dot ├── ex_expo1.dot ├── ex_pareto.dot ├── ex_simple.dot ├── fsconfgen.py ├── openflow1.dot ├── openflow_small_cbr.dot ├── openflow_small_harpoon.dot ├── simple_speed.dot ├── simple_speed.json ├── test.dot ├── testconf1.json └── testconf2.json ├── flowexport ├── __init__.py ├── cflow.py ├── cflowdexport.py ├── flowexporter.py ├── ipfix.py ├── ipfixexport.py ├── netflow9.py ├── nullexport.py └── textexport.py ├── fs.py ├── fslib ├── __init__.py ├── common.py ├── configurator.py ├── flowlet.py ├── link.py ├── node.py ├── openflow │ ├── __init__.py │ ├── ofmessage_v1.py │ ├── ofnode_v1.py │ └── pox_bridge.py ├── traffic.py └── util.py ├── requirements.txt ├── script ├── a_counters.txt ├── b_counters.txt ├── b_flow.txt ├── clean.sh ├── convert.py ├── profiler.py ├── run.sh ├── runspeed.sh ├── runtests.sh ├── speed_out.txt └── speedcmp.py ├── spec ├── configurator_spec.py ├── flowlet_spec.py ├── fs_spec.py ├── ofswitch_spec.py ├── spec_base.py └── traffic_spec.py ├── tcpmodels ├── __init__.py ├── csa00.py ├── mathis.py └── msmo97.py └── traffic_generators ├── __init__.py ├── harpoon.py ├── rawflow.py ├── simple.py ├── subtractive.py └── trafgen.py /.gitignore: -------------------------------------------------------------------------------- 1 | tmp 2 | *.pyc 3 | *.pyo 4 | .DS_Store 5 | env.sh 6 | pox 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc. 5 | 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. 
By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Library General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. 
If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. 
You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 
113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 
165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. 
If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. 
If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 
292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License 307 | along with this program; if not, write to the Free Software 308 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 309 | 310 | 311 | Also add information on how to contact you by electronic and paper mail. 312 | 313 | If the program is interactive, make it output a short notice like this 314 | when it starts in an interactive mode: 315 | 316 | Gnomovision version 69, Copyright (C) year name of author 317 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 318 | This is free software, and you are welcome to redistribute it 319 | under certain conditions; type `show c' for details. 320 | 321 | The hypothetical commands `show w' and `show c' should show the appropriate 322 | parts of the General Public License. Of course, the commands you use may 323 | be called something other than `show w' and `show c'; they could even be 324 | mouse-clicks or menu items--whatever suits your program. 325 | 326 | You should also get your employer (if you work as a programmer) or your 327 | school, if any, to sign a "copyright disclaimer" for the program, if 328 | necessary. Here is a sample; alter the names: 329 | 330 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 331 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 
332 | 333 | , 1 April 1989 334 | Ty Coon, President of Vice 335 | 336 | This General Public License does not permit incorporating your program into 337 | proprietary programs. If your program is a subroutine library, you may 338 | consider it more useful to permit linking proprietary applications with the 339 | library. If this is what you want to do, use the GNU Library General 340 | Public License instead of this License. 341 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FS: a network flow record generator 2 | 3 | FS is a network flow record generator. It contains a discrete event 4 | simulation core to generate the flow records, and relies on existing 5 | TCP throughput models to drive the simulation. 6 | 7 | FS is made available under terms of the GPLv2. 8 | 9 | Note: the originally released version of fs as described in an 10 | INFOCOM '11 research paper (see http://dx.doi.org/10.1109/INFCOM.2011.5935055) 11 | is on branch fs-orig. The master branch is completely revamped 12 | and includes the fs-sdn code as described in a HotSDN '13 paper (http://dl.acm.org/citation.cfm?id=2491202). 13 | 14 | ## Running fs 15 | 16 | FS is implemented in Python and has a few external module dependencies: 17 | 18 | To use fs, you need the following Python packages: 19 | * ipaddr 20 | * networkx 21 | * pydot 22 | * pyparsing 23 | * pytricia (py-radix is no longer supported) 24 | 25 | To install all the above, see the requirements.txt file here and use pip: 26 | 27 | $ pip install -r requirements.txt 28 | 29 | I'd recommend using virtualenv, then installing the packages inside 30 | the venv. See http://pypi.python.org/pypi/virtualenv. 31 | 32 | fs runs fastest using pypy (http://pypy.org) but also works well under the 33 | standard CPython implementation. 
34 | 35 | ## Examples 36 | 37 | There are a number of example configuration files in the `conf/` directory. To run a couple of the example configuration files for 600 simulated seconds, you might do something like: 38 | 39 | $ python -OO fs.py -t 600 conf/ex1.dot 40 | $ python -OO fs.py -t 600 conf/testconf1.json 41 | 42 | `fs` supports a DOT configuration file syntax as well as a (basically equivalent) JSON syntax. For now, config file syntax is undocumented; take a look at the examples and our 2011 INFOCOM paper: http://dx.doi.org/10.1109/INFCOM.2011.5935055 43 | 44 | To use the OpenFlow extensions (aka fs-sdn), you'll need to clone the POX git repository and point your PYTHONPATH to it. `fs` currently is only tested with the betta branch of POX. Once you've done those things, there are two example configurations in the `conf` folder that should work out-of-the-box: 45 | 46 | $ git clone git://github.com/noxrepo/pox.git ../pox 47 | $ export PYTHONPATH=`pwd`/../pox 48 | $ python -OO fs.py -t 60 conf/openflow_small_cbr.dot 49 | $ python -OO fs.py -t 60 conf/openflow_small_harpoon.dot 50 | 51 | The first few lines of output from fs when running one of the above example (OpenFlow) configurations should be: 52 | 53 | 0.0000 fslib.config INFO Reading config for graph test. 54 | 0.0000 fslib.config INFO Running measurements on these nodes: 55 | POX 0.1.0 (betta) / Copyright 2011-2013 James McCauley, et al. 56 | 0.0000 fs INFO Monkeypatching POX for integration with fs 57 | 0.0000 core INFO POX 0.1.0 (betta) is up. 58 | 0.0000 fs.core INFO simulation completion: 0.00 59 | 0.0600 openflow.of_01 INFO [00-02-e4-0d-b1-e0 1] connected 60 | 0.0600 openflow.of_01 INFO [00-02-f3-4f-f4-e2 2] connected 61 | 0.0600 openflow.of_01 INFO [00-02-eb-ae-d3-63 3] connected 62 | 63 | 64 | ## Acknowledgments 65 | 66 | This software is based upon work supported by the National Science Foundation under Grant No. CNS-1054985. 
Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation. 67 | 68 | ---------- 69 | 70 | Copyright 2011-2013 Joel Sommers. All rights reserved. 71 | 72 | This file is part of fs, a network flow record generation tool. 73 | 74 | fs is free software; you can redistribute it and/or modify 75 | it under the terms of the GNU General Public License as published by 76 | the Free Software Foundation; either version 2 of the License, or 77 | (at your option) any later version. 78 | 79 | fs is distributed in the hope that it will be useful, 80 | but WITHOUT ANY WARRANTY; without even the implied warranty of 81 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 82 | GNU General Public License for more details. 83 | 84 | You should have received a copy of the GNU General Public License 85 | along with fs; if not, write to the Free Software 86 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 87 | 88 | -------------------------------------------------------------------------------- /conf/ex1.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | // 3 nodes: a, b, and c 3 | flowexport=text 4 | 5 | a [ 6 | autoack="False" 7 | ipdests="10.1.0.0/16 10.10.0.0/16 10.128.0.0/9" 8 | traffic="m1" 9 | 10 | // basic harpoon setup 11 | // build up and withdrawal of harpoon sources; 10 sources for duration 12 | // of 60 sec, followed by 20 sources for 60 sec, etc. 
13 | // m1="modulator start=0.0 generator=s1 profile=((60,),(10,20,30,30,20,10))" 14 | 15 | m1="modulator start=0.0 generator=s1 profile=((3600,),(1,))" 16 | s1="harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=pareto(10000,1.2) flowstart=exponential(100.0) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 17 | ]; 18 | 19 | b [ 20 | autoack="False" 21 | ipdests="10.2.0.0/16" 22 | ]; 23 | 24 | c [ 25 | autoack="False" 26 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 27 | ]; 28 | 29 | // links 30 | a -- b [weight=10, capacity=100000000, delay=0.043]; 31 | b -- c [weight=10, capacity=100000000, delay=0.031]; 32 | a -- c [weight=30, capacity=100000000, delay=0.123]; 33 | } 34 | -------------------------------------------------------------------------------- /conf/ex2.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | // 3 nodes: a, b, and c 3 | flowexport=text 4 | 5 | a [ 6 | autoack="False" 7 | ipdests="10.1.0.0/16" 8 | traffic="m1" 9 | m1="modulator start=0.0 generator=s1 profile=((600,),(1,))" 10 | s1="harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=randomchoice(10000) flowstart=randomchoice(10) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 11 | ]; 12 | 13 | b [ 14 | autoack="False" 15 | ipdests="10.2.0.0/16" 16 | ]; 17 | 18 | c [ 19 | autoack="False" 20 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 21 | ]; 22 | 23 | // links 24 | a -- b [weight=10, capacity=100000000, delay=0.043]; 25 | b -- c [weight=10, capacity=100000000, delay=0.031]; 26 | a -- c [weight=30, capacity=100000000, delay=0.123]; 27 | } 28 | -------------------------------------------------------------------------------- /conf/ex_expo1.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | // 3 nodes: a, b, and c 3 | flowexport=text 4 | 
usewallclock=True 5 | counterexport=True 6 | counterexportinterval=1 7 | counterexportfile=counters 8 | pktsampling=1.0 9 | flowsampling=1.0 10 | maintenance_cycle=60.0 11 | longflowtmo=-1 12 | flowinactivetmo=-1 13 | 14 | a [ 15 | autoack="False" 16 | ipdests="10.1.0.0/16" 17 | traffic="m1" 18 | m1="modulator start=0.0 generator=s1 profile=((3600,),(1,))" 19 | s1="harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=exponential(1/10000.0) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 20 | ]; 21 | 22 | b [ 23 | autoack="False" 24 | ipdests="10.2.0.0/16" 25 | ]; 26 | 27 | c [ 28 | autoack="False" 29 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 30 | ]; 31 | 32 | // links 33 | a -- b [weight=10, capacity=100000000, delay=0.043]; 34 | b -- c [weight=10, capacity=100000000, delay=0.031]; 35 | a -- c [weight=30, capacity=100000000, delay=0.123]; 36 | } 37 | -------------------------------------------------------------------------------- /conf/ex_pareto.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | // 3 nodes: a, b, and c 3 | flowexport=text 4 | 5 | a [ 6 | autoack="False" 7 | ipdests="10.1.0.0/16" 8 | traffic="m1" 9 | m1="modulator start=0.0 generator=s1 profile=((3600,),(1,))" 10 | s1="harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=pareto(10000.0,1.2) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 11 | ]; 12 | 13 | b [ 14 | autoack="False" 15 | ipdests="10.2.0.0/16" 16 | ]; 17 | 18 | c [ 19 | autoack="False" 20 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 21 | ]; 22 | 23 | // links 24 | a -- b [weight=10, capacity=100000000, delay=0.043]; 25 | b -- c [weight=10, capacity=100000000, delay=0.031]; 26 | a -- c [weight=30, capacity=100000000, delay=0.123]; 27 | } 28 | 
-------------------------------------------------------------------------------- /conf/ex_simple.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | // 3 nodes: a, b, and c 3 | flowexport=text 4 | counterexportfile="counters" 5 | flowsampling=1.0 6 | pktsampling=1.0 7 | exportcycle=60 8 | counterexport=True 9 | counterexportinterval=1 10 | longflowtmo=60 11 | flowinactivetmo=60 12 | 13 | a [ 14 | autoack="False" 15 | ipdests="10.1.0.0/16" 16 | traffic="m1" 17 | m1="modulator start=0.0 generator=s1 profile=((3600,),(1,))" 18 | s1="harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=randomchoice(10000) flowstart=randomchoice(1) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 19 | ]; 20 | 21 | b [ 22 | autoack="False" 23 | ipdests="10.2.0.0/16" 24 | ]; 25 | 26 | c [ 27 | autoack="False" 28 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 29 | ]; 30 | 31 | // links 32 | a -- b [weight=10, capacity=100000000, delay=0.043]; 33 | b -- c [weight=10, capacity=100000000, delay=0.031]; 34 | a -- c [weight=30, capacity=100000000, delay=0.123]; 35 | } 36 | -------------------------------------------------------------------------------- /conf/fsconfgen.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | __author__ = 'jsommers@colgate.edu' 4 | 5 | import sys 6 | from optparse import OptionParser 7 | import string 8 | import json 9 | 10 | def add_flow_measurement(cfgdict, nodelist, flowtype="text_export"): 11 | '''Add flow measurement to config for nodes identified in nodelist''' 12 | strlist = " ".join(nodelist) 13 | cfgdict["graph"]["graph"]["measurementnodes"] = strlist 14 | cfgdict["graph"]["graph"]["flowexportfn"] = flowtype + "_factory" 15 | 16 | 17 | def base_config(name, ): 18 | '''Make a base config dictionary for eventual export to JSON''' 19 | cfgdict = { 20 | "directed": False, 21 | 
"graph": [ 22 | ["node", {}], 23 | ["graph", { 24 | "flowexportfn": "null_export_factory", 25 | "measurementnodes": "", 26 | "flowsampling": 1.0, 27 | "counterexportinterval": 1, 28 | "pktsampling": 1.0, 29 | "longflowtmo": 60, 30 | "exportcycle": 60, 31 | "counterexport": True, 32 | "counterexportfile": "counters", 33 | "flowinactivetmo": 60 34 | }], 35 | ["edge", {}], 36 | ["name", name], 37 | ], 38 | "nodes": [ 39 | ], 40 | "links": [ 41 | ], 42 | "multigraph": True 43 | } 44 | return cfgdict 45 | 46 | def get_graphdict(cfg): 47 | '''Retrieve 'graph' dictionary from configuration''' 48 | for name,xdict in cfg['graph']: 49 | if name == 'graph': 50 | return xdict 51 | return None 52 | 53 | def make_node(cfgdict, dst=-1, addtraffic=False): 54 | nodename,nodeindex = gen_nodename(cfgdict) 55 | srcprefix = "10.{}.{}.0/24".format(nodeindex/256, nodeindex%256) 56 | dstindex = nodeindex+1 57 | if dst >= 0: 58 | dstindex = dst 59 | dstprefix = "10.{}.{}.0/24".format(dstindex/256, dstindex%256) 60 | 61 | nodedict = { 62 | "ipdests": srcprefix, 63 | "id": nodename, "autoack": False 64 | } 65 | 66 | if addtraffic: 67 | harpoon = "flowsize=pareto(10000.0,1.2) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 68 | gdict = get_graphdict(cfgdict) 69 | gdict['commonharpoon'] = harpoon 70 | 71 | trafficcfg = "harpoon ipsrc={} ipdst={} $commonharpoon".format(srcprefix, dstprefix) 72 | nodedict["traffic"] = "modulate" 73 | nodedict["modulate"] = "modulator start=0.0 generator=tcfg profile=((3600,),(1,))" 74 | nodedict["tcfg"] = trafficcfg 75 | 76 | cfgdict['nodes'].append(nodedict) 77 | return nodename,nodeindex 78 | 79 | def make_link(cfgdict, nodea, nodeb): 80 | '''Make a new link from nodea to nodeb and add it to configuration''' 81 | stdlink = { 82 | "delay": "43ms", 83 | "capacity": "1Gb", 84 | "weight": 10 85 | } 86 | sourceidx = get_nodeindex(cfgdict, nodea) 87 | targetidx = 
def gen_nodename(cfgdict):
    '''Generate a new node name given existing node configurations.

    Returns a (name, index) tuple.  The first 26 nodes are named
    'a'..'z'; the next 26 repeat the letter ('aa'..'zz'), and so on.

    Fix: use floor division (//) instead of /.  On Python 2 the two are
    identical for ints, but under Python 3 (or with
    ``from __future__ import division``) true division yields a float
    and ``str * float`` raises TypeError.
    '''
    index = len(cfgdict['nodes'])
    alphabet = string.ascii_lowercase
    replication = index // len(alphabet) + 1
    return alphabet[index % len(alphabet)] * replication, index
longflowtmo=60 11 | flowinactivetmo=60 12 | 13 | a [ 14 | type="OpenflowSwitch" 15 | controller="controller" 16 | autoack="False" 17 | 18 | ipdests="10.1.0.0/24" 19 | m1="modulator start=5 generator=s1 profile=((3600,),(1,))" 20 | s1="simple ipsrc=10.1.0.0/24 ipdst=10.2.0.0/24 flowlets=1 dport=randomunifint(1024,65535) sport=randomunifint(1024,65535) ipproto=udp pkts=randomchoice(8333) bytes=randomchoice(12500000) continuous=True fps=1" 21 | traffic="m1" 22 | ]; 23 | 24 | b [ 25 | type="OpenflowSwitch" 26 | controller="controller" 27 | autoack="False" 28 | ipdests="10.2.0.128/26" 29 | ]; 30 | 31 | c [ 32 | type="OpenflowSwitch" 33 | controller="controller" 34 | autoack="False" 35 | ipdests="10.2.0.192/26" 36 | ]; 37 | 38 | d [ 39 | type="OpenflowSwitch" 40 | controller="controller" 41 | autoack="False" 42 | ipdests="10.2.0.0/25" 43 | ]; 44 | 45 | controller [ 46 | type="OpenflowController" 47 | components="pox.forwarding.l2_learning pox.openflow.discovery" 48 | ]; 49 | 50 | 51 | // data path links 52 | a -- b [weight=10, capacity=100000000, delay=0.010]; 53 | b -- c [weight=10, capacity=100000000, delay=0.010]; 54 | b -- d [weight=10, capacity=100000000, delay=0.010]; 55 | 56 | // control links between ofcontroller and each ofswitch 57 | a -- controller [ capacity=1000000000, delay=0.0001 ] 58 | b -- controller [ capacity=1000000000, delay=0.0001 ] 59 | c -- controller [ capacity=1000000000, delay=0.0001 ] 60 | d -- controller [ capacity=1000000000, delay=0.0001 ] 61 | } 62 | -------------------------------------------------------------------------------- /conf/openflow_small_cbr.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | // global settings for measurements 3 | counterexportfile="counters" 4 | flowexport=text 5 | flowsampling=1.0 6 | pktsampling=1.0 7 | exportcycle=60 8 | counterexport=True 9 | counterexportinterval=1 10 | longflowtmo=60 11 | flowinactivetmo=60 12 | 13 | a [ 14 | type="OpenflowSwitch" 
15 | controller="controller" 16 | autoack="False" 17 | ipdests="10.1.0.0/16" 18 | traffic="m1" 19 | // udp vbr flows 20 | m1="modulator start=5 generator=s1 profile=((3600,),(5,))" 21 | s1="simple ipsrc=10.1.1.0/24 ipdst=10.3.1.0/24 flowlets=1 dport=randomunifint(1024,65535) sport=randomunifint(1024,65535) ipproto=udp pkts=normal(600,10) bytes=normal(625000,100) continuous=True fps=1" 22 | ]; 23 | 24 | b [ 25 | type="OpenflowSwitch" 26 | controller="controller" 27 | autoack="False" 28 | ipdests="10.2.0.0/16" 29 | ]; 30 | 31 | c [ 32 | type="OpenflowSwitch" 33 | controller="controller" 34 | autoack="False" 35 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 36 | ]; 37 | 38 | controller [ 39 | type="OpenflowController" 40 | components="pox.forwarding.l2_learning" 41 | ]; 42 | 43 | 44 | // data path links 45 | a -- b [weight=10, capacity=100000000, delay=0.043]; 46 | b -- c [weight=10, capacity=100000000, delay=0.031]; 47 | a -- c [weight=30, capacity=100000000, delay=0.123]; 48 | 49 | // control links between ofcontroller and each ofswitch 50 | a -- controller [ capacity=1000000000, delay=0.01 ] 51 | b -- controller [ capacity=1000000000, delay=0.01 ] 52 | c -- controller [ capacity=1000000000, delay=0.01 ] 53 | } 54 | -------------------------------------------------------------------------------- /conf/openflow_small_harpoon.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | // global settings for measurements 3 | counterexportfile="counters" 4 | flowexport=text 5 | flowsampling=1.0 6 | pktsampling=1.0 7 | exportcycle=60 8 | counterexport=True 9 | counterexportinterval=1 10 | longflowtmo=60 11 | flowinactivetmo=60 12 | 13 | a [ 14 | type="OpenflowSwitch" 15 | controller="controller" 16 | autoack="False" 17 | ipdests="10.1.0.0/16" 18 | traffic="m1" 19 | m1="modulator start=5 generator=s1 profile=((3600,),(5,))" 20 | s1="harpoon ipsrc=10.1.1.0/24 ipdst=10.3.1.0/24 flowsize=pareto(5000,1.2) 
flowstart=exponential(10.0) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 21 | ]; 22 | 23 | b [ 24 | type="OpenflowSwitch" 25 | controller="controller" 26 | autoack="False" 27 | ipdests="10.2.0.0/16" 28 | ]; 29 | 30 | c [ 31 | type="OpenflowSwitch" 32 | controller="controller" 33 | autoack="False" 34 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 35 | ]; 36 | 37 | controller [ 38 | type="OpenflowController" 39 | components="pox.forwarding.l2_learning" 40 | ]; 41 | 42 | 43 | // data path links 44 | a -- b [weight=10, capacity=100000000, delay=0.043]; 45 | b -- c [weight=10, capacity=100000000, delay=0.031]; 46 | a -- c [weight=30, capacity=100000000, delay=0.123]; 47 | 48 | // control links between ofcontroller and each ofswitch 49 | a -- controller [ capacity=1000000000, delay=0.01 ] 50 | b -- controller [ capacity=1000000000, delay=0.01 ] 51 | c -- controller [ capacity=1000000000, delay=0.01 ] 52 | } 53 | -------------------------------------------------------------------------------- /conf/simple_speed.dot: -------------------------------------------------------------------------------- 1 | graph simplespeed { 2 | // 3 nodes: a, b, and c 3 | flowexport=text 4 | counterexportfile="counters" 5 | flowsampling=1.0 6 | pktsampling=1.0 7 | exportcycle=60 8 | counterexport=True 9 | counterexportinterval=1 10 | longflowtmo=60 11 | flowinactivetmo=60 12 | measurementnodes="a b" 13 | commonharpoon="flowsize=pareto(10000.0,1.2) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 14 | 15 | a [ 16 | autoack="False" 17 | ipdests="10.0.0.0/24" 18 | traffic="modulate" 19 | modulate="modulator start=0.0 generator=tcfg profile=((3600,),(1,))" 20 | tcfg="harpoon ipsrc=10.0.0.0/24 ipdst=10.0.1.0/24 $commonharpoon" 21 | ]; 22 | 23 | b [ 24 | autoack="False" 25 | ipdests="10.0.1.0/24" 26 | ]; 27 | 28 | 29 | // links 30 | a -- 
b [weight=10, capacity="1Gb", delay="43ms"]; 31 | } 32 | -------------------------------------------------------------------------------- /conf/simple_speed.json: -------------------------------------------------------------------------------- 1 | {"directed": false, "graph": [["node", {}], ["graph", {"flowsampling": 1.0, "counterexportinterval": 1, "measurementnodes": "a b", "flowexport": "text", "pktsampling": 1.0, "longflowtmo": 60, "exportcycle": 60, "commonharpoon": "flowsize=pareto(10000.0,1.2) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)", "counterexport": true, "counterexportfile": "counters", "flowinactivetmo": 60}], ["edge", {}], ["name", "myfs"]], "nodes": [{"ipdests": "10.0.0.0/24", "id": "a", "tcfg": "harpoon ipsrc=10.0.0.0/24 ipdst=10.0.1.0/24 $commonharpoon", "traffic": "modulate", "modulate": "modulator start=0.0 generator=tcfg profile=((3600,),(1,))", "autoack": false}, {"autoack": false, "ipdests": "10.0.1.0/24", "id": "b"}], "links": [{"delay": "43ms", "source": 0, "capacity": "1Gb", "target": 1, "weight": 10}], "multigraph": true} 2 | -------------------------------------------------------------------------------- /conf/test.dot: -------------------------------------------------------------------------------- 1 | graph test { 2 | flowexport=text 3 | 4 | // nodes 5 | 6 | a [ 7 | autoack="True" 8 | ipdests="10.1.0.0/16 10.10.0.0/16 10.128.0.0/9" 9 | traffic="m1 m2 m3 m4 m5 m6 m7 m8" 10 | 11 | // basic harpoon setup 12 | // build up and withdrawal of harpoon sources; 10 sources for duration 13 | // of 60 sec, followed by 20 sources for 60 sec, etc. 
14 | m1="modulator start=0.0 generator=s1 profile=((60,),(10,20,30,30,20,10))" 15 | s1="harpoon ipsrc=10.2.0.0/16 ipdst=10.3.1.0/24 flowsize=pareto(5000,1.2) flowstart=exponential(1/2.0) pktsize=normal(1000,200) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomuniffloat(0.005,0.01) mss=randomchoice(1500,576,1500) iptos=randomchoice(0x0,0x10,0x08,0x04,0x02)" 16 | 17 | // very short flows 18 | // immediate onset/withdrawal, one source started at exponential intervals (mean 1 sec) starting at 19 | // t=10 sec 20 | m2="modulator start=exponential(1/1.0) generator=s2 profile=((10,),(1,))" 21 | s2="rawflow ipsrc=10.1.1.0/24 ipdst=10.3.2.0/24 sport=80 dport=randomunifint(1024,65535) ipproto=tcp bytes=normal(3000,500) iptos=randomchoice(0x0,0x02) pktsize=normal(1000,200) tcpflags=SYN|FIN|ACK continuous=False interval=exponential(1/1.0)" 22 | 23 | // udp vbr flows 24 | m3="modulator start=exponential(1/1.0) generator=s3 profile=((10,),(1,))" 25 | s3="rawflow ipsrc=10.1.1.5/32 ipdst=10.3.2.5/32 flowlets=100 dport=4444 sport=randomunifint(1024,65535) ipproto=udp pkts=normal(10,1) bytes=normal(1000,100) continuous=False interval=1.0" 26 | 27 | // echo 28 | // icmp dport: high nybble=type, low nybble=code 29 | // every second, start up one source; immediate onset and departure 30 | m4="modulator start=exponential(1/1.0) generator=s4 profile=((10,),(1,))" 31 | s4="rawflow ipsrc=10.1.1.5/32 ipdst=10.3.2.5/32 flowlets=1 ipproto=icmp pkts=1 bytes=64 icmptype=8 icmpcode=0 continuous=True interval=1" 32 | 33 | // echo reply 34 | m5="modulator start=exponential(1/1.0) generator=s5 profile=((10,),(1,))" 35 | s5="rawflow ipdst=10.1.1.5/32 ipsrc=10.3.2.5/32 flowlets=1 ipproto=icmp pkts=1 bytes=64 icmptype=0 icmpcode=0 continuous=True interval=1" 36 | 37 | // various icmp 38 | m6="modulator start=0.0 generator=s6 profile=((10,),(1,))" 39 | s6="rawflow ipdst=10.1.1.5/16 ipsrc=10.3.2.5/16 flowlets=1 ipproto=icmp pkts=1 bytes=64 
icmptype=randomchoice(0,3,11,13,8,30) icmpcode=0 continuous=True interval=2" 40 | 41 | 42 | 43 | // anomalies 44 | 45 | // SYN flood that ramps up and winds down in a controlled fashion 46 | // at t=10 sec, ramp up as a step function from 1 source up to 100; 47 | // sustain for 30 sec at 100 sources, then withdraw down to 0, 48 | // reducing by 10 sources every second. 49 | m7="modulator start=10 generator=syns emerge=((1,),frange(1,100,10)) sustain=((30,),(100,)) withdraw=((1,),frange(100,0,-10))" 50 | syns="rawflow ipsrc=10.1.0.0/16 ipdst=10.3.5.0/26 dport=80 sport=randomunifint(1,65535) ipproto=tcp pkts=1 bytes=40 tcpflags=SYN flowlets=1 continuous=True interval=exponential(1/1.0) autoack=False" 51 | 52 | 53 | // 'subtractive' anomaly 54 | m8="modulator start=30 generator=sub1 profile=((10,),(1,))" 55 | // sub1="subtractive dstnode=a ipdstfilt=10.3.0.0/16 ipsrcfilt=10.2.0.0/16 ipprotofilt=6 action=removeuniform(0.9)" 56 | sub1="subtractive dstnode=a action=removeuniform(0.001)" 57 | ]; 58 | 59 | b [ 60 | autoack="True" 61 | ipdests="10.2.0.0/16" 62 | ]; 63 | 64 | c [ 65 | autoack="True" 66 | ipdests="10.3.0.0/16 10.4.0.0/16 10.0.0.0/8" 67 | ]; 68 | 69 | 70 | // links (bidirectional) 71 | 72 | a -- b [weight=10, capacity=1000000, delay=0.043, reliability="failureafter=30 downfor=10"]; 73 | b -- c [weight=10, capacity=1000000, delay=0.031]; 74 | a -- c [weight=30, capacity=1000000, delay=0.123, reliability="failureafter=35 downfor=5"]; 75 | } 76 | -------------------------------------------------------------------------------- /conf/testconf1.json: -------------------------------------------------------------------------------- 1 | { 2 | "directed": false, 3 | "graph": [ 4 | ["node", {}], 5 | ["graph", {"flowexport": "text"}], 6 | ["edge", {}], 7 | ["name", "test"]], 8 | "nodes": [ 9 | {"s1": "harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=pareto(10000.0,1.2) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) 
dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)", 10 | "ipdests": "10.1.0.0/16", 11 | "traffic": "m1", 12 | "defaultroute": "b", 13 | "m1": "modulator start=0.0 generator=s1 profile=((3600,),(1,))", 14 | "id": "a", "autoack": false}, 15 | {"ipdests": "10.3.0.0/16 10.4.0.0/16 10.0.0.0/8", "id": "c", "autoack": false, "defaultroute": true}, 16 | {"ipdests": "10.2.0.0/16", "id": "b", "autoack": false}], 17 | "links": [ 18 | {"delay": 0.123, "source": 0, "capacity": 100000000, "target": 1, "weight": 30}, 19 | {"delay": 0.043, "source": 0, "capacity": 100000000, "target": 2, "weight": 10}, 20 | {"delay": 0.031, "source": 1, "capacity": 100000000, "target": 2, "weight": 10}], 21 | "multigraph": true 22 | } 23 | -------------------------------------------------------------------------------- /conf/testconf2.json: -------------------------------------------------------------------------------- 1 | { 2 | "directed": false, 3 | "graph": [ 4 | ["node", {}], 5 | ["graph", {"flowexport": "text"}], 6 | ["edge", {}], 7 | ["name", "test"]], 8 | "nodes": [ 9 | {"s1": "harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=pareto(10000.0,1.2) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)", 10 | "ipdests": "10.1.0.0/16", 11 | "traffic": "m1", 12 | "m1": "modulator start=0.0 generator=s1 profile=((3600,),(1,))", 13 | "id": "a", "autoack": false}, 14 | {"ipdests": "10.3.0.0/16 10.4.0.0/16 10.0.0.0/8", 15 | "id": "c", "autoack": false}, 16 | {"ipdests": "10.2.0.0/16", "id": "b", "autoack": false}], 17 | "links": [ 18 | {"delay": 0.123, "source": 0, "capacity": 100000000, "target": 1, "weight": 30}, 19 | {"delay": 0.123, "source": 0, "capacity": 100000000, "target": 1, "weight": 30}, 20 | {"delay": 0.043, "source": 0, "capacity": 100000000, "target": 2, "weight": 10}, 21 | {"delay": 0.043, "source": 0, "capacity": 100000000, "target": 2, "weight": 10}, 22 | {"delay": 0.031, 
class cflow(object):
    '''Pack flow records in cflowd binary format (header definitions
    copied from Dave Plonka's cflowd perl module).'''

    # Field-presence bitmasks: each bit flags one field of a record.
    k_routerMask = 0x00000001
    k_srcIpAddrMask = 0x00000002
    k_dstIpAddrMask = 0x00000004
    k_inputIfIndexMask = 0x00000008
    k_outputIfIndexMask = 0x00000010
    k_srcPortMask = 0x00000020
    k_dstPortMask = 0x00000040
    k_pktsMask = 0x00000080
    k_bytesMask = 0x00000100
    k_ipNextHopMask = 0x00000200
    k_startTimeMask = 0x00000400
    k_endTimeMask = 0x00000800
    k_protocolMask = 0x00001000
    k_tosMask = 0x00002000
    k_srcAsMask = 0x00004000
    k_dstAsMask = 0x00008000
    k_srcMaskLenMask = 0x00010000
    k_dstMaskLenMask = 0x00020000
    k_tcpFlagsMask = 0x00040000
    k_engineTypeMask = 0x00400000
    k_engineIdMask = 0x00800000

    # Union of all masks above: packrecord() always emits every field.
    k_fullmask = 0x00c7ffff

    # struct format for one record, network byte order.  The leading 'I'
    # holds the field-presence mask; the remaining 21 codes must stay in
    # the exact order of the values passed to struct.pack in packrecord().
    struct_template = '!IIIIHHHHIIIIIBBHHBBBBB'

    @classmethod
    def packrecord(cls, inputif=0, outputif=0, rtraddr=int(ipaddr.IPAddress('0.0.0.0')), srcaddr=int(ipaddr.IPAddress('0.0.0.0')), dstaddr=int(ipaddr.IPAddress('0.0.0.0')), pkts=0, bytes=0, start=0, end=0, srcport=0, dstport=0, tcpflags=0, ipproto=0, iptos=0, ipnexthop=int(ipaddr.IPAddress('0.0.0.0')), srcas=0, dstas=0, srcmasklen=0, dstmasklen=0, enginetype=0, engineid=0):
        '''Pack a single flow record into cflowd binary format and return
        the packed byte string.  Addresses are passed as plain integers
        (e.g. int(ipaddr.IPAddress(...))); start/end are integer seconds.
        NOTE(review): the positional order below must match
        struct_template exactly -- do not reorder arguments.'''
        packed = struct.pack(cflow.struct_template, cflow.k_fullmask, rtraddr, srcaddr, dstaddr, inputif, outputif, srcport, dstport, pkts, bytes, ipnexthop, start, end, ipproto, iptos, srcas, dstas, srcmasklen, dstmasklen, tcpflags, enginetype, engineid)
        return packed
class ipfix(object):
    '''Pack flow records in a basic IPFIX-style binary layout.'''

    # Message header layout (matches struct_headertemplate):
    # version (0x000a) i16
    # messagelen i16
    # export timestamp i32
    # seq number i32
    # sourceid i32
    struct_headertemplate = '!HHIII'

    # Per-record layout, network byte order; field order must match the
    # positional arguments to struct.pack in packrecord() below.
    struct_rectemplate = '!IIIIHHHHIIIIIBBHHBBBB'

    @classmethod
    def packheader(cls):
        '''Return a zero-filled message header.
        NOTE(review): all five fields -- including the version field,
        nominally 0x000a per the layout comment above -- are packed as
        zero; presumably a placeholder to be filled in later.'''
        packed = struct.pack(ipfix.struct_headertemplate, 0, 0, 0, 0, 0)
        return packed

    @classmethod
    def packrecord(cls, inputif=0, outputif=0, rtraddr=int(ipaddr.IPAddress('0.0.0.0')), srcaddr=int(ipaddr.IPAddress('0.0.0.0')), dstaddr=int(ipaddr.IPAddress('0.0.0.0')), pkts=0, bytes=0, start=0, end=0, srcport=0, dstport=0, tcpflags=0, ipproto=0, iptos=0, ipnexthop=int(ipaddr.IPAddress('0.0.0.0')), srcas=0, dstas=0, srcmasklen=0, dstmasklen=0, enginetype=0, engineid=0):
        '''Pack one flow record and return the packed byte string.
        Addresses are plain integers; start/end are integer seconds.
        NOTE(review): positional order must match struct_rectemplate --
        do not reorder arguments.'''
        packed = struct.pack(ipfix.struct_rectemplate, rtraddr, srcaddr, dstaddr, inputif, outputif, srcport, dstport, pkts, bytes, ipnexthop, start, end, ipproto, iptos, srcas, dstas, srcmasklen, dstmasklen, tcpflags, enginetype, engineid)
        return packed
22 | 23 | -------------------------------------------------------------------------------- /flowexport/netflow9.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | __author__ = 'jsommers@colgate.edu' 4 | 5 | # 6 | # basic netflow9 flow record format 7 | # 8 | 9 | import struct 10 | import ipaddr 11 | 12 | 13 | # 14 | # template flowset (defines how ip data flowsets are recorded) 15 | # - flowset id 0 (i16) 16 | # - length (i16) 17 | # - template id (i16) (0-255 template/options; >=256 data) 18 | # - field count (i16) 19 | # - field type 1 (i16) 20 | # - field length 1 (i16) 21 | # - field type 2 (i16) 22 | # - field length 2 (i16) 23 | # - field type k (i16) 24 | # - field length k (i16) 25 | # 26 | # options template flowset (offers info about the recording process itself, e.g., sampling 27 | # rate at an interface) 28 | # - flowset id 1 (i16) 29 | # - length (i16) 30 | # - template id (> 255) (i16) 31 | # - 32 | # 33 | # 34 | # data flowset (an ip data flow, structured according to some template) 35 | # - flowset id 36 | # - length 37 | # - record 1, field value 1 38 | # - record 1, field value 2 39 | # - record 1, field value 3 40 | # - record 1, field value k 41 | # - record 2, field value 1 42 | # - record 2, field value 2 43 | # - record 2, field value 3 44 | # - record 2, field value k 45 | # ... 
class netflow9(object):
    '''Pack flow records in a basic NetFlow v9 binary layout.'''

    # Message header layout (matches struct_headertemplate):
    # version (0x0009) i16
    # messagelen i16 (number of records)
    # sysuptime i32 (time in milliseconds since device was booted)
    # unix secs i32 (standard unix time)
    # seq number i32 (sequence number of export packets from this device)
    # sourceid i32 (32 bits that identifies 'exporter observation domain')
    struct_headertemplate = '!HHIIII'

    # Per-record layout, network byte order; field order must match the
    # positional arguments to struct.pack in packrecord() below.
    struct_rectemplate = '!IIIIHHHHIIIIIBBHHBBBB'

    @classmethod
    def packheader(cls):
        '''Return a zero-filled v9 message header.

        Bug fix: struct_headertemplate ('!HHIIII') describes SIX fields
        (version, messagelen, sysuptime, unixsecs, seqnumber, sourceid)
        but only five zeros were supplied, so struct.pack always raised
        struct.error.  Six values are now packed.
        '''
        packed = struct.pack(netflow9.struct_headertemplate, 0, 0, 0, 0, 0, 0)
        return packed

    @classmethod
    def packrecord(cls, inputif=0, outputif=0, rtraddr=int(ipaddr.IPAddress('0.0.0.0')), srcaddr=int(ipaddr.IPAddress('0.0.0.0')), dstaddr=int(ipaddr.IPAddress('0.0.0.0')), pkts=0, bytes=0, start=0, end=0, srcport=0, dstport=0, tcpflags=0, ipproto=0, iptos=0, ipnexthop=int(ipaddr.IPAddress('0.0.0.0')), srcas=0, dstas=0, srcmasklen=0, dstmasklen=0, enginetype=0, engineid=0):
        '''Pack one flow record and return the packed byte string.
        Addresses are plain integers; start/end are integer seconds.
        NOTE(review): positional order must match struct_rectemplate --
        do not reorder arguments.'''
        packed = struct.pack(netflow9.struct_rectemplate, rtraddr, srcaddr, dstaddr, inputif, outputif, srcport, dstport, pkts, bytes, ipnexthop, start, end, ipproto, iptos, srcas, dstas, srcmasklen, dstmasklen, tcpflags, enginetype, engineid)
        return packed
class TextExporter(FlowExporter):
    '''Write flowlet records to a plain-text file, buffered in memory.'''

    def __init__(self, rname, bufsize=500):
        FlowExporter.__init__(self, rname)
        # One output file per router; records accumulate in self.buffer
        # and are written out in batches of self.bufsize.
        self.outfile = open(self.routername + '_flow.txt', 'wb')
        self.buffer = []
        self.bufsize = bufsize

    def _flush_buffer(self):
        '''Write all buffered records to the file and reset the buffer.'''
        pending = ''.join(self.buffer)
        self.outfile.write(pending)
        self.buffer = []

    def shutdown(self):
        '''Flush any remaining buffered records and close the file.'''
        self._flush_buffer()
        self.outfile.close()

    def exportflow(self, ts, flet):
        '''Format one flowlet as a text record and queue it for output,
        flushing when the buffer reaches its configured size.'''
        self.buffer.append('textexport %s %0.06f %s\n' % (self.routername, ts, str(flet)))
        if len(self.buffer) >= self.bufsize:
            self._flush_buffer()
    def monkeypatch(self):
        '''monkey patch current time function in time module to give
        simulation time.

        Saves the real wall-clock function as self.walltime first (the
        save must precede the overwrite; unmonkeypatch relies on it to
        restore time.time), then replaces time.time with self.nowfn so
        any module calling time.time() transparently reads the current
        simulation time instead of wall-clock time.
        '''
        import time
        setattr(self, "walltime", time.time)
        setattr(time, "time", self.nowfn)
(string), a callback function, and any necessary arguments 100 | to the function''' 101 | if not isinstance(delay, (float,int)): 102 | print "Invalid delay: {}".format(delay) 103 | sys.exit(-1) 104 | expire_time = self.now + delay 105 | heappush(self.__heap, (expire_time, evid, callback, fnargs)) 106 | 107 | def cancel(self, evid): 108 | '''Cancel an event that matches evid''' 109 | i = 0 110 | removed = [] 111 | while i < len(self.__heap): 112 | if self.__heap[i][1] == evid: 113 | removed.append(self.__heap.pop(i)) 114 | else: 115 | i += 1 116 | heapify(self.__heap) 117 | return len(removed) 118 | 119 | def run(self, scenario, configonly=False): 120 | '''Start the simulation using a particular scenario filename''' 121 | cfg = FsConfigurator() 122 | if scenario: 123 | root, ext = os.path.splitext(scenario) 124 | self.__topology = cfg.load_config(scenario, configtype=ext[1:]) 125 | else: 126 | self.logger.info("No simulation scenario specified." + 127 | " I'll just do nothing!") 128 | 129 | if configonly: 130 | self.logger.info("Exiting after doing config.") 131 | return 132 | 133 | self.after(0.0, 'progress indicator', self.progress) 134 | 135 | simstart = self.__now 136 | self.topology.start() 137 | while (self.__now - simstart) < self.endtime and not self.intr: 138 | if len(self.__heap) == 0: 139 | break 140 | expire_time, evid, callback, fnargs = heappop(self.__heap) 141 | if self.debug > 1: 142 | self.logger.debug("FS event: '{}'' @{}".format(evid, expire_time)) 143 | self.__now = expire_time 144 | callback(*fnargs) 145 | 146 | self.logger.debug("Reached simulation end time: {}, {}" 147 | .format(self.now, self.endtime)) 148 | self.topology.stop() 149 | 150 | 151 | def main(): 152 | '''Parse command-line arguments and start up the simulation''' 153 | parser = OptionParser() 154 | parser.prog = "fs.py" 155 | parser.add_option("-f", "--logfile", dest="logfile", 156 | default="", help="Send log to file (default: log to stderr)") 157 | parser.add_option("-d", 
"--debug", dest="debug", 158 | default=0, action="count", 159 | help="Turn on debugging output (may be given multiple times to increase debug output)") 160 | parser.add_option("-t", "--simtime", dest="simtime", 161 | default=300, type=int, 162 | help="Set amount of simulation time (default: 300 sec)") 163 | parser.add_option("-i", "--interval", dest="interval", 164 | default=1.0, type=float, 165 | help="Set the simulation tick interval (sec) (default: 1 sec)") 166 | parser.add_option("-c", "--configonly", dest="configonly", 167 | default=False, action="store_true", 168 | help="Just do configuration then exit") 169 | parser.add_option("-s", "--seed", dest="seed", 170 | default=None, type="int", 171 | help="Set random number generation seed (default: seed based on system time)") 172 | (options, args) = parser.parse_args() 173 | 174 | if len(args) != 1: 175 | print >> sys.stderr,"Usage: %s [options] " % (sys.argv[0]) 176 | sys.exit(0) 177 | 178 | random.seed(options.seed) 179 | fscommon.setup_logger(options.logfile, options.debug) 180 | 181 | sim = FsCore(options.interval, endtime=options.simtime, debug=options.debug) 182 | signal.signal(signal.SIGINT, sim.sighandler) 183 | sys.path.append(".") 184 | sim.run(args[0], configonly=options.configonly) 185 | 186 | if __name__ == '__main__': 187 | main() 188 | -------------------------------------------------------------------------------- /fslib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jsommers/fs/3d85e6d2e2a4c30a7c4716aab75dd766af5d0eb1/fslib/__init__.py -------------------------------------------------------------------------------- /fslib/common.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ''' 3 | Functions that are commonly used in various fs modules and subsystems. 
# (tail of the fslib/common.py module docstring opened before this chunk)
'''
import logging

# One shared format for every fs logger: sim timestamp, logger name, level.
LOG_FORMAT = '%(created)9.4f %(name)-12s %(levelname)-8s %(message)s'

_loginit = False
def setup_logger(logfile=None, debug=False):
    # Configure the root logger; optionally mirror output to a file.
    # Marks the module-level _loginit flag so get_logger() knows setup ran.
    global _loginit
    _loginit = True
    loglevel = logging.INFO
    if debug:
        loglevel = logging.DEBUG

    logging.basicConfig(level=loglevel, format=LOG_FORMAT)

    applog = logging.getLogger()
    if logfile:
        h = logging.FileHandler(logfile)
        h.setLevel(loglevel)
        h.setFormatter(logging.Formatter(LOG_FORMAT))
        applog.addHandler(h)

def get_logger(name='fs'):
    # Lazily initialize logging on first use so library callers don't
    # have to call setup_logger() explicitly.
    global _loginit
    if not _loginit:
        setup_logger(None, False)
    return logging.getLogger(name)

_obj = None
def set_fscore(obj):
    '''Set the fs core object.  Heaven forgive me for using global.'''
    global _obj
    _obj = obj

def fscore():
    '''Get the fs core object'''
    return _obj

# ----- /fslib/flowlet.py -----
#!/usr/bin/env python

__author__ = 'jsommers@colgate.edu'

import copy
import ipaddr
from socket import IPPROTO_TCP, IPPROTO_UDP, IPPROTO_ICMP
import time
from collections import namedtuple
from fslib.util import removeuniform, default_ip_to_macaddr


class IncompatibleFlowlets(Exception):
    # Raised when combining two flowlets whose 5-tuple keys differ.
    pass

class InvalidFlowletTimestamps(Exception):
    # Raised on negative or out-of-order flow start/end timestamps.
    pass

class InvalidFlowletVolume(Exception):
    # Raised on negative packet or byte counts.
    pass

class FlowIdent(object):
    '''the class formerly known as FiveTuple'''
    __slots__ = ['__key']

    FLOW_IDENTIFIERS = ('srcip','dstip','ipproto','sport','dport')
    FlowKey = namedtuple('FlowKey',FLOW_IDENTIFIERS)

    def __init__(self, srcip='0.0.0.0', dstip='0.0.0.0', ipproto=0, sport=0, dport=0):
        # store the flow identifier as a (named) tuple for efficiency
        # (continuation of FlowIdent.__init__ started in the previous chunk)
        self.__key = FlowIdent.FlowKey(srcip, dstip, ipproto, sport, dport)

    def mkreverse(self):
        # Build the identifier for the reverse direction (addresses and
        # ports swapped); used when synthesizing ACK flows.
        rv = FlowIdent(self.key.dstip, self.key.srcip, self.key.ipproto, self.key.dport, self.key.sport)
        return rv

    def __str__(self):
        return str(self.key)

    def __repr__(self):
        return str(self.key)

    @property
    def key(self):
        return self.__key

class Flowlet(object):
    # A chunk of a network flow: 5-tuple identity plus byte/packet volumes,
    # timestamps, TCP flags, and the interface it arrived on.
    __slots__ = ['__srcmac','__dstmac','__mss','__iptos','__pkts',
                 '__bytes','__flowident','__tcpflags','__ackflow',
                 '__flowstart','__flowend','ingress_intf']
    def __init__(self, ident,
                 srcmac=None, dstmac=None,
                 pkts=0, bytes=0, tcpflags=0):
        # NOTE: flowstart/flowend use -1.0 as a "not yet set" sentinel;
        # the property setters below reject negative assignments.
        self.__flowident = ident
        self.__flowstart = -1.0
        self.__flowend = -1.0
        self.__srcmac = srcmac
        self.__dstmac = dstmac
        self.pkts = pkts
        self.bytes = bytes
        self.ingress_intf = None
        self.iptos = 0x0
        self.mss = 1500
        self.tcpflags = 0x0
        self.ackflow = False

    @property
    def flowident(self):
        return self.__flowident

    @property
    def iptos(self):
        return self.__iptos

    @iptos.setter
    def iptos(self, iptos):
        self.__iptos = iptos

    @property
    def mss(self):
        return self.__mss

    @mss.setter
    def mss(self, m):
        # sanity bound on maximum segment size
        assert(100 <= m <= 1500)
        self.__mss = m

    @property
    def endofflow(self):
        # check if tcp and FIN or RST
        return self.ipproto == IPPROTO_TCP and (self.tcpflags & 0x01 or self.tcpflags & 0x04)

    @property
    def ident(self):
        return self.__flowident

    @property
    def key(self):
        return self.flowident.key

    @property
    def size(self):
        return self.bytes

    @property
    def srcmac(self):
        return self.__srcmac

    @srcmac.setter
    def srcmac(self, macaddr):
        self.__srcmac = macaddr

    @property
    def dstmac(self):
        return self.__dstmac

    @dstmac.setter
    def dstmac(self, macaddr):
        self.__dstmac = macaddr

    @property
    def srcaddr(self):
        return self.flowident.key.srcip

    @property
    def dstaddr(self):
        return self.flowident.key.dstip

    @property
    def ipproto(self):
        return self.flowident.key.ipproto

    @property
    def ipprotoname(self):
        # human-readable protocol name for __str__/export
        if self.ipproto == IPPROTO_TCP:
            return 'tcp'
        elif self.ipproto == IPPROTO_UDP:
            return 'udp'
        elif self.ipproto == IPPROTO_ICMP:
            return 'icmp'
        else:
            return 'ip'

    @property
    def srcport(self):
        return self.flowident.key.sport

    @property
    def dstport(self):
        return self.flowident.key.dport

    @property
    def pkts(self):
        return self.__pkts

    @pkts.setter
    def pkts(self, p):
        if p < 0:
            raise InvalidFlowletVolume()
        self.__pkts = p

    @property
    def bytes(self):
        return self.__bytes

    @bytes.setter
    def bytes(self, b):
        if b < 0:
            raise InvalidFlowletVolume()
        self.__bytes = b

    @property
    def ackflow(self):
        # True when this flowlet is a synthesized reverse-direction ACK flow
        return self.__ackflow

    @ackflow.setter
    def ackflow(self, a):
        self.__ackflow = a;

    def clear_tcp_flags(self):
        self.__tcpflags = 0x0

    def add_tcp_flag(self, flag):
        self.__tcpflags |= flag

    @property
    def tcpflags(self):
        return self.__tcpflags

    @tcpflags.setter
    def tcpflags(self, flags):
        self.__tcpflags = flags

    @property
    def tcpflagsstr(self):
        # Render the flag bitmask as the conventional letter string (FSRPAUEC).
        rv = []
        if self.tcpflags & 0x01: #fin
            rv.append('F')
        if self.tcpflags & 0x02: #syn
            rv.append('S')
        if self.tcpflags & 0x04: #rst
            rv.append('R')
        if self.tcpflags & 0x08: #push
            rv.append('P')
        if self.tcpflags & 0x10: #ack
            rv.append('A')
        if self.tcpflags & 0x20: #urg
            rv.append('U')
        if self.tcpflags & 0x40: #ece
            rv.append('E')
        if self.tcpflags & 0x80: #cwr
            rv.append('C')
        return ''.join(rv)

    @property
    def flowstart(self):
        return self.__flowstart

    @flowstart.setter
    def flowstart(self, fstart):
        if fstart < 0:
            raise InvalidFlowletTimestamps()
        self.__flowstart = fstart

    @property
    def flowend(self):
        return self.__flowend

    @flowend.setter
    def flowend(self, fend):
        # end must be non-negative and not precede start
        if fend < 0 or fend < self.flowstart:
            raise InvalidFlowletTimestamps(self.__str__())
        self.__flowend = fend

    def __cmp__(self, other):
        # ordering/equality based solely on the flow key
        return cmp(self.key, other.key)

    def __iadd__(self, other):
        # accumulate volumes/flags in place; keys must match
        if self.key != other.key:
            raise IncompatibleFlowlets()
        self.pkts += other.pkts
        self.bytes += other.bytes
        self.tcpflags |= other.tcpflags
        return self

    def __add__(self, other):
        # non-mutating merge; NB: copy.copy is a shallow copy
        if self.key != other.key:
            raise IncompatibleFlowlets()
        rv = copy.copy(self)
        rv.pkts += other.pkts
        rv.bytes += other.bytes
        rv.tcpflags |= other.tcpflags
        return rv

    def __str__(self):
        return "%0.06f %0.06f %s:%d->%s:%d %s 0x%0x %s %d %d %s" % (self.flowstart, self.flowend, self.srcaddr, self.srcport, self.dstaddr, self.dstport, self.ipprotoname, self.iptos, self.ingress_intf, self.pkts, self.bytes, self.tcpflagsstr)


class SubtractiveFlowlet(Flowlet):
    # A flowlet that removes traffic as it travels (carries an 'action'
    # consumed by nodes; see Router.flowlet_arrival).
    __slots__ = ['action']

    def __init__(self, ident, action):
        Flowlet.__init__(self, ident)
        self.action = action


# ----- /fslib/link.py -----
#!/usr/bin/python

'''
Models a single link in fs.
Each link knows about its head (ingress) end
and tail (egress) end, how long (delay) and fat (capacity) it is, and can
optionally keep track of backlog (queuing delay).
'''

__author__ = 'jsommers@colgate.edu'

import sys
import re
from fslib.common import get_logger, fscore

class Link(object):
    '''
    Models a link in fs.
    '''
    __slots__ = ['capacity', 'delay', 'egress_node', 'egress_name',
                 'ingress_node', 'ingress_name', 'ingress_ip',
                 'egress_ip', 'backlog', 'bdp', 'queuealarm', 'lastalarm',
                 'alarminterval', 'doqdelay', 'logger' ]
    def __init__(self, capacity, delay, ingress_node, egress_node):
        # capacity is parsed as bits/sec then stored as bytes/sec
        self.capacity = Link.parse_capacity(capacity)/8.0 # bytes/sec
        self.delay = Link.parse_delay(delay)
        self.ingress_ip = 0
        self.egress_ip = 0
        self.egress_node = egress_node
        self.egress_name = Link.make_portname(self.egress_node.name, self.egress_ip)
        self.ingress_node = ingress_node
        self.ingress_name = Link.make_portname(self.ingress_node.name, self.ingress_ip)
        self.backlog = 0          # bytes currently "in flight"/queued
        self.bdp = self.capacity * self.delay # bytes (bandwidth-delay product)
        self.queuealarm = 1.0     # warn when queueing delay exceeds this (sec)
        self.lastalarm = -1
        self.alarminterval = 30   # min seconds between backlog warnings
        self.doqdelay = True
        self.logger = get_logger("link {}->{}".format(self.ingress_node.name, self.egress_node.name))

    def __str__(self):
        return "Link {}->{}".format(self.ingress_name, self.egress_name)

    @staticmethod
    def parse_capacity(capacity):
        '''Parse config file capacity, return capacity as a float in bits/sec'''
        if isinstance(capacity, (int,float)):
            return float(capacity)
        elif isinstance(capacity, (str, unicode)):
            # bare number assumed to already be bits/sec
            if re.match('^(\d+\.?\d*)$', capacity):
                return float(capacity)
            # [kK]+anything assumed to be kbit/sec
            mobj = re.match('^(\d+\.?\d*)[kK]', capacity)
            if mobj:
                return float(mobj.groups()[0]) * 1000.0

            # [mM]+anything assumed to be mbit/sec
            mobj = re.match('^(\d+\.?\d*)[mM]', capacity)
            if mobj:
                return float(mobj.groups()[0]) * 1000000.0

            # [gG]+anything assumed to be gbit/sec
            mobj = re.match('^(\d+\.?\d*)[gG]', capacity)
            if mobj:
                return float(mobj.groups()[0]) * 1000000000.0

        get_logger().error("Can't parse link capacity: {}".format(capacity))
        sys.exit(-1)

    @staticmethod
    def parse_delay(delay):
        '''Parse config file delay, return delay as a float in seconds'''
        if isinstance(delay, (int,float)):
            return float(delay)
        elif isinstance(delay, (str, unicode)):
            # bare number assumed to already be seconds
            if re.match('^(\d*\.?\d+)$', delay):
                return float(delay)

            # [sS]+anything assumed to be seconds
            mobj = re.match('^(\d*\.?\d+)s', delay, re.IGNORECASE)
            if mobj:
                return float(mobj.groups()[0])

            # [ms]+anything assumed to be milliseconds
            mobj = re.match('^(\d*\.?\d+)ms', delay, re.IGNORECASE)
            if mobj:
                return float(mobj.groups()[0]) / 1000.0

            # [us]+anything assumed to be microseconds
            mobj = re.match('^(\d*\.?\d+)us', delay, re.IGNORECASE)
            if mobj:
                return float(mobj.groups()[0]) / 1000000.0

        get_logger().error("Can't parse link delay: {}".format(delay))
        sys.exit(-1)

    @property
    def egress_node_name(self):
        return self.egress_node.name

    @staticmethod
    def make_portname(node, port):
        '''
        Make a canonical string representing a node/port (local ip address) pair.
        '''
        return "{}:{}".format(node, port)

    def set_egress_ip(self, ip):
        '''
        Set the egress ip address of the link.
        '''
        self.egress_ip = str(ip)
        self.egress_name = Link.make_portname(self.egress_node.name, self.egress_ip)

    def set_ingress_ip(self, ip):
        '''
        Set the ingress ip address of the link.
        '''
        self.ingress_ip = str(ip)
        self.ingress_name = Link.make_portname(self.ingress_node.name, self.ingress_ip)

    def decrbacklog(self, amt):
        '''
        When a flowlet is forwarded, decrement the backlog for this link.
        '''
        self.backlog -= amt

    def flowlet_arrival(self, flowlet, prevnode, destnode):
        '''
        Handler for when a flowlet arrives on a link. Compute how long the flowlet should be delayed
        before arriving at next node, and optionally handle computing queueing delay (backlog) on
        the link.
        '''
        # base latency: propagation delay + serialization time
        wait = self.delay + flowlet.size / self.capacity

        if self.doqdelay:
            # queueing delay: backlog beyond one bandwidth-delay product
            queuedelay = max(0, (self.backlog - self.bdp) / self.capacity)
            wait += queuedelay
            self.backlog += flowlet.size
            if queuedelay > self.queuealarm and fscore().now - self.lastalarm > self.alarminterval:
                self.lastalarm = fscore().now
                self.logger.warn("Excessive backlog on link {}-{}({:3.2f} sec ({} bytes))".format(self.ingress_name, self.egress_name, queuedelay, self.backlog))
            fscore().after(wait, "link-decrbacklog-{}".format(self.egress_node.name), self.decrbacklog, flowlet.size)

        # NOTE(review): format() here receives a second arg (egress_ip) that
        # the single-{} template ignores; harmless but looks unintended.
        fscore().after(wait, "link-flowarrival-{}".format(self.egress_name, self.egress_ip), self.egress_node.flowlet_arrival, flowlet, prevnode, destnode, self.egress_ip)


class NullLinkClass(object):
    '''Link null object'''
    IDENT='local null link'
    def flowlet_arrival(self, *args):
        # null object: silently drop
        pass

    @property
    def egress_name(self):
        return NullLinkClass.IDENT

    @property
    def egress_node_name(self):
        return NullLinkClass.IDENT

# a singleton
NullLink = NullLinkClass()


# ----- /fslib/node.py -----
#!/usr/bin/python

__author__ = 
'jsommers@colgate.edu'

from abc import ABCMeta, abstractmethod
from importlib import import_module
import logging
from random import random
import copy
from fslib.flowlet import *
from collections import Counter, defaultdict, namedtuple
import copy
# NOTE(review): 'copy' is imported twice above; harmless but redundant.
import networkx
from pytricia import PyTricia
import time
from fslib.common import *
from fslib.link import NullLink
from socket import IPPROTO_TCP


class MeasurementConfig(object):
    # Bag of measurement/export settings parsed from string-valued kwargs
    # (as read from the scenario config); all exposed read-only.
    __slots__ = ['__counterexport','__exporttype','__exportinterval','__exportfile','__pktsampling','__flowsampling','__maintenance_cycle','__longflowtmo','__flowinactivetmo']
    def __init__(self, **kwargs):
        # NOTE(review): eval() on a config-supplied string; safe only for
        # trusted config files.
        self.__counterexport = bool(eval(str(kwargs.get('counterexport','False'))))
        self.__exporttype = kwargs.get('flowexport','null')
        self.__exportinterval = int(kwargs.get('counterexportinterval',1))
        self.__exportfile = kwargs.get('counterexportfile',None)
        self.__pktsampling = float(kwargs.get('pktsampling',1.0))
        self.__flowsampling = float(kwargs.get('flowsampling',1.0))
        self.__maintenance_cycle = float(kwargs.get('maintenance_cycle',60.0))
        self.__longflowtmo = int(kwargs.get('longflowtmo',-1))
        self.__flowinactivetmo = int(kwargs.get('flowinactivetmo',-1))

    @property
    def counterexport(self):
        return self.__counterexport

    @property
    def exporttype(self):
        return self.__exporttype

    def exportclass(self):
        '''Based on exporter name, return the class object that can be
        used to instantiate actual flow exporter objects'''
        mod = import_module("flowexport.{}export".format(self.exporttype))
        cls = getattr(mod, "{}Exporter".format(self.exporttype.capitalize()))
        return cls

    @property
    def exportinterval(self):
        return self.__exportinterval

    @property
    def exportfile(self):
        return self.__exportfile

    @property
    def pktsampling(self):
        return self.__pktsampling

    @property
    def flowsampling(self):
        return self.__flowsampling

    @property
    def maintenance_cycle(self):
        return self.__maintenance_cycle

    @property
    def longflowtmo(self):
        return self.__longflowtmo

    @property
    def flowinactivetmo(self):
        return self.__flowinactivetmo

    def __str__(self):
        return 'MeasurementConfig <{}, {}, {}>'.format(str(self.exporttype), str(self.counterexport), self.exportfile)


class NullMeasurement(object):
    # Null object used when a node has no measurement config.
    def start(self):
        pass
    def stop(self):
        pass
    def add(self, flowlet, prevnode, inport):
        pass
    def remove(self, flowlet, prevnode):
        pass


class NodeMeasurement(NullMeasurement):
    # Per-node flow table + SNMP-like counters, periodically exported.
    BYTECOUNT = 0
    PKTCOUNT = 1
    FLOWCOUNT = 2
    # NOTE(review): 'counters' appears twice in __slots__; harmless duplicate.
    __slots__ = ['config','counters','flow_table','node_name','exporter','counters','counter_exportfh']

    def __init__(self, measurement_config, node_name):
        self.config = measurement_config
        self.node_name = node_name
        self.flow_table = {}
        self.counters = defaultdict(Counter)
        self.counter_exportfh = None
        self.exporter = self.config.exportclass()(node_name)

    def start(self):
        '''
        start router maintenance loop at random within first 10 seconds.
        maintenance loop periodically fires thereafter
        (below code is used to desynchronize router maintenance across net)
        '''
        fscore().after(random()*self.config.maintenance_cycle, 'node-flowexport-'+str(self.node_name), self.flow_export)

        if self.config.counterexport and self.config.exportinterval > 0:
            if self.config.exportfile == 'stdout':
                self.counter_exportfh = sys.stdout
            else:
                self.counter_exportfh = open('{}_{}.txt'.format(self.node_name, self.config.exportfile), 'w')
            fscore().after(0, 'router-snmpexport-'+str(self.node_name), self.counter_export)

    def counter_export(self):
        # Dump per-neighbor byte/pkt/flow counters, then reset them and
        # reschedule.
        if not self.config.counterexport:
            return

        for k,v in self.counters.iteritems():
            print >>self.counter_exportfh, '%8.3f %s->%s %d bytes %d pkts %d flows' % (fscore().now, k, self.node_name, v[self.BYTECOUNT], v[self.PKTCOUNT], v[self.FLOWCOUNT])
        self.counters = defaultdict(Counter)
        fscore().after(self.config.exportinterval, 'node-snmpexport-'+str(self.node_name), self.counter_export)

    def flow_export(self):
        config = self.config
        killlist = []
        for k,v in self.flow_table.iteritems():
            # if flow has been inactive for inactivetmo seconds, or
            # flow has been active longer than longflowtmo seconds, expire it
            if config.flowinactivetmo > 0 and ((fscore().now - v.flowend) >= config.flowinactivetmo) and v.flowend > 0:
                self.exporter.exportflow(fscore().now, v)
                killlist.append(k)

            # NOTE(review): a flow matching both conditions is exported twice
            # (and appended to killlist twice); the dedup below only guards
            # the deletion — confirm whether double-export is intended.
            if config.longflowtmo > 0 and ((fscore().now - v.flowstart) >= config.longflowtmo) and v.flowend > 0:
                self.exporter.exportflow(fscore().now, v)
                killlist.append(k)

        for k in killlist:
            if k in self.flow_table:
                del self.flow_table[k]

        # reschedule next router maintenance
        fscore().after(self.config.maintenance_cycle, 'node-flowexport-'+str(self.node_name), self.flow_export)

    def stop(self):
        # Flush all remaining flows, shut down the exporter, close the
        # counter file (but never the shared stdout handle).
        killlist = []
        for k,v in self.flow_table.iteritems():
            if v.flowend < 0:
                v.flowend = fscore().now
            self.exporter.exportflow(fscore().now, v)
            killlist.append(k)

        for k in killlist:
            del self.flow_table[k]
        self.exporter.shutdown()
        if self.counter_exportfh and self.config.exportfile != 'stdout':
            self.counter_exportfh.close()

    def __nosample(self):
        # True => skip this flow (flow-sampling); returns None when
        # sampling rate is 1.0, which is falsy and thus equivalent.
        if self.config.flowsampling < 1.0:
            return random() > self.config.flowsampling

    def __addflow(self, flowlet, prevnode, inport):
        # Merge into an existing table entry or install a new one;
        # returns 1 if a new flow was created, else 0 (used for FLOWCOUNT).
        newflow = 0
        flet = None
        if flowlet.key in self.flow_table:
            flet = self.flow_table[flowlet.key]
            # flet.flowend = fscore().now ### FIXME!!!
            flet += flowlet
        else:
            # NB: shallow copy of flowlet; will share same reference to
            # five tuple across the entire simulation
            newflow = 1
            flet = copy.copy(flowlet)
            # NOTE(review): '+=' on the -1.0 flowend sentinel yields
            # (now - 1); together with the FIXME above this looks
            # suspicious — confirm intended flowend semantics.
            flet.flowend += fscore().now
            flet.flowstart = fscore().now
            self.flow_table[flet.key] = flet
        flet.ingress_intf = "{}:{}".format(prevnode,inport)
        return newflow

    def __addcounters(self, flowlet, prevnode, newflow):
        # Accumulate per-previous-hop byte/pkt/flow counters.
        counters = self.counters[prevnode]
        counters[self.BYTECOUNT] += flowlet.bytes
        counters[self.PKTCOUNT] += flowlet.pkts
        counters[self.FLOWCOUNT] += newflow

    def add(self, flowlet, prevnode, inport):
        # Record an observed flowlet (subject to flow sampling).
        if self.__nosample():
            return
        newflow = self.__addflow(flowlet, prevnode, inport)
        if self.config.counterexport:
            self.__addcounters(flowlet, prevnode, newflow)

    def remove(self, flowlet, prevnode):
        # Expire and export a flow (e.g., on TCP FIN/RST).
        if flowlet.key not in self.flow_table:
            return

        stored_flowlet = self.flow_table[flowlet.key]
        if stored_flowlet.flowend < 0:
            stored_flowlet.flowend = fscore().now
        del self.flow_table[flowlet.key]
        self.exporter.exportflow(fscore().now, stored_flowlet)

class ArpFailure(Exception):
211 | pass 212 | 213 | PortInfo = namedtuple('PortInfo', ('link','localip','remoteip','localmac','remotemac')) 214 | 215 | class Node(object): 216 | '''Base Node class in fs. All subclasses will want to at least override flowlet_arrival to handle 217 | the arrival of a new flowlet at the node.''' 218 | __metaclass__ = ABCMeta 219 | 220 | __slots__ = ['__name','__started','node_measurements','ports','logger','node_to_port_map'] 221 | 222 | def __init__(self, name, measurement_config, **kwargs): 223 | # exportfn, exportinterval, exportfile): 224 | self.__name = name 225 | if measurement_config: 226 | self.node_measurements = NodeMeasurement(measurement_config, name) 227 | else: 228 | self.node_measurements = NullMeasurement() 229 | self.ports = {} 230 | self.node_to_port_map = defaultdict(list) 231 | self.logger = get_logger(self.name) 232 | self.__started = False 233 | 234 | @property 235 | def started(self): 236 | return self.__started 237 | 238 | def portFromNexthopNode(self, nodename, flowkey=None): 239 | '''Given a next-hop node name, return a link object that gets us to that node. 
Optionally provide 240 | a flowlet key in order to hash correctly to the right link in the case of multiple links.''' 241 | tlist = self.node_to_port_map.get(nodename) 242 | if not tlist: 243 | return None 244 | localip = tlist[hash(flowkey) % len(tlist)] 245 | return self.ports[localip] 246 | 247 | @property 248 | def name(self): 249 | return self.__name 250 | 251 | def start(self): 252 | self.__started = True 253 | self.node_measurements.start() 254 | 255 | def stop(self): 256 | self.node_measurements.stop() 257 | 258 | @abstractmethod 259 | def flowlet_arrival(self, flowlet, prevnode, destnode, input_ident=None): 260 | pass 261 | 262 | def measure_flow(self, flowlet, prevnode, inport): 263 | self.node_measurements.add(flowlet, prevnode, inport) 264 | 265 | def unmeasure_flow(self, flowlet, prevnode): 266 | self.node_measurements.remove(flowlet, prevnode) 267 | 268 | def add_link(self, link, localip, remoteip, next_node): 269 | '''Add a new interface and link to this node. link is the link object connecting 270 | this node to next_node. 
hostip is the ip address assigned to the local interface for this 271 | link, and remoteip is the ip address assigned to the remote interface of the link.''' 272 | localip = str(localip) 273 | remoteip = str(remoteip) 274 | self.ports[localip] = PortInfo(link, localip, remoteip, None, None) 275 | self.node_to_port_map[next_node].append(localip) 276 | 277 | class ForwardingFailure(Exception): 278 | pass 279 | 280 | class Router(Node): 281 | __slots__ = ['autoack', 'forwarding_table', 'default_link', 'trafgen_ip'] 282 | 283 | def __init__(self, name, measurement_config, **kwargs): 284 | Node.__init__(self, name, measurement_config, **kwargs) 285 | self.autoack=bool(eval(str(kwargs.get('autoack','False')))) 286 | self.forwarding_table = PyTricia(32) 287 | self.default_link = None 288 | 289 | from fslib.configurator import FsConfigurator 290 | ipa,ipb = [ ip for ip in next(FsConfigurator.link_subnetter).iterhosts() ] 291 | self.add_link(NullLink, ipa, ipb, 'remote') 292 | self.trafgen_ip = str(ipa) 293 | 294 | def setDefaultNextHop(self, nexthop): 295 | '''Set up a default next hop route. 
Assumes that we just select the first link to the next 296 | hop node if there is more than one.''' 297 | self.logger.debug("Default: {}, {}".format(nexthop, str(self.node_to_port_map))) 298 | self.default_link = self.portFromNexthopNode(nexthop).link 299 | if not self.default_link: 300 | raise ForwardingFailure("Error setting default next hop: there's no static ARP entry to get interface") 301 | self.logger.debug("Setting default next hop for {} to {}".format(self.name, nexthop)) 302 | 303 | def addForwardingEntry(self, prefix, nexthop): 304 | '''Add new forwarding table entry to Node, given a destination prefix 305 | and a nexthop (node name)''' 306 | pstr = str(prefix) 307 | self.logger.debug("Adding forwarding table entry: {}->{}".format(pstr, nexthop)) 308 | xnode = self.forwarding_table.get(pstr, None) 309 | if not xnode: 310 | xnode = [] 311 | self.forwarding_table[pstr] = xnode 312 | xnode.append(nexthop) 313 | 314 | def removeForwardingEntry(self, prefix, nexthop): 315 | '''Remove an entry from the Node forwarding table.''' 316 | pstr = str(prefix) 317 | if not self.forwarding_table.has_key(pstr): 318 | return 319 | xnode = self.forwarding_table.get(pstr) 320 | xnode.remove(nexthop) 321 | if not xnode: 322 | del self.forwarding_table[pstr] 323 | 324 | def nextHop(self, destip): 325 | '''Return the next hop from the local forwarding table (next node, ipaddr), based on destination IP address (or prefix)''' 326 | xlist = self.forwarding_table.get(str(destip), None) 327 | if xlist: 328 | return xlist[hash(destip) % len(xlist)] 329 | raise ForwardingFailure() 330 | 331 | def flowlet_arrival(self, flowlet, prevnode, destnode, input_ip=None): 332 | if input_ip is None: 333 | input_ip = self.trafgen_ip 334 | input_port = self.ports[input_ip] 335 | 336 | if isinstance(flowlet, SubtractiveFlowlet): 337 | killlist = [] 338 | ok = [] 339 | self.unmeasure_flow(flowlet, prevnode) 340 | if destnode != self.name: 341 | self.forward(flowlet, destnode) 342 | return 343 | 
344 | # a "normal" Flowlet object 345 | self.measure_flow(flowlet, prevnode, str(input_port.localip)) 346 | 347 | if flowlet.endofflow: 348 | self.unmeasure_flow(flowlet, prevnode) 349 | 350 | if destnode == self.name: 351 | if self.__should_make_acknowledgement_flow(flowlet): 352 | revflow = Flowlet(flowlet.flowident.mkreverse()) 353 | 354 | revflow.ackflow = True 355 | revflow.flowstart = revflow.flowend = fscore().now 356 | 357 | if flowlet.tcpflags & 0x04: # RST 358 | return 359 | 360 | if flowlet.tcpflags & 0x02: # SYN 361 | revflow.tcpflags = revflow.tcpflags | 0x10 362 | # print 'setting syn/ack flags',revflow.tcpflagsstr 363 | 364 | if flowlet.tcpflags & 0x01: # FIN 365 | revflow.tcpflags = revflow.tcpflags | 0x10 # ack 366 | revflow.tcpflags = revflow.tcpflags | 0x01 # fin 367 | 368 | revflow.pkts = flowlet.pkts / 2 # brain-dead ack-every-other 369 | revflow.bytes = revflow.pkts * 40 370 | 371 | self.measure_flow(revflow, self.name, input_port) 372 | 373 | # weird, but if reverse flow is short enough, it might only 374 | # stay in the flow cache for a very short period of time 375 | if revflow.endofflow: 376 | self.unmeasure_flow(revflow, prevnode) 377 | 378 | destnode = fscore().topology.destnode(self.name, revflow.dstaddr) 379 | 380 | # guard against case that we can't do the autoack due to 381 | # no "real" source (i.e., source was spoofed or source addr 382 | # has no route) 383 | if destnode and destnode != self.name: 384 | self.forward(revflow, destnode) 385 | else: 386 | self.forward(flowlet, destnode) 387 | 388 | 389 | def __should_make_acknowledgement_flow(self, flowlet): 390 | return self.autoack and flowlet.ipproto == IPPROTO_TCP and (not flowlet.ackflow) 391 | 392 | 393 | def forward(self, flowlet, destnode): 394 | nextnode = self.nextHop(flowlet.dstaddr) 395 | port = self.portFromNexthopNode(nextnode, flowkey=flowlet.key) 396 | link = port.link or self.default_link 397 | link.flowlet_arrival(flowlet, self.name, destnode) 398 | 
class FakePoxTimer(object):
    '''Timer class that supports same interface as pox.lib.recoco.Timer,
    but schedules its callbacks on the fs simulation core (fscore())
    instead of real wall-clock time.'''

    # class-level counter used to mint unique timer ids for cancellation
    timerid = 0

    def __init__(self, timeToWake, callback, absoluteTime=False,
                 recurring=False, args=(), kw=None, scheduler=None,
                 started=True, selfStoppable=True):
        '''Schedule `callback(*args, **kw)` to fire `timeToWake` simulated
        seconds from now.  Raises RuntimeError for absolute-time timers,
        which the simulator cannot support.  `scheduler` and `started`
        are accepted only for interface compatibility with recoco.Timer.'''
        # FIX: `kw` previously defaulted to a shared mutable dict ({}),
        # which would be aliased across all timers created without kw.
        if kw is None:
            kw = {}

        if absoluteTime and recurring:
            raise RuntimeError("Can't have a recurring timer for an absolute time!")

        if absoluteTime:
            raise RuntimeError("Can't have an absolute time in FakePoxTimer")

        self._self_stoppable = selfStoppable
        self._timeToWake = timeToWake

        # unique id so fscore().cancel() can find this timer's event
        self.id = "poxtimer{}".format(FakePoxTimer.timerid)
        FakePoxTimer.timerid += 1

        self._recurring = recurring
        self._callback = callback
        self._args = args
        self._kw = kw
        get_logger().debug("Setting fake pox timer callback {} {}".format(self._timeToWake, self._callback))
        fscore().after(self._timeToWake, self.id, self.docallback, None)

    def cancel(self):
        '''Cancel the pending simulator event for this timer.'''
        get_logger().debug("Attempting to cancel fake POX timer {}".format(self.id))
        fscore().cancel(self.id)

    def docallback(self, *args):
        '''Invoke the user callback; if the timer is recurring and the
        callback returned a truthy value, reschedule for another round.'''
        get_logger().debug("In fake pox timer callback {} {}".format(self._timeToWake, self._callback))
        rv = self._callback(*self._args, **self._kw)
        if rv and self._recurring:
            fscore().after(self._timeToWake, self.id, self.docallback, None)
def monkey_patch_pox():
    '''Override two key bits of POX functionality: the Timer class and
    the openflow connection class.  Other overrides are mainly to ensure
    that nothing unexpected happens, but are strictly not necessary at
    present (using betta branch of POX).'''
    get_logger().info("Monkeypatching POX for integration with fs")

    plug = PoxLibPlug()

    # replace POX's real-time timer with our simulator-driven one
    import pox.lib.recoco as recoco
    setattr(recoco, "Timer", FakePoxTimer)

    # stub out POX subsystems fs never uses; any access trips PoxLibPlug
    import pox.lib
    for subsystem in ("ioworker", "pxpcap", "socketcapture"):
        setattr(pox.lib, subsystem, plug)

    import pox
    for subsystem in ("messenger", "misc"):
        setattr(pox, subsystem, plug)

    # swap the socket-based OF connection for the in-simulator fake one
    setattr(ofcore, "Connection", FakeOpenflowConnection)
    setattr(ofcore, "OpenFlow_01_Task", plug)

    # route POX logging through the fs logger
    import pox.core
    setattr(pox.core, "getLogger", get_pox_logger)
# Wrapper Class around POX for openflow messages
class ofp_pox_messages:
    '''Builds a POX libopenflow_01 message of the requested type from
    keyword arguments, exposing it as `self.pox_ofp_message`.

    Supported message_type values: 'ofp_packet_out', 'ofp_flow_mod',
    'ofp_packet_in', 'ofp_flow_removed'.  Unrecognized keyword args
    are silently ignored.'''
    __slots__ = ['pox_ofp_message']

    def __init__(self, message_type, **kargs):
        if message_type == 'ofp_packet_out':
            self.pox_ofp_message = of.ofp_packet_out()
            if 'action' in kargs:
                if kargs['action'] == 'flood':
                    self.pox_ofp_message.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
                elif kargs['action'] == 'ofpp_all':
                    self.pox_ofp_message.actions.append(of.ofp_action_output(port = of.OFPP_ALL))

        elif message_type == 'ofp_flow_mod':
            if 'match' in kargs:
                # FIX: previously `pox_ofp_message` was never assigned when a
                # 'match' was given without a 'command', leaving the attribute
                # unset; treat any command other than 'add' (or no command)
                # as a plain flow_mod carrying the match.
                if kargs.get('command') == 'add':
                    self.pox_ofp_message = of.ofp_flow_mod(command=of.OFPFC_ADD, \
                                                           match=kargs['match'])
                else:
                    self.pox_ofp_message = of.ofp_flow_mod(match=kargs['match'])
            else:
                self.pox_ofp_message = of.ofp_flow_mod()
            if 'match_dl_dst' in kargs:
                self.pox_ofp_message.match.dl_dst = kargs['match_dl_dst']
            if 'match_dl_src' in kargs:
                self.pox_ofp_message.match.dl_src = kargs['match_dl_src']
            if 'idle_timeout' in kargs:
                self.pox_ofp_message.idle_timeout = kargs['idle_timeout']
            if 'hard_timeout' in kargs:
                self.pox_ofp_message.hard_timeout = kargs['hard_timeout']
            if 'action' in kargs:
                if kargs['action'] == 'flood':
                    self.pox_ofp_message.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
                elif isinstance(kargs['action'], dict):
                    # dict action may carry a rewrite dstmac and/or an output port
                    if 'dstmac' in kargs['action'].keys():
                        self.pox_ofp_message.actions.append(of.ofp_action_dl_addr.set_dst(\
                            kargs['action']['dstmac']))
                    if 'port' in kargs['action'].keys():
                        self.pox_ofp_message.actions.append(of.ofp_action_output(port = kargs['action']['port']))
                else:
                    # bare action value is interpreted as an output port number
                    self.pox_ofp_message.actions.append(of.ofp_action_output(port = kargs['action']))

        elif message_type == 'ofp_packet_in':
            self.pox_ofp_message = of.ofp_packet_in()
            if 'reason' in kargs:
                self.pox_ofp_message.reason = kargs['reason']
            if 'in_port' in kargs:
                self.pox_ofp_message.in_port = kargs['in_port']

        elif message_type == 'ofp_flow_removed':
            self.pox_ofp_message = of.ofp_flow_removed()
            if 'match' in kargs:
                self.pox_ofp_message.match = kargs['match']
            if 'cookie' in kargs:
                self.pox_ofp_message.cookie = kargs['cookie']
            if 'priority' in kargs:
                self.pox_ofp_message.priority = kargs['priority']
            if 'reason' in kargs:
                self.pox_ofp_message.reason = kargs['reason']
            if 'duration_sec' in kargs:
                # FIX: was assigned to misspelled attribute 'suration_sec',
                # silently dropping the duration on the real message field.
                self.pox_ofp_message.duration_sec = kargs['duration_sec']
            if 'duration_nsec' in kargs:
                self.pox_ofp_message.duration_nsec = kargs['duration_nsec']
            if 'packet_count' in kargs:
                self.pox_ofp_message.packet_count = kargs['packet_count']
            if 'byte_count' in kargs:
                self.pox_ofp_message.byte_count = kargs['byte_count']
def ofp_match_from_flowlet(flowlet, ports=False):
    '''Build an ofp_match populated from the flowlet's L2/L3 fields;
    include L4 (transport) ports only when `ports` is True.'''
    match = ofp_match()
    field_map = [
        ('dl_src', 'srcmac'),
        ('dl_dst', 'dstmac'),
        ('dl_vlan', 'vlan'),
        ('nw_src', 'srcaddr'),
        ('nw_dst', 'dstaddr'),
        ('nw_proto', 'ipproto'),
    ]
    if ports:
        field_map.append(('tp_src', 'srcport'))
        field_map.append(('tp_dst', 'dstport'))
    for match_field, flowlet_attr in field_map:
        setattr(match, match_field, getattr(flowlet, flowlet_attr))
    return match
-------------------------------------------------------------------------------- /fslib/traffic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | __author__ = 'jsommers@colgate.edu' 4 | 5 | import random 6 | from fslib.common import fscore, get_logger 7 | from fslib.util import * 8 | 9 | 10 | class InvalidFlowConfiguration(Exception): 11 | pass 12 | 13 | class FlowEventGenModulator(object): 14 | def __init__(self, gfunc, stime=0, emerge_profile=None, sustain_profile=None, withdraw_profile=None): 15 | self.generators = {} 16 | self.generator_generator = gfunc 17 | self.starttime = stime 18 | self.logger = get_logger("fslib.traffic") 19 | if isinstance(self.starttime, (int, float)): 20 | self.starttime = randomchoice(self.starttime) 21 | 22 | # profiles should be generators that return a list of tuples: (time, numsources) 23 | self.emerge = self.sustain = self.withdraw = None 24 | 25 | # print 'emerge',emerge_profile 26 | # print 'sustain',sustain_profile 27 | # print 'withdraw',withdraw_profile 28 | 29 | # examples: 30 | # profile=((10,10,10,10,10,10),(1,2,3,4,5,6))" 31 | # profile=((10,),(1,))" 32 | # emerge=((1,),range(1,100,10)) sustain=((0,30),(100,100)) withdraw=((1,),range(100,1,10))" 33 | 34 | if emerge_profile: 35 | emerge = eval(emerge_profile) 36 | # print 'emerge',emerge 37 | self.emerge = zipit(emerge) 38 | 39 | if sustain_profile: 40 | sustain = eval(sustain_profile) 41 | # print 'sustain',sustain 42 | self.sustain = zipit(sustain) 43 | 44 | if withdraw_profile: 45 | withdraw = eval(withdraw_profile) 46 | print 'withdraw',withdraw 47 | self.withdraw = zipit(withdraw) 48 | 49 | 50 | def start(self): 51 | fscore().after(next(self.starttime), 'flowev modulator startup', self.emerge_phase) 52 | 53 | 54 | def start_generator(self): 55 | g = self.generator_generator() 56 | g.start() 57 | self.generators[g] = 1 58 | 59 | 60 | def kill_all_generator(self): 61 | self.__modulate(0) 62 | 63 | 64 
| def kill_generator(self): 65 | g = random.choice(self.generators.keys()) 66 | g.stop() 67 | del self.generators[g] 68 | 69 | 70 | def reap_generators(self): 71 | donelist = [] 72 | for g,x in self.generators.iteritems(): 73 | if g.done: 74 | donelist.append(g) 75 | for g in donelist: 76 | del self.generators[g] 77 | 78 | 79 | def __modulate(self, target_sources): 80 | num_sources = len(self.generators) 81 | 82 | while num_sources != target_sources: 83 | if num_sources < target_sources: 84 | self.start_generator() 85 | num_sources += 1 86 | else: 87 | self.kill_generator() 88 | num_sources -= 1 89 | 90 | 91 | def emerge_phase(self): 92 | self.reap_generators() 93 | nexttime,sources = 0,0 94 | try: 95 | nexttime,sources = next(self.emerge) 96 | except: 97 | self.logger.info('scheduling transition from emerge to sustain') 98 | fscore().after(0.0, 'modulator transition: emerge->sustain', self.sustain_phase) 99 | else: 100 | assert(sources>=0) 101 | self.__modulate(sources) 102 | self.logger.info('emerge: %f %d' % (nexttime,sources)) 103 | fscore().after(nexttime, 'modulator: emerge', self.emerge_phase) 104 | 105 | 106 | def sustain_phase(self): 107 | self.reap_generators() 108 | nexttime,sources = 0,0 109 | try: 110 | nexttime,sources = next(self.sustain) 111 | except: 112 | self.logger.info('scheduling transition from sustain to withdraw') 113 | fscore().after(0.0, 'modulator transition: sustain->withdraw', self.withdraw_phase) 114 | else: 115 | assert(sources>=0) 116 | self.__modulate(sources) 117 | self.logger.info('sustain: %f %d' % (nexttime,sources)) 118 | fscore().after(nexttime, 'modulator: sustain', self.sustain_phase) 119 | 120 | 121 | def withdraw_phase(self): 122 | self.reap_generators() 123 | nexttime,sources = 0,0 124 | try: 125 | nexttime,sources = next(self.withdraw) 126 | except: 127 | self.logger.info('finished with withdraw phase') 128 | fscore().after(0, 'modulator: kill_all', self.kill_all_generator) 129 | else: 130 | assert(sources>=0) 131 | 
def frange(a, b, c):
    '''Return the list [a, a+c, a+2c, ...] up to and including b.
    Counts upward when a < b (c must be > 0), otherwise downward
    (c must be < 0).  The endpoint b is always the final element,
    even when the step does not land on it exactly.'''
    ascending = a < b
    if ascending:
        assert (c > 0)
    else:
        assert (c < 0)

    values = []
    current = a
    while (current <= b) if ascending else (current >= b):
        values.append(current)
        current += c

    # the loops above always run at least once, so values is non-empty;
    # append the endpoint if the step overshot it
    if values[-1] != b:
        values.append(b)
    return values
def subnet_generator(prefix, numhosts):
    '''Given a prefix and number of hosts to carve out for
    subnets within this prefix, create a generator object
    that returns a new subnet (as an ipaddr.IPv4Network) with
    each subsequent call to next()'''
    ceil = math.ceil
    log = math.log

    ipfx = IPv4Network(prefix)
    prefixhosts = ipfx.numhosts
    # account for the network and broadcast addresses
    numhosts += 2
    # FIX: round the host count up to the next power of two.  The old
    # expression `ceil(log(numhosts, 2)) ** 2` squared the exponent
    # (e.g. 8 hosts -> 9), which is not a power of two and produced
    # misaligned subnet boundaries when stepping below.
    numhosts = 2 ** int(ceil(log(numhosts, 2)))
    prefixlen = '/' + str(32 - int(log(numhosts, 2)))
    baseint = int(ipfx)
    # integer count of aligned subnets that fit inside the prefix
    numsubnets = prefixhosts // numhosts
    for i in xrange(numsubnets):
        addr = IPv4Address(baseint + (numhosts * i))
        prefix = IPv4Network(str(addr) + prefixlen)
        yield prefix
Just 150 | hardcode the two high-order bytes, and fill in remainder with IP address''' 151 | ip = int(IPv4Address(ipaddr)) 152 | mac = [] 153 | for i in xrange(4): 154 | mac.append(((ip >> (8*i)) & 0xff)) 155 | mac.extend([0x02,0x00]) 156 | mac = [ "{:02x}".format(b) for b in reversed(mac) ] 157 | return ':'.join(mac) 158 | 159 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ipaddr==2.1.11 2 | networkx==1.7 3 | pydot==1.0.2 4 | pyparsing==1.5.6 5 | pytricia==0.9.0 6 | -------------------------------------------------------------------------------- /script/a_counters.txt: -------------------------------------------------------------------------------- 1 | 1.000 harpoon->a 2353567 bytes 1818 pkts 102 flows 2 | 2.000 harpoon->a 3297585 bytes 2423 pkts 92 flows 3 | 3.000 harpoon->a 5888232 bytes 4162 pkts 99 flows 4 | 4.000 harpoon->a 5968197 bytes 4241 pkts 106 flows 5 | 5.000 harpoon->a 4500277 bytes 3250 pkts 102 flows 6 | 6.000 harpoon->a 3945500 bytes 2905 pkts 110 flows 7 | 7.000 harpoon->a 5077517 bytes 3639 pkts 103 flows 8 | 8.000 harpoon->a 3369594 bytes 2483 pkts 94 flows 9 | 9.000 harpoon->a 2340568 bytes 1819 pkts 105 flows 10 | 10.000 harpoon->a 2653854 bytes 1989 pkts 91 flows 11 | 11.000 harpoon->a 2659011 bytes 2038 pkts 109 flows 12 | 12.000 harpoon->a 2100361 bytes 1600 pkts 82 flows 13 | 13.000 harpoon->a 3202024 bytes 2377 pkts 97 flows 14 | 14.000 harpoon->a 3306007 bytes 2450 pkts 99 flows 15 | 15.000 harpoon->a 2169867 bytes 1648 pkts 82 flows 16 | 16.000 harpoon->a 3030019 bytes 2252 pkts 96 flows 17 | 17.000 harpoon->a 3662679 bytes 2689 pkts 101 flows 18 | 18.000 harpoon->a 3867902 bytes 2828 pkts 102 flows 19 | 19.000 harpoon->a 2502006 bytes 1971 pkts 123 flows 20 | 20.000 harpoon->a 2424240 bytes 1831 pkts 89 flows 21 | 21.000 harpoon->a 3863526 bytes 2826 pkts 102 flows 22 | 22.000 harpoon->a 2438335 
bytes 1851 pkts 92 flows 23 | 23.000 harpoon->a 2612257 bytes 1983 pkts 98 flows 24 | 24.000 harpoon->a 2899225 bytes 2155 pkts 94 flows 25 | 25.000 harpoon->a 3121251 bytes 2308 pkts 93 flows 26 | 26.000 harpoon->a 3038773 bytes 2270 pkts 98 flows 27 | 27.000 harpoon->a 2188356 bytes 1690 pkts 91 flows 28 | 28.000 harpoon->a 3956337 bytes 2913 pkts 112 flows 29 | 29.000 harpoon->a 2667570 bytes 2003 pkts 89 flows 30 | 30.000 harpoon->a 1982887 bytes 1560 pkts 96 flows 31 | 31.000 harpoon->a 2136981 bytes 1644 pkts 89 flows 32 | 32.000 harpoon->a 5013148 bytes 3606 pkts 110 flows 33 | 33.000 harpoon->a 4279199 bytes 3106 pkts 100 flows 34 | 34.000 harpoon->a 4529269 bytes 3216 pkts 79 flows 35 | 35.000 harpoon->a 6741294 bytes 4714 pkts 87 flows 36 | 36.000 harpoon->a 5476370 bytes 3886 pkts 92 flows 37 | 37.000 harpoon->a 4929552 bytes 3554 pkts 109 flows 38 | 38.000 harpoon->a 4743312 bytes 3384 pkts 92 flows 39 | 39.000 harpoon->a 8855434 bytes 6145 pkts 100 flows 40 | 40.000 harpoon->a 8762841 bytes 6077 pkts 98 flows 41 | 41.000 harpoon->a 8331961 bytes 5769 pkts 86 flows 42 | 42.000 harpoon->a 8498902 bytes 5962 pkts 117 flows 43 | 43.000 harpoon->a 8222959 bytes 5731 pkts 99 flows 44 | 44.000 harpoon->a 7913613 bytes 5541 pkts 108 flows 45 | 45.000 harpoon->a 6627324 bytes 4690 pkts 108 flows 46 | 46.000 harpoon->a 4667315 bytes 3372 pkts 105 flows 47 | 47.000 harpoon->a 2981509 bytes 2216 pkts 93 flows 48 | 48.000 harpoon->a 4135513 bytes 3006 pkts 100 flows 49 | 49.000 harpoon->a 2336049 bytes 1786 pkts 93 flows 50 | 50.000 harpoon->a 3442302 bytes 2504 pkts 86 flows 51 | 51.000 harpoon->a 7128860 bytes 4979 pkts 95 flows 52 | 52.000 harpoon->a 6009906 bytes 4226 pkts 89 flows 53 | 53.000 harpoon->a 4428751 bytes 3186 pkts 95 flows 54 | 54.000 harpoon->a 5521311 bytes 3940 pkts 106 flows 55 | 55.000 harpoon->a 6221163 bytes 4385 pkts 97 flows 56 | 56.000 harpoon->a 6866117 bytes 4839 pkts 107 flows 57 | 57.000 harpoon->a 7072616 bytes 5029 pkts 129 flows 
58 | 58.000 harpoon->a 7899400 bytes 5482 pkts 87 flows 59 | 59.000 harpoon->a 7317582 bytes 5136 pkts 106 flows 60 | 60.000 harpoon->a 6801633 bytes 4774 pkts 97 flows 61 | 61.000 harpoon->a 6849859 bytes 4801 pkts 95 flows 62 | 62.000 harpoon->a 6853824 bytes 4788 pkts 87 flows 63 | 63.000 harpoon->a 5385465 bytes 3834 pkts 99 flows 64 | 64.000 harpoon->a 4623373 bytes 3324 pkts 96 flows 65 | 65.000 harpoon->a 2220413 bytes 1709 pkts 92 flows 66 | 66.000 harpoon->a 3291399 bytes 2411 pkts 88 flows 67 | 67.000 harpoon->a 3637701 bytes 2669 pkts 99 flows 68 | 68.000 harpoon->a 3656956 bytes 2717 pkts 115 flows 69 | 69.000 harpoon->a 3365335 bytes 2515 pkts 109 flows 70 | 70.000 harpoon->a 2359206 bytes 1805 pkts 94 flows 71 | 71.000 harpoon->a 2034150 bytes 1577 pkts 90 flows 72 | 72.000 harpoon->a 2761318 bytes 2076 pkts 95 flows 73 | 73.000 harpoon->a 3737363 bytes 2739 pkts 104 flows 74 | 74.000 harpoon->a 4092003 bytes 2977 pkts 102 flows 75 | 75.000 harpoon->a 3522322 bytes 2618 pkts 109 flows 76 | 76.000 harpoon->a 3988330 bytes 2908 pkts 101 flows 77 | 77.000 harpoon->a 2930519 bytes 2248 pkts 119 flows 78 | 78.000 harpoon->a 2393306 bytes 1852 pkts 106 flows 79 | 79.000 harpoon->a 5253274 bytes 3742 pkts 99 flows 80 | 80.000 harpoon->a 6353783 bytes 4511 pkts 114 flows 81 | 81.000 harpoon->a 5139323 bytes 3661 pkts 96 flows 82 | 82.000 harpoon->a 8443948 bytes 5864 pkts 95 flows 83 | 83.000 harpoon->a 8326357 bytes 5791 pkts 98 flows 84 | 84.000 harpoon->a 7881764 bytes 5489 pkts 95 flows 85 | 85.000 harpoon->a 6276335 bytes 4426 pkts 101 flows 86 | 86.000 harpoon->a 8690221 bytes 6032 pkts 99 flows 87 | 87.000 harpoon->a 5725378 bytes 4071 pkts 103 flows 88 | 88.000 harpoon->a 4262711 bytes 3061 pkts 87 flows 89 | 89.000 harpoon->a 3189190 bytes 2399 pkts 111 flows 90 | 90.000 harpoon->a 2935354 bytes 2212 pkts 103 flows 91 | 91.000 harpoon->a 4717806 bytes 3422 pkts 110 flows 92 | 92.000 harpoon->a 4593864 bytes 3315 pkts 101 flows 93 | 93.000 harpoon->a 
4657840 bytes 3334 pkts 94 flows 94 | 94.000 harpoon->a 3628263 bytes 2677 pkts 105 flows 95 | 95.000 harpoon->a 3164720 bytes 2349 pkts 99 flows 96 | 96.000 harpoon->a 3226265 bytes 2399 pkts 101 flows 97 | 97.000 harpoon->a 6238356 bytes 4400 pkts 100 flows 98 | 98.000 harpoon->a 7762371 bytes 5488 pkts 125 flows 99 | 99.000 harpoon->a 6727204 bytes 4723 pkts 95 flows 100 | 100.000 harpoon->a 4975580 bytes 3573 pkts 104 flows 101 | 101.000 harpoon->a 5681668 bytes 4069 pkts 115 flows 102 | 102.000 harpoon->a 4621883 bytes 3316 pkts 95 flows 103 | 103.000 harpoon->a 4182441 bytes 3043 pkts 102 flows 104 | 104.000 harpoon->a 7097534 bytes 5008 pkts 114 flows 105 | 105.000 harpoon->a 6311244 bytes 4419 pkts 86 flows 106 | 106.000 harpoon->a 4933160 bytes 3552 pkts 106 flows 107 | 107.000 harpoon->a 6296720 bytes 4476 pkts 112 flows 108 | 108.000 harpoon->a 2079315 bytes 1624 pkts 96 flows 109 | 109.000 harpoon->a 3137555 bytes 2353 pkts 105 flows 110 | 110.000 harpoon->a 3450715 bytes 2520 pkts 89 flows 111 | 111.000 harpoon->a 2897829 bytes 2190 pkts 105 flows 112 | 112.000 harpoon->a 3028896 bytes 2260 pkts 99 flows 113 | 113.000 harpoon->a 3523832 bytes 2580 pkts 94 flows 114 | 114.000 harpoon->a 5187009 bytes 3668 pkts 88 flows 115 | 115.000 harpoon->a 5171367 bytes 3689 pkts 99 flows 116 | 116.000 harpoon->a 3706037 bytes 2715 pkts 99 flows 117 | 117.000 harpoon->a 2787253 bytes 2106 pkts 99 flows 118 | 118.000 harpoon->a 4588530 bytes 3332 pkts 111 flows 119 | 119.000 harpoon->a 4309828 bytes 3133 pkts 105 flows 120 | 120.000 harpoon->a 4357400 bytes 3154 pkts 101 flows 121 | 121.000 harpoon->a 3399002 bytes 2491 pkts 91 flows 122 | 122.000 harpoon->a 2941662 bytes 2206 pkts 99 flows 123 | 123.000 harpoon->a 2496068 bytes 1901 pkts 97 flows 124 | 124.000 harpoon->a 3479683 bytes 2552 pkts 96 flows 125 | 125.000 harpoon->a 3771864 bytes 2758 pkts 100 flows 126 | 126.000 harpoon->a 3808236 bytes 2782 pkts 99 flows 127 | 127.000 harpoon->a 4194016 bytes 3057 pkts 
107 flows 128 | 128.000 harpoon->a 4188353 bytes 3032 pkts 100 flows 129 | 129.000 harpoon->a 3086983 bytes 2346 pkts 115 flows 130 | 130.000 harpoon->a 2845927 bytes 2154 pkts 104 flows 131 | 131.000 harpoon->a 3900573 bytes 2839 pkts 98 flows 132 | 132.000 harpoon->a 2824894 bytes 2121 pkts 96 flows 133 | 133.000 harpoon->a 1877059 bytes 1469 pkts 89 flows 134 | 134.000 harpoon->a 4869555 bytes 3528 pkts 116 flows 135 | 135.000 harpoon->a 4574796 bytes 3308 pkts 104 flows 136 | 136.000 harpoon->a 4514291 bytes 3297 pkts 114 flows 137 | 137.000 harpoon->a 4565892 bytes 3308 pkts 109 flows 138 | 138.000 harpoon->a 3869936 bytes 2775 pkts 80 flows 139 | 139.000 harpoon->a 3767471 bytes 2731 pkts 90 flows 140 | 140.000 harpoon->a 6912931 bytes 4906 pkts 122 flows 141 | 141.000 harpoon->a 6130950 bytes 4339 pkts 104 flows 142 | 142.000 harpoon->a 7760025 bytes 5428 pkts 103 flows 143 | 143.000 harpoon->a 5469127 bytes 3890 pkts 97 flows 144 | 144.000 harpoon->a 2915327 bytes 2217 pkts 109 flows 145 | 145.000 harpoon->a 2636144 bytes 2012 pkts 101 flows 146 | 146.000 harpoon->a 3216002 bytes 2392 pkts 103 flows 147 | 147.000 harpoon->a 4044835 bytes 2921 pkts 94 flows 148 | 148.000 harpoon->a 3975590 bytes 2900 pkts 100 flows 149 | 149.000 harpoon->a 3644704 bytes 2636 pkts 84 flows 150 | 150.000 harpoon->a 3716833 bytes 2689 pkts 85 flows 151 | 151.000 harpoon->a 4855190 bytes 3506 pkts 112 flows 152 | 152.000 harpoon->a 4195563 bytes 3025 pkts 92 flows 153 | 153.000 harpoon->a 6435275 bytes 4569 pkts 116 flows 154 | 154.000 harpoon->a 5731689 bytes 4074 pkts 103 flows 155 | 155.000 harpoon->a 4163990 bytes 3016 pkts 97 flows 156 | 156.000 harpoon->a 7301526 bytes 5101 pkts 98 flows 157 | 157.000 harpoon->a 7726556 bytes 5360 pkts 83 flows 158 | 158.000 harpoon->a 7822699 bytes 5511 pkts 119 flows 159 | 159.000 harpoon->a 5564181 bytes 3919 pkts 85 flows 160 | 160.000 harpoon->a 8543790 bytes 5935 pkts 97 flows 161 | 161.000 harpoon->a 8539286 bytes 5937 pkts 96 flows 
162 | 162.000 harpoon->a 6783628 bytes 4752 pkts 93 flows 163 | 163.000 harpoon->a 5560079 bytes 3971 pkts 106 flows 164 | 164.000 harpoon->a 5320643 bytes 3800 pkts 103 flows 165 | 165.000 harpoon->a 2639500 bytes 1976 pkts 88 flows 166 | 166.000 harpoon->a 3306255 bytes 2440 pkts 96 flows 167 | 167.000 harpoon->a 3070428 bytes 2301 pkts 102 flows 168 | 168.000 harpoon->a 2700511 bytes 2028 pkts 95 flows 169 | 169.000 harpoon->a 5779168 bytes 4124 pkts 110 flows 170 | 170.000 harpoon->a 5780048 bytes 4124 pkts 108 flows 171 | 171.000 harpoon->a 4781459 bytes 3442 pkts 102 flows 172 | 172.000 harpoon->a 3215352 bytes 2383 pkts 95 flows 173 | 173.000 harpoon->a 3875841 bytes 2809 pkts 93 flows 174 | 174.000 harpoon->a 2243965 bytes 1722 pkts 90 flows 175 | 175.000 harpoon->a 3554738 bytes 2636 pkts 111 flows 176 | 176.000 harpoon->a 3684106 bytes 2720 pkts 106 flows 177 | 177.000 harpoon->a 3371329 bytes 2489 pkts 96 flows 178 | 178.000 harpoon->a 2507778 bytes 1893 pkts 91 flows 179 | 179.000 harpoon->a 2757167 bytes 2069 pkts 96 flows 180 | 180.000 harpoon->a 2452913 bytes 1882 pkts 101 flows 181 | 181.000 harpoon->a 5113322 bytes 3650 pkts 100 flows 182 | 182.000 harpoon->a 4919152 bytes 3563 pkts 115 flows 183 | 183.000 harpoon->a 4363677 bytes 3164 pkts 104 flows 184 | 184.000 harpoon->a 7875537 bytes 5485 pkts 98 flows 185 | 185.000 harpoon->a 10283579 bytes 7112 pkts 106 flows 186 | 186.000 harpoon->a 7917405 bytes 5505 pkts 90 flows 187 | 187.000 harpoon->a 6680155 bytes 4676 pkts 88 flows 188 | 188.000 harpoon->a 5217425 bytes 3749 pkts 108 flows 189 | 189.000 harpoon->a 6351249 bytes 4498 pkts 104 flows 190 | 190.000 harpoon->a 3145729 bytes 2347 pkts 101 flows 191 | 191.000 harpoon->a 3023375 bytes 2272 pkts 105 flows 192 | 192.000 harpoon->a 3716114 bytes 2727 pkts 99 flows 193 | 193.000 harpoon->a 3041473 bytes 2260 pkts 93 flows 194 | 194.000 harpoon->a 2078121 bytes 1610 pkts 92 flows 195 | 195.000 harpoon->a 2151328 bytes 1674 pkts 97 flows 196 | 
196.000 harpoon->a 3753596 bytes 2753 pkts 101 flows 197 | 197.000 harpoon->a 4546445 bytes 3261 pkts 93 flows 198 | 198.000 harpoon->a 6047253 bytes 4277 pkts 100 flows 199 | 199.000 harpoon->a 5430486 bytes 3856 pkts 97 flows 200 | 200.000 harpoon->a 5753003 bytes 4100 pkts 109 flows 201 | 201.000 harpoon->a 2579529 bytes 1929 pkts 87 flows 202 | 202.000 harpoon->a 3823953 bytes 2801 pkts 102 flows 203 | 203.000 harpoon->a 4018412 bytes 2898 pkts 88 flows 204 | 204.000 harpoon->a 3510151 bytes 2547 pkts 83 flows 205 | 205.000 harpoon->a 2794188 bytes 2097 pkts 93 flows 206 | 206.000 harpoon->a 3039500 bytes 2270 pkts 100 flows 207 | 207.000 harpoon->a 2557413 bytes 1975 pkts 110 flows 208 | 208.000 harpoon->a 3787809 bytes 2797 pkts 113 flows 209 | 209.000 harpoon->a 3120718 bytes 2288 pkts 85 flows 210 | 210.000 harpoon->a 3969894 bytes 2903 pkts 104 flows 211 | 211.000 harpoon->a 1869038 bytes 1467 pkts 90 flows 212 | 212.000 harpoon->a 2633311 bytes 2019 pkts 107 flows 213 | 213.000 harpoon->a 3641946 bytes 2712 pkts 115 flows 214 | 214.000 harpoon->a 3226754 bytes 2443 pkts 119 flows 215 | 215.000 harpoon->a 3077818 bytes 2308 pkts 105 flows 216 | 216.000 harpoon->a 3005213 bytes 2301 pkts 120 flows 217 | 217.000 harpoon->a 2374175 bytes 1796 pkts 86 flows 218 | 218.000 harpoon->a 3644060 bytes 2685 pkts 104 flows 219 | 219.000 harpoon->a 6033083 bytes 4279 pkts 107 flows 220 | 220.000 harpoon->a 5864578 bytes 4169 pkts 106 flows 221 | 221.000 harpoon->a 2314646 bytes 1790 pkts 96 flows 222 | 222.000 harpoon->a 1771492 bytes 1406 pkts 93 flows 223 | 223.000 harpoon->a 1643211 bytes 1307 pkts 87 flows 224 | 224.000 harpoon->a 3151160 bytes 2308 pkts 85 flows 225 | 225.000 harpoon->a 3444832 bytes 2540 pkts 99 flows 226 | 226.000 harpoon->a 2234577 bytes 1722 pkts 95 flows 227 | 227.000 harpoon->a 2081902 bytes 1668 pkts 114 flows 228 | 228.000 harpoon->a 3966780 bytes 2935 pkts 118 flows 229 | 229.000 harpoon->a 2754345 bytes 2066 pkts 94 flows 230 | 230.000 
harpoon->a 2850731 bytes 2134 pkts 95 flows 231 | 231.000 harpoon->a 2904067 bytes 2122 pkts 77 flows 232 | 232.000 harpoon->a 4079752 bytes 3016 pkts 120 flows 233 | 233.000 harpoon->a 2639696 bytes 2018 pkts 104 flows 234 | 234.000 harpoon->a 2614239 bytes 1999 pkts 104 flows 235 | 235.000 harpoon->a 3014607 bytes 2270 pkts 106 flows 236 | 236.000 harpoon->a 4567700 bytes 3299 pkts 104 flows 237 | 237.000 harpoon->a 4657369 bytes 3355 pkts 103 flows 238 | 238.000 harpoon->a 8398944 bytes 5851 pkts 104 flows 239 | 239.000 harpoon->a 4475002 bytes 3195 pkts 81 flows 240 | 240.000 harpoon->a 4377318 bytes 3161 pkts 100 flows 241 | 241.000 harpoon->a 5426276 bytes 3832 pkts 89 flows 242 | 242.000 harpoon->a 4681755 bytes 3364 pkts 99 flows 243 | 243.000 harpoon->a 2700053 bytes 2066 pkts 107 flows 244 | 244.000 harpoon->a 1876616 bytes 1472 pkts 89 flows 245 | 245.000 harpoon->a 3304423 bytes 2485 pkts 116 flows 246 | 246.000 harpoon->a 3691255 bytes 2726 pkts 107 flows 247 | 247.000 harpoon->a 2175586 bytes 1684 pkts 96 flows 248 | 248.000 harpoon->a 6212466 bytes 4389 pkts 101 flows 249 | 249.000 harpoon->a 5057423 bytes 3621 pkts 99 flows 250 | 250.000 harpoon->a 5627437 bytes 3971 pkts 89 flows 251 | 251.000 harpoon->a 6855269 bytes 4849 pkts 113 flows 252 | 252.000 harpoon->a 4193955 bytes 3013 pkts 89 flows 253 | 253.000 harpoon->a 4693493 bytes 3400 pkts 111 flows 254 | 254.000 harpoon->a 3897714 bytes 2833 pkts 93 flows 255 | 255.000 harpoon->a 4323423 bytes 3115 pkts 96 flows 256 | 256.000 harpoon->a 5003863 bytes 3569 pkts 96 flows 257 | 257.000 harpoon->a 5226373 bytes 3744 pkts 105 flows 258 | 258.000 harpoon->a 4214846 bytes 3089 pkts 112 flows 259 | 259.000 harpoon->a 2444268 bytes 1843 pkts 85 flows 260 | 260.000 harpoon->a 4002243 bytes 2896 pkts 97 flows 261 | 261.000 harpoon->a 5096171 bytes 3697 pkts 122 flows 262 | 262.000 harpoon->a 4498969 bytes 3273 pkts 112 flows 263 | 263.000 harpoon->a 4160071 bytes 3034 pkts 105 flows 264 | 264.000 
harpoon->a 2288357 bytes 1782 pkts 104 flows 265 | 265.000 harpoon->a 2770852 bytes 2072 pkts 90 flows 266 | 266.000 harpoon->a 3295752 bytes 2453 pkts 104 flows 267 | 267.000 harpoon->a 4399498 bytes 3191 pkts 105 flows 268 | 268.000 harpoon->a 4395980 bytes 3211 pkts 112 flows 269 | 269.000 harpoon->a 3102987 bytes 2324 pkts 105 flows 270 | 270.000 harpoon->a 3957146 bytes 2888 pkts 100 flows 271 | 271.000 harpoon->a 4005058 bytes 2931 pkts 105 flows 272 | 272.000 harpoon->a 5612842 bytes 3948 pkts 86 flows 273 | 273.000 harpoon->a 5026427 bytes 3590 pkts 95 flows 274 | 274.000 harpoon->a 3537486 bytes 2610 pkts 102 flows 275 | 275.000 harpoon->a 3342266 bytes 2459 pkts 94 flows 276 | 276.000 harpoon->a 5065883 bytes 3623 pkts 100 flows 277 | 277.000 harpoon->a 3804340 bytes 2790 pkts 102 flows 278 | 278.000 harpoon->a 3262103 bytes 2422 pkts 101 flows 279 | 279.000 harpoon->a 2444636 bytes 1857 pkts 92 flows 280 | 280.000 harpoon->a 2599402 bytes 1996 pkts 104 flows 281 | 281.000 harpoon->a 5158945 bytes 3710 pkts 113 flows 282 | 282.000 harpoon->a 8709452 bytes 6040 pkts 95 flows 283 | 283.000 harpoon->a 7011317 bytes 4906 pkts 92 flows 284 | 284.000 harpoon->a 7194943 bytes 5041 pkts 102 flows 285 | 285.000 harpoon->a 7829828 bytes 5461 pkts 99 flows 286 | 286.000 harpoon->a 6993333 bytes 4899 pkts 95 flows 287 | 287.000 harpoon->a 6168487 bytes 4367 pkts 103 flows 288 | 288.000 harpoon->a 5094785 bytes 3652 pkts 101 flows 289 | 289.000 harpoon->a 4612412 bytes 3277 pkts 83 flows 290 | 290.000 harpoon->a 5191468 bytes 3680 pkts 92 flows 291 | 291.000 harpoon->a 7259516 bytes 5091 pkts 103 flows 292 | 292.000 harpoon->a 6628507 bytes 4610 pkts 76 flows 293 | 293.000 harpoon->a 5040877 bytes 3579 pkts 88 flows 294 | 294.000 harpoon->a 4785448 bytes 3430 pkts 98 flows 295 | 295.000 harpoon->a 6455152 bytes 4509 pkts 85 flows 296 | 296.000 harpoon->a 6748941 bytes 4805 pkts 125 flows 297 | 297.000 harpoon->a 6167365 bytes 4345 pkts 93 flows 298 | 298.000 
harpoon->a 5570314 bytes 4012 pkts 118 flows 299 | 299.000 harpoon->a 4864840 bytes 3478 pkts 95 flows 300 | 300.000 harpoon->a 5442754 bytes 3888 pkts 106 flows 301 | -------------------------------------------------------------------------------- /script/b_counters.txt: -------------------------------------------------------------------------------- 1 | 1.000 a->b 2339240 bytes 1803 pkts 100 flows 2 | 2.000 a->b 3297047 bytes 2418 pkts 90 flows 3 | 3.000 a->b 4065793 bytes 2953 pkts 101 flows 4 | 4.000 a->b 5867016 bytes 4167 pkts 104 flows 5 | 5.000 a->b 4538991 bytes 3276 pkts 102 flows 6 | 6.000 a->b 3719669 bytes 2748 pkts 107 flows 7 | 7.000 a->b 5090435 bytes 3649 pkts 104 flows 8 | 8.000 a->b 5456261 bytes 3876 pkts 95 flows 9 | 9.000 a->b 2300473 bytes 1790 pkts 104 flows 10 | 10.000 a->b 2429848 bytes 1844 pkts 92 flows 11 | 11.000 a->b 2911977 bytes 2206 pkts 109 flows 12 | 12.000 a->b 2078726 bytes 1588 pkts 83 flows 13 | 13.000 a->b 3248120 bytes 2406 pkts 96 flows 14 | 14.000 a->b 3195513 bytes 2378 pkts 100 flows 15 | 15.000 a->b 2215286 bytes 1677 pkts 81 flows 16 | 16.000 a->b 3042730 bytes 2256 pkts 95 flows 17 | 17.000 a->b 3658183 bytes 2687 pkts 101 flows 18 | 18.000 a->b 3750077 bytes 2750 pkts 102 flows 19 | 19.000 a->b 2615388 bytes 2039 pkts 120 flows 20 | 20.000 a->b 2369804 bytes 1806 pkts 94 flows 21 | 21.000 a->b 3783452 bytes 2766 pkts 99 flows 22 | 22.000 a->b 2388919 bytes 1816 pkts 91 flows 23 | 23.000 a->b 2503941 bytes 1923 pkts 103 flows 24 | 24.000 a->b 3257989 bytes 2399 pkts 96 flows 25 | 25.000 a->b 2991054 bytes 2209 pkts 88 flows 26 | 26.000 a->b 3125022 bytes 2334 pkts 101 flows 27 | 27.000 a->b 2204225 bytes 1697 pkts 90 flows 28 | 28.000 a->b 3723630 bytes 2749 pkts 108 flows 29 | 29.000 a->b 2844023 bytes 2126 pkts 91 flows 30 | 30.000 a->b 1923266 bytes 1520 pkts 96 flows 31 | 31.000 a->b 2090484 bytes 1616 pkts 90 flows 32 | 32.000 a->b 5178789 bytes 3717 pkts 110 flows 33 | 33.000 a->b 4284638 bytes 3109 pkts 100 
flows 34 | 34.000 a->b 4326563 bytes 3080 pkts 79 flows 35 | 35.000 a->b 6927551 bytes 4849 pkts 91 flows 36 | 36.000 a->b 5360269 bytes 3806 pkts 91 flows 37 | 37.000 a->b 5069455 bytes 3644 pkts 108 flows 38 | 38.000 a->b 4750102 bytes 3389 pkts 92 flows 39 | 39.000 a->b 8815965 bytes 6117 pkts 99 flows 40 | 40.000 a->b 8361361 bytes 5807 pkts 97 flows 41 | 41.000 a->b 8768644 bytes 6064 pkts 88 flows 42 | 42.000 a->b 7277935 bytes 5147 pkts 116 flows 43 | 43.000 a->b 8264363 bytes 5752 pkts 98 flows 44 | 44.000 a->b 8942180 bytes 6225 pkts 106 flows 45 | 45.000 a->b 6787954 bytes 4812 pkts 114 flows 46 | 46.000 a->b 4531053 bytes 3267 pkts 99 flows 47 | 47.000 a->b 3118274 bytes 2316 pkts 97 flows 48 | 48.000 a->b 3939112 bytes 2873 pkts 99 flows 49 | 49.000 a->b 2479337 bytes 1881 pkts 93 flows 50 | 50.000 a->b 3444310 bytes 2508 pkts 87 flows 51 | 51.000 a->b 7169425 bytes 5009 pkts 96 flows 52 | 52.000 a->b 6020360 bytes 4230 pkts 88 flows 53 | 53.000 a->b 4432524 bytes 3193 pkts 97 flows 54 | 54.000 a->b 5408189 bytes 3859 pkts 103 flows 55 | 55.000 a->b 5978274 bytes 4212 pkts 93 flows 56 | 56.000 a->b 6697987 bytes 4732 pkts 109 flows 57 | 57.000 a->b 5563067 bytes 4019 pkts 128 flows 58 | 58.000 a->b 8486502 bytes 5880 pkts 90 flows 59 | 59.000 a->b 8554051 bytes 5960 pkts 105 flows 60 | 60.000 a->b 6939279 bytes 4868 pkts 98 flows 61 | 61.000 a->b 6907632 bytes 4842 pkts 96 flows 62 | 62.000 a->b 6827721 bytes 4765 pkts 85 flows 63 | 63.000 a->b 5332202 bytes 3802 pkts 100 flows 64 | 64.000 a->b 4708742 bytes 3382 pkts 97 flows 65 | 65.000 a->b 2147442 bytes 1651 pkts 88 flows 66 | 66.000 a->b 3373173 bytes 2481 pkts 94 flows 67 | 67.000 a->b 3505108 bytes 2565 pkts 93 flows 68 | 68.000 a->b 3730108 bytes 2774 pkts 118 flows 69 | 69.000 a->b 2722618 bytes 2087 pkts 109 flows 70 | 70.000 a->b 2071111 bytes 1606 pkts 92 flows 71 | 71.000 a->b 3012527 bytes 2232 pkts 91 flows 72 | 72.000 a->b 2591088 bytes 1963 pkts 95 flows 73 | 73.000 a->b 3910442 bytes 
2860 pkts 106 flows 74 | 74.000 a->b 4092063 bytes 2979 pkts 103 flows 75 | 75.000 a->b 3303447 bytes 2465 pkts 106 flows 76 | 76.000 a->b 4161547 bytes 3026 pkts 102 flows 77 | 77.000 a->b 2707386 bytes 2097 pkts 118 flows 78 | 78.000 a->b 2504182 bytes 1919 pkts 103 flows 79 | 79.000 a->b 5348778 bytes 3815 pkts 103 flows 80 | 80.000 a->b 6395687 bytes 4536 pkts 113 flows 81 | 81.000 a->b 4989635 bytes 3561 pkts 96 flows 82 | 82.000 a->b 8580010 bytes 5962 pkts 98 flows 83 | 83.000 a->b 8336750 bytes 5793 pkts 96 flows 84 | 84.000 a->b 7877045 bytes 5485 pkts 94 flows 85 | 85.000 a->b 6269081 bytes 4425 pkts 103 flows 86 | 86.000 a->b 8647080 bytes 6001 pkts 98 flows 87 | 87.000 a->b 5753037 bytes 4087 pkts 102 flows 88 | 88.000 a->b 4296525 bytes 3083 pkts 87 flows 89 | 89.000 a->b 3187551 bytes 2390 pkts 107 flows 90 | 90.000 a->b 2952832 bytes 2242 pkts 111 flows 91 | 91.000 a->b 4375420 bytes 3181 pkts 105 flows 92 | 92.000 a->b 4585393 bytes 3315 pkts 104 flows 93 | 93.000 a->b 4998090 bytes 3560 pkts 93 flows 94 | 94.000 a->b 3636010 bytes 2677 pkts 103 flows 95 | 95.000 a->b 3139732 bytes 2338 pkts 101 flows 96 | 96.000 a->b 3219057 bytes 2389 pkts 99 flows 97 | 97.000 a->b 6286957 bytes 4447 pkts 106 flows 98 | 98.000 a->b 7738499 bytes 5460 pkts 120 flows 99 | 99.000 a->b 6641156 bytes 4668 pkts 96 flows 100 | 100.000 a->b 4912783 bytes 3519 pkts 99 flows 101 | 101.000 a->b 5830777 bytes 4178 pkts 119 flows 102 | 102.000 a->b 4540582 bytes 3263 pkts 95 flows 103 | 103.000 a->b 4278244 bytes 3111 pkts 104 flows 104 | 104.000 a->b 7101446 bytes 5012 pkts 115 flows 105 | 105.000 a->b 6281429 bytes 4393 pkts 83 flows 106 | 106.000 a->b 4730531 bytes 3414 pkts 105 flows 107 | 107.000 a->b 6450101 bytes 4585 pkts 115 flows 108 | 108.000 a->b 2111672 bytes 1636 pkts 92 flows 109 | 109.000 a->b 3145954 bytes 2366 pkts 108 flows 110 | 110.000 a->b 3403384 bytes 2482 pkts 86 flows 111 | 111.000 a->b 2939702 bytes 2225 pkts 108 flows 112 | 112.000 a->b 2731282 
bytes 2054 pkts 96 flows 113 | 113.000 a->b 3530758 bytes 2593 pkts 98 flows 114 | 114.000 a->b 4987128 bytes 3534 pkts 87 flows 115 | 115.000 a->b 5696875 bytes 4046 pkts 102 flows 116 | 116.000 a->b 3705479 bytes 2707 pkts 96 flows 117 | 117.000 a->b 2238012 bytes 1730 pkts 95 flows 118 | 118.000 a->b 4935473 bytes 3571 pkts 114 flows 119 | 119.000 a->b 4366794 bytes 3170 pkts 105 flows 120 | 120.000 a->b 4503925 bytes 3258 pkts 103 flows 121 | 121.000 a->b 3392641 bytes 2486 pkts 91 flows 122 | 122.000 a->b 2872272 bytes 2151 pkts 95 flows 123 | 123.000 a->b 2454341 bytes 1877 pkts 98 flows 124 | 124.000 a->b 3511504 bytes 2577 pkts 98 flows 125 | 125.000 a->b 3836135 bytes 2796 pkts 98 flows 126 | 126.000 a->b 3710936 bytes 2719 pkts 100 flows 127 | 127.000 a->b 4213843 bytes 3069 pkts 106 flows 128 | 128.000 a->b 4105459 bytes 2968 pkts 97 flows 129 | 129.000 a->b 3093398 bytes 2355 pkts 117 flows 130 | 130.000 a->b 3005314 bytes 2268 pkts 107 flows 131 | 131.000 a->b 2780399 bytes 2078 pkts 92 flows 132 | 132.000 a->b 2676523 bytes 2032 pkts 101 flows 133 | 133.000 a->b 3170070 bytes 2340 pkts 92 flows 134 | 134.000 a->b 4789063 bytes 3465 pkts 112 flows 135 | 135.000 a->b 4645428 bytes 3362 pkts 107 flows 136 | 136.000 a->b 4318424 bytes 3154 pkts 109 flows 137 | 137.000 a->b 3362619 bytes 2519 pkts 114 flows 138 | 138.000 a->b 3813809 bytes 2736 pkts 80 flows 139 | 139.000 a->b 3835535 bytes 2776 pkts 90 flows 140 | 140.000 a->b 6928144 bytes 4921 pkts 124 flows 141 | 141.000 a->b 7439354 bytes 5195 pkts 98 flows 142 | 142.000 a->b 7638336 bytes 5359 pkts 106 flows 143 | 143.000 a->b 5258466 bytes 3747 pkts 97 flows 144 | 144.000 a->b 3186433 bytes 2392 pkts 107 flows 145 | 145.000 a->b 2728043 bytes 2077 pkts 102 flows 146 | 146.000 a->b 3135631 bytes 2344 pkts 105 flows 147 | 147.000 a->b 4130411 bytes 2976 pkts 93 flows 148 | 148.000 a->b 3969132 bytes 2894 pkts 100 flows 149 | 149.000 a->b 3606754 bytes 2614 pkts 85 flows 150 | 150.000 a->b 3645603 
bytes 2637 pkts 83 flows 151 | 151.000 a->b 4988004 bytes 3599 pkts 114 flows 152 | 152.000 a->b 4075165 bytes 2939 pkts 90 flows 153 | 153.000 a->b 6510802 bytes 4625 pkts 118 flows 154 | 154.000 a->b 5779666 bytes 4099 pkts 100 flows 155 | 155.000 a->b 4012338 bytes 2904 pkts 93 flows 156 | 156.000 a->b 6758986 bytes 4752 pkts 102 flows 157 | 157.000 a->b 7762150 bytes 5396 pkts 89 flows 158 | 158.000 a->b 8483543 bytes 5938 pkts 114 flows 159 | 159.000 a->b 5345481 bytes 3775 pkts 85 flows 160 | 160.000 a->b 8756777 bytes 6082 pkts 99 flows 161 | 161.000 a->b 8400834 bytes 5837 pkts 93 flows 162 | 162.000 a->b 6904762 bytes 4841 pkts 96 flows 163 | 163.000 a->b 5528961 bytes 3949 pkts 106 flows 164 | 164.000 a->b 5383784 bytes 3842 pkts 103 flows 165 | 165.000 a->b 2643781 bytes 1982 pkts 89 flows 166 | 166.000 a->b 3145245 bytes 2330 pkts 95 flows 167 | 167.000 a->b 3160902 bytes 2351 pkts 98 flows 168 | 168.000 a->b 2573436 bytes 1951 pkts 98 flows 169 | 169.000 a->b 5943322 bytes 4236 pkts 111 flows 170 | 170.000 a->b 5086397 bytes 3645 pkts 101 flows 171 | 171.000 a->b 4849549 bytes 3505 pkts 110 flows 172 | 172.000 a->b 3874923 bytes 2822 pkts 95 flows 173 | 173.000 a->b 3847733 bytes 2790 pkts 92 flows 174 | 174.000 a->b 2225596 bytes 1700 pkts 86 flows 175 | 175.000 a->b 3527356 bytes 2615 pkts 110 flows 176 | 176.000 a->b 3535045 bytes 2623 pkts 107 flows 177 | 177.000 a->b 3393460 bytes 2496 pkts 93 flows 178 | 178.000 a->b 2596123 bytes 1965 pkts 96 flows 179 | 179.000 a->b 2829483 bytes 2120 pkts 97 flows 180 | 180.000 a->b 2280847 bytes 1765 pkts 100 flows 181 | 181.000 a->b 5304313 bytes 3776 pkts 100 flows 182 | 182.000 a->b 4902579 bytes 3550 pkts 114 flows 183 | 183.000 a->b 4324934 bytes 3144 pkts 106 flows 184 | 184.000 a->b 7875033 bytes 5484 pkts 98 flows 185 | 185.000 a->b 10335913 bytes 7147 pkts 106 flows 186 | 186.000 a->b 7887738 bytes 5488 pkts 91 flows 187 | 187.000 a->b 6432826 bytes 4509 pkts 87 flows 188 | 188.000 a->b 5432385 bytes 
3885 pkts 105 flows 189 | 189.000 a->b 6134289 bytes 4360 pkts 107 flows 190 | 190.000 a->b 3430107 bytes 2539 pkts 102 flows 191 | 191.000 a->b 2918148 bytes 2194 pkts 102 flows 192 | 192.000 a->b 3783007 bytes 2781 pkts 103 flows 193 | 193.000 a->b 3046119 bytes 2259 pkts 91 flows 194 | 194.000 a->b 1928346 bytes 1503 pkts 89 flows 195 | 195.000 a->b 2272986 bytes 1759 pkts 99 flows 196 | 196.000 a->b 3775742 bytes 2767 pkts 100 flows 197 | 197.000 a->b 4560184 bytes 3277 pkts 96 flows 198 | 198.000 a->b 5905550 bytes 4180 pkts 99 flows 199 | 199.000 a->b 5615325 bytes 3986 pkts 100 flows 200 | 200.000 a->b 5731015 bytes 4076 pkts 105 flows 201 | 201.000 a->b 2194416 bytes 1659 pkts 82 flows 202 | 202.000 a->b 4224083 bytes 3084 pkts 108 flows 203 | 203.000 a->b 3910409 bytes 2825 pkts 88 flows 204 | 204.000 a->b 3541701 bytes 2564 pkts 81 flows 205 | 205.000 a->b 2845631 bytes 2135 pkts 95 flows 206 | 206.000 a->b 3027666 bytes 2261 pkts 99 flows 207 | 207.000 a->b 2577305 bytes 1992 pkts 112 flows 208 | 208.000 a->b 3729245 bytes 2748 pkts 109 flows 209 | 209.000 a->b 3070266 bytes 2262 pkts 88 flows 210 | 210.000 a->b 3673550 bytes 2708 pkts 105 flows 211 | 211.000 a->b 2179310 bytes 1671 pkts 89 flows 212 | 212.000 a->b 2722368 bytes 2079 pkts 107 flows 213 | 213.000 a->b 3532588 bytes 2637 pkts 114 flows 214 | 214.000 a->b 3234116 bytes 2454 pkts 122 flows 215 | 215.000 a->b 3141732 bytes 2345 pkts 103 flows 216 | 216.000 a->b 3072377 bytes 2349 pkts 121 flows 217 | 217.000 a->b 2347231 bytes 1774 pkts 84 flows 218 | 218.000 a->b 1845695 bytes 1489 pkts 105 flows 219 | 219.000 a->b 5522828 bytes 3937 pkts 106 flows 220 | 220.000 a->b 5868258 bytes 4176 pkts 109 flows 221 | 221.000 a->b 4617716 bytes 3323 pkts 95 flows 222 | 222.000 a->b 1600458 bytes 1296 pkts 94 flows 223 | 223.000 a->b 1841887 bytes 1442 pkts 88 flows 224 | 224.000 a->b 3024822 bytes 2211 pkts 80 flows 225 | 225.000 a->b 3567015 bytes 2631 pkts 103 flows 226 | 226.000 a->b 2176259 bytes 
1670 pkts 89 flows 227 | 227.000 a->b 2099201 bytes 1688 pkts 118 flows 228 | 228.000 a->b 3893055 bytes 2878 pkts 115 flows 229 | 229.000 a->b 2846135 bytes 2138 pkts 98 flows 230 | 230.000 a->b 2878868 bytes 2157 pkts 97 flows 231 | 231.000 a->b 2799133 bytes 2043 pkts 73 flows 232 | 232.000 a->b 4169122 bytes 3079 pkts 121 flows 233 | 233.000 a->b 2159550 bytes 1683 pkts 99 flows 234 | 234.000 a->b 2909151 bytes 2206 pkts 108 flows 235 | 235.000 a->b 2963668 bytes 2230 pkts 103 flows 236 | 236.000 a->b 4788196 bytes 3458 pkts 109 flows 237 | 237.000 a->b 4333968 bytes 3126 pkts 97 flows 238 | 238.000 a->b 8276132 bytes 5767 pkts 104 flows 239 | 239.000 a->b 4797034 bytes 3428 pkts 88 flows 240 | 240.000 a->b 4468471 bytes 3219 pkts 99 flows 241 | 241.000 a->b 5364141 bytes 3793 pkts 90 flows 242 | 242.000 a->b 4774949 bytes 3423 pkts 98 flows 243 | 243.000 a->b 2649756 bytes 2028 pkts 105 flows 244 | 244.000 a->b 1936028 bytes 1519 pkts 92 flows 245 | 245.000 a->b 3336962 bytes 2506 pkts 116 flows 246 | 246.000 a->b 3307531 bytes 2464 pkts 104 flows 247 | 247.000 a->b 2530421 bytes 1923 pkts 97 flows 248 | 248.000 a->b 6141973 bytes 4344 pkts 102 flows 249 | 249.000 a->b 5057845 bytes 3614 pkts 96 flows 250 | 250.000 a->b 5643553 bytes 3986 pkts 91 flows 251 | 251.000 a->b 6832435 bytes 4837 pkts 114 flows 252 | 252.000 a->b 4280153 bytes 3070 pkts 89 flows 253 | 253.000 a->b 4684863 bytes 3394 pkts 111 flows 254 | 254.000 a->b 3916642 bytes 2848 pkts 94 flows 255 | 255.000 a->b 4295484 bytes 3094 pkts 95 flows 256 | 256.000 a->b 4985962 bytes 3553 pkts 94 flows 257 | 257.000 a->b 4657082 bytes 3368 pkts 106 flows 258 | 258.000 a->b 4227582 bytes 3096 pkts 112 flows 259 | 259.000 a->b 3026687 bytes 2234 pkts 87 flows 260 | 260.000 a->b 3921080 bytes 2835 pkts 93 flows 261 | 261.000 a->b 5070599 bytes 3672 pkts 119 flows 262 | 262.000 a->b 4614387 bytes 3362 pkts 117 flows 263 | 263.000 a->b 3948133 bytes 2891 pkts 104 flows 264 | 264.000 a->b 2498573 bytes 1926 
pkts 106 flows 265 | 265.000 a->b 2789738 bytes 2084 pkts 90 flows 266 | 266.000 a->b 3255880 bytes 2424 pkts 103 flows 267 | 267.000 a->b 4389425 bytes 3179 pkts 102 flows 268 | 268.000 a->b 4374069 bytes 3200 pkts 114 flows 269 | 269.000 a->b 3011661 bytes 2260 pkts 104 flows 270 | 270.000 a->b 4008960 bytes 2931 pkts 103 flows 271 | 271.000 a->b 4016988 bytes 2926 pkts 100 flows 272 | 272.000 a->b 5637619 bytes 3970 pkts 88 flows 273 | 273.000 a->b 5057630 bytes 3613 pkts 96 flows 274 | 274.000 a->b 3439646 bytes 2537 pkts 99 flows 275 | 275.000 a->b 3334608 bytes 2464 pkts 98 flows 276 | 276.000 a->b 5213493 bytes 3729 pkts 103 flows 277 | 277.000 a->b 3665659 bytes 2683 pkts 96 flows 278 | 278.000 a->b 3345557 bytes 2481 pkts 102 flows 279 | 279.000 a->b 2372969 bytes 1803 pkts 90 flows 280 | 280.000 a->b 2706097 bytes 2077 pkts 108 flows 281 | 281.000 a->b 5045397 bytes 3635 pkts 113 flows 282 | 282.000 a->b 8642946 bytes 5985 pkts 91 flows 283 | 283.000 a->b 7204992 bytes 5048 pkts 98 flows 284 | 284.000 a->b 7021346 bytes 4919 pkts 98 flows 285 | 285.000 a->b 7588115 bytes 5298 pkts 99 flows 286 | 286.000 a->b 7387437 bytes 5170 pkts 99 flows 287 | 287.000 a->b 5996239 bytes 4243 pkts 98 flows 288 | 288.000 a->b 5237709 bytes 3759 pkts 106 flows 289 | 289.000 a->b 4645750 bytes 3292 pkts 80 flows 290 | 290.000 a->b 4872360 bytes 3472 pkts 94 flows 291 | 291.000 a->b 7540424 bytes 5278 pkts 103 flows 292 | 292.000 a->b 6566896 bytes 4574 pkts 78 flows 293 | 293.000 a->b 5018746 bytes 3557 pkts 85 flows 294 | 294.000 a->b 4883407 bytes 3492 pkts 97 flows 295 | 295.000 a->b 6343160 bytes 4435 pkts 85 flows 296 | 296.000 a->b 6751791 bytes 4801 pkts 123 flows 297 | 297.000 a->b 6315792 bytes 4450 pkts 95 flows 298 | 298.000 a->b 5390754 bytes 3875 pkts 111 flows 299 | 299.000 a->b 4882914 bytes 3510 pkts 103 flows 300 | -------------------------------------------------------------------------------- /script/clean.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -f *_flow.txt 4 | rm -f *_counters.txt 5 | find . -name \*\.pyc -exec rm {} \; 6 | find . -name \*\.pyo -exec rm {} \; 7 | -------------------------------------------------------------------------------- /script/convert.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from networkx import read_gml, write_dot,MultiGraph 3 | from os.path import splitext, basename 4 | import math 5 | import random 6 | import ipaddr 7 | 8 | def distance(lon1, lat1, lon2, lat2, sol=2.0e8): 9 | R = 40003.2/(math.pi*2) * 1000 # meters 10 | # R = 3963.1 # miles 11 | 12 | lon1 = math.radians(lon1) 13 | lat1 = math.radians(lat1) 14 | lon2 = math.radians(lon2) 15 | lat2 = math.radians(lat2) 16 | 17 | dlon = lon2 - lon1 18 | dlat = lat2 - lat1 19 | 20 | a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(dlon/2))**2 21 | c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a)) 22 | d = R * c 23 | return '{:.3f}ms'.format(d/sol*1000) 24 | 25 | def add_traffic(graph, pairs=8): 26 | subnet = ipaddr.IPv4Network('10.0.0.0/8') 27 | numsubnets = int(math.log(pairs,2) + 1) 28 | subiter = subnet.iter_subnets(numsubnets) 29 | added = 0 30 | while True: 31 | a, b = random.sample(graph.nodes(),2) 32 | try: 33 | srcnet = next(subiter) 34 | dstnet = next(subiter) 35 | except StopIteration: 36 | break 37 | added += 1 38 | adict = graph.node[a] 39 | bdict = graph.node[b] 40 | if 'ipdests' in graph.node[a]: 41 | graph.node[a]['ipdests'] = '{} {}'.format(graph.node[a]['ipdests'], srcnet) 42 | else: 43 | graph.node[a]['ipdests'] = srcnet 44 | 45 | if 'ipdests' in graph.node[b]: 46 | graph.node[b]['ipdests'] = '{} {}'.format(graph.node[b]['ipdests'], dstnet) 47 | else: 48 | graph.node[b]['ipdests'] = dstnet 49 | graph.node[a]['traffic'] = 'm1' 50 | graph.node[a]['m1'] = "modulator start=0.0 generator=s1 profile=((3600,),(1,))" 51 | 
graph.node[a]['s1'] = 'harpoon ipsrc={} ipdst={} $harpoonsubspec'.format(srcnet, dstnet) 52 | print "Added {} src/dst traffic generation pairs".format(added) 53 | 54 | def add_measurement(graph, numnodes=1): 55 | mnodes = random.sample(graph.nodes(), numnodes) 56 | return ' '.join([str(x) for x in mnodes]) 57 | 58 | if len(sys.argv) != 2: 59 | print >>sys.stderr,"Error: need a gml graph name" 60 | sys.exit() 61 | 62 | outname = splitext(sys.argv[1])[0] + '.dot' 63 | basename = splitext(basename(sys.argv[1]))[0] 64 | random.seed(1) 65 | 66 | print "Reading {}, writing {}".format(sys.argv[1], outname) 67 | 68 | graph = read_gml(sys.argv[1]) 69 | hsubspec = "flowsize=exponential(1/10000.0) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 70 | 71 | mnodes = add_measurement(graph) 72 | 73 | newgraph = MultiGraph(name=basename, 74 | counterexportfile=basename+"_counters", 75 | flowexport="text", 76 | flowsampling=1.0, 77 | pktsampling=1.0, 78 | exportcycle=60, 79 | counterexport=True, 80 | counterexportinterval=1, 81 | longflowtmo=60, 82 | flowinactivetmo=60, 83 | harpoonsubspec=hsubspec, 84 | measurementnodes=mnodes) 85 | 86 | def get_cap(label): 87 | if 'OC192/STM64' in label: 88 | return '10Gb' 89 | elif 'OC3' in label: 90 | return '155Mb' 91 | elif 'OC12' in label: 92 | return '622Mb' 93 | elif 'OC48' in label: 94 | return '2.4Gb' 95 | else: 96 | return '1Gb' 97 | 98 | for n1,n2,ed in graph.edges_iter(data=True): 99 | # print n1, n2, ed 100 | n1d = graph.node[n1] 101 | n2d = graph.node[n2] 102 | # print n1d,n2d 103 | dist = distance(n1d['Longitude'],n1d['Latitude'],n2d['Longitude'],n2d['Latitude']) 104 | # print dist 105 | loc1 = '{}, {}'.format(n1d['label'], n1d['Country']) 106 | loc2 = '{}, {}'.format(n2d['label'], n2d['Country']) 107 | span = '{} to {}'.format(loc1, loc2) 108 | newgraph.add_node(n1, autoack='False', location=loc1) 109 | newgraph.add_node(n2, 
autoack='False', location=loc2) 110 | cap = get_cap(ed['LinkLabel']) 111 | newgraph.add_edge(n1, n2, weight=1, capacity=cap, delay=dist, span=span) 112 | 113 | add_measurement(newgraph) 114 | add_traffic(newgraph) 115 | write_dot(newgraph, outname) 116 | -------------------------------------------------------------------------------- /script/profiler.py: -------------------------------------------------------------------------------- 1 | import cProfile 2 | import random 3 | import sys 4 | 5 | random.seed(42) 6 | 7 | sys.path.append(".") 8 | sys.path.append("./traffic_generators") 9 | sys.path.append("./tcpmodels") 10 | sys.path.append("./flowexport") 11 | 12 | import fs 13 | sim = fs.FsCore(1, 10, debug=False) 14 | 15 | p = cProfile.Profile() 16 | # p.run("sim.run('conf/simple_speed.json', configonly=False)") 17 | p.run("sim.run('test.dot', configonly=False)") 18 | p.print_stats(sort=1) 19 | 20 | -------------------------------------------------------------------------------- /script/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH=.:../pox:$PYTHONPATH 4 | # python -O fs.py -i1 -t60 conf/openflow1.dot 5 | # python -O fs.py -i1 -t600 conf/ex_simple.dot 6 | # python -O fs.py -i1 -t600 conf/ex_pareto.dot 7 | 8 | # python -O fs.py -i1 -t600 conf/openflow_spf_small_harpoon.dot 9 | # python -O fs.py -i1 -t60 conf/openflow_spf_small_cbr.dot 10 | 11 | python -O fs.py -i1 -t60 conf/test.conf 12 | -------------------------------------------------------------------------------- /script/runspeed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH=.:../pox:$PYTHONPATH 4 | python -OO fs.py -s42 -i1 -t600 conf/simple_speed.json 5 | -------------------------------------------------------------------------------- /script/runtests.sh: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash 2 | 3 | export PYTHONPATH=.:../pox 4 | for tfile in spec/*spec.py 5 | do 6 | echo "** Running tests in ${tfile} **" 7 | python ${tfile} 8 | done 9 | 10 | -------------------------------------------------------------------------------- /script/speed_out.txt: -------------------------------------------------------------------------------- 1 | ** Tue Apr 16 14:02:15 2013: 2.7.2 (default, Oct 11 2012, 20:14:37) [GCC 4.2.1 Compatible Apple Clang 4.0 (tags/Apple/clang-418.0.60)] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 23.982 2 | ** Tue Apr 16 14:13:18 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 2.0.0-beta2 with GCC 4.2.1] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 17.300 3 | ** Thu Apr 25 21:00:10 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 2.0.0-beta2 with GCC 4.2.1] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 16.350 4 | ** Fri Apr 26 14:09:03 2013: 2.7.2 (default, Oct 11 2012, 20:14:37) [GCC 4.2.1 Compatible Apple Clang 4.0 (tags/Apple/clang-418.0.60)] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 27.514 5 | ** Fri Apr 26 14:09:29 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 2.0.0-beta2 with GCC 4.2.1] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 16.194 6 | ** Sat Apr 27 20:37:17 2013: 2.7.2 (default, Oct 11 2012, 20:14:37) [GCC 4.2.1 Compatible Apple Clang 4.0 (tags/Apple/clang-418.0.60)] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 27.545 7 | ** Sat Apr 27 20:37:50 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 
2.0.0-beta2 with GCC 4.2.1] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 16.764 8 | ** Mon Apr 29 17:24:01 2013: 2.7.2 (default, Oct 11 2012, 20:14:37) [GCC 4.2.1 Compatible Apple Clang 4.0 (tags/Apple/clang-418.0.60)] Darwin:milky.colgate.edu:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 28.026 9 | ** Mon Apr 29 17:24:48 2013: 2.7.2 (default, Oct 11 2012, 20:14:37) [GCC 4.2.1 Compatible Apple Clang 4.0 (tags/Apple/clang-418.0.60)] Darwin:milky.colgate.edu:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 26.675 10 | ** Tue Apr 30 11:10:21 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 2.0.0-beta2 with GCC 4.2.1] Darwin:milky.colgate.edu:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 16.279 11 | ** Tue Apr 30 11:10:39 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 2.0.0-beta2 with GCC 4.2.1] Darwin:milky.colgate.edu:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 16.283 12 | ** Tue Apr 30 11:10:58 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 2.0.0-beta2 with GCC 4.2.1] Darwin:milky.colgate.edu:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 16.504 13 | ** Thu May 2 13:25:27 2013: 2.7.3 (7e4f0faa3d51, Nov 16 2012, 16:56:51)[PyPy 2.0.0-beta1 with GCC 4.4.3] Linux:clab:3.2.0-40-generic-pae:#64-Ubuntu SMP Mon Mar 25 21:44:41 UTC 2013:i686 16.152 14 | ** Thu May 2 13:26:14 2013: 2.7.3 (7e4f0faa3d51, Nov 16 2012, 16:56:51)[PyPy 2.0.0-beta1 with GCC 4.4.3] Linux:clab:3.2.0-40-generic-pae:#64-Ubuntu SMP Mon Mar 25 21:44:41 UTC 2013:i686 15.692 15 | ** Wed May 15 14:48:26 2013: 2.7.3 (7e4f0faa3d51, Nov 16 2012, 16:56:51)[PyPy 2.0.0-beta1 with GCC 4.4.3] 
Linux:clab:3.2.0-40-generic-pae:#64-Ubuntu SMP Mon Mar 25 21:44:41 UTC 2013:i686 9.210 16 | ** Wed May 15 17:20:40 2013: 2.7.3 (3eef596df459, Apr 06 2013, 09:50:58)[PyPy 2.0.0-beta2 with GCC 4.2.1] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 11.677 17 | ** Wed May 29 07:09:35 2013: 2.7.3 (5acfe049a5b0, May 21 2013, 13:47:22)[PyPy 2.0.2 with GCC 4.2.1 Compatible Apple LLVM 4.2 (clang-425.0.28)] Darwin:milky.local:12.3.0:Darwin Kernel Version 12.3.0: Sun Jan 6 22:37:10 PST 2013; root:xnu-2050.22.13~1/RELEASE_X86_64:x86_64 9.812 18 | ** Fri Aug 9 07:06:37 2013: 2.7.3 (480845e6b1dd, Jul 31 2013, 10:58:28)[PyPy 2.1.0 with GCC 4.2.1 Compatible Clang Compiler] Darwin:milky.local:12.4.0:Darwin Kernel Version 12.4.0: Wed May 1 17:57:12 PDT 2013; root:xnu-2050.24.15~1/RELEASE_X86_64:x86_64 9.549 19 | -------------------------------------------------------------------------------- /script/speedcmp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import time 6 | 7 | outf = open("script/speed_out.txt", "a") 8 | begin = time.time() 9 | os.system("./script/runspeed.sh") 10 | end = time.time() 11 | print "total time:",end-begin 12 | 13 | #os.system("diff a_counters.txt ./script/a_counters.txt") 14 | #os.system("diff b_counters.txt ./script/b_counters.txt") 15 | #os.system("diff a_flow.txt ./script/a_flow.txt") 16 | #os.system("diff b_flow.txt ./script/b_flow.txt") 17 | 18 | outf.write("** {}: {} {} {:.3f}\n".format(str(time.asctime()).strip(), sys.version.strip().replace('\n',''), ':'.join(os.uname()), end-begin)) 19 | outf.close() 20 | -------------------------------------------------------------------------------- /spec/configurator_spec.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock import Mock 3 | import tempfile 4 | from 
spec_base import FsTestBase 5 | import fslib.configurator as configurator 6 | import fslib.common as fscommon 7 | import os 8 | 9 | # dry out configuration stuff 10 | # better conf tests 11 | # work on lower-layer stuff arp/etc. 12 | 13 | dot_conf1 = ''' 14 | graph test { 15 | // 2 nodes: a and b 16 | flowexportfn=text_export_factory 17 | counterexportfile="counters" 18 | flowsampling=1.0 19 | pktsampling=1.0 20 | exportcycle=60 21 | counterexport=True 22 | counterexportinterval=1 23 | longflowtmo=60 24 | flowinactivetmo=60 25 | measurementnodes="a" 26 | 27 | // slightly DRYer form of configuration 28 | harpoon="harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=exponential(1/10000.0) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 29 | 30 | // another way to DRY things out; only specify things that change 31 | harpoonsubspec="flowsize=exponential(1/10000.0) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 32 | 33 | harpoonsubspec2="flowsize=empiricaldistribution('/tmp/filesizes.txt') flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 34 | 35 | a [ 36 | autoack="False" 37 | ipdests="10.1.0.0/16" 38 | traffic="m1 m2 m3" 39 | m1="modulator start=0.0 generator=harpoon profile=((3600,),(1,))" 40 | m2="modulator start=0.0 generator=s1 profile=((3600,),(1,))" 41 | m3="modulator start=0.0 generator=s2 profile=((3600,),(1,))" 42 | m4="modulator start=0.0 generator=s3 profile=((3600,),(1,))" 43 | s1="harpoon ipsrc=10.1.0.0/16 ipdst=10.3.1.0/24 flowsize=exponential(1/10000.0) flowstart=exponential(100) ipproto=randomchoice(6) sport=randomchoice(22,80,443) dport=randomunifint(1025,65535) lossrate=randomchoice(0.001)" 44 | s2="harpoon ipsrc=10.2.0.0/16 ipdst=10.4.1.0/24 $harpoonsubspec" 45 | 
class ConfiguratorTests(FsTestBase):
    '''Exercise FsConfigurator loading of dot- and json-format topology configs.'''

    def setUp(self):
        # The empiricaldistribution() reference in the test configs reads
        # this file, so it must exist before any config is loaded.
        with open("/tmp/filesizes.txt", "w") as outfile:
            outfile.write("100 200 300 400 500\n600 700 800 900 1000\n")

    def tearDown(self):
        os.unlink(self.cfgfname)
        os.unlink("/tmp/filesizes.txt")

    def mkconfig(self, cfg):
        # Write the given config text into a fresh temp file and remember
        # its name so tearDown can remove it.
        fd, fname = tempfile.mkstemp()
        self.cfgfname = fname
        print(self.cfgfname)
        with os.fdopen(fd, 'w') as outfile:
            outfile.write(cfg)

    def _load_and_check(self, conftext, configtype):
        # Shared body of the three read-config tests: load the config and
        # verify the expected two-node, bidirectional-link topology.
        self.mkconfig(conftext)
        cfg = configurator.FsConfigurator()
        topology = cfg.load_config(self.cfgfname, configtype=configtype)
        self.assertItemsEqual(topology.nodes.keys(), ['a', 'b'])
        self.assertItemsEqual(topology.links.keys(), [('a', 'b'), ('b', 'a')])

    def testReadConfigDot(self):
        self._load_and_check(dot_conf1, "dot")

    def testReadConfigJson1(self):
        self._load_and_check(json_conf1, "json")

    def testReadConfigJson2(self):
        self._load_and_check(json_conf2, "json")


if __name__ == '__main__':
    unittest.main()
15 | self.ident2 = FlowIdent(str(ipaddr.IPAddress('10.0.1.1')), str(ipaddr.IPAddress('192.168.5.2')), 17, 5, 42) 16 | 17 | def testFlowIdent(self): 18 | ftfwd2 = self.ident2.mkreverse().mkreverse() 19 | self.assertEqual(self.ident2.key, ftfwd2.key) 20 | 21 | def testBuildFlowlet(self): 22 | f1 = Flowlet(self.ident1) 23 | f1.flowstart = time.time() 24 | f1.flowend = time.time() + 10 25 | self.assertEqual(repr(f1.key), repr(self.ident1)) 26 | 27 | def testCopy(self): 28 | # NB: shallow copy of f1; flow key will be identical 29 | f1 = Flowlet(self.ident2) 30 | f2 = copy.copy(f1) 31 | # test whether FlowIdent keys referred to by each flowlet 32 | # are the same object 33 | self.assertIs(f1.key, f2.key) 34 | 35 | def testAdd(self): 36 | f1 = Flowlet(self.ident1) 37 | f1.pkts = 1 38 | f1.bytes = 1 39 | f2 = copy.copy(f1) 40 | f2.pkts = 1 41 | f2.bytes = 1 42 | f1 += f2 43 | self.assertEqual(f1.pkts, 2) 44 | self.assertEqual(f1.bytes, 2) 45 | self.assertEqual(f2.pkts, 1) 46 | self.assertEqual(f2.bytes, 1) 47 | 48 | def testSubtractive(self): 49 | f1 = SubtractiveFlowlet(self.ident1, "removeuniform(0.001)") 50 | # need to do some mocking to test action 51 | 52 | 53 | if __name__ == '__main__': 54 | unittest.main() 55 | -------------------------------------------------------------------------------- /spec/fs_spec.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock import Mock 3 | 4 | from spec_base import FsTestBase 5 | from fs import * 6 | from fslib.common import fscore 7 | 8 | class SimTests(FsTestBase): 9 | @classmethod 10 | def setUpClass(cls): 11 | SimTests.sim = FsCore(1.0, debug=True, progtick=1.0) 12 | 13 | def testNewSimulatorSingleton(self): 14 | self.assertIs(fscore(), SimTests.sim) 15 | 16 | def testAfter(self): 17 | def doafter(): 18 | self.assertEqual(SimTests.sim.now, 1.0) 19 | SimTests.sim.after(1.0, "test after", doafter) 20 | self.assertEqual(SimTests.sim.now, 0.0) 21 | 
SimTests.sim.run(None) 22 | 23 | @classmethod 24 | def tearDownClass(cls): 25 | SimTests.sim.unmonkeypatch() 26 | 27 | if __name__ == '__main__': 28 | unittest.main() 29 | -------------------------------------------------------------------------------- /spec/ofswitch_spec.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock import Mock 3 | 4 | #from fslib.flowlet import FlowIdent, Flowlet, SubtractiveFlowlet, OpenflowMessage, ofp_match_from_flowlet 5 | #from pox.openflow.flow_table import SwitchFlowTable, TableEntry 6 | #import pox.openflow.libopenflow_01 as poxof 7 | #from node import * 8 | #import ipaddr 9 | #import time 10 | #import copy 11 | 12 | class TestOfSwitch(unittest.TestCase): 13 | def setUp(self): 14 | self.mocksim = Mock() 15 | self.mocksim.now = 0 16 | self.mconfig = Mock() 17 | self.mocknode = Mock() 18 | self.mocklink = Mock() 19 | self.switch = OpenflowSwitch("test",self.mocksim,True,self.mconfig) 20 | self.linkobj = Mock() 21 | self.linkobj.flowlet_arrival = Mock() 22 | self.switch.link_table = {'next': self.linkobj} 23 | 24 | def testFlowletArrNoTableEntry(self): 25 | flowlet = Flowlet(FlowIdent()) 26 | self.switch.match_table = Mock(return_value=None) 27 | controller_link = Mock() 28 | self.switch.link_table['controller'] = controller_link 29 | controller_link.flowlet_arrival = Mock(return_value=None) 30 | self.mocksim.now = 1 31 | self.assertEqual(self.switch.flowlet_arrival(flowlet, "prev","next"),"controller") 32 | self.switch.match_table.assert_called_with(flowlet,"prev") 33 | 34 | def testFlowletArrHasTableEntry(self): 35 | flowlet = Flowlet(FlowIdent()) 36 | self.switch.match_table = Mock(return_value="next") 37 | self.mocksim.now = 1 38 | self.assertEqual(self.switch.flowlet_arrival(flowlet, "prev","next"), "next") 39 | self.switch.match_table.assert_called_with(flowlet, "prev") 40 | self.linkobj.flowlet_arrival.assert_called_with(flowlet, "test", "next") 41 | 42 | def 
testMatchTableNoMatch(self): 43 | flowlet = Flowlet(FlowIdent()) 44 | self.assertIsNone(self.switch.match_table(flowlet, "prev")) 45 | 46 | def testMatchTableOneExactMatch(self): 47 | flowlet = Flowlet(FlowIdent(srcip='1.1.1.1',dstip='2.2.2.2',ipproto=6,sport=20000,dport=80,srcmac='00:00:00:00:00:01',dstmac='00:00:00:00:00:02',vlan=1)) 48 | new_rule = ofp_match_from_flowlet(flowlet) 49 | queuer = poxof.ofp_action_enqueue() 50 | queuer.port = "next" 51 | self.switch.flow_table.add_entry(TableEntry(match=new_rule,now=self.mocksim.now, actions=[queuer])) 52 | rv = self.switch.match_table(flowlet, "prev") 53 | self.assertEqual(rv, "next") 54 | 55 | def testMatcher(self): 56 | flowlet = Flowlet(FlowIdent(srcip='1.1.1.1',dstip='2.2.2.2',ipproto=6,sport=20000,dport=80,srcmac='00:00:00:00:00:01',dstmac='00:00:00:00:00:02',vlan=1)) 57 | match_obj = poxof.ofp_match() 58 | match_obj.dl_src = flowlet.srcmac 59 | match_obj.dl_dst = flowlet.dstmac 60 | match_obj.dl_vlan = flowlet.vlan 61 | match_obj.nw_src = flowlet.srcaddr 62 | match_obj.nw_dst = flowlet.dstaddr 63 | match_obj.nw_proto = flowlet.ipproto 64 | match_obj.tp_src = flowlet.srcport 65 | match_obj.tp_dst = flowlet.dstport 66 | 67 | matcher = ofp_match_from_flowlet(flowlet, ports=True) 68 | self.assertTrue(match_obj == matcher) 69 | 70 | match_obj = poxof.ofp_match() 71 | # match_obj.dl_src = flowlet.srcmac 72 | # match_obj.dl_dst = flowlet.dstmac 73 | match_obj.dl_vlan = flowlet.vlan 74 | match_obj.nw_src = flowlet.srcaddr 75 | match_obj.nw_dst = flowlet.dstaddr 76 | match_obj.nw_proto = flowlet.ipproto 77 | # match_obj.tp_src = flowlet.srcport 78 | match_obj.tp_dst = flowlet.dstport 79 | self.assertFalse(match_obj == matcher) 80 | self.assertTrue(match_obj.matches_with_wildcards(matcher)) 81 | 82 | flowlet = Flowlet(FlowIdent(ipproto=6)) 83 | matcher = ofp_match_from_flowlet(flowlet) 84 | match_obj = poxof.ofp_match() 85 | self.assertTrue(match_obj.matches_with_wildcards(matcher)) 86 | match_obj.nw_proto = 17 87 | 
self.assertFalse(match_obj.matches_with_wildcards(matcher)) 88 | 89 | def testMatchTableOneWildcardMatch(self): 90 | flowlet = Flowlet(FlowIdent(srcip='1.1.1.1',dstip='2.2.2.2',ipproto=6,sport=20000,dport=80,srcmac='00:00:00:00:00:01',dstmac='00:00:00:00:00:02',vlan=1)) 91 | # new_rule = self.switch.flow_table.matcher_from_flowlet(flowlet) 92 | new_rule = ofp_match() # as wildcardish as it gets 93 | queuer = poxof.ofp_action_enqueue() 94 | queuer.port = "next" 95 | self.switch.flow_table.add_entry(TableEntry(match=new_rule,now=self.mocksim.now, actions=[queuer])) 96 | rv = self.switch.match_table(flowlet, "prev") 97 | self.assertEqual(rv, "next") 98 | 99 | def testUpdateTable(self): 100 | flet = Flowlet(FlowIdent(srcip='1.1.1.1',dstip='2.2.2.2',ipproto=6,sport=20000,dport=80,srcmac='00:00:00:00:00:01',dstmac='00:00:00:00:00:02',vlan=1)) 101 | actions = {} 102 | actions['port'] = 'fakeport' 103 | match = ofp_match_from_flowlet(flet) 104 | ofm = OpenflowMessage(flet.flowident, message_type = 'ofp_flow_mod', \ 105 | match = match, action = actions, match_dl_src = None, \ 106 | command = "add") 107 | self.assertEqual(ofm.message_type, "ofp_flow_mod") 108 | self.assertTrue(isinstance(ofm.message.pox_ofp_message,poxof.ofp_flow_mod)) 109 | rv = self.switch.update_table(ofm) 110 | self.assertEqual(rv[0],'added') 111 | self.assertEqual(len(self.switch.flow_table.entries), 1) 112 | self.assertEqual(self.switch.flow_table.entries[0].match, match) 113 | print self.switch.flow_table.entries[0] 114 | print "Matching entries for port: ",self.switch.flow_table.entries_for_port('fakeport') 115 | self.assertIsNotNone(self.switch.flow_table.entries_for_port('fakeport')) ## 116 | 117 | def testEvictTableEntry(self): 118 | self.mocksim.now = 2 119 | flet = Flowlet(FlowIdent(srcip='1.1.1.1',dstip='2.2.2.2',ipproto=6,sport=20000,dport=80,srcmac='00:00:00:00:00:01',dstmac='00:00:00:00:00:02',vlan=1)) 120 | actions = {} 121 | actions['port'] = 'fakeport' 122 | match = 
ofp_match_from_flowlet(flet) 123 | ofm = OpenflowMessage(flet.flowident, message_type = 'ofp_flow_mod', \ 124 | match = match, action = actions, match_dl_src = None, \ 125 | command = "add", idle_timeout=0.5) 126 | self.assertEqual(ofm.message_type, "ofp_flow_mod") 127 | self.assertTrue(isinstance(ofm.message.pox_ofp_message,poxof.ofp_flow_mod)) 128 | rv = self.switch.update_table(ofm) 129 | self.assertEqual(rv[0],'added') 130 | self.assertEqual(len(self.switch.flow_table.entries), 1) 131 | self.assertEqual(self.switch.flow_table.entries[0].match, match) 132 | self.switch.flow_table.entries[0].counters['created'] = 1 133 | self.switch.flow_table.entries[0].counters['last_touched'] = 1 134 | 135 | self.linkobj = Mock() 136 | self.linkobj.flowlet_arrival = Mock() 137 | self.switch.link_table = {'controller': self.linkobj} 138 | print self.switch.flow_table.entries[0] 139 | self.assertEqual(self.switch.table_ager(),1) 140 | self.linkobj.flowlet_arrival.assert_called_once() 141 | 142 | if __name__ == '__main__': 143 | # unittest.main() 144 | raise Exception("Test cases out of date.") 145 | -------------------------------------------------------------------------------- /spec/spec_base.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import fslib.common as fscommon 3 | 4 | class FsTestBase(unittest.TestCase): 5 | 6 | @classmethod 7 | def setUpClass(cls): 8 | '''Set up logging; turn on debug messages''' 9 | fscommon.setup_logger(None, True) 10 | 11 | -------------------------------------------------------------------------------- /spec/traffic_spec.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | class TrafficTests(unittest.TestCase): 4 | pass 5 | 6 | 7 | 8 | # def regression(): 9 | # exporter = text_export_factory 10 | # sim = Simulator(0.05, 'i2.dot', exporter, debug=True, endtime=30) 11 | 12 | # print 'houston->atl delay',sim.delay('houston', 
'atlanta') 13 | # print 'houston->atl capacity',sim.capacity('houston', 'atlanta') 14 | # print 'next hop from ny to chicago',sim.nexthop('newyork','chicago') 15 | # print 'next hop from kc to seattle',sim.nexthop('kansascity','seattle') 16 | # print 'next hop from atlanta to losangeles',sim.nexthop('atlanta','losangeles') 17 | 18 | # #dn = sim.destnode('newyork', '10.1.1.5') 19 | # #print 'dest node from ny to 10.1.1.5 is',dn 20 | # #print 'path from ny to',dn,'is:', 21 | # #current = 'newyork' 22 | # #while current != dn: 23 | # # nh = sim.nexthop(current, dn) 24 | # # print nh, 25 | # # current = nh 26 | # #print 27 | 28 | # print 'owd from ny to la:',sim.owd('newyork','losangeles') 29 | 30 | # #gen = SimpleGeneratorNode(sim, 'newyork', ipaddr.IPAddress('10.1.1.5'), ipaddr.IPAddress('10.5.2.5'), 1) 31 | # #sim.after(0.1, gen.start) 32 | # #sim.run() 33 | 34 | 35 | ## FIXME 36 | 37 | 38 | # d1 = { 'ipsrc':'10.4.0.0/16', 'ipdst':'10.7.1.0/24', 'flowsize':'pareto(10000,1.2)', 'flowstart':'exponential(0.1)', 'pktsize':'randomunifint(1000,1500)', 'ipproto':'randomchoice(socket.IPPROTO_TCP)', 'dport':'randomchoice(22,80,443)', 'sport':'randomunifint(1025,65535)', 'emitprocess':'randomchoice(x)' } 39 | 40 | # d2 = 'ipsrc=10.2.0.0/16 ipdst=10.3.1.0/24 flowsize=pareto(50000,1.18) flowstart=exponential(0.5) pktsize=normal(1000,200) ipproto=randomchoice(6) dport=randomchoice(22,80,443) sport=randomunifint(1025,65535) lossrate=randomuniffloat(0.005,0.01) mss=randomchoice(1500,576,1500) emitprocess=normal(x,x*0.1) iptos=randomchoice(0x0,0x10,0x08,0x04,0x02)' 41 | 42 | # d3 = 'ipsrc=10.2.0.0/16 ipdst=10.3.1.0/24 flowsize=exponential(1.0/100000) flowstart=randomchoice(10) ipproto=randomchoice(6) dport=randomchoice(22,80,443) sport=randomunifint(1025,65535) lossrate=randomuniffloat(0.05,0.10)' 43 | 44 | # d3 = mkdict(d3) 45 | # harpoon = HarpoonGeneratorNode(None, 'test', tcpmodel='mathis', **d3) 46 | 47 | # flowlet,sent,emitrv,dnode = harpoon.newflow(test=True, xint=1.0) 48 | 
from random import choice
from math import log, floor, ceil, sqrt


def model(bytes, mss, rtt, interval, p, rwnd=1048576):
    '''Implements the Cardwell/Savage/Anderson (INFOCOM 2000) improvement
    on the PFTK98 TCP latency model.

    Arguments:
      bytes: flow size to transfer, in bytes
      mss: maximum segment size, in bytes
      rtt: round-trip time, in seconds
      interval: simulation tick interval, in seconds
      p: loss probability (assumed equal in forward and reverse direction)
      rwnd: receiver window, in bytes (default 1MB)

    Returns (flowduration_seconds, generator); the generator yields the
    average number of bytes to emit per simulation interval.
    '''
    # assume loss probability is the same in forward and reverse direction
    pr = pf = p

    # initial SYN timeout = 3.0 sec
    ts = 3.0

    # NB: nondeterministic -- initial congestion window of 1-3 segments
    initial_window = choice([1, 2, 3])

    gamma = 1.5
    wmax = rwnd / mss  # receive window, in MSS

    # eq(4): expected handshake time (computed for reference; not folded
    # into the returned duration by this implementation)
    elh = rtt + ts * ((1.0 - pr) / (1 - 2.0 * pr) + (1.0 - pf) / (1 - 2 * pf) - 2.0)

    # eq(5): expected number of packets in initial slow-start phase
    d = bytes // mss
    if bytes % mss > 0:
        d += 1
    edss = floor((1 - (1 - p) ** d) * (1 - p) / p + 1)

    # eq(11): expected window at the end of slow start
    # (previously this statement was duplicated verbatim; computed once now)
    # NB: assume that sources are not receive-window limited
    ewss = edss * (gamma - 1) / gamma + initial_window / gamma

    # eq(15): expected time to send edss packets in initial slow start
    if ewss > wmax:
        etss = rtt * log(wmax / initial_window, gamma) + 1.0 + 1.0 / wmax * (edss - (gamma * wmax - initial_window) / (gamma - 1.0))
    else:
        etss = rtt * log(edss * (gamma - 1) / initial_window + 1, gamma)

    # eq(21): expected data left to send in congestion avoidance
    edca = d - edss

    # eq(16): probability that slow start ends with a loss
    lss = 1 - (1 - p) ** d

    # eq(17)
    Q = lambda p, w: min(1.0, (1 + (1 - p) ** 3 * (1 - (1 - p) ** (w - 3))) / ((1 - (1 - p) ** w) / (1 - (1 - p) ** 3)))

    # eq(19)
    G = lambda p: 1 + p + 2 * p ** 2 + 4 * p ** 3 + 8 * p ** 4 + 16 * p ** 5 + 32 * p ** 6

    # eq(18): cost of an RTO
    to = rtt * 2
    Ezto = G(p) * to / (1 - p)

    # eq(20): expected cost of any loss in slow start
    etloss = lss * (Q(p, ewss) * Ezto + (1 - Q(p, ewss)) * rtt)

    # eq(23): expected unconstrained congestion-avoidance window W(p).
    # FIX: was "2+b/3*b + sqrt(8*(1-p)/3*b*p + (2*b/(3*b))**2)" -- a
    # transcription error (note 2*b/(3*b) degenerates to 2/3 regardless of
    # b).  Per PFTK98/CSA00 the term is (2+b)/(3b) and the radicand is
    # 8(1-p)/(3bp) + ((2+b)/(3b))**2.
    b = 2.0
    wp = (2 + b) / (3 * b) + sqrt(8 * (1 - p) / (3 * b * p) + ((2 + b) / (3 * b)) ** 2)

    # eq(22): steady-state congestion-avoidance send rate
    if wp < wmax:
        R = ((1 - p) / p + wp / 2.0 + Q(p, wp)) / (rtt * (b / 2.0 * wp + 1) + (Q(p, wp) * G(p) * to) / (1 - p))
    else:
        # NOTE(review): unlike the branch above, the Q*G*to/(1-p) term here
        # sits *inside* the rtt*(...) factor; suspected misplaced
        # parenthesis relative to the published formula -- confirm against
        # the paper before changing.
        R = ((1 - p) / p + wmax / 2.0 + Q(p, wmax)) / (rtt * (b / 8.0 * wmax + (1 - p) / (p * wmax) + Q(p, wmax) * G(p) * to / (1 - p)))

    # eq(24): expected time to send remaining data in congestion avoidance
    etca = edca / R

    # fixed delayed-ACK penalty
    etdelack = 0.1

    # eq(25): expected total data-transfer time; never less than one RTT
    flowduration = etss + etloss + etca + etdelack
    flowduration = max(flowduration, rtt)

    csa00bw = bytes / flowduration

    # spread the bytes evenly over the simulation intervals the flow spans
    nintervals = ceil(flowduration / interval)
    nintervals = max(nintervals, 1)
    avgemit = bytes / nintervals

    def byteemit():
        # range() rather than py2-only xrange keeps this py2/py3 compatible
        for i in range(int(nintervals) + 1):
            yield avgemit

    return flowduration, byteemit()


if __name__ == '__main__':
    print(model(1048576, 1470, 0.060, 1, 0.01))
17 | flowduration = bytes / bw 18 | 19 | nintervals = ceil(flowduration / interval) 20 | nintervals = max(nintervals, 1) 21 | avgemit = bytes/float(nintervals) 22 | assert(avgemit > 0.0) 23 | 24 | def byteemit(): 25 | for i in xrange(int(nintervals)+1): 26 | yield avgemit 27 | 28 | return flowduration, byteemit() 29 | 30 | if __name__ == '__main__': 31 | print model(1048576, 1470, 0.060, 1, 0.01) 32 | -------------------------------------------------------------------------------- /traffic_generators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jsommers/fs/3d85e6d2e2a4c30a7c4716aab75dd766af5d0eb1/traffic_generators/__init__.py -------------------------------------------------------------------------------- /traffic_generators/harpoon.py: -------------------------------------------------------------------------------- 1 | from trafgen import TrafficGenerator 2 | import socket 3 | import ipaddr 4 | from fslib.common import fscore, get_logger 5 | from fslib.flowlet import Flowlet, FlowIdent 6 | from copy import copy 7 | from importlib import import_module 8 | from fslib.util import * 9 | 10 | haveIPAddrGen = False 11 | try: 12 | import ipaddrgen 13 | haveIPAddrGen = True 14 | except: 15 | pass 16 | 17 | class HarpoonTrafficGenerator(TrafficGenerator): 18 | def __init__(self, srcnode, ipsrc='0.0.0.0', ipdst='0.0.0.0', sport=0, dport=0, flowsize=1500, pktsize=1500, flowstart=0, ipproto=socket.IPPROTO_TCP, lossrate=0.001, mss=1460, iptos=0x0, xopen=True, tcpmodel='csa00'): 19 | TrafficGenerator.__init__(self, srcnode) 20 | self.logger = get_logger('fs.harpoon') 21 | self.srcnet = ipaddr.IPNetwork(ipsrc) 22 | self.dstnet = ipaddr.IPNetwork(ipdst) 23 | if haveIPAddrGen: 24 | self.ipsrcgen = ipaddrgen.initialize_trie(int(self.srcnet), self.srcnet.prefixlen, 0.61) 25 | self.ipdstgen = ipaddrgen.initialize_trie(int(self.dstnet), self.dstnet.prefixlen, 0.61) 26 | 27 | if isinstance(ipproto, 
class HarpoonTrafficGenerator(TrafficGenerator):
    '''Harpoon-style traffic source.

    Draws flow sizes, inter-arrival times, ports, and addresses from
    configured random variables and paces each flow's flowlet emissions
    according to a pluggable TCP throughput model (tcpmodels.*).
    '''

    def __init__(self, srcnode, ipsrc='0.0.0.0', ipdst='0.0.0.0', sport=0, dport=0, flowsize=1500, pktsize=1500, flowstart=0, ipproto=socket.IPPROTO_TCP, lossrate=0.001, mss=1460, iptos=0x0, xopen=True, tcpmodel='csa00'):
        TrafficGenerator.__init__(self, srcnode)
        self.logger = get_logger('fs.harpoon')
        self.srcnet = ipaddr.IPNetwork(ipsrc)
        self.dstnet = ipaddr.IPNetwork(ipdst)
        if haveIPAddrGen:
            self.ipsrcgen = ipaddrgen.initialize_trie(int(self.srcnet), self.srcnet.prefixlen, 0.61)
            self.ipdstgen = ipaddrgen.initialize_trie(int(self.dstnet), self.dstnet.prefixlen, 0.61)

        def make_rv(spec):
            # Config values arrive either as strings naming a generator
            # expression (evaluated against the fslib.util helpers pulled in
            # by the wildcard import) or as plain literals, which are
            # wrapped so next() always works.
            if isinstance(spec, (str, unicode)):
                return eval(spec)
            return randomchoice(spec)

        self.ipproto = make_rv(ipproto)
        self.srcports = make_rv(sport)
        self.dstports = make_rv(dport)
        self.flowsizerv = make_rv(flowsize)
        self.pktsizerv = make_rv(pktsize)
        self.flowstartrv = make_rv(flowstart)
        self.lossraterv = make_rv(lossrate)
        self.mssrv = make_rv(mss)
        self.iptosrv = make_rv(iptos)

        self.xopen = xopen
        self.activeflows = {}

        try:
            self.tcpmodel = import_module("tcpmodels.{}".format(tcpmodel))
        except ImportError as e:
            raise InvalidFlowConfiguration('Unrecognized tcp model for harpoon: {} (Error on import: {})'.format(tcpmodel, str(e)))

    def start(self):
        # Schedule the arrival of the first flow.
        startt = next(self.flowstartrv)
        fscore().after(startt, 'harpoon-start' + str(self.srcnode), self.newflow)

    def newflow(self, xint=1.0):
        '''Create one new flow and schedule its first flowlet emission.'''
        if self.done:
            print('harpoon generator done')
            return

        flet = self.__makeflow()
        self.activeflows[flet.key] = 1

        destnode = fscore().topology.destnode(self.srcnode, flet.dstaddr)
        owd = fscore().topology.owd(self.srcnode, destnode)
        # owd may be None if routing is temporarily broken (e.g., a link is
        # down and there is no reachability); fall back to 1 second
        if not owd:
            owd = 1.0

        flet.mss = next(self.mssrv)
        lossrate = next(self.lossraterv)
        basertt = owd * 2.0

        flowduration, byteemit = self.tcpmodel.model(flet.size, flet.mss, basertt, fscore().interval, lossrate)

        # FIXME: add an end timestamp onto the flow to indicate its
        # estimated duration; routers along the path can add that end to
        # the arrival time to get better flow duration in the record.
        # Unclear what to do with raw flows.
        flet.flowstart = 0.0
        flet.flowend = flowduration
        self.logger.debug("Flow duration: %f" % flowduration)

        fscore().after(0.0, 'flowemit-' + str(self.srcnode), self.flowemit, flet, 0, byteemit, destnode)

        # Open-loop operation: schedule the next incoming flow right away.
        # (Closed-loop schedules it when this flow finishes; see flowemit().)
        if self.xopen:
            nextst = next(self.flowstartrv)
            fscore().after(nextst, 'newflow-' + str(self.srcnode), self.newflow)

    def flowemit(self, flowlet, numsent, emitrv, destnode):
        '''Emit the next interval's worth of bytes for an in-progress flow.'''
        fsend = copy(flowlet)
        fsend.bytes = int(min(next(emitrv), flowlet.bytes))
        flowlet.bytes -= fsend.bytes

        # Packet size is capped at the flow MSS and floored at 40 bytes.
        psize = int(max(40, min(next(self.pktsizerv), flowlet.mss)))
        fsend.pkts = fsend.bytes / psize
        if fsend.pkts * psize < fsend.bytes:
            fsend.pkts += 1
        # account for 40 bytes of header overhead on every packet
        fsend.bytes += fsend.pkts * 40

        if flowlet.ipproto == socket.IPPROTO_TCP:
            flags = 0x10  # ACK is set on every emission
            if numsent == 0:
                # Start of flow: set SYN and add one packet standing in for
                # the 3-way handshake.  Simplifying assumption: the
                # handshake completes within one simulator tick, with the
                # final ACK piggybacked on data.
                flags |= 0x02
                fsend.pkts += 1
                fsend.bytes += 40
            if flowlet.bytes == 0:
                # end of flow: set FIN and add the teardown packet
                flags |= 0x01
                fsend.pkts += 1
                fsend.bytes += 40
            fsend.tcpflags = flags

        numsent += 1

        self.logger.debug("sending %d bytes %d pkts %s flags; flowlet has %d bytes remaining" % (fsend.bytes, fsend.pkts, fsend.tcpflagsstr, flowlet.size))

        fscore().topology.node(self.srcnode).flowlet_arrival(fsend, 'harpoon', destnode)

        if flowlet.bytes > 0:
            # more to send: schedule the next emission one interval out
            fscore().after(fscore().interval, "flowemit-{}".format(self.srcnode), self.flowemit, flowlet, numsent, emitrv, destnode)
        else:
            # nothing left: the flow is no longer active
            del self.activeflows[flowlet.key]
            # Closed-loop mode: only now schedule the next flow arrival.
            if not self.xopen:
                fscore().after(next(self.flowstartrv), "newflow-{}".format(self.srcnode), self.newflow)

    def __makeflow(self):
        '''Draw a new flow identifier/size not already in the active set.'''
        while True:
            if haveIPAddrGen:
                srcip = str(ipaddr.IPv4Address(ipaddrgen.generate_addressv4(self.ipsrcgen)))
                dstip = str(ipaddr.IPv4Address(ipaddrgen.generate_addressv4(self.ipdstgen)))
            else:
                # NOTE(review): host offsets restricted to 0-2 (full-prefix
                # sampling exists in history, commented out); `random` is
                # assumed to arrive via fslib.util's wildcard import --
                # confirm.
                srcip = str(ipaddr.IPAddress(int(self.srcnet) + random.randint(0, 2)))
                dstip = str(ipaddr.IPAddress(int(self.dstnet) + random.randint(0, 2)))

            flet = Flowlet(FlowIdent(srcip, dstip, next(self.ipproto), next(self.srcports), next(self.dstports)), bytes=int(next(self.flowsizerv)))
            flet.iptos = next(self.iptosrv)
            if flet.key not in self.activeflows:
                return flet
continuous=True, flowlets=None, tcpflags=None, iptos=None, 18 | fps=None, pps=None, bps=None, pkts=None, bytes=None, pktsize=None, 19 | icmptype=None, icmpcode=None, interval=None, autoack=False): 20 | TrafficGenerator.__init__(self, srcnode) 21 | # assume that all keyword params arrive as strings 22 | # print ipsrc,ipdst 23 | self.ipsrc = IPNetwork(ipsrc) 24 | self.ipdst = IPNetwork(ipdst) 25 | if haveIPAddrGen: 26 | self.ipsrcgen = ipaddrgen.initialize_trie(int(self.ipsrc), self.ipsrc.prefixlen, 0.61) 27 | self.ipdstgen = ipaddrgen.initialize_trie(int(self.ipdst), self.ipdst.prefixlen, 0.61) 28 | 29 | 30 | self.sport = self.dport = None 31 | self.icmptype = self.icmpcode = None 32 | self.autoack = False 33 | if autoack and isinstance(autoack, (str,unicode)): 34 | self.autoack = eval(autoack) 35 | else: 36 | self.autoack = autoack 37 | 38 | try: 39 | self.ipproto = int(ipproto) 40 | except: 41 | if ipproto == 'tcp': 42 | self.ipproto = IPPROTO_TCP 43 | elif ipproto == 'udp': 44 | self.ipproto = IPPROTO_UDP 45 | elif ipproto == 'icmp': 46 | self.ipproto = IPPROTO_ICMP 47 | else: 48 | raise InvalidFlowConfiguration('Unrecognized protocol:'+str(ipproto)) 49 | 50 | if not iptos: 51 | self.iptos = randomchoice(0x0) 52 | else: 53 | if isinstance(iptos, int): 54 | self.iptos = randomchoice(self.iptos) 55 | elif isinstance(iptos, (str,unicode)): 56 | self.iptos = eval(iptos) 57 | 58 | if self.ipproto == IPPROTO_ICMP: 59 | xicmptype = xicmpcode = 0 60 | if icmptype: 61 | xicmptype = eval(icmptype) 62 | if icmpcode: 63 | xicmpcode = eval(icmpcode) 64 | if isinstance(xicmptype, int): 65 | xicmptype = randomchoice(xicmptype) 66 | if isinstance(xicmpcode, int): 67 | xicmpcode = randomchoice(xicmpcode) 68 | self.icmptype = xicmptype 69 | self.icmpcode = xicmpcode 70 | elif self.ipproto == IPPROTO_UDP or self.ipproto == IPPROTO_TCP: 71 | self.dport = eval(dport) 72 | if isinstance(self.dport, int): 73 | self.dport = randomchoice(self.dport) 74 | self.sport = eval(sport) 75 | if 
isinstance(self.sport, int): 76 | self.sport = randomchoice(self.sport) 77 | # print 'sport,dport',self.sport, self.dport 78 | if self.ipproto == IPPROTO_TCP: 79 | self.tcpflags = randomchoice('') 80 | if tcpflags: 81 | if re.search('\(\S+\)', tcpflags): 82 | self.tcpflags = eval(tcpflags) 83 | else: 84 | self.tcpflags = randomchoice(tcpflags) 85 | else: 86 | self.dport = self.sport = 0 87 | 88 | self.continuous = None 89 | self.nflowlets = None 90 | if continuous: 91 | if isinstance(continuous, (str,unicode)): 92 | self.continuous = eval(continuous) 93 | else: 94 | self.continuous = continuous 95 | 96 | if flowlets: 97 | self.nflowlets = eval(flowlets) 98 | if isinstance(self.nflowlets, (int, float)): 99 | self.nflowlets = randomchoice(self.nflowlets) 100 | 101 | if not self.nflowlets: 102 | self.nflowlets = randomchoice(1) 103 | 104 | 105 | if not fps and not interval: 106 | raise InvalidFlowConfiguration('Need one of fps or interval in rawflow configuration.') 107 | 108 | self.fps = self.interval = None 109 | if fps: 110 | fps = eval(fps) 111 | if isinstance(fps, int): 112 | fps = randomchoice(fps) 113 | self.fps = fps 114 | elif interval: 115 | self.interval = eval(interval) 116 | if isinstance(self.interval, (int, float)): 117 | self.interval = randomchoice(self.interval) 118 | 119 | assert(bytes) 120 | self.bytes = eval(bytes) 121 | if isinstance(self.bytes, int): 122 | self.bytes = randomchoice(self.bytes) 123 | 124 | self.pkts = self.pktsize = None 125 | 126 | if pkts: 127 | self.pkts = eval(pkts) 128 | if isinstance(self.pkts, int): 129 | self.pkts = randomchoice(self.pkts) 130 | 131 | if pktsize: 132 | self.pktsize = eval(pktsize) 133 | if isinstance(self.pktsize, int): 134 | self.pktsize = randomchoice(self.pktsize) 135 | 136 | assert(self.fps or self.interval) 137 | assert(self.pkts or self.pktsize) 138 | 139 | 140 | def __makeflow(self): 141 | if haveIPAddrGen: 142 | srcip = str(IPv4Address(ipaddrgen.generate_addressv4(self.ipsrcgen))) 143 | dstip = 
str(IPv4Address(ipaddrgen.generate_addressv4(self.ipdstgen))) 144 | else: 145 | srcip = str(IPAddress(int(self.ipsrc) + random.randint(0,self.ipsrc.numhosts-1))) 146 | dstip = str(IPAddress(int(self.ipdst) + random.randint(0,self.ipdst.numhosts-1))) 147 | 148 | ipproto = self.ipproto 149 | sport = dport = 0 150 | if ipproto == IPPROTO_ICMP: 151 | # std way that netflow encodes icmp type/code: 152 | # type in high-order byte of dport, 153 | # code in low-order byte 154 | t = next(self.icmptype) 155 | c = next(self.icmpcode) 156 | dport = t << 8 | c 157 | # print 'icmp t,c,dport',hex(t),hex(c),hex(dport) 158 | else: 159 | if self.sport: 160 | sport = next(self.sport) 161 | if self.dport: 162 | dport = next(self.dport) 163 | 164 | flet = Flowlet(FlowIdent(srcip, dstip, ipproto, sport, dport)) 165 | flet.iptos = next(self.iptos) 166 | flet.flowstart = flet.flowend = fscore().now 167 | 168 | if flet.ipproto == IPPROTO_TCP: 169 | flet.ackflow = not self.autoack 170 | 171 | tcpflags = next(self.tcpflags) 172 | flaglist = tcpflags.split('|') 173 | xtcpflags = 0x0 174 | for f in flaglist: 175 | if f == 'FIN': 176 | xtcpflags |= 0x01 177 | elif f == 'SYN': 178 | xtcpflags |= 0x02 179 | elif f == 'RST': 180 | xtcpflags |= 0x04 181 | elif f == 'PUSH' or f == 'PSH': 182 | xtcpflags |= 0x08 183 | elif f == 'ACK': 184 | xtcpflags |= 0x10 185 | elif f == 'URG': 186 | xtcpflags |= 0x20 187 | elif f == 'ECE': 188 | xtcpflags |= 0x40 189 | elif f == 'CWR': 190 | xtcpflags |= 0x80 191 | else: 192 | raise InvalidFlowConfiguration('Invalid TCP flags mnemonic ' + f) 193 | 194 | flet.tcpflags = xtcpflags 195 | return flet 196 | 197 | 198 | def flowemit(self, flowlet, destnode, xinterval, ticks): 199 | assert(xinterval > 0.0) 200 | f = copy.copy(flowlet) 201 | f.bytes = next(self.bytes) 202 | if self.pktsize: 203 | psize = next(self.pktsize) 204 | f.pkts = f.bytes / psize 205 | if f.bytes % psize > 0: 206 | f.pkts += 1 207 | else: 208 | f.pkts = next(self.pkts) 209 | 210 | 
fscore().topology.node(self.srcnode).flowlet_arrival(f, 'simple', destnode) 211 | 212 | ticks -= 1 213 | fscore().after(xinterval, 'rawflow-flowemit-'+str(self.srcnode), self.flowemit, flowlet, destnode, xinterval, ticks) 214 | 215 | def start(self): 216 | self.callback() 217 | 218 | def callback(self): 219 | f = self.__makeflow() 220 | f.bytes = next(self.bytes) 221 | if self.pktsize: 222 | psize = next(self.pktsize) 223 | f.pkts = f.bytes / psize 224 | if f.bytes % psize > 0: 225 | f.pkts += 1 226 | else: 227 | f.pkts = next(self.pkts) 228 | 229 | 230 | destnode = fscore().topology.destnode(self.srcnode, f.dstaddr) 231 | 232 | # print 'rawflow:',f 233 | # print 'destnode:',destnode 234 | 235 | xinterval = None 236 | if self.interval: 237 | xinterval = next(self.interval) 238 | xinterval = max(0, xinterval) 239 | else: 240 | fps = next(self.fps) 241 | xinterval = 1.0/fps 242 | 243 | ticks = None 244 | if not self.continuous: 245 | ticks = next(self.nflowlets) 246 | else: 247 | if self.nflowlets: 248 | ticks = next(self.nflowlets) 249 | else: 250 | ticks = 1 251 | 252 | # print 'ticks',ticks 253 | # print 'xinterval',xinterval 254 | 255 | if not ticks or ticks == 1: 256 | fscore().topology.node(self.srcnode).flowlet_arrival(f, 'simple', destnode) 257 | else: 258 | fscore().after(0, "rawflow-flowemit-{}".format(self.srcnode), self.flowemit, f, destnode, xinterval, ticks) 259 | 260 | if self.continuous and not self.done: 261 | fscore().after(xinterval, "rawflow-cb-".format(self.srcnode), self.callback) 262 | else: 263 | self.done = True 264 | 265 | 266 | # rawflow -> alias for simple 267 | RawflowTrafficGenerator = SimpleTrafficGenerator 268 | 269 | -------------------------------------------------------------------------------- /traffic_generators/subtractive.py: -------------------------------------------------------------------------------- 1 | from trafgen import TrafficGenerator 2 | from fslib.flowlet import SubtractiveFlowlet,FlowIdent 3 | from fslib.common 
from fslib.common import fscore
from fslib.util import *


class SubtractiveTrafficGenerator(TrafficGenerator):
    """Generator that emits a single SubtractiveFlowlet, which removes
    matching traffic as it travels from srcnode toward dstnode."""

    def __init__(self, srcnode, dstnode=None, action=None, ipdstfilt=None,
                 ipsrcfilt=None, ipprotofilt=None):
        TrafficGenerator.__init__(self, srcnode)
        self.dstnode = dstnode
        self.logger.debug('subtractive: %s %s %s %s %s %s' % (srcnode,dstnode,action,ipdstfilt, ipsrcfilt, ipprotofilt))

        # defaults: empty filters match the flow-identifier wildcards
        self.ipdstfilt = self.ipsrcfilt = ''
        self.ipprotofilt = 0

        assert(action)
        # NOTE(review): eval() of config-supplied text, consistent with the
        # rest of the generators; safe only for trusted configurations.
        self.action = eval(action)

        if ipdstfilt:
            self.ipdstfilt = ipaddr.IPNetwork(ipdstfilt)
        if ipsrcfilt:
            self.ipsrcfilt = ipaddr.IPNetwork(ipsrcfilt)
        if ipprotofilt:
            self.ipprotofilt = int(ipprotofilt)

    def start(self):
        """Schedule the one-shot subtractive callback at time zero."""
        fscore().after(0.0, 'subtractive-gen-callback', self.callback)

    def callback(self):
        """Build the subtractive flowlet and inject it at srcnode; it applies
        its action at each router on the way to dstnode."""
        ident = FlowIdent(self.ipsrcfilt, self.ipdstfilt, ipproto=self.ipprotofilt)
        flet = SubtractiveFlowlet(ident, action=self.action)
        self.logger.info('Subtractive generator callback')
        fscore().topology.node(self.srcnode).flowlet_arrival(flet, 'subtractor', self.dstnode)

# --------------------------------------------------------------------------------
# /traffic_generators/trafgen.py:
from abc import ABCMeta, abstractmethod
from fslib.common import fscore, get_logger


class TrafficGenerator(object):
    """Abstract base class for all traffic generators: holds the source
    node, a per-generator logger, and a 'done' completion flag."""
    __metaclass__ = ABCMeta

    def __init__(self, srcnode):
        self.srcnode = srcnode
        self.done = False
        self.logger = get_logger("tgen.{}".format(self.srcnode))

    @abstractmethod
    def start(self):
        """Begin traffic generation; concrete subclasses must implement."""
        pass

    def get_done(self):
        return self.__done

    def set_done(self, tf):
        self.__done = tf

    # Python 2 style explicit property built from the accessors above.
    done = property(get_done, set_done, None, 'done flag')

    def stop(self):
        """Mark this generator as finished."""
        self.done = True