├── .DS_Store ├── LICENSE ├── README.md ├── data ├── memory-snapshots │ ├── mem-flow_stack-321-1.txt │ ├── mem-pkt_stack-321-100-1.txt │ └── mem-regex-backdoor.txt ├── regex-dfa │ ├── backdoor-regex │ │ ├── tcp-http-F-0.txt │ │ └── tcp-other-F-1.txt │ ├── dfa-backdoor-tcp-http-F-0.txt │ ├── dfa-backdoor-tcp-other-F-1.txt │ ├── dfa20-backdoor-tcp-http-F-0.txt │ └── dfa20-backdoor-tcp-other-F-1.txt └── xflow_off_test │ ├── dmx_v1_m_f321p100_mc2_000_cls.nffw │ ├── dmx_v1_m_f321p100_mc2_000_cls.p4cfg │ └── dmx_v1_m_f321p100_mc2_000_cls_pif_design.json ├── deepmatch ├── .DS_Store ├── dm.p4 ├── dm.p4cfg ├── includes │ ├── .DS_Store │ ├── dmx_options.h │ ├── headers.p4 │ └── parser.p4 └── plugin.c └── eval-tools ├── setup_experiment.ini └── setup_experiment.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhypolite/DeepMatch/5c47f3e02f3209def0cacff53061f25550e66a00/.DS_Store -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DeepMatch 2 | 3 | DeepMatch is a deep packet inspection capability designed for programmable network processors that can be called as an extern in P4 programs. More broadly, our paper provides guidance for applications that need to touch every byte of the payload. Such tasks, that can fit in the compute and memory bounds identified in the paper, are candidates for implementation on this class of network processors, and the paper provides guidance on how to start thinking about implementing them. 4 | 5 | For further details, see our CoNEXT 2020 [paper and video](https://dl.acm.org/doi/abs/10.1145/3386367.3431290). 6 | 7 | ## 1. Great Questions from the Conference: 8 | 9 | #### I have a question about the motivation. what use cases do you envision for this, given that everything is going encrypted now? 10 | 11 | Section 3 of the DeepMatch paper provides a few use cases. 12 | 13 | I would argue that everything is not going to encryption. 
In fact, many distributed web applications do not encrypt traffic between all components. For example, consider a cloud service offloading packet analysis compute to the SmartNIC. Here, the SmartNIC performs packet classification and adds a new header containing information that a later component can pick up and use to quickly determine how to handle the packet without the need to reprocess it. DeepMatch has the necessary capabilities to support such use cases. 14 | 15 | That said, there are many applications that do use encryption. Note that encryption/decryption is another operation that needs to process every byte of the payload, so an interesting future work to build on this might be to offload decryption to the NPU. Decryption-pattern-match pipelines are already seen in some networks and is a possible direction to explore. 16 | 17 | #### Building on top of the above question, what about using AI algos? It's a RISC platform, but sounds like a possible compromise wrt a TPU (but sticking to int math)? 18 | 19 | Techniques such as knowledge distillation and quantization may be used to deploy such algorithms on network processors. The concept of knowledge distillation looks at how to transfer knowledge from a large model to a small one, The concept of quantization looks at how to approximate a large set of values to a smaller set; e.g. from real numbers to floating point to fixed point integer. 20 | 21 | More broadly, our paper provides guidance for anything that needs to touch every byte of the payload. So, classification tasks that can fit in the compute and memory bounds the paper identify become candidates, and the paper provides guidance on how to start thinking about implementing them. 22 | 23 | #### Is DeepMatch the end-and-be-all for SmartNIC performance? If not, how would you improve the card? 24 | 25 | Netronome does the right thing to make their card small and compact and real time. 
The memories are not larger because, if they were, then they would be slower and you would fit less FPCs on the card, so you would get less throughput. Netronome is giving us more of a real-time engine but then putting the burden on the programmer in order to actually achieve real time. So, a lot of the simple things like "oh, I need more memory" would eat into the performance that you get. 26 | 27 | One limiting factor in the DeepMatch design is that each FPC is running a pretty tight DFA loop. This loop is a small fraction of the 8K instruction limit. So, if we could replicate the DFA loop in both code stores (when in shared code mode) then we would eliminate contention for that block of code; now the only contention would be for uncommon code that is used for packet headers. This should result in less impact due to code sharing. Unfortunately, such code placement is not supported by the version of Netronome's NFP-6000 that we used. 28 | 29 | #### Will future programmable networking innovation be in the hardware or the software? 30 | 31 | DeepMatch motivates a match-action processing pipeline design with better support for full-packet processing (not just headers). That said, the future is in co-design — software/high-level definitions of tasks that are then supported by specialized hardware. That means definiting good abstractions and compilers/tools to map those abstractions along with developing architectures that can provide both flexibility and programmability and high performance and efficiency. 32 | 33 | ## 2. Setup and Installation 34 | 35 | ### DeepMatch Host 36 | 37 | Install the Netronome SDK6 Run Time Environment, Hardware debug server, and BSP according to their documentation. 
38 | 39 | We used the following configuration for the DeepMatch host: 40 | * Dell PowerEdge R720 with dual Intel Xeon E5-2650 v2 8-core 2.60 GHz processors and 64 GB DDR3 1600MHz RAM 41 | * Netronome Agilio-CX Dual-Port 40 Gigabit Ethernet Intelligent Server Adapter (Part ID: ISA-4000-40-2-2) 42 | * Ubuntu 16.04 LTS 43 | * Python 2.7 44 | * Enable SR-IOV in the bios 45 | 46 | ### Netronome SDK IDE Development Host 47 | 48 | Install the Netronome SDK IDE according to their documentation. Note that the current version of DeepMatch is written in P4v14 (not P4v16). We used a Windows 10 host to run the IDE. 49 | 50 | ### Traffic Generator 51 | 52 | We used the following configuration for our traffic generator host: 53 | * Dell R720 with dual Intel Xeon E5-2680 v2 10-core 2.80GHz processors and 256 GB DDR3 1866 MHz RAM 54 | * Mellanox MCX456A-ECA ConnectX-4 dual-port QSFP28 100GbE 55 | * Unbuntu 16.04 LTS 56 | * Anaconda3 with Python 3.7.8 and Python 2.7.12 57 | * dpdk-17.08.1 with 1 GB Hugepages 58 | * pktgen-3.4.9 59 | * latest version of tcpreplay 60 | * latest version of python scapy 61 | * python fabric (http://www.fabfile.org/) is used to automate many of our experiments, including interacting with the NFP card remotely. 62 | 63 | ### Switch backbone 64 | 65 | We use an Arista DCS-7050QX-32-R 32x Port 40G QSFP+ Layer 3 Switch 66 | 67 | ## 3. Build 68 | 69 | Use Netronome Programmer Studio (we used version 6.0.3.1 build 3241) to create a new project with the DeepMatch P4v14 source files. 70 | 71 | Useful Programmer Studio Settings: 72 | 73 | * Chip Setting: 74 | * nfp-6xxxc-b0 75 | * Project Configuation (see "P4/Managed C" tab): 76 | * Number of worker MEs: 80 77 | * Use Shared Code Store: enable 78 | * Reduced thread usage: enable 79 | * Optional Components (see "P4/Managed C" tab): 80 | * GRO: disable 81 | * Preprocessor definitions (see the "General" tab): 82 | * PHAST_FX_PY: where X is no. flows and Y is OoO buffer size; e.g. 
PHAST_F321_P100 83 | * PHAST_DFA_X: where X identifies the DFA to load; e.g. PHAST_DFA_MAL_BACKDOOR 84 | * PHAST_DLOC_X: where X identifies which memory to place the DFA; e.g. CLS, CTM, IMEM, EMEM 85 | * PHAST_LOCK: enables all locks (needed when reordering is turned on) 86 | 87 | Build the code and transfer the following files to the DeepMatch host: 88 | * file.nffw 89 | * file.p4cfg 90 | * out/pif_design.json 91 | 92 | ## 4. Usage (Manual Eval) 93 | 94 | ### Running DeepMatch 95 | 96 | #### Start the runtime environment (only need to do this once): 97 | `systemctl start nfp-sdk6-rte1` 98 | 99 | #### Check that runtime environment is running properly: 100 | `sudo netstat -tulpn | grep -E 'pif_rte|nfp-sdk-hwdbgs'` 101 | 102 | #### Load the program: 103 | `rtecli -p 20207 design-load -p pif_design.json -f file.nffw` 104 | 105 | #### Load the tables: 106 | `rtecli -p 20207 config-reload -c file.p4cfg` 107 | 108 | #### Check that everything loaded properly: 109 | `rtecli -p 20207 status` 110 | 111 | Another way to check that firmware is loaded: 112 | 113 | `sudo /opt/netronome/bin/nfp-nffw status -n 1` 114 | 115 | #### Load DFA and stack variables: 116 | `sudo setup_experiment.py -o v` 117 | 118 | #### unload the program: 119 | `rtecli -p 20207 design-unload` 120 | 121 | One way to check the status 122 | 123 | `rtecli -p 20207 status` 124 | 125 | Another way to check the status 126 | 127 | `sudo /opt/netronome/bin/nfp-nffw status -n 1` 128 | 129 | ### Evaluating DeepMatch 130 | 131 | Use tcpreplay, dpdk/pktgen, or your favorite tools to transmit/receive packets to/from DeepMatch. 132 | 133 | ### Checking the Evaluation Results 134 | 135 | #### Check if all flows closed properly: 136 | 137 | `sudo /opt/netronome/bin/nfp-rtsym -n 1 -L | grep flow_ht` 138 | 139 | Use this output to determine the location and size of flow_ht. Then dump the variable's memory into a file and check the value. 
140 | 141 | `sudo /opt/netronome/bin/nfp-rtsym -n 1 -L | grep pkt_ht` 142 | 143 | Use this output to determine the location and size of pkt_ht. Then dump the variable's memory into a file and check the value. 144 | 145 | #### Check for dropped packets: 146 | 147 | One way to check 148 | 149 | `sudo /opt/netronome/bin/nfp -n 1 -m mac -e show port stats 0 0-8` 150 | 151 | Another way to check 152 | 153 | `rtecli -p 20207 counters list-system` 154 | 155 | ## 5. Usage (Automated Eval) 156 | 157 | We use Python fabric to automate the manual evaluation steps. 158 | http://www.fabfile.org/ 159 | -------------------------------------------------------------------------------- /data/memory-snapshots/mem-flow_stack-321-1.txt: -------------------------------------------------------------------------------- 1 | 0x00000001 0x00020003 0x00040005 0x00060007 2 | 0x00080009 0x000a000b 0x000c000d 0x000e000f 3 | 0x00100011 0x00120013 0x00140015 0x00160017 4 | 0x00180019 0x001a001b 0x001c001d 0x001e001f 5 | 0x00200021 0x00220023 0x00240025 0x00260027 6 | 0x00280029 0x002a002b 0x002c002d 0x002e002f 7 | 0x00300031 0x00320033 0x00340035 0x00360037 8 | 0x00380039 0x003a003b 0x003c003d 0x003e003f 9 | 0x00400041 0x00420043 0x00440045 0x00460047 10 | 0x00480049 0x004a004b 0x004c004d 0x004e004f 11 | 0x00500051 0x00520053 0x00540055 0x00560057 12 | 0x00580059 0x005a005b 0x005c005d 0x005e005f 13 | 0x00600061 0x00620063 0x00640065 0x00660067 14 | 0x00680069 0x006a006b 0x006c006d 0x006e006f 15 | 0x00700071 0x00720073 0x00740075 0x00760077 16 | 0x00780079 0x007a007b 0x007c007d 0x007e007f 17 | 0x00800081 0x00820083 0x00840085 0x00860087 18 | 0x00880089 0x008a008b 0x008c008d 0x008e008f 19 | 0x00900091 0x00920093 0x00940095 0x00960097 20 | 0x00980099 0x009a009b 0x009c009d 0x009e009f 21 | 0x00a000a1 0x00a200a3 0x00a400a5 0x00a600a7 22 | 0x00a800a9 0x00aa00ab 0x00ac00ad 0x00ae00af 23 | 0x00b000b1 0x00b200b3 0x00b400b5 0x00b600b7 24 | 0x00b800b9 0x00ba00bb 0x00bc00bd 0x00be00bf 25 | 0x00c000c1 
0x00c200c3 0x00c400c5 0x00c600c7 26 | 0x00c800c9 0x00ca00cb 0x00cc00cd 0x00ce00cf 27 | 0x00d000d1 0x00d200d3 0x00d400d5 0x00d600d7 28 | 0x00d800d9 0x00da00db 0x00dc00dd 0x00de00df 29 | 0x00e000e1 0x00e200e3 0x00e400e5 0x00e600e7 30 | 0x00e800e9 0x00ea00eb 0x00ec00ed 0x00ee00ef 31 | 0x00f000f1 0x00f200f3 0x00f400f5 0x00f600f7 32 | 0x00f800f9 0x00fa00fb 0x00fc00fd 0x00fe00ff 33 | 0x01000101 0x01020103 0x01040105 0x01060107 34 | 0x01080109 0x010a010b 0x010c010d 0x010e010f 35 | 0x01100111 0x01120113 0x01140115 0x01160117 36 | 0x01180119 0x011a011b 0x011c011d 0x011e011f 37 | 0x01200121 0x01220123 0x01240125 0x01260127 38 | 0x01280129 0x012a012b 0x012c012d 0x012e012f 39 | 0x01300131 0x01320133 0x01340135 0x01360137 40 | 0x01380139 0x013a013b 0x013c013d 0x013e013f 41 | 0x01400001 0x00000000 42 | -------------------------------------------------------------------------------- /data/regex-dfa/backdoor-regex/tcp-http-F-0.txt: -------------------------------------------------------------------------------- 1 | &cmd=edit_file&dir= 2 | &cmd=img 3 | &submit=Bind 4 | &submit=Execute 5 | .jsp?ppp= 6 | /AES 7 | /DES 8 | /PostView.nhn?blogId=cjddms52&logNo=130104953765&parentCategoryNo=1 9 | /SUS 10 | /ZES 11 | /a/pwn.jsp? 12 | /appsvc/appmsg4.asp?fmnumber= 13 | /browser/browser/Browser.jsp? 14 | /browser/shell.jsp? 15 | /bynazi/cmd.jsp? 16 | /cmd/cmd.jsp? 17 | /cmd1/cmd.jsp? 18 | /com/cmd.jsp? 19 | /e/e.jsp? 20 | /e/shell.jsp? 21 | /eg/smd.jsp? 22 | /egd/smd.jsp? 23 | /egdus/smd.jsp? 24 | /esc/esc/ss.jsp? 25 | /genesis/genesis.jsp? 26 | /is/cmd.jsp? 27 | /javadev/cmd.jsp? 28 | /jbossass/index.jsp? 29 | /jbossass/jbossass.jsp? 30 | /jbossdoc/jbossdoc.jsp? 31 | /jbossdox/jbossdox.jsp? 32 | /jbossos/jbossos.jsp? 33 | /jbot/jbot.jsp? 34 | /jdev/cmd.jsp? 35 | /jdev2/cmd.jsp? 36 | /jdev3/cmd.jsp? 37 | /jexws3/jexws3.jsp? 38 | /jspshell/index.jsp? 39 | /mela/mela.jsp? 40 | /mgr/lnx.jsp? 41 | /momo/no.jsp? 42 | /oss/smd.jsp? 
43 | /phpMyAdmin/server_sync.php 44 | /publickey/ 45 | /sh3ll/sh3ll.jsp? 46 | /shel/shel.jsp? 47 | /shellinvoker/shellinvoker.jsp? 48 | /ssvcss/index.jsp? 49 | /tunnel/tunnel.jsp? 50 | /x/pwn.jsp? 51 | /zecmd/zecmd.jsp? 52 | 007r 53 | EXTRA-DATA-SPACE: 54 | GETLIST|20| 55 | MSG|20| 56 | Mozilla/5.0|20|(compatible|3B 20|MSIE|20|10.0|3B 20|Windows|20|NT|20|6.1|3B 20|Trident/6.0) 57 | PASS-ON 58 | PASS-ON0 59 | PCRatd 60 | POST|20|/favicon.ico 61 | PWD|0A| 62 | PWD|20| 63 | Server:|20|Stalin 64 | VER:GHOST|20|VERSION|20| 65 | Windows|20|PowerShell|20|r 66 | X-CMD:|20|CONNECT|0D 0A| 67 | X-CMD:|20|READ|0D 0A| 68 | ]|00 20 00|h|00|i|00|k|00|i|00|t|00|>|00| 69 | macName= 70 | openbrowser|20| 71 | plugins/ToolsPack/ToolsPack.php 72 | sq|00|~|00 00| 73 | ur|00| 74 | |00 00 00 11 C8 00 00 00 00 00 00 00 00 00 00 00| 75 | |1F 8B 08 00 00 00 00 00 00 00| 76 | |A0 00 00 00| 77 | |C2 E5 E5 E5 9E A0 D7 A4 A6 D0 D5 DD DC C8 D6 DD D7 D5 C8 D1 D6 83 80 C8 DD A4 D1 A1 C8 A4 D2 D5 D7 DD A3 A4 A1 DD A6 D7 DD 98 E5| 78 | |C2 E5 E5 E5 9E D2 DD D6 A0 A4 A6 A7 A3 C8 A0 A3 DD A7 C8 D1 DC DD 80 C8 A4 D5 D0 DC C8 A3 D5 A7 D0 A7 A1 D4 D7 D3 D1 D4 A0 98 E5| 79 | |C2 E5 E5 E5 9E DC DD A1 DC D0 DD A3 A6 C8 A1 D5 A4 D7 C8 D1 83 D4 86 C8 A7 DD D1 D4 C8 D7 D6 D7 A4 A7 D6 D0 D2 A0 D2 A6 DD 98 E5| 80 | |C2 E5 E5 E5 9E DD A4 A3 D4 A6 D4 D3 D1 C8 A0 A7 A1 D3 C8 D1 87 D7 87 C8 A7 A6 D4 A3 C8 D3 D1 D3 D2 D1 A0 DC DD A4 D2 D4 D5 98 E5| 81 | |E7 E7 E7 E7 E7 E7 E7 E7 E7 E7| 82 | -------------------------------------------------------------------------------- /data/regex-dfa/backdoor-regex/tcp-other-F-1.txt: -------------------------------------------------------------------------------- 1 | .Data|03 00 00 00 04|data|05|image|05|bytes 2 | /AES 3 | /DES 4 | /SUS 5 | /ZES 6 | /publickey/ 7 | /res/|7C|1|7C|2|7C|3|7C|4|7C|5|7C|5|7C|5|7C|6|7C|5|7C|7|7C|8|7C|9|7C|10|7C|1|7C|5|7C|11|7C|12|7C|700|7C| 8 | 007r 9 | AOL|20|ADMIN|20|SERVER|20|1.1 10 | COMPNAME_END 11 | GETLIST|20| 12 | 
Georg|20|says,|20|'All|20|seems|20|fine' 13 | IAMYOURGOD 14 | INFO 15 | JO840112-CRAS8468-11150923-PCI8273V 16 | MSG|20| 17 | NICK|20|Rizee|7C|RYN|7C|05|7C| 18 | PASS-ON 19 | PASS-ON0 20 | PCRatd 21 | POWERFUN 22 | PWD|0A| 23 | PWD|20| 24 | SERVEME|20|1.X 25 | STLUDT|20|V3.3|20|-|20| 26 | Server:|20|Stalin 27 | VER:GHOST|20|VERSION|20| 28 | ]|00 20 00|h|00|i|00|k|00|i|00|t|00|>|00| 29 | catasenha|7C| 30 | client 31 | connected 32 | executafile|7C| 33 | fuckyO 34 | getclient 35 | kstart|7C| 36 | logon|7C| 37 | openbrowser|20| 38 | sq|00|~|00 00| 39 | ur|00| 40 | |00 00 00 11 C8 00 00 00 00 00 00 00 00 00 00 00| 41 | |05 C4 89 84|$p|1A|0[|82|D|8D|y|22|u|04|g|09|N3{ 42 | |1F 8B 08 00 00 00 00 00 00 00| 43 | -------------------------------------------------------------------------------- /data/xflow_off_test/dmx_v1_m_f321p100_mc2_000_cls.nffw: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhypolite/DeepMatch/5c47f3e02f3209def0cacff53061f25550e66a00/data/xflow_off_test/dmx_v1_m_f321p100_mc2_000_cls.nffw -------------------------------------------------------------------------------- /data/xflow_off_test/dmx_v1_m_f321p100_mc2_000_cls.p4cfg: -------------------------------------------------------------------------------- 1 | { 2 | "tables": { 3 | "forwardPacket_table": { 4 | "rules": [ 5 | { 6 | "action": { 7 | "data": { 8 | "theport": { 9 | "value": "p0" 10 | } 11 | }, 12 | "type": "do_forwardPacket" 13 | }, 14 | "name": "bad_flow", 15 | "match": { 16 | "meta.processPayloadResult": { 17 | "value": "5" 18 | } 19 | } 20 | }, 21 | { 22 | "action": { 23 | "data": { 24 | "theport": { 25 | "value": "p0" 26 | } 27 | }, 28 | "type": "do_forwardPacket" 29 | }, 30 | "name": "hit", 31 | "match": { 32 | "meta.processPayloadResult": { 33 | "value": "1" 34 | } 35 | } 36 | }, 37 | { 38 | "action": { 39 | "data": { 40 | "theport": { 41 | "value": "p0" 42 | } 43 | }, 44 | "type": "do_forwardPacket" 45 | }, 46 | "name": 
"out-of-order", 47 | "match": { 48 | "meta.processPayloadResult": { 49 | "value": "3" 50 | } 51 | } 52 | }, 53 | { 54 | "action": { 55 | "data": { 56 | "theport": { 57 | "value": "p0" 58 | } 59 | }, 60 | "type": "do_forwardPacket" 61 | }, 62 | "name": "low", 63 | "match": { 64 | "meta.processPayloadResult": { 65 | "value": "4" 66 | } 67 | } 68 | }, 69 | { 70 | "action": { 71 | "data": { 72 | "theport": { 73 | "value": "p0" 74 | } 75 | }, 76 | "type": "do_forwardPacket" 77 | }, 78 | "name": "no-match", 79 | "match": { 80 | "meta.processPayloadResult": { 81 | "value": "2" 82 | } 83 | } 84 | } 85 | ], 86 | "default_rule": { 87 | "action": { 88 | "data": { 89 | "theport": { 90 | "value": "p0" 91 | } 92 | }, 93 | "type": "do_forwardPacket" 94 | }, 95 | "name": "default" 96 | } 97 | }, 98 | "processPkt_table": { 99 | "default_rule": { 100 | "action": { 101 | "type": "do_processPkt" 102 | }, 103 | "name": "default" 104 | } 105 | } 106 | } 107 | } -------------------------------------------------------------------------------- /data/xflow_off_test/dmx_v1_m_f321p100_mc2_000_cls_pif_design.json: -------------------------------------------------------------------------------- 1 | { 2 | "generic": { 3 | "tables_info": { 4 | "forwardPacket_table": { 5 | "max_entries": 65536, 6 | "allowed_actions": [ 7 | "do_forwardPacket", 8 | "do_dropPacket" 9 | ], 10 | "name": "forwardPacket_table" 11 | }, 12 | "processPkt_table": { 13 | "max_entries": 1, 14 | "allowed_actions": [ 15 | "do_processPkt" 16 | ], 17 | "name": "processPkt_table" 18 | } 19 | }, 20 | "action_info": { 21 | "do_forwardPacket": { 22 | "name": "do_forwardPacket", 23 | "action_data": { 24 | "theport": { 25 | "size": 16 26 | } 27 | } 28 | }, 29 | "do_processPkt": { 30 | "name": "do_processPkt", 31 | "action_data": {} 32 | }, 33 | "do_dropPacket": { 34 | "name": "do_dropPacket", 35 | "action_data": {} 36 | } 37 | } 38 | }, 39 | "vendor": { 40 | "register_info": {}, 41 | "backend_source_info": { 42 | "date": "2018/12/05 
20:24:16", 43 | "source_files": "dmx-v1.yml" 44 | }, 45 | "counters_info": {}, 46 | "meter_info": {}, 47 | "frontend_source_info": { 48 | "date": "2018/12/05 20:24:15", 49 | "source_files": [ 50 | "C:\\Users\\uname\\NFP_SDK\\dmx-v1\\dmx-v1.p4" 51 | ] 52 | }, 53 | "tables_info": { 54 | "forwardPacket_table": { 55 | "name": "forwardPacket_table", 56 | "matches": [ 57 | { 58 | "field": "meta.processPayloadResult", 59 | "type": "exact", 60 | "class": "field", 61 | "lm_layout": [ 62 | { 63 | "width": 8, 64 | "fldbitoff": 0, 65 | "name": "processPayloadResult", 66 | "lmbitoff": 632 67 | } 68 | ] 69 | } 70 | ], 71 | "allowed_actions": [ 72 | "do_forwardPacket", 73 | "do_dropPacket" 74 | ], 75 | "id": 0, 76 | "max_entries": 65536, 77 | "data_shift": 3, 78 | "data_size": 8 79 | }, 80 | "processPkt_table": { 81 | "name": "processPkt_table", 82 | "matches": [], 83 | "allowed_actions": [ 84 | "do_processPkt" 85 | ], 86 | "id": 1, 87 | "max_entries": 1, 88 | "data_shift": 2, 89 | "data_size": 4 90 | } 91 | }, 92 | "action_info": { 93 | "do_forwardPacket": { 94 | "action_data_packed": [ 95 | { 96 | "actiondata": "theport", 97 | "name": "theport", 98 | "pad_cnt": 1, 99 | "doc": null, 100 | "pad_szbytes": 2, 101 | "pad_type": "uint16_t", 102 | "split": false, 103 | "size": 16 104 | }, 105 | { 106 | "pad_cnt": 2, 107 | "pad_type": "uint8_t", 108 | "actiondata": null, 109 | "name": "__pif_padding", 110 | "split": false, 111 | "doc": "padding", 112 | "pad_szbytes": 1, 113 | "size": 8 114 | } 115 | ], 116 | "name": "do_forwardPacket", 117 | "action_data": { 118 | "theport": { 119 | "size": 16 120 | } 121 | }, 122 | "ruleno_field": "", 123 | "noeffect": false, 124 | "id": 0, 125 | "tableno_field": "" 126 | }, 127 | "do_processPkt": { 128 | "action_data_packed": [], 129 | "name": "do_processPkt", 130 | "action_data": {}, 131 | "ruleno_field": "", 132 | "noeffect": false, 133 | "id": 1, 134 | "tableno_field": "" 135 | }, 136 | "do_dropPacket": { 137 | "action_data_packed": [], 138 | 
"name": "do_dropPacket", 139 | "action_data": {}, 140 | "ruleno_field": "", 141 | "noeffect": false, 142 | "id": 2, 143 | "tableno_field": "" 144 | } 145 | }, 146 | "multicast": { 147 | "table_rtsym": "_pif_mcast_table", 148 | "group_count": 0, 149 | "group_size": 16 150 | }, 151 | "build_uuid": [ 152 | 655417686, 153 | 3026978816, 154 | 4174057960, 155 | 3163610337 156 | ], 157 | "parser_value_set_info": {}, 158 | "digests_info": {} 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /deepmatch/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhypolite/DeepMatch/5c47f3e02f3209def0cacff53061f25550e66a00/deepmatch/.DS_Store -------------------------------------------------------------------------------- /deepmatch/dm.p4: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2017-2020 University of Pennsylvania 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | 16 | Joel Hypolite, UPenn 17 | */ 18 | 19 | #include "includes/headers.p4" 20 | #include "includes/parser.p4" 21 | 22 | /********************* 23 | ACTIONS 24 | *******************/ 25 | primitive_action processPkt(); 26 | 27 | action do_processPkt() { 28 | processPkt(); 29 | } 30 | 31 | action do_forwardPacket(theport) { 32 | modify_field(standard_metadata.egress_spec, theport); 33 | } 34 | 35 | action do_dropPacket() { 36 | drop(); 37 | } 38 | 39 | /************************* 40 | TABLE 41 | ************************/ 42 | table processPkt_table { 43 | actions { do_processPkt; } 44 | } 45 | 46 | table forwardPacket_table { 47 | reads { 48 | meta.processPayloadResult : exact; 49 | } actions { 50 | do_forwardPacket; 51 | do_dropPacket; 52 | } 53 | } 54 | 55 | /*********************** 56 | CONTROL 57 | **********************/ 58 | control ingress { 59 | if(ethernet.etherType == ETHERTYPE_IPV4) { 60 | apply(processPkt_table); 61 | } 62 | apply(forwardPacket_table); 63 | } 64 | -------------------------------------------------------------------------------- /deepmatch/dm.p4cfg: -------------------------------------------------------------------------------- 1 | { 2 | "tables": { 3 | "forwardPacket_table": { 4 | "rules": [ 5 | { 6 | "action": { 7 | "data": { 8 | "theport": { 9 | "value": "p0" 10 | } 11 | }, 12 | "type": "do_forwardPacket" 13 | }, 14 | "name": "bad_flow", 15 | "match": { 16 | "meta.processPayloadResult": { 17 | "value": "5" 18 | } 19 | } 20 | }, 21 | { 22 | "action": { 23 | "data": { 24 | "theport": { 25 | "value": "p0" 26 | } 27 | }, 28 | "type": "do_forwardPacket" 29 | }, 30 | "name": "hit", 31 | "match": { 32 | "meta.processPayloadResult": { 33 | "value": "1" 34 | } 35 | } 36 | }, 37 | { 38 | "action": { 39 | "data": { 40 | "theport": { 41 | "value": "p0" 42 | } 43 | }, 44 | "type": "do_forwardPacket" 45 | }, 46 | "name": "out-of-order", 47 | "match": { 48 | "meta.processPayloadResult": { 49 | "value": "3" 50 | } 51 | } 52 | }, 53 | { 54 | 
"action": { 55 | "data": { 56 | "theport": { 57 | "value": "p0" 58 | } 59 | }, 60 | "type": "do_forwardPacket" 61 | }, 62 | "name": "low", 63 | "match": { 64 | "meta.processPayloadResult": { 65 | "value": "4" 66 | } 67 | } 68 | }, 69 | { 70 | "action": { 71 | "data": { 72 | "theport": { 73 | "value": "p0" 74 | } 75 | }, 76 | "type": "do_forwardPacket" 77 | }, 78 | "name": "no-match", 79 | "match": { 80 | "meta.processPayloadResult": { 81 | "value": "2" 82 | } 83 | } 84 | } 85 | ], 86 | "default_rule": { 87 | "action": { 88 | "data": { 89 | "theport": { 90 | "value": "p0" 91 | } 92 | }, 93 | "type": "do_forwardPacket" 94 | }, 95 | "name": "default" 96 | } 97 | }, 98 | "processPkt_table": { 99 | "default_rule": { 100 | "action": { 101 | "type": "do_processPkt" 102 | }, 103 | "name": "default" 104 | } 105 | } 106 | } 107 | } -------------------------------------------------------------------------------- /deepmatch/includes/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jhypolite/DeepMatch/5c47f3e02f3209def0cacff53061f25550e66a00/deepmatch/includes/.DS_Store -------------------------------------------------------------------------------- /deepmatch/includes/dmx_options.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2017-2020 University of Pennsylvania 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | 16 | Joel Hypolite, UPenn 17 | */ 18 | 19 | /******************** 20 | dm_options.h 21 | ********************/ 22 | 23 | 24 | /************************* 25 | MISC GLOBALS 26 | ************************/ 27 | 28 | #define TRUE 1 29 | #define FALSE 0 30 | 31 | #define PHAST_XFLOW_ENABLED 1 32 | 33 | #define CHUNK_LW 8 34 | #define CHUNK_B 32 //(CHUNK_LW * 4) 35 | 36 | #define LOCK_SLEEP 200 37 | 38 | typedef enum { 39 | DM_RESULT = 0, 40 | DM_FOUND_ENTRY = 1, 41 | DM_DO_DFA = 2, 42 | DM_DO_DMA = 3, 43 | DM_CHECK_OOO = 4, 44 | DM_HANDLE_FIN = 5, 45 | DM_HANDLE_RST = 6 46 | } DM_FlagVals; 47 | 48 | #define DM_BIT_SET(X,N) ((X) |= (1 << (N)) ) 49 | #define DM_BIT_CLEAR(X,N) ((X) &= ~(1 << (N)) ) 50 | #define DM_BIT_CHECK(X,N) ( ((X) >> (N)) & 1 ) 51 | 52 | // the following is a variable used by snort. Usefull when using snort rulesets. each CLS has its own copy 53 | _declspec(cls) uint16_t httpPorts[] = {80,81,311,383,591,593,901,1220,1414,1741,1830,2301,2381,2809,3037,3128,3702,4343,4848,5250,6988,7000,7001,7144,7145,7510,7777,7779,8000,8008,8014,8028,8080,8085,8088,8090,8118,8123,8180,8181,8243,8280,8300,8800,8888,8899,9000,9060,9080,9090,9091,9443,9999,11371,34443,34444,41080,50002,55555}; 54 | 55 | /************************************************* 56 | SIZE FLOW & PACKET HASH TABLES & PACKET STORAGE 57 | ***********************************************/ 58 | // 321 flows; support 100 OoO packets per flow 59 | #define FLOW_HASH_TABLE_SIZE 0x1FF 60 | #define FLOW_HASH_HIGH 0xFFFFFE00 61 | #define FLOW_HASH_LOW 0x01FF 62 | #define FLOW_HASH_SHIFT 9 63 | #define FLOW_STORAGE_SIZE 321 64 | #define FLOW_PARTION_SZ 100 65 | #define FLOW_NUM_SLOTS_ERROR 777 66 | #define PKT_HASH_TABLE_SIZE 0x3FF 67 | #define PKT_HASH_HIGH 0xFFFFFC00 68 | #define PKT_HASH_LOW 0x03FF 69 | #define PKT_HASH_SHIFT 10 70 | #define PKT_NUM_SLOTS_ERROR 777 71 | #define OOO_SLOT_SIZE 1536 72 | 73 | /*********************************** 74 | HASH TABLE FOR FLOW MAINTENANCE 75 | 
**********************************/ 76 | 77 | // each bucket has a lock 78 | volatile __export __mem uint32_t f_lock[FLOW_HASH_TABLE_SIZE]; 79 | 80 | // indexes to the flow hash table 81 | volatile __export __mem uint32_t fht_idx[FLOW_HASH_TABLE_SIZE]; 82 | 83 | // The stack manages the assignment of flow hash table entries 84 | typedef struct stack_flow { 85 | uint16_t stk[FLOW_STORAGE_SIZE]; 86 | int16_t top; 87 | } STACK_flow; 88 | 89 | volatile __export __mem STACK_flow flow_stack; 90 | 91 | uint8_t f_dealloc(uint16_t); 92 | uint16_t f_alloc(void); 93 | 94 | volatile __export __mem uint32_t fstack_lock; 95 | 96 | typedef enum { 97 | FSM_0 = 0, // Initial 98 | FSM_S = 1, // Received SYN 99 | FSM_E = 2, // Established 100 | FSM_F = 3, // Received FIN 101 | FSM_W = 4, // Waiting for final ACK 102 | FSM_C = 5 // Closed 103 | } tcp_fsm_t; 104 | 105 | typedef __declspec(packed) struct { 106 | tcp_fsm_t FSM:3; 107 | uint8_t active:1; 108 | uint8_t eseq_valid:1; 109 | } flow_ctrl_bits_T; 110 | 111 | typedef struct flow_bucket_value_Type { 112 | uint32_t eseq; // expected sequence number 113 | uint16_t oooqLen; // number of packets in the ooo pool 114 | uint16_t cs; // saved dfa state 115 | } flow_bucket_value_T; 116 | 117 | // the flow hash table 118 | typedef struct flow_ht_entry_Type { 119 | uint32_t key[3]; // sip, dip, sport|dport 120 | flow_bucket_value_T value; 121 | uint16_t partition; // slice of memory used for packet storage 122 | uint16_t next_loc; // linked list ptr 123 | flow_ctrl_bits_T ctrl_bits; 124 | } flow_ht_entry_T; 125 | 126 | __shared __export __addr40 __mem flow_ht_entry_T flow_ht[FLOW_STORAGE_SIZE]; 127 | 128 | /************************************* 129 | HASH TABLE FOR Out-Of-Order PACKETS 130 | ***********************************/ 131 | 132 | // each bucket has a lock 133 | volatile __export __mem uint32_t p_lock[FLOW_STORAGE_SIZE][PKT_HASH_TABLE_SIZE]; 134 | 135 | // indexes to the packet hash table 136 | volatile __export __mem uint16_t 
pht_idx[FLOW_STORAGE_SIZE][PKT_HASH_TABLE_SIZE]; 137 | 138 | // The stack manages the assignment of packet hash table entries 139 | typedef struct stack_pkt { 140 | uint16_t stk[FLOW_PARTION_SZ]; 141 | int16_t top; 142 | } STACK_pkt; 143 | 144 | volatile __export __mem STACK_pkt pkt_stack[FLOW_STORAGE_SIZE]; 145 | 146 | uint8_t p_dealloc(uint16_t, uint16_t); 147 | uint16_t p_alloc(uint16_t); 148 | 149 | volatile __export __mem uint32_t pstack_lock[FLOW_STORAGE_SIZE]; 150 | 151 | typedef __declspec(packed) struct { 152 | uint8_t active:1; 153 | uint8_t syn:1; 154 | uint8_t fin:1; 155 | uint8_t rst:1; 156 | uint8_t ack:1; 157 | } pkt_ctrl_bits_T; 158 | 159 | typedef struct pkt_bucket_value_Type { 160 | uint16_t len; // payload length 161 | uint16_t offset; // payload offset 162 | } pkt_bucket_value_T; 163 | 164 | // the pkt hash table 165 | typedef struct pkt_ht_entry_Type { 166 | uint32_t key; // tcp->seqNo 167 | pkt_bucket_value_T value; 168 | uint16_t next_loc; // linked list ptr 169 | pkt_ctrl_bits_T ctrl_bits; 170 | } pkt_ht_entry_T; 171 | 172 | __shared __export __addr40 __mem pkt_ht_entry_T pkt_ht[FLOW_STORAGE_SIZE][FLOW_PARTION_SZ]; 173 | 174 | // process_ooo lock. 
Serialize flow processing 175 | volatile __export __mem uint32_t check_ooo_lock[FLOW_STORAGE_SIZE]; 176 | 177 | /********************************* 178 | Out-Of-Order PACKET MALLOC POOL 179 | *******************************/ 180 | 181 | // store packets DMA'ed from ctm to emem 182 | volatile __export __emem __addr40 uint8_t emem_pool[FLOW_STORAGE_SIZE][FLOW_PARTION_SZ][OOO_SLOT_SIZE]; 183 | 184 | // store packets DMA'ed from emem to ctm 185 | _declspec(ctm) uint8_t ctm_checker[OOO_SLOT_SIZE]; //each thread has its own private variable 186 | 187 | /************************* 188 | SET DFA SIZE PARAMETERS 189 | ************************/ 190 | 191 | #if defined(PHAST_DFA_MAL_TOOLS) 192 | 193 | #define NUM_DFA 1 194 | #define DFA_STATES 172 195 | #define MAX_CHAR 256 196 | 197 | #elif defined(PHAST_DFA_MAL_BACKDOOR) 198 | 199 | #define NUM_DFA 2 200 | #define DFA_STATES 1679 201 | #define MAX_CHAR 256 202 | 203 | #elif defined(PHAST_DFA_MAL_OTHER) 204 | 205 | #define NUM_DFA 4 206 | #define DFA_STATES 3446 207 | #define MAX_CHAR 256 208 | 209 | #elif defined(PHAST_DFA_MAL_CNC) 210 | 211 | #define NUM_DFA 4 212 | #define DFA_STATES 43887 213 | #define MAX_CHAR 256 214 | 215 | #elif defined(PHAST_DFA_MAL_CUSTOM) 216 | 217 | #define NUM_DFA 1 218 | #define DFA_STATES 69 219 | #define MAX_CHAR 256 220 | 221 | #elif defined(PHAST_DFA_MAL_CUSTOM2) 222 | 223 | #define NUM_DFA 1 224 | #define DFA_STATES 4 225 | #define MAX_CHAR 256 226 | 227 | #endif 228 | 229 | /***************************** 230 | SET DFA LOCATION PARAMETERS 231 | ****************************/ 232 | //////////////////////////////////////////////////////////////////// 233 | #if defined(PHAST_DLOC_CLS) 234 | _declspec(cls export scope(island)) uint16_t dfa_trans[NUM_DFA][DFA_STATES][MAX_CHAR]; 235 | #elif defined(PHAST_DLOC_CTM) 236 | _declspec(ctm export scope(island)) uint16_t dfa_trans[NUM_DFA][DFA_STATES][MAX_CHAR]; 237 | #elif defined(PHAST_DLOC_IMEM) 238 | _declspec(imem export scope(island)) uint16_t 
dfa_trans[NUM_DFA][DFA_STATES][MAX_CHAR]; 239 | #elif defined(PHAST_DLOC_EMEM) 240 | _declspec(emem export scope(island)) uint16_t dfa_trans[NUM_DFA][DFA_STATES][MAX_CHAR]; 241 | #endif 242 | 243 | -------------------------------------------------------------------------------- /deepmatch/includes/headers.p4: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2017-2020 University of Pennsylvania 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | 16 | Joel Hypolite, UPenn 17 | */ 18 | 19 | /******************** 20 | HEADERS 21 | ********************/ 22 | header_type ethernet_t { 23 | fields { 24 | dstAddr : 48; 25 | srcAddr : 48; 26 | etherType : 16; 27 | } 28 | } 29 | 30 | header_type ipv4_t { 31 | fields { 32 | version : 4; 33 | ihl : 4; 34 | diffserv : 8; 35 | totalLen : 16; 36 | identification : 16; 37 | flags : 3; 38 | fragOffset : 13; 39 | ttl : 8; 40 | protocol : 8; 41 | hdrChecksum : 16; 42 | srcAddr : 32; 43 | dstAddr: 32; 44 | } 45 | } 46 | 47 | header_type tcp_t { 48 | fields { 49 | srcPort : 16; 50 | dstPort : 16; 51 | seqNo : 32; 52 | ackNo : 32; 53 | dataOffset : 4; 54 | res : 4; 55 | flags : 8; 56 | window : 16; 57 | checksum : 16; 58 | urgentPtr : 16; 59 | } 60 | } 61 | 62 | header_type udp_t { 63 | fields { 64 | srcPort : 16; 65 | dstPort : 16; 66 | hd_length : 16; 67 | checksum : 16; 68 | } 69 | } 70 | 71 | header_type icmp_t { 72 | fields { 73 | iType : 8; 74 | iCode : 8; 75 | hdrChecksum : 16; 76 | identifier : 16; 77 | seqNum : 16; 78 | } 79 | } 80 | 81 | header ethernet_t ethernet; 82 | header ipv4_t ipv4; 83 | header tcp_t tcp; 84 | header udp_t udp; 85 | header icmp_t icmp; 86 | 87 | /******************** 88 | METADATA 89 | ********************/ 90 | 91 | header_type meta_t { 92 | fields { 93 | processPayloadResult : 8; 94 | } 95 | } 96 | 97 | metadata meta_t meta; 98 | -------------------------------------------------------------------------------- /deepmatch/includes/parser.p4: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2017-2020 University of Pennsylvania 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | Joel Hypolite, UPenn 17 | */ 18 | 19 | /******************** 20 | PARSER 21 | ********************/ 22 | parser start { 23 | return parse_ethernet; 24 | } 25 | 26 | #define ETHERTYPE_IPV4 0x0800 27 | 28 | parser parse_ethernet { 29 | extract(ethernet); 30 | return select(latest.etherType) { 31 | ETHERTYPE_IPV4 : parse_ipv4; 32 | default: ingress; 33 | } 34 | } 35 | 36 | #define IP_PROT_ICMP 0x01 37 | #define IP_PROT_TCP 0x06 38 | #define IP_PROT_UDP 0x11 39 | 40 | parser parse_ipv4 { 41 | extract(ipv4); 42 | return select(ipv4.protocol) { 43 | IP_PROT_ICMP : parse_icmp; 44 | IP_PROT_TCP : parse_tcp; 45 | IP_PROT_UDP : parse_udp; 46 | default : ingress; 47 | } 48 | } 49 | 50 | parser parse_tcp { 51 | extract(tcp); 52 | return ingress; 53 | } 54 | 55 | parser parse_udp { 56 | extract(udp); 57 | return ingress; 58 | } 59 | 60 | parser parse_icmp { 61 | extract(icmp); 62 | return ingress; 63 | } 64 | -------------------------------------------------------------------------------- /deepmatch/plugin.c: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2017-2020 University of Pennsylvania 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | Joel Hypolite, UPenn 17 | */ 18 | 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include "includes/dmx_options.h" 28 | 29 | 30 | /////////////////////////////////////////////////////// 31 | __inline uint8_t check_tcp_http(uint16_t dport) { 32 | uint8_t start, end, middle; 33 | uint8_t size = sizeof(httpPorts)/sizeof(uint16_t); 34 | end = size - 1; 35 | 36 | start = 0; 37 | while (start <= end) { 38 | middle = start + (end - start)/2; 39 | if(dport == httpPorts[middle]) { 40 | return 1; 41 | } else if (httpPorts[middle] > dport) { 42 | end = middle - 1; 43 | } else { 44 | start = middle + 1; 45 | } 46 | } 47 | return 0; 48 | } 49 | 50 | //////////////////////////////////////////////////////////// 51 | /* 52 | tcp-flags 53 | S: 0x02 54 | A: 0x10 55 | F: 0x01 56 | R: 0x04 57 | */ 58 | __inline uint8_t fsm_trans(uint8_t fsm_state, uint8_t tcp_flags) { 59 | if ( (fsm_state == FSM_0) && (tcp_flags & 0x02) && (tcp_flags & 0x10) ) { // 0, SYN-ACK 60 | return FSM_E; 61 | } else if ( (fsm_state == FSM_C) && (tcp_flags & 0x02) && (tcp_flags & 0x10) ) { // C, SYN-ACK 62 | return FSM_E; 63 | } else if ( (fsm_state == FSM_0) && (tcp_flags & 0x02) ) { // 0, SYN 64 | return FSM_S; 65 | } else if ( (fsm_state == FSM_C) && (tcp_flags & 0x02) ) { // C, SYN 66 | return FSM_S; 67 | } else if ( (fsm_state == FSM_S) && !(tcp_flags & 0x01) && !(tcp_flags & 0x02) && !(tcp_flags & 0x04) ) { 68 | return FSM_E; 69 | } else if ( (fsm_state == FSM_E) && !(tcp_flags & 0x01) && !(tcp_flags & 
0x02) && !(tcp_flags & 0x04) ) { 70 | return FSM_E; 71 | } else if ( (tcp_flags & 0x01) && !(fsm_state == FSM_C) && !(fsm_state == FSM_W) ) { // FIN 72 | return FSM_F; 73 | } else if ( (fsm_state == FSM_W) && (tcp_flags & 0x01) ) { // FIN 74 | return FSM_C; 75 | } else if (fsm_state == FSM_F) { // FIN 76 | return FSM_C; 77 | } else if (tcp_flags & 0x04) { // RST 78 | return FSM_C; 79 | } else { 80 | return fsm_state; 81 | } 82 | } 83 | 84 | 85 | //////////////////////////////////////////////////////////// 86 | __inline void init_flow_ht(uint16_t flow_id) { 87 | #if defined(PHAST_ATOMIC_OPS) 88 | memset_mem((__mem void *)&flow_ht[flow_id].value, 0, 8); 89 | #else 90 | flow_ht[flow_id].ctrl_bits.active = TRUE; 91 | flow_ht[flow_id].ctrl_bits.eseq_valid = FALSE; 92 | flow_ht[flow_id].value.oooqLen = 0; 93 | flow_ht[flow_id].value.cs = 0; 94 | flow_ht[flow_id].value.eseq = 0; 95 | #endif 96 | } 97 | 98 | /////////////////////////////////////////////////////// 99 | __inline uint8_t select_dfa(uint16_t tcp_dport, uint8_t dfa_choice) { 100 | #if defined(PHAST_DFA_MAL_BACKDOOR) 101 | if (dfa_choice == 0) { // tcp 102 | if (check_tcp_http(tcp_dport) == 0) { 103 | dfa_choice = 1; 104 | } 105 | } 106 | #elif defined(PHAST_DFA_MAL_OTHER) 107 | if (dfa_choice == 0) { // tcp 108 | if (tcp_dport == 25) { 109 | dfa_choice = 1; 110 | } else if (check_tcp_http(tcp_dport) == 0) { 111 | dfa_choice = 2; 112 | } 113 | } else if (dfa_choice == 1) { // udp 114 | dfa_choice = 3; 115 | } 116 | #elif defined(PHAST_DFA_MAL_CNC) 117 | if (dfa_choice == 0) { // tcp 118 | if (check_tcp_http(tcp_dport) == 0) { 119 | dfa_choice = 1; 120 | } 121 | } else if (dfa_choice == 1) { // udp 122 | dfa_choice = 2; 123 | } else if (dfa_choice == 2) { // icmp 124 | dfa_choice = 3; 125 | } 126 | #endif 127 | 128 | return dfa_choice; // default if nothing hits 129 | } 130 | 131 | 132 | /////////////////////////////////////////////////////// 133 | __inline uint16_t get_flow_hash(EXTRACTED_HEADERS_T *headers, 
uint8_t rev_head) { 134 | PIF_PLUGIN_ipv4_T *ipv4 = pif_plugin_hdr_get_ipv4(headers); 135 | PIF_PLUGIN_tcp_T *tcp = pif_plugin_hdr_get_tcp(headers); 136 | uint32_t hash_key[3]; 137 | volatile uint32_t hv, hv_high; 138 | volatile uint16_t hv_low; // hash_value for flow id 139 | 140 | // identify the flow for this packet 141 | if (rev_head == TRUE) { // account for tcp->flag=RST 142 | hash_key[0] = ipv4->dstAddr; 143 | hash_key[1] = ipv4->srcAddr; 144 | hash_key[2] = (tcp->dstPort << 16) | tcp->srcPort; 145 | } else { 146 | hash_key[0] = ipv4->srcAddr; 147 | hash_key[1] = ipv4->dstAddr; 148 | hash_key[2] = (tcp->srcPort << 16) | tcp->dstPort; 149 | } 150 | 151 | hv = hash_me_crc32((void *) hash_key, sizeof(hash_key), 1); 152 | hv_high = (hv & FLOW_HASH_HIGH) >> FLOW_HASH_SHIFT; 153 | hv_low = hv & FLOW_HASH_LOW; 154 | hv_low ^= (uint16_t) hv_high; 155 | 156 | return (hv_low & FLOW_HASH_TABLE_SIZE); 157 | } 158 | 159 | /////////////////////////////////////////////////////// 160 | __inline uint16_t get_pkt_hash(EXTRACTED_HEADERS_T *headers, uint32_t tcp_seq, uint8_t rev_head) { 161 | PIF_PLUGIN_ipv4_T *ipv4 = pif_plugin_hdr_get_ipv4(headers); 162 | PIF_PLUGIN_tcp_T *tcp = pif_plugin_hdr_get_tcp(headers); 163 | uint32_t hash_key[4]; 164 | volatile uint32_t hv, hv_high; 165 | volatile uint16_t hv_low; // hash_value for flow id 166 | 167 | // identify the flow for this packet 168 | if (rev_head == TRUE) { // account for tcp->flag=RST 169 | hash_key[0] = ipv4->dstAddr; 170 | hash_key[1] = ipv4->srcAddr; 171 | hash_key[2] = (tcp->dstPort << 16) | tcp->srcPort; 172 | } else { 173 | hash_key[0] = ipv4->srcAddr; 174 | hash_key[1] = ipv4->dstAddr; 175 | hash_key[2] = (tcp->srcPort << 16) | tcp->dstPort; 176 | } 177 | hash_key[3] = tcp_seq; 178 | 179 | hv = hash_me_crc32((void *) hash_key, sizeof(hash_key), 1); 180 | hv_high = (hv & PKT_HASH_HIGH) >> PKT_HASH_SHIFT; 181 | hv_low = hv & PKT_HASH_LOW; 182 | hv_low = (uint16_t) hv_high; 183 | 184 | return (hv_low & 
PKT_HASH_TABLE_SIZE); 185 | } 186 | 187 | 188 | /////////////////////////////////////////////////////// 189 | void delete_ooo (EXTRACTED_HEADERS_T *headers, uint16_t flow_id) { 190 | if (flow_ht[flow_id].value.oooqLen == 0) { 191 | return; 192 | } else { 193 | PIF_PLUGIN_ipv4_T *ipv4 = pif_plugin_hdr_get_ipv4(headers); 194 | PIF_PLUGIN_tcp_T *tcp = pif_plugin_hdr_get_tcp(headers); 195 | __xrw uint32_t gen_xfer; // test-set-lock register 196 | __xrw uint32_t pstack_xfer; // test-set-lock register 197 | __xwrite uint32_t clear_mem[4] = {0}; 198 | uint16_t i; 199 | 200 | for(i=0; iflags & 0x02) { 278 | tcs = 0; 279 | } else { 280 | tcs = cstate; 281 | } 282 | 283 | fs_check = select_dfa((flow_ht[flow_id].key[2] & 0x0000FFFF), 0); // pass flow.dport; protocol must be TCP 284 | } else { 285 | tcs = 0; 286 | if(pif_plugin_hdr_udp_present(headers)) { 287 | fs_check = select_dfa(0,1); // not tcp so ignore the port, set udp(1) 288 | } else if(pif_plugin_hdr_icmp_present(headers)) { 289 | fs_check = select_dfa(0,2); // not tcp so ignore the port, set icmp(2) 290 | } else if(pif_plugin_hdr_tcp_present(headers)) { // For TCP when xflow is turned off 291 | fs_check = select_dfa(tcp->dstPort,0); 292 | } else { 293 | fs_check = select_dfa(0,3); // not tcp so ignore the port, set ipv4(3) 294 | } 295 | } 296 | 297 | if(DM_BIT_CHECK(opts, 1)) { // if (checking_ooo == TRUE) { 298 | payload = (__mem uint8_t *)ctm_checker; 299 | payload += i_start; 300 | } else { 301 | // get pointer to payload 302 | payload = pif_pkt_info_global.pkt_buf; 303 | payload += pif_pkt_info_global.pkt_pl_off; 304 | } 305 | 306 | while (psize) { 307 | to_read = (psize > CHUNK_B) ? 
CHUNK_B : psize; 308 | mem_read8(&pl_data, payload, to_read); 309 | 310 | payload += to_read; 311 | psize -= to_read; 312 | 313 | for (i=0; i < CHUNK_LW; i++) { 314 | pl_mem = pl_data[i]; 315 | 316 | tcs = dfa_trans[0][tcs][(pl_mem >> 24) & 0xff]; 317 | val |= tcs; 318 | if (--to_read == 0) { 319 | break; 320 | } 321 | 322 | tcs = dfa_trans[0][tcs][(pl_mem >> 16) & 0xff]; 323 | val |= tcs; 324 | if (--to_read == 0) { 325 | break; 326 | } 327 | 328 | tcs = dfa_trans[0][tcs][(pl_mem >> 8) & 0xff]; 329 | val |= tcs; 330 | if (--to_read == 0) { 331 | break; 332 | } 333 | 334 | tcs = dfa_trans[0][tcs][pl_mem & 0xff]; 335 | val |= tcs; 336 | if (--to_read == 0) { 337 | break; 338 | } 339 | } 340 | 341 | } 342 | 343 | retval = (tcs << 16) | (val >> 15); 344 | return retval; // retval holds 1) current DFA state, 2) DFA result: HIT/MISS 345 | } 346 | 347 | 348 | /////////////////////////////////////////////////////// 349 | uint8_t process_ooo(EXTRACTED_HEADERS_T *headers, uint16_t flow_id, uint16_t hv_flow) { 350 | 351 | // do not process ooo_queue if eseq_valid is false 352 | if ( flow_ht[flow_id].ctrl_bits.eseq_valid == FALSE) { 353 | return FALSE; 354 | } else { 355 | PIF_PLUGIN_ipv4_T *ipv4 = pif_plugin_hdr_get_ipv4(headers); 356 | PIF_PLUGIN_tcp_T *tcp = pif_plugin_hdr_get_tcp(headers); 357 | uint32_t retval=0; 358 | 359 | // read eseaq here in case some other thread updates it when we lose the lock later 360 | uint32_t c_eseq = flow_ht[flow_id].value.eseq; 361 | 362 | #if defined(PHAST_LOCK) 363 | // test set lock register 364 | __xrw uint32_t gen_xfer; 365 | __xrw uint32_t pstack_xfer; 366 | #endif 367 | volatile uint16_t hv_pkt; // hash_value for packet storage 368 | uint16_t p_bucket, p_p_bucket; 369 | uint8_t p_found_entry; 370 | 371 | while (flow_ht[flow_id].value.oooqLen > 0) { 372 | p_p_bucket = 0; 373 | p_found_entry = FALSE; 374 | hv_pkt = get_pkt_hash(headers, c_eseq, FALSE); 375 | 376 | 
//////////////////////////////////////////////////////////////////////////////////// 377 | // PKT BUCKET: Find the bucket 378 | p_bucket = pht_idx[flow_ht[flow_id].partition][hv_pkt]; 379 | 380 | while(p_bucket != 0) { 381 | // the current live packet idetifies the flow-packet-pool to check against 382 | if( (flow_ht[flow_id].key[0] == ipv4->srcAddr) && 383 | (flow_ht[flow_id].key[1] == ipv4->dstAddr) && 384 | (flow_ht[flow_id].key[2] == (tcp->srcPort << 16) | tcp->dstPort) && 385 | (pkt_ht[flow_ht[flow_id].partition][p_bucket].key == c_eseq) ) { 386 | p_found_entry = TRUE; 387 | break; 388 | } else { 389 | p_p_bucket = p_bucket; 390 | p_bucket = pkt_ht[flow_ht[flow_id].partition][p_bucket].next_loc; 391 | } 392 | } 393 | 394 | // PKT BUCKET: should have correct bucket now 395 | /////////////////////////////////////////////////////////////////////////////////// 396 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.active == TRUE) { 397 | __xwrite uint32_t clear_mem[4] = {0}; 398 | 399 | if(pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len == 0) { 400 | 401 | uint8_t t_flags = 0; 402 | 403 | if( pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.fin == TRUE ) { 404 | #if defined(PHAST_ATOMIC_OPS) 405 | mem_incr32((__mem void *)&flow_ht[flow_id].value.eseq); 406 | #else 407 | c_eseq++; 408 | #endif 409 | DM_BIT_SET(retval, DM_HANDLE_FIN); 410 | } else if( pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.rst == TRUE ) { 411 | DM_BIT_SET(retval, DM_HANDLE_RST); 412 | } 413 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.fin == TRUE) { 414 | t_flags |= (1 << 0); 415 | } 416 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.syn == TRUE) { 417 | t_flags |= (1 << 1); 418 | } 419 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.rst == TRUE) { 420 | t_flags |= (1 << 2); 421 | } 422 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.ack == TRUE) { 423 | t_flags |= (1 << 4); 424 | } 425 | 
flow_ht[flow_id].ctrl_bits.FSM = fsm_trans(flow_ht[flow_id].ctrl_bits.FSM, t_flags); 426 | 427 | } else if(pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len > 0) { 428 | uint16_t cstate = flow_ht[flow_id].value.cs; 429 | 430 | #if defined(PHAST_LOCK) 431 | // RELEASE MUTEX -- flow /// 432 | pstack_xfer = 0; 433 | mem_write_atomic(&pstack_xfer,(__mem void *)(f_lock+hv_flow), sizeof(pstack_xfer)); 434 | //////////////////// 435 | #endif 436 | 437 | // get the packet 438 | pktdma_mu_to_ctm(ctm_checker, (__mem void *)emem_pool[flow_ht[flow_id].partition][p_bucket], OOO_SLOT_SIZE); 439 | 440 | // process the packet 441 | retval = dm_loop(headers, flow_id, cstate, pkt_ht[flow_ht[flow_id].partition][p_bucket].value.offset, pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len, 3); 442 | 443 | 444 | #if defined(PHAST_LOCK) 445 | // AQUIRE MUTEX -- flow //// 446 | pstack_xfer = 1; 447 | while(1) { 448 | mem_test_set(&pstack_xfer, (__mem void *)(f_lock+hv_flow), sizeof(pstack_xfer)); 449 | if(pstack_xfer == 0) { 450 | break; 451 | } 452 | sleep(LOCK_SLEEP); 453 | } 454 | //////////////////// 455 | #endif 456 | 457 | if(DM_BIT_CHECK(retval, DM_RESULT)) { // fs_check==HIT 458 | if( pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.fin == TRUE ) { 459 | c_eseq = c_eseq + pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len + 1; 460 | DM_BIT_SET(retval, DM_HANDLE_FIN); 461 | } else { 462 | c_eseq = c_eseq + pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len; 463 | } 464 | 465 | flow_ht[flow_id].ctrl_bits.FSM = FSM_C; 466 | } else { 467 | uint8_t t_flags = 0; 468 | 469 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.fin == TRUE) { 470 | t_flags |= (1 << 0); 471 | } 472 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.syn == TRUE) { 473 | t_flags |= (1 << 1); 474 | } 475 | if (pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.rst == TRUE) { 476 | t_flags |= (1 << 2); 477 | } 478 | if 
(pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.ack == TRUE) { 479 | t_flags |= (1 << 4); 480 | } 481 | 482 | if( pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.fin == TRUE ) { 483 | c_eseq = c_eseq + pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len + 1; 484 | DM_BIT_SET(retval, DM_HANDLE_FIN); 485 | } else if( pkt_ht[flow_ht[flow_id].partition][p_bucket].ctrl_bits.rst == TRUE ) { 486 | c_eseq = c_eseq + pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len; 487 | DM_BIT_SET(retval, DM_HANDLE_RST); 488 | }else { 489 | c_eseq = c_eseq + pkt_ht[flow_ht[flow_id].partition][p_bucket].value.len; 490 | } 491 | 492 | flow_ht[flow_id].ctrl_bits.FSM = fsm_trans(flow_ht[flow_id].ctrl_bits.FSM, t_flags); 493 | flow_ht[flow_id].value.cs = (retval >> 16); // tcs; 494 | } 495 | } 496 | 497 | #if defined(PHAST_LOCK) 498 | // AQUIRE MUTEX -- packet //// 499 | gen_xfer = 1; 500 | while(1) { 501 | mem_test_set(&gen_xfer, (__mem void *)(p_lock[flow_ht[flow_id].partition]+hv_pkt), sizeof(gen_xfer)); 502 | if(gen_xfer == 0) { 503 | break; 504 | } 505 | sleep(LOCK_SLEEP); 506 | } 507 | //////////////////// 508 | #endif 509 | 510 | //////////////////////////////////////////////////////////////////////////////////// 511 | // PKT BUCKET: delete bucket 512 | #if defined(PHAST_ATOMIC_OPS) 513 | memcpy_mem_mem((__mem void *)&pkt_ht[flow_ht[flow_id].partition][p_p_bucket].next_loc, (__mem void *)&pkt_ht[flow_ht[flow_id].partition][p_bucket].next_loc, 2); 514 | mem_write_atomic(clear_mem, &pkt_ht[flow_ht[flow_id].partition][p_bucket], sizeof(clear_mem)); 515 | #else 516 | 517 | if (p_p_bucket != 0) { 518 | pkt_ht[flow_ht[flow_id].partition][p_p_bucket].next_loc = pkt_ht[flow_ht[flow_id].partition][p_bucket].next_loc; 519 | } 520 | 521 | if(pht_idx[flow_ht[flow_id].partition][hv_pkt] == p_bucket) { 522 | if(pkt_ht[flow_ht[flow_id].partition][p_bucket].next_loc != 0) { 523 | pht_idx[flow_ht[flow_id].partition][hv_pkt] = pkt_ht[flow_ht[flow_id].partition][p_bucket].next_loc; 
524 | } else { 525 | pht_idx[flow_ht[flow_id].partition][hv_pkt] = 0; //p_p_bucket; 526 | } 527 | } 528 | 529 | mem_write_atomic(clear_mem, &pkt_ht[flow_ht[flow_id].partition][p_bucket], sizeof(clear_mem)); 530 | 531 | #endif 532 | 533 | #if defined(PHAST_LOCK) 534 | // AQUIRE MUTEX -- pstack //// 535 | pstack_xfer = 1; 536 | while(1) { 537 | mem_test_set(&pstack_xfer,(__mem void *)&pstack_lock[flow_ht[flow_id].partition], sizeof(pstack_xfer)); 538 | if(pstack_xfer == 0) { 539 | break; 540 | } 541 | sleep(LOCK_SLEEP); 542 | } 543 | //////////////////// 544 | #endif 545 | 546 | p_dealloc(flow_ht[flow_id].partition, p_bucket); 547 | 548 | #if defined(PHAST_LOCK) 549 | // RELEASE MUTEX -- pstack /// 550 | pstack_xfer = 0; 551 | mem_write_atomic(&pstack_xfer,(__mem void *)&pstack_lock[flow_ht[flow_id].partition], sizeof(pstack_xfer)); 552 | //////////////////// 553 | #endif 554 | 555 | #if defined(PHAST_ATOMIC_OPS) 556 | mem_decr32((__mem void *)&flow_ht[flow_id].value.oooqLen); 557 | #else 558 | flow_ht[flow_id].value.oooqLen -= 1; 559 | #endif 560 | 561 | #if defined(PHAST_LOCK) 562 | // RELEASE MUTEX -- packet /// 563 | gen_xfer = 0; 564 | mem_write_atomic(&gen_xfer, (__mem void *)(p_lock[flow_ht[flow_id].partition]+hv_pkt), sizeof(gen_xfer)); 565 | //////////////////// 566 | #endif 567 | 568 | // PKT BUCKET: 569 | /////////////////////////////////////////////////////////////////////////////////// 570 | if(DM_BIT_CHECK(retval, DM_RESULT)) { 571 | flow_ht[flow_id].value.eseq = c_eseq; 572 | return retval; 573 | } 574 | } else { 575 | break; // next packet not found in hash storage 576 | } 577 | }// WHILE 578 | 579 | DM_BIT_CLEAR(retval, DM_RESULT); // clear bit to return that there was not a hit 580 | flow_ht[flow_id].value.eseq = c_eseq; 581 | 582 | return retval; //return FALSE; 583 | } 584 | } 585 | 586 | /////////////////////////////////////////////////////// 587 | int pif_plugin_processPkt(EXTRACTED_HEADERS_T *headers, MATCH_DATA_T *data){ 588 | 
PIF_PLUGIN_ipv4_T *ipv4 = pif_plugin_hdr_get_ipv4(headers); 589 | PIF_PLUGIN_tcp_T *tcp = pif_plugin_hdr_get_tcp(headers); 590 | 591 | #if defined(PHAST_LOCK) 592 | // test set lock registers 593 | __xrw uint32_t f_m_xfer; // replaces: flow_xfer, flow_xfer2, mpool_xfer 594 | __xrw uint32_t fstack_p_xfer; // replaces: fstack_xfer, pkt_xfer 595 | __xrw uint32_t pstack_xfer; // replaces: pstack_xfer 596 | #endif; 597 | 598 | #if defined(PHAST_ATOMIC_OPS) 599 | __xrw uint32_t flow_key_rw[3]; 600 | #endif 601 | 602 | volatile uint16_t hv; 603 | volatile uint16_t hv2; // hash_values for flow id, reverse direction, packet storage (includes seqNo) 604 | uint32_t ret_result=0; // see dm_loop() for what it returns 605 | uint16_t payload_size; 606 | uint16_t f_bucket, p_bucket, t_p_bucket; 607 | uint8_t dm_ops = 0; 608 | 609 | // Only track TCP flows for now 610 | if(pif_plugin_hdr_tcp_present(headers) && (PHAST_XFLOW_ENABLED == TRUE)) { // PROCESS TCP 611 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 612 | // FIND OR CREATE FLOW HASH ENTRY 613 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 614 | 615 | // calculate TCP payload size 616 | payload_size = ipv4->totalLen - ( (ipv4->ihl + tcp->dataOffset) * 4); 617 | 618 | // identify the flow for this packet 619 | DM_BIT_CLEAR(dm_ops, DM_FOUND_ENTRY); //f_found_entry = FALSE; 620 | t_p_bucket = 0; 621 | hv = get_flow_hash(headers, FALSE); 622 | 623 | //////////////////////////////////////////////////////////////////////////////////// 624 | // FLOW BUCKET: search for the right flow table location (track cur and prev bucket) 625 | #if defined(PHAST_LOCK) 626 | // AQUIRE MUTEX -- flow //// 627 | f_m_xfer = 1; 628 | while(1) { 629 | mem_test_set(&f_m_xfer, (__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 630 | if(f_m_xfer == 0) { 631 | break; 632 | } 633 | sleep(LOCK_SLEEP); 634 | } 635 | 
//////////////////// 636 | #endif 637 | 638 | // lock must be here to lock the READ of fht_idx[hv] which some other thread may update 639 | f_bucket = fht_idx[hv]; 640 | 641 | // 1) check to see if flow entry already exists 642 | while(f_bucket != 0) { 643 | #if defined(PHAST_ATOMIC_OPS) 644 | mem_read_atomic(flow_key_rw, flow_ht[f_bucket].key, sizeof(flow_key_rw)); 645 | if( (flow_key_rw[0] == ipv4->srcAddr) && (flow_key_rw[1] == ipv4->dstAddr) && (flow_key_rw[2] == (tcp->srcPort << 16) | tcp->dstPort) ) { 646 | #else 647 | if( (flow_ht[f_bucket].key[0] == ipv4->srcAddr) && (flow_ht[f_bucket].key[1] == ipv4->dstAddr) && 648 | (flow_ht[f_bucket].key[2] == (tcp->srcPort << 16) | tcp->dstPort) ) { 649 | #endif 650 | DM_BIT_SET(dm_ops, DM_FOUND_ENTRY) ; //f_found_entry = TRUE; 651 | break; 652 | } else { 653 | t_p_bucket = f_bucket; 654 | f_bucket = flow_ht[f_bucket].next_loc; 655 | } 656 | } 657 | 658 | // 2) flow entry not found, so create a new one (unless it is a RST) 659 | if( (DM_BIT_CHECK(dm_ops, DM_FOUND_ENTRY) == FALSE) && !(tcp->flags & 0x04) ) { 660 | #if defined(PHAST_ATOMIC_OPS) 661 | __addr40 uint32_t *flow_key_addr; 662 | #endif 663 | 664 | #if defined(PHAST_LOCK) 665 | // AQUIRE MUTEX -- fstack //// 666 | fstack_p_xfer = 1; 667 | while(1) { 668 | mem_test_set(&fstack_p_xfer,(__mem void *)&fstack_lock, sizeof(fstack_p_xfer)); 669 | if(fstack_p_xfer == 0) { 670 | break; 671 | } 672 | sleep(LOCK_SLEEP); 673 | } 674 | //////////////////// 675 | #endif 676 | f_bucket = f_alloc(); 677 | #if defined(PHAST_LOCK) 678 | // RELEASE MUTEX -- fstack /// 679 | fstack_p_xfer = 0; 680 | mem_write_atomic(&fstack_p_xfer,(__mem void *)&fstack_lock, sizeof(fstack_p_xfer)); 681 | //////////////////// 682 | #endif 683 | 684 | if(fht_idx[hv] == 0) { 685 | fht_idx[hv] = f_bucket; 686 | } else { 687 | flow_ht[t_p_bucket].next_loc = f_bucket; 688 | } 689 | flow_ht[f_bucket].next_loc = 0; 690 | 691 | #if defined(PHAST_ATOMIC_OPS) 692 | flow_key_addr = flow_ht[f_bucket].key; 693 
| flow_key_rw[0] = ipv4->srcAddr; 694 | flow_key_rw[1] = ipv4->dstAddr; 695 | flow_key_rw[2] = (tcp->srcPort << 16) | tcp->dstPort; 696 | mem_write_atomic(flow_key_rw, flow_key_addr, sizeof(flow_key_rw)); 697 | #else 698 | flow_ht[f_bucket].key[0] = ipv4->srcAddr; 699 | flow_ht[f_bucket].key[1] = ipv4->dstAddr; 700 | flow_ht[f_bucket].key[2] = (tcp->srcPort << 16) | tcp->dstPort; 701 | #endif 702 | flow_ht[f_bucket].partition = f_bucket; // # of flow and # of pkt_pool partitions are the same 703 | flow_ht[f_bucket].ctrl_bits.active = TRUE; 704 | } 705 | 706 | // FLOW BUCKET: should have correct bucket now 707 | ////////////////////////////////////////////////////////////////////////////////// 708 | 709 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 710 | // DETERMINE EXECUTION STRATEGY 711 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 712 | 713 | if( (DM_BIT_CHECK(dm_ops, DM_FOUND_ENTRY) == FALSE) && (tcp->flags & 0x04) ) { 714 | // handle the case of a RST being the only packet in the flow-less session 715 | // NOTE: hv and f_bucket have no real meaning, since no flow was ever established 716 | DM_BIT_SET(dm_ops, DM_HANDLE_RST); 717 | if(payload_size > 0) { 718 | DM_BIT_SET(dm_ops, DM_DO_DFA); 719 | } 720 | } else if( (tcp->flags & 0x02) || ( (tcp->seqNo == flow_ht[f_bucket].value.eseq) && (flow_ht[f_bucket].ctrl_bits.eseq_valid == TRUE) ) ) { 721 | DM_BIT_SET(dm_ops, DM_DO_DFA); 722 | } else if( DM_BIT_CHECK(dm_ops, DM_FOUND_ENTRY) && (flow_ht[f_bucket].ctrl_bits.active == FALSE) ) { 723 | DM_BIT_SET(dm_ops, DM_CHECK_OOO); 724 | } else if( (tcp->seqNo > flow_ht[f_bucket].value.eseq) || (flow_ht[f_bucket].ctrl_bits.eseq_valid == FALSE) ) { 725 | DM_BIT_SET(dm_ops, DM_DO_DMA); 726 | } 727 | 728 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 729 | // PRIMARY 
EXECUTION STRATEGY 730 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 731 | 732 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 733 | if(DM_BIT_CHECK(dm_ops, DM_DO_DFA)) { 734 | if( tcp->flags & 0x02 ) { // SYN or SYN-ACK 735 | if ( (flow_ht[f_bucket].ctrl_bits.FSM == FSM_0) || (flow_ht[f_bucket].ctrl_bits.FSM == FSM_C) ) { 736 | if (flow_ht[f_bucket].ctrl_bits.FSM == FSM_C) { 737 | init_flow_ht(f_bucket); 738 | } 739 | if (payload_size > 0) { 740 | uint16_t cstate = flow_ht[f_bucket].value.cs; 741 | 742 | #if defined(PHAST_LOCK) 743 | // RELEASE MUTEX -- flow /// 744 | f_m_xfer = 0; 745 | mem_write_atomic(&f_m_xfer,(__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 746 | //////////////////// 747 | #endif 748 | ret_result = dm_loop(headers, f_bucket, cstate, 0, payload_size, 1); 749 | 750 | #if defined(PHAST_LOCK) 751 | // AQUIRE MUTEX -- flow //// 752 | f_m_xfer = 1; 753 | while(1) { 754 | mem_test_set(&f_m_xfer, (__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 755 | if(f_m_xfer == 0) { 756 | break; 757 | } 758 | sleep(LOCK_SLEEP); 759 | } 760 | //////////////////// 761 | #endif 762 | } 763 | DM_BIT_SET(dm_ops, DM_CHECK_OOO); 764 | } 765 | } else if(DM_BIT_CHECK(ret_result, DM_HANDLE_RST)) { // (RST may have content -- not part of FLOW) 766 | if(payload_size > 0) { 767 | #if defined(PHAST_LOCK) 768 | // RELEASE MUTEX -- flow /// 769 | f_m_xfer = 0; 770 | mem_write_atomic(&f_m_xfer,(__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 771 | //////////////////// 772 | #endif 773 | ret_result = dm_loop(headers, 0, 0, 0, payload_size, 0); // not part of FLOW 774 | 775 | #if defined(PHAST_LOCK) 776 | // AQUIRE MUTEX -- flow //// 777 | f_m_xfer = 1; 778 | while(1) { 779 | mem_test_set(&f_m_xfer, (__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 780 | if(f_m_xfer == 0) { 781 | break; 782 | } 783 | sleep(LOCK_SLEEP); 784 | } 785 | //////////////////// 786 | 
#endif 787 | } 788 | } else { 789 | if(payload_size > 0) { 790 | uint16_t cstate = flow_ht[f_bucket].value.cs; 791 | #if defined(PHAST_LOCK) 792 | // RELEASE MUTEX -- flow /// 793 | f_m_xfer = 0; 794 | mem_write_atomic(&f_m_xfer,(__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 795 | //////////////////// 796 | #endif 797 | ret_result = dm_loop(headers, f_bucket, cstate, 0, payload_size, 1); 798 | 799 | #if defined(PHAST_LOCK) 800 | // AQUIRE MUTEX -- flow //// 801 | f_m_xfer = 1; 802 | while(1) { 803 | mem_test_set(&f_m_xfer, (__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 804 | if(f_m_xfer == 0) { 805 | break; 806 | } 807 | sleep(LOCK_SLEEP); 808 | } 809 | //////////////////// 810 | #endif 811 | } 812 | } 813 | DM_BIT_SET(dm_ops,DM_CHECK_OOO); 814 | 815 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 816 | // UPDATE SHARED STATE 817 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 818 | 819 | if(payload_size == 0) { 820 | if ( tcp->flags & 0x02 ) { /////// SYN 821 | flow_ht[f_bucket].value.eseq = tcp->seqNo + 1; 822 | flow_ht[f_bucket].ctrl_bits.eseq_valid = TRUE; 823 | } else if ( tcp->flags & 0x01 ) { /////// FIN 824 | flow_ht[f_bucket].value.eseq = tcp->seqNo + 1; 825 | DM_BIT_SET(dm_ops, DM_HANDLE_FIN); 826 | } else if ( tcp->flags & 0x04 ) { /////// RST 827 | DM_BIT_SET(dm_ops, DM_HANDLE_RST); 828 | } 829 | 830 | flow_ht[f_bucket].ctrl_bits.FSM = fsm_trans(flow_ht[f_bucket].ctrl_bits.FSM, tcp->flags); 831 | } else if(DM_BIT_CHECK(ret_result,DM_RESULT) ) { // fs_check==HIT 832 | DM_BIT_SET(dm_ops, DM_RESULT); //RESULT = HIT (note that it is MISS:0 by default) 833 | 834 | if (DM_BIT_CHECK(dm_ops, DM_FOUND_ENTRY)) { // found_entry==TRUE (not RST w/o flow) 835 | if ( tcp->flags & 0x02 ) { /////// SYN 836 | flow_ht[f_bucket].value.eseq = tcp->seqNo + payload_size + 1; 837 | flow_ht[f_bucket].ctrl_bits.eseq_valid = TRUE; // NOT NECESSARY 
838 | } else if ( tcp->flags & 0x01 ) { /////// FIN 839 | flow_ht[f_bucket].value.eseq = tcp->seqNo + payload_size + 1; 840 | DM_BIT_SET(dm_ops, DM_HANDLE_FIN); 841 | } else { 842 | flow_ht[f_bucket].value.eseq = tcp->seqNo + payload_size; 843 | } 844 | 845 | flow_ht[f_bucket].ctrl_bits.FSM = FSM_C; 846 | } 847 | } else if( (DM_BIT_CHECK(ret_result,DM_RESULT) == FALSE) && (DM_BIT_CHECK(dm_ops,DM_FOUND_ENTRY)) ) { 848 | if ( (tcp->flags & 0x02 ) || (tcp->flags & 0x01) ) { /////// SYN || FIN 849 | flow_ht[f_bucket].value.eseq = tcp->seqNo + payload_size + 1; 850 | 851 | if ( tcp->flags & 0x01 ) { /////// FIN 852 | DM_BIT_SET(dm_ops, DM_HANDLE_FIN); 853 | } 854 | } else if ( tcp->flags & 0x04 ) { //// RST 855 | flow_ht[f_bucket].value.eseq = tcp->seqNo + payload_size; 856 | DM_BIT_SET(dm_ops, DM_HANDLE_RST); 857 | } else { 858 | flow_ht[f_bucket].value.eseq = tcp->seqNo + payload_size; 859 | } 860 | 861 | flow_ht[f_bucket].ctrl_bits.FSM = fsm_trans(flow_ht[f_bucket].ctrl_bits.FSM, tcp->flags); 862 | flow_ht[f_bucket].value.cs = (ret_result >> 16); // tcs; 863 | } 864 | 865 | if( (DM_BIT_CHECK(dm_ops,DM_FOUND_ENTRY) == FALSE) && (tcp->flags & 0x04) ) { 866 | dm_ops &= 0x41; // keep DM_RESULT (bit 0) and DM_HANDLE_RST (bit 6) 867 | } 868 | 869 | if( DM_BIT_CHECK(dm_ops,DM_CHECK_OOO) && (flow_ht[f_bucket].ctrl_bits.FSM == FSM_C) ) { 870 | DM_BIT_CLEAR(dm_ops,DM_CHECK_OOO); 871 | } 872 | 873 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 874 | } else if(DM_BIT_CHECK(dm_ops, DM_DO_DMA)) { 875 | 876 | #if defined(PHAST_LOCK) 877 | // RELEASE MUTEX -- flow /// 878 | f_m_xfer = 0; 879 | mem_write_atomic(&f_m_xfer,(__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 880 | //////////////////// 881 | #endif 882 | 883 | DM_BIT_CLEAR(dm_ops, DM_FOUND_ENTRY); 884 | t_p_bucket = 0; 885 | hv2 = get_pkt_hash(headers, tcp->seqNo, FALSE); 886 | 887 | #if defined(PHAST_LOCK) 888 | // AQUIRE MUTEX -- packet //// 889 | 
fstack_p_xfer = 1; 890 | while(1) { 891 | mem_test_set(&fstack_p_xfer, (__mem void *)(p_lock[flow_ht[f_bucket].partition]+hv2), sizeof(fstack_p_xfer)); 892 | if(fstack_p_xfer == 0) { 893 | break; 894 | } 895 | sleep(LOCK_SLEEP); 896 | } 897 | //////////////////// 898 | #endif 899 | //////////////////////////////////////////////////////////////////////////////////// 900 | // PKT BUCKET: Goto bucket then append to the end 901 | p_bucket = pht_idx[flow_ht[f_bucket].partition][hv2]; 902 | 903 | while(p_bucket != 0) { 904 | t_p_bucket = p_bucket; 905 | p_bucket = pkt_ht[flow_ht[f_bucket].partition][p_bucket].next_loc; 906 | } 907 | 908 | #if defined(PHAST_LOCK) 909 | // AQUIRE MUTEX -- pstack //// 910 | pstack_xfer = 1; 911 | while(1) { 912 | mem_test_set(&pstack_xfer,(__mem void *)&pstack_lock[flow_ht[f_bucket].partition], sizeof(pstack_xfer)); 913 | if(pstack_xfer == 0) { 914 | break; 915 | } 916 | sleep(LOCK_SLEEP); 917 | } 918 | //////////////////// 919 | #endif 920 | p_bucket = p_alloc(flow_ht[f_bucket].partition); 921 | 922 | #if defined(PHAST_LOCK) 923 | // RELEASE MUTEX -- pstack /// 924 | pstack_xfer = 0; 925 | mem_write_atomic(&pstack_xfer,(__mem void *)&pstack_lock[flow_ht[f_bucket].partition], sizeof(pstack_xfer)); 926 | //////////////////// 927 | #endif 928 | 929 | 930 | if(pht_idx[flow_ht[f_bucket].partition][hv2] == 0) { 931 | pht_idx[flow_ht[f_bucket].partition][hv2] = p_bucket; 932 | } else { 933 | pkt_ht[flow_ht[f_bucket].partition][t_p_bucket].next_loc = p_bucket; 934 | } 935 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].next_loc = 0; // does this default to "0"? 
936 | // now set the packet metadata using p_bucket 937 | DM_BIT_SET(dm_ops, DM_FOUND_ENTRY); //f_found_entry = TRUE; 938 | 939 | // PKT BUCKET: should have correct bucket now 940 | /////////////////////////////////////////////////////////////////////////////////// 941 | 942 | #if defined(PHAST_LOCK) 943 | // RELEASE MUTEX -- packet /// 944 | fstack_p_xfer = 0; 945 | mem_write_atomic(&fstack_p_xfer, (__mem void *)(p_lock[flow_ht[f_bucket].partition]+hv2), sizeof(fstack_p_xfer)); 946 | //////////////////// 947 | #endif 948 | 949 | // perform the actual DMA 950 | if( (payload_size > 0) && (p_bucket != PKT_NUM_SLOTS_ERROR) ) { 951 | pktdma_ctm_to_mu((__mem void *)emem_pool[flow_ht[f_bucket].partition][p_bucket], pif_pkt_info_global.pkt_buf, pif_pkt_info_global.pkt_len + PIF_PKT_SOP(pif_pkt_info_global.pkt_buf, pif_pkt_info_global.pkt_num)); 952 | } 953 | 954 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 955 | // UPDATE SHARED STATE 956 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 957 | 958 | #if defined(PHAST_LOCK) 959 | // AQUIRE MUTEX -- packet //// 960 | fstack_p_xfer = 1; 961 | while(1) { 962 | mem_test_set(&fstack_p_xfer, (__mem void *)(p_lock[flow_ht[f_bucket].partition]+hv2), sizeof(fstack_p_xfer)); 963 | if(fstack_p_xfer == 0) { 964 | break; 965 | } 966 | sleep(LOCK_SLEEP); 967 | } 968 | //////////////////// 969 | #endif 970 | 971 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].ctrl_bits.active = TRUE; 972 | 973 | if ( tcp->flags & 0x01) { // fin 974 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].ctrl_bits.fin = TRUE; 975 | } 976 | if ( tcp->flags & 0x02) { // syn 977 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].ctrl_bits.syn = TRUE; 978 | } 979 | if ( tcp->flags & 0x04) { // rst 980 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].ctrl_bits.rst = TRUE; 981 | } 982 | if ( tcp->flags & 0x10) { // ack 983 | 
pkt_ht[flow_ht[f_bucket].partition][p_bucket].ctrl_bits.ack = TRUE; 984 | } 985 | 986 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].key = tcp->seqNo; 987 | 988 | if (payload_size > 0) { 989 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].value.offset = 14 + ( (ipv4->ihl + tcp->dataOffset) * 4 ); 990 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].value.len = payload_size; 991 | } else { 992 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].value.offset = 0; 993 | pkt_ht[flow_ht[f_bucket].partition][p_bucket].value.len = 0; 994 | } 995 | 996 | #if defined(PHAST_LOCK) 997 | // RELEASE MUTEX -- packet /// 998 | fstack_p_xfer = 0; 999 | mem_write_atomic(&fstack_p_xfer, (__mem void *)(p_lock[flow_ht[f_bucket].partition]+hv2), sizeof(fstack_p_xfer)); 1000 | //////////////////// 1001 | #endif 1002 | 1003 | #if defined(PHAST_LOCK) 1004 | // AQUIRE MUTEX -- flow //// 1005 | f_m_xfer = 1; 1006 | while(1) { 1007 | mem_test_set(&f_m_xfer, (__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 1008 | if(f_m_xfer == 0) { 1009 | break; 1010 | } 1011 | sleep(LOCK_SLEEP); 1012 | } 1013 | //////////////////// 1014 | #endif 1015 | 1016 | // update packet metadata 1017 | #if defined(PHAST_ATOMIC_OPS) 1018 | mem_incr32((__mem void *)&flow_ht[f_bucket].value.oooqLen); 1019 | #else 1020 | flow_ht[f_bucket].value.oooqLen += 1; 1021 | #endif 1022 | 1023 | if (flow_ht[f_bucket].ctrl_bits.eseq_valid == TRUE) { 1024 | DM_BIT_SET(dm_ops,DM_CHECK_OOO); 1025 | } 1026 | } 1027 | 1028 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 1029 | // SECONDARY EXECUTION STRATEGY 1030 | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// 1031 | 1032 | ////////////////////////////////////////////////////////////////////////////////////////////////////////// 1033 | if(DM_BIT_CHECK(dm_ops,DM_CHECK_OOO)) { 1034 | 1035 | // AQUIRE MUTEX -- check_ooo_lock //// 1036 | pstack_xfer = 1; 1037 | 
mem_test_set(&pstack_xfer,(__mem void *)&check_ooo_lock[flow_ht[f_bucket].partition], sizeof(pstack_xfer)); 1038 | if(pstack_xfer == 0) { 1039 | if (flow_ht[f_bucket].value.oooqLen > 0) { 1040 | ret_result = process_ooo(headers, f_bucket, hv); 1041 | 1042 | // PARSE RET_RESULT AGAIN AND SET DM_OPTS FOR del_ooo and handle_rst 1043 | if(DM_BIT_CHECK(ret_result,DM_RESULT)) { 1044 | DM_BIT_SET(dm_ops,DM_RESULT); 1045 | } 1046 | if(DM_BIT_CHECK(ret_result,DM_HANDLE_RST)) { 1047 | DM_BIT_SET(dm_ops,DM_HANDLE_RST); 1048 | } 1049 | if(DM_BIT_CHECK(ret_result,DM_HANDLE_FIN)) { 1050 | DM_BIT_SET(dm_ops,DM_HANDLE_FIN); 1051 | } 1052 | } 1053 | 1054 | // RELEASE MUTEX -- check_ooo_lock /// 1055 | pstack_xfer = 0; 1056 | mem_write_atomic(&pstack_xfer,(__mem void *)&check_ooo_lock[flow_ht[f_bucket].partition], sizeof(pstack_xfer)); 1057 | //////////////////// 1058 | } 1059 | //////////////////// 1060 | } 1061 | 1062 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 1063 | if( flow_ht[f_bucket].ctrl_bits.FSM == FSM_C ) { //if we transitioned to FSM='C' 1064 | t_p_bucket = 0; 1065 | DM_BIT_CLEAR(dm_ops,DM_FOUND_ENTRY); 1066 | 1067 | // Have to compute this again becasue we did not save t_p_bucket 1068 | ///////////////////////////////////////////////////////////////////////////////////// 1069 | // FLOW BUCKET: search for the right flow table location (track cur and prev bucket) 1070 | f_bucket = fht_idx[hv]; 1071 | 1072 | while (f_bucket != 0) { 1073 | if( (flow_ht[f_bucket].key[0] == ipv4->srcAddr) && (flow_ht[f_bucket].key[1] == ipv4->dstAddr) && 1074 | (flow_ht[f_bucket].key[2] == (tcp->srcPort << 16) | tcp->dstPort) ) { 1075 | DM_BIT_SET(dm_ops,DM_FOUND_ENTRY); 1076 | break; 1077 | } else { 1078 | t_p_bucket = f_bucket; 1079 | f_bucket = flow_ht[f_bucket].next_loc; 1080 | } 1081 | } 1082 | // FLOW BUCKET: should have correct bucket now 1083 | 
/////////////////////////////////////////////////////////////////////////////////// 1084 | 1085 | if(DM_BIT_CHECK(dm_ops,DM_FOUND_ENTRY)) { 1086 | #if defined(PHAST_ATOMIC_OPS) 1087 | __xwrite uint32_t clear_mem[7] = {0}; 1088 | #endif 1089 | 1090 | // get rid of any orphan packets 1091 | if (flow_ht[f_bucket].value.oooqLen > 0) { 1092 | delete_ooo(headers, f_bucket); 1093 | } 1094 | 1095 | // now delete the flow entry if it exists 1096 | //////////////////////////////////////////////////////////////////////////////////// 1097 | // FLOW BUCKET: delete bucket 1098 | #if defined(PHAST_ATOMIC_OPS) 1099 | memcpy_mem_mem((__mem void *)&flow_ht[t_p_bucket].next_loc, (__mem void *)&flow_ht[f_bucket].next_loc, 2); 1100 | mem_write_atomic(clear_mem, &flow_ht[f_bucket], sizeof(clear_mem)); 1101 | #else 1102 | if (t_p_bucket != 0) { 1103 | flow_ht[t_p_bucket].next_loc = flow_ht[f_bucket].next_loc; 1104 | } 1105 | 1106 | flow_ht[f_bucket].partition = 0; 1107 | flow_ht[f_bucket].value.eseq = 0; 1108 | flow_ht[f_bucket].value.oooqLen = 0; 1109 | flow_ht[f_bucket].value.cs = 0; 1110 | flow_ht[f_bucket].key[0] = 0; 1111 | flow_ht[f_bucket].key[1] = 0; 1112 | flow_ht[f_bucket].key[2] = 0; 1113 | flow_ht[f_bucket].ctrl_bits.FSM = FALSE; 1114 | flow_ht[f_bucket].ctrl_bits.eseq_valid = FALSE; 1115 | flow_ht[f_bucket].ctrl_bits.active = FALSE; 1116 | 1117 | if(fht_idx[hv] == f_bucket) { 1118 | if(flow_ht[f_bucket].next_loc != 0) { 1119 | fht_idx[hv] = flow_ht[f_bucket].next_loc; 1120 | flow_ht[f_bucket].next_loc = 0; 1121 | } else { 1122 | fht_idx[hv] = t_p_bucket; 1123 | } 1124 | } 1125 | #endif 1126 | 1127 | #if defined(PHAST_LOCK) 1128 | // AQUIRE MUTEX -- fstack //// 1129 | pstack_xfer = 1; 1130 | while(1) { 1131 | mem_test_set(&pstack_xfer,(__mem void *)&fstack_lock, sizeof(pstack_xfer)); 1132 | if(pstack_xfer == 0) { 1133 | break; 1134 | } 1135 | sleep(LOCK_SLEEP); 1136 | } 1137 | //////////////////// 1138 | #endif 1139 | 1140 | f_dealloc(f_bucket); 1141 | 1142 | #if 
defined(PHAST_LOCK) 1143 | // RELEASE MUTEX -- fstack /// 1144 | pstack_xfer = 0; 1145 | mem_write_atomic(&pstack_xfer,(__mem void *)&fstack_lock, sizeof(pstack_xfer)); 1146 | //////////////////// 1147 | #endif 1148 | 1149 | } 1150 | } 1151 | 1152 | #if defined(PHAST_LOCK) 1153 | // RELEASE MUTEX -- flow /// 1154 | f_m_xfer = 0; 1155 | mem_write_atomic(&f_m_xfer, (__mem void *)(f_lock+hv), sizeof(f_m_xfer)); 1156 | //////////////////// 1157 | #endif 1158 | 1159 | ////////////////////////////////////////////////////////////////////////////////////////////////////////// 1160 | if( DM_BIT_CHECK(dm_ops,DM_HANDLE_RST) || DM_BIT_CHECK(dm_ops,DM_HANDLE_FIN) ) { 1161 | // get hash index for other direction 1162 | DM_BIT_CLEAR(dm_ops,DM_FOUND_ENTRY); 1163 | t_p_bucket = 0; 1164 | hv2 = get_flow_hash(headers, TRUE); 1165 | 1166 | #if defined(PHAST_LOCK) 1167 | // AQUIRE MUTEX -- flow //// 1168 | f_m_xfer = 1; 1169 | while(1) { 1170 | mem_test_set(&f_m_xfer, (__mem void *)(f_lock+hv2), sizeof(f_m_xfer)); 1171 | if(f_m_xfer == 0) { 1172 | break; 1173 | } 1174 | sleep(LOCK_SLEEP); 1175 | } 1176 | //////////////////// 1177 | #endif 1178 | ///////////////////////////////////////////////////////////////////////////////////// 1179 | // FLOW BUCKET: search for the right flow table location (track cur and prev bucket) 1180 | p_bucket = fht_idx[hv2]; 1181 | 1182 | while (p_bucket != 0) { 1183 | if( (flow_ht[p_bucket].key[0] == ipv4->dstAddr) && (flow_ht[p_bucket].key[1] == ipv4->srcAddr) && 1184 | (flow_ht[p_bucket].key[2] == (tcp->dstPort << 16) | tcp->srcPort) ) { 1185 | DM_BIT_SET(dm_ops,DM_FOUND_ENTRY); 1186 | break; 1187 | } else { 1188 | t_p_bucket = p_bucket; 1189 | p_bucket = flow_ht[p_bucket].next_loc; 1190 | } 1191 | } 1192 | // FLOW BUCKET: should have correct bucket now 1193 | /////////////////////////////////////////////////////////////////////////////////// 1194 | 1195 | if(DM_BIT_CHECK(dm_ops,DM_FOUND_ENTRY)) { 1196 | if(DM_BIT_CHECK(dm_ops,DM_HANDLE_FIN)) { 1197 | if( 
flow_ht[p_bucket].ctrl_bits.FSM != FSM_C ) { 1198 | flow_ht[p_bucket].ctrl_bits.FSM = FSM_W; // FSM_W 1199 | } 1200 | } else { // to_handle_rst 1201 | #if defined(PHAST_ATOMIC_OPS) 1202 | __xwrite uint32_t clear_mem[7] = {0}; 1203 | #endif 1204 | 1205 | if(flow_ht[p_bucket].value.oooqLen > 0) { 1206 | ret_result = process_ooo(headers, p_bucket, hv2); 1207 | 1208 | // PARSE RET_RESULT AGAIN TO SEE IF THERE WAS A HIT 1209 | if(DM_BIT_CHECK(ret_result,DM_RESULT)) { 1210 | DM_BIT_SET(dm_ops,DM_RESULT); 1211 | } 1212 | } 1213 | 1214 | // now delete the flow entry if it exists 1215 | //////////////////////////////////////////////////////////////////////////////////// 1216 | // FLOW BUCKET: delete bucket 1217 | #if defined(PHAST_ATOMIC_OPS) 1218 | memcpy_mem_mem((__mem void *)&flow_ht[t_p_bucket].next_loc, (__mem void *)&flow_ht[p_bucket].next_loc, 2); 1219 | mem_write_atomic(clear_mem, &flow_ht[p_bucket], sizeof(clear_mem)); 1220 | #else 1221 | if (t_p_bucket != 0) { 1222 | flow_ht[t_p_bucket].next_loc = flow_ht[p_bucket].next_loc; 1223 | } 1224 | 1225 | 1226 | flow_ht[p_bucket].partition = 0; 1227 | flow_ht[p_bucket].value.eseq = 0; 1228 | flow_ht[p_bucket].value.oooqLen = 0; 1229 | flow_ht[p_bucket].value.cs = 0; 1230 | flow_ht[p_bucket].key[0] = 0; 1231 | flow_ht[p_bucket].key[1] = 0; 1232 | flow_ht[p_bucket].key[2] = 0; 1233 | flow_ht[p_bucket].ctrl_bits.FSM = FALSE; 1234 | flow_ht[p_bucket].ctrl_bits.eseq_valid = FALSE; 1235 | flow_ht[p_bucket].ctrl_bits.active = FALSE; 1236 | 1237 | if(fht_idx[hv2] == p_bucket) { 1238 | if(flow_ht[p_bucket].next_loc != 0) { 1239 | fht_idx[hv2] = flow_ht[p_bucket].next_loc; 1240 | flow_ht[p_bucket].next_loc = 0; 1241 | } else { 1242 | fht_idx[hv2] = t_p_bucket; 1243 | } 1244 | } 1245 | #endif 1246 | 1247 | #if defined(PHAST_LOCK) 1248 | // AQUIRE MUTEX -- fstack //// 1249 | pstack_xfer = 1; 1250 | while(1) { 1251 | mem_test_set(&pstack_xfer,(__mem void *)&fstack_lock, sizeof(pstack_xfer)); 1252 | if(pstack_xfer == 0) { 1253 | break; 
1254 | } 1255 | sleep(LOCK_SLEEP); 1256 | } 1257 | //////////////////// 1258 | #endif 1259 | 1260 | f_dealloc(p_bucket); 1261 | 1262 | #if defined(PHAST_LOCK) 1263 | // RELEASE MUTEX -- fstack /// 1264 | pstack_xfer = 0; 1265 | mem_write_atomic(&pstack_xfer,(__mem void *)&fstack_lock, sizeof(pstack_xfer)); 1266 | //////////////////// 1267 | #endif 1268 | 1269 | // FLOW BUCKET: 1270 | /////////////////////////////////////////////////////////////////////////////////// 1271 | } 1272 | } 1273 | 1274 | #if defined(PHAST_LOCK) 1275 | // RELEASE MUTEX -- flow /// 1276 | f_m_xfer = 0; 1277 | mem_write_atomic(&f_m_xfer, (__mem void *)(f_lock+hv2), sizeof(f_m_xfer)); 1278 | //////////////////// 1279 | #endif 1280 | } 1281 | 1282 | ////////////////////////////////////////////////////////////////////////////////////////////////////////// 1283 | 1284 | pif_plugin_meta_set__meta__processPayloadResult(headers, DM_BIT_CHECK(dm_ops, DM_RESULT)); 1285 | return PIF_PLUGIN_RETURN_FORWARD; 1286 | 1287 | //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 1288 | } else { // CASE 2: NOT-XFLOW MODE or NOT TCP ////////////////////////////////////////////////////////////////////////// 1289 | PIF_PLUGIN_udp_T *udp = pif_plugin_hdr_get_udp(headers); 1290 | 1291 | // calculate payload size -- different ways depending on the protocol 1292 | if (pif_plugin_hdr_tcp_present(headers) && (PHAST_XFLOW_ENABLED == FALSE)) { 1293 | payload_size = ipv4->totalLen - ( (ipv4->ihl + tcp->dataOffset) * 4); 1294 | } else if (pif_plugin_hdr_udp_present(headers)) { // udp: udp header is always 8 bytes 1295 | payload_size = pif_pkt_info_global.pkt_len - pif_pkt_info_global.pkt_pl_off; //udp->hd_length - 8; 1296 | } else if (pif_plugin_hdr_icmp_present(headers)) { // icmp: ipv4_len - 1297 | payload_size = (pif_plugin_hdr_get_ipv4(headers))->totalLen - ((pif_plugin_hdr_get_ipv4(headers))->ihl * 4) - 8; 1298 | } else { // unknown protocol 1299 | 
payload_size = pif_pkt_info_global.pkt_len - pif_pkt_info_global.pkt_pl_off; 1300 | } 1301 | 1302 | if(payload_size == 0) { 1303 | pif_plugin_meta_set__meta__processPayloadResult(headers, 0); // skip packet 1304 | return PIF_PLUGIN_RETURN_FORWARD; 1305 | } else { 1306 | ret_result = dm_loop(headers, 0, 0, 0, payload_size, 0); 1307 | pif_plugin_meta_set__meta__processPayloadResult(headers, DM_BIT_CHECK(ret_result, DM_RESULT)); 1308 | return PIF_PLUGIN_RETURN_FORWARD; 1309 | } 1310 | } // END: TCP vs NOT-TCP if statement 1311 | } 1312 | 1313 | /////////////////////////////////////////////////////// 1314 | ///// FLOW HASH TABLE ////////// 1315 | /////////////////////////////////////////////////////// 1316 | uint8_t f_dealloc(uint16_t num) { 1317 | if (flow_stack.top == 1) { 1318 | return 0; // FAILED should never happen but if does then deallocate flow's memory and report error 1319 | } else { 1320 | flow_stack.top -= 1; 1321 | flow_stack.stk[flow_stack.top] = num; 1322 | return 1; // SUCCESS 1323 | } 1324 | } 1325 | 1326 | /////////////////////////////////////////////////////// 1327 | uint16_t f_alloc() { 1328 | if (flow_stack.top == FLOW_STORAGE_SIZE) { 1329 | return FLOW_NUM_SLOTS_ERROR; // need to deallocate this flow's memory and report error 1330 | } else { 1331 | uint16_t num; 1332 | 1333 | num = flow_stack.stk[flow_stack.top]; 1334 | flow_stack.stk[flow_stack.top] = 0; 1335 | flow_stack.top++; 1336 | return num; 1337 | } 1338 | } 1339 | 1340 | /////////////////////////////////////////////////////// 1341 | ///// PACKET HASH TABLE ////////// 1342 | /////////////////////////////////////////////////////// 1343 | uint8_t p_dealloc(uint16_t flow_part, uint16_t num) { 1344 | if (pkt_stack[flow_part].top == 1) { 1345 | return 0; // FAILED should never happen but if does then deallocate flow's memory and report error 1346 | } else { 1347 | pkt_stack[flow_part].top--; 1348 | pkt_stack[flow_part].stk[pkt_stack[flow_part].top] = num; 1349 | return 1; // SUCCESS 1350 | } 
f_flow_pool = /path/to/mem-flow_stack.txt
15 | 16 | Joel Hypolite, UPenn 17 | ''' 18 | 19 | import os 20 | import sys 21 | import time 22 | import subprocess 23 | import getopt 24 | import configparser 25 | 26 | config = configparser.ConfigParser() 27 | config.read('setup_experiment.ini') 28 | 29 | cmd_rtsym = config['deepmatch']['cmd_rtsym'] 30 | cmd_nfpmem = config['deepmatch']['cmd_nfpmem'] 31 | nfp = config['deepmatch']['nfp'] 32 | v_dfa = config['deepmatch']['v_dfa'] 33 | f_dfa = config['deepmatch']['f_dfa'] 34 | v_flow_pool = config['deepmatch']['v_flow_pool'] 35 | f_flow_pool = config['deepmatch']['f_flow_pool'] 36 | v_pkt_pool = config['deepmatch']['v_pkt_pool'] 37 | f_pkt_pool = config['deepmatch']['f_pkt_pool'] 38 | 39 | v_cputime = "_cputime" 40 | 41 | ################# 42 | def main(choice): 43 | # set the dfa 44 | if choice in 'avd': 45 | result = getVar(v_dfa) 46 | for line in result.splitlines(): 47 | t1 = line.split()[1].strip() 48 | t2 = line.split()[2].strip() 49 | locate = t1 + ":" + t2 50 | print "setting %s @ %s with %s"%(v_dfa, locate, f_dfa) 51 | setVar(f_dfa, locate) 52 | 53 | if choice in 'avs': 54 | # set flow_stack 55 | result = getVar(v_flow_pool) 56 | t1 = result.split()[1].strip() 57 | t2 = result.split()[2].strip() 58 | locate = t1 + ":" + t2 59 | print "setting %s @ %s with %s"%(v_flow_pool, locate, f_flow_pool) 60 | setVar(f_flow_pool, locate) 61 | 62 | # set pkt_stack 63 | result = getVar(v_pkt_pool) 64 | t1 = result.split()[1].strip() 65 | t2 = result.split()[2].strip() 66 | locate = t1 + ":" + t2 67 | print "setting %s @ %s with %s"%(v_pkt_pool, locate, f_pkt_pool) 68 | setVar(f_pkt_pool, locate) 69 | 70 | # check cputime and start the time writer 71 | if choice in 'at': 72 | result = getVar(v_cputime) 73 | t1 = result.split()[1].strip() 74 | t2 = result.split()[2].strip() 75 | locate = t1 + ":" + t2 76 | print "setting %s @ %s with epoch_times"%(v_cputime, locate) 77 | cputime(locate) 78 | 79 | def getVar(svar): 80 | rtsym = subprocess.Popen([cmd_rtsym, '-n', str(nfp), 
'-L'], stdout=subprocess.PIPE,) 81 | grep = subprocess.Popen(['grep', svar], stdin=rtsym.stdout, stdout=subprocess.PIPE,) 82 | 83 | out, err = grep.communicate() 84 | result = out.decode() 85 | return result 86 | 87 | def setVar(fname, locate): 88 | nfpmem = subprocess.check_output([cmd_nfpmem, '-n', str(nfp), '-i', fname, '-w', str(4), locate]) 89 | 90 | def cputime(locate): 91 | while True: 92 | epoch_time = int(time.time()) 93 | if epoch_time%10 == 0: 94 | print "setting %s @ %s with epoch_time=%s [%d]"%(v_cputime, locate, hex(epoch_time), epoch_time) 95 | 96 | nfpmem= subprocess.check_output([cmd_nfpmem, '-n', str(nfp), '-w', str(4), locate, hex(epoch_time)]) 97 | time.sleep(1) 98 | 99 | ######################################### 100 | # uage: ./ 101 | if __name__ == '__main__': 102 | if len(sys.argv) < 2: 103 | print("Usage: %s -o [a|v|d|s|t]"%sys.argv[0]) 104 | print(" [a : all], [v : set dfa and stack], [d: set dfa], [s: set stack], [t : time]") 105 | exit(1) 106 | 107 | myopts, args = getopt.getopt(sys.argv[1:],"o:") 108 | for o,a in myopts: 109 | if o == '-o': 110 | main(a) 111 | else: 112 | print("Usage: %s -o [a|v|d|s|t]"%sys.argv[0]) 113 | print(" [a : all], [v : set dfa and stack], [d: set dfa], [s: set stack], [t : time]") 114 | --------------------------------------------------------------------------------