├── .github └── workflows │ ├── fuzz.yml │ ├── go.yml │ └── golangci-lint.yml ├── CONTRIBUTING ├── LICENSE ├── README.md ├── fuzz.go ├── fuzz_test.go ├── go.mod ├── go.sum ├── lex.go ├── lex_test.go ├── linters.go ├── linters_test.go ├── optimize.go ├── optimize_test.go ├── parser.go ├── parser_test.go ├── rule.go └── rule_test.go /.github/workflows/fuzz.yml: -------------------------------------------------------------------------------- 1 | name: CIFuzz 2 | on: [pull_request] 3 | jobs: 4 | Fuzzing: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - name: Build Fuzzers 8 | uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master 9 | with: 10 | oss-fuzz-project-name: 'gonids' 11 | dry-run: false 12 | - name: Run Fuzzers 13 | uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master 14 | with: 15 | oss-fuzz-project-name: 'gonids' 16 | fuzz-seconds: 600 17 | dry-run: false 18 | - name: Upload Crash 19 | uses: actions/upload-artifact@v1 20 | if: failure() 21 | with: 22 | name: artifacts 23 | path: ./out/artifacts 24 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | on: [push, pull_request] 3 | jobs: 4 | 5 | build: 6 | name: Build and Test 7 | runs-on: ubuntu-latest 8 | steps: 9 | 10 | - name: Set up Go 1.13 11 | uses: actions/setup-go@v1 12 | with: 13 | go-version: 1.13 14 | id: go 15 | 16 | - name: Check out code into the Go module directory 17 | uses: actions/checkout@v1 18 | 19 | - name: Get dependencies 20 | run: | 21 | go get -v -t -d ./... 22 | if [ -f Gopkg.toml ]; then 23 | curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh 24 | dep ensure 25 | fi 26 | 27 | - name: Build 28 | run: go build -v ./... 29 | 30 | - name: Test 31 | run: go test -v ./... 
32 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: golangci-lint 2 | on: 3 | push: 4 | tags: 5 | - v* 6 | branches: 7 | - master 8 | - main 9 | pull_request: 10 | jobs: 11 | golangci: 12 | name: lint 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - name: golangci-lint 17 | uses: golangci/golangci-lint-action@v2 18 | with: 19 | # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version 20 | version: v1.49 21 | 22 | # Optional: working directory, useful for monorepos 23 | # working-directory: somedir 24 | 25 | # Optional: golangci-lint command line arguments. 26 | # args: --issues-exit-code=0 27 | 28 | # Optional: show only new issues if it's a pull request. The default value is `false`. 29 | # only-new-issues: true 30 | 31 | # Optional: if set to true then the action will use pre-installed Go. 32 | # skip-go-installation: true 33 | 34 | # Optional: if set to true then the action don't cache or restore ~/go/pkg. 35 | # skip-pkg-cache: true 36 | 37 | # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. 38 | # skip-build-cache: true 39 | 40 | -------------------------------------------------------------------------------- /CONTRIBUTING: -------------------------------------------------------------------------------- 1 | Want to contribute? Great! First, read this page (including the small print at the end). 2 | 3 | ### Before you contribute 4 | Before we can use your code, you must sign the 5 | [Google Individual Contributor License Agreement] 6 | (https://cla.developers.google.com/about/google-individual) 7 | (CLA), which you can do online. 
The CLA is necessary mainly because you own the 8 | copyright to your changes, even after your contribution becomes part of our 9 | codebase, so we need your permission to use and distribute your code. We also 10 | need to be sure of various other things—for instance that you'll tell us if you 11 | know that your code infringes on other people's patents. You don't have to sign 12 | the CLA until after you've submitted your code for review and a member has 13 | approved it, but you must do it before we can put your code into our codebase. 14 | Before you start working on a larger contribution, you should get in touch with 15 | us first through the issue tracker with your idea so that we can help out and 16 | possibly guide you. Coordinating up front makes it much easier to avoid 17 | frustration later on. 18 | 19 | ### Code reviews 20 | All submissions, including submissions by project members, require review. We 21 | use GitHub pull requests for this purpose. 22 | 23 | ### The small print 24 | Contributions made by corporations are covered by a different agreement than 25 | the one above, the 26 | [Software Grant and Corporate Contributor License Agreement] 27 | (https://cla.developers.google.com/about/google-corporate). 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2016 Google Inc. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | gonids is a library to parse IDS rules for engines like Snort and Suricata. 2 | 3 | ### Installation 4 | ``` 5 | $ go get github.com/google/gonids 6 | ``` 7 | 8 | ### Quick Start 9 | Add this import line to the file you're working in: 10 | ``` 11 | import "github.com/google/gonids" 12 | ``` 13 | 14 | To parse a rule: 15 | ``` 16 | rule := `alert tcp $HOME_NET any -> $EXTERNAL_NET 80 (msg:"GONIDS TEST hello world"; flow:established,to_server; content:"hello world"; classtype:trojan-activity; sid:1; rev:1;)` 17 | r, err := gonids.ParseRule(rule) 18 | if err != nil { 19 | // Handle parse error 20 | } 21 | // Do something with your rule. 22 | switch r.Action { 23 | case "alert": 24 | // This is an 'alert' rule. 25 | case "drop": 26 | // This is a 'drop' rule. 27 | case "pass": 28 | // This is a 'pass' rule. 29 | default: 30 | // I have no idea what this would be. 
=) 31 | } 32 | ``` 33 | 34 | To create a rule a DNS rule (using dns_query sticky buffer) and print it: 35 | ``` 36 | r := gonids.Rule{ 37 | Action: "alert", 38 | Protocol: "dns", 39 | Source: Network{ 40 | Nets: []string{"any"}, 41 | Ports: []string{"any"}, 42 | }, 43 | Destination: Network{ 44 | Nets: []string{"any"}, 45 | Ports: []string{"any"}, 46 | }, 47 | SID: 1234, 48 | Revision: 1, 49 | } 50 | 51 | badDomain := "c2.evil.com" 52 | dnsRule.Description = fmt.Sprintf("DNS query for %s", badDomain) 53 | 54 | sb, _ := gonids.StickyBuffer("dns_query") 55 | c := &gonids.Content{ 56 | DataPosition: sb, 57 | Pattern: []byte(badDomain), 58 | Options: []*gonids.ContentOption{ 59 | {"nocase", ""}, 60 | }, 61 | } 62 | } 63 | 64 | fmt.Println(r) 65 | ``` 66 | 67 | To optimize a Snort HTTP rule for Suricata: 68 | ``` 69 | rule := `alert tcp $HOME_NET any -> $EXTERNAL_NET $HTTP_PORTS (msg:"GONIDS TEST hello world"; flow:established,to_server; content:"hello.php"; http_uri; classtype:trojan-activity; sid:1; rev:1;)` 70 | r, err := gonids.ParseRule(rule) 71 | if err != nil { 72 | // Handle parse error 73 | } 74 | r.OptimizeHTTP() 75 | ``` 76 | 77 | ### Miscellaneous 78 | This is not an official Google product. 79 | -------------------------------------------------------------------------------- /fuzz.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "fmt" 20 | "os" 21 | ) 22 | 23 | var fuzzInit = false 24 | 25 | // FuzzParseRule is used by OSS-Fuzz to fuzz the library. 26 | func FuzzParseRule(data []byte) int { 27 | if !fuzzInit { 28 | fmt.Printf("GODEBUG=%s", os.Getenv("GODEBUG")) 29 | fuzzInit = true 30 | } 31 | r, err := ParseRule(string(data)) 32 | if err != nil { 33 | // Handle parse error 34 | return 0 35 | } 36 | r.OptimizeHTTP() 37 | _ = r.String() 38 | return 1 39 | } 40 | -------------------------------------------------------------------------------- /fuzz_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2019 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "testing" 20 | ) 21 | 22 | func TestFuzz(t *testing.T) { 23 | for _, tt := range []struct { 24 | name string 25 | rule string 26 | }{ 27 | { 28 | name: "fuzzer generated garbage", 29 | rule: `alert tcp $EXTEVNAL_NET any <> $HOME_NET 0 e:misc-activity; sid:t 2010_09_#alert tcp $EXTERNAL_NET any -> $SQL_SERVERS 1433 (msg:"ET EXPLOIT xp_servicecontrol accecs"; flow:to_%erv23, upd)er,established; content:"x|00|p|00|_|00|s|00|e|00|r|00|v|00|i|00|c|00|e|00|c|00|o|00|n|00|t|00|r|00|o|00|l|00|"; nocase; reference:url,doc.emergi`, 30 | }, 31 | { 32 | name: "fuzzer goroutines sleep", 33 | rule: ` ert htt $ET any -> Hnz (mjectatay; tls.fingerprint:"65`, 34 | }, 35 | } { 36 | FuzzParseRule([]byte(tt.rule)) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/google/gonids 2 | 3 | go 1.16 4 | 5 | require github.com/kylelemons/godebug v1.1.0 -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 2 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 3 | -------------------------------------------------------------------------------- /lex.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "errors" 20 | "fmt" 21 | "strings" 22 | "unicode" 23 | "unicode/utf8" 24 | ) 25 | 26 | // item represents a token or text string returned from the lexer. 27 | type item struct { 28 | typ itemType // The type of this item. 29 | value string // The value of this item. 30 | } 31 | 32 | // String returns a string describing an item. 33 | func (i item) String() string { 34 | switch i.typ { 35 | case itemEOF: 36 | return "EOF" 37 | case itemError: 38 | return i.value 39 | } 40 | return fmt.Sprintf("%q: %s", i.typ, i.value) 41 | } 42 | 43 | type itemType int 44 | 45 | const ( 46 | itemError itemType = iota 47 | itemComment 48 | itemAction 49 | itemProtocol 50 | itemSourceAddress 51 | itemSourcePort 52 | itemDirection 53 | itemDestinationAddress 54 | itemDestinationPort 55 | itemNot 56 | itemOptionKey 57 | itemOptionValue 58 | itemOptionNoValue 59 | itemOptionValueString 60 | itemEOR 61 | itemEOF 62 | ) 63 | 64 | const eof = -1 65 | 66 | // stateFn represents the state of the scanner as a function that returns the next state. 67 | type stateFn func(*lexer) stateFn 68 | 69 | // lexer holds the state of the scanner. 70 | type lexer struct { 71 | input string // the string being scanned 72 | state stateFn // the next lexing function to enter 73 | pos int // current position in the input 74 | start int // start position of this item 75 | width int // width of last rune read from input 76 | items chan item // channel of scanned items 77 | } 78 | 79 | // next returns the next rune in the input. 
80 | func (l *lexer) next() rune { 81 | if l.pos >= len(l.input) { 82 | l.width = 0 83 | return eof 84 | } 85 | r, w := utf8.DecodeRuneInString(l.input[l.pos:]) 86 | if r == utf8.RuneError && w == 1 { 87 | // The whole input string has been validated at init. 88 | panic("invalid UTF-8 character") 89 | } 90 | l.width = w 91 | l.pos += l.width 92 | return r 93 | } 94 | 95 | // skipNext skips over the next rune in the input. 96 | func (l *lexer) skipNext() { 97 | l.next() 98 | l.ignore() 99 | } 100 | 101 | // len returns the current length of the item in processing. 102 | func (l *lexer) len() int { 103 | if l.pos >= len(l.input) { 104 | return -1 105 | } 106 | return l.pos - l.start 107 | } 108 | 109 | // backup steps back one rune. Can only be called once per call of next. 110 | func (l *lexer) backup() { 111 | if l.width == -1 { 112 | panic("double backup") 113 | } 114 | l.pos -= l.width 115 | l.width = -1 116 | } 117 | 118 | // emit passes an item back to the client, trimSpaces can be used to trim spaces around item 119 | // value before emiting. 120 | func (l *lexer) emit(t itemType, trimSpaces bool) { 121 | input := l.input[l.start:l.pos] 122 | if trimSpaces { 123 | input = strings.TrimSpace(input) 124 | } 125 | 126 | // This is a bit of a hack. We lex until `;` now so we end up with extra `"`. 127 | input = strings.TrimSuffix(input, `"`) 128 | l.items <- item{t, input} 129 | l.start = l.pos 130 | } 131 | 132 | // ignore skips over the pending input before this point. 133 | func (l *lexer) ignore() { 134 | l.start = l.pos 135 | } 136 | 137 | // acceptRun consumes a run of runes from the valid set. 138 | func (l *lexer) acceptRun(valid string) { 139 | for strings.ContainsRune(valid, l.next()) { 140 | } 141 | l.backup() 142 | } 143 | 144 | // ignoreSpaces ignores all spaces at the start of the input. 
145 | func (l *lexer) ignoreSpaces() { 146 | for unicode.IsSpace(l.next()) { 147 | l.ignore() 148 | } 149 | l.backup() 150 | } 151 | 152 | // errorf returns an error token and terminates the scan by passing 153 | // back a nil pointer that will be the next state, terminating l.nextItem. 154 | func (l *lexer) errorf(format string, args ...interface{}) stateFn { 155 | l.items <- item{itemError, fmt.Sprintf(format, args...)} 156 | return nil 157 | } 158 | 159 | func (l *lexer) unexpectedEOF() stateFn { 160 | return nil 161 | } 162 | 163 | // nextItem returns the next item from the input. 164 | func (l *lexer) nextItem() item { 165 | r, more := <-l.items 166 | if !more { 167 | return item{itemError, "unexpected EOF"} 168 | } 169 | return r 170 | } 171 | 172 | // lex initializes and runs a new scanner for the input string. 173 | func lex(input string) (*lexer, error) { 174 | if !utf8.ValidString(input) { 175 | return nil, errors.New("input is not a valid UTF-8 string") 176 | } 177 | l := &lexer{ 178 | input: input, 179 | items: make(chan item, 0x1000), 180 | } 181 | go l.run() 182 | return l, nil 183 | } 184 | 185 | // TODO: handle error and corner case in all states. 186 | // run runs the state machine for the lexer. 187 | func (l *lexer) run() { 188 | for l.state = lexRule; l.state != nil; { 189 | l.state = l.state(l) 190 | } 191 | close(l.items) 192 | } 193 | 194 | func (l *lexer) close() { 195 | // Reads all items until channel close to be sure goroutine has ended. 196 | more := true 197 | for more { 198 | _, more = <-l.items 199 | } 200 | } 201 | 202 | // lexRule starts the scan of a rule. 203 | func lexRule(l *lexer) stateFn { 204 | r := l.next() 205 | switch { 206 | case unicode.IsSpace(r): 207 | l.ignore() 208 | return lexRule 209 | case r == '#': 210 | return lexComment 211 | case r == eof: 212 | l.emit(itemEOF, false) 213 | return nil 214 | } 215 | return lexAction 216 | } 217 | 218 | // lexComment consumes a commented rule. 
219 | func lexComment(l *lexer) stateFn { 220 | // Ignore leading spaces and #. 221 | l.ignore() 222 | for { 223 | r := l.next() 224 | if unicode.IsSpace(r) || r == '#' { 225 | l.ignore() 226 | } else { 227 | break 228 | } 229 | } 230 | l.backup() 231 | 232 | for { 233 | switch l.next() { 234 | case '\r', '\n': 235 | l.emit(itemComment, false) 236 | return lexRule 237 | case eof: 238 | l.backup() 239 | l.emit(itemComment, false) 240 | return lexRule 241 | } 242 | } 243 | } 244 | 245 | // lexAction consumes a rule action. 246 | func lexAction(l *lexer) stateFn { 247 | for { 248 | r := l.next() 249 | switch { 250 | case r == ' ': 251 | l.emit(itemAction, true) 252 | return lexProtocol 253 | case !unicode.IsLetter(r): 254 | return l.errorf("invalid character %q for a rule action", r) 255 | } 256 | } 257 | } 258 | 259 | // lexProtocol consumes a rule protocol. 260 | func lexProtocol(l *lexer) stateFn { 261 | l.ignoreSpaces() 262 | for { 263 | r := l.next() 264 | switch { 265 | case r == ' ': 266 | l.emit(itemProtocol, true) 267 | return lexSourceAddress 268 | case !(unicode.IsLetter(r) || unicode.IsDigit(r) || (l.len() > 0 && r == '-')): 269 | return l.errorf("invalid character %q for a rule protocol", r) 270 | } 271 | } 272 | 273 | } 274 | 275 | // lexSourceAddress consumes a source address. 276 | func lexSourceAddress(l *lexer) stateFn { 277 | l.ignoreSpaces() 278 | for { 279 | switch l.next() { 280 | case ' ': 281 | l.emit(itemSourceAddress, true) 282 | return lexSourcePort 283 | case eof: 284 | return l.unexpectedEOF() 285 | } 286 | } 287 | } 288 | 289 | // lexSourcePort consumes a source port. 290 | func lexSourcePort(l *lexer) stateFn { 291 | l.ignoreSpaces() 292 | for { 293 | switch l.next() { 294 | case ' ': 295 | l.emit(itemSourcePort, true) 296 | return lexDirection 297 | case eof: 298 | return l.unexpectedEOF() 299 | } 300 | } 301 | } 302 | 303 | // lexDirection consumes a rule direction. 
304 | func lexDirection(l *lexer) stateFn { 305 | l.ignoreSpaces() 306 | l.acceptRun("<->") 307 | if r := l.next(); r != ' ' { 308 | return l.errorf("invalid character %q for a rule direction", r) 309 | } 310 | l.emit(itemDirection, true) 311 | return lexDestinationAddress 312 | } 313 | 314 | // lexDestinationAddress consumes a destination address. 315 | func lexDestinationAddress(l *lexer) stateFn { 316 | l.ignoreSpaces() 317 | for { 318 | switch l.next() { 319 | case ' ': 320 | l.emit(itemDestinationAddress, true) 321 | return lexDestinationPort 322 | case eof: 323 | return l.unexpectedEOF() 324 | } 325 | } 326 | } 327 | 328 | // lexDestinationPort consumes a destination port. 329 | func lexDestinationPort(l *lexer) stateFn { 330 | for { 331 | switch l.next() { 332 | case '(': 333 | l.backup() 334 | l.emit(itemDestinationPort, true) 335 | l.skipNext() 336 | return lexOptionKey 337 | case eof: 338 | return l.unexpectedEOF() 339 | } 340 | } 341 | } 342 | 343 | // lexOptionKey scans a key from the rule options. 344 | func lexOptionKey(l *lexer) stateFn { 345 | for { 346 | switch l.next() { 347 | case ':': 348 | l.backup() 349 | l.emit(itemOptionKey, true) 350 | l.skipNext() 351 | return lexOptionValueBegin 352 | case ';': 353 | l.backup() 354 | if l.pos > l.start { 355 | l.emit(itemOptionKey, true) 356 | l.emit(itemOptionNoValue, true) 357 | } 358 | l.skipNext() 359 | return lexOptionKey 360 | case ')': 361 | l.backup() 362 | if l.pos > l.start { 363 | l.emit(itemOptionKey, true) 364 | } 365 | l.skipNext() 366 | return lexRuleEnd 367 | case eof: 368 | return l.unexpectedEOF() 369 | } 370 | } 371 | } 372 | 373 | // lexOptionValueBegin scans the beginning of a value from the rule option. 
374 | func lexOptionValueBegin(l *lexer) stateFn { 375 | switch l.next() { 376 | case '"': 377 | l.ignore() 378 | return lexOptionValueString 379 | case ' ': 380 | l.ignore() 381 | return lexOptionValueBegin 382 | case '!': 383 | l.emit(itemNot, true) 384 | return lexOptionValueBegin 385 | } 386 | return lexOptionValue 387 | } 388 | 389 | // lexOptionValueString consumes the inner content of a string value from the rule options. 390 | func lexOptionValueString(l *lexer) stateFn { 391 | escaped := false 392 | for { 393 | switch l.next() { 394 | case ';': 395 | l.backup() 396 | l.emit(itemOptionValueString, false) 397 | l.skipNext() 398 | return lexOptionKey 399 | case '\\': 400 | escaped = !escaped 401 | if l.next() != ';' || !escaped { 402 | l.backup() 403 | } 404 | case eof: 405 | return l.unexpectedEOF() 406 | default: 407 | escaped = false 408 | } 409 | } 410 | } 411 | 412 | // lexOptionValue scans a value from the rule options. 413 | func lexOptionValue(l *lexer) stateFn { 414 | for { 415 | switch l.next() { 416 | case ';': 417 | l.backup() 418 | l.emit(itemOptionValue, true) 419 | l.skipNext() 420 | return lexOptionKey 421 | case eof: 422 | return l.unexpectedEOF() 423 | } 424 | } 425 | } 426 | 427 | // lexOptionEnd marks the end of a rule. 428 | func lexRuleEnd(l *lexer) stateFn { 429 | l.emit(itemEOR, false) 430 | return lexRule 431 | } 432 | -------------------------------------------------------------------------------- /lex_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | package gonids 16 | 17 | import ( 18 | "errors" 19 | "reflect" 20 | "testing" 21 | ) 22 | 23 | // collect gathers the emitted items into a slice. 24 | func collect(input string) (items []item, err error) { 25 | l, err := lex(input) 26 | if err != nil { 27 | return nil, err 28 | } 29 | for item := l.nextItem(); item.typ != itemEOF; item = l.nextItem() { 30 | switch item.typ { 31 | case itemError: 32 | return nil, errors.New(item.value) 33 | default: 34 | items = append(items, item) 35 | } 36 | } 37 | return 38 | } 39 | 40 | func TestLexer(t *testing.T) { 41 | for _, tt := range []struct { 42 | name string 43 | input string 44 | wantErr bool 45 | items []item 46 | }{ 47 | { 48 | name: "simple", 49 | input: "alert udp $HOME_NET any -> [1.1.1.1,2.2.2.2] any (key1:value1; key2:value2;)", 50 | items: []item{ 51 | {itemAction, "alert"}, 52 | {itemProtocol, "udp"}, 53 | {itemSourceAddress, "$HOME_NET"}, 54 | {itemSourcePort, "any"}, 55 | {itemDirection, "->"}, 56 | {itemDestinationAddress, "[1.1.1.1,2.2.2.2]"}, 57 | {itemDestinationPort, "any"}, 58 | {itemOptionKey, "key1"}, 59 | {itemOptionValue, "value1"}, 60 | {itemOptionKey, "key2"}, 61 | {itemOptionValue, "value2"}, 62 | {itemEOR, ""}, 63 | }, 64 | }, 65 | { 66 | name: "string value", 67 | input: `alert tcp-pkt $HOME_NET any -> [1.1.1.1,2.2.2.2] any (key1:"value1";)`, 68 | items: []item{ 69 | {itemAction, "alert"}, 70 | {itemProtocol, "tcp-pkt"}, 71 | {itemSourceAddress, "$HOME_NET"}, 72 | {itemSourcePort, "any"}, 73 | {itemDirection, "->"}, 74 | {itemDestinationAddress, 
"[1.1.1.1,2.2.2.2]"}, 75 | {itemDestinationPort, "any"}, 76 | {itemOptionKey, "key1"}, 77 | {itemOptionValueString, "value1"}, 78 | {itemEOR, ""}, 79 | }, 80 | }, 81 | { 82 | name: "string value not", 83 | input: `alert udp $HOME_NET any -> [1.1.1.1,2.2.2.2] any (key1:!"value1";)`, 84 | items: []item{ 85 | {itemAction, "alert"}, 86 | {itemProtocol, "udp"}, 87 | {itemSourceAddress, "$HOME_NET"}, 88 | {itemSourcePort, "any"}, 89 | {itemDirection, "->"}, 90 | {itemDestinationAddress, "[1.1.1.1,2.2.2.2]"}, 91 | {itemDestinationPort, "any"}, 92 | {itemOptionKey, "key1"}, 93 | {itemNot, "!"}, 94 | {itemOptionValueString, "value1"}, 95 | {itemEOR, ""}, 96 | }, 97 | }, 98 | { 99 | name: "protocol with number", 100 | input: `alert ipv6 $HOME_NET any -> $EXTERNAL_NET any (key1:"value1";)`, 101 | items: []item{ 102 | {itemAction, "alert"}, 103 | {itemProtocol, "ipv6"}, 104 | {itemSourceAddress, "$HOME_NET"}, 105 | {itemSourcePort, "any"}, 106 | {itemDirection, "->"}, 107 | {itemDestinationAddress, "$EXTERNAL_NET"}, 108 | {itemDestinationPort, "any"}, 109 | {itemOptionKey, "key1"}, 110 | {itemOptionValueString, "value1"}, 111 | {itemEOR, ""}, 112 | }, 113 | }, 114 | { 115 | name: "single key", 116 | input: "alert udp $HOME_NET any -> [1.1.1.1,2.2.2.2] any (key;)", 117 | items: []item{ 118 | {itemAction, "alert"}, 119 | {itemProtocol, "udp"}, 120 | {itemSourceAddress, "$HOME_NET"}, 121 | {itemSourcePort, "any"}, 122 | {itemDirection, "->"}, 123 | {itemDestinationAddress, "[1.1.1.1,2.2.2.2]"}, 124 | {itemDestinationPort, "any"}, 125 | {itemOptionKey, "key"}, 126 | {itemOptionNoValue, ""}, 127 | {itemEOR, ""}, 128 | }, 129 | }, 130 | { 131 | name: "multiple spaces", 132 | input: "\talert udp $HOME_NET any -> [1.1.1.1,2.2.2.2] any (key1: value1 ; key2;)", 133 | items: []item{ 134 | {itemAction, "alert"}, 135 | {itemProtocol, "udp"}, 136 | {itemSourceAddress, "$HOME_NET"}, 137 | {itemSourcePort, "any"}, 138 | {itemDirection, "->"}, 139 | {itemDestinationAddress, 
"[1.1.1.1,2.2.2.2]"}, 140 | {itemDestinationPort, "any"}, 141 | {itemOptionKey, "key1"}, 142 | {itemOptionValue, "value1"}, 143 | {itemOptionKey, "key2"}, 144 | {itemOptionNoValue, ""}, 145 | {itemEOR, ""}, 146 | }, 147 | }, 148 | { 149 | name: "parentheses in value", 150 | input: `alert dns $HOME_NET any -> any any (reference:url,en.wikipedia.org/wiki/Tor_(anonymity_network); sid:42;)`, 151 | items: []item{ 152 | {itemAction, "alert"}, 153 | {itemProtocol, "dns"}, 154 | {itemSourceAddress, "$HOME_NET"}, 155 | {itemSourcePort, "any"}, 156 | {itemDirection, "->"}, 157 | {itemDestinationAddress, "any"}, 158 | {itemDestinationPort, "any"}, 159 | {itemOptionKey, "reference"}, 160 | {itemOptionValue, "url,en.wikipedia.org/wiki/Tor_(anonymity_network)"}, 161 | {itemOptionKey, "sid"}, 162 | {itemOptionValue, "42"}, 163 | {itemEOR, ""}, 164 | }, 165 | }, 166 | { 167 | name: "escaped quote", 168 | input: `alert udp $HOME_NET any -> $EXTERNAL_NET any (pcre:"/[=\"]\w{8}\.jar/Hi";)`, 169 | items: []item{ 170 | {itemAction, "alert"}, 171 | {itemProtocol, "udp"}, 172 | {itemSourceAddress, "$HOME_NET"}, 173 | {itemSourcePort, "any"}, 174 | {itemDirection, "->"}, 175 | {itemDestinationAddress, "$EXTERNAL_NET"}, 176 | {itemDestinationPort, "any"}, 177 | {itemOptionKey, "pcre"}, 178 | {itemOptionValueString, `/[=\"]\w{8}\.jar/Hi`}, 179 | {itemEOR, ""}, 180 | }, 181 | }, 182 | { 183 | name: "escaped backslash", 184 | input: `alert tcp $HOME_NET any -> $EXTERNAL_NET 21 (content:"CWD C|3a|\\WINDOWS\\system32\\"; sid:42;)`, 185 | items: []item{ 186 | {itemAction, "alert"}, 187 | {itemProtocol, "tcp"}, 188 | {itemSourceAddress, "$HOME_NET"}, 189 | {itemSourcePort, "any"}, 190 | {itemDirection, "->"}, 191 | {itemDestinationAddress, "$EXTERNAL_NET"}, 192 | {itemDestinationPort, "21"}, 193 | {itemOptionKey, "content"}, 194 | {itemOptionValueString, `CWD C|3a|\\WINDOWS\\system32\\`}, 195 | {itemOptionKey, "sid"}, 196 | {itemOptionValue, "42"}, 197 | {itemEOR, ""}, 198 | }, 199 | }, 200 | { 
201 | name: "comment", 202 | input: "# bla", 203 | items: []item{{itemComment, "bla"}}, 204 | }, 205 | // errors. 206 | { 207 | name: "invalid utf-8", 208 | input: "\xab\x00\xfc", 209 | wantErr: true, 210 | }, 211 | { 212 | name: "invalid action", 213 | input: "42 udp $HOME_NET any -> any any (key);", 214 | wantErr: true, 215 | }, 216 | { 217 | name: "invalid direction", 218 | input: "alert udp $HOME_NET any foo any any (key);", 219 | wantErr: true, 220 | }, 221 | { 222 | name: "source address EOF", 223 | input: "alert udp incomplet", 224 | wantErr: true, 225 | }, 226 | { 227 | name: "source port EOF", 228 | input: "alert udp $HOME_NET incomplet", 229 | wantErr: true, 230 | }, 231 | { 232 | name: "destination address EOF", 233 | input: "alert udp $HOME_NET any -> incomplet", 234 | wantErr: true, 235 | }, 236 | { 237 | name: "destination port EOF", 238 | input: "alert udp $HOME_NET any -> $EXTERNAL_NET incomplet", 239 | wantErr: true, 240 | }, 241 | { 242 | name: "option key EOF", 243 | input: "alert udp $HOME_NET any -> $EXTERNAL_NET any (incomplet", 244 | wantErr: true, 245 | }, 246 | { 247 | name: "value string EOF", 248 | input: "alert udp $HOME_NET any -> $EXTERNAL_NET any (key1:\"incomplet", 249 | wantErr: true, 250 | }, 251 | { 252 | name: "value EOF", 253 | input: "alert udp $HOME_NET any -> $EXTERNAL_NET any (key1:incomplet", 254 | wantErr: true, 255 | }, 256 | } { 257 | lexItems, err := collect(tt.input) 258 | if (err != nil) != tt.wantErr { 259 | t.Fatalf("%s: got err %v; expected err %v", tt.name, err, tt.wantErr) 260 | } 261 | if len(lexItems) != len(tt.items) { 262 | t.Fatalf("%s: got %d items; expected %d items", tt.name, len(lexItems), len(tt.items)) 263 | } 264 | for i, lexItem := range lexItems { 265 | if !reflect.DeepEqual(lexItem, tt.items[i]) { 266 | t.Errorf("%s: got %+v; expected: %+v", tt.name, lexItem, tt.items[i]) 267 | } 268 | } 269 | } 270 | } 271 | -------------------------------------------------------------------------------- 
/linters.go:
--------------------------------------------------------------------------------
/* Copyright 2016 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gonids

import (
	"bytes"
	"strings"
)

// ShouldBeHTTP returns true if a rule looks like the protocol should be http, but is not.
func (r *Rule) ShouldBeHTTP() bool {
	// If the rule is already HTTP, then stop looking.
	if r.Protocol == "http" {
		return false
	}
	// If we look at http buffers or sticky buffers, we should use the HTTP protocol.
	for _, c := range r.Contents() {
		// Sticky-buffer data positions are named with an "http_" prefix.
		if strings.HasPrefix(c.DataPosition.String(), "http_") {
			return true
		}
		// Legacy content modifiers such as http_header / http_uri.
		for _, co := range c.Options {
			if strings.HasPrefix(co.Name, "http_") {
				return true
			}
		}
	}
	return false
}

// TODO: See if ET folks have any data around this.
// Minimum length of a content to be considered safe for use with a PCRE.
const minPCREContentLen = 5

// Some of these may be caught by min length check, but including for completeness.
// All lower case for case insensitive checks.
49 | // Many of this come from: https://github.com/EmergingThreats/IDSDeathBlossom/blob/master/config/fpblacklist.txt 50 | var bannedContents = []string{"get", 51 | "post", 52 | "/", 53 | "user-agent", 54 | "user-agent: mozilla", 55 | "host", 56 | "index.php", 57 | "index.php?id=", 58 | "index.html", 59 | "content-length", 60 | ".htm", 61 | ".html", 62 | ".php", 63 | ".asp", 64 | ".aspx", 65 | "content-disposition", 66 | "wp-content/plugins", 67 | "wp-content/themes", 68 | "activexobject", 69 | "default.asp", 70 | "default.aspx", 71 | "default.asp", 72 | } 73 | 74 | // ExpensivePCRE returns true if a rule appears to use a PCRE without 75 | // conditions that make it expensive to compute. 76 | func (r *Rule) ExpensivePCRE() bool { 77 | // No PCRE, not expensive. 78 | if len(r.PCREs()) < 1 { 79 | return false 80 | } 81 | 82 | // If we have PCRE, but no contents, this is probably expensive. 83 | cs := r.Contents() 84 | if len(cs) < 1 { 85 | return true 86 | } 87 | 88 | // Look for a content with sufficient length to make performance acceptable. 89 | short := true 90 | for _, c := range cs { 91 | // TODO: Identify a reasonable length. 92 | if len(c.Pattern) >= minPCREContentLen { 93 | short = false 94 | } 95 | } 96 | if short { 97 | return true 98 | } 99 | 100 | // If all content matches are common strings, also not good. 101 | common := true 102 | for _, c := range cs { 103 | if !inSlice(strings.ToLower(strings.Trim(string(c.Pattern), "\r\n :/?")), bannedContents) { 104 | common = false 105 | } 106 | } 107 | return common 108 | } 109 | 110 | // SnortHTTPHeader returns true if any content contains double CRLF at the end. 111 | func (r *Rule) SnortHTTPHeader() bool { 112 | cs := r.Contents() 113 | if len(cs) < 1 { 114 | return false 115 | } 116 | for _, c := range cs { 117 | if c.SnortHTTPHeader() { 118 | return true 119 | } 120 | } 121 | return false 122 | } 123 | 124 | // SnortHTTPHeader returns true if a specific content contains double CRLF at the end. 
125 | func (c Content) SnortHTTPHeader() bool { 126 | for _, o := range c.Options { 127 | if o.Name == "http_header" { 128 | if bytes.HasSuffix(c.Pattern, []byte("\r\n\r\n")) { 129 | return true 130 | } 131 | } 132 | } 133 | return false 134 | } 135 | 136 | // NoReferences returns true if there are no references in the rule. 137 | func (r *Rule) NoReferences() bool { 138 | return len(r.References) == 0 139 | } 140 | 141 | // Length at which we warn if all matchers are this Contents with length or shorter. 142 | // Possibly align this with the minPCREContentLength. 143 | const shortContentLen = 4 144 | 145 | // OnlyShortContents returns true if all Matchers are Contents and all matches are very short. 146 | func (r *Rule) OnlyShortContents() bool { 147 | // There are non-Content matches in the rule. 148 | cs := r.Contents() 149 | if len(r.Matchers) != len(cs) { 150 | return false 151 | } 152 | for _, c := range cs { 153 | // Some content is longer than the minimum. 154 | if len(c.Pattern) > shortContentLen { 155 | return false 156 | } 157 | } 158 | return true 159 | } 160 | -------------------------------------------------------------------------------- /linters_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "testing" 20 | ) 21 | 22 | func TestShouldBeHTTP(t *testing.T) { 23 | for _, tt := range []struct { 24 | name string 25 | input *Rule 26 | want bool 27 | }{ 28 | { 29 | name: "already http", 30 | input: &Rule{ 31 | Protocol: "http", 32 | }, 33 | want: false, 34 | }, 35 | { 36 | name: "content option change", 37 | input: &Rule{ 38 | Protocol: "tcp", 39 | Source: Network{ 40 | Nets: []string{"$HOME_NET"}, 41 | Ports: []string{"any"}, 42 | }, 43 | Destination: Network{ 44 | Nets: []string{"$EXTERNAL_NET"}, 45 | Ports: []string{"$HTTP_PORTS"}, 46 | }, 47 | Matchers: []orderedMatcher{ 48 | &Content{ 49 | Pattern: []byte("AA"), 50 | Options: []*ContentOption{ 51 | {"http_header", ""}, 52 | }, 53 | }, 54 | }, 55 | }, 56 | want: true, 57 | }, 58 | { 59 | name: "sticky buffer change", 60 | input: &Rule{ 61 | Protocol: "tcp", 62 | Source: Network{ 63 | Nets: []string{"$HOME_NET"}, 64 | Ports: []string{"any"}, 65 | }, 66 | Destination: Network{ 67 | Nets: []string{"$EXTERNAL_NET"}, 68 | Ports: []string{"$HTTP_PORTS"}, 69 | }, 70 | Matchers: []orderedMatcher{ 71 | &Content{ 72 | DataPosition: httpProtocol, 73 | Pattern: []byte("AA"), 74 | }, 75 | }, 76 | }, 77 | want: true, 78 | }, 79 | } { 80 | got := tt.input.ShouldBeHTTP() 81 | if got != tt.want { 82 | t.Fatalf("%s: got %v; want %v", tt.name, got, tt.want) 83 | } 84 | } 85 | } 86 | 87 | func TestExpensivePCRE(t *testing.T) { 88 | for _, tt := range []struct { 89 | name string 90 | input *Rule 91 | want bool 92 | }{ 93 | { 94 | name: "No PCRE", 95 | input: &Rule{ 96 | Protocol: "http", 97 | Source: Network{ 98 | Nets: []string{"$HOME_NET"}, 99 | Ports: []string{"any"}, 100 | }, 101 | Destination: Network{ 102 | Nets: []string{"$EXTERNAL_NET"}, 103 | Ports: []string{"$HTTP_PORTS"}, 104 | }, 105 | Matchers: []orderedMatcher{ 106 | &Content{ 107 | Pattern: []byte("AAAAAAAAAA"), 108 | Options: []*ContentOption{ 109 | {"http_header", ""}, 110 | }, 111 | }, 112 | }, 113 | }, 
114 | want: false, 115 | }, 116 | { 117 | name: "No Content", 118 | input: &Rule{ 119 | Protocol: "http", 120 | Source: Network{ 121 | Nets: []string{"$HOME_NET"}, 122 | Ports: []string{"any"}, 123 | }, 124 | Destination: Network{ 125 | Nets: []string{"$EXTERNAL_NET"}, 126 | Ports: []string{"$HTTP_PORTS"}, 127 | }, 128 | Matchers: []orderedMatcher{ 129 | &PCRE{ 130 | Pattern: []byte("f.*bar"), 131 | }, 132 | }, 133 | }, 134 | want: true, 135 | }, 136 | { 137 | name: "Short Content", 138 | input: &Rule{ 139 | Protocol: "http", 140 | Source: Network{ 141 | Nets: []string{"$HOME_NET"}, 142 | Ports: []string{"any"}, 143 | }, 144 | Destination: Network{ 145 | Nets: []string{"$EXTERNAL_NET"}, 146 | Ports: []string{"$HTTP_PORTS"}, 147 | }, 148 | Matchers: []orderedMatcher{ 149 | &Content{ 150 | Pattern: []byte("AA"), 151 | Options: []*ContentOption{ 152 | {"http_header", ""}, 153 | }, 154 | }, 155 | &PCRE{ 156 | Pattern: []byte("f.*bar"), 157 | }, 158 | }, 159 | }, 160 | want: true, 161 | }, 162 | { 163 | name: "Only Common Content", 164 | input: &Rule{ 165 | Protocol: "http", 166 | Source: Network{ 167 | Nets: []string{"$HOME_NET"}, 168 | Ports: []string{"any"}, 169 | }, 170 | Destination: Network{ 171 | Nets: []string{"$EXTERNAL_NET"}, 172 | Ports: []string{"$HTTP_PORTS"}, 173 | }, 174 | Matchers: []orderedMatcher{ 175 | &Content{ 176 | Pattern: []byte("POST"), 177 | Options: []*ContentOption{ 178 | {"http_method", ""}, 179 | }, 180 | }, 181 | &PCRE{ 182 | Pattern: []byte("f.*bar"), 183 | }, 184 | }, 185 | }, 186 | want: true, 187 | }, 188 | { 189 | name: "Long Content", 190 | input: &Rule{ 191 | Protocol: "http", 192 | Source: Network{ 193 | Nets: []string{"$HOME_NET"}, 194 | Ports: []string{"any"}, 195 | }, 196 | Destination: Network{ 197 | Nets: []string{"$EXTERNAL_NET"}, 198 | Ports: []string{"$HTTP_PORTS"}, 199 | }, 200 | Matchers: []orderedMatcher{ 201 | &Content{ 202 | Pattern: []byte("ReallyLongThing"), 203 | Options: []*ContentOption{ 204 | {"http_header", ""}, 
205 | }, 206 | }, 207 | &PCRE{ 208 | Pattern: []byte("f.*bar"), 209 | }, 210 | }, 211 | }, 212 | want: false, 213 | }, 214 | { 215 | name: "Banned complex content", 216 | input: &Rule{ 217 | Protocol: "http", 218 | Source: Network{ 219 | Nets: []string{"$HOME_NET"}, 220 | Ports: []string{"any"}, 221 | }, 222 | Destination: Network{ 223 | Nets: []string{"$EXTERNAL_NET"}, 224 | Ports: []string{"$HTTP_PORTS"}, 225 | }, 226 | Matchers: []orderedMatcher{ 227 | &Content{ 228 | Pattern: []byte("\r\nUser-Agent: "), 229 | Options: []*ContentOption{ 230 | {"http_header", ""}, 231 | }, 232 | }, 233 | &PCRE{ 234 | Pattern: []byte("f.*bar"), 235 | }, 236 | }, 237 | }, 238 | want: true, 239 | }, 240 | { 241 | name: "Banned complex content, with long content", 242 | input: &Rule{ 243 | Protocol: "http", 244 | Source: Network{ 245 | Nets: []string{"$HOME_NET"}, 246 | Ports: []string{"any"}, 247 | }, 248 | Destination: Network{ 249 | Nets: []string{"$EXTERNAL_NET"}, 250 | Ports: []string{"$HTTP_PORTS"}, 251 | }, 252 | Matchers: []orderedMatcher{ 253 | &Content{ 254 | Pattern: []byte("\r\nUser-Agent: "), 255 | Options: []*ContentOption{ 256 | {"http_header", ""}, 257 | }, 258 | }, 259 | &Content{ 260 | Pattern: []byte("SuperLongUniqueAwesome"), 261 | }, 262 | &PCRE{ 263 | Pattern: []byte("f.*bar"), 264 | }, 265 | }, 266 | }, 267 | want: false, 268 | }, 269 | } { 270 | got := tt.input.ExpensivePCRE() 271 | if got != tt.want { 272 | t.Fatalf("%s: got %v; want %v", tt.name, got, tt.want) 273 | } 274 | } 275 | } 276 | 277 | func TestSnortHTTPHeader(t *testing.T) { 278 | for _, tt := range []struct { 279 | name string 280 | input *Rule 281 | want bool 282 | }{ 283 | { 284 | name: "has trailing CRLF CRLF", 285 | input: &Rule{ 286 | Protocol: "http", 287 | Source: Network{ 288 | Nets: []string{"$HOME_NET"}, 289 | Ports: []string{"any"}, 290 | }, 291 | Destination: Network{ 292 | Nets: []string{"$EXTERNAL_NET"}, 293 | Ports: []string{"$HTTP_PORTS"}, 294 | }, 295 | Matchers: 
[]orderedMatcher{ 296 | &Content{ 297 | Pattern: []byte("AAAAAAAAAA\r\n\r\n"), 298 | Options: []*ContentOption{ 299 | {"http_header", ""}, 300 | }, 301 | }, 302 | }, 303 | }, 304 | want: true, 305 | }, 306 | { 307 | name: "multiple content trailing CRLF CRLF", 308 | input: &Rule{ 309 | Protocol: "http", 310 | Source: Network{ 311 | Nets: []string{"$HOME_NET"}, 312 | Ports: []string{"any"}, 313 | }, 314 | Destination: Network{ 315 | Nets: []string{"$EXTERNAL_NET"}, 316 | Ports: []string{"$HTTP_PORTS"}, 317 | }, 318 | Matchers: []orderedMatcher{ 319 | &Content{ 320 | Pattern: []byte("BBBBBB"), 321 | Options: []*ContentOption{ 322 | {"http_header", ""}, 323 | }, 324 | }, 325 | &Content{ 326 | Pattern: []byte("AAAAAAAAAA\r\n\r\n"), 327 | Options: []*ContentOption{ 328 | {"http_header", ""}, 329 | }, 330 | }, 331 | }, 332 | }, 333 | want: true, 334 | }, 335 | { 336 | name: "no trailing CRLF", 337 | input: &Rule{ 338 | Protocol: "http", 339 | Source: Network{ 340 | Nets: []string{"$HOME_NET"}, 341 | Ports: []string{"any"}, 342 | }, 343 | Destination: Network{ 344 | Nets: []string{"$EXTERNAL_NET"}, 345 | Ports: []string{"$HTTP_PORTS"}, 346 | }, 347 | Matchers: []orderedMatcher{ 348 | &Content{ 349 | Pattern: []byte("BBBBBB"), 350 | Options: []*ContentOption{ 351 | {"http_header", ""}, 352 | }, 353 | }, 354 | }, 355 | }, 356 | want: false, 357 | }, 358 | { 359 | name: "one trailing CRLF", 360 | input: &Rule{ 361 | Protocol: "http", 362 | Source: Network{ 363 | Nets: []string{"$HOME_NET"}, 364 | Ports: []string{"any"}, 365 | }, 366 | Destination: Network{ 367 | Nets: []string{"$EXTERNAL_NET"}, 368 | Ports: []string{"$HTTP_PORTS"}, 369 | }, 370 | Matchers: []orderedMatcher{ 371 | &Content{ 372 | Pattern: []byte("BBBBBB\r\n"), 373 | Options: []*ContentOption{ 374 | {"http_header", ""}, 375 | }, 376 | }, 377 | }, 378 | }, 379 | want: false, 380 | }, 381 | } { 382 | got := tt.input.SnortHTTPHeader() 383 | if got != tt.want { 384 | t.Fatalf("%s: got %v; want %v", tt.name, got, 
tt.want) 385 | } 386 | } 387 | } 388 | 389 | func TestNoReferences(t *testing.T) { 390 | for _, tt := range []struct { 391 | name string 392 | input *Rule 393 | want bool 394 | }{ 395 | { 396 | name: "has a reference", 397 | input: &Rule{ 398 | References: []*Reference{ 399 | { 400 | Type: "md5", 401 | Value: "68b329da9893e34099c7d8ad5cb9c940", 402 | }, 403 | }, 404 | }, 405 | want: false, 406 | }, 407 | { 408 | name: "has multiple reference", 409 | input: &Rule{ 410 | References: []*Reference{ 411 | { 412 | Type: "md5", 413 | Value: "68b329da9893e34099c7d8ad5cb9c940", 414 | }, 415 | { 416 | Type: "md5", 417 | Value: "68b329da9893e34099c7d8ad5cb9c941", 418 | }, 419 | }, 420 | }, 421 | want: false, 422 | }, 423 | { 424 | name: "has no reference", 425 | input: &Rule{ 426 | References: []*Reference{}, 427 | }, 428 | want: true, 429 | }, 430 | } { 431 | got := tt.input.NoReferences() 432 | if got != tt.want { 433 | t.Fatalf("%s: got %v; want %v", tt.name, got, tt.want) 434 | } 435 | } 436 | } 437 | 438 | func TestOnlyShortContents(t *testing.T) { 439 | for _, tt := range []struct { 440 | name string 441 | input *Rule 442 | want bool 443 | }{ 444 | { 445 | name: "long enough content", 446 | input: &Rule{ 447 | Matchers: []orderedMatcher{ 448 | &Content{ 449 | Pattern: []byte("AAAAAAAAAA\r\n\r\n"), 450 | }, 451 | }, 452 | }, 453 | want: false, 454 | }, 455 | { 456 | name: "one long one short", 457 | input: &Rule{ 458 | Matchers: []orderedMatcher{ 459 | &Content{ 460 | Pattern: []byte("AAAAAAAAAA\r\n\r\n"), 461 | }, 462 | &Content{ 463 | Pattern: []byte("AAAA"), 464 | }, 465 | }, 466 | }, 467 | want: false, 468 | }, 469 | { 470 | name: "short content", 471 | input: &Rule{ 472 | Matchers: []orderedMatcher{ 473 | &Content{ 474 | Pattern: []byte("AAAA"), 475 | }, 476 | }, 477 | }, 478 | want: true, 479 | }, 480 | { 481 | name: "short content and non-content", 482 | input: &Rule{ 483 | Matchers: []orderedMatcher{ 484 | &Content{ 485 | Pattern: []byte("AAAA"), 486 | }, 487 | 
&ByteMatch{}, 488 | }, 489 | }, 490 | want: false, 491 | }, 492 | } { 493 | got := tt.input.OnlyShortContents() 494 | if got != tt.want { 495 | t.Fatalf("%s: got %v; want %v", tt.name, got, tt.want) 496 | } 497 | } 498 | } 499 | -------------------------------------------------------------------------------- /optimize.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "bytes" 20 | ) 21 | 22 | // Suricata 4.x content options mapped to Suricata 5.0 sticky buffers. 
23 | var cOptToStickyBuffer = map[string]DataPos{ 24 | // HTTP Content Modifiers 25 | "http_client_body": httpClientBody, 26 | "http_cookie": httpCookie, 27 | "http_header": httpHeader, 28 | "http_host": httpHost, 29 | "http_method": httpMethod, 30 | "http_raw_header": httpHeaderRaw, 31 | "http_raw_host": httpHostRaw, 32 | "http_raw_uri": httpURIRaw, 33 | "http_request_line": httpRequestLine5, 34 | "http_server_body": httpServerBody, 35 | "http_stat_code": httpStatCode, 36 | "http_stat_msg": httpStatMsg, 37 | "http_uri": httpURI, 38 | "http_user_agent": httpUserAgent, 39 | } 40 | 41 | var suri4StickyTo5Sticky = map[DataPos]DataPos{ 42 | fileData: fileData5, 43 | // HTTP 44 | httpAccept: httpAccept5, 45 | httpAcceptEnc: httpAcceptEnc5, 46 | httpAcceptLang: httpAcceptLang5, 47 | httpConnection: httpConnection5, 48 | httpContentLen: httpContentLen5, 49 | httpContentType: httpContentType5, 50 | httpHeaderNames: httpHeaderNames5, 51 | httpProtocol: httpProtocol5, 52 | httpReferer: httpReferer5, 53 | httpRequestLine: httpRequestLine5, 54 | httpResponseLine: httpResponseLine5, 55 | httpStart: httpStart5, 56 | // TLS 57 | tlsCertSubject: tlsCertSubject5, 58 | tlsCertIssuer: tlsCertIssuer5, 59 | tlsCertSerial: tlsCertSerial5, 60 | tlsCertFingerprint: tlsCertFingerprint5, 61 | tlsSNI: tlsSNI5, 62 | // JA3 63 | ja3Hash: ja3Hash5, 64 | ja3String: ja3String5, 65 | // SSH 66 | sshProto: sshProto5, 67 | sshSoftware: sshSoftware5, 68 | // DNS 69 | dnsQuery: dnsQuery5, 70 | } 71 | 72 | // OptimizeHTTP tunes an old style rule to leverage port independent HTTP detection. 73 | func (r *Rule) OptimizeHTTP() bool { 74 | if !r.ShouldBeHTTP() { 75 | return false 76 | } 77 | // Switch protocol to HTTP. 78 | r.Protocol = "http" 79 | 80 | // Make detection port independent. 
81 | for i, p := range r.Source.Ports { 82 | if p == "$HTTP_PORTS" { 83 | r.Source.Ports[i] = "any" 84 | } 85 | } 86 | 87 | for i, p := range r.Destination.Ports { 88 | if p == "$HTTP_PORTS" { 89 | r.Destination.Ports[i] = "any" 90 | } 91 | } 92 | 93 | // Annotate rule to indicate modification 94 | r.Metas = append(r.Metas, MetadataModifier("http_optimize")) 95 | return true 96 | } 97 | 98 | // SnortURILenFix will optimize a urilen keyword from a Snort rule for Suricata. 99 | func (r *Rule) SnortURILenFix() bool { 100 | var modified bool 101 | // Update this once we parse urilen in a better structure. 102 | for _, l := range r.LenMatchers() { 103 | if l.Kind == uriLen && l.Operator == "<>" { 104 | l.Min-- 105 | l.Max++ 106 | modified = true 107 | } 108 | setRaw := true 109 | for _, o := range l.Options { 110 | if o == "norm" || o == "raw" { 111 | // If Snort rule specified norm or raw, trust author. 112 | setRaw = false 113 | break 114 | } 115 | } 116 | // If author did not specify, set 'raw'. 117 | if setRaw { 118 | modified = true 119 | l.Options = append(l.Options, "raw") 120 | } 121 | } 122 | if modified { 123 | r.Metas = append(r.Metas, MetadataModifier("snort_urilen")) 124 | } 125 | return modified 126 | } 127 | 128 | // SnortHTTPHeaderFix will fix broken http_header matches. 129 | func (r *Rule) SnortHTTPHeaderFix() bool { 130 | var modified bool 131 | if !r.SnortHTTPHeader() { 132 | return false 133 | } 134 | for i, m := range r.Matchers { 135 | // If this is a content, check it out. 
136 | if c, ok := m.(*Content); ok { 137 | if c.SnortHTTPHeader() { 138 | modified = true 139 | c.Pattern = bytes.TrimSuffix(c.Pattern, []byte("\r\n")) 140 | if err := r.InsertMatcher(&ByteMatch{Kind: isDataAt, Negate: true, NumBytes: "1"}, i+1); err != nil { 141 | return false 142 | } 143 | } 144 | } 145 | } 146 | 147 | if modified { 148 | r.Metas = append(r.Metas, MetadataModifier("snort_http_header")) 149 | } 150 | return modified 151 | } 152 | 153 | // UpgradeToSuri5 optimizes a Suricata 4.x rule to Suricata 5.x features. 154 | func (r *Rule) UpgradeToSuri5() bool { 155 | var modified bool 156 | for _, c := range r.Contents() { 157 | for i, opt := range c.Options { 158 | if sticky, ok := cOptToStickyBuffer[opt.Name]; ok { 159 | // Remove the old modifier. 160 | // TODO(duane): Find a better way to handle this. If I break this into another function I need 161 | // to iterate again across everything. 162 | if i < len(c.Options)-1 { 163 | copy(c.Options[i:], c.Options[i+1:]) 164 | } 165 | c.Options[len(c.Options)-1] = nil // or the zero value of T 166 | c.Options = c.Options[:len(c.Options)-1] 167 | 168 | c.DataPosition = sticky 169 | modified = true 170 | } 171 | } 172 | // old sticky buffer to new sticky buffer 173 | if sticky, ok := suri4StickyTo5Sticky[c.DataPosition]; ok { 174 | c.DataPosition = sticky 175 | modified = true 176 | } 177 | } 178 | 179 | if modified { 180 | r.Metas = append(r.Metas, MetadataModifier("upgrade_to_suri5")) 181 | } 182 | return modified 183 | } 184 | 185 | // MetadataModifier returns a metadata that identifies a given modification. 186 | func MetadataModifier(s string) *Metadata { 187 | return &Metadata{Key: "gonids", Value: s} 188 | } 189 | -------------------------------------------------------------------------------- /optimize_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 
2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "testing" 20 | 21 | "github.com/kylelemons/godebug/pretty" 22 | ) 23 | 24 | func TestOptimizeHTTP(t *testing.T) { 25 | for _, tt := range []struct { 26 | name string 27 | input *Rule 28 | output *Rule 29 | wantMod bool 30 | }{ 31 | { 32 | name: "already http", 33 | input: &Rule{ 34 | Protocol: "http", 35 | }, 36 | wantMod: false, 37 | }, 38 | { 39 | name: "content option change", 40 | input: &Rule{ 41 | Protocol: "tcp", 42 | Source: Network{ 43 | Nets: []string{"$HOME_NET"}, 44 | Ports: []string{"any"}, 45 | }, 46 | Destination: Network{ 47 | Nets: []string{"$EXTERNAL_NET"}, 48 | Ports: []string{"$HTTP_PORTS"}, 49 | }, 50 | Matchers: []orderedMatcher{ 51 | &Content{ 52 | Pattern: []byte("AA"), 53 | Options: []*ContentOption{ 54 | {"http_header", ""}, 55 | }, 56 | }, 57 | }, 58 | }, 59 | output: &Rule{ 60 | Protocol: "http", 61 | Source: Network{ 62 | Nets: []string{"$HOME_NET"}, 63 | Ports: []string{"any"}, 64 | }, 65 | Destination: Network{ 66 | Nets: []string{"$EXTERNAL_NET"}, 67 | Ports: []string{"any"}, 68 | }, 69 | Matchers: []orderedMatcher{ 70 | &Content{ 71 | Pattern: []byte("AA"), 72 | Options: []*ContentOption{ 73 | {"http_header", ""}, 74 | }, 75 | }, 76 | }, 77 | Metas: Metadatas{ 78 | &Metadata{ 79 | Key: "gonids", 80 | Value: "http_optimize", 81 | }, 82 | }, 83 | }, 84 | wantMod: true, 85 | }, 86 | { 87 | name: "sticky buffer change", 
88 | input: &Rule{ 89 | Protocol: "tcp", 90 | Source: Network{ 91 | Nets: []string{"$HOME_NET"}, 92 | Ports: []string{"any"}, 93 | }, 94 | Destination: Network{ 95 | Nets: []string{"$EXTERNAL_NET"}, 96 | Ports: []string{"$HTTP_PORTS"}, 97 | }, 98 | Matchers: []orderedMatcher{ 99 | &Content{ 100 | DataPosition: httpProtocol, 101 | Pattern: []byte("AA"), 102 | }, 103 | }, 104 | }, 105 | output: &Rule{ 106 | Protocol: "http", 107 | Source: Network{ 108 | Nets: []string{"$HOME_NET"}, 109 | Ports: []string{"any"}, 110 | }, 111 | Destination: Network{ 112 | Nets: []string{"$EXTERNAL_NET"}, 113 | Ports: []string{"any"}, 114 | }, 115 | Matchers: []orderedMatcher{ 116 | &Content{ 117 | DataPosition: httpProtocol, 118 | Pattern: []byte("AA"), 119 | }, 120 | }, 121 | Metas: Metadatas{ 122 | &Metadata{ 123 | Key: "gonids", 124 | Value: "http_optimize", 125 | }, 126 | }, 127 | }, 128 | wantMod: true, 129 | }, 130 | } { 131 | gotMod := tt.input.OptimizeHTTP() 132 | // Expected modification. 133 | if gotMod != tt.wantMod { 134 | t.Fatalf("%s: gotMod %v; expected %v", tt.name, gotMod, tt.wantMod) 135 | } 136 | // Actual modifications correctness. 
137 | diff := pretty.Compare(tt.output, tt.input) 138 | if tt.wantMod && diff != "" { 139 | t.Fatalf("diff (-got +want):\n%s", diff) 140 | } 141 | } 142 | } 143 | 144 | func TestSnortURILenFix(t *testing.T) { 145 | for _, tt := range []struct { 146 | name string 147 | input *Rule 148 | output *Rule 149 | wantMod bool 150 | }{ 151 | { 152 | name: "urilen exact raw", 153 | input: &Rule{ 154 | Matchers: []orderedMatcher{ 155 | &LenMatch{ 156 | Kind: uriLen, 157 | Num: 3, 158 | Options: []string{"raw"}, 159 | }, 160 | }, 161 | }, 162 | wantMod: false, 163 | }, 164 | { 165 | name: "urilen exact norm", 166 | input: &Rule{ 167 | Matchers: []orderedMatcher{ 168 | &LenMatch{ 169 | Kind: uriLen, 170 | Num: 3, 171 | Options: []string{"norm"}, 172 | }, 173 | }, 174 | }, 175 | wantMod: false, 176 | }, 177 | { 178 | name: "urilen range", 179 | input: &Rule{ 180 | Matchers: []orderedMatcher{ 181 | &LenMatch{ 182 | Kind: uriLen, 183 | Min: 3, 184 | Max: 7, 185 | Operator: "<>", 186 | }, 187 | }, 188 | }, 189 | output: &Rule{ 190 | Matchers: []orderedMatcher{ 191 | &LenMatch{ 192 | Kind: uriLen, 193 | Min: 2, 194 | Max: 8, 195 | Operator: "<>", 196 | Options: []string{"raw"}, 197 | }, 198 | }, 199 | Metas: Metadatas{ 200 | &Metadata{ 201 | Key: "gonids", 202 | Value: "snort_urilen"}, 203 | }, 204 | }, 205 | wantMod: true, 206 | }, 207 | { 208 | name: "urilen exact", 209 | input: &Rule{ 210 | Matchers: []orderedMatcher{ 211 | &LenMatch{ 212 | Kind: uriLen, 213 | Num: 3, 214 | }, 215 | }, 216 | }, 217 | output: &Rule{ 218 | Matchers: []orderedMatcher{ 219 | &LenMatch{ 220 | Kind: uriLen, 221 | Num: 3, 222 | Options: []string{"raw"}, 223 | }, 224 | }, 225 | Metas: Metadatas{ 226 | &Metadata{ 227 | Key: "gonids", 228 | Value: "snort_urilen"}, 229 | }, 230 | }, 231 | wantMod: true, 232 | }, 233 | { 234 | name: "urilen range norm", 235 | input: &Rule{ 236 | Matchers: []orderedMatcher{ 237 | &LenMatch{ 238 | Kind: uriLen, 239 | Min: 3, 240 | Max: 7, 241 | Operator: "<>", 242 | Options: 
[]string{"norm"}, 243 | }, 244 | }, 245 | }, 246 | output: &Rule{ 247 | Matchers: []orderedMatcher{ 248 | &LenMatch{ 249 | Kind: uriLen, 250 | Min: 2, 251 | Max: 8, 252 | Operator: "<>", 253 | Options: []string{"norm"}, 254 | }, 255 | }, 256 | Metas: Metadatas{ 257 | &Metadata{ 258 | Key: "gonids", 259 | Value: "snort_urilen"}, 260 | }, 261 | }, 262 | wantMod: true, 263 | }, 264 | } { 265 | gotMod := tt.input.SnortURILenFix() 266 | // Expected modification. 267 | if gotMod != tt.wantMod { 268 | t.Fatalf("%s: gotMod %v; expected %v", tt.name, gotMod, tt.wantMod) 269 | } 270 | // Actual modifications correctness. 271 | diff := pretty.Compare(tt.output, tt.input) 272 | if tt.wantMod && diff != "" { 273 | t.Fatalf("diff (-got +want):\n%s", diff) 274 | } 275 | } 276 | } 277 | 278 | func TestSnortHTTPHeaderFix(t *testing.T) { 279 | for _, tt := range []struct { 280 | name string 281 | input *Rule 282 | output *Rule 283 | wantMod bool 284 | }{ 285 | { 286 | name: "basic test", 287 | input: &Rule{ 288 | Matchers: []orderedMatcher{ 289 | &Content{ 290 | Pattern: []byte("foobar\r\n\r\n"), 291 | Options: []*ContentOption{ 292 | {"http_header", ""}, 293 | }, 294 | }, 295 | }, 296 | }, 297 | output: &Rule{ 298 | Matchers: []orderedMatcher{ 299 | &Content{ 300 | Pattern: []byte("foobar\r\n"), 301 | Options: []*ContentOption{ 302 | {"http_header", ""}, 303 | }, 304 | }, 305 | &ByteMatch{ 306 | Kind: isDataAt, 307 | Negate: true, 308 | NumBytes: "1", 309 | }, 310 | }, 311 | Metas: Metadatas{ 312 | &Metadata{ 313 | Key: "gonids", 314 | Value: "snort_http_header"}, 315 | }, 316 | }, 317 | 318 | wantMod: true, 319 | }, 320 | { 321 | name: "insert middle", 322 | input: &Rule{ 323 | Matchers: []orderedMatcher{ 324 | &Content{ 325 | Pattern: []byte("foo"), 326 | Options: []*ContentOption{ 327 | {"http_header", ""}, 328 | }, 329 | }, 330 | &Content{ 331 | Pattern: []byte("bar\r\n\r\n"), 332 | Options: []*ContentOption{ 333 | {"http_header", ""}, 334 | }, 335 | }, 336 | &Content{ 337 | 
Pattern: []byte("baz"), 338 | Options: []*ContentOption{ 339 | {"http_header", ""}, 340 | }, 341 | }, 342 | }, 343 | }, 344 | output: &Rule{ 345 | Matchers: []orderedMatcher{ 346 | &Content{ 347 | Pattern: []byte("foo"), 348 | Options: []*ContentOption{ 349 | {"http_header", ""}, 350 | }, 351 | }, 352 | &Content{ 353 | Pattern: []byte("bar\r\n"), 354 | Options: []*ContentOption{ 355 | {"http_header", ""}, 356 | }, 357 | }, 358 | &ByteMatch{ 359 | Kind: isDataAt, 360 | Negate: true, 361 | NumBytes: "1", 362 | }, 363 | &Content{ 364 | Pattern: []byte("baz"), 365 | Options: []*ContentOption{ 366 | {"http_header", ""}, 367 | }, 368 | }, 369 | }, 370 | Metas: Metadatas{ 371 | &Metadata{ 372 | Key: "gonids", 373 | Value: "snort_http_header"}, 374 | }, 375 | }, 376 | 377 | wantMod: true, 378 | }, 379 | } { 380 | gotMod := tt.input.SnortHTTPHeaderFix() 381 | // Expected modification. 382 | if gotMod != tt.wantMod { 383 | t.Fatalf("%s: gotMod %v; expected %v", tt.name, gotMod, tt.wantMod) 384 | } 385 | // Actual modifications correctness. 
386 | diff := pretty.Compare(tt.output, tt.input) 387 | if tt.wantMod && diff != "" { 388 | t.Fatalf("diff (-got +want):\n%s", diff) 389 | } 390 | } 391 | } 392 | 393 | func TestUpgradeToSuri5(t *testing.T) { 394 | for _, tt := range []struct { 395 | name string 396 | input *Rule 397 | output *Rule 398 | wantMod bool 399 | }{ 400 | { 401 | name: "content modifier", 402 | input: &Rule{ 403 | Matchers: []orderedMatcher{ 404 | &Content{ 405 | Pattern: []byte("/foo.php"), 406 | Options: []*ContentOption{ 407 | {"http_uri", ""}, 408 | }, 409 | }, 410 | &Content{ 411 | Pattern: []byte("?bar=baz"), 412 | Options: []*ContentOption{ 413 | {"http_uri", ""}, 414 | }, 415 | }, 416 | }, 417 | }, 418 | output: &Rule{ 419 | Matchers: []orderedMatcher{ 420 | &Content{ 421 | DataPosition: httpURI, 422 | Pattern: []byte("/foo.php"), 423 | }, 424 | &Content{ 425 | DataPosition: httpURI, 426 | Pattern: []byte("?bar=baz"), 427 | }, 428 | }, 429 | Metas: Metadatas{ 430 | &Metadata{ 431 | Key: "gonids", 432 | Value: "upgrade_to_suri5"}, 433 | }, 434 | }, 435 | 436 | wantMod: true, 437 | }, 438 | { 439 | name: "old sticky buffer", 440 | input: &Rule{ 441 | Matchers: []orderedMatcher{ 442 | &Content{ 443 | DataPosition: httpRequestLine, 444 | Pattern: []byte("foo.php"), 445 | }, 446 | }, 447 | }, 448 | output: &Rule{ 449 | Matchers: []orderedMatcher{ 450 | &Content{ 451 | DataPosition: httpRequestLine5, 452 | Pattern: []byte("foo.php"), 453 | }, 454 | }, 455 | Metas: Metadatas{ 456 | &Metadata{ 457 | Key: "gonids", 458 | Value: "upgrade_to_suri5"}, 459 | }, 460 | }, 461 | 462 | wantMod: true, 463 | }, 464 | { 465 | name: "old sticky buffer", 466 | input: &Rule{ 467 | Matchers: []orderedMatcher{ 468 | &Content{ 469 | Pattern: []byte("/foo.php"), 470 | Options: []*ContentOption{ 471 | {"http_uri", ""}, 472 | }, 473 | }, 474 | 475 | &Content{ 476 | DataPosition: httpRequestLine, 477 | Pattern: []byte("bar"), 478 | }, 479 | &Content{ 480 | Pattern: []byte("?baz=bop"), 481 | Options: 
[]*ContentOption{ 482 | {"http_uri", ""}, 483 | }, 484 | }, 485 | }, 486 | }, 487 | output: &Rule{ 488 | Matchers: []orderedMatcher{ 489 | &Content{ 490 | DataPosition: httpURI, 491 | Pattern: []byte("/foo.php"), 492 | }, 493 | 494 | &Content{ 495 | DataPosition: httpRequestLine5, 496 | Pattern: []byte("bar"), 497 | }, 498 | &Content{ 499 | DataPosition: httpURI, 500 | Pattern: []byte("?baz=bop"), 501 | }, 502 | }, 503 | Metas: Metadatas{ 504 | &Metadata{ 505 | Key: "gonids", 506 | Value: "upgrade_to_suri5"}, 507 | }, 508 | }, 509 | 510 | wantMod: true, 511 | }, 512 | } { 513 | gotMod := tt.input.UpgradeToSuri5() 514 | // Expected modification. 515 | if gotMod != tt.wantMod { 516 | t.Fatalf("%s: gotMod %v; expected %v", tt.name, gotMod, tt.wantMod) 517 | } 518 | // Actual modifications correctness. 519 | diff := pretty.Compare(tt.output, tt.input) 520 | if tt.wantMod && diff != "" { 521 | t.Fatalf("diff (-got +want):\n%s", diff) 522 | } 523 | } 524 | } 525 | -------------------------------------------------------------------------------- /parser.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | // Package gonids implements a basic parser of IDS rules. 17 | // 18 | // For now the parser is very basic and it only parses a subset of fields. 19 | // We intentionally omit http_encode as it doesn't seem to be used in practice. 
20 | package gonids 21 | 22 | import ( 23 | "encoding/hex" 24 | "errors" 25 | "fmt" 26 | "net" 27 | "regexp" 28 | "strconv" 29 | "strings" 30 | ) 31 | 32 | // hexRE matches on hexadecimal content like |41 41 41| for example. 33 | var hexRE = regexp.MustCompile(`(?i)(\|(?:\s*[a-f0-9]{2}\s*)+\|)`) 34 | 35 | // escapeRE matches char that needs to escaped in regexp. 36 | var escapeRE = regexp.MustCompile(`([()+.'\\])`) 37 | 38 | // escapeContent matches escaped special characters. 39 | var escapeContent = regexp.MustCompile(`\\([\\;":])`) 40 | 41 | // metaSplitRE matches string in metadata. 42 | var metaSplitRE = regexp.MustCompile(`,\s*`) 43 | 44 | // nestedNetRE matches nested network groups. 45 | var nestedNetRE = regexp.MustCompile(`,(!?\[[^]]*\])`) 46 | 47 | // portSplitRE splits port lists and ranges for validation. 48 | var portSplitRE = regexp.MustCompile(`[:,]`) 49 | 50 | var appLayerProtocols = []string{ 51 | "dcerpc", 52 | "dhcp", 53 | "dnp3", 54 | "dns", 55 | "enip", 56 | "ftp", 57 | "ftp-data", 58 | "http", 59 | "http2", 60 | "icmp", 61 | "icmpv4", 62 | "icmpv6", 63 | "ikev2", 64 | "imap", 65 | "ip", 66 | "ip4", 67 | "ip6", 68 | "ipv4", 69 | "ipv6", 70 | "irc", 71 | "jabber", 72 | "krb5", 73 | "modbus", 74 | "mqtt", 75 | "nfs", 76 | "ntp", 77 | "pkthdr", 78 | "rdp", 79 | "rfb", 80 | "sctp", 81 | "sip", 82 | "smb", 83 | "smtp", 84 | "snmp", 85 | "ssh", 86 | "tcp", 87 | "tcp-pkt", 88 | "tcp-stream", 89 | "tftp", 90 | "tls", 91 | "udp", 92 | } 93 | 94 | // parseContent decodes rule content match. For now it only takes care of escaped and hex 95 | // encoded content. 96 | func parseContent(content string) ([]byte, error) { 97 | // Decode and replace all occurrences of hexadecimal content. 
98 | var errpanic error 99 | defer func() { 100 | r := recover() 101 | if r != nil { 102 | errpanic = fmt.Errorf("recovered from panic: %v", r) 103 | } 104 | }() 105 | 106 | if containsUnescaped(content) { 107 | return nil, fmt.Errorf("invalid special characters escaping") 108 | } 109 | 110 | b := escapeContent.ReplaceAllString(content, "$1") 111 | 112 | b = hexRE.ReplaceAllStringFunc(b, 113 | func(h string) string { 114 | r, err := hex.DecodeString(strings.Replace(strings.Trim(h, "|"), " ", "", -1)) 115 | if err != nil { 116 | panic("invalid hexRE regexp") 117 | } 118 | return string(r) 119 | }) 120 | return []byte(b), errpanic 121 | } 122 | 123 | // parsePCRE parses the components of a PCRE. Returns PCRE struct. 124 | func parsePCRE(s string) (*PCRE, error) { 125 | c := strings.Count(s, "/") 126 | if c < 2 { 127 | return nil, fmt.Errorf("all pcre patterns must contain at least 2 '/', found: %d", c) 128 | } 129 | 130 | l := strings.LastIndex(s, "/") 131 | if l < 0 { 132 | return nil, fmt.Errorf("couldn't find options in PCRE") 133 | } 134 | 135 | i := strings.Index(s, "/") 136 | if l < 0 { 137 | return nil, fmt.Errorf("couldn't find start of pattern") 138 | } 139 | 140 | return &PCRE{ 141 | Pattern: []byte(s[i+1 : l]), 142 | Options: []byte(s[l+1:]), 143 | }, nil 144 | } 145 | 146 | // parseLenMatch parses a LenMatch (like urilen). 147 | func parseLenMatch(k lenMatchType, s string) (*LenMatch, error) { 148 | m := new(LenMatch) 149 | m.Kind = k 150 | switch { 151 | // Simple case, no operators. 152 | case !strings.ContainsAny(s, "><"): 153 | // Ignore options after ','. 154 | numTmp := strings.Split(s, ",")[0] 155 | num, err := strconv.Atoi(strings.TrimSpace(numTmp)) 156 | if err != nil { 157 | return nil, fmt.Errorf("%v is not an integer", s) 158 | } 159 | m.Num = num 160 | 161 | // Leading operator, single number. 162 | case strings.HasPrefix(s, ">") || strings.HasPrefix(s, "<"): 163 | m.Operator = s[0:1] 164 | // Strip leading < or >. 
165 | numTmp := strings.TrimLeft(s, "><") 166 | // Ignore options after ','. 167 | numTmp = strings.Split(numTmp, ",")[0] 168 | num, err := strconv.Atoi(strings.TrimSpace(numTmp)) 169 | if err != nil { 170 | return nil, fmt.Errorf("%v is not an integer", s) 171 | } 172 | m.Num = num 173 | 174 | // Min/Max center operator. 175 | case strings.Contains(s, "<>"): 176 | m.Operator = "<>" 177 | parts := strings.Split(s, "<>") 178 | if len(parts) != 2 { 179 | return nil, fmt.Errorf("must have exactly 2 parts for min/max operator. got %d", len(parts)) 180 | } 181 | var min, max int 182 | var err error 183 | min, err = strconv.Atoi(strings.TrimSpace(parts[0])) 184 | if err != nil { 185 | return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(parts[0])) 186 | } 187 | maxTmp := strings.Split(parts[1], ",")[0] 188 | max, err = strconv.Atoi(strings.TrimSpace(maxTmp)) 189 | if err != nil { 190 | return nil, fmt.Errorf("%v is not an integer", strings.TrimSpace(maxTmp)) 191 | } 192 | // Do stuff to handle options here. 193 | m.Min = min 194 | m.Max = max 195 | } 196 | 197 | // Parse options: 198 | if strings.Contains(s, ",") { 199 | opts := strings.Split(s, ",")[1:] 200 | for i, o := range opts { 201 | opts[i] = strings.TrimSpace(o) 202 | } 203 | m.Options = opts 204 | } 205 | return m, nil 206 | } 207 | 208 | func parseBase64Decode(k byteMatchType, s string) (*ByteMatch, error) { 209 | if k != b64Decode { 210 | return nil, fmt.Errorf("kind %v is not base64_decode", k) 211 | } 212 | b := new(ByteMatch) 213 | b.Kind = k 214 | 215 | // All options to base64_decode are optional, and specified by their keyword. 
216 | for _, p := range strings.Split(s, ",") { 217 | v := strings.TrimSpace(p) 218 | switch { 219 | case strings.HasPrefix(v, "bytes"): 220 | b.NumBytes = strings.TrimSpace(strings.SplitAfter(v, "bytes")[1]) 221 | case strings.HasPrefix(v, "offset"): 222 | val := strings.TrimSpace(strings.SplitAfter(v, "offset")[1]) 223 | i, err := strconv.Atoi(val) 224 | if err != nil { 225 | return nil, fmt.Errorf("offset is not an int: %s; %s", val, err) 226 | } 227 | if i < 1 { 228 | return nil, fmt.Errorf("offset must be positive, non-zero values only") 229 | } 230 | b.Offset = i 231 | case strings.HasPrefix(v, "relative"): 232 | b.Options = []string{"relative"} 233 | } 234 | } 235 | return b, nil 236 | } 237 | 238 | // parseByteMatch parses a ByteMatch. 239 | func parseByteMatch(k byteMatchType, s string) (*ByteMatch, error) { 240 | b := new(ByteMatch) 241 | b.Kind = k 242 | 243 | parts := strings.Split(s, ",") 244 | 245 | // Num bytes is required for all byteMatchType keywords. 246 | if len(parts) < 1 { 247 | return nil, fmt.Errorf("%s keyword has %d parts", s, len(parts)) 248 | } 249 | 250 | b.NumBytes = strings.TrimSpace(parts[0]) 251 | 252 | if len(parts) < b.Kind.minLen() { 253 | return nil, fmt.Errorf("invalid %s length: %d", b.Kind, len(parts)) 254 | } 255 | 256 | if k == bExtract || k == bJump { 257 | // Parse offset. 258 | offset, err := strconv.Atoi(strings.TrimSpace(parts[1])) 259 | if err != nil { 260 | return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) 261 | } 262 | b.Offset = offset 263 | } 264 | 265 | if k == bExtract { 266 | // Parse variable name. 267 | name := parts[2] 268 | b.Variable = name 269 | } 270 | 271 | if k == bTest { 272 | // Parse operator. 273 | b.Operator = strings.TrimSpace(parts[1]) 274 | // Parse value. Can use a variable. 275 | b.Value = strings.TrimSpace(parts[2]) 276 | // Parse offset. 
277 | offset, err := strconv.Atoi(strings.TrimSpace(parts[3])) 278 | if err != nil { 279 | return nil, fmt.Errorf("%s offset is not an int: %v; %s", b.Kind, parts[1], err) 280 | } 281 | b.Offset = offset 282 | } 283 | 284 | // The rest of the options, for all types not b64decode 285 | for i, l := b.Kind.minLen(), len(parts); i < l; i++ { 286 | parts[i] = strings.TrimSpace(parts[i]) 287 | b.Options = append(b.Options, parts[i]) 288 | } 289 | 290 | return b, nil 291 | } 292 | 293 | // parseFlowbit parses a flowbit. 294 | func parseFlowbit(s string) (*Flowbit, error) { 295 | parts := strings.Split(s, ",") 296 | if len(parts) < 1 { 297 | return nil, fmt.Errorf("couldn't parse flowbit string: %s", s) 298 | } 299 | // Ensure all actions are of valid type. 300 | a := strings.TrimSpace(parts[0]) 301 | if !inSlice(a, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) { 302 | return nil, fmt.Errorf("invalid action for flowbit: %s", a) 303 | } 304 | fb := &Flowbit{ 305 | Action: a, 306 | } 307 | if fb.Action == "noalert" && len(parts) > 1 { 308 | return nil, fmt.Errorf("noalert shouldn't have a value") 309 | } 310 | if len(parts) == 2 { 311 | fb.Value = strings.TrimSpace(parts[1]) 312 | } 313 | return fb, nil 314 | } 315 | 316 | // parseXbit parses an xbit. 317 | func parseXbit(s string) (*Xbit, error) { 318 | parts := strings.Split(s, ",") 319 | // All xbits must have an action, name and track 320 | if len(parts) < 3 { 321 | return nil, fmt.Errorf("not enough parts for xbits: %s", s) 322 | } 323 | // Ensure all actions are of valid type. 324 | a := strings.TrimSpace(parts[0]) 325 | if !inSlice(a, []string{"set", "unset", "isset", "isnotset", "toggle"}) { 326 | return nil, fmt.Errorf("invalid action for xbits: %s", a) 327 | } 328 | xb := &Xbit{ 329 | Action: a, 330 | Name: strings.TrimSpace(parts[1]), 331 | } 332 | 333 | // Track. 
334 | t := strings.Fields(parts[2]) 335 | if len(t) != 2 { 336 | return nil, fmt.Errorf("wrong number of parts for track: %v", t) 337 | } 338 | if t[0] != "track" { 339 | return nil, fmt.Errorf("%s should be 'track'", t[0]) 340 | } 341 | xb.Track = t[1] 342 | 343 | // Expire 344 | if len(parts) == 4 { 345 | e := strings.Fields(parts[3]) 346 | if len(e) != 2 { 347 | return nil, fmt.Errorf("wrong number of parts for expire: %v", e) 348 | } 349 | if e[0] != "expire" { 350 | return nil, fmt.Errorf("%s should be 'expire'", e[0]) 351 | } 352 | xb.Expire = e[1] 353 | } 354 | return xb, nil 355 | 356 | } 357 | 358 | // parseFlowint parses a flowint. 359 | func parseFlowint(s string) (*Flowint, error) { 360 | parts := strings.Split(s, ",") 361 | // All flowints must have a name and modifier 362 | if len(parts) < 2 { 363 | return nil, fmt.Errorf("not enough parts for flowint: %s", s) 364 | } 365 | // Ensure all actions are of valid type. 366 | m := strings.TrimSpace(parts[1]) 367 | if !inSlice(m, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!=", "isset", "isnotset"}) { 368 | return nil, fmt.Errorf("invalid modifier for flowint: %s", m) 369 | } 370 | fi := &Flowint{ 371 | Name: strings.TrimSpace(parts[0]), 372 | Modifier: m, 373 | } 374 | 375 | if len(parts) == 3 { 376 | fi.Value = strings.TrimSpace(parts[2]) 377 | } 378 | 379 | return fi, nil 380 | } 381 | 382 | // containsUnescaped checks content whether special characters are properly escaped. 
383 | func containsUnescaped(s string) bool { 384 | esc := false 385 | 386 | for _, b := range s { 387 | if esc { 388 | switch b { 389 | case '\\', ';', '"', ':': 390 | esc = false 391 | default: 392 | return true 393 | } 394 | } else { 395 | switch b { 396 | case '\\': 397 | esc = true 398 | case ';', '"': 399 | return true 400 | } 401 | } 402 | } 403 | 404 | return esc 405 | } 406 | 407 | func unquote(s string) string { 408 | if strings.IndexByte(s, '"') < 0 { 409 | return s 410 | } 411 | return strings.Replace(s, `\"`, `"`, -1) 412 | } 413 | 414 | func inSlice(str string, strings []string) bool { 415 | for _, k := range strings { 416 | if str == k { 417 | return true 418 | } 419 | } 420 | return false 421 | } 422 | 423 | // comment decodes a comment (commented rule, or just a comment.) 424 | func (r *Rule) comment(key item, l *lexer) error { 425 | if key.typ != itemComment { 426 | panic("item is not a comment") 427 | } 428 | if r.Disabled { 429 | // ignoring comment for rule with empty action 430 | return nil 431 | } 432 | rule, err := parseRuleAux(key.value, true) 433 | 434 | // If there was an error this means the comment is not a rule. 435 | if err != nil { 436 | return fmt.Errorf("this is not a rule: %s", err) 437 | } 438 | 439 | // We parsed a rule, this was a comment so set the rule to disabled. 440 | rule.Disabled = true 441 | 442 | // Overwrite the rule we're working on with the recently parsed, disabled rule. 443 | *r = *rule 444 | return nil 445 | } 446 | 447 | // action decodes an IDS rule option based on its key. 448 | func (r *Rule) action(key item, l *lexer) error { 449 | if key.typ != itemAction { 450 | panic("item is not an action") 451 | } 452 | if !inSlice(key.value, []string{"alert", "drop", "pass"}) { 453 | return fmt.Errorf("invalid action: %v", key.value) 454 | } 455 | r.Action = key.value 456 | return nil 457 | } 458 | 459 | // protocol decodes an IDS rule protocol based on its key. 
460 | func (r *Rule) protocol(key item, l *lexer) error { 461 | if key.typ != itemProtocol { 462 | panic("item is not a protocol") 463 | } 464 | if !inSlice(key.value, appLayerProtocols) { 465 | return fmt.Errorf("invalid protocol: %v", key.value) 466 | } 467 | r.Protocol = key.value 468 | return nil 469 | } 470 | 471 | // network decodes an IDS rule network (networks and ports) based on its key. 472 | func (r *Rule) network(key item, l *lexer) error { 473 | // This is a hack. We use a regexp to replace the outer `,` with `___` 474 | // to give us a discrete string to split on, avoiding the inner `,`. 475 | 476 | // Specify TrimSuffix and TrimPrefix to ensure only one instance of `[` and `]` are trimmed. 477 | tmp := strings.TrimSuffix(strings.TrimPrefix(key.value, "["), "]") 478 | items := strings.Split(nestedNetRE.ReplaceAllString(tmp, "___${1}"), "___") 479 | 480 | // Validate that no items contain spaces. 481 | for _, i := range items { 482 | if len(strings.Fields(i)) > 1 || len(strings.TrimSpace(i)) != len(i) { 483 | return fmt.Errorf("network component contains spaces: %v", i) 484 | } 485 | } 486 | switch key.typ { 487 | case itemSourceAddress: 488 | if validNetworks(items) { 489 | r.Source.Nets = append(r.Source.Nets, items...) 490 | } else { 491 | return fmt.Errorf("some or all source ips are invalid: %v", items) 492 | } 493 | case itemSourcePort: 494 | if portsValid(items) { 495 | r.Source.Ports = append(r.Source.Ports, items...) 496 | } else { 497 | return fmt.Errorf("some or all source ports are invalid: %v", items) 498 | } 499 | case itemDestinationAddress: 500 | if validNetworks(items) { 501 | r.Destination.Nets = append(r.Destination.Nets, items...) 502 | } else { 503 | return fmt.Errorf("some or all destination ips are invalid: %v", items) 504 | } 505 | case itemDestinationPort: 506 | if portsValid(items) { 507 | r.Destination.Ports = append(r.Destination.Ports, items...) 
508 | } else { 509 | return fmt.Errorf("some or all destination ports are invalid: %v", items) 510 | } 511 | default: 512 | panic("item is not a network component") 513 | } 514 | return nil 515 | } 516 | 517 | // Validate that every item is between 1 and 65535. 518 | func portsValid(p []string) bool { 519 | for _, u := range p { 520 | 521 | if strings.Count(u, "[") != strings.Count(u, "]") { 522 | // unbalanced groups. 523 | return false 524 | } 525 | 526 | u = strings.TrimPrefix(u, "!") 527 | // If this port range is a grouping, check the inner group. 528 | if strings.HasPrefix(u, "[") { 529 | if portsValid(strings.Split(strings.Trim(u, "[]"), ",")) { 530 | continue 531 | } 532 | return false 533 | } 534 | 535 | ports := portSplitRE.Split(u, -1) 536 | for _, port := range ports { 537 | port = strings.TrimPrefix(port, "!") 538 | if port == "any" || port == "" || strings.HasPrefix(port, "$") { 539 | continue 540 | } 541 | x, err := strconv.Atoi(port) 542 | if err != nil { 543 | return false 544 | } 545 | if x > 65535 || x < 0 { 546 | return false 547 | } 548 | } 549 | } 550 | return true 551 | } 552 | 553 | // Validate item is either a valid ip or ip range. 554 | func validNetwork(i string) bool { 555 | _, _, err := net.ParseCIDR(i) 556 | if err == nil { 557 | return true 558 | } 559 | if net.ParseIP(i) != nil { 560 | return true 561 | } 562 | return false 563 | } 564 | 565 | // Validate every item is either a valid ip or ip range. 566 | func validNetworks(nets []string) bool { 567 | for _, net := range nets { 568 | if strings.Count(net, "[") != strings.Count(net, "]") { 569 | // unbalanced groups. 570 | return false 571 | } 572 | 573 | net = strings.TrimPrefix(net, "!") 574 | // If this network is a grouping, check the inner group. 
575 | if strings.HasPrefix(net, "[") || strings.Contains(net, ",") { 576 | if validNetworks(strings.Split(strings.Trim(net, "[]"), ",")) { 577 | continue 578 | } 579 | return false 580 | } 581 | switch { 582 | case net == "any": 583 | continue 584 | case strings.HasPrefix(net, "$"): 585 | continue 586 | case !validNetwork(net): 587 | return false 588 | } 589 | } 590 | return true 591 | } 592 | 593 | // direction decodes an IDS rule direction based on its key. 594 | func (r *Rule) direction(key item, l *lexer) error { 595 | if key.typ != itemDirection { 596 | panic("item is not a direction") 597 | } 598 | switch key.value { 599 | case "->": 600 | r.Bidirectional = false 601 | case "<>": 602 | r.Bidirectional = true 603 | default: 604 | return fmt.Errorf("invalid direction operator %q", key.value) 605 | } 606 | return nil 607 | } 608 | 609 | var dataPosition = pktData 610 | 611 | // option decodes an IDS rule option based on its key. 612 | func (r *Rule) option(key item, l *lexer) error { 613 | if key.typ != itemOptionKey { 614 | panic("item is not an option key") 615 | } 616 | switch { 617 | // TODO: Many of these simple tags could be factored into nicer structures. 
618 | case inSlice(key.value, []string{"classtype", "flow", "tag", "priority", "app-layer-protocol", "noalert", "target", 619 | "flags", "ipopts", "ip_proto", "geoip", "fragbits", "fragoffset", "tos", 620 | "window", 621 | "threshold", "detection_filter", 622 | "dce_iface", "dce_opnum", "dce_stub_data", 623 | "asn1"}): 624 | nextItem := l.nextItem() 625 | if nextItem.typ != itemOptionValue { 626 | return fmt.Errorf("no valid value for %s tag", key.value) 627 | } 628 | if r.Tags == nil { 629 | r.Tags = make(map[string]string) 630 | } 631 | r.Tags[key.value] = nextItem.value 632 | case inSlice(key.value, []string{"sameip", "tls.store", "ftpbounce"}): 633 | r.Statements = append(r.Statements, key.value) 634 | case inSlice(key.value, tlsTags): 635 | t := &TLSTag{ 636 | Key: key.value, 637 | } 638 | nextItem := l.nextItem() 639 | if nextItem.typ == itemNot { 640 | t.Negate = true 641 | nextItem = l.nextItem() 642 | } 643 | t.Value = nextItem.value 644 | r.TLSTags = append(r.TLSTags, t) 645 | case key.value == "stream_size": 646 | nextItem := l.nextItem() 647 | parts := strings.Split(nextItem.value, ",") 648 | if len(parts) != 3 { 649 | return fmt.Errorf("invalid number of parts for stream_size: %d", len(parts)) 650 | } 651 | num, err := strconv.Atoi(strings.TrimSpace(parts[2])) 652 | if err != nil { 653 | return fmt.Errorf("comparison number is not an integer: %v", parts[2]) 654 | } 655 | r.StreamMatch = &StreamCmp{ 656 | Direction: parts[0], 657 | Operator: parts[1], 658 | Number: num, 659 | } 660 | case key.value == "reference": 661 | nextItem := l.nextItem() 662 | if nextItem.typ != itemOptionValue { 663 | return errors.New("no valid value for reference") 664 | } 665 | refs := strings.SplitN(nextItem.value, ",", 2) 666 | if len(refs) != 2 { 667 | return fmt.Errorf("invalid reference definition: %s", refs) 668 | } 669 | r.References = append(r.References, &Reference{Type: refs[0], Value: refs[1]}) 670 | case key.value == "metadata": 671 | nextItem := l.nextItem() 672 
| if nextItem.typ != itemOptionValue { 673 | return errors.New("no valid value for metadata") 674 | } 675 | metas := metaSplitRE.Split(nextItem.value, -1) 676 | for _, kv := range metas { 677 | metaTmp := strings.SplitN(kv, " ", 2) 678 | if len(metaTmp) != 2 { 679 | return fmt.Errorf("invalid metadata definition: %s", metaTmp) 680 | } 681 | r.Metas = append(r.Metas, &Metadata{Key: strings.TrimSpace(metaTmp[0]), Value: strings.TrimSpace(metaTmp[1])}) 682 | } 683 | case key.value == "sid": 684 | nextItem := l.nextItem() 685 | if nextItem.typ != itemOptionValue { 686 | return errors.New("no value for option sid") 687 | } 688 | sid, err := strconv.Atoi(nextItem.value) 689 | if err != nil { 690 | return fmt.Errorf("invalid sid %s", nextItem.value) 691 | } 692 | r.SID = sid 693 | case key.value == "rev": 694 | nextItem := l.nextItem() 695 | if nextItem.typ != itemOptionValue { 696 | return errors.New("no value for option rev") 697 | } 698 | rev, err := strconv.Atoi(nextItem.value) 699 | if err != nil { 700 | return fmt.Errorf("invalid rev %s", nextItem.value) 701 | } 702 | r.Revision = rev 703 | case key.value == "msg": 704 | nextItem := l.nextItem() 705 | if nextItem.typ != itemOptionValueString { 706 | return errors.New("no value for option msg") 707 | } 708 | r.Description = nextItem.value 709 | case isStickyBuffer(key.value): 710 | var d DataPos 711 | var err error 712 | if d, err = StickyBuffer(key.value); err != nil { 713 | return err 714 | } 715 | dataPosition = d 716 | case inSlice(key.value, []string{"content", "uricontent"}): 717 | nextItem := l.nextItem() 718 | negate := false 719 | if nextItem.typ == itemNot { 720 | nextItem = l.nextItem() 721 | negate = true 722 | } 723 | if nextItem.typ == itemOptionValueString { 724 | c, err := parseContent(nextItem.value) 725 | if err != nil { 726 | return err 727 | } 728 | var options []*ContentOption 729 | if key.value == "uricontent" { 730 | options = append(options, &ContentOption{Name: "http_uri"}) 731 | } 732 | con 
:= &Content{ 733 | DataPosition: dataPosition, 734 | Pattern: c, 735 | Negate: negate, 736 | Options: options, 737 | } 738 | r.Matchers = append(r.Matchers, con) 739 | } else { 740 | return fmt.Errorf("invalid type %q for option content", nextItem.typ) 741 | } 742 | case inSlice(key.value, []string{"http_cookie", "http_raw_cookie", "http_method", "http_header", "http_raw_header", 743 | "http_uri", "http_raw_uri", "http_user_agent", "http_stat_code", "http_stat_msg", 744 | "http_client_body", "http_server_body", "http_host", "nocase", "rawbytes", "startswith", "endswith"}): 745 | lastContent := r.LastContent() 746 | if lastContent == nil { 747 | return fmt.Errorf("invalid content option %q with no content match", key.value) 748 | } 749 | lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value}) 750 | case inSlice(key.value, []string{"depth", "distance", "offset", "within"}): 751 | lastContent := r.LastContent() 752 | if lastContent == nil { 753 | return fmt.Errorf("invalid content option %q with no content match", key.value) 754 | } 755 | nextItem := l.nextItem() 756 | if nextItem.typ != itemOptionValue { 757 | return fmt.Errorf("no value for content option %s", key.value) 758 | } 759 | 760 | lastContent.Options = append(lastContent.Options, &ContentOption{Name: key.value, Value: nextItem.value}) 761 | 762 | case key.value == "fast_pattern": 763 | lastContent := r.LastContent() 764 | if lastContent == nil { 765 | return fmt.Errorf("invalid content option %q with no content match", key.value) 766 | } 767 | var ( 768 | only bool 769 | offset int 770 | length int 771 | ) 772 | nextItem := l.nextItem() 773 | if nextItem.typ == itemOptionValue { 774 | v := nextItem.value 775 | switch { 776 | case v == "only": 777 | only = true 778 | case strings.Contains(v, ","): 779 | s := strings.Split(v, ",") 780 | i, err := strconv.Atoi(s[0]) 781 | if err != nil { 782 | return fmt.Errorf("fast_pattern offset is not an int: %s; %s", s[0], err) 783 | } 784 | 
offset = i 785 | i, err = strconv.Atoi(s[1]) 786 | if err != nil { 787 | return fmt.Errorf("fast_pattern length is not an int: %s; %s", s[1], err) 788 | } 789 | length = i 790 | } 791 | } 792 | lastContent.FastPattern = FastPattern{true, only, offset, length} 793 | case key.value == "pcre": 794 | nextItem := l.nextItem() 795 | negate := false 796 | if nextItem.typ == itemNot { 797 | nextItem = l.nextItem() 798 | negate = true 799 | } 800 | if nextItem.typ == itemOptionValueString { 801 | p, err := parsePCRE(unquote(nextItem.value)) 802 | if err != nil { 803 | return err 804 | } 805 | p.DataPosition = dataPosition 806 | p.Negate = negate 807 | r.Matchers = append(r.Matchers, p) 808 | } else { 809 | return fmt.Errorf("invalid type %q for option content", nextItem.typ) 810 | } 811 | case inSlice(key.value, allbyteMatchTypeNames()): 812 | k, err := byteMatcher(key.value) 813 | if err != nil { 814 | return fmt.Errorf("%s is not a supported byteMatchType keyword", key.value) 815 | } 816 | 817 | // Handle negation logic here, don't want to pass lexer to parseByteMatch. 818 | nextItem := l.nextItem() 819 | var negate bool 820 | if k == isDataAt && nextItem.typ == itemNot { 821 | negate = true 822 | nextItem = l.nextItem() 823 | } 824 | 825 | var b *ByteMatch 826 | // Parse base64_decode differently as it has odd semantics. 827 | if k == b64Decode { 828 | b, err = parseBase64Decode(k, nextItem.value) 829 | if err != nil { 830 | return fmt.Errorf("could not parse base64Decode: %v", err) 831 | } 832 | // base64_decode allows NumBytes to be empty, an int or a variable. 833 | if i, err := strconv.Atoi(b.NumBytes); err != nil && b.NumBytes != "" { 834 | // NumBytes is not an int, check if it is a variable from byte_extract. 
835 | if !r.HasVar(b.NumBytes) { 836 | return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) 837 | } else if i < 1 { 838 | return fmt.Errorf("bytes must be positive, non-zero values only: %d", i) 839 | } 840 | } 841 | } else { 842 | b, err = parseByteMatch(k, nextItem.value) 843 | if err != nil { 844 | return fmt.Errorf("could not parse byteMatch: %v", err) 845 | } 846 | if _, err := strconv.Atoi(b.NumBytes); err != nil { 847 | // NumBytes is not an int, check if it is a variable from byte_extract. 848 | if !r.HasVar(b.NumBytes) { 849 | return fmt.Errorf("number of bytes is not an int, or an extracted variable: %s; %s", b.NumBytes, err) 850 | } 851 | } 852 | } 853 | b.Negate = negate 854 | 855 | r.Matchers = append(r.Matchers, b) 856 | case inSlice(key.value, allLenMatchTypeNames()): 857 | k, err := lenMatcher(key.value) 858 | if err != nil { 859 | return fmt.Errorf("%s is not a support lenMatch keyword", key.value) 860 | } 861 | nextItem := l.nextItem() 862 | m, err := parseLenMatch(k, nextItem.value) 863 | if err != nil { 864 | return fmt.Errorf("could not parse LenMatch: %v", err) 865 | } 866 | m.DataPosition = dataPosition 867 | r.Matchers = append(r.Matchers, m) 868 | case key.value == "flowbits": 869 | nextItem := l.nextItem() 870 | fb, err := parseFlowbit(nextItem.value) 871 | if err != nil { 872 | return fmt.Errorf("error parsing flowbit: %v", err) 873 | } 874 | r.Flowbits = append(r.Flowbits, fb) 875 | case key.value == "xbits": 876 | nextItem := l.nextItem() 877 | xb, err := parseXbit(nextItem.value) 878 | if err != nil { 879 | return fmt.Errorf("error parsing xbits: %v", err) 880 | } 881 | r.Xbits = append(r.Xbits, xb) 882 | case key.value == "flowint": 883 | nextItem := l.nextItem() 884 | fi, err := parseFlowint(nextItem.value) 885 | if err != nil { 886 | return fmt.Errorf("error parsing flowint: %v", err) 887 | } 888 | r.Flowints = append(r.Flowints, fi) 889 | default: 890 | return 
&UnsupportedOptionError{ 891 | Options: []string{key.value}, 892 | } 893 | } 894 | return nil 895 | } 896 | 897 | // UnsupportedOptionError contains a partially parsed rule, and the options that aren't 898 | // supported for parsing. 899 | type UnsupportedOptionError struct { 900 | Rule *Rule 901 | Options []string 902 | } 903 | 904 | // Error returns a string for UnsupportedOptionError 905 | func (uoe *UnsupportedOptionError) Error() string { 906 | return fmt.Sprintf("rule contains unsupported option(s): %s", strings.Join(uoe.Options, ",")) 907 | } 908 | 909 | // parseRuleAux parses an IDS rule, optionally ignoring comments. 910 | func parseRuleAux(rule string, commented bool) (*Rule, error) { 911 | l, err := lex(rule) 912 | if err != nil { 913 | return nil, err 914 | } 915 | defer l.close() 916 | dataPosition = pktData 917 | r := &Rule{} 918 | var unsupportedOptions = make([]string, 0, 3) 919 | for item := l.nextItem(); item.typ != itemEOR && item.typ != itemEOF && err == nil; item = l.nextItem() { 920 | switch item.typ { 921 | case itemComment: 922 | if r.Action != "" || commented { 923 | // Ignore comment ending rule. 924 | return r, nil 925 | } 926 | err = r.comment(item, l) 927 | // Error here means that the comment was not a commented rule. 928 | // So we're not parsing a rule and we need to break out. 929 | if err != nil { 930 | break 931 | } 932 | // This line was a commented rule. 933 | return r, nil 934 | case itemAction: 935 | err = r.action(item, l) 936 | case itemProtocol: 937 | err = r.protocol(item, l) 938 | case itemSourceAddress, itemDestinationAddress, itemSourcePort, itemDestinationPort: 939 | err = r.network(item, l) 940 | case itemDirection: 941 | err = r.direction(item, l) 942 | case itemOptionKey: 943 | err = r.option(item, l) 944 | // We will continue to parse a rule with unsupported options. 945 | if uerr, ok := err.(*UnsupportedOptionError); ok { 946 | unsupportedOptions = append(unsupportedOptions, uerr.Options...) 
947 | // This is ugly but allows the parsing to continue. 948 | err = nil 949 | } 950 | case itemError: 951 | err = errors.New(item.value) 952 | } 953 | // Unrecoverable parse error. 954 | if err != nil { 955 | return nil, err 956 | } 957 | } 958 | 959 | // If we encountered one or more unsupported keys, return an UnsupportedOptionError. 960 | if len(unsupportedOptions) > 0 { 961 | return nil, &UnsupportedOptionError{ 962 | Rule: r, 963 | Options: unsupportedOptions, 964 | } 965 | } 966 | 967 | return r, nil 968 | } 969 | 970 | // ParseRule parses an IDS rule and returns a struct describing the rule. 971 | func ParseRule(rule string) (*Rule, error) { 972 | return parseRuleAux(rule, false) 973 | } 974 | -------------------------------------------------------------------------------- /rule.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "bytes" 20 | "fmt" 21 | "regexp" 22 | "strconv" 23 | "strings" 24 | ) 25 | 26 | // Rule describes an IDS rule. 27 | type Rule struct { 28 | // Disbled identifies if the rule is disabled/commented out. 29 | Disabled bool 30 | // Action is the action the rule will take (alert, pass, drop, etc.). 31 | Action string 32 | // Protocol is the protocol the rule looks at. 
33 | Protocol string 34 | // Source is the address and ports for the source of the traffic. 35 | Source Network 36 | // Destination is the address and ports for the source of the traffic. 37 | Destination Network 38 | // Bidirectional indicates the directionality of a rule (-> or <>). 39 | Bidirectional bool 40 | // SID is the identifier of the rule. 41 | SID int 42 | // Revision is the revision of the rule. 43 | Revision int 44 | // Description is the msg field of the rule. 45 | Description string 46 | // References contains references associated to the rule (e.g. CVE number). 47 | References []*Reference 48 | // Contents are all the decoded content matches. 49 | Tags map[string]string 50 | // Statements is a slice of string. These items are similar to Tags, but have no value. (e.g. 'sameip;') 51 | Statements []string 52 | // TLSTags is a slice of TLS related matches. 53 | TLSTags []*TLSTag 54 | // StreamMatch holds stream_size parameters. 55 | StreamMatch *StreamCmp 56 | // Metas is a slice of Metadata. 57 | Metas Metadatas 58 | // Flowbits is a slice of Flowbit. 59 | Flowbits []*Flowbit 60 | // Xbits is a slice of Xbit 61 | Xbits []*Xbit 62 | // Flowints is a slice of Flowint 63 | Flowints []*Flowint 64 | // Matchers are internally used to ensure relative matches are printed correctly. 65 | // Make this private before checkin? 66 | Matchers []orderedMatcher 67 | } 68 | 69 | type orderedMatcher interface { 70 | String() string 71 | } 72 | 73 | // Metadata describes metadata tags in key-value struct. 74 | type Metadata struct { 75 | Key string 76 | Value string 77 | } 78 | 79 | // Flowbit describes a flowbit. A flowbit consists of an Action, and optional Value. 80 | type Flowbit struct { 81 | Action string 82 | Value string 83 | } 84 | 85 | // Flowint describes a flowint. 86 | type Flowint struct { 87 | Name string 88 | Modifier string 89 | Value string 90 | } 91 | 92 | // Xbit describes an Xbit. 93 | // TODO: Consider adding more structure to Track and Expire. 
94 | type Xbit struct { 95 | Action string 96 | Name string 97 | Track string 98 | // Expire should be an int, default 0 value makes stringer difficult because this is an 99 | // optional parameter. If we can confirm that this must be > 0 we can convert to int. 100 | Expire string 101 | } 102 | 103 | // Metadatas allows for a Stringer on []*Metadata 104 | type Metadatas []*Metadata 105 | 106 | // Network describes the IP addresses and port numbers used in a rule. 107 | // TODO: Ensure all values either begin with $ (variable) or they are valid IPNet/int. 108 | type Network struct { 109 | Nets []string // Currently just []string because these can be variables $HOME_NET, not a valid IPNet. 110 | Ports []string // Currently just []string because these can be variables $HTTP_PORTS, not just ints. 111 | } 112 | 113 | // DataPos indicates the data position for content matches. These should be referenced for creation 114 | // by using their Suricata keywords and the StickyBuffer() function. 115 | type DataPos int 116 | 117 | const ( 118 | pktData DataPos = iota 119 | fileData 120 | base64Data 121 | // 122 | // Suricata 4.x Sticky Buffers 123 | // 124 | // HTTP Sticky buffers 125 | httpAcceptEnc 126 | httpAccept 127 | httpAcceptLang 128 | httpConnection 129 | httpContentLen 130 | httpContentType 131 | httpHeaderNames 132 | httpProtocol 133 | httpReferer 134 | httpRequestLine 135 | httpResponseLine 136 | httpStart 137 | // TLS Sticky Buffers 138 | tlsCertSubject 139 | tlsCertIssuer 140 | tlsCertSerial 141 | tlsCertFingerprint 142 | tlsSNI 143 | // JA3 Sticky Buffers 144 | ja3Hash 145 | ja3String 146 | // SSH Sticky Buffers 147 | sshProto 148 | sshSoftware 149 | // Kerberos Sticky Buffers 150 | krb5Cname 151 | krb5Sname 152 | // DNS Sticky Buffers 153 | dnsQuery 154 | // SMB Sticky Buffers 155 | smbNamedPipe 156 | smbShare 157 | // 158 | // Suricata 5.0 Sticky Buffers 159 | // 160 | fileData5 161 | // HTTP Sticky Buffers 162 | httpAccept5 163 | httpAcceptEnc5 164 | 
httpAcceptLang5 165 | httpClientBody 166 | httpConnection5 167 | httpContentLen5 168 | httpContentType5 169 | httpCookie 170 | httpHeader 171 | httpHeaderNames5 172 | httpHeaderRaw 173 | httpHost 174 | httpHostRaw 175 | httpLocation 176 | httpMethod 177 | httpProtocol5 178 | httpReferer5 179 | httpRequestBody 180 | httpRequestLine5 181 | httpResponseBody 182 | httpResponseLine5 183 | httpServer 184 | httpServerBody 185 | httpStart5 186 | httpStatCode 187 | httpStatMsg 188 | httpURI 189 | httpURIRaw 190 | httpUserAgent 191 | // TLS Sticky Buffers 192 | tlsCertSubject5 193 | tlsCertIssuer5 194 | tlsCertSerial5 195 | tlsCertFingerprint5 196 | tlsSNI5 197 | // JA3 Sticky Buffers 198 | ja3Hash5 199 | ja3String5 200 | ja3sHash 201 | ja3sString 202 | // SSH Sticky Buffers 203 | sshProto5 204 | sshSoftware5 205 | // Kerberos Sticky Buffers - Unchanged from Suricata 4.x 206 | // DNS Sticky Buffers 207 | dnsQuery5 208 | // SMB - Documentation lacking. Unknown. 209 | ) 210 | 211 | // Contains both Suricata 4.x and 5.0 buffers. Some day we'll deprecate the 4.x ones. 
212 | var stickyBuffers = map[DataPos]string{ 213 | pktData: "pkt_data", 214 | fileData: "file_data", 215 | base64Data: "base64_data", 216 | // Suricata 4.X Sticky Buffers 217 | // HTTP Sticky Buffers 218 | httpAcceptEnc: "http_accept_enc", 219 | httpAccept: "http_accept", 220 | httpAcceptLang: "http_accept_lang", 221 | httpConnection: "http_connection", 222 | httpContentLen: "http_content_len", 223 | httpContentType: "http_content_type", 224 | httpHeaderNames: "http_header_names", 225 | httpProtocol: "http_protocol", 226 | httpReferer: "http_referer", 227 | httpRequestLine: "http_request_line", 228 | httpResponseLine: "http_response_line", 229 | httpStart: "http_start", 230 | // TLS Sticky Buffers 231 | tlsCertSubject: "tls_cert_subject", 232 | tlsCertIssuer: "tls_cert_issuer", 233 | tlsCertSerial: "tls_cert_serial", 234 | tlsCertFingerprint: "tls_cert_fingerprint", 235 | tlsSNI: "tls_sni", 236 | // JA3 Sticky Buffers 237 | ja3Hash: "ja3_hash", 238 | ja3String: "ja3_string", 239 | // SSH Sticky Buffers 240 | sshProto: "ssh_proto", 241 | sshSoftware: "ssh_software", 242 | // Kerberos Sticky Buffers 243 | krb5Cname: "krb5_cname", 244 | krb5Sname: "krb5_sname", 245 | // DNS Sticky Buffers 246 | dnsQuery: "dns_query", 247 | // SMB Sticky Buffers 248 | smbNamedPipe: "smb_named_pipe", 249 | smbShare: "smb_share", 250 | // Suricata 5.0 Sticky Buffers 251 | fileData5: "file.data", 252 | // HTTP Sticky Buffers 253 | httpAccept5: "http.accept", 254 | httpAcceptEnc5: "http.accept_enc", 255 | httpAcceptLang5: "http.accept_lang", 256 | httpClientBody: "http.client_body", 257 | httpConnection5: "http.connection", 258 | httpContentLen5: "http.content_len", 259 | httpContentType5: "http.content_type", 260 | httpCookie: "http.cookie", 261 | httpHeader: "http.header", 262 | httpHeaderNames5: "http.header_names", 263 | httpHeaderRaw: "http.header.raw", 264 | httpHost: "http.host", 265 | httpHostRaw: "http.host.raw", 266 | httpLocation: "http.location", 267 | httpMethod: 
"http.method", 268 | httpProtocol5: "http.protocol", 269 | httpReferer5: "http.referer", 270 | httpRequestBody: "http.request_body", 271 | httpRequestLine5: "http.request_line", 272 | httpResponseBody: "http.response_body", 273 | httpResponseLine5: "http.response_line", 274 | httpServer: "http.server", 275 | httpServerBody: "http.server_body", 276 | httpStart5: "http.start", 277 | httpStatCode: "http.stat_code", 278 | httpStatMsg: "http.stat_msg", 279 | httpURI: "http.uri", 280 | httpURIRaw: "http.uri.raw", 281 | httpUserAgent: "http.user_agent", 282 | // TLS Sticky Buffers 283 | tlsCertSubject5: "tls.cert_subject", 284 | tlsCertIssuer5: "tls.cert_issuer", 285 | tlsCertSerial5: "tls.cert_serial", 286 | tlsCertFingerprint5: "tls.cert_fingerprint", 287 | tlsSNI5: "tls.sni", 288 | // JA3 Sticky Buffers 289 | ja3Hash5: "ja3.hash", 290 | ja3String5: "ja3.string", 291 | ja3sHash: "ja3s.hash", 292 | ja3sString: "ja3s.string", 293 | // SSH Sticky Buffers 294 | sshProto5: "ssh.proto", 295 | sshSoftware5: "ssh.software", 296 | // Kerberos Sticky Buffers - Unchanged from Suricata 4.x 297 | // DNS Sticky Buffers 298 | dnsQuery5: "dns.query", 299 | // SMB - Documentation lacking. Unknown. 300 | } 301 | 302 | func (d DataPos) String() string { 303 | return stickyBuffers[d] 304 | } 305 | 306 | // StickyBuffer returns the data position value for the string representation of a sticky buffer name (e.g. "file_data") 307 | func StickyBuffer(s string) (DataPos, error) { 308 | for k, v := range stickyBuffers { 309 | if v == s { 310 | return k, nil 311 | } 312 | } 313 | return pktData, fmt.Errorf("%s is not a sticky buffer", s) 314 | } 315 | 316 | // isStickyBuffer returns true if the provided string is a known sticky buffer. 317 | func isStickyBuffer(s string) bool { 318 | _, err := StickyBuffer(s) 319 | return err == nil 320 | } 321 | 322 | // Content describes a rule content. A content is composed of a pattern followed by options. 
323 | type Content struct { 324 | // DataPosition defaults to pkt_data state, can be modified to apply to file_data, base64_data locations. 325 | // This value will apply to all following contents, to reset to default you must reset DataPosition during processing. 326 | DataPosition DataPos 327 | // FastPattern settings for the content. 328 | FastPattern FastPattern 329 | // Pattern is the pattern match of a content (e.g. HTTP in content:"HTTP"). 330 | Pattern []byte 331 | // Negate is true for negated content match. 332 | Negate bool 333 | // Options are the option associated to the content (e.g. http_header). 334 | Options []*ContentOption 335 | } 336 | 337 | // byteMatchType describes the kinds of byte matches and comparisons that are supported. 338 | type byteMatchType int 339 | 340 | const ( 341 | bUnknown byteMatchType = iota 342 | bExtract 343 | bTest 344 | bJump 345 | isDataAt 346 | b64Decode 347 | ) 348 | 349 | var byteMatchTypeVals = map[byteMatchType]string{ 350 | bExtract: "byte_extract", 351 | bJump: "byte_jump", 352 | bTest: "byte_test", 353 | isDataAt: "isdataat", 354 | b64Decode: "base64_decode", 355 | } 356 | 357 | // allbyteMatchTypeNames returns a slice of valid byte_* keywords. 358 | func allbyteMatchTypeNames() []string { 359 | b := make([]string, len(byteMatchTypeVals)) 360 | var i int 361 | for _, n := range byteMatchTypeVals { 362 | b[i] = n 363 | i++ 364 | } 365 | return b 366 | } 367 | 368 | // String returns the string representation of a byte_* keyword. 369 | func (b byteMatchType) String() string { 370 | return byteMatchTypeVals[b] 371 | } 372 | 373 | // byteMatcher returns a byteMatchType iota for a provided String. 374 | func byteMatcher(s string) (byteMatchType, error) { 375 | for k, v := range byteMatchTypeVals { 376 | if v == s { 377 | return k, nil 378 | } 379 | } 380 | return bUnknown, fmt.Errorf("%s is not a byteMatchType* keyword", s) 381 | } 382 | 383 | // lenMatcher returns an lenMatchType or an error for a given string. 
384 | func lenMatcher(s string) (lenMatchType, error) { 385 | for k, v := range lenMatchTypeVals { 386 | if v == s { 387 | return k, nil 388 | } 389 | } 390 | return lUnknown, fmt.Errorf("%s is not an lenMatch keyword", s) 391 | } 392 | 393 | // Returns the number of mandatory parameters for a byteMatchType keyword, -1 if unknown. 394 | func (b byteMatchType) minLen() int { 395 | switch b { 396 | case bExtract: 397 | return 3 398 | case bJump: 399 | return 2 400 | case bTest: 401 | return 4 402 | case isDataAt: 403 | return 1 404 | case b64Decode: 405 | return 0 406 | } 407 | return -1 408 | } 409 | 410 | // ByteMatch describes a byte matching operation, similar to a Content. 411 | type ByteMatch struct { 412 | // DataPosition defaults to pkt_data state, can be modified to apply to file_data, base64_data locations. 413 | // This value will apply to all following contents, to reset to default you must reset DataPosition during processing. 414 | DataPosition DataPos 415 | // Kind is a specific operation type we're taking. 416 | Kind byteMatchType 417 | // Negate indicates negation of a value, currently only used for isdataat. 418 | Negate bool 419 | // A variable name being extracted by byte_extract. 420 | Variable string 421 | // Number of bytes to operate on. "bytes to convert" in Snort Manual. This can be an int, or a var from byte_extract. 422 | NumBytes string 423 | // Operator for comparison in byte_test. 424 | Operator string 425 | // Value to compare against using byte_test. 426 | Value string 427 | // Offset within given buffer to operate on. 428 | Offset int 429 | // Other specifics required for jump/test here. This might make sense to pull out into a "ByteMatchOption" later. 430 | Options []string 431 | } 432 | 433 | // lenMatchType describes the type of length matches and comparisons that are supported. 
434 | type lenMatchType int 435 | 436 | const ( 437 | lUnknown lenMatchType = iota 438 | iType 439 | iCode 440 | iID 441 | iSeq 442 | uriLen 443 | dSize 444 | ipTTL 445 | ipID 446 | tcpSeq 447 | tcpACK 448 | bSize 449 | ) 450 | 451 | // lenMatchTypeVals map len types to string representations. 452 | var lenMatchTypeVals = map[lenMatchType]string{ 453 | iType: "itype", 454 | iCode: "icode", 455 | iID: "icmp_id", 456 | iSeq: "icmp_seq", 457 | uriLen: "urilen", 458 | dSize: "dsize", 459 | ipTTL: "ttl", 460 | ipID: "id", 461 | tcpSeq: "seq", 462 | tcpACK: "ack", 463 | bSize: "bsize", 464 | } 465 | 466 | // allLenMatchTypeNames returns a slice of string containing all length match keywords. 467 | func allLenMatchTypeNames() []string { 468 | i := make([]string, len(lenMatchTypeVals)) 469 | var j int 470 | for _, n := range lenMatchTypeVals { 471 | i[j] = n 472 | j++ 473 | } 474 | return i 475 | } 476 | 477 | // String returns the string keyword for an lenMatchType. 478 | func (i lenMatchType) String() string { 479 | return lenMatchTypeVals[i] 480 | } 481 | 482 | // LenMatch holds the values to represent an Length Match. 483 | type LenMatch struct { 484 | // DataPosition defaults to pkt_data state, can be modified to apply to file_data, base64_data locations. 485 | // This value will apply to all following contents, to reset to default you must reset DataPosition during processing. 486 | DataPosition DataPos 487 | Kind lenMatchType 488 | Min int 489 | Max int 490 | Num int 491 | Operator string 492 | Options []string 493 | } 494 | 495 | // PCRE describes a PCRE item of a rule. 496 | type PCRE struct { 497 | // DataPosition defaults to pkt_data state, can be modified to apply to file_data, base64_data locations. 498 | // This value will apply to all following contents, to reset to default you must reset DataPosition during processing. 
499 | DataPosition DataPos 500 | Pattern []byte 501 | Negate bool 502 | Options []byte 503 | } 504 | 505 | // FastPattern describes various properties of a fast_pattern value for a content. 506 | type FastPattern struct { 507 | Enabled bool 508 | Only bool 509 | Offset int 510 | Length int 511 | } 512 | 513 | // ContentOption describes an option set on a rule content. 514 | type ContentOption struct { 515 | // Name is the name of the option (e.g. offset). 516 | Name string 517 | // Value is the value associated to the option, default to "" for option without value. 518 | Value string 519 | } 520 | 521 | // Reference describes a gonids reference in a rule. 522 | type Reference struct { 523 | // Type is the system name for the reference: (url, cve, md5, etc.) 524 | Type string 525 | // Value is the identifier in the system: (address, cvd-id, hash) 526 | Value string 527 | } 528 | 529 | // TODO: Add support for tls_cert_nobefore, tls_cert_notafter, tls_cert_expired, tls_cert_valid. 530 | // Valid keywords for extracting TLS matches. Does not include tls.store, or sticky buffers. 531 | var tlsTags = []string{"ssl_version", "ssl_state", "tls.version", "tls.subject", "tls.issuerdn", "tls.fingerprint"} 532 | 533 | // TLSTag describes a TLS specific match (non-sticky buffer based). 534 | type TLSTag struct { 535 | // Is the match negated (!). 536 | Negate bool 537 | // Key holds the thing we're inspecting (tls.version, tls.fingerprint, etc.). 538 | Key string 539 | // TODO: Consider string -> []byte and handle hex input. 540 | // TODO: Consider supporting []struct if we can support things like: tls.version:!1.2,!1.3 541 | // Value holds the value for the match. 542 | Value string 543 | } 544 | 545 | // StreamCmp represents a stream comparison (stream_size:>20). 546 | type StreamCmp struct { 547 | // Direction of traffic to inspect: server, client, both, either. 548 | Direction string 549 | // Operator is the comparison operator to apply >, <, !=, etc. 
550 | Operator string 551 | // TODO: Can this number be a variable, if yes s/int/string. 552 | // Number is the size to compare against 553 | Number int 554 | } 555 | 556 | // escape escapes special char used in regexp. 557 | func escape(r string) string { 558 | return escapeRE.ReplaceAllString(r, `\$1`) 559 | } 560 | 561 | // within returns the within value for a specific content. 562 | func within(options []*ContentOption) string { 563 | for _, o := range options { 564 | if o.Name == "within" { 565 | return o.Value 566 | } 567 | } 568 | return "" 569 | } 570 | 571 | // RE returns all content matches as a single and simple regexp. 572 | func (r *Rule) RE() string { 573 | var re string 574 | for _, c := range r.Contents() { 575 | // TODO: handle pcre, depth, offset, distance. 576 | if d, err := strconv.Atoi(within(c.Options)); err == nil && d > 0 { 577 | re += fmt.Sprintf(".{0,%d}", d) 578 | } else { 579 | re += ".*" 580 | } 581 | re += escape(string(c.Pattern)) 582 | } 583 | return re 584 | } 585 | 586 | // CVE extracts CVE from a rule. 587 | func (r *Rule) CVE() string { 588 | for _, ref := range r.References { 589 | if ref.Type == "cve" { 590 | return ref.Value 591 | } 592 | } 593 | return "" 594 | } 595 | 596 | // LenMatchers returns all *LenMatch for a rule. 597 | func (r *Rule) LenMatchers() []*LenMatch { 598 | lms := make([]*LenMatch, 0, len(r.Matchers)) 599 | for _, m := range r.Matchers { 600 | if lm, ok := m.(*LenMatch); ok { 601 | lms = append(lms, lm) 602 | } 603 | } 604 | return lms 605 | } 606 | 607 | // Contents returns all *Content for a rule. 
608 | func (r *Rule) Contents() []*Content { 609 | cs := make([]*Content, 0, len(r.Matchers)) 610 | for _, m := range r.Matchers { 611 | if c, ok := m.(*Content); ok { 612 | cs = append(cs, c) 613 | } 614 | } 615 | return cs 616 | } 617 | 618 | // LastContent returns the last *Content from Matchers 619 | func (r *Rule) LastContent() *Content { 620 | for i := range r.Matchers { 621 | if co, ok := r.Matchers[len(r.Matchers)-i-1].(*Content); ok { 622 | return co 623 | } 624 | } 625 | return nil 626 | } 627 | 628 | // ByteMatchers returns all *ByteMatch for a rule. 629 | func (r *Rule) ByteMatchers() []*ByteMatch { 630 | bs := make([]*ByteMatch, 0, len(r.Matchers)) 631 | for _, m := range r.Matchers { 632 | if b, ok := m.(*ByteMatch); ok { 633 | bs = append(bs, b) 634 | } 635 | } 636 | return bs 637 | } 638 | 639 | // PCREs returns all *PCRE for a rule. 640 | func (r *Rule) PCREs() []*PCRE { 641 | var ps []*PCRE 642 | for _, m := range r.Matchers { 643 | if p, ok := m.(*PCRE); ok { 644 | ps = append(ps, p) 645 | } 646 | } 647 | return ps 648 | } 649 | 650 | func netString(netPart []string) string { 651 | var s strings.Builder 652 | if len(netPart) > 1 { 653 | s.WriteString("[") 654 | } 655 | for i, n := range netPart { 656 | s.WriteString(n) 657 | if i < len(netPart)-1 { 658 | s.WriteString(",") 659 | } 660 | } 661 | if len(netPart) > 1 { 662 | s.WriteString("]") 663 | } 664 | return s.String() 665 | } 666 | 667 | // String retunrs a string for a Network. 668 | func (n Network) String() string { 669 | return fmt.Sprintf("%s %s", netString(n.Nets), netString(n.Ports)) 670 | } 671 | 672 | // String returns a string for a FastPattern. 673 | func (f FastPattern) String() string { 674 | if !f.Enabled { 675 | return "" 676 | } 677 | // This is an invalid state. 
678 | if f.Only && (f.Offset != 0 || f.Length != 0) { 679 | return "" 680 | } 681 | 682 | var s strings.Builder 683 | s.WriteString("fast_pattern") 684 | if f.Only { 685 | s.WriteString(":only;") 686 | return s.String() 687 | } 688 | 689 | // "only" and "chop" modes are mutually exclusive. 690 | if f.Offset != 0 || f.Length != 0 { 691 | s.WriteString(fmt.Sprintf(":%d,%d", f.Offset, f.Length)) 692 | } 693 | 694 | s.WriteString(";") 695 | return s.String() 696 | } 697 | 698 | // String returns a string for a ContentOption. 699 | func (co ContentOption) String() string { 700 | if inSlice(co.Name, []string{"depth", "distance", "offset", "within"}) { 701 | return fmt.Sprintf("%s:%v;", co.Name, co.Value) 702 | } 703 | return fmt.Sprintf("%s;", co.Name) 704 | } 705 | 706 | // String returns a string for a Reference. 707 | func (r Reference) String() string { 708 | return fmt.Sprintf("reference:%s,%s;", r.Type, r.Value) 709 | } 710 | 711 | // String returns a string for a Content (ignoring sticky buffers.) 712 | func (c Content) String() string { 713 | var s strings.Builder 714 | s.WriteString("content:") 715 | if c.Negate { 716 | s.WriteString("!") 717 | } 718 | s.WriteString(fmt.Sprintf(`"%s";`, c.FormatPattern())) 719 | for _, o := range c.Options { 720 | s.WriteString(fmt.Sprintf(" %s", o)) 721 | } 722 | if c.FastPattern.Enabled { 723 | s.WriteString(fmt.Sprintf(" %s", c.FastPattern)) 724 | } 725 | 726 | return s.String() 727 | } 728 | 729 | // base64DecodeString returns a string for a base64_decode ByteMatch. 730 | func (b ByteMatch) base64DecodeString() string { 731 | var parts []string 732 | if b.NumBytes != "" { 733 | parts = append(parts, fmt.Sprintf("bytes %s", b.NumBytes)) 734 | } 735 | if b.Offset > 0 { 736 | parts = append(parts, fmt.Sprintf("offset %d", b.Offset)) 737 | } 738 | // This should only be "relative" but we'll support "anything" 739 | parts = append(parts, b.Options...) 
740 | if len(parts) == 0 { 741 | return fmt.Sprintf("%s;", byteMatchTypeVals[b.Kind]) 742 | } 743 | return fmt.Sprintf("%s:%s;", byteMatchTypeVals[b.Kind], strings.Join(parts, ",")) 744 | } 745 | 746 | // String returns a string for a ByteMatch. 747 | func (b ByteMatch) String() string { 748 | // TODO: Support dataPos? 749 | // TODO: Write tests. 750 | var s strings.Builder 751 | s.WriteString(fmt.Sprintf("%s:", byteMatchTypeVals[b.Kind])) 752 | 753 | switch b.Kind { 754 | case bExtract: 755 | s.WriteString(fmt.Sprintf("%s,%d,%s", b.NumBytes, b.Offset, b.Variable)) 756 | case bJump: 757 | s.WriteString(fmt.Sprintf("%s,%d", b.NumBytes, b.Offset)) 758 | case bTest: 759 | s.WriteString(fmt.Sprintf("%s,%s,%s,%d", b.NumBytes, b.Operator, b.Value, b.Offset)) 760 | case isDataAt: 761 | if b.Negate { 762 | s.WriteString("!") 763 | } 764 | s.WriteString(b.NumBytes) 765 | // Logic for this case is a bit different so it's handled outside. 766 | case b64Decode: 767 | return b.base64DecodeString() 768 | } 769 | for _, o := range b.Options { 770 | s.WriteString(fmt.Sprintf(",%s", o)) 771 | } 772 | s.WriteString(";") 773 | return s.String() 774 | } 775 | 776 | // String returns a string for an length match. 777 | func (i LenMatch) String() string { 778 | var s strings.Builder 779 | s.WriteString(fmt.Sprintf("%s:", i.Kind)) 780 | switch { 781 | case i.Operator == "<>": 782 | s.WriteString(fmt.Sprintf("%d%s%d", i.Min, i.Operator, i.Max)) 783 | case i.Operator != "": 784 | s.WriteString(fmt.Sprintf("%s%d", i.Operator, i.Num)) 785 | default: 786 | s.WriteString(fmt.Sprintf("%d", i.Num)) 787 | } 788 | for _, o := range i.Options { 789 | s.WriteString(fmt.Sprintf(",%s", o)) 790 | } 791 | s.WriteString(";") 792 | return s.String() 793 | } 794 | 795 | // String returns a string for all of the metadata values. 
796 | func (ms Metadatas) String() string { 797 | var s strings.Builder 798 | if len(ms) < 1 { 799 | return "" 800 | } 801 | s.WriteString("metadata:") 802 | for i, m := range ms { 803 | if i < len(ms)-1 { 804 | s.WriteString(fmt.Sprintf("%s %s, ", m.Key, m.Value)) 805 | continue 806 | } 807 | s.WriteString(fmt.Sprintf("%s %s;", m.Key, m.Value)) 808 | } 809 | return s.String() 810 | } 811 | 812 | func (t *TLSTag) String() string { 813 | var s strings.Builder 814 | s.WriteString(fmt.Sprintf("%s:", t.Key)) 815 | if t.Negate { 816 | s.WriteString("!") 817 | } 818 | // Values for these get wrapped in `"`. 819 | if inSlice(t.Key, []string{"tls.issuerdn", "tls.subject", "tls.fingerprint"}) { 820 | s.WriteString(fmt.Sprintf(`"%s";`, t.Value)) 821 | } else { 822 | s.WriteString(fmt.Sprintf("%s;", t.Value)) 823 | } 824 | return s.String() 825 | } 826 | 827 | func (s *StreamCmp) String() string { 828 | return fmt.Sprintf("stream_size:%s,%s,%d;", s.Direction, s.Operator, s.Number) 829 | } 830 | 831 | // String returns a string for a PCRE. 832 | func (p PCRE) String() string { 833 | pattern := p.Pattern 834 | if len(pattern) < 1 { 835 | return "" 836 | } 837 | 838 | // escape quote signs, if necessary 839 | if bytes.IndexByte(pattern, '"') > -1 { 840 | pattern = bytes.Replace(pattern, []byte(`"`), []byte(`\"`), -1) 841 | } 842 | 843 | var s strings.Builder 844 | s.WriteString("pcre:") 845 | if p.Negate { 846 | s.WriteString("!") 847 | } 848 | s.WriteString(fmt.Sprintf(`"/%s/%s";`, pattern, p.Options)) 849 | return s.String() 850 | } 851 | 852 | // String returns a string for a Flowbit. 
853 | func (fb Flowbit) String() string { 854 | if !inSlice(fb.Action, []string{"noalert", "isset", "isnotset", "set", "unset", "toggle"}) { 855 | return "" 856 | } 857 | var s strings.Builder 858 | s.WriteString(fmt.Sprintf("flowbits:%s", fb.Action)) 859 | if fb.Value != "" { 860 | s.WriteString(fmt.Sprintf(",%s", fb.Value)) 861 | } 862 | s.WriteString(";") 863 | return s.String() 864 | } 865 | 866 | // String returns a string for a Flowbit. 867 | func (fi Flowint) String() string { 868 | var s strings.Builder 869 | s.WriteString(fmt.Sprintf("flowint:%s", fi.Name)) 870 | if inSlice(fi.Modifier, []string{"isset", "isnotset"}) { 871 | s.WriteString(fmt.Sprintf(",%s", fi.Modifier)) 872 | } 873 | if inSlice(fi.Modifier, []string{"+", "-", "=", ">", "<", ">=", "<=", "==", "!="}) && fi.Value != "" { 874 | s.WriteString(fmt.Sprintf(",%s,%s", fi.Modifier, fi.Value)) 875 | } 876 | s.WriteString(";") 877 | return s.String() 878 | } 879 | 880 | // String returns a string for a Flowbit. 881 | func (xb Xbit) String() string { 882 | var s strings.Builder 883 | s.WriteString(fmt.Sprintf("xbits:%s,%s,track %s", xb.Action, xb.Name, xb.Track)) 884 | if xb.Expire != "" { 885 | s.WriteString(fmt.Sprintf(",expire %s", xb.Expire)) 886 | } 887 | s.WriteString(";") 888 | return s.String() 889 | } 890 | 891 | // String returns a string for a rule. 892 | func (r Rule) String() string { 893 | var s strings.Builder 894 | if r.Disabled { 895 | s.WriteString("#") 896 | } 897 | s.WriteString(fmt.Sprintf("%s %s %s ", r.Action, r.Protocol, r.Source)) 898 | if !r.Bidirectional { 899 | s.WriteString("-> ") 900 | } else { 901 | s.WriteString("<> ") 902 | } 903 | 904 | s.WriteString(fmt.Sprintf(`%s (msg:"%s"; `, r.Destination, r.Description)) 905 | 906 | // Pull flow out of tags if it exists, we like flow at the beginning of rules. 
907 | if v, ok := r.Tags["flow"]; ok { 908 | s.WriteString(fmt.Sprintf("flow:%s; ", v)) 909 | } 910 | 911 | // Write out matchers in order (because things can be relative.) 912 | if len(r.Matchers) > 0 { 913 | d := pktData 914 | for _, m := range r.Matchers { 915 | if c, ok := m.(*Content); ok { 916 | if d != c.DataPosition { 917 | d = c.DataPosition 918 | s.WriteString(fmt.Sprintf("%s; ", d)) 919 | } 920 | } 921 | if c, ok := m.(*LenMatch); ok { 922 | if d != c.DataPosition { 923 | d = c.DataPosition 924 | s.WriteString(fmt.Sprintf("%s; ", d)) 925 | } 926 | } 927 | if c, ok := m.(*PCRE); ok { 928 | if d != c.DataPosition { 929 | d = c.DataPosition 930 | s.WriteString(fmt.Sprintf("%s; ", d)) 931 | } 932 | } 933 | s.WriteString(fmt.Sprintf("%s ", m)) 934 | } 935 | } 936 | 937 | if r.StreamMatch != nil { 938 | s.WriteString(fmt.Sprintf("%s ", r.StreamMatch)) 939 | } 940 | 941 | if len(r.TLSTags) > 0 { 942 | for _, t := range r.TLSTags { 943 | s.WriteString(fmt.Sprintf("%s ", t)) 944 | } 945 | } 946 | 947 | if len(r.Metas) > 0 { 948 | s.WriteString(fmt.Sprintf("%s ", r.Metas)) 949 | } 950 | 951 | for k, v := range r.Tags { 952 | if k == "flow" { 953 | continue 954 | } 955 | s.WriteString(fmt.Sprintf("%s:%s; ", k, v)) 956 | } 957 | 958 | for _, v := range r.Statements { 959 | s.WriteString(fmt.Sprintf("%s; ", v)) 960 | } 961 | 962 | for _, fb := range r.Flowbits { 963 | s.WriteString(fmt.Sprintf("%s ", fb)) 964 | } 965 | 966 | for _, fi := range r.Flowints { 967 | s.WriteString(fmt.Sprintf("%s ", fi)) 968 | } 969 | 970 | for _, xb := range r.Xbits { 971 | s.WriteString(fmt.Sprintf("%s ", xb)) 972 | } 973 | 974 | for _, ref := range r.References { 975 | s.WriteString(fmt.Sprintf("%s ", ref)) 976 | } 977 | 978 | s.WriteString(fmt.Sprintf("sid:%d; rev:%d;)", r.SID, r.Revision)) 979 | return s.String() 980 | 981 | } 982 | 983 | // ToRegexp returns a string that can be used as a regular expression 984 | // to identify content matches in an ASCII dump of a packet capture 
(tcpdump -A). 985 | func (c *Content) ToRegexp() string { 986 | var buffer bytes.Buffer 987 | for _, b := range c.Pattern { 988 | if b > 126 || b < 32 { 989 | buffer.WriteString(".") 990 | } else { 991 | buffer.WriteByte(b) 992 | } 993 | } 994 | return regexp.QuoteMeta(buffer.String()) 995 | } 996 | 997 | // FormatPattern returns a string for a Pattern in a content 998 | func (c *Content) FormatPattern() string { 999 | var buffer bytes.Buffer 1000 | pipe := false 1001 | for _, b := range c.Pattern { 1002 | if b != ' ' && (b > 126 || b < 35 || b == ':' || b == ';' || b == '|' || b == '\\') { 1003 | if !pipe { 1004 | buffer.WriteByte('|') 1005 | pipe = true 1006 | } else { 1007 | buffer.WriteString(" ") 1008 | } 1009 | buffer.WriteString(fmt.Sprintf("%.2X", b)) 1010 | } else { 1011 | if pipe { 1012 | buffer.WriteByte('|') 1013 | pipe = false 1014 | } 1015 | buffer.WriteByte(b) 1016 | } 1017 | } 1018 | if pipe { 1019 | buffer.WriteByte('|') 1020 | } 1021 | return buffer.String() 1022 | } 1023 | 1024 | // InsertMatcher will insert an ordered matcher at a position specified. 1025 | func (r *Rule) InsertMatcher(m orderedMatcher, pos int) error { 1026 | if pos < 0 { 1027 | return fmt.Errorf("cannot insert matcher, position %d < 0", pos) 1028 | } 1029 | if pos > len(r.Matchers) { 1030 | return fmt.Errorf("cannot insert matcher, position %d > %d", pos, len(r.Matchers)) 1031 | } 1032 | 1033 | r.Matchers = append(r.Matchers, &Content{}) 1034 | copy(r.Matchers[pos+1:], r.Matchers[pos:]) 1035 | r.Matchers[pos] = m 1036 | return nil 1037 | } 1038 | 1039 | // HasVar returns true if a variable with the provided name exists. 1040 | func (r *Rule) HasVar(s string) bool { 1041 | for _, m := range r.Matchers { 1042 | if b, ok := m.(*ByteMatch); ok { 1043 | if b.Variable == s { 1044 | return true 1045 | } 1046 | } 1047 | } 1048 | return false 1049 | } 1050 | 1051 | // GetSidMsg returns a string representing a sidmsg.map entry. 
1052 | func (r *Rule) GetSidMsg() string { 1053 | var sidmsg strings.Builder 1054 | sidmsg.WriteString(fmt.Sprintf("%s || %s", strconv.Itoa(r.SID), r.Description)) 1055 | for _, ref := range r.References { 1056 | sidmsg.WriteString(fmt.Sprintf(" || %s,%s", ref.Type, ref.Value)) 1057 | } 1058 | return sidmsg.String() 1059 | } 1060 | -------------------------------------------------------------------------------- /rule_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright 2016 Google Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | package gonids 17 | 18 | import ( 19 | "reflect" 20 | "testing" 21 | 22 | "github.com/kylelemons/godebug/pretty" 23 | ) 24 | 25 | func TestContentToRegexp(t *testing.T) { 26 | for _, tt := range []struct { 27 | name string 28 | input *Content 29 | want string 30 | wantErr bool 31 | }{ 32 | { 33 | name: "simple content", 34 | input: &Content{ 35 | Pattern: []byte("abcd"), 36 | }, 37 | want: `abcd`, 38 | }, 39 | { 40 | name: "escaped content", 41 | input: &Content{ 42 | Pattern: []byte("abcd;ef"), 43 | }, 44 | want: `abcd;ef`, 45 | }, 46 | { 47 | name: "complex escaped content", 48 | input: &Content{ 49 | Pattern: []byte("abcd;:\r\ne\rf"), 50 | }, 51 | want: `abcd;:\.\.e\.f`, 52 | }, 53 | } { 54 | got := tt.input.ToRegexp() 55 | if !reflect.DeepEqual(got, tt.want) { 56 | t.Fatalf("%s: got %v; expected %v", tt.name, got, tt.want) 57 | } 58 | } 59 | } 60 | 61 | func TestContentFormatPattern(t *testing.T) { 62 | for _, tt := range []struct { 63 | name string 64 | input *Content 65 | want string 66 | wantErr bool 67 | }{ 68 | { 69 | name: "simple content", 70 | input: &Content{ 71 | Pattern: []byte("abcd"), 72 | }, 73 | want: "abcd", 74 | }, 75 | { 76 | name: "escaped content", 77 | input: &Content{ 78 | Pattern: []byte("abcd;ef"), 79 | }, 80 | want: "abcd|3B|ef", 81 | }, 82 | { 83 | name: "complex escaped content", 84 | input: &Content{ 85 | Pattern: []byte("abcd;:\r\ne\rf"), 86 | }, 87 | want: "abcd|3B 3A 0D 0A|e|0D|f", 88 | }, 89 | { 90 | name: "backslash", 91 | input: &Content{ 92 | Pattern: []byte(`C:\WINDOWS\system32\`), 93 | }, 94 | want: `C|3A 5C|WINDOWS|5C|system32|5C|`, 95 | }, 96 | { 97 | name: "content with hex pipe", 98 | input: &Content{ 99 | Pattern: []byte(`C|B`), 100 | }, 101 | want: `C|7C|B`, 102 | }, 103 | { 104 | name: "escaped characters", 105 | input: &Content{ 106 | Pattern: []byte(`A\B;C":`), 107 | }, 108 | want: `A|5C|B|3B|C|22 3A|`, 109 | }, 110 | } { 111 | got := tt.input.FormatPattern() 112 | if !reflect.DeepEqual(got, 
tt.want) { 113 | t.Fatalf("%s: got %v; expected %v", tt.name, got, tt.want) 114 | } 115 | } 116 | } 117 | 118 | func TestFastPatternString(t *testing.T) { 119 | for _, tt := range []struct { 120 | name string 121 | input FastPattern 122 | want string 123 | }{ 124 | { 125 | name: "fast_pattern", 126 | input: FastPattern{ 127 | Enabled: true, 128 | }, 129 | want: "fast_pattern;", 130 | }, 131 | { 132 | name: "fast_pattern:only;", 133 | input: FastPattern{ 134 | Enabled: true, 135 | Only: true, 136 | }, 137 | want: "fast_pattern:only;", 138 | }, 139 | { 140 | name: "fast_pattern:`chop`", 141 | input: FastPattern{ 142 | Enabled: true, 143 | Offset: 2, 144 | Length: 5, 145 | }, 146 | want: "fast_pattern:2,5;", 147 | }, 148 | { 149 | name: "fast_pattern:`chop` with 0", 150 | input: FastPattern{ 151 | Enabled: true, 152 | Offset: 0, 153 | Length: 5, 154 | }, 155 | want: "fast_pattern:0,5;", 156 | }, 157 | { 158 | name: "invalid state", 159 | input: FastPattern{ 160 | Enabled: true, 161 | Only: true, 162 | Offset: 2, 163 | Length: 5, 164 | }, 165 | want: "", 166 | }, 167 | { 168 | name: "not enabled", 169 | input: FastPattern{ 170 | Only: true, 171 | Offset: 2, 172 | Length: 5, 173 | }, 174 | want: "", 175 | }, 176 | } { 177 | got := tt.input.String() 178 | if got != tt.want { 179 | t.Fatalf("%s: got %v; expected %v", tt.name, got, tt.want) 180 | } 181 | } 182 | } 183 | 184 | func TestContentOptionString(t *testing.T) { 185 | for _, tt := range []struct { 186 | name string 187 | input ContentOption 188 | want string 189 | }{ 190 | { 191 | name: "no value", 192 | input: ContentOption{ 193 | Name: "http_uri", 194 | }, 195 | want: "http_uri;", 196 | }, 197 | { 198 | name: "value", 199 | input: ContentOption{ 200 | Name: "depth", 201 | Value: "0", 202 | }, 203 | want: "depth:0;", 204 | }, 205 | { 206 | name: "invalid value", 207 | input: ContentOption{ 208 | Name: "http_uri", 209 | Value: "1", 210 | }, 211 | want: "http_uri;", 212 | }, 213 | } { 214 | got := tt.input.String() 
215 | if got != tt.want { 216 | t.Fatalf("%s: got %v; expected %v", tt.name, got, tt.want) 217 | } 218 | } 219 | } 220 | 221 | func TestReferenceString(t *testing.T) { 222 | for _, tt := range []struct { 223 | name string 224 | input Reference 225 | want string 226 | }{ 227 | { 228 | name: "url", 229 | input: Reference{ 230 | Type: "url", 231 | Value: "www.google.com", 232 | }, 233 | want: "reference:url,www.google.com;", 234 | }, 235 | { 236 | name: "md5", 237 | input: Reference{ 238 | Type: "md5", 239 | Value: "2aee1c40199c7754da766e61452612cc", 240 | }, 241 | want: "reference:md5,2aee1c40199c7754da766e61452612cc;", 242 | }, 243 | } { 244 | got := tt.input.String() 245 | if got != tt.want { 246 | t.Fatalf("%s: got %v; expected %v", tt.name, got, tt.want) 247 | } 248 | } 249 | } 250 | 251 | func TestMetdatasString(t *testing.T) { 252 | for _, tt := range []struct { 253 | name string 254 | input Metadatas 255 | want string 256 | }{ 257 | { 258 | name: "one meta", 259 | input: Metadatas{ 260 | &Metadata{ 261 | Key: "foo", 262 | Value: "bar", 263 | }, 264 | }, 265 | want: "metadata:foo bar;", 266 | }, 267 | { 268 | name: "three meta", 269 | input: Metadatas{ 270 | &Metadata{ 271 | Key: "created_at", 272 | Value: "2019_01_01", 273 | }, 274 | &Metadata{ 275 | Key: "updated_at", 276 | Value: "2019_01_07", 277 | }, 278 | &Metadata{ 279 | Key: "target", 280 | Value: "Windows", 281 | }, 282 | }, 283 | want: "metadata:created_at 2019_01_01, updated_at 2019_01_07, target Windows;", 284 | }, 285 | } { 286 | got := tt.input.String() 287 | if got != tt.want { 288 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 289 | } 290 | } 291 | } 292 | 293 | func TestNetString(t *testing.T) { 294 | for _, tt := range []struct { 295 | name string 296 | input []string 297 | want string 298 | }{ 299 | { 300 | name: "one net", 301 | input: []string{"$HOME_NET"}, 302 | want: "$HOME_NET", 303 | }, 304 | { 305 | name: "three nets", 306 | input: []string{"$HOME_NET", "!$FOO_NET", 
"192.168.0.0/16"}, 307 | want: "[$HOME_NET,!$FOO_NET,192.168.0.0/16]", 308 | }, 309 | { 310 | name: "grouped ports", 311 | input: []string{"1:80", "![2,4]", "[6,7,8]"}, 312 | want: "[1:80,![2,4],[6,7,8]]", 313 | }, 314 | { 315 | name: "grouped networks", 316 | input: []string{"192.168.0.0/16", "![192.168.86.0/24,192.168.87.0/24]"}, 317 | want: "[192.168.0.0/16,![192.168.86.0/24,192.168.87.0/24]]", 318 | }, 319 | } { 320 | got := netString(tt.input) 321 | if got != tt.want { 322 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 323 | } 324 | } 325 | } 326 | 327 | func TestNetworkString(t *testing.T) { 328 | for _, tt := range []struct { 329 | name string 330 | input Network 331 | want string 332 | }{ 333 | { 334 | name: "simple net", 335 | input: Network{ 336 | Nets: []string{"$HOME_NET"}, 337 | Ports: []string{"$HTTP_PORTS"}, 338 | }, 339 | want: "$HOME_NET $HTTP_PORTS", 340 | }, 341 | { 342 | name: "complex net", 343 | input: Network{ 344 | Nets: []string{"$HOME_NET", "!$FOO_NET", "192.168.0.0/16"}, 345 | Ports: []string{"$HTTP_PORTS", "!53", "$BAR_NET"}, 346 | }, 347 | want: "[$HOME_NET,!$FOO_NET,192.168.0.0/16] [$HTTP_PORTS,!53,$BAR_NET]", 348 | }, 349 | { 350 | name: "grouped ports", 351 | input: Network{ 352 | Nets: []string{"$HOME_NET", "!$FOO_NET", "192.168.0.0/16"}, 353 | Ports: []string{"1:80", "![2,4]", "[6,7,8]"}, 354 | }, 355 | want: "[$HOME_NET,!$FOO_NET,192.168.0.0/16] [1:80,![2,4],[6,7,8]]", 356 | }, 357 | { 358 | name: "grouped networks", 359 | input: Network{ 360 | Nets: []string{"192.168.0.0/16", "![192.168.86.0/24,192.168.87.0/24]"}, 361 | Ports: []string{"$HTTP_PORTS", "!53", "$BAR_NET"}, 362 | }, 363 | want: "[192.168.0.0/16,![192.168.86.0/24,192.168.87.0/24]] [$HTTP_PORTS,!53,$BAR_NET]", 364 | }, 365 | } { 366 | got := tt.input.String() 367 | if got != tt.want { 368 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 369 | } 370 | } 371 | } 372 | 373 | func TestByteMatchString(t *testing.T) { 374 | for _, tt := range 
[]struct { 375 | name string 376 | input ByteMatch 377 | want string 378 | }{ 379 | { 380 | name: "byte_test basic", 381 | input: ByteMatch{ 382 | Kind: bTest, 383 | NumBytes: "3", 384 | Operator: ">", 385 | Value: "300", 386 | Offset: 42, 387 | }, 388 | want: `byte_test:3,>,300,42;`, 389 | }, 390 | { 391 | name: "byte_jump basic", 392 | input: ByteMatch{ 393 | Kind: bJump, 394 | NumBytes: "3", 395 | Offset: 42, 396 | }, 397 | want: `byte_jump:3,42;`, 398 | }, 399 | { 400 | name: "byte_extract basic", 401 | input: ByteMatch{ 402 | Kind: bExtract, 403 | NumBytes: "3", 404 | Offset: 42, 405 | Variable: "foobar", 406 | }, 407 | want: `byte_extract:3,42,foobar;`, 408 | }, 409 | { 410 | name: "byte_test options", 411 | input: ByteMatch{ 412 | Kind: bTest, 413 | NumBytes: "3", 414 | Operator: ">", 415 | Value: "300", 416 | Offset: 42, 417 | Options: []string{"string", "dec"}, 418 | }, 419 | want: `byte_test:3,>,300,42,string,dec;`, 420 | }, 421 | { 422 | name: "byte_jump options", 423 | input: ByteMatch{ 424 | Kind: bJump, 425 | NumBytes: "3", 426 | Offset: 42, 427 | Options: []string{"relative", "post_offset 2", "bitmask 0x03f0"}, 428 | }, 429 | want: `byte_jump:3,42,relative,post_offset 2,bitmask 0x03f0;`, 430 | }, 431 | { 432 | name: "byte_extract options", 433 | input: ByteMatch{ 434 | Kind: bExtract, 435 | NumBytes: "3", 436 | Offset: 42, 437 | Variable: "foobar", 438 | Options: []string{"relative", "bitmask 0x03ff"}, 439 | }, 440 | want: `byte_extract:3,42,foobar,relative,bitmask 0x03ff;`, 441 | }, 442 | } { 443 | got := tt.input.String() 444 | if got != tt.want { 445 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 446 | } 447 | } 448 | } 449 | 450 | func TestBase64DecodeString(t *testing.T) { 451 | for _, tt := range []struct { 452 | name string 453 | input ByteMatch 454 | want string 455 | }{ 456 | { 457 | name: "base64_decode bare", 458 | input: ByteMatch{ 459 | Kind: b64Decode, 460 | }, 461 | want: `base64_decode;`, 462 | }, 463 | { 464 | name: 
"base64_decode some options", 465 | input: ByteMatch{ 466 | Kind: b64Decode, 467 | NumBytes: "1", 468 | Options: []string{"relative"}, 469 | }, 470 | want: `base64_decode:bytes 1,relative;`, 471 | }, 472 | { 473 | name: "base64_decode all options", 474 | input: ByteMatch{ 475 | Kind: b64Decode, 476 | NumBytes: "1", 477 | Offset: 2, 478 | Options: []string{"relative"}, 479 | }, 480 | want: `base64_decode:bytes 1,offset 2,relative;`, 481 | }, 482 | } { 483 | got := tt.input.base64DecodeString() 484 | if got != tt.want { 485 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 486 | } 487 | } 488 | } 489 | 490 | func TestTLSTagString(t *testing.T) { 491 | for _, tt := range []struct { 492 | name string 493 | input TLSTag 494 | want string 495 | }{ 496 | { 497 | name: "simple quoted", 498 | input: TLSTag{ 499 | Key: "tls.subject", 500 | Value: "CN=*.googleusercontent.com", 501 | }, 502 | want: `tls.subject:"CN=*.googleusercontent.com";`, 503 | }, 504 | { 505 | name: "negated quoted", 506 | input: TLSTag{ 507 | Negate: true, 508 | Key: "tls.issuerdn", 509 | Value: "CN=Google-Internet-Authority", 510 | }, 511 | want: `tls.issuerdn:!"CN=Google-Internet-Authority";`, 512 | }, 513 | { 514 | name: "simple unquoted", 515 | input: TLSTag{ 516 | Key: "tls.version", 517 | Value: "1.2", 518 | }, 519 | want: "tls.version:1.2;", 520 | }, 521 | // TODO(duane): Confirm if negation of this is valid. 
522 | { 523 | name: "negated unquoted", 524 | input: TLSTag{ 525 | Negate: true, 526 | Key: "tls.version", 527 | Value: "1.2", 528 | }, 529 | want: "tls.version:!1.2;", 530 | }, 531 | } { 532 | got := tt.input.String() 533 | if got != tt.want { 534 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 535 | } 536 | } 537 | } 538 | 539 | func TestLenMatchString(t *testing.T) { 540 | for _, tt := range []struct { 541 | name string 542 | input *LenMatch 543 | want string 544 | }{ 545 | { 546 | name: "no operator", 547 | input: &LenMatch{ 548 | Kind: iCode, 549 | Num: 3, 550 | }, 551 | want: `icode:3;`, 552 | }, 553 | { 554 | name: "single operator", 555 | input: &LenMatch{ 556 | Kind: iCode, 557 | Operator: ">", 558 | Num: 3, 559 | }, 560 | want: `icode:>3;`, 561 | }, 562 | { 563 | name: "min and max", 564 | input: &LenMatch{ 565 | Kind: iType, 566 | Operator: "<>", 567 | Min: 1, 568 | Max: 2, 569 | }, 570 | want: `itype:1<>2;`, 571 | }, 572 | { 573 | name: "options", 574 | input: &LenMatch{ 575 | Kind: uriLen, 576 | Operator: "<>", 577 | Min: 1, 578 | Max: 2, 579 | Options: []string{"raw"}, 580 | }, 581 | want: `urilen:1<>2,raw;`, 582 | }, 583 | } { 584 | got := tt.input.String() 585 | if got != tt.want { 586 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 587 | } 588 | } 589 | } 590 | 591 | func TestContentString(t *testing.T) { 592 | for _, tt := range []struct { 593 | name string 594 | input Content 595 | want string 596 | }{ 597 | { 598 | name: "basic", 599 | input: Content{ 600 | Pattern: []byte("AA"), 601 | }, 602 | want: `content:"AA";`, 603 | }, 604 | { 605 | name: "basic escaped char", 606 | input: Content{ 607 | Pattern: []byte("AA;"), 608 | }, 609 | want: `content:"AA|3B|";`, 610 | }, 611 | { 612 | name: "negated content", 613 | input: Content{ 614 | Negate: true, 615 | Pattern: []byte("AA"), 616 | }, 617 | want: `content:!"AA";`, 618 | }, 619 | { 620 | name: "content with one option", 621 | input: Content{ 622 | Pattern: 
[]byte("AA"), 623 | Options: []*ContentOption{ 624 | { 625 | Name: "http_uri", 626 | }, 627 | }, 628 | }, 629 | want: `content:"AA"; http_uri;`, 630 | }, 631 | { 632 | name: "content with multiple options", 633 | input: Content{ 634 | Pattern: []byte("AA"), 635 | Options: []*ContentOption{ 636 | { 637 | Name: "http_uri", 638 | }, 639 | { 640 | Name: "depth", 641 | Value: "0", 642 | }, 643 | }, 644 | }, 645 | want: `content:"AA"; http_uri; depth:0;`, 646 | }, 647 | { 648 | name: "content with multiple options and fast_pattern", 649 | input: Content{ 650 | Pattern: []byte("AA"), 651 | Options: []*ContentOption{ 652 | { 653 | Name: "http_uri", 654 | }, 655 | { 656 | Name: "depth", 657 | Value: "0", 658 | }, 659 | }, 660 | FastPattern: FastPattern{ 661 | Enabled: true, 662 | }, 663 | }, 664 | want: `content:"AA"; http_uri; depth:0; fast_pattern;`, 665 | }, 666 | } { 667 | got := tt.input.String() 668 | if got != tt.want { 669 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 670 | } 671 | } 672 | } 673 | 674 | func TestPCREString(t *testing.T) { 675 | for _, tt := range []struct { 676 | name string 677 | input PCRE 678 | want string 679 | }{ 680 | { 681 | name: "basic", 682 | input: PCRE{ 683 | Pattern: []byte("foo.*bar"), 684 | Options: []byte("iU"), 685 | }, 686 | want: `pcre:"/foo.*bar/iU";`, 687 | }, 688 | { 689 | name: "negate", 690 | input: PCRE{ 691 | Negate: true, 692 | Pattern: []byte("foo.*bar"), 693 | Options: []byte("iU"), 694 | }, 695 | want: `pcre:!"/foo.*bar/iU";`, 696 | }, 697 | { 698 | name: "no options", 699 | input: PCRE{ 700 | Pattern: []byte("foo.*bar"), 701 | }, 702 | want: `pcre:"/foo.*bar/";`, 703 | }, 704 | } { 705 | got := tt.input.String() 706 | if got != tt.want { 707 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 708 | } 709 | } 710 | } 711 | 712 | func TestFlowbitsString(t *testing.T) { 713 | for _, tt := range []struct { 714 | name string 715 | input *Flowbit 716 | want string 717 | }{ 718 | { 719 | name: 
"action only", 720 | input: &Flowbit{ 721 | Action: "noalert", 722 | Value: "", 723 | }, 724 | want: `flowbits:noalert;`, 725 | }, 726 | { 727 | name: "simple flowbits", 728 | input: &Flowbit{ 729 | Action: "set", 730 | Value: "EvilIP", 731 | }, 732 | want: `flowbits:set,EvilIP;`, 733 | }, 734 | } { 735 | got := tt.input.String() 736 | if got != tt.want { 737 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 738 | } 739 | } 740 | } 741 | 742 | func TestXbitsString(t *testing.T) { 743 | for _, tt := range []struct { 744 | name string 745 | input *Xbit 746 | want string 747 | }{ 748 | { 749 | name: "basic set", 750 | input: &Xbit{ 751 | Action: "set", 752 | Name: "foo", 753 | Track: "ip_src", 754 | }, 755 | want: `xbits:set,foo,track ip_src;`, 756 | }, 757 | { 758 | name: "with expire set", 759 | input: &Xbit{ 760 | Action: "set", 761 | Name: "foo", 762 | Track: "ip_src", 763 | Expire: "5", 764 | }, 765 | want: `xbits:set,foo,track ip_src,expire 5;`, 766 | }, 767 | } { 768 | got := tt.input.String() 769 | if got != tt.want { 770 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 771 | } 772 | } 773 | } 774 | 775 | func TestFlowintsString(t *testing.T) { 776 | for _, tt := range []struct { 777 | name string 778 | input *Flowint 779 | want string 780 | }{ 781 | { 782 | name: "action only", 783 | input: &Flowint{ 784 | Name: "foo", 785 | Modifier: "+", 786 | Value: "1", 787 | }, 788 | want: `flowint:foo,+,1;`, 789 | }, 790 | { 791 | name: "isnotset only", 792 | input: &Flowint{ 793 | Name: "foo", 794 | Modifier: "isnotset", 795 | }, 796 | want: `flowint:foo,isnotset;`, 797 | }, 798 | { 799 | name: "extraneous value", 800 | input: &Flowint{ 801 | Name: "foo", 802 | Modifier: "isnotset", 803 | Value: "1", 804 | }, 805 | want: `flowint:foo,isnotset;`, 806 | }, 807 | } { 808 | got := tt.input.String() 809 | if got != tt.want { 810 | t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want) 811 | } 812 | } 813 | } 814 | 815 | func TestRuleString(t 
*testing.T) { 816 | for _, tt := range []struct { 817 | name string 818 | input Rule 819 | want string 820 | }{ 821 | { 822 | name: "rule", 823 | input: Rule{ 824 | Action: "alert", 825 | Protocol: "udp", 826 | Source: Network{ 827 | Nets: []string{"$HOME_NET"}, 828 | Ports: []string{"any"}, 829 | }, 830 | Destination: Network{ 831 | Nets: []string{"$EXTERNAL_NET"}, 832 | Ports: []string{"any"}, 833 | }, 834 | SID: 1337, 835 | Revision: 2, 836 | Description: "foo", 837 | Matchers: []orderedMatcher{ 838 | &Content{ 839 | Pattern: []byte("AA"), 840 | }, 841 | }, 842 | }, 843 | want: `alert udp $HOME_NET any -> $EXTERNAL_NET any (msg:"foo"; content:"AA"; sid:1337; rev:2;)`, 844 | }, 845 | { 846 | name: "rule with datapos", 847 | input: Rule{ 848 | Action: "alert", 849 | Protocol: "udp", 850 | Source: Network{ 851 | Nets: []string{"$HOME_NET"}, 852 | Ports: []string{"any"}, 853 | }, 854 | Destination: Network{ 855 | Nets: []string{"$EXTERNAL_NET"}, 856 | Ports: []string{"any"}, 857 | }, 858 | SID: 1337, 859 | Revision: 2, 860 | Description: "foo", 861 | Matchers: []orderedMatcher{ 862 | &Content{ 863 | Pattern: []byte("AA"), 864 | }, 865 | &Content{ 866 | Pattern: []byte("BB"), 867 | DataPosition: fileData, 868 | }, 869 | }, 870 | }, 871 | want: `alert udp $HOME_NET any -> $EXTERNAL_NET any (msg:"foo"; content:"AA"; file_data; content:"BB"; sid:1337; rev:2;)`, 872 | }, 873 | { 874 | name: "rule with flow and tag", 875 | input: Rule{ 876 | Action: "alert", 877 | Protocol: "tcp", 878 | Source: Network{ 879 | Nets: []string{"$HOME_NET"}, 880 | Ports: []string{"any"}, 881 | }, 882 | Destination: Network{ 883 | Nets: []string{"$EXTERNAL_NET"}, 884 | Ports: []string{"any"}, 885 | }, 886 | SID: 1337, 887 | Revision: 2, 888 | Description: "foo", 889 | Matchers: []orderedMatcher{ 890 | &Content{ 891 | Pattern: []byte("AA"), 892 | }, 893 | }, 894 | Tags: map[string]string{"flow": "to_server", "app-layer-protocol": "tls"}, 895 | }, 896 | want: `alert tcp $HOME_NET any -> 
$EXTERNAL_NET any (msg:"foo"; flow:to_server; content:"AA"; app-layer-protocol:tls; sid:1337; rev:2;)`, 897 | }, 898 | { 899 | name: "rule with pcre", 900 | input: Rule{ 901 | Action: "alert", 902 | Protocol: "udp", 903 | Source: Network{ 904 | Nets: []string{"$HOME_NET"}, 905 | Ports: []string{"any"}, 906 | }, 907 | Destination: Network{ 908 | Nets: []string{"$EXTERNAL_NET"}, 909 | Ports: []string{"any"}, 910 | }, 911 | SID: 1337, 912 | Revision: 2, 913 | Description: "foo", 914 | Matchers: []orderedMatcher{ 915 | &Content{ 916 | Pattern: []byte("AA"), 917 | }, 918 | &PCRE{ 919 | Pattern: []byte("foo.*bar"), 920 | Options: []byte("Ui"), 921 | }, 922 | }, 923 | }, 924 | want: `alert udp $HOME_NET any -> $EXTERNAL_NET any (msg:"foo"; content:"AA"; pcre:"/foo.*bar/Ui"; sid:1337; rev:2;)`, 925 | }, 926 | { 927 | name: "rule with pcre", 928 | input: Rule{ 929 | Action: "alert", 930 | Protocol: "udp", 931 | Source: Network{ 932 | Nets: []string{"$HOME_NET"}, 933 | Ports: []string{"any"}, 934 | }, 935 | Destination: Network{ 936 | Nets: []string{"$EXTERNAL_NET"}, 937 | Ports: []string{"any"}, 938 | }, 939 | SID: 1337, 940 | Revision: 2, 941 | Description: "foo", 942 | Tags: map[string]string{ 943 | "classtype": "trojan-activity", 944 | }, 945 | Matchers: []orderedMatcher{ 946 | &Content{ 947 | Pattern: []byte("AA"), 948 | }, 949 | }, 950 | }, 951 | want: `alert udp $HOME_NET any -> $EXTERNAL_NET any (msg:"foo"; content:"AA"; classtype:trojan-activity; sid:1337; rev:2;)`, 952 | }, 953 | { 954 | name: "rule with flowbits", 955 | input: Rule{ 956 | Action: "alert", 957 | Protocol: "http", 958 | Source: Network{ 959 | Nets: []string{"$HOME_NET"}, 960 | Ports: []string{"any"}, 961 | }, 962 | Destination: Network{ 963 | Nets: []string{"$EXTERNAL_NET"}, 964 | Ports: []string{"any"}, 965 | }, 966 | SID: 1223, 967 | Revision: 3, 968 | Description: "Flowbits test", 969 | Flowbits: []*Flowbit{ 970 | { 971 | Action: "set", 972 | Value: "testbits", 973 | }, 974 | { 975 | Action: 
					"noalert",
					Value:  "",
				},
			},
		},
		// Flowbits serialize in declaration order: set first, then noalert.
		want: `alert http $HOME_NET any -> $EXTERNAL_NET any (msg:"Flowbits test"; flowbits:set,testbits; flowbits:noalert; sid:1223; rev:3;)`,
	},
	{
		// flowint keywords render as flowint:<name>,<modifier>,<value>.
		name: "rule with flowints",
		input: Rule{
			Action:   "alert",
			Protocol: "http",
			Source: Network{
				Nets:  []string{"$HOME_NET"},
				Ports: []string{"any"},
			},
			Destination: Network{
				Nets:  []string{"$EXTERNAL_NET"},
				Ports: []string{"any"},
			},
			SID:         1223,
			Revision:    3,
			Description: "Flowints test",
			Flowints: []*Flowint{
				{
					Name:     "foo",
					Modifier: ">",
					Value:    "1",
				},
				{
					Name:     "bar",
					Modifier: "+",
					Value:    "1",
				},
			},
		},
		want: `alert http $HOME_NET any -> $EXTERNAL_NET any (msg:"Flowints test"; flowint:foo,>,1; flowint:bar,+,1; sid:1223; rev:3;)`,
	},
	{
		// xbits render "track <target>" and, when set, an ",expire <secs>" suffix.
		name: "rule with xbits",
		input: Rule{
			Action:   "alert",
			Protocol: "http",
			Source: Network{
				Nets:  []string{"$HOME_NET"},
				Ports: []string{"any"},
			},
			Destination: Network{
				Nets:  []string{"$EXTERNAL_NET"},
				Ports: []string{"any"},
			},
			SID:         1223,
			Revision:    3,
			Description: "Xbits test",
			Xbits: []*Xbit{
				{
					Action: "set",
					Name:   "foo",
					Track:  "ip_src",
				},
				{
					Action: "set",
					Name:   "bar",
					Track:  "ip_src",
					Expire: "60",
				},
			},
		},
		want: `alert http $HOME_NET any -> $EXTERNAL_NET any (msg:"Xbits test"; xbits:set,foo,track ip_src; xbits:set,bar,track ip_src,expire 60; sid:1223; rev:3;)`,
	},
	{
		// A LenMatch of kind bSize between two Contents must emit bsize:10
		// inside the http.uri sticky-buffer section.
		name: "rule with bsize",
		input: Rule{
			Action:   "alert",
			Protocol: "http",
			Source: Network{
				Nets:  []string{"$HOME_NET"},
				Ports: []string{"any"},
			},
			Destination: Network{
				Nets:  []string{"$EXTERNAL_NET"},
				Ports: []string{"any"},
			},
			SID:         1234,
			Revision:    2,
			Description: "new sticky buffers",
			Matchers: []orderedMatcher{
				&Content{
					DataPosition: httpMethod,
					Pattern:      []byte("POST"),
				},
				&LenMatch{
					DataPosition: httpURI,
					Kind:         bSize,
					Num:          10,
				},
				&Content{
					DataPosition: httpURI,
					Pattern:      []byte("foo"),
				},
			},
		},
		want: `alert http $HOME_NET any -> $EXTERNAL_NET any (msg:"new sticky buffers"; http.method; content:"POST"; http.uri; bsize:10; content:"foo"; sid:1234; rev:2;)`,
	},
	{
		// Multiple source nets/ports must be wrapped in a single bracketed,
		// comma-joined group; nested/negated groups pass through verbatim.
		name: "rule with port and network groups",
		input: Rule{
			Action:   "alert",
			Protocol: "http",
			Source: Network{
				Nets:  []string{"192.168.0.0/16", "![192.168.86.0/24,192.168.87.0/24]"},
				Ports: []string{"1:80", "![2,4]", "[6,7,8]"},
			},
			Destination: Network{
				Nets:  []string{"$EXTERNAL_NET"},
				Ports: []string{"any"},
			},
			SID:         1234,
			Revision:    2,
			Description: "grouped network bits",
			Matchers: []orderedMatcher{
				&Content{
					DataPosition: httpMethod,
					Pattern:      []byte("POST"),
				},
				&Content{
					DataPosition: httpURI,
					Pattern:      []byte("foo"),
				},
			},
		},
		want: `alert http [192.168.0.0/16,![192.168.86.0/24,192.168.87.0/24]] [1:80,![2,4],[6,7,8]] -> $EXTERNAL_NET any (msg:"grouped network bits"; http.method; content:"POST"; http.uri; content:"foo"; sid:1234; rev:2;)`,
	},
	{
		// A PCRE positioned at pktData after an http.method Content must
		// re-emit the pkt_data sticky buffer before the pcre keyword.
		name: "rule with sticky buffer before pcre",
		input: Rule{
			Action:   "alert",
			Protocol: "http",
			Source: Network{
				Nets:  []string{"$HOME_NET"},
				Ports: []string{"any"},
			},
			Destination: Network{
				Nets:  []string{"$EXTERNAL_NET"},
				Ports: []string{"any"},
			},
			SID:         1234,
			Revision:    2,
			Description: "pcre new sticky buffer",
			Matchers: []orderedMatcher{
				&Content{
					DataPosition: httpMethod,
					Pattern:      []byte("POST"),
				},
				&PCRE{
					DataPosition: pktData,
					Pattern:      []byte("foo.*bar"),
					Options:      []byte("i"),
				},
			},
		},
		want: `alert http $HOME_NET any -> $EXTERNAL_NET any (msg:"pcre new sticky buffer"; http.method; content:"POST"; pkt_data; pcre:"/foo.*bar/i"; sid:1234; rev:2;)`,
	},
	} {
		// Each case renders the Rule and compares against the exact expected text.
		got := tt.input.String()
		if got != tt.want {
			t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want)
		}
	}
}

// TestRE verifies that Rule.RE renders a parsed rule's content matches as a
// single regular expression: regex metacharacters in patterns (the "(" from
// |28| and the literal ".") are escaped, and a "within:40" constraint between
// two contents becomes a bounded gap (.{0,40}).
func TestRE(t *testing.T) {
	for _, tt := range []struct {
		rule string
		want string
	}{
		{
			rule: `alert udp $HOME_NET any -> $EXTERNAL_NET any (sid:1337; msg:"foo"; content:"|28|foo"; content:".AA"; within:40;)`,
			want: `.*\(foo.{0,40}\.AA`,
		},
	} {
		r, err := ParseRule(tt.rule)
		if err != nil {
			t.Fatalf("re: parse rule failed: %v", err)
		}
		if got := r.RE(); got != tt.want {
			t.Fatalf("re: got=%v; want=%v", got, tt.want)
		}
	}
}

// TestLastContent verifies that Rule.LastContent returns the final Content
// matcher of a parsed rule (with its options), skipping trailing non-Content
// matchers such as pcre, and returns nil when the rule has no contents.
func TestLastContent(t *testing.T) {
	for _, tt := range []struct {
		rule string
		want *Content
	}{
		{
			rule: `alert udp $HOME_NET any -> $EXTERNAL_NET any (sid:1337; msg:"foo"; content:"bar";)`,
			want: &Content{Pattern: []byte("bar")},
		},
		{
			// The last *content* is "foo" (with within:40) even though a pcre
			// appears after it in the rule body.
			rule: `alert udp $HOME_NET any -> $EXTERNAL_NET any (sid:1337; msg:"foo"; content:"bar"; pcre:"/foo.*bar/iU"; content:"foo"; within:40; pcre:"/foo.*bar.*baz/iU";)`,
			want: &Content{
				Pattern: []byte("foo"),
				Options: []*ContentOption{
					{
						Name:  "within",
						Value: "40",
					},
				},
			},
		},
		{
			// No content keywords at all -> nil.
			rule: `alert udp $HOME_NET any -> $EXTERNAL_NET any (sid:1337; msg:"foo";)`,
			want: nil,
		},
	} {
		r, err := ParseRule(tt.rule)
		if err != nil {
			t.Fatalf("re: parse rule failed: %v", err)
		}
		diff := pretty.Compare(r.LastContent(), tt.want)
		if diff != "" {
			t.Fatalf("diff (-got +want):\n%s", diff)
		}
	}
}

// TestDataPosString spot-checks DataPos.String for a few representative
// sticky-buffer positions.
func TestDataPosString(t *testing.T) {
	for _, tt := range []struct {
		val  DataPos
		want string
	}{
		{
			val:  pktData,
			want: "pkt_data",
		},
		{
			val:  base64Data,
			want: "base64_data",
		},
		{
			val:  httpRequestLine,
			want: "http_request_line",
		},
	} {
		s := tt.val.String()
		if s != tt.want {
			t.Fatalf("String: got=%v; want=%v", s, tt.want)
		}
	}
}

// TestIsStickyBuffer verifies the isStickyBuffer predicate: known
// sticky-buffer keyword strings are recognized, arbitrary strings are not.
func TestIsStickyBuffer(t *testing.T) {
	for _, tt := range []struct {
		buf  string
		want bool
	}{
		{
			buf:  "pkt_data",
			want: true,
		},
		{
			buf:  "foobarbaz",
			want: false,
		},
		{
			buf:  "http_request_line",
			want: true,
		},
	} {
		got := isStickyBuffer(tt.buf)
		if got != tt.want {
			t.Fatalf("got=%v; want=%v", got, tt.want)
		}
	}
}

// TestStickyBuffer verifies the string -> DataPos lookup, including the error
// path for unknown buffer names.
func TestStickyBuffer(t *testing.T) {
	for _, tt := range []struct {
		s       string
		want    DataPos
		wantErr bool
	}{
		{
			s:       "pkt_data",
			want:    pktData,
			wantErr: false,
		},
		{
			// Unknown name: expect an error. The returned value still compares
			// equal to pktData — presumably the zero DataPos; TODO confirm.
			s:       "foobarbaz",
			want:    pktData,
			wantErr: true,
		},
		{
			s:       "http_request_line",
			want:    httpRequestLine,
			wantErr: false,
		},
	} {
		got, gotErr := StickyBuffer(tt.s)
		if got != tt.want {
			t.Fatalf("got=%v; want=%v", got, tt.want)
		}
		if tt.wantErr != (gotErr != nil) {
			t.Fatalf("gotErr=%v; wantErr=%v", gotErr != nil, tt.wantErr)
		}

	}
}

// TestHasVar verifies Rule.HasVar: true only when some ByteMatch matcher
// declares the queried variable name.
func TestHasVar(t *testing.T) {
	for _, tt := range []struct {
		name string
		r    *Rule
		s    string
		want bool
	}{
		{
			name: "has var",
			r: &Rule{
				Matchers: []orderedMatcher{
					&ByteMatch{
						Variable: "foovar",
					},
				},
			},
			s:    "foovar",
			want: true,
		},
		{
			// NOTE(review): duplicate case name — this case expects false
			// (variable mismatch); a name like "does not have var" would be clearer.
			name: "has var",
			r: &Rule{
				Matchers: []orderedMatcher{
					&ByteMatch{
						Variable: "barvar",
					},
				},
			},
			s:    "foovar",
			want: false,
		},
		{
			name: "no byte matchers",
			r:    &Rule{},
			s:    "foovar",
			want: false,
		},
	} {
		got := tt.r.HasVar(tt.s)
		if got != tt.want {
			t.Fatalf("got=%v; want=%v", got, tt.want)
		}

	}
}

// TestInsertMatcher verifies Rule.InsertMatcher: insertion at the front,
// middle, and end of the Matchers slice (including non-Content matcher types),
// and that out-of-range positions return an error while leaving the rule
// unchanged.
func TestInsertMatcher(t *testing.T) {
	for _, tt := range []struct {
		name    string
		input   *Rule
		matcher orderedMatcher
		pos     int
		want    *Rule
		wantErr bool
	}{
		{
			// pos 0 prepends.
			name: "basic test",
			input: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			matcher: &Content{
				Pattern: []byte("bar"),
			},
			pos: 0,
			want: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("bar"),
					},
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			wantErr: false,
		},
		{
			// pos == len appends.
			name: "insert end",
			input: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			matcher: &Content{
				Pattern: []byte("bar"),
			},
			pos: 1,
			want: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
					&Content{
						Pattern: []byte("bar"),
					},
				},
			},
			wantErr: false,
		},
		{
			name: "insert middle",
			input: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
					&Content{
						Pattern: []byte("bar"),
					},
				},
			},
			matcher: &Content{
				Pattern: []byte("baz"),
			},
			pos: 1,
			want: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
					&Content{
						Pattern: []byte("baz"),
					},
					&Content{
						Pattern: []byte("bar"),
					},
				},
			},
			wantErr: false,
		},
		{
			// Non-Content matchers (here a ByteMatch) insert the same way.
			name: "insert different type",
			input: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
					&Content{
						Pattern: []byte("bar"),
					},
				},
			},
			matcher: &ByteMatch{
				Kind:     isDataAt,
				Negate:   true,
				NumBytes: "1",
			},
			pos: 1,
			want: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
					&ByteMatch{
						Kind:     isDataAt,
						Negate:   true,
						NumBytes: "1",
					},
					&Content{
						Pattern: []byte("bar"),
					},
				},
			},
			wantErr: false,
		},
		{
			// Negative index: error, rule unchanged.
			name: "index too small",

			input: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			matcher: &Content{
				Pattern: []byte("bar"),
			},
			pos: -1,
			want: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			wantErr: true,
		},
		{
			// Index beyond len: error, rule unchanged.
			name: "index too large",

			input: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			matcher: &Content{
				Pattern: []byte("bar"),
			},
			pos: 4,
			want: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			wantErr: true,
		},
		{
			name: "effectively append",

			input: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
				},
			},
			matcher: &Content{
				Pattern: []byte("bar"),
			},
			pos: 1,
			want: &Rule{
				Matchers: []orderedMatcher{
					&Content{
						Pattern: []byte("foo"),
					},
					&Content{
						Pattern: []byte("bar"),
					},
				},
			},
			wantErr: false,
		},
	} {
		gotErr := tt.input.InsertMatcher(tt.matcher, tt.pos)
		if tt.wantErr != (gotErr != nil) {
			t.Fatalf("gotErr=%v; wantErr=%v", gotErr != nil, tt.wantErr)
		}
		// The diff runs even for error cases: it proves the rule was left intact.
		diff := pretty.Compare(tt.input, tt.want)
		if diff != "" {
			t.Fatalf("%s: diff (-got +want):\n%s", tt.name, diff)
		}
	}
}

// TestRuleGetSidMsg verifies Rule.GetSidMsg sidmsg-style output:
// "SID || msg" followed by one " || type,value" segment per reference,
// in declaration order. (All three cases share the name "rule"; distinct
// names would make failures easier to attribute.)
func TestRuleGetSidMsg(t *testing.T) {
	for _, tt := range []struct {
		name  string
		input Rule
		want  string
	}{
		{
			name: "rule",
			input: Rule{
				SID:         1337,
				Description: "foo",
			},
			want: `1337 || foo`,
		},
		{
			name: "rule",
			input: Rule{
				SID:         1337,
				Description: "foo",
				References: []*Reference{
					{
						Type:  "url",
						Value: "www.google.com",
					},
				},
			},
			want: `1337 || foo || url,www.google.com`,
		},
		{
			name: "rule",
			input: Rule{
				SID:         1337,
				Description: "foo",
				References: []*Reference{
					{
						Type:  "url",
						Value: "www.google.com",
					},
					{
						Type:  "md5",
						Value: "2aee1c40199c7754da766e61452612cc",
					},
				},
			},
			want: `1337 || foo || url,www.google.com || md5,2aee1c40199c7754da766e61452612cc`,
		},
	} {
		got := tt.input.GetSidMsg()
		if got != tt.want {
			t.Fatalf("%s: got %v -- expected %v", tt.name, got, tt.want)
		}
	}
}