├── .gitignore
├── LICENSE.md
├── Readme.md
├── go.mod
├── go.sum
├── input
│   ├── .gitkeep
│   ├── example-bases.txt
│   ├── example-domains.txt
│   ├── example-markers.txt
│   └── example-paths.txt
├── main.go
└── pkg
    ├── config
    │   └── config.go
    ├── domain
    │   └── domain.go
    ├── fasthttp
    │   └── client.go
    ├── http
    │   └── client.go
    ├── result
    │   └── result.go
    └── utils
        └── utils.go
/.gitignore:
--------------------------------------------------------------------------------
1 | input/private*
2 | .idea
3 | ai.sh
4 | ai.txt
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2014 DSecured
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | # Dynamic File Searcher
2 |
3 | ## Overview
4 |
5 | Dynamic File Searcher is an advanced, Go-based CLI tool designed for intelligent and deep web crawling. Its unique
6 | strength lies in its ability to dynamically generate and explore paths based on the target hosts, allowing for much
7 | deeper and more comprehensive scans than traditional tools. This tool has been part of DSecured's eASM Argos for
8 | several years and still generates value for our customers.
9 |
10 | ### Key Differentiators
11 |
12 | - Dynamic path generation based on host structure for deeper, more intelligent scans
13 | - Optional base paths for advanced URL generation
14 | - Flexible word separation options for more targeted searches
15 |
16 | While powerful alternatives like nuclei exist, Dynamic File Searcher offers easier handling and more flexibility in path
17 | generation compared to static, template-based approaches.
18 |
19 | ### Examples of Use Cases
20 |
21 | Imagine this being your input data:
22 |
23 | - Domain: vendorgo.abc.targetdomain.com
24 | - Paths: env
25 | - Markers: "activeProfiles"
26 |
27 | The tool will generate paths like:
28 |
29 | - https://vendorgo.abc.targetdomain.com/env
30 | - https://vendorgo.abc.targetdomain.com/vendorgo/env
31 | - https://vendorgo.abc.targetdomain.com/vendorgo-qa/env
32 | - ... and many more
33 |
34 | If you add base-paths like "admin" to the mix, the tool will generate even more paths:
35 |
36 | - https://vendorgo.abc.targetdomain.com/admin/env
37 | - https://vendorgo.abc.targetdomain.com/admin/vendorgo/env
38 | - https://vendorgo.abc.targetdomain.com/admin/vendorgo-qa/env
39 | - ... and many more
40 |
41 | If you know what you are doing, this tool can be a powerful ally in your arsenal for finding issues in web applications
42 | that common web application scanners will certainly miss.
43 |
44 | ## Features
45 |
46 | - Intelligent path generation based on host structure
47 | - Multi-domain or single-domain scanning
48 | - Optional base paths for additional URL generation
49 | - Concurrent requests for high-speed processing
50 | - Content-based file detection using customizable markers
51 | - Large file detection with configurable size thresholds
52 | - Partial content scanning for efficient marker detection in large files
53 | - HTTP status code filtering for focused results
54 | - Custom HTTP header support for advanced probing
55 | - Skipping certain domains when WAF is detected
56 | - Proxy support for anonymous scanning
57 | - Verbose mode for detailed output and analysis
58 |
59 | ## Installation
60 |
61 | ### Prerequisites
62 |
63 | - Go 1.19 or higher
64 |
65 | ### Compilation
66 |
67 | 1. Clone the repository:
68 | ```
69 | git clone https://github.com/dsecuredcom/dynamic-file-searcher.git
70 | cd dynamic-file-searcher
71 | ```
72 |
73 | 2. Build the binary:
74 | ```
75 | go build -o dynamic_file_searcher
76 | ```
77 |
78 | ## Usage
79 |
80 | Basic usage:
81 |
82 | ```
83 | ./dynamic_file_searcher -domain <domain> -paths <paths_file> [-markers <markers_file>]
84 | ```
85 |
86 | or
87 |
88 | ```
89 | ./dynamic_file_searcher -domains <domains_file> -paths <paths_file> [-markers <markers_file>]
90 | ```
91 |
92 | ### Command-line Options
93 |
94 | - `-domains`: File containing a list of domains to scan (one per line)
95 | - `-domain`: Single domain to scan (alternative to `-domains`)
96 | - `-paths`: File containing a list of paths to check on each domain (required)
97 | - `-markers`: File containing a list of content markers to search for (optional). Entries prefixed with `regex:` are treated as regular expressions (see the example after this list)
98 | - `-base-paths`: File containing a list of base paths for additional URL generation (optional). One base path per
99 |   line, each ending with "/" (e.g., "..;/")
100 | - `-concurrency`: Number of concurrent requests (default: 10)
101 | - `-timeout`: Timeout for each request (default: 12s)
102 | - `-verbose`: Enable verbose output
103 | - `-headers`: Extra headers to add to each request (format: 'Header1:Value1,Header2:Value2')
104 | - `-proxy`: Proxy URL (e.g., http://127.0.0.1:8080)
105 | - `-max-content-read`: Maximum size of content to read for marker checking, in bytes (default: 5242880)
106 | - `-force-http`: Force HTTP (instead of HTTPS) requests (default: false)
107 | - `-use-fasthttp`: Use fasthttp instead of net/http (default: false)
108 | - `-host-depth`: How many sub-subdomain levels to use for path generation (default: 6; e.g., 2 = test1-abc & test2 [based on test1-abc.test2.test3.example.com])
109 | - `-dont-generate-paths`: Don't generate paths based on host structure (default: false)
110 | - `-dont-append-envs`: Prevent appending environment words (-qa, ...) to generated words (default: false)
111 | - `-append-bypasses-to-words`: Append bypasses to words (admin -> admin; -> admin..;) (default: false)
112 | - `-min-content-size`: Minimum file size to consider, in bytes (default: 0)
113 | - `-http-statuses`: HTTP status codes to filter for (csv allowed; default: all)
114 | - `-content-types`: Content-Type header values to filter for (csv allowed, e.g. json,octet)
115 | - `-disallowed-content-types`: Content-Type header values to filter out (csv allowed, e.g. json,octet)
116 | - `-disallowed-content-strings`: Strings that mark a response as irrelevant when found in the body (csv allowed)
117 | - `-disable-duplicate-check`: Disable the duplicate response check (by host and size); keeping it active is recommended (default: false)
118 | - `-env-append-words`: Comma-separated list of environment words to append (e.g., dev,prod,api). If not specified, defaults to: prod,dev,test,admin,tool,manager
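
Marker files may mix literal strings and `regex:`-prefixed patterns; `input/example-markers.txt` ships with both forms:

```
secret-string-1
super-important-string-to-match
regex:\"\s?:\s?\"[a-z0-9\.-_]+\@[a-zA-Z0-9\.-_]+\.[a-z]{2,10}\"
```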
119 |
120 | ### Examples
121 |
122 | 1. Scan a single domain:
123 | ```
124 | ./dynamic_file_searcher -domain example.com -paths paths.txt -markers markers.txt
125 | ```
126 |
127 | 2. Scan multiple domains from a file:
128 | ```
129 | ./dynamic_file_searcher -domains domains.txt -paths paths.txt -markers markers.txt
130 | ```
131 |
132 | 3. Use base paths for additional URL generation:
133 | ```
134 | ./dynamic_file_searcher -domain example.com -paths paths.txt -markers markers.txt -base-paths base_paths.txt
135 | ```
136 |
137 | 4. Scan for large files (>5MB) with content type JSON:
138 | ```
139 | ./dynamic_file_searcher -domains domains.txt -paths paths.txt -min-content-size 5000000 -content-types json -http-statuses 200,206
140 | ```
141 |
142 | 5. Targeted scan through a proxy with custom headers:
143 | ```
144 | ./dynamic_file_searcher -domain example.com -paths paths.txt -markers markers.txt -proxy http://127.0.0.1:8080 -headers "User-Agent:CustomBot/1.0"
145 | ```
146 |
147 | 6. Verbose output with custom timeout:
148 | ```
149 | ./dynamic_file_searcher -domain example.com -paths paths.txt -markers markers.txt -verbose -timeout 30s
150 | ```
151 |
152 | 7. Scan only root paths without generating additional paths:
153 | ```
154 | ./dynamic_file_searcher -domain example.com -paths paths.txt -markers markers.txt -dont-generate-paths
155 | ```
156 |
157 | ## Understanding the flags
158 |
159 | There are a few very important flags that you should understand before using the tool:
160 |
161 | - `-host-depth`
162 | - `-dont-generate-paths`
163 | - `-dont-append-envs`
164 | - `-append-bypasses-to-words`
165 | - `-env-append-words`
166 |
167 | Given the following host structure: `housetodo.some-word.thisthat.example.com`
168 |
169 | ### host-depth
170 |
171 | This flag is used to determine how many sub-subdomains to use for path generation. For example, if `-host-depth` is set
172 | to 2, the tool will generate paths based on `housetodo.some-word`. If `-host-depth` is set to 1, the tool will generate
173 | paths based on `housetodo` only.
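
As a minimal, self-contained sketch of this depth limit (mirroring the truncation step in `pkg/domain/domain.go`, which runs after the TLD has been stripped):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	hostDepth := 2
	// The tool removes the TLD ("example.com") before this step.
	parts := strings.Split("housetodo.some-word.thisthat", ".")
	if hostDepth > 0 && len(parts) >= hostDepth {
		parts = parts[:hostDepth] // keep only the leftmost parts
	}
	fmt.Println(parts) // [housetodo some-word]
}
```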
174 |
175 | ### dont-generate-paths
176 |
177 | This will simply prevent the tool from generating paths based on the host structure. If this flag is enabled, the tool
178 | will only use the paths provided in the `-paths` file as well as in the `-base-paths` file.
179 |
180 | ### dont-append-envs
181 |
182 | This tool tries to generate sane values for relevant words. In our example, one of those words would be `housetodo`. If
183 | this flag is enabled, the tool will not append environment words to the requests. For example, if the tool
184 | detects `housetodo` as a word, it will not append `-qa`, `-dev`, `-prod`, etc. to the word.
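
When appending is active, each purely alphabetic word is combined with every configured environment word using several separators (this mirrors the variant generation in `pkg/domain/domain.go`); for `housetodo` and `prod` that yields:

```
housetodoprod
housetodo-prod
housetodo_prod
housetodo/prod
```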
185 |
186 | ### append-bypasses-to-words
187 |
188 | This flag is used to append bypasses to words. For example, if the tool detects `admin` as a word, it will
189 | append `admin;` and `admin..;` etc. to the word. This is useful for bypassing filters.
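
With the path `env` and an illustrative host, this produces additional URL candidates such as:

```
https://target.example.com/admin;/env
https://target.example.com/admin..;/env
```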
190 |
191 | ### env-append-words
192 | This flag allows you to customize the list of environment words that will be appended to relevant words during path generation.
193 | By default, the tool uses a predefined list: `prod,dev,test,admin,tool,manager`.
194 | You can override this with your own comma-separated list of words.
195 |
196 | For example:
197 |
198 | `./dynamic_file_searcher -domain example.com -paths paths.txt -env-append-words "development,production,staging,beta"`
199 |
200 | This would generate paths like:
201 | - /housetodo-development
202 | - /housetodo-production
203 | - /housetodo-staging
204 | - /housetodo-beta
205 |
206 | Note that this flag only has an effect if `-dont-append-envs` is not set.
207 | When `-dont-append-envs` is true, no environment words will be appended regardless of the `-env-append-words` value.
208 |
209 | ## How It Works
210 |
211 | 1. The tool reads the domain(s) from either the `-domain` flag or the `-domains` file.
212 | 2. It reads the list of paths from the specified `-paths` file.
213 | 3. If provided, it reads additional base paths from the `-base-paths` file.
214 | 4. It analyzes each domain to extract meaningful components (subdomains, main domain, etc.).
215 | 5. Using these components and the provided paths (and base paths if available), it dynamically generates a comprehensive
216 |    set of URLs to scan (see the condensed loop after this list).
217 | 6. Concurrent workers send HTTP GET requests to these URLs.
218 | 7. For each response:
219 |    - The tool reads up to `max-content-read` bytes for marker checking.
220 |    - It determines the full file size from the `Content-Range` response header (requests carry a byte `Range` header), falling back to the number of bytes read.
221 |    - The response is analyzed based on:
222 |      * presence of specified content markers in the read portion (if markers are provided),
223 |      * OR the following rule-based filters:
224 |        * total file size (compared against `min-content-size`)
225 |        * content types (if specified) and disallowed content types (if specified)
226 |        * disallowed content strings (if specified)
227 |        * HTTP status code
228 |      * Important: these rule-based filters are not applied to marker-based checks.
229 | 8. Results are reported in real-time, with a progress bar indicating overall completion.
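
The generation loop from step 5, condensed from `generateAndStreamURLs` in `main.go` (`emit`, `proto`, and `host` are shorthand here; the real code streams each URL into the worker channel):

```go
for _, path := range paths {
	if !cfg.SkipRootFolderCheck {
		emit(proto + "://" + host + "/" + path)
	}
	for _, basePath := range cfg.BasePaths {
		emit(proto + "://" + host + "/" + basePath + "/" + path)
	}
	if cfg.DontGeneratePaths {
		continue
	}
	// Words are derived from the host structure (see pkg/domain).
	for _, word := range domain.GetRelevantDomainParts(host, cfg) {
		if len(cfg.BasePaths) == 0 {
			emit(proto + "://" + host + "/" + word + "/" + path)
		} else {
			for _, basePath := range cfg.BasePaths {
				emit(proto + "://" + host + "/" + basePath + "/" + word + "/" + path)
			}
		}
	}
}
```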
230 |
231 | This approach allows for efficient scanning of both small and large files, balancing thorough marker checking with
232 | memory-efficient handling of large files.
233 |
234 | ## Large File Handling
235 |
236 | The tool efficiently handles large files and octet streams by:
237 |
238 | - Reading a configurable portion of the file for marker checking
239 | - Determining the full file size without loading the entire file into memory
240 | - Reporting both on file size and marker presence, even for partially read files
241 |
242 | This allows for effective scanning of large files without running into memory issues.
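
Concretely, both HTTP clients request only the first `max-content-read` bytes and derive the total size from the `Content-Range` response header; condensed from `pkg/http/client.go`:

```go
req.Header.Set("Range", fmt.Sprintf("bytes=0-%d", maxContentRead-1))

// Prefer Content-Range ("bytes 0-x/total") for the real size;
// otherwise fall back to the number of bytes actually read.
var totalSize int64
if cr := resp.Header.Get("Content-Range"); cr != "" {
	if parts := strings.Split(cr, "/"); len(parts) == 2 {
		totalSize, _ = strconv.ParseInt(parts[1], 10, 64)
	}
} else {
	totalSize = int64(len(body))
}
```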
243 |
244 | It is recommended to use a generous timeout so the tool can read large files. The default timeout is 12 seconds.
245 |
246 | ## Security Considerations
247 |
248 | - Always ensure you have explicit permission to scan the target domains.
249 | - Use the proxy option for anonymity when necessary.
250 | - Be mindful of the load your scans might place on target servers.
251 | - Respect robots.txt files and website terms of service.
252 |
253 | ## Limitations
254 |
255 | - There is no separate rate-limiting flag; the request rate is coupled to `-concurrency` (N workers, limited to N requests/second).
256 | - Very large scale scans might require significant bandwidth and processing power. It is recommended to separate the
257 | input files and run multiple instances of the tool on different machines.
258 |
259 | ## Contributing
260 |
261 | Contributions are welcome! Please feel free to submit a Pull Request.
262 |
263 | ## License
264 |
265 | This project is licensed under the MIT License - see the LICENSE.md file for details.
266 |
267 |
268 | ## Disclaimer
269 |
270 | This tool is for educational and authorized testing purposes only. Misuse of this tool may be illegal. The authors are
271 | not responsible for any unauthorized use or damage caused by this tool.
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/dsecuredcom/dynamic-file-searcher
2 |
3 | go 1.19
4 |
5 | require (
6 | github.com/fatih/color v1.17.0
7 | github.com/valyala/fasthttp v1.55.0
8 | golang.org/x/time v0.6.0
9 | )
10 |
11 | require (
12 | github.com/andybalholm/brotli v1.1.0 // indirect
13 | github.com/klauspost/compress v1.17.9 // indirect
14 | github.com/mattn/go-colorable v0.1.13 // indirect
15 | github.com/mattn/go-isatty v0.0.20 // indirect
16 | github.com/valyala/bytebufferpool v1.0.0 // indirect
17 | golang.org/x/sys v0.22.0 // indirect
18 | )
19 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
2 | github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
3 | github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
4 | github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
5 | github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
6 | github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
7 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
8 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
9 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
10 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
11 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
12 | github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
13 | github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
14 | github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8=
15 | github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM=
16 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
17 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
18 | golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
19 | golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
20 | golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
21 | golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
22 |
--------------------------------------------------------------------------------
/input/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dsecuredcom/dynamic-file-searcher/aef3d2f9aa093be810a6a065ffa7a27e09dbb83d/input/.gitkeep
--------------------------------------------------------------------------------
/input/example-bases.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dsecuredcom/dynamic-file-searcher/aef3d2f9aa093be810a6a065ffa7a27e09dbb83d/input/example-bases.txt
--------------------------------------------------------------------------------
/input/example-domains.txt:
--------------------------------------------------------------------------------
1 | your-target-domain-1.com
--------------------------------------------------------------------------------
/input/example-markers.txt:
--------------------------------------------------------------------------------
1 | secret-string-1
2 | super-important-string-to-match
3 | regex:\"\s?:\s?\"[a-z0-9\.-_]+\@[a-zA-Z0-9\.-_]+\.[a-z]{2,10}\"
--------------------------------------------------------------------------------
/input/example-paths.txt:
--------------------------------------------------------------------------------
1 | test
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/config"
7 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/domain"
8 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/fasthttp"
9 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/http"
10 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/result"
11 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/utils"
12 | "github.com/fatih/color"
13 | "golang.org/x/time/rate"
14 | "math/rand"
15 | "os"
16 | "strings"
17 | "sync"
18 | "sync/atomic"
19 | "time"
20 | )
21 |
22 | const (
23 | // Buffer sizes tuned for better memory management
24 | urlBufferSize = 5000 // Increased for better worker feeding
25 | resultBufferSize = 100 // Smaller to avoid memory buildup
26 | )
27 |
28 | func main() {
29 | var markers []string
30 |
31 | cfg := config.ParseFlags()
32 |
33 | initialDomains := domain.GetDomains(cfg.DomainsFile, cfg.Domain)
34 | paths := utils.ReadLines(cfg.PathsFile)
35 | if cfg.MarkersFile != "" {
36 | markers = utils.ReadLines(cfg.MarkersFile)
37 | }
38 |
39 | limiter := rate.NewLimiter(rate.Limit(cfg.Concurrency), 1) // cap throughput at cfg.Concurrency requests per second (burst of 1)
40 |
41 | validateInput(initialDomains, paths, markers)
42 |
43 | rand.Seed(time.Now().UnixNano())
44 |
45 | printInitialInfo(cfg, initialDomains, paths)
46 |
47 | urlChan := make(chan string, urlBufferSize)
48 | resultsChan := make(chan result.Result, resultBufferSize)
49 |
50 | var client interface {
51 | MakeRequest(url string) result.Result
52 | }
53 |
54 | if cfg.FastHTTP {
55 | client = fasthttp.NewClient(cfg)
56 | } else {
57 | client = http.NewClient(cfg)
58 | }
59 |
60 | var processedCount int64
61 | var totalURLs int64
62 |
63 | // Start URL generation in a goroutine
64 | go generateURLs(initialDomains, paths, cfg, urlChan, &totalURLs)
65 |
66 | var wg sync.WaitGroup
67 | for i := 0; i < cfg.Concurrency; i++ {
68 | wg.Add(1)
69 | go worker(urlChan, resultsChan, &wg, client, &processedCount, limiter)
70 | }
71 |
72 | done := make(chan bool)
73 | go trackProgress(&processedCount, &totalURLs, done)
74 |
75 | go func() {
76 | wg.Wait()
77 | close(resultsChan)
78 | done <- true
79 | }()
80 |
81 | for res := range resultsChan {
82 | result.ProcessResult(res, cfg, markers)
83 | }
84 |
85 | color.Green("\n[✔] Scan completed.")
86 | }
87 |
88 | func validateInput(initialDomains, paths, markers []string) {
89 | if len(initialDomains) == 0 {
90 | color.Red("[✘] Error: The domain list is empty. Please provide at least one domain.")
91 | os.Exit(1)
92 | }
93 |
94 | if len(paths) == 0 {
95 | color.Red("[✘] Error: The path list is empty. Please provide at least one path.")
96 | os.Exit(1)
97 | }
98 |
99 | if len(markers) == 0 {
100 | color.Yellow("[!] Warning: The marker list is empty. The scan will rely solely on the rule-based filters (size, status code, content type), which might not be very useful.")
101 | }
102 | }
103 |
104 | func printInitialInfo(cfg config.Config, initialDomains, paths []string) {
105 |
106 | color.Cyan("[i] Scanning %d domains with %d paths", len(initialDomains), len(paths))
107 | color.Cyan("[i] Minimum file size to detect: %d bytes", cfg.MinContentSize)
108 | color.Cyan("[i] Filtering for HTTP status code: %s", cfg.HTTPStatusCodes)
109 |
110 | if len(cfg.ExtraHeaders) > 0 {
111 | color.Cyan("[i] Using extra headers:")
112 | for key, value := range cfg.ExtraHeaders {
113 | color.Cyan(" %s: %s", key, value)
114 | }
115 | }
116 | }
117 |
118 | func generateURLs(initialDomains, paths []string, cfg config.Config, urlChan chan<- string, totalURLs *int64) {
119 | defer close(urlChan)
120 |
121 | for _, domainD := range initialDomains {
122 | domainURLCount := generateAndStreamURLs(domainD, paths, &cfg, urlChan)
123 | atomic.AddInt64(totalURLs, int64(domainURLCount))
124 | }
125 | }
126 |
127 | func generateAndStreamURLs(domainD string, paths []string, cfg *config.Config, urlChan chan<- string) int {
128 | var urlCount int
129 |
130 | proto := "https"
131 | if cfg.ForceHTTPProt {
132 | proto = "http"
133 | }
134 |
135 | domainD = strings.TrimPrefix(domainD, "http://")
136 | domainD = strings.TrimPrefix(domainD, "https://")
137 | domainD = strings.TrimSuffix(domainD, "/")
138 |
139 | var sb strings.Builder
140 | sb.Grow(512) // Preallocate sufficient capacity
141 |
142 | for _, path := range paths {
143 | if strings.HasPrefix(path, "##") {
144 | continue
145 | }
146 |
147 | if !cfg.SkipRootFolderCheck {
148 | sb.WriteString(proto)
149 | sb.WriteString("://")
150 | sb.WriteString(domainD)
151 | sb.WriteString("/")
152 | sb.WriteString(path)
153 |
154 | urlChan <- sb.String()
155 | urlCount++
156 | sb.Reset()
157 | }
158 |
159 | for _, basePath := range cfg.BasePaths {
160 | sb.WriteString(proto)
161 | sb.WriteString("://")
162 | sb.WriteString(domainD)
163 | sb.WriteString("/")
164 | sb.WriteString(basePath)
165 | sb.WriteString("/")
166 | sb.WriteString(path)
167 |
168 | urlChan <- sb.String()
169 | urlCount++
170 | sb.Reset()
171 | }
172 |
173 | if cfg.DontGeneratePaths {
174 | continue
175 | }
176 |
177 | words := domain.GetRelevantDomainParts(domainD, cfg)
178 | for _, word := range words {
179 | if len(cfg.BasePaths) == 0 {
180 | sb.WriteString(proto)
181 | sb.WriteString("://")
182 | sb.WriteString(domainD)
183 | sb.WriteString("/")
184 | sb.WriteString(word)
185 | sb.WriteString("/")
186 | sb.WriteString(path)
187 |
188 | urlChan <- sb.String()
189 | urlCount++
190 | sb.Reset()
191 | } else {
192 | for _, basePath := range cfg.BasePaths {
193 | sb.WriteString(proto)
194 | sb.WriteString("://")
195 | sb.WriteString(domainD)
196 | sb.WriteString("/")
197 | sb.WriteString(basePath)
198 | sb.WriteString("/")
199 | sb.WriteString(word)
200 | sb.WriteString("/")
201 | sb.WriteString(path)
202 |
203 | urlChan <- sb.String()
204 | urlCount++
205 | sb.Reset()
206 | }
207 | }
208 | }
209 | }
210 |
211 | return urlCount
212 | }
213 |
214 | func worker(urls <-chan string, results chan<- result.Result, wg *sync.WaitGroup, client interface {
215 | MakeRequest(url string) result.Result
216 | }, processedCount *int64, limiter *rate.Limiter) {
217 | defer wg.Done()
218 |
219 | for url := range urls {
220 | err := limiter.Wait(context.Background())
221 | if err != nil {
222 | continue
223 | }
224 | res := client.MakeRequest(url)
225 | atomic.AddInt64(processedCount, 1)
226 | results <- res
227 | }
228 | }
229 |
230 | func trackProgress(processedCount, totalURLs *int64, done chan bool) {
231 | start := time.Now()
232 | lastProcessed := int64(0)
233 | lastUpdate := start
234 |
235 | for {
236 | select {
237 | case <-done:
238 | return
239 | default:
240 | now := time.Now()
241 | elapsed := now.Sub(start)
242 | currentProcessed := atomic.LoadInt64(processedCount)
243 | total := atomic.LoadInt64(totalURLs)
244 |
245 | // Calculate RPS
246 | intervalElapsed := now.Sub(lastUpdate)
247 | intervalProcessed := currentProcessed - lastProcessed
248 | rps := float64(intervalProcessed) / intervalElapsed.Seconds()
249 |
250 | if total > 0 {
251 | percentage := float64(currentProcessed) / float64(total) * 100
252 | estimatedTotal := float64(elapsed) / (float64(currentProcessed) / float64(total))
253 | remainingTime := time.Duration(estimatedTotal - float64(elapsed))
254 | fmt.Printf("\r%-100s", "")
255 | fmt.Printf("\rProgress: %.2f%% (%d/%d) | RPS: %.2f | Elapsed: %s | ETA: %s",
256 | percentage, currentProcessed, total, rps,
257 | elapsed.Round(time.Second), remainingTime.Round(time.Second))
258 | } else {
259 | fmt.Printf("\r%-100s", "")
260 | fmt.Printf("\rProcessed: %d | RPS: %.2f | Elapsed: %s",
261 | currentProcessed, rps, elapsed.Round(time.Second))
262 | }
263 |
264 | lastProcessed = currentProcessed
265 | lastUpdate = now
266 |
267 | time.Sleep(time.Second)
268 | }
269 | }
270 | }
271 |
--------------------------------------------------------------------------------
/pkg/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "bufio"
5 | "flag"
6 | "fmt"
7 | "net/url"
8 | "os"
9 | "strings"
10 | "time"
11 | )
12 |
13 | var defaultAppendEnvList = []string{"prod", "dev", "test", "admin", "tool", "manager"}
14 |
15 | type Config struct {
16 | DomainsFile string
17 | Domain string
18 | PathsFile string
19 | MarkersFile string
20 | BasePathsFile string
21 | Concurrency int
22 | Timeout time.Duration
23 | Verbose bool
24 | ProxyURL *url.URL
25 | ExtraHeaders map[string]string
26 | FastHTTP bool
27 | ForceHTTPProt bool
28 | HostDepth int
29 | AppendByPassesToWords bool
30 | SkipRootFolderCheck bool
31 | BasePaths []string
32 | DontGeneratePaths bool
33 | NoEnvAppending bool
34 | EnvRemoving bool
35 | MinContentSize int64
36 | MaxContentRead int64
37 | HTTPStatusCodes string
38 | ContentTypes string
39 | DisallowedContentTypes string
40 | DisallowedContentStrings string
41 | EnvAppendWords string
42 | AppendEnvList []string
43 | DisableDuplicateCheck bool
44 | }
45 |
46 | func ParseFlags() Config {
47 | cfg := Config{
48 | ExtraHeaders: make(map[string]string),
49 | }
50 | flag.StringVar(&cfg.DomainsFile, "domains", "", "File containing list of domains")
51 | flag.StringVar(&cfg.Domain, "domain", "", "Single domain to scan")
52 | flag.StringVar(&cfg.PathsFile, "paths", "", "File containing list of paths")
53 | flag.StringVar(&cfg.MarkersFile, "markers", "", "File containing list of markers")
54 | flag.StringVar(&cfg.BasePathsFile, "base-paths", "", "File containing list of base paths")
55 | flag.IntVar(&cfg.Concurrency, "concurrency", 10, "Number of concurrent requests")
56 | flag.IntVar(&cfg.HostDepth, "host-depth", 6, "How many sub-subdomains to use for path generation (e.g., 2 = test1-abc & test2 [based on test1-abc.test2.test3.example.com])")
57 | flag.BoolVar(&cfg.DontGeneratePaths, "dont-generate-paths", false, "If true, only the base paths (or nothing) will be used for scanning")
58 | flag.DurationVar(&cfg.Timeout, "timeout", 12*time.Second, "Timeout for each request")
59 | flag.BoolVar(&cfg.Verbose, "verbose", false, "Verbose output")
60 | flag.BoolVar(&cfg.SkipRootFolderCheck, "skip-root-folder-check", false, "Prevents checking https://domain/PATH")
61 | flag.BoolVar(&cfg.AppendByPassesToWords, "append-bypasses-to-words", false, "Append bypasses to words (admin -> admin; -> admin..;)")
62 | flag.BoolVar(&cfg.FastHTTP, "use-fasthttp", false, "Use fasthttp instead of net/http")
63 | flag.BoolVar(&cfg.ForceHTTPProt, "force-http", false, "Force the usage of http:// instead of https://")
64 | flag.BoolVar(&cfg.NoEnvAppending, "dont-append-envs", false, "Prevent appending environment words (-qa, ...) to generated words")
65 | flag.BoolVar(&cfg.EnvRemoving, "remove-envs", true, "In case a word ends with a known envword, a variant without the envword will be added")
66 | flag.StringVar(&cfg.ContentTypes, "content-types", "", "Content-Type header values to filter (csv allowed, e.g. json,octet)")
67 | flag.StringVar(&cfg.DisallowedContentStrings, "disallowed-content-strings", "", "If this string is present in the response body, the request will be considered irrelevant (csv allowed, e.g. ',')")
68 | flag.StringVar(&cfg.DisallowedContentTypes, "disallowed-content-types", "", "Content-Type header value to filter out (csv allowed, e.g. json,octet)")
69 | flag.Int64Var(&cfg.MinContentSize, "min-content-size", 0, "Minimum file size to detect (in bytes)")
70 | flag.Int64Var(&cfg.MaxContentRead, "max-content-read", 5*1024*1024, "Maximum size of content to read for marker checking (in bytes)")
71 | flag.StringVar(&cfg.HTTPStatusCodes, "http-statuses", "", "HTTP status code to filter (csv allowed)")
72 | flag.BoolVar(&cfg.DisableDuplicateCheck, "disable-duplicate-check", false, "Disable duplicate response check by host and size")
73 |
74 | var proxyURLStr string
75 | flag.StringVar(&proxyURLStr, "proxy", "", "Proxy URL (e.g., http://127.0.0.1:8080)")
76 |
77 | var extraHeaders string
78 | flag.StringVar(&extraHeaders, "headers", "", "Extra headers to add to each request (format: 'Header1:Value1,Header2:Value2')")
79 |
80 | flag.StringVar(&cfg.EnvAppendWords, "env-append-words", "", "Comma-separated list of environment words to append (e.g. dev,prod,api)")
81 |
82 | flag.Parse()
83 |
84 | if (cfg.DomainsFile == "" && cfg.Domain == "") || cfg.PathsFile == "" {
85 | fmt.Println("Please provide either -domains file or -domain, along with -paths")
86 | flag.PrintDefaults()
87 | os.Exit(1)
88 | }
89 |
90 | if (cfg.DomainsFile != "" || cfg.Domain != "") && cfg.PathsFile != "" && cfg.MarkersFile == "" && noRulesSpecified(cfg) {
91 | fmt.Println("If you provide -domains or -domain and -paths, you must provide at least one of -markers, -http-status, -content-types, -min-content-size, or -disallowed-content-types")
92 | flag.PrintDefaults()
93 | os.Exit(1)
94 | }
95 |
96 | if proxyURLStr != "" {
97 | proxyURL, err := url.Parse(proxyURLStr)
98 | if err != nil {
99 | fmt.Printf("Invalid proxy URL: %v\n", err)
100 | os.Exit(1)
101 | }
102 | cfg.ProxyURL = proxyURL
103 | }
104 |
105 | if extraHeaders != "" {
106 | headers := strings.Split(extraHeaders, ",")
107 | for _, header := range headers {
108 | parts := strings.SplitN(header, ":", 2)
109 | if len(parts) == 2 {
110 | cfg.ExtraHeaders[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
111 | }
112 | }
113 | }
114 |
115 | if cfg.BasePathsFile != "" {
116 | var err error
117 | cfg.BasePaths, err = readBasePaths(cfg.BasePathsFile)
118 | if err != nil {
119 | fmt.Printf("Error reading base paths file: %v\n", err)
120 | os.Exit(1)
121 | }
122 | }
123 |
124 | if cfg.EnvAppendWords == "" {
125 | // Use the default if user did not supply anything
126 | cfg.AppendEnvList = defaultAppendEnvList
127 | } else {
128 | // Split the user-supplied CSV
129 | customList := strings.Split(cfg.EnvAppendWords, ",")
130 | for i := range customList {
131 | customList[i] = strings.TrimSpace(customList[i])
132 | }
133 | cfg.AppendEnvList = customList
134 | }
135 |
136 | return cfg
137 | }
138 |
139 | func noRulesSpecified(cfg Config) bool {
140 | noRules := true
141 |
142 | if cfg.HTTPStatusCodes != "" {
143 | noRules = false
144 | }
145 |
146 | if cfg.MinContentSize > 0 {
147 | noRules = false
148 | }
149 |
150 | if cfg.ContentTypes != "" {
151 | noRules = false
152 | }
153 |
154 | if cfg.DisallowedContentTypes != "" {
155 | noRules = false
156 | }
157 |
158 | return noRules
159 | }
160 |
161 | func readBasePaths(filename string) ([]string, error) {
162 | file, err := os.Open(filename)
163 | if err != nil {
164 | return nil, err
165 | }
166 | defer file.Close()
167 |
168 | var basePaths []string
169 | scanner := bufio.NewScanner(file)
170 | for scanner.Scan() {
171 | path := strings.TrimSpace(scanner.Text())
172 | if path != "" {
173 | basePaths = append(basePaths, path)
174 | }
175 | }
176 |
177 | if err := scanner.Err(); err != nil {
178 | return nil, err
179 | }
180 |
181 | return basePaths, nil
182 | }
183 |
--------------------------------------------------------------------------------
/pkg/domain/domain.go:
--------------------------------------------------------------------------------
1 | package domain
2 |
3 | import (
4 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/config"
5 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/utils"
6 | "regexp"
7 | "strconv"
8 | "strings"
9 | )
10 |
11 | type domainProtocol struct {
12 | domain string
13 | protocol string
14 | }
15 |
16 | var (
17 | ipv4Regex = regexp.MustCompile(`^(\d{1,3}\.){3}\d{1,3}$`)
18 | ipv6Regex = regexp.MustCompile(`^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$`)
19 | ipPartRegex = regexp.MustCompile(`(\d{1,3}[-\.]\d{1,3}[-\.]\d{1,3}[-\.]\d{1,3})`)
20 | md5Regex = regexp.MustCompile(`^[a-fA-F0-9]{32}$`)
21 | onlyAlphaRegex = regexp.MustCompile(`^[a-z]+$`)
22 | suffixNumberRegex = regexp.MustCompile(`[\d]+$`)
23 | envRegex = regexp.MustCompile(`(prod|qa|dev|testing|test|uat|stg|stage|staging|development|production)$`)
24 | // Removed the hard-coded appendEnvList. Use cfg.AppendEnvList instead in splitDomain().
25 | regionPartRegex = regexp.MustCompile(`(us-east|us-west|af-south|ap-east|ap-south|ap-northeast|ap-southeast|ca-central|eu-west|eu-north|eu-south|me-south|sa-east|us-east-1|us-east-2|us-west-1|us-west-2|af-south-1|ap-east-1|ap-south-1|ap-northeast-3|ap-northeast-2|ap-southeast-1|ap-southeast-2|ap-southeast-3|ap-northeast-1|ca-central-1|eu-central-1|eu-west-1|eu-west-2|eu-west-3|eu-north-1|eu-south-1|me-south-1|sa-east-1|useast1|useast2|uswest1|uswest2|afsouth1|apeast1|apsouth1|apnortheast3|apnortheast2|apsoutheast1|apsoutheast2|apsoutheast3|apnortheast1|cacentral1|eucentral1|euwest1|euwest2|euwest3|eunorth1|eusouth1|mesouth1|saeast1)`)
26 | byPassCharacters = []string{";", "..;"}
27 | )
28 |
29 | var commonTLDsMap map[string]struct{}
30 |
31 | func init() {
32 | // Initialize the TLD map once at startup
33 | commonTLDsMap = make(map[string]struct{}, len(commonTLDs))
34 | for _, tld := range commonTLDs {
35 | commonTLDsMap[tld] = struct{}{}
36 | }
37 | }
38 |
39 | var commonTLDs = []string{
40 | // Multi-part TLDs
41 | "co.uk", "co.jp", "co.nz", "co.za", "com.au", "com.br", "com.cn", "com.mx", "com.tr", "com.tw",
42 | "edu.au", "edu.cn", "edu.hk", "edu.sg", "gov.uk", "net.au", "net.cn", "org.au", "org.uk",
43 | "ac.uk", "ac.nz", "ac.jp", "ac.kr", "ne.jp", "or.jp", "org.nz", "govt.nz", "sch.uk", "nhs.uk",
44 |
45 | // Generic TLDs (gTLDs)
46 | "com", "org", "net", "edu", "gov", "int", "mil", "aero", "biz", "cat", "coop", "info", "jobs",
47 | "mobi", "museum", "name", "pro", "tel", "travel", "xxx", "asia", "arpa",
48 |
49 | // New gTLDs
50 | "app", "dev", "io", "ai", "cloud", "digital", "online", "store", "tech", "site", "website",
51 | "blog", "shop", "agency", "expert", "software", "studio", "design", "education", "healthcare",
52 |
53 | // Country Code TLDs (ccTLDs)
54 | "ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar", "as", "at", "au", "aw",
55 | "ax", "az", "ba", "bb", "bd", "be", "bf", "bg", "bh", "bi", "bj", "bm", "bn", "bo", "br", "bs",
56 | "bt", "bv", "bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl", "cm", "cn",
57 | "co", "cr", "cu", "cv", "cx", "cy", "cz", "de", "dj", "dk", "dm", "do", "dz", "ec", "ee", "eg",
58 | "er", "es", "et", "eu", "fi", "fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg",
59 | "gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr", "gs", "gt", "gu", "gw", "gy", "hk", "hm", "hn",
60 | "hr", "ht", "hu", "id", "ie", "il", "im", "in", "io", "iq", "ir", "is", "it", "je", "jm", "jo",
61 | "jp", "ke", "kg", "kh", "ki", "km", "kn", "kp", "kr", "kw", "ky", "kz", "la", "lb", "lc", "li",
62 | "lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mg", "mh", "mk", "ml", "mm",
63 | "mn", "mo", "mp", "mq", "mr", "ms", "mt", "mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne",
64 | "nf", "ng", "ni", "nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph", "pk",
65 | "pl", "pm", "pn", "pr", "ps", "pt", "pw", "py", "qa", "re", "ro", "rs", "ru", "rw", "sa", "sb",
66 | "sc", "sd", "se", "sg", "sh", "si", "sj", "sk", "sl", "sm", "sn", "so", "sr", "st", "su", "sv",
67 | "sy", "sz", "tc", "td", "tf", "tg", "th", "tj", "tk", "tl", "tm", "tn", "to", "tp", "tr", "tt",
68 | "tv", "tw", "tz", "ua", "ug", "uk", "us", "uy", "uz", "va", "vc", "ve", "vg", "vi", "vn", "vu",
69 | "wf", "ws", "ye", "yt", "za", "zm", "zw",
70 | }
71 |
72 | func splitDomain(host string, cfg *config.Config) []string {
73 | // Strip protocol
74 | if strings.HasPrefix(host, "http://") {
75 | host = strings.TrimPrefix(host, "http://")
76 | }
77 | if strings.HasPrefix(host, "https://") {
78 | host = strings.TrimPrefix(host, "https://")
79 | }
80 |
81 | // Get just the domain part
82 | host = strings.Split(host, "/")[0]
83 |
84 | // Skip IP addresses
85 | if ipv4Regex.MatchString(host) || ipv6Regex.MatchString(host) {
86 | return nil
87 | }
88 |
89 | // Remove port if present
90 | host = strings.Split(host, ":")[0]
91 |
92 | // Remove IP-like parts
93 | host = ipPartRegex.ReplaceAllString(host, "")
94 |
95 | // Remove hash-like parts
96 | host = md5Regex.ReplaceAllString(host, "")
97 |
98 | // Remove TLD
99 | host = removeTLD(host)
100 |
101 | // Remove regional parts
102 | host = regionPartRegex.ReplaceAllString(host, "")
103 |
104 | // Standardize separators
105 | host = strings.ReplaceAll(host, "--", "-")
106 | host = strings.ReplaceAll(host, "..", ".")
107 | host = strings.ReplaceAll(host, "__", "_")
108 |
109 | // Split into parts by dot
110 | parts := strings.Split(host, ".")
111 |
112 | // Remove "www" if it's the first part
113 | if len(parts) > 0 && parts[0] == "www" {
114 | parts = parts[1:]
115 | }
116 |
117 | // Limit host depth if configured
118 | if cfg.HostDepth > 0 && len(parts) >= cfg.HostDepth {
119 | parts = parts[:cfg.HostDepth]
120 | }
121 |
122 | // Pre-allocate the map with a reasonable capacity
123 | estimatedCapacity := len(parts) * 3 // Rough estimate for parts and subparts
124 | relevantParts := make(map[string]struct{}, estimatedCapacity)
125 |
126 | // Process each part
127 | for _, part := range parts {
128 | relevantParts[part] = struct{}{}
129 |
130 | // Split by separators
131 | subParts := strings.FieldsFunc(part, func(r rune) bool {
132 | return r == '-' || r == '_'
133 | })
134 |
135 | // Add each subpart
136 | for _, subPart := range subParts {
137 | relevantParts[subPart] = struct{}{}
138 | }
139 | }
140 |
141 | // Estimate final result size
142 | estimatedResultSize := len(relevantParts)
143 | if !cfg.NoEnvAppending {
144 | // If we'll be adding env variants, estimate additional capacity
145 | estimatedResultSize += len(relevantParts) * len(cfg.AppendEnvList) * 4
146 | }
147 |
148 | // Allocate result slice with appropriate capacity
149 | result := make([]string, 0, estimatedResultSize)
150 |
151 | // Process each relevant part
152 | for part := range relevantParts {
153 | // Skip purely numeric parts
154 | if _, err := strconv.Atoi(part); err == nil {
155 | continue
156 | }
157 |
158 | // Skip single characters
159 | if len(part) <= 1 {
160 | continue
161 | }
162 |
163 | // If part matches environment pattern, add a version without it
164 | if envRegex.MatchString(part) {
165 | result = append(result, strings.TrimSuffix(part, envRegex.FindString(part)))
166 | }
167 |
168 | // If part ends with numbers, add a version without the numbers
169 | if suffixNumberRegex.MatchString(part) {
170 | result = append(result, strings.TrimSuffix(part, suffixNumberRegex.FindString(part)))
171 | }
172 |
173 | // Add the original part
174 | result = append(result, part)
175 | }
176 |
177 | // Add environment variants if enabled
178 | if !cfg.NoEnvAppending {
179 | baseLength := len(result)
180 | for i := 0; i < baseLength; i++ {
181 | part := result[i]
182 | // Skip parts that aren't purely alphabetic
183 | if !onlyAlphaRegex.MatchString(part) {
184 | continue
185 | }
186 |
187 | // Skip if part already ends with an environment suffix
188 | shouldBeAdded := true
189 | for _, env := range cfg.AppendEnvList {
190 | if strings.HasSuffix(part, env) {
191 | shouldBeAdded = false
192 | break
193 | }
194 | }
195 |
196 | if shouldBeAdded {
197 | for _, env := range cfg.AppendEnvList {
198 | // Skip if part already contains the environment name
199 | if strings.Contains(part, env) {
200 | continue
201 | }
202 |
203 | // Add variants with different separators
204 | result = append(result, part+env)
205 | result = append(result, part+"-"+env)
206 | result = append(result, part+"_"+env)
207 | result = append(result, part+"/"+env)
208 | }
209 | }
210 | }
211 | }
212 |
213 | // Remove environment suffixes if enabled
214 | if cfg.EnvRemoving {
215 | baseLength := len(result)
216 | for i := 0; i < baseLength; i++ {
217 | part := result[i]
218 | // Skip parts that aren't purely alphabetic
219 | if !onlyAlphaRegex.MatchString(part) {
220 | continue
221 | }
222 |
223 | // If the part ends with a known env word, produce a version with that suffix trimmed
224 | for _, env := range cfg.AppendEnvList {
225 | if strings.HasSuffix(part, env) {
226 | result = append(result, strings.TrimSuffix(part, env))
227 | break
228 | }
229 | }
230 | }
231 | }
232 |
233 | // Clean up results (trim separators)
234 | cleanedResult := make([]string, 0, len(result))
235 | for _, item := range result {
236 | trimmed := strings.Trim(item, ".-_")
237 | if trimmed != "" {
238 | cleanedResult = append(cleanedResult, trimmed)
239 | }
240 | }
241 |
242 | // Add short prefixes (3 and 4 character) for common patterns
243 | baseLength := len(cleanedResult)
244 | additionalItems := make([]string, 0, baseLength*2)
245 | for i := 0; i < baseLength; i++ {
246 | word := cleanedResult[i]
247 | if len(word) >= 3 {
248 | additionalItems = append(additionalItems, word[:3])
249 | }
250 | if len(word) >= 4 {
251 | additionalItems = append(additionalItems, word[:4])
252 | }
253 | }
254 |
255 | // Combine all items
256 | result = append(cleanedResult, additionalItems...)
257 |
258 | // Deduplicate
259 | result = makeUniqueList(result)
260 |
261 | // Add bypass character variants if enabled
262 | if cfg.AppendByPassesToWords {
263 | baseLength := len(result)
264 | bypassVariants := make([]string, 0, baseLength*len(byPassCharacters))
265 |
266 | for i := 0; i < baseLength; i++ {
267 | for _, bypass := range byPassCharacters {
268 | bypassVariants = append(bypassVariants, result[i]+bypass)
269 | }
270 | }
271 |
272 | result = append(result, bypassVariants...)
273 | }
274 |
275 | return result
276 | }
277 |
278 | func GetRelevantDomainParts(host string, cfg *config.Config) []string {
279 | return splitDomain(host, cfg)
280 | }
281 |
282 | func makeUniqueList(input []string) []string {
283 | // Use a map for deduplication
284 | seen := make(map[string]struct{}, len(input))
285 | result := make([]string, 0, len(input))
286 |
287 | for _, item := range input {
288 | if _, exists := seen[item]; !exists {
289 | seen[item] = struct{}{}
290 | result = append(result, item)
291 | }
292 | }
293 |
294 | return result
295 | }
296 |
297 | func GetDomains(domainsFile, singleDomain string) []string {
298 | if domainsFile != "" {
299 | allLines := utils.ReadLines(domainsFile)
300 | // Pre-allocate with a capacity based on the number of lines
301 | validDomains := make([]string, 0, len(allLines))
302 |
303 | for _, line := range allLines {
304 | trimmedLine := strings.TrimSpace(line)
305 | if trimmedLine != "" && !strings.HasPrefix(trimmedLine, "#") {
306 | validDomains = append(validDomains, trimmedLine)
307 | }
308 | }
309 |
310 | validDomains = utils.ShuffleStrings(validDomains)
311 | return validDomains
312 | }
313 |
314 | // Return single domain as a slice
315 | return []string{singleDomain}
316 | }
317 |
318 | func removeTLD(host string) string {
319 | host = strings.ToLower(host)
320 | parts := strings.Split(host, ".")
321 |
322 | // Iterate through possible multi-part TLDs
323 | for i := 0; i < len(parts); i++ {
324 | potentialTLD := strings.Join(parts[i:], ".")
325 | if _, exists := commonTLDsMap[potentialTLD]; exists {
326 | return strings.Join(parts[:i], ".")
327 | }
328 | }
329 |
330 | return host
331 | }
332 |
--------------------------------------------------------------------------------
/pkg/fasthttp/client.go:
--------------------------------------------------------------------------------
1 | package fasthttp
2 |
3 | import (
4 | "bytes"
5 | "crypto/tls"
6 | "fmt"
7 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/config"
8 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/result"
9 | "github.com/valyala/fasthttp"
10 | "math/rand"
11 | "strconv"
12 | "strings"
13 | )
14 |
15 | var baseUserAgents = []string{
16 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
17 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
18 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
19 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59",
20 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
21 | }
22 |
23 | var acceptLanguages = []string{
24 | "en-US,en;q=0.9", "en-GB,en;q=0.8", "es-ES,es;q=0.9",
25 | "fr-FR,fr;q=0.9", "de-DE,de;q=0.8", "it-IT,it;q=0.9",
26 | }
27 |
28 | type Client struct {
29 | config config.Config
30 | client *fasthttp.Client
31 | }
32 |
33 | func NewClient(cfg config.Config) *Client {
34 | return &Client{
35 | config: cfg,
36 | client: &fasthttp.Client{
37 | ReadTimeout: cfg.Timeout,
38 | WriteTimeout: cfg.Timeout,
39 | DisablePathNormalizing: true,
40 | DisableHeaderNamesNormalizing: true, // Prevent automatic header modifications
41 | TLSConfig: &tls.Config{
42 | InsecureSkipVerify: true,
43 | },
44 | },
45 | }
46 | }
47 |
48 | func (c *Client) MakeRequest(url string) result.Result {
49 | req := fasthttp.AcquireRequest()
50 | defer fasthttp.ReleaseRequest(req)
51 |
52 | req.SetRequestURI(url)
53 | req.URI().DisablePathNormalizing = true
54 | req.Header.DisableNormalizing()
55 | req.Header.SetMethod(fasthttp.MethodGet)
56 | req.Header.Set("Connection", "keep-alive")
57 | req.Header.SetProtocol("HTTP/1.1")
58 | req.Header.Set("Range", fmt.Sprintf("bytes=0-%d", c.config.MaxContentRead-1))
59 |
60 | randomizeRequest(req)
61 | for key, value := range c.config.ExtraHeaders {
62 | req.Header.Set(key, value)
63 | }
64 |
65 | resp := fasthttp.AcquireResponse()
66 | defer fasthttp.ReleaseResponse(resp)
67 |
68 | client := &fasthttp.Client{
69 | ReadTimeout: c.config.Timeout,
70 | WriteTimeout: c.config.Timeout,
71 | DisableHeaderNamesNormalizing: true,
72 | DisablePathNormalizing: true,
73 | TLSConfig: &tls.Config{
74 | InsecureSkipVerify: true,
75 | },
76 | }
77 |
78 | err := client.DoRedirects(req, resp, 0)
79 | if err == fasthttp.ErrMissingLocation {
80 | return result.Result{URL: url, Error: fmt.Errorf("error fetching: %w", err)}
81 | }
82 |
83 | if err != nil {
84 | return result.Result{URL: url, Error: fmt.Errorf("error fetching: %w", err)}
85 | }
86 |
87 | body := resp.Body()
88 |
89 | var totalSize int64
90 |
91 | contentRange := resp.Header.Peek("Content-Range")
92 | if len(contentRange) > 0 {
93 | parts := bytes.Split(contentRange, []byte("/"))
94 | if len(parts) == 2 {
95 | totalSize, _ = strconv.ParseInt(string(parts[1]), 10, 64)
96 | }
97 | } else {
98 | totalSize = int64(len(body))
99 | }
100 |
101 | if int64(len(body)) > c.config.MaxContentRead {
102 | body = body[:c.config.MaxContentRead]
103 | }
104 |
105 | return result.Result{
106 | URL: url,
107 | Content: string(body),
108 | StatusCode: resp.StatusCode(),
109 | FileSize: totalSize,
110 | ContentType: string(resp.Header.Peek("Content-Type")),
111 | }
112 | }
113 |
114 | func randomizeRequest(req *fasthttp.Request) {
115 | req.Header.Set("User-Agent", getRandomUserAgent())
116 | req.Header.Set("Accept-Language", getRandomAcceptLanguage())
117 |
118 | referer := getReferer(req.URI().String())
119 | req.Header.Set("Referer", referer)
120 | req.Header.Set("Origin", referer)
121 | req.Header.Set("Accept", "*/*")
122 |
123 | if rand.Float32() < 0.5 {
124 | req.Header.Set("DNT", "1")
125 | }
126 | if rand.Float32() < 0.3 {
127 | req.Header.Set("Upgrade-Insecure-Requests", "1")
128 | }
129 | }
130 |
131 | func getRandomUserAgent() string {
132 | baseUA := baseUserAgents[rand.Intn(len(baseUserAgents))]
133 | parts := strings.Split(baseUA, " ")
134 |
135 | for i, part := range parts {
136 | if strings.Contains(part, "/") {
137 | versionParts := strings.Split(part, "/")
138 | if len(versionParts) == 2 {
139 | version := strings.Split(versionParts[1], ".")
140 | if len(version) > 2 {
141 | version[2] = fmt.Sprintf("%d", rand.Intn(100))
142 | versionParts[1] = strings.Join(version, ".")
143 | }
144 | }
145 | parts[i] = strings.Join(versionParts, "/")
146 | }
147 | }
148 |
149 | return strings.Join(parts, " ")
150 | }
151 |
152 | func getRandomAcceptLanguage() string {
153 | return acceptLanguages[rand.Intn(len(acceptLanguages))]
154 | }
155 |
156 | func getReferer(url string) string {
157 | return url
158 | }
159 |
--------------------------------------------------------------------------------
/pkg/http/client.go:
--------------------------------------------------------------------------------
1 | package http
2 |
3 | import (
4 | "context"
5 | "crypto/tls"
6 | "fmt"
7 | "io"
8 | "math/rand"
9 | "net/http"
10 | "strconv"
11 | "strings"
12 | "time"
13 |
14 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/config"
15 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/result"
16 | )
17 |
18 | var baseUserAgents = []string{
19 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
20 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
21 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
22 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59",
23 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
24 | }
25 |
26 | var acceptLanguages = []string{
27 | "en-US,en;q=0.9", "en-GB,en;q=0.8", "es-ES,es;q=0.9",
28 | "fr-FR,fr;q=0.9", "de-DE,de;q=0.8", "it-IT,it;q=0.9",
29 | }
30 |
31 | type Client struct {
32 | httpClient *http.Client
33 | config config.Config
34 | }
35 |
36 | func NewClient(cfg config.Config) *Client {
37 | transport := &http.Transport{
38 | MaxIdleConns: 100,
39 | MaxIdleConnsPerHost: 100,
40 | IdleConnTimeout: 90 * time.Second,
41 | TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
42 | }
43 |
44 | if cfg.ProxyURL != nil {
45 | transport.Proxy = http.ProxyURL(cfg.ProxyURL)
46 | }
47 |
48 | client := &http.Client{
49 | Transport: transport,
50 | Timeout: cfg.Timeout + 3*time.Second,
51 | CheckRedirect: func(req *http.Request, via []*http.Request) error {
52 | return http.ErrUseLastResponse
53 | },
54 | }
55 |
56 | return &Client{
57 | httpClient: client,
58 | config: cfg,
59 | }
60 | }
61 |
62 | func (c *Client) MakeRequest(url string) result.Result {
63 | ctx, cancel := context.WithTimeout(context.Background(), c.config.Timeout)
64 | defer cancel()
65 |
66 | req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
67 | if err != nil {
68 | return result.Result{URL: url, Error: fmt.Errorf("error creating request: %w", err)}
69 | }
70 |
71 | randomizeRequest(req)
72 |
73 | for key, value := range c.config.ExtraHeaders {
74 | req.Header.Set(key, value)
75 | }
76 |
77 | req.Header.Set("Range", fmt.Sprintf("bytes=0-%d", c.config.MaxContentRead-1))
78 | resp, err := c.httpClient.Do(req)
79 | if err != nil {
80 | return result.Result{URL: url, Error: fmt.Errorf("error fetching: %w", err)}
81 | }
82 | defer resp.Body.Close()
83 |
84 | buffer, err := io.ReadAll(resp.Body)
85 | if err != nil {
86 | return result.Result{URL: url, Error: fmt.Errorf("error reading body: %w", err)}
87 | }
88 |
89 | var totalSize int64
90 | if contentRange := resp.Header.Get("Content-Range"); contentRange != "" {
91 | parts := strings.Split(contentRange, "/")
92 | if len(parts) == 2 {
93 | totalSize, _ = strconv.ParseInt(parts[1], 10, 64)
94 | }
95 | } else {
96 | totalSize = int64(len(buffer))
97 | }
98 |
99 | return result.Result{
100 | URL: url,
101 | Content: string(buffer),
102 | StatusCode: resp.StatusCode,
103 | FileSize: totalSize,
104 | ContentType: resp.Header.Get("Content-Type"),
105 | }
106 | }
107 |
108 | func randomizeRequest(req *http.Request) {
109 | req.Header.Set("User-Agent", getRandomUserAgent())
110 | req.Header.Set("Accept-Language", getRandomAcceptLanguage())
111 |
112 | referer := getReferer(req.URL.String())
113 | req.Header.Set("Referer", referer)
114 | req.Header.Set("Origin", referer)
115 | req.Header.Set("Accept", "*/*")
116 |
117 | if rand.Float32() < 0.5 {
118 | req.Header.Set("DNT", "1")
119 | }
120 | if rand.Float32() < 0.3 {
121 | req.Header.Set("Upgrade-Insecure-Requests", "1")
122 | }
123 | }
124 |
125 | func getRandomUserAgent() string {
126 | baseUA := baseUserAgents[rand.Intn(len(baseUserAgents))]
127 | parts := strings.Split(baseUA, " ")
128 |
129 | for i, part := range parts {
130 | if strings.Contains(part, "/") {
131 | versionParts := strings.Split(part, "/")
132 | if len(versionParts) == 2 {
133 | version := strings.Split(versionParts[1], ".")
134 | if len(version) > 2 {
135 | version[2] = fmt.Sprintf("%d", rand.Intn(100))
136 | versionParts[1] = strings.Join(version, ".")
137 | }
138 | }
139 | parts[i] = strings.Join(versionParts, "/")
140 | }
141 | }
142 |
143 | return strings.Join(parts, " ")
144 | }
145 |
146 | func getRandomAcceptLanguage() string {
147 | return acceptLanguages[rand.Intn(len(acceptLanguages))]
148 | }
149 |
150 | func getReferer(url string) string {
151 | return url
152 | }
153 |
--------------------------------------------------------------------------------
/pkg/result/result.go:
--------------------------------------------------------------------------------
1 | package result
2 |
3 | import (
4 | "github.com/dsecuredcom/dynamic-file-searcher/pkg/config"
5 | "github.com/fatih/color"
6 | "log"
7 | "net/url"
8 | "regexp"
9 | "strconv"
10 | "strings"
11 | "sync"
12 | )
13 |
14 | type Result struct {
15 | URL string
16 | Content string
17 | Error error
18 | StatusCode int
19 | FileSize int64
20 | ContentType string
21 | }
22 |
23 | type ResponseMap struct {
24 | shards [256]responseShard
25 | }
26 |
27 | type responseShard struct {
28 | mu sync.RWMutex
29 | responses map[uint64]struct{}
30 | }
31 |
32 | func fnv1aHash(data string) uint64 {
33 | var hash uint64 = 0xcbf29ce484222325 // FNV offset basis
34 |
35 | for i := 0; i < len(data); i++ {
36 | hash ^= uint64(data[i])
37 | hash *= 0x100000001b3 // FNV prime
38 | }
39 |
40 | return hash
41 | }
42 |
43 | func NewResponseMap() *ResponseMap {
44 | rm := &ResponseMap{}
45 | for i := range rm.shards {
46 | rm.shards[i].responses = make(map[uint64]struct{}, 64) // Reasonable initial capacity
47 | }
48 | return rm
49 | }
50 |
51 | func (rm *ResponseMap) getShard(key string) *responseShard {
52 | 	// Use the low byte of the hash to spread keys evenly across the 256 shards
53 | return &rm.shards[fnv1aHash(key)&0xFF]
54 | }
55 |
56 | // isNewResponse reports whether this (host, size) pair is unseen; sharded double-checked locking keeps write contention low
57 | func (rm *ResponseMap) isNewResponse(host string, size int64) bool {
58 | // Create composite key
59 | key := host + ":" + strconv.FormatInt(size, 10)
60 |
61 | // Get the appropriate shard
62 | shard := rm.getShard(key)
63 |
64 | // Calculate full hash
65 | hash := fnv1aHash(key)
66 |
67 | // Check if response exists with minimal locking
68 | shard.mu.RLock()
69 | _, exists := shard.responses[hash]
70 | shard.mu.RUnlock()
71 |
72 | if exists {
73 | return false
74 | }
75 |
76 | // If not found, acquire write lock and check again
77 | shard.mu.Lock()
78 | defer shard.mu.Unlock()
79 |
80 | if _, exists := shard.responses[hash]; exists {
81 | return false
82 | }
83 |
84 | // Add new entry
85 | shard.responses[hash] = struct{}{}
86 | return true
87 | }
88 |
89 | func extractHost(urlStr string) string {
90 | parsedURL, err := url.Parse(urlStr)
91 | if err != nil {
92 | return urlStr
93 | }
94 | return parsedURL.Host
95 | }
96 |
97 | var tracker = NewResponseMap()
98 |
99 | func ProcessResult(result Result, cfg config.Config, markers []string) {
100 | if result.Error != nil {
101 | if cfg.Verbose {
102 | log.Printf("Error processing %s: %v\n", result.URL, result.Error)
103 | }
104 | return
105 | }
106 |
107 | // Check if content type is disallowed first
108 | DisallowedContentTypes := strings.ToLower(cfg.DisallowedContentTypes)
109 | DisallowedContentTypesList := strings.Split(DisallowedContentTypes, ",")
110 | 	if isDisallowedContentType(strings.ToLower(result.ContentType), DisallowedContentTypesList) { // lowercase both sides so the match is case-insensitive
111 | return
112 | }
113 |
114 | // Check if content contains disallowed strings
115 | DisallowedContentStrings := strings.ToLower(cfg.DisallowedContentStrings)
116 | DisallowedContentStringsList := strings.Split(DisallowedContentStrings, ",")
117 | 	if containsDisallowedStringInContent(strings.ToLower(result.Content), DisallowedContentStringsList) { // needles were lowercased above; lowercase the haystack too
118 | return
119 | }
120 |
121 | markerFound := false
122 | hasMarkers := len(markers) > 0
123 | usedMarker := ""
124 |
125 | if hasMarkers {
126 | for _, marker := range markers {
127 | 			if !strings.HasPrefix(marker, "regex:") && strings.Contains(result.Content, marker) {
128 | markerFound = true
129 | usedMarker = marker
130 | break
131 | }
132 |
133 | if strings.HasPrefix(marker, "regex:") {
134 | regex := strings.TrimPrefix(marker, "regex:")
135 | 				if match, _ := regexp.MatchString(regex, result.Content); match { // compile errors are swallowed: invalid patterns simply never match
136 | markerFound = true
137 | usedMarker = marker
138 | break
139 | }
140 | }
141 | }
142 | }
143 |
144 | rulesMatched := 0
145 | rulesCount := 0
146 |
147 | if cfg.HTTPStatusCodes != "" {
148 | rulesCount++
149 | }
150 |
151 | if cfg.MinContentSize > 0 {
152 | rulesCount++
153 | }
154 |
155 | if cfg.ContentTypes != "" {
156 | rulesCount++
157 | }
158 |
159 | if cfg.HTTPStatusCodes != "" {
160 | AllowedHttpStatusesList := strings.Split(cfg.HTTPStatusCodes, ",")
161 | for _, AllowedHttpStatusString := range AllowedHttpStatusesList {
162 | allowedStatus, err := strconv.Atoi(strings.TrimSpace(AllowedHttpStatusString))
163 | if err != nil {
164 | log.Printf("Error converting status code '%s' to integer: %v", AllowedHttpStatusString, err)
165 | continue
166 | }
167 | if result.StatusCode == allowedStatus {
168 | rulesMatched++
169 | break
170 | }
171 | }
172 | }
173 |
174 | // Check content size
175 | if cfg.MinContentSize > 0 && result.FileSize >= cfg.MinContentSize {
176 | rulesMatched++
177 | }
178 |
179 | // Check content types
180 | if cfg.ContentTypes != "" {
181 | AllowedContentTypes := strings.ToLower(cfg.ContentTypes)
182 | AllowedContentTypesList := strings.Split(AllowedContentTypes, ",")
183 | ResultContentType := strings.ToLower(result.ContentType)
184 | for _, AllowedContentTypeString := range AllowedContentTypesList {
185 | if strings.Contains(ResultContentType, AllowedContentTypeString) {
186 | rulesMatched++
187 | break
188 | }
189 | }
190 | }
191 |
192 | // Determine if rules match
193 | 	rulesPass := rulesCount == 0 || rulesMatched == rulesCount
194 |
195 | // Final decision based on both markers and rules
196 | if (hasMarkers && !markerFound) || (rulesCount > 0 && !rulesPass) {
197 | // If we have markers but didn't find one, OR if we have rules but they didn't pass, skip
198 | if cfg.Verbose {
199 | log.Printf("Skipped: %s (Status: %d, Size: %d bytes, Type: %s)\n",
200 | result.URL, result.StatusCode, result.FileSize, result.ContentType)
201 | }
202 | return
203 | }
204 |
205 | host := extractHost(result.URL)
206 | if !cfg.DisableDuplicateCheck {
207 | if !tracker.isNewResponse(host, result.FileSize) {
208 | if cfg.Verbose {
209 | log.Printf("Skipped duplicate response size %d for host %s\n", result.FileSize, host)
210 | }
211 | return
212 | }
213 | }
214 |
215 | // If we get here, all configured conditions were met
216 | color.Red("\n[!]\tMatch found in %s", result.URL)
217 | if hasMarkers {
218 | color.Red("\tMarkers check: passed (%s)", usedMarker)
219 | }
220 |
221 | color.Red("\tRules check: passed (S: %d, FS: %d, CT: %s)",
222 | result.StatusCode, result.FileSize, result.ContentType)
223 |
224 | content := result.Content
225 | 	content = strings.NewReplacer("\n", "", "\r", "").Replace(content) // flatten CR/LF so the preview prints on one line
226 |
227 | if len(content) > 150 {
228 | color.Green("\n[!]\tBody: %s\n", content[:150])
229 | } else {
230 | color.Green("\n[!]\tBody: %s\n", content)
231 | }
232 |
233 | if cfg.Verbose {
234 | log.Printf("Processed: %s (Status: %d, Size: %d bytes, Type: %s)\n",
235 | result.URL, result.StatusCode, result.FileSize, result.ContentType)
236 | }
237 | }
238 |
239 | func containsDisallowedStringInContent(contentBody string, DisallowedContentStringsList []string) bool {
240 | if len(DisallowedContentStringsList) == 0 {
241 | return false
242 | }
243 |
244 | for _, disallowedContentString := range DisallowedContentStringsList {
245 | if disallowedContentString == "" {
246 | continue
247 | }
248 |
249 | if strings.Contains(contentBody, disallowedContentString) {
250 | return true
251 | }
252 | }
253 |
254 | return false
255 | }
256 |
257 | func isDisallowedContentType(contentType string, DisallowedContentTypesList []string) bool {
258 |
259 | if len(DisallowedContentTypesList) == 0 {
260 | return false
261 | }
262 |
263 | for _, disallowedContentType := range DisallowedContentTypesList {
264 | if disallowedContentType == "" {
265 | continue
266 | }
267 |
268 | if strings.Contains(contentType, disallowedContentType) {
269 | return true
270 | }
271 | }
272 |
273 | return false
274 |
275 | }
276 |
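277 | // Note on ProcessResult's marker matching: plain marker entries are matched
278 | // verbatim with strings.Contains, while entries prefixed with "regex:" are
279 | // evaluated as regular expressions against the response body. Hypothetical
280 | // marker-file lines showing both forms:
281 | //
282 | //	secret_key
283 | //	regex:(?i)index of /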
--------------------------------------------------------------------------------
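
Because isNewResponse is unexported, the sharded dedup above can't be exercised from outside the package. The following self-contained sketch reproduces the same technique — FNV-1a hashing, 256 lock shards picked by the hash's low byte, and a double-checked write — under illustrative names (seenSet, firstSeen), not the package's API:

package main

import (
	"fmt"
	"strconv"
	"sync"
)

type shard struct {
	mu   sync.RWMutex
	seen map[uint64]struct{}
}

// seenSet mirrors ResponseMap: 256 shards keyed by the low byte of an FNV-1a hash.
type seenSet struct {
	shards [256]shard
}

func newSeenSet() *seenSet {
	s := &seenSet{}
	for i := range s.shards {
		s.shards[i].seen = make(map[uint64]struct{})
	}
	return s
}

// fnv1a is the same inlined FNV-1a used by the package.
func fnv1a(data string) uint64 {
	hash := uint64(0xcbf29ce484222325) // FNV offset basis
	for i := 0; i < len(data); i++ {
		hash ^= uint64(data[i])
		hash *= 0x100000001b3 // FNV prime
	}
	return hash
}

// firstSeen reports whether host+size has not been recorded before.
func (s *seenSet) firstSeen(host string, size int64) bool {
	h := fnv1a(host + ":" + strconv.FormatInt(size, 10))
	sh := &s.shards[h&0xFF]

	sh.mu.RLock()
	_, dup := sh.seen[h]
	sh.mu.RUnlock()
	if dup {
		return false
	}

	sh.mu.Lock()
	defer sh.mu.Unlock()
	if _, dup := sh.seen[h]; dup {
		return false // another goroutine recorded it between the two locks
	}
	sh.seen[h] = struct{}{}
	return true
}

func main() {
	s := newSeenSet()
	fmt.Println(s.firstSeen("a.example.com", 1234)) // true: first sighting
	fmt.Println(s.firstSeen("a.example.com", 1234)) // false: duplicate size for host
	fmt.Println(s.firstSeen("a.example.com", 999))  // true: same host, new size
}

Keying the set on the 64-bit hash alone, instead of storing the composite key string, accepts a vanishing collision risk in exchange for small constant-size entries — the same trade result.go makes.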
/pkg/utils/utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "bufio"
5 | "log"
6 | "math/rand"
7 | "os"
8 | )
9 |
10 | func ReadLines(filename string) []string {
11 | file, err := os.Open(filename)
12 | if err != nil {
13 | log.Fatalf("Error opening file %s: %v\n", filename, err)
14 | }
15 | defer file.Close()
16 |
17 | var lines []string
18 | scanner := bufio.NewScanner(file)
19 | for scanner.Scan() {
20 | lines = append(lines, scanner.Text())
21 | }
22 |
23 | if err := scanner.Err(); err != nil {
24 | log.Fatalf("Error reading file %s: %v\n", filename, err)
25 | }
26 |
27 | return lines
28 | }
29 |
30 | func ShuffleStrings(slice []string) []string {
31 | for i := len(slice) - 1; i > 0; i-- {
32 | j := rand.Intn(i + 1)
33 | slice[i], slice[j] = slice[j], slice[i]
34 | }
35 | return slice
36 | }
37 |
--------------------------------------------------------------------------------
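
Both helpers are small but have sharp edges worth noting: ReadLines terminates the process via log.Fatalf on any I/O error, and ShuffleStrings reorders its argument in place (Fisher–Yates) while also returning it. A minimal usage sketch, using one of the bundled example input files:

package main

import (
	"fmt"

	"github.com/dsecuredcom/dynamic-file-searcher/pkg/utils"
)

func main() {
	// No error to handle: ReadLines exits the program if the file is unreadable.
	domains := utils.ReadLines("input/example-domains.txt")

	// The slice is shuffled in place; the return value is the same slice.
	// Ordering depends on the global math/rand state (auto-seeded since Go 1.20).
	for _, d := range utils.ShuffleStrings(domains) {
		fmt.Println(d)
	}
}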