├── go.mod ├── CONTRIBUTORS.md ├── Dockerfile ├── .gitignore ├── LICENSE ├── output └── output.go ├── go.sum ├── providers ├── otx.go ├── wayback.go ├── provider.go └── common.go ├── README.md └── main.go /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/bp0lr/gauplus 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/bobesa/go-domain-util v0.0.0-20190911083921-4033b5f7dd89 7 | github.com/json-iterator/go v1.1.10 8 | ) 9 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | * [lc](https://github.com/lc) 3 | * [shellbear](https://github.com/shellbear) 4 | 5 | 6 | Thanks to [tomnomnom](https://github.com/tomnomnom) for waybackurls! 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.17.8-alpine3.14 AS build-env 2 | RUN apk add --no-cache build-base 3 | RUN go install github.com/bp0lr/gauplus@latest 4 | 5 | FROM alpine:3.15.0 6 | RUN apk add --no-cache bind-tools ca-certificates 7 | COPY --from=build-env /go/bin/gauplus /usr/local/bin/gauplus 8 | ENTRYPOINT ["gauplus"] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | targets.txt 18 | 19 | test/ 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Corben Leo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /output/output.go: -------------------------------------------------------------------------------- 1 | package output 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "net/url" 7 | "path" 8 | "strings" 9 | 10 | jsoniter "github.com/json-iterator/go" 11 | ) 12 | 13 | type JSONResult struct { 14 | Url string `json:"url"` 15 | } 16 | 17 | func WriteURLs(results <-chan string, writer io.Writer, blacklistMap map[string]struct{}) error { 18 | wr := bufio.NewWriter(writer) 19 | str := &strings.Builder{} 20 | for result := range results { 21 | if len(blacklistMap) != 0 { 22 | u, err := url.Parse(result) 23 | if err != nil { 24 | continue 25 | } 26 | base := strings.Split(path.Base(u.Path),".") 27 | ext := base[len(base)-1] 28 | if ext != "" { 29 | _, ok := blacklistMap[strings.ToLower(ext)] 30 | if ok { 31 | continue 32 | } 33 | } 34 | } 35 | str.WriteString(result) 36 | str.WriteRune('\n') 37 | _, err := wr.WriteString(str.String()) 38 | if err != nil { 39 | wr.Flush() 40 | return err 41 | } 42 | str.Reset() 43 | } 44 | return wr.Flush() 45 | } 46 | func WriteURLsJSON(results <-chan string, writer io.Writer, blacklistMap map[string]struct{}) { 47 | var jr JSONResult 48 | enc := jsoniter.NewEncoder(writer) 49 | for result := range results { 50 | if len(blacklistMap) != 0 { 51 | u, err := url.Parse(result) 52 | if err != nil { 53 | continue 54 | } 55 | base := strings.Split(path.Base(u.Path),".") 56 | ext := base[len(base)-1] 57 | if ext != "" { 58 | _, ok := blacklistMap[strings.ToLower(ext)] 59 | if ok { 60 | continue 61 | } 62 | } 63 | } 64 | jr.Url = result 65 | enc.Encode(jr) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/bobesa/go-domain-util v0.0.0-20190911083921-4033b5f7dd89 h1:2pkAuIM8OF1fy4ToFpMnI4oE+VeUNRbGrpSLKshK0oQ= 2 | github.com/bobesa/go-domain-util v0.0.0-20190911083921-4033b5f7dd89/go.mod h1:/09nEjna1UMoasyyQDhOrIn8hi2v2kiJglPWed1idck= 3 | github.com/bp0lr/gauplus v0.0.0-20201008200520-6668650e753f h1:TmSU88EjkMUc1NqW8Xp1vr0yPfirz2pLjTMehSkUs8o= 4 | github.com/bp0lr/gauplus v0.0.0-20201008200520-6668650e753f/go.mod h1:rJBifePC30jHeRz8l7wrbtPcUnGOPt90wdBOa+v0Swk= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 8 | github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= 9 | github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 10 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= 11 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 12 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= 13 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 14 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 15 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 16 | github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 17 | golang.org/x/net v0.0.0-20180811021610-c39426892332 h1:efGso+ep0DjyCBJPjvoz0HI6UldX4Md2F1rZFe1ir0E= 18 | golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 19 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 20 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 21 | -------------------------------------------------------------------------------- /providers/otx.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strings" 7 | "github.com/bobesa/go-domain-util/domainutil" 8 | ) 9 | 10 | type OTXProvider struct { 11 | *Config 12 | } 13 | 14 | type OTXResult struct { 15 | HasNext bool `json:"has_next"` 16 | ActualSize int `json:"actual_size"` 17 | URLList []struct { 18 | Domain string `json:"domain"` 19 | URL string `json:"url"` 20 | Hostname string `json:"hostname"` 21 | HTTPCode int `json:"httpcode"` 22 | PageNum int `json:"page_num"` 23 | FullSize int `json:"full_size"` 24 | Paged bool `json:"paged"` 25 | } `json:"url_list"` 26 | } 27 | 28 | const otxResultsLimit = 200 29 | 30 | func NewOTXProvider(config *Config) Provider { 31 | return &OTXProvider{Config: config} 32 | } 33 | 34 | func (o *OTXProvider) formatURL(domain string, page int) string { 35 | if !domainutil.HasSubdomain(domain) { 36 | return fmt.Sprintf("https://otx.alienvault.com/api/v1/indicators/domain/%s/url_list?limit=%d&page=%d", 37 | domain, otxResultsLimit, page, 38 | ) 39 | } else if domainutil.HasSubdomain(domain) && o.IncludeSubdomains { 40 | return fmt.Sprintf("https://otx.alienvault.com/api/v1/indicators/domain/%s/url_list?limit=%d&page=%d", 41 | domainutil.Domain(domain), otxResultsLimit, page, 42 | ) 43 | } else { 44 | return fmt.Sprintf("https://otx.alienvault.com/api/v1/indicators/hostname/%s/url_list?limit=%d&page=%d", 45 | domain, otxResultsLimit, page, 46 | ) 47 | } 48 | } 49 | 50 | func (o *OTXProvider) Fetch(domain string, results chan<- string) error { 51 | for page := 0; ; page++ { 52 | resp, err := o.MakeRequest(o.formatURL(domain, page)) 53 | if err != nil { 54 | if o.Config.Verbose { 55 | fmt.Printf("[-] failed to fetch otx results page %d: %s\n", page, err) 56 | } 57 | return fmt.Errorf("failed to fetch otx results page %d: %s", page, err) 58 | } 59 | 60 | var result OTXResult 61 | if err = json.NewDecoder(resp.Body).Decode(&result); err != nil { 62 | _ = resp.Body.Close() 63 | if o.Config.Verbose { 64 | fmt.Printf("[-] failed to decode otx results for page %d: %s\n", page, err) 65 | } 66 | return fmt.Errorf("failed to decode otx results for page %d: %s", page, err) 67 | } 68 | 69 | _ = resp.Body.Close() 70 | 71 | for _, entry := range result.URLList { 72 | if o.IncludeSubdomains { 73 | if !domainutil.HasSubdomain(domain) { 74 | results <- entry.URL 75 | } else { 76 | if strings.Contains(strings.ToLower(entry.Hostname), strings.ToLower(domain)) { 77 | results <- entry.URL 78 | } 79 | } 80 | } else { 81 | if strings.EqualFold(domain, entry.Hostname) { 82 | results <- entry.URL 83 | } 84 | } 85 | } 86 | 87 | if !result.HasNext { 88 | break 89 | } 90 | } 91 | 92 | return nil 93 | } 94 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Discontinued 3 | 4 | [lc](https://github.com/lc) has made a great job 
updating gau, adding everything that gauplus has and more. 5 | Please use his version; it is far more advanced than this one. 6 | 7 | Visit it here: http://www.github.com/lc/gau 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | --------------------------------------------------------------------- 18 | # Discontinued 19 | --------------------------------------------------------------------- 20 | 21 | 22 | 23 | # GauPlus 24 | 25 | A modified version of gau (http://www.github.com/lc/gau) for personal use. 26 | It supports workers, proxies, and a few extra features. 27 | 28 | ## Usage: 29 | Examples: 30 | 31 | ```bash 32 | $ echo "example.com" | gauplus 33 | $ cat domains.txt | gauplus 34 | $ gauplus example.com 35 | $ gauplus -o example-urls.txt example.com 36 | $ echo "example.com" | gauplus -p "http://proxy.packetstream.io:31112" --random-agent -o result.txt -t 25 37 | $ gauplus -b png,jpg,gif example.com 38 | ``` 39 | ## Run Docker: 40 | Examples: 41 | ```bash 42 | $ docker build -t gauplus . 43 | $ docker run -t gauplus -h 44 | ``` 45 | 46 | To display the help for the tool, use the `-h` flag: 47 | 48 | ```bash 49 | $ gauplus -h 50 | 51 | -json 52 | write output as json 53 | -o string 54 | filename to write results to 55 | -p string 56 | use proxy 57 | -providers string 58 | providers to fetch urls for (default "wayback,otx,commoncrawl") 59 | -random-agent 60 | use random user-agent 61 | -retries uint 62 | amount of retries for http client (default 5) 63 | -subs 64 | include subdomains of target domain 65 | -t int 66 | amount of parallel workers (default 5) 67 | -v enable verbose mode 68 | -version 69 | show gauplus version 70 | -b 71 | extensions to skip 72 | ``` 73 | 74 | ### Comparison 75 | ``` 76 | [root@DarkStar]─[/opt/bp0/lovan/gau] wc -l targets.txt 77 | 31 targets.txt 78 | 79 | [root@DarkStar]─[/opt/bp0/lovan/gau] time cat targets.txt | gau 80 | real 7m17.529s 81 | user 0m0.360s 82 | sys 0m0.345s 83 | 84 | [root@DarkStar]─[/opt/bp0/lovan/gauplus] time cat targets.txt | gauplus -p "http://proxy.packetstream.io:31112" --random-agent -t 25 85 | real 0m49.899s 86 | user 0m0.380s 87 | sys 0m0.408s 88 | ``` 89 | 90 | ## Installation: 91 | ### From source: 92 | ``` 93 | $ go install github.com/bp0lr/gauplus@latest 94 | ``` 95 | 96 | ## Useful? Buy lc some coffee! 97 | 98 | Buy Me A Coffee 99 | -------------------------------------------------------------------------------- /providers/wayback.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | type WaybackProvider struct { 10 | *Config 11 | } 12 | 13 | type WaybackPaginationResult uint 14 | 15 | type WaybackResult [][]string 16 | 17 | func NewWaybackProvider(config *Config) Provider { 18 | return &WaybackProvider{Config: config} 19 | } 20 | 21 | func (w *WaybackProvider) formatURL(domain string, page uint) string { 22 | if w.IncludeSubdomains { 23 | domain = "*." + domain 24 | } 25 | 26 | return fmt.Sprintf( 27 | "https://web.archive.org/cdx/search/cdx?url=%s/*&output=json&collapse=urlkey&fl=original&page=%d", 28 | domain, page, 29 | ) 30 | } 31 | 32 | // Fetch the number of pages. 
33 | func (w *WaybackProvider) getPagination(domain string) (WaybackPaginationResult, error) { 34 | url := fmt.Sprintf("%s&showNumPages=true", w.formatURL(domain, 0)) 35 | 36 | resp, err := w.MakeRequest(url) 37 | if err != nil { 38 | if w.Config.Verbose { 39 | fmt.Printf("wayback error %s: %s\n", url, err) 40 | } 41 | return 0, err 42 | } 43 | 44 | defer resp.Body.Close() 45 | 46 | var paginationResult WaybackPaginationResult 47 | if err = json.NewDecoder(resp.Body).Decode(&paginationResult); err != nil { 48 | if w.Config.Verbose { 49 | fmt.Printf("wayback error %s\n", err) 50 | } 51 | return 0, err 52 | } 53 | 54 | time.Sleep(time.Millisecond * 100) 55 | return paginationResult, nil 56 | } 57 | 58 | func (w *WaybackProvider) Fetch(domain string, results chan<- string) error { 59 | pages, err := w.getPagination(domain) 60 | if err != nil { 61 | if w.Config.Verbose { 62 | fmt.Printf("failed to fetch wayback pagination: %s\n", err) 63 | } 64 | return fmt.Errorf("failed to fetch wayback pagination: %s", err) 65 | } 66 | 67 | for page := uint(0); page < uint(pages); page++ { 68 | resp, err := w.MakeRequest(w.formatURL(domain, page)) 69 | if err != nil { 70 | if w.Config.Verbose { 71 | fmt.Printf("failed to fetch wayback results page %d: %s\n", page, err) 72 | } 73 | return fmt.Errorf("failed to fetch wayback results page %d: %s", page, err) 74 | } 75 | 76 | var result WaybackResult 77 | if err = json.NewDecoder(resp.Body).Decode(&result); err != nil { 78 | _ = resp.Body.Close() 79 | if w.Config.Verbose { 80 | fmt.Printf("failed to decode wayback results for page %d: %s\n", page, err) 81 | } 82 | return fmt.Errorf("failed to decode wayback results for page %d: %s", page, err) 83 | } 84 | 85 | _ = resp.Body.Close() 86 | 87 | if w.Config.Verbose { 88 | fmt.Printf("[WBM] domain %v : %v\n", domain, len(result)) 89 | } 90 | 91 | for i, entry := range result { 92 | // Skip first result by default 93 | if i != 0 { 94 | results <- entry[0] 95 | } 96 | } 97 | } 98 | 99 | return nil 100 | } 101 | -------------------------------------------------------------------------------- /providers/provider.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "math/rand" 5 | "net/http" 6 | "time" 7 | ) 8 | 9 | const ( 10 | // Version of gau 11 | Version = `1.2.0` 12 | // UserAgent for the HTTP Client 13 | userAgent = "Mozilla/5.0 (compatible; gauplus/" + Version + "; https://github.com/bp0lr/gauplus)" 14 | ) 15 | 16 | // A generic interface for providers 17 | type Provider interface { 18 | Fetch(string, chan<- string) error 19 | } 20 | type Config struct { 21 | Verbose bool 22 | RandomAgent bool 23 | MaxRetries uint 24 | MaxThreads int 25 | IncludeSubdomains bool 26 | Client *http.Client 27 | Providers []string 28 | Blacklist map[string]struct{} 29 | Output string 30 | JSON bool 31 | } 32 | 33 | func getUserAgent() string { 34 | payload := []string{ 35 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36", 36 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36", 37 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", 38 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36", 39 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15", 40 | "Mozilla/5.0 
(Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36", 41 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0", 42 | "Mozilla/5.0 (iPhone; CPU iPhone OS 8_4_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12H321 Safari/600.1.4", 43 | "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko", 44 | "Mozilla/5.0 (iPad; CPU OS 7_1_2 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) Version/7.0 Mobile/11D257 Safari/9537.53", 45 | "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)", 46 | } 47 | 48 | rand.Seed(time.Now().UnixNano()) 49 | randomIndex := rand.Intn(len(payload)) 50 | 51 | pick := payload[randomIndex] 52 | 53 | return pick 54 | } 55 | 56 | // MakeRequest tries to make a GET request for the given URL and retries on failure. 57 | func (c *Config) MakeRequest(url string) (resp *http.Response, err error) { 58 | for retries := int(c.MaxRetries); ; retries-- { 59 | req, err := http.NewRequest("GET", url, nil) 60 | if err != nil { 61 | return nil, err 62 | } 63 | 64 | if c.RandomAgent { 65 | req.Header.Set("User-Agent", getUserAgent()) 66 | } else { 67 | req.Header.Add("User-Agent", userAgent) 68 | } 69 | 70 | resp, err = c.Client.Do(req) 71 | if err != nil { 72 | if retries == 0 { 73 | return nil, err 74 | } 75 | 76 | continue 77 | } 78 | 79 | break 80 | } 81 | 82 | return 83 | } 84 | -------------------------------------------------------------------------------- /providers/common.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "bufio" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | ) 9 | 10 | type CommonProvider struct { 11 | *Config 12 | apiURL string 13 | } 14 | 15 | type CommonResult struct { 16 | URL string `json:"url"` 17 | Error string `json:"error"` 18 | } 19 | 20 | type CommonPaginationResult struct { 21 | Blocks uint `json:"blocks"` 22 | PageSize uint `json:"pageSize"` 23 | Pages uint `json:"pages"` 24 | } 25 | 26 | type CommonAPIResult []struct { 27 | API string `json:"cdx-api"` 28 | } 29 | 30 | func NewCommonProvider(config *Config) (Provider, error) { 31 | c := CommonProvider{Config: config} 32 | 33 | // Fetch the list of available CommonCrawl API URLs. 34 | resp, err := c.MakeRequest("http://index.commoncrawl.org/collinfo.json") 35 | if err != nil { 36 | if c.Config.Verbose { 37 | fmt.Printf("[-] Error on request collinfo.json: %s\n", err) 38 | } 39 | return nil, err 40 | } 41 | 42 | defer resp.Body.Close() 43 | 44 | var apiResult CommonAPIResult 45 | err = json.NewDecoder(resp.Body).Decode(&apiResult) 46 | if err != nil || len(apiResult) < 1 { 47 | if c.Config.Verbose { 48 | fmt.Printf("[-] Error on response collinfo.json: %s\n", err) 49 | } 50 | err = errors.New("[-] Commoncrawl, error on response collinfo.json.") 51 | return nil, err 52 | } 53 | 54 | c.apiURL = apiResult[0].API 55 | return &c, nil 56 | } 57 | 58 | func (c *CommonProvider) formatURL(domain string, page uint) string { 59 | if c.IncludeSubdomains { 60 | domain = "*." + domain 61 | } 62 | 63 | return fmt.Sprintf("%s?url=%s/*&output=json&fl=url&page=%d", c.apiURL, domain, page) 64 | } 65 | 66 | // Fetch the number of pages. 
67 | func (c *CommonProvider) getPagination(domain string) (*CommonPaginationResult, error) { 68 | url := fmt.Sprintf("%s&showNumPages=true", c.formatURL(domain, 0)) 69 | 70 | resp, err := c.MakeRequest(url) 71 | if err != nil { 72 | if c.Config.Verbose { 73 | fmt.Printf("[-] Error request pagination: %s\n", err) 74 | } 75 | return nil, err 76 | } 77 | 78 | defer resp.Body.Close() 79 | 80 | var paginationResult CommonPaginationResult 81 | if err = json.NewDecoder(resp.Body).Decode(&paginationResult); err != nil { 82 | if c.Config.Verbose { 83 | fmt.Printf("[-] Error response pagination: %s\n", err) 84 | } 85 | return nil, err 86 | } 87 | 88 | return &paginationResult, nil 89 | } 90 | 91 | func (c *CommonProvider) Fetch(domain string, results chan<- string) error { 92 | pagination, err := c.getPagination(domain) 93 | if err != nil { 94 | if c.Config.Verbose { 95 | fmt.Printf("[-] failed to fetch common pagination: %s\n", err) 96 | } 97 | return fmt.Errorf("failed to fetch common pagination: %s", err) 98 | } 99 | 100 | for page := uint(0); page < pagination.Pages; page++ { 101 | resp, err := c.MakeRequest(c.formatURL(domain, page)) 102 | if err != nil { 103 | if c.Config.Verbose { 104 | fmt.Printf("[-] failed to fetch common results page %d: %s\n", page, err) 105 | } 106 | return fmt.Errorf("failed to fetch common results page %d: %s", page, err) 107 | } 108 | 109 | sc := bufio.NewScanner(resp.Body) 110 | for sc.Scan() { 111 | var result CommonResult 112 | if err := json.Unmarshal(sc.Bytes(), &result); err != nil { 113 | _ = resp.Body.Close() 114 | if c.Config.Verbose { 115 | fmt.Printf("[-] failed to decode common results for page %d: %s\n", page, err) 116 | } 117 | return fmt.Errorf("failed to decode common results for page %d: %s", page, err) 118 | } 119 | 120 | if result.Error != "" { 121 | if c.Config.Verbose { 122 | fmt.Printf("[-] received an error from common api: %s\n", result.Error) 123 | } 124 | return fmt.Errorf("received an error from common api: %s", result.Error) 125 | } 126 | 127 | if c.Config.Verbose { 128 | fmt.Printf("[Common] domain %v : %v\n", domain, len(result.URL)) 129 | } 130 | results <- result.URL 131 | } 132 | 133 | _ = resp.Body.Close() 134 | } 135 | 136 | return nil 137 | } 138 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "crypto/tls" 6 | "flag" 7 | "fmt" 8 | "io" 9 | "log" 10 | "net" 11 | "net/http" 12 | "net/url" 13 | "os" 14 | "strings" 15 | "sync" 16 | "time" 17 | 18 | "github.com/bp0lr/gauplus/output" 19 | "github.com/bp0lr/gauplus/providers" 20 | ) 21 | 22 | func run(config *providers.Config, domains []string) { 23 | 24 | var providerList []providers.Provider 25 | for _, toUse := range config.Providers { 26 | switch toUse { 27 | case "wayback": 28 | wayback := providers.NewWaybackProvider(config) 29 | providerList = append(providerList, wayback) 30 | case "otx": 31 | otx := providers.NewOTXProvider(config) 32 | providerList = append(providerList, otx) 33 | case "commoncrawl": 34 | common, err := providers.NewCommonProvider(config) 35 | if err == nil { 36 | providerList = append(providerList, common) 37 | } 38 | default: 39 | fmt.Fprintf(os.Stderr, "Error: %s is not a valid provider.\n", toUse) 40 | } 41 | } 42 | 43 | if(len(providerList) == 0){ 44 | fmt.Fprintf(os.Stderr, "Error: All our providers are currently down.\nPlease try again later.\n") 45 | return; 46 | } 47 | 48 | 
results := make(chan string) 49 | var out io.Writer 50 | // Handle results in background 51 | if config.Output == "" { 52 | out = os.Stdout 53 | } else { 54 | ofp, err := os.OpenFile(config.Output, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) 55 | if err != nil { 56 | log.Fatalf("Could not open output file: %v\n", err) 57 | } 58 | defer ofp.Close() 59 | out = ofp 60 | } 61 | 62 | writewg := &sync.WaitGroup{} 63 | writewg.Add(1) 64 | if config.JSON { 65 | go func() { 66 | output.WriteURLsJSON(results, out, config.Blacklist) 67 | writewg.Done() 68 | }() 69 | } else { 70 | go func() { 71 | output.WriteURLs(results, out, config.Blacklist) 72 | writewg.Done() 73 | }() 74 | } 75 | exitStatus := 0 76 | 77 | var i = 0 78 | wg := &sync.WaitGroup{} 79 | for _, domain := range domains { 80 | 81 | if config.Verbose { 82 | fmt.Printf("[+] Working on: %v\n", domain) 83 | } 84 | domain := domain 85 | wg.Add(len(providerList)) 86 | i++ 87 | for _, provider := range providerList { 88 | go func(provider providers.Provider) { 89 | defer wg.Done() 90 | if err := provider.Fetch(domain, results); err != nil { 91 | if config.Verbose { 92 | _, _ = fmt.Fprintln(os.Stderr, err) 93 | } 94 | } 95 | }(provider) 96 | } 97 | 98 | if i >= config.MaxThreads { 99 | i = 0 100 | wg.Wait() 101 | } 102 | } 103 | 104 | wg.Wait() 105 | close(results) 106 | 107 | // Wait for writer to finish 108 | writewg.Wait() 109 | os.Exit(exitStatus) 110 | } 111 | 112 | func main() { 113 | var domains []string 114 | verbose := flag.Bool("v", false, "enable verbose mode") 115 | includeSubs := flag.Bool("subs", false, "include subdomains of target domain") 116 | maxRetries := flag.Uint("retries", 5, "amount of retries for http client") 117 | useProviders := flag.String("providers", "wayback,otx,commoncrawl", "providers to fetch urls for") 118 | version := flag.Bool("version", false, "show gau version") 119 | proxy := flag.String("p", "", "HTTP proxy to use") 120 | output := flag.String("o", "", "filename to write results to") 121 | jsonOut := flag.Bool("json", false, "write output as json") 122 | randomAgent := flag.Bool("random-agent", false, "use random user-agent") 123 | maxThreads := flag.Int("t", 5, "amount of parallel workers") 124 | blacklist := flag.String("b","","extensions to skip, ex: ttf,woff,svg,png,jpg") 125 | flag.Parse() 126 | 127 | if *version { 128 | fmt.Printf("gau version: %s\n", providers.Version) 129 | os.Exit(0) 130 | } 131 | 132 | if *maxThreads > 100 { 133 | *maxThreads = 100 134 | } 135 | 136 | if flag.NArg() > 0 { 137 | domains = flag.Args() 138 | } else { 139 | s := bufio.NewScanner(os.Stdin) 140 | for s.Scan() { 141 | domains = append(domains, s.Text()) 142 | } 143 | } 144 | 145 | tr := &http.Transport{ 146 | DialContext: (&net.Dialer{ 147 | Timeout: 5 * time.Second, 148 | }).DialContext, 149 | TLSHandshakeTimeout: 5 * time.Second, 150 | TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, 151 | } 152 | 153 | if *proxy != "" { 154 | if p, err := url.Parse(*proxy); err == nil { 155 | tr.Proxy = http.ProxyURL(p) 156 | } 157 | } 158 | 159 | extensions := strings.Split(*blacklist,",") 160 | extMap := make(map[string]struct{}) 161 | for _, ext := range extensions { 162 | ext = strings.Replace(ext, ".", "", -1) 163 | extMap[strings.ToLower(ext)] = struct{}{} 164 | } 165 | 166 | config := providers.Config{ 167 | Verbose: *verbose, 168 | MaxThreads: *maxThreads, 169 | RandomAgent: *randomAgent, 170 | MaxRetries: *maxRetries, 171 | IncludeSubdomains: *includeSubs, 172 | Output: *output, 173 | JSON: *jsonOut, 174 | 
Blacklist: extMap, 175 | Client: &http.Client{ 176 | Timeout: time.Second * 15, 177 | Transport: tr, 178 | }, 179 | Providers: strings.Split(*useProviders, ","), 180 | } 181 | run(&config, domains) 182 | } 183 | --------------------------------------------------------------------------------
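
Every source above (wayback, otx, commoncrawl) plugs into gauplus through the small `Provider` interface declared in `providers/provider.go` and is wired up in `run()` in `main.go`. The snippet below is a minimal, hypothetical sketch of what an additional provider could look like — the type name `ExampleProvider` and its endpoint are invented for illustration and are not part of this repository — showing how `Config.MakeRequest` and the shared results channel are intended to be reused:

```go
package providers

import (
	"bufio"
	"fmt"
)

// ExampleProvider is a hypothetical provider, shown only to illustrate the
// Provider interface; the endpoint used below does not exist.
type ExampleProvider struct {
	*Config
}

func NewExampleProvider(config *Config) Provider {
	return &ExampleProvider{Config: config}
}

// Fetch streams one URL per line from a made-up plain-text index into the
// shared results channel, reusing Config.MakeRequest for retries and
// user-agent handling.
func (e *ExampleProvider) Fetch(domain string, results chan<- string) error {
	resp, err := e.MakeRequest(fmt.Sprintf("https://example.invalid/urls?host=%s", domain))
	if err != nil {
		return fmt.Errorf("failed to fetch example results: %s", err)
	}
	defer resp.Body.Close()

	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		results <- sc.Text()
	}
	return sc.Err()
}
```

A provider written this way would still need its own `case` in the provider switch inside `run()` in `main.go`, and an entry in the `-providers` list, before it could be selected.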