├── .travis.yml ├── LICENSE ├── README.md ├── goreleaser.yml ├── lib ├── crawler.go └── crawler_test.go ├── logo.png └── main.go /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | script: 4 | - go get github.com/schollz/boltdb-server/... 5 | - go get github.com/schollz/linkcrawler/... 6 | - cd $GOPATH/src/github.com/schollz/boltdb-server && go build 7 | - cd $GOPATH/src/github.com/schollz/boltdb-server && ./boltdb-server & 8 | - cd $GOPATH/src/github.com/schollz/linkcrawler/lib && go test 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Zack 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 |
11 | 12 |Cross-platform persistent and distributed web crawler
13 | 14 | *linkcrawler* is persistent because the queue is stored in a remote database, so a crawl automatically picks up where it left off if it is interrupted. *linkcrawler* is distributed because multiple instances of *linkcrawler* will work on the remotely stored queue, so you can start as many crawlers as you want on separate machines to speed up the crawl. *linkcrawler* is also fast because it is threaded and uses connection pools. 15 | 16 | Crawl responsibly. 17 | 18 | # This repo has been superseded by [schollz/goredis-crawler](https://github.com/schollz/goredis-crawler) 19 | 20 | Getting Started 21 | =============== 22 | 23 | ## Install 24 | 25 | If you have Go installed, just run 26 | ``` 27 | go get github.com/schollz/linkcrawler/... 28 | go get github.com/schollz/boltdb-server/... 29 | ``` 30 | 31 | Otherwise, [download linkcrawler](https://github.com/schollz/linkcrawler/releases/latest) and [download the boltdb-server](https://github.com/schollz/boltdb-server/releases/latest) from the latest releases. 32 | 33 | 34 | ## Run 35 | 36 | ### Crawl a site 37 | 38 | First run the database server, which acts as the hub for all crawlers on your LAN: 39 | 40 | ```sh 41 | $ ./boltdb-server 42 | boltdb-server running on http://X.Y.Z.W:8050 43 | ``` 44 | 45 | Then, to capture all the links on a website: 46 | 47 | ```sh 48 | $ linkcrawler --server http://X.Y.Z.W:8050 crawl http://rpiai.com 49 | ``` 50 | 51 | 52 | Make sure to replace `http://X.Y.Z.W:8050` with the address printed by the boltdb-server. 53 | 54 | You can run this last command on as many different machines as you want; each instance crawls the same website and adds the links it collects to the shared queue on the server. 55 | 56 | The current state of the crawler is saved. If the crawler is interrupted, you can simply run the command again and it will resume from the last state. 57 | 58 | See the help (`-help`) for more options, such as exclusions/inclusions and the sizes of the worker and connection pools. 59 | 60 | 61 | ### Download a site 62 | 63 | You can also use *linkcrawler* to download webpages from a newline-delimited list of websites. As before, first start up a boltdb-server. Then you can run: 64 | 65 | ```bash 66 | $ linkcrawler --server http://X.Y.Z.W:8050 download links.txt 67 | ``` 68 | 69 | Downloads are saved into a folder `downloaded`, with each file named by the Base32-encoded URL and compressed with gzip.
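To inspect a downloaded page by hand, you can reverse this naming scheme: Base32-decode the filename to recover the URL and gunzip the contents. A minimal sketch using only the standard library (the filename shown is illustrative):

```go
package main

import (
	"compress/gzip"
	"encoding/base32"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

// Usage: go run decode.go downloaded/NB2HI4B2F4XXE4DJMFUS4Y3PNU======.html.gz
func main() {
	name := os.Args[1]

	// The part of the filename before the first "." is the Base32-encoded URL.
	base := strings.Split(filepath.Base(name), ".")[0]
	urlBytes, err := base32.StdEncoding.DecodeString(base)
	if err != nil {
		panic(err)
	}
	fmt.Println("URL:", string(urlBytes))

	// The file contents are the gzip-compressed page.
	f, err := os.Open(name)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	gz, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	defer gz.Close()
	page, err := ioutil.ReadAll(gz)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(page))
}
```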
70 | 71 | ### Dump the current list of links 72 | 73 | To dump the current database, just use 74 | 75 | ```bash 76 | $ linkcrawler --server http://X.Y.Z.W:8050 dump http://rpiai.com 77 | Wrote 32 links to NB2HI4B2F4XXE4DJMFUS4Y3PNU======.txt 78 | ``` 79 | 80 | ## License 81 | 82 | MIT 83 | -------------------------------------------------------------------------------- /goreleaser.yml: -------------------------------------------------------------------------------- 1 | # goreleaser.yml 2 | # Build customization 3 | build: 4 | binary_name: linkcrawler 5 | goos: 6 | - windows 7 | - darwin 8 | - linux 9 | goarch: 10 | - amd64 11 | # Archive customization 12 | archive: 13 | format: zip 14 | replacements: 15 | amd64: 64-bit 16 | darwin: macOS 17 | linux: linux 18 | windows: windows 19 | files: 20 | - README.md 21 | - LICENSE 22 | -------------------------------------------------------------------------------- /lib/crawler.go: -------------------------------------------------------------------------------- 1 | package crawler 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "encoding/base32" 7 | "fmt" 8 | "io/ioutil" 9 | "log" 10 | "math" 11 | "mime" 12 | "net/http" 13 | "net/url" 14 | "os" 15 | "path" 16 | "strconv" 17 | "strings" 18 | "sync" 19 | "time" 20 | 21 | "golang.org/x/net/proxy" 22 | 23 | humanize "github.com/dustin/go-humanize" 24 | "github.com/goware/urlx" 25 | "github.com/jackdanger/collectlinks" 26 | "github.com/jcelliott/lumber" 27 | "github.com/schollz/boltdb-server/connect" 28 | ) 29 | 30 | // Crawler is the crawler instance 31 | type Crawler struct { 32 | client *http.Client 33 | wg sync.WaitGroup 34 | programTime time.Time 35 | curFileList map[string]bool 36 | BaseURL string 37 | KeywordsToExclude []string 38 | KeywordsToInclude []string 39 | MaxNumberWorkers int 40 | MaxNumberConnections int 41 | Verbose bool 42 | UseTor bool 43 | FilePrefix string 44 | Remote, Username, Password string // Parameters for BoltDB remote connection 45 | UserAgent string 46 | TimeIntervalToPrintStats int 47 | numTrash int 48 | numDone int 49 | numToDo int 50 | numDoing int 51 | numberOfURLSParsed int 52 | TrashLimit int 53 | conn *connect.Connection 54 | log *lumber.ConsoleLogger 55 | } 56 | 57 | func encodeURL(url string) string { 58 | return base32.StdEncoding.EncodeToString([]byte(url)) 59 | } 60 | 61 | // New will create a new crawler 62 | func New(url string, boltdbserver string, trace bool) (*Crawler, error) { 63 | var err error 64 | c := new(Crawler) 65 | if trace { 66 | c.log = lumber.NewConsoleLogger(lumber.TRACE) 67 | } else { 68 | c.log = lumber.NewConsoleLogger(lumber.WARN) 69 | } 70 | c.BaseURL = url 71 | c.TrashLimit = 5 72 | c.MaxNumberConnections = 50 73 | c.MaxNumberWorkers = 50 74 | c.FilePrefix = encodeURL(url) 75 | c.TimeIntervalToPrintStats = 5 76 | c.Remote = boltdbserver 77 | c.UseTor = false 78 | c.UserAgent = "" 79 | c.log.Info("Creating new database on %s: %s.db", c.Remote, encodeURL(url)) 80 | c.conn, err = connect.Open(c.Remote, encodeURL(url)) 81 | if err != nil { 82 | c.log.Error("Problem opening database") 83 | return c, err 84 | } 85 | err = c.conn.CreateBuckets([]string{"todo", "trash", "done", "doing"}) 86 | if err != nil { 87 | c.log.Error("Problem creating buckets") 88 | return c, err 89 | } 90 | err = c.updateListCounts() 91 | if err != nil { 92 | c.log.Error("Problem updating list counts") 93 | c.log.Error(err.Error()) 94 | } 95 | return c, err 96 | } 97 | 98 | func (c *Crawler) Name() string { 99 | return encodeURL(c.BaseURL) 100 | } 101 | 102 | func (c 
*Crawler) GetLinks() (links []string, err error) { 103 | doneLinks, err := c.conn.GetAll("done") 104 | if err != nil { 105 | return links, err 106 | } 107 | todoLinks, err := c.conn.GetAll("todo") 108 | if err != nil { 109 | return links, err 110 | } 111 | trashLinks, err := c.conn.GetAll("trash") 112 | if err != nil { 113 | return links, err 114 | } 115 | doingLinks, err := c.conn.GetAll("doing") 116 | if err != nil { 117 | return links, err 118 | } 119 | links = make([]string, len(doneLinks)+len(todoLinks)+len(trashLinks)+len(doingLinks)) 120 | linksI := 0 121 | for link := range doneLinks { 122 | links[linksI] = link 123 | linksI++ 124 | } 125 | for link := range todoLinks { 126 | links[linksI] = link 127 | linksI++ 128 | } 129 | for link := range trashLinks { 130 | links[linksI] = link 131 | linksI++ 132 | } 133 | for link := range doingLinks { 134 | links[linksI] = link 135 | linksI++ 136 | } 137 | return links, nil 138 | } 139 | 140 | func (c *Crawler) Dump() error { 141 | links, err := c.GetLinks() 142 | if err != nil { 143 | return err 144 | } 145 | 146 | err = ioutil.WriteFile(encodeURL(c.BaseURL)+".txt", []byte(strings.Join(links, "\n")), 0755) 147 | if err != nil { 148 | return err 149 | } 150 | fmt.Printf("Wrote %d links to %s\n", len(links), encodeURL(c.BaseURL)+".txt") 151 | return nil 152 | } 153 | 154 | func (c *Crawler) ResetDoing() error { 155 | keys, err := c.conn.GetKeys("doing") 156 | if err != nil { 157 | return err 158 | } 159 | c.log.Trace("Moved %d keys from doing to todo", len(keys)) 160 | return c.conn.Move("doing", "todo", keys) 161 | } 162 | 163 | func (c *Crawler) downloadOrCrawlLink(url string, currentNumberOfTries int, download bool) error { 164 | // Decrement the counter when the goroutine completes. 165 | defer c.wg.Done() 166 | 167 | if download { 168 | // Check if it is already downloaded and exists as a file 169 | if _, ok := c.curFileList[encodeURL(url)]; ok { 170 | c.log.Trace("Already downloaded %s", url) 171 | c.conn.Post("done", map[string]string{url: strconv.Itoa(currentNumberOfTries)}) 172 | return nil 173 | } 174 | } 175 | 176 | // Try to download 177 | currentNumberOfTries++ 178 | req, err := http.NewRequest("GET", url, nil) 179 | if err != nil { 180 | c.log.Error("Problem making request for %s: %s", url, err.Error()) 181 | return err 182 | } 183 | if c.UserAgent != "" { 184 | c.log.Trace("Setting useragent string to '%s'", c.UserAgent) 185 | req.Header.Set("User-Agent", c.UserAgent) 186 | } 187 | resp, err := c.client.Do(req) 188 | if err != nil { 189 | // Post to trash immediately if the download fails 190 | err2 := c.conn.Post("trash", map[string]string{url: strconv.Itoa(currentNumberOfTries)}) 191 | if err2 != nil { 192 | return err 193 | } 194 | c.log.Trace("Problem with %s: %s", url, err.Error()) 195 | return nil 196 | } 197 | 198 | defer resp.Body.Close() 199 | if resp.StatusCode == 200 { 200 | c.numberOfURLSParsed++ 201 | 202 | // Download, if downloading 203 | if download { 204 | contentType := resp.Header.Get("Content-type") 205 | contentTypes, contentTypeErr := mime.ExtensionsByType(contentType) 206 | extension := "" 207 | if contentTypeErr == nil { 208 | extension = contentTypes[0] 209 | if extension == ".htm" || extension == ".hxa" { 210 | extension = ".html" 211 | } 212 | } else { 213 | return contentTypeErr 214 | } 215 | fileContent, err := ioutil.ReadAll(resp.Body) 216 | if err != nil { 217 | return err 218 | } 219 | 220 | var buf bytes.Buffer 221 | writer := gzip.NewWriter(&buf) 222 | writer.Write(fileContent) 223 | writer.Close() 224 |
filename := encodeURL(url) + extension + ".gz" 225 | os.Mkdir("downloaded", 0755) 226 | err = ioutil.WriteFile(path.Join("downloaded", filename), buf.Bytes(), 0755) 227 | if err != nil { 228 | return err 229 | } 230 | 231 | c.log.Trace("Saved %s to %s", url, encodeURL(url)+extension) 232 | } else { 233 | links := collectlinks.All(resp.Body) 234 | c.log.Info("Got %d links from %s\n", len(links), url) 235 | linkCandidates := make([]string, len(links)) 236 | linkCandidatesI := 0 237 | for _, link := range links { 238 | // Do not use query parameters 239 | if strings.Contains(link, "?") { 240 | link = strings.Split(link, "?")[0] 241 | } 242 | // Add the Base URL to everything if it doesn't have it 243 | if !strings.Contains(link, "http") { 244 | link = c.BaseURL + link 245 | } 246 | // Skip links that have a different Base URL 247 | if !strings.Contains(link, c.BaseURL) { 248 | c.log.Trace("Skipping %s because it has a different base URL", link) 249 | continue 250 | } 251 | // Normalize the link 252 | parsedLink, _ := urlx.Parse(link) 253 | normalizedLink, _ := urlx.Normalize(parsedLink) 254 | if len(normalizedLink) == 0 { 255 | continue 256 | } 257 | 258 | // Exclude keywords, skip if any are found 259 | foundExcludedKeyword := false 260 | for _, keyword := range c.KeywordsToExclude { 261 | if strings.Contains(normalizedLink, keyword) { 262 | foundExcludedKeyword = true 263 | c.log.Trace("Skipping %s because contains %s", link, keyword) 264 | break 265 | } 266 | } 267 | if foundExcludedKeyword { 268 | continue 269 | } 270 | 271 | // Include keywords, skip if any are NOT found 272 | foundIncludedKeyword := false 273 | for _, keyword := range c.KeywordsToInclude { 274 | if strings.Contains(normalizedLink, keyword) { 275 | foundIncludedKeyword = true 276 | break 277 | } 278 | } 279 | if !foundIncludedKeyword && len(c.KeywordsToInclude) > 0 { 280 | continue 281 | } 282 | 283 | // If it passed all the tests, add to link candidates 284 | linkCandidates[linkCandidatesI] = normalizedLink 285 | linkCandidatesI++ 286 | } 287 | linkCandidates = linkCandidates[0:linkCandidatesI] 288 | 289 | // Check to see if any link candidates have already been done 290 | doesHaveKeysMap, err := c.conn.HasKeys([]string{"todo", "trash", "doing", "done"}, linkCandidates) 291 | if err != nil { 292 | return err 293 | } 294 | linksToDo := make(map[string]string) 295 | for link, alreadyDone := range doesHaveKeysMap { 296 | if alreadyDone { 297 | continue 298 | } 299 | linksToDo[link] = "0" 300 | } 301 | // Post new links to todo list 302 | c.log.Trace("Posting %d more links todo", len(linksToDo)) 303 | err = c.conn.Post("todo", linksToDo) 304 | if err != nil { 305 | return err 306 | } 307 | 308 | } 309 | 310 | // Dequeue the current URL 311 | // err = c.conn.Post("done", map[string]string{url: strconv.Itoa(currentNumberOfTries)}) 312 | err = c.conn.Move("doing", "done", []string{url}) 313 | if err != nil { 314 | c.log.Error("Problem posting to done: %s", err.Error()) 315 | } 316 | c.log.Trace("Posted %s to done", url) 317 | } else { 318 | if currentNumberOfTries > 3 { 319 | // Delete this URL as it has been tried too many times 320 | err = c.conn.Move("doing", "trash", []string{url}) 321 | // err = c.conn.Post("trash", map[string]string{url: strconv.Itoa(currentNumberOfTries)}) 322 | if err != nil { 323 | c.log.Error("Problem posting to trash: %s", err.Error()) 324 | } 325 | c.log.Trace("Too many tries, trashing " + url) 326 | } else { 327 | // Update the URL with the number of tries 328 | m := map[string]string{url: 
strconv.Itoa(currentNumberOfTries)} 329 | c.conn.Post("todo", m) 330 | } 331 | } 332 | return nil 333 | } 334 | 335 | // Download downloads the pages from the given list of urls 336 | func (c *Crawler) Download(urls []string) error { 337 | download := true 338 | 339 | // Determine which files have been downloaded 340 | fmt.Printf("Determining which of the %d urls are already downloaded...\n", len(urls)) 341 | c.curFileList = make(map[string]bool) 342 | files, err := ioutil.ReadDir("downloaded") 343 | if err == nil { 344 | for _, f := range files { 345 | name := strings.Split(f.Name(), ".")[0] 346 | if len(name) < 2 { 347 | continue 348 | } 349 | c.curFileList[name] = true 350 | } 351 | } 352 | 353 | fmt.Printf("Determining which of the %d urls are already queued...\n", len(urls)) 354 | urlsAlreadyAdded, err := c.conn.HasKeys([]string{"todo", "trash", "done"}, urls) 355 | if err != nil { 356 | return err 357 | } 358 | urlsStillToDo := make(map[string]string) 359 | for url, alreadyAdded := range urlsAlreadyAdded { 360 | if alreadyAdded { 361 | continue 362 | } 363 | urlsStillToDo[url] = "0" 364 | } 365 | if len(urlsStillToDo) > 0 { 366 | c.conn.Post("todo", urlsStillToDo) 367 | } 368 | 369 | fmt.Println("Starting download...") 370 | return c.downloadOrCrawl(download) 371 | } 372 | 373 | // Crawl is the function to crawl with the set parameters 374 | func (c *Crawler) Crawl() error { 375 | c.log.Trace("Checking to see if database has %s", c.BaseURL) 376 | urlsAlreadyAdded, err := c.conn.HasKeys([]string{"todo", "doing", "trash", "done"}, []string{c.BaseURL}) 377 | if err != nil { 378 | return err 379 | } 380 | c.log.Trace("urlsAlreadyAdded: %v", urlsAlreadyAdded) 381 | urlsStillToDo := make(map[string]string) 382 | for url, alreadyAdded := range urlsAlreadyAdded { 383 | if alreadyAdded { 384 | continue 385 | } 386 | urlsStillToDo[url] = "0" 387 | } 388 | if len(urlsStillToDo) > 0 { 389 | c.log.Trace("Posting todo: %v", urlsStillToDo) 390 | c.conn.Post("todo", urlsStillToDo) 391 | } 392 | download := false 393 | return c.downloadOrCrawl(download) 394 | } 395 | 396 | func (c *Crawler) downloadOrCrawl(download bool) error { 397 | // Generate the connection pool 398 | var tr *http.Transport 399 | if c.UseTor { 400 | tbProxyURL, err := url.Parse("socks5://127.0.0.1:9050") 401 | if err != nil { 402 | c.log.Fatal("Failed to parse proxy URL: %v\n", err) 403 | } 404 | tbDialer, err := proxy.FromURL(tbProxyURL, proxy.Direct) 405 | if err != nil { 406 | c.log.Fatal("Failed to obtain proxy dialer: %v\n", err) 407 | } 408 | tr = &http.Transport{ 409 | MaxIdleConns: c.MaxNumberConnections, 410 | IdleConnTimeout: 30 * time.Second, 411 | DisableCompression: true, 412 | Dial: tbDialer.Dial, 413 | } 414 | } else { 415 | tr = &http.Transport{ 416 | MaxIdleConns: c.MaxNumberConnections, 417 | IdleConnTimeout: 30 * time.Second, 418 | DisableCompression: true, 419 | } 420 | } 421 | 422 | c.client = &http.Client{Transport: tr} 423 | 424 | c.programTime = time.Now() 425 | c.numberOfURLSParsed = 0 426 | it := 0 427 | go c.contantlyPrintStats() 428 | for { 429 | it++ 430 | linksToDo, err := c.conn.Pop("todo", c.MaxNumberWorkers) 431 | if err != nil { 432 | return err 433 | } 434 | err = c.conn.Post("doing", linksToDo) 435 | if err != nil { 436 | return err 437 | } 438 | 439 | if len(linksToDo) == 0 { 440 | break 441 | } 442 | for url, numTriesStr := range linksToDo { 443 | numTries, err := strconv.Atoi(numTriesStr) 444 | if err != nil { 445 | return err 446 | } 447 | c.wg.Add(1) 448 | go c.downloadOrCrawlLink(url, numTries,
download) 449 | } 450 | c.wg.Wait() 451 | 452 | if math.Mod(float64(it), 100) == 0 { 453 | // reload the configuration 454 | fmt.Println("Reloading the HTTP pool") 455 | c.client = &http.Client{Transport: tr} 456 | } 457 | } 458 | c.numToDo = 0 459 | c.printStats() 460 | return nil 461 | } 462 | 463 | func round(f float64) int { 464 | if math.Abs(f) < 0.5 { 465 | return 0 466 | } 467 | return int(f + math.Copysign(0.5, f)) 468 | } 469 | 470 | func (c *Crawler) updateListCounts() error { 471 | stats, err := c.conn.Stats() 472 | if err != nil { 473 | return err 474 | } 475 | c.numToDo = stats["todo"] 476 | c.numDone = stats["done"] 477 | c.numDoing = stats["doing"] 478 | c.numTrash = stats["trash"] 479 | return nil 480 | } 481 | 482 | func (c *Crawler) contantlyPrintStats() { 483 | lastTrashed := c.numTrash 484 | for { 485 | time.Sleep(time.Duration(int32(c.TimeIntervalToPrintStats)) * time.Second) 486 | c.updateListCounts() 487 | if c.numToDo == 0 { 488 | fmt.Println("Finished") 489 | return 490 | } 491 | c.printStats() 492 | if c.numTrash-lastTrashed > c.TrashLimit { 493 | fmt.Println("Trash limit per stats generation exceeded, exiting!") 494 | os.Exit(1) 495 | } 496 | lastTrashed = c.numTrash 497 | } 498 | } 499 | 500 | func (c *Crawler) printStats() { 501 | URLSPerSecond := round(float64(c.numberOfURLSParsed) / float64(time.Since(c.programTime).Seconds())) 502 | log.Printf("Node: %s parsed (%d/s). Total: %s todo, %s done, %s trashed\n", 503 | humanize.Comma(int64(c.numberOfURLSParsed)), 504 | URLSPerSecond, 505 | humanize.Comma(int64(c.numToDo)), 506 | humanize.Comma(int64(c.numDone)), 507 | humanize.Comma(int64(c.numTrash))) 508 | } 509 | -------------------------------------------------------------------------------- /lib/crawler_test.go: -------------------------------------------------------------------------------- 1 | package crawler 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/schollz/boltdb-server/connect" 8 | ) 9 | 10 | func TestGeneral(t *testing.T) { 11 | boltdbserver := "http://localhost:8050" 12 | crawl, err := New("http://rpiai.com/", boltdbserver, true) 13 | if err != nil { 14 | t.Error(err) 15 | } 16 | fmt.Println(crawl.BaseURL) 17 | conn, _ := connect.Open(boltdbserver, crawl.Name()) 18 | 19 | // Delete previous 20 | _ = conn.DeleteDatabase() 21 | if err != nil { 22 | t.Error(err) 23 | } 24 | 25 | crawl, err = New("http://rpiai.com/", boltdbserver, true) 26 | if err != nil { 27 | t.Error(err) 28 | } 29 | 30 | if err = crawl.Crawl(); err != nil { 31 | t.Error(err) 32 | } 33 | 34 | allLinks, err := crawl.GetLinks() 35 | if err != nil { 36 | t.Error(err) 37 | } 38 | if len(allLinks) < 30 { 39 | t.Errorf("Only got %d links", len(allLinks)) 40 | } 41 | 42 | // Reload the crawler 43 | conn.DeleteDatabase() 44 | crawl, err = New("http://rpiai.com/", boltdbserver, true) 45 | if err != nil { 46 | t.Error(err) 47 | } 48 | 49 | err = crawl.Download(allLinks) 50 | if err != nil { 51 | t.Errorf("Problem downloading: %s", err.Error()) 52 | } 53 | 54 | err = crawl.Dump() 55 | if err != nil { 56 | t.Errorf("Problem dumping: %s", err.Error()) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/schollz/linkcrawler/f20aff4b8896c5b5c973de61609a7f877347da6a/logo.png -------------------------------------------------------------------------------- /main.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "strings" 8 | "time" 9 | 10 | "github.com/schollz/linkcrawler/lib" 11 | "gopkg.in/urfave/cli.v1" 12 | ) 13 | 14 | var version string 15 | 16 | func main() { 17 | app := cli.NewApp() 18 | app.Name = "linkcrawler" 19 | app.Usage = "crawl a site for links, or download a list of sites" 20 | app.Version = version 21 | app.Compiled = time.Now() 22 | app.Action = func(c *cli.Context) error { 23 | cli.ShowSubcommandHelp(c) 24 | return nil 25 | } 26 | app.Flags = []cli.Flag{ 27 | cli.StringFlag{ 28 | Name: "server, s", 29 | Value: "", 30 | Usage: "boltdb server instance [required]", 31 | }, 32 | cli.StringFlag{ 33 | Name: "useragent", 34 | Value: "", 35 | Usage: "supply a User-Agent string to be used", 36 | }, 37 | cli.StringFlag{ 38 | Name: "exclude, e", 39 | Value: "", 40 | Usage: "comma-delimited phrases that must NOT be in URL", 41 | }, 42 | cli.StringFlag{ 43 | Name: "include, i", 44 | Value: "", 45 | Usage: "comma-delimited phrases that must be in URL", 46 | }, 47 | cli.IntFlag{ 48 | Name: "workers,w", 49 | Value: 100, 50 | Usage: "Max number of workers", 51 | }, 52 | cli.IntFlag{ 53 | Name: "conn,c", 54 | Value: 100, 55 | Usage: "Max number of connections in HTTP pool", 56 | }, 57 | cli.IntFlag{ 58 | Name: "stats", 59 | Value: 1, 60 | Usage: "Print stats every `X` seconds", 61 | }, 62 | cli.IntFlag{ 63 | Name: "trash-limit", 64 | Value: 5, 65 | Usage: "Exit if more than `X` URLs are trashed per stats check", 66 | }, 67 | cli.BoolFlag{ 68 | Name: "verbose", 69 | Usage: "turn on logging", 70 | }, 71 | cli.StringFlag{ 72 | Name: "prefix, p", 73 | Value: "", 74 | Usage: "override file prefix", 75 | }, 76 | cli.BoolFlag{ 77 | Name: "redo", 78 | Usage: "move doing to todo", 79 | }, 80 | cli.BoolFlag{ 81 | Name: "tor", 82 | Usage: "use tor proxy", 83 | }, 84 | } 85 | app.Commands = []cli.Command{ 86 | { 87 | Name: "crawl", 88 | Aliases: []string{"c"}, 89 | Usage: "crawl a website and get a list of links", 90 | Action: func(c *cli.Context) error { 91 | url := "" 92 | if c.NArg() > 0 { 93 | url = c.Args().Get(0) 94 | } else { 95 | fmt.Println("Must specify url to crawl") 96 | return nil 97 | } 98 | 99 | if c.GlobalString("server") == "" { 100 | fmt.Println("Must specify BoltDB server ") 101 | return nil 102 | } 103 | 104 | fmt.Println(url) 105 | 106 | // Setup crawler to crawl 107 | fmt.Println("Setting up crawler...") 108 | craw, err := crawler.New(url, c.GlobalString("server"), c.GlobalBool("verbose")) 109 | if err != nil { 110 | return err 111 | } 112 | if c.GlobalString("prefix") != "" { 113 | craw.FilePrefix = c.GlobalString("prefix") 114 | } 115 | craw.MaxNumberConnections = c.GlobalInt("conn") 116 | craw.MaxNumberWorkers = c.GlobalInt("workers") 117 | craw.Verbose = c.GlobalBool("verbose") 118 | craw.TimeIntervalToPrintStats = c.GlobalInt("stats") 119 | craw.UserAgent = c.GlobalString("useragent") 120 | craw.TrashLimit = c.GlobalInt("trash-limit") 121 | craw.UseTor = c.GlobalBool("tor") 122 | if len(c.GlobalString("include")) > 0 { 123 | craw.KeywordsToInclude = strings.Split(strings.ToLower(c.GlobalString("include")), ",") 124 | } 125 | if len(c.GlobalString("exclude")) > 0 { 126 | craw.KeywordsToExclude = strings.Split(strings.ToLower(c.GlobalString("exclude")), ",") 127 | } 128 | if c.GlobalBool("redo") { 129 | craw.ResetDoing() 130 | } 131 | fmt.Printf("Starting crawl using DB %s\n", craw.Name()) 132 | err = craw.Crawl() 133 | if err
!= nil { 134 | return err 135 | } 136 | return craw.Dump() 137 | }, 138 | }, 139 | { 140 | Name: "download", 141 | Aliases: []string{"d"}, 142 | Usage: "download a list of websites", 143 | Action: func(c *cli.Context) error { 144 | fileWithListOfURLS := "" 145 | if c.NArg() > 0 { 146 | fileWithListOfURLS = c.Args().Get(0) 147 | } else { 148 | fmt.Println("Must specify file containing list of URLs") 149 | return nil 150 | } 151 | 152 | if c.GlobalString("server") == "" { 153 | fmt.Println("Must specify BoltDB server ") 154 | return nil 155 | } 156 | 157 | b, err := ioutil.ReadFile(fileWithListOfURLS) 158 | if err != nil { 159 | return err 160 | } 161 | links := strings.Split(string(b), "\n") 162 | 163 | // Setup crawler to download 164 | craw, err := crawler.New(fileWithListOfURLS, c.GlobalString("server"), c.GlobalBool("verbose")) 165 | if err != nil { 166 | return err 167 | } 168 | if c.GlobalString("prefix") != "" { 169 | craw.FilePrefix = c.GlobalString("prefix") 170 | } 171 | craw.MaxNumberConnections = c.GlobalInt("conn") 172 | craw.MaxNumberWorkers = c.GlobalInt("workers") 173 | craw.Verbose = c.GlobalBool("verbose") 174 | craw.TimeIntervalToPrintStats = c.GlobalInt("stats") 175 | craw.UserAgent = c.GlobalString("useragent") 176 | craw.TrashLimit = c.GlobalInt("trash-limit") 177 | craw.UseTor = c.GlobalBool("tor") 178 | if len(c.GlobalString("include")) > 0 { 179 | craw.KeywordsToInclude = strings.Split(strings.ToLower(c.GlobalString("include")), ",") 180 | } 181 | if len(c.GlobalString("exclude")) > 0 { 182 | craw.KeywordsToExclude = strings.Split(strings.ToLower(c.GlobalString("exclude")), ",") 183 | } 184 | if c.GlobalBool("redo") { 185 | craw.ResetDoing() 186 | } 187 | err = craw.Download(links) 188 | if err != nil { 189 | fmt.Printf("Error downloading: %s", err.Error()) 190 | return err 191 | } 192 | fmt.Println("Finished downloading") 193 | return nil 194 | }, 195 | }, 196 | { 197 | Name: "dump", 198 | Usage: "dump a list of links crawled from db", 199 | Action: func(c *cli.Context) error { 200 | url := "" 201 | if c.NArg() > 0 { 202 | url = c.Args().Get(0) 203 | } else { 204 | fmt.Println("Must specify url to dump") 205 | return nil 206 | } 207 | 208 | if c.GlobalString("server") == "" { 209 | fmt.Println("Must specify BoltDB server ") 210 | return nil 211 | } 212 | 213 | // Setup crawler to crawl 214 | craw, err := crawler.New(url, c.GlobalString("server"), c.GlobalBool("verbose")) 215 | if err != nil { 216 | return err 217 | } 218 | return craw.Dump() 219 | }, 220 | }, 221 | } 222 | 223 | app.Run(os.Args) 224 | 225 | } 226 | --------------------------------------------------------------------------------
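For library use outside the CLI, the `lib` package can also be driven directly, much as `lib/crawler_test.go` does. A minimal sketch, assuming a boltdb-server is already running at `http://localhost:8050` (the exclude keyword is only illustrative):

```go
package main

import (
	"fmt"
	"log"

	crawler "github.com/schollz/linkcrawler/lib"
)

func main() {
	// New(url, boltdbserver, trace) opens/creates the remote DB named after the URL.
	c, err := crawler.New("http://rpiai.com/", "http://localhost:8050", false)
	if err != nil {
		log.Fatal(err)
	}

	// Tune the worker/connection pools and filters before crawling.
	c.MaxNumberWorkers = 50
	c.MaxNumberConnections = 50
	c.KeywordsToExclude = []string{"wp-login"} // illustrative filter

	// Crawl pops URLs from "todo", fetches them, and posts newly found links back.
	if err := c.Crawl(); err != nil {
		log.Fatal(err)
	}

	// GetLinks returns everything across the todo/doing/done/trash buckets.
	links, err := c.GetLinks()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("collected %d links\n", len(links))

	// Dump writes the links to a Base32-named .txt file in the working directory.
	if err := c.Dump(); err != nil {
		log.Fatal(err)
	}
}
```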