├── .gitattributes
├── godownloadweb
├── deploy.sh
├── .gitignore
├── go.mod
├── commons
│   └── commons.go
├── LICENSE
├── .github
│   └── workflows
│       └── go.yml
├── README.md
├── sitemap
│   └── sitemap.go
├── version.go
├── go.sum
├── scraper
│   ├── save.go
│   ├── scrapper.go
│   └── helpers.go
├── dp
│   └── dp.go
└── main.go

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto

--------------------------------------------------------------------------------
/godownloadweb:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/webra2/chronodivide-downloader/HEAD/godownloadweb

--------------------------------------------------------------------------------
/deploy.sh:
--------------------------------------------------------------------------------
git add -A
if ! git diff-index --quiet HEAD; then
    git commit -m "Update downloaded site"
    git push origin main
fi

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
go-download-web

--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
module github.com/antsanchez/go-download-web

go 1.23

toolchain go1.24.5

require (
	github.com/chromedp/cdproto v0.0.0-20250403032234-65de8f5d025b
	github.com/chromedp/chromedp v0.13.7
	golang.org/x/net v0.0.0-20211209124913-491a49abca63
)

require (
	github.com/chromedp/sysutil v1.1.0 // indirect
	github.com/go-json-experiment/json v0.0.0-20250211171154-1ae217ad3535 // indirect
	github.com/gobwas/httphead v0.1.0 // indirect
	github.com/gobwas/pool v0.2.1 // indirect
	github.com/gobwas/ws v1.4.0 // indirect
	golang.org/x/sys v0.29.0 // indirect
)

--------------------------------------------------------------------------------
/commons/commons.go:
--------------------------------------------------------------------------------
package commons

type Conf struct {
	// Root is the root domain of the site
	Root string

	// Path is the local directory where the website is saved
	Path string
}

func New(root, path string) Conf {
	return Conf{
		Root: root,
		Path: path,
	}
}

// IsInSlice checks whether the given string is in the slice
func IsInSlice(search string, array []string) bool {
	for _, val := range array {
		if val == search {
			return true
		}
	}

	return false
}

// IsFinal checks whether the URL is a folder-like path, such as example.com/path/
func IsFinal(url string) bool {
	return url != "" && url[len(url)-1] == '/'
}

// RemoveLastSlash removes the trailing slash, if any
func RemoveLastSlash(url string) string {
	if IsFinal(url) {
		return url[:len(url)-1]
	}
	return url
}

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 Antonio Sánchez

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/.github/workflows/go.yml:
--------------------------------------------------------------------------------
# This workflow will build a golang project
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go

name: Go

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '1/5 * * * *'

jobs:

  build:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v4
      with:
        go-version: '1.23'

    - name: Build
      run: go mod tidy && go build -v ./

    - name: Test
      run: go test -v ./...
    - name: mkwebsitedir
      run: mkdir website
    - uses: actions/checkout@v3
      with:
        repository: 'webra2/webra2.github.io'
        path: website
        token: ${{ secrets.API_TOKEN_GITHUB }}
    - name: download-cd
      run: ./go-download-web
    - name: push
      run: |
        cd website
        ls -R -l
        git config --local user.email "webra2@webra2.com"
        git config --local user.name "webra2"
        bash ../deploy.sh

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Credit

First of all, thanks to EL President, who created the great project Chronodivide: Red Alert 2 running in the browser.

In most cases, you only need to visit [chronodivide](https://game.chronodivide.com/) to play.

If you enjoy the game and would like to offer your support, please consider making a [donation](https://chronodivide.com/#donate).


# Chronodivide Downloader

An automatic download tool that relies on go-download-web and GitHub Actions to download Chronodivide, including CSS, JS, and other assets.
Written in Go and deployed to GitHub Pages. Try it at: https://webra2.github.io

## Project status
There are still some to-dos, and some refactoring is needed, but the app is already functional.


## Usage
Just run the GitHub Actions workflow; it builds the downloader, runs it against the game site, and pushes the result to the GitHub Pages repository.
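
You can also run the downloader locally. A minimal sketch, assuming Go 1.23+ and a local Chrome/Chromium install (the flag names come from `main.go`, and every flag is optional):

```sh
go build -v ./
./go-download-web -u https://game.chronodivide.com/ -path ./website -s 3
```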

## Web Downloader Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.

Please make sure to update tests as appropriate.

## Based on Web Downloader
Original author: [Antonio Sánchez](https://asanchez.dev)

## License
[MIT](https://choosealicense.com/licenses/mit/)

--------------------------------------------------------------------------------
/sitemap/sitemap.go:
--------------------------------------------------------------------------------
package sitemap

import (
	"encoding/xml"
	"os"
)

const SitemapFile = "sitemap.xml"

// URLSitemap is the model for every url entry on the sitemap
type URLSitemap struct {
	XMLName xml.Name `xml:"url"`
	Loc     string   `xml:"loc"`
}

func appendBytes(appendTo []byte, toAppend []byte) []byte {
	return append(appendTo, toAppend...)
}

func sitemapPath(filename string) string {
	if filename != "" {
		return filename + "/" + SitemapFile
	}
	return SitemapFile
}

// CreateSitemap creates the sitemap
func CreateSitemap(links []string, filename string) error {
	filename = sitemapPath(filename)

	var total = []byte(xml.Header)
	total = appendBytes(total, []byte(`<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">`))
	total = appendBytes(total, []byte("\n"))

	for _, val := range links {
		pos := URLSitemap{Loc: val}
		output, err := xml.MarshalIndent(pos, " ", " ")
		if err != nil {
			return err
		}
		total = appendBytes(total, output)
		total = appendBytes(total, []byte("\n"))
	}

	total = appendBytes(total, []byte(`</urlset>`))

	return os.WriteFile(filename, total, 0644)
}

--------------------------------------------------------------------------------
/version.go:
--------------------------------------------------------------------------------
package main

import (
	"bytes"
	"io"
	"net/http"
	"os"
	"path"
)

func checkVersion(cdUrl string, subPath string) error {
	resp, err := http.Get(cdUrl + "/old/versions.json")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	nowVersion, err := os.ReadFile(path.Join(subPath, "VERSION"))
	if err != nil {
		// A missing VERSION file simply means nothing has been downloaded yet.
		nowVersion = nil
	}

	newVersion, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	// Same version: nothing left to do, so stop here.
	if bytes.Equal(nowVersion, newVersion) {
		os.Exit(0)
	}
	return os.WriteFile(path.Join(subPath, "VERSION"), newVersion, 0644)
}

func updateServiceINI(cdUrl string, subPath string) error {
	resp, err := http.Get(cdUrl + "/servers.ini")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	nowVersion, err := os.ReadFile(path.Join(subPath, "servers.ini"))
	if err != nil {
		// A missing servers.ini simply means nothing has been downloaded yet.
		nowVersion = nil
	}

	newVersion, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	// servers.ini unchanged: nothing to update.
	if bytes.Equal(nowVersion, newVersion) {
		return nil
	}
	return os.WriteFile(path.Join(subPath, "servers.ini"), newVersion, 0644)
}

--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
github.com/chromedp/cdproto v0.0.0-20250403032234-65de8f5d025b h1:jJmiCljLNTaq/O1ju9Bzz2MPpFlmiTn0F7LwCoeDZVw=
github.com/chromedp/cdproto v0.0.0-20250403032234-65de8f5d025b/go.mod h1:NItd7aLkcfOA/dcMXvl8p1u+lQqioRMq/SqDp71Pb/k=
github.com/chromedp/chromedp v0.13.7 h1:vt+mslxscyvUr58eC+6DLSeeo74jpV/HI2nWetjv/W4=
github.com/chromedp/chromedp v0.13.7/go.mod h1:h8GPP6ZtLMLsU8zFbTcb7ZDGCvCy8j/vRoFmRltQx9A=
github.com/chromedp/sysutil v1.1.0 h1:PUFNv5EcprjqXZD9nJb9b/c9ibAbxiYo4exNWZyipwM=
github.com/chromedp/sysutil v1.1.0/go.mod h1:WiThHUdltqCNKGc4gaU50XgYjwjYIhKWoHGPTUfWTJ8=
github.com/go-json-experiment/json v0.0.0-20250211171154-1ae217ad3535 h1:yE7argOs92u+sSCRgqqe6eF+cDaVhSPlioy1UkA0p/w=
github.com/go-json-experiment/json v0.0.0-20250211171154-1ae217ad3535/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde h1:x0TT0RDC7UhAVbbWWBzr41ElhJx5tXPWkIHA2HWPRuw=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=

--------------------------------------------------------------------------------
/scraper/save.go:
--------------------------------------------------------------------------------
package scraper

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
	pathp "path"
	"strings"

	"github.com/antsanchez/go-download-web/commons"
)

// SaveAttachment downloads a single attachment to disk
func (s *Scraper) SaveAttachment(url string) (err error) {
	filepath := s.GetPath(url)
	fmt.Println("save:", url, filepath)
	if filepath == "" {
		return
	}

	// Get last path
	if s.hasPaths(filepath) {
		if commons.IsFinal(filepath) {
			// if the url is a final url in a folder, like example.com/path/
			// this will create the folder "path" and, inside, the file
			filepath = commons.RemoveLastSlash(filepath)
			url = commons.RemoveLastSlash(url)
		}

		path := s.getOnlyPath(filepath)
		if !s.exists(pathp.Join(s.Path, path)) {
			os.MkdirAll(pathp.Join(s.Path, path), 0755) // first create directory
		}
	}

	resp, err := http.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	f, err := os.Create(pathp.Join(s.Path, filepath))
	if err != nil {
		return
	}
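	// The response body below is consumed exactly once: config.ini is
	// rewritten in memory so the protocol-relative resource host becomes a
	// relative path, while every other file is streamed straight to disk.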
	defer f.Close()

	if strings.Contains(filepath, "config.ini") {
		content, _ := io.ReadAll(resp.Body)
		newContent := bytes.ReplaceAll(content, []byte("//gameres.chronodivide.com"), []byte("."))
		_, err = f.Write(newContent)

	} else {
		_, err = io.Copy(f, resp.Body)
	}

	return
}

// SaveHTML saves a rendered HTML page to disk
func (s *Scraper) SaveHTML(url string, html string) (err error) {
	// Only the start page is saved this way, so everything goes to /index.html.
	filepath := "/index.html"

	if s.hasPaths(filepath) {
		if commons.IsFinal(filepath) {
			// if the url is a final url in a folder, like example.com/path
			// this will create the folder "path" and, inside, the index.html file
			if !s.exists(pathp.Join(s.Path, filepath)) {
				os.MkdirAll(pathp.Join(s.Path, filepath), 0755) // first create directory
				filepath = filepath + "index.html"
			}
		} else {
			// if the url is not a final url in a folder, like example.com/path/bum.html
			// this will create the folder "path" and, inside, the bum.html file
			path := s.getOnlyPath(filepath)
			if !s.exists(pathp.Join(s.Path, path)) {
				os.MkdirAll(pathp.Join(s.Path, path), 0755) // first create directory
			}
		}
	}

	f, err := os.Create(pathp.Join(s.Path, filepath))
	if err != nil {
		return
	}
	defer f.Close()

	if s.NewDomain != "" && s.OldDomain != s.NewDomain {
		newStr := strings.ReplaceAll(html, s.OldDomain, s.NewDomain)
		newContent := bytes.NewBufferString(newStr)
		_, err = io.Copy(f, newContent)
	} else {
		_, err = io.Copy(f, bytes.NewBufferString(html))
	}

	return
}

--------------------------------------------------------------------------------
/dp/dp.go:
--------------------------------------------------------------------------------
package dp

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/chromedp/cdproto/cdp"
	"github.com/chromedp/cdproto/network"
	"github.com/chromedp/cdproto/runtime"
	"github.com/chromedp/chromedp"
)

// RecordNetwork drives the game page in a browser and records every URL it requests
func RecordNetwork(cdUrl string) []string {
	dir := os.TempDir()
	urls := make([]string, 0)

	opts := append(chromedp.DefaultExecAllocatorOptions[:],
		chromedp.DisableGPU,
		chromedp.NoDefaultBrowserCheck,
		//chromedp.Flag("headless", false),
		chromedp.Flag("no-sandbox", true),
		chromedp.Flag("ignore-certificate-errors", true),
		chromedp.Flag("window-size", "800,600"),
		chromedp.UserDataDir(dir),
	)

	allocCtx, cancel := chromedp.NewExecAllocator(context.Background(), opts...)
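	// chromedp contexts are layered: the exec allocator above owns the
	// browser process, NewContext below derives a browser tab from it, and
	// context.WithTimeout bounds the whole recording session.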
	defer cancel()

	// also set up a custom logger
	taskCtx, cancel := chromedp.NewContext(allocCtx, chromedp.WithLogf(log.Printf))
	defer cancel()

	// create a timeout
	taskCtx, cancel = context.WithTimeout(taskCtx, 1000*time.Second)
	defer cancel()

	// ensure that the browser process is started
	if err := chromedp.Run(taskCtx); err != nil {
		panic(err)
	}

	// listen for network events
	listenForNetworkEvent(taskCtx, &urls)

	err := chromedp.Run(taskCtx,
		network.Enable(),
		chromedp.Navigate(cdUrl),
		chromedp.WaitVisible(`.message-box-content`, chromedp.ByQuery),
		chromedp.Sleep(time.Second),
		chromedp.Evaluate("document.querySelector('.close-button').click()", nil),
		chromedp.ActionFunc(func(ctx context.Context) error {
			log.Println("close menu")
			return nil
		}),
		chromedp.Sleep(time.Second),
		chromedp.WaitVisible(`.dialog-button`, chromedp.ByQuery),
		chromedp.Evaluate("document.querySelectorAll('.dialog-button')[1].click()", nil),
		chromedp.ActionFunc(func(ctx context.Context) error {
			log.Println("close dialog")
			return nil
		}),
		chromedp.Sleep(time.Second),
		chromedp.WaitVisible(".menu-button", chromedp.ByQuery),
		chromedp.QueryAfter(".menu-button", func(ctx context.Context, execCtx runtime.ExecutionContextID, nodes ...*cdp.Node) error {
			if len(nodes) < 3 {
				return fmt.Errorf("selector %q returned fewer than 3 nodes", ".menu-button")
			}
			return chromedp.MouseClickNode(nodes[2]).Do(ctx)
		}, chromedp.ByQueryAll),
		chromedp.Sleep(time.Second),
		chromedp.WaitVisible(".player-slot", chromedp.ByQuery),
		chromedp.Sleep(time.Second),
		chromedp.Click(".menu-button", chromedp.ByQuery),
		chromedp.Sleep(time.Second*20),
	)
	if err != nil {
		log.Println("dp error ", err)
	}
	return urls
}

// listenForNetworkEvent records the URL of every request the page sends
func listenForNetworkEvent(ctx context.Context, urls *[]string) {
	chromedp.ListenTarget(ctx, func(ev interface{}) {
		switch ev := ev.(type) {
		case *network.EventRequestWillBeSent:
			*urls = append(*urls, ev.Request.URL)
		}
	})
}

--------------------------------------------------------------------------------
/scraper/scrapper.go:
--------------------------------------------------------------------------------
package scraper

import (
	"bytes"
	"fmt"
	"golang.org/x/net/html"
	"log"
	"net/http"
	"strings"
	"time"
)

type Scraper struct {
	// Original domain
	OldDomain string

	// New domain to rewrite the downloaded HTML sites to
	NewDomain string

	// Root domain
	Root string

	// Path where to save the downloads
	Path string

	// Use query parameters on URLs
	UseQueries bool
}

// Links model
type Links struct {
	Href string
}

// Page model
type Page struct {
	URL       string
	Canonical string
	Links     []Links
	NoIndex   bool
	HTML      string
}

// getLinks extracts the links, attachments, and version from an HTML page
func (s *Scraper) getLinks(domain string) (page Page, attachments []string, version string, err error) {
	req, _ := http.NewRequest("GET", domain, nil)

	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36")

	client := &http.Client{
		Timeout: 15 * time.Second,
	}
	resp, err1 := client.Do(req)
	if err1 != nil {
		log.Println(err1)
		return
	}
	defer resp.Body.Close()

	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	page.HTML = buf.String()

	doc, err := html.Parse(buf)
	if err != nil {
		log.Println(err)
		return
	}

	page.URL = domain

	foundMeta := false

	var f func(*html.Node)
	f = func(n *html.Node) {
		for _, a := range n.Attr {
			if a.Key == "style" {
				if strings.Contains(a.Val, "url(") {
					found := s.getURLEmbeeded(a.Val)
					if found != "" {
						link, err := resp.Request.URL.Parse(found)
						if err == nil {
							foundLink := s.sanitizeURL(link.String())
							if s.isValidAttachment(foundLink) {
								attachments = append(attachments, foundLink)
							}
						}
					}
				}
			}
		}

		if n.Type == html.ElementNode && n.Data == "meta" {
			for _, a := range n.Attr {
				if a.Key == "name" && a.Val == "robots" {
					foundMeta = true
				}
				if foundMeta {
					if a.Key == "content" && strings.Contains(a.Val, "noindex") {
						page.NoIndex = true
					}
				}
			}
		}

		// Get CSS and AMP
		if n.Type == html.ElementNode && n.Data == "link" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					link, err := resp.Request.URL.Parse(a.Val)
					if err == nil {
						foundLink := s.sanitizeURL(link.String())
						if s.isValidAttachment(foundLink) {
							attachments = append(attachments, foundLink)
						} else if s.isValidLink(foundLink) {
							page.Links = append(page.Links, Links{Href: foundLink})
						}
					}
				}
			}
		}

		// Get JS Scripts
		if n.Type == html.ElementNode && n.Data == "script" {
			for _, a := range n.Attr {
				if a.Key == "src" {
					link, err := resp.Request.URL.Parse(a.Val)
					if err == nil {
						if strings.Contains(link.String(), "dist") {
							version = s.GetVersion(link.String())
						}
						foundLink := s.sanitizeURL(link.String())
						if s.isValidAttachment(foundLink) {
							attachments = append(attachments, foundLink)
						}
					}
				}
			}
		}

		// Get Images
		if n.Type == html.ElementNode && n.Data == "img" {
			for _, a := range n.Attr {
				if a.Key == "src" {
					link, err := resp.Request.URL.Parse(a.Val)
					if err == nil {
						foundLink := s.sanitizeURL(link.String())
						if s.isValidAttachment(foundLink) {
							attachments = append(attachments, foundLink)
						}
					}
				}
				if a.Key == "srcset" {
					links := strings.Split(a.Val, " ")
					for _, val := range links {
						link, err := resp.Request.URL.Parse(val)
						if err == nil {
							foundLink := s.sanitizeURL(link.String())
							if s.isValidAttachment(foundLink) {
								attachments = append(attachments, foundLink)
							}
						}
					}
				}
			}
		}

		// Get links
		if n.Type == html.ElementNode && n.Data == "a" {
			ok := false
			newLink := Links{}

			for _, a := range n.Attr {
				if a.Key == "href" {
					link, err := resp.Request.URL.Parse(a.Val)
					if err == nil {
						foundLink := s.sanitizeURL(link.String())
						if s.isValidLink(foundLink) {
							ok = true
							newLink.Href = foundLink
						} else if s.isValidAttachment(foundLink) {
							attachments = append(attachments, foundLink)
						}
					}
				}

			}

			if ok && !s.doesLinkExist(newLink, page.Links) {
				page.Links = append(page.Links, newLink)
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			f(c)
		}
	}
	f(doc)
	return
}

// TakeLinks takes the links from the given site
func (s *Scraper) TakeLinks(
	toScan string,
	started chan int,
	finished chan int,
	scanning chan int,
	newLinks chan []Links,
	pages chan Page,
	attachments chan []string,
	v *string,
) {
	started <- 1
	scanning <- 1
	defer func() {
		<-scanning
		finished <- 1
		fmt.Printf("Started: %6d - Finished %6d\n", len(started), len(finished))
	}()

	// Get links
	page, attached, version, err := s.getLinks(toScan)
	if err != nil {
		fmt.Println(err)
		return
	}
	if version != "" {
		*v = version
	}

	// Save Page
	pages <- page

	attachments <- attached
	// Save links
	newLinks <- page.Links
}

--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
// Copyright 2021 Antonio Sanchez (asanchez.dev). All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"errors"
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/antsanchez/go-download-web/dp"
	"github.com/antsanchez/go-download-web/scraper"
	"github.com/antsanchez/go-download-web/sitemap"
)

const CDUrl = "https://game.chronodivide.com/"

var CDResUrl = "https://gameres.chronodivide.com/"

type Flags struct {
	// Domain to download from
	Domain *string

	// New Domain to be set
	NewDomain *string

	// Number of concurrent queries
	Simultaneous *int

	// Use query parameters on URLs
	UseQueries *bool

	// Path where to download the files to
	Path *string
}

func parseFlags() (flags Flags, err error) {
	flags.Domain = flag.String("u", "", "Chronodivide source site URL")
	flags.NewDomain = flag.String("new", "", "New URL")
	flags.Simultaneous = flag.Int("s", 3, "Number of concurrent connections")
	flags.UseQueries = flag.Bool("q", false, "Keep query strings on URLs")
	flags.Path = flag.String("path", "./website", "Local path for downloaded files")
	flag.Parse()

	if *flags.Domain == "" {
		*flags.Domain = CDUrl
	}

	if *flags.Simultaneous <= 0 {
		err = errors.New("the number of concurrent connections must be at least 1")
		return
	}

	log.Println("Domain:", *flags.Domain)
	if *flags.NewDomain != "" {
		log.Println("New Domain: ", *flags.NewDomain)
	}
	log.Println("Simultaneous:", *flags.Simultaneous)
	log.Println("Use Queries:", *flags.UseQueries)

	return
}

func main() {

	flags, err := parseFlags()
	if err != nil {
		log.Fatal(err)
	}

	// Create the directory for the downloaded website first, so that
	// updateServiceINI and checkVersion below can write into it.
	err = os.MkdirAll(*flags.Path, 0755)
	if err != nil {
		log.Println(*flags.Path)
		log.Fatal(err)
	}

	err = updateServiceINI(*flags.Domain, *flags.Path)
	if err != nil {
		log.Println(err)
	}
	err = checkVersion(*flags.Domain, *flags.Path)
	if err != nil {
		log.Println(err)
	}

	req, _ := http.NewRequest("GET", *flags.Domain, nil)

	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36")

	client := &http.Client{
		Timeout: 15 * time.Second,
	}
	_, err = client.Do(req)
	if err != nil {
		log.Printf("Network error: %v. Please check your proxy settings and network connectivity.", err)
		return
	}

	scanUrls := dp.RecordNetwork(*flags.Domain)

	scanning := make(chan int, *flags.Simultaneous) // Semaphore
	newLinks := make(chan []scraper.Links, 100000)  // New links to scan
	pages := make(chan scraper.Page, 100000)        // Pages scanned
	attachments := make(chan []string, 100000)      // Attachments
	started := make(chan int, 100000)               // Crawls started
	finished := make(chan int, 100000)              // Crawls finished
	var version string

	var indexed, forSitemap, files []string

	seen := make(map[string]bool)

	start := time.Now()

	defer func() {
		close(newLinks)
		close(pages)
		close(started)
		close(finished)
		close(scanning)

		log.Printf("\nDuration: %s\n", time.Since(start))
		log.Printf("Number of pages: %6d\n", len(indexed))
	}()

	// First call to the domain
	resp, err := http.Get(*flags.Domain)
	if err != nil {
		log.Println("Domain could not be reached!")
		return
	}
	defer resp.Body.Close()

	s := scraper.Scraper{
		OldDomain:  *flags.Domain,
		NewDomain:  *flags.NewDomain,
		Root:       resp.Request.URL.String(),
		Path:       *flags.Path,
		UseQueries: *flags.UseQueries,
	}

	log.Println("\nStarting site download...")

	// Take the links from the start site
	s.TakeLinks(*flags.Domain, started, finished, scanning, newLinks, pages, attachments, &version)
	seen[*flags.Domain] = true

	found := false
	for {
		select {
		case links := <-newLinks:
			for _, link := range links {
				if !seen[link.Href] {
					seen[link.Href] = true
					go s.TakeLinks(link.Href, started, finished, scanning, newLinks, pages, attachments, &version)
				}
			}
			found = true
		case page := <-pages:
			found = true
			if !s.IsURLInSlice(page.URL, indexed) {
				indexed = append(indexed, page.URL)
				go func() {
					err := s.SaveHTML(page.URL, page.HTML)
					if err != nil {
						log.Println(err)
					}
				}()
			}

			if !page.NoIndex {
				if !s.IsURLInSlice(page.URL, forSitemap) {
					forSitemap = append(forSitemap, page.URL)
				}
			}
		case attachment := <-attachments:
			found = true
			for _, link := range attachment {
				if !s.IsURLInSlice(link, files) {
					files = append(files, link)
				}
			}
		}

		// Break the for loop once all scans are finished
		if found && len(started) > 0 && len(scanning) == 0 && len(started) == len(finished) && len(attachments) == 0 && len(pages) == 0 && len(newLinks) == 0 {
			break
		}
	}

	log.Println("\nFinished scraping the site, version:", version)

	for _, scanUrl := range scanUrls {
		files = append(files, s.RemoveQuery(scanUrl))
	}

	// other files
	otherFiles := []string{
		"/lib/ffmpeg-core.js",
		"/lib/ffmpeg-core.wasm",
		"/lib/ffmpeg-core.worker.js",
		"/dist/spbots.min.js",
		"/res/ra2cd.mix",
		"/dist/ffmpeg.min.js",
		"/dist/7zz.js",
		"/dist/7zz.wasm",
		"/dist/ffmped-core.js",
		"/dist/ffmpeg-core.wasm",
		"/servers.ini",
		"/res/locale/zh-CN.json",
		"/res/img/cd-logo.png",
		"/dist/workerVxl.js",
	}
	for _, ofile := range otherFiles {
		files = append(files, fmt.Sprintf("%s%s", strings.TrimSuffix(*flags.Domain, "/"), ofile))
	}

	log.Println("\nDownloading attachments...", len(files))
	for _, attachedFile := range files {
		if !strings.Contains(attachedFile, *flags.Domain) && !strings.Contains(attachedFile, CDResUrl) {
			continue
		}
		// skip ini files; servers.ini is handled separately by updateServiceINI
		if strings.Contains(attachedFile, ".ini") {
			continue
		}
		if err := s.SaveAttachment(attachedFile); err != nil {
			log.Println(err)
		}
		if strings.Contains(attachedFile, "manifest.json") {
			moreAttachments := s.GetManifest(attachedFile)
			for _, link := range moreAttachments {
				if !s.IsURLInSlice(link, files) {
					log.Println("Appended Manifest: ", link)
					files = append(files, link)
					// download inline so the program cannot exit before the fetch completes
					if err := s.SaveAttachment(link); err != nil {
						log.Println(err)
					}
				}
			}
		}
		if strings.Contains(attachedFile, ".css") {
			moreAttachments := s.GetInsideAttachments(attachedFile)
			for _, link := range moreAttachments {
				if !s.IsURLInSlice(link, files) {
					log.Println("Appended: ", link)
					files = append(files, link)
					// download inline so the program cannot exit before the fetch completes
					if err := s.SaveAttachment(link); err != nil {
						log.Println(err)
					}
				}
			}
		}
	}

	log.Println("Creating Sitemap...")
	err = sitemap.CreateSitemap(forSitemap, *flags.Path)
	if err != nil {
		log.Fatal(err)
	}

	log.Println("Finished.")
}

--------------------------------------------------------------------------------
/scraper/helpers.go:
--------------------------------------------------------------------------------
package scraper

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"regexp"
	"strings"

	"github.com/antsanchez/go-download-web/commons"
)

var (
	extensions = []string{".png", ".jpg", ".jpeg", ".json", ".js", ".tiff", ".pdf", ".txt", ".gif", ".psd", ".ai", ".dwg", ".bmp", ".zip", ".tar", ".gzip", ".svg", ".avi", ".mov", ".xml", ".mp3", ".wav", ".mid", ".ogg", ".acc", ".ac3", ".mp4", ".ogm", ".cda", ".mpeg", ".swf", ".acg", ".bat", ".ttf", ".msi", ".lnk", ".dll", ".db", ".css"}
	falseURLs  = []string{"mailto:", "javascript:", "tel:", "whatsapp:", "callto:", "wtai:", "sms:", "market:", "geopoint:", "ymsgr:", "msnim:", "gtalk:", "skype:"}
	validURL   = regexp.MustCompile(`\(([^()]*)\)`)
	validCSS   = regexp.MustCompile(`\{(\s*?.*?)*?\}`)
)

// isInternLink checks whether a link is internal
func (s *Scraper) isInternLink(link string) bool {
	return strings.HasPrefix(link, s.Root)
}

// RemoveQuery removes the query parameters from the given link
func (s *Scraper) RemoveQuery(link string) string {
	return strings.Split(link, "?")[0]
}

// isStart checks whether the link is the start site
func (s *Scraper) isStart(link string) bool {
	return link == s.Root
}

func (s *Scraper) GetVersion(link string) string {
	u, _ := url.Parse(link)
	if u == nil {
		return ""
	}
	return u.Query().Get("v")
}

// sanitizeURL sanitizes a URL
func (s *Scraper) sanitizeURL(link string) string {
	for _, fal := range falseURLs {
		if strings.Contains(link, fal) {
			return ""
		}
	}

	link = strings.TrimSpace(link)

	//if string(link[len(link)-1]) != "/" {
	//	link =
link + "/" 60 | //} 61 | 62 | tram := strings.Split(link, "#")[0] 63 | 64 | if !s.UseQueries { 65 | tram = s.RemoveQuery(tram) 66 | } 67 | 68 | return tram 69 | } 70 | 71 | // IsValidExtension check if an extension is valid 72 | func (s *Scraper) IsValidExtension(link string) bool { 73 | for _, extension := range extensions { 74 | if strings.Contains(strings.ToLower(link), extension) { 75 | return false 76 | } 77 | } 78 | return true 79 | } 80 | 81 | // isValidLink checks if a link is valid 82 | func (s *Scraper) isValidLink(link string) bool { 83 | if s.isInternLink(link) && !s.isStart(link) && s.IsValidExtension(link) { 84 | return true 85 | } 86 | 87 | return false 88 | } 89 | 90 | // isValidAttachment checks if the link is a valid extension 91 | func (s *Scraper) isValidAttachment(link string) bool { 92 | if s.isInternLink(link) && !s.isStart(link) && !s.IsValidExtension(link) { 93 | return true 94 | } 95 | 96 | return false 97 | } 98 | 99 | // doesLinkExist checks if a link exists in a given slice 100 | func (s *Scraper) doesLinkExist(newLink Links, existingLinks []Links) (exists bool) { 101 | for _, val := range existingLinks { 102 | if strings.Compare(newLink.Href, val.Href) == 0 { 103 | exists = true 104 | } 105 | } 106 | 107 | return 108 | } 109 | 110 | // IsURLInSlice checks if a URL is in a slice 111 | func (s *Scraper) IsURLInSlice(search string, array []string) bool { 112 | withSlash := search[:len(search)-1] 113 | withoutSlash := search 114 | 115 | if string(search[len(search)-1]) == "/" { 116 | withSlash = search 117 | withoutSlash = search[:len(search)-1] 118 | } 119 | 120 | for _, val := range array { 121 | if val == withSlash || val == withoutSlash { 122 | return true 123 | } 124 | } 125 | 126 | return false 127 | } 128 | 129 | // IsLinkScanned checks if a link has already been scanned 130 | func (s *Scraper) IsLinkScanned(link string, scanned []string) (exists bool) { 131 | for _, val := range scanned { 132 | if strings.Compare(link, val) == 0 { 133 | exists = true 134 | } 135 | } 136 | 137 | return 138 | } 139 | 140 | // getURLEmbeeded from HTML or CSS 141 | func (s *Scraper) getURLEmbeeded(body string) (url string) { 142 | valid := validURL.Find([]byte(body)) 143 | if valid == nil { 144 | return 145 | } 146 | 147 | url = string(valid) 148 | 149 | // Remove () 150 | if string(url[0]) == `(` { 151 | url = url[1:] 152 | } 153 | if string(url[len(url)-1]) == `)` { 154 | url = url[:len(url)-1] 155 | } 156 | 157 | // Remove " 158 | if string(url[0]) == `"` { 159 | url = url[1:] 160 | } 161 | if string(url[len(url)-1]) == `"` { 162 | url = url[:len(url)-1] 163 | } 164 | 165 | // Remove ' 166 | if string(url[0]) == `'` { 167 | url = url[1:] 168 | } 169 | if string(url[len(url)-1]) == `'` { 170 | url = url[:len(url)-1] 171 | } 172 | 173 | // To do: check if this is a valid url 174 | 175 | return url 176 | } 177 | 178 | // 179 | 180 | type ChecksumData struct { 181 | Version int `json:"version"` 182 | Format string `json:"format"` 183 | Checksums map[string]uint32 `json:"checksums"` 184 | } 185 | 186 | func (s *Scraper) parseChecksumFile(domain string, reader io.Reader) ([]string, error) { 187 | // 读取文件内容 188 | fileContent, err := io.ReadAll(reader) 189 | if err != nil { 190 | return nil, err 191 | } 192 | 193 | // 解析 JSON 数据 194 | var checksumData ChecksumData 195 | err = json.Unmarshal(fileContent, &checksumData) 196 | if err != nil { 197 | return nil, err 198 | } 199 | 200 | // 提取文件列表 201 | var fileList []string 202 | for fileName := range checksumData.Checksums { 203 | 
		fileList = append(fileList, fmt.Sprintf("%s/v%d/%s", domain, checksumData.Version, fileName))
	}

	return fileList, nil
}

func (s *Scraper) GetManifest(url string) (res []string) {
	resp, err := http.Get(url)
	if err != nil {
		log.Println(err)
		return
	}
	defer resp.Body.Close()

	fileUrls, err := s.parseChecksumFile(s.GetDomain(url), resp.Body)
	if err != nil {
		log.Println(err)
	}
	log.Println("manifest files:", fileUrls)
	return fileUrls
}

// GetInsideAttachments gets the attachments referenced inside CSS files
func (s *Scraper) GetInsideAttachments(url string) (attachments []string) {
	if commons.IsFinal(url) {
		// if the url is a final url in a folder, like example.com/path/,
		// remove the trailing slash before downloading
		url = commons.RemoveLastSlash(url)
	}

	resp, err := http.Get(url)
	if err != nil {
		log.Println(err)
		return
	}
	defer resp.Body.Close()

	buf := new(bytes.Buffer)
	buf.ReadFrom(resp.Body)
	body := buf.String()

	if strings.Contains(body, "url(") {
		// Search the CSS rule blocks for url(...) references, e.g. backgrounds
		blocks := validCSS.FindAll([]byte(body), -1)
		for _, b := range blocks {
			rules := strings.Split(string(b), ";")
			for _, r := range rules {
				found := s.getURLEmbeeded(r)
				if found != "" {
					link, err := resp.Request.URL.Parse(found)
					if err == nil {
						foundLink := s.sanitizeURL(link.String())
						if s.isValidAttachment(foundLink) {
							attachments = append(attachments, foundLink)
						}
					}
				}
			}
		}
	}

	return
}

func (s *Scraper) hasPaths(url string) bool {
	return len(strings.Split(url, "/")) > 1
}

func (s *Scraper) getOnlyPath(url string) (path string) {
	paths := strings.Split(url, "/")
	if len(paths) <= 1 {
		return url
	}

	total := paths[:len(paths)-1]
	return strings.Join(total, "/")
}

// GetPath returns only the path, without scheme and domain, from the given link
func (s *Scraper) GetPath(link string) string {
	re := regexp.MustCompile(`^(https?://)?([^/]+)?(.*)$`)
	matches := re.FindStringSubmatch(link)
	if len(matches) != 4 {
		return ""
	}

	// Extract the path
	path := matches[3]
	if path == "" {
		return "/" // default to the root path when the path is empty
	}

	return path
}

// GetDomain returns the scheme and host, without the path, from the given link
func (s *Scraper) GetDomain(link string) string {
	re := regexp.MustCompile(`^(https?://)?([^/]+)?(.*)$`)
	matches := re.FindStringSubmatch(link)
	if len(matches) != 4 {
		return ""
	}

	// Extract the scheme and host
	return matches[1] + matches[2]
}

// exists returns whether the given file or directory exists
func (s *Scraper) exists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

--------------------------------------------------------------------------------
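
To see how the pieces fit together outside of the scraper itself, here is a minimal sketch that drives the `commons` and `sitemap` packages directly (a hypothetical standalone program, not a file in this repository):

```go
package main

import (
	"log"
	"os"

	"github.com/antsanchez/go-download-web/commons"
	"github.com/antsanchez/go-download-web/sitemap"
)

func main() {
	// Conf pairs the root domain with the local output directory.
	conf := commons.New("https://game.chronodivide.com", "./website")

	// CreateSitemap expects the output directory to exist already.
	if err := os.MkdirAll(conf.Path, 0755); err != nil {
		log.Fatal(err)
	}

	// Writes ./website/sitemap.xml with one <url> entry per link.
	links := []string{conf.Root + "/", conf.Root + "/index.html"}
	if err := sitemap.CreateSitemap(links, conf.Path); err != nil {
		log.Fatal(err)
	}
}
```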