├── .github └── workflows │ └── docker-publish.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── _assets └── cli.png ├── cmd ├── add.go ├── aws.go ├── cidr.go ├── cloudflare.go ├── definitions.go ├── digitalOcean.go ├── export.go ├── gcp.go ├── oci.go ├── process.go ├── profiling.go ├── root.go ├── scan.go ├── stats.go ├── utils.go └── worker.go ├── default.pgo ├── docs └── Export.md ├── go.mod ├── go.sum ├── k8s-cron-example.yaml ├── main.go └── scripts └── build.groovy /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | # This workflow uses actions that are not certified by GitHub. 4 | # They are provided by a third-party and are governed by 5 | # separate terms of service, privacy policy, and support 6 | # documentation. 7 | 8 | on: 9 | push: 10 | branches: [ "main", "dev" ] 11 | # Publish semver tags as releases. 12 | tags: [ 'v*.*.*' ] 13 | 14 | env: 15 | # Use docker.io for Docker Hub if empty 16 | REGISTRY: ghcr.io 17 | # github.repository as / 18 | IMAGE_NAME: ${{ github.repository }} 19 | 20 | jobs: 21 | build: 22 | runs-on: ubuntu-latest 23 | permissions: 24 | contents: read 25 | packages: write 26 | # This is used to complete the identity challenge 27 | # with sigstore/fulcio when running outside of PRs. 
28 | id-token: write 29 | 30 | steps: 31 | - name: Checkout repository 32 | uses: actions/checkout@v4 33 | 34 | # Install the cosign tool except on PR 35 | # https://github.com/sigstore/cosign-installer 36 | - name: Install cosign 37 | if: github.event_name != 'pull_request' 38 | uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 #v3.5.0 39 | with: 40 | cosign-release: 'v2.2.4' 41 | 42 | # Set up BuildKit Docker container builder to be able to build 43 | # multi-platform images and export cache 44 | # https://github.com/docker/setup-buildx-action 45 | - name: Set up Docker Buildx 46 | uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 47 | 48 | # Login against a Docker registry except on PR 49 | # https://github.com/docker/login-action 50 | - name: Log into registry ${{ env.REGISTRY }} 51 | if: github.event_name != 'pull_request' 52 | uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 53 | with: 54 | registry: ${{ env.REGISTRY }} 55 | username: ${{ github.actor }} 56 | password: ${{ secrets.GITHUB_TOKEN }} 57 | 58 | # Extract metadata (tags, labels) for Docker 59 | # https://github.com/docker/metadata-action 60 | - name: Extract Docker metadata 61 | id: meta 62 | uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 # v5.0.0 63 | with: 64 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 65 | 66 | # Build and push Docker image with Buildx (don't push on PR) 67 | # https://github.com/docker/build-push-action 68 | - name: Build and push Docker image 69 | id: build-and-push 70 | uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 71 | with: 72 | context: . 73 | push: ${{ github.event_name != 'pull_request' }} 74 | tags: ${{ steps.meta.outputs.tags }} 75 | labels: ${{ steps.meta.outputs.labels }} 76 | cache-from: type=gha 77 | cache-to: type=gha,mode=max 78 | 79 | # Sign the resulting Docker image digest except on PRs. 
80 | # This will only write to the public Rekor transparency log when the Docker 81 | # repository is public to avoid leaking data. If you would like to publish 82 | # transparency data even for private images, pass --force to cosign below. 83 | # https://github.com/sigstore/cosign 84 | - name: Sign the published Docker image 85 | if: ${{ github.event_name != 'pull_request' }} 86 | env: 87 | # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable 88 | TAGS: ${{ steps.meta.outputs.tags }} 89 | DIGEST: ${{ steps.build-and-push.outputs.digest }} 90 | # This step uses the identity token to provision an ephemeral certificate 91 | # against the sigstore community Fulcio instance. 92 | run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST} 93 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | *.log 3 | *.json 4 | *.csv 5 | output/ 6 | regions/ 7 | .obsidian 8 | sslsearch.prof 9 | .idea 10 | k8s/deployment.yaml 11 | profiles/ 12 | k8s/job.yaml 13 | k8s/ -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine AS builder 2 | WORKDIR /app 3 | COPY . 
/app/ 4 | RUN apk add make && make linux 5 | 6 | FROM alpine 7 | WORKDIR /app 8 | COPY --from=builder /app/bin/sslsearch_linux /app/sslsearch 9 | ENTRYPOINT ["/app/sslsearch"] 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright © 2023 Harsh Varagiya 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | commons: 2 | echo "building binary ..." 3 | mkdir -p bin 4 | go mod tidy 5 | 6 | linux: commons 7 | GOOS=linux CGO_ENABLED=0 go build -ldflags "-w -s" -pgo=default.pgo -o bin/sslsearch_linux . 
8 | 9 | darwin: commons 10 | GOOS=darwin CGO_ENABLED=0 go build -ldflags "-w -s" -pgo=default.pgo -o bin/sslsearch_darwin . 11 | 12 | race: commons 13 | GOOS=linux CGO_ENABLED=1 go build -race -ldflags "-w -s" -pgo=default.pgo -o bin/sslsearch_linux . 14 | 15 | all: linux darwin 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SSL Search 2 | 3 | Hunt SSL Certificates for interesting keywords on major cloud service providers. 4 | 5 | Details - https://medium.com/@harsh8v/ssl-search-a-tool-to-identify-infrastructure-and-discover-attack-surfaces-449c83269574 6 | 7 | ### Installation 8 | 9 | - install as a CLI tool for quick one-off scans 10 | ```bash 11 | git clone https://github.com/HarshVaragiya/sslsearch.git 12 | cd sslsearch 13 | go install 14 | ``` 15 | 16 | - run as a docker container 17 | ```bash 18 | docker run ghcr.io/harshvaragiya/sslsearch:main 19 | ``` 20 | 21 | ## Features 22 | - Search Cloud Service Providers IP Ranges / Given IP CIDR for keywords in SSL Certificate Subject / SANs 23 | - Perform JARM fingerprinting of https services identified 24 | - Grab all http response headers for webservers 25 | - Export integrations for Disk (local file), Elasticsearch, Cassandra / ScyllaDB 26 | 27 | More details about export targets at : [docs/Export.md](docs/Export.md) 28 | 29 | ![_assets/cli.png](_assets/cli.png) 30 | 31 | | Cloud Service Provider | Region String Example | JARM | Server Header | 32 | | --------------------------- | --------------------- | ---- | ------------- | 33 | | Amazon Web Services | us-east-1 | ✅ | ✅ | 34 | | Cloudflare | - | ✅ | ✅ | 35 | | Digital Ocean | NL_NL-NH_Amsterdam | ✅ | ✅ | 36 | | Google Cloud Platform | us-west4 | ✅ | ✅ | 37 | | Oracle Cloud Infrastructure | ca-montreal-1 | ✅ | ✅ | 38 | | Raw CIDR / IP Range | - | ✅ | ✅ | 39 | 40 | 41 | ## Potential uses 42 | 1. 
Identifying Infrastructure / Attack Surface for a given scope. 43 | 2. Bug Bounty recon. 44 | 3. Scanning a whole CSP Region & Identifying Servers / Services of interest along with SSL certificate information. 45 | 4. Scanning the whole Internet / Country's CIDRs & Collecting JARM fingerprints / Server Headers along with SSL certificate information. 46 | 5. Finding Mail / RDP / Other services belonging to a target that use x509 certificates to secure connections. 47 | 48 | 49 | ## Future plans (not a roadmap) 50 | - [x] Export integrations for cassandra cluster 51 | - [x] Export integration for elasticsearch 52 | - [x] Grab all HTTPS server response headers 53 | - [x] Added profile guided optimization (PGO) 54 | - [x] Added background worker (job queueing) using redis 55 | - [x] Add k8s example deployments, with cronjob for scheduling tasks, executing them 56 | - [x] CI/CD Setup with docker image 57 | - [ ] Export integration to NATS 58 | - [ ] QOL - Split codebase into different packages like libexport, libscan for better code quality. 59 | - [ ] Certificate information like issuer, signature, chain etc to also be stored for analysis. 60 | - [ ] Integration tests with test docker containers??. 
61 | 62 | 63 | ## References 64 | Ideated after following the following research projects : 65 | - https://github.com/jhaddix/awsScrape 66 | - https://github.com/femueller/cloud-ip-ranges 67 | - https://github.com/hdm/jarm-go 68 | - https://github.com/salesforce/jarm 69 | -------------------------------------------------------------------------------- /_assets/cli.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HarshVaragiya/sslsearch/cb389773e241defe1292f38b916c2b68bd957726/_assets/cli.png -------------------------------------------------------------------------------- /cmd/add.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 Harsh Varagiya 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in 12 | all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 20 | THE SOFTWARE. 
21 | */ 22 | package cmd 23 | 24 | import ( 25 | "context" 26 | "encoding/json" 27 | "fmt" 28 | "github.com/google/uuid" 29 | "os" 30 | "strings" 31 | "sync" 32 | "time" 33 | 34 | "github.com/redis/go-redis/v9" 35 | "github.com/spf13/cobra" 36 | ) 37 | 38 | var ( 39 | jobName string 40 | jobDescription string 41 | jobExportIndex string 42 | targetCsps string 43 | ) 44 | 45 | // addCmd represents the add command 46 | var addCmd = &cobra.Command{ 47 | Use: "add", 48 | Short: "add jobs to worker queue", 49 | Run: func(cmd *cobra.Command, args []string) { 50 | 51 | if redisHost == "" { 52 | redisHost = os.Getenv("REDIS_HOST") 53 | if redisHost == "" { 54 | log.Fatalf("missing required parameter for redis host") 55 | } 56 | } 57 | 58 | rdb := redis.NewClient(&redis.Options{ 59 | Addr: redisHost, 60 | Password: "", // no password set 61 | DB: 0, // use default DB 62 | }) 63 | 64 | if jobName == "" { 65 | jobName = fmt.Sprintf("sslsearch-%s", time.Now().Format("2006-01-02")) 66 | } 67 | if jobDescription == "" { 68 | desc := strings.Builder{} 69 | desc.WriteString(fmt.Sprintf("Job Description: %s \n", jobName)) 70 | desc.WriteString(fmt.Sprintf("Trigger: probably cron\n")) 71 | jobDescription = desc.String() 72 | } 73 | if jobExportIndex == "" { 74 | jobExportIndex = jobName 75 | } 76 | 77 | jobId := uuid.New().String() 78 | jobTaskQueue := fmt.Sprintf("sslsearch:task-queue:%s", jobId) 79 | log.Printf("creating job: %s with id: %s", jobName, jobId) 80 | 81 | CheckRegionRegex() 82 | 83 | job := &Job{ 84 | JobId: jobId, 85 | TaskQueue: jobTaskQueue, 86 | Name: jobName, 87 | Description: jobDescription, 88 | ExportIndex: jobExportIndex, 89 | Status: "todo", 90 | JobSubmitTime: time.Now(), 91 | } 92 | 93 | cspStrings := strings.Split(targetCsps, ",") 94 | cidrs := make(chan CidrRange, 32) 95 | subCidrs := make(chan CidrRange, 32) 96 | ctx := context.Background() 97 | 98 | go func() { 99 | for parentCidr := range cidrs { 100 | // log.Printf("splitting CIDR %v into sub-cidr 
ranges", parentCidr.Cidr) 101 | SplitCIDR(parentCidr, cidrSuffixPerGoRoutine, subCidrs) 102 | } 103 | close(subCidrs) 104 | }() 105 | 106 | go func() { 107 | wg := &sync.WaitGroup{} 108 | for _, cspString := range cspStrings { 109 | log.Printf("attempting to add sub-cidr ranges for %s to job queue", cspString) 110 | cspInstance, err := GetCspInstance(cspString) 111 | if err != nil { 112 | log.Fatalf("error getting CSP instance for %s", cspString) 113 | } 114 | cspCidrs := make(chan CidrRange, 32) 115 | wg.Add(1) 116 | go func() { 117 | defer wg.Done() 118 | for cidr := range cspCidrs { 119 | cidrs <- cidr 120 | } 121 | }() 122 | cspInstance.GetCidrRanges(ctx, cspCidrs, regionRegexString) 123 | log.Printf("done adding all CIDR ranges for %s to job queue", cspString) 124 | } 125 | wg.Wait() 126 | close(cidrs) 127 | }() 128 | 129 | taskCounter := 0x00 130 | jobCache := make([]interface{}, 800) 131 | jobIndex := 0 132 | for cidr := range subCidrs { 133 | data, err := json.Marshal(cidr) 134 | if err != nil { 135 | log.Fatalf("error marshalling CidrRange to JSON. error = %v", err) 136 | } 137 | jobCache[jobIndex] = data 138 | jobIndex++ 139 | if jobIndex >= 800 { 140 | rdb.LPush(ctx, jobTaskQueue, jobCache...) 141 | jobIndex = 0 142 | } 143 | taskCounter += 1 144 | } 145 | rdb.LPush(ctx, jobTaskQueue, jobCache[:jobIndex]...) 146 | 147 | log.Printf("task queue: %s", jobTaskQueue) 148 | listLength := rdb.LLen(ctx, jobTaskQueue).Val() 149 | log.Printf("task queue size : %d", listLength) 150 | log.Printf("adding job to the job queue") 151 | jobData, err := json.Marshal(job) 152 | if err != nil { 153 | log.Errorf("error marshalling job details into JSON. error = %v", err) 154 | } 155 | if length, err := rdb.LPush(ctx, SSLSEARCH_JOB_QUEUE_TODO, jobData).Result(); err != nil { 156 | log.Errorf("error adding job to job queue. 
error = %v", err) 157 | } else { 158 | log.Infof("job queue size: %d", length) 159 | } 160 | }, 161 | } 162 | 163 | func init() { 164 | workerCmd.AddCommand(addCmd) 165 | addCmd.PersistentFlags().StringVarP(®ionRegexString, "region-regex", "r", ".*", "regex of cloud service provider region to search") 166 | addCmd.PersistentFlags().StringVar(&targetCsps, "target", "aws", "target cloud service providers list") 167 | addCmd.PersistentFlags().StringVar(&jobName, "job-name", "", "job name to be put in job queue") 168 | addCmd.PersistentFlags().StringVar(&jobDescription, "job-description", "", "job description to be put in job queue") 169 | addCmd.PersistentFlags().StringVar(&jobExportIndex, "job-export-index", "", "job export index in elasticsearch") 170 | } 171 | -------------------------------------------------------------------------------- /cmd/aws.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 NAME HERE 3 | */ 4 | package cmd 5 | 6 | import ( 7 | "bytes" 8 | "context" 9 | "encoding/json" 10 | "io" 11 | "regexp" 12 | 13 | "github.com/sirupsen/logrus" 14 | "github.com/spf13/cobra" 15 | "github.com/valyala/fasthttp" 16 | ) 17 | 18 | // awsCmd represents the aws command 19 | var awsCmd = &cobra.Command{ 20 | Use: "aws", 21 | Short: "Scan for a target on Amazon Web Services. 
Region filtering supported", 22 | Run: func(cmd *cobra.Command, args []string) { 23 | 24 | PerformPreRunChecks(true) 25 | ScanCloudServiceProvider(context.TODO(), "AWS", AWS{}) 26 | 27 | }, 28 | } 29 | 30 | func init() { 31 | rootCmd.AddCommand(awsCmd) 32 | awsCmd.Flags().StringVarP(®ionRegexString, "region-regex", "r", ".*", "regex of cloud service provider region to search") 33 | } 34 | 35 | type AWS struct { 36 | } 37 | 38 | type AwsIPRangeResponse struct { 39 | SyncToken string `json:"syncToken"` 40 | CreateDate string `json:"createDate"` 41 | Prefixes []*AwsPrefix `json:"prefixes"` 42 | } 43 | 44 | type AwsPrefix struct { 45 | IPPrefix string `json:"ip_prefix"` 46 | Region string `json:"region"` 47 | Service string `json:"service"` 48 | // NetworkBorderGroup string `json:"network_border_group"` IGNORED 49 | } 50 | 51 | func (aws AWS) GetCidrRanges(ctx context.Context, cidrChan chan CidrRange, region string) { 52 | var ipRangesResponse AwsIPRangeResponse 53 | 54 | defer close(cidrChan) 55 | 56 | req := fasthttp.AcquireRequest() 57 | resp := fasthttp.AcquireResponse() 58 | defer fasthttp.ReleaseRequest(req) 59 | defer fasthttp.ReleaseResponse(resp) 60 | req.SetRequestURI(AWS_IP_RANGES_URL) 61 | 62 | log.WithFields(logrus.Fields{"state": "AWS", "action": "get-cidr-range"}).Info("fetching IP ranges from AWS") 63 | err := fasthttp.Do(req, resp) 64 | 65 | regionRegex := regexp.MustCompile(region) 66 | 67 | if err != nil { 68 | log.WithFields(logrus.Fields{"state": "AWS", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error fetching IP ranges from AWS") 69 | } 70 | respBody := resp.Body() 71 | dec := json.NewDecoder(bytes.NewReader(respBody)) 72 | for dec.More() { 73 | if err := dec.Decode(&ipRangesResponse); err != nil { 74 | if err == io.EOF { 75 | break 76 | } 77 | log.WithFields(logrus.Fields{"state": "AWS", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error parsing response") 78 | } 79 | for _, prefix := range ipRangesResponse.Prefixes 
{ 80 | select { 81 | case <-ctx.Done(): 82 | log.WithFields(logrus.Fields{"state": "AWS", "action": "get-cidr-range"}).Info("recieved context cancellation") 83 | return 84 | default: 85 | if regionRegex.MatchString(prefix.Region) { 86 | cidrChan <- CidrRange{Cidr: prefix.IPPrefix, CSP: "AWS", Region: prefix.Region, Meta: prefix.Service} 87 | log.WithFields(logrus.Fields{"state": "AWS", "action": "get-cidr-range"}).Debugf("added %v to scan target for region %v", prefix.IPPrefix, prefix.Region) 88 | } else { 89 | log.WithFields(logrus.Fields{"state": "AWS", "action": "get-cidr-range"}).Debugf("skipped %v from region %v", prefix.IPPrefix, prefix.Region) 90 | } 91 | } 92 | } 93 | } 94 | log.WithFields(logrus.Fields{"state": "AWS", "action": "get-cidr-range"}).Info("done adding all IPs from AWS to scan target") 95 | } 96 | -------------------------------------------------------------------------------- /cmd/cidr.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 NAME HERE 3 | */ 4 | package cmd 5 | 6 | import ( 7 | "github.com/sirupsen/logrus" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | // cidrCmd represents the cidr command 12 | var cidrCmd = &cobra.Command{ 13 | Use: "cidr", 14 | Short: "Scan SSL certificates for a given CIDR range", 15 | Run: func(cmd *cobra.Command, args []string) { 16 | 17 | PerformPreRunChecks(false) 18 | 19 | // sanity check 20 | if len(args) != 1 { 21 | log.WithFields(logrus.Fields{"state": "main"}).Fatalf("error parsing input args as CIDR. 
args: %v", args) 22 | } 23 | 24 | cidrChan := make(chan CidrRange, threadCount*5) 25 | // generate input ips 26 | go func() { 27 | defer close(cidrChan) 28 | cidr := CidrRange{Cidr: args[0], CSP: "NA", Region: "NA"} 29 | err := SplitCIDR(cidr, cidrSuffixPerGoRoutine, cidrChan) 30 | if err != nil { 31 | log.WithFields(logrus.Fields{"state": "main", "action": "divide-cidr", "errmsg": err.Error(), "cidr": args[0]}).Fatal("error generating sub-CIDR ranges") 32 | } 33 | }() 34 | 35 | // process input 36 | RunScan(cidrChan) 37 | }, 38 | } 39 | 40 | func init() { 41 | rootCmd.AddCommand(cidrCmd) 42 | } 43 | -------------------------------------------------------------------------------- /cmd/cloudflare.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 NAME HERE 3 | */ 4 | package cmd 5 | 6 | import ( 7 | "context" 8 | "strings" 9 | 10 | "github.com/sirupsen/logrus" 11 | "github.com/spf13/cobra" 12 | "github.com/valyala/fasthttp" 13 | ) 14 | 15 | // cloudflareCmd represents the cloudflare command 16 | var cloudflareCmd = &cobra.Command{ 17 | Use: "cloudflare", 18 | Short: "Scan for a target on CloudFlare. 
Region filtering is not supported", 19 | Run: func(cmd *cobra.Command, args []string) { 20 | 21 | PerformPreRunChecks(false) 22 | ScanCloudServiceProvider(context.TODO(), "CloudFlare", Cloudflare{}) 23 | 24 | }, 25 | } 26 | 27 | func init() { 28 | rootCmd.AddCommand(cloudflareCmd) 29 | } 30 | 31 | type Cloudflare struct { 32 | } 33 | 34 | func (cloudflare Cloudflare) GetCidrRanges(ctx context.Context, cidrChan chan CidrRange, region string) { 35 | defer close(cidrChan) 36 | 37 | log.WithFields(logrus.Fields{"state": "Cloudflare", "action": "get-cidr-range"}).Warning("region filtering not supported!") 38 | 39 | req := fasthttp.AcquireRequest() 40 | resp := fasthttp.AcquireResponse() 41 | defer fasthttp.ReleaseRequest(req) 42 | defer fasthttp.ReleaseResponse(resp) 43 | req.SetRequestURI(CLOUDFLARE_IPv4_RANGES_URL) 44 | 45 | log.WithFields(logrus.Fields{"state": "Cloudflare", "action": "get-cidr-range"}).Info("fetching IP ranges") 46 | err := fasthttp.Do(req, resp) 47 | 48 | if err != nil { 49 | log.WithFields(logrus.Fields{"state": "Cloudflare", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error fetching IP ranges") 50 | } 51 | respBody := resp.Body() 52 | 53 | cidrs := strings.Split(string(respBody), "\n") 54 | for _, cidr := range cidrs { 55 | select { 56 | case <-ctx.Done(): 57 | log.WithFields(logrus.Fields{"state": "Cloudflare", "action": "get-cidr-range"}).Info("recieved context cancellation") 58 | return 59 | default: 60 | cidrChan <- CidrRange{Cidr: cidr, CSP: "Cloudflare", Region: "Unknown"} 61 | log.WithFields(logrus.Fields{"state": "Cloudflare", "action": "get-cidr-range"}).Debugf("added %v to scan target", cidr) 62 | } 63 | } 64 | log.WithFields(logrus.Fields{"state": "Cloudflare", "action": "get-cidr-range"}).Info("done adding all IPs") 65 | } 66 | -------------------------------------------------------------------------------- /cmd/definitions.go: -------------------------------------------------------------------------------- 1 | package 
cmd 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | const ( 10 | AWS_IP_RANGES_URL = "https://ip-ranges.amazonaws.com/ip-ranges.json" 11 | CLOUDFLARE_IPv4_RANGES_URL = "https://www.cloudflare.com/ips-v4" 12 | DIGITALOCEAN_IP_RANGES_URL = "https://www.digitalocean.com/geo/google.csv" 13 | GOOGLE_CLOUD_IP_RANGES_URL = "https://www.gstatic.com/ipranges/cloud.json" 14 | ORACLE_CLOUD_IP_RANGES_URL = "https://docs.oracle.com/en-us/iaas/tools/public_ip_ranges.json" 15 | 16 | TOTAL_IPv4_ADDR_COUNT = 3706452992 17 | ) 18 | 19 | type CidrRangeInput interface { 20 | GetCidrRanges(context.Context, chan CidrRange, string) 21 | } 22 | 23 | type ExportTarget interface { 24 | Export(resultChan chan *CertResult, resultWg *sync.WaitGroup) error 25 | } 26 | 27 | type CidrRange struct { 28 | Cidr string `json:"cidr"` 29 | CSP string `json:"csp"` 30 | Region string `json:"region"` 31 | Meta string `json:"meta"` 32 | } 33 | 34 | type CertResult struct { 35 | Ip string `json:"ip"` 36 | Port string `json:"port"` 37 | Subject string `json:"subject"` 38 | Issuer string `json:"issuer"` 39 | SANs []string `json:"SANs"` 40 | JARM string `json:"jarm"` 41 | CSP string `json:"cloud"` 42 | Region string `json:"region"` 43 | Meta string `json:"metadata"` 44 | Timestamp time.Time `json:"timestamp"` 45 | Headers map[string]string `json:"headers"` 46 | Server string `json:"server"` 47 | Host string `json:"host"` 48 | } 49 | -------------------------------------------------------------------------------- /cmd/digitalOcean.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 NAME HERE 3 | */ 4 | package cmd 5 | 6 | import ( 7 | "bytes" 8 | "context" 9 | "encoding/csv" 10 | "io" 11 | "regexp" 12 | "strings" 13 | 14 | "github.com/sirupsen/logrus" 15 | "github.com/spf13/cobra" 16 | "github.com/valyala/fasthttp" 17 | ) 18 | 19 | // digitalOceanCmd represents the digitalOcean command 20 | var digitalOceanCmd = &cobra.Command{ 21 | 
Use: "digitalOcean", 22 | Short: "Scan for a target on Digital Ocean. Region filtering supported", 23 | Run: func(cmd *cobra.Command, args []string) { 24 | 25 | PerformPreRunChecks(true) 26 | ScanCloudServiceProvider(context.TODO(), "DigitalOcean", DigitalOcean{}) 27 | 28 | }, 29 | } 30 | 31 | func init() { 32 | rootCmd.AddCommand(digitalOceanCmd) 33 | digitalOceanCmd.Flags().StringVarP(®ionRegexString, "region-regex", "r", ".*", "regex of cloud service provider region to search") 34 | } 35 | 36 | type DigitalOcean struct { 37 | } 38 | 39 | func (digitalOcean DigitalOcean) GetCidrRanges(ctx context.Context, cidrChan chan CidrRange, region string) { 40 | defer close(cidrChan) 41 | 42 | req := fasthttp.AcquireRequest() 43 | resp := fasthttp.AcquireResponse() 44 | defer fasthttp.ReleaseRequest(req) 45 | defer fasthttp.ReleaseResponse(resp) 46 | req.SetRequestURI(DIGITALOCEAN_IP_RANGES_URL) 47 | 48 | log.WithFields(logrus.Fields{"state": "DigitalOcean", "action": "get-cidr-range"}).Info("fetching IP ranges") 49 | err := fasthttp.Do(req, resp) 50 | 51 | regionRegex := regexp.MustCompile(region) 52 | 53 | if err != nil { 54 | log.WithFields(logrus.Fields{"state": "DigitalOcean", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error fetching IP ranges") 55 | } 56 | respBody := resp.Body() 57 | reader := csv.NewReader(bytes.NewReader(respBody)) 58 | done := false 59 | for !done { 60 | select { 61 | case <-ctx.Done(): 62 | log.WithFields(logrus.Fields{"state": "DigitalOcean", "action": "get-cidr-range"}).Info("recieved context cancellation") 63 | done = true 64 | return 65 | default: 66 | record, err := reader.Read() 67 | if err != nil && err != io.EOF { 68 | log.WithFields(logrus.Fields{"state": "DigitalOcean", "action": "get-cidr-range", "errmsg": err.Error()}).Errorf("error parsing response") 69 | continue 70 | } else if err == io.EOF { 71 | done = true 72 | break 73 | } 74 | cidr := record[0] 75 | // skip IPv6 addresses 76 | if strings.Contains(cidr, "::") { 
77 | continue 78 | } 79 | regionNameString := strings.Join(record[1:4], "_") 80 | if regionRegex.MatchString(regionNameString) { 81 | cidrChan <- CidrRange{Cidr: cidr, CSP: "DigitalOcean", Region: regionNameString} 82 | log.WithFields(logrus.Fields{"state": "DigitalOcean", "action": "get-cidr-range"}).Debugf("added %v to scan target for region %v", cidr, regionNameString) 83 | } else { 84 | log.WithFields(logrus.Fields{"state": "DigitalOcean", "action": "get-cidr-range"}).Debugf("skipped %v from region %v", cidr, regionNameString) 85 | } 86 | } 87 | } 88 | log.WithFields(logrus.Fields{"state": "DigitalOcean", "action": "get-cidr-range"}).Info("done adding all IPs") 89 | } 90 | -------------------------------------------------------------------------------- /cmd/export.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/tls" 7 | "encoding/json" 8 | "fmt" 9 | "net/http" 10 | "os" 11 | "strings" 12 | "sync" 13 | "time" 14 | 15 | "compress/gzip" 16 | 17 | "github.com/elastic/go-elasticsearch/v8" 18 | "github.com/elastic/go-elasticsearch/v8/esutil" 19 | "github.com/gocql/gocql" 20 | "github.com/sirupsen/logrus" 21 | ) 22 | 23 | func GetExportTarget() ExportTarget { 24 | if diskExport { 25 | tg, err := NewDiskTarget(diskFilePath) 26 | if err != nil { 27 | log.WithFields(logrus.Fields{"state": "export", "type": "disk", "errmsg": err}).Fatalf("error configuring disk export target") 28 | } 29 | return tg 30 | } 31 | if cassandraExport { 32 | tg, err := NewCassandra(cassandraConnectionString, cassandraKeyspaceDotTable, cassandraRecordTimeStampKey) 33 | if err != nil { 34 | log.WithFields(logrus.Fields{"state": "export", "type": "cassandra", "errmsg": err}).Fatalf("error configuring cassandra export target") 35 | } 36 | return tg 37 | } 38 | if elasticsearchExport { 39 | tg, err := NewElasticsearch(elasticsearchHost, elasticsearchUsername, elasticsearchPassword, 
elasticsearchIndex) 40 | if err != nil { 41 | log.WithFields(logrus.Fields{"state": "export", "type": "elastic", "errmsg": err}).Fatalf("error configuring elasticsearch export target") 42 | } 43 | return tg 44 | } 45 | return nil 46 | } 47 | 48 | type Elasticsearch struct { 49 | elasticHost string 50 | elasticUser string 51 | elasticPass string 52 | elasticIndex string 53 | client *elasticsearch.Client 54 | indexer esutil.BulkIndexer 55 | } 56 | 57 | func NewElasticsearch(elasticHost, elasticUser, elasticPass, elasticIndex string) (*Elasticsearch, error) { 58 | client, err := elasticsearch.NewClient(elasticsearch.Config{ 59 | Addresses: []string{elasticHost}, 60 | Username: elasticUser, 61 | Password: elasticPass, 62 | EnableMetrics: true, 63 | RetryBackoff: func(i int) time.Duration { return time.Duration(i*10) * time.Second }, 64 | MaxRetries: 15, 65 | CompressRequestBody: true, 66 | CompressRequestBodyLevel: gzip.BestCompression, 67 | Transport: &http.Transport{ 68 | TLSClientConfig: &tls.Config{ 69 | InsecureSkipVerify: true, 70 | }, 71 | }, 72 | }) 73 | if err != nil { 74 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": err}).Errorf("error creating elasticsearch client") 75 | return nil, err 76 | } 77 | indexer, err := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{ 78 | Client: client, // The Elasticsearch client 79 | Index: elasticIndex, // The default index name 80 | NumWorkers: 1, // The number of worker goroutines (default: number of CPUs) 81 | FlushBytes: 2e+6, // The flush threshold in bytes 1M 82 | }) 83 | if err != nil { 84 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": err}).Errorf("error creating elasticsearch bulk indexer") 85 | return nil, err 86 | } 87 | log.WithFields(logrus.Fields{"state": "elastic"}).Infof("exporting to elasticsearch at: %s", elasticsearchHost) 88 | return &Elasticsearch{ 89 | elasticHost: elasticHost, 90 | elasticUser: elasticUser, 91 | elasticPass: elasticPass, 92 | elasticIndex: elasticIndex, 93 | 
client: client, 94 | indexer: indexer, 95 | }, nil 96 | } 97 | 98 | func (es *Elasticsearch) Export(resultChan chan *CertResult, resultWg *sync.WaitGroup) error { 99 | defer resultWg.Done() 100 | indexSettings := map[string]interface{}{ 101 | "settings": map[string]interface{}{ 102 | "number_of_shards": 20, 103 | "mapping": map[string]interface{}{ 104 | "total_fields": map[string]interface{}{ 105 | "limit": 60000, 106 | }, 107 | }, 108 | }, 109 | } 110 | body, _ := json.Marshal(indexSettings) 111 | resp, err := es.client.Indices.Create(es.elasticIndex, es.client.Indices.Create.WithBody( 112 | bytes.NewReader(body), 113 | )) 114 | if err != nil { 115 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": err}).Fatal("error creating elasticsearch index") 116 | } else if resp.IsError() && resp.StatusCode != 400 { 117 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": resp.String()}).Fatal("error creating elasticsearch index. invalid response") 118 | } 119 | log.WithFields(logrus.Fields{"state": "elastic"}).Infof("exporting to elasticsearch index: %s", es.elasticIndex) 120 | for result := range resultChan { 121 | resultBytes, err := json.Marshal(result) 122 | if err != nil { 123 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": err}).Infof("error marshalling result to JSON") 124 | 125 | } 126 | err = es.indexer.Add(context.TODO(), esutil.BulkIndexerItem{ 127 | Action: "index", 128 | Body: bytes.NewReader(resultBytes), 129 | OnSuccess: func(ctx context.Context, item esutil.BulkIndexerItem, res esutil.BulkIndexerResponseItem) { 130 | resultsExported.Add(1) 131 | }, 132 | OnFailure: func(ctx context.Context, item esutil.BulkIndexerItem, item2 esutil.BulkIndexerResponseItem, err error) { 133 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": err}).Errorf("error exporting result to elasticsearch") 134 | }, 135 | }) 136 | if err != nil { 137 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": err}).Errorf("error exporting result 
to elasticsearch") 138 | } 139 | resultsProcessed.Add(1) 140 | } 141 | if err := es.indexer.Close(context.TODO()); err != nil { 142 | log.WithFields(logrus.Fields{"state": "elastic", "errmsg": err}).Errorf("error flusing bulk indexer") 143 | } 144 | stats := es.indexer.Stats() 145 | log.WithFields(logrus.Fields{"state": "elastic"}).Infof("indexed %d documents with %d errors", stats.NumFlushed, stats.NumFailed) 146 | return nil 147 | } 148 | 149 | type DiskTarget struct { 150 | filename string 151 | outfile *os.File 152 | } 153 | 154 | func NewDiskTarget(filename string) (*DiskTarget, error) { 155 | outfile, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) 156 | if err != nil { 157 | log.WithFields(logrus.Fields{"state": "disk", "errmsg": err}).Errorf("error opening output file") 158 | return nil, err 159 | } 160 | return &DiskTarget{filename: filename, outfile: outfile}, nil 161 | } 162 | 163 | func (tg *DiskTarget) Export(resultChan chan *CertResult, resultWg *sync.WaitGroup) error { 164 | defer resultWg.Done() 165 | defer tg.outfile.Close() 166 | enc := json.NewEncoder(tg.outfile) 167 | log.WithFields(logrus.Fields{"state": "disk"}).Infof("exporting to file: %s", tg.filename) 168 | for result := range resultChan { 169 | if err := enc.Encode(result); err != nil { 170 | log.WithFields(logrus.Fields{"state": "disk", "errmsg": err}).Errorf("error exporting result") 171 | } else { 172 | resultsExported.Add(1) 173 | } 174 | resultsProcessed.Add(1) 175 | } 176 | return nil 177 | } 178 | 179 | type Cassandra struct { 180 | session *gocql.Session 181 | tableName string 182 | recordTimestampKey string 183 | } 184 | 185 | func NewCassandra(connectionString, keyspaceTableName, recordTimestampKey string) (*Cassandra, error) { 186 | cluster := gocql.NewCluster(connectionString) 187 | cluster.Timeout = time.Second * 30 188 | s := strings.Split(keyspaceTableName, ".") 189 | cluster.Keyspace = s[0] 190 | tableName := s[1] 191 | session, err := 
cluster.CreateSession() 192 | return &Cassandra{session, tableName, recordTimestampKey}, err 193 | } 194 | func (ca *Cassandra) Export(resultChan chan *CertResult, resultWg *sync.WaitGroup) error { 195 | defer resultWg.Done() 196 | log.WithFields(logrus.Fields{"state": "cassandra"}).Infof("exporting to cassandra with RecordTsKey: %s", cassandraRecordTimeStampKey) 197 | for result := range resultChan { 198 | if err := insertRecordIntoCassandra(ca.session, ca.tableName, cassandraRecordTimeStampKey, result); err != nil { 199 | log.WithFields(logrus.Fields{"state": "cassandra", "errmsg": err}).Errorf("error inserting record into cassandra") 200 | } else { 201 | resultsExported.Add(1) 202 | } 203 | resultsProcessed.Add(1) 204 | } 205 | return nil 206 | } 207 | 208 | func insertRecordIntoCassandra(session *gocql.Session, tableName string, cassandraRecordTimeStampKey string, result *CertResult) error { 209 | queryString := fmt.Sprintf("INSERT INTO %s (record_ts, ip, port, subject, issuer, sans, jarm, csp, region, meta, timestamp, headers, server, host) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", tableName) 210 | query := session.Query(queryString, 211 | cassandraRecordTimeStampKey, result.Ip, result.Port, result.Subject, result.Issuer, result.SANs, result.JARM, result.CSP, result.Region, result.Meta, result.Timestamp, result.Headers, result.Server, result.Host, 212 | ) 213 | if err := query.Exec(); err != nil { 214 | return fmt.Errorf("failed to execute query: %v", err) 215 | } 216 | return nil 217 | } 218 | -------------------------------------------------------------------------------- /cmd/gcp.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 NAME HERE 3 | */ 4 | package cmd 5 | 6 | import ( 7 | "bytes" 8 | "context" 9 | "encoding/json" 10 | "io" 11 | "regexp" 12 | 13 | "github.com/sirupsen/logrus" 14 | "github.com/spf13/cobra" 15 | "github.com/valyala/fasthttp" 16 | ) 17 | 18 | // gcpCmd represents 
the gcp command 19 | var gcpCmd = &cobra.Command{ 20 | Use: "gcp", 21 | Short: "Scan for a target on Google Cloud Platform. Region filtering supported", 22 | Run: func(cmd *cobra.Command, args []string) { 23 | 24 | PerformPreRunChecks(true) 25 | ScanCloudServiceProvider(context.TODO(), "GCP", GCP{}) 26 | 27 | }, 28 | } 29 | 30 | func init() { 31 | rootCmd.AddCommand(gcpCmd) 32 | gcpCmd.Flags().StringVarP(®ionRegexString, "region-regex", "r", ".*", "regex of cloud service provider region to search") 33 | } 34 | 35 | type GCP struct { 36 | } 37 | 38 | type GcpPrefix struct { 39 | Ipv4Prefix string `json:"ipv4Prefix"` // IPv4 Cidr that usually appears 40 | Ipv6Prefix string `json:"ipv6Prefix"` // Ipv6 Cidr that appears sometimes 41 | Service string `json:"service"` // mostly remains "Google Cloud" ?? 42 | Scope string `json:"scope"` // Region Key 43 | } 44 | 45 | type GcpIPRangeResponse struct { 46 | SyncToken string `json:"syncToken"` 47 | CreationTime string `json:"creationTime"` 48 | Prefixes []*GcpPrefix `json:"prefixes"` 49 | } 50 | 51 | func (gcp GCP) GetCidrRanges(ctx context.Context, cidrChan chan CidrRange, region string) { 52 | var ipRangesResponse GcpIPRangeResponse 53 | 54 | defer close(cidrChan) 55 | 56 | req := fasthttp.AcquireRequest() 57 | resp := fasthttp.AcquireResponse() 58 | defer fasthttp.ReleaseRequest(req) 59 | defer fasthttp.ReleaseResponse(resp) 60 | req.SetRequestURI(GOOGLE_CLOUD_IP_RANGES_URL) 61 | 62 | log.WithFields(logrus.Fields{"state": "GCP", "action": "get-cidr-range"}).Info("fetching IP ranges") 63 | err := fasthttp.Do(req, resp) 64 | 65 | regionRegex := regexp.MustCompile(region) 66 | 67 | if err != nil { 68 | log.WithFields(logrus.Fields{"state": "GCP", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error fetching IP ranges") 69 | } 70 | respBody := resp.Body() 71 | dec := json.NewDecoder(bytes.NewReader(respBody)) 72 | for dec.More() { 73 | if err := dec.Decode(&ipRangesResponse); err != nil { 74 | if err == io.EOF { 75 
| break 76 | } 77 | log.WithFields(logrus.Fields{"state": "GCP", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error parsing response") 78 | } 79 | for _, prefix := range ipRangesResponse.Prefixes { 80 | select { 81 | case <-ctx.Done(): 82 | log.WithFields(logrus.Fields{"state": "GCP", "action": "get-cidr-range"}).Info("recieved context cancellation") 83 | return 84 | default: 85 | if regionRegex.MatchString(prefix.Scope) { 86 | if prefix.Ipv6Prefix != "" { 87 | continue 88 | } 89 | cidrChan <- CidrRange{Cidr: prefix.Ipv4Prefix, CSP: "GCP", Region: prefix.Scope, Meta: prefix.Service} 90 | log.WithFields(logrus.Fields{"state": "GCP", "action": "get-cidr-range"}).Debugf("added %v to scan target for region %v", prefix.Ipv4Prefix, prefix.Scope) 91 | } else { 92 | log.WithFields(logrus.Fields{"state": "GCP", "action": "get-cidr-range"}).Debugf("skipped %v from region %v", prefix.Ipv4Prefix, prefix.Scope) 93 | } 94 | } 95 | } 96 | } 97 | log.WithFields(logrus.Fields{"state": "GCP", "action": "get-cidr-range"}).Info("done adding all IPs") 98 | } 99 | -------------------------------------------------------------------------------- /cmd/oci.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 NAME HERE 3 | */ 4 | package cmd 5 | 6 | import ( 7 | "bytes" 8 | "context" 9 | "encoding/json" 10 | "io" 11 | "regexp" 12 | 13 | "github.com/sirupsen/logrus" 14 | "github.com/spf13/cobra" 15 | "github.com/valyala/fasthttp" 16 | ) 17 | 18 | // ociCmd represents the oci command 19 | var ociCmd = &cobra.Command{ 20 | Use: "oci", 21 | Short: "Scan for a target on Oracle Cloud Infrastructure. 
Region filtering supported", 22 | Run: func(cmd *cobra.Command, args []string) { 23 | 24 | PerformPreRunChecks(true) 25 | ScanCloudServiceProvider(context.TODO(), "OCI", Oracle{}) 26 | 27 | }, 28 | } 29 | 30 | func init() { 31 | rootCmd.AddCommand(ociCmd) 32 | ociCmd.Flags().StringVarP(®ionRegexString, "region-regex", "r", ".*", "regex of cloud service provider region to search") 33 | } 34 | 35 | type Oracle struct { 36 | } 37 | 38 | type OracleRegionCidr struct { 39 | Cidr string `json:"cidr"` 40 | } 41 | type RegionsElement struct { 42 | Region string `json:"region"` 43 | OracleRegionCidrs []*OracleRegionCidr `json:"cidrs"` 44 | } 45 | 46 | type OracleIPRangeResponse struct { 47 | RegionsElements []*RegionsElement `json:"regions"` 48 | } 49 | 50 | func (oracle Oracle) GetCidrRanges(ctx context.Context, cidrChan chan CidrRange, region string) { 51 | var ipRangesResponse OracleIPRangeResponse 52 | 53 | defer close(cidrChan) 54 | 55 | req := fasthttp.AcquireRequest() 56 | resp := fasthttp.AcquireResponse() 57 | defer fasthttp.ReleaseRequest(req) 58 | defer fasthttp.ReleaseResponse(resp) 59 | req.SetRequestURI(ORACLE_CLOUD_IP_RANGES_URL) 60 | 61 | log.WithFields(logrus.Fields{"state": "OCI", "action": "get-cidr-range"}).Info("fetching IP ranges") 62 | err := fasthttp.Do(req, resp) 63 | 64 | regionRegex := regexp.MustCompile(region) 65 | 66 | if err != nil { 67 | log.WithFields(logrus.Fields{"state": "OCI", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error fetching IP ranges") 68 | } 69 | respBody := resp.Body() 70 | dec := json.NewDecoder(bytes.NewReader(respBody)) 71 | for dec.More() { 72 | if err := dec.Decode(&ipRangesResponse); err != nil { 73 | if err == io.EOF { 74 | break 75 | } 76 | log.WithFields(logrus.Fields{"state": "OCI", "action": "get-cidr-range", "errmsg": err.Error()}).Fatal("error parsing response") 77 | } 78 | for _, regionElement := range ipRangesResponse.RegionsElements { 79 | if regionRegex.MatchString(regionElement.Region) { 80 | 
for _, cidr := range regionElement.OracleRegionCidrs { 81 | select { 82 | case <-ctx.Done(): 83 | log.WithFields(logrus.Fields{"state": "OCI", "action": "get-cidr-range"}).Info("recieved context cancellation") 84 | return 85 | default: 86 | cidrChan <- CidrRange{Cidr: cidr.Cidr, CSP: "OCI", Region: regionElement.Region} 87 | log.WithFields(logrus.Fields{"state": "OCI", "action": "get-cidr-range"}).Debugf("added %v to scan target", cidr.Cidr) 88 | } 89 | } 90 | } else { 91 | log.WithFields(logrus.Fields{"state": "OCI", "action": "get-cidr-range"}).Debugf("skipped region %v", regionElement.Region) 92 | } 93 | } 94 | } 95 | log.WithFields(logrus.Fields{"state": "OCI", "action": "get-cidr-range"}).Info("done adding all IPs") 96 | } 97 | -------------------------------------------------------------------------------- /cmd/process.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 Harsh Varagiya 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in 12 | all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 20 | THE SOFTWARE. 21 | */ 22 | package cmd 23 | 24 | import ( 25 | "context" 26 | "encoding/json" 27 | "errors" 28 | "os" 29 | "os/signal" 30 | "sync" 31 | "syscall" 32 | "time" 33 | 34 | "github.com/redis/go-redis/v9" 35 | "github.com/sirupsen/logrus" 36 | "github.com/spf13/cobra" 37 | ) 38 | 39 | var ( 40 | workerScannerPorts = []string{"443"} 41 | ) 42 | 43 | // processCmd represents the process command 44 | var processCmd = &cobra.Command{ 45 | Use: "process", 46 | Short: "process background jobs from queue for scanning", 47 | Run: func(cmd *cobra.Command, args []string) { 48 | 49 | if redisHost == "" { 50 | redisHost = os.Getenv("REDIS_HOST") 51 | if redisHost == "" { 52 | log.Fatalf("missing required parameter for redis host") 53 | } 54 | } 55 | 56 | UpdateLogLevel() 57 | ctx, cancelFunc := context.WithCancel(context.Background()) 58 | defer cancelFunc() 59 | signals := make(chan os.Signal, 1) 60 | signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) 61 | 62 | // we read job from the in-progress queue and only if that is empty, we read from todo queue 63 | go func() { 64 | for { 65 | s := <-signals 66 | log.WithFields(logrus.Fields{"state": "main"}).Infof("received %v ... cancelling context.", s.String()) 67 | cancelFunc() 68 | log.WithFields(logrus.Fields{"state": "main"}).Infof("waiting for threads to finish ...") 69 | s = <-signals 70 | log.WithFields(logrus.Fields{"state": "main"}).Infof("received %v ... 
forcing exit", s.String()) 71 | os.Exit(-1) 72 | } 73 | }() 74 | 75 | rdb := redis.NewClient(&redis.Options{ 76 | Addr: redisHost, 77 | Password: "", // no password set 78 | DB: 0, // use default DB 79 | }) 80 | 81 | log.WithFields(logrus.Fields{"state": "main"}).Infof("fetching job from job queues") 82 | 83 | job, err := GetJobToBeDone(ctx, rdb) 84 | if err != nil && errors.Is(err, redis.Nil) { 85 | log.WithFields(logrus.Fields{"state": "main"}).Printf("exiting silently") 86 | os.Exit(0) 87 | } else if err != nil { 88 | log.WithFields(logrus.Fields{"state": "main", "errmsg": err}).Printf("error getting job from queue") 89 | os.Exit(-1) 90 | } 91 | exportTarget, err := NewElasticsearch(elasticsearchHost, elasticsearchUsername, elasticsearchPassword, job.ExportIndex) 92 | if err != nil { 93 | log.WithFields(logrus.Fields{"state": "main", "errmsg": err}).Fatalf("error configuring elasticsearch export target") 94 | } 95 | initialResultChan := make(chan *CertResult, threadCount) 96 | scanWg := &sync.WaitGroup{} 97 | scanWg.Add(threadCount) 98 | processCidrRange := make(chan CidrRange, threadCount) 99 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Debugf("starting tls scanning threads") 100 | for i := 0; i < threadCount; i++ { 101 | go ScanCertificatesInCidr(ctx, processCidrRange, workerScannerPorts, initialResultChan, scanWg, ".*") 102 | } 103 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Debugf("starting header grabbing threads") 104 | serverHeaderWg := &sync.WaitGroup{} 105 | headerEnrichedResultsChan := ServerHeaderEnrichment(ctx, initialResultChan, serverHeaderThreadCount, serverHeaderWg) 106 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Debugf("starting jarm fingerprinting") 107 | jarmFingerprintWg := &sync.WaitGroup{} 108 | enrichedResultChan := JARMFingerprintEnrichment(ctx, headerEnrichedResultsChan, jarmFingerprintThreadCount, jarmFingerprintWg) 109 | 
log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Debugf("starting export thread") 110 | resultWg := &sync.WaitGroup{} 111 | resultWg.Add(1) 112 | go exportTarget.Export(enrichedResultChan, resultWg) 113 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Info("started all processing threads") 114 | 115 | hostname, _ := os.Hostname() 116 | go ProfileRuntime(ctx, rdb, hostname) 117 | go ExportStatsPeriodically(ctx, rdb, job, hostname, time.Duration(consoleRefreshSeconds)*time.Second) 118 | go PrintProgressToConsole(consoleRefreshSeconds) 119 | 120 | WorkerLoop: 121 | for { 122 | log.WithFields(logrus.Fields{"state": "process", "type": "mgmt", "job-id": job.JobId}).Debugf("getting next task from queue") 123 | select { 124 | case <-ctx.Done(): 125 | log.WithFields(logrus.Fields{"state": "process", "type": "mgmt", "job-id": job.JobId}).Infof("context done. exiting worker loop") 126 | break WorkerLoop 127 | default: 128 | break 129 | } 130 | data, err := rdb.LPop(ctx, job.TaskQueue).Bytes() 131 | if err != nil { 132 | if errors.Is(err, redis.Nil) { 133 | log.WithFields(logrus.Fields{"state": "process", "type": "mgmt", "job-id": job.JobId}).Infof("task queue empty") 134 | jobString, _ := json.Marshal(job) 135 | count, err := rdb.LRem(ctx, SSLSEARCH_JOBS_IN_PROGRESS, 0, jobString).Result() 136 | if err != nil { 137 | log.WithFields(logrus.Fields{"state": "process", "type": "mgmt", "job-id": job.JobId, "errmsg": err}).Errorf("error deleting task from in-progress queue") 138 | } else if count == 1 { 139 | job.JobDoneTime = time.Now() 140 | jobString, _ := json.Marshal(job) 141 | err := rdb.LPush(ctx, SSLSEARCH_JOB_QUEUE_DONE, jobString).Err() 142 | if err != nil { 143 | log.WithFields(logrus.Fields{"state": "process", "type": "mgmt", "job-id": job.JobId, "errmsg": err}).Errorf("error adding task to done queue") 144 | } 145 | log.WithFields(logrus.Fields{"state": "process", "type": "mgmt", "job-id": job.JobId}).Infof("added job to 
done queue") 146 | } 147 | break WorkerLoop 148 | } 149 | log.WithFields(logrus.Fields{"state": "process", "errmsg": err, "type": "mgmt", "job-id": job.JobId}).Errorf("error popping task from queue") 150 | time.Sleep(time.Minute) 151 | continue 152 | } 153 | var cidrRange CidrRange 154 | if err = json.Unmarshal(data, &cidrRange); err != nil { 155 | log.WithFields(logrus.Fields{"state": "process", "errmsg": err, "type": "mgmt", "job-id": job.JobId}).Error("error parsing task") 156 | } 157 | log.WithFields(logrus.Fields{"state": "process", "csp": cidrRange.CSP, "region": cidrRange.Region, "cidr": cidrRange.Cidr, "job-id": job.JobId}).Infof("processing task") 158 | SplitCIDR(cidrRange, cidrSuffixPerGoRoutine, processCidrRange) 159 | } 160 | 161 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Infof("worker loop ended") 162 | close(processCidrRange) 163 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Infof("waiting for scanner threads to finish!") 164 | scanWg.Wait() 165 | close(initialResultChan) 166 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Info("tls scanning finished") 167 | serverHeaderWg.Wait() 168 | close(headerEnrichedResultsChan) 169 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Info("server header grabbing finished") 170 | jarmFingerprintWg.Wait() 171 | close(enrichedResultChan) 172 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Info("jarm fingerprinting finished") 173 | resultWg.Wait() 174 | log.WithFields(logrus.Fields{"state": "process", "job-id": job.JobId}).Infof("result exporting finished") 175 | }, 176 | } 177 | 178 | func init() { 179 | workerCmd.AddCommand(processCmd) 180 | } 181 | 182 | func GetJobToBeDone(ctx context.Context, rdb *redis.Client) (*Job, error) { 183 | var job Job 184 | err := rdb.Watch(ctx, func(tx *redis.Tx) error { 185 | jobsInProgress, err := tx.LRange(ctx, SSLSEARCH_JOBS_IN_PROGRESS, 0, 1).Result() 
186 | if err != nil && !errors.Is(err, redis.Nil) { 187 | log.WithFields(logrus.Fields{"state": "main", "errmsg": err}).Errorf("error getting elements from in-progress job queue") 188 | return err 189 | } 190 | if err == nil && len(jobsInProgress) >= 1 { 191 | jobJson := jobsInProgress[0] 192 | err := json.Unmarshal([]byte(jobJson), &job) 193 | if err != nil { 194 | log.WithFields(logrus.Fields{"state": "main", "errmsg": err}).Errorf("error unmarshalling job into JSON") 195 | return err 196 | } 197 | log.WithFields(logrus.Fields{"state": "main"}).Infof("in-progress job found: %s", job.Name) 198 | return nil 199 | } 200 | jobJson, err := tx.RPopLPush(ctx, SSLSEARCH_JOB_QUEUE_TODO, SSLSEARCH_JOBS_IN_PROGRESS).Result() 201 | if err != nil && !errors.Is(err, redis.Nil) { 202 | log.WithFields(logrus.Fields{"state": "main", "errmsg": err}).Errorf("error getting elements from todo job queue") 203 | return err 204 | } 205 | if err == nil { 206 | if err := json.Unmarshal([]byte(jobJson), &job); err != nil { 207 | log.WithFields(logrus.Fields{"state": "main", "errmsg": err}).Errorf("error unmarshalling job into JSON") 208 | return err 209 | } 210 | log.WithFields(logrus.Fields{"state": "main"}).Infof("todo job found: %s", job.Name) 211 | return nil 212 | } 213 | log.WithFields(logrus.Fields{"state": "main"}).Infof("no jobs found in the queue to be done") 214 | return redis.Nil 215 | }, SSLSEARCH_JOBS_IN_PROGRESS, SSLSEARCH_JOB_QUEUE_TODO) 216 | return &job, err 217 | } 218 | -------------------------------------------------------------------------------- /cmd/profiling.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/google/uuid" 7 | "github.com/minio/minio-go/v7" 8 | "github.com/minio/minio-go/v7/pkg/credentials" 9 | "github.com/redis/go-redis/v9" 10 | "github.com/sirupsen/logrus" 11 | "os" 12 | "runtime/pprof" 13 | "time" 14 | ) 15 | 16 | func ProfileRuntime(ctx 
context.Context, rdb *redis.Client, hostname string) { 17 | endpoint := os.Getenv("MINIO_ENDPOINT") 18 | accessKey := os.Getenv("ACCESS_KEY") 19 | secretKey := os.Getenv("SECRET_KEY") 20 | bucketName := os.Getenv("BUCKET_NAME") 21 | minioClient, err := minio.New(endpoint, &minio.Options{ 22 | Creds: credentials.NewStaticV4(accessKey, secretKey, ""), 23 | Secure: false, 24 | }) 25 | if err != nil || bucketName == "" { 26 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": err}).Errorf("error connecting to MinIO server. exiting profiling") 27 | return 28 | } 29 | for { 30 | time.Sleep(time.Minute) 31 | keyPrefix, err := rdb.Get(ctx, "profile").Result() 32 | if err != nil { 33 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": err}).Debugf("error getting profile control variable") 34 | continue 35 | } 36 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt"}).Infof("attempting to profile application. prefix: %s", keyPrefix) 37 | cpuProfileTmpFileName := "/tmp/cpu-" + uuid.NewString() + ".prof" 38 | heapProfileTmpFileName := "/tmp/heap-" + uuid.NewString() + ".prof" 39 | cpuProfileFile, err := os.Create(cpuProfileTmpFileName) 40 | if err != nil { 41 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": err}).Errorf("error creating tmp file for CPU profiling") 42 | continue 43 | } 44 | heapProfileFile, err := os.Create(heapProfileTmpFileName) 45 | if err != nil { 46 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": err}).Errorf("error creating tmp file for HEAP profiling") 47 | continue 48 | } 49 | err = pprof.StartCPUProfile(cpuProfileFile) 50 | if err != nil { 51 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": err}).Errorf("error starting CPU profiling") 52 | continue 53 | } 54 | err = pprof.WriteHeapProfile(heapProfileFile) 55 | if err != nil { 56 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": 
err}).Errorf("error generating HEAP profile") 57 | continue 58 | } 59 | time.Sleep(time.Minute) 60 | pprof.StopCPUProfile() 61 | cpuProfileFile.Close() 62 | heapProfileFile.Close() 63 | cpuObjectName := fmt.Sprintf("%s/cpu/%s-%s.prof", keyPrefix, time.Now().Format("2006-01-02-15-04-05"), hostname) 64 | heapObjectName := fmt.Sprintf("%s/heap/%s-%s.prof", keyPrefix, time.Now().Format("2006-01-02-15-04-05"), hostname) 65 | info, err := minioClient.FPutObject(ctx, bucketName, cpuObjectName, cpuProfileTmpFileName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) 66 | if err != nil { 67 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": err}).Errorf("error uploading profile to minio server") 68 | continue 69 | } 70 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt"}).Infof("uploaded CPU profile '%s' of size %d bytes", info.Key, info.Size) 71 | info, err = minioClient.FPutObject(ctx, bucketName, heapObjectName, heapProfileTmpFileName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) 72 | if err != nil { 73 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt", "errmsg": err}).Errorf("error uploading profile to minio server") 74 | continue 75 | } 76 | log.WithFields(logrus.Fields{"state": "profile", "type": "mgmt"}).Infof("uploaded HEAP profile '%s' of size %d bytes", info.Key, info.Size) 77 | os.Remove(cpuProfileTmpFileName) 78 | os.Remove(heapProfileTmpFileName) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 Harsh Varagiya 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in 12 | all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 20 | THE SOFTWARE. 21 | */ 22 | package cmd 23 | 24 | import ( 25 | "crypto/tls" 26 | "fmt" 27 | "net" 28 | "os" 29 | "sync" 30 | "sync/atomic" 31 | "time" 32 | 33 | "github.com/sirupsen/logrus" 34 | "github.com/spf13/cobra" 35 | "github.com/spf13/viper" 36 | "github.com/valyala/fasthttp" 37 | ) 38 | 39 | var ( 40 | debugFlag bool 41 | keywordRegexString string 42 | regionRegexString string 43 | portsString string 44 | 45 | threadCount int 46 | cidrSuffixPerGoRoutine int 47 | 48 | serverHeaderThreadCount int 49 | jarmFingerprintThreadCount int 50 | 51 | // Export Configuration 52 | diskExport bool 53 | diskFilePath string 54 | cassandraExport bool 55 | cassandraConnectionString string 56 | cassandraKeyspaceDotTable string 57 | cassandraRecordTimeStampKey string 58 | elasticsearchExport bool 59 | elasticsearchHost string 60 | elasticsearchUsername string 61 | elasticsearchPassword string 62 | elasticsearchIndex string 63 | consoleProgressLog bool 64 | ) 65 | 66 | var ( 67 | log = logrus.New() 68 | cidrRangesToScan = atomic.Int64{} 69 | cidrRangesScanned = atomic.Int64{} 70 | ipsToScan = atomic.Int64{} 71 | ipsErrConn = atomic.Int64{} 72 | ipsErrNoTls = atomic.Int64{} 73 | ipsScanned = 
atomic.Int64{} 74 | ipScanRate = atomic.Int64{} 75 | totalFindings = atomic.Int64{} 76 | jarmFingerprintsGrabbed = atomic.Int64{} 77 | jarmFingerprintsScanned = atomic.Int64{} 78 | serverHeadersGrabbed = atomic.Int64{} 79 | serverHeadersScanned = atomic.Int64{} 80 | resultsExported = atomic.Int64{} 81 | resultsProcessed = atomic.Int64{} 82 | activeJarmThreads = atomic.Int64{} 83 | activeHeaderThreads = atomic.Int64{} 84 | jarmRetryCount = 3 85 | tcpTimeout = 10 86 | consoleRefreshSeconds = 5 87 | 88 | state = 1 89 | 90 | httpClientPool = sync.Pool{ 91 | New: func() interface{} { 92 | return &fasthttp.Client{ 93 | TLSConfig: &tls.Config{ 94 | // for server header check skip SSL validation 95 | InsecureSkipVerify: true, 96 | }, 97 | } 98 | }, 99 | } 100 | dialerPool = sync.Pool{ 101 | New: func() interface{} { 102 | return &net.Dialer{ 103 | Timeout: time.Duration(tcpTimeout) * time.Second, 104 | } 105 | }, 106 | } 107 | tlsConfigPool = sync.Pool{ 108 | New: func() interface{} { 109 | return &tls.Config{ 110 | InsecureSkipVerify: true, 111 | } 112 | }, 113 | } 114 | errConn = fmt.Errorf("could not connect to remote host") 115 | errNoTls = fmt.Errorf("could not find TLS on remote port") 116 | errNoMatch = fmt.Errorf("certificate details did not match requirement") 117 | errCtxCancelled = fmt.Errorf("parent context cancelled") 118 | ) 119 | 120 | // rootCmd represents the base command when called without any subcommands 121 | var rootCmd = &cobra.Command{ 122 | Use: "sslsearch", 123 | Short: "hunt for keywords in SSL certificates on cloud", 124 | Long: `search cloud providers / IP ranges to scan for interesting keywords in 125 | SSL certificates and do some initial recon for the findings like server header grabbing 126 | & JARM Fingerprinting`, 127 | } 128 | 129 | func Execute() { 130 | err := rootCmd.Execute() 131 | if err != nil { 132 | os.Exit(1) 133 | } 134 | } 135 | 136 | func init() { 137 | // refined input flags 138 | viper.AutomaticEnv() 139 | 
rootCmd.PersistentFlags().StringVarP(&keywordRegexString, "keyword-regex", "k", ".*", "case insensitive keyword regex to search in subject or SAN (ex: .*amazon.* or .* which matches all)") 140 | rootCmd.PersistentFlags().StringVarP(&portsString, "ports", "p", "443", "ports to search") 141 | rootCmd.PersistentFlags().IntVarP(&threadCount, "threads", "t", 1024, "number of parallel threads to use") 142 | rootCmd.PersistentFlags().IntVar(&consoleRefreshSeconds, "refresh", 5, "console progress refresh in seconds") 143 | rootCmd.PersistentFlags().BoolVarP(&debugFlag, "debug", "v", false, "enable debug logs") 144 | rootCmd.PersistentFlags().IntVar(&cidrSuffixPerGoRoutine, "suffix", 4, "CIDR suffix per goroutine [each thread will scan 2^x IPs]") 145 | rootCmd.PersistentFlags().IntVar(&tcpTimeout, "timeout", 10, "tcp connection timeout in seconds") 146 | rootCmd.PersistentFlags().BoolVar(&consoleProgressLog, "console-progress", false, "print progress notes in console instead of progress bar") 147 | 148 | // Export to disk 149 | rootCmd.PersistentFlags().BoolVar(&diskExport, "export.disk", false, "export findings to disk") 150 | rootCmd.PersistentFlags().StringVarP(&diskFilePath, "export.disk.filename", "o", "", "output file name on disk") 151 | rootCmd.MarkFlagsRequiredTogether("export.disk", "export.disk.filename") 152 | 153 | // Export to cassandra 154 | rootCmd.PersistentFlags().BoolVar(&cassandraExport, "export.cassandra", false, "export findings to cassandra") 155 | rootCmd.PersistentFlags().StringVar(&cassandraConnectionString, "export.cassandra.connection-string", "", "cassandra connection string") 156 | rootCmd.PersistentFlags().StringVar(&cassandraKeyspaceDotTable, "export.cassandra.table", "recon.sslsearch", "cassandra keyspace.table name to store data") 157 | rootCmd.PersistentFlags().StringVar(&cassandraRecordTimeStampKey, "export.cassandra.result-ts-key", "", "cassandra default result timestamp key (defaults to YYYY-MM-DD)") 158 | 
rootCmd.MarkFlagsRequiredTogether("export.cassandra", "export.cassandra.connection-string") 159 | 160 | // Export to elasticsearch 161 | rootCmd.PersistentFlags().BoolVar(&elasticsearchExport, "export.elastic", false, "export findings to elasticsearch") 162 | rootCmd.PersistentFlags().StringVar(&elasticsearchHost, "export.elastic.host", "", "elasticsearch host where data will be sent") 163 | rootCmd.PersistentFlags().StringVar(&elasticsearchUsername, "export.elastic.username", "", "elasticsearch username for authentication") 164 | rootCmd.PersistentFlags().StringVar(&elasticsearchPassword, "export.elastic.password", "", "elasticsearch password for authentication") 165 | rootCmd.PersistentFlags().StringVar(&elasticsearchIndex, "export.elastic.index", "", "elasticsearch index where data will be stored (default: sslsearch-YYYY-MM-DD)") 166 | rootCmd.MarkFlagsRequiredTogether("export.elastic", "export.elastic.host", "export.elastic.username", "export.elastic.password") 167 | 168 | rootCmd.MarkFlagsMutuallyExclusive("export.disk", "export.elastic", "export.cassandra") 169 | 170 | // Recon flags 171 | rootCmd.PersistentFlags().IntVar(&serverHeaderThreadCount, "server-header-threads", 16, "number of threads to use for server header result enrichment") 172 | rootCmd.PersistentFlags().IntVar(&jarmRetryCount, "jarm-retry-count", 3, "retry attempts for JARM fingerprint") 173 | rootCmd.PersistentFlags().IntVar(&jarmFingerprintThreadCount, "jarm-threads", 64, "number of threads to use for JARM fingerprint enrichment") 174 | } 175 | -------------------------------------------------------------------------------- /cmd/scan.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "fmt" 7 | "net" 8 | "regexp" 9 | "sync" 10 | "time" 11 | 12 | "github.com/jedib0t/go-pretty/progress" 13 | 14 | "github.com/sirupsen/logrus" 15 | ) 16 | 17 | func ScanCertificatesInCidr(ctx context.Context, 
cidrChan chan CidrRange, ports []string, resultChan chan *CertResult, wg *sync.WaitGroup, keywordRegexString string) { 18 | defer wg.Done() 19 | keywordRegex := regexp.MustCompile("(?i)" + keywordRegexString) 20 | for cidr := range cidrChan { 21 | ip, ipNet, err := net.ParseCIDR(cidr.Cidr) 22 | if err != nil { 23 | log.WithFields(logrus.Fields{"state": "scan", "errmsg": err.Error(), "cidr": cidr}).Errorf("failed to parse CIDR") 24 | continue 25 | } 26 | log.WithFields(logrus.Fields{"state": "scan", "cidr": cidr}).Debugf("starting scan for CIDR range") 27 | for ip := ip.Mask(ipNet.Mask); ipNet.Contains(ip); incrementIP(ip) { 28 | for _, port := range ports { 29 | remote := getRemoteAddrString(ip.String(), port) 30 | result, err := ScanRemote(ctx, ip, port, keywordRegex) 31 | if err != nil { 32 | log.WithFields(logrus.Fields{"state": "deepscan", "remote": remote, "errmsg": err.Error()}).Tracef("error") 33 | continue 34 | } else { 35 | result.CSP = cidr.CSP 36 | result.Region = cidr.Region 37 | result.Meta = cidr.Meta 38 | result.Timestamp = time.Now() 39 | resultChan <- result 40 | } 41 | } 42 | } 43 | cidrRangesScanned.Add(1) 44 | } 45 | } 46 | 47 | func ScanRemote(ctx context.Context, ip net.IP, port string, keywordRegex *regexp.Regexp) (*CertResult, error) { 48 | remote := getRemoteAddrString(ip.String(), port) 49 | log.WithFields(logrus.Fields{"state": "deepscan", "remote": remote}).Tracef("scanning") 50 | select { 51 | case <-ctx.Done(): 52 | return nil, errCtxCancelled 53 | default: 54 | dialer := dialerPool.Get().(*net.Dialer) 55 | defer dialerPool.Put(dialer) 56 | tlsConfig := tlsConfigPool.Get().(*tls.Config) 57 | defer tlsConfigPool.Put(tlsConfig) 58 | conn, err := tls.DialWithDialer(dialer, "tcp", remote, tlsConfig) 59 | ipsScanned.Add(1) 60 | ipScanRate.Add(1) 61 | if err != nil { 62 | ipsErrConn.Add(1) 63 | return nil, errConn 64 | } 65 | defer conn.Close() 66 | certs := conn.ConnectionState().PeerCertificates 67 | if len(certs) == 0 { 68 | 
ipsErrNoTls.Add(1) 69 | return nil, errNoTls 70 | } 71 | subjectMatch := keywordRegex.MatchString(certs[0].Subject.String()) 72 | sanMatch := keywordRegex.MatchString(fmt.Sprintf("%s", certs[0].DNSNames)) 73 | log.WithFields(logrus.Fields{"state": "deepscan", "remote": remote, "subject": certs[0].Subject.String(), "match": subjectMatch || sanMatch}).Debugf("SANs: %s ", certs[0].DNSNames) 74 | if subjectMatch || sanMatch { 75 | totalFindings.Add(1) 76 | return &CertResult{ 77 | Ip: ip.String(), 78 | Port: port, 79 | Subject: certs[0].Subject.CommonName, 80 | Issuer: certs[0].Issuer.CommonName, 81 | SANs: certs[0].DNSNames, 82 | }, nil 83 | } 84 | return nil, errNoMatch 85 | } 86 | } 87 | 88 | func Summarize(start, stop time.Time) { 89 | elapsedTime := stop.Sub(start) 90 | percentage := float64(ipsScanned.Load()) / TOTAL_IPv4_ADDR_COUNT 91 | ipsPerSecond := float64(1000000000*ipsScanned.Load()) / float64(elapsedTime) 92 | findingsPerSecond := float64(1000000000*totalFindings.Load()) / float64(elapsedTime) 93 | fmt.Printf("Total IPs Scanned : %v / %v (%.8f %% of the internet)\n", ipsScanned.Load(), ipsToScan.Load(), percentage) 94 | fmt.Printf("Total Findings : %v \n", totalFindings.Load()) 95 | fmt.Printf("Total CIDR ranges Scanned : %v \n", cidrRangesScanned.Load()) 96 | fmt.Printf("Server Headers : %v / %v \n", serverHeadersGrabbed.Load(), serverHeadersScanned.Load()) 97 | fmt.Printf("Jarm Fingerprints : %v / %v \n", jarmFingerprintsGrabbed.Load(), jarmFingerprintsScanned.Load()) 98 | fmt.Printf("Results Export : %v / %v \n", resultsExported.Load(), resultsProcessed.Load()) 99 | fmt.Printf("Time Elapsed : %v \n", elapsedTime) 100 | fmt.Printf("Scan Speed : %.2f IPs/second | %.2f findings/second \n", ipsPerSecond, findingsPerSecond) 101 | } 102 | 103 | func PrintProgressToConsole(refreshInterval int) { 104 | for { 105 | ipScanRate.Store(0) 106 | fmt.Printf("Progress: CIDRs [ %v / %v ] IPs Scanned: %v / %v | Findings: %v | Headers Grabbed: %v / %v | JARM: %v / %v | 
// PrintProgressToConsole prints a one-line textual progress summary every
// refreshInterval seconds. It is used instead of the interactive progress bar
// when --console-progress is set (e.g. when stdout is captured to a log file).
// It loops forever: started as a daemon goroutine, it dies with the process.
func PrintProgressToConsole(refreshInterval int) {
	for {
		// reset the per-interval scan-rate counter at the start of each window
		ipScanRate.Store(0)
		fmt.Printf("Progress: CIDRs [ %v / %v ] IPs Scanned: %v / %v | Findings: %v | Headers Grabbed: %v / %v | JARM: %v / %v | Export: %v / %v | JT: %d | HT: %d \n",
			cidrRangesScanned.Load(), cidrRangesToScan.Load(),
			ipsScanned.Load(), ipsToScan.Load(), totalFindings.Load(),
			serverHeadersGrabbed.Load(), serverHeadersScanned.Load(),
			jarmFingerprintsGrabbed.Load(), jarmFingerprintsScanned.Load(),
			resultsExported.Load(), resultsProcessed.Load(), activeJarmThreads.Load(), activeHeaderThreads.Load())
		time.Sleep(time.Second * time.Duration(int64(refreshInterval)))
	}
}

// ProgressBar renders five go-pretty progress trackers (CIDRs, IPs, headers,
// JARM, export) that are refreshed every second. The package-level `state`
// variable (set by RunScan as each pipeline stage drains) gates when a tracker
// may be marked done: go-pretty stops updating a tracker once IsDone() fires,
// so while an upstream stage is still producing we hold the tracker one unit
// below its (moving) total. Loops forever as a daemon goroutine.
func ProgressBar(refreshInterval int) {
	p := progress.NewWriter()
	defer p.Stop()
	p.SetMessageWidth(24)
	p.SetNumTrackersExpected(5)
	p.SetStyle(progress.StyleDefault)
	p.SetTrackerLength(40)
	p.SetTrackerPosition(progress.PositionRight)
	p.SetUpdateFrequency(time.Second * time.Duration(int64(refreshInterval)))
	p.SetAutoStop(false)
	p.Style().Colors = progress.StyleColorsExample
	go p.Render()
	cidrTracker := progress.Tracker{Message: "CIDR Ranges Scanned"}
	ipTracker := progress.Tracker{Message: "IP Addresses Scanned"}
	headerTracker := progress.Tracker{Message: "Headers Grabbed"}
	jarmTracker := progress.Tracker{Message: "JARM Fingerprints"}
	exportTracker := progress.Tracker{Message: "Exported Results"}
	log.Printf("starting progress bar thread")
	p.AppendTrackers([]*progress.Tracker{&cidrTracker, &ipTracker, &headerTracker, &jarmTracker, &exportTracker})
	for {
		// totals are moving targets: upstream stages keep raising them while running
		cidrTracker.Total = cidrRangesToScan.Load()
		cidrTracker.SetValue(cidrRangesScanned.Load())
		if cidrTracker.IsDone() && state < 2 {
			// scanning stage not finished yet - keep the tracker "undone"
			cidrTracker.SetValue(cidrTracker.Total - 1)
		}
		ipTracker.Total = ipsToScan.Load()
		ipTracker.SetValue(ipsScanned.Load())
		if ipTracker.IsDone() && state < 2 {
			ipTracker.SetValue(ipTracker.Total - 1)
		}
		headerTracker.Total = totalFindings.Load()
		headerTracker.SetValue(serverHeadersScanned.Load())
		if headerTracker.IsDone() && state < 3 {
			headerTracker.SetValue(headerTracker.Total - 1)
		}
		jarmTracker.Total = serverHeadersScanned.Load()
		jarmTracker.SetValue(jarmFingerprintsScanned.Load())
		if jarmTracker.IsDone() && state < 4 {
			jarmTracker.SetValue(jarmTracker.Total - 1)
		}
		exportTracker.Total = jarmFingerprintsScanned.Load()
		exportTracker.SetValue(resultsExported.Load())
		if exportTracker.IsDone() && state < 5 {
			// progress bar does not update number after it is marked "done" so keep it "undone" till we wait for export to finish
			exportTracker.SetValue(exportTracker.Total - 1)
		}
		time.Sleep(time.Second)
	}
}

// ServerHeaderEnrichment starts enrichmentThreads goroutines that read raw TLS
// findings from rawResultChan, attach HTTP server-header data, and forward them
// on the returned channel. Caller owns closing rawResultChan; the returned
// channel must be closed by the caller after wg is waited on (see RunScan).
func ServerHeaderEnrichment(ctx context.Context, rawResultChan chan *CertResult, enrichmentThreads int, wg *sync.WaitGroup) chan *CertResult {
	// generously buffered so slow HTTP probes do not stall the TLS scanners
	enrichedResultChan := make(chan *CertResult, enrichmentThreads*800)
	wg.Add(enrichmentThreads)
	for i := 0; i < enrichmentThreads; i++ {
		go headerEnrichmentThread(ctx, rawResultChan, enrichedResultChan, wg)
	}
	return enrichedResultChan
}

// JARMFingerprintEnrichment starts enrichmentThreads goroutines that attach a
// JARM TLS fingerprint to each result from rawResultChan and forward them on
// the returned channel. Same channel-ownership contract as ServerHeaderEnrichment.
func JARMFingerprintEnrichment(ctx context.Context, rawResultChan chan *CertResult, enrichmentThreads int, wg *sync.WaitGroup) chan *CertResult {
	enrichedResultChan := make(chan *CertResult, enrichmentThreads*400)
	wg.Add(enrichmentThreads)
	for i := 0; i < enrichmentThreads; i++ {
		go jarmFingerprintEnrichmentThread(ctx, rawResultChan, enrichedResultChan, wg)
	}
	return enrichedResultChan
}
--------------------------------------------------------------------------------
/cmd/stats.go:
--------------------------------------------------------------------------------
package cmd

import (
	"context"
	"fmt"
	"github.com/redis/go-redis/v9"
	"github.com/sirupsen/logrus"
	"time"
)

// STATS_TTL bounds how long a worker's stats keys survive in redis, so stale
// entries from dead workers expire on their own.
const STATS_TTL = time.Minute * 10
context.Context, rdb *redis.Client, job *Job, hostname string, interval time.Duration) { 14 | prefix := fmt.Sprintf("sslsearch:workers:stats:%s", hostname) 15 | for { 16 | log.WithFields(logrus.Fields{"state": "stats", "type": "mgmt"}).Debugf("updating stats in redis") 17 | rdb.SAdd(ctx, fmt.Sprintf("sslsearch:workers:exec:%s", job.JobId), hostname) 18 | // update the job-id we are working on and all the associated statistics of the worker 19 | ExportStatsToRedis(ctx, rdb, prefix) 20 | time.Sleep(interval) 21 | } 22 | } 23 | 24 | func ExportStatsToRedis(ctx context.Context, rdb *redis.Client, redisKeyPrefix string) { 25 | rdb.Set(ctx, fmt.Sprintf("%s:cidr-ranges-to-scan", redisKeyPrefix), cidrRangesToScan.Load(), STATS_TTL) 26 | rdb.Set(ctx, fmt.Sprintf("%s:cidr-ranges-scanned", redisKeyPrefix), cidrRangesScanned.Load(), STATS_TTL) 27 | rdb.Set(ctx, fmt.Sprintf("%s:ips-to-scan", redisKeyPrefix), ipsToScan.Load(), STATS_TTL) 28 | rdb.Set(ctx, fmt.Sprintf("%s:ips-err-conn", redisKeyPrefix), ipsErrConn.Load(), STATS_TTL) 29 | rdb.Set(ctx, fmt.Sprintf("%s:ips-err-no-tls", redisKeyPrefix), ipsErrNoTls.Load(), STATS_TTL) 30 | rdb.Set(ctx, fmt.Sprintf("%s:ips-scanned", redisKeyPrefix), ipsScanned.Load(), STATS_TTL) 31 | rdb.Set(ctx, fmt.Sprintf("%s:total-findings", redisKeyPrefix), totalFindings.Load(), STATS_TTL) 32 | rdb.Set(ctx, fmt.Sprintf("%s:server-headers-grabbed", redisKeyPrefix), serverHeadersGrabbed.Load(), STATS_TTL) 33 | rdb.Set(ctx, fmt.Sprintf("%s:server-headers-scanned", redisKeyPrefix), serverHeadersScanned.Load(), STATS_TTL) 34 | rdb.Set(ctx, fmt.Sprintf("%s:jarm-fingerprints-grabbed", redisKeyPrefix), jarmFingerprintsGrabbed.Load(), STATS_TTL) 35 | rdb.Set(ctx, fmt.Sprintf("%s:jarm-fingerprints-scanned", redisKeyPrefix), jarmFingerprintsScanned.Load(), STATS_TTL) 36 | rdb.Set(ctx, fmt.Sprintf("%s:results-exported", redisKeyPrefix), resultsExported.Load(), STATS_TTL) 37 | rdb.Set(ctx, fmt.Sprintf("%s:results-processed", redisKeyPrefix), 
resultsProcessed.Load(), STATS_TTL) 38 | rdb.Set(ctx, fmt.Sprintf("%s:active-jarm-threads", redisKeyPrefix), activeJarmThreads.Load(), STATS_TTL) 39 | rdb.Set(ctx, fmt.Sprintf("%s:active-header-threads", redisKeyPrefix), activeHeaderThreads.Load(), STATS_TTL) 40 | } 41 | -------------------------------------------------------------------------------- /cmd/utils.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math" 8 | "net" 9 | "os" 10 | "os/signal" 11 | "regexp" 12 | "strconv" 13 | "strings" 14 | "sync" 15 | "syscall" 16 | "time" 17 | 18 | "github.com/HarshVaragiya/jarm-go" 19 | "github.com/seancfoley/ipaddress-go/ipaddr" 20 | "github.com/sirupsen/logrus" 21 | "github.com/valyala/fasthttp" 22 | ) 23 | 24 | func PerformPreRunChecks(checkRegion bool) { 25 | CheckInputParameters() 26 | UpdateLogLevel() 27 | PerformOutputChecks() 28 | if checkRegion { 29 | CheckRegionRegex() 30 | } 31 | log.WithFields(logrus.Fields{"state": "main"}).Debugf("sanity checks passed") 32 | } 33 | 34 | func CheckInputParameters() { 35 | if _, err := regexp.Compile("(?i)" + keywordRegexString); err != nil { 36 | log.WithFields(logrus.Fields{"state": "main"}).Fatal("could not compile keyword regex") 37 | } 38 | } 39 | 40 | func CheckRegionRegex() { 41 | if _, err := regexp.Compile(regionRegexString); err != nil { 42 | log.WithFields(logrus.Fields{"state": "main"}).Fatal("could not compile region regex") 43 | } 44 | } 45 | 46 | func getRecordKey() string { 47 | currentTime := time.Now() 48 | formattedTime := currentTime.Format("2006-01-02") 49 | return formattedTime 50 | } 51 | 52 | func UpdateLogLevel() { 53 | if debugFlag { 54 | log.SetLevel(logrus.DebugLevel) 55 | log.WithFields(logrus.Fields{"state": "main"}).Debugf("enabled debug logging") 56 | } 57 | } 58 | 59 | func PerformOutputChecks() { 60 | if !diskExport && !elasticsearchExport && !cassandraExport { 61 | 
// PerformOutputChecks validates the export configuration and fills in
// defaults. Fatals when no export target is selected or when the disk output
// file already exists (never overwrite previous findings).
func PerformOutputChecks() {
	if !diskExport && !elasticsearchExport && !cassandraExport {
		log.WithFields(logrus.Fields{"state": "checks"}).Fatal("export target disk / cassandra / elasticsearch must be configured")
	}
	if diskExport && diskFilePath != "" {
		if _, err := os.Stat(diskFilePath); err == nil {
			log.WithFields(logrus.Fields{"state": "checks"}).Fatal("output file already exists!")
		} else if errors.Is(err, os.ErrNotExist) {
			log.WithFields(logrus.Fields{"state": "checks"}).Debugf("output file does not exist and will be created")
		}
	}
	// default the cassandra record key / elasticsearch index to today's date
	if cassandraExport && cassandraConnectionString != "" && cassandraRecordTimeStampKey == "" {
		cassandraRecordTimeStampKey = getRecordKey()
		log.WithFields(logrus.Fields{"state": "checks"}).Infof("cassandra output record key: %s", cassandraRecordTimeStampKey)
	}
	if elasticsearchExport && elasticsearchHost != "" && elasticsearchIndex == "" {
		elasticsearchIndex = fmt.Sprintf("sslsearch-%s", getRecordKey())
		log.WithFields(logrus.Fields{"state": "checks"}).Infof("elasticsearch output index: %s", elasticsearchIndex)
	}
}

// ScanCloudServiceProvider wires a provider's CIDR feed into the scan pipeline:
// one goroutine streams provider ranges into cspCidrChan, a second splits each
// range into per-goroutine sub-ranges on cidrChan, then RunScan consumes them.
// cidrChan is closed by the splitter when the provider feed is exhausted or the
// context is cancelled, which in turn lets the scanner threads exit.
func ScanCloudServiceProvider(ctx context.Context, csp string, cloudServiceProvider CidrRangeInput) {
	cidrChan := make(chan CidrRange, threadCount*5)
	cspCidrChan := make(chan CidrRange, threadCount*2)

	go func() {
		// provider implementations close cspCidrChan when done
		cloudServiceProvider.GetCidrRanges(ctx, cspCidrChan, regionRegexString)
	}()
	go func() {
		defer close(cidrChan)
		for {
			select {
			case <-ctx.Done():
				log.WithFields(logrus.Fields{"state": "main", "action": "divide-cidr", "csp": csp}).Info("context done")
				return
			case cspCidr, open := <-cspCidrChan:
				if !open {
					log.WithFields(logrus.Fields{"state": "main", "action": "divide-cidr", "csp": csp}).Info("done generating sub cidr ranges")
					return
				}
				if err := SplitCIDR(cspCidr, cidrSuffixPerGoRoutine, cidrChan); err != nil {
					log.WithFields(logrus.Fields{"state": "main", "action": "divide-cidr", "errmsg": err.Error(), "csp": csp}).Fatal("error generating sub-CIDR ranges")
				}
			}
		}
	}()
	RunScan(cidrChan)
}

// RunScan drives the whole pipeline: TLS scanners -> server-header enrichment
// -> JARM enrichment -> export. Each stage has its own WaitGroup, and stages
// are drained strictly in order: wait on a stage, then close its output
// channel so the next stage's range-loops terminate. The package-level `state`
// variable (read by ProgressBar) tracks which stage is currently draining.
// Do not reorder the Wait/close pairs - closing early would panic senders.
func RunScan(cidrChan chan CidrRange) {
	ports := strings.Split(portsString, ",")
	log.WithFields(logrus.Fields{"state": "main"}).Infof("ports to be scanned: %s", ports)
	resultChan := make(chan *CertResult, threadCount*8)

	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()

	// handle interrupt (Ctrl + C): first SIGINT cancels the context for a
	// graceful drain, a second SIGINT force-exits
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT)
	go func() {
		s := <-signals
		log.WithFields(logrus.Fields{"state": "main"}).Infof("received %v ... cancelling context.", s.String())
		log.WithFields(logrus.Fields{"state": "main"}).Infof("waiting for threads to exit ...")
		cancelFunc()
		fmt.Printf("\n\n\n\n\n\n")
		s = <-signals
		log.WithFields(logrus.Fields{"state": "main"}).Fatalf("forcing exit due to %v", s.String())
	}()

	// start scanning
	startTime := time.Now()
	log.WithFields(logrus.Fields{"state": "main"}).Info("starting scanner threads")
	scanWg := &sync.WaitGroup{}
	scanWg.Add(threadCount)
	for i := 0; i < threadCount; i++ {
		go ScanCertificatesInCidr(ctx, cidrChan, ports, resultChan, scanWg, keywordRegexString)
	}

	serverHeaderWg := &sync.WaitGroup{}
	headerEnrichedResultsChan := ServerHeaderEnrichment(ctx, resultChan, serverHeaderThreadCount, serverHeaderWg)
	jarmFingerprintWg := &sync.WaitGroup{}
	enrichedResultChan := JARMFingerprintEnrichment(ctx, headerEnrichedResultsChan, jarmFingerprintThreadCount, jarmFingerprintWg)

	// export results
	resultWg := &sync.WaitGroup{}
	resultWg.Add(1)
	exportTarget := GetExportTarget()
	go exportTarget.Export(enrichedResultChan, resultWg)

	if consoleProgressLog {
		go PrintProgressToConsole(consoleRefreshSeconds)
	} else {
		go ProgressBar(consoleRefreshSeconds)
	}

	// wait for tls scanning to finish
	state = 1
	log.WithFields(logrus.Fields{"state": "main"}).Info("waiting for tls scanner threads to finish scanning")
	scanWg.Wait()
	close(resultChan)

	// wait for enrichment to finish
	state = 2
	log.WithFields(logrus.Fields{"state": "main"}).Info("waiting for server header enrichment threads to finish")
	serverHeaderWg.Wait()
	close(headerEnrichedResultsChan)
	log.WithFields(logrus.Fields{"state": "main"}).Info("server header enrichment threads finished")

	state = 3
	log.WithFields(logrus.Fields{"state": "main"}).Infof("waiting for jarm fingerprint enrichment threads to finish")
	jarmFingerprintWg.Wait()
	close(enrichedResultChan)
	log.WithFields(logrus.Fields{"state": "main"}).Info("jarm fingerprint enrichment threads finished")

	// wait for export to finish
	state = 4
	log.WithFields(logrus.Fields{"state": "main"}).Info("waiting for export threads to finish")
	resultWg.Wait()
	log.WithFields(logrus.Fields{"state": "main"}).Info("done exporting to target")

	state = 5
	stopTime := time.Now()
	Summarize(startTime, stopTime)
}
{ 152 | go ProgressBar(consoleRefreshSeconds) 153 | } 154 | 155 | // wait for tls scanning to finish 156 | state = 1 157 | log.WithFields(logrus.Fields{"state": "main"}).Info("waiting for tls scanner threads to finish scanning") 158 | scanWg.Wait() 159 | close(resultChan) 160 | 161 | // wait for enrichment to finish 162 | state = 2 163 | log.WithFields(logrus.Fields{"state": "main"}).Info("waiting for server header enrichment threads to finish") 164 | serverHeaderWg.Wait() 165 | close(headerEnrichedResultsChan) 166 | log.WithFields(logrus.Fields{"state": "main"}).Info("server header enrichment threads finished") 167 | 168 | state = 3 169 | log.WithFields(logrus.Fields{"state": "main"}).Infof("waiting for jarm fingerprint enrichment threads to finish") 170 | jarmFingerprintWg.Wait() 171 | close(enrichedResultChan) 172 | log.WithFields(logrus.Fields{"state": "main"}).Info("jarm fingerprint enrichment threads finished") 173 | 174 | // wait for export to finish 175 | state = 4 176 | log.WithFields(logrus.Fields{"state": "main"}).Info("waiting for export threads to finish") 177 | resultWg.Wait() 178 | log.WithFields(logrus.Fields{"state": "main"}).Info("done exporting to target") 179 | 180 | state = 5 181 | stopTime := time.Now() 182 | Summarize(startTime, stopTime) 183 | } 184 | 185 | func GetCspInstance(cspString string) (CidrRangeInput, error) { 186 | cspString = strings.ToLower(cspString) 187 | if cspString == "aws" { 188 | return AWS{}, nil 189 | } else if cspString == "gcp" { 190 | return GCP{}, nil 191 | } else if cspString == "oracle" { 192 | return Oracle{}, nil 193 | } else if cspString == "digital-ocean" { 194 | return DigitalOcean{}, nil 195 | } else if cspString == "cloudflare" { 196 | return Cloudflare{}, nil 197 | } 198 | return nil, fmt.Errorf("unknown cloud service provider") 199 | } 200 | 201 | func incrementIP(ip net.IP) { 202 | for j := len(ip) - 1; j >= 0; j-- { 203 | ip[j]++ 204 | if ip[j] > 0 { 205 | break 206 | } 207 | } 208 | } 209 | 210 | func 
getRemoteAddrString(ip, port string) string { 211 | return fmt.Sprintf("%v:%v", ip, port) 212 | } 213 | 214 | func SplitCIDR(cidrString CidrRange, suffixLenPerGoRoutine int, cidrChan chan CidrRange) error { 215 | cidr := ipaddr.NewIPAddressString(cidrString.Cidr).GetAddress() 216 | cidrRange := cidr.GetPrefixLen().Len() 217 | adjustPrefixLength := 32 - cidrRange - suffixLenPerGoRoutine 218 | if adjustPrefixLength < 0 { 219 | adjustPrefixLength = 0 220 | } 221 | for i := cidr.AdjustPrefixLen(adjustPrefixLength).PrefixBlockIterator(); i.HasNext(); { 222 | nextCidr := i.Next() 223 | cidrChan <- CidrRange{Cidr: nextCidr.String(), CSP: cidrString.CSP, Region: cidrString.Region} 224 | log.WithFields(logrus.Fields{"state": "split-cidr"}).Debugf("added cidr range %s for scanning", nextCidr) 225 | cidrRangesToScan.Add(1) 226 | ipsToScan.Add(int64(math.Pow(2, float64(32-nextCidr.GetPrefixLen().Len())))) 227 | } 228 | return nil 229 | } 230 | 231 | func headerEnrichmentThread(ctx context.Context, rawResultChan, enrichedResultChan chan *CertResult, wg *sync.WaitGroup) { 232 | defer wg.Done() 233 | activeHeaderThreads.Add(1) 234 | log.WithFields(logrus.Fields{"state": "enrichment"}).Debugf("server header enrichment thread starting") 235 | for rawResult := range rawResultChan { 236 | serverHeader, allHeaders, err := GrabServerHeaderForRemote(ctx, getRemoteAddrString(rawResult.Ip, rawResult.Port)) 237 | if err == nil { 238 | serverHeadersGrabbed.Add(1) 239 | } 240 | if val, ok := allHeaders["Host"]; ok { 241 | rawResult.Host = val 242 | } 243 | rawResult.Server = serverHeader 244 | rawResult.Headers = allHeaders 245 | serverHeadersScanned.Add(1) 246 | enrichedResultChan <- rawResult 247 | } 248 | activeHeaderThreads.Add(-1) 249 | log.WithFields(logrus.Fields{"state": "enrichment"}).Debugf("server header enrichment thread exiting") 250 | } 251 | 252 | func jarmFingerprintEnrichmentThread(ctx context.Context, rawResultChan, enrichedResultChan chan *CertResult, wg *sync.WaitGroup) { 
253 | defer wg.Done() 254 | activeJarmThreads.Add(1) 255 | log.WithFields(logrus.Fields{"state": "enrichment"}).Debugf("JARM Fingerprint enrichment thread starting") 256 | for rawResult := range rawResultChan { 257 | if jarmFingerprint, err := GetJARMFingerprint(getRemoteAddrString(rawResult.Ip, rawResult.Port)); err == nil { 258 | rawResult.JARM = jarmFingerprint 259 | jarmFingerprintsGrabbed.Add(1) 260 | log.WithFields(logrus.Fields{"state": "enrichment", "remote": getRemoteAddrString(rawResult.Ip, rawResult.Port)}).Debugf("JARM Fingerprint: %v", jarmFingerprint) 261 | } else { 262 | rawResult.JARM = jarmFingerprint 263 | log.WithFields(logrus.Fields{"state": "enrichment", "remote": getRemoteAddrString(rawResult.Ip, rawResult.Port), "errmsg": err.Error()}).Tracef("JARM Fingerprint: %v ", jarmFingerprint) 264 | } 265 | jarmFingerprintsScanned.Add(1) 266 | enrichedResultChan <- rawResult 267 | } 268 | activeJarmThreads.Add(-1) 269 | log.WithFields(logrus.Fields{"state": "enrichment"}).Debugf("JARM Fingerprint enrichment thread exiting") 270 | } 271 | 272 | func GrabServerHeaderForRemote(ctx context.Context, remote string) (string, map[string]string, error) { 273 | client := httpClientPool.Get().(*fasthttp.Client) 274 | defer httpClientPool.Put(client) 275 | req := fasthttp.AcquireRequest() 276 | resp := fasthttp.AcquireResponse() 277 | defer fasthttp.ReleaseRequest(req) 278 | defer fasthttp.ReleaseResponse(resp) 279 | client.ReadTimeout = time.Second * 15 280 | client.MaxConnDuration = time.Second * 15 281 | client.MaxIdleConnDuration = time.Second * 15 282 | req.SetRequestURI(fmt.Sprintf("https://%s", remote)) 283 | err := client.DoTimeout(req, resp, 10*time.Second) 284 | allHeaders := make(map[string]string) 285 | if err != nil { 286 | return "", allHeaders, err 287 | } 288 | resp.Header.EnableNormalizing() 289 | resp.Header.VisitAll(func(key, value []byte) { 290 | allHeaders[string(key)] = string(value) 291 | }) 292 | return string(resp.Header.Peek("Server")), 
allHeaders, nil 293 | } 294 | 295 | func GetJARMFingerprint(remote string) (string, error) { 296 | host, port := SplitRemoteAddr(remote) 297 | target := jarm.Target{ 298 | Host: host, 299 | Port: port, 300 | Retries: jarmRetryCount, 301 | } 302 | res, err := jarm.Fingerprint(target) 303 | if res == nil { 304 | return "", err 305 | } 306 | if err == nil { 307 | return res.Hash, res.Error 308 | } 309 | return res.Hash, err 310 | } 311 | 312 | func SplitRemoteAddr(remote string) (host string, port int) { 313 | s := strings.Split(remote, ":") 314 | host = s[0] 315 | port, _ = strconv.Atoi(s[1]) 316 | return 317 | } 318 | -------------------------------------------------------------------------------- /cmd/worker.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 Harsh Varagiya 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in 12 | all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 20 | THE SOFTWARE. 
21 | */ 22 | package cmd 23 | 24 | import ( 25 | "github.com/spf13/cobra" 26 | "time" 27 | ) 28 | 29 | const SSLSEARCH_JOB_QUEUE_TODO = "sslsearch:jobs:todo" 30 | const SSLSEARCH_JOBS_IN_PROGRESS = "sslsearch:jobs:in-progress" 31 | const SSLSEARCH_JOB_QUEUE_DONE = "sslsearch:jobs:done" 32 | 33 | var ( 34 | redisHost string 35 | ) 36 | 37 | // workerCmd represents the worker command 38 | var workerCmd = &cobra.Command{ 39 | Use: "worker", 40 | Short: "sslsearch worker subcommand", 41 | Long: `used to run sslearch as worker to execute background jobs`, 42 | Run: func(cmd *cobra.Command, args []string) { 43 | }, 44 | } 45 | 46 | func init() { 47 | rootCmd.AddCommand(workerCmd) 48 | workerCmd.PersistentFlags().StringVar(&redisHost, "redis.host", "", "redis host url") 49 | 50 | } 51 | 52 | type Job struct { 53 | JobId string `json:"job_id"` 54 | TaskQueue string `json:"task_queue"` 55 | Name string `json:"name"` 56 | Description string `json:"description"` 57 | ExportIndex string `json:"export_index"` 58 | Status string `json:"status"` 59 | JobSubmitTime time.Time `json:"job_submit_time"` 60 | JobDoneTime time.Time `json:"job_done_time"` 61 | } 62 | -------------------------------------------------------------------------------- /default.pgo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HarshVaragiya/sslsearch/cb389773e241defe1292f38b916c2b68bd957726/default.pgo -------------------------------------------------------------------------------- /docs/Export.md: -------------------------------------------------------------------------------- 1 | # Disk 2 | 3 | - good for testing & one-off tasks. 4 | - export is cert-result JSON (one record per line). 5 | - output has low entropy and can be compressed to not waste disk space. 
6 | 7 | ```bash 8 | sslsearch \ 9 | --export.disk # tells sslsearch to export findings to disk 10 | --export.disk.filename 'result.log' # output file name 11 | ``` 12 | 13 | 14 | # Elasticsearch 15 | 16 | - good for historical data, archiving, security monitoring, dashboarding. 17 | - index rate can be ~500-1000 docs/second with 6vCPUs & 12 GB RAM (k8s). 18 | - export index would be `sslsearch-YYYY-MM-DD`. 19 | 20 | ```bash 21 | sslsearch \ 22 | --export.elastic # tells sslsearch to export findings to elasticsearch 23 | --export.elastic.host 'https://192.168.0.192:9200' # elasticsearch host 24 | --export.elastic.username 'elastic' # elasticsearch username 25 | --export.elastic.password 'test-password' # elasticsearch password 26 | ``` 27 | 28 | 29 | # Cassandra / ScyllaDB 30 | 31 | - good for long term storage, archival, historical data. 32 | - enabling `zstd` compression would save a lot of disk space compared to other solutions. 33 | - querying data, etc would be more complicated than elasticsearch. 34 | 35 | ### Setup for Cassandra / ScyllaDB 36 | 37 | - Create `recon` keyspace (you can skip if you have any other existing keyspace). 38 | 39 | ```cqlsh 40 | create keyspace recon with replication = {'class': 'SimpleStrategy', 'replication_factor': 1}; 41 | ``` 42 | 43 | - create the `sslsearch` table in the selected keyspace. 
```cqlsh
CREATE TABLE IF NOT EXISTS sslsearch (
    record_ts TEXT,
    ip TEXT,
    port TEXT,
    subject TEXT,
    issuer TEXT,
    sans LIST<TEXT>,
    jarm TEXT,
    csp TEXT,
    region TEXT,
    meta TEXT,
    timestamp TIMESTAMP,
    headers MAP<TEXT, TEXT>,
    server TEXT,
    host TEXT,
    PRIMARY KEY ((ip, port, record_ts), timestamp)
)
WITH compression = {
    'sstable_compression' : 'ZstdCompressor',
    'chunk_length_in_kb' : '128',
    'compression_level' : '22'
};
```

```bash
sslsearch \
 --export.cassandra                                         # tells sslsearch to export findings to cassandra
 --export.cassandra.connection-string '192.168.0.192:9042'  # cassandra connection string (host:port)
 --export.cassandra.table 'recon.sslsearch'                 # cassandra table name: default "recon.sslsearch"
 --export.cassandra.result-ts-key '2024-11-25'              # cassandra result ts key (for lifecycle management)
```

--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
module github.com/HarshVaragiya/sslsearch

go 1.23
toolchain go1.24.1

replace github.com/gocql/gocql => github.com/scylladb/gocql v1.13.0

require (
	github.com/HarshVaragiya/jarm-go v0.0.1
	github.com/elastic/go-elasticsearch/v8 v8.13.1
	github.com/gocql/gocql v0.0.0-00010101000000-000000000000
	github.com/google/uuid v1.6.0
	github.com/jedib0t/go-pretty v4.3.0+incompatible
	github.com/minio/minio-go/v7 v7.0.78
	github.com/redis/go-redis/v9 v9.6.3
	github.com/seancfoley/ipaddress-go v1.5.4
	github.com/sirupsen/logrus v1.9.2
	github.com/spf13/cobra v1.7.0
	github.com/spf13/viper v1.19.0
	github.com/valyala/fasthttp v1.47.0
)

require (
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // 
indirect 26 | ) 27 | 28 | require ( 29 | github.com/andybalholm/brotli v1.0.5 // indirect 30 | github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect 31 | github.com/dustin/go-humanize v1.0.1 // indirect 32 | github.com/elastic/elastic-transport-go/v8 v8.5.0 // indirect 33 | github.com/fsnotify/fsnotify v1.7.0 // indirect 34 | github.com/go-ini/ini v1.67.0 // indirect 35 | github.com/go-logr/logr v1.4.1 // indirect 36 | github.com/go-logr/stdr v1.2.2 // indirect 37 | github.com/go-openapi/errors v0.22.0 // indirect 38 | github.com/go-openapi/strfmt v0.23.0 // indirect 39 | github.com/goccy/go-json v0.10.3 // indirect 40 | github.com/golang/snappy v0.0.3 // indirect 41 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect 42 | github.com/hashicorp/hcl v1.0.0 // indirect 43 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 44 | github.com/klauspost/compress v1.17.11 // indirect 45 | github.com/klauspost/cpuid/v2 v2.2.8 // indirect 46 | github.com/magiconair/properties v1.8.7 // indirect 47 | github.com/mattn/go-runewidth v0.0.16 // indirect 48 | github.com/minio/md5-simd v1.1.2 // indirect 49 | github.com/mitchellh/mapstructure v1.5.0 // indirect 50 | github.com/oklog/ulid v1.3.1 // indirect 51 | github.com/pelletier/go-toml/v2 v2.2.3 // indirect 52 | github.com/rivo/uniseg v0.2.0 // indirect 53 | github.com/rs/xid v1.6.0 // indirect 54 | github.com/sagikazarmark/locafero v0.6.0 // indirect 55 | github.com/sagikazarmark/slog-shim v0.1.0 // indirect 56 | github.com/seancfoley/bintree v1.2.1 // indirect 57 | github.com/sourcegraph/conc v0.3.0 // indirect 58 | github.com/spf13/afero v1.11.0 // indirect 59 | github.com/spf13/cast v1.7.0 // indirect 60 | github.com/spf13/pflag v1.0.5 // indirect 61 | github.com/subosito/gotenv v1.6.0 // indirect 62 | github.com/valyala/bytebufferpool v1.0.0 // indirect 63 | go.mongodb.org/mongo-driver v1.14.0 // indirect 64 | go.opentelemetry.io/otel v1.24.0 // indirect 65 | 
go.opentelemetry.io/otel/metric v1.24.0 // indirect 66 | go.opentelemetry.io/otel/trace v1.24.0 // indirect 67 | go.uber.org/multierr v1.11.0 // indirect 68 | golang.org/x/crypto v0.36.0 // indirect 69 | golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect 70 | golang.org/x/net v0.38.0 // indirect 71 | golang.org/x/sys v0.31.0 // indirect 72 | golang.org/x/text v0.23.0 // indirect 73 | gopkg.in/inf.v0 v0.9.1 // indirect 74 | gopkg.in/ini.v1 v1.67.0 // indirect 75 | gopkg.in/yaml.v3 v3.0.1 // indirect 76 | ) 77 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/HarshVaragiya/jarm-go v0.0.1 h1:kKiUKOkz8SnTsLETyjEvRJ7tADWIGdNgAOVTRSmL7Qs= 2 | github.com/HarshVaragiya/jarm-go v0.0.1/go.mod h1:2H9KlJR20/twNhVmC8MF5P4kJQxINRLu2sEEFioK+ns= 3 | github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= 4 | github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= 5 | github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= 6 | github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= 7 | github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= 8 | github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= 9 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= 10 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= 11 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 12 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 13 | 
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 14 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 15 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 16 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 17 | github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 18 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 19 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 20 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 21 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 22 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 23 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 24 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 25 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 26 | github.com/elastic/elastic-transport-go/v8 v8.5.0 h1:v5membAl7lvQgBTexPRDBO/RdnlQX+FM9fUVDyXxvH0= 27 | github.com/elastic/elastic-transport-go/v8 v8.5.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= 28 | github.com/elastic/go-elasticsearch/v8 v8.13.1 h1:du5F8IzUUyCkzxyHdrO9AtopcG95I/qwi2WK8Kf1xlg= 29 | github.com/elastic/go-elasticsearch/v8 v8.13.1/go.mod h1:DIn7HopJs4oZC/w0WoJR13uMUxtHeq92eI5bqv5CRfI= 30 | github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 31 | github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= 32 | github.com/fsnotify/fsnotify v1.7.0 
h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= 33 | github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= 34 | github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= 35 | github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= 36 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 37 | github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= 38 | github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 39 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 40 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 41 | github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= 42 | github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= 43 | github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= 44 | github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= 45 | github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= 46 | github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= 47 | github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= 48 | github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 49 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 50 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 51 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 52 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 53 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 54 | github.com/hailocab/go-hostpool 
v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= 55 | github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= 56 | github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= 57 | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 58 | github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 59 | github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 60 | github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= 61 | github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= 62 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 63 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 64 | github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 65 | github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= 66 | github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= 67 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 68 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 69 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 70 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 71 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 72 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 73 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 74 | github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= 75 | github.com/magiconair/properties 
v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= 76 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= 77 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 78 | github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= 79 | github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= 80 | github.com/minio/minio-go/v7 v7.0.78 h1:LqW2zy52fxnI4gg8C2oZviTaKHcBV36scS+RzJnxUFs= 81 | github.com/minio/minio-go/v7 v7.0.78/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= 82 | github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= 83 | github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 84 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= 85 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= 86 | github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= 87 | github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 88 | github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= 89 | github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= 90 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 91 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 92 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 93 | github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0= 94 | github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= 95 | github.com/rivo/uniseg v0.2.0 
h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= 96 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 97 | github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= 98 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 99 | github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= 100 | github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= 101 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 102 | github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= 103 | github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= 104 | github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= 105 | github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= 106 | github.com/scylladb/gocql v1.13.0 h1:QOr2yYuJuAfsoYPICxTj1RPm3Qm7qllSb3Q9B7WFEgU= 107 | github.com/scylladb/gocql v1.13.0/go.mod h1:ZLEJ0EVE5JhmtxIW2stgHq/v1P4fWap0qyyXSKyV8K0= 108 | github.com/seancfoley/bintree v1.2.1 h1:Z/iNjRKkXnn0CTW7jDQYtjW5fz2GH1yWvOTJ4MrMvdo= 109 | github.com/seancfoley/bintree v1.2.1/go.mod h1:hIUabL8OFYyFVTQ6azeajbopogQc2l5C/hiXMcemWNU= 110 | github.com/seancfoley/ipaddress-go v1.5.4 h1:ZdjewWC1J2y5ruQjWHwK6rA1tInWB6mz1ftz6uTm+Uw= 111 | github.com/seancfoley/ipaddress-go v1.5.4/go.mod h1:fpvVPC+Jso+YEhNcNiww8HQmBgKP8T4T6BTp1SLxxIo= 112 | github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= 113 | github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 114 | github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= 115 | github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= 116 | github.com/spf13/afero v1.11.0 
h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= 117 | github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= 118 | github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= 119 | github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= 120 | github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= 121 | github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= 122 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 123 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 124 | github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= 125 | github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= 126 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 127 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 128 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 129 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 130 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 131 | github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= 132 | github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= 133 | github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= 134 | github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 135 | github.com/valyala/fasthttp v1.47.0 h1:y7moDoxYzMooFpT5aHgNgVOQDrS3qlkfiP9mDtGGK9c= 136 | github.com/valyala/fasthttp v1.47.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA= 137 | go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= 138 | 
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= 139 | go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= 140 | go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= 141 | go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= 142 | go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= 143 | go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= 144 | go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= 145 | go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= 146 | go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= 147 | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 148 | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 149 | golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= 150 | golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= 151 | golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= 152 | golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= 153 | golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 154 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= 155 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 156 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 157 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 158 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 159 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 160 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 161 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 162 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 163 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 164 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 165 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 166 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 167 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 168 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 169 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= 170 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 171 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 172 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 173 | gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= 174 | gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 175 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 176 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 177 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 178 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 179 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 180 
| sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= 181 | sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= 182 | -------------------------------------------------------------------------------- /k8s-cron-example.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: CronJob 4 | metadata: 5 | name: sslsearch-ctl-cron 6 | namespace: sslsearch 7 | labels: 8 | app: sslsearch-ctl-cron 9 | spec: 10 | schedule: "5 0 * * *" 11 | timeZone: Asia/Kolkata 12 | jobTemplate: 13 | metadata: 14 | labels: 15 | app: sslsearch-ctl-cron 16 | spec: 17 | parallelism: 1 18 | completions: 1 19 | backoffLimit: 2 20 | template: 21 | metadata: 22 | labels: 23 | app: sslsearch-ctl-cron 24 | spec: 25 | containers: 26 | - name: sslsearch-ctl 27 | imagePullPolicy: Always 28 | image: ghcr.io/harshvaragiya/sslsearch:latest 29 | env: 30 | - name: REDIS_HOST 31 | value: 192.168.0.100:6379 32 | - name: TZ 33 | value: 'Asia/Kolkata' 34 | command: 35 | [ 36 | "/app/sslsearch", 37 | "worker", 38 | "add", 39 | "--suffix=12", 40 | "--target=aws,gcp,digital-ocean", 41 | ] 42 | resources: 43 | limits: 44 | cpu: "0.5" 45 | memory: "200Mi" 46 | requests: 47 | cpu: "0.1" 48 | memory: "50Mi" 49 | restartPolicy: Never 50 | successfulJobsHistoryLimit: 1 51 | failedJobsHistoryLimit: 1 52 | 53 | --- 54 | apiVersion: batch/v1 55 | kind: CronJob 56 | metadata: 57 | name: sslsearch-scanner-cron 58 | namespace: sslsearch 59 | labels: 60 | app: sslsearch-scanner-cron 61 | spec: 62 | schedule: "5 0 * * *" 63 | timeZone: Asia/Kolkata 64 | jobTemplate: 65 | metadata: 66 | labels: 67 | app: sslsearch-scanner-cron 68 | spec: 69 | activeDeadlineSeconds: 72000 # 20 Hours 70 | parallelism: 12 # change as per your requirements 71 | completions: 12 72 | backoffLimit: 3 73 | template: 74 | metadata: 75 | labels: 76 | app: sslsearch-scanner-cron 77 | spec: 78 | containers: 79 | - name: sslsearch-scanner 80 
| imagePullPolicy: Always 81 | image: ghcr.io/harshvaragiya/sslsearch:latest 82 | env: 83 | - name: MINIO_ENDPOINT # for dumping cpu & memory profiles export for debugging 84 | value: "192.168.0.100:9000" 85 | - name: ACCESS_KEY # MinIO access key for cpu & memory profiles export for debugging 86 | value: "" 87 | - name: SECRET_KEY # MinIO secret key for cpu & memory profiles export for debugging 88 | value: "" 89 | - name: BUCKET_NAME # MinIO bucket name for cpu & memory profiles export for debugging 90 | value: "" 91 | - name: REDIS_HOST # for job queue access 92 | value: "192.168.0.100:6379" 93 | - name: GOMEMLIMIT # try to reduce golang memory usage 94 | value: 1200MiB 95 | - name: TZ 96 | value: 'Asia/Kolkata' 97 | command: 98 | [ 99 | "/app/sslsearch", 100 | "worker", 101 | "process", # background worker 102 | "--export.elastic", # elasticsearch export 103 | "--export.elastic.username=elastic", # elasticsearch username 104 | "--export.elastic.password=elastic-password", # elasticsearch password 105 | "--export.elastic.host=https://192.168.0.100:9200", # modify this to elasticsearch host 106 | "--suffix=4", 107 | "--threads=4096", # threads per pod 108 | "--refresh=300", 109 | "--server-header-threads=64", # modify to change channel pressure 110 | "--jarm-threads=256", # modify to change channel pressure 111 | "--timeout=10" # tcp socket timeout 112 | ] 113 | resources: 114 | limits: # max container memory limit. 2x GoMemLimit. 
can be reduced 115 | cpu: "2" 116 | memory: "2840Mi" 117 | requests: # recommended ~0.5vCPU, 500MB RAM 118 | cpu: "0.4" 119 | memory: "600Mi" 120 | restartPolicy: Never 121 | successfulJobsHistoryLimit: 1 122 | failedJobsHistoryLimit: 1 123 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2023 Harsh Varagiya 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in 12 | all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 20 | THE SOFTWARE. 
21 | */ 22 | package main 23 | 24 | import ( 25 | "github.com/HarshVaragiya/sslsearch/cmd" 26 | ) 27 | 28 | func main() { 29 | cmd.Execute() 30 | } 31 | -------------------------------------------------------------------------------- /scripts/build.groovy: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent any 3 | stages { 4 | stage("build"){ 5 | steps { 6 | sh "docker buildx build --push --platform linux/arm64,linux/amd64 --tag ${REPOSITORY}/sslsearch:${IMAGE_TAG} ." 7 | } 8 | } 9 | } 10 | } 11 | --------------------------------------------------------------------------------