├── scope
│   ├── army
│   ├── att
│   ├── navy
│   ├── airbnb
│   └── pry0cc
├── bin
│   ├── worker
│   │   ├── requirements.txt
│   │   ├── worker.py
│   │   ├── Dockerfile
│   │   └── scanner.sh
│   ├── scheduler
│   │   ├── requirements.txt
│   │   ├── Dockerfile
│   │   └── scheduler.py
│   ├── client
│   │   ├── requirements.txt
│   │   └── client.py
│   ├── api
│   │   ├── requirements.txt
│   │   ├── Dockerfile
│   │   └── app.py
│   └── parser
│       └── import.py
├── .gitignore
├── config
│   └── notify.yaml
├── docker-compose.yml
└── README.md

/scope/army:
--------------------------------------------------------------------------------
army.mil
--------------------------------------------------------------------------------
/scope/att:
--------------------------------------------------------------------------------
att.com
--------------------------------------------------------------------------------
/scope/navy:
--------------------------------------------------------------------------------
navy.mil
--------------------------------------------------------------------------------
/scope/airbnb:
--------------------------------------------------------------------------------
airbnb.com
--------------------------------------------------------------------------------
/scope/pry0cc:
--------------------------------------------------------------------------------
pry0.cc
--------------------------------------------------------------------------------
/bin/worker/requirements.txt:
--------------------------------------------------------------------------------
redis
pymongo
--------------------------------------------------------------------------------
/bin/scheduler/requirements.txt:
--------------------------------------------------------------------------------
apscheduler
redis
--------------------------------------------------------------------------------
/bin/client/requirements.txt:
--------------------------------------------------------------------------------
tabulate
requests
argparse
--------------------------------------------------------------------------------
/bin/api/requirements.txt:
--------------------------------------------------------------------------------
pymongo
flask
redis
apscheduler
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
config/notify.yaml
bin/__pycache__
config/*
notify.yaml
--------------------------------------------------------------------------------
/config/notify.yaml:
--------------------------------------------------------------------------------
slack:
  - id: "slack"
    slack_channel: "hacking"
    slack_username: "pdiscovery"
    slack_format: "{{data}}"
    slack_webhook_url: "https://hooks.slack.com/services/KEY"
--------------------------------------------------------------------------------
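To sanity-check the webhook before wiring it into scans, you can pipe a test message through ProjectDiscovery's notify using this provider config (a minimal sketch; `-id slack` selects the provider block defined above):

```
echo "proteus webhook test" | notify -pc config/notify.yaml -id slack
```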
/bin/scheduler/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.8-slim-buster

WORKDIR /python-docker

COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt

COPY . .

CMD [ "python3", "scheduler.py" ]
--------------------------------------------------------------------------------
/bin/api/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.8-slim-buster

WORKDIR /python-docker

COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt

COPY . .

CMD [ "python3", "-m", "flask", "run", "--host=0.0.0.0", "--port=80" ]
--------------------------------------------------------------------------------
/bin/scheduler/scheduler.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

import redis

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger

r = redis.Redis(host='redis', port=6379, db=0)

scheduler = BlockingScheduler()

# Queue a recurring "asm" scan of the "dod" target every five hours.
@scheduler.scheduled_job(IntervalTrigger(hours=5))
def queue_job():
    print('queuing!')
    r.rpush('queue', 'dod:0:asm')

scheduler.start()
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
services:
  redis:
    image: redis
  mongo:
    image: mongo
  worker:
    image: proteus/worker
    build:
      context: bin/worker/
    volumes:
      - /home/op/.axiom/accounts/personal.json:/root/.axiom/accounts/default.json
      - /home/op/.axiom/modules:/root/.axiom/modules
      - /home/op/.ssh:/root/.ssh
      - /home/op/.proteus:/app
    depends_on:
      - redis
      - mongo
  api:
    image: proteus/api
    build:
      context: bin/api/
    # flask requires SIGINT to stop gracefully
    # (default stop signal from Compose is SIGTERM)
    stop_signal: SIGINT
    ports:
      - '127.0.0.1:80:80'
    depends_on:
      - redis
--------------------------------------------------------------------------------
/bin/worker/worker.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

import redis
import time
import subprocess
import threading

r = redis.Redis(host='redis', port=6379, db=0)
subprocess.call(['/root/.axiom/interact/axiom-account', 'default'])

def scan(data):
    # Jobs arrive as colon-delimited messages: "target:instances:module".
    parts = data.decode('utf-8').split(':')
    print(parts)
    if parts[2] == 'spinup':
        name, total = parts[0], parts[1]
        subprocess.call(['/root/.axiom/interact/axiom-fleet', name, '-i', total])
    else:
        subprocess.call(['sh', '/app/bin/worker/scanner.sh', data.decode('utf-8')])

# Poll the Redis queue and hand each job to its own worker thread.
while True:
    res = r.rpop('queue')
    if res is not None:
        x = threading.Thread(target=scan, args=(res,))
        x.start()
    time.sleep(1)
--------------------------------------------------------------------------------
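For reference, the scheduler, the API, and the worker all speak the same colon-delimited queue format, `target:instances:module`, where a module of `spinup` provisions a fleet instead of launching a scan. A minimal sketch of queueing a job by hand, assuming you publish the redis service's port 6379 to the host:

```
redis-cli -h 127.0.0.1 rpush queue "pry0cc:0:asm"     # scan pry0cc with the asm module
redis-cli -h 127.0.0.1 rpush queue "pry0cc:3:spinup"  # spin up a 3-instance fleet first
```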
/bin/worker/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.8-buster

ENV PATH=$PATH:/root/.axiom/interact
ENV HOME="/root/"

RUN apt-get update \
    && apt-get install -yq apt-utils build-essential curl gcc wget \
    readline-common neovim git zsh zsh-syntax-highlighting zsh-autosuggestions jq python3-pip unzip p7zip libpcap-dev rubygems ruby-dev grc

WORKDIR /root/.axiom
RUN git clone https://github.com/pry0cc/axiom /root/.axiom/
RUN wget -q -O /tmp/doctl.tar.gz https://github.com/digitalocean/doctl/releases/download/v1.66.0/doctl-1.66.0-linux-amd64.tar.gz && tar -xvzf /tmp/doctl.tar.gz && mv doctl /usr/bin/doctl && rm /tmp/doctl.tar.gz
RUN interact/axiom-configure --config "{}" --unattended
RUN /usr/local/go/bin/go install -v github.com/projectdiscovery/notify/cmd/notify@latest

RUN git clone https://github.com/pry0cc/proteus /app
RUN pip3 install -r /app/bin/worker/requirements.txt

ENTRYPOINT ["python3", "/app/bin/worker/worker.py"]
--------------------------------------------------------------------------------
/bin/parser/import.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

import sys
import json
from pymongo import MongoClient

client = MongoClient("mongodb://mongo:27017")
db = client.asm

filename = sys.argv[1]
scan_id = sys.argv[2]
target_id = sys.argv[3]

# The collection is named after the input file, e.g. ".../host.json" -> "host".
collection_name = filename.split('.')[0].split("/")[-1]
collection = db[collection_name]

scan_meta = {'scan_id': scan_id, 'target_id': target_id}

def jsonf_to_lines(filename):
    # Parse a file of newline-delimited JSON, tagging each record
    # with the scan and target it came from.
    parsed_lines = []
    with open(filename, 'r') as reader:
        for line in reader.read().split('\n'):
            try:
                parsed = json.loads(line)
                parsed["scan_id"] = scan_id
                parsed["target_id"] = target_id
                parsed_lines.append(parsed)
            except Exception as err:
                print("Whoops: %s" % err)
    return parsed_lines

docs = jsonf_to_lines(filename)
if docs:
    collection.insert_many(docs)

# Record the scan in the scans collection if it isn't there already.
if db.scans.count_documents({'scan_id': scan_id}) < 1:
    db.scans.insert_one(scan_meta)
--------------------------------------------------------------------------------
/bin/api/app.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

from pymongo import MongoClient
import redis
from flask import Flask
from flask import request
from flask import jsonify

app = Flask(__name__)

r = redis.Redis(host='redis', port=6379, db=0)
client = MongoClient("mongodb://mongo:27017")
db = client.asm

@app.route("/api/<target>/<datatype>")
def get_subdomains(target, datatype):
    scan_id = request.args.get("scan_id")
    query = {'target_id': target}

    if scan_id is not None:
        query['scan_id'] = scan_id

    collection = db[datatype]
    res = collection.find(query)
    data = []

    for row in res:
        row.pop('_id')
        data.append(row)

    return jsonify(data)


@app.route("/api/<target>/launch_scan")
def start_scan(target):
    instances = request.args.get("spinup")
    module = request.args.get("module")

    if instances is None:
        instances = "0"

    if module is None:
        module = "asm"

    r.rpush('queue', target + ":" + str(instances) + ":" + str(module))

    data = {"message": "Scan launched!"}
    return jsonify(data)


@app.route("/api/<target>/spinup")
def spinup(target):
    instances = request.args.get("instances")

    if instances is None:
        instances = "3"

    module = "spinup"

    r.rpush('queue', target + ":" + str(instances) + ":" + module)

    data = {"message": "Fleet queued for initializing!"}
    return jsonify(data)
--------------------------------------------------------------------------------
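A quick way to exercise these routes once the stack is up (the target and scan id below are hypothetical); note the quotes, since an unquoted `&` in a query string backgrounds the command:

```
curl -s "http://127.0.0.1/api/pry0cc/http?scan_id=pry0cc-1658000000"
curl -s "http://127.0.0.1/api/pry0cc/launch_scan?spinup=2&module=asm"
```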
/bin/worker/scanner.sh:
--------------------------------------------------------------------------------
#!/bin/bash

PATH="$PATH:/root/.axiom/interact:/root/go/bin"

echo "Scanning $1"

# Jobs arrive as "target:instances:module".
target_id="$(echo "$1" | cut -d: -f 1)"
instances="$(echo "$1" | cut -d: -f 2)"
module="$(echo "$1" | cut -d: -f 3)"
ppath="/app"
scan_id="$target_id-$(date +%s)"
scan_path="$ppath/scans/$scan_id"
raw_path="$ppath/rawdata/$target_id/"
threads=13
notify="slack"

mkdir -p "$scan_path"
mkdir -p "$raw_path"

cd "$scan_path"
cp "$ppath/scope/$target_id" "$scan_path/scope.txt"

echo "$ppath"

if [ "$instances" -gt "0" ]; then
    axiom-scan scope.txt -m "$module" -o asm --fleet "$target_id" --spinup "$instances" --rm-when-done
else
    # If the target's fleet doesn't exist yet, spin up 5 instances by default.
    if [ "$(axiom-ls "$target_id*" | wc -l | awk '{ print $1 }')" -lt "2" ]; then
        axiom-scan scope.txt -m "$module" -o asm --fleet "$target_id" --spinup 5 --rm-when-done
    fi
    axiom-scan scope.txt -m "$module" -o asm --fleet "$target_id"
fi

# The asm module writes its JSON output files into the asm/ directory, one per
# data type per instance. Merge them into a single deduplicated file per data
# type, then delete asm/ since its contents have been merged. This may become
# a bottleneck on very large datasets.
find asm/ -type f -name "*.json*" | cut -d '.' -f 1-2 | cut -d '/' -f 2 | sort -u | while read src; do cat asm/"$src"* | sort -u > "$src"; done

rm -r asm

find "$scan_path" -type f -name "*.json" -exec "$ppath/bin/parser/import.py" {} "$scan_id" "$target_id" \;

# Diff new findings against previous runs with anew and push them to Slack.
cat host.json | jq -r '.host' | anew "$raw_path/host.txt" > "$raw_path/host.txt.new"
notify -bulk -i "$raw_path/host.txt.new" -pc "$ppath/config/notify.yaml" -mf "New Hostnames Found! {{data}}"

cat http.json | jq -r '.url' | anew "$raw_path/url.txt" > "$raw_path/url.txt.new"
notify -bulk -i "$raw_path/url.txt.new" -pc "$ppath/config/notify.yaml" -mf "New URLs found! {{data}}"

cat dns.json | jq -r '.host' | anew "$raw_path/resolved.txt"
cat dns.json | jq -r '.a?[]?' | anew "$raw_path/ips.txt"
--------------------------------------------------------------------------------
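To make the merge step concrete, the find/cut pipeline above collapses the per-instance output files into one deduplicated file per data type (the instance suffixes below are hypothetical):

```
# asm/host.json.node01 + asm/host.json.node02  ->  host.json
# asm/dns.json.node01  + asm/dns.json.node02   ->  dns.json
```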
/bin/client/client.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

import requests
import argparse
from tabulate import tabulate

class ProteusClient:
    def __init__(self, url):
        self.url = url

    def start_scan(self, target, module="asm", spinup=0):
        url = self.gen_url(target, "launch_scan")
        url += "?module=" + module
        url += "&spinup=" + str(spinup)

        r = requests.get(url)
        return r.json()

    def tabulate(self, json_resp):
        print(tabulate(json_resp, headers="keys", tablefmt="presto"))

    def gen_url(self, target, dtype, scan_id=""):
        url = self.url + "/" + target + "/" + dtype
        if scan_id != "":
            url += "?scan_id=" + scan_id
        return url

    def get_data_raw(self, target, dtype, scan_id=""):
        url = self.gen_url(target, dtype, scan_id)

        r = requests.get(url)
        return r.json()

    def get_data(self, target, dtype, scan_id=""):
        self.tabulate(self.get_data_raw(target, dtype, scan_id))

    def dns(self, target, scan_id=""):
        d = self.get_data_raw(target, 'dns', scan_id)
        new_arr = [['Hostname', 'A', 'Timestamp']]

        for line in d:
            host = line['host']
            a = line['a']
            timestamp = line['timestamp']

            new_arr.append([host, a, timestamp])

        print(tabulate(new_arr, headers="firstrow", tablefmt="presto"))

    def http(self, target, scan_id=""):
        d = self.get_data_raw(target, 'http', scan_id)
        new_arr = [['URL', 'Title', 'Webserver']]

        for line in d:
            # Not every record carries every field, so fall back to "".
            url = line.get('url', '')
            title = line.get('title', '')
            webserver = line.get('webserver', '')

            new_arr.append([url, title, webserver])

        print(tabulate(new_arr, headers="firstrow", tablefmt="presto"))


parser = argparse.ArgumentParser()
parser.add_argument('--target', help='Target of choice')
parser.add_argument('--type', help='Datatype of choice')
parser.add_argument('--scanid', default="", help='Limit results to a specific scan_id')
parser.add_argument('--start_scan', action="store_true", help='Launch a new scan for the target')

args = parser.parse_args()

client = ProteusClient("http://127.0.0.1:80/api")


if args.start_scan:
    client.start_scan(args.target)

if args.type == "http":
    client.http(args.target, args.scanid)
elif args.type == "dns":
    client.dns(args.target, args.scanid)
elif args.type:
    client.get_data(args.target, args.type, args.scanid)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Proteus
Proteus - the [axiom](https://github.com/pry0cc/axiom) database and API.
Originally written for [Hacking Together an ASM Platform Using ProjectDiscovery Tools](https://twitter.com/pdiscoveryio/status/1551558898879893506).

# Setup

First, clone the repository:
```
git clone https://github.com/pry0cc/proteus ~/.proteus
cd ~/.proteus
```

Next, modify config/notify.yaml to include your Slack webhook.

Then, from the repository root, modify the docker-compose.yml volumes so they map to your local axiom setup. You'll need to change `/home/op/` to whatever your current $HOME directory is.

```
services:
  redis:
    image: redis
  mongo:
    image: mongo
  worker:
    image: proteus/worker
    build:
      context: bin/worker/
    volumes:
      - /home/op/.axiom/accounts/personal.json:/root/.axiom/accounts/default.json # map your account here
      - /home/op/.axiom/modules:/root/.axiom/modules # map modules
      - /home/op/.ssh:/root/.ssh # map SSH
      - /home/op/.proteus:/app # map the proteus folder to the app (persists data like rawdata & scans; not strictly necessary, but nice to have)
```

Then build and start the stack:
```
sudo docker compose build
sudo docker compose up
```

That's it!

# Usage
Store your target(s) in the local scope folder ([~/.proteus/scope/](https://github.com/pry0cc/proteus/tree/main/scope)), one file per target: the filename is the target id, and each line is a domain in scope.
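For example (hypothetical target):
```
echo "example.com" > ~/.proteus/scope/example
```
The filename (`example` here) becomes the `<target>` id used in the API routes below.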
All fleets are unique to each target, so there is no crossover of data. You can either spin up instances first and then launch scans, in which case the instances will remain afterwards, or you can just launch scans. If you launch a scan without any instances present, it will spin up 5 instances by default and remove them automatically when it's done.

```
curl -s "http://127.0.0.1:80/api/<target>/launch_scan"
curl -s "http://127.0.0.1:80/api/<target>/launch_scan?spinup=8"
curl -s "http://127.0.0.1:80/api/<target>/launch_scan?spinup=8&module=asm"

curl -s "http://127.0.0.1:80/api/<target>/spinup?instances=15" # spin up instances for a target

curl -s "http://127.0.0.1:80/api/<target>/scans"

curl -s "http://127.0.0.1:80/api/<target>/<datatype>"
curl -s "http://127.0.0.1:80/api/<target>/dnsx"
curl -s "http://127.0.0.1:80/api/<target>/http"
curl -s "http://127.0.0.1:80/api/<target>/subs"
curl -s "http://127.0.0.1:80/api/<target>/nuclei"

curl -s "http://127.0.0.1:80/api/<target>/<datatype>?scan_id=<scan_id>"
```

# Client Usage
The client tabulates data (todo: add JSON or text output).

```
pip3 install -r bin/client/requirements.txt

bin/client/client.py --target <target> --type http
bin/client/client.py --target <target> --type dns
bin/client/client.py --target <target> --type host
bin/client/client.py --target <target> --type scans

bin/client/client.py --target <target> --type scans --scanid <scan_id>
```
--------------------------------------------------------------------------------