├── README.md
├── buscarSubdominios.sh
└── recon.sh

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# RECON-Scripts
Scripts to automate the RECON process.

I made some personal modifications to this: https://github.com/shibli2700/Rekon. Credits to Mohammed Shibli.

***buscarSubdominios.sh***

1) Subdomain enumeration with "Assetfinder" (by the genius "Tomnomnom": https://github.com/tomnomnom/assetfinder) and Sublist3r (https://github.com/aboul3la/Sublist3r).

2) Sort the results to remove duplicate entries (domains.txt).

3) Check which assets are alive with "httprobe" (again... by Tomnomnom: https://github.com/tomnomnom/httprobe).

4) Save the alive domains in a separate file called alive.txt. Both output files (domains.txt and alive.txt) are also produced in JSON format.

5) Create the "headers" and "responsebody" directories.

6) Loop through all the domains stored in alive.txt, sending cURL requests to fetch the headers and response body of each one, and store them inside the headers and responsebody directories.

7) Collect all the JavaScript files referenced in the response bodies gathered in the previous step.

8) Use relative-url-extractor by Jobert Abma (https://github.com/jobertabma/relative-url-extractor) to collect all the relative paths present in those JavaScript files.

9) Finally, pass all the domains in domains.txt to nmap and store the results inside the nmapscans directory.

Personal additions:
* Creation of a folder named after the target domain
* All output goes into that folder
* Merged response.sh and enum.sh into one single script

Running the script:

$ chmod 755 buscarSubdominios.sh   # set file permissions
$ ./buscarSubdominios.sh example.com

# TODO:

- Add more subdomain-finder tools (a sketch follows below)
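A minimal sketch for the TODO above — assuming findomain (https://github.com/findomain/findomain) is installed and on the PATH; the tool choice and flags are an illustration, not something the original script uses:

# drop this next to the assetfinder line in buscarSubdominios.sh:
findomain --target "$1" --quiet | tee -a domains.txt
# any extra finder just has to print one subdomain per line into domains.txt;
# the existing sort -u pass deduplicates everything afterwards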
--------------------------------------------------------------------------------
/buscarSubdominios.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# starting sublist3r
~/Sublist3r/sublist3r.py -d "$1" -v -o domains.txt
# running assetfinder
~/go/bin/assetfinder --subs-only "$1" | tee -a domains.txt
# creating a working directory named after the target
mkdir -p "$1"
# moving the file into it
mv domains.txt "$1/"
# removing duplicate entries
sort -u "$1/domains.txt" -o "$1/domains.txt"
# checking for alive domains
printf "\n\n[+] Checking for alive domains..\n"
cat "$1/domains.txt" | ~/go/bin/httprobe | tee -a "$1/alive.txt"
# formatting the data to JSON
cat "$1/alive.txt" | python3 -c "import sys; import json; print (json.dumps({'domains':list(sys.stdin)}))" > "$1/alive.json"
cat "$1/domains.txt" | python3 -c "import sys; import json; print (json.dumps({'domains':list(sys.stdin)}))" > "$1/domains.json"
# switch into the working directory
cd "$1" || exit 1
# create output directories
mkdir -p headers responsebody
# sending cURL requests to fetch headers and response body
CURRENT_PATH=$(pwd)

for x in $(cat alive.txt)
do
    NAME=$(echo "$x" | awk -F/ '{print $3}')
    curl -s -X GET -H "X-Forwarded-For: evil.com" "$x" -I > "$CURRENT_PATH/headers/$NAME"
    curl -s -X GET -H "X-Forwarded-For: evil.com" -L "$x" > "$CURRENT_PATH/responsebody/$NAME"
done

mkdir -p scripts scriptsresponse
RED='\033[0;31m'
NC='\033[0m'
CUR_PATH=$(pwd)
for x in $(ls "$CUR_PATH/responsebody")
do
    printf "\n\n${RED}%s${NC}\n\n" "$x"
    # pull the src attributes out of the stored response bodies
    END_POINTS=$(cat "$CUR_PATH/responsebody/$x" | grep -Eoi "src=\"[^>]+>" | cut -d '"' -f 2)
    for end_point in $END_POINTS
    do
        mkdir -p "scriptsresponse/$x"
        URL=$end_point
        # if the src value has no scheme, treat it as a path relative to the host
        len=$(echo "$end_point" | grep "http" | wc -c)
        if [ "$len" -eq 0 ]
        then
            URL="https://$x$end_point"
        fi
        file=$(basename "$end_point")
        curl -s -X GET "$URL" -L > "scriptsresponse/$x/$file"
        echo "$URL" >> "scripts/$x"
    done
done

# looping through the scriptsresponse directory
mkdir -p endpoints
for domain in $(ls scriptsresponse)
do
    # looping through the JS files saved for each domain
    mkdir -p "endpoints/$domain"
    for file in $(ls "scriptsresponse/$domain")
    do
        ruby ~/relative-url-extractor/extract.rb "scriptsresponse/$domain/$file" >> "endpoints/$domain/$file"
    done
done
# Nmap
mkdir -p nmapscans
for domain in $(cat domains.txt)
do
    nmap -sC -sV "$domain" | tee "nmapscans/$domain"
done
--------------------------------------------------------------------------------
/recon.sh:
--------------------------------------------------------------------------------
#!/bin/bash

domain=$1
wordlist="/PATH/TO/WORDLIST.txt"
resolvers="/PATH/TO/resolvers.txt"

domain_enum(){

    mkdir -p "$domain" "$domain/sources" "$domain/Recon"

    subfinder -d "$domain" -o "$domain/sources/subfinder.txt"
    assetfinder --subs-only "$domain" | tee "$domain/sources/assetfinder.txt"
    amass enum -passive -d "$domain" -o "$domain/sources/passive.txt"
    shuffledns -d "$domain" -w "$wordlist" -r "$resolvers" -o "$domain/sources/shuffledns.txt"

    cat "$domain"/sources/*.txt > "$domain/sources/all.txt"
}

domain_enum

resolving_domains(){
    shuffledns -massdns /PATH/TO/massdns -d "$domain" -list "$domain/sources/all.txt" -o "$domain/domains.txt" -r "$resolvers"
}
resolving_domains

http_probe(){
    cat "$domain/domains.txt" | httpx -threads 200 -o "$domain/sources/works.txt"
}

http_probe

gauq(){
    clear
    echo "Finding SQLi parameters"
    gau "$1" -subs | \
    grep "=" | \
    grep -Eiv "\.(jpg|jpeg|gif|css|tif|tiff|png|ttf|woff|woff2|ico|pdf|svg|txt|js)" | \
    qsreplace -a
}

sqliz() {
    clear
    echo "Executing SQLi scanner..."
    gauq "$1" | python3 /PATH/TO/DSSS/dsss.py > "$domain/Recon/possible_sqlis.txt"
}

bxss() {
    clear
    echo "Executing XSS scanner tool for blind XSS"
    # the payload was truncated in the source; append your own blind-XSS callback to it
    BLIND='">'
    gauq "$1" | kxss | grep -Eo "(http|https)://[a-zA-Z0-9./?=_-]*" | \
    dalfox pipe -b "$BLIND"
}

# sqliz and bxss are helpers for manual use, e.g.: sqliz "$domain"

zip_files(){
    clear
    echo "Ending process, zipping files, GOOD LUCK!"
    zip -r "$domain.zip" "$domain"
}

zip_files
--------------------------------------------------------------------------------
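Usage note for recon.sh — a minimal sketch, assuming the placeholder paths are filled in first; the SecLists wordlist and resolver file below are hypothetical examples, not files the repo ships with:

# 1) edit the variables at the top of recon.sh, for example:
#      wordlist="$HOME/SecLists/Discovery/DNS/subdomains-top1million-110000.txt"  # hypothetical path
#      resolvers="$HOME/resolvers.txt"                                            # hypothetical path
#    and point -massdns at your massdns binary inside resolving_domains
# 2) then run it against a target:
chmod 755 recon.sh
./recon.sh example.com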