", moduleOutFile)
227 |
228 | # check if tool has already been executed
229 | if (os.path.exists(moduleOutFile)):
230 | print(f"{bcolor.yellow}###[DUPLICATE]###\t{bcolor.ends} "
231 | + thisModule["name"])
232 | else:
233 | allCommands.append([exeString, thisModule["name"]])
234 |
235 | return allCommands
236 |
237 |
238 | """Return exit status for executed command.
239 | Create thread and execute given tool.
240 |
241 | threading.Thread = The command that will be executed inside a shell.
242 | moduleName = The name of the module
243 | that will be printed after successfull execution.
244 | """
245 | class threadForModule(threading.Thread):
246 | def __init__(self, command, moduleName):
247 | threading.Thread.__init__(self)
248 | self.command = command
249 | self.moduleName = moduleName
250 |
251 | def run(self):
252 | # check if modules should only be printed
253 | if (args.execute):
254 | try:
255 | # run command in new shell and wait for termination
256 | subprocess.check_output(self.command, shell=True,
257 | timeout=float(args.timeout))
258 | print(f"{bcolor.green}###[DONE]###\t{bcolor.ends} "
259 | + self.moduleName)
260 | return(0)
261 |
262 |             except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
263 |                 # print error if the command failed or exceeded the timeout
264 |                 print(f"{bcolor.red}###[ERROR]###\t{bcolor.ends} " + self.command)
265 |                 return(1)
266 |
267 |
268 | """MAIN
269 |
270 | """
271 | # define and configure static arguments
272 | argumentParser = argparse.ArgumentParser(description="""Automate OSINT/Recon assessment.
273 | Use at your own risk.
274 |
275 | Basic usage:
276 | Print matching modules for a given domain:
277 | python3 corptrace.py -o /tmp/out -d r1cksec.de
278 |
279 | Execute modules for given github user:
280 | python3 corptrace.py -o /tmp/out -gu r1cksec -e
281 |
282 | Print syntax of modules for given file containing domains:
283 | python3 corptrace.py -o /tmp/out -f /tmp/domains -v
284 |
285 | Only execute modules that contain at least one of the given substrings in their name:
286 | python3 corptrace.py -o /tmp/out -c 'companyName' -im shodan -e
287 |
288 | Execute modules up to risk level 3, use 8 threads and increase timeout to 35 minutes:
289 | python3 corptrace.py -o /tmp/out -rl 3 -ta 8 -to 2100 -i '192.168.1.1/24' -e
290 |
291 | Print overview of results:
292 | python3 corptrace.py -o /tmp/out -p
293 |
294 | Generate graph based on dnsx_get_coherent_domains results:
295 | python3 corptrace.py -o /tmp/out -g
296 |
297 | """, formatter_class=RawTextHelpFormatter)
298 |
299 | argumentParser.add_argument("-o",
300 | "--output",
301 | dest = "output",
302 | help = "path to output directory",
303 |                             required = True)
304 |
305 | argumentParser.add_argument("-e",
306 | "--execute",
307 | dest = "execute",
308 | help = "execute matching commands",
309 | action = "store_true")
310 |
311 | argumentParser.add_argument("-v",
312 | "--verbose",
313 | dest = "verbose",
314 | help = "print full command",
315 | action = "store_true")
316 |
317 | argumentParser.add_argument("-p",
318 | "--print",
319 | dest = "printOverview",
320 | help = "print overview of results",
321 | action = "store_true")
322 |
323 | argumentParser.add_argument("-g",
324 | "--graph",
325 | dest = "generateGraph",
326 | help = "generate graph using dnsx_get_coherent_domains results",
327 | nargs="?",
328 | const="light",
329 | choices=["dark", "light"])
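# illustrative note (not part of the original script): with nargs="?" and a
# const, "-g" alone selects "light", "-g dark" selects "dark", and omitting
# the flag leaves args.generateGraph as None:
#     python3 corptrace.py -o /tmp/out -g        -> generateGraph == "light"
#     python3 corptrace.py -o /tmp/out -g dark   -> generateGraph == "dark"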
330 |
331 | argumentParser.add_argument("-to",
332 | "--timeOut",
333 | dest="timeout",
334 |                             help = "maximum time that a single module"
335 | + " is allowed to run"
336 | + " in seconds (default 1200)",
337 | default = "1200")
338 |
339 | argumentParser.add_argument("-rl",
340 | "--riskLevel",
341 | dest = "riskLevel",
342 |                             help = "set maximum riskLevel for modules"
343 | + " (possible values 1-4, 2 is default)",
344 | default = "2")
345 |
346 | argumentParser.add_argument("-ta",
347 | "--threadAmount",
348 | dest = "threadAmount",
349 |                             help = "the number of threads running in parallel"
350 | + " (default 5)",
351 | default = "5")
352 |
353 | argumentParser.add_argument("-em",
354 | "--exludeModules",
355 | dest = "excludeModules",
356 | nargs = "*",
357 | help = "modules that will be excluded "
358 | + "(exclude ovewrites include)",
359 | default = "NULL")
360 |
361 | argumentParser.add_argument("-im",
362 | "--includeModules",
363 | dest = "includeModules",
364 | nargs = "*",
365 | help = "modules that will be included",
366 | default = "NULL")
367 |
368 | # get path to directory that contains the json config
369 | pathToScript = os.path.realpath(__file__)
370 | pathToScriptDir = os.path.dirname(pathToScript)
371 | argsFromJsonConf = getArgsOfJson()
372 |
373 | # add arguments of json file to argumentParser
374 | allCapitalLetters = []
375 | for currJsonArg in argsFromJsonConf:
376 | capitalLetters = currJsonArg[0]
377 |
378 | for char in currJsonArg:
379 | if (char.isupper()):
380 | capitalLetters = capitalLetters + char
381 | try:
382 | argumentParser.add_argument("-" + capitalLetters.lower(),
383 | "--" + currJsonArg,
384 | dest=currJsonArg,
385 | default="NULL")
386 | allCapitalLetters.append("-" + capitalLetters + " " + currJsonArg)
387 |
388 |     except argparse.ArgumentError:
389 | print("Error in modules.json - "
390 | + "collision for config argument name (args): " + currJsonArg)
391 | print("Argparse conflicting option string: --"
392 | + currJsonArg + "/-" + capitalLetters)
393 | exit(1)
394 |
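# illustrative note: the short flag is the first letter plus all capital
# letters of the config key, lowercased; assuming a key "githubUser", this
# yields "-gu" / "--githubUser" (matching the -gu example in the help text)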
395 | args = argumentParser.parse_args()
396 |
397 | # print overview
398 | if (args.printOverview):
399 | os.system("bash " + pathToScriptDir + "/ressources/scripts/print-overview.sh " + args.output)
400 | exit(0)
401 |
402 | # generate graph
403 | if (args.generateGraph):
404 |     os.system("bash " + pathToScriptDir + "/ressources/scripts/visualize.sh "
405 |               + args.output + " " + args.generateGraph)
406 |     print("Graph generated: " + args.output + "/dnsx_get_coherent_domains/graph.html")
407 |     exit(0)
413 |
414 | # stays False if the user passed none of the module-specific arguments
415 | argumentFlag = False
416 |
417 | for currArg in argsFromJsonConf:
418 |     if (vars(args)[currArg] != "NULL"):
419 |         argumentFlag = True
420 |         break
421 |
422 | if (not argumentFlag):
423 | print("Error, at least one of the following arguments is required:")
424 |
425 | # add print option to required arguments
426 | allCapitalLetters.append("-p/--print")
427 |     print("\n".join(allCapitalLetters))
428 | exit(1)
429 |
430 | # catch ctrl + c
431 | signal.signal(signal.SIGINT, signalHandler)
432 |
433 | # define colors for printing to stdout
434 | class bcolor:
435 | purple = '\033[95m'
436 | blue = '\033[94m'
437 | green = '\033[92m'
438 | yellow = "\033[1;33m"
439 | red = '\033[91m'
440 |     ends = '\033[0m'
441 |
442 | # get a list of modules that match the arguments given by the user
443 | executableModules = getMatchingModules()
444 |
445 | # create commands from template
446 | commandsToExecute = createCommandFromTemplate(executableModules)
447 |
448 | # this variable will contain the running threads
449 | threads = []
450 |
451 | # used to print the progress counter (finished modules / total)
452 | amountOfExecModules = len(commandsToExecute)
453 | counter = 1
454 |
455 | for runCommand in commandsToExecute:
456 | # execute modules inside parallel threads
457 | if (args.execute):
458 | if (args.verbose):
459 | print(f"{bcolor.blue}###[START]###\t{bcolor.ends} "
460 | + runCommand[0] + " - " + str(counter)
461 | + "/" + str(amountOfExecModules))
462 | else:
463 | print(f"{bcolor.blue}###[START]###\t{bcolor.ends} "
464 | + runCommand[1] + " - " + str(counter)
465 | + "/" + str(amountOfExecModules))
466 |
467 | counter += 1
468 |
469 |         while True:
470 |             # run at most threadAmount threads in parallel
471 |             if (threading.active_count() <= int(args.threadAmount)):
472 | currThread = threadForModule(runCommand[0], runCommand[1])
473 | threads.append(currThread)
474 | currThread.start()
475 | break
476 |
477 | else:
478 | time.sleep(3)
479 |
480 | else:
481 | if (args.verbose):
482 | print(runCommand[0])
483 | else:
484 | print(runCommand[1])
485 |
486 | # wait for all modules to finish
487 | for x in threads:
488 | x.join()
489 |
490 | if (args.execute):
491 | # remove empty directories
492 | for directory in os.scandir(args.output):
493 | if os.path.isdir(directory) and not os.listdir(directory):
494 | os.rmdir(directory)
495 |
496 |
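Note: the exact schema of ressources/templates/modules.json is not shown in
this dump; based on getArgsOfJson(), thisModule["name"] and the riskLevel
flag above, an entry presumably looks roughly like this (all field names
except "name" are hypothetical):

    {
        "name": "subfinder_get_subdomains",
        "riskLevel": 1,
        "syntax": "subfinder -d DOMAIN -pc REPLACE-SUBFINDER-CONFIG"
    }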
--------------------------------------------------------------------------------
/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # stop on error
4 | set -e
5 |
6 | # stop on undefined
7 | set -u
8 |
9 | echo ""
10 | echo "### Setup Script"
11 | echo "Use 'bash install -force' to reinstall each tool."
12 | echo ""
13 |
14 | # check if installation is forced
15 | if [ ${#} -eq 1 ]
16 | then
17 | if [ "${1}" == "-force" ]
18 | then
19 | force="1"
20 | else
21 | force="0"
22 | fi
23 | else
24 | force="0"
25 | fi
26 |
27 | # define variables
28 | pathToRepo="$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd )"
29 | pathToRessources="${pathToRepo}/ressources"
30 | pathToScripts="${pathToRepo}/ressources/modules"
31 | pathToTemplates="${pathToRepo}/ressources/templates"
32 | pathToBuild="${pathToRepo}/build"
33 | pathToConfig="${pathToBuild}/config.json"
34 | pathToTemp="${pathToBuild}/temp"
35 | pathToGit="${pathToBuild}/git"
36 | pathToPython="${pathToBuild}/python-env"
37 |
38 | # check if the script is run using sudo
39 | if [ -n "${SUDO_USER:-}" ]
40 | then
41 | pathToHomeDir="/home/${SUDO_USER}"
42 | else
43 | pathToHomeDir="$HOME"
44 | fi
45 |
46 | # create temporary directory
47 | if [ ! -d ${pathToTemp} ]
48 | then
49 | mkdir ${pathToTemp}
50 | else
51 | rm -rf ${pathToTemp}
52 | mkdir ${pathToTemp}
53 | fi
54 |
55 | echo ""
56 | echo "### APT Install"
57 | echo ""
58 | sudo apt update
59 |
60 | # determine distribution
61 | osRelease=$(grep "^ID=" /etc/os-release | cut -d "=" -f 2)
62 |
63 | if [ "${osRelease}" == "debian" ]
64 | then
65 | echo "Debian detected."
66 | sudo apt install -y git wget python3 python3-pip python3.11-venv whois curl nmap libimage-exiftool-perl jq dnstwist bc
67 | elif [ "${osRelease}" == "kali" ]
68 | then
69 | echo "Kali detected."
70 | sudo apt install -y git wget python3 python3-pip python3.12-venv whois curl nmap libimage-exiftool-perl jq dnstwist bc
71 | else
72 |     echo "Neither Debian nor Kali detected, proceeding as plain Debian."
73 | sudo apt install -y git wget python3 python3-pip python3.11-venv whois curl nmap libimage-exiftool-perl jq dnstwist bc
74 | fi
75 |
76 | echo ""
77 | echo "### Write modules.json"
78 | echo ""
79 |
80 | # check if all keys are empty
81 | if jq -e 'map(. == "") | all' ${pathToConfig} > /dev/null
82 | then
83 | echo "No API key in ${pathToConfig} found!"
84 | echo "Using API keys leads to more extensive results."
85 | echo "The installation process can later be repeated using additional API keys."
86 | echo "Do you want to continue the installation without using API keys? (y/everything else for no)."
87 |
88 | read answer
89 | if [ "${answer}" != "y" ]
90 | then
91 | echo "Abort installation"
92 | exit
93 | fi
94 | fi
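# note (illustrative, not part of the script): config.json is a flat map of
# service identifier to API key, matching the jq reads below, e.g.
#   { "bevigil_com": "", "shodan_io": "", "github_com": "", ... }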
95 |
96 | # read api keys
97 | bevigilKey=$(jq -r '.bevigil_com' ${pathToConfig})
98 | binaryedgeKey=$(jq -r '.binaryedge_io' ${pathToConfig})
99 | bufferoverKey=$(jq -r '.bufferover_run' ${pathToConfig})
100 | fullhuntKey=$(jq -r '.fullhunt_io' ${pathToConfig})
101 | githubKey=$(jq -r '.github_com' ${pathToConfig})
102 | grayhatwarfareKey=$(jq -r '.grayhatwarfare_com' ${pathToConfig})
103 | hunterKey=$(jq -r '.hunter_io' ${pathToConfig})
104 | intelxKey=$(jq -r '.intelx_io' ${pathToConfig})
105 | leakixKey=$(jq -r '.leakix_net' ${pathToConfig})
106 | netlasKey=$(jq -r '.netlas_io' ${pathToConfig})
107 | networksdbKey=$(jq -r '.networksdb_io' ${pathToConfig})
108 | projectdiscoveryKey=$(jq -r '.projectdiscovery_io_key' ${pathToConfig})
109 | projectdiscoveryUser=$(jq -r '.projectdiscovery_io_user' ${pathToConfig})
110 | robtexKey=$(jq -r '.robtex_com' ${pathToConfig})
111 | securitytrailsKey=$(jq -r '.securitytrails_com' ${pathToConfig})
112 | shodanKey=$(jq -r '.shodan_io' ${pathToConfig})
113 | spyonwebKey=$(jq -r '.spyonweb_com' ${pathToConfig})
114 | sslmateKey=$(jq -r '.sslmate_com' ${pathToConfig})
115 | tombaKeya=$(jq -r '.tomba_io_ta' ${pathToConfig})
116 | tombaKeys=$(jq -r '.tomba_io_ts' ${pathToConfig})
117 | urlscanKey=$(jq -r '.urlscan_io' ${pathToConfig})
118 | validinKey=$(jq -r '.validin_com' ${pathToConfig})
119 | virustotalKey=$(jq -r '.virustotal_com' ${pathToConfig})
120 | xingPassword=$(jq -r '.xing_com_password' ${pathToConfig})
121 | xingUser=$(jq -r '.xing_com_user' ${pathToConfig})
122 | zoomeyeKey=$(jq -r '.zoomeye_hk' ${pathToConfig})
123 |
124 | # disallow single quotes inside the xing password
125 | if [[ "${xingPassword}" == *"'"* ]]
126 | then
127 |     echo "Single quotes (') are not allowed inside the xing password."
128 | exit 1
129 | fi
130 |
131 | # write config for subfinder
132 | cat > ${pathToBuild}/subfinder.config << EOL
133 | bevigil: [${bevigilKey}]
134 | binaryedge: [${binaryedgeKey}]
135 | bufferover: [${bufferoverKey}]
136 | certspotter: [${sslmateKey}]
137 | chaos: [${projectdiscoveryKey}]
138 | github: [${githubKey}]
139 | hunter: [${hunterKey}]
140 | intelx: [2.intelx.io:${intelxKey}]
141 | leakix: [${leakixKey}]
142 | netlas: [${netlasKey}]
143 | robtex: [${robtexKey}]
144 | securitytrails: [${securitytrailsKey}]
145 | shodan: [${shodanKey}]
146 | virustotal: [${virustotalKey}]
147 | zoomeyeapi: [${zoomeyeKey}]
148 | EOL
149 |
150 | # write config for dnsx
151 | if [ -n "${projectdiscoveryKey}" ]
152 | then
153 | if [ ! -d "${pathToHomeDir}/.pdcp" ]
154 | then
155 | mkdir "${pathToHomeDir}/.pdcp"
156 | fi
157 |
158 | cat > ${pathToHomeDir}/.pdcp/credentials.yaml << EOL
159 | - username: $(echo "${projectdiscoveryUser}" | cut -d "@" -f 1)
160 | email: ${projectdiscoveryUser}
161 | api-key: ${projectdiscoveryKey}
162 | server: https://api.projectdiscovery.io
163 | EOL
164 | fi
165 |
166 | # write api keys, passwords and absolute paths to modules.json
167 | sed -e "s|REPLACE-GITHUB-APIKEY|${githubKey}|g" \
168 | -e "s|REPLACE-GRAYHATWARFARE-APIKEY|${grayhatwarfareKey}|g" \
169 | -e "s|REPLACE-HUNTER-APIKEY|${hunterKey}|g" \
170 | -e "s|REPLACE-RESSOURCE-PATH|${pathToRessources}|g" \
171 | -e "s|REPLACE-NETWORKSDB-APIKEY|${networksdbKey}|g" \
172 | -e "s|REPLACE-INTELX-APIKEY|${intelxKey}|g" \
173 | -e "s|REPLACE-ROBTEX-APIKEY|${robtexKey}|g" \
174 | -e "s|REPLACE-SECURITYTRAILS-APIKEY|${securitytrailsKey}|g" \
175 | -e "s|REPLACE-SHODAN-APIKEY|${shodanKey}|g" \
176 | -e "s|REPLACE-SPYONWEP-APIKEY|${spyonwebKey}|g" \
177 | -e "s|REPLACE-SUBFINDER-CONFIG|${pathToBuild}/subfinder.config|g" \
178 | -e "s|REPLACE-TOMBATA-APIKEY|${tombaKeya}|g" \
179 | -e "s|REPLACE-TOMBATS-APIKEY|${tombaKeys}|g" \
180 | -e "s|REPLACE-URLSCAN-APIKEY|${urlscanKey}|g" \
181 | -e "s|REPLACE-VALIDIN-APIKEY|${validinKey}|g" \
182 | -e "s|REPLACE-XING-USER|${xingUser}|g" \
183 | -e "s|REPLACE-XING-PASSWORD|${xingPassword}|g" \
184 | -e "s|REPLACE-ZOOMEYE-APIKEY|${zoomeyeKey}|g" \
185 | "${pathToTemplates}/modules.json" > "${pathToTemp}/modules.json"
186 |
187 | # the last line in modules.json must not contain an API key: lines with empty keys are removed below, and dropping the final line would break the JSON syntax
188 | grep -v " '' " "${pathToTemp}/modules.json" > ${pathToBuild}/modules.json
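# note: an empty API key presumably renders as '' inside the substituted
# command string, so the grep above drops every module line containing
# " '' " (i.e. modules whose required key is missing)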
189 |
190 | echo ""
191 | echo "### Install Golang tools."
192 | echo ""
193 |
194 | # download golang
195 | wget https://go.dev/dl/go1.23.5.linux-amd64.tar.gz -O ${pathToTemp}/go.tar.gz
196 | tar -xf ${pathToTemp}/go.tar.gz -C ${pathToTemp}
197 | rm ${pathToTemp}/go.tar.gz
198 | export GOPATH=${pathToTemp}
199 |
200 | if ! [ -x "$(command -v spk)" ] || [ "${force}" == "1" ]
201 | then
202 | ${pathToTemp}/go/bin/go install github.com/dhn/spk@latest
203 | chmod +x ${pathToTemp}/bin/spk
204 | sudo mv ${pathToTemp}/bin/spk /usr/local/bin
205 | else
206 | echo "spk is installed"
207 | fi
208 |
209 | if ! [ -x "$(command -v csprecon)" ] || [ "${force}" == "1" ]
210 | then
211 | ${pathToTemp}/go/bin/go install github.com/edoardottt/csprecon/cmd/csprecon@latest
212 | chmod +x ${pathToTemp}/bin/csprecon
213 | sudo mv ${pathToTemp}/bin/csprecon /usr/local/bin
214 | else
215 | echo "csprecon is installed"
216 | fi
217 |
218 | if ! [ -x "$(command -v subfinder)" ] || [ "${force}" == "1" ]
219 | then
220 | ${pathToTemp}/go/bin/go install github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest
221 | chmod +x ${pathToTemp}/bin/subfinder
222 | sudo mv ${pathToTemp}/bin/subfinder /usr/local/bin
223 | else
224 | echo "subfinder is installed"
225 | fi
226 |
227 | if ! [ -x "$(command -v dnsx)" ] || [ "${force}" == "1" ]
228 | then
229 | ${pathToTemp}/go/bin/go install github.com/projectdiscovery/dnsx/cmd/dnsx@latest
230 | chmod +x ${pathToTemp}/bin/dnsx
231 | sudo mv ${pathToTemp}/bin/dnsx /usr/local/bin
232 | else
233 | echo "dnsx is installed"
234 | fi
235 |
236 | echo ""
237 | echo "### Compile Binary from Git."
238 | echo ""
239 |
240 | git clone https://github.com/blechschmidt/massdns.git ${pathToTemp}/massdns
241 | if ! [ -x "$(command -v massdns)" ] || [ "${force}" == "1" ]
242 | then
243 | cd ${pathToTemp}/massdns && make
244 | chmod +x ${pathToTemp}/massdns/bin/massdns
245 | sudo mv ${pathToTemp}/massdns/bin/massdns /usr/local/bin
246 | cd -
247 | rm -rf ${pathToTemp}/massdns
248 | else
249 | echo "massdns is installed"
250 | fi
251 |
252 | echo ""
253 | echo "### Wget compiled binaries."
254 | echo ""
255 |
256 | if ! [ -x "$(command -v geckodriver)" ] || [ "${force}" == "1" ]
257 | then
258 | latestGeckodriver=$(curl -sL https://api.github.com/repos/mozilla/geckodriver/releases/latest | jq -r ".tag_name")
259 | wget https://github.com/mozilla/geckodriver/releases/download/${latestGeckodriver}/geckodriver-${latestGeckodriver}-linux64.tar.gz -O ${pathToTemp}/geckodriver.tar.gz
260 | tar -xf ${pathToTemp}/geckodriver.tar.gz -C ${pathToTemp}
261 | chmod +x ${pathToTemp}/geckodriver
262 | sudo mv ${pathToTemp}/geckodriver /usr/local/bin
263 | rm ${pathToTemp}/geckodriver.tar.gz
264 | else
265 | echo "geckodriver is installed"
266 | fi
267 |
268 | if ! [ -x "$(command -v gitleaks)" ] || [ "${force}" == "1" ]
269 | then
270 | latestGitleaks=$(curl -sL https://api.github.com/repos/gitleaks/gitleaks/releases/latest | jq -r ".tag_name")
271 | latestGitleaksNoV=$(echo ${latestGitleaks} | sed "s/v//")
272 | wget https://github.com/gitleaks/gitleaks/releases/download/${latestGitleaks}/gitleaks_${latestGitleaksNoV}_linux_x64.tar.gz -O ${pathToTemp}/gitleaks.tar.gz
273 | tar -xf ${pathToTemp}/gitleaks.tar.gz -C ${pathToTemp}
274 | chmod +x ${pathToTemp}/gitleaks
275 | sudo mv ${pathToTemp}/gitleaks /usr/local/bin
276 | rm ${pathToTemp}/README.md ${pathToTemp}/LICENSE ${pathToTemp}/gitleaks.tar.gz
277 | else
278 | echo "gitleaks is installed"
279 | fi
280 |
281 | if ! [ -x "$(command -v trufflehog)" ] || [ "${force}" == "1" ]
282 | then
283 | wget https://github.com/trufflesecurity/trufflehog/releases/download/v3.81.9/trufflehog_3.81.9_linux_amd64.tar.gz -O ${pathToTemp}/truffleHog.tar.gz
284 | tar -xf ${pathToTemp}/truffleHog.tar.gz -C ${pathToTemp}
285 | chmod +x ${pathToTemp}/trufflehog
286 | sudo mv ${pathToTemp}/trufflehog /usr/local/bin
287 | rm ${pathToTemp}/README.md ${pathToTemp}/LICENSE ${pathToTemp}/truffleHog.tar.gz
288 | else
289 | echo "trufflehog is installed"
290 | fi
291 |
292 | if ! [ -x "$(command -v letItGo)" ] || [ "${force}" == "1" ]
293 | then
294 | latestLetitgo=$(curl -sL https://api.github.com/repos/SecurityRiskAdvisors/letItGo/releases/latest | jq -r ".tag_name")
295 | wget https://github.com/SecurityRiskAdvisors/letItGo/releases/download/${latestLetitgo}/letItGo_${latestLetitgo}_linux_amd64 -O ${pathToTemp}/letItGo
296 | chmod +x ${pathToTemp}/letItGo
297 | sudo mv ${pathToTemp}/letItGo /usr/local/bin
298 | else
299 | echo "letItGo is installed"
300 | fi
301 |
302 | if ! [ -x "$(command -v scanrepo)" ] || [ "${force}" == "1" ]
303 | then
304 | wget https://github.com/techjacker/repo-security-scanner/releases/download/0.4.1/repo-security-scanner_0.4.1_Linux_x86_64.tar.gz -O ${pathToTemp}/scanrepo.tar.gz
305 | tar -xf ${pathToTemp}/scanrepo.tar.gz -C ${pathToTemp}
306 | chmod +x ${pathToTemp}/repo-security-scanner
307 | sudo mv ${pathToTemp}/repo-security-scanner /usr/local/bin/scanrepo
308 | rm ${pathToTemp}/scanrepo.tar.gz
309 | else
310 | echo "scanrepo is installed"
311 | fi
312 |
313 | if ! [ -x "$(command -v noseyparker)" ] || [ "${force}" == "1" ]
314 | then
315 |     wget https://github.com/praetorian-inc/noseyparker/releases/download/v0.19.0/noseyparker-v0.19.0-x86_64-unknown-linux-gnu.tar.gz -O ${pathToTemp}/noseyparker.tar.gz
316 |     # prevent directory "bin" conflict with "go install"
317 |     mkdir ${pathToTemp}/noseyparker
318 |     tar -xf ${pathToTemp}/noseyparker.tar.gz -C ${pathToTemp}/noseyparker
319 | chmod +x ${pathToTemp}/noseyparker/bin/noseyparker
320 | sudo mv ${pathToTemp}/noseyparker/bin/noseyparker /usr/local/bin
321 | else
322 | echo "noseyparker is installed"
323 | fi
324 |
325 | echo ""
326 | echo "### Install Python dependencies"
327 | echo ""
328 |
329 | # generate python environment
330 | python3 -m venv ${pathToPython}
331 | source ${pathToPython}/bin/activate
332 |
333 | # prepare git directory
334 | if [ ! -d ${pathToGit} ]
335 | then
336 | mkdir ${pathToGit}
337 | else
338 | rm -rf ${pathToGit}
339 | mkdir ${pathToGit}
340 | fi
341 |
342 | git clone https://github.com/punk-security/dnsreaper ${pathToGit}/dnsreaper
343 | if ! [ -x "$(command -v dnsreaper)" ] || [ "${force}" == "1" ]
344 | then
345 | ${pathToPython}/bin/pip3 install -r ${pathToGit}/dnsreaper/requirements.txt
346 | echo "cd ${pathToGit}/dnsreaper && ${pathToPython}/bin/python3 main.py \"\$@\"" > ${pathToTemp}/dnsreaper
347 | chmod +x ${pathToTemp}/dnsreaper
348 | sudo mv ${pathToTemp}/dnsreaper /usr/local/bin
349 | else
350 | echo "dnsreaper is installed"
351 | fi
352 |
353 | git clone https://github.com/nullenc0de/servicelens ${pathToGit}/servicelens
354 | if ! [ -x "$(command -v servicelens)" ] || [ "${force}" == "1" ]
355 | then
356 | ${pathToPython}/bin/pip3 install dnspython
357 | echo "cd ${pathToGit}/servicelens && ${pathToPython}/bin/python3 servicelens.py \"\$@\"" > ${pathToTemp}/servicelens
358 | chmod +x ${pathToTemp}/servicelens
359 | sudo mv ${pathToTemp}/servicelens /usr/local/bin
360 | else
361 | echo "servicelens is installed"
362 | fi
363 |
364 |
365 | git clone https://github.com/MattKeeley/Spoofy ${pathToGit}/Spoofy
366 | if ! [ -x "$(command -v spoofy)" ] || [ "${force}" == "1" ]
367 | then
368 | ${pathToPython}/bin/pip3 install -r ${pathToGit}/Spoofy/requirements.txt
369 | echo "cd ${pathToGit}/Spoofy && ${pathToPython}/bin/python3 spoofy.py \"\$@\"" > ${pathToTemp}/spoofy
370 | chmod +x ${pathToTemp}/spoofy
371 | sudo mv ${pathToTemp}/spoofy /usr/local/bin
372 | else
373 | echo "spoofy is installed"
374 | fi
375 |
376 | git clone https://github.com/devanshbatham/FavFreak.git ${pathToGit}/FavFreak
377 | if ! [ -x "$(command -v favfreak)" ] || [ "${force}" == "1" ]
378 | then
379 | ${pathToPython}/bin/pip3 install -r ${pathToGit}/FavFreak/requirements.txt
380 | echo "cd ${pathToGit}/FavFreak && ${pathToPython}/bin/python3 favfreak.py \"\$@\"" > ${pathToTemp}/favfreak
381 | chmod +x ${pathToTemp}/favfreak
382 | sudo mv ${pathToTemp}/favfreak /usr/local/bin
383 | else
384 | echo "favfreak is installed"
385 | fi
386 |
387 | echo ""
388 | echo "Copy custom scripts to /usr/local/bin"
389 |
390 | # add custom scripts to $PATH
391 | allCustomScripts=$(ls ${pathToScripts})
392 |
393 | for scriptName in ${allCustomScripts}
394 | do
395 | fileNameNoExt=$(basename "${scriptName}" | cut -d. -f 1)
396 | fileExtension=$(basename "${scriptName}" | cut -d. -f 2)
397 |
398 | if [ "${fileExtension}" == "py" ]
399 | then
400 | # add absolute path to python environment
401 | echo "${pathToPython}/bin/python3 ${pathToScripts}/${scriptName} \"\$@\"" > ${pathToTemp}/${fileNameNoExt}
402 | chmod +x ${pathToTemp}/${fileNameNoExt}
403 | sudo mv ${pathToTemp}/${fileNameNoExt} /usr/local/bin
404 | elif [ "${fileExtension}" == "sh" ]
405 | then
406 | cp ${pathToScripts}/${scriptName} ${pathToTemp}/${fileNameNoExt}
407 | chmod +x ${pathToTemp}/${fileNameNoExt}
408 | sudo mv ${pathToTemp}/${fileNameNoExt} /usr/local/bin
409 | fi
410 | done
411 |
412 | # install selenium and lxml
413 | ${pathToPython}/bin/pip3 install -U selenium lxml requests
414 | deactivate
415 |
416 | # delete temporary directory
417 | sudo rm -rf ${pathToTemp}
418 | echo ""
419 | echo "Done"
420 |
421 |
422 |
--------------------------------------------------------------------------------
/ressources/demo/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/r1cksec/corptrace/008cabd6026d22df0ef23f928748b680054288c6/ressources/demo/demo.gif
--------------------------------------------------------------------------------
/ressources/demo/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/r1cksec/corptrace/008cabd6026d22df0ef23f928748b680054288c6/ressources/demo/demo.png
--------------------------------------------------------------------------------
/ressources/modules/crtsh_get_rootdomains.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: crtsh_get_rootdomains 'companyName'"
6 | echo "Run curl command and retreive domains linked to the specified company name from crt.sh"
7 | exit
8 | fi
9 |
10 | urlEncodedInput=$(echo "${1}" | sed -e 's/ /%20/g' -e 's/:/%3A/g' -e 's/\//%2F/g' -e 's/?/%3F/g' -e 's/=/%3D/g' -e 's/&/%26/g')
11 |
12 | curl -s "https://crt.sh/?q=${urlEncodedInput}" \
13 |     | grep "<TD>" \
14 |     | grep -v "style=" \
15 |     | sed -n 's/.*<TD>\([^<]*\)<\/\?\([^>]*\)>.*/\1/p' \
16 | | grep -iE '([[:alnum:]_.-]\.)+[A-Za-z]{2,6}$' \
17 | | grep -v '@' \
18 | | awk -F '.' '{print $(NF-1) "." $NF}' \
19 | | tr '[:upper:]' '[:lower:]' \
20 | | sort -u
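# illustrative example (hypothetical row): a crt.sh cell like
# <TD>sub.shop.example.com</TD> passes the filters above, the awk keeps only
# the last two labels ("example.com"), and sort -u deduplicates the output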
21 |
22 |
--------------------------------------------------------------------------------
/ressources/modules/dns_get_records.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} domain"
6 | echo "Run dig command for different DNS record types"
7 | exit
8 | fi
9 |
10 | domain=${1}
11 | outFile=$(mktemp /tmp/dns-resource-record-result.XXXXXX)  # unique per run, avoids clashes between parallel modules
12 |
13 | dig +short AAAA ${domain} >> ${outFile}
14 | dig +short AFSDB ${domain} >> ${outFile}
15 | dig +short APL ${domain} >> ${outFile}
16 | dig +short CAA ${domain} >> ${outFile}
17 | dig +short CDNSKEY ${domain} >> ${outFile}
18 | dig +short CDS ${domain} >> ${outFile}
19 | dig +short CERT ${domain} >> ${outFile}
20 | dig +short CNAME ${domain} >> ${outFile}
21 | dig +short DHCID ${domain} >> ${outFile}
22 | dig +short DLV ${domain} >> ${outFile}
23 | dig +short DNAME ${domain} >> ${outFile}
24 | dig +short DNSKEY ${domain} >> ${outFile}
25 | dig +short DS ${domain} >> ${outFile}
26 | dig +short HIP ${domain} >> ${outFile}
27 | dig +short HINFO ${domain} >> ${outFile}
28 | dig +short IPSECKEY ${domain} >> ${outFile}
29 | dig +short ISDN ${domain} >> ${outFile}
30 | dig +short KEY ${domain} >> ${outFile}
31 | dig +short KX ${domain} >> ${outFile}
32 | dig +short MB ${domain} >> ${outFile}
33 | dig +short MD ${domain} >> ${outFile}
34 | dig +short MF ${domain} >> ${outFile}
35 | dig +short MG ${domain} >> ${outFile}
36 | dig +short MR ${domain} >> ${outFile}
37 | dig +short MX ${domain} >> ${outFile}
38 | dig +short NAPTR ${domain} >> ${outFile}
39 | dig +short NS ${domain} >> ${outFile}
40 | dig +short NSAP ${domain} >> ${outFile}
41 | dig +short NSEC ${domain} >> ${outFile}
42 | dig +short NSEC3 ${domain} >> ${outFile}
43 | dig +short NSEC3PARAM ${domain} >> ${outFile}
44 | dig +short NULL ${domain} >> ${outFile}
45 | dig +short NXT ${domain} >> ${outFile}
46 | dig +short OPT ${domain} >> ${outFile}
47 | dig +short RP ${domain} >> ${outFile}
48 | dig +short RRSIG ${domain} >> ${outFile}
49 | dig +short SIG ${domain} >> ${outFile}
50 | dig +short SOA ${domain} >> ${outFile}
51 | dig +short SPF ${domain} >> ${outFile}
52 | dig +short SRV ${domain} >> ${outFile}
53 | dig +short SRV _ldap._tcp.dc._msdcs.${domain} >> ${outFile}
54 | dig +short SRV _ldap._tcp.gc._msdcs.${domain} >> ${outFile}
55 | dig +short SRV _ldap._tcp.pdc._msdcs.${domain} >> ${outFile}
56 | dig +short SRV _ldap._tcp.${domain} >> ${outFile}
57 | dig +short SRV _ldap._tcp.ForestDNSZones.${domain} >> ${outFile}
58 | dig +short SRV _gc._msdcs.${domain} >> ${outFile}
59 | dig +short SRV _kpasswd._tcp.${domain} >> ${outFile}
60 | dig +short SRV _kpasswd._udp.${domain} >> ${outFile}
61 | dig +short SRV _kerberos._tcp.dc._msdcs.${domain} >> ${outFile}
62 | dig +short SRV _kerberos.tcp.dc._msdcs.${domain} >> ${outFile}
63 | dig +short SRV _kerberos-master._tcp.${domain} >> ${outFile}
64 | dig +short SRV _kerberos-master._udp.${domain} >> ${outFile}
65 | dig +short SRV _kerberos._tcp.${domain} >> ${outFile}
66 | dig +short SRV _kerberos._udp.${domain} >> ${outFile}
67 | dig +short SRV _autodiscover._tcp.${domain} >> ${outFile}
68 | dig +short SRV _ntp._udp.${domain} >> ${outFile}
69 | dig +short SRV _nntp._tcp.${domain} >> ${outFile}
70 | dig +short SRV _imap._tcp.${domain} >> ${outFile}
71 | dig +short SRV _imap.tcp.${domain} >> ${outFile}
72 | dig +short SRV _imaps._tcp.${domain} >> ${outFile}
73 | dig +short SRV _pop3._tcp.${domain} >> ${outFile}
74 | dig +short SRV _pop3s._tcp.${domain} >> ${outFile}
75 | dig +short SRV _smtp._tcp.${domain} >> ${outFile}
76 | dig +short SRV _caldav._tcp.${domain} >> ${outFile}
77 | dig +short SRV _caldavs._tcp.${domain} >> ${outFile}
78 | dig +short SRV _carddav._tcp.${domain} >> ${outFile}
79 | dig +short SRV _carddavs._tcp.${domain} >> ${outFile}
80 | dig +short SRV _stun._tcp.${domain} >> ${outFile}
81 | dig +short SRV _stun._udp.${domain} >> ${outFile}
82 | dig +short SRV _stuns._tcp.${domain} >> ${outFile}
83 | dig +short SRV _turn._tcp.${domain} >> ${outFile}
84 | dig +short SRV _turn._udp.${domain} >> ${outFile}
85 | dig +short SRV _turns._tcp.${domain} >> ${outFile}
86 | dig +short SRV _h323be._tcp.${domain} >> ${outFile}
87 | dig +short SRV _h323be._udp.${domain} >> ${outFile}
88 | dig +short SRV _h323cs._tcp.${domain} >> ${outFile}
89 | dig +short SRV _h323cs._udp.${domain} >> ${outFile}
90 | dig +short SRV _h323ls._tcp.${domain} >> ${outFile}
91 | dig +short SRV _h323ls._udp.${domain} >> ${outFile}
92 | dig +short SRV _sip._tcp.${domain} >> ${outFile}
93 | dig +short SRV _sip._tls.${domain} >> ${outFile}
94 | dig +short SRV _sip._udp.${domain} >> ${outFile}
95 | dig +short SRV _sipfederationtls._tcp.${domain} >> ${outFile}
96 | dig +short SRV _sipinternal._tcp.${domain} >> ${outFile}
97 | dig +short SRV _sipinternaltls._tcp.${domain} >> ${outFile}
98 | dig +short SRV _sips._tcp.${domain} >> ${outFile}
99 | dig +short SRV _aix._tcp.${domain} >> ${outFile}
100 | dig +short SRV _certificates._tcp.${domain} >> ${outFile}
101 | dig +short SRV _cmp._tcp.${domain} >> ${outFile}
102 | dig +short SRV _crl._tcp.${domain} >> ${outFile}
103 | dig +short SRV _crls._tcp.${domain} >> ${outFile}
104 | dig +short SRV _finger._tcp.${domain} >> ${outFile}
105 | dig +short SRV _ftp._tcp.${domain} >> ${outFile}
106 | dig +short SRV _gc._tcp.${domain} >> ${outFile}
107 | dig +short SRV _hkp._tcp.${domain} >> ${outFile}
108 | dig +short SRV _hkps._tcp.${domain} >> ${outFile}
109 | dig +short SRV _http._tcp.${domain} >> ${outFile}
110 | dig +short SRV _https._tcp.${domain} >> ${outFile}
111 | dig +short SRV _jabber-client._tcp.${domain} >> ${outFile}
112 | dig +short SRV _jabber-client._udp.${domain} >> ${outFile}
113 | dig +short SRV _jabber._tcp.${domain} >> ${outFile}
114 | dig +short SRV _jabber._udp.${domain} >> ${outFile}
115 | dig +short SRV _ocsp._tcp.${domain} >> ${outFile}
116 | dig +short SRV _pgpkeys._tcp.${domain} >> ${outFile}
117 | dig +short SRV _pgprevokations._tcp.${domain} >> ${outFile}
118 | dig +short SRV _PKIXREP._tcp.${domain} >> ${outFile}
119 | dig +short SRV _submission._tcp.${domain} >> ${outFile}
120 | dig +short SRV _svcp._tcp.${domain} >> ${outFile}
121 | dig +short SRV _telnet._tcp.${domain} >> ${outFile}
122 | dig +short SRV _test._tcp.${domain} >> ${outFile}
123 | dig +short SRV _whois._tcp.${domain} >> ${outFile}
124 | dig +short SRV _x-puppet-ca._tcp.${domain} >> ${outFile}
125 | dig +short SRV _x-puppet._tcp.${domain} >> ${outFile}
126 | dig +short SRV _xmpp-client._tcp.${domain} >> ${outFile}
127 | dig +short SRV _xmpp-client._udp.${domain} >> ${outFile}
128 | dig +short SRV _xmpp-server._tcp.${domain} >> ${outFile}
129 | dig +short SRV _xmpp-server._udp.${domain} >> ${outFile}
130 | dig +short SSHFP ${domain} >> ${outFile}
131 | dig +short TA ${domain} >> ${outFile}
132 | dig +short TKEY ${domain} >> ${outFile}
133 | dig +short TLSA ${domain} >> ${outFile}
134 | dig +short TSIG ${domain} >> ${outFile}
135 | dig +short TXT ${domain} >> ${outFile}
136 | dig +short URI ${domain} >> ${outFile}
137 | dig +short WKS ${domain} >> ${outFile}
138 |
139 | sort -u ${outFile}
140 | rm ${outFile}
141 |
142 |
--------------------------------------------------------------------------------
/ressources/modules/dns_get_top_level_domains.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import threading
3 | import time
4 | import random
5 | import os
6 | import sys
7 |
8 | # check amount of passed arguments
9 | if (len(sys.argv) != 2):
10 | print("usage: {} domain".format(sys.argv[0]))
11 | print("Run host command for different top level domains")
12 | sys.exit(1)
13 |
14 | domain = sys.argv[1]
15 | tldNoDot = domain.split(".")
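# e.g. "example.com" -> tldNoDot[0] == "example"; only the first label is
# combined with each TLD below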
16 |
17 | tlds = [".aaa",".aarp",".abarth",".abb",".abbott"".abbvie"".abc"".able",
18 | ".abogado",".abudhabi",".ac",".academy",".accenture",".accountant",
19 | ".accountants",".aco",".active",".actor",".ad",".adac",
20 | ".ads",".adult",".ae",".aeg",".aero",".aetna",
21 | ".af",".afamilycompany",".afl",".africa",".ag",".agakhan",
22 | ".agency",".ai",".aig",".aigo",".airbus",".airforce",
23 | ".airtel",".akdn",".al",".alfaromeo",".alibaba",".alipay",
24 | ".allfinanz",".allstate",".ally",".alsace",".alstom",".am",
25 | ".amazon",".americanexpress",".americanfamily",".amex",".amfam",".amica",
26 | ".amsterdam",".an",".analytics",".android",".anquan",".anz",
27 | ".ao",".aol",".apartments",".app",".apple",".aq",
28 | ".aquarelle",".ar",".arab",".aramco",".archi",".army",
29 | ".arpa",".art",".arte",".as",".asda",".asia",
30 | ".associates",".at",".athleta",".attorney",".au",".auction",
31 | ".audi",".audible",".audio",".auspost",".author",".auto",
32 | ".autos",".avianca",".aw",".aws",".ax",".axa",
33 | ".az",".azure",".ba",".baby",".baidu",".banamex",
34 | ".bananarepublic",".band",".bank",".bar",".barcelona",".barclaycard",
35 | ".barclays",".barefoot",".bargains",".baseball",".basketball",".bauhaus",
36 | ".bayern",".bb",".bbc",".bbt",".bbva",".bcg",
37 | ".bcn",".bd",".be",".beats",".beauty",".beer",
38 | ".bentley",".berlin",".best",".bestbuy",".bet",".bf",
39 | ".bg",".bh",".bharti",".bi",".bible",".bid",
40 | ".bike",".bing",".bingo",".bio",".biz",".bj",
41 | ".bl",".black",".blackfriday",".blanco",".blockbuster",".blog",
42 | ".bloomberg",".blue",".bm",".bms",".bmw",".bn",
43 | ".bnl",".bnpparibas",".bo",".boats",".boehringer",".bofa",
44 | ".bom",".bond",".boo",".book",".booking",".boots",
45 | ".bosch",".bostik",".boston",".bot",".boutique",".box",
46 | ".bq",".br",".bradesco",".bridgestone",".broadway",".broker",
47 | ".brother",".brussels",".bs",".bt",".budapest",".bugatti",
48 | ".build",".builders",".business",".buy",".buzz",".bv",
49 | ".bw",".by",".bz",".bzh",".ca",".cab",
50 | ".cafe",".cal",".call",".calvinklein",".cam",".camera",
51 | ".camp",".cancerresearch",".canon",".capetown",".capital",".capitalone",
52 | ".car",".caravan",".cards",".care",".career",".careers",
53 | ".cars",".cartier",".casa",".case",".caseih",".cash",
54 | ".casino",".cat",".catering",".catholic",".cba",".cbn",
55 | ".cbre",".cbs",".cc",".cd",".ceb",".center",
56 | ".ceo",".cern",".cf",".cfa",".cfd",".cg",
57 | ".ch",".chanel",".channel",".charity",".chase",".chat",
58 | ".cheap",".chintai",".chloe",".christmas",".chrome",".chrysler",
59 | ".church",".ci",".cipriani",".circle",".cisco",".citadel",
60 | ".citi",".citic",".city",".cityeats",".ck",".cl",
61 | ".claims",".cleaning",".click",".clinic",".clinique",".clothing",
62 | ".cloud",".club",".clubmed",".cm",".cn",".co",
63 | ".coach",".codes",".coffee",".college",".cologne",".com",
64 | ".comcast",".commbank",".community",".company",".compare",".computer",
65 | ".comsec",".condos",".construction",".consulting",".contact",".contractors",
66 | ".cooking",".cookingchannel",".cool",".coop",".corsica",".country",
67 | ".coupon",".coupons",".courses",".cpa",".cr",".credit",
68 | ".creditcard",".creditunion",".cricket",".crown",".crs",".cruise",
69 | ".cruises",".csc",".cu",".cuisinella",".cv",".cw",
70 | ".cx",".cy",".cymru",".cyou",".cz",".dabur",
71 | ".dad",".dance",".data",".date",".dating",".datsun",
72 | ".day",".dclk",".dds",".de",".deal",".dealer",
73 | ".deals",".degree",".delivery",".dell",".deloitte",".delta",
74 | ".democrat",".dental",".dentist",".desi",".design",".dev",
75 | ".dhl",".diamonds",".diet",".digital",".direct",".directory",
76 | ".discount",".discover",".dish",".diy",".dj",".dk",
77 | ".dm",".dnp",".do",".docs",".doctor",".dodge",
78 | ".dog",".doha",".domains",".doosan",".dot",".download",
79 | ".drive",".dtv",".dubai",".duck",".dunlop",".duns",
80 | ".dupont",".durban",".dvag",".dvr",".dz",".earth",
81 | ".eat",".ec",".eco",".edeka",".edu",".education",
82 | ".ee",".eg",".eh",".email",".emerck",".energy",
83 | ".engineer",".engineering",".enterprises",".epost",".epson",".equipment",
84 | ".er",".ericsson",".erni",".es",".esq",".estate",
85 | ".esurance",".et",".etisalat",".eu",".eurovision",".eus",
86 | ".events",".everbank",".exchange",".expert",".exposed",".express",
87 | ".extraspace",".fage",".fail",".fairwinds",".faith",".family",
88 | ".fan",".fans",".farm",".farmers",".fashion",".fast",
89 | ".fedex",".feedback",".ferrari",".ferrero",".fi",".fiat",
90 | ".fidelity",".fido",".film",".final",".finance",".financial",
91 | ".fire",".firestone",".firmdale",".fish",".fishing",".fit",
92 | ".fitness",".fj",".fk",".flickr",".flights",".flir",
93 | ".florist",".flowers",".flsmidth",".fly",".fm",".fo",
94 | ".foo",".food",".foodnetwork",".football",".ford",".forex",
95 | ".forsale",".forum",".foundation",".fox",".fr",".free",
96 | ".fresenius",".frl",".frogans",".frontdoor",".frontier",".ftr",
97 | ".fujitsu",".fujixerox",".fun",".fund",".furniture",".futbol",
98 | ".fyi",".ga",".gal",".gallery",".gallo",".gallup",
99 | ".game",".games",".gap",".garden",".gay",".gb",
100 | ".gbiz",".gd",".gdn",".ge",".gea",".gent",
101 | ".genting",".george",".gf",".gg",".ggee",".gh",
102 | ".gi",".gift",".gifts",".gives",".giving",".gl",
103 | ".glade",".glass",".gle",".global",".globo",".gm",
104 | ".gmail",".gmbh",".gmo",".gmx",".gn",".godaddy",
105 | ".gold",".goldpoint",".golf",".goo",".goodhands",".goodyear",
106 | ".goog",".google",".gop",".got",".gov",".gp",
107 | ".gq",".gr",".grainger",".graphics",".gratis",".green",
108 | ".gripe",".grocery",".group",".gs",".gt",".gu",
109 | ".guardian",".gucci",".guge",".guide",".guitars",".guru",
110 | ".gw",".gy",".hair",".hamburg",".hangout",".haus",
111 | ".hbo",".hdfc",".hdfcbank",".health",".healthcare",".help",
112 | ".helsinki",".here",".hermes",".hgtv",".hiphop",".hisamitsu",
113 | ".hitachi",".hiv",".hk",".hkt",".hm",".hn",
114 | ".hockey",".holdings",".holiday",".homedepot",".homegoods",".homes",
115 | ".homesense",".honda",".honeywell",".horse",".hospital",".host",
116 | ".hosting",".hot",".hoteles",".hotels",".hotmail",".house",
117 | ".how",".hr",".hsbc",".ht",".htc",".hu",
118 | ".hughes",".hyatt",".hyundai",".ibm",".icbc",".ice",
119 | ".icu",".id",".ie",".ieee",".ifm",".iinet",
120 | ".ikano",".il",".im",".imamat",".imdb",".immo",
121 | ".immobilien",".in",".inc",".industries",".infiniti",".info",
122 | ".ing",".ink",".institute",".insurance",".insure",".int",
123 | ".intel",".international",".intuit",".investments",".io",".ipiranga",
124 | ".iq",".ir",".irish",".is",".iselect",".ismaili",
125 | ".ist",".istanbul",".it",".itau",".itv",".iveco",
126 | ".iwc",".jaguar",".java",".jcb",".jcp",".je",
127 | ".jeep",".jetzt",".jewelry",".jio",".jlc",".jll",
128 | ".jm",".jmp",".jnj",".jo",".jobs",".joburg",
129 | ".jot",".joy",".jp",".jpmorgan",".jprs",".juegos",
130 | ".juniper",".kaufen",".kddi",".ke",".kerryhotels",".kerrylogistics",
131 | ".kerryproperties",".kfh",".kg",".kh",".ki",".kia",
132 | ".kim",".kinder",".kindle",".kitchen",".kiwi",".km",
133 | ".kn",".koeln",".komatsu",".kosher",".kp",".kpmg",
134 | ".kpn",".kr",".krd",".kred",".kuokgroup",".kw",
135 | ".ky",".kyoto",".kz",".la",".lacaixa",".ladbrokes",
136 | ".lamborghini",".lamer",".lancaster",".lancia",".lancome",".land",
137 | ".landrover",".lanxess",".lasalle",".lat",".latino",".latrobe",
138 | ".law",".lawyer",".lb",".lc",".lds",".lease",
139 | ".leclerc",".lefrak",".legal",".lego",".lexus",".lgbt",
140 | ".li",".liaison",".lidl",".life",".lifeinsurance",".lifestyle",
141 | ".lighting",".like",".lilly",".limited",".limo",".lincoln",
142 | ".linde",".link",".lipsy",".live",".living",".lixil",
143 | ".lk",".llc",".llp",".loan",".loans",".locker",
144 | ".locus",".loft",".lol",".london",".lotte",".lotto",
145 | ".love",".lpl",".lplfinancial",".lr",".ls",".lt",
146 | ".ltd",".ltda",".lu",".lundbeck",".lupin",".luxe",
147 | ".luxury",".lv",".ly",".ma",".macys",".madrid",
148 | ".maif",".maison",".makeup",".man",".management",".mango",
149 | ".map",".market",".marketing",".markets",".marriott",".marshalls",
150 | ".maserati",".mattel",".mba",".mc",".mcd",".mcdonalds",
151 | ".mckinsey",".md",".me",".med",".media",".meet",
152 | ".melbourne",".meme",".memorial",".men",".menu",".meo",
153 | ".merckmsd",".metlife",".mf",".mg",".mh",".miami",
154 | ".microsoft",".mil",".mini",".mint",".mit",".mitsubishi",
155 | ".mk",".ml",".mlb",".mls",".mm",".mma",
156 | ".mn",".mo",".mobi",".mobile",".mobily",".moda",
157 | ".moe",".moi",".mom",".monash",".money",".monster",
158 | ".montblanc",".mopar",".mormon",".mortgage",".moscow",".moto",
159 | ".motorcycles",".mov",".movie",".movistar",".mp",".mq",
160 | ".mr",".ms",".msd",".mt",".mtn",".mtpc",
161 | ".mtr",".mu",".museum",".mutual",".mutuelle",".mv",
162 | ".mw",".mx",".my",".mz",".na",".nab",
163 | ".nadex",".nagoya",".name",".nationwide",".natura",".navy",
164 | ".nba",".nc",".ne",".nec",".net",".netbank",
165 | ".netflix",".network",".neustar",".new",".newholland",".news",
166 | ".next",".nextdirect",".nexus",".nf",".nfl",".ng",
167 | ".ngo",".nhk",".ni",".nico",".nike",".nikon",
168 | ".ninja",".nissan",".nissay",".nl",".no",".nokia",
169 | ".northwesternmutual",".norton",".now",".nowruz",".nowtv",".np",
170 | ".nr",".nra",".nrw",".ntt",".nu",".nyc",
171 | ".nz",".obi",".observer",".off",".office",".okinawa",
172 | ".olayan",".olayangroup",".oldnavy",".ollo",".om",".omega",
173 | ".one",".ong",".onl",".online",".onyourside",".ooo",
174 | ".open",".oracle",".orange",".org",".organic",".orientexpress",
175 | ".origins",".osaka",".otsuka",".ott",".ovh",".pa",
176 | ".page",".pamperedchef",".panasonic",".panerai",".paris",".pars",
177 | ".partners",".parts",".party",".passagens",".pay",".pccw",
178 | ".pe",".pet",".pf",".pfizer",".pg",".ph",
179 | ".pharmacy",".phd",".philips",".phone",".photo",".photography",
180 | ".photos",".physio",".piaget",".pics",".pictet",".pictures",
181 | ".pid",".pin",".ping",".pink",".pioneer",".pizza",
182 | ".pk",".pl",".place",".play",".playstation",".plumbing",
183 | ".plus",".pm",".pn",".pnc",".pohl",".poker",
184 | ".porn",".post",".pr",".pramerica",".praxi",
185 | ".press",".prime",".pro",".prod",".productions",".prof",
186 | ".progressive",".promo",".properties",".property",".protection",".pru",
187 | ".prudential",".ps",".pt",".pub",".pw",".pwc",
188 | ".py",".qa",".qpon",".quebec",".quest",".qvc",
189 | ".racing",".radio",".raid",".re",".read",".realestate",
190 | ".realtor",".realty",".recipes",".red",".redstone",".redumbrella",
191 | ".rehab",".reise",".reisen",".reit",".reliance",".ren",
192 | ".rent",".rentals",".repair",".report",".republican",".rest",
193 | ".restaurant",".review",".reviews",".rexroth",".rich",".richardli",
194 | ".ricoh",".rightathome",".ril",".rio",".rip",".rmit",
195 | ".ro",".rocher",".rocks",".rodeo",".rogers",".room",
196 | ".rs",".rsvp",".ru",".rugby",".ruhr",".run",
197 | ".rw",".rwe",".ryukyu",".sa",".saarland",".safe",
198 | ".safety",".sakura",".sale",".salon",".samsclub",".samsung",
199 | ".sandvik",".sandvikcoromant",".sanofi",".sap",".sapo",".sarl",
200 | ".sas",".save",".saxo",".sb",".sbi",".sbs",
201 | ".sc",".sca",".scb",".schaeffler",".schmidt",".scholarships",
202 | ".school",".schule",".schwarz",".science",".scjohnson",".scor",
203 | ".scot",".sd",".se",".search",".seat",".secure",
204 | ".security",".seek",".select",".sener",".services",".ses",
205 | ".seven",".sew",".sex",".sexy",".sfr",".sg",
206 | ".sh",".shangrila",".sharp",".shaw",".shell",".shia",
207 | ".shiksha",".shoes",".shop",".shopping",".shouji",".show",
208 | ".showtime",".shriram",".si",".silk",".sina",".singles",
209 | ".site",".sj",".sk",".ski",".skin",".sky",
210 | ".skype",".sl",".sling",".sm",".smart",".smile",
211 | ".sn",".sncf",".so",".soccer",".social",".softbank",
212 | ".software",".sohu",".solar",".solutions",".song",".sony",
213 | ".soy",".spa",".space",".spiegel",".sport",".spot",
214 | ".spreadbetting",".sr",".srl",".srt",".ss",".st",
215 | ".stada",".staples",".star",".starhub",".statebank",".statefarm",
216 | ".statoil",".stc",".stcgroup",".stockholm",".storage",".store",
217 | ".stream",".studio",".study",".style",".su",".sucks",
218 | ".supplies",".supply",".support",".surf",".surgery",".suzuki",
219 | ".sv",".swatch",".swiftcover",".swiss",".sx",".sy",
220 | ".sydney",".symantec",".systems",".sz",".tab",".taipei",
221 | ".talk",".taobao",".target",".tatamotors",".tatar",".tattoo",
222 | ".tax",".taxi",".tc",".tci",".td",".tdk",
223 | ".team",".tech",".technology",".tel",".telecity",".telefonica",
224 | ".temasek",".tennis",".teva",".tf",".tg",".th",
225 | ".thd",".theater",".theatre",".tiaa",".tickets",".tienda",
226 | ".tiffany",".tips",".tires",".tirol",".tj",".tjmaxx",
227 | ".tjx",".tk",".tkmaxx",".tl",".tm",".tmall",
228 | ".tn",".to",".today",".tokyo",".tools",".top",
229 | ".toray",".toshiba",".total",".tours",".town",".toyota",
230 | ".toys",".tp",".tr",".trade",".trading",".training",
231 | ".travel",".travelchannel",".travelers",".travelersinsurance",".trust",".trv",
232 | ".tt",".tube",".tui",".tunes",".tushu",".tv",
233 | ".tvs",".tw",".tz",".ua",".ubank",".ubs",
234 | ".uconnect",".ug",".uk",".um",".unicom",".university",
235 | ".uno",".uol",".ups",".us",".uy",".uz",
236 | ".va",".vacations",".vana",".vanguard",".vc",".ve",
237 | ".vegas",".ventures",".verisign",".versicherung",".vet",".vg",
238 | ".vi",".viajes",".video",".vig",".viking",".villas",
239 | ".vin",".vip",".virgin",".visa",".vision",".vista",
240 | ".vistaprint",".viva",".vivo",".vlaanderen",".vn",".vodka",
241 | ".volvo",".vote",".voting",".voto",".voyage",
242 | ".vu",".vuelos",".wales",".walmart",".walter",".wang",
243 | ".wanggou",".warman",".watch",".watches",".weather",".weatherchannel",
244 | ".webcam",".weber",".website",".wed",".wedding",".weibo",
245 | ".weir",".wf",".whoswho",".wien",".wiki",".williamhill",
246 | ".win",".windows",".wine",".winners",".wme",".wolterskluwer",
247 | ".woodside",".work",".works",".world",".wow",".ws",
248 | ".wtc",".wtf",".xbox",".xerox",".xfinity",".xihuan",
249 | ".xin",".vermögensberater",".vermögensberatung",".xperia",".xxx",".xyz",
250 | ".yachts",".yahoo",".yamaxun",".yandex",".ye",".yodobashi",
251 | ".yoga",".yokohama",".you",".youtube",".yt",".yun",
252 | ".za",".zappos",".zara",".zero",".zip",".zippo"]
253 |
254 | resolveAbleTlds = []
255 |
256 |
257 | """FUNCTION
258 |
259 | Create thread and execute given command.
260 |
261 | randInd = A random value that defines by index which command should be executed.
262 | """
263 | class threadForCommand(threading.Thread):
264 | def __init__(self, command):
265 | threading.Thread.__init__(self)
266 | self.command = command
267 |
268 | def run(self):
269 | retValue = os.system(self.command + " > /dev/null ")
270 |
271 | if (retValue == 0):
272 | currDomain = self.command.split(" ")
273 | resolveAbleTlds.append(currDomain[1])
274 |
275 |
276 | # keep up to 10 lookup threads running in parallel
277 | allThreads = []
278 | while tlds:
279 |     if (threading.active_count() <= 10):
280 |         randInd = random.randint(0, len(tlds)-1)
281 |         currentThread = threadForCommand("host " + tldNoDot[0] + tlds[randInd])
282 |         del(tlds[randInd])
283 |         currentThread.start()
284 |         allThreads.append(currentThread)
285 |     else:
286 |         time.sleep(1)
287 |
288 | # wait for all lookups to finish before printing
289 | for currentThread in allThreads:
290 |     currentThread.join()
291 |
292 | for i in resolveAbleTlds:
293 |     print(i)
294 |
295 |
--------------------------------------------------------------------------------
/ressources/modules/dnslytics_get_rootdomains.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import sys
4 | import time
5 | import os
6 | from selenium.webdriver.common.by import By
7 |
8 | # check amount of passed arguments
9 | if (len(sys.argv) != 2):
10 | print("usage: {} gid".format(sys.argv[0]))
11 | print("ID can either be Google Adsense (pub-X) or Google Analytics (ua-X)")
12 | print("Visit dnslytics.com and extract domains connected to google id")
13 | sys.exit(1)
14 |
15 | # get absolute path to current directory
16 | currentPosition = os.path.realpath(__file__)
17 | dn = os.path.dirname(currentPosition)
18 |
19 | # initiate gecko webdriver
20 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
21 | code = sourceFile.read()
22 | exec(code)
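# note: the executed initiate_webdriver snippet is expected to define the
# global selenium webdriver instance "driver" used below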
23 |
24 | gid = sys.argv[1].lower()
25 |
26 | # check id format
27 | if ("ua-" not in gid and "pub-" not in gid):
28 | print("Wrong format for gid!")
29 | driver.close()
30 | exit(1)
31 |
32 | driver.get("https://search.dnslytics.com/search?q=" + gid + "&d=domains")
33 |
34 | domains = driver.find_elements(By.XPATH, './/h4')
35 |
36 | for domain in domains:
37 | print(domain.text)
38 |
39 |
--------------------------------------------------------------------------------
/ressources/modules/dnsx_get_coherent_domains.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 2 ]
4 | then
5 |     echo "usage: ${0} domainFile outputDirectory"
6 |     echo "domainFile: file containing domains, one per line"
7 | exit 1
8 | fi
9 |
10 | domainFile=${1}
11 | hosts=$(sort -u ${domainFile})
12 | dnsxResults=$(dnsx -j -silent -l ${domainFile} -mx -ns -asn)
13 |
14 | tempDir="/tmp/curl-results-$(date +"%Y-%m-%d_%T")"
15 | tempResult="${tempDir}/result.csv"
16 | mkdir -p ${tempDir}
17 |
18 | echo "Hostname ; Whois Domain ; Whois IP ; Mailserver ; NS Server ; ASN ; Effective URL ; Copyright ; Title ; Google Adsense ; Google Analytics ; Social Media ; Favicon" > "${tempResult}"
19 |
20 | for domain in ${hosts}
21 | do
22 | # reset values
23 | hostResolveAble=""
24 | ipWhois=""
25 | mxHost=""
26 | nsHost=""
27 | asn=""
28 | effectiveUrl=""
29 | copyright=""
30 | httpTitle=""
31 | googleAdsense=""
32 | googleAnalytics=""
33 | socialMedia=""
34 | faviconStatus=""
35 | faviconHref=""
36 | mdHashFavicon=""
37 | domainWhois=""
38 |
39 | # get IPv4 (remove non printable characters)
40 | ipAddress=$(echo "${dnsxResults}" | jq -r --arg dom "${domain}" 'select(.host == $dom) | .a[]? // empty' | tr '\n' ' ' | sed 's/[^[:print:]]//g' )
41 |
42 | # get mailserver
43 | mxHost=$(echo "${dnsxResults}" | jq -r --arg dom "${domain}" 'select(.host == $dom) | .mx[]? // empty' | tr '\n' ' ' | sed 's/[^[:print:]]//g')
44 |
45 | # get nameserver
46 | nsHost=$(echo "${dnsxResults}" | jq -r --arg dom "${domain}" 'select(.host == $dom) | .ns[]? // empty' | tr '\n' ' ' | sed 's/[^[:print:]]//g')
47 |
48 | ipWhois=$(echo "${dnsxResults}" | jq -r --arg dom "${domain}" 'select(.host == $dom) | .asn["as-name"]? // empty' | tr '\n' ' ' | sed 's/[^[:print:]]//g')
49 |
50 | # get ASN (replace null by empty value)
51 | asn=$(echo "${dnsxResults}" | jq -r --arg dom "${domain}" 'select(.host == $dom) | .asn["as-number"]? // empty' | tr '\n' ' ' | sed 's/[^[:print:]]//g' | grep -v "jq: error ")
52 |
53 |     # only run curl if the host resolved to an ip address
54 |     if [ -n "${ipAddress}" ]
55 | then
56 | # run curl
57 | tmpFile="${tempDir}/${domain}.html"
58 | curlOut=$(curl --connect-timeout 10 --max-time 10 -s -L -A "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36" -o "${tmpFile}" --write-out '%{http_code} %{url_effective}' "${domain}")
59 | httpStatus=$(echo ${curlOut} | cut -d " " -f 1)
60 |         effectiveUrl=$(echo ${curlOut} | cut -d " " -f 2 | tr -d '\n' | sed 's/;//g')
61 |
62 | if [ -f ${tmpFile} ]
63 | then
64 | # grep copyright (remove non printable characters)
65 |             copyright=$(cat ${tmpFile} | grep -Eio "[^<>\"']*©[^<>\"']*" | tail -n 1 | tr -d '\n' | sed 's/[;*-]/ /g' | sed 's/[^[:print:]]//g')
66 |
67 | # grep title
68 |             httpTitle=$(cat ${tmpFile} | grep -Eio "<title>(.*)</title>" | cut -d ">" -f 2 | cut -d "<" -f1 | tr -d '\n' | sed 's/[;*-]/ /g' | sed 's/[^[:print:]]//g')
69 |
70 | # grep Google Adsense
71 | googleAdsense=$(cat ${tmpFile} | grep -Eio "pub-[0-9]{16}" | tr '\n' ',')
72 |
73 | # grep Google Analytics
74 | googleAnalytics=$(cat ${tmpFile} | grep -Eio "UA-[0-9]{9}-[0-9]" | tr '\n' ',')
75 |
76 | # grep social media profiles
77 | socialMedia=$(cat ${tmpFile} | grep -Eio "(linkedin\.com|youtube\.com|facebook\.com|github\.com|xing\.com)/[^?<>'\" ]*" | tr '\n' ',' | sed 's/;//g' | sed 's/[^[:print:]]//g')
78 |
79 | # get favicon hash
80 | tmpFileIco="${tempDir}/${domain}.ico"
81 | faviconStatus=$(curl -s -o "${tmpFileIco}" --write-out "%{http_code}" "${effectiveUrl}/favicon.ico" 2> /dev/null)
82 |
83 | if [[ "${faviconStatus}" -eq 200 ]]
84 | then
85 | mdHashFavicon=$(cat ${tmpFileIco} | md5sum | cut -d "-" -f 1)
86 | elif [ -f ${tmpFile} ]
87 | then
88 | faviconHref=$(cat ${tmpFile} | grep -Eio "[^<>\"']*favicon.ico[^<>\"']*")
89 |
90 | # if href contains https
91 | if [[ ${faviconHref} == *"https://"* ]]
92 | then
93 | mdHashFavicon=$(curl --connect-timeout 10 --max-time 10 -s -L "${faviconHref}" | md5sum | awk -F ' ' '{print $1}' 2> /dev/null)
94 | else
95 | mdHashFavicon=$(curl --connect-timeout 10 --max-time 10 -s -L "${effectiveUrl}/${faviconHref}" | md5sum | awk -F ' ' '{print $1}' 2> /dev/null)
96 | fi
97 |
98 | rm ${tmpFile}
99 | fi
100 | fi
101 | fi
102 |
103 | # get whois of domain
104 | domainWhois=$(whois ${domain} 2> /dev/null)
105 |     organisation=$(echo "${domainWhois}" | grep "^Registrant Organization: " | awk -F ": " '{print $2}' | sed 's/[^[:print:]]//g')
106 |
107 | # rerun whois command using another source, if rate limit reached
108 | if echo "${domainWhois}" | grep -q "clientTransferProhibited";
109 | then
110 | organisation=$(curl -s "https://www.whois.com/whois/${domain}" | grep -i "Registrant Organization: " | awk -F ": " '{print $2}' | sed 's/[^[:print:]]//g' 2> /dev/null)
111 | fi
112 |
113 | # print csv results
114 | echo "${domain} ; ${organisation} ; ${ipWhois} ; ${mxHost} ; ${nsHost} ; ${asn} ; ${effectiveUrl} ; ${copyright} ; ${httpTitle} ; ${googleAdsense} ; ${googleAnalytics} ; ${socialMedia} ; ${mdHashFavicon}" >> "${tempResult}"
115 | done
116 |
117 | # copy result csv to output directory
118 | resultFileName=$(echo "${domainFile}" | sed 's/\//_/g' | sed 's/.txt//g')
119 | cp ${tempResult} ${2}/${resultFileName}.csv
120 |
121 | echo "Results:"
122 | echo ""
123 |
124 | # print coherent domains
125 | awkResult=$(awk -F ";" '{ for (i=1; i<=NF; i++) count[$i]++ } END { for (word in count) if (count[word] > 1) print word}' "${tempResult}")
126 | while IFS= read -r line
127 | do
128 |     # skip empty lines, whitespace and control characters
129 | if ! echo "${line}" | grep -qP '^[[:space:][:cntrl:],]*$'
130 | then
131 | PURPLE='\033[0;35m'
132 | NC='\033[0m'
133 | printf "${PURPLE}${line}${NC}\n"
134 | grep "$line" "${tempResult}" | cut -d ";" -f 1
135 | echo ""
136 | fi
137 | done <<< "${awkResult}"
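# illustrative example (hypothetical values): if example.com and example.de
# share the Google Analytics ID UA-123456789-1, that field occurs twice in
# result.csv, the awk above prints it once, and the grep lists both
# hostnames underneath it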
138 |
139 | rm -r ${tempDir}
140 |
141 |
--------------------------------------------------------------------------------
/ressources/modules/git_get_secrets.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 2 ]
4 | then
5 | echo "usage: ${0} repository outputDirectory"
6 | echo "Clone repository and run gitleaks, trufflehog, scanrepo, noseyparker"
7 |     echo "repository: full URL starting with https://"
8 | exit 1
9 | fi
10 |
11 | GR='\033[1;32m'
12 | OR='\033[0;33m'
13 | NC='\033[0m'
14 |
15 | outputDir=${2}
16 |
17 | timeStamp=$(date +"%Y-%m-%d_%T" | tr ':' '_')
18 | globalTempDir="/tmp/get-github-secrets-${timeStamp}"
19 |
20 | mkdir -p ${globalTempDir}
21 |
22 | # clone repository
23 | repo=${1}
24 | repoName=$(echo ${repo} | awk -F "/" '{print $4"-"$5}')
25 |
26 | # replace unwanted char
27 | clearRepoName=${repoName//./-}
28 | tmpRepoDir="${globalTempDir}/${clearRepoName}"
29 |
30 | # clone repository
31 | if [ ! -d ${tmpRepoDir} ]
32 | then
33 | git clone --quiet ${repo} ${tmpRepoDir}
34 | fi
35 |
36 | # copy repositories
37 | cp -r ${tmpRepoDir} ${tmpRepoDir}-gitleaks
38 | cp -r ${tmpRepoDir} ${tmpRepoDir}-trufflehog
39 | cp -r ${tmpRepoDir} ${tmpRepoDir}-scanrepo
40 | cp -r ${tmpRepoDir} ${tmpRepoDir}-noseyparker
41 |
42 | gitleaks detect --source ${tmpRepoDir}-gitleaks -v > ${globalTempDir}/gitleaks 2>&1 &
43 |
44 | trufflehog git file://${tmpRepoDir}-trufflehog --no-update > ${globalTempDir}/truff 2>&1 &
45 |
46 | timeStamp=$(date +"%Y-%m-%d_%T" | tr ':' '_')
47 | tmpDataStore="${globalTempDir}/${timeStamp}-${clearRepoName}"
48 | noseyparker scan --datastore ${tmpDataStore} ${tmpRepoDir}-noseyparker --progress never > /dev/null ; noseyparker report --datastore ${tmpDataStore} --progress never > ${globalTempDir}/noseyparker 2>&1 &
49 |
50 | if [ -d ${tmpRepoDir}-scanrepo ]
51 | then
52 | cd ${tmpRepoDir}-scanrepo
53 | git checkout --quiet origin
54 | git log -p | scanrepo > ${globalTempDir}/scanrepo 2>&1
55 |     cd - > /dev/null
56 | else
57 |     echo "Error: ${tmpRepoDir}-scanrepo does not exist - cloning ${repo} may have failed."
58 | exit 1
59 | fi
60 |
61 | wait
62 |
63 | mkdir ${outputDir}/${clearRepoName}
64 |
65 | echo ""
66 | printf "${GR}###### gitleaks ######${NC}\n"
67 | echo ""
68 | cat ${globalTempDir}/gitleaks > ${outputDir}/${clearRepoName}/gitleaks
69 | cat ${globalTempDir}/gitleaks
70 |
71 | echo ""
72 | printf "${GR}###### trufflehog ######${NC}\n"
73 | echo ""
74 | cat ${globalTempDir}/truff > ${outputDir}/${clearRepoName}/trufflehog
75 | cat ${globalTempDir}/truff
76 |
77 | echo ""
78 | printf "${GR}###### noseyparker ######${NC}\n"
79 | echo ""
80 | cat ${globalTempDir}/noseyparker > ${outputDir}/${clearRepoName}/noseyparker
81 | cat ${globalTempDir}/noseyparker
82 |
83 | echo ""
84 | printf "${GR}###### scanrepo ######${NC}\n"
85 | echo ""
86 | cat ${globalTempDir}/scanrepo > ${outputDir}/${clearRepoName}/scanrepo
87 | cat ${globalTempDir}/scanrepo
88 |
89 | rm -rf "${globalTempDir}"
90 |
91 |
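92 | # Design note: each scanner works on its own copy of the clone, so all four
93 | # can run as background jobs without interfering with each other's checkouts;
94 | # 'wait' joins them before the reports are collected. Minimal usage sketch
95 | # (hypothetical repository):
96 | #   ./git_get_secrets.sh https://github.com/example/repo /tmp/out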
--------------------------------------------------------------------------------
/ressources/modules/git_grep_commits.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} repository"
6 | echo "Run multiple grep and git log commands for github repository"
7 | exit 1
8 | fi
9 |
10 | Y='\033[0;33m'
11 | N='\033[0m'
12 |
13 | # define paths to temporary files and dir
14 | repo=${1}
15 | repoDir=$(echo ${repo} | awk -F "/" '{print $4"-"$5}')
16 |
17 | timeStamp=$(date +"%Y-%m-%d_%T")
18 | tmpDir="/tmp/grep-inside-${repoDir}-${timeStamp}"
19 | commitFile="${tmpDir}/commits"
20 | tempFinds="${tmpDir}/finds"
21 | tempGreps="${tmpDir}/greps"
22 |
23 | # check if repository exists
24 | repoExists=$(curl -s -o /dev/null -w "%{http_code}" "${repo}")
25 |
26 | if [ "${repoExists}" -ne 200 ];
27 | then
28 | echo "Error repository ${repo} does not exist or is not public."
29 | exit 1
30 | fi
31 |
32 | # clone repository
33 | mkdir -p "${tmpDir}/git-repo"
34 | git clone --quiet ${repo} "${tmpDir}/git-repo"
35 |
36 | cd "${tmpDir}/git-repo"
37 |
38 | # get each commit
39 | git checkout --quiet origin
40 | authorEmails=$(git log --quiet | grep "Author: " | awk -F " <" '{print $2}' | cut -d ">" -f 1 | grep -v "users.noreply" | sort -u)
41 | git log --quiet | grep '^commit ' | cut -d " " -f 2 > ${commitFile}
42 | printf "${Y}########## Commit messages ########## ${N}\n"
43 |
44 | # search for this keywords and file extensions
45 | declare -a allKeyWords=("ssh .*@.*" "ftp .*@.*" "-AsPlainText" "passwor[td]=" "access[-_]token" "api[-_]key" "private[-_]key")
46 | declare -a allFileExtensions=("*.conf" "*.cnf" "*.cfg" "*.config" "*.kdb" "*.kdbx" "*.key" "*.p12" "*.pem" "*.rdp" "*.pfx" "*.remmina" "*.vdi" "*.ini")
47 |
48 | # iterate through each commit and grep and search for values inside arrays
49 | while read -r commitId || [[ -n "${commitId}" ]]
50 | do
51 | commitDate=$(git log --format="%ad" -n 1 ${commitId})
52 | commitMsg=$(git log --format="%B" -n 1 ${commitId} | tr -d '\n')
53 | echo "${commitDate} ${commitMsg}"
54 | git checkout --quiet ${commitId}
55 |
56 | for k in "${allKeyWords[@]}"
57 | do
58 | grep --color=always -IEiRo ".{0,20}${k}.{0,20}" * >> ${tempGreps}
59 | done
60 |
61 | for f in "${allFileExtensions[@]}"
62 | do
63 | find . -iname "${f}" | cut -c2- | awk -v g="${1}" -v c="${commitId}" '{print g"/blob/"c$1}' >> ${tempFinds}
64 | done
65 |
66 | done < ${commitFile}
67 |
68 | echo ""
69 | printf "${Y}########## Grep results ########## ${N}\n"
70 | sort -u ${tempGreps}
71 | echo ""
72 | printf "${Y}########## Find results ########## ${N}\n"
73 | sort -u ${tempFinds}
74 |
75 | echo ""
76 | printf "${Y}########## Author emails ########## ${N}\n"
77 | echo "${authorEmails}"
78 | echo ""
79 | echo ""
80 |
81 | # remove temporary files
82 | cd /tmp
83 | rm -rf ${tmpDir}
84 |
85 |
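86 | # Hedged example of the keyword grep above: with k="api[-_]key", a line such
87 | # as 'API_KEY=abc123' in any tracked file of the checked-out commit matches
88 | #   grep --color=always -IEiRo ".{0,20}api[-_]key.{0,20}" *
89 | # and is reported with up to 20 characters of context on each side.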
--------------------------------------------------------------------------------
/ressources/modules/github_get_organisation_member.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 1 ]
4 | then
5 | read -p "Enter github.com ApiKey: " apiKey
6 | elif [ ${#} -eq 2 ]
7 | then
8 | apiKey=${2}
9 | else
10 | echo "usage: ${0} account [apiKey]"
11 | echo "account: name of user or organisation on github.com"
12 | echo "Run curl commands and retrieve member information from github organisation"
13 | exit 1
14 | fi
15 |
16 | function getOrgMember()
17 | {
18 | echo "Organisation: ${1}"
19 | # get all usernames of organisation
20 | orgMember=$(curl -s "https://api.github.com/orgs/${1}/members" -H "Authorization: Bearer ${apiKey}" | jq -r ".[] .login")
21 | echo "Url ; Created_at ; Company ; Bio ; Blog ; Location ; Email ; Twitter ; Follower ; Following ; Authormail"
22 |
23 | for user in ${orgMember}
24 | do
25 | # get information about each user
26 | userInfo=$(curl -s "https://api.github.com/users/${user}" -H "Authorization: Bearer ${apiKey}")
27 | csvUserInfo=$(echo "${userInfo}" | jq -r '"\(.html_url) ; \(.created_at) ; \(.company) ; \(.bio | if . != null then gsub(";"; "_") else "" end) ; \(.blog) ; \(.location) ; \(.email) ; \(.twitter_username) ; \(.followers) ; \(.following)"')
28 | # remove line breaks
29 | csvUserInfoNoLb=$(echo ${csvUserInfo} | tr -d '\r\n' | tr -d '\n')
30 |
31 | # get user commit author email address
32 | commitsAsc=$(curl -s "https://api.github.com/search/commits?q=author:${user}&sort=author-date&order=asc" -H "Authorization: Bearer ${apiKey}")
33 | commitsDesc=$(curl -s "https://api.github.com/search/commits?q=author:${user}&sort=author-date&order=desc" -H "Authorization: Bearer ${apiKey}")
34 | possAscMails=$(echo ${commitsAsc} | jq -r '.items[] | select(.commit.author.email != null or .commit.committer.email != null) | .commit.author.email, .commit.committer.email')
35 | possDescMails=$(echo ${commitsDesc} | jq -r '.items[] | select(.commit.author.email != null or .commit.committer.email != null) | .commit.author.email, .commit.committer.email')
36 |         email=$(echo -e "${possAscMails}\n${possDescMails}" | grep -v "@users.noreply.github.com\|noreply@github.com" | sort -u | tr -d '\r' | tr '\n' '/')
37 |
38 | # print result
39 | echo "${csvUserInfoNoLb} ; ${email}"
40 | done
41 |
42 | echo ""
43 | }
44 |
45 | account=${1}
46 | accountInfo=$(curl -s "https://api.github.com/users/${account}" -H "Authorization: Bearer ${apiKey}")
47 | accountType=$(echo "${accountInfo}" | jq -r 'if has("type") then .type else .message end')
48 |
49 | # check if account is an organisation
50 | if [ "${accountType}" == "Organization" ]
51 | then
52 | getOrgMember "${account}"
53 | elif [ "${accountType}" == "User" ]
54 | then
55 |     allOrgs=$(curl -s "https://api.github.com/users/${account}/orgs" -H "Authorization: Bearer ${apiKey}" | jq -r ".[] .login")
56 |
57 | for org in ${allOrgs}
58 | do
59 | getOrgMember "${org}"
60 | done
61 | else
62 | echo ${accountType}
63 | fi
64 |
65 |
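66 | # Minimal sketch of the member lookup above (hypothetical organisation and
67 | # token):
68 | #   curl -s "https://api.github.com/orgs/example-org/members" \
69 | #        -H "Authorization: Bearer ghp_exampleToken" | jq -r '.[] .login'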
--------------------------------------------------------------------------------
/ressources/modules/github_search_accounts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} 'companyName'"
6 | exit 1
7 | fi
8 |
9 | companyName=$(echo ${1} | sed 's/ /+/g')
10 |
11 | searchUser=$(curl -s "https://api.github.com/search/users?q=${companyName}" | jq -r '.items[] | "\(.html_url) ; \(.type)"' | sort)
12 | searchRepos=$(curl -s "https://api.github.com/search/repositories?q=${companyName}" | jq -r '.items[] | "\(.created_at) ; \(.html_url) ; \(.description) ; \(.homepage)"' | sort)
13 | searchIssues=$(curl -s "https://api.github.com/search/issues?q=${companyName}" | jq -r '.items[] | "\(.created_at) ; \(.user .html_url) ; \(.html_url) ; \(.title)"' | sort)
14 |
15 | echo "Url ; Type "
16 | echo "${searchUser}"
17 | echo ""
18 | echo "Repository created_at ; Url ; Description ; Homepage"
19 | echo "${searchRepos}"
20 | echo ""
21 | echo "Issue Timestamp ; User ; Url ; Title"
22 | echo "${searchIssues}"
23 |
24 |
--------------------------------------------------------------------------------
/ressources/modules/google_get_linkedIn_employees.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from selenium import webdriver
4 | from selenium.webdriver.common.action_chains import ActionChains
5 | from selenium.webdriver.common.by import By
6 | from selenium.webdriver.common.keys import Keys
7 | from selenium.webdriver.firefox.options import Options as FirefoxOptions
8 | from selenium.webdriver.firefox.service import Service
9 | from selenium.webdriver.support import expected_conditions as EC
10 | from selenium.webdriver.support.ui import WebDriverWait
11 | import sys
12 | import time
13 | import urllib.parse
14 | import os
15 |
16 | # check amount of passed arguments
17 | if (len(sys.argv) != 2):
18 | print("usage: {} searchKey".format(sys.argv[0]))
19 | print("Run Google dork and collect linkedin employees")
20 | sys.exit(1)
21 |
22 |
23 | """FUNCTION
24 | Extract username from linkedIn URL
25 |
26 | selObj = Selenium href object
27 | """
28 | def extractEmplName(selObj):
29 |     for url in selObj:
30 | try:
31 | href = url.get_attribute("href")
32 | except Exception as e:
33 |             print("Error while getting href: " + str(e))
34 | return
35 |
36 | if ("google." in href):
37 | continue
38 |
39 | if ("linkedin.com/in" in href):
40 | try:
41 | splittedHref = href.split(".linkedin.com/in/")
42 | cleanHref = splittedHref[1]
43 | cleanHref = cleanHref.replace("\n","")
44 | cleanHref = urllib.parse.unquote(cleanHref)
45 | splittedName = cleanHref.split("-")
46 | cleanEmplName = ""
47 |
48 | for nameChunk in splittedName:
49 | if ("/" in nameChunk):
50 | splNameChunk = nameChunk.split("/")
51 | nameChunk = splNameChunk[0]
52 | if (not any(chr.isdigit() for chr in nameChunk)):
53 | cleanEmplName = cleanEmplName + " " + nameChunk
54 |                     if (nameChunk and nameChunk[0] == "%"):
55 | cleanEmplName = cleanEmplName + " " + nameChunk
56 | allEmployees.append(cleanEmplName[1:])
57 |
58 | except Exception as e:
59 |                 print("Error: " + href + " -- " + str(e))
60 |
61 |
62 | # get absolute path to current directory
63 | currentPosition = os.path.realpath(__file__)
64 | dn = os.path.dirname(currentPosition)
65 |
66 | # initiate gecko webdriver
67 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
68 | code = sourceFile.read()
69 | exec(code)
70 |
71 | allEmployees = []
72 |
73 | searchKey = 'intitle:"' + sys.argv[1]+ '" inurl:"linkedin.com/in/" site:linkedin.com'
74 |
75 | # search via Google
76 | url = "https://www.google.com"
77 | driver.get(url)
78 | time.sleep(2)
79 |
80 | # accept cookies
81 | try:
82 | WebDriverWait(driver,5).until(EC.element_to_be_clickable((By.ID, "L2AGLb"))).click()
83 | time.sleep(4)
84 | except:
85 | pass
86 |
87 | # send keys to search field
88 | try:
89 | textField = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.CLASS_NAME, "gLFyf")))
90 | textField.send_keys(searchKey)
91 | textField.send_keys(Keys.RETURN)
92 | except Exception as e:
93 | print(e)
94 |     print("Error while reading response - maybe your IP address has been blocked")
95 | exit(1)
96 |
97 | time.sleep(5)
98 |
99 | # scroll down
100 | for i in range(10):
101 | actions = ActionChains(driver)
102 | actions.send_keys(Keys.END).perform()
103 | time.sleep(3)
104 |
105 | # press more results button
106 | try:
107 |     button = driver.find_element(By.XPATH, "//h3[contains(@class, 'RVQdVd')]")
108 | button.click()
109 | except:
110 | pass
111 |
112 | # get employee names
113 | urls = driver.find_elements(By.XPATH, "//a[@href]")
114 | extractEmplName(urls)
115 |
116 | driver.close()
117 |
118 | # print employee names
119 | sortedEmployees = sorted(set(allEmployees))
120 |
121 | for e in sortedEmployees:
122 | print(e)
123 |
124 |
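125 | # Hedged example of extractEmplName on a hypothetical result URL:
126 | #   https://de.linkedin.com/in/jane-doe-1a2b3c4d
127 | # splitting on ".linkedin.com/in/" yields "jane-doe-1a2b3c4d"; chunks that
128 | # contain digits ("1a2b3c4d") are dropped, leaving the name "jane doe".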
--------------------------------------------------------------------------------
/ressources/modules/gpg_get_emails.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} domain"
6 | exit 1
7 | fi
8 |
9 | domain=${1}
10 |
11 | ubuntuResult=$(curl -s "https://keyserver.ubuntu.com/pks/lookup?fingerprint=on&op=vindex&search=${domain}" | grep -oiE '([[:alnum:]_.-]+@[[:alnum:]_.-]+?\.[[:alpha:].]{2,6})' | tr '[:upper:]' '[:lower:]')
12 | earthResult=$(curl -s "http://the.earth.li:11371/pks/lookup?fingerprint=on&op=vindex&search=${domain}" | grep -oiE '([[:alnum:]_.-]+@[[:alnum:]_.-]+?\.[[:alpha:].]{2,6})' | tr '[:upper:]' '[:lower:]')
13 |
14 | echo -e "${ubuntuResult}\n${earthResult}" | grep "${domain}" | sort -u
15 |
16 |
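17 | # Minimal usage sketch (hypothetical domain): both keyservers are queried via
18 | # their vindex endpoints and only addresses under the given domain are kept:
19 | #   ./gpg_get_emails.sh example.com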
--------------------------------------------------------------------------------
/ressources/modules/grayhatwarfare_get_buckets.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter grayhatwarfare.com ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} 'companyName' outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve buckets from grayhatwarfare.com"
12 | exit 1
13 | fi
14 |
15 | # use only first part of company name for search
16 | company=$(echo ${1} | cut -d " " -f 1)
17 | outPath=${2}
18 |
19 | result=$(curl -s --request GET --url "https://buckets.grayhatwarfare.com/api/v2/files?keywords=${company}&start=0&limit=1000" --header "Authorization: Bearer ${apiKey}")
20 |
21 | # write json output to file
22 | saveFile="$(echo ${1} | sed 's/[^[:alnum:]]/_/g')"
23 | echo "${result}" | jq -c > ${outPath}/grayhatwarfare-buckets-${saveFile}.json
24 |
25 | echo "Last modified ; Url:"
26 | echo "${result}" | jq -r '.files[] | "\(.lastModified | strftime("%Y-%m-%d")) ; \(.url)"' | sort -n
27 | echo ""
28 |
29 |
--------------------------------------------------------------------------------
/ressources/modules/handelsregister_get_company_names.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from lxml import etree
4 | from selenium.webdriver.common.by import By
5 | from selenium.webdriver.common.keys import Keys
6 | import sys
7 | import time
8 | import os
9 |
10 | # check amount of passed arguments
11 | if (len(sys.argv) != 2):
12 | print("usage: {} companyName".format(sys.argv[0]))
13 | print("Visit handelsregister.de and extract company names")
14 | sys.exit(1)
15 |
16 | # get absolute path to current directory
17 | currentPosition = os.path.realpath(__file__)
18 | dn = os.path.dirname(currentPosition)
19 |
20 | # initiate gecko webdriver
21 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
22 | code = sourceFile.read()
23 | exec(code)
24 |
25 | url = "https://www.handelsregister.de"
26 | driver.get(url)
27 |
28 | # click on advanced search
29 | advSearch = driver.find_elements(By.ID, "naviForm:erweiterteSucheLink")
30 | advSearch[0].click()
31 | time.sleep(3)
32 |
33 | # enter search key
34 | textField = driver.find_element(By.ID, "form:schlagwoerter")
35 | textField.send_keys(sys.argv[1])
36 | time.sleep(2)
37 |
38 | # choose 100 results from dropdown
39 | dropDown = driver.find_element(By.XPATH, "//div[@id='form:ergebnisseProSeite']")
40 | dropDown.click()
41 | time.sleep(2)
42 | driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
43 | time.sleep(2)
44 | insideDropdown = driver.find_element(By.ID, "form:ergebnisseProSeite_3")
45 | insideDropdown.click()
46 |
47 | # click on search
48 | #obj = driver.find_elements(By.XPATH, "//*[contains(text(), '" + pressSearch + "')]")
49 | obj = driver.find_elements(By.ID, "form:btnSuche")
50 | obj[0].click()
51 |
52 | time.sleep(15)
53 |
54 | # parse results
55 | sourceCode = driver.page_source
56 | tree = etree.HTML(sourceCode)
57 |
58 | tbody = tree.xpath('//*[@id="ergebnissForm:selectedSuchErgebnisFormTable_data"]')[0]
59 | allTrsClasses = ["ui-widget-content ui-datatable-even", "ui-widget-content ui-datatable-odd"]
60 |
61 | for trClass in allTrsClasses:
62 | allEvenTrs = tbody.xpath('.//tr[@class="' + trClass + '"]')
63 |
64 | for tr in allEvenTrs:
65 | # print name of company, location and history
66 | results = tr.xpath('.//span[contains(@class, "marginLeft20") or contains(@class, "verticalText") or contains(@class, "marginLeft20 fontSize85")]')
67 |
68 | for i in results:
69 | # skip status
70 | if (i.text == "aktuell" or i.text == "currently registered"):
71 | continue
72 |
73 | print(i.text, end="; ")
74 |
75 | print("")
76 |
77 | driver.close()
78 |
79 |
--------------------------------------------------------------------------------
/ressources/modules/hunter_get_emails.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter hunter.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command on hunter.io and retrieve social media links and email addresses"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 | result=$(curl -s "https://api.hunter.io/v2/domain-search?domain=${domain}&api_key=${apiKey}")
18 |
19 | # write json output to file
20 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
21 | seFile="${outPath}/hunter-se-${saveFile}.txt"
22 | echo "${result}" | jq -c > ${outPath}/hunter-${saveFile}.json
23 |
24 | echo "E-Mails:" > ${seFile}
25 | echo "${result}" | jq -r '.data.emails[].value' >> ${seFile}
26 | echo "" >> ${seFile}
27 | echo "Social Media:" >> ${seFile}
28 | echo "${result}" | jq -r '.data | .twitter, .facebook, .linkedin, .instagram, .youtube' | sort -u | grep -v "null" >> ${seFile}
29 |
30 | # print results
31 | echo "${result}" | jq -r '.data.emails[].value'
32 |
33 |
--------------------------------------------------------------------------------
/ressources/modules/initiate_webdriver:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | currentPosition = os.path.realpath(__file__)
4 | dn = os.path.dirname(currentPosition)
5 |
6 | from selenium.webdriver.firefox.options import Options as FirefoxOptions
7 | from selenium.webdriver.firefox.service import Service
8 | from selenium import webdriver
9 | import random
10 |
11 | options = FirefoxOptions()
12 |
13 | # headless mode
14 | options.add_argument("--headless")
15 |
16 | # certificate errors
17 | options.add_argument("--ignore-ssl-errors=yes")
18 | options.add_argument('--ignore-certificate-errors')
19 |
20 | # get random user agent
21 | uaPointer = open(dn + "/../wordlists/user-agents.txt", "r")
22 | uas = uaPointer.readlines()
23 | randomUa = uas[random.randint(0,len(uas) - 1)].replace("\n","")
24 | uaPointer.close()
25 | options.set_preference("general.useragent.override", randomUa)
26 |
27 | # prevent creation of geckodriver.log
28 | # limit redirects (preferences must be set before the driver is created)
29 | options.set_preference("network.http.redirection-limit", 5)
30 | # prevent creation of geckodriver.log
31 | service = Service(log_output="/dev/null")
32 | driver = webdriver.Firefox(options=options, service=service)
33 |
34 | # wait up to 10 seconds when locating elements
35 | driver.implicitly_wait(10)
36 |
37 | # 60 seconds timeout
38 | driver.set_page_load_timeout(60)
39 |
40 |
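41 | # Note: this file is executed via exec() from the module scripts, so the
42 | # names defined here ('driver', 'options', 'service') become available in
43 | # the calling script's namespace.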
--------------------------------------------------------------------------------
/ressources/modules/myssl_get_subdomains.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} domain"
6 | exit 1
7 | fi
8 |
9 | domain=${1}
10 | result=$(curl -s "https://myssl.com/api/v1/discover_sub_domain?domain=${domain}")
11 | echo ${result} | jq -r '.data[] | .domain' | sort -u
12 |
13 |
--------------------------------------------------------------------------------
/ressources/modules/networksdb_get_company_names_from_domain.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter networksdb.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve company names from networksdb.io"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 |
18 | result=$(curl -s -H "X-Api-Key: ${apiKey}" "https://networksdb.io/api/org-search" -d search="${domain}")
19 | companies=$(echo "${result}" | jq -r '.results[] .organisation')
20 |
21 | # write json output to file
22 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
23 | echo "${result}" | jq -c > ${outPath}/networksdb-company-${saveFile}.json
24 |
25 | # print results
26 | echo "${companies}"
27 |
28 |
--------------------------------------------------------------------------------
/ressources/modules/networksdb_get_ipranges_from_company.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter networksdb.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} 'companyName' outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve organisations and ip ranges from networksdb.io"
12 | exit 1
13 | fi
14 |
15 | company=${1}
16 | outPath=${2}
17 |
18 | resultOrgSearch=$(curl -s -H "X-Api-Key: ${apiKey}" "https://networksdb.io/api/org-search" -d search="${company}")
19 | companyIds=$(echo "${resultOrgSearch}" | jq -r '.results[] .id')
20 |
21 | # write json output to file
22 | saveFile="$(echo ${company} | sed 's/[^[:alnum:]]/_/g')"
23 | idFile="${outPath}/networksdb-orgsearch-id-ranges.txt"
24 | echo "${resultOrgSearch}" | jq -c > ${outPath}/networksdb-orgsearch-${saveFile}.json
25 |
26 | # query networksdb for each company id
27 | for id in ${companyIds}
28 | do
29 | resultIpRanges=$(curl -s -H "X-Api-Key: ${apiKey}" "https://networksdb.io/api/org-networks" -d id="${id}")
30 |
31 | # write json output to file
32 | echo "${resultIpRanges}" | jq -c > ${outPath}/networksdb-ipranges-${id}-${saveFile}.json
33 | echo "${id}" >> ${idFile}
34 | echo "${resultIpRanges}" | jq -r '.results[] .cidr' >> ${idFile}
35 | echo "" >> ${idFile}
36 |
37 | # print results
38 | echo "${resultIpRanges}" | jq -r '.results[] .cidr' | grep -v "N/A"
39 | done
40 |
41 | # quick fix: always exit successfully, since the final grep may return non-zero
42 | exit 0
43 |
44 |
--------------------------------------------------------------------------------
/ressources/modules/networksdb_get_rootdomains_from_cidr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import urllib.parse
4 | import sys
5 | import os
6 | import datetime
7 | import time
8 | from selenium.webdriver.common.by import By
9 |
10 | if (len(sys.argv) != 2):
11 | print("usage: {} ipRange".format(sys.argv[0]))
12 | print("Visit networksdb.io and extract domains")
13 | sys.exit(1)
14 |
15 | # get absolute path to current directory
16 | currentPosition = os.path.realpath(__file__)
17 | dn = os.path.dirname(currentPosition)
18 |
19 | # initiate gecko webdriver
20 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
21 | code = sourceFile.read()
22 | exec(code)
23 |
24 | # use nmap to calculate min and max host
25 | timestamp = datetime.datetime.today().strftime("%d.%m.%Y_%H:%M:%S")
26 | ipRange = sys.argv[1]
27 | nmapOutFile = "/tmp/networksdb-get-domains" + timestamp
28 | nmapCommand = "nmap -n -sL " + ipRange + " > " + nmapOutFile
29 |
30 | # create temporary output file
31 | os.system(nmapCommand)
32 | nmapFp = open(nmapOutFile, "r")
33 | nmapContent = nmapFp.readlines()
34 | nmapFp.close()
35 | minIp = nmapContent[2].replace("Nmap scan report for ","")
36 | maxIp = nmapContent[-2].replace("Nmap scan report for ","")
37 | url = "https://networksdb.io/domains-in-network/" + minIp.replace("\n","") + "/" + maxIp.replace("\n","")
38 |
39 | # remove temporary file
40 | os.remove(nmapOutFile)
41 |
42 | try:
43 | driver.get(url)
44 | time.sleep(10)
45 |
46 | allIps = {}
47 |
48 | # get all ips
49 | elements = driver.find_elements(By.TAG_NAME, "pre")
50 |
51 | for element in elements:
52 | ipElement = element.find_element(By.XPATH, "preceding-sibling::b[1]")
53 |
54 | if ipElement:
55 | ipAddress = ipElement.text.strip(":")
56 |
57 | # collect domains corresponding to IPv4
58 | domains = element.text.strip().split("\n")
59 | allIps[ipAddress] = domains
60 |
61 | # print results
62 | for ip, domains in allIps.items():
63 | print(ip)
64 | for domain in domains:
65 | print(domain)
66 | print("")
67 |
68 | driver.close()
69 |
70 | except Exception as e:
71 | print(e)
72 | driver.close()
73 |
74 |
75 |
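76 | # Hedged example (hypothetical range): for 192.0.2.0/29, 'nmap -n -sL' prints
77 | # one "Nmap scan report for ..." line per address; the slicing above recovers
78 | # the first and last addresses to build the domains-in-network URL.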
--------------------------------------------------------------------------------
/ressources/modules/nmap_get_tls_alternative_names.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import sys
4 | import os
5 | import ipaddress
6 |
7 | # check amount of passed arguments
8 | if (len(sys.argv) != 2):
9 | print("usage: {} ipRange ".format(sys.argv[0]))
10 | print("Run nmap and scan IP Range for port 443 and extract Subject Alternative Name")
11 | sys.exit(1)
12 |
13 | nmapCommand = "nmap --min-rate 300 -sT --script /usr/share/nmap/scripts/ssl-cert.nse -Pn -p 443 -n --open -oN "
14 | outFile = "/tmp/nmap-ssl-cert-" + sys.argv[1].replace("/","_") + ".nmap "
15 | nmapCommand2 = sys.argv[1] + " > /dev/null"
16 | os.system(nmapCommand + outFile + nmapCommand2)
17 |
18 | # used to sort resulting domains unique
19 | allDomains = []
20 |
21 | # grep certificate commonName and alternative name for each host
22 | with open("/tmp/nmap-ssl-cert-" + sys.argv[1].replace("/","_") + ".nmap") as nmapResult:
23 | for line in nmapResult:
24 | # grep for commonName
25 | if ("| ssl-cert: Subject: commonName=" in line):
26 | commonNameofCurrHostWithAppendix = line.split("ssl-cert: Subject: commonName=")
27 |
28 | # remove appendix
29 | commonNameofCurrHost = commonNameofCurrHostWithAppendix[1].split("/")
30 | cleanCurrCommonName = commonNameofCurrHost[0].replace("\n", "")
31 | cleanCurrCommonName = cleanCurrCommonName.replace("*.","")
32 | cleanCurrCommonName = cleanCurrCommonName.replace(".*","")
33 | allDomains.append(cleanCurrCommonName)
34 |
35 | # grep for Subject Alternative Name
36 | if ("| Subject Alternative Name: DNS:" in line):
37 | allSubjectAltName = line.split("DNS:")
38 |
39 | # get each Subject Alternative Name
40 | for subAltNameWithAppendix in allSubjectAltName:
41 | # remove appendix
42 | subAltName = subAltNameWithAppendix.split(",")
43 |
44 |                 # skip the "| Subject Alternative Name:" delimiter
45 | if ("| Subject Alternative Name:" in subAltName[0]):
46 | continue
47 |
48 | cleanCurrCommonName = subAltName[0].replace("\n", "")
49 | cleanCurrCommonName = cleanCurrCommonName.replace("*.","")
50 | cleanCurrCommonName = cleanCurrCommonName.replace(".*","")
51 | allDomains.append(cleanCurrCommonName)
52 |
53 | allFinalDomains = sorted(set(allDomains))
54 |
55 | # print results
56 | for currDom in allFinalDomains:
57 | try:
58 | # skip ipv4 addresses
59 | ipaddress.IPv4Address(currDom)
60 | except ipaddress.AddressValueError:
61 | print (currDom)
62 |
63 | os.system("rm " + outFile)
64 |
65 |
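66 | # Hedged example of the lines parsed above, as printed by ssl-cert.nse:
67 | #   | ssl-cert: Subject: commonName=www.example.com/organizationName=...
68 | #   | Subject Alternative Name: DNS:example.com, DNS:*.example.com
69 | # wildcard prefixes are stripped, so both lines yield "example.com".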
--------------------------------------------------------------------------------
/ressources/modules/northdata_get_company_names.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from selenium.webdriver.common.keys import Keys
4 | from selenium.webdriver.support import expected_conditions as EC
5 | from selenium.webdriver.common.by import By
6 | from selenium.webdriver.common.action_chains import ActionChains
7 | import sys
8 | import time
9 | import urllib.parse
10 | import os
11 |
12 | if (len(sys.argv) != 3):
13 | print("usage: {} 'companyName' depth".format(sys.argv[0]))
14 |     print("Visit northdata.com and collect companies related to the input company")
15 | sys.exit(1)
16 |
17 | # get absolute path to current directory
18 | currentPosition = os.path.realpath(__file__)
19 | dn = os.path.dirname(currentPosition)
20 |
21 | # initiate gecko webdriver
22 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
23 | code = sourceFile.read()
24 | exec(code)
25 |
26 | driver.get("https://northdata.com")
27 | time.sleep(1)
28 |
29 | # click allow cookies
30 | try:
31 | driver.find_element(By.ID, "cmpbntyestxt").click()
32 | except:
33 | pass
34 |
35 | # enter company name in search field
36 | textField = driver.find_element(By.CLASS_NAME, "prompt")
37 | textField.send_keys(sys.argv[1])
38 | textField = driver.find_element(By.CLASS_NAME, "prompt")
39 | time.sleep(2)
40 | textField.send_keys(Keys.ARROW_DOWN)
41 | time.sleep(2)
42 | textField.send_keys(Keys.RETURN)
43 |
44 | # scroll to bottom
45 | actions = ActionChains(driver)
46 | actions.send_keys(Keys.END).perform()
47 | time.sleep(1)
48 | actions.send_keys(Keys.ARROW_UP).perform()
49 | time.sleep(1)
50 |
51 | # wait for network graph
52 | try:
53 | (driver.find_element(By.CLASS_NAME, "node"))
54 | except:
55 |     # no further nodes were returned as result
56 | print("No further nodes found for: " + driver.current_url)
57 | print("")
58 |
59 | nodeNames = []
60 | nodeHref = []
61 | nodeHrefDone = []
62 |
63 | # get initial URL of company to start crawl
64 | currentUrl = driver.current_url
65 | startNodeSplit = currentUrl.split("/")
66 | startNodeName = startNodeSplit[3].split(",")
67 | cleanStartNodeName = urllib.parse.unquote(startNodeName[0]).replace("+" ," ")
68 |
69 | # url scheme: https://www.northdata.com/companyName/locationOfLaw
70 | # some companies do not have locationOfLaw
71 | try:
72 | startNodeHref = startNodeSplit[3] + "/" + startNodeSplit[4]
73 | except:
74 | startNodeHref = startNodeSplit[3]
75 |
76 | nodeNames.append(cleanStartNodeName)
77 | nodeHref.append("/" + startNodeHref)
78 | nodeHrefDone.append("/" + startNodeHref)
79 |
80 | print("Query https://northdata.com/" + startNodeHref)
81 |
82 | nodes = driver.find_elements(By.CLASS_NAME, "node")
83 |
84 | # get all nodes that represents a company
85 | for node in nodes:
86 | if (node is None):
87 | break
88 |
89 | if (node.text is None):
90 |         print("Break: node has no text")
91 | break
92 |
93 | icon = node.text[0].encode('utf-8')
94 | name = node.text[1:]
95 | href = node.get_attribute("href")
96 |
97 | # check if the icon is a company, otherwise skip node
98 | if (icon == b'\xef\x86\xad'):
99 | if (href not in nodeHref):
100 | if (href is not None):
101 | nodeNames.append(urllib.parse.unquote(name).replace("+", " "))
102 | nodeHref.append(href["animVal"])
103 |
104 | counter = 1
105 |
106 | # click on a company node and repeat node collection
107 | while 1:
108 | allNodesDoneFlag = "1"
109 |
110 | for h in nodeHref:
111 | if (h in nodeHrefDone):
112 | continue
113 | else:
114 | print("Query https://northdata.com/" + h)
115 | counter = counter + 1
116 |
117 | if (counter == int(sys.argv[2])):
118 | sortedNames = sorted(set(nodeNames))
119 | print("\n\n##### Results #####\n")
120 | for n in sortedNames:
121 | print(n)
122 | driver.close()
123 | exit(0)
124 |
125 | allNodesDoneFlag = "0"
126 | nodeHrefDone.append(h)
127 |
128 | try:
129 | driver.get("https://www.northdata.com" + h)
130 | time.sleep(7)
131 | except:
132 | print("Error getting " + h)
133 |
134 | # wait for page to load
135 | actions.send_keys(Keys.END).perform()
136 | time.sleep(1)
137 | actions.send_keys(Keys.ARROW_UP).perform()
138 | time.sleep(1)
139 |
140 | # wait for network graph
141 | nodes = driver.find_elements(By.CLASS_NAME, "node")
142 |
143 | for node in nodes:
144 | if (node is None):
145 | break
146 |
147 | try:
148 | icon = node.text[0].replace("\n","")
149 | name = node.text[1:]
150 | href = node.get_attribute("href")
151 |
152 | if (icon == "\uf1ad"):
153 | if (href is not None):
154 | if (href["animVal"] not in nodeHref):
155 | nodeNames.append(urllib.parse.unquote(name).replace("+", " "))
156 | nodeHref.append(href["animVal"])
157 | except:
158 | pass
159 |
160 | if (allNodesDoneFlag == "1"):
161 | print("\n\n##### Results #####\n")
162 | sortedNames = sorted(set(nodeNames))
163 | for n in sortedNames:
164 | print(n)
165 |
166 | driver.close()
167 | exit(0)
168 |
169 |
--------------------------------------------------------------------------------
/ressources/modules/phonebook_get_mails.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter intelx.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve email addresses from intelx.io"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 |
18 | # amount of results may vary
19 | curlResult=$(curl -s -X POST -H "Content-Type: application/json" -H "x-key: ${apiKey}" 'https://2.intelx.io/phonebook/search' --data "{\"term\":\"${domain}\",\"lookuplevel\":0,\"maxresults\":1000,\"timeout\":null,\"datefrom\":\"\",\"dateto\":\"\",\"sort\":2,\"media\":0,\"terminate\":[]}")
20 | searchId=$(echo ${curlResult} | jq -r .id)
21 |
22 | if [[ -z "$searchId" ]]
23 | then
24 | echo "SearchId is empty, maybe your intelx API key is not allowed to query phonebook."
25 | exit 1
26 | else
27 | sleep 30
28 | result=$(curl -s -H "x-key: ${apiKey}" "https://2.intelx.io/phonebook/search/result?id=${searchId}")
29 |
30 | # write json output to file
31 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
32 | echo "${result}" | jq -c > ${outPath}/phonebook-${saveFile}.json
33 | echo "${result}" | jq -r '.selectors[] | select(.selectortypeh == "Email Address") | .selectorvalue'
34 | fi
35 |
36 |
--------------------------------------------------------------------------------
/ressources/modules/robtex_get_rootdomains.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} domain"
6 | echo "Run curl command and retrieve history of domains related to given ip address from robtex.com"
7 | exit 1
8 | fi
9 |
10 | domain=${1}
11 | ip=$(dig +short ${domain})
12 | resultPdns=$(curl -s "https://freeapi.robtex.com/pdns/reverse/${ip}")
13 | resultStatus=$(echo "$resultPdns" | jq -r '.status')
14 |
15 | # check if rate limit has been reached
16 | if [ "${resultStatus}" = "ratelimited" ]
17 | then
18 | echo "Rate limit reached"
19 | else
20 |     # print unique domains
21 | echo "${resultPdns}" | jq -r '.rrname' | sort -u
22 | fi
23 |
24 |
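25 | # Hedged sketch of the expected pdns records (one JSON object per line):
26 | #   {"rrname":"www.example.com","rrdata":"192.0.2.10","rrtype":"A", ...}
27 | # so '.rrname' extracts one domain per record before deduplication.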
--------------------------------------------------------------------------------
/ressources/modules/securitytrails_get_ip_history.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter securitytrails.com ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve ipv4 history of given domain from securitytrails.com"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 | result=$(curl -s --request GET --url "https://api.securitytrails.com/v1/history/${domain}/dns/a" --header "accept: application/json" --header "APIKEY: ${apiKey}")
18 |
19 | # write json output to file
20 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
21 | echo "${result}" | jq -c > ${outPath}/securitytrails-ip-history-${saveFile}.json
22 |
23 | echo "IPv4 history of ${domain}:"
24 | echo "${result}" | jq -r '.records[] | "\(.first_seen) ; \(.last_seen) ; \(.values[] | .ip)"'
25 |
26 |
--------------------------------------------------------------------------------
/ressources/modules/shodan_get_ports_from_cidr.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter shodan.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} ipRangeCidr outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve open ports from shodan.io"
12 | exit 1
13 | fi
14 |
15 | cidr=${1}
16 | outPath=${2}
17 |
18 | result=$(curl -s "https://api.shodan.io/shodan/host/search?key=${apiKey}&query=net:${cidr}")
19 |
20 | # write json output to file
21 | saveFile="$(echo ${cidr} | sed 's/[^[:alnum:]]/_/g')"
22 | echo "${result}" | jq -c > ${outPath}/shodan-ports-cidr-${saveFile}.json
23 |
24 | echo "Open Ports:"
25 | echo "${result}" | jq -r '.matches[] | "\(.ip_str) ; \(.port) ; \(.hostnames)" as $entry | .http | "\($entry) ; \(.title) "' | sort -n
26 |
27 |
--------------------------------------------------------------------------------
/ressources/modules/shodan_get_rootdomains_from_company.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter shodan.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} 'companyName' outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve rootdomains from shodan.io"
12 | exit 1
13 | fi
14 |
15 | companyName=${1}
16 | outPath=${2}
17 | result=$(curl -s "https://api.shodan.io/shodan/host/search?key=${apiKey}&query=org:%27${companyName}%27")
18 |
19 | # write json output to file
20 | saveFile="$(echo ${companyName} | sed 's/[^[:alnum:]]/_/g')"
21 | echo "${result}" | jq -c > ${outPath}/shodan-rootdomains-${saveFile}.json
22 |
23 | echo ""
24 | echo "${result}" | jq -r ".matches[] | .domains[], .hostnames[], .http .host | select(. != null)" | grep -v -E '([0-9]*\.){3}[0-9]*' | awk -F '.' '{print $(NF-1)"."$NF}' | sort -u
25 |
26 |
--------------------------------------------------------------------------------
/ressources/modules/shodan_get_rootdomains_from_domain.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter shodan.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve rootdomains from shodan.io"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 |
18 | resultHostname=$(curl -s "https://api.shodan.io/shodan/host/search?key=${apiKey}&query=hostname:${domain}")
19 | iconHash=$(echo "https://${domain}" | favfreak --shodan | grep "\[DORK\]" | awk -F "http.favicon.hash:" '{print $2}' )
20 | resultIcon=$(curl -s "https://api.shodan.io/shodan/host/search?key=${apiKey}&query=http.favicon.hash:${iconHash}")
21 |
22 | # write json output to file
23 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
24 | rootDomainResults="${outPath}/shodan-results.txt"
25 | echo "${resultHostname}" | jq -c > ${outPath}/shodan-rootdomains-hostname-${saveFile}.json
26 | echo "${resultIcon}" | jq -c > ${outPath}/shodan-rootdomains-favicon-${saveFile}.json
27 |
28 | echo "Domains and hostnames:" > ${rootDomainResults}
29 | echo "${resultHostname}" | jq -r '.matches[] | .domains[], .hostnames[], .http.host | select(. != null)' | grep -v -E '([0-9]*\.){3}[0-9]*' | grep -v ":" | awk -F '.' '{print $(NF-1)"."$NF}' | sort -u >> ${rootDomainResults}
30 | echo "" >> ${rootDomainResults}
31 |
32 | echo "Hosts using the same favicon:" >> ${rootDomainResults}
33 | echo "${resultIcon}" | jq -r ".matches[] | .domains[], .hostnames[], .http .host" >> ${rootDomainResults}
34 | echo "" >> ${rootDomainResults}
35 |
36 | # print results
37 | grep -v "Domains and hostnames:\|Hosts using the same favicon:" ${rootDomainResults} | sort -u
38 |
39 |
--------------------------------------------------------------------------------
/ressources/modules/skymem_get_mails.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from selenium.webdriver.common.by import By
4 | import urllib.parse
5 | import sys
6 | import os
7 |
8 | if (len(sys.argv) != 2):
9 | print("usage: {} domain".format(sys.argv[0]))
10 | print("Visit skymem.info and extract e-mail addresses")
11 | sys.exit(1)
12 |
13 | # get absolute path to current directory
14 | currentPosition = os.path.realpath(__file__)
15 | dn = os.path.dirname(currentPosition)
16 |
17 | # initiate gecko webdriver
18 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
19 | code = sourceFile.read()
20 | exec(code)
21 |
22 | allMails = []
23 |
24 | domain = urllib.parse.quote(sys.argv[1])
25 | try:
26 | driver.get("https://www.skymem.info/srch?q=" + domain)
27 |
28 | allSkymemMails = []
29 | secondPage = ""
30 |
31 | elems = driver.find_elements(By.XPATH, "//a[@href]")
32 |
33 | for elem in elems:
34 | href = elem.get_attribute("href")
35 |
36 | if (domain in href):
37 | allSkymemMails.append(href)
38 |
39 | if ("/domain/" in href):
40 | secondPage = href
41 |
42 | if (secondPage == ""):
43 | print("No results found for: " + sys.argv[1])
44 | exit(0)
45 |
46 | driver.get(secondPage)
47 | elements = driver.find_elements(By.XPATH, "//a[@href]")
48 |
49 | for e in elements:
50 | ref = e.get_attribute("href")
51 |
52 | if (domain in ref):
53 | allSkymemMails.append(ref)
54 |
55 | if ("/domain/" in ref):
56 | secondPage = ref
57 |
58 | driver.close()
59 |
60 | except Exception as e:
61 | print(e)
62 | driver.close()
63 |
64 | # sort results
65 | for skymemLink in allSkymemMails:
66 |     if ("@" + domain in skymemLink):
67 |         # extract the address from the search link
68 |         splittedSkymem = skymemLink.split("?q=")
69 |         allMails.append(splittedSkymem[1])
70 |
71 | allMailsSorted = sorted(set(allMails))
72 |
73 | # print results
74 | for m in allMailsSorted:
75 | print(m)
76 |
77 |
78 |
--------------------------------------------------------------------------------
/ressources/modules/spyonweb_get_rootdomains.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter spyonweb.com ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve rootdomains, google IDs and nameserver ips from spyonweb.com"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 | result=$(curl -s "https://api.spyonweb.com/v1/domain/${domain}?access_token=${apiKey}")
18 |
19 | # write json output to file
20 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
21 | reverseGoogleResults="${outPath}/spyonweb-reverse-googleid-${saveFile}.csv"
22 | reverseIpResults="${outPath}/spyonweb-reverse-ip-${saveFile}.csv"
23 | reverseNsResults="${outPath}/spyonweb-reverse-nameserver-${saveFile}.csv"
24 | echo "${result}" | jq -c > ${outPath}/spyonweb-rootdomains-${saveFile}.json
25 |
26 | # quick fix (jq -r 2> /dev/null)
27 | echo "Google Analytics ; Domain ; Timestamp" > ${reverseGoogleResults}
28 | echo "${result}" | (jq -r 'try .result .analytics | to_entries[] | .key as $ua | .value .items | to_entries[] | "\($ua) ; \(.key) ; \(.value)"' 2> /dev/null) >> ${reverseGoogleResults}
29 |
30 | echo "IP ; Domain ; Timestamp " > ${reverseIpResults}
31 | echo "${result}" | (jq -r 'try .result .ip | to_entries[] | .key as $ip | .value .items | to_entries[] | "\($ip) ; \(.key) ; \(.value)"' 2> /dev/null) >> ${reverseIpResults}
32 |
33 | echo "Nameserver ; Domain ; Timestamp" > ${reverseNsResults}
34 | echo "${result}" | (jq -r '.result .dns_domain | to_entries[] | .key as $ns | .value .items | to_entries[] | "\($ns) ; \(.key) ; \(.value)"' 2> /dev/null) >> ${reverseNsResults}
35 |
36 | # print rootdomains
37 | printUa=$(cat ${reverseGoogleResults} | grep -v "Google Analytics ; Domain ; Timestamp" | cut -d ";" -f 2)
38 | printIp=$(cat ${reverseIpResults} | grep -v "IP ; Domain ; Timestamp" | cut -d ";" -f 2)
39 | printNs=$(cat ${reverseNsResults} | grep -v "Nameserver ; Domain ; Timestamp" | cut -d ";" -f 2)
40 | echo -e "${printUa}\n${printIp}\n${printNs}" | sed 's/ //g' | awk -F '.' 'NF >= 2 {print $(NF-1) "." $NF}' | sort -u
41 |
42 |
--------------------------------------------------------------------------------
/ressources/modules/startpage_get_pdf_metadata.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from selenium.webdriver.common.action_chains import ActionChains
4 | from selenium.webdriver.common.by import By
5 | from selenium.webdriver.common.keys import Keys
6 | from selenium.webdriver.support import expected_conditions as EC
7 | from selenium.webdriver.support.ui import WebDriverWait
8 | import datetime
9 | import sys
10 | import time
11 | import os
12 |
13 | # check amount of passed arguments
14 | if (len(sys.argv) != 2):
15 | print("usage: {} domain".format(sys.argv[0]))
16 |     print("Run Google dork on startpage.com, browse the first 3 result pages, wget all pdf files and run exiftool")
17 | sys.exit(1)
18 |
19 | # get absolute path to current directory
20 | currentPosition = os.path.realpath(__file__)
21 | dn = os.path.dirname(currentPosition)
22 |
23 | # initiate gecko webdriver
24 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
25 | code = sourceFile.read()
26 | exec(code)
27 |
28 | searchKey = 'inurl:"' + sys.argv[1] + '" filetype:pdf'
29 |
30 | # search via startpage
31 | url = "https://startpage.com"
32 | driver.get(url)
33 | time.sleep(2)
34 |
35 | # send keystrokes of searchkey
36 | textField = WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.ID, "q")))
37 | textField.send_keys(searchKey)
38 | textField.send_keys(Keys.RETURN)
39 |
40 | urls = []
41 |
42 | # scroll to end
43 | time.sleep(5)
44 | actions = ActionChains(driver)
45 | actions.send_keys(Keys.END).perform()
46 |
47 | # check if the url contains .pdf
48 | pdfLinks = driver.find_elements(By.XPATH, "//a[@href]")
49 |
50 | for pL in pdfLinks:
51 | pdfUrl = pL.get_attribute("href")
52 |
53 | if (".pdf" in pdfUrl):
54 | urls.append(pdfUrl)
55 |
56 | # browse two more result pages (three in total)
57 | for i in range(1,3):
58 | # click Next
59 | try:
60 | nextButton = driver.find_elements(By.XPATH, './/button[@class = "pagination__next-prev-button next"]')
61 | for button in nextButton:
62 | button.click()
63 | except Exception as e:
64 | print("Error getting Next Page Number -- " + str(e))
65 |
66 | # scroll to end
67 | time.sleep(5)
68 | actions = ActionChains(driver)
69 | actions.send_keys(Keys.END).perform()
70 |
71 | # check if url contains .pdf
72 | pdfLinks = driver.find_elements(By.XPATH, "//a[@href]")
73 |
74 | for pL in pdfLinks:
75 | urlToPdf = pL.get_attribute("href")
76 | urlExtension = urlToPdf[-4:]
77 |
78 | if (".pdf" in urlExtension):
79 | urls.append(urlToPdf)
80 |
81 | driver.close()
82 |
83 | timestamp = datetime.datetime.today().strftime("%d-%m-%Y_%H:%M:%S")
84 | tempDir = "/tmp/get-pdf-meta-" + timestamp
85 | os.makedirs(tempDir)
86 |
87 |
88 |
89 | # download pdf files
90 | uniqUrls = sorted(set(urls))
91 |
92 | counter = 0
93 | print("Results:")
94 |
95 | for u in uniqUrls:
96 |     # download each pdf into the temporary directory
97 | os.system("wget --quiet \"" + u + "\" -O \"" + tempDir + "/" + str(counter) + ".pdf\"")
98 | counter = counter + 1
99 |
100 | os.system("cd " + tempDir + "; exiftool * | grep -i \"Producer\|Author\|Creator\|Email\" | sort -u")
101 |
102 | # remove directory
103 | os.system("rm -rf " + tempDir)
104 |
105 | print("\nDocuments:")
106 | for x in uniqUrls:
107 | print(x)
108 |
109 |
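110 | # Minimal sketch of the metadata step (hypothetical files): exiftool runs once
111 | # over every downloaded pdf and the grep keeps author-related fields, e.g.
112 | #   exiftool *.pdf | grep -i "Producer\|Author\|Creator\|Email" | sort -u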
--------------------------------------------------------------------------------
/ressources/modules/tmdb_get_company_names.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from selenium.webdriver.common.by import By
4 | import sys
5 | import os
6 |
7 | # check amount of passed arguments
8 | if (len(sys.argv) != 2):
9 | print("usage: {} 'companyName'".format(sys.argv[0]))
10 | print("Visit tmdb.eu and extract brand names")
11 | sys.exit(1)
12 |
13 | # get absolute path to current directory
14 | currentPosition = os.path.realpath(__file__)
15 | dn = os.path.dirname(currentPosition)
16 |
17 | # initiate gecko webdriver
18 | with open(dn + "/initiate_webdriver", "rb") as sourceFile:
19 | code = sourceFile.read()
20 | exec(code)
21 |
22 | companyName = sys.argv[1]
23 | baseUrl = "https://tmdb.eu/suche/marken.html?s=" + companyName + "&in=trademark&db%5B%5D=dpma&db%5B%5D=euipo&db%5B%5D=wipo&db%5B%5D=swiss&db%5B%5D=uspto&match=is&classes=&page="
24 | print("ID ; Brand ; Class ; Owner ; Filing ; Registration ; End of Protection ; Status")
25 |
26 | allBrands = []
27 |
28 | for counter in range(1, 9):
29 | currentUrl = baseUrl + str(counter)
30 | driver.get(currentUrl)
31 |
32 | # get table cells
33 | tableRows = driver.find_elements(By.XPATH, '//div[@class="tm-results-entry"]')
34 |
35 | for row in tableRows:
36 | line = row.text.replace(";","")
37 | line = line.replace("\n"," ; ")
38 |
39 | if (line not in allBrands):
40 | print(line)
41 | allBrands.append(line)
42 |
43 | driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
44 |
45 | currentPage = driver.current_url
46 | pageNumber = currentPage[-1:]
47 |
48 | # exit after last page
49 | if (int(pageNumber) != counter):
50 | driver.close()
51 | exit(0)
52 |
53 |
--------------------------------------------------------------------------------
/ressources/modules/tomba_get_emails.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 4 ]
4 | then
5 | echo "usage: ${0} domain outputDirectory apiKeyTa apiKeyTs"
6 | echo "Run curl command on tomba.io and retrieve social media links and email addresses"
7 | exit 1
8 | fi
9 |
10 | domain=${1}
11 | outPath=${2}
12 | apiKeyTa=${3}
13 | apiKeyTs=${4}
14 | result=$(curl -s --request GET --url "https://api.tomba.io/v1/domain-search?domain=${domain}" --header "X-Tomba-Key: ${apiKeyTa}" --header "X-Tomba-Secret: ${apiKeyTs}")
15 |
16 | # write json output to file
17 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
18 | seFile="${outPath}/tomba-social-media-${saveFile}.txt"
19 | echo "${result}" | jq -c > ${outPath}/tomba-${saveFile}.json
20 |
21 | echo "E-Mails ; Phone" > ${seFile}
22 | echo "${result}" | jq -r '.data .emails[] | "\(.email) ; \(.phone_number)"' >> ${seFile}
23 | echo "" >> ${seFile}
24 | echo "Social Media:" >> ${seFile}
25 | echo "${result}" | jq -r '.data.organization | select(.social_links != null) | .social_links | to_entries[] | select(.value | type == "string" and startswith("http")) | .value' >> ${seFile}
26 |
27 | # print results
28 | echo "${result}" | jq -r '.data .emails[] | .email'
29 |
30 |
--------------------------------------------------------------------------------
/ressources/modules/urlscan_get_rootdomains_from_company.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter urlscan.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} 'companyName' outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve rootdomains from urlscan.io"
12 | exit 1
13 | fi
14 |
15 | companyNameOld=${1}
16 | # add * to company name for wildcard search
17 | companyName=$(echo "*${1}*" | sed 's/ /*/g')
18 | firstCompanyName=$(echo "${companyNameOld}" | cut -d " " -f 1)
19 | outPath=${2}
20 |
21 | # search using domain: filter
22 | resultDomain=$(curl -s "https://urlscan.io/api/v1/search/?q=domain:${companyName}*" -H "API-Key: ${apiKey}")
23 | grepResultDomains=$(echo "${resultDomain}" | jq -r ".results[] | .task .domain, .page .domain" | grep -v -E '([0-9]*\.){3}[0-9]*' | awk -F '.' '{print $(NF-1)"."$NF}')
24 |
25 | # search using filename: filter
26 | fileName="${firstCompanyName}.*"
27 | resultFile=$(curl -s "https://urlscan.io/api/v1/search/?q=filename:${fileName}" -H "API-Key: ${apiKey}")
28 | grepResultFile=$(echo "${resultFile}" | jq -r ".results[] | .task .domain, .page .domain" | grep -v -E '([0-9]*\.){3}[0-9]*' | awk -F '.' '{print $(NF-1)"."$NF}')
29 |
30 | # write json output to file
31 | saveFile="$(echo "${companyNameOld}" | sed 's/[^[:alnum:]]/_/g')"
32 | echo "${resultDomain}" | jq -c > ${outPath}/urlscan-rootdomains-domain-${saveFile}.json
33 | echo "${resultFile}" | jq -c > ${outPath}/urlscan-rootdomains-filename-${saveFile}.json
34 |
35 | # print results
36 | echo -e "${grepResultDomains}\n${grepResultFile}" | sort -u
37 |
38 |
--------------------------------------------------------------------------------
/ressources/modules/urlscan_get_rootdomains_from_domain.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter urlscan.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve rootdomain from urlscan.io"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 |
18 | # search using domain: filter
19 | result=$(curl -s "https://urlscan.io/api/v1/search/?q=domain:${domain}" -H "API-Key: ${apiKey}")
20 | printResult=$(echo "${result}" | jq -r ".results[] | .task .domain, .page .domain" | grep -v -E '([0-9]*\.){3}[0-9]*' | awk -F '.' '{print $(NF-1)"."$NF}' | sort -u)
21 |
22 | # write json output to file
23 | saveFile="$(echo "${domain}" | sed 's/[^[:alnum:]]/_/g')"
24 | echo "${result}" | jq -c > ${outPath}/urlscan-rootdomains-${saveFile}.json
25 |
26 | # print results
27 | echo "${printResult}"
28 |
29 |
--------------------------------------------------------------------------------
/ressources/modules/urlscan_get_subdomains.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter urlscan.io ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve subdomains from urlscan.io"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 |
18 | # search using domain: filter
19 | result=$(curl -s "https://urlscan.io/api/v1/search/?q=domain:*.${domain}" -H "API-Key: ${apiKey}")
20 | printResults=$(echo "${result}" | jq -r '.results[] | .task .domain, .page .domain')
21 |
22 | # write json output to file
23 | saveFile="$(echo "${domain}" | sed 's/[^[:alnum:]]/_/g')"
24 | echo "${result}" | jq -c > ${outPath}/urlscan-subdomains-${saveFile}.json
25 |
26 | # print results
27 | echo "${printResults}" | grep "${domain}" | sort -u
28 |
29 |
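30 | # Hedged note: the query 'domain:*.example.com' matches scans of any
31 | # subdomain; both the submitted task domain and the final page domain are
32 | # returned, hence the closing grep on "${domain}".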
--------------------------------------------------------------------------------
/ressources/modules/validin_get_rootdomains_from_cidr.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter validin.com ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} ipRangeCidr outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve rootdomains for given IP range in CIDR notation from validin.com"
12 | exit 1
13 | fi
14 |
15 | cidr=${1}
16 | outPath=${2}
17 | result=$(curl -s -H "Authorization: BEARER ${apiKey}" "https://app.validin.com/api/axon/ip/dns/history/${cidr}")
18 |
19 | # write json output to file
20 | saveFile="$(echo ${cidr} | sed 's/[^[:alnum:]]/_/g')"
21 | echo "${result}" | jq -c > ${outPath}/validin-rootdomains-cidr-${saveFile}.json
22 | timestampFile="${outPath}/validin-rootdomains-timestamps-${saveFile}.csv"
23 | echo "Domain history of ${cidr}:" > ${timestampFile}
24 | echo "${result}" | jq -r '.records .A[] | "\(.value) ; \(.first_seen | strftime("%Y-%m-%d")) ; \(.last_seen | strftime("%Y-%m-%d"))"' | sort -t ';' -k 3 -r >> ${timestampFile}
25 |
26 | echo "${result}" | jq -r '.records .A[] .value' | awk -F '.' '{print $(NF-1)"."$NF}' | sort -u
27 |
28 |
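validin returns first_seen/last_seen as Unix epochs, which the jq strftime calls above render as dates. A self-contained sketch with a fabricated single-record payload:

# fabricated payload demonstrating the date formatting used in this module
echo '{"records":{"A":[{"value":"www.example.com","first_seen":1577836800,"last_seen":1704067200}]}}' \
    | jq -r '.records .A[] | "\(.value) ; \(.first_seen | strftime("%Y-%m-%d")) ; \(.last_seen | strftime("%Y-%m-%d"))"'
# prints: www.example.com ; 2020-01-01 ; 2024-01-01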
--------------------------------------------------------------------------------
/ressources/modules/validin_get_rootdomains_from_domain.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter validin.com ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command and retrieve rootdomains, nameserver and reverse ip addresses from validin.com"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 |
18 | result=$(curl -s -H "Authorization: BEARER ${apiKey}" "https://app.validin.com/api/axon/domain/dns/history/${domain}")
19 | ips=$(echo "${result}" | jq -r '.records .A[] | "\(.value) ; \(.first_seen | strftime("%Y-%m-%d")) ; \(.last_seen | strftime("%Y-%m-%d"))"' | sort -t ';' -k3 -r)
20 |
21 | firstIp=$(echo "${ips}" | head -n 1 | awk -F " ; " '{print $1}')
22 | historyIp=$(curl -s -H "Authorization: BEARER ${apiKey}" "https://app.validin.com/api/axon/ip/dns/history/$firstIp")
23 |
24 | # write json output to file
25 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
26 | reverseIpResult="${outPath}/validin-dns-history-timestamps-${saveFile}.csv"
27 |
28 | echo "${result}" | jq -c > ${outPath}/validin-rootdomains-dns-history-${saveFile}.json
29 | echo "${historyIp}" | jq -c > ${outPath}/validin-rootdomains-ip-history-${saveFile}.json
30 |
31 | echo "Reverse IPs for ${1}:" > ${reverseIpResult}
32 | echo "${ips}" >> ${reverseIpResult}
33 | echo "" >> ${reverseIpResult}
34 |
35 | echo "Domain history of ${firstIp}:" >> ${reverseIpResult}
36 | echo "${historyIp}" | jq -r '.records .A[] | "\(.value) ; \(.first_seen | strftime("%Y-%m-%d")) ; \(.last_seen | strftime("%Y-%m-%d"))"' | sort >> ${reverseIpResult}
37 |
38 | # print results
39 | echo "${historyIp}" | jq -r '.records .A[] .value' | awk -F '.' '{print $(NF-1)"."$NF}' | sort -u
40 |
41 |
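The pivot above takes the A record with the most recent last_seen (the first line after the reverse sort on column 3) and feeds its IP into the reverse history lookup. A minimal sketch of that extraction with fabricated records:

# two fabricated, already-sorted history lines; newest last_seen first
ips='203.0.113.10 ; 2020-01-01 ; 2024-01-01
203.0.113.11 ; 2019-01-01 ; 2021-06-01'
firstIp=$(echo "${ips}" | head -n 1 | awk -F " ; " '{print $1}')
echo "${firstIp}"   # prints 203.0.113.10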
--------------------------------------------------------------------------------
/ressources/modules/whoxy_get_rootdomains.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} 'companyName'"
6 | echo "Run curl command and retrieve rootdomains from whoxy.com"
7 | exit 1
8 | fi
9 |
10 | # whoxy does not return any results if white space gets encoded as +
11 | company=$(echo "${1}" | cut -d " " -f 1)
12 | curl -s "https://www.whoxy.com/keyword/${company}" | grep -o " ${outPath}/zoomeye-ports-${saveFile}.json
22 |
23 | echo "Open Ports:"
24 | echo "${result}" | jq -r '.matches[] | "\(.ip) ; \(.portinfo .port) ; \(.portinfo .service) ; \(.rdns) ; \(.portinfo | .title)"' | sort -n
25 |
26 |
--------------------------------------------------------------------------------
/ressources/modules/zoomeye_get_rootdomains_from_domain.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -eq 2 ]
4 | then
5 | read -p "Enter zoomeye.hk ApiKey: " apiKey
6 | elif [ ${#} -eq 3 ]
7 | then
8 | apiKey=${3}
9 | else
10 | echo "usage: ${0} domain outputDirectory [apiKey]"
11 | echo "Run curl command on zoomeye.hk and retrieve rootdomains"
12 | exit 1
13 | fi
14 |
15 | domain=${1}
16 | outPath=${2}
17 | result=$(curl -s -X GET "https://api.zoomeye.hk/domain/search?q=${domain}" -H "API-KEY:${apiKey}")
18 |
19 | # write json output to file
20 | saveFile="$(echo ${domain} | sed 's/[^[:alnum:]]/_/g')"
21 | echo "${result}" | jq -c > ${outPath}/zoomeye-rootdomains-${saveFile}.json
22 |
23 | echo "${result}" | jq -r ".list[] .name" | awk -F '.' '{print $(NF-1)"."$NF}' | tr '[:upper:]' '[:lower:]' | sort -u
24 |
25 |
--------------------------------------------------------------------------------
/ressources/scripts/print-api-keys.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | exeDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
4 | apiKeys=$(cat "${exeDir}/../../build/config.json")
5 |
6 | bevigil=$(echo "${apiKeys}" | jq -r '.bevigil_com')
7 | if [[ ! -z ${bevigil} ]]
8 | then
9 | echo "# bevigil.com"
10 | echo "50/month"
11 | echo ""
12 | fi
13 |
14 | binaryedge=$(echo "${apiKeys}" | jq -r '.binaryedge_io')
15 | if [[ ! -z ${binaryedge} ]]
16 | then
17 | echo "# binaryedge.io"
18 | binaryedgeJson=$(curl -s "https://api.binaryedge.io/v2/user/subscription" -H "X-Key: ${binaryedge}")
19 | binaryedgeRequestsPlan=$(echo ${binaryedgeJson} | jq -r '.requests_plan')
20 | binaryedgeRequestsLeft=$(echo ${binaryedgeJson} | jq -r '.requests_left')
21 | binaryedgeRequestsLeft2=$(echo "${binaryedgeRequestsPlan} - ${binaryedgeRequestsLeft}" | bc)
22 | echo "${binaryedgeRequestsLeft2}/${binaryedgeRequestsPlan} -> ${binaryedgeRequestsLeft}"
23 | echo ""
24 | fi
25 |
26 | bufferover=$(echo "${apiKeys}" | jq -r '.bufferover_run')
27 | if [[ ! -z ${bufferover} ]]
28 | then
29 | echo "# bufferover.run"
30 | echo "100/month"
31 | echo ""
32 | fi
33 |
34 | fullhunt=$(echo "${apiKeys}" | jq -r '.fullhunt_io')
35 | if [[ ! -z ${fullhunt} ]]
36 | then
37 | echo "# fullhunt.io"
38 | echo "100/month"
39 | echo ""
40 | fi
41 |
42 | github=$(echo "${apiKeys}" | jq -r '.github_com')
43 | if [[ ! -z ${github} ]]
44 | then
45 | echo "# github.com"
46 | curl -s "https://api.github.com/rate_limit" -H "Authorization: Bearer ${github}" | jq -r '.rate | "\(.used)/\(.limit) -> \(.reset | strftime("%Y-%m-%d %H:%M:%S"))"'
47 | echo ""
48 | fi
49 |
50 | hunter=$(echo "${apiKeys}" | jq -r '.hunter_io')
51 | if [[ ! -z ${hunter} ]]
52 | then
53 | echo "# hunter.io"
54 | curl -s "https://api.hunter.io/v2/account?api_key=${hunter}" | jq -r '.data .requests .searches as $s | "\($s .used)/\($s .available) -> \(.data .reset_date)"'
55 | echo ""
56 | fi
57 |
58 | intelx=$(echo "${apiKeys}" | jq -r '.intelx_io')
59 | if [[ ! -z ${intelx} ]]
60 | then
61 | echo "# intelx.io"
62 | intelxJson=$(curl -s -H "x-key: ${intelx}" "https://2.intelx.io/authenticate/info")
63 | intelxCreditMax=$(echo ${intelxJson} | jq -r '.paths ."/phonebook/search" | .CreditMax')
64 | intelxCredit=$(echo ${intelxJson} | jq -r '.paths ."/phonebook/search" | .Credit')
65 | intelxCredit2=$(echo "${intelxCreditMax} - ${intelxCredit}" | bc)
66 | echo "${intelxCredit2}/${intelxCreditMax} -> 10/day"
67 | echo ""
68 | fi
69 |
70 | if [[ ! -z $(echo "${apiKeys}" | jq -r '.leakix_net') ]]
71 | then
72 | echo "# leakix.net"
73 | echo "3000/month"
74 | echo ""
75 | fi
76 |
77 | netlas=$(echo "${apiKeys}" | jq -r '.netlas_io')
78 | if [[ ! -z ${netlas} ]]
79 | then
80 | echo "# netlas.io"
81 | netlasJson=$(curl -s -H "X-API-Key: ${netlas}" "https://app.netlas.io/api/users/current/")
82 | netlasCoins=$(echo ${netlasJson} | jq -r '.plan .coins')
83 | netlasCoins2=$(echo "(2500 - ${netlasCoins}) / 20" | bc)
84 | netlasUpdate=$(echo ${netlasJson} | jq -r '.api_key .next_time_coins_will_be_updated')
85 | echo "${netlasCoins2}/125 -> 50/day 125/month "
86 | echo ""
87 | fi
88 |
89 | networksdb=$(echo "${apiKeys}" | jq -r '.networksdb_io')
90 | if [[ ! -z ${networksdb} ]]
91 | then
92 | echo "# networksdb.io"
93 | curl -sH "X-Api-Key: ${networksdb}" "https://networksdb.io/api/key" | jq -r '"\(.req_count)/\(.req_limit) -> \(.resets_at)"'
94 | echo ""
95 | fi
96 |
97 | securitytrails=$(echo "${apiKeys}" | jq -r '.securitytrails_com')
98 | if [[ ! -z ${securitytrails} ]]
99 | then
100 | echo "# securitytrails.com"
101 | curl -s --request GET --url "https://api.securitytrails.com/v1/account/usage" -H "APIKEY: ${securitytrails}" | jq -r '"\(.current_monthly_usage)/\(.allowed_monthly_usage) -> \(.allowed_monthly_usage)/month"'
102 | echo ""
103 | fi
104 |
105 | shodan=$(echo "${apiKeys}" | jq -r '.shodan_io')
106 | if [[ ! -z ${shodan} ]]
107 | then
108 | echo "# shodan.io"
109 | shodanJson=$(curl -s "https://api.shodan.io/api-info?key=${shodan}")
110 | shodanCredits=$(echo ${shodanJson} | jq -r '.usage_limits .query_credits')
111 | shodanCreditsLeft=$(echo ${shodanJson} | jq -r '.query_credits')
112 | shodanCredits2=$(echo "${shodanCredits} - ${shodanCreditsLeft}" | bc)
113 | echo "${shodanCredits2}/${shodanCredits} -> ${shodanCredits}/month "
114 | echo ""
115 | fi
116 |
117 | if [[ ! -z $(echo "${apiKeys}" | jq -r '.spyonweb_com') ]]
118 | then
119 | echo "# spyonweb.com"
120 | echo "200/month"
121 | echo ""
122 | fi
123 |
124 | tombats=$(echo "${apiKeys}" | jq -r '.["tomba_io_ts"]')
125 | tombata=$(echo "${apiKeys}" | jq -r '.["tomba_io_ta"]')
126 | if [[ ! -z ${tombats} ]]
127 | then
128 | echo "# tomba.io"
129 | tombaUsage=$(curl -s --request GET --url "https://api.tomba.io/v1/usage" -H "X-Tomba-Key: ${tombata}" -H "X-Tomba-Secret: ${tombats}" | jq -r '.total .search')
130 | echo "${tombaUsage}/50 -> 50/month"
131 | echo ""
132 | fi
133 |
134 | urlscan=$(echo "${apiKeys}" | jq -r '.urlscan_io')
135 | if [[ ! -z ${urlscan} ]]
136 | then
137 | echo "# urlscan.io"
138 | urlscanJson=$(curl -s "https://urlscan.io/user/quotas" -H "API-Key: ${urlscan}")
139 | urlscanLimit=$(echo ${urlscanJson} | jq -r '.limits .search .day .limit')
140 | urlscanUsage=$(echo ${urlscanJson} | jq -r '.limits .search .day .used')
141 | echo "${urlscanUsage}/${urlscanLimit} -> ${urlscanLimit}/day"
142 | echo ""
143 | fi
144 |
145 | validin=$(echo "${apiKeys}" | jq -r '.validin_com')
146 | if [[ ! -z ${validin} ]]
147 | then
148 | echo "# validin.com"
149 | echo "50/day 250/month"
150 | echo ""
151 | fi
152 |
153 | virustotal=$(echo "${apiKeys}" | jq -r '.virustotal_com')
154 | if [[ ! -z ${virustotal} ]]
155 | then
156 | echo "# virustotal.com"
157 | echo "500/day"
158 | echo ""
159 | fi
160 |
161 | zoomeye=$(echo "${apiKeys}" | jq -r '.zoomeye_hk')
162 | if [[ ! -z ${zoomeye} ]]
163 | then
164 | echo "# zoomeye.hk"
165 | curl -s -X GET "https://api.zoomeye.hk/resources-info" -H "API-KEY:${zoomeye}" | jq -r '"Free: \(.quota_info .remain_free_quota) (renewed monthly) \nPay: \(.quota_info .remain_pay_quota) (per account registration)"'
166 | echo ""
167 | fi
168 |
169 |
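Several branches above (binaryedge, intelx, shodan) only receive the remaining credits from the API and derive the used amount as plan minus remaining via bc. The arithmetic in isolation, with sample numbers rather than real API output:

# sample numbers; real values come from the respective API responses
plan=250
left=190
used=$(echo "${plan} - ${left}" | bc)
echo "${used}/${plan}"   # prints 60/250, i.e. used/total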
--------------------------------------------------------------------------------
/ressources/scripts/print-overview.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 1 ]
4 | then
5 | echo "usage: ${0} pathToCorptraceOutDir"
6 | exit 1
7 | fi
8 |
9 | resultDir=${1}
10 |
11 | if [ ! -d ${resultDir} ]
12 | then
13 | # nothing to print
14 | exit
15 | fi
16 |
17 | echo ""
18 |
19 | function collectResults()
20 | {
21 | # path to module directory
22 | pathToDir=${1}
23 |
24 | # name of the result file
25 | nameScheme1=${2}
26 |
27 | # file extension
28 | nameScheme2=${3}
29 |
30 | # type of result (rootdomain, subdomain, email)
31 | resultType=${4}
32 | allFiles=""
33 |
34 | if [ -d "${pathToDir}" ]
35 | then
36 | allFiles=$(ls ${pathToDir})
37 | fi
38 |
39 | for f in ${allFiles}
40 | do
41 | domainName=""
42 |
43 | if [[ "${f}" == *"${nameScheme1}"* && "${f}" == *"${nameScheme2}"* ]]
44 | then
45 | if [ "${nameScheme2}" == "" ]
46 | then
47 | domainName=$(echo ${f} | awk -F "${2}" '{print $2}')
48 | else
49 | domainName=$(echo ${f} | awk -F "${2}" '{print $2}' | awk -F "${3}" '{print $1}')
50 | fi
51 |
52 | # remove lines that should not be included in results
53 | if [[ "${f}" == *"letItGo-"* ]]
54 | then
55 | cat "$pathToDir/${nameScheme1}${domainName}${nameScheme2}" | sed -n '/------/,/Stats:/{//!p}' | grep -v "These domains \|DOMAIN \|---" | awk -F ' ' '{print $1}' | grep -v 'onmicrosoft.com' >> "${resultDir}/${resultType}_${domainName}"
56 |
57 | elif [[ "${f}" == *"subfinder"* ]]
58 | then
59 | cat "$pathToDir/${nameScheme1}${domainName}${nameScheme2}" | grep -v "Current subfinder version" >> "${resultDir}/${resultType}_${domainName}"
60 |
61 | elif [[ "${f}" == *"dnstwist-"* ]]
62 | then
63 | cat "$pathToDir/${nameScheme1}${domainName}${nameScheme2}" | awk -F ' ' '{print $2}' >> "${resultDir}/${resultType}_${domainName}"
64 |
65 | elif [[ "${f}" == *"nmap_reverse_lookup-"* ]]
66 | then
67 | cat "$pathToDir/${nameScheme1}${domainName}${nameScheme2}" | awk -F ' ' '{print $1}' >> "${resultDir}/${resultType}_${domainName}"
68 |
69 | elif [[ "${f}" == *"massdns-"* ]]
70 | then
71 | cat "$pathToDir/${nameScheme1}${domainName}${nameScheme2}" | awk -F '. ' '{print $1}' >> "${resultDir}/${resultType}_${domainName}"
72 |
73 | else
74 | cat "$pathToDir/${nameScheme1}${domainName}${nameScheme2}" >> "${resultDir}/${resultType}_${domainName}"
75 | fi
76 |
77 | # remove jq errors and no results found strings from results
78 | sort -u "${resultDir}/${resultType}_${domainName}" | grep -v "jq: error \|parse error: \|No results found for: \|Domain not found" > "${resultDir}/${resultType}_${domainName}-temp"
79 | mv "${resultDir}/${resultType}_${domainName}-temp" "${resultDir}/${resultType}_${domainName}"
80 | fi
81 | done
82 | }
83 |
84 | # create rootdomain files
85 | collectResults "${resultDir}/letItGo" "letItGo-" "" "rootdomains"
86 | collectResults "${resultDir}/dns_get_top_level_domains" "dns_get_top_level_domains-" "" "rootdomains"
87 | collectResults "${resultDir}/dnstwist" "dnstwist-" "" "rootdomains"
88 | collectResults "${resultDir}/robtex_get_rootdomains" "robtex_get_rootdomains-" ".txt" "rootdomains"
89 | collectResults "${resultDir}/shodan_get_rootdomains_from_domain" "shodan_get_rootdomains_from_domain-" ".txt" "rootdomains"
90 | collectResults "${resultDir}/spyonweb_get_rootdomains" "spyonweb_get_rootdomains-" ".txt" "rootdomains"
91 | collectResults "${resultDir}/urlscan_get_rootdomains_from_domain" "urlscan_get_rootdomains_from_domain-" ".txt" "rootdomains"
92 | collectResults "${resultDir}/validin_get_rootdomains_from_domain" "validin_get_rootdomains_from_domain-" ".txt" "rootdomains"
93 | collectResults "${resultDir}/zoomeye_get_rootdomains_from_domain" "zoomeye_get_rootdomains_from_domain-" ".txt" "rootdomains"
94 |
95 | collectResults "${resultDir}/crtsh_get_rootdomains" "crtsh_get_rootdomains-" "" "rootdomains-company"
96 | collectResults "${resultDir}/shodan_get_rootdomains_from_company" "shodan_get_rootdomains_from_company-" ".txt" "rootdomains-company"
97 | collectResults "${resultDir}/urlscan_get_rootdomains_from_company" "urlscan_get_rootdomains_from_company-" ".txt" "rootdomains-company"
98 | collectResults "${resultDir}/whoxy_get_rootdomains" "whoxy_get_rootdomains-" "" "rootdomains-company"
99 |
100 | collectResults "${resultDir}/dnslytics_get_rootdomains" "dnslytics_get_rootdomains-" "" "rootdomains-ua"
101 | collectResults "${resultDir}/hackertarget_get_rootdomains_from_gid" "hackertarget_get_rootdomains_from_gid-" "" "rootdomains-ua"
102 |
103 | # create subdomain files
104 | collectResults "${resultDir}/hackertarget_get_rootdomains_from_cidr" "hackertarget_get_rootdomains_from_cidr-" "" "domains-cidr"
105 | collectResults "${resultDir}/nmap_get_tls_alternative_names" "nmap_get_tls_alternative_names-" "" "domains-cidr"
106 | collectResults "${resultDir}/nmap_reverse_lookup" "nmap_reverse_lookup-" "" "domains-cidr"
107 | collectResults "${resultDir}/networksdb_get_rootdomains_from_cidr" "networksdb_get_rootdomains_from_cidr-" ".txt" "domains-cidr"
108 | collectResults "${resultDir}/validin_get_rootdomains_from_cidr" "validin_get_rootdomains_from_cidr-" ".txt" "domains-cidr"
109 |
110 | collectResults "${resultDir}/massdns" "massdns-" "" "subdomains"
111 | collectResults "${resultDir}/myssl_get_subdomains" "myssl_get_subdomains-" "" "subdomains"
112 | collectResults "${resultDir}/subdomaincenter_get_subdomains" "subdomaincenter_get_subdomains-" "" "subdomains"
113 | collectResults "${resultDir}/subfinder" "subfinder-" "" "subdomains"
114 | collectResults "${resultDir}/urlscan_get_subdomains" "urlscan_get_subdomains-" ".txt" "subdomains"
115 |
116 | # create email files
117 | collectResults "${resultDir}/gpg_get_emails" "gpg_get_emails-" "" "emails"
118 | collectResults "${resultDir}/hunter_get_emails" "hunter_get_emails-" ".txt" "emails"
119 | collectResults "${resultDir}/phonebook_get_mails" "phonebook_get_mails-" ".txt" "emails"
120 | collectResults "${resultDir}/skymem_get_mails" "skymem_get_mails-" "" "emails"
121 | collectResults "${resultDir}/tomba_get_emails" "tomba_get_emails-" ".txt" "emails"
122 |
123 | # create ip range files
124 | collectResults "${resultDir}/networksdb_get_ipranges" "networksdb_get_ipranges-" ".txt" "iprange"
125 | collectResults "${resultDir}/spk" "spk-" "" "iprange"
126 |
127 | # print overview
128 | shopt -s nullglob
129 |
130 | # collect all rootdomains that contain subdomains
131 | if [ "$(echo "${resultDir}"/subdomains_*)" != "" ]
132 | then
133 | allSubdomains=$(ls "${resultDir}"/subdomains_* 2> /dev/null | awk -F 'subdomains_' '{print $2}')
134 | fi
135 |
136 | # collect all rootdomains that contain email addresses
137 | if [ "$(echo "${resultDir}"/emails_*)" != "" ]
138 | then
139 | allEmails=$(ls "${resultDir}"/emails_* 2> /dev/null | awk -F 'emails_' '{print $2}')
140 | fi
141 |
142 | allSpoofys=""
143 |
144 | # collect all rootdomains whose SPF records have been scanned
145 | if [ -d "${resultDir}/spoofy" ]
146 | then
147 | allSpoofys=$(ls "${resultDir}/spoofy" | awk -F 'spoofy-' '{print $2}')
148 | fi
149 |
150 | # merge all rootdomains for table overview
151 | allResults="${allSubdomains} ${allEmails} ${allSpoofys}"
152 | printDomains=$(echo "${allResults}" | tr ' ' '\n' | sort -u)
153 | printf "%-40s %-30s %-30s %-30s\n" "Domain" "Subdomains" "Emails" "SPF"
154 |
155 | for domain in ${printDomains}
156 | do
157 | countSubdomains="?"
158 | countEmails="?"
159 | spoofable="?"
160 |
161 | if [ -f ${resultDir}/subdomains_${domain} ]
162 | then
163 | countSubdomains=$(cat "${resultDir}/subdomains_${domain}" | wc -l)
164 | fi
165 |
166 | if [ -f ${resultDir}/emails_${domain} ]
167 | then
168 | countEmails=$(cat "${resultDir}/emails_${domain}" | wc -l)
169 | fi
170 |
171 | if [ -f ${resultDir}/spoofy/spoofy-${domain} ]
172 | then
173 | spoofable="Configured"
174 | # spoofy prints "No SPF record found." when the domain has no SPF record at all
175 |
176 | if grep -q "No SPF record found." "${resultDir}/spoofy/spoofy-${domain}"
177 | then
178 | spoofable="Pwn"
179 | fi
180 | fi
181 |
182 | printf "%-40s %-30s %-30s %-30s\n" "${domain}" "${countSubdomains}" "${countEmails}" "${spoofable}"
183 | done
184 |
185 |
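How collectResults recovers the domain from a result filename: nameScheme1 is the module prefix, nameScheme2 the extension, and two awk field splits strip both. A hedged walk-through using the urlscan_get_subdomains call from above with a made-up target:

# made-up result file name for the call:
#   collectResults ".../urlscan_get_subdomains" "urlscan_get_subdomains-" ".txt" "subdomains"
f="urlscan_get_subdomains-example.com.txt"
domainName=$(echo ${f} | awk -F "urlscan_get_subdomains-" '{print $2}' | awk -F ".txt" '{print $1}')
echo "${domainName}"   # prints example.com; results land in ${resultDir}/subdomains_example.com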
--------------------------------------------------------------------------------
/ressources/scripts/visualize.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ${#} -ne 2 ]
4 | then
5 | echo "usage: pathToCorpTraceOutDir [light/dark]"
6 | exit 1
7 | fi
8 |
9 | # construct json object from csv line
10 | function getCoherentDomains()
11 | {
12 | rootdom=${1}
13 | number=${2}
14 | string=${3}
15 | key=${4}
16 | endOfObj=${5}
17 |
18 | attribute=$(echo "${string}" | cut -d ";" -f "${number}")
19 |
20 | # skip empty attributes
21 | if ! [[ "${attribute}" =~ ^[[:space:]]*$ ]]
22 | then
23 | echo -n "\"${key}\":\"${attribute}\","
24 |
25 | echo -n "\"${key} relation\":["
26 | # sed used to remove last , from json
27 | grep "${attribute}" ${tempCsv} | grep -v "${rootdom} ;" | awk '{print "\""$1"\""}' | tr '\n' ',' | sed 's/.$//'
28 |
29 | if [[ -z "${endOfObj}" ]]
30 | then
31 | echo -n "],"
32 | else
33 | echo -n "]"
34 | fi
35 | else
36 | echo -n "\"${key}\":\"?\","
37 |
38 | if [[ -z "${endOfObj}" ]]
39 | then
40 | echo -n "\"${key} relation\":[],"
41 | else
42 | echo -n "\"${key} relation\":[]"
43 | fi
44 | fi
45 | }
46 |
47 | tempCsv="/tmp/$(date +"%Y-%m-%d_%T")-visualize-all-results.csv"
48 | resultFile="${1}/dnsx_get_coherent_domains/graph.html"
49 | theme=${2}
50 |
51 | exeDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
52 | pathToHtmlTemplate1="${exeDir}/../templates/graph-template1-${theme}.html"
53 | pathToHtmlTemplate2="${exeDir}/../templates/graph-template2-${theme}.html"
54 |
55 | if [ ! -d ${1}/dnsx_get_coherent_domains ]
56 | then
57 | echo "${1}/dnsx_get_coherent_domains does not exists!"
58 | echo "Run: python3 corptrace.py -o ${1} -f ${1}/some-domains.txt -im dnsx -e"
59 | exit 1
60 | fi
61 |
62 | cat ${1}/dnsx_get_coherent_domains/*/* 2> /dev/null | grep -v "Hostname ; Whois Domain " | sort -u > ${tempCsv}
63 |
64 | if [ ! -s ${tempCsv} ]
65 | then
66 | echo "No file found: ${1}/dnsx_get_coherent_domains/*/*"
67 | echo "Check source code of ${exeDir} for more info."
68 | exit 1
69 | fi
70 |
71 | amountOfLines=$(cat ${tempCsv} | wc -l)
72 | counter=1
73 |
74 | # write first part of html file
75 | cat ${pathToHtmlTemplate1} > ${resultFile}
76 |
77 | echo -n "const data = [" >> ${resultFile}
78 |
79 | # -r prevents backslashes from being interpreted; the extra -n test handles a last line without a trailing newline
80 | while read -r line || [[ -n "${line}" ]]
81 | do
82 | # skip empty lines
83 | if ! [ -z "${line}" ]
84 | then
85 |
86 | rootdomain=$(echo ${line} | cut -d ";" -f 1)
87 |
88 | # get amount of subdomains
89 | if [ ! -f ${1}/subdomains_${rootdomain} ]
90 | then
91 | amountOfSubdomains="?"
92 | else
93 | amountOfSubdomains=$(cat ${1}/subdomains_${rootdomain} | wc -l)
94 | fi
95 |
96 | # get amount of emails
97 | if [ ! -f ${1}/emails_${rootdomain} ]
98 | then
99 | amountOfEmails="?"
100 | else
101 | amountOfEmails=$(cat ${1}/emails_${rootdomain} | wc -l)
102 | fi
103 |
104 | # get SPF status
105 | if [ -f ${1}/spoofy/spoofy-${rootdomain} ]
106 | then
107 | if grep -q "No SPF record found" ${1}/spoofy/spoofy-${rootdomain}
108 | then
109 | spfStatus="Pwn"
110 | else
111 | spfStatus="Configured"
112 | fi
113 | else
114 | spfStatus="?"
115 | fi
116 |
117 | echo -n "{" >> ${resultFile}
118 | echo -n "\"Rootdomain\":\"${rootdomain}\"," >> ${resultFile}
119 | echo -n "\"Subdomains\":\"${amountOfSubdomains}\"," >> ${resultFile}
120 | echo -n "\"Emails\":\"${amountOfEmails}\"," >> ${resultFile}
121 | echo -n "\"SPF\":\"${spfStatus}\"," >> ${resultFile}
122 |
123 | # remove "
124 | lineNoQuote=$(echo "${line}" | sed 's/"/_/g')
125 |
126 | # getCoherentDomains rootdomain columnNumber csvLine jsonKey flag
127 | getCoherentDomains ${rootdomain} 2 "${lineNoQuote}" "Whois Domain" >> ${resultFile}
128 | getCoherentDomains ${rootdomain} 3 "${lineNoQuote}" "Whois Ip" >> ${resultFile}
129 | getCoherentDomains ${rootdomain} 4 "${lineNoQuote}" "Mailserver" >> ${resultFile}
130 | getCoherentDomains ${rootdomain} 5 "${lineNoQuote}" "Nameserver" >> ${resultFile}
131 | getCoherentDomains ${rootdomain} 6 "${lineNoQuote}" "ASN" >> ${resultFile}
132 | getCoherentDomains ${rootdomain} 7 "${lineNoQuote}" "Effective Url" >> ${resultFile}
133 | getCoherentDomains ${rootdomain} 8 "${lineNoQuote}" "Copyright" >> ${resultFile}
134 | getCoherentDomains ${rootdomain} 9 "${lineNoQuote}" "Title" >> ${resultFile}
135 | getCoherentDomains ${rootdomain} 10 "${lineNoQuote}" "Google Adsense" >> ${resultFile}
136 | getCoherentDomains ${rootdomain} 11 "${lineNoQuote}" "Google Analytics" >> ${resultFile}
137 | getCoherentDomains ${rootdomain} 12 "${lineNoQuote}" "Social Media" >> ${resultFile}
138 | getCoherentDomains ${rootdomain} 13 "${lineNoQuote}" "Favicon" "end" >> ${resultFile}
139 |
140 | if [ "${amountOfLines}" == "${counter}" ]
141 | then
142 | echo -n "}" >> ${resultFile}
143 | else
144 | echo -n "}," >> ${resultFile}
145 | fi
146 |
147 | let counter=${counter}+1
148 |
149 | fi
150 | done < ${tempCsv}
151 |
152 | echo "];" >> ${resultFile}
153 |
154 | # write second part of html file
155 | cat ${pathToHtmlTemplate2} >> ${resultFile}
156 |
157 | # remove temporary file
158 | rm -f ${tempCsv}
159 |
160 |
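One detail of the generated data array: cut -d ";" keeps the blanks around the " ; " separators, so the emitted JSON string values carry leading and trailing spaces. That is why the consumers in graph-template2 call .trim() on every value. A minimal sketch with a fabricated CSV line:

# fabricated CSV line in the dnsx_get_coherent_domains format
line='example.com ; Example Corp ; AS64500'
rootdomain=$(echo ${line} | cut -d ";" -f 1)
whois=$(echo "${line}" | cut -d ";" -f 2)
echo "{\"Rootdomain\":\"${rootdomain}\",\"Whois Domain\":\"${whois}\"}"
# prints: {"Rootdomain":"example.com ","Whois Domain":" Example Corp "}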
--------------------------------------------------------------------------------
/ressources/templates/graph-template1-dark.html:
--------------------------------------------------------------------------------
[HTML/CSS stripped in the source dump; what survives of this template is the page title "CorpTrace Visualization" (line 6) and the toolbox label "Highlight Nodes:" (line 149)]
--------------------------------------------------------------------------------
/ressources/templates/graph-template1-light.html:
--------------------------------------------------------------------------------
[header lost in the source dump; by the visualize.sh naming scheme (graph-template1-${theme}.html) this is the light variant of the template above, equally stripped: only the "Highlight Nodes:" label (line 149) survives]
--------------------------------------------------------------------------------
/ressources/templates/graph-template2-dark.html:
--------------------------------------------------------------------------------
[content stripped in the source dump; presumably the dark-theme counterpart of graph-template2-light.html below, of which only empty trailing lines (301-304) remain]
--------------------------------------------------------------------------------
/ressources/templates/graph-template2-light.html:
--------------------------------------------------------------------------------
1 |
2 | // count rootdomains and display inside toolbox
3 | const rootDomains = data.map(item => item.Rootdomain.trim());
4 | const amountOfRootdomains = rootDomains.length;
5 | d3.select("#infoboxContent").html(`Total Rootdomains: ${amountOfRootdomains}`).attr("class", "toolBoxText");
6 |
7 | // import JSON data into D3.js
8 | const nodes = data.map(item => ({
9 | id: item.Rootdomain.trim(),
10 | Subdomains: item.Subdomains,
11 | Emails: item.Emails
12 | }));
13 |
14 | // create edges
15 | const links = [];
16 |
17 | function createLinksFromRelations(domain, relationKey, links, type)
18 | {
19 | if (domain[relationKey] && Array.isArray(domain[relationKey]))
20 | {
21 | domain[relationKey].forEach(relatedDomain => {
22 | // check if there is already an existing link between 2 nodes
23 | const existingLink = links.find(link => link.source === domain.Rootdomain.trim() && link.target === relatedDomain.trim());
24 |
25 | if (existingLink)
26 | {
27 | // append the relation type
28 | existingLink.type += `/${type}`;
29 | }
30 | else
31 | {
32 | // add new link
33 | links.push({ source: domain.Rootdomain.trim(), target: relatedDomain.trim(), type: type });
34 | }
35 | });
36 | }
37 | }
38 |
39 | // create edges for each json attribute
40 | data.forEach(domain => {
41 | createLinksFromRelations(domain, "Whois Ip relation", links, 'ASN');
42 | createLinksFromRelations(domain, "Whois Domain relation", links, 'Whois');
43 | createLinksFromRelations(domain, "Mailserver relation", links, 'Mailserver');
44 | createLinksFromRelations(domain, "Nameserver relation", links, 'Nameserver');
45 | createLinksFromRelations(domain, "Title relation", links, 'Title');
46 | createLinksFromRelations(domain, "Google Adsense relation", links, 'GoogleAdsense');
47 | createLinksFromRelations(domain, "Google Analytics relation", links, 'GoogleAnalytics');
48 | createLinksFromRelations(domain, "Social Media relation", links, 'SocialMedia');
49 | createLinksFromRelations(domain, "Favicon relation", links, 'Favicon');
50 | });
51 |
52 | // create an SVG container with zoom functionality
53 | const svg = d3.select("svg")
54 | .call(d3.zoom().on("zoom", function(event) {
55 | svg.attr("transform", event.transform);
56 | }))
57 | .append("g");
58 |
59 | // initialize the force simulation
60 | const simulation = d3.forceSimulation(nodes)
61 | .force("link", d3.forceLink(links).id(d => d.id).distance(100))
62 | .force("charge", d3.forceManyBody().strength(-100).distanceMax(300))
63 | .force("center", d3.forceCenter(window.innerWidth / 2, (window.innerHeight - 100) / 2))
64 | .on("tick", ticked);
65 |
66 | // add edges to SVG
67 | const link = svg.append("g")
68 | .attr("class", "links")
69 | .selectAll("line")
70 | .data(links)
71 | .enter().append("line")
72 | .attr("class", "link");
73 |
74 | // add edge labels to SVG
75 | const edgeLabel = svg.append("g")
76 | .attr("class", "edge-labels")
77 | .selectAll("text")
78 | .data(links)
79 | .enter().append("text")
80 | .attr("class", "edge-label")
81 | // vertical position of the text
82 | .attr("dy", -1)
83 | .attr("text-anchor", "middle")
84 | .text(d => d.type)
85 | .style("opacity", 0);
86 |
87 | // add nodes to SVG
88 | const node = svg.append("g")
89 | .attr("class", "nodes")
90 | .selectAll("circle")
91 | .data(nodes)
92 | .enter().append("circle")
93 | .attr("class", "node")
94 | .attr("r", d => {
95 | // parse subdomain and email counts as integers, treating ? as 0
96 | const subdomainCount = d.Subdomains && d.Subdomains !== '?' ? parseInt(d.Subdomains, 10) : 0;
97 | const emailCount = d.Emails && d.Emails !== '?' ? parseInt(d.Emails, 10) : 0;
98 | const size = subdomainCount * 2 + emailCount;
99 |
100 | // define node size based on amount of subdomains and emails
101 | const radiusScale = d3.scaleLog()
102 | .domain([1, 800])
103 | .range([3, 13]);
104 |
105 | // use the scale for positive sizes, otherwise fall back to a default radius
106 | return size > 0 ? radiusScale(size) : 5;
107 | })
108 | .style("fill", "#5b5959")
109 | .style("stroke-width", 1)
110 | .call(d3.drag()
111 | .on("start", dragstarted)
112 | .on("drag", dragged)
113 | .on("end", dragended))
114 | .on("click", handleNodeClick);
115 |
116 | // add node labels
117 | const labels = svg.append("g")
118 | .attr("class", "node-labels")
119 | .selectAll("text")
120 | .data(nodes)
121 | .enter().append("text")
122 | // position of label in regard to node
123 | .attr("class", "node-label")
124 | .attr("dx", 10)
125 | .attr("dy", -7)
126 | .attr("text-anchor", "middle")
127 | .text(d => d.id);
128 |
129 | // update positions of elements
130 | let tickCount = 0;
131 |
132 | function ticked()
133 | {
134 | tickCount++;
135 | // only update every 3 ticks
136 | if (tickCount % 3 !== 0) return;
137 |
138 | link
139 | .attr("x1", d => d.source.x)
140 | .attr("y1", d => d.source.y)
141 | .attr("x2", d => d.target.x)
142 | .attr("y2", d => d.target.y);
143 |
144 | edgeLabel
145 | .attr("x", d => (d.source.x + d.target.x) / 2)
146 | .attr("y", d => (d.source.y + d.target.y) / 2);
147 |
148 | node
149 | .attr("cx", d => d.x)
150 | .attr("cy", d => d.y);
151 |
152 | labels
153 | .attr("x", d => d.x - 10)
154 | .attr("y", d => d.y + 25);
155 | }
156 |
157 | // node drag behavior
158 | function dragstarted(event, d)
159 | {
160 | if (!event.active) simulation.alphaTarget(0.3).restart();
161 | d.fx = d.x;
162 | d.fy = d.y;
163 | }
164 |
165 | function dragged(event, d)
166 | {
167 | d.fx = event.x;
168 | d.fy = event.y;
169 | }
170 |
171 | function dragended(event, d)
172 | {
173 | if (!event.active) simulation.alphaTarget(0);
174 | d.fx = d.x;
175 | d.fy = d.y;
176 | }
177 |
178 | // handle click on a node
179 | function handleNodeClick(event, d)
180 | {
181 | // get data of clicked node
182 | const nodeData = data.find(p => p.Rootdomain.trim() === d.id);
183 |
184 | // update the infobox content
185 | const infobox = d3.select("#infoboxContent");
186 | infobox.html(`
187 | Rootdomain: ${nodeData['Rootdomain']}
188 | Subdomains: ${nodeData['Subdomains']}
189 | Emails: ${nodeData['Emails']}
190 | ASN: ${nodeData['Whois Ip']}
191 | Whois: ${nodeData['Whois Domain']}
192 | Nameserver: ${nodeData['Nameserver']}
193 | Mailserver: ${nodeData['Mailserver']}
194 | Title: ${nodeData['Title']}
195 | SPF: ${nodeData['SPF']}
196 | `);
197 |
198 | // hide all edge labels
199 | edgeLabel.style("opacity", 0);
200 |
201 | // show edge labels connected to the selected node
202 | edgeLabel.filter(function(e) {
203 | return e.source.id === d.id || e.target.id === d.id;
204 | }).style("opacity", 1);
205 | }
206 |
207 | // handle rootdomain/node search
208 | d3.select("#searchButton").on("click", function()
209 | {
210 | const searchTerm = d3.select("#searchInput").property("value").trim().toLowerCase();
211 | const searchBox = d3.select("#searchInput");
212 |
213 | // reset color
214 | node.style("fill", "#5b5959");
215 |
216 | if (!searchTerm)
217 | return;
218 |
219 | // find matching nodes
220 | const foundNodes = nodes.filter(node => node.id.toLowerCase() === searchTerm);
221 |
222 | if (foundNodes.length > 0) {
223 | // change color of matching nodes
224 | node.filter(d => foundNodes.some(n => n.id === d.id))
225 | .style("fill", "#3CFF96");
226 |
227 | // reset the search box background color
228 | searchBox.style("background-color", "");
229 | }
230 | else
231 | {
232 | // change the search box color to red if no results are found
233 | searchBox.style("background-color", "red");
234 |
235 | // reset the color after 1 second
236 | setTimeout(() => {
237 | searchBox.style("background-color", "");
238 | }, 1000);
239 | }
240 | });
241 |
242 | // get nodes with the same attributes
243 | document.querySelectorAll('.attribute-button').forEach(button => {
244 | button.addEventListener('click', function() {
245 | const displayAttribute = this.getAttribute('data-attribute');
246 | let actualAttribute = displayAttribute;
247 |
248 | const value = prompt(`Enter the value for ${displayAttribute}:`);
249 |
250 | // only look for nodes if attribute is not empty
251 | if (value !== null)
252 | {
253 | highlightNodesByAttribute(actualAttribute, value.trim());
254 | }
255 | });
256 | });
257 |
258 | function highlightNodesByAttribute(attribute, value)
259 | {
260 | // reset node colors
261 | node.style("fill", "#5b5959");
262 |
263 | if (!value)
264 | {
265 | return;
266 | }
267 |
268 | // trim the attribute and convert to lower case
269 | const normalizedValue = value.trim().toLowerCase();
270 |
271 | // highlight nodes that match the attributes value
272 | node.filter(d => {
273 | const nodeData = data.find(p => p.Rootdomain.trim() === d.id);
274 | if (nodeData)
275 | {
276 | const attributeValue = nodeData[attribute] ? nodeData[attribute].toString().trim().toLowerCase() : '';
277 | return attributeValue === normalizedValue;
278 | }
279 | return false;
280 | }).style("fill", "#AA0000");
281 | }
282 |
283 | document.getElementById('copyRedNodes').addEventListener('click', function()
284 | {
285 | // collect the rootdomains of all red nodes (#AA0000 corresponds to rgb(170, 0, 0))
286 | const redNodeRootDomains = node.filter(function(d) {
287 | return d3.select(this).style('fill') === 'rgb(170, 0, 0)';
288 | }).data().map(d => d.id);
289 |
290 | // join them into a single string with each rootdomain on a new line
291 | const textToCopy = redNodeRootDomains.join('\n');
292 |
293 | // copy the text to the clipboard
294 | navigator.clipboard.writeText(textToCopy).then(() => {
295 | console.log('Rootdomains copied to clipboard:', redNodeRootDomains);
296 | }).catch(err => {
297 | console.error('Failed to copy text: ', err);
298 | });
299 | });
300 |
301 |