├── README.md ├── secret-finder.py └── secret-finder.sh /README.md: -------------------------------------------------------------------------------- 1 | 2 |

~ 🐝 𝐒𝐞𝐜𝐫𝐞𝐭 𝐅𝐢𝐧𝐝𝐞𝐫 🐝 ~

3 |
4 |
5 | 6 |
7 |
8 |

~ 🦄 𝐃𝐞𝐬𝐜𝐫𝐢𝐩𝐭𝐢𝐨𝐧 🦄 ~

9 |
10 | 𝐓𝐡𝐢𝐬 𝐢𝐬 𝐚 𝐭𝐨𝐨𝐥 𝐰𝐫𝐢𝐭𝐭𝐞𝐧 𝐢𝐧 𝐛𝐨𝐭𝐡 𝐁𝐚𝐬𝐡 𝐚𝐧𝐝 𝐏𝐲𝐭𝐡𝐨𝐧 𝐭𝐨 𝐩𝐚𝐫𝐬𝐞 𝐝𝐨𝐜/𝐝𝐫𝐢𝐯𝐞 𝐥𝐢𝐧𝐤𝐬 𝐢𝐧 𝐚 𝐟𝐢𝐥𝐞 𝐟𝐨𝐫 𝐰𝐨𝐫𝐤𝐢𝐧𝐠 𝐥𝐢𝐧𝐤𝐬 𝐚𝐧𝐝 𝐜𝐨𝐥𝐥𝐞𝐜𝐭 𝐢𝐦𝐩𝐨𝐫𝐭𝐚𝐧𝐭 𝐭𝐡𝐢𝐧𝐠𝐬 𝐢𝐧 𝐭𝐡𝐨𝐬𝐞 𝐥𝐢𝐧𝐤𝐬. 11 |
12 |
13 | 𝐈𝐭 𝐚𝐥𝐬𝐨 𝐜𝐡𝐨𝐨𝐬𝐞𝐬 𝐰𝐨𝐫𝐤𝐢𝐧𝐠 𝐩𝐫𝐨𝐱𝐢𝐞𝐬 𝐟𝐫𝐨𝐦 𝐚 𝐩𝐫𝐨𝐱𝐲 𝐥𝐢𝐬𝐭 𝐟𝐢𝐥𝐞 𝐚𝐧𝐝 𝐮𝐬𝐞𝐬 𝐭𝐡𝐨𝐬𝐞 𝐜𝐡𝐨𝐬𝐞𝐧 𝐰𝐨𝐫𝐤𝐢𝐧𝐠 𝐩𝐫𝐨𝐱𝐢𝐞𝐬 𝐫𝐚𝐧𝐝𝐨𝐦𝐥𝐲 𝐭𝐨 𝐬𝐞𝐧𝐝 𝐭𝐡𝐞𝐬𝐞 𝐫𝐞𝐪𝐮𝐞𝐬𝐭𝐬 𝐢𝐧 𝐨𝐫𝐝𝐞𝐫 𝐧𝐨𝐭 𝐭𝐨 𝐠𝐞𝐭 𝐛𝐥𝐨𝐜𝐤𝐞𝐝 𝐛𝐲 𝐆𝐨𝐨𝐠𝐥𝐞 𝐫𝐞𝐂𝐀𝐏𝐓𝐂𝐇𝐀. 14 |
15 |
16 |
17 |

~ 🍉 𝐔𝐬𝐚𝐠𝐞 🍉 ~

18 |
19 | 𝐂𝐡𝐞𝐜𝐤 𝐭𝐡𝐞 𝐜𝐨𝐝𝐞 𝐚𝐧𝐝 𝐩𝐮𝐭 𝐥𝐢𝐧𝐤𝐬 𝐚𝐧𝐝 𝐩𝐫𝐨𝐱𝐢𝐞𝐬 𝐢𝐧 𝐫𝐞𝐥𝐞𝐯𝐚𝐧𝐭 𝐟𝐢𝐥𝐞𝐬. 20 |
21 | 𝐌𝐚𝐤𝐞 𝐬𝐮𝐫𝐞 𝐲𝐨𝐮 𝐡𝐚𝐯𝐞 𝐫𝐞𝐪𝐮𝐢𝐫𝐞𝐝 𝐥𝐢𝐛𝐫𝐚𝐫𝐢𝐞𝐬 𝐚𝐧𝐝 𝐭𝐨𝐨𝐥𝐬 𝐢𝐧𝐬𝐭𝐚𝐥𝐥𝐞𝐝. 22 |
23 |
24 | python3 -m pip install requests 25 |
26 | go install -v github.com/projectdiscovery/nuclei/v2/cmd/nuclei@latest 27 |
28 |
29 |

~ 🦩 𝐓𝐡𝐚𝐧𝐤𝐬 𝐟𝐨𝐫 𝐕𝐢𝐬𝐢𝐭𝐢𝐧𝐠! 🦩~

30 |
31 |
32 |
33 | 34 |
35 |
36 |
37 | ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ ❤️ ~ 38 |
39 | 40 | -------------------------------------------------------------------------------- /secret-finder.py: -------------------------------------------------------------------------------- 1 | # Install the following by running -> python3 -m pip install requests 2 | import requests 3 | # Importing the grep / regex library 4 | import re 5 | # This is how python perform bash commands 6 | import os 7 | 8 | proxyList = [] 9 | validProxies = [] 10 | cleanDocsUrls = [] 11 | cleanDrivesUrls = [] 12 | 13 | # Disabling python request warnings cause they're pretty ugly :3 14 | requests.packages.urllib3.disable_warnings() 15 | 16 | # Loading and Checking for valid proxies (you can add as many proxies as you want to the proxy-list.txt file) 17 | with open('proxy-list.txt', 'r') as f: 18 | proxies = f.readlines() 19 | for proxy in proxies: 20 | # Adding nothing replacing newlines 21 | proxy = proxy.replace("\n", "") 22 | try: 23 | gResponse = requests.get("https://google.com", proxies={ 24 | "http": f"http://{proxy}", 25 | "https": f"http://{proxy}", 26 | }, verify=False) 27 | if (gResponse.status_code == 200): 28 | validProxies.append(proxy) 29 | print(f"[DEBUG] Valid proxy found - [{proxy}]") 30 | except: 31 | # Add code here to run when a invalid proxy is detected 32 | pass 33 | print(f"[+] Found {len(validProxies)} valid proxies!") 34 | 35 | 36 | os.system("mkdir doc") 37 | os.system("mkdir drive") 38 | 39 | 40 | # Parsing for valid doc URLs 41 | with open('docs.txt', 'r') as f: 42 | content = f.readlines() 43 | regexFilter = "^docs.google.com/(spreadsheets/d/|document/d/|file/d/|folder/d/|forms/d/|presentation/d/)" 44 | for line in content: 45 | if (re.search(regexFilter, line)): 46 | # Adding nothing replacing newlines 47 | cleanDocsUrls.append("https://" + line.replace("\n", "")) 48 | 49 | print(f"Found {len(cleanDocsUrls)} clean Google Docs URLs.") 50 | 51 | # Parsing for valid drive URLs 52 | with open('drive.txt', 'r') as f: 53 | content = f.readlines() 54 | regexFilter = "^drive.google.com/(drive/folders/|file/d|file/d/)" 55 | for line in content: 56 | if (re.search(regexFilter, line)): 57 | # Adding nothing replacing newlines 58 | cleanDrivesUrls.append("https://" + line.replace("\n", "")) 59 | 60 | print(f"Found {len(cleanDrivesUrls)} clean Google Drive URLs.") 61 | 62 | 63 | # Saving files of doc URLs 64 | count = 0 65 | for url in cleanDocsUrls: 66 | # Selecting a random proxy 67 | try: 68 | cProxy = validProxies[count % len(validProxies)] 69 | 70 | httpResponse = requests.get(url, verify=False, proxies={ 71 | "http": f"http://{cProxy}", 72 | "https": f"http://{cProxy}" 73 | }) 74 | print(f"[+] Got {httpResponse.status_code} CODE | {url} | {cProxy}") 75 | if (httpResponse.status_code == 200): 76 | filename = "doc/" + url.replace("?","").replace("&", "").replace("https://", "").replace("/", "-") + ".txt" 77 | with open(filename, 'w') as f: 78 | f.write(httpResponse.content.decode()) 79 | count = count + 1 80 | except: 81 | # Add code here to handle failed URLS 82 | count = count + 1 83 | 84 | 85 | # Saving files of drive URLs 86 | count = 0 87 | for url in cleanDocsUrls: 88 | # Selecting a random proxy 89 | try: 90 | cProxy = validProxies[count % len(validProxies)] 91 | 92 | httpResponse = requests.get(url, verify=False, proxies={ 93 | "http": f"http://{cProxy}", 94 | "https": f"http://{cProxy}" 95 | }) 96 | print(f"[+] Got {httpResponse.status_code} CODE | {url} | {cProxy}") 97 | if (httpResponse.status_code == 200): 98 | filename = "drive/" + url.replace("?","").replace("&", "").replace("https://", "").replace("/", "-") + ".txt" 99 | with open(filename, 'w') as f: 100 | f.write(httpResponse.content.decode()) 101 | count = count + 1 102 | except: 103 | # Add code here to handle failed URLS 104 | count = count + 1 105 | 106 | 107 | # Running Nuclei 108 | os.system("nuclei -target doc -o nuclei-doc.log") 109 | os.system("nuclei -target drive -o nuclei-drive.log") 110 | -------------------------------------------------------------------------------- /secret-finder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Filtering valid URLs 4 | echo "Filtering valid docs URLs..." 5 | cat docs.txt | grep '^docs.google.com/\(spreadsheets/d/\|document/d/\|file/d/\|folder/d/\|forms/d/\|presentation/d/\)' > /tmp/doc-clean.txt 6 | echo -e "\n\nFiltering valid drive URLs..." 7 | cat drive.txt | grep '^drive.google.com/\(drive/folders/\|file/d\|file/d/\)' > /tmp/drive-clean.txt 8 | 9 | # Collecting working URLs 10 | echo -e "\n\nCollecting working doc URLs..." 11 | cat /tmp/doc-clean.txt | httpx -title -rl 10 -status-code -follow-redirects -no-color | grep '200]' | awk -F' ' '{print $1}' | tee /tmp/doc-urls.txt 12 | echo -e "\n\nCollecting working drive URLs..." 13 | cat /tmp/drive-clean.txt | httpx -title -rl 10 -status-code -follow-redirects -no-color | grep '200]' | awk -F' ' '{print $1}' | tee /tmp/drive-urls.txt 14 | 15 | # Pausing process for 5min 16 | echo "Pausing process for 5min to avoid getting banned from google..." 17 | sleep 5m 18 | 19 | # Saving webpages and collecting important info - doc 20 | echo -e "\n\nSaving webpages and collecting important info of docs.txt" 21 | cat /tmp/doc-urls.txt | while read -r link; 22 | do curl -s $link > /tmp/doc-curl.txt 23 | # Collecting titles 24 | title=`cat /tmp/doc-curl.txt | grep -o '[^"]*' | sed -e 's/<\/\?title>//g'` 25 | # Collecting important strings 26 | cat /tmp/doc-curl.txt | stuff=`cat /tmp/doc-curl.txt | grep -i '\(password\|credentials\|token\|api\|secret\|key\)'` 27 | stuff=$(echo $stuff | tr '[:upper:]' '[:lower:]') 28 | if [[ "$stuff" == *"password"* || "$title" == *"password"* ]]; then 29 | pwd="Password"; 30 | else 31 | pwd=" "; 32 | fi 33 | if [[ "$stuff" == *"credentials"* || "$title" == *"credentials"* ]]; then 34 | cre="Credentials"; 35 | else 36 | cre=" "; 37 | fi 38 | if [[ "$stuff" == *"token"* || "$title" == *"token"* ]]; then 39 | tok="Token"; 40 | else 41 | tok=" "; 42 | fi 43 | if [[ "$stuff" == *"api"* || "$title" == *"api"* ]]; then 44 | api="Api"; 45 | else 46 | api=" "; 47 | fi 48 | if [[ "$stuff" == *"secret"* || "$title" == *"secret"* ]]; then 49 | sec="Secret"; 50 | else 51 | sec=" "; 52 | fi 53 | if [[ "$stuff" == *"key"* || "$title" == *"key"* ]]; then 54 | key="Key"; 55 | else 56 | key=" "; 57 | fi 58 | echo -e "\nTitle: $title" >> doc-result.txt & echo "Link: $link" >> doc-result.txt & echo "Link or Web Page contains: $pwd $cre $tok $api $sec $key" >> doc-result.txt 59 | sleep 5s 60 | done 61 | echo -e "\n\nYou can now check doc-result.txt" 62 | 63 | # Pausing process for 5min 64 | echo "Pausing process for 5min to avoid getting banned from google..." 65 | sleep 5m 66 | 67 | # Saving webpages and collecting important info - drive 68 | echo -e "\n\nSaving webpages and collecting important info of drive.txt" 69 | cat /tmp/drive-urls.txt | while read -r link; 70 | do curl -s $link > /tmp/drive-curl.txt 71 | # Collecting titles 72 | title=`cat /tmp/drive-curl.txt | grep -o '[^"]*' | sed -e 's/<\/\?title>//g'` 73 | # Collecting important strings 74 | cat /tmp/doc-curl.txt | stuff=`cat /tmp/doc-curl.txt | grep -i '\(password\|credentials\|token\|api\|secret\|key\)'` 75 | stuff=$(echo $stuff | tr '[:upper:]' '[:lower:]') 76 | if [[ "$stuff" == *"password"* || "$title" == *"password"* ]]; then 77 | pwd="Password"; 78 | else 79 | pwd=" "; 80 | fi 81 | if [[ "$stuff" == *"credentials"* || "$title" == *"credentials"* ]]; then 82 | cre="Credentials"; 83 | else 84 | cre=" "; 85 | fi 86 | if [[ "$stuff" == *"token"* || "$title" == *"token"* ]]; then 87 | tok="Token"; 88 | else 89 | tok=" "; 90 | fi 91 | if [[ "$stuff" == *"api"* || "$title" == *"api"* ]]; then 92 | api="Api"; 93 | else 94 | api=" "; 95 | fi 96 | if [[ "$stuff" == *"secret"* || "$title" == *"secret"* ]]; then 97 | sec="Secret"; 98 | else 99 | sec=" "; 100 | fi 101 | if [[ "$stuff" == *"key"* || "$title" == *"key"* ]]; then 102 | key="Key"; 103 | else 104 | key=" "; 105 | fi 106 | echo -e "\nTitle: $title" >> drive-result.txt & echo "Link: $link" >> drive-result.txt & echo "Link or Web Page contains: $pwd $cre $tok $api $sec $key" >> drive-result.txt 107 | sleep 5s 108 | done 109 | echo -e "\n\nYou can now check drive-result.txt" 110 | --------------------------------------------------------------------------------