├── .gitignore ├── README.md ├── config.json ├── docker ├── Dockerfile └── run.sh ├── hashcloud.py ├── hashcloud ├── AWS_Resources │ ├── __init__.py │ ├── creation.py │ ├── deletion.py │ └── resources.py ├── __init__.py ├── __main__.py ├── crack.py └── wordlist.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | # Vscode 163 | .vscode/ -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # HashCloud 2 | 3 | ## Installation 4 | 5 | Use the command below to install the dependencies required for the project. 6 | ``` 7 | pip install -r requirements.txt 8 | ``` 9 | 10 | ## Usage 11 | 12 | HashCloud uses an S3 bucket to store the wordlists used by hashcat, and AWS Fargate to run the cracking batch jobs. Before using the tool, spin up the required cloud resources with the setup command (see below). 13 | 14 | Once the resources have been set up, the standard workflow is: 15 | - Uploading a wordlist to the S3 bucket (use the `wordlists` command to manage wordlists). 16 | - Running crack jobs and checking on the results (see the `crack` command below). 17 | 18 | ### Spin up and tear down resources 19 | 20 | ``` 21 | usage: hashcloud.py setup [-h] {create,cleanup} ... 22 | 23 | positional arguments: 24 | {create,cleanup} 25 | create Create AWS resources required. 26 | cleanup Delete AWS resources created. 27 | 28 | ``` 29 | 30 | ### Manage wordlists 31 | 32 | ``` 33 | usage: hashcloud.py wordlists [-h] {list,upload} ... 34 | 35 | positional arguments: 36 | {list,upload} 37 | list List all wordlists. 38 | upload Upload a wordlist. 39 | ``` 40 | 41 | ### Crack files 42 | 43 | ``` 44 | usage: hashcloud.py crack [-h] {initiate,status,result} ... 46 | 47 | positional arguments: 47 | {initiate,status,result} 48 | initiate Initiate a new cracking job. 49 | status Check cracking job status. 50 | result Get the result from a completed cracking job.
51 | ``` 52 | -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "unique_suffix": "_hashcloud_project", 3 | "vCPU": 2, 4 | "MEMORY": 4096 5 | } -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | 3 | ENV TZ=Europe/London 4 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 5 | 6 | RUN apt update -y 7 | RUN apt -y install unzip awscli hashcat 8 | 9 | RUN useradd -m hashcat 10 | USER hashcat 11 | COPY run.sh /tmp/run.sh 12 | -------------------------------------------------------------------------------- /docker/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copy file to crack 4 | to_crack=${@: -2: 1} 5 | echo "TO_CRACK: $to_crack" 6 | aws s3 cp "${to_crack}" - > "/tmp/tocrack.txt" 7 | 8 | # Copy dictionary file 9 | wordlist=${!#} 10 | echo "WORDLIST: $wordlist" 11 | aws s3 cp "${wordlist}" - > "/tmp/wordlist.txt" 12 | 13 | args=${@:1:($#-2)} 14 | echo "ARGS: $args" 15 | 16 | crack_filename="${to_crack##*/}" 17 | mkdir -p ~/.local/share/hashcat/sessions 18 | hashcat $args -o /tmp/${crack_filename} /tmp/tocrack.txt /tmp/wordlist.txt 19 | 20 | bucket_path="${wordlist#s3://}" 21 | bucket_name="${bucket_path%%/*}" 22 | aws s3 cp /tmp/${crack_filename} s3://${bucket_name}/cracked/ -------------------------------------------------------------------------------- /hashcloud.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File name : hashcloud.py 4 | # Author : TomPh 5 | # Date created : 29 May 2023 6 | 7 | from hashcloud.__main__ import main 8 | 9 | if __name__ == '__main__': 10 | main() -------------------------------------------------------------------------------- /hashcloud/AWS_Resources/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/T0m-Ph/HashCloud/f8e14c431928a3a78b89463541252150f727c763/hashcloud/AWS_Resources/__init__.py -------------------------------------------------------------------------------- /hashcloud/AWS_Resources/creation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File name : creation.py 4 | # Author : TomPh 5 | # Date created : 29 May 2023 6 | 7 | import boto3 8 | import json 9 | import subprocess 10 | import docker 11 | import base64 12 | import time 13 | 14 | def create_s3_bucket(bucket_name): 15 | s3 = boto3.client('s3') 16 | s3.create_bucket( 17 | Bucket=bucket_name, 18 | ACL='private' 19 | ) 20 | 21 | print(f"S3 bucket '{bucket_name}' created successfully.") 22 | return bucket_name 23 | 24 | def create_iam_role(role_name, bucket_name): 25 | iam = boto3.client('iam') 26 | role_response = iam.create_role( 27 | RoleName=role_name, 28 | AssumeRolePolicyDocument=json.dumps({ 29 | "Version": "2012-10-17", 30 | "Statement": [ 31 | { 32 | "Sid": "", 33 | "Effect": "Allow", 34 | "Principal": { 35 | "Service": "ecs-tasks.amazonaws.com" 36 | }, 37 | "Action": "sts:AssumeRole" 38 | } 39 | ] 40 | }) 41 | ) 42 | 43 | print(f"IAM role '{role_name}' created successfully.") 44 | 45 | iam.attach_role_policy( 46 |
RoleName=role_name, 47 | PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess' 48 | ) 49 | iam.attach_role_policy( 50 | RoleName=role_name, 51 | PolicyArn='arn:aws:iam::aws:policy/CloudWatchLogsFullAccess' 52 | ) 53 | iam.attach_role_policy( 54 | RoleName=role_name, 55 | PolicyArn='arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy' 56 | ) 57 | 58 | print(f"Policy attached to IAM role '{role_name}' successfully.") 59 | role_arn = role_response['Role']['Arn'] 60 | return role_name, role_arn 61 | 62 | 63 | def create_batch_job_definition(job_definition_name, job_role_arn, execution_role_arn, container_image, command): 64 | batch = boto3.client('batch') 65 | response = batch.register_job_definition( 66 | jobDefinitionName=job_definition_name, 67 | type='container', 68 | platformCapabilities= ['FARGATE'], 69 | containerProperties={ 70 | 'image': container_image, 71 | 'command': command, 72 | 'jobRoleArn': job_role_arn, 73 | 'executionRoleArn': job_role_arn, 74 | 'user': 'hashcat', 75 | 'networkConfiguration': { 76 | 'assignPublicIp': 'ENABLED' 77 | }, 78 | 'resourceRequirements': [ 79 | { 80 | 'value': '1', 81 | 'type': 'VCPU' 82 | }, 83 | { 84 | 'value': '2048', 85 | 'type': 'MEMORY' 86 | }, 87 | ], 88 | } 89 | ) 90 | 91 | job_definition_arn = response['jobDefinitionArn'] 92 | print(f"Batch job definition '{job_definition_name}' created successfully.") 93 | return job_definition_arn 94 | 95 | def create_batch_job_queue(job_queue_name, compute_environment_order): 96 | batch = boto3.client('batch') 97 | compute_environment_arn = compute_environment_order[0]['computeEnvironment'] 98 | 99 | while True: 100 | response = batch.describe_compute_environments(computeEnvironments=[compute_environment_arn]) 101 | status = response['computeEnvironments'][0]['status'] 102 | 103 | if status == 'VALID': 104 | break 105 | 106 | print(f"Waiting for compute environment '{compute_environment_arn}' to be in a valid state...") 107 | time.sleep(5) 108 | 109 | response = batch.create_job_queue( 110 | jobQueueName=job_queue_name, 111 | state='ENABLED', 112 | priority=1, 113 | computeEnvironmentOrder=compute_environment_order 114 | ) 115 | 116 | job_queue_arn = response['jobQueueArn'] 117 | print(f"Batch job queue '{job_queue_name}' created successfully.") 118 | return job_queue_arn 119 | 120 | 121 | def create_batch_compute_environment(compute_environment_name, service_role_arn, subnet_ids, security_group_ids): 122 | batch = boto3.client('batch') 123 | response = batch.create_compute_environment( 124 | computeEnvironmentName=compute_environment_name, 125 | type='MANAGED', 126 | state='ENABLED', 127 | computeResources={ 128 | 'type': 'FARGATE_SPOT', 129 | 'maxvCpus': 256, 130 | 'subnets': subnet_ids, 131 | 'securityGroupIds': security_group_ids, 132 | }, 133 | serviceRole=service_role_arn 134 | ) 135 | 136 | compute_environment_arn = response['computeEnvironmentArn'] 137 | print(f"Batch compute environment '{compute_environment_name}' created successfully.") 138 | return compute_environment_arn 139 | 140 | def create_subnet(vpc_id, cidr_block): 141 | ec2 = boto3.client('ec2') 142 | response = ec2.create_subnet( 143 | VpcId=vpc_id, 144 | CidrBlock=cidr_block 145 | ) 146 | 147 | subnet_id = response['Subnet']['SubnetId'] 148 | print(f"Subnet '{cidr_block}' created successfully.") 149 | 150 | return subnet_id 151 | 152 | def create_security_group(group_name, description, vpc_id): 153 | ec2 = boto3.client('ec2') 154 | response = ec2.create_security_group( 155 | GroupName=group_name, 156 | 
Description=description, 157 | VpcId=vpc_id 158 | ) 159 | 160 | group_id = response['GroupId'] 161 | print(f"Security group '{group_name}' created successfully.") 162 | 163 | return group_id 164 | 165 | def build_and_upload_image(dockerfile_path, ecr_repository_name, aws_region, image_name): 166 | client = docker.from_env() 167 | try: 168 | image, build_logs = client.images.build(path=dockerfile_path, tag=image_name, rm=True) 169 | for log in build_logs: 170 | print(log) 171 | 172 | print(f"Docker image '{image_name}' built successfully.") 173 | 174 | except docker.errors.BuildError as e: 175 | print(f"Failed to build Docker image: {str(e)}") 176 | return False 177 | 178 | # Authenticate to the ECR registry 179 | ecr = boto3.client('ecr', region_name=aws_region) 180 | response = ecr.get_authorization_token() 181 | authorization_data = response['authorizationData'][0] 182 | registry = authorization_data['proxyEndpoint'] 183 | token = base64.b64decode(authorization_data['authorizationToken']).decode('utf-8') 184 | username = token.split(':')[0] 185 | password = token.split(':')[1] 186 | subprocess.run(['docker', 'login', '-u', username, '-p', password, registry], check=True) 187 | 188 | # Tag the Docker image 189 | registry = registry.replace("https://", "") 190 | image_tag = f"{registry}/{ecr_repository_name}:latest" 191 | client = docker.from_env() 192 | image = client.images.get(image_name) 193 | image.tag(image_tag) 194 | 195 | # Push the Docker image to ECR 196 | try: 197 | push_logs = client.images.push(repository=image_tag) 198 | print(push_logs) 199 | print(f"Docker image '{image_tag}' pushed to ECR successfully.") 200 | except docker.errors.APIError as e: 201 | print(f"Failed to push Docker image to ECR: {str(e)}") 202 | 203 | print(f"Docker image '{image_tag}' built and uploaded to ECR repository '{ecr_repository_name}'.") 204 | 205 | def create_ecr_repository(repository_name): 206 | ecr = boto3.client('ecr') 207 | response = ecr.create_repository( 208 | repositoryName=repository_name 209 | ) 210 | 211 | repository_uri = response['repository']['repositoryUri'] 212 | print(f"ECR repository '{repository_name}' created successfully.") 213 | 214 | return repository_name, repository_uri -------------------------------------------------------------------------------- /hashcloud/AWS_Resources/deletion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File name : deletion.py 4 | # Author : TomPh 5 | # Date created : 29 May 2023 6 | 7 | import boto3 8 | import time 9 | 10 | def delete_s3_bucket(bucket_name): 11 | s3 = boto3.resource('s3') 12 | bucket = s3.Bucket(bucket_name) 13 | bucket.objects.all().delete() 14 | s3 = boto3.client('s3') 15 | s3.delete_bucket(Bucket=bucket_name) 16 | print(f"S3 bucket '{bucket_name}' deleted successfully.") 17 | 18 | def delete_iam_role(role_name): 19 | iam = boto3.client('iam') 20 | iam.detach_role_policy(RoleName=role_name, PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess') 21 | iam.detach_role_policy(RoleName=role_name, PolicyArn='arn:aws:iam::aws:policy/CloudWatchLogsFullAccess') 22 | iam.detach_role_policy(RoleName=role_name, PolicyArn='arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy') 23 | iam.delete_role(RoleName=role_name) 24 | 25 | print(f"IAM role '{role_name}' deleted successfully.") 26 | 27 | def delete_batch_job_definition(job_definition_arn): 28 | batch = boto3.client('batch') 29 | batch.deregister_job_definition( 30 |
jobDefinition=job_definition_arn 31 | ) 32 | 33 | print(f"Batch job definition '{job_definition_arn}' deleted successfully.") 34 | 35 | def delete_batch_job_queue(job_queue_arn): 36 | batch = boto3.client('batch') 37 | batch.update_job_queue( 38 | jobQueue=job_queue_arn, 39 | state='DISABLED' 40 | ) 41 | 42 | while True: 43 | response = batch.describe_job_queues(jobQueues=[job_queue_arn]) 44 | status = response['jobQueues'][0]['status'] 45 | 46 | if status == 'VALID': 47 | break 48 | print("Waiting") 49 | time.sleep(5) 50 | 51 | batch.delete_job_queue( 52 | jobQueue=job_queue_arn 53 | ) 54 | 55 | print(f"Batch job queue '{job_queue_arn}' deleted successfully.") 56 | 57 | def delete_batch_compute_environment(compute_environment_arn): 58 | batch = boto3.client('batch') 59 | batch.update_compute_environment( 60 | computeEnvironment=compute_environment_arn, 61 | state='DISABLED' 62 | ) 63 | 64 | while True: 65 | response = batch.describe_compute_environments(computeEnvironments=[compute_environment_arn]) 66 | status = response['computeEnvironments'][0]['status'] 67 | 68 | if status == 'VALID': 69 | break 70 | 71 | print(f"Waiting for compute environment '{compute_environment_arn}' to be in a valid state...") 72 | time.sleep(5) 73 | 74 | batch.delete_compute_environment( 75 | computeEnvironment=compute_environment_arn 76 | ) 77 | 78 | print(f"Batch compute environment '{compute_environment_arn}' deleted successfully.") 79 | 80 | def delete_subnet(subnet_id): 81 | ec2 = boto3.client('ec2') 82 | ec2.delete_subnet( 83 | SubnetId=subnet_id 84 | ) 85 | 86 | print(f"Subnet '{subnet_id}' deleted successfully.") 87 | 88 | def delete_security_group(group_id): 89 | ec2 = boto3.client('ec2') 90 | ec2.delete_security_group( 91 | GroupId=group_id 92 | ) 93 | 94 | print(f"Security group '{group_id}' deleted successfully.") 95 | 96 | def delete_ecr_repository(repository_name): 97 | ecr = boto3.client('ecr') 98 | ecr.delete_repository( 99 | repositoryName=repository_name, 100 | force=True 101 | ) 102 | 103 | print(f"ECR repository '{repository_name}' deleted successfully.") -------------------------------------------------------------------------------- /hashcloud/AWS_Resources/resources.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File name : resources.py 4 | # Author : TomPh 5 | # Date created : 29 May 2023 6 | 7 | import boto3 8 | import json 9 | import os 10 | 11 | from hashcloud.AWS_Resources import creation 12 | from hashcloud.AWS_Resources import deletion 13 | 14 | def initialize(**kwargs): 15 | created_resources = {} 16 | 17 | unique_suffix = '_hashcloud_project' 18 | 19 | try: 20 | with open('config.json', 'r') as file: 21 | config = json.load(file) 22 | unique_suffix = config['unique_suffix'] 23 | except Exception as e: 24 | print("No config file found, using default.") 25 | 26 | try: 27 | with open('build/resources.json', 'r') as file: 28 | print("Loading existing config") 29 | created_resources = json.load(file) 30 | except: 31 | print("No existing resources found, building environment.") 32 | 33 | try: 34 | bucket_name = created_resources.get('bucket_name') 35 | if not bucket_name: 36 | # Create S3 bucket 37 | bucket_name = 'bucket' + unique_suffix 38 | bucket_name = creation.create_s3_bucket(bucket_name) 39 | created_resources['bucket_name'] = bucket_name 40 | 41 | role_name = created_resources.get('role_name') 42 | role_arn = created_resources.get('role_arn') 43 | if not role_name: 44 | # Create 
IAM role 45 | role_name = 'iam' + unique_suffix 46 | role_name, role_arn = creation.create_iam_role(role_name, bucket_name) 47 | created_resources['role_name'] = role_name 48 | created_resources['role_arn'] = role_arn 49 | 50 | repository_name = created_resources.get('repository_name') 51 | repository_uri = created_resources.get('repository_uri') 52 | if not repository_name or not repository_uri: 53 | # Create ECR repository 54 | repository_name = 'ecr_repo' + unique_suffix 55 | repository_name, repository_uri = creation.create_ecr_repository(repository_name) 56 | created_resources['repository_name'] = repository_name 57 | created_resources['repository_uri'] = repository_uri 58 | 59 | # Build and upload the Docker image to ECR 60 | dockerfile_path = 'docker' 61 | aws_region = 'us-east-1' 62 | image_name = 'docker' + unique_suffix 63 | creation.build_and_upload_image(dockerfile_path, repository_name, aws_region, image_name) 64 | 65 | job_definition_arn = created_resources.get('job_definition_arn') 66 | if not job_definition_arn: 67 | # Create Job definition 68 | job_definition_name = 'batch_job' + unique_suffix 69 | container_image = repository_uri + ":latest" 70 | command = [] 71 | job_definition_arn = creation.create_batch_job_definition(job_definition_name, role_arn, role_arn, container_image, command) 72 | created_resources['job_definition_arn'] = job_definition_arn 73 | 74 | 75 | default_vpc = boto3.client('ec2').describe_vpcs( 76 | Filters=[ 77 | { 78 | 'Name': 'isDefault', 79 | 'Values': ['true'] 80 | } 81 | ] 82 | )['Vpcs'][0] 83 | default_vpc_id = default_vpc['VpcId'] 84 | 85 | subnet_id = created_resources.get('subnet_id') 86 | if not subnet_id: 87 | # Create a subnet in the default VPC 88 | vpc_cidr_block = default_vpc['CidrBlock'] 89 | subnet_cidr_block = f'{vpc_cidr_block[:-6]}100.0/24' 90 | subnet_id = creation.create_subnet(default_vpc_id, subnet_cidr_block) 91 | created_resources['subnet_id'] = subnet_id 92 | 93 | security_group_id = created_resources.get('security_group_id') 94 | if not security_group_id: 95 | # Create a security group in the default VPC 96 | group_name = 'sg' + unique_suffix 97 | description = 'Security Group for ' + unique_suffix 98 | security_group_id = creation.create_security_group(group_name, description, default_vpc_id) 99 | created_resources['security_group_id'] = security_group_id 100 | 101 | compute_environment_arn = created_resources.get('compute_environment_arn') 102 | if not compute_environment_arn: 103 | # Create Compute environment 104 | service_role_arn = boto3.client('iam').get_role(RoleName='AWSServiceRoleForBatch')['Role']['Arn'] 105 | compute_environment_name = 'compute_env' + unique_suffix 106 | subnet_ids = [subnet_id] 107 | security_group_ids = [security_group_id] 108 | compute_environment_arn = creation.create_batch_compute_environment(compute_environment_name, service_role_arn, subnet_ids, security_group_ids) 109 | created_resources['compute_environment_arn'] = compute_environment_arn 110 | 111 | job_queue_arn = created_resources.get('job_queue_arn') 112 | if not job_queue_arn: 113 | # Create a Job Queue 114 | job_queue_name = 'job_q' + unique_suffix 115 | compute_environment_order = [ 116 | { 117 | 'order': 1, 118 | 'computeEnvironment': compute_environment_arn 119 | } 120 | ] 121 | job_queue_arn = creation.create_batch_job_queue(job_queue_name, compute_environment_order) 122 | created_resources['job_queue_arn'] = job_queue_arn 123 | 124 | except Exception as e: 125 | print(e) 126 | finally: 127 | # Save resources to file 128 |
resource_file_name = 'build/resources.json' 129 | with open(resource_file_name, 'w') as file: 130 | json.dump(created_resources, file) 131 | print(f"Resources information saved to '{resource_file_name}' file.") 132 | 133 | def cleanup(**kwargs): 134 | with open('build/resources.json', 'r') as file: 135 | created_resources = json.load(file) 136 | 137 | bucket_name = created_resources.get('bucket_name') 138 | if bucket_name: 139 | deletion.delete_s3_bucket(bucket_name) 140 | 141 | role_name = created_resources.get('role_name') 142 | if role_name: 143 | deletion.delete_iam_role(role_name) 144 | 145 | job_definition_arn = created_resources.get('job_definition_arn') 146 | if job_definition_arn: 147 | deletion.delete_batch_job_definition(job_definition_arn) 148 | 149 | job_queue_arn = created_resources.get('job_queue_arn') 150 | if job_queue_arn: 151 | deletion.delete_batch_job_queue(job_queue_arn) 152 | 153 | compute_environment_arn = created_resources.get('compute_environment_arn') 154 | if compute_environment_arn: 155 | deletion.delete_batch_compute_environment(compute_environment_arn) 156 | 157 | subnet_id = created_resources.get('subnet_id') 158 | if subnet_id: 159 | deletion.delete_subnet(subnet_id) 160 | 161 | security_group_id = created_resources.get('security_group_id') 162 | if security_group_id: 163 | deletion.delete_security_group(security_group_id) 164 | 165 | repository_name = created_resources.get('repository_name') 166 | if repository_name: 167 | deletion.delete_ecr_repository(repository_name) 168 | 169 | # Remove the resources file 170 | os.remove('build/resources.json') 171 | os.remove('build/jobs.json') -------------------------------------------------------------------------------- /hashcloud/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/T0m-Ph/HashCloud/f8e14c431928a3a78b89463541252150f727c763/hashcloud/__init__.py -------------------------------------------------------------------------------- /hashcloud/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File name : __main__.py 4 | # Author : TomPh 5 | # Date created : 29 May 2023 6 | 7 | import argparse 8 | from hashcloud.AWS_Resources import resources 9 | from hashcloud import wordlist 10 | from hashcloud import crack 11 | 12 | def main(): 13 | parser = argparse.ArgumentParser(description='Run hashcat in the cloud.') 14 | subparsers = parser.add_subparsers(required=True) 15 | 16 | ########### Setup subparser ########### 17 | setup_parser = subparsers.add_parser('setup', help='Manage setup.') 18 | setup_subparsers = setup_parser.add_subparsers(required=True) 19 | 20 | # Create command 21 | create_parser = setup_subparsers.add_parser('create', help='Create AWS resources required.') 22 | create_parser.set_defaults(func=resources.initialize) 23 | 24 | # Cleanup command 25 | cleanup_parser = setup_subparsers.add_parser('cleanup', help='Delete AWS resources created.') 26 | cleanup_parser.set_defaults(func=resources.cleanup) 27 | 28 | 29 | ########### Wordlists subparser ########### 30 | wordlists_parser = subparsers.add_parser('wordlists', help='Manage wordlists.') 31 | wordlists_subparsers = wordlists_parser.add_subparsers(required=True) 32 | 33 | # List command 34 | list_parser = wordlists_subparsers.add_parser('list', help='List all wordlists.') 35 | list_parser.set_defaults(func=wordlist.list_wordlists) 36 | 37 | # Upload command 38 | 
upload_parser = wordlists_subparsers.add_parser('upload', help='Upload a wordlist.') 39 | upload_parser.add_argument('-f', type=str, help='Path to the wordlist file.', required=True) 40 | upload_parser.set_defaults(func=wordlist.upload_wordlist) 41 | 42 | 43 | ########### Crack subparser ########### 44 | crack_parser = subparsers.add_parser('crack', help='Crack a file.') 45 | crack_subparsers = crack_parser.add_subparsers(required=True) 46 | 47 | # Initiate command 48 | initiate_parser = crack_subparsers.add_parser('initiate', help='Initiate a new cracking job.') 49 | initiate_parser.add_argument('-f', type=str, help='Path to the file to crack.', required=True) 50 | initiate_parser.add_argument('-w', type=str, help='Name of the wordlist to use for cracking.', required=True) 51 | initiate_parser.add_argument('--options', type=str, help='Specify additional hashcat options for cracking.', required=True) 52 | initiate_parser.set_defaults(func=crack.crack_hashes) 53 | 54 | # Status command 55 | status_parser = crack_subparsers.add_parser('status', help='Check cracking job status.') 56 | status_parser.set_defaults(func=crack.crack_jobs_status) 57 | 58 | # Result command 59 | result_parser = crack_subparsers.add_parser('result', help='Get the result from a completed cracking job.') 60 | result_parser.add_argument('-f', type=str, help='File to get the results for.', required=True) 61 | result_parser.set_defaults(func=crack.get_results) 62 | 63 | args = parser.parse_args() 64 | args.func(**vars(args)) 65 | 66 | if __name__ == '__main__': 67 | main() -------------------------------------------------------------------------------- /hashcloud/crack.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File name : crack.py 4 | # Author : TomPh 5 | # Date created : 29 May 2023 6 | 7 | import boto3 8 | import json 9 | from tabulate import tabulate 10 | import datetime 11 | 12 | s3 = boto3.client('s3') 13 | 14 | wordlist_folder = 'passlists' 15 | to_crack_folder = 'to_crack' 16 | 17 | bucket_name = None 18 | job_definition_arn = None 19 | job_queue_arn = None 20 | 21 | try: 22 | with open('build/resources.json', 'r') as file: 23 | created_resources = json.load(file) 24 | bucket_name = created_resources.get('bucket_name') 25 | job_definition_arn = created_resources.get('job_definition_arn') 26 | job_queue_arn = created_resources.get('job_queue_arn') 27 | except Exception as e: 28 | print(e) 29 | 30 | def get_wordlist_s3(file_name): 31 | response = s3.list_objects_v2(Bucket=bucket_name, Prefix=f"{wordlist_folder}/{file_name}") 32 | if response['KeyCount'] > 0: 33 | for obj in response['Contents']: 34 | if obj['Key'] == f"{wordlist_folder}/{file_name}": 35 | return f"s3://{bucket_name}/{wordlist_folder}/{file_name}" 36 | return None 37 | 38 | def crack_hashes(f, w, options, **kwargs): 39 | if not bucket_name or not job_definition_arn or not job_queue_arn: 40 | print("Missing resources, run the setup command first.") 41 | return 42 | 43 | batch = boto3.client('batch') 44 | job_name = "crack_job" 45 | 46 | file_name = f.split('/')[-1] 47 | s3.upload_file(f, bucket_name, f"{to_crack_folder}/{file_name}") 48 | to_crack_file_path = f"s3://{bucket_name}/{to_crack_folder}/{file_name}" 49 | wordlist_file_path = get_wordlist_s3(w) 50 | if wordlist_file_path is None: 51 | print("Wordlist was not found in the S3 bucket") 52 | return 53 | 54 | vCPU = 1 55 | MEMORY = 2048 56 | 57 | try: 58 | with open('config.json', 'r') as file: 59 |
config = json.load(file) 60 | vCPU = config['vCPU'] 61 | MEMORY = config['MEMORY'] 62 | except Exception as e: 63 | print("No config file found, using default.") 64 | 65 | command = ["/tmp/run.sh"] 66 | command.extend(options.split(" ")) 67 | command.append("-w") 68 | command.append("4") 69 | command.append(to_crack_file_path) 70 | command.append(wordlist_file_path) 71 | 72 | response = batch.submit_job( 73 | jobName=job_name, 74 | jobQueue=job_queue_arn, 75 | jobDefinition=job_definition_arn, 76 | containerOverrides={ 77 | 'command': command, 78 | 'resourceRequirements': [ 79 | { 80 | 'value': f"{vCPU}", 81 | 'type': 'VCPU' 82 | }, 83 | { 84 | 'value': f"{MEMORY}", 85 | 'type': 'MEMORY' 86 | }, 87 | ], 88 | } 89 | ) 90 | 91 | job_id = response['jobId'] 92 | jobs = [] 93 | 94 | try: 95 | with open('build/jobs.json', 'r') as file: 96 | jobs = json.load(file) 97 | except Exception as e: 98 | jobs = [] 99 | finally: 100 | jobs.append({ 101 | "id": job_id, 102 | "file": f 103 | }) 104 | 105 | try: 106 | with open('build/jobs.json', 'w') as file: 107 | file.write(json.dumps(jobs)) 108 | except Exception as e: 109 | print(e) 110 | 111 | return job_id 112 | 113 | def crack_jobs_status(**kwargs): 114 | try: 115 | with open('build/jobs.json', 'r') as file: 116 | file_content = file.read() 117 | jobs = json.loads(file_content) 118 | except Exception: 119 | jobs = [] 120 | 121 | if jobs: 122 | job_ids = [j['id'] for j in jobs] 123 | 124 | batch = boto3.client('batch') 125 | response = batch.describe_jobs( 126 | jobs=job_ids 127 | ) 128 | job_statuses = response['jobs'] 129 | 130 | jobs_list = [] 131 | headers = ['Hash File', 'Status', 'Runtime'] 132 | 133 | for j in jobs: 134 | for js in job_statuses: 135 | if j['id'] == js['jobId']: 136 | status = js['status'] 137 | interval_dt = None 138 | time_taken = '-' 139 | if status == 'SUCCEEDED' or status == 'FAILED': 140 | started_dt = datetime.datetime.fromtimestamp(js['startedAt']/1000) 141 | stopped_dt = datetime.datetime.fromtimestamp(js['stoppedAt']/1000) 142 | interval_dt = stopped_dt - started_dt 143 | elif status == 'RUNNING': 144 | started_dt = datetime.datetime.fromtimestamp(js['startedAt']/1000) 145 | now = datetime.datetime.now() 146 | interval_dt = now - started_dt 147 | 148 | if interval_dt is not None: 149 | hours = interval_dt.days * 24 + interval_dt.seconds // 3600 150 | minutes = (interval_dt.seconds % 3600) // 60 151 | seconds = interval_dt.seconds % 60 152 | time_taken = f"{hours}h:{minutes}m:{seconds}s" 153 | jobs_list.append([j['file'], status, time_taken]) 154 | break 155 | print(tabulate(jobs_list, headers=headers)) 156 | 157 | def get_results(f, **kwargs): 158 | s3 = boto3.client('s3') 159 | try: 160 | s3.download_file(bucket_name, f"cracked/{f}", "cracked.txt") 161 | except Exception as e: 162 | print("Cracked file not available.
Either the job is still running or the hash was not cracked.") 163 | else: 164 | try: 165 | with open('cracked.txt', 'r') as file: 166 | file_content = file.read() 167 | print(file_content) 168 | except Exception: 169 | print("An unknown error occurred while reading the cracked file.") -------------------------------------------------------------------------------- /hashcloud/wordlist.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # File name : wordlist.py 4 | # Author : TomPh 5 | # Date created : 29 May 2023 6 | 7 | import boto3 8 | import json 9 | 10 | s3 = boto3.client('s3') 11 | 12 | wordlist_folder = 'passlists' 13 | bucket_name = None 14 | 15 | try: 16 | with open('build/resources.json', 'r') as file: 17 | created_resources = json.load(file) 18 | bucket_name = created_resources.get('bucket_name') 19 | except Exception: 20 | pass 21 | 22 | def list_wordlists(**kwargs): 23 | if not bucket_name: 24 | print("Missing resources, run the setup command first.") 25 | return 26 | response = s3.list_objects_v2(Bucket=bucket_name, Prefix=wordlist_folder) 27 | if response['KeyCount'] > 0: 28 | for obj in response['Contents']: 29 | print(obj['Key'].split('/')[-1]) 30 | else: 31 | print("No wordlists available") 32 | 33 | def upload_wordlist(f, **kwargs): 34 | if not bucket_name: 35 | print("Missing resources, run the setup command first.") 36 | return 37 | file_name = f.split('/')[-1] 38 | s3.upload_file(f, bucket_name, f"{wordlist_folder}/{file_name}") 39 | print(f"File uploaded successfully to s3://{bucket_name}/{wordlist_folder}/{file_name}") -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 2 | botocore 3 | certifi 4 | charset-normalizer 5 | docker 6 | idna 7 | jmespath 8 | packaging 9 | python-dateutil 10 | requests 11 | s3transfer 12 | six 13 | tabulate 14 | urllib3 15 | websocket-client --------------------------------------------------------------------------------
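To tie the CLI pieces together, here is a sketch of an end-to-end session using the commands defined in `hashcloud/__main__.py`. It is only an illustration: the wordlist name (`rockyou.txt`), the hash file (`hashes.txt`), and the hashcat flags passed through `--options` are placeholder assumptions; substitute your own files and hash mode.

```
# 1. Create the AWS resources (S3 bucket, IAM role, ECR image, Batch compute environment, job queue)
python3 hashcloud.py setup create

# 2. Upload a wordlist to the S3 bucket and confirm it is listed
python3 hashcloud.py wordlists upload -f rockyou.txt
python3 hashcloud.py wordlists list

# 3. Submit a cracking job (here: MD5 hashes, straight wordlist attack)
python3 hashcloud.py crack initiate -f hashes.txt -w rockyou.txt --options="-m 0 -a 0"

# 4. Poll the job, then fetch the cracked output once it has finished
python3 hashcloud.py crack status
python3 hashcloud.py crack result -f hashes.txt

# 5. Tear down all AWS resources when done
python3 hashcloud.py setup cleanup
```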