├── attacker ├── __init__.py ├── attacker.py └── ldos.py ├── decider ├── __init__.py ├── decider.py ├── random.py └── llm.py ├── defender ├── __init__.py └── defender.py ├── .gitignore ├── log ├── llm_log.py └── log.py ├── utils.py ├── README.md ├── constants.py ├── run.sh ├── main.py ├── env.py └── LICENSE /attacker/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /decider/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /defender/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | output 2 | txt 3 | csv 4 | data 5 | __pycache__ 6 | .env 7 | proxy.sh -------------------------------------------------------------------------------- /attacker/attacker.py: -------------------------------------------------------------------------------- 1 | from .ldos import LDoSAttacker 2 | from constants import AttackerType 3 | 4 | 5 | def attackerFactory(env, attacker_type, attacker_num=10): 6 | if attacker_type == AttackerType.LDOS: 7 | return LDoSAttacker(env, attacker_num) 8 | return None 9 | -------------------------------------------------------------------------------- /decider/decider.py: -------------------------------------------------------------------------------- 1 | import decider.random as random_decider 2 | import decider.llm as llm_decider 3 | from constants import DeciderType 4 | 5 | 6 | def deciderFactory(decider_type): 7 | if decider_type == DeciderType.LLM: 8 | return llm_decider 9 | elif decider_type == DeciderType.RANDOM: 10 | return random_decider 11 | else: 12 | raise ValueError("Invalid decider type") 13 | -------------------------------------------------------------------------------- /log/llm_log.py: -------------------------------------------------------------------------------- 1 | from .log import Logger 2 | import os 3 | import json 4 | 5 | 6 | class LLMLogger(Logger): 7 | def __init__( 8 | self, 9 | prefix: str, 10 | title: str, 11 | log_dir: str = "log", 12 | txt_dir: str = "txt", 13 | prompts_dir: str = "prompts", 14 | ): 15 | super().__init__(prefix, title, log_dir, txt_dir) 16 | prompts_path = f"{self.dir_path}/{prefix}-{title}/{prompts_dir}" 17 | self.init_prompts(prompts_path) 18 | 19 | def close(self): 20 | super().close() 21 | self.close_prompts() 22 | 23 | # prompts 24 | def init_prompts(self, prompts_path: str): 25 | if not os.path.exists(prompts_path): 26 | os.makedirs(prompts_path) 27 | self.prompts_path = prompts_path 28 | 29 | def write_prompts(self, episode: int, prompts: list): 30 | prompts_path = f"{self.prompts_path}/{episode}.json" 31 | with open(prompts_path, "w", encoding="utf-8") as f: 32 | json.dump(prompts, f, ensure_ascii=False, indent=2) 33 | 34 | def close_prompts(self): 35 | pass 36 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from constants import AttackerType 2 | 3 | 4 | def get_action_thresholds(attacker_type: AttackerType): 5 | if attacker_type == AttackerType.DDOS: 6 | return [(0, 0), (0, 0.8), (0.3, 0.3), (0, 0.8), (0.3, 0.3), (0, 0)] 7 | else: 8 | return [(0, 0), (0.8, 0), (0.3, 0.3), (0.8, 0), (0.3, 0.3), (0, 0)] 9 | 10 | 11 | def judge_fail_func(indicators): 12 | success, fail_msg = False, "" 13 | if indicators.C_d > 0: 14 | success = False 15 | fail_msg = "R_d > 0,There are replicas in a dangerous state, possibly because there are too many connections" 16 | elif indicators.M_d > 0: 17 | success = False 18 | fail_msg = "R_d > 0,There are replicas in a dangerous state, possibly because the memory usage is too high" 19 | elif indicators.con_delay > 0.8: 20 | success = False 21 | fail_msg = "con_delay > 0.8,The service delay is too high, possibly because there are too many connections" 22 | elif indicators.mem_delay > 0.8: 23 | success = False 24 | fail_msg = "mem_delay > 0.8,The service delay is too high, which may be caused by excessive memory usage." 25 | else: 26 | success = True 27 | fail_msg = None 28 | return success, fail_msg 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LLM-PD 2 | Official code for the paper entitled "Toward Intelligent and Secure Cloud: Large Language Model Empowered Proactive Defense" 3 | 4 | ## Overview 5 | 6 | Welcome to the repository for LLM-PD," a research project aimed at improving cloud security through Large Language Model (LLM) empowered Proactive Defense (PD). 7 | 8 | ## Project Status 9 | 10 | - **Under Review**: The research paper titled "Toward Intelligent and Secure Cloud: Large Language Model Empowered Proactive Defense" is currently under submission for peer review. 11 | - **Ongoing Development**: This repository will be regularly updated with code, documentation, and other resources as the project progresses. 12 | 13 | ## Citation 14 | 15 | ``` 16 | @article{zhou2024toward, 17 | title={Toward Intelligent and Secure Cloud: Large Language Model Empowered Proactive Defense}, 18 | author={Zhou, Yuyang and Cheng, Guang and Du, Kang and Chen, Zihan}, 19 | journal={arXiv preprint arXiv:2412.21051}, 20 | year={2024} 21 | } 22 | ``` 23 | 24 | ## Disclaimer 25 | 26 | The source code and specific methodologies of "LLM-PD" are currently withheld due to the innovative and confidential nature of the ongoing research. Full disclosure, including the public release of the code, will be considered following the completion of the review process and the paper's potential publication. 27 | -------------------------------------------------------------------------------- /constants.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from dataclasses import dataclass 3 | 4 | 5 | class DefenceStrategy(Enum): 6 | PORT_HOPPING = "PORT_HOPPING" 7 | REPLICA_INCREASE = "REPLICA_INCREASE" 8 | REPLICA_DECREASE = "REPLICA_DECREASE" 9 | REPLICA_EXPAND = "REPLICA_EXPAND" 10 | REPLICA_SHRINK = "REPLICA_SHRINK" 11 | NO_ACTION = "NO_ACTION" 12 | 13 | 14 | class DeciderType(Enum): 15 | RANDOM = "RANDOM" 16 | GREEDY = "GREEDY" 17 | DQN = "DQN" 18 | AC = "AC" 19 | PPO = "PPO" 20 | LLM = "LLM" 21 | 22 | 23 | class AttackerType(Enum): 24 | LDOS = "LDOS" 25 | DDOS = "DDOS" 26 | 27 | 28 | @dataclass 29 | class Indicators: 30 | C_e: float 31 | C_d: float 32 | M_e: float 33 | M_d: float 34 | con_delay: float 35 | mem_delay: float 36 | cost: int 37 | 38 | 39 | map_action_to_defence = { 40 | 0: DefenceStrategy.PORT_HOPPING, 41 | 1: DefenceStrategy.REPLICA_INCREASE, 42 | 2: DefenceStrategy.REPLICA_DECREASE, 43 | 3: DefenceStrategy.REPLICA_EXPAND, 44 | 4: DefenceStrategy.REPLICA_SHRINK, 45 | 5: DefenceStrategy.NO_ACTION, 46 | } 47 | 48 | 49 | def check_attacker_type(attacker_type: str) -> AttackerType: 50 | if attacker_type not in AttackerType.__members__: 51 | raise ValueError(f"Invalid attacker type: {attacker_type}") 52 | return AttackerType.__members__[attacker_type] 53 | 54 | 55 | def check_decider_type(decider_type: str) -> DeciderType: 56 | if decider_type not in DeciderType.__members__: 57 | raise ValueError(f"Invalid decider type: {decider_type}") 58 | return DeciderType.__members__[decider_type] 59 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | # survival_rate and step_num 2 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type LLM --attacker_type LDOS --attacker_num 50 --change_num 0 --enable_log True --prefix survival_rate 3 | 4 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type RANDOM --attacker_type LDOS --attacker_num 50 --change_num 0 --enable_log True --prefix survival_rate 5 | 6 | # convergence_episode 7 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type LLM --attacker_type LDOS --attacker_num 10 --change_num 0 --enable_log True --prefix convergence_episode 8 | 9 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type LLM --attacker_type LDOS --attacker_num 20 --change_num 0 --enable_log True --prefix convergence_episode 10 | 11 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type LLM --attacker_type LDOS --attacker_num 30 --change_num 0 --enable_log True --prefix convergence_episode 12 | 13 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type LLM --attacker_type LDOS --attacker_num 40 --change_num 0 --enable_log True --prefix convergence_episode 14 | 15 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type LLM --attacker_type LDOS --attacker_num 50 --change_num 0 --enable_log True --prefix convergence_episode 16 | 17 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type RANDOM --attacker_type LDOS --attacker_num 10 --change_num 0 --enable_log True --prefix convergence_episode 18 | 19 | # migration_sucess_rate 20 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type LLM --attacker_type LDOS --attacker_num 20 --change_num 50 --enable_log True --prefix migration_sucess_rate 21 | 22 | python main.py --num_episodes 10 --max_fail_num 5 --attack_begin True --attack_sequence 10 --decider_type RANDOM --attacker_type LDOS --attacker_num 20 --change_num 50 --enable_log True --prefix migration_sucess_rate 23 | -------------------------------------------------------------------------------- /log/log.py: -------------------------------------------------------------------------------- 1 | import os 2 | from torch.utils.tensorboard import SummaryWriter 3 | import pandas as pd 4 | 5 | 6 | class Logger: 7 | def __init__( 8 | self, 9 | prefix: str, 10 | title: str, 11 | log_dir: str = "log", 12 | txt_dir: str = "txt", 13 | ): 14 | self.dir_path = f"{os.getcwd()}/output" 15 | log_path = f"{self.dir_path}/{prefix}-{title}/{log_dir}" 16 | txt_path = f"{self.dir_path}/{prefix}-{title}/{txt_dir}" 17 | self.init_log(log_path) 18 | self.init_txt(txt_path) 19 | 20 | def close(self): 21 | self.close_log() 22 | self.close_txt() 23 | 24 | # log 25 | def init_log(self, log_path: str): 26 | if not os.path.exists(log_path): 27 | os.makedirs(log_path) 28 | self.log_path = log_path 29 | self.log_survival_rate = SummaryWriter(f"{log_path}") 30 | self.log_convergence_episode = SummaryWriter(f"{log_path}") 31 | self.log_success_list = SummaryWriter(f"{log_path}") 32 | self.log_step_num_list = SummaryWriter(f"{log_path}") 33 | 34 | def write_log( 35 | self, 36 | num_episodes: int, 37 | survival_rate: list, 38 | convergence_episode: int, 39 | success_list: list, 40 | step_num_list: list 41 | ): 42 | for i in range(num_episodes): 43 | self.log_survival_rate.add_scalar("survival_rate", survival_rate[i], i) 44 | self.log_success_list.add_scalar("success_list", success_list[i], i) 45 | self.log_step_num_list.add_scalar( 46 | "step_num_list", step_num_list[i], i 47 | ) 48 | self.log_convergence_episode.add_scalar( 49 | "convergence_episode", convergence_episode 50 | ) 51 | # save to csv 52 | df = pd.DataFrame( 53 | { 54 | "survival_rate": survival_rate, 55 | "success_list": success_list, 56 | "convergence_episode": convergence_episode, 57 | "step_num_list": step_num_list 58 | } 59 | ) 60 | df.to_csv(f"{self.log_path}/log.csv", index=False) 61 | 62 | def close_log(self): 63 | self.log_survival_rate.close() 64 | self.log_convergence_episode.close() 65 | self.log_success_list.close() 66 | self.log_step_num_list.close() 67 | 68 | # txt 69 | def init_txt(self, txt_path: str): 70 | if not os.path.exists(txt_path): 71 | os.makedirs(txt_path) 72 | self.txt_path = txt_path 73 | 74 | def write_txt(self, episode: int, txt_datas: list[dict]): 75 | for txt_data in txt_datas: 76 | for key, value in txt_data.items(): 77 | file_path = os.path.join(self.txt_path, f"{key}.csv") 78 | with open(file_path, "a", encoding="utf-8") as f: 79 | f.write(f"{value}\n") 80 | 81 | def close_txt(self): 82 | pass 83 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from argparse import Namespace 3 | from env import Env 4 | from decider.decider import deciderFactory 5 | from constants import ( 6 | check_attacker_type, 7 | check_decider_type, 8 | ) 9 | 10 | 11 | if __name__ == "__main__": 12 | # Create the parser 13 | parser = argparse.ArgumentParser(description="argparse") 14 | 15 | # Add arguments 16 | parser.add_argument( 17 | "--num_episodes", type=int, required=False, default=5, help="Number of episodes" 18 | ) 19 | parser.add_argument( 20 | "--max_fail_num", type=int, required=False, default=5, help="Max fail number" 21 | ) 22 | parser.add_argument( 23 | "--max_episode_step", 24 | type=int, 25 | required=False, 26 | default=30, 27 | help="Max episode step", 28 | ) 29 | 30 | parser.add_argument( 31 | "--attack_begin", 32 | type=bool, 33 | required=False, 34 | default=True, 35 | help="Attack from beginning", 36 | ) 37 | parser.add_argument( 38 | "--attack_sequence", 39 | type=int, 40 | nargs="+", 41 | required=False, 42 | default=[10], 43 | help="Attack sequence", 44 | ) 45 | 46 | parser.add_argument( 47 | "--decider_type", type=str, required=True, help="Type of the decider" 48 | ) 49 | parser.add_argument( 50 | "--attacker_type", type=str, required=True, help="Type of the attacker" 51 | ) 52 | parser.add_argument( 53 | "--attacker_num", type=int, required=True, help="Number of attackers" 54 | ) 55 | parser.add_argument( 56 | "--change_num", 57 | type=int, 58 | required=False, 59 | default=0, 60 | help="Changed attacker num", 61 | ) 62 | 63 | parser.add_argument( 64 | "--enable_log", 65 | type=bool, 66 | required=False, 67 | default=False, 68 | help="Enable log or not", 69 | ) 70 | parser.add_argument( 71 | "--prefix", 72 | type=str, 73 | required=False, 74 | default="default", 75 | help="Prefix of the log file", 76 | ) 77 | 78 | args = parser.parse_args() 79 | 80 | # check the type of the attacker and the decider 81 | attacker_type = check_attacker_type(args.attacker_type) 82 | decider_type = check_decider_type(args.decider_type) 83 | 84 | # create the environment 85 | env_args = Namespace( 86 | attacker_type=attacker_type, 87 | attacker_num=args.attacker_num, 88 | ) 89 | env = Env(env_args) 90 | 91 | prefix = args.prefix + "-" + args.decider_type 92 | 93 | # attack sequence 94 | attack_sequence_list = args.attack_sequence 95 | attack_sequence = [] 96 | attack_begin = args.attack_begin 97 | for seq in attack_sequence_list: 98 | attack_sequence += [attack_begin] * seq 99 | attack_begin = not attack_begin 100 | 101 | # create the decider 102 | decider = deciderFactory(decider_type) 103 | decider.train_and_test( 104 | env=env, 105 | prefix=prefix, 106 | num_episodes=args.num_episodes, 107 | max_episode_step=args.max_episode_step, 108 | attack_sequence=attack_sequence, 109 | max_fail_num=args.max_fail_num, 110 | enable_log=args.enable_log, 111 | change_num=args.change_num, 112 | ) 113 | -------------------------------------------------------------------------------- /decider/random.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | import time 3 | import random 4 | from dataclasses import asdict 5 | from log.log import Logger 6 | from utils import judge_fail_func, get_action_thresholds 7 | 8 | 9 | class Random: 10 | def __init__(self, max_fail_num=5): 11 | self.max_fail_num = max_fail_num 12 | self.fail_num = 0 13 | self.success_num = 0 14 | 15 | def reset(self): 16 | self.fail_num = 0 17 | self.success_num = 0 18 | 19 | def take_action(self, state, step, action_thresholds): 20 | action = random.randint(0, 5) 21 | con_threshold, mem_threshold = action_thresholds[action] 22 | return action, con_threshold, mem_threshold 23 | 24 | def judge(self, indicators): 25 | success, fail_msg = judge_fail_func(indicators) 26 | if success: 27 | self.fail_num = 0 28 | self.success_num += 1 29 | else: 30 | self.success_num = 0 31 | self.fail_num += 1 32 | 33 | finish = -1 34 | if self.fail_num >= self.max_fail_num: 35 | finish = 0 36 | if self.success_num >= self.max_fail_num: 37 | finish = 1 38 | return finish, success, fail_msg 39 | 40 | 41 | def train_and_test( 42 | env, 43 | num_episodes, 44 | attack_sequence, 45 | max_fail_num, 46 | max_episode_step=30, 47 | enable_log=True, 48 | prefix="default", 49 | change_num=0, 50 | ): 51 | timestamp = time.strftime("%Y%m%d-%H%M%S") 52 | title = ( 53 | env.attacker.type.value 54 | + "-" 55 | + str(env.attacker.num) 56 | + "-" 57 | + str(num_episodes) 58 | + "-" 59 | + str(change_num) 60 | + "-" 61 | + "(" 62 | + timestamp 63 | + ")" 64 | ) 65 | if enable_log: 66 | logger = Logger(prefix, title) 67 | 68 | agent = Random(max_fail_num) 69 | 70 | for_step = 0 71 | for_episode_success = False 72 | 73 | survival_rate = [] 74 | convergence_episode = max_fail_num 75 | success_list = [] 76 | step_num_list = [] 77 | 78 | for episode in range(num_episodes): 79 | finish = -1 80 | step = 0 81 | attack_len = len(attack_sequence) 82 | max_steps = attack_len 83 | state = env.reset() 84 | agent.reset() 85 | 86 | txt_datas = [] 87 | episode_success = [] 88 | 89 | if change_num != 0 and episode == num_episodes - 1: 90 | env.change_attacker_num(change_num) 91 | 92 | with tqdm(total=max_steps, desc=f"iteration {episode}") as pbar: 93 | while finish == -1: 94 | print(f"\nstep {step}") 95 | do_attack = attack_sequence[step % attack_len] 96 | action_thresholds = get_action_thresholds(env.attacker.type) 97 | attack_indicators = env.cal_indicators(state) 98 | action, con_percent, mem_percent = agent.take_action( 99 | state, step, action_thresholds 100 | ) 101 | print( 102 | "action_msg", 103 | action, 104 | con_percent, 105 | mem_percent, 106 | attack_indicators, 107 | ) 108 | 109 | ( 110 | next_state, 111 | defence_state, 112 | defence_success, 113 | defence_fail_msg, 114 | defence_cost, 115 | ) = env.step( 116 | action, 117 | {"con_percent": con_percent, "mem_percent": mem_percent}, 118 | do_attack, 119 | ) 120 | defence_indicators = env.cal_indicators(defence_state, defence_cost) 121 | finish, success, fail_msg = agent.judge(defence_indicators) 122 | print( 123 | "defence_msg", 124 | defence_success, 125 | defence_fail_msg, 126 | asdict(defence_indicators), 127 | ) 128 | 129 | episode_success.append(success) 130 | 131 | step += 1 132 | max_steps = max(max_steps, step) 133 | state = next_state 134 | 135 | if step >= max_episode_step: 136 | finish = 0 137 | success = 0 138 | fail_msg = ( 139 | f"The defense was unsuccessful after {max_episode_step} steps!" 140 | ) 141 | 142 | txt_datas.append( 143 | { 144 | "action": [action, con_percent, mem_percent], 145 | "indicators": [ 146 | asdict(attack_indicators), 147 | asdict(defence_indicators), 148 | ], 149 | "defence_msg": [defence_success, defence_fail_msg], 150 | "success": [success, fail_msg], 151 | } 152 | ) 153 | 154 | pbar.set_postfix( 155 | { 156 | "episode": step, 157 | "return": "%.3f" % (success / max_steps), 158 | } 159 | ) 160 | pbar.update(1) 161 | if max_steps != attack_len: 162 | pbar.total = max_steps 163 | pbar.refresh() 164 | 165 | for_step = step 166 | for_episode_success = finish == 1 167 | 168 | survival_rate.append(sum(episode_success) / len(episode_success)) 169 | if step == max_fail_num and convergence_episode == max_fail_num: 170 | convergence_episode = episode 171 | success_list.append(for_episode_success) 172 | step_num_list.append(for_step) 173 | 174 | print( 175 | f"The {episode} episode has ended, with a total of {for_step} attack-defense cycles. The defense in this episode was {'successful' if for_episode_success else 'failed'}." 176 | ) 177 | 178 | if enable_log: 179 | logger.write_txt(episode, txt_datas) 180 | 181 | if enable_log: 182 | logger.write_log( 183 | num_episodes, 184 | survival_rate, 185 | convergence_episode, 186 | success_list, 187 | step_num_list, 188 | ) 189 | logger.close() 190 | 191 | return ( 192 | survival_rate, 193 | convergence_episode, 194 | success_list, 195 | step_num_list, 196 | ) 197 | -------------------------------------------------------------------------------- /env.py: -------------------------------------------------------------------------------- 1 | import gymnasium as gym 2 | from gymnasium import spaces 3 | import numpy as np 4 | from attacker.attacker import attackerFactory 5 | from defender.defender import Defender 6 | from constants import Indicators, map_action_to_defence 7 | 8 | 9 | class Env(gym.Env): 10 | def __init__(self, args): 11 | self.pod_max_num = 100 # The total number of resource pods of the service 12 | self.pod_con_num = 256 # Maximum number of connections per pod 13 | self.pod_mem_num = 100 # Maximum memory usage of a single pod 14 | self.ser_max_num = 10 # Maximum number of replicas 15 | self.ser_ind = 4 # The number of indicators of the replicas 16 | self.ser_num = 0 # Current number of replicas 17 | self.con_danger_thresh_percent = 0.9 # Dangerous service connection threshold 18 | self.con_effective_thresh_percent = ( 19 | 0.3 # Inefficient service connection threshold 20 | ) 21 | self.mem_danger_thresh_percent = 0.9 # Dangerous service memory usage threshold 22 | self.mem_effective_thresh_percent = ( 23 | 0.1 # Inefficient service memory usage threshold 24 | ) 25 | 26 | # status indicators of each replica: number of pods, number of connections, port number, memory usage 27 | high = np.zeros((self.ser_max_num, self.ser_ind), dtype=np.int64) 28 | low = np.zeros((self.ser_max_num, self.ser_ind), dtype=np.int64) 29 | for i in range(self.ser_max_num): 30 | high[i] = [100, 25600, 32767, 10000] 31 | low[i] = [0, 0, 30000, 0] 32 | 33 | self.observation_space = spaces.Box( 34 | low, high, shape=(self.ser_max_num, self.ser_ind), dtype=np.int64 35 | ) # Box(10,4) 36 | 37 | self.defence_num = 6 38 | self.action_space = spaces.Discrete( 39 | self.defence_num 40 | ) # The size of the action space, one dimension 41 | 42 | self.attacker = attackerFactory(self, args.attacker_type, args.attacker_num) 43 | self.defender = Defender(self) 44 | self.defence_strategy = None 45 | 46 | def reset(self): 47 | self.state = np.zeros((self.ser_max_num, self.ser_ind), dtype=np.int64) 48 | self.attack_state = np.zeros((self.ser_max_num, 6), dtype=np.int64) 49 | self.steps_beyond_terminated = 0 50 | 51 | self.defender.reset() 52 | self.attacker.reset() 53 | 54 | return np.array(self.state, dtype=np.int64) 55 | 56 | def step(self, action, params, do_attack=True, simulate=False): 57 | err_msg = f"{action!r} ({type(action)}) invalid" 58 | assert self.action_space.contains(action), err_msg 59 | assert self.state is not None, "Call reset before using step method." 60 | 61 | self.pod_remain = self.pod_max_num - np.sum( 62 | self.state[:, 0] 63 | ) # The number of remaining pods is the computing resources 64 | self.port_list = ( 65 | [] 66 | ) # Record the original port of the service that changes the port after the attacker attacks 67 | self.add_ser_list1 = [] # replica service to expand 68 | self.add_ser_list2 = [] # New services generated by extended replicas 69 | self.del_ser_list = [] # deleted replica service 70 | 71 | save_state = self.state.copy() # Save the service status of the previous moment 72 | save_attack_state = ( 73 | self.attack_state.copy() 74 | ) # Save the attack status of the previous moment 75 | save_ser_num = ( 76 | self.ser_num 77 | ) # Save the number of services at the previous moment 78 | 79 | # transfer action to defence_strategy 80 | defence_strategy = map_action_to_defence[action] 81 | defence_success, defence_fail_msg, defence_cost = self.defender.step( 82 | defence_strategy, params 83 | ) # execute defence strategy 84 | 85 | defence_state = self.state.copy() # The service status after defense 86 | 87 | # The current disadvantage is that do_attack actually determines whether to attack in the next round, not the current round. 88 | if do_attack: 89 | self.attacker.step( 90 | defence_strategy, simulate 91 | ) # Input attack traffic and execute attack strategy according to defense strategy 92 | else: 93 | self.state = np.zeros((self.ser_max_num, self.ser_ind), dtype=np.int64) 94 | self.attacker.reset() # Silent for one round, no attack 95 | self.defender.reset() # Normal user traffic, clear attack traffic 96 | next_state = self.state.copy() # The service status at the next moment 97 | 98 | # If it is a simulated execution, then the defense is executed and restored to the previous state 99 | if simulate: 100 | self.state = save_state 101 | self.attack_state = save_attack_state 102 | self.ser_num = save_ser_num 103 | 104 | return ( 105 | next_state, 106 | defence_state, 107 | defence_success, 108 | defence_fail_msg, 109 | defence_cost, 110 | ) 111 | 112 | def change_attacker_num(self, num): 113 | if self.attacker.num == num: 114 | return 115 | self.attacker = attackerFactory(self, self.attacker.type, num) 116 | self.reset() 117 | 118 | def cal_indicators(self, state, cost=0): 119 | con_effective_flag = 0 # Number of inefficient services 120 | con_danger_flag = 0 # Number of dangerous services 121 | mem_effective_flag = 0 122 | mem_danger_flag = 0 123 | pod_num = 0 # The number of pods for the service 124 | pod_con_num = 0 # Number of connections for the service 125 | pod_mem_num = 0 # Memory usage of the service 126 | for i in range(self.ser_max_num): 127 | if state[i][0] > 0: 128 | pod_num += state[i][0] 129 | pod_con_num += state[i][1] 130 | pod_mem_num += state[i][3] 131 | # con 132 | if ( 133 | state[i][1] 134 | > state[i][0] * self.pod_con_num * self.con_danger_thresh_percent 135 | ): 136 | con_danger_flag += 1 137 | elif ( 138 | state[i][1] 139 | < self.con_effective_thresh_percent * state[i][0] * self.pod_con_num 140 | ): 141 | con_effective_flag += 1 142 | # mem 143 | if ( 144 | state[i][3] 145 | > state[i][0] * self.pod_mem_num * self.mem_danger_thresh_percent 146 | ): 147 | mem_danger_flag += 1 148 | elif ( 149 | state[i][3] 150 | < self.mem_effective_thresh_percent * state[i][0] * self.pod_mem_num 151 | ): 152 | mem_effective_flag += 1 153 | C_e = con_effective_flag / self.ser_num # Proportion of inefficient services 154 | C_d = con_danger_flag / self.ser_num # Proportion of dangerous services 155 | M_e = mem_effective_flag / self.ser_num 156 | M_d = mem_danger_flag / self.ser_num 157 | con_delay = pod_con_num / (pod_num * self.pod_con_num) # Service delay 158 | mem_delay = pod_mem_num / (pod_num * self.pod_mem_num) 159 | indicators = Indicators(C_e, C_d, M_e, M_d, con_delay, mem_delay, cost) 160 | return indicators 161 | 162 | def cal_reward( 163 | self, success, defence_success, defence_indicators, success_num, fail_num 164 | ): 165 | alpha, beta, gamma, delta = 10, 1, 1, 5 166 | success_flag = 1 if success else -1 167 | defence_success_flag = 1 if defence_success else -1 168 | time_cost = 2 169 | reward = ( 170 | alpha * success_flag 171 | + beta * (defence_success_flag + success_num - fail_num) 172 | - gamma * (defence_indicators.cost + time_cost) 173 | - delta * (defence_indicators.M_d + defence_indicators.C_d) 174 | ) 175 | return reward 176 | 177 | def get_state_index(self, port): 178 | return self.state[:, 2].tolist().index(port) 179 | 180 | def get_attack_index(self, port): 181 | return self.attack_state[:, 0].tolist().index(port) 182 | -------------------------------------------------------------------------------- /attacker/ldos.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from constants import AttackerType, DefenceStrategy 3 | 4 | 5 | class LDoSAttacker: 6 | def __init__(self, env, num): 7 | self.num = num 8 | self.con_ab = 256 # The attacker's connection occupation capability 9 | self.mem_ab = 30 # Attacker's memory occupation capability 10 | self.env = env 11 | self.type = AttackerType.LDOS 12 | 13 | def reset(self): 14 | # The attacker can observe the information matrix in the environment: 15 | # service port number, load rate (connection or memory load), number of attacks, attack weight, attack traffic, memory usage 16 | self.con_ability = self.num * self.con_ab 17 | self.con_remain = self.con_ability 18 | self.mem_ability = self.num * self.mem_ab 19 | self.mem_remain = self.mem_ability 20 | 21 | def step(self, defence_strategy, simulate=False): 22 | save_con_remain = self.con_remain 23 | save_mem_remain = self.mem_remain 24 | 25 | # Changes in attacker traffic after defense actions 26 | if ( 27 | defence_strategy == DefenceStrategy.PORT_HOPPING 28 | ): # Service attack traffic that has port changes should be recovered 29 | for port in self.env.port_list: 30 | if port in self.env.attack_state[:, 0]: 31 | ind = self.env.get_attack_index(port) 32 | self.con_remain += self.env.attack_state[ind][4] 33 | self.mem_remain += self.env.attack_state[ind][5] 34 | self.env.attack_state[ind][4] = 0 35 | self.env.attack_state[ind][5] = 0 36 | elif ( 37 | defence_strategy == DefenceStrategy.REPLICA_INCREASE 38 | ): # Add a replica, the attack traffic needs to be allocated to half of the new replica, and a new service needs to be added in attack_state 39 | for port in self.env.add_ser_list1: 40 | if port in self.env.attack_state[:, 0]: 41 | ind = self.env.get_attack_index(port) 42 | con_tmp = 0.5 * self.env.attack_state[ind][4] 43 | mem_tmp = 0.5 * self.env.attack_state[ind][5] 44 | self.env.attack_state[ind][4] = con_tmp 45 | self.env.attack_state[ind][5] = mem_tmp 46 | new_port = self.env.add_ser_list2[ 47 | self.env.add_ser_list1.index(port) 48 | ] 49 | for i in range(self.env.ser_max_num): 50 | if self.env.attack_state[i][0] == 0: 51 | self.env.attack_state[i][0] = new_port 52 | self.env.attack_state[i][4] = con_tmp 53 | self.env.attack_state[i][5] = mem_tmp 54 | break 55 | elif defence_strategy == DefenceStrategy.REPLICA_DECREASE: 56 | for port in self.env.del_ser_list: 57 | if port in self.env.attack_state[:, 0]: 58 | ind = self.env.get_attack_index(port) 59 | attack_con = self.env.attack_state[ind][4] 60 | attack_mem = self.env.attack_state[ind][5] 61 | self.env.attack_state[ind][4] = 0 62 | self.env.attack_state[ind][5] = 0 63 | for i in range(self.env.ser_max_num): 64 | if self.env.attack_state[i][0]: 65 | self.env.attack_state[i][4] += ( 66 | attack_con // self.env.ser_num 67 | ) 68 | self.env.attack_state[i][5] += ( 69 | attack_mem // self.env.ser_num 70 | ) 71 | break 72 | 73 | # Reconnaissance phase: The attacker builds an observation matrix in the first round, and then only needs to add or delete ports and corresponding services; 74 | # the defender performs port transformation, and the attacker remains silent for a round and does not attack 75 | if self.env.port_list == []: 76 | # Determine the latency and other parameters based on the port. 77 | for port in self.env.attack_state[ 78 | :, 0 79 | ]: # First delete the ports that no longer exist in the state and assign them all to 0 80 | if port not in self.env.state[:, 2]: 81 | ind = self.env.get_attack_index(port) 82 | self.env.attack_state[ind] = np.array([0, 0, 0, 0, 0, 0]) 83 | for port in self.env.state[ 84 | :, 2 85 | ]: # Add the newly added service in the state: only modify the port number, delay, and weight 86 | ind_s = self.env.get_state_index(port) 87 | if port > 0: 88 | if port in self.env.attack_state[:, 0]: 89 | ind = self.env.get_attack_index(port) 90 | self.env.attack_state[ind][0] = self.env.state[ind_s][ 91 | 2 92 | ] # Service port number detected by the attacker 93 | # The service latency is expressed by dividing the number of service connections by the number of connections that the service can carry. 94 | # The latency is too small to be reflected after rounding, so it is increased by 100 times. 95 | self.env.attack_state[ind][1] = ( 96 | 100 97 | * self.env.state[ind_s][1] 98 | / (self.env.state[ind_s][0] * self.env.pod_con_num) 99 | ) 100 | # Calculating weights by delay 101 | self.env.attack_state[ind][3] = 0.9 * self.env.attack_state[ 102 | ind 103 | ][1] + 0.1 * 100 * ( 104 | self.env.attack_state[ind][2] 105 | / (self.env.steps_beyond_terminated + 1) 106 | ) 107 | else: 108 | for i in range(self.env.ser_max_num): 109 | if self.env.attack_state[i][0] == 0: 110 | self.env.attack_state[i][0] = self.env.state[ind_s][2] 111 | self.env.attack_state[i][1] = ( 112 | 100 113 | * self.env.state[ind_s][1] 114 | / (self.env.state[ind_s][0] * self.env.pod_con_num) 115 | ) 116 | self.env.attack_state[i][ 117 | 3 118 | ] = 0.9 * self.env.attack_state[i][1] + 0.1 * 100 * ( 119 | self.env.attack_state[i][2] 120 | / (self.env.steps_beyond_terminated + 1) 121 | ) 122 | break 123 | 124 | # Attack target selection 125 | target_list = [] 126 | for port in self.env.attack_state[:, 0]: 127 | if port > 0: 128 | target_list.append(port) 129 | 130 | # Start the attack and distribute the attack traffic according to the port 131 | if self.con_remain > 0: 132 | for port in target_list: 133 | # Find the attacked service number in state, because state and attack_state are connected through port 134 | target_ser_num = self.env.get_state_index(port) 135 | target = self.env.get_attack_index(port) 136 | # Number of attacks 137 | self.env.attack_state[target][2] += 1 138 | # Number of attack connections 139 | attack_con = ( 140 | self.con_remain 141 | * self.env.attack_state[target][3] 142 | // np.sum(self.env.attack_state[:, 3]) 143 | ) 144 | if attack_con <= ( 145 | self.env.state[target_ser_num][0] * self.env.pod_con_num 146 | - self.env.state[target_ser_num][1] 147 | ): 148 | self.env.state[target_ser_num][1] += attack_con 149 | self.env.attack_state[target][4] += attack_con 150 | self.con_remain -= attack_con 151 | else: 152 | self.env.attack_state[target][4] += ( 153 | self.env.state[target_ser_num][0] * self.env.pod_con_num 154 | - self.env.state[target_ser_num][1] 155 | ) 156 | self.con_remain -= ( 157 | self.env.state[target_ser_num][0] * self.env.pod_con_num 158 | - self.env.state[target_ser_num][1] 159 | ) 160 | self.env.state[target_ser_num][1] = ( 161 | self.env.state[target_ser_num][0] * self.env.pod_con_num 162 | ) # Fully load the attacked service 163 | # Attack memory usage 164 | attack_mem = ( 165 | self.mem_remain 166 | * self.env.attack_state[target][3] 167 | // np.sum(self.env.attack_state[:, 3]) 168 | ) 169 | if attack_mem <= ( 170 | self.env.state[target_ser_num][0] * self.env.pod_mem_num 171 | - self.env.state[target_ser_num][3] 172 | ): 173 | self.env.state[target_ser_num][3] += attack_mem 174 | self.env.attack_state[target][5] += attack_mem 175 | self.mem_remain -= attack_mem 176 | else: 177 | self.env.attack_state[target][5] += ( 178 | self.env.state[target_ser_num][0] * self.env.pod_mem_num 179 | - self.env.state[target_ser_num][3] 180 | ) 181 | self.mem_remain -= ( 182 | self.env.state[target_ser_num][0] * self.env.pod_mem_num 183 | - self.env.state[target_ser_num][3] 184 | ) 185 | self.env.state[target_ser_num][3] = ( 186 | self.env.state[target_ser_num][0] * self.env.pod_mem_num 187 | ) 188 | 189 | # If it is a simulated execution, then the defense is executed and restored to the previous state 190 | if simulate: 191 | self.con_remain = save_con_remain 192 | self.mem_remain = save_mem_remain 193 | -------------------------------------------------------------------------------- /defender/defender.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | from constants import DefenceStrategy 4 | 5 | 6 | class Defender: 7 | def __init__(self, env): 8 | self.env = env 9 | 10 | def reset(self): 11 | """ 12 | The simulation environment starts with 50 pods, which is half of the total pod count. 13 | The total number of connections is defined to be between 0.5 and 0.6 of the product of the total number of pods and 256, and the memory usage is between 10% and 50%. 14 | """ 15 | self.env.ser_num = 5 16 | for i in range(self.env.ser_num): 17 | port = np.random.randint(30000, 32767) 18 | while port in self.env.state[:, 2]: 19 | port = np.random.randint(30000, 32767) 20 | connection = np.random.randint(int(10 * 256 * 0.5), int(10 * 256 * 0.6)) 21 | mem = np.random.randint(int(10 * 100 * 0.1), int(10 * 100 * 0.5)) 22 | self.env.state[i] = [10, connection, port, mem] 23 | 24 | def step(self, defence_strategy, params): 25 | inf_services = [] 26 | if defence_strategy in [ 27 | DefenceStrategy.REPLICA_INCREASE, 28 | DefenceStrategy.REPLICA_DECREASE, 29 | DefenceStrategy.REPLICA_EXPAND, 30 | DefenceStrategy.REPLICA_SHRINK, 31 | ]: 32 | con_percent = params["con_percent"] 33 | mem_percent = params["mem_percent"] 34 | if ( 35 | con_percent < 0 36 | or con_percent > 1 37 | or mem_percent < 0 38 | or mem_percent > 1 39 | # or (con_percent == 0 and mem_percent == 0) 40 | ): 41 | return False, "The parameters are invalid.", 0 42 | if defence_strategy == DefenceStrategy.PORT_HOPPING: 43 | # Port hopping 44 | for i in range(self.env.ser_max_num): 45 | if self.env.state[i][0] > 0: 46 | inf_services.append(i) 47 | self.env.port_list.append(self.env.state[i][2]) 48 | # Reset the attack traffic while keeping the normal traffic unchanged. 49 | if self.env.state[i][2] in self.env.attack_state[:, 0]: 50 | ind = self.env.get_attack_index(self.env.state[i][2]) 51 | # Connection usage 52 | self.env.state[i][1] = ( 53 | self.env.state[i][1] - self.env.attack_state[ind][4] 54 | ) 55 | # Memory usage 56 | self.env.state[i][3] = ( 57 | self.env.state[i][3] - self.env.attack_state[ind][5] 58 | ) 59 | port = np.random.randint(30000, 32767) 60 | while ( 61 | port in self.env.state[:, 2] 62 | ): # Make sure the port number does not overlap with the original port or other used ports. 63 | port = np.random.randint(30000, 32767) 64 | self.env.state[i][2] = port 65 | return ( 66 | True, 67 | f"The port of service replica {inf_services} was changed successfully", 68 | 4, 69 | ) 70 | elif defence_strategy == DefenceStrategy.REPLICA_INCREASE: 71 | # Select service replicas with a load rate exceeding the specified threshold and create a new replica. 72 | # The number of pods for the new replica will be the same as the original service, and half of the total traffic will be assigned to the new replica. 73 | if self.env.ser_num == self.env.ser_max_num: 74 | return False, "The maximum number of replicas has been reached", 0 75 | elif self.env.pod_remain == 0: 76 | return False, "There are no remaining resource pods to allocate", 0 77 | else: 78 | for i in range(self.env.ser_max_num): 79 | if ( 80 | self.env.state[i][1] 81 | > con_percent * self.env.state[i][0] * self.env.pod_con_num 82 | and self.env.state[i][3] 83 | > mem_percent * self.env.state[i][0] * self.env.pod_mem_num 84 | ): 85 | if ( 86 | self.env.pod_remain >= self.env.state[i][0] 87 | and self.env.ser_num < self.env.ser_max_num 88 | ): 89 | inf_services.append(i) 90 | self.env.add_ser_list1.append(self.env.state[i][2]) 91 | new_pod = self.env.state[i][0] 92 | connection = 0.5 * self.env.state[i][1] 93 | self.env.state[i][1] = connection 94 | mem = 0.5 * self.env.state[i][3] 95 | self.env.state[i][3] = mem 96 | port = np.random.randint(30000, 32767) 97 | while port in self.env.state[:, 2]: 98 | port = np.random.randint(30000, 32767) 99 | for j in range( 100 | self.env.ser_max_num 101 | ): # Locate the extended copy 102 | if self.env.state[j][0] == 0: 103 | self.env.state[j] = np.array( 104 | [new_pod, connection, port, mem] 105 | ) 106 | self.env.add_ser_list2.append(port) 107 | self.env.pod_remain -= new_pod 108 | self.env.ser_num += 1 109 | break 110 | else: 111 | return ( 112 | False, 113 | "There are no remaining resource pods to allocate", 114 | 0, 115 | ) 116 | if len(inf_services) == 0: 117 | return ( 118 | False, 119 | "There are no replicas that exceed the specified load ratio, so there is no need to add replicas", 120 | 0, 121 | ) 122 | else: 123 | return True, f"Replica added successfully {inf_services}", 1 124 | elif defence_strategy == DefenceStrategy.REPLICA_DECREASE: 125 | # Delete the replicas with a load rate lower than the specified one, 126 | # and distribute the traffic of the deleted services (including users and attackers) to other services; 127 | # The number of services cannot be less than 1 128 | all_delete = True 129 | for i in range(self.env.ser_max_num): 130 | if ( 131 | self.env.state[i][1] 132 | >= con_percent * self.env.state[i][0] * self.env.pod_con_num 133 | or self.env.state[i][3] 134 | >= mem_percent * self.env.state[i][0] * self.env.pod_mem_num 135 | ): 136 | all_delete = False 137 | if all_delete: 138 | return ( 139 | False, 140 | "The load rate is set too high and all replicas will be deleted.", 141 | 0, 142 | ) 143 | con_num = 0 144 | mem_num = 0 145 | for i in range(self.env.ser_max_num): 146 | if ( 147 | self.env.state[i][1] 148 | < con_percent * self.env.state[i][0] * self.env.pod_con_num 149 | and self.env.state[i][3] 150 | < mem_percent * self.env.state[i][0] * self.env.pod_mem_num 151 | ): 152 | inf_services.append(i) 153 | con_num += self.env.state[i][1] 154 | mem_num += self.env.state[i][3] 155 | self.env.state[i] = np.array([0, 0, 0, 0]) 156 | self.env.ser_num -= 1 157 | self.env.pod_remain += 1 158 | for j in range(self.env.ser_max_num): 159 | if self.env.state[j][0]: 160 | self.env.state[j][1] += con_num // self.env.ser_num 161 | self.env.state[j][3] += mem_num // self.env.ser_num 162 | if len(inf_services) == 0: 163 | return ( 164 | False, 165 | "There are no replicas with a load ratio lower than the specified value, so there is no need to reduce the number of replicas.", 166 | 0, 167 | ) 168 | else: 169 | return True, f"Replica deleted successfully {inf_services}", 0 170 | elif defence_strategy == DefenceStrategy.REPLICA_EXPAND: 171 | # Select all replicas with a load rate greater than the specified one and expand them. 172 | # After expansion, the load rate will be the specified load rate to ensure service quality; 173 | if self.env.pod_remain == 0: 174 | return False, "There are no remaining resource pods to allocate", 0 175 | else: 176 | for i in range(self.env.ser_max_num): 177 | if ( 178 | self.env.state[i][1] 179 | > con_percent * self.env.state[i][0] * self.env.pod_con_num 180 | and self.env.state[i][3] 181 | > mem_percent * self.env.state[i][0] * self.env.pod_mem_num 182 | ): 183 | con_incre = ( 184 | int( 185 | math.ceil( 186 | self.env.state[i][1] 187 | / (self.env.pod_con_num * con_percent) 188 | - self.env.state[i][0] 189 | ) 190 | ) 191 | if con_percent != 0 192 | else 0 193 | ) 194 | mem_incre = ( 195 | int( 196 | math.ceil( 197 | self.env.state[i][3] 198 | / (self.env.pod_mem_num * mem_percent) 199 | - self.env.state[i][0] 200 | ) 201 | ) 202 | if mem_percent != 0 203 | else 0 204 | ) 205 | pod_incre = max(con_incre, mem_incre) 206 | if self.env.pod_remain >= pod_incre: 207 | inf_services.append(i) 208 | self.env.state[i][0] = self.env.state[i][0] + pod_incre 209 | self.env.pod_remain -= pod_incre 210 | else: 211 | return ( 212 | False, 213 | "There are no remaining resource pods to allocate", 214 | 0, 215 | ) 216 | if len(inf_services) == 0: 217 | return ( 218 | False, 219 | "There are no replicas that exceed the specified load ratio, so there is no need to add resource pods", 220 | 0, 221 | ) 222 | else: 223 | return True, f"服务{inf_services}副本扩容成功", 1 224 | elif defence_strategy == DefenceStrategy.REPLICA_SHRINK: 225 | # Select replicas with a load rate lower than the specified load rate and scale them down. 226 | # The load rate after scaling down is the specified load rate to ensure the lowest energy consumption ratio; 227 | for i in range(self.env.ser_max_num): 228 | if ( 229 | self.env.state[i][1] 230 | < con_percent * self.env.state[i][0] * self.env.pod_con_num 231 | and self.env.state[i][3] 232 | < mem_percent * self.env.state[i][0] * self.env.pod_mem_num 233 | ): 234 | con_decre = int( 235 | self.env.state[i][0] 236 | - self.env.state[i][1] / (self.env.pod_con_num * con_percent) 237 | ) 238 | mem_decre = int( 239 | self.env.state[i][0] 240 | - self.env.state[i][3] / (self.env.pod_mem_num * mem_percent) 241 | ) 242 | pod_decre = max(con_decre, mem_decre) 243 | inf_services.append(i) 244 | self.env.state[i][0] = self.env.state[i][0] - pod_decre 245 | self.env.pod_remain += pod_decre 246 | if len(inf_services) == 0: 247 | return ( 248 | False, 249 | "There are no replicas with a load ratio lower than the specified one, so no scaling down is required.", 250 | 0, 251 | ) 252 | else: 253 | return ( 254 | True, 255 | f"The replica was successfully scaled down {inf_services}", 256 | 0, 257 | ) 258 | elif defence_strategy == DefenceStrategy.NO_ACTION: 259 | return True, "No action", 0 260 | 261 | return True, None, 0 262 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /decider/llm.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | from tenacity import retry, stop_after_attempt 3 | import time 4 | from openai import OpenAI 5 | from pydantic import BaseModel 6 | from dataclasses import asdict 7 | from log.llm_log import LLMLogger 8 | from utils import get_action_thresholds, judge_fail_func 9 | import random 10 | 11 | 12 | class Action(BaseModel): 13 | action: int 14 | # con_percent: float 15 | # mem_percent: float 16 | desc: str 17 | 18 | 19 | class Judge(BaseModel): 20 | success: bool 21 | desc: str 22 | 23 | 24 | class Reflex(BaseModel): 25 | desc: str 26 | 27 | 28 | class LLM: 29 | def __init__(self, num_episodes, max_fail_num=5): 30 | self.client = OpenAI() 31 | self.inital_prompts = [ 32 | { 33 | "role": "system", 34 | "content": """ 35 | You are a security robot capable of continuously improving defense strategies across multiple "episodes" of DoS attacks. Each episode consists of multiple "steps" in the attack and defense processes. You must constantly monitor the service's "number of connections" and "memory usage" to ensure system security and service availability. 36 | In each step, the attacker may either launch an attack or remain stationary. If the attacker launches an attack, it will occupy a large number of replica connections or memory resources, resulting in an increase in the service connection load rate and memory load rate. 37 | As the defender, you need to perform two phases. In the "Decision" phase, you need to select one of six MTD strategies based on the current service status and evaluation indicators. After applying the defense, the "Judgment" phase begins, where the service status evaluation indicators are used to assess whether the defense was successful or failed. 38 | If the defense is successful over a specified number of consecutive steps, it indicates that the current defense strategy is effective against the attacker, and the episode ends. If the defense fails over a specified number of consecutive steps, it indicates that the defense strategy is not effective, and the episode ends. 39 | Between steps and episodes, "Step reflection" and "Episode reflection" are conducted to summarize the success and failure experiences: 40 | Step reflection: Multiple attack and defense steps may form an action sequence with a mix of successes and failures. If the number of successes or failures is below a specified threshold, the entire episode will not be marked as successful or failed. However, if a sequence of consecutive failures occurs, particularly between the first success and the first failure, this sequence is considered a failure pattern. This failure sequence will be used in future step reflections to avoid repeating the same failed actions. 41 | Episode reflection: After each episode, the overall success and failure experiences are summarized. If the episode was successful, the successful strategies will be summarized to guide decision-making in subsequent episodes. If the episode was a failure, the causes of failure will be analyzed, and different defense strategies will be applied in the next episode to avoid repeating the same failure sequence. 42 | A better strategy involves exploring different defense strategies during the initial episodes, ensuring a variety of action sequences while still maintaining successful defenses. This exploration minimizes the number of failed episodes and reduces resource consumption. In later episodes, the successful action sequences from previous episodes can be applied to improve decision-making and optimize defense strategies. 43 | """, 44 | }, 45 | { 46 | "role": "system", 47 | "content": """ 48 | In the decision phase, you need to assess the current service state to determine whether the service is in a critical condition and select a defense action, to either defend against the attacker's traffic or reduce resource consumption, ensuring the service operates normally. 49 | The service state, denoted as "state", is a 10x4 two-dimensional array representing the current service, which can have up to 10 "replicas". Each replica has four monitored status indicators: the number of "pods", the number of connections, the port number, and memory usage: 50 | The number of replica pods (state[:][0]) is an integer between 0 and 100. A value of 0 indicates the replica does not exist; otherwise, the replica is active. 51 | The number of connections (state[:][1]) is the total number of connections across all pods in the replica. Each pod can have between 0 and 256 connections, and a replica can have up to 100 pods, so the total number of connections ranges from 0 to 25600. 52 | The port number (state[:][2]) is an integer between 30000 and 32767. 53 | The memory usage (state[:][3]) is the total memory usage across all pods in the replica. Each pod can have memory usage between 0 and 100, and a replica can have up to 100 pods, so the total memory usage ranges from 0 to 10000. 54 | The defense action, denoted as "action", is an integer between 0 and 5, with each integer corresponding to a different MTD defense strategy. Some actions also include the connection load threshold "con_percent" and memory load threshold "mem_percent" parameters. Each action generates a certain resource consumption cost, denoted as "cost", defined as follows: 55 | Action 0: Port hopping, which reassigns the replica's port numbers to clear all attacker connections and memory usage across all replicas. This action has a resource cost of cost = 4. It is prioritized to minimize resource consumption, but if other strategies fail, this action should be considered. 56 | Action 1: Replica addition, which creates a copy of any replica with a connection load rate >= 0.8 or memory load rate >= 0.8. Half of the connections and memory usage are allocated to the new replica. This action is limited by the total number of replicas and has a resource cost of cost = 1. 57 | Action 2: Replica removal, which deletes any replica with both connection load rate <= 0.3 and memory load rate <= 0.3. The connections and memory usage of the removed replica are redistributed to other replicas to improve resource utilization. The number of replicas cannot be less than 1, and the resource cost is cost = 0. 58 | Action 3: Replica scaling, which increases the number of pods in any replica with a connection load rate >= 0.8 or memory load rate >= 0.8. After scaling, the connection load rate is reduced to below 0.8, or the memory load rate is reduced to below 0.8. This action is limited by the available number of pods and has a resource cost of cost = 1. 59 | Action 4: Replica shrinking, which reduces the number of pods in any replica with a connection load rate <= 0.3 and memory load rate <= 0.3. After shrinking, the connection load rate stays above 0.3, and the memory load rate stays above 0.3, improving resource utilization. This action has a resource cost of cost = 0. 60 | Action 5: No action, which maintains the current state without consuming resources. This action has a resource cost of cost = 0. 61 | "connection load rate" = replica connections / (replica pods * pod max connections). "memory load rate" = replica memory usage / (replica pods * pod max memory usage). Both load rates are float values between 0 and 1. 62 | In making a decision, you should first reason through the potential outcomes of applying a specific action, evaluating how the service state will change. Then, calculate the related indicators to check if the defense might fail or lead to excessive resource consumption. The action with the best match to the conditions should be selected. For example: 63 | If the service is in a critical state, such as the overall connection or memory load rates exceeding a specified threshold, action (Action 0, 1, 3) should be taken to defend against the attacker. Prioritize actions with lower resource costs while ensuring defense success. 64 | If the service is in an inefficient state, with replica connection and memory load rates both below a certain threshold, action (Action 2, 4) should be taken to reduce resource utilization. 65 | If the service is in a normal state, i.e., it does not meet the conditions for being in a critical or inefficient state, no action (Action 5) should be taken. 66 | """, 67 | }, 68 | { 69 | "role": "system", 70 | "content": """ 71 | In the judgment phase, the defender first executes the action. The execution may be successful or fail, for example, due to insufficient resources. 72 | If the execution is successful, it indicates that the selected action, con_percent, and mem_percent are valid. 73 | If the execution fails, it suggests that, under the current service state, choosing this action and parameters is ineffective, and a more appropriate action and parameters should be selected based on the service state next time. 74 | After executing the defense action, the service status evaluation indicators ("indicators") are obtained. You need to use these evaluation indicators to determine whether the defense of this step was successful, outputting the success status ("success") and the reason for failure ("desc"). 75 | The evaluation indicators are a data structure containing seven metrics, as follows: 76 | C_e: The inefficient service rate based on the number of connections (connection load rate < 0.3). It is calculated as C_e = Number of inefficient replicas / Total number of replicas. 77 | C_d: The dangerous service rate based on the number of connections (connection load rate > 0.9). It is calculated as C_d = Number of dangerous replicas / Total number of replicas. 78 | M_e: The inefficient service rate based on memory usage (memory load rate < 0.3). It is calculated as M_e = Number of inefficient replicas / Total number of replicas. 79 | M_d: The dangerous service rate based on memory usage (memory load rate > 0.9). It is calculated as M_d = Number of dangerous replicas / Total number of replicas. 80 | con_delay: The service delay based on the number of connections, i.e., the total number of service connections divided by the maximum possible connections for the service. In formula terms, con_delay = Sum of (replica pods * pod connections) across all replicas / Sum of (replica pods * pod max connections) across all replicas. 81 | mem_delay: The service delay based on memory usage, i.e., the total memory usage of the service divided by the maximum possible memory usage for the service. In formula terms, mem_delay = Sum of (replica pods * pod memory usage) across all replicas / Sum of (replica pods * pod max memory usage) across all replicas. 82 | cost: The resource consumption incurred by the defense action in the current step. 83 | The defense is considered a failure if any of the following conditions are met: 84 | C_d > 0 or M_d > 0.8 or con_delay > 0.8 or mem_delay > 0.8. 85 | Resource utilization evaluation indicators: 86 | While ensuring defense success, the objective is to minimize cost, C_e, and M_e, and also to keep con_delay and mem_delay as low as possible. 87 | """, 88 | }, 89 | { 90 | "role": "system", 91 | "content": """ 92 | In the Step reflection phase, the defender checks whether the current failed action sequence is a repetition of previously recorded failed action sequences. 93 | If it is not a repetition, the sequence is added to the list of failed action sequences. 94 | If it is a repetition, a warning is issued not to repeat the same failed actions and parameters in the current step. 95 | In the Episode reflection phase, the success information from the previous episode's attack-defense outcome, along with the action sequence "success_actions", is provided as input. 96 | The success_actions is a list containing multiple elements, each of which is an object that includes the "action", "defence_success" (whether the defense execution is successful), and "success" (whether the episode's defense is successful) attributes. 97 | If the previous episode's defense was successful, the successful experience can be summarized to guide the defense strategy in the current step. 98 | If the previous episode's defense failed, the cause of failure can be analyzed, and a different defense strategy can be adopted in the current step to avoid repeating the same failed action sequence. 99 | Additionally, in the earlier steps, the strategy should focus on exploration. During decision phase, actions should be chosen that differ from the sequences that were successful in previous steps, exploring whether there is a better action sequence to further reduce the number of failures in the current step. In later steps, successful action sequences from previous steps can be used to guide decision-making for the current step. 100 | """, 101 | }, 102 | ] 103 | self.episode = 0 104 | self.num_episodes = num_episodes 105 | self.explore_x = -1 106 | self.explore_rate = 0 107 | self.explore_base = 0.5 108 | self.best_ep_actions = None 109 | self.take_best_action = False 110 | self.prompts = [] 111 | self.actions = [] 112 | self.defence_successes = [] 113 | self.successes = [] 114 | self.ep_fail_actions = [] 115 | self.ep_success_actions = [] 116 | self.fail_num = 0 117 | self.success_num = 0 118 | self.max_fail_num = max_fail_num 119 | self.step_actions = [] 120 | self.step_fail_actions = [] 121 | 122 | def reset(self): 123 | self.prompts = [] 124 | self.fail_num = 0 125 | self.success_num = 0 126 | self.step_actions = [] 127 | self.step_fail_actions = [] 128 | self.take_best_action = False 129 | 130 | @retry(stop=stop_after_attempt(3)) 131 | def take_action(self, state, attack_indicators, step, action_thresholds): 132 | print("action") 133 | 134 | best_actions = None 135 | if self.take_best_action: 136 | best_actions = self.best_ep_actions["actions"] 137 | if step >= len(best_actions): 138 | best_actions = None 139 | if best_actions: 140 | print("best actions", best_actions) 141 | 142 | prompts = [ 143 | { 144 | "role": "user", 145 | "content": f"At the start of step {step}, the current defense service state is 'state': {str(state.tolist())}, and the action sequence taken in this step is 'cur_actions': {str(self.actions)}.", 146 | }, 147 | { 148 | "role": "assistant", 149 | "content": f"[Decision] In the current service state state, predict whether the load rates have reached the danger threshold. Which defense action 'action' should the defender take to successfully defend against the attack in this step, while minimizing resource utilization indicators? Please provide an explanation for the choice 'desc'. {'The optimal action to consider for this step is ' + str(best_actions[step % len(best_actions)]) if best_actions else ''}.", 150 | }, 151 | ] 152 | completion = self.client.beta.chat.completions.parse( 153 | model="gpt-4o-mini", 154 | messages=self.inital_prompts + self.prompts + prompts, 155 | response_format=Action, 156 | timeout=30, 157 | ) 158 | parsed = completion.choices[0].message.parsed 159 | action = parsed.action 160 | con_percent, mem_percent = action_thresholds[action] 161 | self.actions.append(action) 162 | prompts += [ 163 | { 164 | "role": "assistant", 165 | "content": f"The defense action to be taken in this step is {action}, with the connection load rate threshold set to {con_percent} and the memory usage threshold set to {mem_percent}. The reason for this decision is {parsed.desc}.", 166 | } 167 | ] 168 | self.prompts += prompts 169 | # return parsed.action, parsed.con_percent, parsed.mem_percent 170 | return action, con_percent, mem_percent 171 | 172 | def judge_fail(self, defence_state, defence_success, defence_fail_msg, indicators): 173 | print("judge_fail") 174 | success, fail_msg = judge_fail_func(indicators) 175 | self.defence_successes.append(defence_success) 176 | self.successes.append(success) 177 | 178 | if success: 179 | self.fail_num = 0 180 | self.success_num += 1 181 | else: 182 | self.success_num = 0 183 | self.fail_num += 1 184 | 185 | # whether the episode is finished 186 | finish = -1 187 | if self.fail_num >= self.max_fail_num: 188 | finish = 0 189 | if self.success_num >= self.max_fail_num: 190 | finish = 1 191 | 192 | prompts = [ 193 | { 194 | "role": "user", 195 | "content": f"The defense action was {'successful' if defence_success else ('failed, reason: ' + defence_fail_msg) + ' and other actions may be needed in the next step.'}. The service state after execution is 'defence_state': {str(defence_state.tolist())}, and the evaluation indicators obtained are 'indicators': {asdict(indicators)}.", 196 | }, 197 | { 198 | "role": "assistant", 199 | "content": "[Judgment] Was the defense successful or failed? If failed, what is the reason for failure?", 200 | }, 201 | ] 202 | prompts += [ 203 | { 204 | "role": "assistant", 205 | "content": f"The defense in this step was {'failed, reason for failure: ' + fail_msg if not success else 'successful'}.", 206 | } 207 | ] 208 | 209 | self.prompts += prompts 210 | return finish, success, fail_msg 211 | 212 | @retry(stop=stop_after_attempt(3)) 213 | def reflex_step(self, action, step): 214 | some_steps_fail = self.success_num == 0 215 | repeated_fail_actions = True 216 | if len(self.step_fail_actions) == 0: 217 | repeated_fail_actions = False 218 | self.step_actions.append(action) 219 | if some_steps_fail: 220 | cur_actions = self.step_actions.copy() 221 | for a in self.step_fail_actions: 222 | if a != self.step_actions: 223 | repeated_fail_actions = False 224 | break 225 | if not repeated_fail_actions: 226 | self.step_fail_actions.append(cur_actions) 227 | self.step_actions = [] 228 | 229 | if some_steps_fail: 230 | print("reflex certain steps") 231 | if repeated_fail_actions: 232 | prompt = f"[Step Reflection] The defenses in these steps ultimately ended in failure. The sequence of defense actions was {cur_actions}, which repeats a previous failed action sequence. All previous failed action sequences are {str(self.step_fail_actions)}. Be sure to avoid repeating the failed action sequences and make better defense decisions!" 233 | else: 234 | prompt = f"[Step Reflection] The defenses in these steps ultimately ended in failure. The sequence of defense actions was {cur_actions}. Please reflect on the failure experiences and try to improve the defense measures in the next rounds." 235 | 236 | prompts = [ 237 | { 238 | "role": "assistant", 239 | "content": prompt, 240 | }, 241 | ] 242 | 243 | completion = self.client.beta.chat.completions.parse( 244 | model="gpt-4o-mini", 245 | messages=self.inital_prompts + self.prompts + prompts, 246 | response_format=Reflex, 247 | timeout=30, 248 | ) 249 | parsed = completion.choices[0].message.parsed 250 | print("step fail actions", cur_actions, self.step_fail_actions) 251 | self.prompts += prompts 252 | 253 | @retry(stop=stop_after_attempt(3)) 254 | def reflex_ep(self, step_num, success, episode): 255 | print("reflex per episode") 256 | self.episode = episode 257 | fail_msg = ( 258 | "The consecutive defense failures have reached the specified threshold!" 259 | ) 260 | repeated_fail_actions = False 261 | repeated_success_actions = False 262 | if not success: 263 | for a in self.ep_fail_actions: 264 | if a["actions"] == self.actions: 265 | repeated_fail_actions = True 266 | break 267 | if not repeated_fail_actions: 268 | self.ep_fail_actions.append( 269 | {"actions": self.actions.copy(), "fail_reason": fail_msg} 270 | ) 271 | else: 272 | for a in self.ep_success_actions: 273 | if a["actions"] == self.actions: 274 | repeated_success_actions = True 275 | break 276 | if not repeated_success_actions: 277 | cur_ep_success_actions = { 278 | "actions": self.actions.copy(), 279 | "defence_successes": self.defence_successes, 280 | "successes": self.successes, 281 | } 282 | self.ep_success_actions.append(cur_ep_success_actions) 283 | 284 | for a in self.ep_success_actions: 285 | if not self.best_ep_actions or len(a["actions"]) < len( 286 | self.best_ep_actions["actions"] 287 | ): 288 | self.best_ep_actions = a 289 | 290 | zip_ep_success_actions = [ 291 | [ 292 | { 293 | "action": a["actions"][i], 294 | "defence_success": a["defence_successes"][i], 295 | "success": a["successes"][i], 296 | } 297 | for i in range(len(a["actions"])) 298 | ] 299 | for a in self.ep_success_actions 300 | ] 301 | 302 | x = ( 303 | pow(self.explore_base, episode + self.explore_x) 304 | if (episode + self.explore_x) > 0 305 | else 0 306 | ) 307 | self.explore_rate += x 308 | print("explore rate", episode, self.explore_rate) 309 | best_actions = None 310 | if self.best_ep_actions and (random.random() < self.explore_rate): 311 | best_actions = self.best_ep_actions["actions"] 312 | self.take_best_action = True 313 | else: 314 | self.take_best_action = False 315 | 316 | print("ep success actions", zip_ep_success_actions) 317 | 318 | prompts = [ 319 | { 320 | "role": "user", 321 | "content": f"The attack-defense cycle in the previous episode has ended, with a total of {step_num} steps. The defense was {'successful' if success else ('failed, reason for failure: ' + fail_msg + '. The failed action sequence was: ' + str(self.actions))}.", 322 | }, 323 | { 324 | "role": "user", 325 | "content": f"In the previous episode, { 'the failed actions from earlier episodes were repeated' if repeated_fail_actions else 'the failed actions from earlier episodes were not repeated' }. So far, the list of all failed defense actions across episodes is 'fail_actions': {str(self.ep_fail_actions)}. Please avoid repeating these actions in the current episode! Additionally, the list of all successful defense actions across episodes is 'success_actions': {str(zip_ep_success_actions)}.", 326 | }, 327 | { 328 | "role": "assistant", 329 | "content": f"[Episode Reflection] After reflecting on the previous episode of the attack-defense process, we are currently in episode {episode}. {'In this episode, please plan a sequence of actions different from the previously successful action sequences and explore whether there is a better action sequence to achieve defense success in fewer steps.' if best_actions else ('In this episode, you can directly choose the optimal action sequence from previous episodes! The optimal action sequence to consider is: ' + str(best_actions))}.", 330 | }, 331 | ] 332 | completion = self.client.beta.chat.completions.parse( 333 | model="gpt-4o-mini", 334 | messages=self.inital_prompts + self.prompts + prompts, 335 | response_format=Reflex, 336 | timeout=30, 337 | ) 338 | parsed = completion.choices[0].message.parsed 339 | 340 | prompts += [ 341 | { 342 | "role": "assistant", 343 | "content": f"Defense experience from the previous episode is:{parsed.desc}", 344 | } 345 | ] 346 | self.actions = [] 347 | self.successes = [] 348 | self.defence_successes = [] 349 | self.prompts += prompts 350 | 351 | 352 | def train_and_test( 353 | env, 354 | num_episodes, 355 | attack_sequence, 356 | max_fail_num, 357 | max_episode_step=30, 358 | enable_log=True, 359 | prefix="default", 360 | change_num=0, 361 | ): 362 | timestamp = time.strftime("%Y%m%d-%H%M%S") 363 | title = ( 364 | env.attacker.type.value 365 | + "-" 366 | + str(env.attacker.num) 367 | + "-" 368 | + str(num_episodes) 369 | + "-" 370 | + str(change_num) 371 | + "-" 372 | + "(" 373 | + timestamp 374 | + ")" 375 | ) 376 | if enable_log: 377 | logger = LLMLogger(prefix, title) 378 | 379 | agent = LLM(num_episodes, max_fail_num) 380 | 381 | for_step = 0 382 | for_episode_success = False 383 | 384 | survival_rate = [] 385 | convergence_episode = max_fail_num 386 | success_list = [] 387 | step_num_list = [] 388 | 389 | for episode in range(num_episodes): 390 | finish = -1 391 | step = 0 392 | attack_len = len(attack_sequence) 393 | max_steps = attack_len 394 | state = env.reset() 395 | agent.reset() 396 | 397 | txt_datas = [] 398 | episode_success = [] 399 | 400 | if episode != 0: 401 | agent.reflex_ep(for_step, for_episode_success, episode) 402 | elif change_num != 0 and episode == num_episodes - 1: 403 | env.change_attacker_num(change_num) 404 | 405 | with tqdm(total=max_steps, desc=f"iteration {episode}") as pbar: 406 | while finish == -1: 407 | print(f"\nstep {step}") 408 | do_attack = attack_sequence[step % attack_len] 409 | action_thresholds = get_action_thresholds(env.attacker.type) 410 | attack_indicators = env.cal_indicators(state) 411 | action, con_percent, mem_percent = agent.take_action( 412 | state, attack_indicators, step, action_thresholds 413 | ) 414 | print( 415 | "action_msg", 416 | action, 417 | con_percent, 418 | mem_percent, 419 | attack_indicators, 420 | ) 421 | 422 | ( 423 | next_state, 424 | defence_state, 425 | defence_success, 426 | defence_fail_msg, 427 | defence_cost, 428 | ) = env.step( 429 | action, 430 | {"con_percent": con_percent, "mem_percent": mem_percent}, 431 | do_attack, 432 | ) 433 | defence_indicators = env.cal_indicators(defence_state, defence_cost) 434 | finish, success, fail_msg = agent.judge_fail( 435 | defence_state, defence_success, defence_fail_msg, defence_indicators 436 | ) 437 | print( 438 | "defence_msg", 439 | defence_success, 440 | defence_fail_msg, 441 | asdict(defence_indicators), 442 | ) 443 | 444 | episode_success.append(success) 445 | 446 | agent.reflex_step(action, step) 447 | 448 | step += 1 449 | max_steps = max(max_steps, step) 450 | state = next_state 451 | 452 | if step >= max_episode_step: 453 | finish = 0 454 | success = 0 455 | fail_msg = ( 456 | f"The defense was unsuccessful after {max_episode_step} steps!" 457 | ) 458 | 459 | print("reflex msg", finish, success, fail_msg) 460 | 461 | txt_datas.append( 462 | { 463 | "action": [action, con_percent, mem_percent], 464 | "indicators": [ 465 | asdict(attack_indicators), 466 | asdict(defence_indicators), 467 | ], 468 | "defence_msg": [defence_success, defence_fail_msg], 469 | "success": [success, fail_msg], 470 | } 471 | ) 472 | 473 | pbar.set_postfix( 474 | { 475 | "episode": step, 476 | "return": "%.3f" % (success / max_steps), 477 | } 478 | ) 479 | pbar.update(1) 480 | if max_steps != attack_len: 481 | pbar.total = max_steps 482 | pbar.refresh() 483 | 484 | for_step = step 485 | for_episode_success = finish == 1 486 | 487 | survival_rate.append(sum(episode_success) / len(episode_success)) 488 | if step == max_fail_num and convergence_episode == max_fail_num: 489 | convergence_episode = episode 490 | success_list.append(for_episode_success) 491 | step_num_list.append(for_step) 492 | 493 | print( 494 | f"The {episode} episode has ended, with a total of {for_step} attack-defense cycles. The defense in this episode was {'successful' if for_episode_success else 'failed'}." 495 | ) 496 | 497 | if enable_log: 498 | logger.write_txt(episode, txt_datas) 499 | logger.write_prompts(episode, agent.prompts) 500 | 501 | if enable_log: 502 | logger.write_log( 503 | num_episodes, 504 | survival_rate, 505 | convergence_episode, 506 | success_list, 507 | step_num_list, 508 | ) 509 | logger.close() 510 | 511 | return ( 512 | survival_rate, 513 | convergence_episode, 514 | success_list, 515 | step_num_list, 516 | ) 517 | --------------------------------------------------------------------------------