├── .gitignore
├── requirements.txt
├── subset_traces.py
├── README.md
├── attack.py
└── attack_multi.py

/.gitignore:
--------------------------------------------------------------------------------
1 | ve
2 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | contourpy==1.1.0
2 | cycler==0.11.0
3 | fonttools==4.42.1
4 | h5py==3.9.0
5 | kiwisolver==1.4.5
6 | matplotlib==3.7.2
7 | numpy==1.25.2
8 | packaging==23.1
9 | Pillow==10.0.0
10 | py-cpuinfo==9.0.0
11 | pyparsing==3.0.9
12 | python-dateutil==2.8.2
13 | scalib==0.5.6
14 | six==1.16.0
15 | tqdm==4.66.1
16 | 
--------------------------------------------------------------------------------
/subset_traces.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | 
3 | import argparse
4 | 
5 | import h5py
6 | 
7 | parser = argparse.ArgumentParser(
8 |     description=
9 |     "Export a subset of the raw ASCAD traces into a new file. "
10 |     "Useful for fast iteration on small attacks. "
11 |     "The exported file has a structure identical to the original file."
12 | )
13 | parser.add_argument(
14 |     "-n",
15 |     "--ntraces",
16 |     type=int,
17 |     help=
18 |     "Number of traces in the exported dataset "
19 |     "(always takes the first n traces).",
20 |     required=True,
21 | )
22 | parser.add_argument(
23 |     "-w",
24 |     "--window",
25 |     type=str,
26 |     default="0,250000",
27 |     help="'start,end' window for taking only a part of the traces (default: whole trace).",
28 | )
29 | parser.add_argument(
30 |     "input",
31 |     type=str,
32 |     help="Location of the 'raw traces' ASCAD database file.",
33 | )
34 | parser.add_argument(
35 |     "output",
36 |     type=str,
37 |     help="Location of the resulting dataset file.",
38 | )
39 | args = parser.parse_args()
40 | 
41 | W_START, W_END = [int(x.strip()) for x in args.window.split(",")]
42 | 
43 | f_database = h5py.File(args.input, "r")
44 | 
45 | l = args.ntraces
46 | 
47 | traces = f_database["traces"][:l,W_START:W_END]
48 | masks = f_database["metadata"]["masks"][:l,:]
49 | key = f_database["metadata"]["key"][:l,:]
50 | plaintext = f_database["metadata"]["plaintext"][:l,:]
51 | 
52 | with h5py.File(args.output, "w") as f:
53 |     f.create_dataset("traces", data=traces)
54 |     md = f.create_group("metadata")
55 |     md.create_dataset("masks", data=masks)
56 |     md.create_dataset("key", data=key)
57 |     md.create_dataset("plaintext", data=plaintext)
58 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Attacking ASCAD with a Single Trace
2 | 
3 | This repository contains the scripts associated with the paper [Give Me 5 Minutes:
4 | Attacking ASCAD with a Single Side-Channel
5 | Trace](https://eprint.iacr.org/2021/817).
6 | 
7 | ## Installation
8 | 
9 | This repository contains simple Python scripts, with known-good
10 | dependencies specified in `requirements.txt` (for Python 3.10).
11 | A recent version of pip is needed if you want to install the pre-built version
12 | of SCALib, otherwise pip will try to recompile SCALib from scratch (which will
13 | fail if you don't have the build dependencies installed).
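If you want pip to fail early instead of silently attempting a source build of SCALib, forcing a binary wheel with pip's `--only-binary` option should also work (an optional variant of the install commands below):
```
pip install --only-binary scalib -r requirements.txt
```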
14 | 
15 | We suggest using a virtual environment:
16 | ```
17 | git clone https://github.com/cassiersg/ASCAD-5minutes.git
18 | cd ASCAD-5minutes
19 | python3 -m venv ve
20 | 
21 | # On Linux
22 | source ve/bin/activate
23 | # On Windows
24 | ve\Scripts\activate
25 | 
26 | pip install -U pip # to get recent pip
27 | pip install -r requirements.txt
28 | ```
29 | 
30 | If the provided `requirements.txt` doesn't work, the direct dependencies are `matplotlib h5py scalib tqdm`.
31 | 
32 | The ASCAD database file used for the attack can be found at https://static.data.gouv.fr/resources/ascad-atmega-8515-variable-key/20190730-071646/atmega8515-raw-traces.h5 .
33 | 
34 | 
35 | ## Usage
36 | 
37 | ### attack.py
38 | 
39 | This is a simple attack script which minimally reproduces our attack and should
40 | be fairly easy to read or modify.
41 | To run with the default settings:
42 | ```
43 | python3 attack.py --database /path/to/ASCAD/raw/traces/atmega8515-raw-traces.h5
44 | ```
45 | 
46 | For other attack settings:
47 | ```
48 | python3 attack.py --help
49 | ```
50 | 
51 | ### attack_multi.py
52 | 
53 | This script performs the same attacks as `attack.py`, but stores intermediate
54 | results, runs multiple attacks in parallel, and produces success rate plots.
55 | 
56 | Commands used to generate the success rate figures in the paper:
57 | 
58 | Full trace and full key:
59 | ```
60 | python3 attack_multi.py --database ./atmega8515-raw-traces.h5 --fast-snr --ntracesattack 1 --averageattack 1000 --poi 32,64,128,256,512,1024,2048 --dim 1,2,3,4,5,6,7,8,9,10,15,20 --show snr,model,attack,sr-map
61 | ```
62 | 
63 | Partial trace and one byte of key:
64 | ```
65 | python3 attack_multi.py --database ./atmega8515-raw-traces.h5 --fast-snr --ntracesattack 1,2,4,8,16,32 --nbytes 1 --averageattack 1000 --ntracesprofile 20000 --window 80945,82345 --show snr,model,attack,sr-boxplot
66 | ```
67 | 
68 | For all attack settings:
69 | ```
70 | python3 attack_multi.py --help
71 | ```
72 | 
73 | ### subset_traces.py
74 | 
75 | A utility script to make smaller subsets of the ASCAD database, which load
76 | faster and thus speed up experimentation.
77 | ```
78 | python3 subset_traces.py --help
79 | ```
80 | 
81 | ## License
82 | 
83 | MIT
84 | 
--------------------------------------------------------------------------------
/attack.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python3
2 | # Copyright 2021 UCLouvain
3 | #
4 | # Permission to use, copy, modify, and/or distribute this software for any
5 | # purpose with or without fee is hereby granted.
6 | #
7 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
8 | # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
9 | # AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
10 | # INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
11 | # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
12 | # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
13 | # PERFORMANCE OF THIS SOFTWARE.
14 | 
15 | # Attack on the ASCAD database.
16 | # This performs a side-channel based key-recovery of the masked implementation,
17 | # based on traces from the public ASCAD database.
18 | # The attack is based on the tools provided by the SCALib library (SNR
19 | # computation for POI selection, LDA/gaussian templates modelling and SASCA).
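# Concretely: (1) compute_snr() estimates a per-sample SNR for every target
# variable, (2) compute_templates() keeps the highest-SNR samples as POIs and
# fits LDA projections plus gaussian templates with scalib.modeling.MultiLDA,
# (3) attack() runs belief propagation on a small SASCA factor graph for each
# key byte, and (4) run_attack_eval() estimates the rank of the true key with
# scalib.postprocessing.rank_accuracy.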
20 | # See: 21 | # https://github.com/ANSSI-FR/ASCAD 22 | # https://github.com/simple-crypto/SCALib/ 23 | 24 | import argparse 25 | import copy 26 | import collections 27 | import functools as ft 28 | 29 | import h5py 30 | import numpy as np 31 | from scalib.metrics import SNR 32 | import scalib.modeling 33 | import scalib.attacks 34 | import scalib.postprocessing 35 | from tqdm import tqdm 36 | 37 | class Settings: 38 | """Command-line settings (hashable object).""" 39 | pass 40 | 41 | def parse_args(): 42 | parser = argparse.ArgumentParser( 43 | description="Attack against the ASCAD dataset." 44 | ) 45 | parser.add_argument( 46 | "--attacks", 47 | type=int, 48 | default=100, 49 | help="Number of attack runs (default: %(default)s).", 50 | ) 51 | parser.add_argument( 52 | "--profile", 53 | type=int, 54 | default=5000, 55 | help="Number of traces used for profiling (default: %(default)s).", 56 | ) 57 | parser.add_argument( 58 | "--poi", 59 | type=int, 60 | default=512, 61 | help="Number of POIs for each variable (default: %(default)s).", 62 | ) 63 | parser.add_argument( 64 | "--dim", 65 | type=int, 66 | default=8, 67 | help="Dimensionality of projected space for LDA (default: %(default)s).", 68 | ) 69 | parser.add_argument( 70 | "--database", 71 | type=str, 72 | default="./atmega8515-raw-traces.h5", 73 | help="Location of the 'raw traces' ASCAD file (default: %(default)s).", 74 | ) 75 | return parser.parse_args(namespace=Settings()) 76 | 77 | # number of bytes to attack 78 | NBYTES = 14 79 | def target_variables(byte): 80 | """variables that will be profiled""" 81 | return ["rin", "rout"] + [ 82 | f"{base}_{byte}" for base in ("x0", "x1", "xrin", "yrout", "y0", "y1") 83 | ] 84 | # fmt: off 85 | SBOX = np.array( 86 | [ 87 | 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 88 | 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 89 | 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 90 | 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 91 | 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 92 | 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 93 | 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 94 | 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 95 | 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 96 | 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 97 | 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 98 | 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 99 | 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 100 | 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 101 | 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 102 | 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 103 | 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 104 | 0x16, 105 | ], 106 | dtype=np.uint16, 107 | ) 108 | # fmt: on 109 | 110 | 111 | @ft.lru_cache(maxsize=None) 112 | def load_database(settings): 113 | return 
h5py.File(settings.database, "r") 114 | 115 | def var_labels(key, plaintext, masks, rin, rout): 116 | "Compute value of variables of interest based on ASCAD metadata." 117 | x0 = key ^ plaintext ^ masks 118 | x1 = masks 119 | xrin = ((key ^ plaintext).T ^ rin).T 120 | y0 = SBOX[key ^ plaintext] ^ masks 121 | y1 = masks 122 | yrout = (SBOX[(key ^ plaintext).T] ^ rout).T 123 | labels = {} 124 | for i in range(14): 125 | labels[f"k_{i}"] = key[:, i] 126 | labels[f"p_{i}"] = plaintext[:, i] 127 | labels[f"x0_{i}"] = x0[:, i] 128 | labels[f"x1_{i}"] = x1[:, i] 129 | labels[f"y0_{i}"] = y0[:, i] 130 | labels[f"y1_{i}"] = y1[:, i] 131 | labels[f"xrin_{i}"] = xrin[:, i] 132 | labels[f"yrout_{i}"] = yrout[:, i] 133 | labels[f"rout"] = rout[:] 134 | labels[f"rin"] = rin[:] 135 | return labels 136 | 137 | @ft.lru_cache(maxsize=None) 138 | def get_traces(settings, start, l): 139 | """Load traces and labels from ASCAD database.""" 140 | I = np.arange(start, start + l) 141 | f_database = load_database(settings) 142 | traces = f_database["traces"][start : start + l, :].astype(np.int16) 143 | key = f_database["metadata"]["key"][I, 2:].astype(np.uint16) 144 | plaintext = f_database["metadata"]["plaintext"][I, 2:].astype(np.uint16) 145 | masks = f_database["metadata"]["masks"][I, 2:16].astype(np.uint16) 146 | rin = f_database["metadata"]["masks"][I, 16].astype(np.uint16) 147 | rout = f_database["metadata"]["masks"][I, 17].astype(np.uint16) 148 | labels = var_labels(key, plaintext, masks, rin, rout) 149 | return traces, labels 150 | 151 | 152 | def compute_snr(settings): 153 | """Returns the SNR of the traces samples for each target variable.""" 154 | snrs = {v: dict() for i in range(NBYTES) for v in target_variables(i)} 155 | traces, labels = get_traces(settings, start=0, l=settings.profile) 156 | for v, m in tqdm(snrs.items(), total=len(snrs), desc="SNR Variables"): 157 | snr = SNR(np=1, nc=256, ns=traces.shape[1]) 158 | x = labels[v].reshape((settings.profile, 1)) 159 | # Note: if the traces do not fit in RAM, you can call multiple times fit_u 160 | # on the same SNR object to do incremental SNR computation. 
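# A commented sketch of that incremental use (in a real out-of-RAM setting
# each chunk would be loaded from the HDF5 file rather than sliced from the
# in-memory `traces` array):
#   for idx in np.array_split(np.arange(traces.shape[0]), 10):
#       snr.fit_u(traces[idx, :], x[idx, :])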
161 |             snr.fit_u(traces, x)
162 |             m["SNR"] = snr.get_snr()[0, :]
163 |             # Avoid NaN in case of scope over-range
164 |             np.nan_to_num(m["SNR"], nan=0.0, copy=False)
165 |     return snrs
166 | 
167 | def compute_templates(settings, snrs):
168 |     """Compute the POIs, LDA and gaussian template for all variables."""
169 |     models = dict()
170 |     # Select POIs
171 |     for k, m in snrs.items():
172 |         poi = np.argsort(m["SNR"])[-settings.poi:].astype(np.uint32)
173 |         poi.sort()
174 |         models[k] = {"poi": poi}
175 |     traces, labels = get_traces(settings, start=0, l=settings.profile)
176 |     vs = list(models.keys())
177 |     mlda = scalib.modeling.MultiLDA(
178 |         ncs=len(models) * [256],
179 |         ps=len(models) * [settings.dim],
180 |         pois=[models[v]["poi"] for v in vs],
181 |     )
182 |     x = np.array([labels[v] for v in vs]).transpose()
183 |     mlda.fit_u(traces, x)
184 |     mlda.solve()
185 |     for lda, v in zip(mlda.ldas, vs):
186 |         models[v]["lda"] = lda
187 |     return models
188 | 
189 | SASCA_GRAPH = """
190 | NC 256
191 | TABLE sbox
192 | 
193 | VAR MULTI x0
194 | VAR MULTI x1
195 | VAR MULTI x
196 | VAR MULTI xp
197 | VAR MULTI xrin
198 | VAR MULTI rout
199 | VAR MULTI rin
200 | 
201 | VAR MULTI y0
202 | VAR MULTI y1
203 | VAR MULTI y
204 | VAR MULTI yp
205 | VAR MULTI yrout
206 | 
207 | VAR MULTI p
208 | VAR SINGLE k
209 | 
210 | PROPERTY x = p ^ k
211 | PROPERTY x = x0 ^ x1
212 | PROPERTY x = rin ^ xrin
213 | 
214 | PROPERTY y = sbox[x]
215 | PROPERTY y = y0 ^ y1
216 | PROPERTY y = rout ^ yrout
217 | """
218 | 
219 | @ft.lru_cache(maxsize=None)
220 | def sasca_graph():
221 |     sasca = scalib.attacks.SASCAGraph(SASCA_GRAPH, n=1)
222 |     sasca.set_table("sbox", SBOX.astype(np.uint32))
223 |     return sasca
224 | 
225 | def attack(traces, labels, models):
226 |     """Run a SASCA attack on the given traces and evaluate its performance.
227 |     Returns the true key and the byte-wise key distribution estimated by the attack.
228 |     """
229 |     # correct secret key
230 |     secret_key = [labels[f"k_{i}"][0] for i in range(NBYTES)]
231 |     # distribution for each of the key bytes
232 |     key_distribution = []
233 |     # Run a SASCA for each S-Box
234 |     for i in range(NBYTES):
235 |         sasca = copy.deepcopy(sasca_graph())
236 |         # Set the labels for the plaintext byte
237 |         sasca.set_public(f"p", labels[f"p_{i}"].astype(np.uint32))
238 |         for var in target_variables(i):
239 |             model = models[var]
240 |             prs = model["lda"].predict_proba(traces[:, model["poi"]])
241 |             sasca.set_init_distribution(var.split('_')[0], prs)
242 |         sasca.run_bp(it=3)
243 |         distribution = sasca.get_distribution(f"k")[0, :]
244 |         key_distribution.append(distribution)
245 |     key_distribution = np.array(key_distribution)
246 |     return secret_key, key_distribution
247 | 
248 | def run_attack_eval(traces, labels, models):
249 |     """Run a SASCA attack on the given traces and evaluate its performance.
250 |     Returns the log2 of the rank of the true key.
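    (A log2 rank of 0 means the true key is ranked first; the maximum possible
    value is 8*NBYTES = 112.)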
251 |     """
252 |     secret_key, key_distribution = attack(traces, labels, models)
253 |     rmin, r, rmax = scalib.postprocessing.rank_accuracy(
254 |         -np.log2(key_distribution), secret_key, max_nb_bin=2**20
255 |     )
256 |     lrmin, lr, lrmax = (np.log2(rmin), np.log2(r), np.log2(rmax))
257 |     return lr
258 | 
259 | def run_attacks_eval(settings, models):
260 |     """Return the list of the rank of the true key for each attack."""
261 |     # Offset in traces to not attack the training traces
262 |     traces, labels = get_traces(settings, start=settings.profile, l=settings.attacks)
263 |     return 2**np.array(list(tqdm(map(
264 |         lambda a: run_attack_eval(
265 |             traces[a:a+1,:],
266 |             {k: val[a:a+1] for k, val in labels.items()},
267 |             models
268 |         ),
269 |         range(settings.attacks),
270 |         ),
271 |         total=settings.attacks,
272 |         desc="attacks",
273 |     )))
274 | 
275 | def success_rate(ranks, min_rank=1):
276 |     return np.sum(ranks <= min_rank) / ranks.size
277 | 
278 | if __name__ == "__main__":
279 |     settings = parse_args()
280 |     print("Start SNR estimation")
281 |     snr = compute_snr(settings)
282 |     print("Start modeling")
283 |     models = compute_templates(settings, snr)
284 |     print("Start attack")
285 |     ranks = run_attacks_eval(settings, models)
286 |     print('Attack ranks', collections.Counter(ranks))
287 |     print(f'Success rate (rank 1): {success_rate(ranks, min_rank=1)*100:.0f}%')
288 |     print(f'Success rate (rank 2**32): {success_rate(ranks, min_rank=2**32)*100:.0f}%')
289 | 
290 | 
--------------------------------------------------------------------------------
/attack_multi.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2021 UCLouvain
3 | #
4 | # Permission to use, copy, modify, and/or distribute this software for any
5 | # purpose with or without fee is hereby granted.
6 | #
7 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
8 | # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
9 | # AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
10 | # INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
11 | # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
12 | # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
13 | # PERFORMANCE OF THIS SOFTWARE.
14 | 
15 | # Attack on the ASCAD database, multiple attacks and caching version.
16 | # See attack.py for a simpler script.
17 | 
18 | import argparse
19 | import copy
20 | from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
21 | import itertools as it
22 | import functools as ft
23 | import os
24 | import pickle
25 | 
26 | import h5py
27 | import matplotlib.pyplot as plt
28 | import numpy as np
29 | from scalib.metrics import SNR
30 | import scalib.modeling
31 | import scalib.attacks
32 | import scalib.postprocessing
33 | from tqdm import tqdm
34 | 
35 | def parse_args():
36 |     "Parse command line arguments."
37 |     parser = argparse.ArgumentParser(
38 |         description=
39 |         "Attack against the ASCAD dataset. "
40 |         "Runs attacks with many parameter sets at once by making a grid "
41 |         "of all given parameter combinations. "
42 |         "Parameters that can have multiple values are number of attack "
43 |         "traces, number of profiling traces, number of POIs, LDA output "
44 |         "dimension."
45 | ) 46 | parser.add_argument( 47 | "--window", 48 | type=str, 49 | default="0,250000", 50 | help="'start,end' window for taking only a part of the traces (default: whole trace).", 51 | ) 52 | parser.add_argument( 53 | "--ntracesattack", 54 | type=str, 55 | default="1,2", 56 | help= 57 | "Number of traces for the attack. " 58 | "Must be integers separated with comma (default: %(default)s).", 59 | ) 60 | parser.add_argument( 61 | "--averageattack", 62 | type=int, 63 | default=100, 64 | help="Number of runs for each attack parameter set (default: %(default)s).", 65 | ) 66 | parser.add_argument( 67 | "--nbytes", 68 | type=int, 69 | default=14, 70 | help="Number of bytes in the attack (1 to 14, default: %(default)s).", 71 | ) 72 | strategy_help = """Which variables to attack (default=1): 73 | 1: all 74 | 2: only s-box input shares (before re-masking) 75 | 3: only s-box input (re-masked with common rin mask) 76 | 4: only s-box output shares (with common mask) 77 | 5: only s-box output shares (after re-masking) 78 | 6: all except s-box input before re-masking""" 79 | parser.add_argument( 80 | "--strategy", 81 | type=int, 82 | default=1, 83 | help=strategy_help, 84 | ) 85 | parser.add_argument( 86 | "--ntracesprofile", 87 | type=str, 88 | default='5000', 89 | help="Number of traces used for profiling " 90 | "(comma-separated ints, default: %(default)s).", 91 | ) 92 | parser.add_argument( 93 | "--poi", 94 | type=str, 95 | default='512', 96 | help="Number of POIs for each variable " 97 | "(comma-separated ints, default: %(default)s).", 98 | ) 99 | parser.add_argument( 100 | "--dim", 101 | type=str, 102 | default='8', 103 | help="Dimensionality of projected space for LDA " 104 | "(comma-separated ints, default: %(default)s).", 105 | ) 106 | parser.add_argument( 107 | "--database", 108 | type=str, 109 | default="atmega8515-raw-traces.h5", 110 | help="Location of the 'raw traces' ASCAD database file (default: %(default)s).", 111 | ) 112 | parser.add_argument( 113 | "--store-dir", 114 | type=str, 115 | default="./res_store", 116 | help="Location of the result store directory (default: %(default)s).", 117 | ) 118 | parser.add_argument( 119 | "--fast-snr", 120 | action="store_true", 121 | help="Use a faster SNR computation at the expense of large RAM usage (default: false).", 122 | ) 123 | parser.add_argument( 124 | "--show", 125 | dest="show", 126 | action="store_true", 127 | help="Display the result figures (default: don't show).", 128 | ) 129 | parser.add_argument( 130 | "computations", 131 | type=str, 132 | help= 133 | f"Which computations to perform: {', '.join(Settings.all_computations)} and/or all. " 134 | "Computations which are not performed are loaded from the result store." 
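        # Typically "snr,model,attack,sr-map" (as in the README examples) or "all".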
135 |     )
136 |     return parser.parse_args(namespace=Settings())
137 | 
138 | class Settings:
139 |     all_computations = {"snr", "model", "attack", "sr-boxplot", "sr-map"}
140 |     def parse_settings(self):
141 |         self.w_start, self.w_end = [int(x.strip()) for x in self.window.split(",")]
142 |         self.window = np.arange(self.w_start, self.w_end, dtype=np.int64)
143 |         self.ns = len(self.window)
144 |         self.ntraces_attacks = [int(x.strip()) for x in self.ntracesattack.split(",")]
145 |         # var to include in graph
146 |         included_variables = {
147 |             1: ["x0", "x1", "rin", "xrin", "yrout", "rout", "y0", "y1"],
148 |             2: ["x0", "x1"],
149 |             3: ["rin", "xrin"],
150 |             4: ["yrout", "rout"],
151 |             5: ["y0", "y1"],
152 |             6: ["rin", "xrin", "yrout", "rout", "y0", "y1"],
153 |         }
154 |         self.included_variables = included_variables.get(self.strategy)
155 |         if self.included_variables is None:
156 |             raise ValueError(f"Unknown strategy {self.strategy}.")
157 |         self.ntraces_profiles = [int(x.strip()) for x in self.ntracesprofile.split(",")]
158 |         self.npois = [int(x.strip()) for x in self.poi.split(",")]
159 |         self.lda_dims = [int(x.strip()) for x in self.dim.split(",")]
160 |         # Storage directory
161 |         os.makedirs(self.store_dir, exist_ok=True)
162 |         self.computations = set(x.strip().lower() for x in self.computations.split(","))
163 |         for x in self.computations - self.all_computations - {"all"}:
164 |             raise ValueError(f"Computation '{x}' not supported.")
165 |         if "all" in self.computations:
166 |             self.computations = self.all_computations
167 | 
168 |     def snr_suffix(self, ntraces_profile):
169 |         return f"{self.w_start}_{self.w_end}_p{ntraces_profile}"
170 | 
171 |     def snr_file(self, ntraces_profile):
172 |         return os.path.join(self.store_dir, f"snr_{self.snr_suffix(ntraces_profile)}.pkl")
173 | 
174 |     def model_suffix(self, ntraces_profile, npoi, lda_dim):
175 |         return f"{self.snr_suffix(ntraces_profile)}_poi{npoi}_dim{lda_dim}"
176 | 
177 |     def model_file(self, ntraces_profile, npoi, lda_dim):
178 |         return os.path.join(
179 |             self.store_dir,
180 |             f"models_{self.model_suffix(ntraces_profile, npoi, lda_dim)}.pkl"
181 |         )
182 | 
183 |     def attack_suffix(self, ntraces_profile, ntraces_attack, npoi, lda_dim):
184 |         return "{ms}_{nb}_s{strat}_a{av}_na{ntra}".format(
185 |             ms=self.model_suffix(ntraces_profile, npoi, lda_dim),
186 |             nb=self.nbytes,
187 |             strat=self.strategy,
188 |             av=self.averageattack,
189 |             ntra=ntraces_attack,
190 |         )
191 | 
192 |     def attack_file(self, *args):
193 |         return os.path.join(self.store_dir,
194 |             f"attack_{self.attack_suffix(*args)}.pkl")
195 | 
196 |     def variables(self):
197 |         # All shares labels
198 |         return [
199 |             f"{base}_{x}"
200 |             for base in ("x0", "x1", "xrin", "yrout", "y0", "y1")
201 |             for x in range(self.nbytes)
202 |         ] + ["rin", "rout"]
203 | 
204 | # fmt: off
205 | SBOX = np.array(
206 |     [
207 |         0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB,
208 |         0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4,
209 |         0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71,
210 |         0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2,
211 |         0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6,
212 |         0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB,
213 |         0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45,
214 |         0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51,
0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 215 | 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 216 | 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 217 | 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 218 | 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 219 | 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 220 | 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 221 | 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 222 | 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 223 | 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 224 | 0x16, 225 | ], 226 | dtype=np.uint16, 227 | ) 228 | # fmt: on 229 | 230 | @ft.lru_cache(maxsize=None) 231 | def load_database(settings): 232 | return h5py.File(settings.database, "r") 233 | 234 | def var_labels(key, plaintext, masks, rin, rout): 235 | "Compute value of variables of interest based on ASCAD metadata." 236 | x0 = key ^ plaintext ^ masks 237 | x1 = masks 238 | xrin = ((key ^ plaintext).T ^ rin).T 239 | y0 = SBOX[key ^ plaintext] ^ masks 240 | y1 = masks 241 | yrout = (SBOX[(key ^ plaintext).T] ^ rout).T 242 | labels = {} 243 | for i in range(14): 244 | labels[f"k_{i}"] = key[:, i] 245 | labels[f"p_{i}"] = plaintext[:, i] 246 | labels[f"x0_{i}"] = x0[:, i] 247 | labels[f"x1_{i}"] = x1[:, i] 248 | labels[f"y0_{i}"] = y0[:, i] 249 | labels[f"y1_{i}"] = y1[:, i] 250 | labels[f"xrin_{i}"] = xrin[:, i] 251 | labels[f"yrout_{i}"] = yrout[:, i] 252 | labels[f"rout"] = rout[:] 253 | labels[f"rin"] = rin[:] 254 | return labels 255 | 256 | @ft.lru_cache(maxsize=None) 257 | def get_traces(settings, start, l, fixed_key=False): 258 | """Load traces and labels from ASCAD database. 
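    start, l: index of the first trace and the number of traces to load.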
259 | 
260 |     fixed_key: transform a variable key dataset into a fixed key one by
261 |         exploiting key^plaintext leakage invariance (for multi-trace attacks)
262 |     """
263 |     I = np.arange(start, start + l)
264 |     f_database = load_database(settings)
265 |     traces = f_database["traces"][start : start + l, settings.window].astype(np.int16)
266 |     key = f_database["metadata"]["key"][I, 2:].astype(np.uint16)
267 |     plaintext = f_database["metadata"]["plaintext"][I, 2:].astype(np.uint16)
268 |     masks = f_database["metadata"]["masks"][I, 2:16].astype(np.uint16)
269 |     rin = f_database["metadata"]["masks"][I, 16].astype(np.uint16)
270 |     rout = f_database["metadata"]["masks"][I, 17].astype(np.uint16)
271 |     if fixed_key:
272 |         k = np.random.randint(0, 256, 14, dtype=np.uint16)
273 |         k = np.tile(k, (l, 1))
274 |         plaintext = plaintext ^ k ^ key
275 |         key = k
276 |     labels = var_labels(key, plaintext, masks, rin, rout)
277 |     return traces, labels
278 | 
279 | def target_variables(byte):
280 |     """variables that will be profiled"""
281 |     return ["rin", "rout"] + [
282 |         f"{base}_{byte}" for base in ("x0", "x1", "xrin", "yrout", "y0", "y1")
283 |     ]
284 | 
285 | def compute_snr(settings, ntraces_profile):
286 |     """Returns the SNR of the traces samples for each target variable."""
287 |     snrs = {v: dict() for i in range(settings.nbytes) for v in target_variables(i)}
288 |     traces, labels = get_traces(settings, start=0, l=ntraces_profile, fixed_key=False)
289 |     if settings.fast_snr:
290 |         snr = SNR(np=len(snrs), nc=256, ns=settings.ns)
291 |         labels_full = np.zeros((traces.shape[0], len(snrs)))
292 |         variables = list(settings.variables())
293 |         x = np.array([labels[v] for v in variables]).T
294 |         snr.fit_u(traces, x)
295 |         snrs_raw = snr.get_snr()
296 |         # Avoid NaN in case of scope over-range
297 |         np.nan_to_num(snrs_raw, nan=0.0, copy=False)
298 |         for i, v in enumerate(variables):
299 |             snrs[v]["SNR"] = snrs_raw[i, :]
300 |     else:
301 |         for v, m in tqdm(snrs.items(), total=len(snrs), desc="SNR Variables"):
302 |             snr = SNR(np=1, nc=256, ns=settings.ns)
303 |             x = labels[v].reshape((ntraces_profile, 1))
304 |             # Note: if the traces do not fit in RAM, you can call multiple times fit_u
305 |             # on the same SNR object to do incremental SNR computation.
306 |             snr.fit_u(traces, x)
307 |             m["SNR"] = snr.get_snr()[0, :]
308 |             # Avoid NaN in case of scope over-range
309 |             np.nan_to_num(m["SNR"], nan=0.0, copy=False)
310 |     return snrs
311 | 
312 | def make_snr(settings):
313 |     for ntraces_profile in tqdm(settings.ntraces_profiles, desc="SNR_np"):
314 |         snrs = compute_snr(settings, ntraces_profile)
315 |         with open(settings.snr_file(ntraces_profile), 'wb') as f:
316 |             pickle.dump(snrs, f)
317 | 
318 | @ft.lru_cache(maxsize=None)
319 | def load_snr(settings, ntraces_profile):
320 |     "Load SNR from the store."
321 | with open(settings.snr_file(ntraces_profile), 'rb') as f: 322 | return pickle.load(f) 323 | 324 | def preload_snr(settings): 325 | return { 326 | ntraces_profile: load_snr(settings, ntraces_profile) 327 | for ntraces_profile in settings.ntraces_profiles 328 | } 329 | 330 | def compute_templates(settings, ntraces_profile, npoi, lda_dim): 331 | "LDA and gaussian templates computation" 332 | snrs = load_snr(settings, ntraces_profile) 333 | models = dict() 334 | for k, snr in snrs.items(): 335 | poi = np.argsort(snr["SNR"])[-npoi:].astype(np.uint32) 336 | poi.sort() 337 | models[k] = {"poi": poi} 338 | traces, labels = get_traces(settings, 0, ntraces_profile) 339 | vs = list(models.keys()) 340 | mlda = scalib.modeling.MultiLDA( 341 | ncs=len(models) * [256], 342 | ps=len(models) * [lda_dim], 343 | pois=[models[v]["poi"] for v in vs], 344 | gemm_mode=4, 345 | ) 346 | x = np.array([labels[v] for v in vs]).transpose() 347 | mlda.fit_u(traces, x) 348 | mlda.solve() 349 | for lda, v in zip(mlda.ldas, vs): 350 | models[v]["lda"] = lda 351 | return models 352 | 353 | def make_models(settings): 354 | profile_cases = list(it.product(settings.ntraces_profiles, settings.npois, settings.lda_dims)) 355 | for pc in tqdm(profile_cases, desc="profiling cases"): 356 | models = compute_templates(settings, *pc) 357 | fname = settings.model_file(*pc) 358 | with open(fname, 'wb') as f: 359 | pickle.dump(models, f) 360 | 361 | @ft.lru_cache(maxsize=None) 362 | def load_models(settings, ntraces_profile, npoi, lda_dim): 363 | fname = settings.model_file(ntraces_profile, npoi, lda_dim) 364 | with open(fname, 'rb') as f: 365 | return pickle.load(f) 366 | 367 | def preload_models(settings): 368 | return { 369 | pc: load_models(settings, *pc) 370 | for pc in it.product(settings.ntraces_profiles, settings.npois, settings.lda_dims) 371 | } 372 | 373 | SASCA_GRAPH = """ 374 | NC 256 375 | TABLE sbox 376 | 377 | VAR MULTI x0 378 | VAR MULTI x1 379 | VAR MULTI x 380 | VAR MULTI xp 381 | VAR MULTI xrin 382 | VAR MULTI rout 383 | VAR MULTI rin 384 | 385 | VAR MULTI y0 386 | VAR MULTI y1 387 | VAR MULTI y 388 | VAR MULTI yp 389 | VAR MULTI yrout 390 | 391 | VAR MULTI p 392 | VAR SINGLE k 393 | 394 | PROPERTY x = p ^ k 395 | PROPERTY x = x0 ^ x1 396 | PROPERTY x = rin ^ xrin 397 | 398 | PROPERTY y = sbox[x] 399 | PROPERTY y = y0 ^ y1 400 | PROPERTY y = rout ^ yrout 401 | """ 402 | 403 | @ft.lru_cache(maxsize=None) 404 | def sasca_graph(settings, nattack_traces): 405 | sasca = scalib.attacks.SASCAGraph(SASCA_GRAPH, n=nattack_traces) 406 | sasca.set_table("sbox", SBOX.astype(np.uint32)) 407 | return sasca 408 | 409 | def attack(settings, traces, labels, models): 410 | """Run a SASCA attack on the given traces and evaluate its performance. 
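    The `models` argument maps each profiled variable to its selected POIs and
    fitted LDA object, as produced by compute_templates().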
411 | 
412 |     :param labels:
413 |         contains the labels of all the variables
414 |     :returns:
415 |         the true key and the distribution estimated by the attack
416 |     """
417 |     # correct secret key
418 |     secret_key = [labels[f"k_{i}"][0] for i in range(settings.nbytes)]
419 |     # distribution for each of the key bytes
420 |     key_distribution = []
421 |     # Run a SASCA for each S-Box
422 |     for i in range(settings.nbytes):
423 |         sasca = copy.deepcopy(sasca_graph(settings, traces.shape[0]))
424 |         # Set the labels for the plaintext byte
425 |         sasca.set_public(f"p", labels[f"p_{i}"].astype(np.uint32))
426 |         for var in settings.included_variables:
427 |             if var in ("rin", "rout"):
428 |                 model = models[var]
429 |             else:
430 |                 model = models[var + f"_{i}"]
431 |             prs = model["lda"].predict_proba(traces[:, model["poi"]])
432 |             sasca.set_init_distribution(var, prs)
433 |         sasca.run_bp(it=3)
434 |         distribution = sasca.get_distribution(f"k")[0, :]
435 |         key_distribution.append(distribution)
436 |     key_distribution = np.array(key_distribution)
437 |     return secret_key, key_distribution
438 | 
439 | def run_attack(settings, traces, labels, models):
440 |     """Run a SASCA attack on the given traces and evaluate its performance.
441 | 
442 |     :param labels:
443 |         contains the labels of all the variables
444 |     :returns:
445 |         the log2 of the rank of the true key
446 |     """
447 |     secret_key, key_distribution = attack(settings, traces, labels, models)
448 |     rmin, r, rmax = scalib.postprocessing.rank_accuracy(
449 |         -np.log2(key_distribution), secret_key, max_nb_bin=2**20
450 |     )
451 |     lrmin, lr, lrmax = (np.log2(rmin), np.log2(r), np.log2(rmax))
452 |     return lr
453 | 
454 | def make_attacks(settings):
455 |     # Offset in traces to not fall on the training traces
456 |     traces, labels = get_traces(
457 |         settings,
458 |         max(settings.ntraces_profiles),
459 |         settings.averageattack * max(settings.ntraces_attacks),
460 |         fixed_key=True
461 |     )
462 |     attack_cases = list(it.product(
463 |         settings.ntraces_profiles,
464 |         settings.ntraces_attacks,
465 |         settings.npois,
466 |         settings.lda_dims,
467 |     ))
468 |     # TODO: this loop does not seem to fully exploit the available parallelism
469 |     # (seeing only a ~8x speedup on a large CPU)
470 |     with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor, \
471 |             tqdm(total=settings.averageattack*len(attack_cases)) as progress:
472 |         futures_all = dict()
473 |         for attack_case in attack_cases:
474 |             ntraces_profile, ntraces_attack, npoi, lda_dim = attack_case
475 |             models = load_models(settings, ntraces_profile, npoi, lda_dim)
476 |             futures = []
477 |             for a in range(settings.averageattack):
478 |                 future = executor.submit(
479 |                     run_attack,
480 |                     settings,
481 |                     traces[a*ntraces_attack:(a+1)*ntraces_attack, :],
482 |                     {
483 |                         k: val[a * ntraces_attack : (a+1) * ntraces_attack]
484 |                         for k, val in labels.items()
485 |                     },
486 |                     models
487 |                 )
488 |                 future.add_done_callback(lambda _: progress.update())
489 |                 futures.append(future)
490 |             futures_all[attack_case] = futures
491 | 
492 |         for attack_case, futures in futures_all.items():
493 |             attack_results = np.array([future.result() for future in futures])
494 |             fname = settings.attack_file(*attack_case)
495 |             with open(fname, 'wb') as f:
496 |                 pickle.dump(attack_results, f)
497 | 
498 | @ft.lru_cache(maxsize=None)
499 | def load_attacks(settings, ntraces_profile, ntraces_attack, npoi, lda_dim):
500 |     fname = settings.attack_file(ntraces_profile, ntraces_attack, npoi, lda_dim)
501 |     with open(fname, 'rb') as f:
502 |         return pickle.load(f)
503 | 
504 | def preload_attacks(settings):
505 |     attack_cases = list(it.product(
506 |         settings.ntraces_profiles,
507 |         settings.ntraces_attacks,
508 |         settings.npois,
509 |         settings.lda_dims,
510 |     ))
511 |     return {ac: load_attacks(settings, *ac) for ac in attack_cases}
512 | 
513 | #############################
514 | ### Plots ###
515 | #############################
516 | 
517 | def rank_boxplot(settings, ntraces_attacks, attack_results):
518 |     plt.boxplot(
519 |         [2**ar for ar in attack_results],
520 |         labels=ntraces_attacks, autorange=True
521 |     )
522 |     plt.grid(True, which="both", ls="--")
523 |     plt.yscale("log", base=2)
524 |     plt.xlabel("number of attack traces")
525 |     plt.ylabel("key rank")
526 |     plt.ylim((1/1.1, 1.1 * 2.0 ** (8 * settings.nbytes)))
527 |     ticks = {1: range(0, 9), 2: range(0, 17, 2)}.get(
528 |         settings.nbytes, range(0, 8 * settings.nbytes + 1, 8)
529 |     )
530 |     plt.yticks([2.0 ** x for x in ticks])
531 | 
532 | def make_sr_boxplot(settings):
533 |     plot_cases = dict()
534 |     for attack_case, attack_res in preload_attacks(settings).items():
535 |         ntraces_profile, ntraces_attack, npoi, lda_dims = attack_case
536 |         plot_cases.setdefault((ntraces_profile, npoi, lda_dims),
537 |             list()).append((ntraces_attack, attack_res))
538 |     for plot_case, attacks in plot_cases.items():
539 |         ntraces_profile, npoi, lda_dims = plot_case
540 |         attack_case = ntraces_profile, 0, npoi, lda_dims
541 |         figname = f"ranks_{settings.attack_suffix(*attack_case)}"
542 |         plt.figure(figname)
543 |         rank_boxplot(settings, *list(zip(*attacks)))
544 |         plt.savefig(
545 |             os.path.join(settings.store_dir, figname+".pdf"),
546 |             bbox_inches="tight",
547 |             pad_inches=0.02
548 |         )
549 | 
550 | def success_rate(ranks, min_rank=1):
551 |     return np.sum(ranks <= min_rank) / ranks.size
552 | 
553 | def make_sr_map(settings):
554 |     for ntraces_profile, ntraces_attack in it.product(
555 |         settings.ntraces_profiles, settings.ntraces_attacks
556 |     ):
557 |         plot_matrix = np.array([
558 |             [
559 |                 success_rate(2**np.array(
560 |                     load_attacks(settings, ntraces_profile, ntraces_attack, npoi, lda_dim)
561 |                 ))
562 |                 for lda_dim in settings.lda_dims]
563 |             for npoi in settings.npois
564 | 
]) 565 | attack_case = ntraces_profile, ntraces_attack, 0, 0 566 | figname = f"srs_{settings.attack_suffix(*attack_case)}" 567 | fig, ax = plt.subplots(num=figname) 568 | ax.matshow(plot_matrix, cmap=plt.cm.Greys, origin='lower', vmin=0.0, vmax=1.0) 569 | for i, npoi in enumerate(settings.npois): 570 | for j, lda_dim in enumerate(settings.lda_dims): 571 | c = plot_matrix[i][j] 572 | ax.text(j, i, f"{c:.2f}", va='center', ha='center', color='red') 573 | plt.xlabel("LDA dim") 574 | plt.ylabel("#POI") 575 | ax.set_xticks(range(len(settings.lda_dims))) 576 | ax.set_xticklabels(map(str, settings.lda_dims)) 577 | ax.set_yticks(range(len(settings.npois))) 578 | ax.set_yticklabels(map(str, settings.npois)) 579 | ax.xaxis.set_ticks_position('bottom') 580 | plt.savefig( 581 | os.path.join(settings.store_dir, figname+".pdf"), 582 | bbox_inches="tight", 583 | pad_inches=0.02 584 | ) 585 | 586 | if __name__ == "__main__": 587 | settings = parse_args() 588 | settings.parse_settings() 589 | if "snr" in settings.computations: 590 | print("Start SNR estimation") 591 | make_snr(settings) 592 | if "model" in settings.computations: 593 | print("Start modeling") 594 | # We use preloading of lru_caches to avoid double-loading due to 595 | # multi-threaded computations 596 | preload_snr(settings) 597 | make_models(settings) 598 | if "attack" in settings.computations: 599 | print("Start attack") 600 | preload_models(settings) 601 | make_attacks(settings) 602 | if "sr-boxplot" in settings.computations: 603 | preload_attacks(settings) 604 | make_sr_boxplot(settings) 605 | if "sr-map" in settings.computations: 606 | preload_attacks(settings) 607 | make_sr_map(settings) 608 | if {"sr-boxplot", "sr-map"}.intersection(settings.computations) and settings.show: 609 | plt.show() 610 | --------------------------------------------------------------------------------