├── .gitignore
├── .gitmodules
├── LICENSE.txt
├── design-processing
│ ├── clean_all_designs.py
│ ├── common
│ │ ├── design.mk
│ │ ├── dv
│ │ │ ├── common_functions.cc
│ │ │ ├── difuzzrtl_utils.h
│ │ │ ├── drfuzz
│ │ │ │ ├── afl.h
│ │ │ │ ├── corpus.cc
│ │ │ │ ├── corpus.h
│ │ │ │ ├── dtypes.cc
│ │ │ │ ├── dtypes.h
│ │ │ │ ├── mutator.cc
│ │ │ │ ├── mutator.h
│ │ │ │ ├── queue.cc
│ │ │ │ ├── queue.h
│ │ │ │ ├── testbench.cc
│ │ │ │ └── toplevel.cc
│ │ │ ├── elf.h
│ │ │ ├── elfloader.cc
│ │ │ ├── sv
│ │ │ │ ├── clk_rst_gen.sv
│ │ │ │ └── rst_gen.sv
│ │ │ └── ticks.h
│ │ ├── modelsim.mk
│ │ ├── modelsim
│ │ │ ├── modelsim_build.tcl
│ │ │ └── modelsim_run.tcl
│ │ ├── python_scripts
│ │ │ ├── add_sim_initbegin.py
│ │ │ ├── ascend_package_in_pickle.py
│ │ │ ├── ascend_packages_in_pickle.py
│ │ │ ├── compress_concats.py
│ │ │ ├── concatbitwidth.py
│ │ │ ├── correct_too_wide_lvalues.py
│ │ │ ├── divide_concat_into_subconcats.py
│ │ │ ├── expand_left_operand.py
│ │ │ ├── expand_reduction_operand.py
│ │ │ ├── expand_right_operand.py
│ │ │ ├── gen_corefiles.py
│ │ │ ├── move_verilator_public_attrs.py
│ │ │ ├── remove_attributes.py
│ │ │ ├── remove_displays.py
│ │ │ ├── remove_modules.py
│ │ │ ├── remove_pragma_translate_off.py
│ │ │ ├── remove_single_module.py
│ │ │ ├── remove_unused_modules.py
│ │ │ └── simplify_yosysout.py
│ │ ├── src
│ │ │ └── sram_mem.sv
│ │ └── yosys
│ │   ├── drfuzz.ys.tcl
│ │   ├── passthrough.ys.tcl
│ │   └── rfuzz.ys.tcl
│ ├── design_repos.json
│ ├── make_all_designs.py
│ └── python-requirements.txt
├── elfs-for-questa
│ ├── .gitignore
│ └── Readme.md
├── env.sh
├── fuzzer
│ ├── .gitignore
│ ├── analyzeelfs
│ │ ├── __init__.py
│ │ ├── analyze.py
│ │ ├── dependencies.py
│ │ ├── genmanyelfs.py
│ │ ├── plot.py
│ │ └── util.py
│ ├── benchmarking
│ │ ├── collectdifuzzcoverage.py
│ │ ├── findnfailinginstances.py
│ │ ├── fuzzperf.py
│ │ └── timereduction.py
│ ├── cascade
│ │ ├── basicblock.py
│ │ ├── blacklist.py
│ │ ├── cfinstructionclasses.py
│ │ ├── contextreplay.py
│ │ ├── debug
│ │ │ ├── compareexecutions.py
│ │ │ └── debugreduce.py
│ │ ├── finalblock.py
│ │ ├── fuzzerstate.py
│ │ ├── fuzzfromdescriptor.py
│ │ ├── fuzzsim.py
│ │ ├── genelf.py
│ │ ├── initialblock.py
│ │ ├── memview.py
│ │ ├── privilegestate.py
│ │ ├── randomize
│ │ │ ├── createcfinstr.py
│ │ │ ├── pickexceptionop.py
│ │ │ ├── pickfpuop.py
│ │ │ ├── pickinstrtype.py
│ │ │ ├── pickisainstrclass.py
│ │ │ ├── pickmemop.py
│ │ │ ├── pickprivilegedescentop.py
│ │ │ ├── pickrandomcsrop.py
│ │ │ ├── pickreg.py
│ │ │ └── pickstoreaddr.py
│ │ ├── reduce.py
│ │ ├── spikeresolution.py
│ │ ├── toleratebugs.py
│ │ └── util.py
│ ├── common
│ │ ├── bytestoelf.py
│ │ ├── designcfgs.py
│ │ ├── profiledesign.py
│ │ ├── sim
│ │ │ ├── commonsim.py
│ │ │ ├── coverageutil.py
│ │ │ └── modelsim.py
│ │ ├── spike.py
│ │ ├── threads.py
│ │ └── timeout.py
│ ├── do_analyze_cascade_elfs.py
│ ├── do_analyze_dependencies.py
│ ├── do_analyze_difuzzrtl_elfs.py
│ ├── do_collect_difuzz_coverage.py
│ ├── do_compare_cascade_difuzzrtl_modelsim.py
│ ├── do_compareexecutions.py
│ ├── do_debug_reduce.py
│ ├── do_evalreduction.py
│ ├── do_fuzzdesign.py
│ ├── do_fuzzperf.py
│ ├── do_fuzzsingle.py
│ ├── do_genelfs_for_questa.py
│ ├── do_genmanyelfs.py
│ ├── do_numinstrs_statistics.py
│ ├── do_performance_ubenchmark_fewinstructions.py
│ ├── do_plot_bug_bars.py
│ ├── do_plot_fewinstr_execperf_programlength.py
│ ├── do_plot_fewinstr_fuzzperf_programlength.py
│ ├── do_plot_fewinstr_genduration_programlength.py
│ ├── do_plotevalreduction.py
│ ├── do_reducesingle.py
│ ├── do_rfuzz.py
│ ├── do_simcoverage.py
│ ├── do_timetobug.py
│ ├── do_timetobug_boxes.py
│ ├── do_timetobug_boxes_plot.py
│ ├── do_timetobugplot_curves.py
│ ├── miscplots
│ │ ├── .gitignore
│ │ ├── __init__.py
│ │ ├── plotcategories.py
│ │ └── plotsecuimplications.py
│ ├── modelsim
│ │ ├── annotateinstrs.py
│ │ ├── comparedifuzzmodelsim.py
│ │ ├── countinstrs.py
│ │ ├── patchwritetohost.py
│ │ ├── plot.py
│ │ └── util.py
│ ├── params
│ │ ├── fuzzparams.py
│ │ └── runparams.py
│ ├── rfuzz
│ │ ├── __init__.py
│ │ ├── collectactiverfuzz.py
│ │ ├── collectrfuzz.py
│ │ └── plot.py
│ ├── rv
│ │ ├── asmutil.py
│ │ ├── csrids.py
│ │ ├── rv32d.py
│ │ ├── rv32f.py
│ │ ├── rv32i.py
│ │ ├── rv32m.py
│ │ ├── rv64d.py
│ │ ├── rv64f.py
│ │ ├── rv64i.py
│ │ ├── rv64m.py
│ │ ├── rvprivileged.py
│ │ ├── rvprotoinstrs.py
│ │ ├── util.py
│ │ ├── zicsr.py
│ │ └── zifencei.py
│ └── top
│   ├── fuzzdesign.py
│   ├── fuzzdesigntiming.py
│   └── fuzzforperfubenchfewerinstructions.py
└── tools
  ├── .gitignore
  ├── Makefile
  ├── miniconda
  │ └── miniconda.sh
  └── stack
    └── gethaskellstack.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode
2 | __pycache__
3 | *.log
4 | transcript
5 | tmp
6 | figures
7 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "tools/cascade-yosys"]
2 | path = tools/cascade-yosys
3 | url = https://github.com/cascade-artifacts-designs/cascade-yosys
4 | [submodule "tools/riscv-gnu-toolchain"]
5 | path = tools/riscv-gnu-toolchain
6 | url = https://github.com/riscv-collab/riscv-gnu-toolchain
7 | [submodule "tools/morty"]
8 | path = tools/morty
9 | url = https://github.com/pulp-platform/morty.git
10 | [submodule "tools/bender"]
11 | path = tools/bender
12 | url = https://github.com/pulp-platform/bender.git
13 | [submodule "tools/makeelf"]
14 | path = tools/makeelf
15 | url = https://github.com/flaviens/makeelf
16 | [submodule "tools/verilator"]
17 | path = tools/verilator
18 | url = https://github.com/verilator/verilator.git
19 | [submodule "tools/sv2v"]
20 | path = tools/sv2v
21 | url = https://github.com/zachjs/sv2v.git
22 | [submodule "tools/riscv-isa-sim"]
23 | path = tools/riscv-isa-sim
24 | url = https://github.com/riscv-software-src/riscv-isa-sim.git
25 | [submodule "tools/cellift-yosys"]
26 | path = tools/cellift-yosys
27 | url = https://github.com/flaviens/disco-yosys.git
28 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (C) 2022 Flavien Solt, ETH Zurich
2 |
3 | This program is free software: you can redistribute it and/or modify
4 | it under the terms of the GNU General Public License as published by
5 | the Free Software Foundation, either version 3 of the License.
6 |
7 | This program is distributed in the hope that it will be useful,
8 | but WITHOUT ANY WARRANTY; without even the implied warranty of
9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 | GNU General Public License for more details.
11 |
12 | See <https://www.gnu.org/licenses/> for a copy of the GNU General
13 | Public License.
14 |
--------------------------------------------------------------------------------
/design-processing/clean_all_designs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script cleans all designs listed in the design_repos json file.
6 |
7 | import json
8 | import multiprocessing
9 | import subprocess
10 | import os
11 |
12 | if "CASCADE_ENV_SOURCED" not in os.environ:
13 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
14 |
15 | DESIGN_CFGS_BASENAME = "design_repos.json"
16 | PATH_TO_DESIGN_CFGS = os.path.join(os.getenv("CASCADE_DESIGN_PROCESSING_ROOT"), DESIGN_CFGS_BASENAME)
17 |
18 | # Ask confirmation before cleaning all designs.
19 | confirmation_str = input("This will clean all the design repositories listed in {}. Continue (yes|NO)?".format(DESIGN_CFGS_BASENAME))
20 | if confirmation_str.lower() not in ["y","yes"]:
21 | print("Aborted.")
22 |
23 | # Read the list of paths to the design cascade directories.
24 | with open(PATH_TO_DESIGN_CFGS, "r") as f:
25 | design_json_content = json.load(f)
26 | design_cascade_paths = list(design_json_content.values())
27 |
28 | # Run cleaning in parallel.
29 | num_processes = int(os.getenv("CASCADE_JOBS"))
30 | def worker(design_cascade_path):
31 | cmdline = ["make", "-C", design_cascade_path, "clean"]
32 | subprocess.check_call(cmdline)
33 | my_pool = multiprocessing.Pool(num_processes)
34 | my_pool.map(worker, design_cascade_paths)
35 |
--------------------------------------------------------------------------------
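
Note: the script above only consumes the values of the JSON object, each of which must point at a directory whose Makefile has a `clean` target. A hypothetical `design_repos.json` could therefore look like this (design names and paths are illustrative, not taken from the repository):

    {
        "ibex": "/path/to/ibex/cascade",
        "rocket": "/path/to/rocket/cascade"
    }
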
/design-processing/common/design.mk:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Make fragment that designs can include for common functionality
6 |
7 | ifeq "" "$(CASCADE_ENV_SOURCED)"
8 | $(error Please re-source env.sh first, in the meta repo, and run from there, not this repo. See README.md in the meta repo)
9 | endif
10 |
11 | ifeq "" "$(CASCADE_META_ROOT)"
12 | $(error Please re-source env.sh first, in the meta repo, and run from there, not this repo. See README.md in the meta repo)
13 | endif
14 |
15 | extract_timestamps:
16 | for log in generated/out/*.sv.log; \
17 | do echo $$log; \
18 | python3 $(CASCADE_META_ROOT)/python_scripts/extract_timestamps.py $$log split; \
19 | done
20 |
21 | generated generated/common/dv generated/common generated/scattered generated/sv_sources generated/out generated/dv traces logs statistics build modelsim:
22 | mkdir -p $@
23 |
24 | wave: | traces
25 | gtkwave -S scripts/gtkwave_init.tcl traces/sim.vcd
26 | wave_fst: | traces
27 | gtkwave -S scripts/gtkwave_init.tcl traces/sim.fst
28 | wave_fst_vanilla: | traces
29 | gtkwave -S scripts/gtkwave_init.tcl traces/sim_vanilla.fst
30 |
31 | .PHONY: clean
32 | clean:
33 | rm -rf build generated traces logs statistics ../Bender.lock fusesoc.conf modelsim
34 |
--------------------------------------------------------------------------------
/design-processing/common/dv/common_functions.cc:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Flavien Solt, ETH Zurich.
2 | // Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | // SPDX-License-Identifier: GPL-3.0-only
4 |
5 | /* functions all designs can use in their C++ code */
6 |
7 | #include <cstdio>
8 | #include <cstdlib>
9 |
10 | extern "C" {
11 | const char *Get_SRAM_ELF_object_filename(void);
12 | const char *Get_BootROM_ELF_object_filename(void);
13 | const char *cascade_getenv(char *varname);
14 | }
15 |
16 | extern "C" const char *Get_SRAM_ELF_object_filename(void)
17 | {
18 | /* This function is used inside the ELF Loader code in ift_sram.sv
19 | * to determine the filename to load. The environment variable
20 | * SIMSRAMELF can be used to override the default.
21 | */
22 | const char* simsram_env = std::getenv("SIMSRAMELF");
23 | if(simsram_env == NULL) { fprintf(stderr, "SIMSRAMELF required\n"); exit(1); }
24 | return simsram_env;
25 | }
26 |
27 | extern "C" const char *Get_BootROM_ELF_object_filename(void)
28 | {
29 | /* As above: allow ROM ELF filename to be overridden using
30 | * SIMROMELF environment variable. Used in ift_boot_rom_hdac.sv.
31 | */
32 | const char* simrom_env = std::getenv("SIMROMELF");
33 | if(simrom_env == NULL) { fprintf(stderr, "SIMROMELF required\n"); exit(1); }
34 | return simrom_env;
35 | }
36 |
37 | /* workaround for inconsistent prototype when getenv() is directly imported; we import cascade_getenv() instead */
38 | extern "C" const char *cascade_getenv(char *varname)
39 | {
40 | return (char *) getenv((char *) varname);
41 | }
42 |
--------------------------------------------------------------------------------
/design-processing/common/dv/difuzzrtl_utils.h:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Flavien Solt, ETH Zurich.
2 | // Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | // SPDX-License-Identifier: GPL-3.0-only
4 |
5 | // Some utilities for DifuzzRTL
6 |
7 | #pragma once
8 |
9 | #include <cassert>
10 | #include <cstdint>
11 | #include <vector>
12 |
13 | /////////
14 | // The 3 functions below differ by the width of the cover port, given how Verilator represents different bit widths.
15 | /////////
16 |
17 | // Get the value of a subset of the input.
18 | static uint32_t get_field_value_single32(uint32_t data, int start_index, int width)
19 | {
20 | assert (start_index >= 0);
21 | assert (width >= 0);
22 | assert (width <= 20); // DifuzzRTL defines the max hash width as 20 bits.
23 | assert (start_index + width <= 32);
24 | return (data >> start_index) & ((1 << width) - 1);
25 | }
26 |
27 | // Get the value of a subset of the input.
28 | static uint32_t get_field_value_single64(uint64_t data, int start_index, int width)
29 | {
30 | assert (start_index >= 0);
31 | assert (width >= 0);
32 | assert (width <= 20); // DifuzzRTL defines the max hash width as 20 bits.
33 | assert (start_index + width <= 64);
34 | return (data >> start_index) & ((1 << width) - 1);
35 | }
36 |
37 | // Get the value of a subset of the input.
38 | static uint32_t get_field_value_multi32(std::vector<uint32_t> data, int start_index, int width)
39 | {
40 | assert (start_index >= 0);
41 | assert (width >= 0);
42 | assert (width <= 20); // DifuzzRTL defines the max hash width as 20 bits.
43 | assert (start_index + width <= 32*data.size());
44 |
45 | // Discriminate whether the field is contained in a single word or multiple words (cannot be more than 2 because max 20 bits)
46 | if (start_index / 32 < (start_index + width) / 32) {
47 | assert ((start_index + width) / 32 == (start_index / 32) + 1);
48 | // The field is contained in multiple words.
49 | // Get the first word.
50 | uint32_t first_word = data[start_index / 32];
51 | // Get the second word.
52 | uint32_t second_word = data[(start_index + width) / 32];
53 | // Get the value of the field in the first word.
54 | uint32_t first_word_field = get_field_value_single32(first_word, start_index % 32, 32 - (start_index % 32));
55 | // Get the value of the field in the second word.
56 | uint32_t second_word_field = get_field_value_single32(second_word, 0, (start_index + width) % 32);
57 | // Concatenate the two values.
58 | return (second_word_field << (32 - (start_index % 32))) | first_word_field;
59 | } else {
60 | // The field is contained in a single word.
61 | return get_field_value_single32(data[start_index / 32], start_index % 32, width);
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
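
To see the word-spanning branch of `get_field_value_multi32` in action, here is a minimal Python mirror of the same arithmetic (a sketch for illustration, not part of the sources). A field of width 8 starting at bit 28 straddles words 0 and 1:

    def get_field_value_multi32(words, start_index, width):
        if start_index // 32 < (start_index + width) // 32:
            # Field straddles two 32-bit words: take the high bits of the
            # first word and the low bits of the second, then concatenate.
            n_lo_bits = 32 - (start_index % 32)
            lo = (words[start_index // 32] >> (start_index % 32)) & ((1 << n_lo_bits) - 1)
            hi = words[(start_index + width) // 32] & ((1 << ((start_index + width) % 32)) - 1)
            return (hi << n_lo_bits) | lo
        # Field contained in a single word.
        return (words[start_index // 32] >> (start_index % 32)) & ((1 << width) - 1)

    # Bits 28..31 of word 0 are 0xF, bits 0..3 of word 1 are 0xA -> field value 0xAF.
    assert get_field_value_multi32([0xF0000000, 0x0000000A], 28, 8) == 0xAF
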
/design-processing/common/dv/drfuzz/afl.h:
--------------------------------------------------------------------------------
1 | #ifndef AFL_H
2 | #define AFL_H
3 | /*********************
4 | * SOME AFL DEFINES *
5 | ********************/
6 | #include <stdint.h>
7 | #define FLIP_BIT(_ar, _b) \
8 | do { \
9 | \
10 | uint8_t *_arf = (uint8_t *)(_ar); \
11 | uint32_t _bf = (_b); \
12 | _arf[(_bf) >> 3] ^= (128 >> ((_bf)&7)); \
13 | \
14 | } while (0)
15 | // >>3 indexes the correct byte,
16 | // &7 takes the lowest 3 bits that index within the byte; 128 == 2^7, i.e. 1000 0000, so bit 0 is the leftmost (most significant) bit of the byte.
17 | #define SWAP16(_x) \
18 | ({ \
19 | \
20 | uint16_t _ret = (_x); \
21 | (uint16_t)((_ret << 8) | (_ret >> 8)); \
22 | \
23 | })
24 |
25 | #define SWAP32(_x) \
26 | ({ \
27 | \
28 | uint32_t _ret = (_x); \
29 | (uint32_t)((_ret << 24) | (_ret >> 24) | ((_ret << 8) & 0x00FF0000) | \
30 | ((_ret >> 8) & 0x0000FF00)); \
31 | \
32 | })
33 |
34 | #define ARITH_MAX 35
35 |
36 | #define INTERESTING_8 \
37 | -128, /* Overflow signed 8-bit when decremented */ \
38 | -1, /* */ \
39 | 0, /* */ \
40 | 1, /* */ \
41 | 16, /* One-off with common buffer size */ \
42 | 32, /* One-off with common buffer size */ \
43 | 64, /* One-off with common buffer size */ \
44 | 100, /* One-off with common buffer size */ \
45 | 127 /* Overflow signed 8-bit when incremented */
46 |
47 | #define INTERESTING_8_LEN 9
48 |
49 | #define INTERESTING_16 \
50 | -32768, /* Overflow signed 16-bit when decremented */ \
51 | -129, /* Overflow signed 8-bit */ \
52 | 128, /* Overflow signed 8-bit */ \
53 | 255, /* Overflow unsig 8-bit when incremented */ \
54 | 256, /* Overflow unsig 8-bit */ \
55 | 512, /* One-off with common buffer size */ \
56 | 1000, /* One-off with common buffer size */ \
57 | 1024, /* One-off with common buffer size */ \
58 | 4096, /* One-off with common buffer size */ \
59 | 32767 /* Overflow signed 16-bit when incremented */
60 |
61 | #define INTERESTING_16_LEN 10
62 |
63 | #define INTERESTING_32 \
64 | -2147483648LL, /* Overflow signed 32-bit when decremented */ \
65 | -100663046, /* Large negative number (endian-agnostic) */ \
66 | -32769, /* Overflow signed 16-bit */ \
67 | 32768, /* Overflow signed 16-bit */ \
68 | 65535, /* Overflow unsig 16-bit when incremented */ \
69 | 65536, /* Overflow unsig 16 bit */ \
70 | 100663045, /* Large positive number (endian-agnostic) */ \
71 | 2147483647 /* Overflow signed 32-bit when incremented */
72 |
73 | #define INTERESTING_32_LEN 8
74 |
75 | #endif // AFL_H
--------------------------------------------------------------------------------
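
To make the `>>3` / `&7` comment above concrete: FLIP_BIT numbers bits so that bit 0 is the most significant bit of byte 0. A small Python replay of the indexing (illustrative only, not part of the sources):

    def flip_bit(buf: bytearray, b: int) -> None:
        # buf[b >> 3] selects the byte; 128 >> (b & 7) selects the bit within it,
        # so bit 0 is the MSB of byte 0 and bit 7 is its LSB.
        buf[b >> 3] ^= 128 >> (b & 7)

    buf = bytearray(2)
    flip_bit(buf, 0)   # MSB of byte 0
    flip_bit(buf, 15)  # LSB of byte 1
    assert buf == bytearray([0x80, 0x01])
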
/design-processing/common/dv/drfuzz/corpus.cc:
--------------------------------------------------------------------------------
1 | #include <cstring>
2 | #include <iostream>
3 | #include "corpus.h"
4 | void Corpus::add_q(Queue *q){
5 | this->qs.push_back(q);
6 | this->accumulate_output(q);
7 | assert(this->qs.size());
8 | }
9 |
10 | void Corpus::dump_current_cov(Testbench *tb){
11 | #ifdef WRITE_COVERAGE
12 | this->acc_output->dump(tb);
13 | #else
14 | std::cout << "enable WRITE_COVERAGE compile flag!\n";
15 | #endif
16 | }
17 |
18 | Queue *Corpus::pop_q(){
19 | assert(this->qs.size());
20 | Queue *front = this->qs.front();
21 | assert(front != nullptr);
22 | this->qs.pop_front();
23 | return front;
24 | }
25 |
26 | bool Corpus::empty(){
27 | return this->qs.size()==0;
28 | }
29 |
30 | void Corpus::accumulate_output(Queue *q){ // we don't need initial coverage here since all the queues are already accumulated
31 | doutput_t *output = q->get_accumulated_output();
32 | if(output == nullptr) return;
33 | if(this->acc_output == nullptr){
34 | this->acc_output = (doutput_t *) malloc(sizeof(doutput_t));
35 | memcpy(this->acc_output, output, sizeof(doutput_t));
36 | }
37 | else{
38 | for(int i=0; i<N_COV_POINTS_b32; i++){
39 | this->acc_output->coverage[i] |= this->acc_output->coverage[i] ^ output->coverage[i];
40 | }
41 |
42 | for(int i=0; i<N_ASSERTS_b32; i++){
43 | this->acc_output->asserts[i] |= output->asserts[i];
44 | }
45 | this->acc_output->check();
46 | }
47 | }
48 |
49 | bool Corpus::is_interesting(Queue *q){
50 | bool is_interesting = false;
51 | if(this->acc_output==nullptr) return true;
52 | doutput_t *new_output = q->get_accumulated_output();
53 | std::deque<int> new_toggles_idx;
54 | for(int i=0; i<N_COV_POINTS_b32; i++){
55 | uint32_t check = (~this->acc_output->coverage[i]) & new_output->coverage[i];
56 | if(check != 0){
57 | is_interesting = true;
58 | for(int j=0; j<32; j++){
59 | if(check & (1<<j)){
60 | new_toggles_idx.push_back(i*32+j);
61 | }
62 | }
63 | }
64 | }
65 |
66 | if(is_interesting){
67 | // Report the coverage amount reached when counting the new toggles of this queue.
68 | std::cout << "Coverage points: "
69 | << this->get_coverage_amount() + new_toggles_idx.size() << std::endl;
70 | this->acc_output->print_increase(new_output);
71 | }
72 | return is_interesting;
73 | }
74 |
75 | int Corpus::get_coverage_amount() {
76 | assert((this->acc_output->coverage[N_COV_POINTS_b32-1] & ~COV_MASK) == 0);
77 | // Count the bits equal to 1.
78 | int ret = 0;
79 | for (int i = 0; i < N_COV_POINTS_b32; i++) {
80 | ret += __builtin_popcount(this->acc_output->coverage[i]);
81 | }
82 | assert(ret >= 0);
83 | assert(ret <= N_COV_POINTS);
84 | return ret;
85 | }
86 |
87 | void Corpus::print_acc_coverage(){
88 | this->acc_output->print();
89 | }
90 |
91 | doutput_t *Corpus::get_accumulated_output(){
92 | return this->acc_output;
93 | }
94 |
95 |
--------------------------------------------------------------------------------
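
The heart of `Corpus::is_interesting` above is the per-word test `check = (~acc) & new`: a queue is interesting exactly when it toggles a coverage bit that the accumulated output has not seen yet. A minimal Python rendition of that test (names are illustrative):

    def new_coverage_bits(acc: int, new: int) -> list:
        # Bits set in the new output but not yet in the accumulator.
        check = ~acc & new & 0xFFFFFFFF
        return [j for j in range(32) if check & (1 << j)]

    assert new_coverage_bits(0b1010, 0b1110) == [2]  # only bit 2 is new coverage
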
/design-processing/common/dv/drfuzz/corpus.h:
--------------------------------------------------------------------------------
1 | #ifndef CORPUS_H
2 | #define CORPUS_H
3 |
4 | #include <deque>
5 |
6 | #include "queue.h"
7 | #include "macros.h"
8 | #include "testbench.h"
9 | #ifdef WRITE_COVERAGE
10 | #include <iostream>
11 | #include <fstream>
12 | #endif
13 | // This class defines the set of queues that contain inputs and, if applied to the DUT,
14 | // the resulting outputs. That is, after fuzzing, it holds all test inputs and
15 | // the DUT outputs each of them results in. It is initialized with some random seeds,
16 | // which are then permuted and, if new coverage points are reached, re-added to the corpus.
17 | class Corpus{
18 | private:
19 | std::deque<Queue *> qs;
20 | doutput_t *acc_output;
21 |
22 | public:
23 | void dump_current_cov(Testbench *tb);
24 | void add_q(Queue *q);
25 | void accumulate_output(Queue *q);
26 | doutput_t *get_accumulated_output();
27 | Queue *pop_q();
28 | bool empty();
29 | bool is_interesting(Queue *q);
30 | int get_coverage_amount();
31 | void print_acc_coverage();
32 |
33 | };
34 | #endif // CORPUS_H
--------------------------------------------------------------------------------
/design-processing/common/dv/drfuzz/dtypes.h:
--------------------------------------------------------------------------------
1 | #ifndef DTYPES_H
2 | #define DTYPES_H
3 | #include <cstdint>
4 | #include <cstdio>
5 |
6 | #include "macros.h"
7 | class Testbench; // forward declaration to break cyclic dependencies in headers
8 |
9 | struct dinput_t { // TODO: should we make these classes? They got kind of bloated now...
10 | public:
11 | uint32_t inputs[N_FUZZ_INPUTS_b32];
12 | void print();
13 |
14 | void check();
15 | void clean();
16 | void print_diff(dinput_t *other);
17 | };
18 |
19 | struct doutput_t {
20 | public:
21 | uint32_t coverage[N_COV_POINTS_b32];
22 | uint32_t asserts[N_ASSERTS_b32];
23 |
24 | #ifdef WRITE_COVERAGE
25 | void dump(Testbench *tb);
26 | #endif
27 | void print();
28 | bool failed();
29 | void check_failed();
30 | void check();
31 | void print_diff(doutput_t *other);
32 | void print_asserts_diff(doutput_t *other);
33 | void print_increase(doutput_t *other);
34 | void add_or(doutput_t *other);
35 | void init();
36 | };
37 |
38 | #endif
39 |
--------------------------------------------------------------------------------
/design-processing/common/dv/drfuzz/queue.h:
--------------------------------------------------------------------------------
1 | #ifndef QUEUE_H
2 | #define QUEUE_H
3 |
4 | #include <deque>
5 |
6 | #include "macros.h"
7 | #include "dtypes.h"
8 |
9 |
10 | // this Queue class represents one set of inputs to be applied in succession to the DUT
11 | class Queue {
12 | private:
13 | dinput_t *last_input;
14 | doutput_t *last_output;
15 | doutput_t *acc_output;
16 | doutput_t *ini_output;
17 | void accumulate_output(doutput_t *);
18 |
19 | public:
20 | std::deque<dinput_t *> inputs; // inputs to DUT, FIFO
21 | std::deque<doutput_t *> outputs; // outputs from DUT, FIFO
22 |
23 | bool has_another_input();
24 | dinput_t *pop_tb_input();
25 | std::deque<dinput_t *> *pop_tb_inputs();
26 | void push_tb_output(doutput_t *tb_output);
27 | void push_tb_outputs(std::deque<doutput_t *> *outputs);
28 | void push_tb_input(dinput_t *tb_input);
29 | void push_tb_inputs(std::deque<dinput_t *> *inputs);
30 | void clear_tb_outputs();
31 | void clear_tb_inputs();
32 | void generate_inputs(int n_inputs = N_MAX_INPUTS);
33 | void seed();
34 | void print_inputs();
35 | void print_outputs();
36 | void print_accumulated_output();
37 | doutput_t *get_accumulated_output();
38 | void clear_accumulated_output();
39 | int get_coverage_amount();
40 | Queue *copy();
41 | size_t size();
42 | bool is_equal(Queue* other);
43 | void print_diff(Queue *other);
44 | ~Queue(){
45 | this->clear_tb_inputs();
46 | this->clear_tb_outputs();
47 | }
48 | };
49 | #endif // QUEUE_H
--------------------------------------------------------------------------------
/design-processing/common/dv/elf.h:
--------------------------------------------------------------------------------
1 | /* MTI_DPI */
2 |
3 | /*
4 | * Copyright 2002-2021 Mentor Graphics Corporation.
5 | *
6 | * Note:
7 | * This file is automatically generated.
8 | * Please do not edit this file - you will lose your edits.
9 | *
10 | * Settings when this file was generated:
11 | * PLATFORM = 'linux_x86_64'
12 | */
13 | #ifndef INCLUDED_ELF
14 | #define INCLUDED_ELF
15 |
16 | #ifdef __cplusplus
17 | #define DPI_LINK_DECL extern "C"
18 | #else
19 | #define DPI_LINK_DECL
20 | #endif
21 |
22 | #include "svdpi.h"
23 |
24 |
25 |
26 | DPI_LINK_DECL DPI_DLLESPEC
27 | const char*
28 | Get_SRAM_ELF_object_filename();
29 |
30 | DPI_LINK_DECL DPI_DLLESPEC
31 | char
32 | get_section(
33 | int64_t* address,
34 | int64_t* len);
35 |
36 | DPI_LINK_DECL DPI_DLLESPEC
37 | svLogic
38 | read_elf(
39 | const char* filename);
40 |
41 | DPI_LINK_DECL DPI_DLLESPEC
42 | char
43 | read_section(
44 | int64_t address,
45 | const svOpenArrayHandle buffer);
46 |
47 | #endif
48 |
--------------------------------------------------------------------------------
/design-processing/common/dv/sv/clk_rst_gen.sv:
--------------------------------------------------------------------------------
1 | module clk_rst_gen #(
2 | parameter time CLK_PERIOD,
3 | parameter unsigned RST_CLK_CYCLES
4 | ) (
5 | output logic clk_o,
6 | output logic rst_no
7 | );
8 |
9 | timeunit 1ns;
10 | timeprecision 10ps;
11 |
12 | logic clk;
13 |
14 | // Clock Generation
15 | initial begin
16 | clk = 1'b0;
17 | end
18 | always begin
19 | #(CLK_PERIOD/2);
20 | clk = ~clk;
21 | end
22 | assign clk_o = clk;
23 |
24 | // Reset Generation
25 | rst_gen #(
26 | .RST_CLK_CYCLES (RST_CLK_CYCLES)
27 | ) i_rst_gen (
28 | .clk_i (clk),
29 | .rst_ni (1'b1),
30 | .rst_o (),
31 | .rst_no (rst_no)
32 | );
33 |
34 | endmodule
35 |
--------------------------------------------------------------------------------
/design-processing/common/dv/sv/rst_gen.sv:
--------------------------------------------------------------------------------
1 | /**
2 | * Synchronous Reset Generator
3 | *
4 | * Generates reset signals synchronous to a reference clock. The resets are asserted after
5 | * initialization or when the external active-low reset is asserted. Once asserted, the resets
6 | * are deasserted after a configurable number of cycles of the reference clock.
7 | */
8 |
9 | module rst_gen #(
10 | parameter integer RST_CLK_CYCLES
11 | ) (
12 | input logic clk_i, // Reference clock
13 | input logic rst_ni, // External active-low reset
14 | output logic rst_o, // Active-high reset output
15 | output logic rst_no // Active-low reset output
16 | );
17 |
18 | logic rst_d, rst_q;
19 | logic [$clog2(RST_CLK_CYCLES+1)-1:0] cnt_d, cnt_q;
20 |
21 | always_comb begin
22 | cnt_d = cnt_q;
23 | if (cnt_q < RST_CLK_CYCLES) begin
24 | cnt_d += 1;
25 | end
26 | end
27 |
28 | assign rst_d = (cnt_q >= RST_CLK_CYCLES) ? 1'b0 : 1'b1;
29 |
30 | assign rst_o = rst_q;
31 | assign rst_no = ~rst_q;
32 |
33 | always @(posedge clk_i) begin
34 | if (~rst_ni) begin
35 | cnt_q <= '0;
36 | rst_q <= 1'b1;
37 | end else begin
38 | cnt_q <= cnt_d;
39 | rst_q <= rst_d;
40 | end
41 | end
42 |
43 | initial begin
44 | cnt_q = '0;
45 | rst_q = 1'b1;
46 | end
47 |
48 | endmodule
49 |
--------------------------------------------------------------------------------
/design-processing/common/dv/ticks.h:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Flavien Solt, ETH Zurich.
2 | // Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | // SPDX-License-Identifier: GPL-3.0-only
4 |
5 | /* common way to execute a testbench, sorry for the lame C-style macro */
6 |
7 | /* used by multiple designs */
8 | #include <cstdlib>
9 |
10 | // Design-specific headers.
11 | #include "testbench.h"
12 |
13 | #ifdef LEADTICKS_DESIGN /* design overrides leadticks */
14 | #define LEADTICKS LEADTICKS_DESIGN
15 | #else
16 | #define LEADTICKS 5
17 | #endif
18 |
19 | #ifdef TRAILTICKS_DESIGN /* design overrides TRAILTICKS */
20 | #define TRAILTICKS TRAILTICKS_DESIGN
21 | #else
22 | #define TRAILTICKS 1
23 | #endif
24 |
25 |
26 | #define ARIANE_FLUSH_TICKS 256
27 |
28 | #include <cassert>
29 | #include <chrono>
30 | #include <iostream>
31 |
32 | static int get_sim_length_cycles(int lead_time_cycles)
33 | {
34 | const char* simlen_env = std::getenv("SIMLEN");
35 | if(simlen_env == NULL) { std::cerr << "SIMLEN environment variable not set." << std::endl; exit(1); }
36 | int simlen = atoi(simlen_env);
37 | assert(lead_time_cycles >= 0);
38 | assert(simlen > 0);
39 | assert(simlen > lead_time_cycles);
40 | std::cout << "SIMLEN set to " << simlen << " ticks." << std::endl;
41 | return simlen - lead_time_cycles;
42 | }
43 |
44 | static const char *cl_get_tracefile(void)
45 | {
46 | #if VM_TRACE
47 | const char *trace_env = std::getenv("TRACEFILE"); // allow override for batch execution from python
48 | if(trace_env == NULL) { std::cerr << "TRACEFILE environment variable not set." << std::endl; exit(1); }
49 | return trace_env;
50 | #else
51 | return "";
52 | #endif
53 | }
54 |
55 | static inline long tb_run_ticks(Testbench *tb, int simlen, bool reset = false) {
56 | if (reset)
57 | tb->reset();
58 |
59 | auto start = std::chrono::steady_clock::now();
60 | tb->tick(simlen);
61 | auto stop = std::chrono::steady_clock::now();
62 | long ret = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count();
63 | tb->tick(TRAILTICKS);
64 | return ret;
65 | }
66 |
--------------------------------------------------------------------------------
/design-processing/common/modelsim/modelsim_build.tcl:
--------------------------------------------------------------------------------
1 | # Modelsim build script
2 | # Not suitable for designs with dependencies.
3 |
4 | # TRACE must be `notrace` or `trace` or `trace_fst`
5 |
6 | if { [info exists ::env(CASCADE_DIR)] } { set CASCADE_DIR $::env(CASCADE_DIR)} else { puts "Please set CASCADE_DIR environment variable"; exit 1 }
7 | if { [info exists ::env(MODELSIM_WORKROOT)] } { set MODELSIM_WORKROOT $::env(MODELSIM_WORKROOT)} else { puts "Please set MODELSIM_WORKROOT environment variable"; exit 1 }
8 | if { [info exists ::env(INSTRUMENTATION)] } { set INSTRUMENTATION $::env(INSTRUMENTATION)} else { puts "Please set INSTRUMENTATION environment variable"; exit 1 }
9 | if { [info exists ::env(TRACE)] } { set TRACE $::env(TRACE)} else { puts "Please set TRACE environment variable"; exit 1 }
10 | if { [info exists ::env(CASCADE_META_COMMON)] } { set CASCADE_META_COMMON $::env(CASCADE_META_COMMON)} else { puts "Please set CASCADE_META_COMMON environment variable"; exit 1 }
11 | if { [info exists ::env(SV_TOP)] } { set SV_TOP $CASCADE_DIR/$::env(SV_TOP) } else { puts "Please set SV_TOP environment variable"; exit 1 }
12 | if { [info exists ::env(TOP_SOC)] } { set TOP_SOC $::env(TOP_SOC) } else { puts "Please set TOP_SOC environment variable"; exit 1 }
13 | if { [info exists ::env(SV_MEM)] } { set SV_MEM $::env(SV_MEM) } else { puts "Please set SV_MEM environment variable"; exit 1 }
14 | if { [info exists ::env(SV_TB)] } { set SV_TB $CASCADE_DIR/$::env(SV_TB) } else { puts "Please set SV_TB environment variable"; exit 1 }
15 | if { [info exists ::env(FUZZCOREID)] } { set FUZZCOREID $::env(FUZZCOREID) } else { puts "No FUZZCOREID specified. Defaulting to 0."; set FUZZCOREID 0 }
16 | # Useful if multiple designs have the same top soc name (typically rocket and boom)
17 | if { [info exists ::env(VARIANT_ID)] } { set VARIANT_ID $::env(VARIANT_ID) } else { set VARIANT_ID "" }
18 | # In case we want to have an include directory, MODELSIM_INCDIRSTR="+incdir+my/first/incdirectory +incdir+my/second/incdirectory" for example
19 | if { [info exists ::env(MODELSIM_INCDIRSTR)] } { set MODELSIM_INCDIRSTR $::env(MODELSIM_INCDIRSTR) } else { set MODELSIM_INCDIRSTR "" }
20 | # Cover flag should be +cover or empty, or for example +cover=bcst
21 | if { [info exists ::env(MODELSIM_VLOG_COVERFLAG)] } { set MODELSIM_VLOG_COVERFLAG $::env(MODELSIM_VLOG_COVERFLAG) } else { set MODELSIM_VLOG_COVERFLAG "" }
22 |
23 | set LIB ${MODELSIM_WORKROOT}/${TOP_SOC}${VARIANT_ID}_${FUZZCOREID}/work_${INSTRUMENTATION}_${TRACE}
24 |
25 | vlog -64 -suppress 7061 -suppress 2583 -suppress 8386 -suppress 13314 -sv -work $LIB $MODELSIM_VLOG_COVERFLAG +define+RANDOMIZE_INIT=1 +define+STOP_COND=0 -ccflags '-std=c++11' $MODELSIM_INCDIRSTR -sv $CASCADE_DIR/generated/out/$INSTRUMENTATION.sv
26 |
27 | vlog -64 -suppress 7061 -suppress 2583 -suppress 8386 -suppress 13314 -sv -work $LIB $MODELSIM_VLOG_COVERFLAG -ccflags '-std=c++11' -sv $SV_TOP
28 | vlog -64 -suppress 7061 -suppress 2583 -suppress 8386 -suppress 13314 -suppress 7034 -sv -work $LIB $MODELSIM_VLOG_COVERFLAG -ccflags '-std=c++11' -sv $SV_MEM
29 | vlog -64 -suppress 7061 -suppress 2583 -suppress 8386 -suppress 13314 -sv -work $LIB $MODELSIM_VLOG_COVERFLAG -ccflags '-std=c++11' -sv $SV_TB
30 |
31 | vlog -64 -ccflags '-std=c++11' -work $LIB $MODELSIM_VLOG_COVERFLAG -dpiheader $CASCADE_META_COMMON/dv/elf.h $CASCADE_META_COMMON/dv/elfloader.cc $CASCADE_META_COMMON/dv/common_functions.cc
32 |
33 | vlog -64 -sv -work $LIB $MODELSIM_VLOG_COVERFLAG $CASCADE_META_COMMON/dv/sv/rst_gen.sv $CASCADE_META_COMMON/dv/sv/clk_rst_gen.sv $SV_TOP
34 |
35 | quit -f
36 |
--------------------------------------------------------------------------------
/design-processing/common/modelsim/modelsim_run.tcl:
--------------------------------------------------------------------------------
1 | # Modelsim script
2 |
3 | if { [info exists ::env(MODELSIM_WORKROOT)] } { set MODELSIM_WORKROOT $::env(MODELSIM_WORKROOT)} else { puts "Please set MODELSIM_WORKROOT environment variable"; exit 1 }
4 | if { [info exists ::env(INSTRUMENTATION)] } { set INSTRUMENTATION $::env(INSTRUMENTATION)} else { puts "Please set INSTRUMENTATION environment variable"; exit 1 }
5 | if { [info exists ::env(TRACE)] } { set TRACE $::env(TRACE)} else { puts "Please set TRACE environment variable"; exit 1 }
6 | if { [info exists ::env(TOP_SOC)] } { set TOP_SOC $::env(TOP_SOC) } else { puts "Please set TOP_SOC environment variable"; exit 1 }
7 | if { [info exists ::env(FUZZCOREID)] } { set FUZZCOREID $::env(FUZZCOREID) } else { puts "No FUZZCOREID specified. Defaulting to 0."; set FUZZCOREID 0 }
8 | if { [info exists ::env(VARIANT_ID)] } { set VARIANT_ID $::env(VARIANT_ID) } else { set VARIANT_ID "" }
9 | # Cover flag should be -coverage or empty
10 | if { [info exists ::env(MODELSIM_VSIM_COVERFLAG)] } { set MODELSIM_VSIM_COVERFLAG $::env(MODELSIM_VSIM_COVERFLAG) } else { set MODELSIM_VSIM_COVERFLAG "" }
11 | if { [info exists ::env(MODELSIM_VSIM_COVERPATH)] } { set MODELSIM_VSIM_COVERPATH $::env(MODELSIM_VSIM_COVERPATH) } else { set MODELSIM_VSIM_COVERPATH "" }
12 |
13 | set LIB ${MODELSIM_WORKROOT}/${TOP_SOC}${VARIANT_ID}_${FUZZCOREID}/work_${INSTRUMENTATION}_${TRACE}
14 |
15 | if { [info exists ::env(TRACEFILE)] } { set TRACEFILE $::env(TRACEFILE) } else { puts "Please set TRACEFILE environment variable"; exit 1 }
16 | if { [info exists ::env(VCD_WILDCARD)] } { set VCD_WILDCARD $::env(VCD_WILDCARD) } else { set VCD_WILDCARD /* }
17 | if { [info exists ::env(MODELSIM_NOQUIT)] } { set MODELSIM_NOQUIT $::env(MODELSIM_NOQUIT) } else { set MODELSIM_NOQUIT "0" }
18 |
19 | if { [string equal $TRACE trace_fst] } {
20 | set VOPTARGS "-voptargs=+acc"
21 | set DEBUGDBARG "-debugdb"
22 | } elseif {[string equal $TRACE trace]} {
23 | set VOPTARGS "-voptargs=-debug"
24 | set DEBUGDBARG "-debugdb"
25 | } else {
26 | set VOPTARGS ""
27 | set DEBUGDBARG ""
28 | }
29 |
30 | vsim -64 -lib $LIB $MODELSIM_VSIM_COVERFLAG $DEBUGDBARG $VOPTARGS tb_top
31 |
32 | if { [string equal $TRACE trace_fst] } {
33 | log -r /*
34 | } elseif { [string equal $TRACE trace] } {
35 | vcd file $TRACEFILE
36 | vcd add -r i_dut/*
37 | # vcd add -r i_dut/i_mem_top/*
38 | }
39 |
40 | run -a
41 |
42 | if { [string equal $TRACE trace_fst] } {
43 | add wave sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/tile_prci_domain/tile_reset_domain/tile/frontend/npc
44 | add wave \
45 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/clock \
46 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/reset \
47 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_valid \
48 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_bits_opcode \
49 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_bits_param \
50 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_bits_size \
51 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_bits_source \
52 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_bits_address \
53 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_bits_mask \
54 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_bits_corrupt \
55 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_d_ready
56 | add wave \
57 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_a_ready \
58 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_d_valid \
59 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_d_bits_size \
60 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_d_bits_source \
61 | sim:/tb_top/i_dut/i_mem_top/i_chip_top/system/bootROMDomainWrapper/bootrom/auto_in_d_bits_data
62 | }
63 |
64 | if { [string equal $MODELSIM_VSIM_COVERFLAG -coverage] } {
65 | coverage save $MODELSIM_VSIM_COVERPATH
66 | }
67 |
68 | if { !([string equal $MODELSIM_NOQUIT 1]) } {
69 | quit -f
70 | }
71 |
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/add_sim_initbegin.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script replaces initial X states with deterministic values (zeros in practice), since the initialization primitives are removed by Yosys.
6 | # This is a simplified version that detects all registers without a reset signal and adds an initial begin statement for them.
7 |
8 | import multiprocessing as mp
9 | import re
10 | import sys
11 |
12 | # sys.argv[1]: path to source cascade.sv
13 | # sys.argv[2]: path to target cascade.sv file where the initial states are randomized
14 |
15 | MODULE_REGEX = r"(module(?:\s|\n)+(?:.+?)(?:\s|\n)*(?:\(|#|import)(?:.|\n)+?endmodule)"
16 | MODULE_FF_LINE0 = r"always_ff @\((?:pos|neg)edge\s+[a-zA-Z0-9_]+\)\s*$"
17 |
18 | NUM_PARALLEL_TASKS = mp.cpu_count() - 2
19 |
20 | # @param module_definition the full module text, from `module` to `endmodule`.
21 | # @return the transformed module definition.
22 | def add_initial_blocks(module_definition: str):
23 | # tactic: cut into lines and find the succession of two line types.
24 | module_definition_lines = list(map(lambda s: s.strip(), module_definition.split('\n')))
25 | signal_names_to_initialize = []
26 | # Find the signal names to initialize
27 | for module_line_id in range(len(module_definition_lines)):
28 | # If this is a single edge sensitive register. Note the absence of `begin` keyword.
29 | if re.match(MODULE_FF_LINE0, module_definition_lines[module_line_id]):
30 | # print(module_line_id, module_definition_lines[module_line_id+1])
31 | curr_module_line_id_for_assignment = module_line_id+1
32 | # Manage cascaded ifs
33 | while re.match(r"if\s*\([\\$!a-zA-Z0-9_]+\)\s*$", module_definition_lines[curr_module_line_id_for_assignment].strip()):
34 | curr_module_line_id_for_assignment += 1
35 | curr_assignment_line = module_definition_lines[curr_module_line_id_for_assignment]
36 | if not '<=' in curr_assignment_line:
37 | print(f"WARNING: Did not find symbol `<=` in register assignment in line `{curr_assignment_line}`.")
38 | else:
39 | signal_names_to_initialize.append(list(filter(lambda s: bool(s), curr_assignment_line.split('<=')[0].split(' ')))[-1])
40 | # Initialize the signals
41 | if signal_names_to_initialize:
42 | lines_to_add = [
43 | '// Added block to randomize initial values.',
44 | '`ifdef RANDOMIZE_INIT',
45 | ' initial begin'
46 | ]
47 | for signal_name in signal_names_to_initialize:
48 | lines_to_add.append(f" {signal_name} = '0;")
49 | lines_to_add.append(' end')
50 | lines_to_add.append('`endif // RANDOMIZE_INIT')
51 | # Insert at the end of the module definition
52 | if module_definition_lines[-1].split(' ')[0].strip() != 'endmodule':
53 | raise ValueError(f"End of module line is unexpectedly `{module_definition_lines[-1]}`.")
54 | last_line = module_definition_lines[-1]
55 | module_definition_lines[-1] = lines_to_add[0]
56 | for line_to_add_id in range(1, len(lines_to_add)):
57 | module_definition_lines.append(lines_to_add[line_to_add_id])
58 | module_definition_lines.append(last_line)
59 | return '\n'.join(module_definition_lines)
60 |
61 | if __name__ == "__main__":
62 | global cascade_in_lines
63 | cascade_in_path = sys.argv[1]
64 | cascade_out_path = sys.argv[2]
65 |
66 | with open(cascade_in_path, "r") as f:
67 | cascade_in_content = f.read()
68 |
69 | # module_definitions will be a list of module definitions, each including the module and endmodule keywords
70 | module_definitions = re.findall(MODULE_REGEX, cascade_in_content, re.MULTILINE | re.DOTALL)
71 |
72 | with mp.Pool(processes=NUM_PARALLEL_TASKS) as pool:
73 | # initialized_module_definitions: list of transformed module definition strings
74 | initialized_module_definitions = list(pool.map(add_initial_blocks, module_definitions))
75 | pool.close()
76 | pool.join()
77 |
78 | with open(cascade_out_path, "w") as f:
79 | f.write('\n\n'.join(initialized_module_definitions))
80 |
--------------------------------------------------------------------------------
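
Concretely, for a module whose reset-less `always_ff @(posedge clk_i)` block drives a register `q`, the script appends before `endmodule` a block of the following shape (built from `lines_to_add` above):

    // Added block to randomize initial values.
    `ifdef RANDOMIZE_INIT
      initial begin
        q = '0;
      end
    `endif // RANDOMIZE_INIT
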
/design-processing/common/python_scripts/ascend_package_in_pickle.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Move the specified package up in the pickle file, works in place.
6 |
7 | import re
8 | import sys
9 |
10 | # sys.argv[1]: Package name
11 | # sys.argv[2]: source and target pickle Verilog file
12 |
13 | if __name__ == "__main__":
14 | pkgname = sys.argv[1]
15 | src_filename = sys.argv[2]
16 |
17 | with open(src_filename, "r") as f:
18 | content = f.read()
19 |
20 | regex_pattern = r"package\s*"+pkgname+r"(?:.|\n)+?endpackage"
21 |
22 | # Get the package texts
23 | packagetexts = re.findall(regex_pattern, content, re.DOTALL)
24 | # Remove them from the pickle
25 | content = re.sub(regex_pattern, '\n\n', content, flags=re.DOTALL)
26 |
27 | # Write them to the top of the pickle file
28 | content = '\n\n'.join(packagetexts) + content
29 |
30 | with open(src_filename, "w") as f:
31 | f.write(content)
32 |
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/ascend_packages_in_pickle.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Move the packages up in the pickle files, works in place.
6 |
7 | import re
8 | import sys
9 |
10 | # sys.argv[1]: source and target pickle Verilog file
11 |
12 | REGEX = r"package(?:.|\n)+?endpackage"
13 |
14 | if __name__ == "__main__":
15 | src_filename = sys.argv[1]
16 |
17 | with open(src_filename, "r") as f:
18 | content = f.read()
19 |
20 | # Get the package texts
21 | packagetexts = re.findall(REGEX, content, re.DOTALL)
22 | # Remove them from the pickle
23 | content = re.sub(REGEX, '\n\n', content, flags=re.DOTALL)
24 |
25 | # Write them to the top of the pickle file
26 | content = '\n\n'.join(packagetexts) + content
27 |
28 | with open(src_filename, "w") as f:
29 | f.write(content)
30 |
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/correct_too_wide_lvalues.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Corrects Yosys mistakes such as: assign { 48'hffffffffffff, \gen_special_results[0].active_format.special_results.special_res [31:16] } = info_q[5]
6 |
7 | import sys
8 | import re
9 |
10 | # sys.argv[1] the path to the input file.
11 | # sys.argv[2] the path to the output file.
12 |
13 | if __name__ == "__main__":
14 | with open(sys.argv[1], "r") as f:
15 | verilog_content = f.read()
16 | verilog_content, num_subs = re.subn(r"assign\s+\{\s*\d+'[a-zA-Z\d]+\s*,", "assign {", verilog_content, count=0, flags=re.MULTILINE)
17 | print(" Num too wide lvalues corrected: {}".format(num_subs))
18 | with open(sys.argv[2], "w") as f:
19 | f.write(verilog_content)
20 |
--------------------------------------------------------------------------------
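
Applied to the example from the header comment, the substitution drops the constant that artificially widens the left-hand side, turning

    assign { 48'hffffffffffff, \gen_special_results[0].active_format.special_results.special_res [31:16] } = info_q[5]

into

    assign { \gen_special_results[0].active_format.special_results.special_res [31:16] } = info_q[5]
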
/design-processing/common/python_scripts/divide_concat_into_subconcats.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Divides a large concatenation `assign A = {...};` into smaller, chained sub-concatenations.
6 | # This script should be followed by compress_right_side_concat.
7 |
8 | from concatbitwidth import get_bracket_bit_width
9 | import re
10 | import sys
11 | import tqdm
12 |
13 | # sys.argv[1]: source Verilog file
14 | # sys.argv[2]: target Verilog file
15 |
16 | FIND_REGEX = r"\b(assign\s+\w+\s*= )\{([^\n]+)\};"
17 | MAX_TERMS_IN_BRACKET = 1000
18 | TYPE = "logic"
19 | SUFFIX = "inst"
20 | VAR_BASE_NAME = "divide_concat_into_subconcats"
21 |
22 | def reduce_bracket(match):
23 | global num_brackets_treated
24 |
25 | assignment_start = match.group(1)
26 | bracket_content = match.group(2)
27 |
28 | splitted = list(map(lambda x: x.strip(), bracket_content.split(',')))
29 | tot_num_elems = len(splitted)
30 |
31 | if tot_num_elems < MAX_TERMS_IN_BRACKET:
32 | return match.group(0)
33 |
34 | var_name = "{}_{}".format(VAR_BASE_NAME, num_brackets_treated)
35 | var_name_with_suffix = "{}_{}".format(var_name, SUFFIX)
36 | num_brackets_treated += 1
37 |
38 | num_macroterms_floor = tot_num_elems // MAX_TERMS_IN_BRACKET
39 | is_there_remainder = bool(tot_num_elems % MAX_TERMS_IN_BRACKET)
40 |
41 | ret_groups = []
42 |
43 | # Declare the intermediate wires
44 |
45 | ret_groups.append(" {} [{}-1:0] {};".format(TYPE, tot_num_elems, var_name))
46 |
47 | for macroterm_id in range(num_macroterms_floor):
48 | # If this is the last and there is no remainder
49 | if macroterm_id == num_macroterms_floor-1 and not is_there_remainder:
50 | ret_groups.append(" {} [{}-1:0] {}_{};".format(TYPE, MAX_TERMS_IN_BRACKET, var_name_with_suffix, macroterm_id))
51 | break
52 | ret_groups.append(" {} [{}-1:0] {}_{};".format(TYPE, MAX_TERMS_IN_BRACKET+1, var_name_with_suffix, macroterm_id))
53 |
54 | if is_there_remainder:
55 | ret_groups.append(" {} [{}-1:0] {}_{};".format(TYPE, tot_num_elems % MAX_TERMS_IN_BRACKET, var_name_with_suffix, num_macroterms_floor))
56 |
57 | ret_groups.append(" assign {} = {{ {} }};".format(var_name, ', '.join([f'{var_name_with_suffix}_{i}' for i in range(num_macroterms_floor + 1 if is_there_remainder else num_macroterms_floor)])))
58 |
59 | for macroterm_id in range(num_macroterms_floor):
60 | macrobracket_content = ' , '.join(splitted[macroterm_id*MAX_TERMS_IN_BRACKET:(macroterm_id+1)*MAX_TERMS_IN_BRACKET])
61 | if macroterm_id < num_macroterms_floor-1 or is_there_remainder:
62 | ret_groups.append(" assign {}_{} = ".format(var_name_with_suffix, macroterm_id) + "{ " + macrobracket_content + " , {}_{}".format(var_name_with_suffix, macroterm_id+1) + " };")
63 | else:
64 | ret_groups.append(" assign {}_{} = ".format(var_name_with_suffix, macroterm_id) + "{ " + macrobracket_content + " };")
65 |
66 | if is_there_remainder:
67 | remaining_terms = splitted[num_macroterms_floor*MAX_TERMS_IN_BRACKET:]
68 | macrobracket_content = ' , '.join(remaining_terms)
69 | ret_groups.append(" assign {}_{} = ".format(var_name_with_suffix, num_macroterms_floor) + "{ " + macrobracket_content + " };")
70 |
71 | ret_groups.append(" {}{};".format(assignment_start, var_name))
72 |
73 | return '\n'.join(ret_groups)
74 |
75 | if __name__ == "__main__":
76 | global num_brackets_treated
77 | num_brackets_treated = 0
78 | src_filename = sys.argv[1]
79 | tgt_filename = sys.argv[2]
80 |
81 | with open(src_filename, "r") as f:
82 | content = f.read()
83 | content_lines = content.split('\n')
84 | new_lines=[]
85 | print('%d lines' % len(content_lines))
86 |
87 | for oldline in tqdm.tqdm(content_lines):
88 | newline = re.sub(FIND_REGEX, reduce_bracket, oldline)
89 | new_lines.append(newline)
90 |
91 | with open(tgt_filename, "w") as f:
92 | f.write('\n'.join(new_lines))
93 | print('invocations: %d' % num_brackets_treated)
94 |
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/expand_left_operand.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Expands an expression `assign A = {...} [&|^] B` by moving the concatenation into a new signal.
6 | # This script should be followed by compress_right_side_concat.
7 |
8 | from concatbitwidth import get_bracket_bit_width
9 | import re
10 | import sys
11 | import tqdm
12 |
13 | # sys.argv[1]: source Verilog file
14 | # sys.argv[2]: target Verilog file
15 |
16 | FIND_REGEX = r"\b(assign\s+(\w+)\s*=\s*\{([^\n]+)\}\s*([&|\^])\s*([^;]+));"
17 | MAX_TERMS_IN_BRACKET = 1000
18 | TYPE = "logic"
19 | SUFFIX = "inst"
20 | VAR_BASE_NAME = "expand_left_operand"
21 |
22 | def reduce_bracket(match):
23 | global num_brackets_treated
24 |
25 | assignment_destination = match.group(2)
26 | bracket_content = match.group(3)
27 | operator = match.group(4)
28 | right_side = match.group(5)
29 |
30 | splitted = list(map(lambda x: x.strip(), bracket_content.split(',')))
31 | tot_num_elems = len(splitted)
32 |
33 | if tot_num_elems < MAX_TERMS_IN_BRACKET:
34 | return match.group(0)
35 |
36 | bit_width = get_bracket_bit_width(bracket_content, content_lines, curr_line_id)
37 | if (bit_width < 1):
38 | raise ValueError("Could not determine bit width for bracket content {}.".format(bracket_content))
39 |
40 | var_name = "{}_{}".format(VAR_BASE_NAME, num_brackets_treated)
41 | var_name_with_suffix = "{}_{}".format(var_name, SUFFIX)
42 | num_brackets_treated += 1
43 |
44 | ret_lines = []
45 |
46 | # First, declare the new wire.
47 | ret_lines.append(" {} [{}:0] {};".format(TYPE, bit_width-1, var_name_with_suffix))
48 |
49 | # Second, assign the bracket value to the newly created wire.
50 | ret_lines.append(" assign {} = {{{}}};".format(var_name_with_suffix, bracket_content))
51 |
52 | # Third, modify the original wire.
53 | ret_lines.append(" assign {} = {} {} {};".format(assignment_destination, var_name_with_suffix, operator, right_side))
54 |
55 | return '\n'.join(ret_lines)
56 |
57 |
58 | if __name__ == "__main__":
59 | global num_brackets_treated
60 | global content_lines
61 | global curr_line_id
62 | num_brackets_treated = 0
63 | src_filename = sys.argv[1]
64 | tgt_filename = sys.argv[2]
65 |
66 | with open(src_filename, "r") as f:
67 | content = f.read()
68 | content_lines = content.split('\n')
69 | new_lines=[]
70 | print('%d lines' % len(content_lines))
71 |
72 | n=0
73 | for oldline_id in tqdm.trange(len(content_lines)):
74 | curr_line_id = oldline_id
75 | newline = re.sub(FIND_REGEX, reduce_bracket, content_lines[oldline_id])
76 | new_lines.append(newline)
77 | n+=1
78 |
79 | with open(tgt_filename, "w") as f:
80 | f.write('\n'.join(new_lines))
81 | print('invocations: %d' % num_brackets_treated)
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/expand_reduction_operand.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Expands an expression `assign A = [&|] {...}` by moving the concatenation into a new signal.
6 | # This script should be followed by compress_right_side_concat.
7 |
8 | import re
9 | import sys
10 | import tqdm
11 |
12 | # sys.argv[1]: source Verilog file
13 | # sys.argv[2]: target Verilog file
14 |
15 | FIND_REGEX = r"\b(assign\s+(\w+)\s*=\s*([&|])\s*\{([^\n]+)\});"
16 | MAX_TERMS_IN_BRACKET = 1000
17 | TYPE = "logic"
18 | SUFFIX = "inst"
19 | VAR_BASE_NAME = "expand_reduction_operand"
20 |
21 | def reduce_bracket(match):
22 | global num_brackets_treated
23 |
24 | assignment_destination = match.group(2)
25 | reduction_operator = match.group(3)
26 | bracket_content = match.group(4)
27 |
28 | splitted = list(map(lambda x: x.strip(), bracket_content.split(',')))
29 | tot_num_elems = len(splitted)
30 |
31 | if tot_num_elems < MAX_TERMS_IN_BRACKET:
32 | return match.group(0)
33 |
34 | var_name = "{}_{}".format(VAR_BASE_NAME, num_brackets_treated)
35 | var_name_with_suffix = "{}_{}".format(var_name, SUFFIX)
36 | num_brackets_treated += 1
37 |
38 | ret_lines = []
39 |
40 | # First, declare the new wire.
41 | ret_lines.append(" {} [{}-1:0] {};".format(TYPE, tot_num_elems, var_name_with_suffix))
42 |
43 | # Second, assign the bracket value to the newly created wire.
44 | ret_lines.append(" assign {} = {{{}}};".format(var_name_with_suffix, bracket_content))
45 |
46 | # Third, modify the original wire.
47 | ret_lines.append(" assign {} = {} {};".format(assignment_destination, reduction_operator, var_name_with_suffix))
48 |
49 | return '\n'.join(ret_lines)
50 |
51 |
52 | if __name__ == "__main__":
53 | global num_brackets_treated
54 | num_brackets_treated = 0
55 | src_filename = sys.argv[1]
56 | tgt_filename = sys.argv[2]
57 |
58 | with open(src_filename, "r") as f:
59 | content = f.read()
60 | content_lines = content.split('\n')
61 | new_lines=[]
62 | print('%d lines' % len(content_lines))
63 |
64 | n=0
65 | for oldline in tqdm.tqdm(content_lines):
66 | newline = re.sub(FIND_REGEX, reduce_bracket, oldline)
67 | new_lines.append(newline)
68 | n+=1
69 |
70 | with open(tgt_filename, "w") as f:
71 | f.write('\n'.join(new_lines))
72 | print('invocations: %d' % num_brackets_treated)
73 |
--------------------------------------------------------------------------------
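
For instance, with the default MAX_TERMS_IN_BRACKET of 1000, a reduction over a 1500-term concatenation such as `assign a = &{x0, x1, ..., x1499};` is rewritten by `reduce_bracket` into:

    logic [1500-1:0] expand_reduction_operand_0_inst;
    assign expand_reduction_operand_0_inst = {x0, x1, ..., x1499};
    assign a = & expand_reduction_operand_0_inst;
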
/design-processing/common/python_scripts/expand_right_operand.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Expands an expression `assign A = B [&|^] {...}` by moving the concatenation into a new signal.
6 | # This script should be followed by compress_right_side_concat.
7 |
8 | from concatbitwidth import get_bracket_bit_width
9 | import re
10 | import sys
11 | import tqdm
12 |
13 | # sys.argv[1]: source Verilog file
14 | # sys.argv[2]: target Verilog file
15 |
16 | FIND_REGEX = r"\b(assign\s+(\w+)\s*=\s*([\w',\{\} ]+)\s*([&|\^])\s*\{([^\n]+)\});"
17 | MAX_TERMS_IN_BRACKET = 1000
18 | TYPE = "logic"
19 | SUFFIX = "inst"
20 | VAR_BASE_NAME = "expand_right_operand"
21 |
22 | def reduce_bracket(match):
23 | global num_brackets_treated
24 |
25 | assignment_destination = match.group(2)
26 | left_side = match.group(3)
27 | operator = match.group(4)
28 | bracket_content = match.group(5)
29 |
30 | splitted = list(map(lambda x: x.strip(), bracket_content.split(',')))
31 | tot_num_elems = len(splitted)
32 |
33 | if tot_num_elems < MAX_TERMS_IN_BRACKET:
34 | return match.group(0)
35 |
36 | bit_width = get_bracket_bit_width(bracket_content, content_lines, curr_line_id)
37 | if (bit_width < 1):
38 | raise ValueError("Could not determine bit width for bracket content {}.".format(bracket_content))
39 |
40 | var_name = "{}_{}".format(VAR_BASE_NAME, num_brackets_treated)
41 | var_name_with_suffix = "{}_{}".format(var_name, SUFFIX)
42 | num_brackets_treated += 1
43 |
44 | ret_lines = []
45 |
46 | # First, declare the new wire.
47 | ret_lines.append(" {} [{}:0] {};".format(TYPE, bit_width-1, var_name_with_suffix))
48 |
49 | # Second, assign the bracket value to the newly created wire.
50 | ret_lines.append(" assign {} = {{{}}};".format(var_name_with_suffix, bracket_content))
51 |
52 | # Third, modify the original wire.
53 | ret_lines.append(" assign {} = {} {} {};".format(assignment_destination, left_side, operator, var_name_with_suffix))
54 |
55 | return '\n'.join(ret_lines)
56 |
57 |
58 | if __name__ == "__main__":
59 | global num_brackets_treated
60 | global content_lines
61 | global curr_line_id
62 | num_brackets_treated = 0
63 | src_filename = sys.argv[1]
64 | tgt_filename = sys.argv[2]
65 |
66 | with open(src_filename, "r") as f:
67 | content = f.read()
68 | content_lines = content.split('\n')
69 | new_lines=[]
70 | print('%d lines' % len(content_lines))
71 |
72 | n=0
73 | for oldline_id in tqdm.trange(len(content_lines)):
74 | curr_line_id = oldline_id
75 | newline = re.sub(FIND_REGEX, reduce_bracket, content_lines[oldline_id])
76 | new_lines.append(newline)
77 | n+=1
78 |
79 | with open(tgt_filename, "w") as f:
80 | f.write('\n'.join(new_lines))
81 | print('invocations: %d' % num_brackets_treated)
82 |
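A minimal sketch of what FIND_REGEX captures (the signal names are hypothetical); the groups split the assignment into destination, left operand, operator, and concatenation body:

    import re

    FIND_REGEX = r"\b(assign\s+(\w+)\s*=\s*([\w',\{\} ]+)\s*([&|\^])\s*\{([^\n]+)\});"
    sample = "assign foo = bar & {a, b, c};"
    m = re.search(FIND_REGEX, sample)
    # group 2: destination, group 3: left operand, group 4: operator, group 5: concat content
    print(m.group(2), m.group(3).strip(), m.group(4), m.group(5))  # foo bar & a, b, c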
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/gen_corefiles.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # sys.argv[1]: source template core file
6 | # sys.argv[2]: destination template core file
7 |
8 | import os
9 | import re
10 | import sys
11 |
12 | if __name__ == "__main__":
13 | src_filename = sys.argv[1]
14 | tgt_filename = sys.argv[2]
15 |
16 | with open(src_filename, "r") as f:
17 | content = f.read()
18 |
19 | # Find the occurrences of a dollar sign and alpha-numeric characters
20 | pattern = r'\$([A-Za-z_]+[A-Za-z0-9_]*)'
21 | matches = re.findall(pattern, content)
22 |
23 | # Replace occurrences with the actual value
24 | for match in matches:
25 | env_var_name = match
26 | env_var_value = os.environ.get(env_var_name, '')
27 | content = content.replace('$' + env_var_name, env_var_value)
28 |
29 | with open(tgt_filename, "w") as f:
30 | f.write(content)
31 |
32 |
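A minimal sketch of the substitution above on an in-memory template (the environment variable name and value are hypothetical):

    import os
    import re

    os.environ["CASCADE_DESIGN_DIR"] = "/tmp/demo"  # hypothetical variable
    template = "files: [$CASCADE_DESIGN_DIR/run.core]"
    for var in re.findall(r'\$([A-Za-z_]+[A-Za-z0-9_]*)', template):
        template = template.replace('$' + var, os.environ.get(var, ''))
    print(template)  # files: [/tmp/demo/run.core]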
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/move_verilator_public_attrs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Modifies the Yosys output attributes `(* verilator_public = "1" *)` into inline /* verilator public */ attributes, understandable by Verilator.
6 | # Assumes that all `(* verilator_public = "1" *)` attributes are followed by a signal declaration in the next line.
7 |
8 | import re
9 | import sys
10 | import tqdm
11 |
12 | # sys.argv[1]: source Verilog file
13 | # sys.argv[2]: target Verilog file
14 |
15 | REGEX_VERILATOR_PUBLIC_YOSYSATTR = r"^\s*\(\* verilator_public = \"1\" \*\)\s*$"
16 | REGEX_SIGDECL = r"^([\s|\n]*(?:(?:input\s*|output\s*)?logic|(?:input\s*|output\s*)?reg|(?:input\s*|output\s*)?wire|(?:input|output)\s*)\s+[^\s]+\s*;)\s*$"
17 |
18 | if __name__ == "__main__":
19 | src_filename = sys.argv[1]
20 | tgt_filename = sys.argv[2]
21 |
22 | num_invocations = 0
23 |
24 | with open(src_filename, "r") as f:
25 | content = f.read()
26 | content_lines = content.split('\n')
27 | print("{} lines".format(len(content_lines)))
28 |
29 | for line_id in tqdm.trange(len(content_lines)):
30 | match_object = re.match(REGEX_VERILATOR_PUBLIC_YOSYSATTR, content_lines[line_id])
31 | if match_object:
32 | num_invocations += 1
33 | # Assume that the next line is a signal declaration.
34 | match_sigdecl = re.match(REGEX_SIGDECL, content_lines[line_id+1])
35 | assert match_sigdecl is not None
36 | content_lines[line_id+1] = match_sigdecl.group(1)[:-1]+" /* verilator public */;"
37 |
38 | with open(tgt_filename, "w") as f:
39 | f.write('\n'.join(content_lines))
40 | print("invocations: {}".format(num_invocations))
41 |
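A minimal sketch of the rewrite (the signal name is hypothetical): REGEX_SIGDECL captures the declaration up to and including the semicolon, which is then stripped so the inline attribute can be appended:

    import re

    REGEX_SIGDECL = r"^([\s|\n]*(?:(?:input\s*|output\s*)?logic|(?:input\s*|output\s*)?reg|(?:input\s*|output\s*)?wire|(?:input|output)\s*)\s+[^\s]+\s*;)\s*$"
    decl = "  logic foo_q;"
    m = re.match(REGEX_SIGDECL, decl)
    print(m.group(1)[:-1] + " /* verilator public */;")  # "  logic foo_q /* verilator public */;"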
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/remove_attributes.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
 5 | # Remove attributes (i.e., (* ... *) annotations) from the given SystemVerilog file.
6 |
7 | import re
8 | import sys
9 |
10 | # sys.argv[1]: source file path.
11 | # sys.argv[2]: target file path (will be a copy of the source file, but without the attributes).
12 |
13 | ATTRIBUTE_REGEX = r"\(\*[^\n]+\*\)"
14 |
15 | if __name__ == "__main__":
16 | if len(sys.argv) < 3:
17 | print("Takes 2 arguments: the Verilog source file path, the Verilog target file path.")
18 |
19 | with open(sys.argv[1], "r") as f:
20 | verilog_content = f.read()
21 |
22 | verilog_content, num_subs = re.subn(ATTRIBUTE_REGEX, "", verilog_content)
23 | print(" Removed {} attributes.".format(num_subs))
24 |
25 | with open(sys.argv[2], "w") as f:
26 | f.write(verilog_content)
27 |
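A one-line demonstration of ATTRIBUTE_REGEX on a hypothetical declaration:

    import re

    print(re.subn(r"\(\*[^\n]+\*\)", "", '(* keep = "1" *) wire foo;'))  # (' wire foo;', 1)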
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/remove_displays.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
 5 | # Replaces lines containing $display or $finish calls with empty `begin end` statements.
6 |
7 | import sys
8 | import re
9 |
10 | # sys.argv[1] the path to the input file.
11 | # sys.argv[2] the path to the output file.
12 |
13 | if __name__ == "__main__":
14 | with open(sys.argv[1], "r") as f:
15 | verilog_content = f.read()
16 | verilog_lines = verilog_content.splitlines()
17 | # Find all lines ids that contain $display
18 | display_line_ids = []
19 | for line_id in range(len(verilog_lines)):
20 | if "$display" in verilog_lines[line_id] or "$finish" in verilog_lines[line_id]:
21 | display_line_ids.append(line_id)
22 | # Replace them with `begin end`
23 | for line_id in display_line_ids:
24 | verilog_lines[line_id] = " begin end"
25 | # Rebuild the verilog content
26 | verilog_content = "\n".join(verilog_lines)
27 | with open(sys.argv[2], "w") as f:
28 | f.write(verilog_content)
29 |
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/remove_modules.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Remove modules from given SystemVerilog file.
6 |
7 | import re
8 | import sys
9 |
10 | # sys.argv[1]: source file path.
11 | # sys.argv[2]: target file path (will be a copy of the source file, but without the specified modules).
12 | # sys.argv[3]: names of the top modules to remove.
13 |
14 | if __name__ == "__main__":
15 | if len(sys.argv) < 4:
16 | print("Takes 3 arguments: the Verilog source file path, the Verilog target file path and a space-separated list of entities to remove.")
17 |
18 | with open(sys.argv[1], "r") as f:
19 | verilog_content = f.read()
20 |
21 | for module_name in sys.argv[3:]:
22 | verilog_content, num_subs = re.subn("module(\s|\n)+{}(\s|\n)*(\(|#|import)(.|\n)+?endmodule[^\n]*\n".format(module_name), "\n", verilog_content, flags=re.MULTILINE|re.DOTALL) # Weakness: does not ignore comments.
23 | print(" Removed {} occurrences of module {}.".format(num_subs, module_name))
24 |
25 | with open(sys.argv[2], "w") as f:
26 | f.write(verilog_content)
27 |
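A minimal sketch of the module-removal regex (the module names are hypothetical); the non-greedy body match stops at the first `endmodule`, so nested modules are not supported:

    import re

    sample = "module foo (\n  input a\n);\nendmodule : foo\nmodule bar ();\nendmodule\n"
    regex = "module(\\s|\\n)+{}(\\s|\\n)*(\\(|#|import)(.|\\n)+?endmodule[^\\n]*\\n".format("foo")
    out, n = re.subn(regex, "\n", sample, flags=re.MULTILINE | re.DOTALL)
    print(n)    # 1
    print(out)  # only module bar remains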
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/remove_pragma_translate_off.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Remove //pragma translate_off/on sections.
6 |
7 | import re
8 | import sys
9 |
10 | # sys.argv[1]: source file path.
11 | # sys.argv[2]: target file path (will be a copy of the source file, but without the pragma translate_off/on sections).
12 |
13 | REGEX = r'//\s*pragma\s+translate_off(?:.|\n)+?//\s*pragma\s+translate_on'
14 |
15 | if __name__ == "__main__":
16 | if len(sys.argv) < 3:
17 | print("Takes 2 arguments: the Verilog source file path, the Verilog target file path.")
18 |
19 | with open(sys.argv[1], "r") as f:
20 | verilog_content = f.read()
21 |
22 | verilog_content, num_subs = re.subn(REGEX, "\n", verilog_content, flags=re.MULTILINE|re.DOTALL)
23 | print(" Removed {} occurrences of pragma translate on/off.".format(num_subs))
24 |
25 | with open(sys.argv[2], "w") as f:
26 | f.write(verilog_content)
27 |
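A minimal sketch of the pragma regex on a hypothetical snippet; the non-greedy match removes everything between the matching off/on markers:

    import re

    REGEX = r'//\s*pragma\s+translate_off(?:.|\n)+?//\s*pragma\s+translate_on'
    sample = 'a;\n// pragma translate_off\n$display("dbg");\n// pragma translate_on\nb;'
    print(re.subn(REGEX, "\n", sample))  # ('a;\n\n\nb;', 1)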
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/remove_single_module.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Remove a single module declaration from given SystemVerilog file.
6 |
7 | import re
8 | import sys
9 |
10 | # sys.argv[1]: source file path.
11 | # sys.argv[2]: target file path (will be a copy of the source file, but without one occurrence of the specified module).
12 | # sys.argv[3]: name of the top module to remove.
13 | # sys.argv[4]: number of expected occurrences of the module.
14 |
15 | if __name__ == "__main__":
16 | if len(sys.argv) < 5:
17 | print("Takes 4 arguments: the Verilog source file path, the Verilog target file path, the module name to remove and the number of expected module declarations.")
18 |
19 | with open(sys.argv[1], "r") as f:
20 | verilog_content = f.read()
21 |
22 | module_name = sys.argv[3]
23 | num_expected_occurrences = int(sys.argv[4])
24 | num_found_occurrences = len(re.findall("module(\s|\n)+{}(\s|\n)*(\(|#|import)(.|\n)+?endmodule[^\n]*\n".format(module_name), verilog_content))
25 | assert num_found_occurrences == num_expected_occurrences, f"Found `{num_found_occurrences}` occurrences of declarations of module `{sys.argv[3]}`, expected `{num_expected_occurrences}`."
26 | # Remove the first occurrence of the module declaration
27 | verilog_content, num_subs = re.subn("module(\s|\n)+{}(\s|\n)*(\(|#|import)(.|\n)+?endmodule[^\n]*\n".format(module_name), "\n", verilog_content, 1, flags=re.MULTILINE|re.DOTALL) # Weakness: does not ignore comments.
28 | print(" Removed {}/{} occurrence of module {}.".format(num_subs, num_expected_occurrences, module_name))
29 |
30 | with open(sys.argv[2], "w") as f:
31 | f.write(verilog_content)
32 |
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/remove_unused_modules.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Remove modules from given SystemVerilog file.
6 |
7 | import multiprocessing as mp
8 | import re
9 | import sys
10 |
11 | num_workers = mp.cpu_count()
12 |
13 | # sys.argv[1]: source file path.
14 | # sys.argv[2]: target file path (will be a copy of the source file, but without the unused modules).
15 | # sys.argv[3...]: Module names to not remove.
16 |
17 | # MODULE_DEF_REGEX = r'^module\s+([a-zA-Z0-9_]+)\s*(?:#|\(|import)'
18 | MODULE_DEF_REGEX = r'^module(?:\s|\n)+([a-zA-Z0-9_]+)(?:\s|\n)*(?:#|\(|import)'
19 | # Return True iff the module is never referenced except in its own definition and in comments.
20 | def is_single_ref(modulename):
21 | references_to_m = re.findall(r'^.*\b'+modulename+r'\b', verilog_content, re.MULTILINE)
22 | references_to_m = list(filter(lambda s: '//' not in s, references_to_m))
23 | # print(f"References to {modulename}: {references_to_m}")
24 | return len(references_to_m) == 1
25 |
26 | if __name__ == "__main__":
27 | global verilog_content
28 | assert len(sys.argv) >= 3, f"Takes at least 2 arguments: the Verilog source file path, the Verilog target file path. The rest of the arguments are modules that must not be removed. Got {len(sys.argv)-1} arguments."
29 |
30 | with open(sys.argv[1], "r") as f:
31 | verilog_content = f.read()
32 |
33 | if len(sys.argv) >= 3:
34 | modules_to_keep = sys.argv[3:]
35 | else:
36 | modules_to_keep = []
37 |
38 | num_removed_modules = 0
39 | MODULE_DEF_REGEX = re.compile(MODULE_DEF_REGEX, re.MULTILINE)
40 | all_modulenames = re.findall(MODULE_DEF_REGEX, verilog_content)
41 | num_tot_modules = len(all_modulenames)
42 |
43 | # Check that the modules to keep are present initially
44 | for module_to_keep in modules_to_keep:
45 | if module_to_keep not in all_modulenames:
46 | print(f"Failed to find definition of module to keep: `{module_to_keep}`")
47 |
48 | all_modulenames = list(filter(lambda s: s not in modules_to_keep, all_modulenames))
49 | # Iterate until no unused modules remain
50 | do_continue = True
51 |
52 | while do_continue:
53 | do_continue = False
54 |
55 | # Find references in parallel
56 | print('Finding references to modules...')
57 | with mp.Pool(num_workers) as p:
58 | remove_modules = p.map(is_single_ref, all_modulenames)
59 | print(' Done finding references to modules.')
60 |
61 | newly_removed_modulenames = []
62 | for moduleid, modulename in enumerate(all_modulenames):
63 | if not remove_modules[moduleid]:
64 | continue
65 | do_continue = True
66 | num_removed_modules += 1
67 | newly_removed_modulenames.append(modulename)
68 | # references_to_m = re.findall(r'^.*\b'+modulename+r'\b', verilog_content, re.MULTILINE)
69 | # references_to_m = list(filter(lambda s: '//' not in s, references_to_m))
70 | module_def_regex = r'^module\s+'+modulename+r'\s*(?:#|\()(?:.|\n)+?\n\s*endmodule(\s*:\s*[a-zA-Z0-9_]*)?'
71 | # # # Check that we find the def
72 | # # # assert re.search(module_def_regex, verilog_content, re.DOTALL | re.MULTILINE)
73 | verilog_content, count = re.subn(module_def_regex, '\n\n', verilog_content, flags = re.DOTALL | re.MULTILINE)
74 | print(f" Removed module {modulename} ({count} occurrence(s)).")
75 | for modulename in newly_removed_modulenames:
76 | all_modulenames.remove(modulename)
77 |
78 | print(f"Removed {num_removed_modules}/{num_tot_modules} module(s).")
79 |
80 | with open(sys.argv[2], "w") as f:
81 | f.write(verilog_content)
82 |
--------------------------------------------------------------------------------
/design-processing/common/python_scripts/simplify_yosysout.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Takes a Yosys output with paramod module names, and simplifies these module names.
6 |
7 | import hashlib
8 | import multiprocessing as mp
9 | import re
10 | import sys
11 |
12 | num_workers = mp.cpu_count()
13 |
14 | MODULE_DECL_REGEX = r"\\\$paramod\$([^ ]+) "
15 | BACKARRAY_DECL_REGEX = r"\\([a-zA-Z0-9_]+)\[(\d+)\]([a-zA-Z0-9_.]*)"
16 |
17 | UGLY_MODULENAME_MAX_LENGTH = 30
18 |
19 | # sys.argv[1]: source file path.
20 | # sys.argv[2]: target file path.
21 |
22 | module_hash_correspondances = dict()
23 |
24 | # WARNING: the lines generated into module_hash_correspondances_debugheaderlines may be used by subsequent scripts. Modify their format with care.
25 | module_hash_correspondances_debugheaderlines = []
26 |
27 | def simplify_module_name(modulename_ugly):
28 | global module_hash_correspondances_debugheaderlines
29 | # Remove all backslashes, dollar and equal signs
30 | candidate = modulename_ugly.replace('$', '').replace('\\', '').replace('=', '')
31 | # If the name is still too long, we replace it with a short hash
32 | if len(candidate) > UGLY_MODULENAME_MAX_LENGTH:
33 | if candidate not in module_hash_correspondances:
34 | m = hashlib.sha256()
35 | m.update(candidate.encode('ascii'))
36 | candidate_hash = f"simplif_{m.hexdigest()[:20]}"
37 | assert candidate_hash not in module_hash_correspondances.values(), "Unlucky clash in hashes"
38 | module_hash_correspondances[candidate] = candidate_hash
39 | module_hash_correspondances_debugheaderlines.append(f"// {candidate.ljust(UGLY_MODULENAME_MAX_LENGTH)}: {module_hash_correspondances[candidate]}")
40 | return module_hash_correspondances[candidate]
41 | else:
42 | return candidate
43 |
44 | def simplify_backarray(backarray_name, backarray_index, postarr):
45 | print('triple', backarray_name, backarray_index, postarr)
46 | return "{}__{}_{}".format(backarray_name, backarray_index, postarr.replace('.', '_DOT_'))
47 |
48 | if __name__ == "__main__":
49 | if len(sys.argv) < 3:
50 | print("Takes 2 arguments: the Verilog source file path, the Verilog target file path.")
51 |
52 | with open(sys.argv[1], "r") as f:
53 | verilog_content = f.read()
54 |
55 | #####################
56 | # Module names
57 | #####################
58 | matches = re.findall(MODULE_DECL_REGEX, verilog_content, re.IGNORECASE)
59 |
60 | for modulesuffix in matches:
61 | simplified_modulename = 'paramod'+simplify_module_name(modulesuffix)
62 | verilog_content = verilog_content.replace('\$paramod$'+modulesuffix, simplified_modulename)
63 |
64 | #####################
65 | # Arrays preceded by a backslash
66 | #####################
67 | matches = re.findall(BACKARRAY_DECL_REGEX, verilog_content, re.IGNORECASE)
68 |
69 | for arrname, arrindex, postarr in matches:
70 | simplified_backarray = simplify_backarray(arrname, arrindex, postarr)
71 | verilog_content = verilog_content.replace('\\'+arrname+'['+arrindex+']'+postarr, simplified_backarray)
72 |
73 | if module_hash_correspondances_debugheaderlines:
74 | verilog_content = '// Simplified module names\n' + '\n'.join(module_hash_correspondances_debugheaderlines) + '\n\n' + verilog_content
75 |
76 | with open(sys.argv[2], "w") as f:
77 | f.write(verilog_content)
78 |
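A minimal sketch of the name-shortening logic (the paramod name is hypothetical); names longer than UGLY_MODULENAME_MAX_LENGTH are replaced by a stable 20-hex-digit SHA-256 prefix:

    import hashlib

    ugly = "$paramod$b53049a9e72463f3d65802b2b714d84417b4db51$fpnew_top"  # hypothetical
    candidate = ugly.replace('$', '').replace('\\', '').replace('=', '')
    if len(candidate) > 30:  # UGLY_MODULENAME_MAX_LENGTH
        print("simplif_" + hashlib.sha256(candidate.encode('ascii')).hexdigest()[:20])
    else:
        print(candidate)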
--------------------------------------------------------------------------------
/design-processing/common/src/sram_mem.sv:
--------------------------------------------------------------------------------
1 | // Copyright 2023 Flavien Solt, ETH Zurich.
2 | // Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | // SPDX-License-Identifier: GPL-3.0-only
4 |
5 | module sram_mem #(
6 | parameter int Width = 32, // bit
7 | parameter int Depth = 1 << 15,
8 |
9 | parameter bit PreloadELF = 1,
10 | parameter logic [63:0] RelocateRequestUp = '0,
11 |
12 | // Derived parameters.
13 | localparam int WidthBytes = Width >> 3,
14 | localparam int Aw = $clog2(Depth),
15 | localparam bit [Aw-1:0] AddrMask = {Aw{1'b1}}
16 | ) (
17 | input logic clk_i,
18 | input logic rst_ni,
19 |
20 | input logic req_i,
21 | input logic write_i,
22 | input logic [Aw-1:0] addr_i,
23 | input logic [Width-1:0] wdata_i,
24 | input logic [Width-1:0] wmask_i,
25 | output logic [Width-1:0] rdata_o
26 | );
27 | logic [Width-1:0] mem [bit [31:0]];
28 |
29 | logic [Width-1:0] dbg_addr_masked, dbg_relocated;
30 | assign dbg_addr_masked = AddrMask & (RelocateRequestUp | addr_i);
31 | assign dbg_relocated = RelocateRequestUp | addr_i;
32 |
33 | //
34 | // DPI
35 | //
36 | int sections [bit [31:0]];
37 |
38 | import "DPI-C" function read_elf(input string filename);
39 | import "DPI-C" function byte get_section(output longint address, output longint len);
40 | import "DPI-C" context function byte read_section(input longint address, inout byte buffer[]);
41 | import "DPI-C" function string Get_SRAM_ELF_object_filename();
42 |
43 | localparam int unsigned PreloadBufferSize = 100000000;
44 | initial begin // Load the binary into memory.
45 | if (PreloadELF) begin
46 | automatic string binary = Get_SRAM_ELF_object_filename();
47 | longint section_addr, section_len;
48 | byte buffer[PreloadBufferSize];
49 | $display("Loading RAM ELF: %s", binary);
50 | void'(read_elf(binary));
51 | while (get_section(section_addr, section_len)) begin
52 | automatic int num_words = (section_len+(WidthBytes-1))/WidthBytes;
53 | sections[section_addr/WidthBytes] = num_words;
54 | // buffer = new [num_words*WidthBytes];
55 | assert(num_words*WidthBytes <= PreloadBufferSize);
56 | void'(read_section(section_addr, buffer));
57 |
58 | for (int i = 0; i < num_words; i++) begin
59 | automatic logic [WidthBytes-1:0][7:0] word = '0;
60 | for (int j = 0; j < WidthBytes; j++) begin
61 | word[j] = buffer[i*WidthBytes+j];
62 | if ($isunknown(word[j]))
63 | $display("WARNING: Some ELF word is unknown.");
64 | end
65 | if (|word)
66 |           $display("Writing ELF word to SRAM addr %x: %x", (AddrMask & section_addr)/WidthBytes+i, word);
67 |           mem[(AddrMask & section_addr)/WidthBytes+i] = word;
68 |           // $display("mem[0x%x]= %x", (AddrMask & section_addr)/WidthBytes+i, mem[(AddrMask & section_addr)/WidthBytes+i]);
69 | end
70 | end
71 | end
72 | end
73 |
74 | //
75 | // Data
76 | //
77 |
78 | always_ff @(posedge clk_i) begin
79 | if (req_i) begin
80 | if (write_i) begin
81 | rdata_o <= '0;
82 | for (int i = 0; i < Width; i = i + 1)
83 | if (wmask_i[i])
84 | mem[AddrMask & (RelocateRequestUp | addr_i)][i] = wdata_i[i];
85 | end
86 | else begin
87 | if (mem.exists(AddrMask & (RelocateRequestUp | addr_i))) begin
88 | rdata_o <= mem[AddrMask & (RelocateRequestUp | addr_i)];
89 | // $display("INFO: Memory known at address %h.", AddrMask & (RelocateRequestUp | addr_i));
90 | end
91 | else begin
92 | rdata_o <= 0;
93 | // $display("WARNING: Memory unknown at address %h.", AddrMask & (RelocateRequestUp | addr_i));
94 | end
95 | end
96 | end
97 | else
98 | rdata_o <= '0;
99 | end
100 |
101 | endmodule
102 |
--------------------------------------------------------------------------------
/design-processing/common/yosys/drfuzz.ys.tcl:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt & Tobias Kovats, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | if { [info exists ::env(VERILOG_INPUT)] } { set VERILOG_INPUT $::env(VERILOG_INPUT) } else { puts "Please set VERILOG_INPUT environment variable"; exit 1 }
6 | if { [info exists ::env(VERILOG_OUTPUT)] } { set VERILOG_OUTPUT $::env(VERILOG_OUTPUT) } else { puts "Please set VERILOG_OUTPUT environment variable"; exit 1 }
7 | if { [info exists ::env(VERBOSE)]} {set VERBOSE -verbose} else { set VERBOSE ""}
8 | if { [info exists ::env(SHALLOW)]} {set SHALLOW -shallow} else { set SHALLOW ""}
9 | if { [info exists ::env(EXCLUDE_SIGNALS)]} {set EXCLUDE_SIGNALS $::env(EXCLUDE_SIGNALS)} else { set EXCLUDE_SIGNALS ""}
10 | if { [info exists ::env(TOP_MODULE)] } { set TOP_MODULE $::env(TOP_MODULE) } else { puts "Please set TOP_MODULE environment variable"; exit 1 }
11 | if { [info exists ::env(TOP_RESET)] } { set TOP_RESET $::env(TOP_RESET) } else { puts "Please set TOP_RESET environment variable"; exit 1 }
12 |
13 | yosys read_verilog -defer -DSTOP_COND=0 -sv $VERILOG_INPUT
14 | yosys hierarchy -top $TOP_MODULE -check
15 | yosys proc
16 | yosys pmuxtree
17 | yosys opt -purge
18 |
19 | yosys mark_resets $VERBOSE $SHALLOW $TOP_RESET
20 | yosys mux_probes $VERBOSE $SHALLOW
21 | yosys port_mux_probes $VERBOSE
22 |
23 | yosys port_fuzz_inputs $VERBOSE $EXCLUDE_SIGNALS
24 | yosys meta_reset $VERBOSE
25 | yosys opt_clean
26 |
27 | yosys write_verilog -simple-lhs -sv $VERILOG_OUTPUT
28 |
--------------------------------------------------------------------------------
/design-processing/common/yosys/passthrough.ys.tcl:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | if { [info exists ::env(VERILOG_INPUT)] } { set VERILOG_INPUT $::env(VERILOG_INPUT) } else { puts "Please set VERILOG_INPUT environment variable"; exit 1 }
6 | if { [info exists ::env(VERILOG_OUTPUT)] } { set VERILOG_OUTPUT $::env(VERILOG_OUTPUT) } else { puts "Please set VERILOG_OUTPUT environment variable"; exit 1 }
7 | if { [info exists ::env(TOP_MODULE)] } { set TOP_MODULE $::env(TOP_MODULE) } else { puts "Please set TOP_MODULE environment variable"; exit 1 }
8 |
9 | yosys read_verilog -defer -sv $VERILOG_INPUT
10 | yosys hierarchy -top $TOP_MODULE -check
11 | yosys proc
12 | yosys opt -purge
13 |
14 | yosys write_verilog -sv -noattr $VERILOG_OUTPUT
15 |
--------------------------------------------------------------------------------
/design-processing/common/yosys/rfuzz.ys.tcl:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt & Tobias Kovats, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | if { [info exists ::env(VERILOG_INPUT)] } { set VERILOG_INPUT $::env(VERILOG_INPUT) } else { puts "Please set VERILOG_INPUT environment variable"; exit 1 }
6 | if { [info exists ::env(VERILOG_OUTPUT)] } { set VERILOG_OUTPUT $::env(VERILOG_OUTPUT) } else { puts "Please set VERILOG_OUTPUT environment variable"; exit 1 }
7 | if { [info exists ::env(TOP_MODULE)] } { set TOP_MODULE $::env(TOP_MODULE) } else { puts "Please set TOP_MODULE environment variable"; exit 1 }
8 |
9 | yosys read_verilog -defer -sv $VERILOG_INPUT
10 | yosys hierarchy -top $TOP_MODULE -check
11 | yosys proc
12 | yosys pmuxtree
13 | yosys opt -purge
14 |
15 | yosys mark_resets
16 | yosys mux_probes
17 | yosys port_mux_probes
18 |
19 | yosys opt_clean
20 |
21 | yosys write_verilog -sv -noattr $VERILOG_OUTPUT
22 |
--------------------------------------------------------------------------------
/design-processing/design_repos.json:
--------------------------------------------------------------------------------
1 | {
2 | "cva6": "../../cascade-designs/cascade-cva6/cascade",
3 | "cva6-c1": "../../cascade-designs/cascade-cva6-c1/cascade",
4 | "cva6-y1": "../../cascade-designs/cascade-cva6-y1/cascade",
5 | "rocket": "../../cascade-designs/cascade-chipyard/cascade-rocket",
6 | "boom": "../../cascade-designs/cascade-chipyard/cascade-boom",
7 | "boom-b1": "../../cascade-designs/cascade-chipyard-b1/cascade-boom",
8 | "picorv32": "../../cascade-designs/cascade-picorv32/cascade",
9 | "picorv32-p5": "../../cascade-designs/cascade-picorv32-p5/cascade",
10 | "kronos": "../../cascade-designs/cascade-kronos/cascade",
11 | "kronos-k1": "../../cascade-designs/cascade-kronos-k1/cascade",
12 | "kronos-k2": "../../cascade-designs/cascade-kronos-k2/cascade",
13 | "vexriscv": "../../cascade-designs/cascade-vexriscv/cascade",
14 | "vexriscv-v1-7": "../../cascade-designs/cascade-vexriscv-v1-7/cascade",
15 | "vexriscv-v8-9-v15": "../../cascade-designs/cascade-vexriscv-v8-9-v15/cascade",
16 | "vexriscv-v10-11": "../../cascade-designs/cascade-vexriscv-v10-11/cascade",
17 | "vexriscv-v12": "../../cascade-designs/cascade-vexriscv-v12/cascade",
18 | "vexriscv-v13": "../../cascade-designs/cascade-vexriscv-v13/cascade"
19 | }
20 |
--------------------------------------------------------------------------------
/design-processing/make_all_designs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script makes all designs for verilator.
6 |
7 | import json
8 | import multiprocessing as mp
9 | import os
10 | import subprocess
11 | import sys
12 |
13 | def make_design_worker(design_path, instrumentation):
14 | print("Making design", design_path, "with instrumentation", instrumentation)
15 | subprocess.run(["rm", "-rf", os.path.join(design_path, f"run_{instrumentation}_notrace.core")], check=True)
16 | subprocess.run(["make", "-C", design_path, f"run_{instrumentation}_notrace"], check=False)
17 | return True
18 |
19 | if __name__ == "__main__":
20 | instrumentation = 'vanilla'
21 | if len(sys.argv) > 1:
22 | instrumentation = sys.argv[1]
23 | assert instrumentation in ['vanilla', 'rfuzz', 'drfuzz'], f"Unknown instrumentation `{instrumentation}`"
24 |
25 | with open("design_repos.json", "r") as f:
26 | design_repos = json.load(f)
27 |
28 | # For rfuzz and drfuzz, we only want the non-buggy designs, i.e., when '-' is not in the name. Also ignore cva6 because of y1
29 | if instrumentation != 'vanilla':
30 | design_repos = {k: v for k, v in design_repos.items() if '-' not in k and 'cva6' not in k}
31 |
32 | all_design_names, all_design_paths = design_repos.keys(), design_repos.values()
33 | design_names_novex, design_paths_novex = list(filter(lambda s: 'vex' not in s, all_design_names)), list(filter(lambda s: 'vex' not in s, all_design_paths))
34 | design_names_vex, design_paths_vex = list(filter(lambda s: 'vex' in s, all_design_names)), list(filter(lambda s: 'vex' in s, all_design_paths))
35 |
36 | worker_cnt = int(os.environ['CASCADE_JOBS'])
37 |
38 | rets = []
39 | with mp.Pool(processes=worker_cnt) as pool:
40 | for design_path in design_paths_novex:
41 | print('Path:', design_path, len(design_path))
42 | rets.append(pool.apply_async(make_design_worker, (design_path, instrumentation)))
43 | pool.close()
44 |
45 | # Build the vexriscv designs separately and sequentially, since building them in parallel tends to clash with the other versions
46 | for design_path in all_design_paths:
47 | if 'vexriscv' in design_path:
48 | print('Vexpath:', design_path)
49 | make_design_worker(design_path, instrumentation)
50 | pool.join()
51 | for ret in rets:
52 | ret.get()
53 |
--------------------------------------------------------------------------------
/design-processing/python-requirements.txt:
--------------------------------------------------------------------------------
1 | tqdm
2 |
--------------------------------------------------------------------------------
/elfs-for-questa/.gitignore:
--------------------------------------------------------------------------------
1 | /*.elf
2 |
--------------------------------------------------------------------------------
/elfs-for-questa/Readme.md:
--------------------------------------------------------------------------------
1 | The ELFs for the Questasim experiment may be stored here.
2 |
--------------------------------------------------------------------------------
/fuzzer/.gitignore:
--------------------------------------------------------------------------------
1 | *.json
2 | *.png
3 | *.pdf
4 | *.elf
5 |
--------------------------------------------------------------------------------
/fuzzer/analyzeelfs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cascade-artifacts-designs/cascade-meta/6c57bd41c204f6abc8a2747ede34c9145abee9b5/fuzzer/analyzeelfs/__init__.py
--------------------------------------------------------------------------------
/fuzzer/analyzeelfs/genmanyelfs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This module is typically used to generate a lot of Cascade programs
6 |
7 | from common.profiledesign import profile_get_medeleg_mask
8 | from common.spike import calibrate_spikespeed
9 | from cascade.fuzzfromdescriptor import gen_fuzzerstate_elf_expectedvals, gen_new_test_instance
10 |
11 | import multiprocessing as mp
12 | import os
13 | import random
14 | import shutil
15 | from tqdm import tqdm
16 |
17 |
18 | # @param in_tuple: instance_id: int, memsize: int, design_name: str, randseed: int, nmax_bbs: int, authorize_privileges: bool, check_pc_spike_again: bool, outdir_path: str
19 | def __gen_elf_worker(in_tuple):
20 | instance_id, memsize, design_name, randseed, nmax_bbs, authorize_privileges, check_pc_spike_again, outdir_path = in_tuple
21 | fuzzerstate, elfpath, _, _, _, _ = gen_fuzzerstate_elf_expectedvals(memsize, design_name, randseed, nmax_bbs, authorize_privileges, check_pc_spike_again)
22 | # Move the file from elfpath to outdir_path, and name it after the design name and instance id.
23 | shutil.move(elfpath, os.path.join(outdir_path, f"{design_name}_{instance_id}.elf"))
24 |
25 | # Write the end address (where spike will fail), for further analysis.
26 | with open(os.path.join(outdir_path, f"{design_name}_{instance_id}_finaladdr.txt"), "w") as f:
27 | f.write(hex(fuzzerstate.final_bb_base_addr))
28 |
29 | # Count the instructions
30 | num_instrs = len(fuzzerstate.final_bb)
31 | for bb in fuzzerstate.instr_objs_seq:
32 | num_instrs += len(bb)
33 | with open(os.path.join(outdir_path, f"{design_name}_{instance_id}_numinstrs.txt"), "w") as f:
34 | f.write(hex(num_instrs))
35 |
36 | # Save the tuple for debug purposes
37 | with open(os.path.join(outdir_path, f"{design_name}_{instance_id}_tuple.txt"), "w") as f:
38 | f.write('(' + ', '.join(map(str, [memsize, design_name, randseed, nmax_bbs, authorize_privileges])) + ')')
39 |
40 |
41 | def gen_many_elfs(design_name: str, num_cores: int, num_elfs: int, outdir_path, verbose: bool = True):
42 | random.seed(0)
43 |
44 | # Ensure that the output directory exists.
45 | os.makedirs(outdir_path, exist_ok=True)
46 |
47 | # Gen the program descriptors.
48 | memsizes, _, randseeds, num_bbss, authorize_privilegess = tuple(zip(*[gen_new_test_instance(design_name, i, True) for i in range(num_elfs)]))
49 | workloads = [(i, memsizes[i], design_name, randseeds[i], num_bbss[i], authorize_privilegess[i], False, outdir_path) for i in range(num_elfs)]
50 |
51 | calibrate_spikespeed()
52 | profile_get_medeleg_mask(design_name)
53 |
54 | print(f"Starting ELF generation on {num_cores} processes.")
55 | progress_bar = tqdm(total=num_elfs)
56 | with mp.Pool(num_cores) as pool:
57 | results = pool.imap(__gen_elf_worker, workloads)
58 |
59 | for result in results:
60 | if verbose:
61 | progress_bar.update(1)
62 |
63 | progress_bar.close()
64 |
--------------------------------------------------------------------------------
/fuzzer/analyzeelfs/util.py:
--------------------------------------------------------------------------------
1 | from params.runparams import PATH_TO_TMP
2 | import subprocess
3 | import os
4 | import re
5 |
6 | CASCADE_NUM_INITIAL_INSTR = 5 + 64 # 64 instructions in the initial block + 5 instructions in the spike bootrom
7 | CASCADE_NUM_FINAL_INSTR = 105 # 105 instructions in the final block
8 |
9 | def get_instance_elfpath(is_difuzzrtl: bool, design_name: str, instance_id: int):
10 | if is_difuzzrtl:
11 | assert design_name == 'rocket', "Only Rocket is supported for difuzz-rtl."
12 | path_to_diffuzzrtl_elfs = os.environ['CASCADE_PATH_TO_DIFUZZRTL_ELFS']
13 | return os.path.join(path_to_diffuzzrtl_elfs, f"id_{instance_id}.elf")
14 | else:
15 | elfdir_path = os.path.join(PATH_TO_TMP, 'manyelfs')
16 | return os.path.join(elfdir_path, f"{design_name}_{instance_id}.elf")
17 |
18 | def get_instance_finaladdr(is_difuzzrtl: bool, design_name: str, instance_id: int, elfpath: str = None):
19 | if is_difuzzrtl:
20 | assert elfpath is not None, "elfpath must be provided for difuzz-rtl."
21 | ret_addr = subprocess.check_output([f"nm {elfpath} | grep _test_end"], shell=True, text=True)
22 | return int(ret_addr.split()[0], 16)
23 | else:
24 | elfdir_path = os.path.join(PATH_TO_TMP, 'manyelfs')
25 | with open(os.path.join(elfdir_path, f"{design_name}_{instance_id}_finaladdr.txt"), "r") as f:
26 | return int(f.read(), 16) + 0x80000000
27 |
28 | # For DifuzzRTL. The last l symbol is always empty (contains only ecall)
29 | def get_instance_max_l_symbol(elfpath: str = None):
30 | ret_lines = subprocess.check_output([f"nm {elfpath}"], shell=True, text=True)
31 | # Among ret_lines, find all lines that have the pattern _l followed by digits
32 | ret_lines = list(filter(lambda s: re.search(r'_l\d+', s), ret_lines.split('\n')))
33 | ret_symbols = list(map(lambda s: s.split()[-1], ret_lines))
34 | ret_vals = list(map(lambda s: int(s[2:], 10), ret_symbols))
35 | return max(ret_vals)
36 |
37 | def get_max_reached_l_symbol(spike_log: str):
38 | ret = []
39 | for line in spike_log.split('\n'):
40 | if re.search(r'_l\d+', line):
41 | symbol_str = line.split()[-1]
42 | symbol_val = int(symbol_str[2:], 10)
43 | ret.append(symbol_val)
44 | if not ret:
45 | return 0
46 | return max(ret)
47 |
48 | # @param finaladdr not used by difuzzrtl
49 | def compute_prevalence(is_difuzzrtl: bool, spike_log: str, finaladdr: int):
50 | num_overhead_instructions = 0
51 | num_effective_instructions = 0
52 |
53 | if is_difuzzrtl:
54 | is_currently_overhead = True
55 |
56 | for line in spike_log.split('\n'):
57 | if re.search(r'_l\d+', line):
58 | is_currently_overhead = False
59 | elif 'exception' in line:
60 | is_currently_overhead = True
61 |
62 | num_overhead_instructions += is_currently_overhead
63 | num_effective_instructions += not is_currently_overhead
64 | return num_effective_instructions, num_overhead_instructions
65 | else:
66 | # Filter the lines that correspond to executed instructions
67 | num_executed_instrs = len(list(filter(lambda l: l.startswith('core 0: 0x'), spike_log.split('\n'))))
68 | num_effective_instructions = num_executed_instrs - CASCADE_NUM_INITIAL_INSTR
69 | num_overhead_instructions = CASCADE_NUM_FINAL_INSTR + CASCADE_NUM_INITIAL_INSTR
70 | return num_effective_instructions, num_overhead_instructions
71 |
72 | # @param only_cf: If True, only return the control-flow instructions. Else, only the non-control-flow.
73 | def filter_list_by_cf(candidate_list, indices_of_cf_instrs, only_cf: bool):
74 | assert only_cf
75 | return [candidate_list[i] for i in indices_of_cf_instrs]
76 |
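A minimal sketch of the `_l<digits>` symbol parsing used by get_max_reached_l_symbol and get_max_reached_l_symbol's log counterpart (the log excerpt is hypothetical):

    import re

    spike_log = "core 0: >>>> _l3\ncore 0: exception trap\ncore 0: >>>> _l17"  # hypothetical
    reached = [int(line.split()[-1][2:], 10)
               for line in spike_log.split('\n') if re.search(r'_l\d+', line)]
    print(max(reached) if reached else 0)  # 17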
--------------------------------------------------------------------------------
/fuzzer/benchmarking/findnfailinginstances.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This utility finds n distinct failing program descriptors
6 |
7 | from params.runparams import PATH_TO_TMP
8 | from cascade.fuzzfromdescriptor import gen_new_test_instance, fuzz_single_from_descriptor
9 | from common.profiledesign import profile_get_medeleg_mask
10 | from common.spike import calibrate_spikespeed
11 |
12 | import json
13 | import os
14 | import time
15 | import threading
16 | import multiprocessing as mp
17 | from tqdm import tqdm
18 |
19 | callback_lock = threading.Lock()
20 | newly_finishing_instances = 0
21 | newly_failing_instances = 0
22 | all_failing_instances = []
23 |
24 | def test_done_callback(ret):
25 | global newly_failing_instances
26 | global newly_finishing_instances
27 | global callback_lock
28 | global all_failing_instances
29 | with callback_lock:
30 | newly_finishing_instances += 1
31 | if ret is None:
32 | return
33 | else:
34 | newly_failing_instances += 1
35 | all_failing_instances.append(ret)
36 |
37 | def _find_n_failing_descriptors_worker(memsize, design_name, process_instance_id, num_bbs, authorize_privileges):
38 | result = fuzz_single_from_descriptor(memsize, design_name, process_instance_id, num_bbs, authorize_privileges, None, True)
39 | # result is 0, 0, 0, 0 iff the program descriptor is failing
40 | if result == (0, 0, 0, 0):
41 | return (memsize, design_name, process_instance_id, num_bbs, authorize_privileges)
42 | else:
43 | return None
44 |
45 | def find_n_failing_descriptors(design_name: str, num_testcases: int, num_workers: int, seed_offset: int = 0, can_authorize_privileges: bool = True):
46 | global callback_lock
47 | global newly_failing_instances
48 | global newly_finishing_instances
49 | global all_failing_instances
50 |
51 | newly_failing_instances = 0
52 | newly_finishing_instances = 0
53 | all_failing_instances.clear()
54 |
55 | calibrate_spikespeed()
56 | profile_get_medeleg_mask(design_name)
57 |
58 | pool = mp.Pool(processes=num_workers)
59 | process_instance_id = seed_offset
60 |
61 | # First, apply the function to all the workers.
62 | for _ in range(num_workers):
63 | memsize, _, _, num_bbs, authorize_privileges = gen_new_test_instance(design_name, process_instance_id, can_authorize_privileges)
64 | pool.apply_async(_find_n_failing_descriptors_worker, args=(memsize, design_name, process_instance_id, num_bbs, authorize_privileges), callback=test_done_callback)
65 | process_instance_id += 1
66 |
67 | # Respawn processes until we received the desired number of failing descriptors
68 | with tqdm(total=num_testcases) as pbar:
69 | while newly_finishing_instances < num_testcases:
70 | # Yield the execution
71 | time.sleep(1)
72 | # Check whether we received new coverage paths
73 | with callback_lock:
74 | if newly_failing_instances > 0:
75 | pbar.update(newly_failing_instances)
76 | newly_failing_instances = 0
77 | if newly_finishing_instances > 0:
78 | if len(all_failing_instances) >= num_testcases:
79 | print(f"Received enough failing instances for design `{design_name}`. Stopping.")
80 | break
81 | for new_process_id in range(newly_finishing_instances):
82 | pool.apply_async(_find_n_failing_descriptors_worker, args=(*gen_new_test_instance(design_name, process_instance_id, True),), callback=test_done_callback)
83 | process_instance_id += 1
84 | newly_finishing_instances = 0
85 |
86 | # Kill all remaining processes
87 | pool.close()
88 | pool.terminate()
89 |
90 | # Ensure we do not have too many instances due to parallelism
91 | all_failing_instances = all_failing_instances[:num_testcases]
92 |
93 | # Save the requested number of failing instances
94 | json.dump(all_failing_instances, open(os.path.join(PATH_TO_TMP, f"failinginstances_{design_name}_{num_testcases}.json"), 'w'))
95 | print('Saved failing program descriptors results to', os.path.join(PATH_TO_TMP, f"failinginstances_{design_name}_{num_testcases}.json"))
96 |
--------------------------------------------------------------------------------
/fuzzer/cascade/blacklist.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This module is responsible for blacklisting addresses, aka strong allocations.
6 |
7 | from cascade.cfinstructionclasses import BranchInstruction, PlaceholderProducerInstr0, PlaceholderProducerInstr1, PlaceholderConsumerInstr
8 |
9 | # Blacklisting is typically used for forbidding loads from loading instructions
10 | # that will change between spike resolution and RTL sim.
11 |
12 | # All functions whose bytecode depends on the is_spike_resolution boolean
13 | INSTRUCTION_TYPES_TO_BLACKLIST = [
14 | BranchInstruction,
15 | PlaceholderProducerInstr0,
16 | PlaceholderProducerInstr1,
17 | PlaceholderConsumerInstr
18 | ]
19 |
20 | # Blacklist addresses where instructions change between spike resolution and RTL sim.
21 | def blacklist_changing_instructions(fuzzerstate):
22 | # The first two instructions set up the relocator reg and may change between spike and RTL.
23 | fuzzerstate.memview_blacklist.alloc_mem_range(fuzzerstate.bb_start_addr_seq[0], 8) # NO_COMPRESSED
24 |
25 | # Find specific instruction types to blacklist
26 | for bb_id, bb_instrlist in enumerate(fuzzerstate.instr_objs_seq):
27 | for bb_instr_id, bb_instr in enumerate(bb_instrlist):
28 | for instr_type_to_blacklist in INSTRUCTION_TYPES_TO_BLACKLIST:
29 | if isinstance(bb_instr, instr_type_to_blacklist):
30 | curr_addr = fuzzerstate.bb_start_addr_seq[bb_id] + bb_instr_id * 4 # NO_COMPRESSED
31 | fuzzerstate.memview_blacklist.alloc_mem_range(curr_addr, 4)
32 | break
33 |
34 | # Blacklist the last instruction of the initial block because we may steer it
35 | # into other blocks (typically to the context setter before steering the control
36 | # flow to a later bb, skipping some first ones).
37 | last_instr_addr = fuzzerstate.bb_start_addr_seq[0] + (len(fuzzerstate.instr_objs_seq[0]) - 1) * 4
38 | fuzzerstate.memview_blacklist.alloc_mem_range(last_instr_addr, 4) # NO_COMPRESSED
39 |
40 | # Blacklist addresses where instructions change between spike resolution and RTL sim.
41 | def blacklist_final_block(fuzzerstate):
42 | fuzzerstate.memview_blacklist.alloc_mem_range(fuzzerstate.final_bb_base_addr, len(fuzzerstate.final_bb) * 4) # NO_COMPRESSED
43 |
44 | # Blacklist addresses where instructions change between spike resolution and RTL sim.
45 | def blacklist_context_setter(fuzzerstate):
46 | fuzzerstate.memview_blacklist.alloc_mem_range(fuzzerstate.ctxsv_bb_base_addr, fuzzerstate.ctxsv_size_upperbound) # NO_COMPRESSED
47 |
--------------------------------------------------------------------------------
/fuzzer/cascade/debug/compareexecutions.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Compare the execution of two ELFs on spike
6 |
7 | from common.designcfgs import get_design_march_flags_nocompressed, is_design_32bit
8 | from common.spike import SPIKE_STARTADDR, FPREG_ABINAMES, get_spike_timeout_seconds
9 | from params.runparams import DO_ASSERT, NO_REMOVE_TMPFILES, PATH_TO_TMP
10 | from cascade.basicblock import gen_basicblocks
11 | from cascade.cfinstructionclasses import JALInstruction, RegImmInstruction
12 | from cascade.fuzzsim import SimulatorEnum, runtest_simulator
13 | from cascade.spikeresolution import gen_elf_from_bbs, gen_regdump_reqs_reduced, gen_ctx_regdump_reqs, run_trace_regs_at_pc_locs, spike_resolution, run_trace_all_pcs
14 | from cascade.contextreplay import SavedContext, gen_context_setter
15 | from cascade.privilegestate import PrivilegeStateEnum
16 | from cascade.reduce import _save_ctx_and_jump_to_pillar_specific_instr
17 | from cascade.debug.debugreduce import _gen_spike_dbgcmd_file_for_full_trace, parse_full_trace, compare_parsed_traces, NUM_ELEMS_PER_INSTR, INTREG_ABI_NAMES
18 |
19 | import random
20 | import os
21 | from pathlib import Path
22 | import subprocess
23 | from typing import List
24 |
25 | def compare_executions(design_name: str, elf_path_1: str, elf_path_2: str, numinstrs: int):
26 |
27 | spike_out_1 = gen_full_trace_for_instrs(elf_path_1, get_design_march_flags_nocompressed(design_name), SPIKE_STARTADDR, numinstrs, not is_design_32bit(design_name))
28 | spike_out_2 = gen_full_trace_for_instrs(elf_path_2, get_design_march_flags_nocompressed(design_name), SPIKE_STARTADDR, numinstrs, not is_design_32bit(design_name))
29 |
30 | parsed_trace_1 = parse_full_trace(spike_out_1, not is_design_32bit(design_name))
31 | parsed_trace_2 = parse_full_trace(spike_out_2, not is_design_32bit(design_name))
32 |
33 | instr_addr_seq = list(map(lambda x: int(x[0], base=16) - SPIKE_STARTADDR, parsed_trace_1))
34 |
35 | # Compare the traces
36 | compare_parsed_traces(parsed_trace_1, parsed_trace_2, instr_addr_seq)
37 |
38 | def gen_full_trace_for_instrs(elfpath: str, rvflags: str, startpc: int, numinstrs: int, is_64bit: bool):
39 | path_to_debug_file = _gen_spike_dbgcmd_file_for_full_trace(numinstrs, startpc, is_64bit)
40 | print(f"Generated debug file: {path_to_debug_file}")
41 |
42 | # Second, run the Spike command
43 | spike_shell_command = (
44 | "spike",
45 | "-d",
46 | f"--debug-cmd={path_to_debug_file}",
47 | f"--isa={rvflags}",
48 | f"--pc={SPIKE_STARTADDR}",
49 | elfpath
50 | )
51 |
52 | print(f"Running Spike command: {' '.join(filter(lambda s: '--debug-cmd' not in s, spike_shell_command))} Debug file: {path_to_debug_file}")
53 |
54 | try:
55 | spike_out = subprocess.run(spike_shell_command, capture_output=True, text=True, timeout=get_spike_timeout_seconds()).stderr
56 | except Exception as e:
57 | raise Exception(f"Spike timeout in the debug script.\nCommand: {' '.join(spike_shell_command)}")
58 |
59 | spike_out = '\n'.join(filter(lambda s: ':' not in s and len(s) > 0, spike_out.split('\n')))
60 | num_elems_per_instr_according_to_bitwidth = NUM_ELEMS_PER_INSTR + int(not is_64bit)
61 | assert len(spike_out.split('\n')) == numinstrs * num_elems_per_instr_according_to_bitwidth, "Unexpected number of lines in the full trace: " + str(len(spike_out.split('\n'))) + " -- expected: " + str(numinstrs * num_elems_per_instr_according_to_bitwidth)
62 | return spike_out
63 |
--------------------------------------------------------------------------------
/fuzzer/cascade/randomize/pickfpuop.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This module is responsible for picking floating-point operations
6 |
7 | from params.runparams import DO_ASSERT
8 | from rv.csrids import CSR_IDS
9 | from params.fuzzparams import FPU_ENDIS_REGISTER_ID
10 | from cascade.privilegestate import PrivilegeStateEnum
11 | from cascade.cfinstructionclasses import CSRRegInstruction, RegImmInstruction
12 | import random
13 |
14 | ROUNDING_MODES = [0, 1, 2, 3, 4] # The non-reserved ones
15 |
16 | def __pick_rounding_mode():
17 | # random is already imported at module level.
18 | return random.sample(ROUNDING_MODES, 1)[0]
19 |
20 | def create_rmswitch_instrobjs(fuzzerstate):
21 | # Check that the FPU exists and is activated
22 | if DO_ASSERT:
23 | assert fuzzerstate.design_has_fpu
24 | assert fuzzerstate.is_fpu_activated
25 | # Put a random (valid) value to rm
26 | new_rm = __pick_rounding_mode()
27 | rinterm = fuzzerstate.intregpickstate.pick_int_inputreg_nonzero()
28 | # rd = fuzzerstate.intregpickstate.pick_int_outputreg() # Can be equal to rinterm
29 | rd = 0 # FUTURE WARL
30 | # Either through the frm CSR, or through the fcsr register
31 | use_frm_csr = random.randint(0, 1)
32 | if use_frm_csr:
33 | return [
34 | # Put the rounding mode to rinterm, and unset the flag bits
35 | RegImmInstruction("addi", rinterm, 0, new_rm, fuzzerstate.is_design_64bit),
36 | CSRRegInstruction("csrrw", rd, rinterm, CSR_IDS.FRM)
37 | ]
38 | else:
39 | return [
40 | # Put the rounding mode to rinterm, and unset the flag bits
41 | RegImmInstruction("addi", rinterm, 0, new_rm << 5, fuzzerstate.is_design_64bit),
42 | CSRRegInstruction("csrrw", rd, rinterm, CSR_IDS.FCSR)
43 | ]
44 |
45 |
46 | # @return a list of instrobjs
47 | def gen_fpufsm_instrs(fuzzerstate):
48 | if DO_ASSERT:
49 | assert fuzzerstate.design_has_fpu
50 | assert fuzzerstate.privilegestate.privstate == PrivilegeStateEnum.MACHINE
51 |
52 | ret = []
53 | if random.random() < fuzzerstate.proba_turn_on_off_fpu_again:
54 | rd = 0 # FUTURE WARL
55 | if fuzzerstate.is_fpu_activated:
56 | ret = [CSRRegInstruction("csrrs", rd, FPU_ENDIS_REGISTER_ID, CSR_IDS.MSTATUS)]
57 | else:
58 | ret = [CSRRegInstruction("csrrc", rd, FPU_ENDIS_REGISTER_ID, CSR_IDS.MSTATUS)]
59 |
60 | # If the FPU is on, we arbitrate randomly between changing the rounding mode and turning off the FPU.
61 | elif fuzzerstate.is_fpu_activated:
62 | # Change the rounding mode with probability proba_change_rm; otherwise turn off the FPU.
63 | do_change_rounding_mode = random.random() < fuzzerstate.proba_change_rm
64 | if do_change_rounding_mode:
65 | ret = create_rmswitch_instrobjs(fuzzerstate)
66 | else:
67 | fuzzerstate.is_fpu_activated = False
68 | rd = 0 # Do not read the value because for triaging we want to be able to remove these instructions
69 | ret = [CSRRegInstruction("csrrc", rd, FPU_ENDIS_REGISTER_ID, CSR_IDS.MSTATUS)]
70 | else:
71 | rd = 0 # WARL
72 | fuzzerstate.is_fpu_activated = True
73 | ret = [CSRRegInstruction("csrrs", rd, FPU_ENDIS_REGISTER_ID, CSR_IDS.MSTATUS)]
74 |
75 | if len(ret) == 1: # Equivalent to FPU enable/disable
76 | fuzzerstate.add_fpu_coord()
77 | return ret
78 |
--------------------------------------------------------------------------------
/fuzzer/cascade/randomize/pickmemop.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This module is responsible for choosing the memory operation addresses and address registers.
6 |
7 | from params.runparams import DO_ASSERT
8 | import random
9 |
10 | from params.fuzzparams import MemaddrPickPolicy, MEMADDR_PICK_POLICY_WEIGTHS
11 |
12 | # Helper function for the basic blocks
13 | def is_instrstr_load(instr_str: str):
14 | return instr_str in ("lb", "lh", "lhu", "lw", "lwu", "flw", "ld", "fld", "lbu")
15 |
16 | # Helper function for the basic blocks
17 | def get_alignment_bits(instr_str: str):
18 | if instr_str in ("sb", "lb", "lbu"):
19 | return 0
20 | elif instr_str in ("sh", "lh", "lhu"):
21 | return 1
22 | elif instr_str in ("sw", "lw", "lwu", "flw", "fsw"):
23 | return 2
24 | elif instr_str in ("sd", "ld", "fld", "fsd"):
25 | return 3
26 | else:
27 | raise ValueError(f"Unexpected memory instruction string: `{instr_str}`")
28 |
29 | # Does not update the memview of the fuzzerstate.
30 | # @param is_curr_load: True if load, False if store
31 | # @param alignment_bits: 0, 1, 2 or 3
32 | # @return the address
33 | def pick_memop_addr(fuzzerstate, is_curr_load: bool, alignment_bits: int):
34 | if DO_ASSERT:
35 | assert alignment_bits >= 0
36 | assert alignment_bits <= 3
37 |
38 | # Ensure we don't make a wrong choice
39 | curr_pick_type = None
40 | while curr_pick_type is None or MEMADDR_PICK_POLICY_WEIGTHS[is_curr_load][curr_pick_type] == 0:
41 | curr_pick_type = random.choices(list(MEMADDR_PICK_POLICY_WEIGTHS[is_curr_load].keys()), weights=MEMADDR_PICK_POLICY_WEIGTHS[is_curr_load].values())[0]
42 |
43 | if curr_pick_type == MemaddrPickPolicy.MEM_ANY_STORELOC:
44 | # Pick a store location
45 | ret_addr = fuzzerstate.memstorestate.pick_store_location(alignment_bits)
46 | if not is_curr_load:
47 | fuzzerstate.memstorestate.last_store_addr = ret_addr
48 | return ret_addr
49 | elif curr_pick_type == MemaddrPickPolicy.MEM_ANY:
50 | if DO_ASSERT:
51 | assert is_curr_load
52 | # Pick any location
53 | ret_addr = fuzzerstate.memview_blacklist.gen_random_free_addr(alignment_bits, 1 << alignment_bits, 0, fuzzerstate.memview_blacklist.memsize)
54 | if DO_ASSERT:
55 | assert ret_addr >= 0
56 | assert ret_addr + (1 << alignment_bits) < fuzzerstate.memsize
57 | return ret_addr
58 | else:
59 | raise NotImplementedError(f"Unimplemented MemaddrPickPolicy: `{curr_pick_type}`.")
60 |
--------------------------------------------------------------------------------
/fuzzer/cascade/randomize/pickprivilegedescentop.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script is used to pick an instruction from the privileged descent instruction ISA class.
6 |
7 | from params.runparams import DO_ASSERT
8 | from cascade.privilegestate import PrivilegeStateEnum
9 | from cascade.cfinstructionclasses import PrivilegeDescentInstruction
10 |
11 | # @brief Generate a privileged descent instruction or an mpp/spp write instruction.
12 | # @return a list of instructions
13 | def gen_priv_descent_instr(fuzzerstate):
14 | if DO_ASSERT:
15 | if fuzzerstate.privilegestate.privstate == PrivilegeStateEnum.MACHINE:
16 | # Add `or fuzzerstate.privilegestate.is_sepc_populated` to implement sret in machine mode
17 | assert fuzzerstate.privilegestate.is_mepc_populated, "If we are in machine mode, then mepc or sepc should be populated if we want to descend privileges."
18 | assert fuzzerstate.privilegestate.curr_mstatus_mpp is not None, "mpp should be populated if we want to descend privileges from machine mode."
19 | else:
20 | assert fuzzerstate.privilegestate.privstate == PrivilegeStateEnum.SUPERVISOR
21 | assert fuzzerstate.privilegestate.is_sepc_populated, "If we are in supervisor mode, then sepc should be populated if we want to descend privileges."
22 | assert fuzzerstate.privilegestate.curr_mstatus_spp is not None, "spp should be populated if we want to descend privileges from supervisor mode."
23 |
24 | is_mret = fuzzerstate.privilegestate.privstate == PrivilegeStateEnum.MACHINE
25 |
26 | # Invalidate the corresponding epc and update the current privilege level.
27 | # Do not update or invalidate mpp/spp bits.
28 | if is_mret:
29 | fuzzerstate.privilegestate.is_mepc_populated = False
30 | fuzzerstate.privilegestate.privstate = fuzzerstate.privilegestate.curr_mstatus_mpp
31 | fuzzerstate.privilegestate.curr_mstatus_mpp = PrivilegeStateEnum.USER
32 | else:
33 | fuzzerstate.privilegestate.is_sepc_populated = False
34 | fuzzerstate.privilegestate.privstate = fuzzerstate.privilegestate.curr_mstatus_spp
35 | fuzzerstate.privilegestate.curr_mstatus_spp = PrivilegeStateEnum.USER
36 |
37 | return PrivilegeDescentInstruction(is_mret)
38 |
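A hedged, standalone sketch of the xRET bookkeeping performed above, detached from the fuzzer state (the enum and its values are assumptions for illustration): after mret, the new privilege level comes from mstatus.MPP, and MPP is reset to the least-privileged mode, mirroring the RISC-V privileged specification.

    from enum import Enum

    class Priv(Enum):
        USER = 0
        SUPERVISOR = 1
        MACHINE = 3

    # Returns (new privilege level, new value of mstatus.MPP).
    def apply_mret(curr_priv: Priv, mpp: Priv):
        assert curr_priv == Priv.MACHINE, "mret is only generated in machine mode here"
        return mpp, Priv.USER

    if __name__ == '__main__':
        new_priv, new_mpp = apply_mret(Priv.MACHINE, Priv.SUPERVISOR)
        assert new_priv == Priv.SUPERVISOR and new_mpp == Priv.USER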
--------------------------------------------------------------------------------
/fuzzer/cascade/randomize/pickstoreaddr.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # In this module, we propose the simple solution of pre-allocating data ranges before instruction generation starts.
6 | # `Store locations` are 8-byte aligned, 8-byte size locations in memory.
7 |
8 | from params.runparams import DO_ASSERT
9 | from cascade.memview import MemoryView
10 |
11 | import numpy as np
12 | import random
13 |
14 | ALIGNMENT_BITS_MAX = 3 # We support alignments of 0, 1, 2 and 3 bits (i.e., we do not support the quad-precision RISC-V extensions)
15 |
16 | class MemStoreState:
17 | # The store locations themselves are generated and allocated by init_store_locations below.
18 | def __init__(self):
19 | # Will hold the randomly generated store locations
20 | self.store_locations = []
21 |
22 | # Should be called once the first basic block is already allocated. @param memview must be a fresh MemoryView; it is modified in place by allocating the store locations.
23 | def init_store_locations(self, num_store_locations: int, memview: MemoryView):
24 | for store_location_id in range(num_store_locations):
25 | next_store_location = memview.gen_random_free_addr(ALIGNMENT_BITS_MAX, 1 << ALIGNMENT_BITS_MAX, 0, memview.memsize)
26 | if next_store_location is None:
27 | raise ValueError(f"Could not find a next store location. You may want to increase the memory size (for the moment: {memview.memsize:,} B)")
28 | memview.alloc_mem_range(next_store_location, (1 << ALIGNMENT_BITS_MAX))
29 | self.store_locations.append(next_store_location)
30 | if DO_ASSERT:
31 | assert next_store_location >= 0
32 | assert next_store_location + (1 << ALIGNMENT_BITS_MAX) <= memview.memsize
33 | assert next_store_location % (1 << ALIGNMENT_BITS_MAX) == 0
34 | self.location_weights = np.ones(num_store_locations)
35 | # Remember the last store operation address
36 | self.last_store_addr = self.store_locations[0]
37 |
38 | # Modifies the weights in place.
39 | # @param alignment_bits the log2 of the requested access size. This means we do not support misaligned memory requests.
40 | # @return the picked location, in addition to updating the state.
41 | def pick_store_location(self, alignment_bits: int):
42 | if DO_ASSERT:
43 | assert alignment_bits >= 0
44 | assert alignment_bits <= ALIGNMENT_BITS_MAX
45 |
46 | # We first pick a store location. If the alignment is smaller than this size, then we choose uniformly inside the selected store location.
47 | picked_location_id = random.choices(range(len(self.store_locations)), self.location_weights)[0]
48 | picked_location = self.store_locations[picked_location_id]
49 | # Heuristic weight update: normalizing all weights and then resetting the picked one to 1 biases future picks toward recently used locations
50 | self.location_weights /= np.sum(self.location_weights)
51 | self.location_weights[picked_location_id] = 1
52 |
53 | if alignment_bits == ALIGNMENT_BITS_MAX:
54 | return picked_location
55 | else:
56 | # Choose uniformly a sub-location
57 | factor = 1 << (ALIGNMENT_BITS_MAX - alignment_bits)
58 | offset_in_location = random.randrange(factor) * (1 << alignment_bits)
59 | return picked_location + offset_in_location
60 |
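A quick standalone check of the sub-location arithmetic above: within an 8-byte store location, a request with alignment_bits=1 (halfword) can land on any of the four 2-byte slots {0, 2, 4, 6}.

    import random

    ALIGNMENT_BITS_MAX = 3

    def sub_offset(alignment_bits: int) -> int:
        # Same computation as pick_store_location's sub-location branch.
        factor = 1 << (ALIGNMENT_BITS_MAX - alignment_bits)
        return random.randrange(factor) * (1 << alignment_bits)

    if __name__ == '__main__':
        offsets = {sub_offset(1) for _ in range(1000)}
        assert offsets <= {0, 2, 4, 6}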
--------------------------------------------------------------------------------
/fuzzer/common/bytestoelf.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This module is dedicated to transforming bytes to ELF files.
6 | # The script is not super robust, but sufficient for Cascade.
7 | # It may be a performance bottleneck of Cascade, so it may be worth optimizing at some point.
8 |
9 | from params.runparams import DO_ASSERT
10 | from makeelf.elf import *
11 | import os
12 | import subprocess
13 |
14 | # @param inbytes the bytes to put into the ELF file. Note that they must already be in little-endian format.
15 | # @param section_addr may be None
16 | # @return None
17 | def gen_elf(inbytes: bytes, start_addr: int, section_addr: int, destination_path: str, is_64bit: bool) -> None:
18 | if DO_ASSERT:
19 | assert destination_path
20 |
21 | elf = ELF(e_machine=EM.EM_RISCV, e_data=ELFDATA.ELFDATA2LSB, e_entry=start_addr)
22 |
23 | # Create the section
24 | SH_FLAGS = 0x6 # Loadable and executable
25 | section_id = elf.append_section('.text.init', inbytes, start_addr, sh_flags=SH_FLAGS, sh_addralign=4)
26 | elf.append_segment(section_id, addr=start_addr, p_offset=0xe2) # Very hacky, we hardcode the section offset.
27 | elf_bytes = bytes(elf) # Cast to bytes first: the cast has side effects on the ELF object (such as offset computation) that must happen before the offsets are checked below.
28 |
29 | # Check that the offsets in the program header and in the section header match
30 | assert len(elf.Elf.Phdr_table) == 1, "Expected only a single program header"
31 | assert elf.Elf.Phdr_table[0].p_offset == elf.Elf.Shdr_table[-1].sh_offset, "In ELF: offset mismatch between Phdr and Shdr. Maybe the hack with makeelf did not work this time."
32 |
33 | # Finally, write the bytes into the ELF object
34 | with open(destination_path, 'wb') as f:
35 | f.write(elf_bytes)
36 |
37 | # Relocate the section
38 | if section_addr is not None:
39 | if is_64bit:
40 | subprocess.run([f"riscv{os.environ['CASCADE_RISCV_BITWIDTH']}-unknown-elf-objcopy", '--change-section-address', f".text.init={hex(section_addr)}", '-I', 'elf32-littleriscv', '-O', 'elf64-littleriscv', destination_path])
41 | else:
42 | subprocess.run([f"riscv{os.environ['CASCADE_RISCV_BITWIDTH']}-unknown-elf-objcopy", '--change-section-address', f".text.init={hex(section_addr)}", destination_path])
43 | else:
44 | if is_64bit:
45 | subprocess.run([f"riscv{os.environ['CASCADE_RISCV_BITWIDTH']}-unknown-elf-objcopy", '-I', 'elf32-littleriscv', '-O', 'elf64-littleriscv', destination_path])
46 |
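A hypothetical invocation of gen_elf (requires the makeelf package; with section_addr=None and is_64bit=False no objcopy call is made, so the RISC-V toolchain is not needed). The payload and paths are placeholders.

    from common.bytestoelf import gen_elf

    if __name__ == '__main__':
        nops = (0x00000013).to_bytes(4, 'little') * 4  # four `addi x0, x0, 0` (nop) encodings
        gen_elf(nops, 0x80000000, None, '/tmp/nops.elf', False)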
--------------------------------------------------------------------------------
/fuzzer/common/sim/commonsim.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import os
6 | from params.runparams import DO_ASSERT
7 |
8 | # Builds a copy of the OS environment with the simulation variables set.
9 | # sram_elf: path to the ELF to load into the SRAM
10 | # bootrom_elf: path relative to cascadedir
11 | def setup_sim_env(sram_elf, bootrom_elf, tracefile, simtime, cascadedir, coveragefile, verbose: bool = True):
12 | if DO_ASSERT:
13 | assert isinstance(sram_elf, str)
14 | assert isinstance(bootrom_elf, str) or bootrom_elf is None
15 | assert isinstance(tracefile, str) or tracefile is None
16 | assert isinstance(simtime, int)
17 | assert isinstance(coveragefile, str) or coveragefile is None
18 |
19 | # Make the bootrom path absolute (it is given relative to cascadedir).
20 | if bootrom_elf:
21 | bootrom_elf = os.path.join(cascadedir, bootrom_elf)
22 |
23 | # Copy the OS environment.
24 | my_env = os.environ.copy()
25 |
26 | # Replace the environment simlen.
27 | my_env["SIMLEN"] = str(simtime)
28 |
29 | if tracefile:
30 | my_env["TRACEFILE"] = tracefile
31 | else:
32 | my_env.pop("TRACEFILE", None) # Remove TRACEFILE if it exists
33 |
34 | if coveragefile:
35 | my_env["COVERAGEFILE"] = coveragefile
36 | # For Modelsim
37 | my_env["MODELSIM_VLOG_COVERFLAG"] = '+cover'
38 | my_env["MODELSIM_VSIM_COVERFLAG"] = '-coverage'
39 | my_env["MODELSIM_VSIM_COVERPATH"] = coveragefile
40 | else:
41 | my_env.pop("COVERAGEFILE", None)
42 | # For Modelsim
43 | my_env.pop("MODELSIM_VLOG_COVERFLAG", None)
44 | my_env.pop("MODELSIM_VSIM_COVERFLAG", None)
45 | my_env.pop("MODELSIM_VSIM_COVERPATH", None)
46 |
47 | # Replace the environment ELF paths.
48 | my_env["SIMSRAMELF"] = sram_elf
49 | if bootrom_elf:
50 | my_env["SIMROMELF"] = bootrom_elf
51 | else:
52 | my_env["SIMROMELF"] = sram_elf
53 |
54 | if verbose:
55 | print('setting SIMSRAMELF to {}'.format(my_env["SIMSRAMELF"]))
56 | print('setting SIMROMELF to {}'.format(my_env["SIMROMELF"]))
57 | return my_env
58 |
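A hedged usage sketch: build the environment for a simulation run and hand it to subprocess. The ELF, directory and simulator binary paths are placeholders.

    import subprocess
    from common.sim.commonsim import setup_sim_env

    if __name__ == '__main__':
        env = setup_sim_env('/tmp/prog.elf', None, None, 10000, '/cascade-mountdir', None)
        subprocess.run(['./path/to/simulator_binary'], env=env, check=True)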
--------------------------------------------------------------------------------
/fuzzer/common/sim/coverageutil.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script provides utilities to merge coverage files from Verilator and estimate the number of achieved coverage points.
6 |
7 | from params.runparams import PATH_TO_TMP
8 |
9 | import os
10 | import subprocess
11 |
12 | # @param new_coveragepath_list a list of coverage files to merge and from which to extract coverage.
13 | # @param delete_after_merge if true, the input coverage files are deleted on-the-fly.
14 | # @param return_path the path to which the merged coverage file should be written.
15 | # @return a dict mapping each coverage type to its value (absolute counts if `absolute` is set, percentages otherwise). Also writes the merged coverage to the return path.
16 | def merge_and_extract_coverages_modelsim(design_name, new_coveragepath_list: list, return_path, delete_after_merge: bool = True, absolute: bool = False, name_complement: str = ''):
17 | # Merge coverage files.
18 | command_file_formerge = os.path.join(PATH_TO_TMP, 'coverage_merge_modelsim'+str(hash(tuple(new_coveragepath_list)))+f"{name_complement}.tcl")
19 | with open(command_file_formerge, 'w') as f:
20 | # print(f"return path: {return_path} (type: {type(return_path)})")
21 | # print(f"new_coveragepath_list (type: {type(new_coveragepath_list)}): {new_coveragepath_list}")
22 | f.write('vcover merge ' + ' '.join(new_coveragepath_list) + ' -out ' + return_path + '\nquit -f')
23 | subprocess.run(['vsim', '-64', '-c', '-do', command_file_formerge], check=True, capture_output=True)
24 | if delete_after_merge:
25 | for coverage_file in new_coveragepath_list:
26 | os.remove(coverage_file)
27 | # Delete the command file
28 | os.remove(command_file_formerge)
29 |
30 | # Extract coverage points.
31 | # ...
32 | # ...
33 | # ...
34 | # ...
35 | # Enabled Coverage    Bins    Hits    Misses  Weight  Coverage
36 | # ----------------    ----    ----    ------  ------  --------
37 | # Assertions          2       2       0       1       100.00%
38 | # Branches            28954   18911   10043   1       65.31%
39 | # Conditions          19253   2084    17169   1       10.82%
40 | # Expressions         21037   7461    13576   1       35.46%
41 | # FSM States          10      6       4       1       60.00%
42 | # FSM Transitions     22      7       15      1       31.81%
43 | # Statements          39610   34272   5338    1       86.52%
44 | # Toggles             988006  323898  664108  1       32.78%
45 |
46 | # Total coverage (filtered view): 43.77%
47 | # End time: ...
48 |
49 | command_file_forreport = os.path.join(PATH_TO_TMP, 'coverage_report_modelsim'+str(hash(tuple(new_coveragepath_list)))+f"{name_complement}.tcl")
50 | with open(command_file_forreport, 'w') as f:
51 | f.write('vcover report ' + return_path + ' -summary' + '\nquit -f')
52 | exec_out = subprocess.run(['vsim', '-64', '-c', '-do', command_file_forreport], check=True, text=True, capture_output=True)
53 | # Delete the command file
54 | os.remove(command_file_forreport)
55 |
56 | outlines = exec_out.stdout.split('\n')
57 |
58 | # Extract the coverage percentage for the different coverage types.
59 | coverage_types = ['Branches', 'Conditions', 'Expressions', 'FSM States', 'FSM Transitions', 'Statements', 'Toggles']
60 |
61 | if absolute:
62 | coverage_vals = {}
63 | for coverage_type in coverage_types:
64 | for line in outlines:
65 | if coverage_type in line:
66 | if 'FSM' in line:
67 | coverage_vals[coverage_type] = int(line.split()[3]) # 'Hits' column; 'FSM ...' type names span two tokens
68 | else:
69 | coverage_vals[coverage_type] = int(line.split()[2]) # 'Hits' column
70 | break
71 | return coverage_vals
72 | else:
73 | coverage_percentages = {}
74 | for coverage_type in coverage_types:
75 | for line in outlines:
76 | if coverage_type in line:
77 | coverage_percentages[coverage_type] = float(line.split()[-1][:-1])
78 | break
79 | return coverage_percentages
80 |
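A standalone sanity check of the column layout parsed above, using the sample report lines from the comment: the 'Hits' column is token 2 for single-word coverage types and token 3 for the two-word 'FSM ...' types, and the percentage is the last token with its '%' stripped.

    line = 'Branches            28954   18911   10043   1       65.31%'
    assert int(line.split()[2]) == 18911            # Hits
    assert float(line.split()[-1][:-1]) == 65.31    # Coverage percentage

    fsm_line = 'FSM States          10      6       4       1       60.00%'
    assert int(fsm_line.split()[3]) == 6            # Hits: the type name spans two tokens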
--------------------------------------------------------------------------------
/fuzzer/common/sim/modelsim.py:
--------------------------------------------------------------------------------
1 | import os
2 | import filelock
3 | from pathlib import Path
4 |
5 | def get_next_worker_id():
6 | lockfile_path = os.getenv("MODELSIM_LOCKFILE")
7 | if not os.path.exists(lockfile_path):
8 | Path(os.path.dirname(lockfile_path)).mkdir(parents=True, exist_ok=True)
9 | Path(lockfile_path).touch()
10 | lock = filelock.FileLock(f"{lockfile_path}.lock")
11 | with lock:
12 | with open(lockfile_path, "r") as f:
13 | prev_id = f.read()
14 | if prev_id == "":
15 | prev_id = 0
16 | my_id = ((int(prev_id) + 1) % int(os.getenv("MODELSIM_MAX_INSTANCES")))
17 | with open(lockfile_path, "w") as f:
18 | f.write(str(my_id))
19 | return my_id
20 |
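A hedged usage sketch: with the two environment variables set, successive calls hand out worker ids round-robin among processes sharing the lock file. The path and instance count are placeholders.

    import os
    from common.sim.modelsim import get_next_worker_id

    if __name__ == '__main__':
        os.environ.setdefault('MODELSIM_LOCKFILE', '/tmp/modelsim_workers.txt')
        os.environ.setdefault('MODELSIM_MAX_INSTANCES', '4')
        print([get_next_worker_id() for _ in range(6)])  # e.g. [1, 2, 3, 0, 1, 2] on a fresh lock file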
--------------------------------------------------------------------------------
/fuzzer/common/threads.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This is a function to capture the output of a process in real time and to apply a timeout without losing this output.
6 | # It was initially designed for RFUZZ, where we don't necessarily have control over the maximum duration of the fuzzing.
7 |
8 | import subprocess
9 | import threading
10 |
11 | def capture_process_output(cmd, timeout=None):
12 | process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
13 |
14 | # Flag to indicate if the process has finished
15 | process_finished = threading.Event()
16 |
17 | out_lines = []
18 |
19 | # Define a function to read process output
20 | def read_output():
21 | while not process_finished.is_set():
22 | output_line = process.stdout.readline()
23 | if output_line == '' and process.poll() is not None:
24 | break
25 | if output_line:
26 | out_lines.append(output_line)
27 |
28 | # Start a thread to read process output
29 | output_thread = threading.Thread(target=read_output)
30 | output_thread.start()
31 |
32 | # Wait for the process to finish or time out
33 | try:
34 | process.wait(timeout=timeout)
35 | except subprocess.TimeoutExpired:
36 | process.terminate() # Terminate first so that the reader thread's readline() unblocks and the join below cannot hang
37 | finally:
38 | process_finished.set()
39 | output_thread.join()
40 |
41 | # Kill the process if it somehow survived terminate()
42 | if process.poll() is None:
43 | process.kill()
44 |
45 | return out_lines
46 |
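Usage sketch: capture the output of a command that overruns a one-second budget; output produced before the timeout is preserved (exec makes the shell replace itself with sleep, so terminate() reaches the process holding the pipe).

    from common.threads import capture_process_output

    if __name__ == '__main__':
        lines = capture_process_output("echo started; exec sleep 10", timeout=1)
        print(lines)  # ['started\n']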
--------------------------------------------------------------------------------
/fuzzer/common/timeout.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | from functools import wraps
6 | from multiprocessing.context import TimeoutError
7 | from multiprocessing.pool import ThreadPool
8 |
9 | def timeout(seconds):
10 | def timeout_wrapper(func):
11 | @wraps(func)
12 | def wrapped(*args, **kwargs):
13 | try:
14 | pool = ThreadPool(processes=1)
15 | result = pool.apply_async(func, args, kwargs)
16 | return result.get(timeout=seconds)
17 | except TimeoutError:
18 | return None
19 | return wrapped
20 | return timeout_wrapper
21 |
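Usage sketch for the decorator above: the wrapped call returns None once it exceeds its budget instead of raising. Note that the worker thread itself keeps running in the background; only the caller is released.

    import time
    from common.timeout import timeout

    @timeout(1)
    def slow():
        time.sleep(5)
        return 'done'

    if __name__ == '__main__':
        print(slow())  # None, after roughly one second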
--------------------------------------------------------------------------------
/fuzzer/do_analyze_cascade_elfs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script analyzes the properties of the Cascade-generated ELFs.
6 |
7 | from params.runparams import PATH_TO_TMP
8 | from analyzeelfs.genmanyelfs import gen_many_elfs
9 | from analyzeelfs.analyze import analyze_elf_prevalence, analyze_elf_dependencies
10 | from analyzeelfs.plot import plot_cascade_dependencies, plot_cascade_prevalences
11 |
12 | import os
13 |
14 | if __name__ == '__main__':
15 | if "CASCADE_ENV_SOURCED" not in os.environ:
16 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
17 |
18 | NUM_ELFS = 500
19 |
20 | num_cores_for_elf_generation = int(os.getenv('CASCADE_JOBS', 160))
21 |
22 | gen_many_elfs('rocket', num_cores_for_elf_generation, NUM_ELFS, os.path.join(PATH_TO_TMP, 'manyelfs'))
23 |
24 | prevalence_json = analyze_elf_prevalence(False, NUM_ELFS)
25 | dependencies_json = analyze_elf_dependencies(False, 'rocket', NUM_ELFS)
26 |
27 | plot_cascade_prevalences(prevalence_json)
28 | plot_cascade_dependencies(dependencies_json)
29 |
30 | else:
31 | raise Exception("This module must be at the toplevel.")
32 |
--------------------------------------------------------------------------------
/fuzzer/do_analyze_dependencies.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script analyzes the register dependencies of the Cascade-generated ELFs.
6 |
7 | from params.runparams import PATH_TO_TMP
8 | from analyzeelfs.genmanyelfs import gen_many_elfs
9 | from analyzeelfs.analyze import analyze_elf_prevalence, analyze_elf_dependencies
10 | from analyzeelfs.plot import plot_cascade_dependencies, plot_cascade_prevalences
11 |
12 | import os
13 |
14 | if __name__ == '__main__':
15 | if "CASCADE_ENV_SOURCED" not in os.environ:
16 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
17 |
18 | NUM_ELFS = 500
19 |
20 | num_cores_for_elf_generation = int(os.getenv('CASCADE_JOBS', 160))
21 |
22 | gen_many_elfs('rocket', num_cores_for_elf_generation, NUM_ELFS, os.path.join(PATH_TO_TMP, 'manyelfs'))
23 |
24 | dependencies_json_path = analyze_elf_dependencies(False, 'rocket', NUM_ELFS)
25 | print('dependencies_json_path', dependencies_json_path)
26 |
27 | plot_cascade_dependencies(dependencies_json_path)
28 |
29 | else:
30 | raise Exception("This module must be at the toplevel.")
31 |
--------------------------------------------------------------------------------
/fuzzer/do_analyze_difuzzrtl_elfs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script analyzes the properties of the DifuzzRTL-generated ELFs.
6 |
7 | from analyzeelfs.analyze import analyze_elf_prevalence, analyze_elf_dependencies, analyze_elf_symbols
8 | from analyzeelfs.plot import plot_difuzzrtl_completions, plot_difuzzrtl_prevalences, plot_difuzzrtl_instrages
9 |
10 | import os
11 | import sys
12 |
13 | if __name__ == '__main__':
14 | if "CASCADE_ENV_SOURCED" not in os.environ:
15 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
16 |
17 | num_elfs = 50
18 | if len(sys.argv) > 1:
19 | num_elfs = int(sys.argv[1])
20 |
21 | num_cores_for_elf_generation = int(os.getenv('CASCADE_JOBS', 160))
22 |
23 | prevalence_json = analyze_elf_prevalence(True, num_elfs)
24 | dependencies_json = analyze_elf_dependencies(True, 'rocket', num_elfs)
25 | symbols_json = analyze_elf_symbols(num_elfs)
26 |
27 | plot_difuzzrtl_prevalences(prevalence_json)
28 | plot_difuzzrtl_instrages(dependencies_json)
29 | plot_difuzzrtl_completions(symbols_json)
30 |
31 | else:
32 | raise Exception("This module must be at the toplevel.")
33 |
--------------------------------------------------------------------------------
/fuzzer/do_collect_difuzz_coverage.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script collects coverage of DifuzzRTL.
6 |
7 | from benchmarking.collectdifuzzcoverage import collectdifuzzcoverage, plot_difuzzrtl_coverage
8 |
9 | import os
10 |
11 | if __name__ == '__main__':
12 | if "CASCADE_ENV_SOURCED" not in os.environ:
13 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
14 |
15 | collectdifuzzcoverage()
16 | plot_difuzzrtl_coverage()
17 |
18 | else:
19 | raise Exception("This module must be at the toplevel.")
20 |
--------------------------------------------------------------------------------
/fuzzer/do_compare_cascade_difuzzrtl_modelsim.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script measures the simulator coverage of Cascade and DifuzzRTL.
6 |
7 | from modelsim.comparedifuzzmodelsim import collect_coverage_modelsim_nomerge, merge_coverage_modelsim
8 | from modelsim.plot import plot_coverage_global
9 |
10 | import multiprocessing as mp
11 | import os
12 |
13 | if __name__ == '__main__':
14 | if "CASCADE_ENV_SOURCED" not in os.environ:
15 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
16 |
17 | num_workers = max(int(os.getenv('CASCADE_JOBS', 160)) // 4, 1)
18 | TARGET_NUMINSTRS = 1_100_000
19 | PLOT_NUMINSTRS = 1_000_000
20 | NUM_SERIES = 10
21 |
22 | # Cascade
23 |
24 | # Generate enough ELFs
25 | for series_id in range(NUM_SERIES):
26 | collect_coverage_modelsim_nomerge(False, series_id, 'rocket', num_workers, TARGET_NUMINSTRS, None)
27 |
28 | # DifuzzRTL
29 |
30 | # Generate the DifuzzRTL ELFs
31 | collect_coverage_modelsim_nomerge(True, 0, 'rocket', num_workers, TARGET_NUMINSTRS, None)
32 |
33 | # Run merging the coverage
34 | workloads = [(True, 0, TARGET_NUMINSTRS)]
35 | for series_id in range(NUM_SERIES):
36 | workloads.append((False, series_id, TARGET_NUMINSTRS))
37 | with mp.Pool(min(NUM_SERIES+1, num_workers)) as pool:
38 | pool.starmap(merge_coverage_modelsim, workloads)
39 |
40 | # Plot the coverage
41 | plot_coverage_global(NUM_SERIES, PLOT_NUMINSTRS, TARGET_NUMINSTRS)
42 |
43 | else:
44 | raise Exception("This module must be at the toplevel.")
45 |
--------------------------------------------------------------------------------
/fuzzer/do_compareexecutions.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script compares the execution of two ELFs.
6 | # This is typically useful in development/debug to ensure that AIPS does not break the program control flow.
7 |
8 | from cascade.debug.compareexecutions import compare_executions
9 | from common.spike import calibrate_spikespeed
10 |
11 | import os
12 |
13 | if __name__ == '__main__':
14 | if "CASCADE_ENV_SOURCED" not in os.environ:
15 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
16 |
17 | design_name = 'vexriscv'
18 | numinstrs = 100
19 |
20 | elfpath1 = '/scratch/flsolt/data/python-tmp/spikedoublecheck399003_vexriscv_51_27.elf'
21 | elfpath2 = '/scratch/flsolt/data/python-tmp/spikereduce399003_vexriscv_51_27_12_57_1_0.elf'
22 |
23 | calibrate_spikespeed()
24 |
25 | compare_executions(design_name, elfpath1, elfpath2, numinstrs)
26 |
27 | else:
28 | raise Exception("This module must be at the toplevel.")
29 |
--------------------------------------------------------------------------------
/fuzzer/do_debug_reduce.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This is a debug script.
6 |
7 | from cascade.debug.debugreduce import debug_top
8 | from common.profiledesign import profile_get_medeleg_mask
9 | from common.spike import calibrate_spikespeed
10 | from cascade.toleratebugs import tolerate_bug_for_eval_reduction
11 |
12 | import os
13 |
14 | if __name__ == '__main__':
15 | if "CASCADE_ENV_SOURCED" not in os.environ:
16 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
17 |
18 | design_name = 'cva6'
19 | descriptor = (42750, design_name, 58, 29, True)
20 |
21 | calibrate_spikespeed()
22 | profile_get_medeleg_mask(design_name)
23 |
24 | debug_top(*descriptor, 1, 0, 0x63d8)
25 |
26 | else:
27 | raise Exception("This module must be at the toplevel.")
28 |
--------------------------------------------------------------------------------
/fuzzer/do_evalreduction.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script evaluates the performance of program reduction.
6 |
7 | from benchmarking.timereduction import eval_reduction, plot_eval_reduction
8 | from benchmarking.findnfailinginstances import find_n_failing_descriptors
9 | from cascade.toleratebugs import tolerate_bug_for_eval_reduction
10 |
11 | import os
12 | import sys
13 |
14 | if __name__ == '__main__':
15 | if "CASCADE_ENV_SOURCED" not in os.environ:
16 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
17 |
18 | num_failing_programs_to_consider = 10
19 | if len(sys.argv) > 1:
20 | num_failing_programs_to_consider = int(sys.argv[1])
21 | num_cores = max(int(os.getenv('CASCADE_JOBS', 160)) // 4, 1)
22 |
23 | design_names = [
24 | 'picorv32',
25 | 'kronos',
26 | 'vexriscv',
27 | 'rocket',
28 | 'cva6',
29 | 'boom',
30 | ]
31 |
32 | for design_name in design_names:
33 | tolerate_bug_for_eval_reduction(design_name, True)
34 | find_n_failing_descriptors(design_name, num_failing_programs_to_consider*2, num_cores, 0, True)
35 | eval_reduction(design_name, num_failing_programs_to_consider, num_cores)
36 | tolerate_bug_for_eval_reduction(design_name, False)
37 |
38 | plot_eval_reduction(design_names, num_failing_programs_to_consider)
39 |
40 | else:
41 | raise Exception("This module must be at the toplevel.")
42 |
--------------------------------------------------------------------------------
/fuzzer/do_fuzzdesign.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script executes the fuzzer on a given design to find faulting programs.
6 |
7 | # sys.argv[1]: design name
8 | # sys.argv[2]: num of cores allocated to fuzzing
9 | # sys.argv[3]: offset for seed (to avoid running the fuzzing on the same instances over again)
10 | # sys.argv[4]: authorize privileges (by default 1)
11 | # sys.argv[5]: tolerate some bug (by default 0)
12 |
13 | from top.fuzzdesign import fuzzdesign
14 | from cascade.toleratebugs import tolerate_bug_for_eval_reduction
15 | from common.designcfgs import get_design_cascade_path
16 |
17 | import os
18 | import sys
19 |
20 | if __name__ == '__main__':
21 | if "CASCADE_ENV_SOURCED" not in os.environ:
22 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
23 |
24 | if len(sys.argv) < 4:
25 | raise Exception("Usage: python3 do_fuzzdesign.py <design_name> <num_cores> <seed_offset> [<authorize_privileges>] [<tolerate_some_bug>]")
26 |
27 | print(get_design_cascade_path(sys.argv[1]))
28 |
29 | if len(sys.argv) > 4:
30 | authorize_privileges = int(sys.argv[4])
31 | else:
32 | authorize_privileges = 1
33 |
34 | if len(sys.argv) > 5:
35 | tolerate_some_bug = int(sys.argv[5])
36 | else:
37 | tolerate_some_bug = 0
38 |
39 | if tolerate_some_bug:
40 | tolerate_bug_for_eval_reduction(sys.argv[1])
41 |
42 | fuzzdesign(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), authorize_privileges)
43 |
44 | else:
45 | raise Exception("This module must be at the toplevel.")
46 |
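A hypothetical invocation matching the argument list documented at the top of the script (the design name and numeric values are placeholders):

    python3 do_fuzzdesign.py rocket 64 0 1 0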
--------------------------------------------------------------------------------
/fuzzer/do_fuzzperf.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script measures the program construction relative performance.
6 |
7 | from benchmarking.fuzzperf import benchmark_collect_construction_performance, plot_construction_performance
8 |
9 | import os
10 |
11 | if __name__ == '__main__':
12 | if "CASCADE_ENV_SOURCED" not in os.environ:
13 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
14 |
15 | num_cores = max(int(os.environ['CASCADE_JOBS']) // 2, 1)
16 |
17 | benchmark_collect_construction_performance(num_cores)
18 | plot_construction_performance()
19 |
20 | else:
21 | raise Exception("This module must be at the toplevel.")
22 |
--------------------------------------------------------------------------------
/fuzzer/do_fuzzsingle.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script executes a single program.
6 |
7 | from cascade.fuzzfromdescriptor import fuzz_single_from_descriptor
8 | from common.profiledesign import profile_get_medeleg_mask
9 | from common.spike import calibrate_spikespeed
10 | from cascade.toleratebugs import tolerate_bug_for_eval_reduction
11 |
12 | import os
13 |
14 | if __name__ == '__main__':
15 | if "CASCADE_ENV_SOURCED" not in os.environ:
16 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
17 |
18 | design_name = 'boom'
19 | descriptor = (881540, design_name, 5000017, 51, True)
20 |
21 | # tolerate_bug_for_eval_reduction(design_name)
22 |
23 | calibrate_spikespeed()
24 | profile_get_medeleg_mask(design_name)
25 |
26 | fuzz_single_from_descriptor(*descriptor, check_pc_spike_again=True)
27 |
28 | else:
29 | raise Exception("This module must be at the toplevel.")
30 |
--------------------------------------------------------------------------------
/fuzzer/do_genelfs_for_questa.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script generates the Cascade and DifuzzRTL ELFs used for the Questa (Modelsim) experiments.
6 |
7 | from common.spike import calibrate_spikespeed
8 | from analyzeelfs.genmanyelfs import gen_many_elfs
9 | from modelsim.patchwritetohost import replace_write_to_host
10 | from modelsim.countinstrs import countinstrs_difuzzrtl
11 |
12 | import os
13 | import subprocess
14 | import multiprocessing as mp
15 |
16 | # Change for your own setup using environment variables.
17 | DIFUZZRTL_FUZZER_DIR_PATH = '/cascade-difuzzrtl/docker/shareddir/savedockerdifuzzrtl/Fuzzer'
18 | SPIKE_PATH_FOR_DIFUZZRTL = '/opt/riscv/bin/spike'
19 | DIFUZZRTL_CASCADE_MOUNTDIR = '/cascade-mountdir'
20 |
21 |
22 | def __patch_difuzzrtl_writetohost_worker(elfpath):
23 | # patched_elfpath is elfpath, where the base file name is prepended with 'patched_'
24 | patched_elfpath = os.path.join(os.path.dirname(elfpath), 'patched_' + os.path.basename(elfpath))
25 | return replace_write_to_host(elfpath, patched_elfpath)
26 |
27 | def __countinstrs_difuzzrtl_worker(elfpath):
29 | # patched_elfpath is elfpath, where the base file name is prepended with 'patched_'
29 | patched_elfpath = os.path.join(os.path.dirname(elfpath), 'patched_' + os.path.basename(elfpath))
30 | numinstrs = countinstrs_difuzzrtl(patched_elfpath)
31 |
32 | # The output path is the elfpath where .elf is substituted with _numinstrs.txt
33 | retpath = os.path.join(os.path.dirname(elfpath), os.path.basename(elfpath).replace('.elf', '_numinstrs.txt'))
34 | with open(retpath, 'w') as f:
35 | f.write(hex(numinstrs))
36 |
37 | if __name__ == '__main__':
38 | if "CASCADE_ENV_SOURCED" not in os.environ:
39 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
40 |
41 | num_cascade_elfs = 5000
42 | num_difuzzrtl_elfs = 25000
43 |
44 | # Get the environment variables if they are defined
45 | DIFUZZRTL_FUZZER_DIR_PATH_CANDIDATE = os.getenv('CASCADE_PATH_TO_DIFUZZRTL_FUZZER')
46 | if DIFUZZRTL_FUZZER_DIR_PATH_CANDIDATE is not None:
47 | DIFUZZRTL_FUZZER_DIR_PATH = DIFUZZRTL_FUZZER_DIR_PATH_CANDIDATE
48 | SPIKE_PATH_FOR_DIFUZZRTL_CANDIDATE = os.getenv('CASCADE_PATH_TO_SPIKE_FOR_DIFUZZRTL')
49 | if SPIKE_PATH_FOR_DIFUZZRTL_CANDIDATE is not None:
50 | SPIKE_PATH_FOR_DIFUZZRTL = SPIKE_PATH_FOR_DIFUZZRTL_CANDIDATE
51 | DIFUZZRTL_CASCADE_MOUNTDIR_CANDIDATE = os.getenv('CASCADE_PATH_TO_DIFUZZRTL_MOUNTDIR')
52 | if DIFUZZRTL_CASCADE_MOUNTDIR_CANDIDATE is not None:
53 | DIFUZZRTL_CASCADE_MOUNTDIR = DIFUZZRTL_CASCADE_MOUNTDIR_CANDIDATE
54 |
55 | target_dir = DIFUZZRTL_CASCADE_MOUNTDIR
56 | num_cores = int(os.getenv('CASCADE_JOBS', 160))
57 |
58 | assert os.path.isdir(target_dir), f"Target directory {target_dir} does not exist."
59 |
60 | # Generate the Cascade ELFs. This is typically fast because it is parallelized.
61 | gen_many_elfs('rocket', num_cores, num_cascade_elfs, target_dir)
62 |
63 | # Generate the DifuzzRTL ELFs. This is typically slow because it is sequential.
64 | cmd_gen_difuzzrtl_elfs = f"cd {DIFUZZRTL_FUZZER_DIR_PATH} && make SIM_BUILD=builddir VFILE=RocketTile_state TOPLEVEL=RocketTile NUM_ITER={num_difuzzrtl_elfs} OUT=outdir IS_CASCADE=0 IS_RECORD=1 SPIKE={SPIKE_PATH_FOR_DIFUZZRTL}"
65 | print('Running command to generate DifuzzRTL ELFs: ' + cmd_gen_difuzzrtl_elfs)
66 | subprocess.run(cmd_gen_difuzzrtl_elfs, shell=True, check=True)
67 |
68 | # Now we must move the generated ELFs to the target_dir.
69 | cmd_move_difuzzrtl_elfs = f"mv {os.environ['CASCADE_PATH_TO_DIFUZZRTL_ELFS']}* {target_dir}"
70 | print('Running command to move the DifuzzRTL ELFs: ' + cmd_move_difuzzrtl_elfs)
71 | subprocess.run(cmd_move_difuzzrtl_elfs, shell=True, check=True)
72 |
73 | # Patch the DifuzzRTL ELFs
74 | # Get the ELFs
75 | difuzzrtl_elfpaths = []
76 | for elf in os.listdir(target_dir):
77 | if elf.startswith('id_'):
78 | difuzzrtl_elfpaths.append(os.path.join(target_dir, elf))
79 | # Patch each ELF
80 | with mp.Pool(num_cores) as pool:
81 | pool.map(__patch_difuzzrtl_writetohost_worker, difuzzrtl_elfpaths)
82 | # Count the number of instructions in each ELF
83 | calibrate_spikespeed()
84 | with mp.Pool(num_cores) as pool:
85 | pool.map(__countinstrs_difuzzrtl_worker, difuzzrtl_elfpaths)
86 |
87 | print(' Done generating the ELFs for Modelsim.')
88 |
89 | else:
90 | raise Exception("This module must be at the toplevel.")
91 |
--------------------------------------------------------------------------------
/fuzzer/do_genmanyelfs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script generates many Cascade ELFs.
6 |
7 | from analyzeelfs.genmanyelfs import gen_many_elfs
8 | from params.runparams import PATH_TO_TMP
9 |
10 | import os
11 | import sys
12 |
13 | if __name__ == '__main__':
14 | if "CASCADE_ENV_SOURCED" not in os.environ:
15 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
16 |
17 | num_elfs = 500
18 | target_dir = os.path.join(PATH_TO_TMP, 'manyelfs')
19 | if len(sys.argv) > 1:
20 | num_elfs = int(sys.argv[1])
21 | if len(sys.argv) > 2:
22 | target_dir = sys.argv[2]
23 | num_cores = int(os.getenv('CASCADE_JOBS', 160))
24 |
25 | gen_many_elfs('rocket', num_cores, num_elfs, target_dir)
26 |
27 | else:
28 | raise Exception("This module must be at the toplevel.")
29 |
--------------------------------------------------------------------------------
/fuzzer/do_numinstrs_statistics.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script generates some statistics on the number of instructions per test case.
6 |
7 | from cascade.fuzzfromdescriptor import gen_new_test_instance, fuzz_single_from_descriptor
8 | from cascade.basicblock import gen_basicblocks
9 | from cascade.fuzzerstate import FuzzerState
10 | from common.designcfgs import get_design_boot_addr
11 |
12 | import numpy as np
13 | import os
14 | import sys
15 | import random
16 | import json
17 | import multiprocessing as mp
18 | from common.profiledesign import profile_get_medeleg_mask
19 | from common.spike import calibrate_spikespeed
20 |
21 | # sys.argv[1]: num samples
22 | # sys.argv[2]: num cores
23 |
24 | design_name = "rocket"
25 |
26 | def gen_numinstrs_single_elf(randseed: int):
27 | memsize, _, _, num_bbs, authorize_privileges = gen_new_test_instance(design_name, randseed, True)
28 |
29 | random.seed(randseed)
30 | fuzzerstate = FuzzerState(get_design_boot_addr(design_name), design_name, memsize, randseed, num_bbs, authorize_privileges, None, False)
31 | gen_basicblocks(fuzzerstate)
32 | num_fuzzing_instrs = sum([len(bb) for bb in fuzzerstate.instr_objs_seq])
33 | return num_fuzzing_instrs
34 |
35 | if __name__ == '__main__':
36 | if "CASCADE_ENV_SOURCED" not in os.environ:
37 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
38 |
39 | num_samples = int(sys.argv[1])
40 | num_workers = int(sys.argv[2])
41 |
42 | ##############
43 | # Gen the data
44 | ##############
45 |
46 | # # Numbers from 1 to num_samples
47 | # random_seeds = [i for i in range(num_samples)]
48 |
49 | # calibrate_spikespeed()
50 | # profile_get_medeleg_mask(design_name)
51 |
52 | # with mp.Pool(processes=num_workers) as pool:
53 | # all_numinstrs = pool.map(gen_numinstrs_single_elf, random_seeds)
54 |
55 | # json.dump(all_numinstrs, open("numinstrs.json", "w"))
56 |
57 | ##############
58 | # Plot the data
59 | ##############
60 | # Plot as a histogram
61 |
62 | all_numinstrs = json.load(open("numinstrs.json", "r"))
63 | print("Num programs:", len(all_numinstrs))
64 |
65 | import matplotlib.pyplot as plt
66 | BINS = 100
67 |
68 | hist, bin_edges = np.histogram(all_numinstrs, bins=BINS)
69 |
70 | # Define the colors you want to use for the bars alternately
71 | dark_gray = (100/255, 100/255, 100/255)
72 | colors = ['black', dark_gray]
73 |
74 | # Create a figure and axis
75 | fig, ax = plt.subplots(figsize=(6, 1.4))
76 |
77 | # Create the histogram bars with alternating colors
78 | for i in range(BINS):
79 | ax.bar(bin_edges[i], hist[i], width=bin_edges[i+1] - bin_edges[i], color=colors[i % len(colors)], edgecolor='black', linewidth=0.5, zorder=3)
80 |
81 | ax.yaxis.grid(which='major', color='gray', zorder=1, linewidth=0.4)
82 | ax.set_axisbelow(True)
83 |
84 | plt.xlabel('Number of fuzzing instructions')
85 | plt.ylabel('Frequency')
86 | plt.tight_layout()
87 |
88 | # Save the histogram in the current directory
89 | plt.savefig('numinstrs.png', dpi=300)
90 |
91 |
92 | else:
93 | raise Exception("This module must be at the toplevel.")
94 |
--------------------------------------------------------------------------------
/fuzzer/do_performance_ubenchmark_fewinstructions.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script executes the fuzzer on Rocket and measures the performance when we limit ourselves to a certain number of instructions.
6 |
7 | # sys.argv[1]: offset for seed (to avoid running the fuzzing on the same instances over again)
8 | # sys.argv[2]: time limit per core in seconds; sys.argv[3]: number of worker processes
9 |
10 | from top.fuzzforperfubenchfewerinstructions import fuzz_for_perf_ubench_fewerinstructions
11 | from cascade.toleratebugs import tolerate_bug_for_eval_reduction
12 | from params.fuzzparams import get_max_num_instructions_upperbound
13 | from common.spike import calibrate_spikespeed
14 | from common.profiledesign import profile_get_medeleg_mask
15 |
16 | import json
17 | import os
18 | import sys
19 |
20 | def single_measurement(design: str, num_instructions: int, time_limit_seconds: int, seed_offset: int):
21 | authorize_privileges = True
22 | instance_gen_durations, run_durations, effective_num_instructions = fuzz_for_perf_ubench_fewerinstructions(design, num_instructions*1234567 + seed_offset, authorize_privileges, time_limit_seconds, num_instructions)
23 | return instance_gen_durations, run_durations, effective_num_instructions
24 | # print(f"Instance generation durations: {instance_gen_durations}")
25 | # print(f"Run durations: {run_durations}")
26 | # print(f"Effective_num_instructions: {effective_num_instructions}")
27 |
28 | # instructions_per_millisecond_nogen = effective_num_instructions / (sum(run_durations) * 1000)
29 | # instructions_per_millisecond_livegen = effective_num_instructions / ((sum(run_durations) + sum(instance_gen_durations)) * 1000)
30 | # print(f"Instructions per millisecond (nogen): {instructions_per_millisecond_nogen}")
31 | # print(f"Instructions per millisecond (livegen): {instructions_per_millisecond_livegen}")
32 |
33 |
34 | if __name__ == '__main__':
35 | import multiprocessing as mp
36 |
37 | if "CASCADE_ENV_SOURCED" not in os.environ:
38 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
39 |
40 | if len(sys.argv) != 4:
41 | raise Exception("Usage: python3 do_performance_ubenchmark_fewinstructions.py <seed_offset> <time_limit_seconds_per_core> <num_workers>")
42 |
43 | seed_offset = int(sys.argv[1])
44 | time_limit_seconds_per_core = int(sys.argv[2])
45 | num_workers = int(sys.argv[3])
46 |
47 | design_names = [
48 | 'picorv32',
49 | 'kronos',
50 | 'vexriscv',
51 | 'rocket',
52 | 'cva6',
53 | 'boom',
54 | ]
55 |
56 | nums_instructions = [1, 10, 100, 1000, 10000, 100000]
57 |
58 | calibrate_spikespeed()
59 |
60 | from collections import defaultdict
61 | instance_gen_durations = defaultdict(lambda: defaultdict(list))
62 | run_durations = defaultdict(lambda: defaultdict(list))
63 | effective_num_instructions = defaultdict(lambda: defaultdict(list))
64 |
65 | for design_name in design_names:
66 | profile_get_medeleg_mask(design_name)
67 | for num_instructions in nums_instructions:
68 | print(f"Starting fuzzing for micro-benchmark of programs with {num_instructions} instructions on `{design_name}`.")
69 |
70 | input_pool = [(design_name, num_instructions, time_limit_seconds_per_core, seed_offset+1000*i) for i in range(num_workers)]
71 | seed_offset += 1000000 * num_workers
72 | with mp.Pool(processes=num_workers) as pool:
73 | results = pool.starmap(single_measurement, input_pool)
74 | # results is a list of triples (instance_gen_durations, run_durations, effective_num_instructions)
75 |
76 | for worker_result in results:
77 | new_instance_gen_durations, new_run_durations, new_effective_num_instructions = worker_result
78 | instance_gen_durations[design_name][num_instructions] += new_instance_gen_durations
79 | run_durations[design_name][num_instructions] += new_run_durations
80 | effective_num_instructions[design_name][num_instructions] += new_effective_num_instructions
81 |
82 | with open('perf_ubenchmark_fewinstructions.json', 'w') as f:
83 | json.dump({
84 | 'nums_instructions': nums_instructions,
85 | 'instance_gen_durations': instance_gen_durations,
86 | 'run_durations': run_durations,
87 | 'effective_num_instructions': effective_num_instructions,
88 | }, f)
89 | print("Saved results to perf_ubenchmark_fewinstructions.json")
90 |
91 |
92 |
93 | else:
94 | raise Exception("This module must be at the toplevel.")
95 |
--------------------------------------------------------------------------------
/fuzzer/do_plot_bug_bars.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script plots the bars related to the bug categories and their security implications.
6 |
7 | from miscplots.plotcategories import plot_bugtypes_bars
8 | from miscplots.plotsecuimplications import plot_security_implications
9 |
10 | import os
11 |
12 | if __name__ == '__main__':
13 | if "CASCADE_ENV_SOURCED" not in os.environ:
14 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
15 |
16 | plot_bugtypes_bars()
17 | plot_security_implications()
18 |
--------------------------------------------------------------------------------
/fuzzer/do_plotevalreduction.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script plots the evaluation of the reduction performance.
6 |
7 | from benchmarking.timereduction import plot_eval_reduction
8 |
9 | import os
10 | import sys
11 |
12 | if __name__ == '__main__':
13 | if "CASCADE_ENV_SOURCED" not in os.environ:
14 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
15 |
16 | num_failing_programs_to_consider = 10
17 | if len(sys.argv) > 1:
18 | num_failing_programs_to_consider = int(sys.argv[1])
19 | num_cores = max(int(os.getenv('CASCADE_JOBS', 160)) // 4, 1)
20 |
21 | design_names = [
22 | 'picorv32',
23 | 'kronos',
24 | 'vexriscv',
25 | 'rocket',
26 | 'cva6',
27 | 'boom',
28 | ]
29 |
30 | plot_eval_reduction(design_names, num_failing_programs_to_consider)
31 |
32 | else:
33 | raise Exception("This module must be at the toplevel.")
34 |
--------------------------------------------------------------------------------
/fuzzer/do_reducesingle.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script reduces a single program.
6 |
7 | # sys.argv[1]: design name
8 | # sys.argv[2]: num of cores allocated to fuzzing
9 | # sys.argv[3]: offset for seed (to avoid running the fuzzing on the same instances over again)
10 |
11 | from cascade.reduce import reduce_program
12 | from cascade.toleratebugs import tolerate_bug_for_eval_reduction
13 | from common.profiledesign import profile_get_medeleg_mask
14 | from common.spike import calibrate_spikespeed
15 |
16 | import os
17 |
18 | if __name__ == '__main__':
19 | if "CASCADE_ENV_SOURCED" not in os.environ:
20 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
21 |
22 | design_name = 'cva6'
23 | descriptor = (489442, design_name, 6, 48, False)
24 |
25 | # # Optional
26 | # tolerate_bug_for_eval_reduction(design_name)
27 |
28 | calibrate_spikespeed()
29 | profile_get_medeleg_mask(design_name)
30 |
31 | reduce_program(*descriptor, True, check_pc_spike_again=True)
32 |
33 | else:
34 | raise Exception("This module must be at the toplevel.")
35 |
--------------------------------------------------------------------------------
/fuzzer/do_rfuzz.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script runs RFUZZ.
6 |
7 | # This script takes no command-line arguments: the core count comes from the
8 | # CASCADE_JOBS environment variable and the active-fuzzing timeout from
9 | # TIMEOUT_SECONDS below.
10 |
11 | from rfuzz.collectrfuzz import collect_coverage_rfuzz, _measure_coverage_rfuzz_worker
12 | from rfuzz.collectactiverfuzz import collect_active_coverage_rfuzz
13 | from rfuzz.plot import plot_rfuzz
14 | from common.profiledesign import profile_get_medeleg_mask
15 | from common.spike import calibrate_spikespeed
16 |
17 | import itertools
18 | import multiprocessing as mp
19 | import os
20 |
21 | if __name__ == '__main__':
22 | if "CASCADE_ENV_SOURCED" not in os.environ:
23 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
24 |
25 | TIMEOUT_SECONDS = 120
26 |
27 | calibrate_spikespeed()
28 |
29 | num_cores_for_passive_rfuzz = int(os.getenv('CASCADE_JOBS', 160))
30 |
31 | design_names_for_rfuzz = [
32 | 'vexriscv',
33 | 'kronos',
34 | 'picorv32',
35 | 'rocket',
36 | 'boom',
37 | # 'cva6', Ignored for now as indicated in the paper, until Y1 (CVE-2023-34884) is fixed.
38 | ]
39 |
40 | num_elfs_passive_rfuzz = {
41 | 'vexriscv': 300,
42 | 'kronos': 2500,
43 | 'picorv32': 2500,
44 | 'rocket': 300,
45 | 'boom': 100,
46 | # 'cva6': 1800,
47 | }
48 |
49 | # Passive RFUZZ
50 | design_passive_rfuzz_results = dict()
51 | for design_name in design_names_for_rfuzz:
52 | profile_get_medeleg_mask(design_name)
53 | design_passive_rfuzz_results[design_name] = collect_coverage_rfuzz(design_name, num_cores_for_passive_rfuzz, num_elfs_passive_rfuzz[design_name])
54 | print(f"Passive RFUZZ for design {design_name}:", design_passive_rfuzz_results[design_name])
55 |
56 | # Active RFUZZ
57 | with mp.Pool(mp.cpu_count()) as p:
58 | ret = p.starmap(collect_active_coverage_rfuzz, zip(design_names_for_rfuzz, itertools.repeat(TIMEOUT_SECONDS)))
59 | design_active_rfuzz_results = dict()
60 | for val_id, val in enumerate(ret):
61 | design_active_rfuzz_results[design_names_for_rfuzz[val_id]] = val
62 | print(f"Active RFUZZ for design {design_names_for_rfuzz[val_id]}:", design_active_rfuzz_results[design_names_for_rfuzz[val_id]])
63 |
64 | plot_rfuzz(design_active_rfuzz_results, design_passive_rfuzz_results)
65 | else:
66 | raise Exception("This module must be at the toplevel.")
67 |
--------------------------------------------------------------------------------
/fuzzer/do_simcoverage.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script measures the simulator coverage of Cascade and DifuzzRTL.
6 |
7 | from modelsim.comparedifuzzmodelsim import collect_coverage_modelsim_nomerge, merge_coverage_modelsim
8 | from modelsim.plot import plot_coverage_global
9 |
10 | import multiprocessing as mp
11 | import os
12 |
13 | if __name__ == '__main__':
14 | if "CASCADE_ENV_SOURCED" not in os.environ:
15 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
16 |
17 | num_workers = max(int(os.getenv('CASCADE_JOBS', 160)) // 4, 1)
18 | TARGET_NUMINSTRS = 1_100_000
19 | PLOT_NUMINSTRS = 1_000_000
20 | NUM_SERIES = 10
21 |
22 | # Cascade
23 |
24 | # Generate enough ELFs
25 | for series_id in range(NUM_SERIES):
26 | collect_coverage_modelsim_nomerge(False, series_id, 'rocket', num_workers, TARGET_NUMINSTRS, None)
27 |
28 | # DifuzzRTL
29 |
30 | # Generate the DifuzzRTL ELFs
31 | collect_coverage_modelsim_nomerge(True, 0, 'rocket', num_workers, TARGET_NUMINSTRS, None)
32 |
33 | # Run merging the coverage
34 | workloads = [(True, 0, TARGET_NUMINSTRS)]
35 | for series_id in range(NUM_SERIES):
36 | workloads.append((False, series_id, TARGET_NUMINSTRS))
37 | with mp.Pool(min(NUM_SERIES+1, num_workers)) as pool:
38 | pool.starmap(merge_coverage_modelsim, workloads)
39 |
40 | # Plot the coverage
41 | plot_coverage_global(NUM_SERIES, PLOT_NUMINSTRS, TARGET_NUMINSTRS)
42 |
43 | else:
44 | raise Exception("This module must be at the toplevel.")
45 |
--------------------------------------------------------------------------------
/fuzzer/do_timetobug.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script evaluates the duration to detect each bug.
6 |
7 | from top.fuzzdesigntiming import measure_time_to_bug, plot_bug_timings
8 | from cascade.toleratebugs import tolerate_bug_for_bug_timing
9 |
10 | from params.runparams import PATH_TO_TMP
11 | import json
12 | import os
13 | import sys
14 |
15 | bug_designs = {
16 | 'p1': 'picorv32',
17 | 'p2': 'picorv32',
18 | 'p3': 'picorv32',
19 | 'p4': 'picorv32',
20 | 'p5': 'picorv32-p5',
21 | 'p6': 'picorv32',
22 | 'v1': 'vexriscv-v1-7',
23 | 'v2': 'vexriscv-v1-7',
24 | 'v3': 'vexriscv-v1-7',
25 | 'v4': 'vexriscv-v1-7',
26 | 'v5': 'vexriscv-v1-7',
27 | 'v6': 'vexriscv-v1-7',
28 | 'v7': 'vexriscv-v1-7',
29 | 'v8': 'vexriscv-v8-9-v15',
30 | 'v9': 'vexriscv-v8-9-v15',
31 | 'v10': 'vexriscv-v10-11',
32 | 'v11': 'vexriscv-v10-11',
33 | # 'v12': 'vexriscv-v12',
34 | 'v12': 'vexriscv-v13',
35 | 'v13': 'vexriscv',
36 | 'v14': 'vexriscv-v8-9-v15',
37 | 'k1': 'kronos-k1',
38 | 'k2': 'kronos-k2',
39 | 'k3': 'kronos',
40 | 'k4': 'kronos',
41 | 'k5': 'kronos',
42 | 'c1': 'cva6-c1',
43 | 'c2': 'cva6',
44 | 'c3': 'cva6',
45 | 'c4': 'cva6',
46 | 'c5': 'cva6',
47 | 'c6': 'cva6',
48 | 'c7': 'cva6',
49 | 'c8': 'cva6',
50 | 'c9': 'cva6',
51 | 'c10': 'cva6',
52 | 'b1': 'boom-b1',
53 | 'b2': 'boom',
54 | 'r1': 'rocket',
55 | 'y1': 'cva6-y1',
56 | }
57 |
58 | def gen_path_to_json(bug_name, num_workers, num_reps, max_num_instructions, nodependencybias, timeout_seconds):
59 | filebasename = f"bug_timings_{bug_name}_{num_workers}_{num_reps}"
60 | if max_num_instructions is not None:
61 | filebasename += f"_maxinstr{max_num_instructions}"
62 | else:
63 | filebasename += "_nomaxinstr"
64 |
65 | if nodependencybias:
66 | filebasename += "_nodepbias"
67 | else:
68 | filebasename += "_depbias"
69 | filebasename += f"_timeout{timeout_seconds}"
70 | return os.path.join(PATH_TO_TMP, f"{filebasename}.json")
71 |
72 | if __name__ == '__main__':
73 | if "CASCADE_ENV_SOURCED" not in os.environ:
74 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
75 |
76 | num_workers = int(sys.argv[1])
77 | num_reps = int(sys.argv[2])
78 | timeout_seconds = int(sys.argv[3])
79 |
80 | # Pairs (maxnuminstrs, nodependencybias)
81 | scenarios = [
82 | (1, False),
83 | (10, False),
84 | # (10, True),
85 | (100, False),
86 | # (100, True),
87 | (1000, False),
88 | # (1000, True),
89 | (10000, False),
90 | # (10000, True),
91 | # (100000, False),
92 | # (100000, True),
93 | ]
94 |
95 | # Measure the time to detect each bug.
96 | for bug_name, design_name in bug_designs.items():
97 | tolerate_bug_for_bug_timing(design_name, bug_name, True)
98 | for scenario in scenarios:
99 | max_num_instructions, nodependencybias = scenario
100 | ret = measure_time_to_bug(design_name, num_workers, num_reps, max_num_instructions, nodependencybias, timeout_seconds)
101 | retpath = gen_path_to_json(bug_name, num_workers, num_reps, max_num_instructions, nodependencybias, timeout_seconds)
102 | json.dump(ret, open(retpath, "w"))
103 | print('Saved bug timing results to', retpath)
104 | tolerate_bug_for_bug_timing(design_name, bug_name, False)
105 |
106 | # Regroup the JSONs for convenience
107 | from collections import defaultdict
108 | all_rets = {}
109 | for bug_name, _ in bug_designs.items():
110 | all_rets[bug_name] = defaultdict(dict)
111 | for scenario in scenarios:
112 | max_num_instructions, nodependencybias = scenario
113 | retpath = gen_path_to_json(bug_name, num_workers, num_reps, max_num_instructions, nodependencybias, timeout_seconds)
114 | all_rets[bug_name][max_num_instructions][nodependencybias] = json.load(open(retpath, "r"))
115 | # Write a single json out of them
116 |     aggregated_json_path = os.path.join(PATH_TO_TMP, "bug_timings_all.json")
117 | json.dump(all_rets, open(aggregated_json_path, "w"))
118 | print('Saved aggregated timing results to', aggregated_json_path)
119 |
120 | # Plot these measurements.
121 | # plot_bug_timings(num_workers, num_reps)
122 | # plot_bug_timings_scenarios(num_workers, num_reps)
123 |
124 | else:
125 | raise Exception("This module must be at the toplevel.")
126 |
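127 | # Reading the aggregated JSON back (a sketch, not part of the recipe itself). Note
128 | # that json.dump stringifies non-string dict keys, so the integer and boolean keys
129 | # come back as strings:
130 | #   with open(os.path.join(PATH_TO_TMP, "bug_timings_all.json")) as f:
131 | #       all_rets = json.load(f)
132 | #   all_rets['p1']['100']['false']  # bug p1, max 100 instructions, dependency bias on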
--------------------------------------------------------------------------------
/fuzzer/do_timetobug_boxes.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script evaluates the duration to detect each bug.
6 |
7 | # sys.argv[1]: number of worker cores allocated to fuzzing
8 | # sys.argv[2]: number of repetitions per bug
9 | # (The results are written as JSON files under PATH_TO_TMP and plotted
10 | # separately by do_timetobug_boxes_plot.py.)
11 |
12 | from top.fuzzdesigntiming import measure_time_to_bug, plot_bug_timings
13 | from cascade.toleratebugs import tolerate_bug_for_bug_timing
14 |
15 | from params.runparams import PATH_TO_TMP
16 | import json
17 | import os
18 | import sys
19 |
20 | bug_designs = {
21 | 'p1': 'picorv32',
22 | 'p2': 'picorv32',
23 | 'p3': 'picorv32',
24 | 'p4': 'picorv32',
25 | 'p5': 'picorv32-p5',
26 | 'p6': 'picorv32',
27 | 'v1': 'vexriscv-v1-7',
28 | 'v2': 'vexriscv-v1-7',
29 | 'v3': 'vexriscv-v1-7',
30 | 'v4': 'vexriscv-v1-7',
31 | 'v5': 'vexriscv-v1-7',
32 | 'v6': 'vexriscv-v1-7',
33 | 'v7': 'vexriscv-v1-7',
34 | 'v8': 'vexriscv-v8-9-v15',
35 | 'v9': 'vexriscv-v8-9-v15',
36 | 'v10': 'vexriscv-v10-11',
37 | 'v11': 'vexriscv-v10-11',
38 | # 'v12': 'vexriscv-v12',
39 | 'v12': 'vexriscv-v13',
40 | 'v13': 'vexriscv',
41 | 'v14': 'vexriscv-v8-9-v15',
42 | 'k1': 'kronos-k1',
43 | 'k2': 'kronos-k2',
44 | 'k3': 'kronos',
45 | 'k4': 'kronos',
46 | 'k5': 'kronos',
47 | 'c1': 'cva6-c1',
48 | 'c2': 'cva6',
49 | 'c3': 'cva6',
50 | 'c4': 'cva6',
51 | 'c5': 'cva6',
52 | 'c6': 'cva6',
53 | 'c7': 'cva6',
54 | 'c8': 'cva6',
55 | 'c9': 'cva6',
56 | 'c10': 'cva6',
57 | 'b1': 'boom-b1',
58 | 'b2': 'boom',
59 | 'r1': 'rocket',
60 | 'y1': 'cva6-y1',
61 | }
62 |
63 | if __name__ == '__main__':
64 | if "CASCADE_ENV_SOURCED" not in os.environ:
65 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
66 |
67 | NUM_WORKERS = int(sys.argv[1])
68 | NUM_REPS = int(sys.argv[2])
69 |
70 | # Measure the time to detect each bug.
71 |
72 | for bug_name, design_name in bug_designs.items():
73 | tolerate_bug_for_bug_timing(design_name, bug_name, True)
74 | ret = measure_time_to_bug(design_name, NUM_WORKERS, NUM_REPS)
75 | tolerate_bug_for_bug_timing(design_name, bug_name, False)
76 |
77 | retpath = os.path.join(PATH_TO_TMP, f"bug_timings_{bug_name}_{NUM_WORKERS}_{NUM_REPS}.json")
78 | json.dump(ret, open(retpath, "w"))
79 | print('Saved bug timing results to', retpath)
80 |
81 | else:
82 | raise Exception("This module must be at the toplevel.")
83 |
--------------------------------------------------------------------------------
/fuzzer/do_timetobug_boxes_plot.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script plots the previously measured durations to detect each bug.
6 | 
7 | # sys.argv[1]: number of worker cores used for the measurements
8 | # sys.argv[2]: number of repetitions used for the measurements
9 | # (These must match the values that were passed to do_timetobug_boxes.py,
10 | # so that the corresponding result files can be found.)
11 |
12 | from top.fuzzdesigntiming import measure_time_to_bug, plot_bug_timings
13 | from cascade.toleratebugs import tolerate_bug_for_bug_timing
14 |
15 | from params.runparams import PATH_TO_TMP
16 | import json
17 | import os
18 | import sys
19 |
20 | bug_designs = {
21 | 'p1': 'picorv32',
22 | 'p2': 'picorv32',
23 | 'p3': 'picorv32',
24 | 'p4': 'picorv32',
25 | 'p5': 'picorv32-p5',
26 | 'p6': 'picorv32',
27 | 'v1': 'vexriscv-v1-7',
28 | 'v2': 'vexriscv-v1-7',
29 | 'v3': 'vexriscv-v1-7',
30 | 'v4': 'vexriscv-v1-7',
31 | 'v5': 'vexriscv-v1-7',
32 | 'v6': 'vexriscv-v1-7',
33 | 'v7': 'vexriscv-v1-7',
34 | 'v8': 'vexriscv-v8-9-v15',
35 | 'v9': 'vexriscv-v8-9-v15',
36 | 'v10': 'vexriscv-v10-11',
37 | 'v11': 'vexriscv-v10-11',
38 | # 'v12': 'vexriscv-v12',
39 | 'v12': 'vexriscv-v13',
40 | 'v13': 'vexriscv',
41 | 'v14': 'vexriscv-v8-9-v15',
42 | 'k1': 'kronos-k1',
43 | 'k2': 'kronos-k2',
44 | 'k3': 'kronos',
45 | 'k4': 'kronos',
46 | 'k5': 'kronos',
47 | 'c1': 'cva6-c1',
48 | 'c2': 'cva6',
49 | 'c3': 'cva6',
50 | 'c4': 'cva6',
51 | 'c5': 'cva6',
52 | 'c6': 'cva6',
53 | 'c7': 'cva6',
54 | 'c8': 'cva6',
55 | 'c9': 'cva6',
56 | 'c10': 'cva6',
57 | 'b1': 'boom-b1',
58 | 'b2': 'boom',
59 | 'r1': 'rocket',
60 | 'y1': 'cva6-y1',
61 | }
62 |
63 | if __name__ == '__main__':
64 | if "CASCADE_ENV_SOURCED" not in os.environ:
65 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
66 |
67 | NUM_WORKERS = int(sys.argv[1])
68 | NUM_REPS = int(sys.argv[2])
69 |
70 | # Plot these measurements.
71 | plot_bug_timings(NUM_WORKERS, NUM_REPS)
72 |
73 | else:
74 | raise Exception("This module must be at the toplevel.")
75 |
--------------------------------------------------------------------------------
/fuzzer/miscplots/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 |
--------------------------------------------------------------------------------
/fuzzer/miscplots/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cascade-artifacts-designs/cascade-meta/6c57bd41c204f6abc8a2747ede34c9145abee9b5/fuzzer/miscplots/__init__.py
--------------------------------------------------------------------------------
/fuzzer/miscplots/plotcategories.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Plots the bars representing bugs per category
6 |
7 | from params.runparams import PATH_TO_FIGURES
8 |
9 | import numpy as np
10 | import os
11 | from matplotlib import pyplot as plt
12 | from collections import defaultdict
13 |
14 | X_TICK_NAMES = [
15 | 'Exceptions',
16 | 'Uarchvals',
17 | 'Archvals',
18 | 'Archflags',
19 | 'Hangs',
20 | 'Perfcnt',
21 | ]
22 |
23 | TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS = X_TICK_NAMES.index('Exceptions')
24 | TYPE_UARCH_VALS = X_TICK_NAMES.index('Uarchvals')
25 | TYPE_ARCH_FPU_VALS = X_TICK_NAMES.index('Archvals')
26 | TYPE_ARCH_FPU_FLAGS = X_TICK_NAMES.index('Archflags')
27 | TYPE_HANGS = X_TICK_NAMES.index('Hangs')
28 | TYPE_PERF_CNT = X_TICK_NAMES.index('Perfcnt')
29 |
30 | bug_classification = {
31 | 'V1': TYPE_UARCH_VALS,
32 | 'V2': TYPE_UARCH_VALS,
33 | 'V3': TYPE_UARCH_VALS,
34 | 'V4': TYPE_UARCH_VALS,
35 | 'V5': TYPE_UARCH_VALS,
36 | 'V6': TYPE_UARCH_VALS,
37 | 'V7': TYPE_UARCH_VALS,
38 | 'V8': TYPE_UARCH_VALS,
39 | 'V9': TYPE_UARCH_VALS,
40 | 'V10': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
41 | 'V11': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
42 | 'V12': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
43 | 'V13': TYPE_HANGS,
44 | 'V14': TYPE_PERF_CNT,
45 | 'V15': TYPE_UARCH_VALS,
46 |
47 | 'P1': TYPE_HANGS,
48 | 'P2': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
49 | 'P3': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
50 | 'P4': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
51 | 'P5': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
52 | 'P6': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
53 |
54 | 'K1': TYPE_UARCH_VALS,
55 | 'K2': TYPE_HANGS,
56 | 'K3': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
57 | 'K4': TYPE_PERF_CNT,
58 | 'K5': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
59 |
60 | 'C1': TYPE_ARCH_FPU_VALS,
61 | 'C2': TYPE_ARCH_FPU_FLAGS,
62 | 'C3': TYPE_ARCH_FPU_FLAGS,
63 | 'C4': TYPE_ARCH_FPU_FLAGS,
64 | 'C5': TYPE_ARCH_FPU_FLAGS,
65 | 'C6': TYPE_ARCH_FPU_VALS,
66 | 'C7': TYPE_ARCH_FPU_VALS,
67 | 'C8': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
68 | 'C9': TYPE_SPURIOUS_OR_MISSING_EXCEPTIONS,
69 |
70 | 'B1': TYPE_ARCH_FPU_VALS,
71 | 'B2': TYPE_PERF_CNT,
72 | }
73 |
74 | DESIGN_PRETTY_NAMES = [
75 | 'PicoRV32',
76 | 'Kronos',
77 | 'VexRiscv',
78 | 'CVA6',
79 | 'BOOM',
80 | # 'Rocket',
81 | ]
82 |
83 | DESIGN_COLORS = [
84 | 'orange',
85 | 'gray',
86 | 'black',
87 | 'red',
88 | 'blue',
89 | # 'purple',
90 | ]
91 |
92 | def plot_bugtypes_bars():
93 | in_dict = defaultdict(lambda: defaultdict(int))
94 |
95 | # Just some assertions
96 | for bugtype_key, bugtype_value in bug_classification.items():
97 | assert bugtype_value < len(X_TICK_NAMES)
98 |
99 | # Get the stats
100 | for bugtype_key, bugtype_value in bug_classification.items():
101 | for design_name in DESIGN_PRETTY_NAMES:
102 | if bugtype_key[0] == design_name[0]:
103 | in_dict[design_name][bugtype_value] += 1
104 |
105 | Ys = []
106 | for design_name in DESIGN_PRETTY_NAMES:
107 | Ys.append([in_dict[design_name][type_id] for type_id in range(len(X_TICK_NAMES))])
108 |
109 | X = np.arange(len(X_TICK_NAMES))
110 |
111 |     # Stacked bar chart with X and Ys
112 | fig = plt.figure(figsize=(6, 2.2))
113 | ax = fig.gca()
114 |
115 | width = 0.5
116 |
117 | for i, design_name in enumerate(DESIGN_PRETTY_NAMES):
118 | bars = ax.bar(X, Ys[i], bottom=np.sum(Ys[:i], axis=0), width=width, label=design_name, zorder=3, color=DESIGN_COLORS[i])
119 | for bar in bars:
120 | bar.set(edgecolor='black', linewidth=0.5)
121 |
122 | ax.bar_label(ax.containers[-1], padding=-1)
123 |
124 | ax.yaxis.grid()
125 | ax.set_ylim(0, 13)
126 |
127 | ax.set_ylabel("New bugs")
128 |
129 | ax.set_xticks(X)
130 | ax.set_xticklabels(X_TICK_NAMES)
131 |
132 | fig.legend(framealpha=1)
133 |
134 | fig.tight_layout()
135 |
136 | retpath = os.path.join(PATH_TO_FIGURES, 'bug_categories.png')
137 | print('Saving figure to', retpath)
138 | os.makedirs(PATH_TO_FIGURES, exist_ok=True)
139 | plt.savefig(retpath, dpi=300)
140 |
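141 | # Usage sketch (assuming this module is the one driven by the do_plot_bug_bars.py recipe):
142 | #   from miscplots.plotcategories import plot_bugtypes_bars
143 | #   plot_bugtypes_bars()
144 | # Each design is stacked onto the previous ones via bottom=np.sum(Ys[:i], axis=0),
145 | # i.e., the cumulative bug counts of the designs already drawn.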
--------------------------------------------------------------------------------
/fuzzer/miscplots/plotsecuimplications.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Plots the bars representing the security implications of bugs
6 |
7 | from params.runparams import PATH_TO_FIGURES
8 |
9 | import numpy as np
10 | import os
11 | from matplotlib import pyplot as plt
12 | from collections import defaultdict
13 |
14 | A = 'No check'
15 | B = 'Spur. except.'
16 | C = 'Info leakage'
17 | D = 'DoS'
18 | E = 'CF hijack'
19 | F = 'DF violations'
20 | G = 'Logic hiding'
21 |
22 | X_TICK_NAMES = [
23 | A,
24 | B,
25 | C,
26 | D,
27 | E,
28 | F,
29 | G,
30 | ]
31 |
32 | TYPE_MISSING_CHECKS = X_TICK_NAMES.index(A)
33 | TYPE_SPURIOUS_EXCEPTS = X_TICK_NAMES.index(B)
34 | TYPE_INFOLEAK = X_TICK_NAMES.index(C)
35 | TYPE_DOS = X_TICK_NAMES.index(D)
36 | TYPE_CF_HIJACK = X_TICK_NAMES.index(E)
37 | TYPE_DF_INTEGRITY = X_TICK_NAMES.index(F)
38 | TYPE_LOGIC_COMPRO = X_TICK_NAMES.index(G)
39 |
40 | bug_classification = {
41 | TYPE_MISSING_CHECKS: set([
42 | 'V10', 'V11', 'P5', 'K3'
43 | ]),
44 | TYPE_SPURIOUS_EXCEPTS: set([
45 | 'V12', 'P2', 'P3', 'P4', 'P5', 'P6', 'K5', 'C8', 'C9'
46 | ]),
47 | TYPE_INFOLEAK: set([
48 | 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V15', 'K1', 'C1', 'B1',
49 |         'C2', 'C3', 'C4', 'C5',
50 |         'V10', 'V11',
51 | ]),
52 | TYPE_DOS: set([
53 | 'V13', 'P1', 'K2'
54 | ]),
55 | TYPE_CF_HIJACK: set([
56 | 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'V5', 'V6'
57 | ]),
58 | TYPE_DF_INTEGRITY: set([
59 | 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V15', 'K1', 'B1',
60 | ]),
61 | TYPE_LOGIC_COMPRO: set([
62 | 'Y1'
63 | ]),
64 | }
65 |
66 | DESIGN_PRETTY_NAMES = [
67 | 'PicoRV32',
68 | 'Kronos',
69 | 'VexRiscv',
70 | 'CVA6',
71 | 'BOOM',
72 | 'Yosys',
73 | ]
74 |
75 | DESIGN_COLORS = [
76 | 'orange',
77 | 'gray',
78 | 'black',
79 | 'red',
80 | 'blue',
81 | 'darkgreen',
82 | ]
83 |
84 | def plot_security_implications():
85 | global X_TICK_NAMES
86 | in_dict = defaultdict(lambda: defaultdict(int))
87 |
88 | for implication_type in range(len(X_TICK_NAMES)):
89 | for bug_name in bug_classification[implication_type]:
90 | for design_name in DESIGN_PRETTY_NAMES:
91 | if bug_name[0] == design_name[0]:
92 | in_dict[design_name][implication_type] += 1
93 |
94 | Ys = []
95 | for design_name in DESIGN_PRETTY_NAMES:
96 | Ys.append([in_dict[design_name][type_id] for type_id in range(len(X_TICK_NAMES))])
97 |
98 | X = np.arange(len(X_TICK_NAMES))
99 |
100 |     # Sort X_TICK_NAMES and Ys in decreasing order of total Ys
101 | X_TICK_NAMES = [X_TICK_NAMES[i] for i in np.argsort(np.sum(Ys, axis=0))[::-1]]
102 | Ys = [np.array(Ys[i])[np.argsort(np.sum(Ys, axis=0))[::-1]] for i in range(len(Ys))]
103 |
104 |
105 |     # Stacked bar chart with X and Ys
106 | fig = plt.figure(figsize=(5, 1.8))
107 | ax = fig.gca()
108 |
109 | width = 0.5
110 |
111 | for i, design_name in enumerate(DESIGN_PRETTY_NAMES):
112 | bars = ax.bar(X, Ys[i], bottom=np.sum(Ys[:i], axis=0), width=width, label=design_name, zorder=3, color=DESIGN_COLORS[i])
113 | for bar in bars:
114 | bar.set(edgecolor='black', linewidth=0.5)
115 |
116 | ax.bar_label(ax.containers[-1], padding=0)
117 |
118 | # Add angle to ticks
119 | for tick in ax.get_xticklabels():
120 | tick.set_rotation(20)
121 |
122 | ax.yaxis.grid()
123 | ax.set_ylim(0, 23)
124 |
125 | ax.set_ylabel("New bugs")
126 |
127 | ax.set_xticks(X)
128 | ax.set_xticklabels(X_TICK_NAMES)
129 |
130 | fig.legend(framealpha=1, ncol=2, bbox_to_anchor=(1.01, 1), loc='upper right')
131 |
132 | fig.tight_layout()
133 |
134 | retpath = os.path.join(PATH_TO_FIGURES, 'security_implications.png')
135 | print('Saving figure to', retpath)
136 | os.makedirs(PATH_TO_FIGURES, exist_ok=True)
137 | plt.savefig(retpath, dpi=300)
138 |
--------------------------------------------------------------------------------
/fuzzer/modelsim/annotateinstrs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This script colocates each DifuzzRTL ELF in the shared directory with its instruction count.
6 | # It is meant to be executed in the Docker container and not locally.
7 |
8 | from modelsim.countinstrs import _gen_spike_dbgcmd_file_for_count_instrs
9 | from common.spike import SPIKE_STARTADDR, get_spike_timeout_seconds
10 |
11 | import os
12 | import subprocess
13 |
14 | def countinstrs_difuzzrtl(elf_id: int) -> int:
15 | elfdir_path = '/cascade-mountdir'
16 | # Check that the shared directory exists
17 | assert os.path.isdir(elfdir_path), f"Shared directory `{elfdir_path}` does not exist."
18 |
19 | rvflags = 'rv64g'
20 | elfpath = os.path.join(elfdir_path, f"patched_id_{elf_id}.elf")
21 |
22 | assert elfpath is not None, "elfpath is None"
23 | assert 'patched' in elfpath, "elfpath is not a patched difuzzrtl elf"
24 |
25 | # Get the final pc
26 |     final_addr_str = subprocess.check_output(f"nm {elfpath} | grep write_tohost", shell=True, text=True)
27 | final_addr = int(final_addr_str.split()[0], 16)
28 |
29 | # Generate the spike debug commands file
30 | path_to_debug_file = _gen_spike_dbgcmd_file_for_count_instrs(identifier_str=f"difuzzrtl_patched{elf_id}", startpc=SPIKE_STARTADDR, endpc=final_addr)
31 |
32 | # Second, run the Spike command
33 | spike_shell_command = (
34 | "spike",
35 | "-d",
36 | f"--debug-cmd={path_to_debug_file}",
37 | f"--isa={rvflags}",
38 | f"--pc={SPIKE_STARTADDR}",
39 | elfpath
40 | )
41 |
42 | try:
43 | spike_out = subprocess.run(spike_shell_command, capture_output=True, text=True, timeout=get_spike_timeout_seconds()).stderr
44 | except Exception as e:
45 | raise Exception(f"Spike timeout (A) for identifier str: difuzzrtl_patched{elf_id}. Command: {' '.join(filter(lambda s: '--debug-cmd' not in s, spike_shell_command))} Debug file: {path_to_debug_file}")
46 |
47 | return len(list(filter(lambda s: s.startswith('core 0: 0x'), spike_out.split('\n'))))
48 |
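49 | # Usage sketch (assumes the shared /cascade-mountdir directory is mounted):
50 | #   countinstrs_difuzzrtl(0)  # counts the instructions retired by patched_id_0.elf
51 | # The count is the number of 'core 0: 0x...' trace lines Spike emits between
52 | # SPIKE_STARTADDR and the write_tohost address.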
--------------------------------------------------------------------------------
/fuzzer/modelsim/countinstrs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | from params.runparams import DO_ASSERT, NO_REMOVE_TMPFILES, PATH_TO_TMP
6 | from common.spike import SPIKE_STARTADDR, get_spike_timeout_seconds
7 | import os
8 | import subprocess
9 | from pathlib import Path
10 |
11 | # At the moment, this function is not used.
12 | def countinstrs_cascade_fromelf(elf_id: int, elfpath: str, rvflags: str, final_addr: int) -> int:
13 | # Generate the spike debug commands file
14 | path_to_debug_file = _gen_spike_dbgcmd_file_for_count_instrs(identifier_str=f"cascade_countinstrs{elf_id}", startpc=SPIKE_STARTADDR, endpc=final_addr)
15 |
16 | # Second, run the Spike command
17 | spike_shell_command = (
18 | "spike",
19 | "-d",
20 | f"--debug-cmd={path_to_debug_file}",
21 | f"--isa={rvflags}",
22 | f"--pc={SPIKE_STARTADDR}",
23 | elfpath
24 | )
25 |
26 | try:
27 | spike_out = subprocess.run(spike_shell_command, capture_output=True, text=True, timeout=get_spike_timeout_seconds()).stderr
28 | except Exception as e:
29 |         raise Exception(f"Spike timeout (A) for identifier str: cascade_countinstrs{elf_id}. Command: {' '.join(filter(lambda s: '--debug-cmd' not in s, spike_shell_command))} Debug file: {path_to_debug_file}")
30 | if not NO_REMOVE_TMPFILES:
31 | os.remove(path_to_debug_file)
32 | del path_to_debug_file
33 |
34 | return len(list(filter(lambda s: s.startswith('core 0: 0x'), spike_out.split('\n'))))
35 |
36 |
37 | def countinstrs_cascade(elf_id: int) -> int:
38 | design_name = 'rocket'
39 | num_instrs_path = os.path.join(os.environ['CASCADE_PATH_TO_DIFUZZRTL_ELFS_FOR_MODELSIM'], f"{design_name}_{elf_id}_numinstrs.txt")
40 | with open(num_instrs_path, 'r') as file:
41 | content = file.read()
42 | return int(content, 16)
43 |
44 | # Relies on pre-computed number of instructions
45 | def countinstrs_difuzzrtl_nospike(elf_id: int) -> int:
46 | num_instrs_path = os.path.join(os.environ['CASCADE_PATH_TO_DIFUZZRTL_ELFS_FOR_MODELSIM'], f"id_{elf_id}_numinstrs.txt")
47 | with open(num_instrs_path, 'r') as file:
48 | content = file.read()
49 | return int(content, 16)
50 |
51 | def countinstrs_difuzzrtl(path_to_patched_elf) -> int:
52 | rvflags = 'rv64g'
53 |
54 | assert path_to_patched_elf is not None, "path_to_patched_elf is None"
55 | assert 'patch' in path_to_patched_elf, "path_to_patched_elf is not a patched difuzzrtl elf"
56 |
57 | # Get the final pc
58 |     final_addr_str = subprocess.check_output(f"nm {path_to_patched_elf} | grep write_tohost", shell=True, text=True)
59 | final_addr = int(final_addr_str.split()[0], 16)
60 |
61 | # Generate the spike debug commands file
62 | path_to_debug_file = _gen_spike_dbgcmd_file_for_count_instrs(identifier_str=f"difuzzrtl_patched{hash(path_to_patched_elf)}", startpc=SPIKE_STARTADDR, endpc=final_addr)
63 |
64 | # Second, run the Spike command
65 | spike_shell_command = (
66 | "spike",
67 | "-d",
68 | f"--debug-cmd={path_to_debug_file}",
69 | f"--isa={rvflags}",
70 | f"--pc={SPIKE_STARTADDR}",
71 | path_to_patched_elf
72 | )
73 |
74 | try:
75 | spike_out = subprocess.run(spike_shell_command, capture_output=True, text=True, timeout=get_spike_timeout_seconds()).stderr
76 | except Exception as e:
77 | raise Exception(f"Spike timeout (A) for identifier str: {path_to_patched_elf}. Command: {' '.join(filter(lambda s: '--debug-cmd' not in s, spike_shell_command))} Debug file: {path_to_debug_file}")
78 | if not NO_REMOVE_TMPFILES:
79 | os.remove(path_to_debug_file)
80 | del path_to_debug_file
81 |
82 | return len(list(filter(lambda s: s.startswith('core 0: 0x'), spike_out.split('\n'))))
83 |
84 |
85 |
86 | def _gen_spike_dbgcmd_file_for_count_instrs(identifier_str: str, startpc: int, endpc: int):
87 | path_to_debug_file = os.path.join(PATH_TO_TMP, 'dbgcmds', f"cmds_count_instrs_{identifier_str}")
88 | # if not os.path.exists(path_to_debug_file):
89 | Path(os.path.dirname(path_to_debug_file)).mkdir(parents=True, exist_ok=True)
90 | spike_debug_commands = [
91 | f"until pc 0 0x{startpc:x}",
92 | f"untiln pc 0 0x{endpc:x}",
93 | f"q\n",
94 | ]
95 | spike_debug_commands_str = '\n'.join(spike_debug_commands)
96 |
97 | with open(path_to_debug_file, 'w') as f:
98 | f.write(spike_debug_commands_str)
99 |
100 | return path_to_debug_file
101 |
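102 | # For instance, for startpc=0x80000000 and endpc=0x80001ead, the generated
103 | # command file contains:
104 | #   until pc 0 0x80000000
105 | #   untiln pc 0 0x80001ead
106 | #   q
107 | # Spike then prints one trace line per instruction retired between the two PCs.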
--------------------------------------------------------------------------------
/fuzzer/modelsim/patchwritetohost.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Only for difuzzrtl. Patches the ELFs to write to the suitable address to stop the testbench.
6 |
7 | import os
8 |
9 | # @return True iff the ELF existed
10 | def replace_write_to_host(path_to_origin_elf, path_to_patched_elf):
11 |     assert path_to_origin_elf != path_to_patched_elf, "The origin and patched ELF paths are identical. Equality is not strictly forbidden, but it is suspicious."
12 | 
13 |     # Return False instead of asserting, as documented above.
14 |     if not os.path.exists(path_to_origin_elf):
15 |         return False
16 |
17 | # Read the object file as binary
18 | with open(path_to_origin_elf, 'rb') as file:
19 | content = file.read()
20 |
21 | pattern = b'\x93\x01\x10\x00\x17\x1f\x00\x00\x23\x26\x3f\xc6'
22 | pattern_head = b'\x93\x01\x10\x00\x17\x1f\x00\x00\x23'
23 | assert pattern_head in content, f"Pattern {pattern_head} not found in the ELF {path_to_origin_elf}"
24 | assert content.count(pattern_head) == 1, f"Pattern {pattern_head} found more than once in the ELF."
25 | replacement = b'\x37\x05\x00\x60\x23\x20\x05\x00\x6f\x00\x00\x00'
26 | assert len(pattern) == len(replacement), f"Pattern and replacement have different lengths: {len(pattern)} vs {len(replacement)}"
27 |
28 | # Patch now
29 | index_of_pattern = content.index(pattern_head)
30 | content = content[:index_of_pattern] + replacement + content[index_of_pattern + len(pattern):]
31 |
32 | # Write the modified content back to the file
33 | with open(path_to_patched_elf, 'wb') as file:
34 | file.write(content)
35 | return True
36 |
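37 | # Decoding sketch of the two byte sequences above (assuming standard little-endian
38 | # RV32 encodings; the pattern is the usual riscv-tests write_tohost tail):
39 | #   pattern:     0x00100193  li   gp, 1
40 | #                0x00001f17  auipc x30, 0x1
41 | #                0xc63f2623  sw   gp, -916(x30)    # the store to tohost
42 | #   replacement: 0x60000537  lui  a0, 0x60000
43 | #                0x00052023  sw   zero, 0(a0)      # store to 0x60000000 stops the testbench
44 | #                0x0000006f  jal  zero, 0          # then spin in place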
--------------------------------------------------------------------------------
/fuzzer/modelsim/util.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Find a byte pattern in the DifuzzRTL ELFs (used when patching them to trigger the end of simulation)
6 | def find_pattern_in_object_file(file_path, pattern):
7 | pattern_length = len(pattern)
8 | byte_index_for_debug = 0
9 | with open(file_path, 'rb') as file:
10 |         # Fill the window with the first pattern_length bytes
11 |         window = bytearray(file.read(pattern_length))
12 |         if len(window) < pattern_length:
13 |             return False
14 |         while True:
15 |             if window == pattern:
16 |                 # Found the pattern
17 |                 print('byte_index_for_debug', hex(byte_index_for_debug))
18 |                 return True
19 |             # Slide the window by one byte. Refilling it with readinto would
20 |             # overwrite the slid bytes and miss matches at unaligned offsets.
21 |             next_byte = file.read(1)
22 |             if not next_byte:
23 |                 # Reached end of file
24 |                 break
25 |             window[:-1] = window[1:]
26 |             window[-1] = next_byte[0]
27 |             byte_index_for_debug += 1
28 | 
29 |     # Pattern not found
30 |     return False
31 | 
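32 | # Smoke-test sketch (hypothetical ELF path from the command line; the pattern is
33 | # the write_tohost prefix that patchwritetohost.replace_write_to_host searches for):
34 | if __name__ == '__main__':
35 |     import sys
36 |     print(find_pattern_in_object_file(sys.argv[1], b'\x93\x01\x10\x00\x17\x1f\x00\x00\x23'))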
--------------------------------------------------------------------------------
/fuzzer/params/runparams.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | # tmpdir
4 | if "CASCADE_ENV_SOURCED" not in os.environ:
5 | raise Exception("The Cascade environment must be sourced prior to running the Python recipes.")
6 |
7 | PATH_TO_TMP = os.path.join(os.environ['CASCADE_DATADIR'])
8 | os.makedirs(PATH_TO_TMP, exist_ok=True)
9 |
10 | PATH_TO_FIGURES = os.environ['CASCADE_PATH_TO_FIGURES']
11 | os.makedirs(PATH_TO_FIGURES, exist_ok=True)
12 |
13 | DO_ASSERT = True
14 | DO_EXPENSIVE_ASSERT = False # More expensive assertions
15 |
16 | NO_REMOVE_TMPFILES = False # Used for debugging purposes.
17 |
18 | RUN_TIMEOUT_SECONDS = 60*60*2 # A program is not supposed to run longer than this in RTL simulation.
19 |
--------------------------------------------------------------------------------
/fuzzer/rfuzz/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cascade-artifacts-designs/cascade-meta/6c57bd41c204f6abc8a2747ede34c9145abee9b5/fuzzer/rfuzz/__init__.py
--------------------------------------------------------------------------------
/fuzzer/rfuzz/collectactiverfuzz.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # This module runs RFUZZ and collects multiplexer toggle coverage.
6 | # We call this "Active RFUZZ" as opposed to running Cascade and collecting the multiplexer select coverage.
7 |
8 | from common.threads import capture_process_output
9 | from common.designcfgs import get_design_cascade_path
10 | from params.runparams import PATH_TO_TMP
11 |
12 | import json
13 | import os
14 |
15 | def collect_active_coverage_rfuzz(design_name: str, timeout_seconds: int):
16 | # Run the active RFUZZ on the required design
17 | cmd = ['make', '-C', f"{get_design_cascade_path(design_name)}", 'rerun_drfuzz_notrace']
18 |
19 | lines = capture_process_output(' '.join(cmd), timeout_seconds)
20 |
21 | # Get the start coverage
22 | start_coverage = None
23 | for line_id, line in enumerate(lines):
24 | if line.startswith('COVERAGE:'):
25 | start_coverage = int(lines[line_id+1], 10)
26 | break
27 | if start_coverage is None:
28 | print('lines')
29 | print(lines)
30 | raise Exception(f"Could not find the start coverage for design `{design_name}`.")
31 |
32 | # Get the start timestamp
33 | start_timestamp_milliseconds = None
34 | for line in lines:
35 | if line.startswith('Timestamp start:'):
36 | start_timestamp_milliseconds = int(line.split(' ')[-1], 10)
37 | break
38 | if start_timestamp_milliseconds is None:
39 | raise Exception(f"Could not find the start timestamp for design `{design_name}`.")
40 |
41 | coverage_amounts = [start_coverage]
42 | coverage_durations = [0]
43 |
44 | # Get the coverage amounts and durations
45 | for line_id, line in enumerate(lines):
46 | if line.startswith('Timestamp toggle:'):
47 | coverage_durations.append((int(line.split(' ')[-1], 10) - start_timestamp_milliseconds) / 1000)
48 | coverage_amounts.append(int(lines[line_id+1].split(' ')[-1], 10))
49 |
50 | json_filepath = os.path.join(PATH_TO_TMP, f"rfuzz_active_coverages_{design_name}.json")
51 | with open(json_filepath, 'w') as f:
52 | json.dump({'coverage_sequence': coverage_amounts, 'durations': coverage_durations}, f)
53 | return json_filepath
54 |
55 |
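56 | # The parsing above expects simulator output shaped like the following
57 | # (illustrative values):
58 | #   Timestamp start: 1690000000000
59 | #   COVERAGE:
60 | #   1234
61 | #   Timestamp toggle: 1690000012345
62 | #   COVERAGE: 1301
63 | # The start coverage is the line after the first 'COVERAGE:' marker; each
64 | # 'Timestamp toggle:' line is followed by a line whose last token is the new coverage.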
--------------------------------------------------------------------------------
/fuzzer/rv/rv32d.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV32D_OPCODE_FLD = 0b0000111
8 | RV32D_OPCODE_FSD = 0b0100111
9 | RV32D_OPCODE_FMADDD = 0b1000011
10 | RV32D_OPCODE_FMSUBD = 0b1000111
11 | RV32D_OPCODE_FNMSUBD = 0b1001011
12 | RV32D_OPCODE_FNMADDD = 0b1001111
13 | RV32D_OPCODE_FALU = 0b1010011
14 |
15 | # All functions return uint32_t
16 |
17 | def rv32d_fld(rd: int, rs1: int, imm: int):
18 | return rvprotoinstrs.instruc_itype(RV32D_OPCODE_FLD, rd, 0b011, rs1, imm)
19 | def rv32d_fsd(rs1: int, rs2: int, imm: int):
20 | return rvprotoinstrs.instruc_stype(RV32D_OPCODE_FSD, 0b011, rs1, rs2, imm)
21 | def rv32d_fmaddd(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
22 | return rvprotoinstrs.instruc_r4type(RV32D_OPCODE_FMADDD, rd, rm, rs1, rs2, rs3, 0b01)
23 | def rv32d_fmsubd(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
24 | return rvprotoinstrs.instruc_r4type(RV32D_OPCODE_FMSUBD, rd, rm, rs1, rs2, rs3, 0b01)
25 | def rv32d_fnmsubd(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
26 | return rvprotoinstrs.instruc_r4type(RV32D_OPCODE_FNMSUBD, rd, rm, rs1, rs2, rs3, 0b01)
27 | def rv32d_fnmaddd(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
28 | return rvprotoinstrs.instruc_r4type(RV32D_OPCODE_FNMADDD, rd, rm, rs1, rs2, rs3, 0b01)
29 |
30 | def rv32d_faddd(rd: int, rs1: int, rs2: int, rm: int):
31 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, rs2, 0b0000001)
32 | def rv32d_fsubd(rd: int, rs1: int, rs2: int, rm: int):
33 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, rs2, 0b0000101)
34 | def rv32d_fmuld(rd: int, rs1: int, rs2: int, rm: int):
35 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, rs2, 0b0001001)
36 | def rv32d_fdivd(rd: int, rs1: int, rs2: int, rm: int):
37 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, rs2, 0b0001101)
38 | def rv32d_fsqrtd(rd: int, rs1: int, rm: int):
39 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, 0, 0b0101101)
40 | def rv32d_fsgnjd(rd: int, rs1: int, rs2: int):
41 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b000, rs1, rs2, 0b0010001)
42 | def rv32d_fsgnjnd(rd: int, rs1: int, rs2: int):
43 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b001, rs1, rs2, 0b0010001)
44 | def rv32d_fsgnjxd(rd: int, rs1: int, rs2: int):
45 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b010, rs1, rs2, 0b0010001)
46 | def rv32d_fmind(rd: int, rs1: int, rs2: int):
47 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b000, rs1, rs2, 0b0010101)
48 | def rv32d_fmaxd(rd: int, rs1: int, rs2: int):
49 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b001, rs1, rs2, 0b0010101)
50 | def rv32d_fcvtsd(rd: int, rs1: int, rm: int):
51 |     return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, 0b1, 0b0100000)
52 | def rv32d_fcvtds(rd: int, rs1: int, rm: int):
53 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, 0b0, 0b0100001)
54 | def rv32d_feqd(rd: int, rs1: int, rs2: int):
55 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b010, rs1, rs2, 0b1010001)
56 | def rv32d_fltd(rd: int, rs1: int, rs2: int):
57 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b001, rs1, rs2, 0b1010001)
58 | def rv32d_fled(rd: int, rs1: int, rs2: int):
59 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b000, rs1, rs2, 0b1010001)
60 | def rv32d_fclassd(rd: int, rs1: int):
61 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, 0b001, rs1, 0b0, 0b1110001)
62 | def rv32d_fcvtwd(rd: int, rs1: int, rm: int):
63 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, 0b0, 0b1100001)
64 | def rv32d_fcvtwud(rd: int, rs1: int, rm: int):
65 |     return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, 0b1, 0b1100001)
66 | def rv32d_fcvtdw(rd: int, rs1: int, rm: int):
67 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, 0b0, 0b1101001)
68 | def rv32d_fcvtdwu(rd: int, rs1: int, rm: int):
69 | return rvprotoinstrs.instruc_rtype(RV32D_OPCODE_FALU, rd, rm, rs1, 0b1, 0b1101001)
70 |
--------------------------------------------------------------------------------
/fuzzer/rv/rv32f.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV32F_OPCODE_FLW = 0b0000111
8 | RV32F_OPCODE_FSW = 0b0100111
9 | RV32F_OPCODE_FMADDS = 0b1000011
10 | RV32F_OPCODE_FMSUBS = 0b1000111
11 | RV32F_OPCODE_FNMSUBS = 0b1001011
12 | RV32F_OPCODE_FNMADDS = 0b1001111
13 | RV32F_OPCODE_FALU = 0b1010011
14 |
15 | # All functions return uint32_t
16 |
17 | def rv32f_flw(rd: int, rs1: int, imm: int):
18 | return rvprotoinstrs.instruc_itype(RV32F_OPCODE_FLW, rd, 0b010, rs1, imm)
19 | def rv32f_fsw(rs1: int, rs2: int, imm: int):
20 | return rvprotoinstrs.instruc_stype(RV32F_OPCODE_FSW, 0b010, rs1, rs2, imm)
21 | def rv32f_fmadds(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
22 | return rvprotoinstrs.instruc_r4type(RV32F_OPCODE_FMADDS, rd, rm, rs1, rs2, rs3, 0)
23 | def rv32f_fmsubs(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
24 | return rvprotoinstrs.instruc_r4type(RV32F_OPCODE_FMSUBS, rd, rm, rs1, rs2, rs3, 0)
25 | def rv32f_fnmsubs(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
26 | return rvprotoinstrs.instruc_r4type(RV32F_OPCODE_FNMSUBS, rd, rm, rs1, rs2, rs3, 0)
27 | def rv32f_fnmadds(rd: int, rs1: int, rs2: int, rs3: int, rm: int):
28 | return rvprotoinstrs.instruc_r4type(RV32F_OPCODE_FNMADDS, rd, rm, rs1, rs2, rs3, 0)
29 |
30 | def rv32f_fadds(rd: int, rs1: int, rs2: int, rm: int):
31 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, rs2, 0b0000000)
32 | def rv32f_fsubs(rd: int, rs1: int, rs2: int, rm: int):
33 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, rs2, 0b0000100)
34 | def rv32f_fmuls(rd: int, rs1: int, rs2: int, rm: int):
35 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, rs2, 0b0001000)
36 | def rv32f_fdivs(rd: int, rs1: int, rs2: int, rm: int):
37 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, rs2, 0b0001100)
38 | def rv32f_fsqrts(rd: int, rs1: int, rm: int):
39 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, 0, 0b0101100)
40 | def rv32f_fsgnjs(rd: int, rs1: int, rs2: int):
41 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b000, rs1, rs2, 0b0010000)
42 | def rv32f_fsgnjns(rd: int, rs1: int, rs2: int):
43 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b001, rs1, rs2, 0b0010000)
44 | def rv32f_fsgnjxs(rd: int, rs1: int, rs2: int):
45 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b010, rs1, rs2, 0b0010000)
46 | def rv32f_fmins(rd: int, rs1: int, rs2: int):
47 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b000, rs1, rs2, 0b0010100)
48 | def rv32f_fmaxs(rd: int, rs1: int, rs2: int):
49 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b001, rs1, rs2, 0b0010100)
50 | def rv32f_fcvtws(rd: int, rs1: int, rm: int):
51 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, 0b0, 0b1100000)
52 | def rv32f_fcvtwus(rd: int, rs1: int, rm: int):
53 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, 0b1, 0b1100000)
54 | def rv32f_fmvxw(rd: int, rs1: int):
55 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b000, rs1, 0b0, 0b1110000)
56 | def rv32f_feqs(rd: int, rs1: int, rs2: int):
57 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b010, rs1, rs2, 0b1010000)
58 | def rv32f_flts(rd: int, rs1: int, rs2: int):
59 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b001, rs1, rs2, 0b1010000)
60 | def rv32f_fles(rd: int, rs1: int, rs2: int):
61 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b000, rs1, rs2, 0b1010000)
62 | def rv32f_fclasss(rd: int, rs1: int):
63 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b001, rs1, 0b0, 0b1110000)
64 | def rv32f_fcvtsw(rd: int, rs1: int, rm: int):
65 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, 0b0, 0b1101000)
66 | def rv32f_fcvtswu(rd: int, rs1: int, rm: int):
67 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, rm, rs1, 0b1, 0b1101000)
68 | def rv32f_fmvwx(rd: int, rs1: int):
69 | return rvprotoinstrs.instruc_rtype(RV32F_OPCODE_FALU, rd, 0b000, rs1, 0b0, 0b1111000)
70 |
--------------------------------------------------------------------------------
/fuzzer/rv/rv32m.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV32M_OPCODE_MUL = 0b0110011
8 |
9 | # All functions return uint32_t
10 |
11 | def rv32m_mul(rd: int, rs1: int, rs2: int):
12 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b000, rs1, rs2, 0b1)
13 | def rv32m_mulh(rd: int, rs1: int, rs2: int):
14 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b001, rs1, rs2, 0b1)
15 | def rv32m_mulhsu(rd: int, rs1: int, rs2: int):
16 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b010, rs1, rs2, 0b1)
17 | def rv32m_mulhu(rd: int, rs1: int, rs2: int):
18 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b011, rs1, rs2, 0b1)
19 | def rv32m_div(rd: int, rs1: int, rs2: int):
20 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b100, rs1, rs2, 0b1)
21 | def rv32m_divu(rd: int, rs1: int, rs2: int):
22 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b101, rs1, rs2, 0b1)
23 | def rv32m_rem(rd: int, rs1: int, rs2: int):
24 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b110, rs1, rs2, 0b1)
25 | def rv32m_remu(rd: int, rs1: int, rs2: int):
26 | return rvprotoinstrs.instruc_rtype(RV32M_OPCODE_MUL, rd, 0b111, rs1, rs2, 0b1)
27 |
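28 | # Sanity-check sketch (assuming instruc_rtype packs funct7|rs2|rs1|funct3|rd|opcode
29 | # per the standard R-type layout): `mul x1, x2, x3` should encode to 0x023100b3.
30 | if __name__ == '__main__':
31 |     assert rv32m_mul(1, 2, 3) == 0x023100b3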
--------------------------------------------------------------------------------
/fuzzer/rv/rv64d.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV64D_OPCODE_FCVT = 0b1010011
8 |
9 | # All functions return uint32_t
10 |
11 | def rv64d_fcvtld(rd: int, rs1: int, rm: int):
12 | return rvprotoinstrs.instruc_rtype(RV64D_OPCODE_FCVT, rd, rm, rs1, 0b00010, 0b1100001)
13 | def rv64d_fcvtlud(rd: int, rs1: int, rm: int):
14 | return rvprotoinstrs.instruc_rtype(RV64D_OPCODE_FCVT, rd, rm, rs1, 0b00011, 0b1100001)
15 | def rv64d_fmvxd(rd: int, rs1: int):
16 | return rvprotoinstrs.instruc_rtype(RV64D_OPCODE_FCVT, rd, 0b000, rs1, 0b00000, 0b1110001)
17 | def rv64d_fcvtdl(rd: int, rs1: int, rm: int):
18 | return rvprotoinstrs.instruc_rtype(RV64D_OPCODE_FCVT, rd, rm, rs1, 0b00010, 0b1101001)
19 | def rv64d_fcvtdlu(rd: int, rs1: int, rm: int):
20 | return rvprotoinstrs.instruc_rtype(RV64D_OPCODE_FCVT, rd, rm, rs1, 0b00011, 0b1101001)
21 | def rv64d_fmvdx(rd: int, rs1: int):
22 | return rvprotoinstrs.instruc_rtype(RV64D_OPCODE_FCVT, rd, 0b000, rs1, 0b00000, 0b1111001)
23 |
--------------------------------------------------------------------------------
/fuzzer/rv/rv64f.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV64F_OPCODE_FCVT = 0b1010011
8 |
9 | # All functions return uint32_t
10 |
11 | def rv64f_fcvtls(rd: int, rs1: int, rm: int):
12 | return rvprotoinstrs.instruc_rtype(RV64F_OPCODE_FCVT, rd, rm, rs1, 0b00010, 0b1100000)
13 | def rv64f_fcvtlus(rd: int, rs1: int, rm: int):
14 | return rvprotoinstrs.instruc_rtype(RV64F_OPCODE_FCVT, rd, rm, rs1, 0b00011, 0b1100000)
15 | def rv64f_fcvtsl(rd: int, rs1: int, rm: int):
16 | return rvprotoinstrs.instruc_rtype(RV64F_OPCODE_FCVT, rd, rm, rs1, 0b00010, 0b1101000)
17 | def rv64f_fcvtslu(rd: int, rs1: int, rm: int):
18 | return rvprotoinstrs.instruc_rtype(RV64F_OPCODE_FCVT, rd, rm, rs1, 0b00011, 0b1101000)
19 |
--------------------------------------------------------------------------------
/fuzzer/rv/rv64i.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV64I_OPCODE_LWU = 0b0000011
8 | RV64I_OPCODE_LD = 0b0000011
9 | RV64I_OPCODE_SD = 0b0100011
10 | RV64I_OPCODE_ALU_IMM = 0b0011011 # For all rv64i arithmetic instructions with an immediate
11 | RV64I_OPCODE_ALU_REG = 0b0111011 # For all rv64i register-register arithmetic instructions
12 |
13 | # All functions return uint32_t
14 |
15 | def rv64i_lwu(rd: int, rs1: int, imm: int):
16 | return rvprotoinstrs.instruc_itype(RV64I_OPCODE_LWU, rd, 0b110, rs1, imm)
17 | def rv64i_ld(rd: int, rs1: int, imm: int):
18 | return rvprotoinstrs.instruc_itype(RV64I_OPCODE_LD, rd, 0b011, rs1, imm)
19 | def rv64i_sd(rs1: int, rs2: int, imm: int):
20 | return rvprotoinstrs.instruc_stype(RV64I_OPCODE_SD, 0b011, rs1, rs2, imm)
21 | def rv64i_addiw(rd: int, rs1: int, imm: int):
22 | return rvprotoinstrs.instruc_itype(RV64I_OPCODE_ALU_IMM, rd, 0b000, rs1, imm)
23 | def rv64i_slliw(rd: int, rs1: int, shamt: int):
24 | imm = shamt
25 | return rvprotoinstrs.instruc_itype(RV64I_OPCODE_ALU_IMM, rd, 0b001, rs1, imm)
26 | def rv64i_srliw(rd: int, rs1: int, shamt: int):
27 | imm = shamt
28 | return rvprotoinstrs.instruc_itype(RV64I_OPCODE_ALU_IMM, rd, 0b101, rs1, imm)
29 | def rv64i_sraiw(rd: int, rs1: int, shamt: int):
30 | imm = 0b010000000000 | shamt
31 | return rvprotoinstrs.instruc_itype(RV64I_OPCODE_ALU_IMM, rd, 0b101, rs1, imm)
32 | def rv64i_addw(rd: int, rs1: int, rs2: int):
33 | return rvprotoinstrs.instruc_rtype(RV64I_OPCODE_ALU_REG, rd, 0b000, rs1, rs2, 0)
34 | def rv64i_subw(rd: int, rs1: int, rs2: int):
35 | return rvprotoinstrs.instruc_rtype(RV64I_OPCODE_ALU_REG, rd, 0b000, rs1, rs2, 0b0100000)
36 | def rv64i_sllw(rd: int, rs1: int, rs2: int):
37 | return rvprotoinstrs.instruc_rtype(RV64I_OPCODE_ALU_REG, rd, 0b001, rs1, rs2, 0)
38 | def rv64i_srlw(rd: int, rs1: int, rs2: int):
39 | return rvprotoinstrs.instruc_rtype(RV64I_OPCODE_ALU_REG, rd, 0b101, rs1, rs2, 0)
40 | def rv64i_sraw(rd: int, rs1: int, rs2: int):
41 | return rvprotoinstrs.instruc_rtype(RV64I_OPCODE_ALU_REG, rd, 0b101, rs1, rs2, 0b0100000)
42 |
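43 | # Sanity-check sketch (assuming the standard I-type packing): the 0b010000000000
44 | # term sets imm[10], the arithmetic-shift flag, so `sraiw x1, x2, 3` should encode
45 | # to 0x4031509b.
46 | if __name__ == '__main__':
47 |     assert rv64i_sraiw(1, 2, 3) == 0x4031509b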
--------------------------------------------------------------------------------
/fuzzer/rv/rv64m.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV64M_OPCODE_MUL = 0b0111011
8 |
9 | # All functions return uint32_t
10 |
11 | def rv64m_mulw(rd: int, rs1: int, rs2: int):
12 | return rvprotoinstrs.instruc_rtype(RV64M_OPCODE_MUL, rd, 0b000, rs1, rs2, 0b1)
13 | def rv64m_divw(rd: int, rs1: int, rs2: int):
14 | return rvprotoinstrs.instruc_rtype(RV64M_OPCODE_MUL, rd, 0b100, rs1, rs2, 0b1)
15 | def rv64m_divuw(rd: int, rs1: int, rs2: int):
16 | return rvprotoinstrs.instruc_rtype(RV64M_OPCODE_MUL, rd, 0b101, rs1, rs2, 0b1)
17 | def rv64m_remw(rd: int, rs1: int, rs2: int):
18 | return rvprotoinstrs.instruc_rtype(RV64M_OPCODE_MUL, rd, 0b110, rs1, rs2, 0b1)
19 | def rv64m_remuw(rd: int, rs1: int, rs2: int):
20 | return rvprotoinstrs.instruc_rtype(RV64M_OPCODE_MUL, rd, 0b111, rs1, rs2, 0b1)
21 |
--------------------------------------------------------------------------------
/fuzzer/rv/rvprivileged.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | RV32I_OPCODE_PRIVILEGED = 0b1110011
8 |
9 | # All functions return uint32_t
10 |
11 | def rvprivileged_sret():
12 | return rvprotoinstrs.instruc_rtype(RV32I_OPCODE_PRIVILEGED, 0, 0, 0, 0b00010, 0b0001000)
13 | def rvprivileged_mret():
14 | return rvprotoinstrs.instruc_rtype(RV32I_OPCODE_PRIVILEGED, 0, 0, 0, 0b00010, 0b0011000)
15 |
16 | def rvprivileged_wfi():
17 | return rvprotoinstrs.instruc_rtype(RV32I_OPCODE_PRIVILEGED, 0, 0, 0, 0b00101, 0b0001000)
18 |
19 |
20 | def rvprivileged_sfence_vma(rs1: int, rs2: int):
21 | return rvprotoinstrs.instruc_rtype(RV32I_OPCODE_PRIVILEGED, 0, 0, rs1, rs2, 0b0001001)
22 | def rvprivileged_sinval_vma(rs1: int, rs2: int):
23 | return rvprotoinstrs.instruc_rtype(RV32I_OPCODE_PRIVILEGED, 0, 0, rs1, rs2, 0b0001011)
24 | def rvprivileged_sfence_w_inval():
25 | return rvprotoinstrs.instruc_rtype(RV32I_OPCODE_PRIVILEGED, 0, 0, 0, 0b00000, 0b0001100)
26 | def rvprivileged_sfence_inval_ir():
27 | return rvprotoinstrs.instruc_rtype(RV32I_OPCODE_PRIVILEGED, 0, 0, 0, 0b00001, 0b0001100)
28 |
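29 | # Sanity-check sketch (assuming the standard R-type packing): MRET is the SYSTEM
30 | # instruction with funct7=0b0011000 and rs2=0b00010, i.e. 0x30200073.
31 | if __name__ == '__main__':
32 |     assert rvprivileged_mret() == 0x30200073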
--------------------------------------------------------------------------------
/fuzzer/rv/zicsr.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | ZICSR_OPCODE_CSR = 0b1110011
8 |
9 | # All functions return uint32_t
10 |
11 | def zicsr_csrrw(rd: int, rs1: int, csr: int):
12 | return rvprotoinstrs.instruc_itype(ZICSR_OPCODE_CSR, rd, 0b001, rs1, csr)
13 | def zicsr_csrrs(rd: int, rs1: int, csr: int):
14 | return rvprotoinstrs.instruc_itype(ZICSR_OPCODE_CSR, rd, 0b010, rs1, csr)
15 | def zicsr_csrrc(rd: int, rs1: int, csr: int):
16 | return rvprotoinstrs.instruc_itype(ZICSR_OPCODE_CSR, rd, 0b011, rs1, csr)
17 | def zicsr_csrrwi(rd: int, uimm: int, csr: int):
18 | return rvprotoinstrs.instruc_itype(ZICSR_OPCODE_CSR, rd, 0b101, uimm, csr)
19 | def zicsr_csrrsi(rd: int, uimm: int, csr: int):
20 | return rvprotoinstrs.instruc_itype(ZICSR_OPCODE_CSR, rd, 0b110, uimm, csr)
21 | def zicsr_csrrci(rd: int, uimm: int, csr: int):
22 | return rvprotoinstrs.instruc_itype(ZICSR_OPCODE_CSR, rd, 0b111, uimm, csr)
23 |
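24 | # Sanity-check sketch (assuming the standard I-type packing, with the CSR address
25 | # in the immediate field): `csrw mscratch, t0` is csrrw with rd=x0, rs1=x5 and
26 | # csr=0x340, which should encode to 0x34029073.
27 | if __name__ == '__main__':
28 |     assert zicsr_csrrw(0, 5, 0x340) == 0x34029073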
--------------------------------------------------------------------------------
/fuzzer/rv/zifencei.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | import rv.rvprotoinstrs as rvprotoinstrs
6 |
7 | ZIFENCEI_OPCODE_FENCEI = 0b0001111
8 |
9 | # @return uint32_t
10 | def zifencei_fencei(rd: int = 0, rs1: int = 0):
11 | return rvprotoinstrs.instruc_itype(ZIFENCEI_OPCODE_FENCEI, rd, 0b001, rs1, 0)
12 |
--------------------------------------------------------------------------------
/fuzzer/top/fuzzdesign.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Toplevel for a cycle of program generation and RTL simulation.
6 |
7 | from common.spike import calibrate_spikespeed
8 | from common.profiledesign import profile_get_medeleg_mask
9 | from cascade.fuzzfromdescriptor import gen_new_test_instance, fuzz_single_from_descriptor
10 |
11 | import time
12 | import threading
13 |
14 | callback_lock = threading.Lock()
15 | newly_finished_tests = 0
16 | curr_round_id = 0
17 | all_times_to_detection = []
18 |
19 | def test_done_callback(arg):
20 | global newly_finished_tests
21 | global callback_lock
22 | global curr_round_id
23 | global all_times_to_detection
24 | with callback_lock:
25 | newly_finished_tests += 1
26 |
27 | def fuzzdesign(design_name: str, num_cores: int, seed_offset: int, can_authorize_privileges: bool):
28 | global newly_finished_tests
29 | global callback_lock
30 | global all_times_to_detection
31 | global curr_round_id
32 |
33 | newly_finished_tests = 0
34 | curr_round_id = 0
35 | all_times_to_detection = []
36 |
37 | import multiprocessing as mp
38 |
39 | num_workers = num_cores
40 | assert num_workers > 0
41 |
42 | calibrate_spikespeed()
43 | profile_get_medeleg_mask(design_name)
44 | print(f"Starting parallel testing of `{design_name}` on {num_workers} processes.")
45 |
46 | newly_finished_tests = 0
47 | pool = mp.Pool(processes=num_workers)
48 | process_instance_id = seed_offset
49 | # First, apply the function to all the workers.
50 | for _ in range(num_workers):
51 | memsize, _, _, num_bbs, authorize_privileges = gen_new_test_instance(design_name, process_instance_id, can_authorize_privileges)
52 | pool.apply_async(fuzz_single_from_descriptor, args=(memsize, design_name, process_instance_id, num_bbs, authorize_privileges, None, True), callback=test_done_callback)
53 | process_instance_id += 1
54 |
55 | while True:
56 | time.sleep(2)
57 |         # Check whether some tests have finished, and refill the worker pool accordingly
58 | with callback_lock:
59 | if newly_finished_tests > 0:
60 | for _ in range(newly_finished_tests):
61 | memsize, _, _, num_bbs, authorize_privileges = gen_new_test_instance(design_name, process_instance_id, can_authorize_privileges)
62 | pool.apply_async(fuzz_single_from_descriptor, args=(memsize, design_name, process_instance_id, num_bbs, authorize_privileges, None, True), callback=test_done_callback)
63 | process_instance_id += 1
64 | newly_finished_tests = 0
65 |
66 | # This code should never be reached.
67 | # Kill all remaining processes
68 | pool.close()
69 | pool.terminate()
70 |
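71 | # Typical invocation (sketch; the arguments mirror the signature above):
72 | #   fuzzdesign('picorv32', num_cores=4, seed_offset=0, can_authorize_privileges=True)
73 | # The loop keeps the pool saturated: each completed test bumps newly_finished_tests
74 | # through the callback, and the main loop schedules one fresh instance per completion.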
--------------------------------------------------------------------------------
/fuzzer/top/fuzzforperfubenchfewerinstructions.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | # Toplevel for a cycle of program generation and RTL simulation.
6 |
7 | from common.spike import calibrate_spikespeed
8 | from common.profiledesign import profile_get_medeleg_mask
9 | from cascade.fuzzfromdescriptor import gen_new_test_instance, fuzz_single_from_descriptor
10 | from cascade.fuzzsim import SimulatorEnum, runtest_simulator
11 | from cascade.fuzzfromdescriptor import gen_fuzzerstate_elf_expectedvals
12 |
13 | from time import time
14 |
15 | def fuzz_for_perf_ubench_fewerinstructions(design_name: str, seed_offset: int, can_authorize_privileges: bool, time_limit_seconds: int, max_num_instructions: int):
16 | instance_gen_durations = []
17 | run_durations = []
18 | effective_num_instrs = []
19 |
20 |     memsize = 1 << 20 # Don't want to be limited by the memory size
21 |     # Does not matter as long as it is big enough, since the cap will be the number of instructions
22 |     nmax_bbs = 10000
23 |
24 | cumulated_time = 0
25 | while cumulated_time < time_limit_seconds:
26 | seed_offset += 1
27 |
28 | # try:
29 | # Gen the test case
30 | instance_gen_start_time = time()
31 | memsize, _, _, num_bbs, authorize_privileges = gen_new_test_instance(design_name, seed_offset, can_authorize_privileges, memsize, nmax_bbs)
32 | fuzzerstate, rtl_elfpath, finalregvals_spikeresol, time_seconds_spent_in_gen_bbs, time_seconds_spent_in_spike_resol, time_seconds_spent_in_gen_elf = gen_fuzzerstate_elf_expectedvals(memsize, design_name, seed_offset, nmax_bbs, authorize_privileges, False, max_num_instructions)
33 | gen_duration = time() - instance_gen_start_time
34 |
35 | # Run the test case
36 | run_start_time = time()
37 | is_success, rtl_msg = runtest_simulator(fuzzerstate, rtl_elfpath, finalregvals_spikeresol, simulator=SimulatorEnum.VERILATOR)
38 | run_duration = time() - run_start_time
39 | # except:
40 |         # print(f"Got an exception, typically a Spike timeout. May happen in rare OS scheduling cases. Rerunning this specific test. This is without consequences.")
41 | # continue
42 |
43 | instance_gen_durations.append(gen_duration)
44 | run_durations.append(run_duration)
45 | cumulated_time += gen_duration + run_duration
46 | effective_num_instrs.append(fuzzerstate.get_num_fuzzing_instructions_sofar()-1) #-1 because it counts the last jump
47 |
48 | return instance_gen_durations, run_durations, effective_num_instrs
49 |
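50 | # Usage sketch (hypothetical arguments): collect per-test generation and simulation
51 | # timings for 10 minutes, with programs capped at 100 instructions:
52 | #   gen_durations, run_durations, num_instrs = \
53 | #       fuzz_for_perf_ubench_fewerinstructions('rocket', 0, True, 600, 100)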
--------------------------------------------------------------------------------
/tools/.gitignore:
--------------------------------------------------------------------------------
1 | log
2 | /makeelf/build
3 |
--------------------------------------------------------------------------------
/tools/Makefile:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Flavien Solt, ETH Zurich.
2 | # Licensed under the General Public License, Version 3.0, see LICENSE for details.
3 | # SPDX-License-Identifier: GPL-3.0-only
4 |
5 | PYTHON=python3.9
6 |
7 | RUSTEXEC=$(CARGO_HOME)/bin/rustc
8 | RUSTUPEXEC=$(CARGO_HOME)/bin/rustup
9 | CARGOEXEC=$(CARGO_HOME)/bin/cargo
10 |
11 | LOGFILES=log
12 |
13 | installtools: install_verilator install_morty install_rust install_bender install_fusesoc install_sv2v install_cascade_python install_yosys install_miniconda install_toolchain install_spike install_makeelf
14 |
15 | install_verilator: | log
16 | ( unset VERILATOR_ROOT && cd verilator && autoconf && ./configure --prefix=$(PREFIX_CASCADE) && make -j$(CASCADE_JOBS) && make install ) >$(LOGFILES)/verilator.log 2>&1
17 |
18 | install_sv2v: $(PREFIX_CASCADE)/bin/stack | log
19 | ( cd sv2v && make -j$(CASCADE_JOBS) && mkdir -p $(PREFIX_CASCADE)/bin/ && cp bin/sv2v $(PREFIX_CASCADE)/bin/ ) >$(LOGFILES)/sv2v.log 2>&1
20 |
21 | $(PREFIX_CASCADE)/bin/stack: | log
22 | sh stack/gethaskellstack.sh -d $(PREFIX_CASCADE)/bin
23 |
24 | install_yosys: | log
25 | ( cd cascade-yosys && make config-gcc && make PREFIX=$(PREFIX_CASCADE) -j$(CASCADE_JOBS) && make PREFIX=$(PREFIX_CASCADE) install ) >$(LOGFILES)/yosys.log 2>&1
26 |
27 | install_rust: $(RUSTEXEC)
28 |
29 | install_cascade_python: $(CASCADE_PYTHON_VENV) | log
30 | ( \
31 | set -e; \
32 | $(CASCADE_PYTHON_VENV)/bin/pip install -r ../design-processing/python-requirements.txt; \
33 | $(CASCADE_PYTHON_VENV)/bin/pip install ninja==1.10.2 \
34 | ) >$(LOGFILES)/cascade-python.log 2>&1
35 |
36 | $(RUSTEXEC): | log
37 | 	( CARGO_HOME=$$CARGO_HOME RUSTUP_HOME=$$RUSTUP_HOME curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y ) >$(LOGFILES)/rust.log 2>&1
38 |
39 | install_morty: $(RUSTEXEC) | log
40 | ifdef DO_INSTALL_OLD_VERSIONS
41 | $(CARGOEXEC) install --force --path morty --root $(PREFIX_CASCADE) >$(LOGFILES)/morty.log 2>&1
42 | else
43 | $(CARGOEXEC) install --force morty --root $(PREFIX_CASCADE)
44 | endif
45 |
46 | install_bender: $(RUSTEXEC) | log
47 | ifdef DO_INSTALL_OLD_VERSIONS
48 | $(CARGOEXEC) install --force --path bender --root $(PREFIX_CASCADE) >$(LOGFILES)/bender.log 2>&1
49 | else
50 | $(CARGOEXEC) install --force bender --root $(PREFIX_CASCADE)
51 | endif
52 |
53 | install_fusesoc: $(CASCADE_PYTHON_VENV) | log
54 | $(CASCADE_PYTHON_VENV)/bin/pip install fusesoc >$(LOGFILES)/fusesoc.log 2>&1
55 |
56 | install_miniconda: $(CASCADE_PYTHON_VENV) | log
57 | mkdir -p miniconda
58 | ( wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda/miniconda.sh \
59 | && cd miniconda/ && bash miniconda.sh -u -b -p $(PREFIX_CASCADE)/miniconda \
60 | && $(PREFIX_CASCADE)/miniconda/bin/conda update -y -n base -c defaults conda \
61 | && $(PREFIX_CASCADE)/miniconda/bin/conda config --add channels conda-forge \
62 | && $(PREFIX_CASCADE)/miniconda/bin/conda config --set channel_priority strict \
63 | ) >$(LOGFILES)/miniconda.log 2>&1
64 |
65 | install_toolchain: $(PREFIX_CASCADE)/riscv | log
66 | # make also does install
67 | ( cd riscv-gnu-toolchain/ && ./configure --prefix=$(PREFIX_CASCADE)/riscv --with-arch=rv32imc --with-cmodel=medlow --enable-multilib && make -j $(CASCADE_JOBS) ) >$(LOGFILES)/toolchain.log 2>&1
68 |
69 | install_spike: $(PREFIX_CASCADE)/riscv | log
70 | (cd riscv-isa-sim && mkdir -p build && cd build && ../configure --prefix=$(PREFIX_CASCADE)/riscv && make -j $(CASCADE_JOBS) && make install) >$(LOGFILES)/spike.log 2>&1
71 |
72 | install_makeelf: | log
73 | (cd makeelf && git checkout finercontrol && $(PYTHON) setup.py install) >$(LOGFILES)/makeelf.log 2>&1
74 |
75 | $(CASCADE_PYTHON_VENV): | log
76 | $(PYTHON) -mvenv $(CASCADE_PYTHON_VENV)
77 |
78 | cleantools:
79 | @echo "This is a fairly destructive recipe. Really know what's going to happen?"
80 | @echo "If so, press enter. Otherwise, press ^C."
81 | @echo -n " > "
82 | @read line
83 | set -x; for x in riscv-isa-sim bender morty riscv-gnu-toolchain sv2v verilator; do ( cd $$x && git clean -xfd ); done
84 | rm -rf $(PREFIX_CASCADE)
85 | rm -f $(LOGFILES)/*.log
86 |
87 | $(PREFIX_CASCADE)/riscv log:
88 | mkdir -p $@
89 |
--------------------------------------------------------------------------------
/tools/miniconda/miniconda.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cascade-artifacts-designs/cascade-meta/6c57bd41c204f6abc8a2747ede34c9145abee9b5/tools/miniconda/miniconda.sh
--------------------------------------------------------------------------------