├── .envrc ├── .github └── workflows │ ├── gen-workflows.yml │ ├── gen-workflows │ ├── default.nix │ ├── gen-nix-build-cmds.nix │ └── test-opts-to-strlist.nix │ ├── gh-pages.yml │ └── self-host.nix ├── .gitignore ├── .vimrc ├── LICENSE ├── Makefile ├── README.md ├── TODO ├── backup_checkpoints.py ├── book.toml ├── builders ├── cptBuilder │ ├── 1.profiling.nix │ ├── 2.cluster.nix │ ├── 3.checkpoint.nix │ ├── default.nix │ ├── nemu │ │ └── default.nix │ ├── qemu │ │ ├── allow_get_icount_anytime.patch │ │ └── default.nix │ └── simpoint │ │ └── default.nix ├── default.nix ├── imgBuilder │ ├── default.nix │ ├── gcpt │ │ ├── dual_core.nix │ │ └── single_core.nix │ ├── linux │ │ ├── default.nix │ │ ├── initramfs │ │ │ ├── base │ │ │ │ ├── default.nix │ │ │ │ └── gen_init_cpio │ │ │ │ │ └── default.nix │ │ │ ├── cpio_reset_timestamp.patch │ │ │ ├── default.nix │ │ │ └── overlays │ │ │ │ ├── before_workload │ │ │ │ └── default.nix │ │ │ │ ├── default.nix │ │ │ │ ├── nemu_trap │ │ │ │ └── default.nix │ │ │ │ └── qemu_trap │ │ │ │ └── default.nix │ │ └── patches │ │ │ ├── enable-clint.nix │ │ │ ├── panic_shutdown.nix │ │ │ ├── panic_shutdown.patch │ │ │ └── relaxing_random_entropy.nix │ └── opensbi │ │ ├── default.nix │ │ └── dts │ │ └── default.nix ├── sim.nix └── test-opts.nix ├── default.nix ├── docs ├── designs │ ├── 1.overview.md │ ├── 2.configuration_system.md │ ├── 3.benchmarks │ │ ├── index.md │ │ ├── openblas.md │ │ └── spec2006.md │ └── 4.builders │ │ ├── 1.imgBuilder.md │ │ ├── 2.cptBuilder.md │ │ ├── images │ │ ├── common.py │ │ ├── deps_dot.py │ │ └── overview_dot.py │ │ └── index.md ├── extract_comments.py ├── generate_summary.py ├── index.md ├── references │ ├── 1.configurable.md │ ├── 2.benchmarks_scope.md │ ├── 3.builders_scope.md │ └── 4.status │ │ ├── gen_table.js │ │ └── index.md └── usages │ ├── 1.building_outputs.md │ └── 4.running │ ├── emulators.md │ ├── gem5.md │ ├── index.md │ └── nemu.md ├── dump_result.py ├── examples ├── hello-nolibc │ ├── default.nix │ ├── hello-nolibc.nix │ └── test-opts.nix ├── nanosleep │ ├── default.nix │ └── nanosleep.c ├── nyancat │ └── default.nix ├── openblas │ ├── config.nix │ ├── default.nix │ ├── package.nix │ └── test-opts.nix └── spec2006 │ ├── 483.xalancbmk.patch │ ├── build-all.nix │ ├── build-one.nix │ ├── config.nix │ ├── default.nix │ ├── packages.nix │ └── test-opts.nix ├── shell.nix ├── test-opts.nix └── utils.nix /.envrc: -------------------------------------------------------------------------------- 1 | use nix 2 | -------------------------------------------------------------------------------- /.github/workflows/gen-workflows.yml: -------------------------------------------------------------------------------- 1 | name: gen-workflows 2 | on: 3 | push: 4 | branches: 5 | - main 6 | jobs: 7 | gen-workflows: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | contents: write 11 | steps: 12 | - uses: actions/checkout@v4 13 | with: 14 | fetch-depth: 0 15 | # https://github.com/orgs/community/discussions/35410?sort=top 16 | # TLDR: 17 | # 1. Generate a Fine-grained token in https://github.com/settings/tokens 18 | # 1.1 Choose "All repositories" or "Only select repositories" 19 | # 1.2 Check contents:write, workflows:write 20 | # 2. 
Add the generated token to repository's action 21 | # 2.1 Repo > Settings > Secrets and variables > Actions 22 | # 2.2 In repository secrets, add a secret named WORKFLOW_TOKEN 23 | # The added secret will be used here 24 | token: ${{ secrets.WORKFLOW_TOKEN }} 25 | - uses: cachix/install-nix-action@v27 26 | with: 27 | nix_path: nixpkgs=channel:nixos-24.11 28 | - run: nix-build .github/workflows/gen-workflows 29 | - name: make change to workflows file 30 | run: | 31 | cp result .github/workflows/generated-build-deterload.yml 32 | git add . 33 | git config --global user.name xieby1-gen-workflows 34 | git config --global user.email "xieby1@gen.workflows" 35 | git commit -m "gen workflows for $(git rev-parse HEAD)" 36 | git merge origin/actions -s ours -m "merge $(git rev-parse HEAD)" 37 | git push origin HEAD:actions 38 | -------------------------------------------------------------------------------- /.github/workflows/gen-workflows/default.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? import {}, lib ? pkgs.lib }: let 2 | workflow = { 3 | name = "build-deterload"; 4 | on.push = { 5 | branches = ["actions"]; 6 | paths = ["**" "!docs/**"]; 7 | }; 8 | # TODO: quick-test 9 | jobs = ( 10 | let 11 | zipWithIndex = f: list: let 12 | indices = builtins.genList lib.id (builtins.length list); 13 | in lib.zipListsWith f list indices; 14 | 15 | # cmds = [ "cmd0" "cmd1" ... ] 16 | cmds = import ./gen-nix-build-cmds.nix {}; 17 | 18 | # jobsList = [ {name="jobi"; value="cmdi"} ... ] 19 | jobsList = zipWithIndex (cmd: index: { 20 | name = "job${toString index}"; 21 | value = { 22 | # one week (spec2006 with enableVector needs about 4 days) 23 | timeout-minutes = 10080; 24 | runs-on = ["self-hosted" "Linux" "X64" "nix" "spec2006"]; 25 | steps = [ 26 | {uses = "actions/checkout@v4";} 27 | {run = cmd;} 28 | ]; 29 | }; 30 | }) cmds; 31 | in builtins.listToAttrs jobsList 32 | ) // { 33 | quick-test = { 34 | runs-on = ["self-hosted" "Linux" "X64" "nix" "spec2006"]; 35 | steps = [ 36 | { uses = "actions/checkout@v4"; } 37 | { run = '' 38 | for example in examples/*/default.nix; do 39 | nix-instantiate $example --arg src $(ls -d /spec2006* | head -n1) -A cpt 40 | done 41 | ''; } 42 | ]; 43 | }; 44 | }; 45 | }; 46 | in (pkgs.formats.yaml {}).generate "build-deterload.yaml" workflow 47 | -------------------------------------------------------------------------------- /.github/workflows/gen-workflows/gen-nix-build-cmds.nix: -------------------------------------------------------------------------------- 1 | { lib ? import }: let 2 | /* 3 | examplesDirs = [ 4 | "/examples/nyancat" 5 | "/examples/openblas" 6 | "/examples/spec2006" 7 | ... 8 | ] 9 | */ 10 | examplesDirs = let 11 | # items = { "README.md" = "regular"; nyancat = "directory"; result = "symlink"; ... } 12 | items = builtins.readDir ../../../examples; 13 | # dirs = { nyancat = "directory"; ... } 14 | dirs = lib.filterAttrs (n: v: v=="directory") items; 15 | # paths = [ "/nyancat" ... ] 16 | paths = lib.mapAttrsToList (n: v: ../../../examples + "/${n}") dirs; 17 | paths_contain_defaultnix = builtins.filter 18 | (p: builtins.pathExists (p + "/default.nix")) paths; 19 | in paths_contain_defaultnix; 20 | 21 | /* 22 | cmdStrListL2 = [ 23 | ["nix-build /examples/nyancat --argstr cores 1 --argstr linuxVersion default -A cpt ..." ... ] 24 | ... 
25 | ] 26 | */ 27 | cmdStrListL2 = map ( 28 | # examplesDir = "/examples/nyancat" 29 | examplesDir: let 30 | # test-opts = {args={...}; ...} 31 | test-opts = lib.foldl lib.recursiveUpdate {args={};} [ 32 | (import ../../../test-opts.nix) 33 | (import ../../../builders/test-opts.nix) 34 | (lib.optionalAttrs 35 | (builtins.pathExists "${examplesDir}/test-opts.nix") 36 | (import "${examplesDir}/test-opts.nix") 37 | ) 38 | ]; 39 | # optsStrList = [ "--argstr cores 1 --argstr linuxVersion default -A cpt ..." ... ] 40 | optsStrList = import ./test-opts-to-strlist.nix {} test-opts; 41 | # cmdStrList = [ "nix-build /examples/nyancat --argstr cores 1 --argstr linuxVersion default -A cpt ..." ... ] 42 | relPath = builtins.replaceStrings ["${toString ../../..}"] ["."] (toString examplesDir); 43 | cmdStrList = map (optsStr: "nix-build ${relPath} ${optsStr}") optsStrList; 44 | in cmdStrList 45 | ) examplesDirs; 46 | in lib.flatten cmdStrListL2 47 | -------------------------------------------------------------------------------- /.github/workflows/gen-workflows/test-opts-to-strlist.nix: -------------------------------------------------------------------------------- 1 | { lib ? import }: 2 | /* 3 | # input must contains an `args` attrubite 4 | input (test-opts): { 5 | args = { 6 | cc = ["gcc14" "gcc"]; 7 | miao = ["1" "2"]; 8 | wang = [true false]; 9 | }; 10 | A = ["benchmark" "cpt"]; 11 | max-jobs = [20]; 12 | } 13 | output: [ 14 | "--argstr cc gcc14 --argstr miao 1 --arg wang true -A benchmark --max-jobs 20" 15 | "--argstr cc gcc14 --argstr miao 1 --arg wang true -A cpt --max-jobs 20" 16 | "--argstr cc gcc14 --argstr miao 1 --arg wang false -A benchmark --max-jobs 20" 17 | "--argstr cc gcc14 --argstr miao 1 --arg wang false -A cpt --max-jobs 20" 18 | "--argstr cc gcc14 --argstr miao 2 --arg wang true -A benchmark --max-jobs 20" 19 | "--argstr cc gcc14 --argstr miao 2 --arg wang true -A cpt --max-jobs 20" 20 | "--argstr cc gcc14 --argstr miao 2 --arg wang false -A benchmark --max-jobs 20" 21 | "--argstr cc gcc14 --argstr miao 2 --arg wang false -A cpt --max-jobs 20" 22 | "--argstr cc gcc --argstr miao 1 --arg wang true -A benchmark --max-jobs 20" 23 | "--argstr cc gcc --argstr miao 1 --arg wang true -A cpt --max-jobs 20" 24 | "--argstr cc gcc --argstr miao 1 --arg wang false -A benchmark --max-jobs 20" 25 | "--argstr cc gcc --argstr miao 1 --arg wang false -A cpt --max-jobs 20" 26 | "--argstr cc gcc --argstr miao 2 --arg wang true -A benchmark --max-jobs 20" 27 | "--argstr cc gcc --argstr miao 2 --arg wang true -A cpt --max-jobs 20" 28 | "--argstr cc gcc --argstr miao 2 --arg wang false -A benchmark --max-jobs 20" 29 | "--argstr cc gcc --argstr miao 2 --arg wang false -A cpt --max-jobs 20" 30 | ] 31 | */ 32 | test-opts: let 33 | /* 34 | optsArgs = { 35 | cc = ["gcc14" "gcc"]; 36 | miao = ["1" "2"]; 37 | wang = [true false]; 38 | } 39 | */ 40 | optsArgs = test-opts.args; 41 | # optsArgsStrList = ["--argstr cc gcc14 --argstr miao 1 --arg wang true" ...] 
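  # lib.mapCartesianProduct calls the function once for every combination that picks
  # one value from each attribute's list, which is what expands the option lists into
  # the flat option strings illustrated in the input/output example at the top of this file.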
42 | optsArgsStrList = lib.mapCartesianProduct ( 43 | /* 44 | argAttr = { 45 | cc = "gcc14"; 46 | miao = "1"; 47 | wang = true; 48 | } 49 | */ 50 | argAttr: toString (lib.mapAttrsToList (n: v: 51 | if builtins.typeOf v == "string" 52 | then "--argstr ${n} ${v}" 53 | else "--arg ${n} ${lib.generators.toPretty {multiline = false;} v}" 54 | ) argAttr) 55 | ) optsArgs; 56 | # If optsArgsStrList is empty make it [""] 57 | optsArgsStrList' = if builtins.length optsArgsStrList == 0 then [""] else optsArgsStrList; 58 | 59 | /* 60 | optsRest = { 61 | A = ["benchmark" "cpt"]; 62 | max-jobs = [20]; 63 | } 64 | !!!Noted: optsRest could be empty 65 | */ 66 | optsRest = builtins.removeAttrs test-opts ["args"]; 67 | # optsRestStrList = [ "-A benchmark --max-jobs 20" ... ] 68 | optsRestStrList = lib.mapCartesianProduct ( 69 | /* 70 | optsAttr = { 71 | A = "benchmark"; 72 | max-jobs = 20; 73 | } 74 | */ 75 | optsAttr: toString (lib.mapAttrsToList (n: v: 76 | if lib.stringLength n == 1 77 | then "-${n} ${toString v}" 78 | else "--${n} ${toString v}" 79 | ) optsAttr) 80 | ) optsRest; 81 | # If optsRestStrList is empty make it [""] 82 | optsRestStrList' = if builtins.length optsRestStrList == 0 then [""] else optsRestStrList; 83 | 84 | optsStrList = lib.mapCartesianProduct ( 85 | /* 86 | opts = { 87 | optsArgsStrList' = "--argstr cc gcc14 --argstr miao 1 --arg wang true"; 88 | optsRestStrList' = "-A benchmark --max-jobs 20"; 89 | } 90 | */ 91 | opts: toString (builtins.attrValues opts) 92 | ) {inherit optsRestStrList' optsArgsStrList';}; 93 | in optsStrList 94 | -------------------------------------------------------------------------------- /.github/workflows/gh-pages.yml: -------------------------------------------------------------------------------- 1 | name: deploy-github-pages 2 | on: 3 | push: 4 | branches: 5 | # push to data branch will not trigger this workflow 6 | - main 7 | jobs: 8 | deploy-github-pages: 9 | runs-on: ubuntu-latest 10 | permissions: 11 | contents: write 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: cachix/install-nix-action@v23 15 | with: 16 | nix_path: nixpkgs=channel:nixos-24.05 17 | - run: nix-shell --run "make doc -j" 18 | - uses: peaceiris/actions-gh-pages@v3 19 | if: ${{ github.ref == 'refs/heads/main' }} 20 | with: 21 | github_token: ${{ secrets.GITHUB_TOKEN }} 22 | publish_dir: ./book 23 | force_orphan: true 24 | -------------------------------------------------------------------------------- /.github/workflows/self-host.nix: -------------------------------------------------------------------------------- 1 | # Usage: `nix-build runner.nix --argstr spec2006src --argstr github_token ` 2 | # * The GitHub token (valid for 366 days, limited by OpenXiangShan) is needed to 3 | # retrieve the runner token (only valid for one hour, limited by GitHub). 4 | # https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-a-registration-token-for-a-repository 5 | # > The fine-grained token must have the following permission set: 6 | # > "Administration" repository permissions (write) 7 | # * Generate the github token here: 8 | # https://github.com/settings/tokens?type=beta 9 | { pkgs ? 
import {} 10 | , spec2006src 11 | , github_token 12 | }: let 13 | name = "runner-deterload"; 14 | runner = import (pkgs.fetchFromGitHub { 15 | owner = "xieby1"; 16 | repo = "nix_config"; 17 | rev = "3f08da6e040d2004246922b4f532d350cf5ce836"; 18 | hash = "sha256-B2LrDa2sYmFsKpeizJR2Pz0/bajeWBqJ032pgB05CAU="; 19 | } + "/scripts/pkgs/github-runner.nix") { 20 | inherit pkgs; 21 | extraPodmanOpts = ["-v ${spec2006src}:/${builtins.baseNameOf spec2006src}:ro"]; 22 | extraPkgsInPATH = [pkgs.git]; 23 | }; 24 | run-ephemeral = pkgs.writeShellScriptBin name '' 25 | resp=$(curl -L \ 26 | -X POST \ 27 | -H "Accept: application/vnd.github+json" \ 28 | -H "Authorization: Bearer ${github_token}" \ 29 | -H "X-GitHub-Api-Version: 2022-11-28" \ 30 | Https://api.github.com/repos/OpenXiangShan/Deterload/actions/runners/registration-token) 31 | # https://unix.stackexchange.com/questions/13466/can-grep-output-only-specified-groupings-that-match 32 | runner_token=$(echo $resp | grep -oP '"token":\s*"\K[^"]*') 33 | ${runner} \ 34 | --labels 'self-hosted,Linux,X64,nix,spec2006' \ 35 | --ephemeral \ 36 | --url https://github.com/OpenXiangShan/Deterload \ 37 | --token $runner_token 38 | ''; 39 | in run-ephemeral 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | result* 2 | .direnv 3 | checkpoint.lst 4 | cluster-0-0.json 5 | .vscode 6 | backup* 7 | !backup_checkpoints.py 8 | 9 | # doc 10 | book 11 | *_py.dot 12 | *_py.svg 13 | __pycache__ 14 | *.mkd 15 | docs/SUMMARY.md 16 | -------------------------------------------------------------------------------- /.vimrc: -------------------------------------------------------------------------------- 1 | """ show existing tab with 2 spaces width 2 | set tabstop=2 3 | """ when indenting with '>', use 2 spaces width 4 | set shiftwidth=2 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 木兰宽松许可证, 第2版 2 | 3 | 木兰宽松许可证, 第2版 4 | 2020年1月 http://license.coscl.org.cn/MulanPSL2 5 | 6 | 7 | 您对“软件”的复制、使用、修改及分发受木兰宽松许可证,第2版(“本许可证”)的如下条款的约束: 8 | 9 | 0. 定义 10 | 11 | “软件”是指由“贡献”构成的许可在“本许可证”下的程序和相关文档的集合。 12 | 13 | “贡献”是指由任一“贡献者”许可在“本许可证”下的受版权法保护的作品。 14 | 15 | “贡献者”是指将受版权法保护的作品许可在“本许可证”下的自然人或“法人实体”。 16 | 17 | “法人实体”是指提交贡献的机构及其“关联实体”。 18 | 19 | “关联实体”是指,对“本许可证”下的行为方而言,控制、受控制或与其共同受控制的机构,此处的控制是指有受控方或共同受控方至少50%直接或间接的投票权、资金或其他有价证券。 20 | 21 | 1. 授予版权许可 22 | 23 | 每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的版权许可,您可以复制、使用、修改、分发其“贡献”,不论修改与否。 24 | 25 | 2. 授予专利许可 26 | 27 | 每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的(根据本条规定撤销除外)专利许可,供您制造、委托制造、使用、许诺销售、销售、进口其“贡献”或以其他方式转移其“贡献”。前述专利许可仅限于“贡献者”现在或将来拥有或控制的其“贡献”本身或其“贡献”与许可“贡献”时的“软件”结合而将必然会侵犯的专利权利要求,不包括对“贡献”的修改或包含“贡献”的其他结合。如果您或您的“关联实体”直接或间接地,就“软件”或其中的“贡献”对任何人发起专利侵权诉讼(包括反诉或交叉诉讼)或其他专利维权行动,指控其侵犯专利权,则“本许可证”授予您对“软件”的专利许可自您提起诉讼或发起维权行动之日终止。 28 | 29 | 3. 无商标许可 30 | 31 | “本许可证”不提供对“贡献者”的商品名称、商标、服务标志或产品名称的商标许可,但您为满足第4条规定的声明义务而必须使用除外。 32 | 33 | 4. 分发限制 34 | 35 | 您可以在任何媒介中将“软件”以源程序形式或可执行形式重新分发,不论修改与否,但您必须向接收者提供“本许可证”的副本,并保留“软件”中的版权、商标、专利及免责声明。 36 | 37 | 5. 免责声明与责任限制 38 | 39 | “软件”及其中的“贡献”在提供时不带任何明示或默示的担保。在任何情况下,“贡献者”或版权所有者不对任何人因使用“软件”或其中的“贡献”而引发的任何直接或间接损失承担责任,不论因何种原因导致或者基于何种法律理论,即使其曾被建议有此种损失的可能性。 40 | 41 | 6. 
语言 42 | “本许可证”以中英文双语表述,中英文版本具有同等法律效力。如果中英文版本存在任何冲突不一致,以中文版为准。 43 | 44 | 条款结束 45 | 46 | 如何将木兰宽松许可证,第2版,应用到您的软件 47 | 48 | 如果您希望将木兰宽松许可证,第2版,应用到您的新软件,为了方便接收者查阅,建议您完成如下三步: 49 | 50 | 1, 请您补充如下声明中的空白,包括软件名、软件的首次发表年份以及您作为版权人的名字; 51 | 52 | 2, 请您在软件包的一级目录下创建以“LICENSE”为名的文件,将整个许可证文本放入该文件中; 53 | 54 | 3, 请将如下声明文本放入每个源文件的头部注释中。 55 | 56 | Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC) 57 | Deterload is licensed under Mulan PSL v2. 58 | You can use this software according to the terms and conditions of the Mulan PSL v2. 59 | You may obtain a copy of Mulan PSL v2 at: 60 | http://license.coscl.org.cn/MulanPSL2 61 | THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 62 | See the Mulan PSL v2 for more details. 63 | 64 | 65 | Mulan Permissive Software License,Version 2 66 | 67 | Mulan Permissive Software License,Version 2 (Mulan PSL v2) 68 | January 2020 http://license.coscl.org.cn/MulanPSL2 69 | 70 | Your reproduction, use, modification and distribution of the Software shall be subject to Mulan PSL v2 (this License) with the following terms and conditions: 71 | 72 | 0. Definition 73 | 74 | Software means the program and related documents which are licensed under this License and comprise all Contribution(s). 75 | 76 | Contribution means the copyrightable work licensed by a particular Contributor under this License. 77 | 78 | Contributor means the Individual or Legal Entity who licenses its copyrightable work under this License. 79 | 80 | Legal Entity means the entity making a Contribution and all its Affiliates. 81 | 82 | Affiliates means entities that control, are controlled by, or are under common control with the acting entity under this License, ‘control’ means direct or indirect ownership of at least fifty percent (50%) of the voting power, capital or other securities of controlled or commonly controlled entity. 83 | 84 | 1. Grant of Copyright License 85 | 86 | Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable copyright license to reproduce, use, modify, or distribute its Contribution, with modification or not. 87 | 88 | 2. Grant of Patent License 89 | 90 | Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable (except for revocation under this Section) patent license to make, have made, use, offer for sale, sell, import or otherwise transfer its Contribution, where such patent license is only limited to the patent claims owned or controlled by such Contributor now or in future which will be necessarily infringed by its Contribution alone, or by combination of the Contribution with the Software to which the Contribution was contributed. The patent license shall not apply to any modification of the Contribution, and any other combination which includes the Contribution. If you or your Affiliates directly or indirectly institute patent litigation (including a cross claim or counterclaim in a litigation) or other patent enforcement activities against any individual or entity by alleging that the Software or any Contribution in it infringes patents, then any patent license granted to you under this License for the Software shall terminate as of the date such litigation or activity is filed or taken. 91 | 92 | 3. 
No Trademark License 93 | 94 | No trademark license is granted to use the trade names, trademarks, service marks, or product names of Contributor, except as required to fulfill notice requirements in Section 4. 95 | 96 | 4. Distribution Restriction 97 | 98 | You may distribute the Software in any medium with or without modification, whether in source or executable forms, provided that you provide recipients with a copy of this License and retain copyright, patent, trademark and disclaimer statements in the Software. 99 | 100 | 5. Disclaimer of Warranty and Limitation of Liability 101 | 102 | THE SOFTWARE AND CONTRIBUTION IN IT ARE PROVIDED WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED. IN NO EVENT SHALL ANY CONTRIBUTOR OR COPYRIGHT HOLDER BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE SOFTWARE OR THE CONTRIBUTION IN IT, NO MATTER HOW IT’S CAUSED OR BASED ON WHICH LEGAL THEORY, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 103 | 104 | 6. Language 105 | 106 | THIS LICENSE IS WRITTEN IN BOTH CHINESE AND ENGLISH, AND THE CHINESE VERSION AND ENGLISH VERSION SHALL HAVE THE SAME LEGAL EFFECT. IN THE CASE OF DIVERGENCE BETWEEN THE CHINESE AND ENGLISH VERSIONS, THE CHINESE VERSION SHALL PREVAIL. 107 | 108 | END OF THE TERMS AND CONDITIONS 109 | 110 | How to Apply the Mulan Permissive Software License,Version 2 (Mulan PSL v2) to Your Software 111 | 112 | To apply the Mulan PSL v2 to your work, for easy identification by recipients, you are suggested to complete following three steps: 113 | 114 | i Fill in the blanks in following statement, including insert your software name, the year of the first publication of your software, and your name identified as the copyright owner; 115 | 116 | ii Create a file named “LICENSE” which contains the whole context of this License in the first directory of your software package; 117 | 118 | iii Attach the statement to the appropriate annotated syntax at the beginning of each source file. 119 | 120 | 121 | Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC) 122 | Deterload is licensed under Mulan PSL v2. 123 | You can use this software according to the terms and conditions of the Mulan PSL v2. 124 | You may obtain a copy of Mulan PSL v2 at: 125 | http://license.coscl.org.cn/MulanPSL2 126 | THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 127 | See the Mulan PSL v2 for more details. 128 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .NOTINTERMEDIATE: 2 | 3 | PYSVGs=$(subst _dot.py,_py.svg,$(shell find docs/ -name "*_dot.py")) 4 | EXTRACTMKDs=docs/references/default_extract.mkd \ 5 | docs/references/builders_extract.mkd \ 6 | docs/references/openblas_extract.mkd \ 7 | docs/references/spec2006_extract.mkd 8 | MDs=$(shell find . 
-name "*.md") 9 | doc: ${MDs} docs/SUMMARY.md ${PYSVGs} ${EXTRACTMKDs} 10 | mdbook build 11 | 12 | docs/SUMMARY.md: ./docs/generate_summary.py $(filter-out %SUMMARY.md,${MDs}) 13 | $< $(dir $<) > $@ 14 | 15 | %_py.dot: %_dot.py docs/designs/4.builders/images/common.py 16 | python3 $< 17 | %.svg: %.dot 18 | dot -Tsvg $< -o $@ 19 | # css can only recognize intrinsic size in px 20 | # https://developer.mozilla.org/en-US/docs/Glossary/Intrinsic_Size 21 | sed -i 's/\([0-9]\+\)pt/\1px/g' $@ 22 | 23 | docs/references/default_extract.mkd: ./docs/extract_comments.py default.nix 24 | $^ $@ 25 | docs/references/builders_extract.mkd: ./docs/extract_comments.py builders/default.nix 26 | $^ $@ 27 | docs/references/openblas_extract.mkd: ./docs/extract_comments.py examples/openblas/default.nix 28 | $^ $@ 29 | docs/references/spec2006_extract.mkd: ./docs/extract_comments.py examples/spec2006/default.nix 30 | $^ $@ 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # 确定性负载(Deterload) 3 | 4 | **确定性负载**(Deterload)是一个为香山生态(包括 5 | [香山处理器](https://docs.xiangshan.cc)、 6 | [香山NEMU](https://github.com/OpenXiangShan/NEMU) 7 | 和[香山GEM5](https://github.com/OpenXiangShan/GEM5) 8 | )生成**确定性工作负载**的框架。 9 | 10 | **Deterload** is a framework for generating **Deterministic Workloads** for the XiangShan ecosystem (including 11 | [XiangShan Processor](https://github.com/OpenXiangShan/XiangShan), 12 | [XiangShan NEMU](https://github.com/OpenXiangShan/NEMU), 13 | and [XiangShan GEM5](https://github.com/OpenXiangShan/GEM5) 14 | ). 15 | 16 | ## 背景(Background) 17 | 18 | [香山](https://github.com/OpenXiangShan/XiangShan/)是一款开源的高性能RISC-V处理器,其核心理念是敏捷开发。 19 | [香山的工作负载](https://docs.xiangshan.cc/zh-cn/latest/workloads/overview/)指运行在香山处理器上的各类程序,是开发、调试、评估、研究时不可或缺的组件。 20 | 21 | [XiangShan](https://github.com/OpenXiangShan/XiangShan/) is an open-source high-performance RISC-V processor, built around the core concept of agile development. 22 | [XiangShan's workloads](https://docs.xiangshan.cc/zh-cn/latest/workloads/overview/) refer to various programs running on XiangShan processor, 23 | which are essential components for development, debugging, evaluation, and research. 24 | 25 | 为了能更加敏捷地生成各类工作负载,我们开发了Deterload项目。 26 | Deterload在[checkpoint_scripts](https://github.com/xyyy1420/checkpoint_scripts)框架上,引入了**确定性**。 27 | 此外,Deterload不仅支持生成切片镜像,还计划支持香山的各类工作负载,包括非切片镜像和裸机镜像。 28 | 29 | To enable more agile generation of various workloads, we developed the Deterload project. 30 | Deterload is based on the [checkpoint_scripts](https://github.com/xyyy1420/checkpoint_scripts) framework and adds the **deterministic** feature. 31 | Moreover, Deterload not only supports generating checkpoint images but also plans to support various workloads for XiangShan, including non-checkpoint images and bare-metal images. 32 | 33 | ## 关于“确定性”(About "Deterministic") 34 | 35 | 🤔**什么**是“确定性”? 36 | 😺无论何时何地,两次构建同一个工作负载,都应该得到完全相同的结果! 37 | 38 | 🤔**为什么**需要“确定性”? 39 | 😺它能让开发更敏捷。无论何时何地,你都能轻松重现bug和性能异常! 40 | 41 | 🤔**如何**实现“确定性”? 42 | 😺使用确定性包管理器[Nix](https://nixos.org/)并且控制所有随机性! 43 | 44 | 🤔**What** is "Deterministic"? 45 | 😺It means that whenever and wherever building the workload twice should yield the same result! 46 | 47 | 🤔**Why** do we need "Deterministic"? 48 | 😺It enables more agile development. 49 | You can reproduce bugs and performance anomalies anytime, anywhere, without hassle! 50 | 51 | 🤔**How** to achieve "Deterministic"? 
52 | 😺Using the deterministic package manager [Nix](https://nixos.org/) and controlling all possible sources of randomness! 53 | 54 | ## 使用方法(Usage) 55 | 56 | Deterload由Nix驱动。 57 | 如果你尚未安装Nix,请参考[Nix官方安装指南](https://nixos.org/download/)。 58 | 59 | Deterload is powered by Nix. 60 | If you haven't installed Nix, please refer to the [Nix official installation](https://nixos.org/download/). 61 | 62 | ```bash 63 | # 进入nix shell(推荐使用direnv自动进入nix shell): 64 | # Enter the nix shell (direnv is recommended for auto entering the nix shell): 65 | nix-shell 66 | 67 | # 用10个线程为生成切片,切片存于result/: 68 | # Generate checkpoints for using 10 threads, saved in result/: 69 | nom-build examples/ -A cpts-simpoint -j10 70 | 71 | # 显示帮助信息: 72 | # Display help information: 73 | h 74 | ``` 75 | 76 | 77 | 78 | ## 更多文档(More Documentation) 79 | 80 | 请参考[本仓库的GitHub Pages](https://openxiangshan.github.io/Deterload/)。 81 | 82 | Please refer to [the GitHub Pages of this repo](https://openxiangshan.github.io/Deterload/) of this documentation. 83 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | * clean *.py 2 | * support dynamically linked benchmark 3 | * why in smp=2 qemu, cat /proc/cpuinfo only have 1 cpu? but cat /proc stats show 2 cpu? Do we really have a dual-cpu system? 4 | * mv rmExt to utils 5 | * Generate checkpoint by function name. 6 | * This can be implemented during TCG generating translated blocks. 7 | * unify qemu command in multiple nix files 8 | -------------------------------------------------------------------------------- /backup_checkpoints.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import shutil 5 | from datetime import datetime 6 | import sys 7 | 8 | def find_nix_path(base_path, suffix): 9 | for item in os.scandir(base_path): 10 | if item.is_symlink() and item.name.startswith('result'): 11 | target_path = os.readlink(item.path) 12 | if target_path.endswith(suffix): 13 | return os.path.realpath(item.path) 14 | return None 15 | 16 | def backup_files(): 17 | # Create backup directory with timestamp 18 | timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") 19 | backup_dir = f"backup_{timestamp}" 20 | 21 | # Find checkpoint directory 22 | checkpoint_path = find_nix_path(os.getcwd(), '3.checkpoints') 23 | if not checkpoint_path: 24 | print("Error: Cannot find checkpoint directory") 25 | sys.exit(1) 26 | print(f"Found checkpoint path: {checkpoint_path}") 27 | 28 | # List of files/directories to backup 29 | files_to_backup = [ 30 | ("checkpoints", checkpoint_path), 31 | ("cluster-0-0.json", "cluster-0-0.json"), 32 | ("checkpoint.lst", "checkpoint.lst") 33 | ] 34 | 35 | try: 36 | # Create backup directory 37 | os.makedirs(backup_dir, exist_ok=True) 38 | 39 | # Perform backup for each file/directory 40 | for backup_name, source_path in files_to_backup: 41 | if os.path.exists(source_path): 42 | target_path = os.path.join(backup_dir, backup_name) 43 | 44 | if os.path.isdir(source_path): 45 | # Copy directory 46 | shutil.copytree(source_path, target_path, dirs_exist_ok=True) 47 | else: 48 | # Copy file 49 | shutil.copy2(source_path, target_path) 50 | print(f"Backed up: {source_path} -> {target_path}") 51 | else: 52 | print(f"Warning: {source_path} does not exist") 53 | 54 | print(f"Backup completed! 
Files saved to: {backup_dir}") 55 | 56 | except Exception as e: 57 | print(f"Error during backup: {str(e)}") 58 | sys.exit(1) 59 | 60 | if __name__ == "__main__": 61 | # run dump_result.py first 62 | os.system("python3 dump_result.py") 63 | backup_files() -------------------------------------------------------------------------------- /book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["xieby1"] 3 | language = "en" 4 | multilingual = false 5 | src = "docs/" 6 | title = "❄ 确定性负载(Deterload)❄" 7 | 8 | [build] 9 | extra-watch-dirs = ["."] 10 | 11 | [output.html] 12 | git-repository-url = "https://github.com/OpenXiangShan/Deterload" 13 | -------------------------------------------------------------------------------- /builders/cptBuilder/1.profiling.nix: -------------------------------------------------------------------------------- 1 | { runCommand 2 | , rmExt 3 | 4 | , utils 5 | , qemu 6 | , nemu 7 | , img 8 | , workload_name 9 | , intervals 10 | , simulator 11 | , profiling_log 12 | , smp 13 | }@args: 14 | let 15 | name = "${rmExt img.name}." + (builtins.concatStringsSep "_" [ 16 | simulator 17 | (utils.metricPrefix intervals) 18 | "${smp}core" 19 | "1_profiling" 20 | ]); 21 | 22 | qemuCommand = [ 23 | "${qemu}/bin/qemu-system-riscv64" 24 | "-bios ${img}" 25 | "-M nemu" 26 | "-nographic" 27 | "-m 8G" 28 | "-smp ${smp}" 29 | "-cpu rv64,v=true,vlen=128,h=true,sv39=true,sv48=false,sv57=false,sv64=false" 30 | "-plugin ${qemu}/lib/libprofiling.so,workload=${workload_name},intervals=${intervals},target=$out" 31 | "-icount shift=0,align=off,sleep=on" 32 | ]; 33 | 34 | nemuCommand = [ 35 | "${nemu}/bin/riscv64-nemu-interpreter" 36 | "${img}" 37 | "-b" 38 | "-D $out" 39 | "-C ${name}" 40 | "-w ${workload_name}" 41 | "--simpoint-profile" 42 | "--cpt-interval ${intervals}" 43 | ]; 44 | 45 | in runCommand name { 46 | passthru = args; 47 | } '' 48 | mkdir -p $out 49 | 50 | ${if simulator == "qemu" then '' 51 | echo ${builtins.toString qemuCommand} 52 | ${builtins.toString qemuCommand} | tee $out/${profiling_log} 53 | '' else '' 54 | echo ${builtins.toString nemuCommand} 55 | ${builtins.toString nemuCommand} | tee $out/${profiling_log} 56 | cp $out/${name}/${workload_name}/simpoint_bbv.gz $out/ 57 | ''} 58 | '' 59 | -------------------------------------------------------------------------------- /builders/cptBuilder/2.cluster.nix: -------------------------------------------------------------------------------- 1 | { runCommand 2 | , rmExt 3 | 4 | , utils 5 | , maxK 6 | , simpoint 7 | , stage1-profiling 8 | }@args: runCommand "${rmExt stage1-profiling.name}.maxK${utils.metricPrefix maxK}_2_cluster" { 9 | passthru = args; 10 | } ('' 11 | mkdir -p $out 12 | '' + (builtins.toString [ 13 | "${simpoint}/bin/simpoint" 14 | "-loadFVFile ${stage1-profiling}/simpoint_bbv.gz" 15 | "-saveSimpoints $out/simpoints0" 16 | "-saveSimpointWeights $out/weights0" 17 | "-inputVectorsGzipped" 18 | "-maxK ${maxK}" 19 | "-numInitSeeds 2" 20 | "-iters 1000" 21 | "-seedkm 610829" 22 | "-seedproj 829610" 23 | ]) + '' 24 | # chmod from 444 to 644, nemu fstream need write permission 25 | chmod +w $out/simpoints0 $out/weights0 26 | '') 27 | -------------------------------------------------------------------------------- /builders/cptBuilder/3.checkpoint.nix: -------------------------------------------------------------------------------- 1 | { runCommand 2 | , rmExt 3 | 4 | , utils 5 | , qemu 6 | , nemu 7 | , img 8 | , stage2-cluster 9 | , intervals 10 | , 
workload_name 11 | , checkpoint_format 12 | , simulator 13 | , checkpoint_log 14 | , smp 15 | }@args: 16 | let 17 | qemuCommand = [ 18 | "${qemu}/bin/qemu-system-riscv64" 19 | "-bios ${img}" 20 | "-M nemu,simpoint-path=${stage2-cluster},workload=.,cpt-interval=${intervals},output-base-dir=$out,config-name=${workload_name},checkpoint-mode=SimpointCheckpoint" 21 | "-nographic" 22 | "-m 8G" 23 | "-smp ${smp}" 24 | "-cpu rv64,v=true,vlen=128,h=true,sv39=true,sv48=false,sv57=false,sv64=false" 25 | "-icount shift=0,align=off,sleep=on" 26 | ]; 27 | 28 | nemuCommand = [ 29 | "${nemu}/bin/riscv64-nemu-interpreter" 30 | "${img}" 31 | "-b" 32 | "-D $out" 33 | "-C checkpoint" 34 | "-w ." 35 | "-S ${stage2-cluster}" 36 | "--cpt-interval ${intervals}" 37 | "--checkpoint-format ${checkpoint_format}" 38 | ]; 39 | 40 | in runCommand ("${rmExt stage2-cluster.name}." + (builtins.concatStringsSep "_" [ 41 | simulator 42 | (utils.metricPrefix intervals) 43 | "${smp}core" 44 | "3_checkpoint" 45 | ])) { 46 | passthru = args; 47 | } '' 48 | mkdir -p $out 49 | 50 | ${if simulator == "qemu" then '' 51 | echo "Executing QEMU command: ${builtins.toString qemuCommand}" 52 | ${builtins.toString qemuCommand} | tee $out/${checkpoint_log} 53 | '' else '' 54 | echo "Executing NEMU command: ${builtins.toString nemuCommand}" 55 | ${builtins.toString nemuCommand} | tee $out/${checkpoint_log} 56 | ''} 57 | '' 58 | -------------------------------------------------------------------------------- /builders/cptBuilder/default.nix: -------------------------------------------------------------------------------- 1 | { stage3-checkpoint 2 | , utils 3 | , rmExt 4 | }: let 5 | stage2-cluster = stage3-checkpoint.stage2-cluster; 6 | stage1-profiling = stage2-cluster.stage1-profiling; 7 | in stage3-checkpoint.overrideAttrs (old: { 8 | name = "${rmExt stage1-profiling.img.name}." 
+ (builtins.concatStringsSep "_" [ 9 | stage3-checkpoint.simulator 10 | (utils.metricPrefix stage3-checkpoint.intervals) 11 | "maxK${utils.metricPrefix stage2-cluster.maxK}" 12 | "${stage3-checkpoint.smp}core" 13 | "cpt" 14 | ]); 15 | passthru = { 16 | inherit stage1-profiling stage2-cluster stage3-checkpoint; 17 | qemu = stage1-profiling.qemu; 18 | nemu = stage1-profiling.nemu; 19 | simpoint = stage2-cluster.simpoint; 20 | }; 21 | }) 22 | -------------------------------------------------------------------------------- /builders/cptBuilder/nemu/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , fetchFromGitHub 3 | , pkg-config 4 | , zstd 5 | , readline 6 | , ncurses 7 | , bison 8 | , flex 9 | , git 10 | , zlib 11 | , which 12 | , SDL2 13 | 14 | , riscv64-cc 15 | }: 16 | 17 | let 18 | libCheckpointAlpha = fetchFromGitHub { 19 | owner = "OpenXiangShan"; 20 | repo = "LibCheckpointAlpha"; 21 | rev = "c5c2fef74133fb2b8ef8642633f60e0996493f29"; 22 | sha256 = "sha256-Rxlv47QY273jbcSX/A1PuT7+2aCB2sVW32pL91G3BmI="; 23 | }; 24 | 25 | softfloat = fetchFromGitHub { 26 | owner = "ucb-bar"; 27 | repo = "berkeley-softfloat-3"; 28 | rev = "3b70b5d"; 29 | sha256 = "sha256-uBXfFgKuGixDIupetB/p421YmZM/AlBmJi4VgFOjbC0="; 30 | }; 31 | in 32 | stdenv.mkDerivation { 33 | name = "xs-checkpoint-nemu"; 34 | src = fetchFromGitHub { 35 | owner = "OpenXiangShan"; 36 | repo = "NEMU"; 37 | rev = "3b2a4b4acf410efabcb024f43bad438346e0da12"; 38 | hash = "sha256-3resMrcTgFJapKzc0b/9HNVaXWqd78/0Zd90SI2/NIY="; 39 | }; 40 | buildInputs = [ 41 | git 42 | zlib 43 | which 44 | zstd 45 | readline 46 | ncurses 47 | pkg-config 48 | bison 49 | flex 50 | riscv64-cc 51 | SDL2 52 | ]; 53 | 54 | buildPhase = '' 55 | # Setup LibCheckpointAlpha 56 | mkdir -p resource/gcpt_restore 57 | cp -r ${libCheckpointAlpha}/* resource/gcpt_restore/ 58 | 59 | # Setup berkeley-softfloat-3 60 | mkdir -p resource/softfloat/repo 61 | cp -r ${softfloat}/* resource/softfloat/repo/ 62 | 63 | # Build NEMU 64 | export NEMU_HOME=$PWD 65 | 66 | # Disable ccache 67 | export USE_CCACHE= 68 | export CCACHE_DISABLE=1 69 | 70 | # Ensure all necessary directories exist 71 | mkdir -p tools/kconfig/build 72 | mkdir -p tools/fixdep/build 73 | mkdir -p build/obj-riscv64-nemu-interpreter-so 74 | 75 | # Build necessary tools 76 | make -C tools/kconfig name=conf 77 | make -C tools/fixdep 78 | 79 | # Build gcpt_restore 80 | make -C resource/gcpt_restore 81 | 82 | # gem5 does not support SV48 at present, use SV39 instead 83 | sed -i "s/CONFIG_RV_SV48/CONFIG_RV_SV39/g" configs/riscv64-xs-cpt_defconfig 84 | 85 | make riscv64-xs-cpt_defconfig 86 | 87 | # Ensure softfloat build directory has write permissions 88 | mkdir -p resource/softfloat/repo/build/Linux-x86_64-GCC 89 | chmod -R u+w resource/softfloat/repo/build 90 | 91 | make -j $NIX_BUILD_CORES 92 | 93 | echo "Build phase completed" 94 | ''; 95 | 96 | installPhase = '' 97 | mkdir -p $out/bin 98 | cp build/riscv64-nemu-interpreter $out/bin/ 99 | ''; 100 | 101 | dontFixup = true; 102 | } 103 | -------------------------------------------------------------------------------- /builders/cptBuilder/qemu/allow_get_icount_anytime.patch: -------------------------------------------------------------------------------- 1 | diff --git a/accel/tcg/icount-common.c b/accel/tcg/icount-common.c 2 | index ec57192be8..d1ff3c839a 100644 3 | --- a/accel/tcg/icount-common.c 4 | +++ b/accel/tcg/icount-common.c 5 | @@ -110,10 +110,12 @@ static int64_t icount_get_raw_locked(void) 6 | 
CPUState *cpu = current_cpu; 7 | 8 | if (cpu && cpu->running) { 9 | - if (!cpu->neg.can_do_io) { 10 | - error_report("Bad icount read"); 11 | - exit(1); 12 | - } 13 | + // Generating checkpoints does not change the machine state. 14 | + // Therefore, it is safe to get_icount regardless of the can_do_io. 15 | + // if (!cpu->neg.can_do_io) { 16 | + // error_report("Bad icount read"); 17 | + // exit(1); 18 | + // } 19 | /* Take into account what has run */ 20 | icount_update_locked(cpu); 21 | } 22 | -------------------------------------------------------------------------------- /builders/cptBuilder/qemu/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , fetchFromGitHub 3 | , fetchFromGitLab 4 | , python3 5 | , ninja 6 | , meson 7 | , glib 8 | , pkg-config 9 | , zstd 10 | , dtc 11 | }: 12 | stdenv.mkDerivation { 13 | name = "xs-checkpoint-qemu"; 14 | src = fetchFromGitHub { 15 | owner = "OpenXiangShan"; 16 | repo = "qemu"; 17 | # latest checkpoint branch 18 | rev = "8758c375de12f09073614cad48f9956fe53b5aa7"; 19 | hash = "sha256-xSJcR4bywPwpBGQfFKGOTrYZBhMoy0gOP8hYoA+hEOM="; 20 | postFetch = let 21 | keycodemapdb = fetchFromGitLab { 22 | owner = "qemu-project"; 23 | repo = "keycodemapdb"; 24 | rev = "f5772a62ec52591ff6870b7e8ef32482371f22c6"; 25 | hash = "sha256-GbZ5mrUYLXMi0IX4IZzles0Oyc095ij2xAsiLNJwfKQ="; 26 | }; 27 | berkeley-softfloat-3 = fetchFromGitLab { 28 | owner = "qemu-project"; 29 | repo = "berkeley-softfloat-3"; 30 | rev = "b64af41c3276f97f0e181920400ee056b9c88037"; 31 | hash = "sha256-Yflpx+mjU8mD5biClNpdmon24EHg4aWBZszbOur5VEA="; 32 | }; 33 | berkeley-testfloat-3 = fetchFromGitLab { 34 | owner = "qemu-project"; 35 | repo = "berkeley-testfloat-3"; 36 | rev = "e7af9751d9f9fd3b47911f51a5cfd08af256a9ab"; 37 | hash = "sha256-inQAeYlmuiRtZm37xK9ypBltCJ+ycyvIeIYZK8a+RYU="; 38 | }; 39 | in '' 40 | cp -r ${keycodemapdb} $out/subprojects/keycodemapdb 41 | find $out/subprojects/keycodemapdb -type d -exec chmod +w {} \; 42 | 43 | cp -r ${berkeley-softfloat-3} $out/subprojects/berkeley-softfloat-3 44 | find $out/subprojects/berkeley-softfloat-3 -type d -exec chmod +w {} \; 45 | cp -r $out/subprojects/packagefiles/berkeley-softfloat-3/* $out/subprojects/berkeley-softfloat-3/ 46 | 47 | cp -r ${berkeley-testfloat-3} $out/subprojects/berkeley-testfloat-3 48 | find $out/subprojects/berkeley-testfloat-3 -type d -exec chmod +w {} \; 49 | cp -r $out/subprojects/packagefiles/berkeley-testfloat-3/* $out/subprojects/berkeley-testfloat-3/ 50 | ''; 51 | }; 52 | patches = [ 53 | ./allow_get_icount_anytime.patch 54 | ]; 55 | postPatch = 56 | /* do not disable timer interrupt */ '' 57 | sed -i 's/nemu_trap_count == 2/nemu_trap_count == 1/g' contrib/plugins/profiling.c 58 | '' /* disable sync cpus in multicore checkpoint */ + '' 59 | sed -i 's/bool sync_end = false;/bool sync_end = true;/g' target/riscv/multicore.c 60 | '' /* fix bug that not taking 0th checkpoint */ + '' 61 | sed -i '/if (first_insns_item->data == 0) {/,/^ }$/d' target/riscv/multicore.c 62 | sed -i 's/limit_instructions==0/\&ns->sync_info.online_cpus==0/' target/riscv/serializer.c 63 | '' /* fix: sstc is disabled in nemu machine if not provide_rdtime*/ + '' 64 | sed -i 's/RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, false/RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true/' hw/riscv/nemu.c 65 | ''; 66 | 67 | buildInputs = [ 68 | python3 69 | ninja 70 | meson 71 | glib 72 | pkg-config 73 | zstd 74 | dtc 75 | ]; 76 | 77 | dontUseMesonConfigure = true; 78 | preConfigure = '' 79 | ''; 80 | 
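  # Build only the riscv64 system emulator. Plugins are enabled because the profiling
  # stage loads lib/libprofiling.so from this package, and --disable-download keeps the
  # configure step offline (the required subprojects are already copied in postFetch).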
configureFlags = [ 81 | "--target-list=riscv64-softmmu" 82 | # "--enable-debug" 83 | "--enable-zstd" 84 | "--enable-plugins" 85 | "--disable-werror" 86 | "--disable-download" 87 | ]; 88 | preBuild = "cd build"; 89 | 90 | # build plugins 91 | postBuild = '' 92 | make -C contrib/plugins 93 | ''; 94 | # install plugins 95 | postInstall = '' 96 | mkdir -p $out/lib 97 | cp contrib/plugins/*.so $out/lib/ 98 | ''; 99 | } 100 | -------------------------------------------------------------------------------- /builders/cptBuilder/simpoint/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | }: 3 | let 4 | pname = "simpoint"; 5 | version = "3.2"; 6 | in stdenv.mkDerivation { 7 | inherit pname version; 8 | src = builtins.fetchurl { 9 | url = "http://cseweb.ucsd.edu/~calder/${pname}/releases/SimPoint.${version}.tar.gz"; 10 | sha256 = "0cp11461ygyskkbzxbl187i3m12b2mzgm4cj7panx5jqpiz491pc"; 11 | }; 12 | patches = [(builtins.fetchurl { 13 | url = "https://github.com/intel/pinplay-tools/raw/60e034fe4bc23ec551870fa382d0a64f21b8aeb7/pinplay-scripts/PinPointsHome/Linux/bin/simpoint_modern_gcc.patch"; 14 | sha256 = "1wh7nvv34yacbk8zmydg4x9kxzd7fcw0k8w1c7i13ynj1dwy743b"; 15 | })]; 16 | installPhase = '' 17 | mkdir -p $out/bin 18 | cp bin/simpoint $out/bin/ 19 | ''; 20 | } 21 | -------------------------------------------------------------------------------- /builders/default.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | , runCommand 3 | , rmExt 4 | , callPackage 5 | , riscv64-pkgs 6 | , riscv64-stdenv 7 | }: { 8 | 9 | /** 10 | cores: Number of cores. 11 | * **Type**: number-in-string 12 | * **Default value**: `"1"` 13 | * **Available values**: `"1"`, `"2"`. 14 | ([LibCheckpoint](https://github.com/OpenXiangShan/LibCheckpoint) is still in development, 15 | its stable configuration current only supports dual core) 16 | * **Note**: `cpt-simulator`: qemu supports multiple cores, however, nemu only supports single core. 17 | */ 18 | cores ? "1" 19 | 20 | /** 21 | cpt-maxK: maxK value for all benchmarks in checkpoint generation. 22 | * **Type**: number-in-string 23 | * **Default value**: `"30"` 24 | * **Description**: 25 | maxK is a parameter in SimPoint algorithm used during the checkpoint's clustering stage. 26 | `cpt-maxK` will set maxK for all benchmarks' clustering stage in checkpoints generation. 27 | To override the maxK for specific benchmarks, refer to the `cpt-maxK-bmk` argument. 28 | */ 29 | , cpt-maxK ? "30" 30 | 31 | /** 32 | cpt-intervals: Number of BBV interval instructions in checkpoint generation. 33 | * **Type**: number-in-string 34 | * **Default value**: `"20000000"` 35 | */ 36 | , cpt-intervals ? "20000000" 37 | 38 | /** 39 | cpt-simulator: Simulator used in checkpoint generation. 40 | * **Type**: string 41 | * **Default value**: `"qemu"` 42 | * **Available values**: `"qemu"`, `"nemu"` 43 | * **Note**: 44 | Though nemu is faster than qemu, 45 | 46 | * nemu does not support multiple cores, 47 | * the current version of nemu is not deterministic. 48 | 49 | Therefore, qemu is chosen as the default simulator. 50 | For more information, refer to [OpenXiangShan/Deterload Issue #8: nemu is not deterministic](https://github.com/OpenXiangShan/Deterload/issues/8). 51 | */ 52 | , cpt-simulator ? "qemu" 53 | 54 | /** 55 | cpt-format: Compress format of output checkpoints. 
56 | * **Type**: string 57 | * **Default value**: `"zstd"` 58 | * **Available value**: `"zstd"`, `"gz"` 59 | * **Note**: nemu supports both formats; however, qemu only supports zstd format. 60 | */ 61 | , cpt-format ? "zstd" 62 | 63 | /** 64 | interactive: The image is interactive. 65 | * **Type**: bool 66 | * **Default value**: `false` 67 | * **Note**: This argument only use together with `-A sim` to debug. 68 | */ 69 | , interactive ? false 70 | 71 | /** 72 | enableTrap: Whether to incorporate QEMU/NEMU trap in image. 73 | * **Type**: bool 74 | * **Default value**: `true` 75 | */ 76 | , enableTrap ? true 77 | 78 | /** 79 | linuxVersion: The linux kernel version 80 | * **Type**: string 81 | * **Default value**: `"default"` 82 | * **Available values**: Suffix of any nixpkgs-supported linuxKernel.kernels.linux_xxx. 83 | To list available linuxKernel.kernels.linux_xxx: 84 | ```bash 85 | nix-instantiate --eval -E 'let pkgs=import {}; in builtins.filter (x: pkgs.lib.hasPrefix "linux_" x) (builtins.attrNames pkgs.linuxKernel.kernels)' 86 | ``` 87 | */ 88 | , linuxVersion ? "default" 89 | 90 | /** 91 | linuxStructuredExtraConfig: The extra structured linux config 92 | * **Type**: attr (with lib.kernel; {kernelConfigEntry = kernelItem; ...}) 93 | * **Note1**: 94 | The syntax of kernelConfigEntry is the entry available is Kconfig. 95 | In other words, the CONFIG_XXX with "CONFIG_" removed. 96 | The syntax of kernelItem is lib.kernel.xxx. 97 | * **Note2**: 98 | This argument will used to generate linux config file together with riscv64's defconfig 99 | and built-in configs in builders/imgBuilder/linux/default.nix. 100 | The generated config file can be accessed by `linux.configfile`. 101 | */ 102 | , linuxStructuredExtraConfig ? with lib.kernel; { 103 | MODULES = no; 104 | NFS_FS = no; 105 | KVM = yes; 106 | NONPORTABLE = yes; 107 | RISCV_SBI_V01 = yes; 108 | SERIO_LIBPS2 = yes; 109 | SERIAL_UARTLITE = yes; 110 | SERIAL_UARTLITE_CONSOLE = yes; 111 | HVC_RISCV_SBI = yes; 112 | STACKTRACE = yes; 113 | RCU_CPU_STALL_TIMEOUT = freeform "300"; 114 | CMDLINE = freeform "norandmaps"; 115 | } 116 | 117 | /** 118 | linuxKernelPatches: The linux kernelPatches 119 | * **Type**: list of attrs ([{name = xxx; patch = xxx; extraConfig = xxx;} ...]) 120 | * **Default values:**: ./imgBuilder/linux/patches/*.nix 121 | * **Node**: 122 | The `patch` is a patch file that can be applied by patch executable to linux source code. 123 | The optional `extraConfig` is linux configs, each line of which is in string form without the CONFIG_ prefix. 124 | */ 125 | , linuxKernelPatches ? [ 126 | (import ./imgBuilder/linux/patches/enable-clint.nix) 127 | (import ./imgBuilder/linux/patches/panic_shutdown.nix) 128 | (import ./imgBuilder/linux/patches/relaxing_random_entropy.nix) 129 | ] 130 | 131 | , ... 
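  # The trailing ellipsis accepts any extra attributes a caller passes without raising
  # an "unexpected argument" error, so callers may pass a superset of the options
  # documented above.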
132 | }: 133 | assert lib.assertOneOf "cores" cores ["1" "2"]; 134 | assert lib.assertMsg (cpt-simulator=="nemu" -> cores=="1") "nemu only supports single core"; 135 | assert lib.assertOneOf "cpt-simulator" cpt-simulator ["qemu" "nemu"]; 136 | assert lib.assertOneOf "cpt-format" cpt-format ["gz" "zstd"]; 137 | assert lib.assertMsg (cpt-simulator=="qemu" -> cpt-format=="zstd") "qemu only supports cpt-format: zstd"; 138 | benchmark: lib.makeScope lib.callPackageWith (self: { 139 | inherit benchmark; 140 | gen_init_cpio = callPackage ./imgBuilder/linux/initramfs/base/gen_init_cpio {}; 141 | initramfs_base = callPackage ./imgBuilder/linux/initramfs/base { 142 | inherit (self) gen_init_cpio; 143 | }; 144 | 145 | riscv64-libc = riscv64-stdenv.cc.libc.static; 146 | riscv64-busybox = riscv64-pkgs.busybox.override { 147 | enableStatic = true; 148 | useMusl = true; 149 | }; 150 | before_workload = callPackage ./imgBuilder/linux/initramfs/overlays/before_workload { 151 | inherit (self) riscv64-libc; 152 | }; 153 | nemu_trap = callPackage ./imgBuilder/linux/initramfs/overlays/nemu_trap { 154 | inherit (self) riscv64-libc; 155 | }; 156 | qemu_trap = callPackage ./imgBuilder/linux/initramfs/overlays/qemu_trap { 157 | inherit (self) riscv64-libc; 158 | }; 159 | initramfs_overlays = callPackage ./imgBuilder/linux/initramfs/overlays { 160 | inherit (self) riscv64-busybox before_workload benchmark; 161 | after_workload = self."${cpt-simulator}_trap"; 162 | inherit interactive enableTrap; 163 | }; 164 | 165 | initramfs = callPackage ./imgBuilder/linux/initramfs { 166 | inherit (self) benchmark; 167 | base = self.initramfs_base; 168 | overlays = self.initramfs_overlays; 169 | }; 170 | 171 | linux = callPackage ./imgBuilder/linux { 172 | inherit (self) initramfs; 173 | riscv64-linux = riscv64-pkgs.linuxKernel.kernels."linux_${linuxVersion}"; 174 | inherit linuxStructuredExtraConfig linuxKernelPatches; 175 | }; 176 | 177 | dts = callPackage ./imgBuilder/opensbi/dts { inherit cores; }; 178 | opensbi = callPackage ./imgBuilder/opensbi { inherit (self) dts linux; }; 179 | gcpt_single_core = callPackage ./imgBuilder/gcpt/single_core.nix { 180 | inherit (self) opensbi; 181 | }; 182 | gcpt_dual_core = callPackage ./imgBuilder/gcpt/dual_core.nix { 183 | inherit (self) opensbi; 184 | }; 185 | gcpt = if cores=="1" then self.gcpt_single_core 186 | else if cores=="2" then self.gcpt_dual_core 187 | else throw "gcpt only support 1 or 2 cores"; 188 | img = callPackage ./imgBuilder { 189 | inherit (self) gcpt; 190 | }; 191 | 192 | nemu = callPackage ./cptBuilder/nemu {}; 193 | qemu = callPackage ./cptBuilder/qemu {}; 194 | simpoint = callPackage ./cptBuilder/simpoint {}; 195 | stage1-profiling = callPackage ./cptBuilder/1.profiling.nix { 196 | inherit (self) qemu nemu img; 197 | workload_name = "miao"; 198 | intervals = cpt-intervals; 199 | simulator = cpt-simulator; 200 | profiling_log = "profiling.log"; 201 | smp = cores; 202 | }; 203 | stage2-cluster = callPackage ./cptBuilder/2.cluster.nix { 204 | inherit (self) simpoint stage1-profiling; 205 | maxK = cpt-maxK; 206 | }; 207 | stage3-checkpoint = callPackage ./cptBuilder/3.checkpoint.nix { 208 | inherit (self) qemu nemu img stage2-cluster; 209 | workload_name = "miao"; 210 | intervals = cpt-intervals; 211 | simulator = cpt-simulator; 212 | checkpoint_format = cpt-format; 213 | checkpoint_log = "checkpoint.log"; 214 | smp = cores; 215 | }; 216 | # TODO: name 217 | # workload_name = "miao"; 218 | # intervals = cpt-intervals; 219 | # simulator = cpt-simulator; 220 | # 
checkpoint_format = cpt-format; 221 | # checkpoint_log = "checkpoint.log"; 222 | # smp = cores; 223 | # maxK 224 | cpts-simpoint = callPackage ./cptBuilder { 225 | inherit (self) stage3-checkpoint; 226 | }; 227 | 228 | # checkpoint when instruction count = 0 229 | cpt-0th = self.stage3-checkpoint.override { 230 | stage2-cluster = runCommand 231 | "${rmExt self.stage2-cluster.name}.afterLinuxBoot_cluster" {} '' 232 | mkdir -p $out 233 | echo 0 0 > $out/simpoints0 234 | echo 1 0 > $out/weights0 235 | ''; 236 | }; 237 | 238 | sim = callPackage ./sim.nix { 239 | inherit (self) qemu img; 240 | smp = cores; 241 | }; 242 | }) 243 | -------------------------------------------------------------------------------- /builders/imgBuilder/default.nix: -------------------------------------------------------------------------------- 1 | { gcpt }@args: gcpt.overrideAttrs (old: { 2 | passthru = args; 3 | }) 4 | -------------------------------------------------------------------------------- /builders/imgBuilder/gcpt/dual_core.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , fetchFromGitHub 3 | , python3 4 | , riscv64-cc 5 | , rmExt 6 | 7 | , opensbi 8 | }@args: stdenv.mkDerivation { 9 | name = "${rmExt opensbi.name}.gcpt_2core"; 10 | src = fetchFromGitHub { 11 | owner = "OpenXiangShan"; 12 | repo = "LibCheckpoint"; 13 | rev = "f8c33689cdf11aa2f8f25dbf99075dca148ecd44"; 14 | hash = "sha256-UpHhy9dsYs7PAXllAEhvFcYOuEX8US365q1QUwNxqbA="; 15 | fetchSubmodules = true; 16 | }; 17 | buildInputs = [ 18 | (python3.withPackages (pypkgs: [ 19 | pypkgs.protobuf 20 | pypkgs.grpcio-tools 21 | ])) 22 | riscv64-cc 23 | ]; 24 | makeFlags = [ 25 | "CROSS_COMPILE=riscv64-unknown-linux-gnu-" 26 | "USING_QEMU_DUAL_CORE_SYSTEM=1" 27 | "GCPT_PAYLOAD_PATH=${opensbi}/fw_payload.bin" 28 | ]; 29 | installPhase = '' 30 | cp build/gcpt.bin $out 31 | ''; 32 | passthru = args; 33 | } 34 | -------------------------------------------------------------------------------- /builders/imgBuilder/gcpt/single_core.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , fetchFromGitHub 3 | , riscv64-cc 4 | , rmExt 5 | 6 | , opensbi 7 | }@args: stdenv.mkDerivation { 8 | name = "${rmExt opensbi.name}.gcpt_1core"; 9 | src = fetchFromGitHub { 10 | owner = "OpenXiangShan"; 11 | repo = "LibCheckpointAlpha"; 12 | rev = "c5c2fef74133fb2b8ef8642633f60e0996493f29"; 13 | hash = "sha256-Rxlv47QY273jbcSX/A1PuT7+2aCB2sVW32pL91G3BmI="; 14 | }; 15 | buildInputs = [ 16 | riscv64-cc 17 | ]; 18 | makeFlags = [ 19 | "CROSS_COMPILE=riscv64-unknown-linux-gnu-" 20 | "GCPT_PAYLOAD_PATH=${opensbi}/fw_payload.bin" 21 | ]; 22 | installPhase = '' 23 | cp build/gcpt.bin $out 24 | ''; 25 | passthru = args; 26 | } 27 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/default.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | 3 | , riscv64-stdenv 4 | , rmExt 5 | 6 | , initramfs 7 | , riscv64-linux 8 | , linuxStructuredExtraConfig 9 | , linuxKernelPatches 10 | # The `override` is overriding the arguments of pkgs/os-specific/linux/kernel/mainline.nix 11 | # The `argsOverride` attr is overriding the makeOverridable attrs of pkgs/os-specific/linux/kernel/generic.nix 12 | # The `overrideAttrs` is overriding derivation built by pkgs/os-specific/linux/kernel/manual-config.nix 13 | }@args: (riscv64-linux.override { argsOverride = { 14 | stdenv = riscv64-stdenv; 15 | 
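  # Each kernelPatches entry follows the nixpkgs convention {name, patch, extraConfig ? ...},
  # as produced by the files under ./patches/.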
kernelPatches = linuxKernelPatches; 16 | ignoreConfigErrors = false; 17 | enableCommonConfig = false; 18 | structuredExtraConfig = with lib.kernel; { 19 | INITRAMFS_SOURCE = freeform (builtins.toString initramfs); 20 | } // linuxStructuredExtraConfig; 21 | };}).overrideAttrs (old: { 22 | name = "${rmExt initramfs.name}.linux"; 23 | # `postInstall` in pkgs/os-specific/linux/kernel/manual-config.nix is depends on `isModular`, which is a good design. 24 | # However, pkgs/os-specific/linux/kernel/generic.nix hardcode the config = {CONFIG_MODULES = "y";} which is not generic and is a bad design. 25 | # Here, we correct the `postInstall` by checking linuxStructuredExtraConfig. 26 | postInstall = if linuxStructuredExtraConfig?MODULES 27 | && linuxStructuredExtraConfig.MODULES==lib.kernel.yes 28 | # The generated vmlinux (Image is stripped binary, vmlinux is ELF with debug info) is located in linux.dev 29 | then old.postInstall else '' 30 | mkdir -p $dev 31 | cp vmlinux $dev/ 32 | ''; 33 | passthru = args // old.passthru; 34 | }) 35 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/base/default.nix: -------------------------------------------------------------------------------- 1 | { writeText 2 | , runCommand 3 | 4 | , gen_init_cpio 5 | }@args: 6 | let 7 | name = "init.cpio"; 8 | cpio_list = writeText "cpio_list" '' 9 | dir /bin 755 0 0 10 | dir /etc 755 0 0 11 | dir /dev 755 0 0 12 | dir /lib 755 0 0 13 | dir /proc 755 0 0 14 | dir /sbin 755 0 0 15 | dir /sys 755 0 0 16 | dir /tmp 755 0 0 17 | dir /usr 755 0 0 18 | dir /mnt 755 0 0 19 | slink /usr/bin /bin 755 0 0 20 | dir /usr/lib 755 0 0 21 | dir /usr/sbin 755 0 0 22 | dir /var 755 0 0 23 | dir /var/tmp 755 0 0 24 | dir /root 755 0 0 25 | dir /var/log 755 0 0 26 | 27 | nod /dev/console 644 0 0 c 5 1 28 | nod /dev/null 644 0 0 c 1 3 29 | ''; 30 | in runCommand name { 31 | passthru = args // { inherit cpio_list; }; 32 | } '' 33 | mkdir -p $out 34 | ${gen_init_cpio}/bin/gen_init_cpio -t 0 ${cpio_list} > $out/${name} 35 | '' 36 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/base/gen_init_cpio/default.nix: -------------------------------------------------------------------------------- 1 | { runCommandCC }: 2 | runCommandCC "gen_init_cpio" { 3 | src = builtins.fetchurl { 4 | url = "https://github.com/torvalds/linux/raw/f3b2306bea33b3a86ad2df4dcfab53b629e1bc84/usr/gen_init_cpio.c"; 5 | sha256 = "0i938rf0k0wrvpdghpjm4cb6f6ycz6y5y5lgfnh36cdlsabap71h"; 6 | }; 7 | } '' 8 | mkdir -p $out/bin 9 | cc $src -o $out/bin/gen_init_cpio 10 | '' 11 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/cpio_reset_timestamp.patch: -------------------------------------------------------------------------------- 1 | --- old/src/copyout.c 2023-04-29 02:35:54.000000000 +0800 2 | +++ new/src/copyout.c 2024-09-07 03:14:13.246022894 +0800 3 | @@ -338,16 +338,16 @@ 4 | to_ascii_or_warn (p, file_hdr->c_mode, 8, LG_16, file_hdr->c_name, 5 | _("file mode")); 6 | p += 8; 7 | - to_ascii_or_warn (p, file_hdr->c_uid, 8, LG_16, file_hdr->c_name, 8 | + to_ascii_or_warn (p, 0, 8, LG_16, file_hdr->c_name, 9 | _("uid")); 10 | p += 8; 11 | - to_ascii_or_warn (p, file_hdr->c_gid, 8, LG_16, file_hdr->c_name, 12 | + to_ascii_or_warn (p, 0, 8, LG_16, file_hdr->c_name, 13 | _("gid")); 14 | p += 8; 15 | to_ascii_or_warn (p, file_hdr->c_nlink, 8, LG_16, file_hdr->c_name, 16 | _("number 
of links")); 17 | p += 8; 18 | - to_ascii_or_warn (p, file_hdr->c_mtime, 8, LG_16, file_hdr->c_name, 19 | + to_ascii_or_warn (p, 0, 8, LG_16, file_hdr->c_name, 20 | _("modification time")); 21 | p += 8; 22 | if (to_ascii_or_error (p, file_hdr->c_filesize, 8, LG_16, file_hdr->c_name, 23 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/default.nix: -------------------------------------------------------------------------------- 1 | { runCommand 2 | , cpio 3 | , writeClosure 4 | 5 | , benchmark 6 | , base 7 | , overlays 8 | }@args: let 9 | cpioPatched = cpio.overrideAttrs (old: { patches = [./cpio_reset_timestamp.patch]; }); 10 | benchmark-closure = writeClosure [ benchmark ]; 11 | in runCommand "${benchmark.name}.cpio" { 12 | passthru = args // { inherit cpioPatched; }; 13 | } '' 14 | cp ${base}/init.cpio $out 15 | chmod +w $out 16 | 17 | # !!!NOTED!!!: 18 | # Prepare folder nix/store before copying contents in nix/store 19 | # https://www.kernel.org/doc/Documentation/filesystems/ramfs-rootfs-initramfs.txt 20 | # > The Linux kernel cpio extractor won't create files in a directory that 21 | # > doesn't exist, so the directory entries must go before the files that go in 22 | # > those directories. 23 | cd / 24 | echo ./nix | ${cpioPatched}/bin/cpio --reproducible -H newc -oAF $out 25 | echo ./nix/store | ${cpioPatched}/bin/cpio --reproducible -H newc -oAF $out 26 | 27 | for dep in $(cat ${benchmark-closure}); do 28 | find .$dep | sort -n | ${cpioPatched}/bin/cpio --reproducible -H newc -oAF $out 29 | done 30 | 31 | cd ${overlays} 32 | find . | sort -n | ${cpioPatched}/bin/cpio --reproducible -H newc -oAF $out 33 | '' 34 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/overlays/before_workload/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , riscv64-cc 3 | 4 | , riscv64-libc 5 | }: 6 | stdenv.mkDerivation rec { 7 | name = "before_workload"; 8 | src = builtins.fetchurl { 9 | url = "https://github.com/OpenXiangShan/riscv-rootfs/raw/da983ec95858dfd6f30e9feadd534b79db37e618/apps/before_workload/before_workload.c"; 10 | sha256 = "09i7ad3cfvlkpwjfci9rhfhgx240v6ip5l1ns8yfhvxg7r6dcg6j"; 11 | }; 12 | dontUnpack = true; 13 | buildInputs = [ 14 | riscv64-cc 15 | riscv64-libc 16 | ]; 17 | # do not disable timer interrupts, so that we can run multithread workloads. 18 | postPatch = '' 19 | sed '/DISABLE_TIME_INTR/d' ${src} > ${name}.c 20 | ''; 21 | buildPhase = '' 22 | riscv64-unknown-linux-gnu-gcc ${name}.c -o ${name} -static 23 | ''; 24 | installPhase = '' 25 | mkdir -p $out/bin 26 | cp ${name} $out/bin/ 27 | ''; 28 | } 29 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/overlays/default.nix: -------------------------------------------------------------------------------- 1 | { writeText 2 | , runCommand 3 | 4 | , riscv64-busybox 5 | , before_workload 6 | , after_workload 7 | , benchmark 8 | , enableTrap 9 | , run_sh ? 
writeText "run.sh" '' 10 | ${if enableTrap then "before_workload" else ""} 11 | echo start 12 | ${benchmark} 13 | echo exit 14 | ${if enableTrap then "after_workload" else ""} 15 | '' 16 | , interactive 17 | }@args: 18 | let 19 | name = "initramfs-overlays"; 20 | inittab = writeText "inittab" '' 21 | ::sysinit:/bin/busybox --install -s 22 | /dev/console::sysinit:-/bin/sh ${if interactive then "" else "/bin/run.sh"} 23 | ''; 24 | in runCommand name { 25 | passthru = args // { inherit inittab run_sh; }; 26 | } ('' 27 | mkdir -p $out/bin 28 | cp ${riscv64-busybox}/bin/busybox $out/bin/ 29 | ln -s /bin/busybox $out/init 30 | 31 | mkdir -p $out/etc 32 | cp ${inittab} $out/etc/inittab 33 | 34 | mkdir -p $out/bin 35 | cp ${run_sh} $out/bin/run.sh 36 | '' + (if enableTrap then '' 37 | cp ${before_workload}/bin/before_workload $out/bin/ 38 | cp ${after_workload}/bin/after_workload $out/bin/ 39 | '' else "")) 40 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/overlays/nemu_trap/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , riscv64-cc 3 | 4 | , riscv64-libc 5 | }: 6 | stdenv.mkDerivation rec { 7 | name = "nemu_trap"; 8 | src = builtins.fetchurl { 9 | url = "https://github.com/OpenXiangShan/riscv-rootfs/raw/da983ec95858dfd6f30e9feadd534b79db37e618/apps/trap/trap.c"; 10 | sha256 = "05rlbicdbz9zdv6a82bjm7xp13rzb84sj9pkb5cqmizmlsmf3rzj"; 11 | }; 12 | dontUnpack = true; 13 | buildInputs = [ 14 | riscv64-cc 15 | riscv64-libc 16 | ]; 17 | buildPhase = '' 18 | riscv64-unknown-linux-gnu-gcc ${src} -o after_workload -static 19 | ''; 20 | installPhase = '' 21 | mkdir -p $out/bin 22 | cp after_workload $out/bin/ 23 | ''; 24 | } 25 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/initramfs/overlays/qemu_trap/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , riscv64-cc 3 | 4 | , riscv64-libc 5 | }: 6 | stdenv.mkDerivation rec { 7 | name = "qemu_trap"; 8 | src = builtins.fetchurl { 9 | url = "https://github.com/OpenXiangShan/riscv-rootfs/raw/da983ec95858dfd6f30e9feadd534b79db37e618/apps/qemu_trap/qemu_trap.c"; 10 | sha256 = "0ray1gq841m8n6kyhp2ncj6aa7nw3lwwy3mfjh3848hsy7583vky"; 11 | }; 12 | dontUnpack = true; 13 | buildInputs = [ 14 | riscv64-cc 15 | riscv64-libc 16 | ]; 17 | buildPhase = '' 18 | riscv64-unknown-linux-gnu-gcc ${src} -o after_workload -static 19 | ''; 20 | installPhase = '' 21 | mkdir -p $out/bin 22 | cp after_workload $out/bin/ 23 | ''; 24 | } 25 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/patches/enable-clint.nix: -------------------------------------------------------------------------------- 1 | rec { 2 | name = "enable-clint"; 3 | patch = builtins.toFile name '' 4 | --- a/drivers/clocksource/Kconfig 5 | +++ b/drivers/clocksource/Kconfig 6 | @@ -643,7 +643,7 @@ 7 | required for all RISC-V systems. 
8 | 9 | config CLINT_TIMER 10 | - bool "CLINT Timer for the RISC-V platform" if COMPILE_TEST 11 | + bool "CLINT Timer for the RISC-V platform" 12 | depends on GENERIC_SCHED_CLOCK && RISCV 13 | select TIMER_PROBE 14 | select TIMER_OF 15 | ''; 16 | extraConfig = '' 17 | CLINT_TIMER y 18 | ''; 19 | } 20 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/patches/panic_shutdown.nix: -------------------------------------------------------------------------------- 1 | { 2 | name = "panic_shutdown"; 3 | patch = ./panic_shutdown.patch; 4 | } 5 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/patches/panic_shutdown.patch: -------------------------------------------------------------------------------- 1 | --- old/arch/riscv/kernel/reset.c 2024-08-29 23:36:13.000000000 +0800 2 | +++ new/arch/riscv/kernel/reset.c 2024-09-05 14:34:32.353111103 +0800 3 | @@ -15,9 +15,16 @@ 4 | void (*pm_power_off)(void) = NULL; 5 | EXPORT_SYMBOL(pm_power_off); 6 | 7 | +static void nemu_signal(int a){ 8 | + asm volatile ("mv a0, %0\n\t" 9 | + ".insn r 0x6B, 0, 0, x0, x0, x0\n\t" 10 | + : 11 | + : "r"(a) 12 | + : "a0"); 13 | +} 14 | void machine_restart(char *cmd) 15 | { 16 | - do_kernel_restart(cmd); 17 | + nemu_signal(-1); // BAD_TRAP 18 | while (1); 19 | } 20 | 21 | -------------------------------------------------------------------------------- /builders/imgBuilder/linux/patches/relaxing_random_entropy.nix: -------------------------------------------------------------------------------- 1 | rec { 2 | name = "relaxing-random-entropy"; 3 | patch = builtins.toFile name '' 4 | --- a/drivers/char/random.c 5 | +++ b/drivers/char/random.c 6 | @@ -1280,8 +1280,6 @@ 7 | last = stack->entropy; 8 | } 9 | stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1); 10 | - if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT) 11 | - return; 12 | 13 | atomic_set(&stack->samples, 0); 14 | timer_setup_on_stack(&stack->timer, entropy_timer, 0); 15 | ''; 16 | } 17 | -------------------------------------------------------------------------------- /builders/imgBuilder/opensbi/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , fetchFromGitHub 3 | , python3 4 | 5 | , riscv64-cc 6 | , rmExt 7 | , linux 8 | , dts 9 | }@args: stdenv.mkDerivation { 10 | name = "${rmExt linux.name}.opensbi"; 11 | 12 | src = fetchFromGitHub { 13 | owner = "riscv-software-src"; 14 | repo = "opensbi"; 15 | rev = "c4940a9517486413cd676fc8032bb55f9d4e2778"; 16 | hash = "sha256-cV+2DJjlqdG9zR3W6cH6BIZqnuB1kdH3mjc4PO+VPeE="; 17 | }; 18 | 19 | buildInputs = [ 20 | python3 21 | riscv64-cc 22 | ]; 23 | 24 | makeFlags = [ 25 | "CROSS_COMPILE=riscv64-unknown-linux-gnu-" 26 | "PLATFORM=generic" 27 | "FW_FDT_PATH=${dts}/xiangshan.dtb" 28 | "FW_PAYLOAD_PATH=${linux}/Image" 29 | ]; 30 | buildPhase = '' 31 | patchShebangs . 32 | 33 | # Default FW_PAYLOAD memory layout: 34 | # Refers to https://github.com/riscv-software-src/opensbi/blob/master/platform/generic/objects.mk 35 | # and https://docs.xiangshan.cc/zh-cn/latest/tools/opensbi-kernel-for-xs/ 36 | # FW_PAYLOAD_OFFSET=0x100000 37 | # ------------------------------------------------------------------- 38 | # | gcpt | opensbi firmware | payload e.g. 
linux Image | FDT | 39 | # ------------------------------------------------------------------- 40 | # | | | | 41 | # |OFFSET | FW_PAYLOAD_OFFSET | | 42 | # |(1MB) | (default:0x100000=1MB) | | 43 | # | | 44 | # |---------- FW_PAYLOAD_FDT_OFFSET --------------------------| 45 | # (default:0x2200000=2MB+32MB) 46 | # Noted: In 64bit system, the FW_PAYLOAD_OFFSET and FW_PAYLOAD_FDT_OFFSET must be aligned to 2MB. 47 | 48 | # Calculate the FW_PAYLOAD_FDT_OFFSET 49 | ALIGN=0x200000 50 | FW_PAYLOAD_OFFSET=0x100000 51 | IMAGE_SIZE=$(ls -l ${linux}/Image | awk '{print $5}') 52 | IMAGE_END=$((FW_PAYLOAD_OFFSET + IMAGE_SIZE)) 53 | IMAGE_END_ALIGNED=$(( (IMAGE_END + ALIGN-1) & ~(ALIGN-1) )) 54 | IMAGE_END_ALIGNED_HEX=$(printf "0x%x" $IMAGE_END_ALIGNED) 55 | echo FW_PAYLOAD_FDT_OFFSET=$IMAGE_END_ALIGNED_HEX 56 | 57 | make -j $NIX_BUILD_CORES $makeFlags \ 58 | FW_PAYLOAD_OFFSET=$FW_PAYLOAD_OFFSET \ 59 | FW_PAYLOAD_FDT_OFFSET=$IMAGE_END_ALIGNED_HEX 60 | ''; 61 | 62 | outputs = [ "out" "dev" ]; 63 | installPhase = '' 64 | mkdir -p $out 65 | cp build/platform/generic/firmware/fw_payload.bin $out/ 66 | mkdir -p $dev 67 | cp build/platform/generic/firmware/fw_payload.elf $dev/ 68 | ''; 69 | passthru = args; 70 | } 71 | -------------------------------------------------------------------------------- /builders/imgBuilder/opensbi/dts/default.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , fetchFromGitHub 3 | , dtc 4 | 5 | , cores 6 | }: 7 | let 8 | name = "xiangshan.dtb"; 9 | in stdenv.mkDerivation { 10 | inherit name; 11 | src = fetchFromGitHub { 12 | owner = "OpenXiangShan"; 13 | repo = "nemu_board"; 14 | rev = "37dc20e77a9bbff54dc2e525dc6c0baa3d50f948"; 15 | hash = "sha256-MvmYZqxA1jxHR4Xrw+18EO+b3iqvmn2m9LkcpxqlUg8="; 16 | }; 17 | 18 | buildInputs = [ 19 | dtc 20 | ]; 21 | buildPhase = let 22 | dtsFile = if cores=="1" then "system.dts" 23 | else if cores=="2" then "fpga-dualcore-system.dts" 24 | else if cores=="4" then "fpga-fourcore-system.dts" 25 | else throw "dts only supports 1/2/4 cores"; 26 | in '' 27 | cd dts 28 | dtc -O dtb -o ${name} ${dtsFile} 29 | ''; 30 | installPhase = '' 31 | mkdir -p $out 32 | cp ${name} $out/ 33 | ''; 34 | } 35 | -------------------------------------------------------------------------------- /builders/sim.nix: -------------------------------------------------------------------------------- 1 | { writeShellScriptBin 2 | , rmExt 3 | 4 | , qemu 5 | , img 6 | , smp 7 | }: writeShellScriptBin "${rmExt img.name}.sim" (toString [ 8 | "${qemu}/bin/qemu-system-riscv64" 9 | "-bios ${img}" 10 | "-M nemu" 11 | "-nographic" 12 | "-m 8G" 13 | "-smp ${smp}" 14 | "-cpu rv64,v=true,vlen=128,h=true,sv39=true,sv48=false,sv57=false,sv64=false" 15 | # "-plugin ${qemu}/lib/libprofiling.so,workload=${workload_name},intervals=${intervals},target=$out" 16 | "-icount shift=0,align=off,sleep=on" 17 | ]) 18 | -------------------------------------------------------------------------------- /builders/test-opts.nix: -------------------------------------------------------------------------------- 1 | { 2 | args = { 3 | cores = ["1" "2"]; 4 | # linuxVersion = ["default" "latest"]; 5 | }; 6 | } 7 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | let 2 | lib = import ; 3 | in lib.makeOverridable ( 4 | { pkgs ? 
import (fetchTarball { 5 | # Currently latest nixpkgs 24.11 6 | url = "https://github.com/NixOS/nixpkgs/archive/9c6b49aeac36e2ed73a8c472f1546f6d9cf1addc.tar.gz"; 7 | sha256 = "0zwnaiw6cryrvwxxa96f72p4w75wq2miyi066f2sk8n7ivj0kxcb"; 8 | }) {} 9 | 10 | /** 11 | cc: Compiler Collection used for compiling RISC-V binaries. 12 | * **Type**: string 13 | * **Default value**: `"gcc14"` 14 | * **Available values**: Prefix of any nixpkgs-supported xxxStdenv. 15 | To list available xxxStdenv: 16 | ```bash 17 | nix-instantiate --eval -E 'let pkgs=import {}; in builtins.filter (x: pkgs.lib.hasSuffix "Stdenv" x)(builtins.attrNames pkgs)' 18 | ``` 19 | * **TODO**: Currently only supports GCC's stdenv. 20 | LLVM's fortran compiler (flang) is needed to support Clang's stdenv. 21 | Preliminary experiments with riscv64-jemalloc show that Clang provides better auto-vectorization than GCC. 22 | */ 23 | , cc ? "gcc14" 24 | , ... 25 | }@args: 26 | assert pkgs.pkgsCross.riscv64 ? "${cc}Stdenv"; 27 | rec { 28 | deterPkgs = pkgs.lib.makeScope pkgs.lib.callPackageWith (self: pkgs // { 29 | riscv64-pkgs = pkgs.pkgsCross.riscv64; 30 | riscv64-stdenv = self.riscv64-pkgs."${cc}Stdenv"; 31 | riscv64-cc = self.riscv64-stdenv.cc; 32 | riscv64-fortran = self.riscv64-pkgs.wrapCCWith { 33 | cc = self.riscv64-stdenv.cc.cc.override { 34 | name = "gfortran"; 35 | langFortran = true; 36 | langCC = false; 37 | langC = false; 38 | profiledCompiler = false; 39 | }; 40 | # fixup wrapped prefix, which only appear if hostPlatform!=targetPlatform 41 | # for more details see /pkgs/build-support/cc-wrapper/default.nix 42 | stdenvNoCC = self.riscv64-pkgs.stdenvNoCC.override { 43 | hostPlatform = pkgs.stdenv.hostPlatform; 44 | }; 45 | # Beginning from 24.05, wrapCCWith receive `runtimeShell`. 46 | # If leave it empty, the default uses riscv64-pkgs.runtimeShell, 47 | # thus executing the sheBang will throw error: 48 | # `cannot execute: required file not found`. 49 | runtimeShell = pkgs.runtimeShell; 50 | }; 51 | rmExt = name: builtins.concatStringsSep "." 52 | (pkgs.lib.init 53 | (pkgs.lib.splitString "." 
name)); 54 | writeShScript = name: passthru: text: pkgs.writeTextFile { 55 | inherit name; 56 | text = '' 57 | #!/usr/bin/env sh 58 | ${text} 59 | ''; 60 | executable = true; 61 | derivationArgs = { inherit passthru; }; 62 | }; 63 | utils = pkgs.callPackage ./utils.nix {}; 64 | }); 65 | 66 | build = deterPkgs.callPackage ./builders {} args; 67 | } 68 | ) 69 | -------------------------------------------------------------------------------- /docs/designs/1.overview.md: -------------------------------------------------------------------------------- 1 | # 概览(Overview) 2 | 3 | 4 | -------------------------------------------------------------------------------- /docs/designs/2.configuration_system.md: -------------------------------------------------------------------------------- 1 | # 配置系统(Configuration System) 2 | 3 | TODO: 4 | -------------------------------------------------------------------------------- /docs/designs/3.benchmarks/index.md: -------------------------------------------------------------------------------- 1 | # 基准测试(Benchmarks) 2 | 3 | TODO: 4 | -------------------------------------------------------------------------------- /docs/designs/3.benchmarks/openblas.md: -------------------------------------------------------------------------------- 1 | # OpenBLAS 2 | -------------------------------------------------------------------------------- /docs/designs/3.benchmarks/spec2006.md: -------------------------------------------------------------------------------- 1 | # SPEC CPU 2006 2 | 3 | ## Preparing SPEC CPU2006 Source Code 4 | 5 | Before using this project, you need to prepare the SPEC CPU2006 program source code yourself. Please follow these steps: 6 | 7 | 1. Obtain the SPEC CPU2006 source code (we cannot provide the source code due to licensing restrictions). 8 | 2. It is recommended to store the SPEC CPU2006 source code directory separately, not in the same location as this repository. 9 | 3. Rename the obtained source code folder to "spec2006", like ~/workspace/spec2006. 10 | 4. Please do not modify the SPEC CPU2006 source code, as this may cause the build to fail. 11 | 5. Note that the spec2006/default.nix directory in this repository is different from the SPEC CPU2006 source code directory. The former can be considered as a Nix build script. 12 | 13 | Note: Generating checkpoints may take several or more than ten hours, depending on the complexity of the benchmark. 14 | 15 | Please note that the build process may take a considerable amount of time: 16 | 17 | 1. First, the script will fetch and compile the RISC-V GCC toolchain, Linux kernel, QEMU, and other necessary components. This step takes approximately 1 hour. 18 | 19 | 2. Then, it will use QEMU for profiling, SimPoint sampling, and QEMU checkpoint generation. Generating spec2006 ref input checkpoint typically requires about 10 hours. 20 | 21 | If you want to quickly test the system, you can start by setting the input size to "test": 22 | 23 | 1. Edit the `conf.nix` file 24 | 2. Change `size = xxx` to `size = "test"` 25 | 26 | With the test input size, the entire process should complete in about 30 minutes. 27 | 28 | Finally, it will generate a result folder, you will get all the checkpoints in the result folder 29 | 30 | If you want to back up some checkpoints: 31 | run 32 | ```bash 33 | nom-build -j 30 34 | python3 backup_checkpoints.py 35 | ``` 36 | It will copy checkpoints from nix path to local pwd path, named backup_XXX (timestamp). 37 | Notice: backup_XXX is about 100GB! 
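Putting the quick-test advice above into a single command, a minimal invocation could look like the sketch below. The argument names (`src`, `size`) and the `cpts-simpoint` attribute are assumptions modeled on this repository's other examples; check `examples/spec2006/config.nix` and `examples/spec2006/test-opts.nix` in your checkout before relying on them:

```bash
# Hypothetical quick test using the "test" input size. Adjust the SPEC CPU2006
# source path, argument names, and target attribute to match your local setup.
nix-build examples/spec2006 \
  --arg src /path/to/spec2006 \
  --argstr size test \
  -A cpts-simpoint
```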
38 | -------------------------------------------------------------------------------- /docs/designs/4.builders/1.imgBuilder.md: -------------------------------------------------------------------------------- 1 | # 镜像构建器(Image Builder) 2 | -------------------------------------------------------------------------------- /docs/designs/4.builders/2.cptBuilder.md: -------------------------------------------------------------------------------- 1 | # 切片构建器(Checkpoint Builder) 2 | -------------------------------------------------------------------------------- /docs/designs/4.builders/images/common.py: -------------------------------------------------------------------------------- 1 | from pydot import Dot, Edge, Node, Graph, Cluster 2 | from typing import TypeVar 3 | T = TypeVar("T") 4 | 5 | def safe_set(args: dict, key: str, value): 6 | if key not in args: args[key] = value 7 | class CCluster(Cluster): # Connectable Cluster 8 | def __init__(self, name, **args): 9 | safe_set(args, "label", name) 10 | safe_set(args, "penwidth", 2) 11 | Cluster.__init__(self, name, **args) 12 | self._connect_node_ = addNode(self, "_connect_node_", label="", 13 | shape="none", width=0, height=0, margin=0) 14 | class CDot(Dot): # Compound Dot 15 | def __init__(self, *vargs, **args): 16 | args["compound"] = True 17 | safe_set(args, "bgcolor", "transparent") 18 | Dot.__init__(self, *vargs, **args) 19 | self.set_node_defaults(shape="box") 20 | self.set_edge_defaults(color="#00000044") 21 | 22 | def addNode(g: Graph|CCluster, name, **args): 23 | if "label" not in args: args["label"] = name 24 | n = Node(g.get_name()+name, **args) 25 | g.add_node(n) 26 | return n 27 | def addEdge(g: Graph, n1: Node|CCluster, n2: Node|CCluster, **args) -> Edge: 28 | # auto edge color 29 | if isinstance(n1, Node) and n1.get("color"): safe_set(args, "color", n1.get("color")) 30 | if isinstance(n1, Graph) and n1.get("pencolor"): safe_set(args, "color", n1.get("pencolor")) 31 | # auto edge width 32 | if n1.get("penwidth"): safe_set(args, "penwidth", n1.get("penwidth")) 33 | 34 | if isinstance(n1, CCluster): l = n1._connect_node_; args["ltail"] = n1.get_name() 35 | else: l = n1 36 | if isinstance(n2, CCluster): r = n2._connect_node_; args["lhead"] = n2.get_name() 37 | else: r = n2 38 | e = Edge(l.get_name(), r.get_name(), **args) 39 | g.add_edge(e) 40 | return e 41 | def addCluster(g: Graph|CCluster, name, **args): 42 | s=CCluster(name, **args) 43 | g.add_subgraph(s) 44 | return s 45 | def add(g: Graph, item: T) -> T: 46 | if isinstance(item, Node): g.add_node(item) 47 | elif isinstance(item, Edge): g.add_edge(item) 48 | elif isinstance(item, CCluster): g.add_subgraph(item) 49 | else: raise Exception(f"add(g, item): unknown item type [{type(item)}]") 50 | return item 51 | 52 | class _Colors_: 53 | def set(self, item: Node|Graph, background, boundary): 54 | if isinstance(item, Node): 55 | item.set("style", "filled") # TODO: safe add style 56 | item.set("fillcolor", background) 57 | item.set("color", boundary) 58 | elif isinstance(item, Graph): 59 | item.set("bgcolor", background) 60 | item.set("pencolor", boundary) 61 | def benchmark (self, item: Node|Graph): self.set(item, "#D5E8D4", "#82B366") 62 | def builder (self, item: Node|Graph): self.set(item, "#F5F5F5", "#666666") 63 | def imgBuilder (self, item: Node|Graph): self.set(item, "#CCE5FF", "#666666") 64 | def cptBuilder (self, item: Node|Graph): self.set(item, "#F8CECC", "#B85450") 65 | def gcpt (self, item: Node|Graph): self.set(item, "#DAE8FC", "#6C8EBF") 66 | def output (self, item: 
Node|Graph): self.set(item, "#FFE6CC", "#D79B00") 67 | set_colors = _Colors_() 68 | -------------------------------------------------------------------------------- /docs/designs/4.builders/images/deps_dot.py: -------------------------------------------------------------------------------- 1 | from common import CDot, CCluster, addNode, addEdge, add, set_colors 2 | 3 | graph = CDot(label="Deterload Dependency Graph", splines="line") 4 | graph.set_node_defaults(margin=0) 5 | 6 | class ImgBuilder(CCluster): 7 | class GCPT(CCluster): 8 | class OpenSBI(CCluster): 9 | class Linux(CCluster): 10 | class InitRamFs(CCluster): 11 | class Base(CCluster): 12 | def __init__(self, **args): 13 | CCluster.__init__(self, "base", **args) 14 | self.gen_init_cpio = addNode(self, "gen_init_cpio") 15 | self.cpio_list = addNode(self, "cpio_list") 16 | class Overlays(CCluster): 17 | def __init__(self, **args): 18 | CCluster.__init__(self, "overlays", **args) 19 | self.busybox = addNode(self, "busybox") 20 | self.before_workload = addNode(self, "before_workload") 21 | self.qemu_trap = addNode(self, "qemu_trap") 22 | self.nemu_trap = addNode(self, "nemu_trap") 23 | self.inittab = addNode(self, "inittab") 24 | self.run_sh = addNode(self, "run_sh", label="run.sh") 25 | def __init__(self, **args): 26 | CCluster.__init__(self, "initramfs", **args) 27 | self.base = add(self, self.Base()) 28 | self.overlays = add(self, self.Overlays()) 29 | def __init__(self, **args): 30 | CCluster.__init__(self, "linux", **args) 31 | self.initramfs = add(self, self.InitRamFs()) 32 | def __init__(self, **args): 33 | CCluster.__init__(self, "opensbi", **args) 34 | self.dts = addNode(self, "dts") 35 | self.linux = add(self, self.Linux()) 36 | def __init__(self, **args): 37 | CCluster.__init__(self, "gcpt", **args, penwidth=3) 38 | set_colors.gcpt(self) 39 | self.opensbi = add(self, self.OpenSBI()) 40 | def __init__(self, **args): 41 | CCluster.__init__(self, "imgBuilder", **args) 42 | set_colors.imgBuilder(self) 43 | self.gcpt = add(self, self.GCPT()) 44 | 45 | class CptBuilder(CCluster): 46 | def __init__(self, **args): 47 | CCluster.__init__(self, "cptBuilder", **args) 48 | set_colors.cptBuilder(self) 49 | self.qemu = addNode(self, "qemu") 50 | self.nemu = addNode(self, "nemu") 51 | self.stage1_profiling = addNode(self, "stage1-profiling") 52 | self.simpoint = addNode(self, "simpoint") 53 | self.stage2_cluster = addNode(self, "stage2-cluster") 54 | self.stage3_checkpoint = addNode(self, "stage3-checkpoint") 55 | addEdge(self, self.qemu, self.stage1_profiling) 56 | addEdge(self, self.nemu, self.stage1_profiling) 57 | addEdge(self, self.simpoint, self.stage2_cluster) 58 | addEdge(self, self.stage1_profiling, self.stage2_cluster) 59 | addEdge(self, self.qemu, self.stage3_checkpoint) 60 | addEdge(self, self.nemu, self.stage3_checkpoint) 61 | addEdge(self, self.stage2_cluster,self.stage3_checkpoint) 62 | 63 | class Builder(CCluster): 64 | def __init__(self, **args): 65 | CCluster.__init__(self, "builder", **args) 66 | set_colors.builder(self) 67 | self.imgBuilder = add(self, ImgBuilder()) 68 | self.cptBuilder = add(self, CptBuilder()) 69 | addEdge(self, self.imgBuilder.gcpt, self.cptBuilder.stage1_profiling) 70 | addEdge(self, self.imgBuilder.gcpt, self.cptBuilder.stage3_checkpoint) 71 | builder = add(graph, Builder()) 72 | 73 | inputs = add(graph, CCluster("inputs", label="", pencolor="transparent")) 74 | outputs = add(graph, CCluster("outputs", label="", pencolor="transparent")) 75 | 76 | benchmark = addNode(inputs, "benchmark"); 
set_colors.benchmark(benchmark) 77 | addEdge(graph, benchmark, builder.imgBuilder.gcpt.opensbi.linux.initramfs.overlays.run_sh) 78 | addEdge(graph, benchmark, builder.imgBuilder.gcpt.opensbi.linux.initramfs) 79 | 80 | class Output(CCluster): 81 | def __init__(self, name, **args): 82 | CCluster.__init__(self, name, **args) 83 | set_colors.output(self) 84 | self._level0_ = add(self, CCluster("_level0_", label="", bgcolor="transparent", pencolor="transparent")) 85 | self.benchmark = addNode(self._level0_, "benchmark"); set_colors.benchmark(self.benchmark) 86 | 87 | self._level1_ = add(self, CCluster("_level1_", label="", bgcolor="transparent", pencolor="transparent")) 88 | addEdge(self, self._level0_, self._level1_, color="transparent") 89 | self.gen_init_cpio = addNode(self._level1_, "gen_init_cpio"); set_colors.gcpt(self.gen_init_cpio) 90 | self.initramfs_base = addNode(self._level1_, "initramfs_base"); set_colors.gcpt(self.initramfs_base) 91 | self.busybox = addNode(self._level1_, "busybox"); set_colors.gcpt(self.busybox) 92 | self.before_workload = addNode(self._level1_, "before_workload"); set_colors.gcpt(self.before_workload) 93 | self.nemu_trap = addNode(self._level1_, "nemu_trap"); set_colors.gcpt(self.nemu_trap) 94 | self.qemu_trap = addNode(self._level1_, "qemu_trap"); set_colors.gcpt(self.qemu_trap) 95 | self.initramfs_overlays = addNode(self._level1_, "initramfs_overlays"); set_colors.gcpt(self.initramfs_overlays) 96 | self.initramfs = addNode(self._level1_, "initramfs"); set_colors.gcpt(self.initramfs) 97 | 98 | self._level2_ = add(self, CCluster("_level2_", label="", bgcolor="transparent", pencolor="transparent")) 99 | addEdge(self, self._level1_, self._level2_, color="transparent") 100 | self.linux = addNode(self._level2_, "linux"); set_colors.gcpt(self.linux) 101 | self.dts = addNode(self._level2_, "dts"); set_colors.gcpt(self.dts) 102 | self.opensbi = addNode(self._level2_, "opensbi"); set_colors.gcpt(self.opensbi) 103 | self.gcpt = addNode(self._level2_, "gcpt"); set_colors.gcpt(self.gcpt) 104 | self.img = addNode(self._level2_, "img"); set_colors.imgBuilder(self.img) 105 | 106 | self._level3_ = add(self, CCluster("_level3_", label="", bgcolor="transparent", pencolor="transparent")) 107 | addEdge(self, self._level2_, self._level3_, color="transparent") 108 | self.nemu = addNode(self._level3_, "nemu"); set_colors.cptBuilder(self.nemu) 109 | self.qemu = addNode(self._level3_, "qemu"); set_colors.cptBuilder(self.qemu) 110 | self.simpoint = addNode(self._level3_, "simpoint"); set_colors.cptBuilder(self.simpoint) 111 | self.stage1_profiling = addNode(self._level3_, "stage1-profiling"); set_colors.cptBuilder(self.stage1_profiling) 112 | self.stage2_cluster = addNode(self._level3_, "stage2-cluster"); set_colors.cptBuilder(self.stage2_cluster) 113 | self.stage3_checkpoint = addNode(self._level3_, "stage3-checkpoint"); set_colors.cptBuilder(self.stage3_checkpoint) 114 | self.cpts_simpoint = addNode(self._level3_, "cpts-simpoint"); set_colors.cptBuilder(self.cpts_simpoint) 115 | cpt_e = addEdge(self._level3_, self.stage3_checkpoint, self.cpts_simpoint, constraint=False, dir="none") 116 | cpt_e.set("color", f"{cpt_e.get('color')}:transparent:{cpt_e.get('color')}") 117 | 118 | 119 | output = add(outputs, Output("output")) 120 | addEdge(graph, builder.cptBuilder.stage3_checkpoint, output) 121 | 122 | from pydot import Graph, Node 123 | def addFlatEdge(g: Graph, n1: Node|CCluster, n2: Node|CCluster, **args): 124 | # args["constraint"] = False 125 | args["dir"] = "none" 126 | e = 
addEdge(g, n1, n2, **args) 127 | if e.get("color"): e.set("color", e.get("color")+"11") # transparent #xxxxxx11 128 | else: e.set("color", "#00000011") 129 | addFlatEdge(graph, benchmark, output.benchmark) 130 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs.base.gen_init_cpio, output.gen_init_cpio) 131 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs.base, output.initramfs_base) 132 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs.overlays.busybox, output.busybox) 133 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs.overlays.before_workload, output.before_workload) 134 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs.overlays.nemu_trap, output.nemu_trap) 135 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs.overlays.qemu_trap, output.qemu_trap) 136 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs.overlays, output.initramfs_overlays) 137 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux.initramfs, output.initramfs) 138 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.linux, output.linux) 139 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi.dts, output.dts) 140 | addFlatEdge(graph, builder.imgBuilder.gcpt.opensbi, output.opensbi) 141 | addFlatEdge(graph, builder.imgBuilder.gcpt, output.gcpt) 142 | addFlatEdge(graph, builder.imgBuilder, output.img) 143 | addFlatEdge(graph, builder.cptBuilder.nemu, output.nemu) 144 | addFlatEdge(graph, builder.cptBuilder.qemu, output.qemu) 145 | addFlatEdge(graph, builder.cptBuilder.simpoint, output.simpoint) 146 | addFlatEdge(graph, builder.cptBuilder.stage1_profiling, output.stage1_profiling) 147 | addFlatEdge(graph, builder.cptBuilder.stage2_cluster, output.stage2_cluster) 148 | addFlatEdge(graph, builder.cptBuilder.stage3_checkpoint, output.stage3_checkpoint) 149 | addFlatEdge(graph, builder.cptBuilder.stage3_checkpoint, output.cpts_simpoint) 150 | 151 | overrideScope = addNode(outputs, "overrideScope", shape="oval", color="black", penwidth=2, fontsize=20) 152 | addEdge(outputs, overrideScope, output, constraint=False) 153 | addEdge(outputs, output.dts, overrideScope, color="transparent") 154 | override = addNode(outputs, "override", shape="oval", color="black") 155 | for attr in dir(output): 156 | obj = getattr(output, attr) 157 | if isinstance(obj, Node) and not attr.startswith("_"): 158 | addEdge(outputs, override, obj, constraint=False, color="#00000022") 159 | 160 | # Tweaks 161 | addEdge(graph, builder.imgBuilder, builder.cptBuilder.qemu, color="transparent") 162 | for i in range(5): 163 | addEdge(graph, builder.cptBuilder.stage3_checkpoint, output.benchmark, color="transparent") 164 | 165 | graph.write(__file__.replace("_dot.py", "_py.dot")) 166 | -------------------------------------------------------------------------------- /docs/designs/4.builders/images/overview_dot.py: -------------------------------------------------------------------------------- 1 | from common import CDot, CCluster, add, addNode, addEdge, set_colors 2 | 3 | graph = CDot(label="Deterload\nWorkflow Overview") 4 | 5 | benchmark = addNode(graph, "benchmark") 6 | set_colors.benchmark(benchmark) 7 | 8 | class Builder(CCluster): 9 | def __init__(self): 10 | CCluster.__init__(self, "builder"); set_colors.builder(self) 11 | self.imgBuilder = addNode(self, "imgBuilder"); set_colors.gcpt(self.imgBuilder) 12 | self.cptBuilder = addNode(self, "cptBuilder"); set_colors.cptBuilder(self.cptBuilder) 13 | addEdge(self, self.imgBuilder, 
self.cptBuilder) 14 | builder = add(graph, Builder()) 15 | addEdge(graph, benchmark, builder.imgBuilder) 16 | 17 | output = addNode(graph, "output"); set_colors.output(output) 18 | addEdge(graph, builder.cptBuilder, output) 19 | 20 | graph.write(__file__.replace("_dot.py", "_py.dot")) 21 | -------------------------------------------------------------------------------- /docs/designs/4.builders/index.md: -------------------------------------------------------------------------------- 1 | # 构建框架(Builders) 2 | 3 | ![](./images/overview_py.svg) 4 | 5 | ![](./images/deps_py.svg) 6 | 7 | TODO: 8 | 9 | The project uses Nix to manage dependencies and build the necessary components: 10 | 11 | - QEMU: Modified version of QEMU with checkpoint and profiling capabilities 12 | - Simpoint: Simpoint is a tool for profiling and checkpointing in XiangShan 13 | - OpenSBI: RISC-V OpenSBI firmware 14 | - Linux: Custom Linux kernel image 15 | - Profiling tools: Scripts and plugins for analyzing checkpoint data 16 | -------------------------------------------------------------------------------- /docs/extract_comments.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import re, sys, os 3 | 4 | if len(sys.argv) < 2: 5 | print(f"Usage: {sys.argv[0]} ") 6 | sys.exit(1) 7 | 8 | source_file = sys.argv[1] 9 | output_file = sys.argv[2] if len(sys.argv)>2 else os.path.splitext(source_file)[0] + ".md" 10 | 11 | file_contentntent: str 12 | with open(source_file, 'r') as f: file_contentntent = f.read() 13 | 14 | matched: list[tuple[str,str]] = re.findall(r"([^\n]*/\*\*)(.*?)\*/", file_contentntent, re.DOTALL) 15 | processed_comments_content: list[str] = [] 16 | for groups in matched: 17 | # replace the leading .../** with spaces 18 | comment_content = len(groups[0])*" " + groups[1] 19 | lines = comment_content.split('\n') 20 | 21 | # remove leading and tailing empty lines 22 | if len(lines[0].strip(' '))==0: lines = lines[1:] 23 | if len(lines[-1].strip(' '))==0: lines = lines[:-1] 24 | 25 | # get indent of each line 26 | indents = map(lambda line: len(line) - len(line.lstrip(' ')), lines) 27 | min_indent = min(indents) 28 | 29 | processed_lines = lines 30 | # remove min indent 31 | processed_lines = map(lambda line: line[min_indent:], processed_lines) 32 | # remove tailing spaces 33 | processed_lines = map(lambda line: line.rstrip(' '), processed_lines) 34 | 35 | processed_comments_content.append('\n'.join(processed_lines)) 36 | 37 | with open(output_file, "w") as f: f.write('\n\n'.join(processed_comments_content)) 38 | -------------------------------------------------------------------------------- /docs/generate_summary.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import glob, re, sys 3 | 4 | if len(sys.argv) < 2: 5 | print(f"Usage: {sys.argv[0]} ") 6 | sys.exit(1) 7 | 8 | def generate_section_body(root_dir:str, sub_dir:str) -> list[str]: 9 | mds_path = glob.glob(f"{sub_dir}/**/*.md", root_dir=root_dir, recursive=True) 10 | mds_path = sorted(mds_path, key=lambda path: path.rstrip("index.md")) 11 | entries: list[str] = [] 12 | levels: list[int] = [] 13 | for md_path in mds_path: 14 | level = md_path.count('/') 15 | if md_path.endswith("index.md"): 16 | level -= 1 17 | title: str 18 | with open(f"{root_dir}/{md_path}", 'r') as f: title = (re.findall("^# (.*)", f.readline())+[""])[0] 19 | entries.append(f"{' '*level}* [{title}]({md_path})") 20 | levels.append(level) 21 | min_level = 
min(levels) 22 | well_indented_entries = [entry[2*min_level:] for entry in entries] 23 | return well_indented_entries 24 | 25 | print("# Summary") 26 | print("") 27 | print("# 🏠入门(Get Started)") 28 | print("") 29 | print("* [README.md](./index.md)") 30 | print("") 31 | print("# 📝使用(Usages)") 32 | print("") 33 | for line in generate_section_body(sys.argv[1], "usages/"): 34 | print(line) 35 | print("") 36 | print("# 🖊️设计(Designs)") 37 | print("") 38 | for line in generate_section_body(sys.argv[1], "designs/"): 39 | print(line) 40 | print("") 41 | print("# 🗂️参考(References)") 42 | print("") 43 | for line in generate_section_body(sys.argv[1], "references/"): 44 | print(line) 45 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | {{ #include ../README.md:main }} 2 | -------------------------------------------------------------------------------- /docs/references/1.configurable.md: -------------------------------------------------------------------------------- 1 | # 可配参数(Configurable Arguments) 2 | 3 | 13 | 14 | ## 共通(Common) 15 | 16 | {{ #include ./default_extract.mkd }} 17 | 18 | ## 构建器(Builders) 19 | 20 | {{ #include ./builders_extract.mkd }} 21 | 22 | ## OpenBLAS 23 | 24 | {{ #include ./openblas_extract.mkd }} 25 | 26 | ## SPEC CPU 2006 27 | 28 | {{ #include ./spec2006_extract.mkd }} 29 | -------------------------------------------------------------------------------- /docs/references/2.benchmarks_scope.md: -------------------------------------------------------------------------------- 1 | # 基准测试围(Benchmarks Scope) 2 | -------------------------------------------------------------------------------- /docs/references/3.builders_scope.md: -------------------------------------------------------------------------------- 1 | # 构建器围(Builders Scope) 2 | -------------------------------------------------------------------------------- /docs/references/4.status/gen_table.js: -------------------------------------------------------------------------------- 1 | function gen_table(div_id, csv_url) { Papa.parse(csv_url, { 2 | download: true, 3 | // results.data is a two dimensional array 4 | complete: function(results) { 5 | data = results.data.slice(0,-1) // last line is empty, remove 6 | data = data.map(row => row.slice(0,-1)) // last element of each is empty, remove 7 | indexcol = data[0].map(function(ele) { 8 | m = ele.match(/[0-9]+\.[^\.]+/) 9 | return m ? 
m[0] : "" 10 | }) 11 | indexcol[0]="Date"; indexcol[1]="Commit"; indexcol[2]="Note"; indexcol[3]="result/"; 12 | data = [indexcol].concat(data) 13 | 14 | headerValues = data.map(row => row[0]) 15 | cellsValues = data.map(row => row.slice(1)) 16 | cellsValues = cellsValues.map(row => row.map(ele => ele.replace("/nix/store/", ""))) 17 | 18 | // https://www.geeksforgeeks.org/how-to-create-hash-from-string-in-javascript/ 19 | function hash(str) { 20 | return str.split('').reduce((hash, char) => 21 | {return char.charCodeAt(0) + (hash << 6) + (hash << 16) - hash;}, 0) 22 | - 1;} // make the empty string color white (return -1 (255)) 23 | function color(i) { return `rgb(${i&0xff},${(i>>8)&0xff},${(i>>16)&0xff})` } 24 | cellsColors = cellsValues.map(row => row.map(ele => color(hash(ele)))) 25 | cellsColors[0] = Array(cellsColors.length).fill("white") 26 | function fontcolor(i) { 27 | // luminance algorithm is provided claude.ai 28 | luminance = (0.299*(i&0xff) + 0.587*((i>>8)&0xff) + 0.114*((i>>16)&0xff)) / 0xff 29 | return luminance>0.5 ? "black" : "white" 30 | } 31 | cellsFontColors = cellsValues.map(row => row.map(ele => fontcolor(hash(ele)))) 32 | cellsFontColors[0] = Array(cellsFontColors.length).fill("black") 33 | 34 | plotDiv = document.getElementById(div_id); 35 | rowHeight = 20 36 | plotDiv.style.width = `${100 * headerValues.length}px` 37 | plotDiv.style.height = `${rowHeight * data[0].length}px` 38 | // https://plotly.com/javascript/reference/table/ 39 | Plotly.newPlot(plotDiv, /*data*/[{ 40 | type: "table", 41 | header: { 42 | values: headerValues, 43 | align: "left", 44 | font: {family: "mono"}, 45 | height: rowHeight, 46 | }, 47 | cells: { 48 | values: cellsValues, 49 | align: "left", 50 | font: {family: "mono", color: cellsFontColors}, 51 | fill: {color: cellsColors}, 52 | height: rowHeight, 53 | }, 54 | }], /*layout*/ { 55 | margin: {b:0, l:0, r:0, t:0}, 56 | }) 57 | } 58 | })} 59 | -------------------------------------------------------------------------------- /docs/references/4.status/index.md: -------------------------------------------------------------------------------- 1 | This page will be deprecated in future, due to it requiring a serial execution of workflows. 2 | Otherwise, simultaneous workflows compete to git merge the `data` branch. 3 | 4 | # 构建状态(Build Status) 5 | 6 | 下面的表格展示构建出工作负载的状态,具体说明如下: 7 | 8 | * `Date`行表示构建开始的时间,格式为年月日时分秒(yymmddhhmmss)。 9 | 各列按照`Date`降序排列(最新排最前面)。 10 | * `Commit`行显示每次构建对应的Git commit的哈希值。 11 | * `Note`行包含简单的说明(主要是说明为什么哈希值发生变化)。 12 | * `result/`行及其下方的行表示构建结果的Nix store哈希值。 13 | 每个单元格都用颜色标记,不同的颜色表示不同的哈希值。 14 | 通过这种颜色标记,可以轻松看出多次构建之间是否保持了**确定性**。 15 | 16 | The tables below demonstrate the status of built workloads, with the following details: 17 | 18 | * The `Date` row indicates the build start time in yymmddhhmmss format. 19 | Columns are sorted by `Date` in descending order (most recent first). 20 | * The `Commit` row displays the Git commit hash associated with each build. 21 | * The `Note` row shows a simple explanation (mainly explains why hash changed). 22 | * The `result/` row and the subsequent rows indicates the Nix store hashes of build results. 23 | Each cell is color-coded, with different colors indicating distinct hash values. 24 | This color coding makes it straightforward to verify **deterministic** build across multiple builds. 25 | 26 | ## SPEC2006 27 | 28 |
29 |
30 |
31 | 32 | 33 | ## OpenBLAS 34 | 35 |
36 |
37 |
38 | 39 | 40 | 41 | 42 | 46 | -------------------------------------------------------------------------------- /docs/usages/1.building_outputs.md: -------------------------------------------------------------------------------- 1 | # 构建工作负载(Building Workloads) 2 | 3 | Deterload是一套基于nix开发的工作负载构建系统。 4 | 构建工作负载主要是使用nix, 5 | 你可能会心头一紧,🙀“我就是想构建一些工作负载,还要需要一套新的编程语言/一个包管理器?”。 6 | 😺放轻松!不用担心! 7 | 尽管nix的完整学习曲线较陡峭, 8 | 但在本项目中,你只需要掌握少量直观的nix命令和语法即可。 9 | 10 | Deterload is a workload building system developed based on nix. 11 | Building workloads mainly involves using nix. 12 | You might tense up, 🙀 "I just want to build some workloads, why do I need a new programming language/package manager?" 13 | 😺 Relax! Don't worry! 14 | Although nix has a steep learning curve overall, 15 | in this project, you only need to master a few intuitive nix commands and syntax. 16 | 17 | ## 基础构建(Basic Building) 18 | 19 | 让我们从最简单的例子开始——构建一套OpenBLAS切片。 20 | 只需一行命令: 21 | 22 | Let's start with the simplest example — building an OpenBLAS checkpoint. 23 | It only takes one command: 24 | 25 | ```bash 26 | nix-build examples/openblas -A cpts-simpoint 27 | ``` 28 | 29 | 这行命令的组成: 30 | 31 | * `nix-build`是nix用于构建包的基础命令 32 | * `examples/openblas`指定了openblas 33 | * `-A cpts-simpoint`指定了构建目标基于simpoint的切片 34 | * 提示1:如果你想看详细的构建信息(很酷炫的树形依赖图、任务数统计、时间统计等等), 35 | 你可以将`nix-build`替换为`nom-build`(一个`nix-build`的第三方包装命令`)。 36 | * 提示2:其中`examples/openblas`是一个结构体(nix里被成为attribute set,类似python字典), 37 | 包含了多个OpenBLAS工作负载相关的包,比如`-A benchmark`、`-A linux`、`-A qemu`和`-A cpts-simpoint`等等。 38 | * 提示3:如果你的shell有命令补全功能,`nix-build -A`敲tab键能给你补全出openblas里所有的包。 39 | 其中`-A cpts-simpoint`是我们需要的切片。 40 | 41 | This command consists of: 42 | 43 | * `nix-build` is nix's basic command for building packages 44 | * `examples/openblas` specifies openblas 45 | * `-A cpts-simpoint` specifies the build target is checkpoints based on simpoint 46 | * Tip 1: If you want to see detailed build information (cool dependency trees, task statistics, time statistics, etc.), 47 | you can replace `nix-build` with `nom-build` (a third-party wrapper for `nix-build`). 48 | * Tip 2: Here `examples/openblas` is a structure (called attribute set in nix, similar to Python dictionary), 49 | containing multiple OpenBLAS workload-related packages, such as `-A benchmark`, `-A linux`, `-A qemu`, and `-A cpts-simpoint`, etc. 50 | * Tip 3: If your shell has command completion, pressing tab after `nix-build -A` will show all packages in openblas. 51 | Among these, `-A cpts-simpoint` is the checkpoint we need. 52 | 53 | 构建OpenBLAS的切片需要几个小时。 54 | 构建完成后会输出类似这样的路径: 55 | 56 | Building an OpenBLAS checkpoin takes several hours. 57 | After completion, it outputs a path like this: 58 | 59 | ``` 60 | /nix/store/6rbfs8nx9xiv1s7z5xbi7m6djbkn9sgh-openblas_gcc_1410_RISCV64_GENERIC_glibc_qemu_20M_maxK30_1core_cpt 61 | ``` 62 | 63 | nix会自动将该路径符号链接到`./result`。 64 | 你可以通过`-o`选项来改变默认符号链接的目标地址: 65 | 66 | nix will automatically create a symbolic link to this path at `./result`. 67 | You can change the default symbolic link target using the `-o` option: 68 | 69 | ```bash 70 | nix-build examples/openblas -A cpts-simpoint -o result-openblas.cpts-simpoint 71 | ``` 72 | 73 | 值得注意的是,这一次构建`cpts-simpoint`会非常快速。 74 | 这是因为nix采用的确定性构建的机制。 75 | 这一次构建和上一次构建除了名字以外没啥不同,所以nix直接复用之前的构建结果。 76 | 77 | Notably, this second build of `cpts-simpoint` will be very quick. 78 | This is due to nix's deterministic build mechanism. 79 | Since this build is identical to the previous one except for the name, nix directly reuses the previous build result. 
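想快速验证这种复用,可以直接比较两个符号链接所指向的store路径(假设你已经运行过上面两条构建命令):

A quick way to see this reuse for yourself is to compare the store paths behind the two symlinks (assuming you have run both build commands above):

```bash
# Both links should resolve to the same /nix/store path, since only the
# symlink name differs between the two builds.
readlink result result-openblas.cpts-simpoint
```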
80 | 81 | ## 配参数(Configuring Arguments) 82 | 83 | 构建产物的路径名(如上面的例子)包含了多个标签,例如: 84 | 85 | * 编译器版本(gcc 14.1.0) 86 | * OpenBLAS的目标架构(RISCV64_GENERIC) 87 | * ... 88 | 89 | The build output path (as in the example above) contains multiple tags, such as: 90 | 91 | * Compiler version (gcc 14.1.0) 92 | * OpenBLAS target architecture (RISCV64_GENERIC) 93 | * ... 94 | 95 | 这些标签都是默认配置中预设好的参数。 96 | 我们可以根据自己的需求配置参数。 97 | Deterload支持三种配置方式: 98 | 99 | * 命令行 100 | * 配置文件 101 | * 命令行+配置文件 102 | 103 | These tags represent parameters set in the default configuration. 104 | We can configure these parameters according to our needs. 105 | Deterload supports three configuration methods: 106 | 107 | * Command line 108 | * Configuration file 109 | * Command line + Configuration file 110 | 111 | ### 命令行(Command Line) 112 | 113 | 使用`--arg key value`的方式配置参数,例如: 114 | 115 | Configure parameters using `--arg key value`, for example: 116 | 117 | ```bash 118 | nix-build examples/openblas --arg cpt-maxK '"10"' -A cpts-simpoint 119 | ``` 120 | 121 | * `--arg cpt-maxK '"10"':设置simpoint的maxK设为10 122 | 123 | 注意:nix对参数类型有严格要求。 124 | 比如`cpt-maxK`是一个字符串类型的参数,因此接收的参数需要加双引号(额外加单引号是为了shell不要吞掉双引号)。 125 | 126 | * `--arg cpt-maxK '"10"'`: Set simpoint's maxK to 10 127 | 128 | Note: nix has strict type requirements for parameters. 129 | For instance, `cpt-maxK` is a string parameter, so it needs double quotes (with extra single quotes to prevent shell from stripping the double quotes). 130 | 131 | 对于字符串类型的参数,双引号单引号过于麻烦,可以用`--argstr key value`来简化`--arg key '"value"'`: 132 | 133 | For string parameters, dealing with double and single quotes is cumbersome, so you can use `--argstr key value` to simplify `--arg key '"value"'`: 134 | 135 | ```bash 136 | nix-build examples/openblas --argstr cpt-maxK 10 -A cpts-simpoint 137 | ``` 138 | 139 | ### 配置文件(Configuration File) 140 | 141 | 你可能会想:“我可以把命令行写入写一个shell脚本,岂不是就有了‘配置文件’了嘛”。像这样: 142 | 143 | You might think: "I could write these command lines into a shell script, and that would be a 'configuration file', right?" Like this: 144 | 145 | ```bash 146 | #!/usr/bin/env bash 147 | # 这是一个难以保证确定性构建的“配置文件” 148 | # This is a "configuration file" that can't guarantee deterministic builds 149 | nix-build examples/openblas --argstr cpt-maxK 10 -A cpts-simpoint 150 | ``` 151 | 152 | 这样的“配置文件”并不适合协同开发,因为: 153 | 154 | * 不同开发者用的Deterload版本可能不同,构建结果难以一致。 155 | * 参数名称和含义可能会因版本变化而有所不同。 156 | 157 | This type of "configuration file" isn't suitable for collaborative development because: 158 | 159 | * Different developers might use different Deterload versions, making build results inconsistent. 160 | * Parameter names and meanings might change between versions. 161 | 162 | 为了解决这些问题,我们可以使用nix来编写配置文件。 163 | 例如,以下是一个与上述命令行等价的配置文件: 164 | 165 | To solve these issues, we can use nix to write configuration files. 
166 | Here's a configuration file equivalent to the above command line: 167 | 168 | ```nix 169 | # vec_maxK10.nix 170 | {...}@args: import (builtins.fetchTarball { 171 | url = "https://github.com/OpenXiangShan/Deterload/archive/v0.1.4.tar.gz"; 172 | # nix-prefetch-url --unpack https://github.com/OpenXiangShan/Deterload/archive/v0.1.4.tar.gz 173 | sha256 = "0l7bfjqjjlxkg8addgm6gkjv7p1psisv1wy648xwa8nw3nmgaw5d"; 174 | }) ({ 175 | cpt-maxK = "10"; 176 | } // args) 177 | ``` 178 | 179 | 这段代码主要分成两个部分: 180 | 181 | * 固定版本的部分: 182 | * `url`设定了Deterload的源码来自GitHub,版本为v0.1.4。 183 | * `sha256`是Deterload v0.1.4源码的sha256值,这个nix确定性构建的关键部分。 184 | 你可以用`nix-prefetch-url`获取此值(见代码注释)。 185 | * 配置参数的部分: 186 | * 配置了`cpt-maxK`,具体含义与前文一致。 187 | 188 | This code consists of two main parts: 189 | 190 | * Version fixing part: 191 | * `url` specifies that Deterload's source code comes from GitHub, version v0.1.4. 192 | * `sha256` is the sha256 value of Deterload v0.1.4 source code, crucial for nix's deterministic building. 193 | You can get this value using `nix-prefetch-url` (see code comment). 194 | * Parameter configuration part: 195 | * Configures `cpt-maxK`, with meanings as explained earlier. 196 | 197 | 将上述代码保存为文件(例如`vec_maxK10.nix`)。 198 | 每个开发者只需运行以下命令,就能生成二进制级别一致的`cpts-simpoint`切片: 199 | 200 | Save this code as a file (e.g., `vec_maxK10.nix`). 201 | Any developer can run the following command to generate a binary-identical `cpts-simpoint` checkpoint: 202 | 203 | ```bash 204 | nix-build vec_maxK10.nix -A cpts-simpoint 205 | ``` 206 | 207 | 比如在我的电脑上获得的结果路径,以及第一个切片的md5sum应该和你得到一样: 208 | 209 | For example, the checkpoint path of the result, and the md5sum of the first checkpoint on my computer should match yours: 210 | 211 | ```bash 212 | # cd /nix/store/s3wxbj9rcxksn22v9ghlhikf1rvi4ybf-openblas_gcc_1410_RISCV64_ZVL128B_glibc_qemu_20M_maxK10_1core_cpt/miao && ls 213 | 2 186 2343 3274 4093 4668 5991 6285 6357 214 | # md5sum 2/_2_0.168009.gz 215 | 43305c3b69822ea9fd34b5e08078ad68 result/miao/2/_2_0.168009.gz 216 | ``` 217 | 218 | ### 命令行+配置文件(Command Line + Configuration File) 219 | 220 | Deterload支持命令行+配置文件混合的配置方式。 221 | 以上述`vec_maxK10.nix`为例,命令行参数的优先级高于配置文件参数: 222 | 223 | Deterload supports mixed configuration using command line and configuration files. 224 | Using the above `vec_maxK10.nix` as an example, command line parameters take precedence over configuration file parameters: 225 | 226 | ``` 227 | nix-build vec_maxK10.nix --argstr cpt-maxK 20 --argstr cpt-intervals 1000000 -A openblas.cpt 228 | ``` 229 | 230 | 上述命令覆盖了原本配置文件的`cpt-maxK`改为了`"20"`,并将`cpt-intervals`设置为了`"1000000"`。 231 | 232 | This command overrides the original `cpt-maxK` in the configuration file to `"20"` and sets `cpt-intervals` to `"1000000"`. 233 | -------------------------------------------------------------------------------- /docs/usages/4.running/emulators.md: -------------------------------------------------------------------------------- 1 | # 仿真器(Emulators) 2 | -------------------------------------------------------------------------------- /docs/usages/4.running/gem5.md: -------------------------------------------------------------------------------- 1 | # GEM5 2 | 3 | 生成的工作负载(例如切片)可以在多个香山平台上运行,包括GEM5、Nemu和香山RTL。 4 | 5 | The generated workloads (e.g. checkpoints) are compatible with multiple XiangShan platforms including GEM5, Nemu, and XiangShan RTL. 
6 | 7 | ## 用法(Usage) 8 | 9 | 请按照[OpenXiangShan/GEM5](https://github.com/OpenXiangShan/GEM5)仓库中的配置指南进行设置。 10 | 11 | Please follow the configuration guidelines in the [OpenXiangShan/GEM5](https://github.com/OpenXiangShan/GEM5) repository. 12 | 13 | 注意:生成的切片中已包含恢复代码(见`opensbi/default.nix`), 14 | 因此不用设置$GCB_RESTORER环境变量。 15 | 16 | Note: The generated checkpoints has included the restorer code (see `opensbi/default.nix`), 17 | eliminating the need to set the $GCB_RESTORER environment variable. 18 | 19 | ## 故障排除(Troubleshooting) 20 | 21 | 请考虑在[Deterload issues](https://github.com/OpenXiangShan/Deterload/issues)中报告你遇到的问题。 22 | 23 | Please consider reporting your issues in [Deterload issues](https://github.com/OpenXiangShan/Deterload/issues). 24 | 25 | ### Difftest错误(Difftest Errors) 26 | 27 | * 访问香山GEM5的release页面 28 | * 下载稳定版本的NEMU 29 | * 设置相应的$GCBV_REF_SO环境变量 30 | 31 | 32 | * Visit the XiangShan GEM5 releases page 33 | * Download a stable version of NEMU 34 | * Set the corresponding `$GCBV_REF_SO` environment variable 35 | 36 | ### 常见运行时问题(General Runtime Issues) 37 | 38 | * 在gem5命令前添加`gdb --args`进行调试 39 | * 在本仓库或香山GEM5仓库中报告问题 40 | 41 | 42 | * Debug by prefixing your gem5 command with `gdb --args` 43 | * Report issues in either this repository or the XiangShan GEM5 repository 44 | 45 | ### 部分切片运行失败(Failures in Some Checkpoints) 46 | 47 | 虽然目前部分切片可能在GEM5中运行失败(这个问题正在解决中), 48 | 但约90%的切片应该能够正确执行,提供可靠的性能指标。 49 | 50 | While some checkpoints may currently fail in GEM5 (this is being addressed), 51 | approximately 90% should execute correctly, providing reliable performance metrics. 52 | 53 | ## 分数计算(Score Calculation) 54 | 55 | 要在使用GEM5运行切片后计算SPEC CPU 2006分数, 56 | 请使用[gem5_data_proc](https://github.com/shinezyy/gem5_data_proc)工具。 57 | 由于该工具最初是为内部切片设计的,可能需要进行一些小的调整。 58 | 例如,基准测试名称可能需要调整(如"hmmer"改为"456.hmmer")。 59 | 即使偶尔有切片运行失败, 60 | 这个工具仍然能够为SPEC CPU 2006提供准确的整体性能指标。 61 | 62 | To calculate SPEC CPU 2006 scores after running the checkpoints using GEM5, 63 | use the [gem5_data_proc](https://github.com/shinezyy/gem5_data_proc) tool. 64 | Minor adjustments to the tool may be necessary as it was designed for internal checkpoints. 65 | For example, benchmark names may need adaptation (e.g., "hmmer" to "456.hmmer"). 66 | Even with occasional checkpoint failures, 67 | this tool should provide accurate overall performance metrics for SPEC CPU 2006. 
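下面是一个示意性的重命名脚本,用于把简短的基准测试名映射为完整的SPEC CPU 2006编号;其中`results/<名称>`的目录布局只是假设,请按你自己的输出结构调整:

Below is an illustrative rename pass that maps short benchmark names to their full SPEC CPU 2006 identifiers before feeding results into gem5_data_proc; the `results/<name>` layout is an assumption, so adapt it to your own output structure:

```bash
# Hypothetical helper: rename result directories from short names to
# "NNN.name" identifiers. Extend the mapping as needed; nothing here is
# specific to gem5_data_proc's actual command-line interface.
declare -A spec_ids=( [hmmer]=456.hmmer [bzip2]=401.bzip2 [mcf]=429.mcf )
for short in "${!spec_ids[@]}"; do
  if [ -d "results/$short" ]; then
    mv "results/$short" "results/${spec_ids[$short]}"
  fi
done
```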
68 | -------------------------------------------------------------------------------- /docs/usages/4.running/index.md: -------------------------------------------------------------------------------- 1 | # 运行工作负载(Running Workloads) 2 | 3 | TODO: 4 | -------------------------------------------------------------------------------- /docs/usages/4.running/nemu.md: -------------------------------------------------------------------------------- 1 | # NEMU 2 | -------------------------------------------------------------------------------- /dump_result.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import json 4 | from pathlib import Path 5 | from itertools import product 6 | 7 | app_list = [ 8 | "bwaves", "gamess_cytosine", "gamess_gradient", "gamess_triazolium", 9 | "milc", "zeusmp", "gromacs", "cactusADM", "leslie3d", "namd", "dealII", 10 | "soplex_pds-50", "soplex_ref", "povray", "calculix", "GemsFDTD", "tonto", 11 | "lbm", "wrf", "sphinx3" 12 | ] 13 | 14 | spec_2017_list = [ 15 | "bwaves_1", "bwaves_2", "bwaves_3", "bwaves_4", "cactuBSSN", "namd", 16 | "parest", "povray", "lbm", "wrf", "blender", "cam4", "imagick", "nab", 17 | "fotonik3d", "roms", "perlbench_diff", "perlbench_spam", "perlbench_split", 18 | "gcc_pp_O2", "gcc_pp_O3", "gcc_ref32_O3", "gcc_ref32_O5", "gcc_small_O3", 19 | "mcf", "omnetpp", "xalancbmk", "x264_pass1", "x264_pass2", "x264_seek", 20 | "deepsjeng", "leela", "exchange2", "xz_cld", "xz_combined", "xz_cpu2006" 21 | ] 22 | 23 | spec2017_int_list = [ 24 | "perlbench_diff", "perlbench_spam", "perlbench_split", "gcc_pp_O2", 25 | "gcc_pp_O3", "gcc_ref32_O3", "gcc_ref32_O5", "gcc_small_O3", "mcf", 26 | "omnetpp", "xalancbmk", "x264_pass1", "x264_pass2", "x264_seek", 27 | "deepsjeng", "leela", "exchange2", "xz_cld", "xz_combined", "xz_cpu2006" 28 | ] 29 | 30 | spec2017_fp_list = list(set(spec_2017_list) - set(spec2017_int_list)) 31 | 32 | 33 | def profiling_instrs(profiling_log, spec_app, using_new_script=False): 34 | regex = r".*total guest instructions = (.*)\x1b.*" 35 | new_path = os.path.join(profiling_log, spec_app, "profiling.log") 36 | old_path = os.path.join(profiling_log, "{}-out.log".format(spec_app)) 37 | if using_new_script: 38 | path = new_path 39 | assert os.path.exists(new_path) 40 | elif os.path.exists(old_path): 41 | path = old_path 42 | elif os.path.exists(new_path): 43 | path = new_path 44 | else: 45 | print("Either {} or {} does not exist".format(old_path, new_path)) 46 | raise 47 | 48 | with open(path, "r", encoding="utf-8") as f: 49 | for i in f.readlines(): 50 | if "total guest instructions" in i: 51 | match = re.findall(regex, i) 52 | match = match[0].replace(',', '') 53 | return match 54 | return 0 55 | 56 | 57 | def cluster_weight(cluster_path, spec_app): 58 | points = {} 59 | weights = {} 60 | 61 | weights_path = f"{cluster_path}/{spec_app}/weights0" 62 | simpoints_path = f"{cluster_path}/{spec_app}/simpoints0" 63 | 64 | with open(weights_path, "r") as f: 65 | for line in f.readlines(): 66 | a, b = line.split() 67 | weights.update({"{}".format(b): "{}".format(a)}) 68 | 69 | with open(simpoints_path, "r") as f: 70 | for line in f.readlines(): 71 | a, b = line.split() 72 | # if float(weights[b]) > 1e-4: # ignore small simpoints 73 | points.update({a: weights.get(b)}) 74 | print(points) 75 | return points 76 | 77 | 78 | def per_checkpoint_generate_json(profiling_log, cluster_path, app_list, 79 | target_path): 80 | result = {} 81 | for spec in app_list: 82 | result.update({ 83 | spec: 
{ 84 | "insts": profiling_instrs(profiling_log, spec), 85 | 'points': cluster_weight(cluster_path, spec) 86 | } 87 | }) 88 | with open(target_path, "w") as f: 89 | json.dump(result, f, indent=4) 90 | 91 | 92 | def per_checkpoint_generate_worklist(cpt_path, target_path): 93 | print(cpt_path) 94 | print(target_path) 95 | cpt_path = cpt_path + "/" 96 | checkpoints = [] 97 | for item in os.scandir(cpt_path): 98 | if item.is_dir(): 99 | checkpoints.append(item.path) 100 | 101 | checkpoint_dirs = [] 102 | for item in checkpoints: 103 | item = item + "/checkpoint" 104 | for entry in os.scandir(item): 105 | checkpoint_dirs.append(entry.path) 106 | 107 | with open(target_path, "w") as f: 108 | for i in checkpoint_dirs: 109 | path = i.replace(cpt_path, "") 110 | name = path.replace('/', "_", 1) 111 | print("{} {} 0 0 20 20".format(name, path), file=f) 112 | 113 | 114 | def find_nix_path(base_path, suffix): 115 | for item in os.scandir(base_path): 116 | if item.is_symlink() and item.name.startswith('result'): 117 | target_path = os.readlink(item.path) 118 | if target_path.endswith(suffix): 119 | return item.path 120 | return None 121 | 122 | def generate_result_list(base_path, times, ids): 123 | result_list = [] 124 | 125 | profiling_path = find_nix_path(base_path, '1.profilings') 126 | cluster_path = find_nix_path(base_path, '2.clusters') 127 | checkpoint_path = find_nix_path(base_path, '3.checkpoints') 128 | 129 | 130 | if not profiling_path or not cluster_path: 131 | raise ValueError("Could not find the required nix result paths") 132 | 133 | for i, j, k in product(range(ids[0], times[0]), range(ids[1], times[1]), 134 | range(ids[2], times[2])): 135 | cluster = f"cluster-{i}-{j}" 136 | profiling = f"profiling-{k}" 137 | checkpoint = f"checkpoint-{i}-{j}-{k}" 138 | result_list.append({ 139 | "cl_res": cluster_path, 140 | "profiling_log": profiling_path, 141 | "checkpoint_path": checkpoint_path, # checkpoints dir 142 | "json_path": 143 | os.path.join(base_path, f"{cluster}.json"), # result json path 144 | "list_path": 145 | os.path.join(base_path, "checkpoint.lst"), 146 | }) 147 | 148 | print(result_list) 149 | return result_list 150 | 151 | 152 | def dump_result(base_path, spec_app_list, times, ids): 153 | result_list = generate_result_list(base_path, times, ids) 154 | 155 | for result in result_list: 156 | per_checkpoint_generate_json(result["profiling_log"], result["cl_res"], 157 | spec_app_list, result["json_path"]) 158 | per_checkpoint_generate_worklist(result["checkpoint_path"], 159 | result["list_path"]) 160 | 161 | 162 | spec_list=["400.perlbench", "410.bwaves", "433.milc", "436.cactusADM", "445.gobmk", "453.povray", "458.sjeng", "464.h264ref", "471.omnetpp", "482.sphinx3", "401.bzip2", "416.gamess", "434.zeusmp", "437.leslie3d", "447.dealII", "454.calculix", "459.GemsFDTD", "465.tonto", "473.astar", "483.xalancbmk", "403.gcc", "429.mcf", "435.gromacs", "444.namd", "450.soplex", "456.hmmer", "462.libquantum", "470.lbm", "481.wrf"] 163 | # spec_list=["436.cactusADM"] 164 | base_path = os.getcwd() 165 | times = [1, 1, 1] 166 | ids = [0, 0, 0] 167 | 168 | dump_result(base_path, spec_list, times, ids) -------------------------------------------------------------------------------- /examples/hello-nolibc/default.nix: -------------------------------------------------------------------------------- 1 | { ... }@args: let 2 | deterload = import ../..
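  # `../..` is Deterload's top-level default.nix; passing `args` forwards any
  # nix-build --arg/--argstr overrides (cores, cpt-* options, ...) to it unchanged.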
args; 3 | pkgs = deterload.deterPkgs; 4 | hello-nolibc = pkgs.riscv64-pkgs.callPackage ./hello-nolibc.nix {}; 5 | # TODO: refactor deterload to reduce duplicate here and below 6 | deterload-hello = deterload.build (pkgs.writeShScript "hello-run" {} '' 7 | ${hello-nolibc}/bin/hello 8 | ''); 9 | overrided = deterload-hello.overrideScope (final: prev: { 10 | initramfs_overlays = prev.initramfs_overlays.override { 11 | # TODO: refactor deterload to reduce duplicate here and above 12 | run_sh = pkgs.writeText "run.sh" "${hello-nolibc}/bin/hello"; 13 | }; 14 | }); 15 | in overrided 16 | -------------------------------------------------------------------------------- /examples/hello-nolibc/hello-nolibc.nix: -------------------------------------------------------------------------------- 1 | { runCommand 2 | , runCommandCC 3 | , linux 4 | }: let 5 | nolibc = runCommand "nolibc" {} '' 6 | path=$(tar tf ${linux.src} | grep tools/include/nolibc | sort | head -n1) 7 | tar xf ${linux.src} 8 | mv $path $out 9 | ''; 10 | hello-src = builtins.toFile "hello.c" '' 11 | #define DISABLE_TIME_INTR 0x100 12 | #define NOTIFY_PROFILER 0x101 13 | #define NOTIFY_PROFILE_EXIT 0x102 14 | #define GOOD_TRAP 0x0 15 | void nemu_signal(int a){ 16 | asm volatile ("mv a0, %0\n\t" 17 | ".insn r 0x6B, 0, 0, x0, x0, x0\n\t" 18 | : 19 | : "r"(a) 20 | : "a0"); 21 | } 22 | #include 23 | int main() { 24 | nemu_signal(NOTIFY_PROFILER); 25 | printf("Hello, World!\n"); 26 | nemu_signal(GOOD_TRAP); 27 | return 0; 28 | } 29 | ''; 30 | hello-nolibc = runCommandCC "hello-nolibc" {} '' 31 | mkdir -p $out/bin 32 | $CC -nostdlib -I ${nolibc} ${hello-src} -o $out/bin/hello 33 | ''; 34 | in hello-nolibc 35 | -------------------------------------------------------------------------------- /examples/hello-nolibc/test-opts.nix: -------------------------------------------------------------------------------- 1 | { 2 | A = ["cpt-0th" "sim"]; 3 | } 4 | -------------------------------------------------------------------------------- /examples/nanosleep/default.nix: -------------------------------------------------------------------------------- 1 | { ... }@args: let 2 | 3 | deterload = import ../.. args; 4 | pkgs = deterload.deterPkgs; 5 | riscv64-pkgs = pkgs.riscv64-pkgs; 6 | nolibc = riscv64-pkgs.runCommand "nolibc" {} '' 7 | path=$(tar tf ${riscv64-pkgs.linux.src} | grep tools/include/nolibc | sort | head -n1) 8 | tar xf ${riscv64-pkgs.linux.src} 9 | mv $path $out 10 | ''; 11 | nanosleep = riscv64-pkgs.runCommandCC "nanosleep" { 12 | passthru = {inherit nolibc;}; 13 | } '' 14 | mkdir -p $out/bin 15 | $CC -nostdlib -I ${nolibc} ${./nanosleep.c} -o $out/bin/nanosleep 16 | ''; 17 | # TODO: refactor deterload to reduce duplicate here and below 18 | deterload-nanosleep = deterload.build (pkgs.writeShScript "nanosleep-run" { 19 | passthru = {inherit nanosleep nolibc;}; 20 | } '' 21 | ${nanosleep}/bin/nanosleep 22 | ''); 23 | overrided = deterload-nanosleep.overrideScope (final: prev: { 24 | initramfs_overlays = prev.initramfs_overlays.override { 25 | # TODO: refactor deterload to reduce duplicate here and above 26 | run_sh = pkgs.writeText "run.sh" "${nanosleep}/bin/nanosleep"; 27 | }; 28 | }); 29 | in overrided 30 | -------------------------------------------------------------------------------- /examples/nanosleep/nanosleep.c: -------------------------------------------------------------------------------- 1 | #if !defined(__riscv) || __riscv_xlen != 64 2 | #error "This code is only supported on RISC-V 64-bit platforms." 
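/*
 * As in the hello-nolibc example, the workload below is bracketed by nemu_signal()
 * calls: NOTIFY_PROFILER (0x101) is raised before the nanosleep calls and
 * GOOD_TRAP (0x0) after they finish, with the signal code passed in a0 via the
 * custom opcode-0x6B instruction emitted by nemu_signal().
 */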
3 | #endif 4 | 5 | #define DISABLE_TIME_INTR 0x100 6 | #define NOTIFY_PROFILER 0x101 7 | #define NOTIFY_PROFILE_EXIT 0x102 8 | #define GOOD_TRAP 0x0 9 | void nemu_signal(int a){ 10 | asm volatile ("mv a0, %0\n\t" 11 | ".insn r 0x6B, 0, 0, x0, x0, x0\n\t" 12 | : 13 | : "r"(a) 14 | : "a0"); 15 | } 16 | 17 | #include 18 | #include 19 | 20 | void sleep_nanoseconds(long nanoseconds) { 21 | printf("%ldns\n", nanoseconds); 22 | struct timespec req, rem; 23 | req.tv_sec = 0; 24 | req.tv_nsec = nanoseconds; 25 | 26 | if (my_syscall2(__NR_nanosleep, &req, &rem) == -1) { 27 | my_syscall2(__NR_nanosleep, &rem, NULL); 28 | } 29 | } 30 | 31 | int main() { 32 | nemu_signal(NOTIFY_PROFILER); 33 | sleep_nanoseconds(1234L); 34 | sleep_nanoseconds(2134L); 35 | sleep_nanoseconds(1234L); 36 | nemu_signal(GOOD_TRAP); 37 | return 0; 38 | } 39 | -------------------------------------------------------------------------------- /examples/nyancat/default.nix: -------------------------------------------------------------------------------- 1 | { ... }@args: let 2 | deterload = import ../.. args; 3 | in deterload.build (deterload.deterPkgs.writeShScript "nyancat-run" {} '' 4 | timeout 20 ${deterload.deterPkgs.riscv64-pkgs.nyancat}/bin/nyancat -t 5 | '') 6 | -------------------------------------------------------------------------------- /examples/openblas/config.nix: -------------------------------------------------------------------------------- 1 | {...}@args: import ../. ({ 2 | cc = "gcc13"; 3 | cores = "2"; 4 | 5 | TARGET = "RISCV64_ZVL256B"; 6 | 7 | cpt-maxK = "10"; 8 | cpt-maxK-bmk = { 9 | "403.gcc" = "20"; 10 | "483.xalancbmk" = "30"; 11 | "openblas" = "50"; 12 | }; 13 | cpt-intervals = "1000000"; 14 | cpt-simulator = "qemu"; 15 | cpt-format = "zstd"; 16 | } // args) 17 | -------------------------------------------------------------------------------- /examples/openblas/default.nix: -------------------------------------------------------------------------------- 1 | { lib ? import 2 | /** 3 | TARGET: CPU TARGET for OpenBLAS. 4 | * **Type**: string 5 | * **Default value**: "RISCV64_GENERIC"` 6 | * **Available values**: `"RISCV64_GENERIC"`, `"RISCV64_ZVL128B"`, `"RISCV64_ZVL256B"` 7 | */ 8 | , TARGET ? "RISCV64_GENERIC" 9 | , ... 10 | }@args: 11 | assert lib.assertOneOf "TARGET" TARGET ["RISCV64_GENERIC" "RISCV64_ZVL128B" "RISCV64_ZVL256B"]; 12 | let 13 | deterload = import ../.. 
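  # Top-level Deterload import; `args` forwards whatever the caller passed
  # (e.g. a TARGET override), while TARGET itself is consumed below by package.nix.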
args; 14 | openblas = deterload.deterPkgs.callPackage ./package.nix { 15 | inherit TARGET; 16 | riscv64-libfortran = deterload.deterPkgs.riscv64-pkgs.gfortran.cc; 17 | riscv64-libc = deterload.deterPkgs.riscv64-stdenv.cc.libc.static; 18 | }; 19 | in deterload.build openblas 20 | -------------------------------------------------------------------------------- /examples/openblas/package.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , fetchFromGitHub 3 | , riscv64-cc 4 | , riscv64-fortran 5 | , writeShScript 6 | , lib 7 | 8 | , utils 9 | , riscv64-stdenv 10 | , riscv64-libc 11 | , riscv64-libfortran 12 | , TARGET 13 | }: let drv = stdenv.mkDerivation { 14 | pname = "openblas"; 15 | version = "0.3.28"; 16 | src = fetchFromGitHub { 17 | owner = "OpenMathLib"; 18 | repo = "OpenBLAS"; 19 | rev = "v0.3.28"; 20 | hash = "sha256-430zG47FoBNojcPFsVC7FA43FhVPxrulxAW3Fs6CHo8="; 21 | }; 22 | 23 | depsBuildBuild = [ 24 | riscv64-cc 25 | riscv64-fortran 26 | riscv64-libc 27 | ]; 28 | 29 | buildPhase = let 30 | makeFlags_common = let 31 | prefix = "${riscv64-cc}/bin/riscv64-unknown-linux-gnu-"; 32 | in [ 33 | "CC=${prefix}gcc" 34 | "AR=${prefix}ar" 35 | "AS=${prefix}as" 36 | "LD=${prefix}ld" 37 | "RANLIB=${prefix}ranlib" 38 | "NM=${prefix}nm" 39 | "FC=${riscv64-fortran}/bin/riscv64-unknown-linux-gnu-gfortran" 40 | 41 | "BINARY=64" 42 | "TARGET=${TARGET}" 43 | "DYNAMIC_ARCH=false" 44 | "CROSS=true" 45 | "HOSTCC=cc" 46 | # "ARCH=riscv64" 47 | # TODO: "USE_OPENMP=true" 48 | "NUM_THREADS=64" 49 | ]; 50 | makeFlags1 = makeFlags_common ++ [ 51 | "NO_STATIC=0" 52 | "NO_SHARED=1" 53 | # not run tests, only compilation 54 | "shared" 55 | ]; 56 | makeFlags2 = makeFlags_common ++ [ 57 | # benchmark/Makefile uses `cc` to compile and link, does not use `ld` directly. 58 | # Therefore, benchmark/Makefile does not receive LDFLAGS, only receives CFLAGS 59 | "CFLAGS=\"-L${riscv64-libfortran}/lib -L${riscv64-libc}/lib -static\"" 60 | "FFLAGS=\"-L${riscv64-libfortran}/lib -L${riscv64-libc}/lib -static\"" 61 | "-C benchmark" 62 | ]; 63 | in '' 64 | # 1. compile libopenblas 65 | make ${builtins.toString makeFlags1} 66 | # 2. 
compile benchmark 67 | make ${builtins.toString makeFlags2} 68 | ''; 69 | installPhase = '' 70 | mkdir -p $out/bin 71 | cp benchmark/*.goto $out/bin/ 72 | ''; 73 | doCheck = false; 74 | }; in writeShScript (utils.escapeName (builtins.concatStringsSep "_" [ 75 | "openblas" 76 | (lib.removePrefix "${riscv64-stdenv.targetPlatform.config}-" riscv64-stdenv.cc.cc.name) 77 | TARGET 78 | riscv64-libc.pname 79 | ])) {} '' 80 | for goto in ${drv}/bin/*.goto; do 81 | echo running $goto 82 | $goto 83 | done 84 | '' 85 | -------------------------------------------------------------------------------- /examples/openblas/test-opts.nix: -------------------------------------------------------------------------------- 1 | { 2 | args = { 3 | TARGET = ["RISCV64_GENERIC" "RISCV64_ZVL128B"]; 4 | }; 5 | } 6 | -------------------------------------------------------------------------------- /examples/spec2006/483.xalancbmk.patch: -------------------------------------------------------------------------------- 1 | --- a/spec2006/benchspec/CPU2006/483.xalancbmk/src/xercesc/util/NameIdPool.hpp 2 | +++ b/spec2006/benchspec/CPU2006/483.xalancbmk/src/xercesc/util/NameIdPool.hpp 3 | @@ -329,7 +329,7 @@ private : 4 | // ----------------------------------------------------------------------- 5 | unsigned int fCurIndex; 6 | NameIdPool* fToEnum; 7 | - MemoryManager* const fMemoryManager; 8 | + MemoryManager* fMemoryManager; 9 | }; 10 | -------------------------------------------------------------------------------- /examples/spec2006/build-all.nix: -------------------------------------------------------------------------------- 1 | { stdenv 2 | , lib 3 | , fetchFromGitHub 4 | , libxcrypt-legacy 5 | , riscv64-cc 6 | , riscv64-fortran 7 | 8 | , utils 9 | , riscv64-stdenv 10 | , riscv64-libc 11 | , riscv64-jemalloc 12 | , src 13 | , size 14 | , enableVector # TODO: enable vector in libc and jemalloc 15 | , optimize 16 | , march 17 | }: 18 | let 19 | CPU2006LiteWrapper = fetchFromGitHub { 20 | owner = "OpenXiangShan"; 21 | repo = "CPU2006LiteWrapper"; 22 | rev = "6651ffc5127d7391e2741c84f367c59bf4156c16"; 23 | hash = "sha256-v24FLtKjZFYPxrDaiwY4BA10893J9uCNax/PGpKNjFE="; 24 | }; 25 | in stdenv.mkDerivation { 26 | name = utils.escapeName (builtins.concatStringsSep "_" [ 27 | "spec2006" 28 | size 29 | (lib.removePrefix "${riscv64-stdenv.targetPlatform.config}-" riscv64-stdenv.cc.cc.name) 30 | optimize 31 | march 32 | riscv64-libc.pname 33 | riscv64-jemalloc.pname 34 | ]); 35 | system = "x86_64-linux"; 36 | 37 | srcs = [ 38 | src 39 | CPU2006LiteWrapper 40 | ]; 41 | sourceRoot = "."; 42 | 43 | buildInputs = [ 44 | riscv64-cc 45 | riscv64-fortran 46 | riscv64-libc 47 | riscv64-jemalloc 48 | ]; 49 | 50 | patches = [ ./483.xalancbmk.patch ]; 51 | 52 | configurePhase = let 53 | rpath = lib.makeLibraryPath [ 54 | libxcrypt-legacy 55 | ]; 56 | in '' 57 | echo patchelf: ./spec2006/bin/ 58 | for file in $(find ./spec2006/bin -type f \( -perm /0111 -o -name \*.so\* \) ); do 59 | patchelf --set-interpreter "$(cat ${stdenv.cc}/nix-support/dynamic-linker)" "$file" &> /dev/null || true 60 | patchelf --set-rpath ${rpath} $file &> /dev/null || true 61 | done 62 | ''; 63 | 64 | buildPhase = '' 65 | export LiteWrapper=$(realpath ${CPU2006LiteWrapper.name}) 66 | export SPEC=$(realpath ./spec2006) 67 | cd $LiteWrapper 68 | 69 | export SPEC_LITE=$PWD 70 | export ARCH=riscv64 71 | export CROSS_COMPILE=riscv64-unknown-linux-gnu- 72 | export OPTIMIZE="${optimize} -march=${march}" 73 | export SUBPROCESS_NUM=5 74 | 75 | export CFLAGS="$CFLAGS -static 
-Wno-format-security -I${riscv64-jemalloc}/include " 76 | export CXXFLAGS="$CXXFLAGS -static -Wno-format-security -I${riscv64-jemalloc}/include" 77 | export LDFLAGS="$LDFLAGS -static -ljemalloc -L${riscv64-jemalloc}/lib" 78 | export LIBS="${riscv64-jemalloc}/lib/libjemalloc.a" 79 | 80 | pushd $SPEC && source shrc && popd 81 | make copy-all-src 82 | make build-all -j $NIX_BUILD_CORES 83 | make copy-all-data 84 | ''; 85 | 86 | dontFixup = true; 87 | 88 | # based on https://github.com/OpenXiangShan/CPU2006LiteWrapper/blob/main/scripts/run-template.sh 89 | installPhase = '' 90 | for WORK_DIR in [0-9][0-9][0-9].*; do 91 | echo "Prepare data: $WORK_DIR" 92 | pushd $WORK_DIR 93 | mkdir -p run 94 | if [ -d data/all/input ]; then cp -r data/all/input/* run/; fi 95 | if [ -d data/${size}/input ]; then cp -r data/${size}/input/* run/; fi 96 | if [ -f extra-data/${size}.sh ]; then sh extra-data/${size}.sh ; fi 97 | 98 | mkdir -p $out/$WORK_DIR/run/ 99 | cp -r run/* $out/$WORK_DIR/run/ 100 | cp build/$WORK_DIR $out/$WORK_DIR/run/ 101 | # Replace $APP with executable in run-.sh 102 | # E.g.: 481.wrf/run-ref.sh 103 | # before replace: [run-ref.h]: $APP > rsl.out.0000 104 | # after replace: [run.sh]: ./481.wrf > rsl.out.0000 105 | sed 's,\$APP,./'$WORK_DIR',' run-${size}.sh > $out/$WORK_DIR/run/run-spec.sh 106 | popd 107 | done 108 | 109 | find $out -type d -exec chmod 555 {} + 110 | ''; 111 | } 112 | -------------------------------------------------------------------------------- /examples/spec2006/build-one.nix: -------------------------------------------------------------------------------- 1 | { runCommand 2 | , callPackage 3 | , writeShScript 4 | 5 | , utils 6 | , riscv64-libc 7 | , riscv64-jemalloc 8 | , src 9 | , size 10 | , enableVector 11 | , optimize 12 | , march 13 | , testCase 14 | }@args: let 15 | build-all = callPackage ./build-all.nix { 16 | inherit riscv64-libc riscv64-jemalloc; 17 | inherit src size enableVector optimize march; 18 | }; 19 | build-one = runCommand "${build-all.name}.${utils.escapeName testCase}" {} '' 20 | mkdir -p $out 21 | cp -r ${build-all}/${testCase}/* $out/ 22 | ''; 23 | in writeShScript "${build-one.name}" args '' 24 | cd ${build-one}/run 25 | sh ./run-spec.sh 26 | '' 27 | -------------------------------------------------------------------------------- /examples/spec2006/config.nix: -------------------------------------------------------------------------------- 1 | {...}@args: import ./. ({ 2 | cc = "gcc13"; 3 | cores = "2"; 4 | 5 | enableVector = true; 6 | 7 | size = "test"; 8 | optimize = "-O3"; 9 | march = "rv64gcbv"; 10 | # "464_h264ref" and "465_tonto" will be excluded 11 | testcase-filter = testcase: !(builtins.elem testcase [ 12 | "464_h264ref" 13 | "465_tonto" 14 | ]); 15 | 16 | cpt-maxK = "10"; 17 | cpt-maxK-bmk = { 18 | "403.gcc" = "20"; 19 | "483.xalancbmk" = "30"; 20 | "openblas" = "50"; 21 | }; 22 | cpt-intervals = "1000000"; 23 | cpt-simulator = "qemu"; 24 | cpt-format = "zstd"; 25 | } // args) 26 | -------------------------------------------------------------------------------- /examples/spec2006/default.nix: -------------------------------------------------------------------------------- 1 | { lib ? import 2 | /** 3 | enableVector: Controls compiler's auto-vectorization during benchmark builds. 4 | * **Type**: bool 5 | * **Default value**: `false` 6 | */ 7 | , enableVector ? false 8 | 9 | /** 10 | src: Path to SPEC CPU 2006 source code. 
11 | * **Note**: 12 | As SPEC CPU 2006 is a proprietary benchmark, it cannot be incorporated in Deterload's source code. 13 | You need to obtain its source code through legal means. 14 | * **Type**: path 15 | * **Supported path types**: 16 | * Path to a folder: 17 | 18 | The folder must be the root directory of the SPEC CPU 2006 source code. 19 | 20 | Example: 21 | ```nix 22 | src = /path/miao/spec2006; 23 | ``` 24 | 25 | Required folder structure: 26 | ``` 27 | /path/miao/spec2006 28 | ├── benchspec/ 29 | ├── bin/ 30 | ├── tools/ 31 | ├── shrc 32 | ... 33 | ``` 34 | 35 | * Path to a tar file: 36 | 37 | The tar file must contain a folder named exactly `spec2006`, 38 | with the same folder structure as above. 39 | 40 | Supported tar file extensions: 41 | * gzip (.tar.gz, .tgz or .tar.Z) 42 | * bzip2 (.tar.bz2, .tbz2 or .tbz) 43 | * xz (.tar.xz, .tar.lzma or .txz) 44 | 45 | Example: 46 | ```nix 47 | src = /path/of/spec2006.tar.gz; 48 | ``` 49 | 50 | * For more information about supported path types, 51 | please see [Nixpkgs Manual: The unpack phase](https://nixos.org/manual/nixpkgs/stable/#ssec-unpack-phase). 52 | */ 53 | , src ? throw "Please specify the path of spec2006, for example: /path/of/spec2006.tar.gz" 54 | 55 | /** 56 | size: Input size for SPEC CPU 2006. 57 | * **Type**: string 58 | * **Default value**: `"ref"` 59 | * **Available values**: `"ref"`, `"train"`, `"test"` 60 | */ 61 | , size ? "ref" 62 | 63 | /** 64 | optimize: Compiler optimization flags for SPEC CPU 2006. 65 | * **Type**: string 66 | * **Default value**: `"-O3 -flto"` 67 | */ 68 | , optimize ? "-O3 -flto" 69 | 70 | /** 71 | march: Compiler's `-march` option for SPEC CPU 2006. 72 | * **Type**: string 73 | * **Default value**: "rv64gc${lib.optionalString enableVector "v"}" 74 | * **Description**: The default value depends on `enableVector`: 75 | * If `enableVector` is `true`, the default value is `"rv64gcv"`, 76 | * If `enableVector` is `false`, the default value is `"rv64gc"`. 77 | */ 78 | , march ? "rv64gc${lib.optionalString enableVector "v"}" 79 | 80 | /** 81 | testcase-filter: Function to filter SPEC CPU 2006 testcases. 82 | * **Type**: string -> bool 83 | * **Default value**: `testcase: true` 84 | * **Description**: `testcase-filter` takes a testcase name as input and returns: 85 | * `true`: include this testcase 86 | * `false`: exclude this testcase 87 | * **Example 1**: Include all testcases: 88 | ```nix 89 | testcase-filter = testcase: true; 90 | ``` 91 | * **Example 2**: Only include `403_gcc`: 92 | ```nix 93 | testcase-filter = testcase: testcase == "403_gcc"; 94 | ``` 95 | * **Example 3**: Exclude `464_h264ref` and `465_tonto`: 96 | ```nix 97 | testcase-filter = testcase: !(builtins.elem testcase [ 98 | "464_h264ref" 99 | "465_tonto" 100 | ]); 101 | ``` 102 | */ 103 | , testcase-filter ? testcase: true 104 | 105 | /** 106 | per-bmk-maxK: maxK values for specified benchmarks in checkpoint generation. 107 | * **Type**: attr (`{ benchmark-name = number-in-string; ... }`) 108 | * **Default value**: `{ "483_xalancbmk" = "100"; }` 109 | * **Description**: 110 | `per-bmk-maxK` sets the maxK for specified benchmarks. 111 | Unspecified benchmarks will use the value from `cpt-maxK`. 112 | This attribute consists of key-value pairs where: 113 | * Key: benchmark name. 114 | * Value: number in a string (same format as `cpt-maxK`). 115 | * **FAQ**: Why set maxK of 483_xalancbmk to 100? 116 | * Setting maxK to 30 for 483_xalancbmk resulted in unstable scores. 117 | */ 118 | , per-bmk-maxK ?
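  # The default below keeps 483_xalancbmk at maxK=100; an illustrative override
  # from the command line (paths and values are examples only) would be:
  #   nix-build examples/spec2006 --arg src /path/of/spec2006.tar.gz \
  #     --arg per-bmk-maxK '{ "403_gcc" = "50"; }' -A cpt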
{ 119 | "483_xalancbmk" = "100"; 120 | } 121 | 122 | , ... 123 | }@args: 124 | assert lib.assertOneOf "size" size ["ref" "train" "test"]; 125 | let 126 | deterload = import ../.. args; 127 | spec2006-full = deterload.deterPkgs.callPackage ./packages.nix { 128 | riscv64-libc = deterload.deterPkgs.riscv64-stdenv.cc.libc.static; 129 | riscv64-jemalloc = deterload.deterPkgs.riscv64-pkgs.jemalloc.overrideAttrs (oldAttrs: { 130 | configureFlags = (oldAttrs.configureFlags or []) ++ [ 131 | "--enable-static" 132 | "--disable-shared" 133 | ]; 134 | preBuild = '' 135 | # Add weak attribute to C++ operators, same as jemalloc_cpp.patch 136 | sed -i '/void/N;s/void[[:space:]]*\*[[:space:]]*operator new/void __attribute__((weak)) *operator new/g' src/jemalloc_cpp.cpp 137 | sed -i '/void/N;s/void[[:space:]]*operator delete/void __attribute__((weak)) operator delete/g' src/jemalloc_cpp.cpp 138 | ''; 139 | # Ensure static libraries are installed 140 | postInstall = '' 141 | ${oldAttrs.postInstall or ""} 142 | cp -v lib/libjemalloc.a $out/lib/ 143 | ''; 144 | }); 145 | inherit src size enableVector optimize march; 146 | }; 147 | spec2006-filtered = lib.filterAttrs (testcase: value: 148 | (testcase-filter testcase) && (lib.isDerivation value)) 149 | spec2006-full; 150 | spec2006-deterload = builtins.mapAttrs 151 | (name: benchmark: (deterload.override ( 152 | if (per-bmk-maxK ? "${name}") then { 153 | cpt-maxK = per-bmk-maxK."${name}"; 154 | } else {} 155 | )).build benchmark) 156 | (lib.filterAttrs (n: v: (lib.isDerivation v)) spec2006-filtered); 157 | in spec2006-deterload // ( deterload.deterPkgs.utils.wrap-l2 spec2006-deterload ) 158 | -------------------------------------------------------------------------------- /examples/spec2006/packages.nix: -------------------------------------------------------------------------------- 1 | { callPackage 2 | 3 | , utils 4 | , riscv64-libc 5 | , riscv64-jemalloc 6 | , src 7 | , size 8 | , enableVector 9 | , optimize 10 | , march 11 | }: let 12 | testCases = [ 13 | "400.perlbench" 14 | "401.bzip2" 15 | "403.gcc" 16 | "410.bwaves" 17 | "416.gamess" 18 | "429.mcf" 19 | "433.milc" 20 | "434.zeusmp" 21 | "435.gromacs" 22 | "436.cactusADM" 23 | "437.leslie3d" 24 | "444.namd" 25 | "445.gobmk" 26 | "447.dealII" 27 | "450.soplex" 28 | "453.povray" 29 | "454.calculix" 30 | "456.hmmer" 31 | "458.sjeng" 32 | "459.GemsFDTD" 33 | "462.libquantum" 34 | "464.h264ref" 35 | "465.tonto" 36 | "470.lbm" 37 | "471.omnetpp" 38 | "473.astar" 39 | "481.wrf" 40 | "482.sphinx3" 41 | "483.xalancbmk" 42 | ]; 43 | in builtins.listToAttrs ( 44 | builtins.map (testCase: { 45 | # change `.` to `_`, e.g. 
"403.gcc" to "403_gcc" 46 | name = utils.escapeName testCase; 47 | value = callPackage ./build-one.nix { 48 | inherit riscv64-libc riscv64-jemalloc; 49 | inherit src size enableVector optimize march; 50 | inherit testCase; 51 | }; 52 | }) testCases 53 | ) 54 | -------------------------------------------------------------------------------- /examples/spec2006/test-opts.nix: -------------------------------------------------------------------------------- 1 | { 2 | args = { 3 | enableVector = [true false]; 4 | src = [/spec2006.tar.gz]; 5 | }; 6 | # TODO: -j29 7 | } 8 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let 2 | name = "Deterload"; 3 | pkgs = import {}; 4 | my-python3 = pkgs.python3.withPackages (python-pkgs: [ 5 | # for docs 6 | python-pkgs.pydot 7 | ]); 8 | h_content = builtins.toFile "h_content" '' 9 | # ${pkgs.lib.toUpper "${name} usage tips"} 10 | 11 | ## Configuration 12 | 13 | From higher priority to lower priority: 14 | 15 | * Configure by CLI: 16 | * `nom-build ... --arg ...` 17 | * `nom-build ... --argstr ...` 18 | * E.g: Generate spec2006 simpoint-guided checkpoints using given source code: 19 | * `nom-build examples/spec2006/ --arg src -A cpts-simpoint` 20 | * Configure by a file: see `examples/*/config.nix` 21 | 22 | ## Generation 23 | 24 | * Generate the simpoint-guided checkpoints for a given into `result/`: 25 | * `nom-build examples/ -A cpts-simpoint` 26 | * E.g: Generate simpoint-guided checkpoints for openblas: 27 | * `nom-build examples/openblas -A cpts-simpoint` 28 | 29 | ## Documentation 30 | 31 | * Generate html doc into `book/` 32 | * `make doc` 33 | ''; 34 | _h_ = pkgs.writeShellScriptBin "h" '' 35 | ${pkgs.glow}/bin/glow ${h_content} 36 | ''; 37 | in 38 | pkgs.mkShell { 39 | inherit name; 40 | packages = [ 41 | _h_ 42 | pkgs.nix-output-monitor 43 | pkgs.mdbook 44 | pkgs.graphviz 45 | pkgs.glibcLocales 46 | my-python3 47 | ]; 48 | shellHook = '' 49 | export LOCALE_ARCHIVE=${pkgs.glibcLocales}/lib/locale/locale-archive 50 | h 51 | ''; 52 | } 53 | -------------------------------------------------------------------------------- /test-opts.nix: -------------------------------------------------------------------------------- 1 | { 2 | args = { 3 | # cc = ["gcc14"]; 4 | }; 5 | } 6 | -------------------------------------------------------------------------------- /utils.nix: -------------------------------------------------------------------------------- 1 | { lib 2 | , linkFarm 3 | }: rec { 4 | getName = p: if (p?pname) then p.pname else p.name; 5 | escapeName = lib.converge (name: 6 | builtins.replaceStrings 7 | [" " "." "-" "__"] 8 | ["" "_" "_" "_" ] 9 | name); 10 | /*set -> set: filter derivations in a set*/ 11 | filterDrvs = set: lib.filterAttrs (n: v: (lib.isDerivation v)) set; 12 | /*set -> set: 13 | wrap-l2 { 14 | a={x=drv0; y=drv1; z=drv2; w=0;}; 15 | b={x=drv3; y=drv4; z=drv5; w=1;}; 16 | c={x=drv6; y=drv7; z=drv8; w=2;}; 17 | } 18 | returns { 19 | x=linkFarm xNewName [drv0 drv3 drv6]; 20 | y=linkFarm yNewName [drv1 drv4 drv7]; 21 | z=linkFarm zNewName [drv2 drv5 drv8]; 22 | }*/ 23 | wrap-l2 = attrs: let 24 | /*mapToAttrs (name: {inherit name; value=...}) ["a", "b", "c", ...] 
25 | returns {a=value0; b=value1; c=value2; ...} */ 26 | mapToAttrs = func: list: builtins.listToAttrs (builtins.map func list); 27 | /*attrDrvNames { 28 | a={x=drv0; y=drv1; z=drv2; w=0;}; 29 | b={x=drv3; y=drv4; z=drv5; w=1;}; 30 | c={x=drv6; y=drv7; z=drv8; w=2;}; 31 | } 32 | returns ["x" "y" "z"] */ 33 | attrDrvNames = set: builtins.attrNames (filterDrvs (builtins.head (builtins.attrValues set))); 34 | in mapToAttrs (name/*represents the name in builders/default.nix, like img, cpt, ...*/: { 35 | inherit name; 36 | value = linkFarm ( 37 | # Assuming the name of drv is mmm.400_perlbench.nnn, we want mmm.nnn 38 | # Take spec2006 for an example: 39 | # full = spec2006_ref_gcc_14_2_0_O3_flto_rv64gc_glibc_jemalloc.400_perlbench.1core_3_checkpoint 40 | # front= spec2006_ref_gcc_14_2_0_O3_flto_rv64gc_glibc_jemalloc 41 | # tail = 1core_3_checkpoint 42 | # res = spec2006_ref_gcc_14_2_0_O3_flto_rv64gc_glibc_jemalloc.1core_3_checkpoint 43 | let full = (builtins.head (builtins.attrValues attrs))."${name}".name; 44 | split= lib.splitString "." full; 45 | front= lib.init (lib.init split); 46 | last = lib.last split; 47 | in builtins.concatStringsSep "." (front ++ [last]) 48 | ) ( 49 | lib.mapAttrsToList (testCase: attr: { 50 | name = testCase; 51 | path = attr."${name}"; 52 | }) attrs); 53 | }) (attrDrvNames attrs); 54 | 55 | metricPrefix = input: let 56 | num = if builtins.isInt input then input 57 | else if builtins.isString input then lib.toInt input 58 | else throw "metricPrefix: unsupported type of ${input}"; 59 | K = 1000; 60 | M = 1000 * K; 61 | G = 1000 * M; 62 | T = 1000 * G; 63 | P = 1000 * T; 64 | E = 1000 * P; 65 | in if num < K then "${toString num }" 66 | else if num < M then "${toString (num / K)}K" 67 | else if num < G then "${toString (num / M)}M" 68 | else if num < T then "${toString (num / G)}G" 69 | else if num < P then "${toString (num / T)}T" 70 | else if num < E then "${toString (num / P)}P" 71 | else "${toString (num / E)}E" 72 | ; 73 | } 74 | --------------------------------------------------------------------------------
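A few hand-evaluated examples of the utils.nix helpers above (illustrative only, e.g. from a `nix repl` session with the file loaded):
  escapeName "400.perlbench"  => "400_perlbench"
  escapeName "gcc-14.2.0"     => "gcc_14_2_0"
  metricPrefix "1000000"      => "1M"
  metricPrefix 20000          => "20K"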