├── .credo.exs ├── .exguard.exs ├── .formatter.exs ├── .github └── workflows │ └── main.yml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── LICENSE.md ├── README.md ├── architecture ├── README.md ├── component.pu ├── container.pu ├── context.pu └── rendered │ ├── component.svg │ ├── container.svg │ └── context.svg ├── coveralls.json ├── lib ├── benchee.ex └── benchee │ ├── benchmark.ex │ ├── benchmark │ ├── benchmark_config.ex │ ├── collect.ex │ ├── collect │ │ ├── memory.ex │ │ ├── reductions.ex │ │ └── time.ex │ ├── function_call_overhead.ex │ ├── hooks.ex │ ├── repeated_measurement.ex │ ├── runner.ex │ └── scenario_context.ex │ ├── collection_data.ex │ ├── configuration.ex │ ├── conversion.ex │ ├── conversion │ ├── count.ex │ ├── deviation_percent.ex │ ├── duration.ex │ ├── format.ex │ ├── memory.ex │ ├── scale.ex │ └── unit.ex │ ├── errors.ex │ ├── formatter.ex │ ├── formatters │ ├── console.ex │ ├── console │ │ ├── helpers.ex │ │ ├── memory.ex │ │ ├── reductions.ex │ │ └── run_time.ex │ └── tagged_save.ex │ ├── output │ ├── benchmark_printer.ex │ ├── profile_printer.ex │ └── progress_printer.ex │ ├── profile.ex │ ├── relative_statistics.ex │ ├── scenario.ex │ ├── scenario_loader.ex │ ├── statistics.ex │ ├── suite.ex │ ├── system.ex │ └── utility │ ├── deep_convert.ex │ ├── erlang_version.ex │ ├── file_creation.ex │ ├── parallel.ex │ └── repeat_n.ex ├── mix.exs ├── mix.lock ├── run_samples.sh ├── samples ├── deactivate_output.exs ├── descending_sort.exs ├── fast.exs ├── fast_functions.exs ├── fast_with_profiling.exs ├── formatters.exs ├── macro_benchmark.exs ├── measure_memory.exs ├── measure_reductions.exs ├── memory_breaker.exs ├── memory_changing.exs ├── multiple_inputs.exs ├── parallel_process.exs ├── pre_check.exs ├── reduction_run.exs ├── repeat_n.exs ├── run.exs ├── run_all.exs ├── run_defaults.exs ├── run_extended_statistics.exs ├── run_parallel.exs ├── run_verbose.exs ├── save_and_load.exs ├── save_and_report.exs ├── sort_performance.exs ├── 
statistics_performance.exs ├── title.exs ├── unit_scaling.exs └── unit_scaling_smallest_vs_none.exs ├── test ├── benchee │ ├── benchmark │ │ ├── collect │ │ │ ├── memory_test.exs │ │ │ └── reductions_test.exs │ │ ├── repeated_measurement_test.exs │ │ ├── runner_test.exs │ │ └── scenario_test.exs │ ├── benchmark_test.exs │ ├── configuration_test.exs │ ├── conversion │ │ ├── count_test.exs │ │ ├── deviation_percent_test.exs │ │ ├── duration_test.exs │ │ ├── format_test.exs │ │ ├── memory_test.exs │ │ └── scale_test.exs │ ├── conversion_test.exs │ ├── formatter_test.exs │ ├── formatters │ │ ├── console │ │ │ ├── memory_test.exs │ │ │ ├── reductions_test.exs │ │ │ └── run_time_test.exs │ │ ├── console_test.exs │ │ └── tagged_save_test.exs │ ├── output │ │ └── benchmark_printer_test.exs │ ├── profile_test.exs │ ├── relative_statistics_test.exs │ ├── scenario_loader_test.exs │ ├── statistics_test.exs │ ├── suite_test.exs │ ├── system_test.exs │ └── utility │ │ ├── deep_convert_test.exs │ │ ├── erlang_version_test.exs │ │ ├── file_creation_integration_test.exs │ │ ├── file_creation_test.exs │ │ └── repeat_n_test.exs ├── benchee_test.exs ├── fixtures │ └── escript │ │ ├── .gitignore │ │ ├── README.md │ │ ├── lib │ │ └── escript.ex │ │ ├── mix.exs │ │ ├── mix.lock │ │ └── test.sh ├── support │ ├── bench_keyword.ex │ ├── fake_benchmark_printer.ex │ ├── fake_benchmark_runner.ex │ ├── fake_formatter.ex │ ├── fake_profile_printer.ex │ ├── fake_progress_printer.ex │ └── test_helpers.ex └── test_helper.exs └── tools └── plts └── .gitignore /.exguard.exs: -------------------------------------------------------------------------------- 1 | use ExGuard.Config 2 | 3 | project_files = ~r{\.(erl|ex|exs|eex|xrl|yrl)\z}i 4 | deps = ~r{deps} 5 | 6 | guard("compile and warn", run_on_start: true) 7 | |> command("MIX_ENV=test mix compile --warnings-as-errors") 8 | |> watch(project_files) 9 | |> ignore(deps) 10 | |> notification(:auto) 11 | 12 | guard("credo", run_on_start: true) 13 | |> 
command("mix credo") 14 | |> watch(project_files) 15 | |> ignore(deps) 16 | |> notification(:auto) 17 | 18 | guard("mix format", run_on_start: true) 19 | |> command("mix format --check-formatted") 20 | |> watch(project_files) 21 | |> ignore(deps) 22 | |> notification(:auto) 23 | 24 | guard("dialyzer", run_on_start: true) 25 | |> command("mix dialyzer --halt-exit-status") 26 | |> watch(project_files) 27 | |> ignore(deps) 28 | |> notification(:auto) 29 | 30 | guard("test", run_on_start: true) 31 | |> command("mix test --color") 32 | |> watch(project_files) 33 | |> ignore(deps) 34 | |> notification(:auto) 35 | -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | # Used by "mix format" 2 | [ 3 | plugins: [DoctestFormatter], 4 | inputs: ["{mix,.formatter}.exs", "{config,lib,test,samples,mix}/**/*.{ex,exs}"] 5 | ] 6 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | env: 4 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 5 | 6 | on: [pull_request, push] 7 | 8 | jobs: 9 | linux: 10 | name: Test on Ubuntu (Elixir ${{ matrix.elixir_version }}, OTP ${{ matrix.otp_version }}) 11 | runs-on: ubuntu-24.04 12 | 13 | strategy: 14 | matrix: 15 | # Run tests at least once for every supported elixir or erlang version 16 | # 17 | # Since all the code is running at least once with each version, that should cover enough. 18 | # Like, what are the chances a bug would happen on 1.18@26 but not on 1.17@26 or 1.18@27? 19 | # And if it does, it's more likely an elixir bug than a benchee bug. We'll see. 
20 | # We've been using enough of githubs CI resources and our own wait time :) 21 | # 22 | # https://hexdocs.pm/elixir/compatibility-and-deprecations.html#between-elixir-and-erlang-otp 23 | # 24 | # We're also further limited by the support the setup-beam action offers: 25 | # https://github.com/erlef/setup-beam?tab=readme-ov-file#compatibility-between-operating-system-and-erlangotp 26 | include: 27 | - elixir_version: '1.11' 28 | otp_version: '24.3' 29 | - elixir_version: '1.12' 30 | otp_version: '24.3' 31 | - elixir_version: '1.13' 32 | otp_version: '25.3' 33 | - elixir_version: '1.14' 34 | otp_version: '25.3' 35 | - elixir_version: '1.15' 36 | otp_version: '26.2' 37 | - elixir_version: '1.16' 38 | otp_version: '26.2' 39 | - elixir_version: '1.17' 40 | otp_version: '27.3' 41 | - elixir_version: '1.18' 42 | otp_version: '27.3' 43 | type_check: true 44 | lint: true 45 | 46 | steps: 47 | - name: Checkout 48 | uses: actions/checkout@v3 49 | - name: Setup Elixir 50 | uses: erlef/setup-beam@v1 51 | with: 52 | elixir-version: ${{ matrix.elixir_version }} 53 | otp-version: ${{ matrix.otp_version }} 54 | - name: Restore deps and _build 55 | uses: actions/cache@v3 56 | with: 57 | path: | 58 | deps 59 | _build 60 | key: erlef-${{ runner.os }}-mix-${{ matrix.elixir_version }}-${{ matrix.otp_version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} 61 | - name: Restore plts 62 | uses: actions/cache@v3 63 | with: 64 | path: tools/plts 65 | key: erlef-${{ runner.os }}-dialyzer-${{ matrix.elixir_version }}-${{ matrix.otp_version }}-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} 66 | if: ${{ matrix.type_check }} 67 | - run: mix deps.get 68 | - run: MIX_ENV=test mix compile --warnings-as-errors 69 | - run: mix credo 70 | if: ${{ matrix.lint }} 71 | - name: Check if formatted 72 | if: ${{ matrix.lint }} 73 | run: mix format --check-formatted 74 | - name: Actual Tests 75 | # this will let warnings slip through but I don't wanna replicate all 
that magic 76 | # right now 77 | run: MIX_ENV=test mix coveralls.github || mix test --failed 78 | # Apparently the one with `!` can't go without the fancy expression syntax 79 | if: ${{ !matrix.lint }} 80 | # warnings as errors is a form of linting! 81 | - name: Actual Tests WITH warnings as errors 82 | run: MIX_ENV=test mix coveralls.github --warnings-as-errors || mix test --failed 83 | if: ${{ matrix.lint }} 84 | - name: Dialyzer 85 | run: mix dialyzer --halt-exit-status 86 | if: ${{ matrix.type_check }} 87 | 88 | macos: 89 | name: Test on MacOS 90 | runs-on: macos-latest 91 | 92 | steps: 93 | - name: Checkout 94 | uses: actions/checkout@v3 95 | # no versioning on brew but getting asdf or something was a bigger headache 96 | - name: Install Elixir 97 | run: brew install elixir 98 | - name: Restore deps and _build 99 | uses: actions/cache@v3 100 | with: 101 | path: | 102 | deps 103 | _build 104 | key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }} 105 | - run: mix local.hex --force 106 | - run: mix deps.get 107 | - run: mix local.rebar --force 108 | - run: MIX_ENV=test mix compile --warnings-as-errors 109 | - run: mix test || mix test --failed 110 | 111 | windows: 112 | name: Test on Windows 113 | runs-on: windows-2022 114 | 115 | steps: 116 | - name: Checkout 117 | uses: actions/checkout@v3 118 | - name: Setup Elixir 119 | uses: erlef/setup-beam@v1 120 | with: 121 | elixir-version: '1.18' 122 | otp-version: '27.3' 123 | - name: Get deps 124 | run: mix deps.get 125 | - name: Test 126 | run: mix test || mix test --failed 127 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 
8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | benchee-*.tar 24 | 25 | # Temporary files for e.g. tests. 26 | /tmp/ 27 | 28 | # Kinda changed my opinion on this, it became annoying to upgrade patch versions on my other 29 | # systems just to be in sync. Since this is a library that should work on _many_ versions and 30 | # runs CI that way there is no need for strict enforcement. 31 | .tool-versions 32 | 33 | # Misc. 34 | save.benchee 35 | /test/tmp/ 36 | /tools/plts/benchee.plt 37 | /tools/plts/benchee.plt.hash 38 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at pragtob@gmail.com (sorry, just me for now). All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Tobias Pfeiffer 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /architecture/README.md: -------------------------------------------------------------------------------- 1 | # Architecture 2 | 3 | This folder contains notes about benchee's architecture and some diagrams showing said architecture. 4 | 5 | The architecture is relatively simple but flexible and could probably best be described as _pipes_. 6 | The goal is to allow for extension and customization. 7 | There is a big structure called `Benchee.Suite` which is progressively enhanced by multiple steps. 8 | Each of these steps should be exchangeable and they're all part of the public API. 9 | Each step is just a function to make using and exchanging them easy. 
10 | 11 | The order of these steps is best illustrated by the implementation of `Benchee.run/2` to see in which order they're called: 12 | 13 | ```elixir 14 | def run(jobs, config \\ []) when is_list(config) do 15 | config 16 | |> Benchee.init() 17 | |> Benchee.system() 18 | |> add_benchmarking_jobs(jobs) 19 | |> Benchee.collect() 20 | |> Benchee.statistics() 21 | |> Benchee.load() 22 | |> Benchee.relative_statistics() 23 | |> Formatter.output() 24 | |> Benchee.profile() 25 | end 26 | 27 | defp add_benchmarking_jobs(suite, jobs) do 28 | Enum.reduce(jobs, suite, fn {key, function}, suite_acc -> 29 | Benchee.benchmark(suite_acc, key, function) 30 | end) 31 | end 32 | ``` 33 | 34 | ## C4 diagrams 35 | 36 | ### Context 37 | 38 | ![context diagram](rendered/context.svg) 39 | 40 | ### Container 41 | 42 | ![container diagram](rendered/container.svg) 43 | 44 | ### Component 45 | 46 | ![component diagram](rendered/component.svg) 47 | 48 | 49 | ## Working with the diagrams 50 | 51 | To render them you need a a tool such as [the PlantUML plugin for VScode](https://marketplace.visualstudio.com/items?itemName=jebbs.plantuml). 
52 | For that to work, you also need to install `graphviz` (`sudo apt-get install graphviz`) 53 | -------------------------------------------------------------------------------- /architecture/component.pu: -------------------------------------------------------------------------------- 1 | @startuml component 2 | !include 3 | !include 4 | 5 | 16 | 17 | title C4 Representation: Level 3 Component Diagram 18 | 19 | System_Boundary(Benchee, "Benchee", "A microbenchmarking tool for Elixir") { 20 | Container(Config, "Benchee.Configuration", "", "Configures the initial benchmarking suite using a series of default and user-defined settings") 21 | Container(System, "Benchee.System", "", "Gathers System data and adds it to the suite") 22 | Boundary(Benchmark, "Benchee.Benchmark", "", "Defines and runs the functions to be benchmarked, collecting raw data") { 23 | Component(BenchmarkConfig, "BenchmarkConfig", "", "Provides just the necessary configuration for the benchmark and lets out all of the suite configuration that is not needed") 24 | 25 | Component(Hooks, "Hooks", "", "Provides support for hooks") 26 | 27 | Component(ScenarioContext, "ScenarioContext", "", "Provides the data the runner needs while running a scenario") 28 | 29 | Component(Runner, "Runner", "", "Runs the benchmarking suite") 30 | 31 | Component(Collect, "Collect", "", "Collects the data for a scenario") 32 | Component(Memory, "Collect.Memory", "", "Measures memory usage") 33 | Component(Time, "Collect.Time", "", "Measures time") 34 | Component(Reductions, "Collect.Reductions", "", "Measures BEAM's 'unit of work'") 35 | } 36 | Container(Statistics, "Benchee.Statistics", "", "Calculates statistics based on the raw data") 37 | Container(RelativeStatistics, "Benchee.RelativeStatistics", "", "Calculates statistics between scenarios (jobs with the same input)") 38 | Container(ScenarioLoader, "Benchee.ScenarioLoader", "", "Loads saved scenarios for comparison") 39 | Container(Output, "Benchee.Fomatter", "", 
"Formats the statistics in a suitable way") 40 | } 41 | 42 | Rel(Config, System, "Configured suite") 43 | 44 | Rel(System, BenchmarkConfig, "Configured suite + System data") 45 | 46 | Rel(BenchmarkConfig, Runner, "") 47 | Rel(Runner, Hooks, "") 48 | Rel(Runner, ScenarioContext, "") 49 | BiRel_L(Runner, Collect, "") 50 | 51 | BiRel_D(Collect, Time, "") 52 | BiRel_U(Collect, Memory, "") 53 | BiRel(Collect, Reductions, "") 54 | 55 | Rel_R(Runner, Statistics, "Measurements") 56 | Rel(Statistics, ScenarioLoader, "Statistics for all scenarios") 57 | Rel(ScenarioLoader, RelativeStatistics, "Statistics for run and loaded scenarios") 58 | Rel(RelativeStatistics, Output, "Statistics about all scenarios") 59 | 60 | Rel(Output, Report, "Produces") 61 | @enduml 62 | -------------------------------------------------------------------------------- /architecture/container.pu: -------------------------------------------------------------------------------- 1 | @startuml container 2 | !include 3 | !include 4 | 5 | 16 | 17 | title C4 Representation: Level 2 Container Diagram 18 | 19 | Person_Ext(User, "User", "Anyone who wants to benchmark on the BEAM to compare its execution time, memory usage and reductions") 20 | 21 | System_Boundary(Benchee, "Benchee", "A microbenchmarking tool for Elixir") { 22 | Container(Config, "Benchee.Configuration", "", "Configures the initial benchmarking suite using a series of default and user-defined settings") 23 | Container(System, "Benchee.System", "", "Gathers System data and adds it to the suite") 24 | Container(Benchmark, "Benchee.Benchmark", "", "Defines and runs the functions to be benchmarked, collecting raw data") 25 | Container(Statistics, "Benchee.Statistics", "", "Calculates statistics based on the raw data") 26 | Container(RelativeStatistics, "Benchee.RelativeStatistics", "", "Calculates statistics between scenarios (jobs with the same input)") 27 | Container(ScenarioLoader, "Benchee.ScenarioLoader", "", "Loads saved scenarios for 
comparison") 28 | Container(Output, "Benchee.Fomatter", "", "Formats the statistics in a suitable way") 29 | } 30 | 31 | Rel(User, Config, "Settings") 32 | 33 | Rel(Config, System, "Configured suite") 34 | Rel(System, Benchmark, "Configured suite + System data") 35 | Rel(Benchmark, Statistics, "Raw data") 36 | Rel(Statistics, ScenarioLoader, "Statistics for all scenarios") 37 | Rel(ScenarioLoader, RelativeStatistics, "Statistics for run and loaded scenarios") 38 | Rel(RelativeStatistics, Output, "Statistics about all scenarios") 39 | 40 | Rel(Output, Report, "Produces") 41 | @enduml 42 | -------------------------------------------------------------------------------- /architecture/context.pu: -------------------------------------------------------------------------------- 1 | @startuml context 2 | !include 3 | !include 4 | 5 | 11 | 12 | title C4 Representation: Level 1 Context Diagram 13 | 14 | Person_Ext(User, "User", "Anyone who wants to benchmark on the BEAM to compare its execution time, memory usage and reductions") 15 | 16 | System(Benchee, "Benchee", "A microbenchmarking tool for Elixir") 17 | 18 | Rel(User, Benchee, "Uses") 19 | Rel(Benchee, Report, "Produces") 20 | @enduml 21 | -------------------------------------------------------------------------------- /coveralls.json: -------------------------------------------------------------------------------- 1 | { 2 | "default_stop_words": [ 3 | "defmodule", 4 | "defrecord", 5 | "defimpl", 6 | "defexception", 7 | "defprotocol", 8 | "defstruct", 9 | "def.+(.+\\\\.+).+do", 10 | "^\\s+use\\s+" 11 | ], 12 | 13 | "custom_stop_words": [ 14 | ], 15 | 16 | "coverage_options": { 17 | "treat_no_relevant_lines_as_covered": false, 18 | "output_dir": "cover/", 19 | "minimum_coverage": 0 20 | }, 21 | 22 | "terminal_options": { 23 | "file_column_width": 40 24 | }, 25 | 26 | "skip_files": [ 27 | "test/support" 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /lib/benchee.ex: 
-------------------------------------------------------------------------------- 1 | # Idea from this: 2 | # credo:disable-for-next-line 3 | # https://github.com/bencheeorg/benchee/commit/b3ddbc132e641cdf1eec0928b322ced1dab8553f#commitcomment-23381474 4 | 5 | elixir_doc = """ 6 | Top level module providing convenience access to needed functions as well 7 | as the very high level `Benchee.run` API. 8 | 9 | Intended Elixir interface. 10 | """ 11 | 12 | erlang_doc = """ 13 | High-Level interface for more convenient usage from Erlang. Same as `Benchee`. 14 | """ 15 | 16 | for {module, moduledoc} <- [{Benchee, elixir_doc}, {:benchee, erlang_doc}] do 17 | defmodule module do 18 | @moduledoc moduledoc 19 | 20 | alias Benchee.Formatter 21 | 22 | @doc """ 23 | Run benchmark jobs defined by a map and optionally provide configuration 24 | options. 25 | 26 | Benchmarks are defined as a map where the keys are a name for the given 27 | function and the values are the functions to benchmark. Users can configure 28 | the run by passing a keyword list as the second argument. For more 29 | information on configuration see `Benchee.Configuration.init/1`. 
30 | 31 | ## Examples 32 | 33 | Benchee.run( 34 | %{ 35 | "My Benchmark" => fn -> 1 + 1 end, 36 | "My other benchmrk" => fn -> [1] ++ [1] end 37 | }, 38 | warmup: 2, 39 | time: 3 40 | ) 41 | """ 42 | @spec run(map, keyword) :: Benchee.Suite.t() 43 | def run(jobs, config \\ []) when is_list(config) do 44 | config 45 | |> Benchee.init() 46 | |> Benchee.system() 47 | |> add_benchmarking_jobs(jobs) 48 | |> Benchee.collect() 49 | |> Benchee.statistics() 50 | |> Benchee.load() 51 | |> Benchee.relative_statistics() 52 | |> Formatter.output() 53 | |> Benchee.profile() 54 | end 55 | 56 | defp add_benchmarking_jobs(suite, jobs) do 57 | Enum.reduce(jobs, suite, fn {key, function}, suite_acc -> 58 | Benchee.benchmark(suite_acc, key, function) 59 | end) 60 | end 61 | 62 | @doc """ 63 | A convenience function designed for loading saved benchmarks and running formatters on them. 64 | 65 | Basically takes the input of the map of jobs away from you and skips unnecessary steps with 66 | that data missing (aka not running benchmarks, only running relative statistics). 67 | 68 | You can use config options as normal, but some options related to benchmarking won't take 69 | effect (such as `:time`). The `:load` option however is mandatory to use, as you need to 70 | load some benchmarks to report on them. 
71 | 72 | ## Usage 73 | 74 | Benchee.report(load: ["benchmark-*.benchee"]) 75 | """ 76 | @spec report(keyword) :: Benchee.Suite.t() 77 | def report(config) do 78 | if !Access.get(config, :load), do: raise_missing_load() 79 | 80 | config 81 | |> Benchee.init() 82 | |> Benchee.system() 83 | |> Benchee.load() 84 | |> Benchee.relative_statistics() 85 | |> Formatter.output() 86 | end 87 | 88 | defp raise_missing_load do 89 | raise ArgumentError, 90 | "You need to provide at least a :load option for report/1 to make sense" 91 | end 92 | 93 | defdelegate init(), to: Benchee.Configuration 94 | defdelegate init(config), to: Benchee.Configuration 95 | defdelegate system(suite), to: Benchee.System 96 | defdelegate benchmark(suite, name, function), to: Benchee.Benchmark 97 | @doc false 98 | defdelegate benchmark(suite, name, function, printer), to: Benchee.Benchmark 99 | defdelegate collect(suite), to: Benchee.Benchmark 100 | @doc false 101 | defdelegate collect(suite, printer), to: Benchee.Benchmark 102 | defdelegate statistics(suite), to: Benchee.Statistics 103 | @doc false 104 | defdelegate statistics(suite, printer), to: Benchee.Statistics 105 | defdelegate relative_statistics(suite), to: Benchee.RelativeStatistics 106 | defdelegate load(suite), to: Benchee.ScenarioLoader 107 | defdelegate profile(suite), to: Benchee.Profile 108 | end 109 | end 110 | -------------------------------------------------------------------------------- /lib/benchee/benchmark.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark do 2 | @moduledoc """ 3 | Functions related to building and running benchmarking scenarios. 4 | Exposes `benchmark/4` and `collect/3` functions. 
5 | """ 6 | 7 | alias Benchee.Benchmark.{BenchmarkConfig, Runner, ScenarioContext} 8 | alias Benchee.Output.BenchmarkPrinter, as: Printer 9 | alias Benchee.Scenario 10 | alias Benchee.Suite 11 | alias Benchee.Utility.DeepConvert 12 | 13 | @no_input :__no_input 14 | 15 | @doc """ 16 | Public access for the special key representing no input for a scenario. 17 | """ 18 | def no_input, do: @no_input 19 | 20 | @doc """ 21 | Takes the current suite and adds a new benchmarking scenario (represented by a 22 | %Scenario{} struct) with the given name and function to the suite's scenarios. 23 | If there are inputs in the suite's config, a scenario will be added for the given 24 | function for each input. 25 | """ 26 | @spec benchmark(Suite.t(), Suite.key(), Scenario.to_benchmark(), module) :: Suite.t() 27 | def benchmark( 28 | suite = %Suite{scenarios: scenarios}, 29 | job_name, 30 | to_be_benchmark, 31 | printer \\ Printer 32 | ) do 33 | warn_if_evaluated(to_be_benchmark, job_name, printer) 34 | 35 | normalized_name = to_string(job_name) 36 | 37 | if duplicate?(scenarios, normalized_name) do 38 | printer.duplicate_benchmark_warning(normalized_name) 39 | suite 40 | else 41 | add_scenario(suite, normalized_name, to_be_benchmark) 42 | end 43 | end 44 | 45 | defp warn_if_evaluated(to_be_benchmark, job_name, printer) do 46 | function = 47 | case to_be_benchmark do 48 | {function, _} -> function 49 | function -> function 50 | end 51 | 52 | if :erlang.fun_info(function, :module) == {:module, :erl_eval} do 53 | printer.evaluated_function_warning(job_name) 54 | end 55 | end 56 | 57 | defp duplicate?(scenarios, job_name) do 58 | Enum.any?(scenarios, fn scenario -> scenario.name == job_name end) 59 | end 60 | 61 | defp add_scenario( 62 | suite = %Suite{scenarios: scenarios, configuration: config}, 63 | job_name, 64 | function 65 | ) do 66 | new_scenarios = build_scenarios_for_job(job_name, function, config) 67 | %Suite{suite | scenarios: List.flatten([scenarios | new_scenarios])} 68 | 
end 69 | 70 | defp build_scenarios_for_job(job_name, function, %{inputs: nil}) do 71 | [ 72 | build_scenario(%{ 73 | job_name: job_name, 74 | function: function, 75 | input: @no_input, 76 | input_name: @no_input 77 | }) 78 | ] 79 | end 80 | 81 | defp build_scenarios_for_job(job_name, function, %{inputs: inputs}) do 82 | Enum.map(inputs, fn {input_name, input} -> 83 | build_scenario(%{ 84 | job_name: job_name, 85 | function: function, 86 | input: input, 87 | input_name: input_name 88 | }) 89 | end) 90 | end 91 | 92 | defp build_scenario(scenario_data = %{function: {function, options}}) do 93 | scenario_data 94 | |> Map.put(:function, function) 95 | |> Map.merge(DeepConvert.to_map(options)) 96 | |> build_scenario 97 | end 98 | 99 | defp build_scenario(scenario_data) do 100 | struct!(Scenario, add_scenario_name(scenario_data)) 101 | end 102 | 103 | defp add_scenario_name(scenario_data) do 104 | Map.put(scenario_data, :name, Scenario.display_name(scenario_data)) 105 | end 106 | 107 | @doc """ 108 | Kicks off the benchmarking of all scenarios defined in the given suite. 109 | 110 | Hence, this might take a while ;) Passes a list of scenarios and a scenario context to our 111 | benchmark runner. For more information on how benchmarks are actually run, see the 112 | `Benchee.Benchmark.Runner` code (API considered private). 
113 | """ 114 | @spec collect(Suite.t(), module, module) :: Suite.t() 115 | def collect( 116 | suite = %Suite{scenarios: scenarios, configuration: config, system: system}, 117 | printer \\ Printer, 118 | runner \\ Runner 119 | ) do 120 | printer.configuration_information(suite) 121 | benchmark_config = BenchmarkConfig.from(config) 122 | 123 | scenario_context = %ScenarioContext{ 124 | config: benchmark_config, 125 | printer: printer, 126 | system: system 127 | } 128 | 129 | scenarios = runner.run_scenarios(scenarios, scenario_context) 130 | %Suite{suite | scenarios: scenarios} 131 | end 132 | end 133 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/benchmark_config.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.BenchmarkConfig do 2 | @moduledoc """ 3 | Benchmark Configuration, practically a sub set of `Benchee.Configuration` 4 | 5 | `Benchee.Configuration` holds too much data that we don't want to send into the benchmarking 6 | processes - inputs being potentially huge. Hence, we take the sub set the benchmarks need and 7 | put it in here. Since this is a benchmarking library, to no one's surprise these are a lot of 8 | them. 
9 | See: https://github.com/bencheeorg/benchee/issues/412 10 | """ 11 | 12 | alias Benchee.Benchmark.Hooks 13 | 14 | @keys [ 15 | :warmup, 16 | :time, 17 | :memory_time, 18 | :reduction_time, 19 | :pre_check, 20 | :measure_function_call_overhead, 21 | :before_each, 22 | :after_each, 23 | :before_scenario, 24 | :after_scenario, 25 | :parallel, 26 | :print, 27 | :max_sample_size 28 | ] 29 | 30 | defstruct @keys 31 | 32 | @type t :: %__MODULE__{ 33 | time: number, 34 | warmup: number, 35 | memory_time: number, 36 | reduction_time: number, 37 | pre_check: boolean, 38 | measure_function_call_overhead: boolean, 39 | print: map, 40 | before_each: Hooks.hook_function() | nil, 41 | after_each: Hooks.hook_function() | nil, 42 | before_scenario: Hooks.hook_function() | nil, 43 | after_scenario: Hooks.hook_function() | nil, 44 | measure_function_call_overhead: boolean, 45 | parallel: pos_integer(), 46 | max_sample_size: pos_integer() 47 | } 48 | 49 | alias Benchee.Configuration 50 | 51 | def from(config = %Configuration{}) do 52 | values = Map.take(config, @keys) 53 | struct!(__MODULE__, values) 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/collect.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.Collect do 2 | @moduledoc false 3 | 4 | # A thing that collects a data point about a function execution - like time 5 | # or memory needed. 6 | 7 | @doc """ 8 | Takes an anonymous 0 arity function to measure and returns the measurement 9 | and the return value of the function in a tuple. 10 | 11 | The returned measurement may be `nil` if the measurement failed for some 12 | reason - it will then be ignored and not counted. 
13 | """ 14 | @type return_value :: {non_neg_integer | nil, any} 15 | @type zero_arity_function :: (-> any) 16 | @type opts :: any 17 | @callback collect(zero_arity_function()) :: return_value() 18 | @callback collect(zero_arity_function(), opts) :: return_value() 19 | 20 | @optional_callbacks collect: 2 21 | end 22 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/collect/memory.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.Collect.Memory do 2 | @moduledoc false 3 | 4 | # Measure memory consumption of a function. 5 | # 6 | # This is somewhat tricky and hence some resources can be recommended reading alongside 7 | # this code: 8 | # * description of the approach: https://devonestes.herokuapp.com/using-erlang-trace-3 9 | # * devon describing the journey that this feature put us through (includes remarks 10 | # on why certain parts are very important: https://www.youtube.com/watch?v=aqLujfzvUgM) 11 | # * erlang docs on the info data structure we use: 12 | # http://erlang.org/doc/man/erlang.html#gc_minor_start 13 | # 14 | # Returns `{nil, return_value}` in case the memory measurement went bad. 
15 | 16 | @behaviour Benchee.Benchmark.Collect 17 | 18 | defmacrop compatible_stacktrace do 19 | if Version.match?(Version.parse!(System.version()), "~> 1.7") do 20 | quote do 21 | __STACKTRACE__ 22 | end 23 | else 24 | quote do 25 | System.stacktrace() 26 | end 27 | end 28 | end 29 | 30 | @spec collect((-> any)) :: {nil | non_neg_integer, any} 31 | def collect(fun) do 32 | ref = make_ref() 33 | Process.flag(:trap_exit, true) 34 | start_runner(fun, ref) 35 | await_results(nil, ref) 36 | end 37 | 38 | defp await_results(return_value, ref) do 39 | receive do 40 | {^ref, memory_usage} -> 41 | return_memory({memory_usage, return_value}) 42 | 43 | {^ref, :shutdown} -> 44 | nil 45 | 46 | # we need a really basic pattern here because sending anything other than 47 | # just what's returned from the function that we're benchmarking will 48 | # involve allocating a new term, which will skew the measurements. 49 | # We need to be very careful to always send the `ref` in every other 50 | # message to this process. 
51 | new_result -> 52 | await_results(new_result, ref) 53 | end 54 | end 55 | 56 | defp start_runner(fun, ref) do 57 | parent = self() 58 | 59 | spawn_link(fn -> 60 | tracer = start_tracer(self()) 61 | 62 | try do 63 | _ = measure_memory(fun, tracer, parent) 64 | word_size = :erlang.system_info(:wordsize) 65 | memory_used = get_collected_memory(tracer) 66 | send(parent, {ref, memory_used * word_size}) 67 | catch 68 | kind, reason -> 69 | send(tracer, :done) 70 | send(parent, {ref, :shutdown}) 71 | stacktrace = compatible_stacktrace() 72 | IO.puts(Exception.format(kind, reason, stacktrace)) 73 | exit(:normal) 74 | after 75 | send(tracer, :done) 76 | end 77 | end) 78 | end 79 | 80 | defp return_memory({memory_usage, return_value}) when memory_usage < 0, do: {nil, return_value} 81 | defp return_memory(memory_usage_info), do: memory_usage_info 82 | 83 | defp measure_memory(fun, tracer, parent) do 84 | :erlang.garbage_collect() 85 | send(tracer, :begin_collection) 86 | 87 | receive do 88 | :ready_to_begin -> nil 89 | end 90 | 91 | return_value = fun.() 92 | send(parent, return_value) 93 | 94 | :erlang.garbage_collect() 95 | send(tracer, :end_collection) 96 | 97 | receive do 98 | :ready_to_end -> nil 99 | end 100 | 101 | # We need to reference these variables after we end our collection so 102 | # these don't get GC'd and counted towards the memory usage of the function 103 | # we're benchmarking. 
104 | {parent, fun} 105 | end 106 | 107 | defp get_collected_memory(tracer) do 108 | ref = Process.monitor(tracer) 109 | send(tracer, {:get_collected_memory, self(), ref}) 110 | 111 | receive do 112 | {:DOWN, ^ref, _, _, _} -> nil 113 | {^ref, collected} -> collected 114 | end 115 | end 116 | 117 | defp start_tracer(pid) do 118 | spawn(fn -> tracer_loop(pid, 0) end) 119 | end 120 | 121 | defp tracer_loop(pid, acc) do 122 | receive do 123 | :begin_collection -> 124 | :erlang.trace(pid, true, [:garbage_collection, tracer: self()]) 125 | send(pid, :ready_to_begin) 126 | tracer_loop(pid, acc) 127 | 128 | :end_collection -> 129 | :erlang.trace(pid, false, [:garbage_collection]) 130 | send(pid, :ready_to_end) 131 | tracer_loop(pid, acc) 132 | 133 | {:get_collected_memory, reply_to, ref} -> 134 | send(reply_to, {ref, acc}) 135 | 136 | {:trace, ^pid, :gc_minor_start, info} -> 137 | listen_gc_end(pid, :gc_minor_end, acc, total_memory(info)) 138 | 139 | {:trace, ^pid, :gc_major_start, info} -> 140 | listen_gc_end(pid, :gc_major_end, acc, total_memory(info)) 141 | 142 | :done -> 143 | exit(:normal) 144 | end 145 | end 146 | 147 | defp listen_gc_end(pid, tag, acc, mem_before) do 148 | receive do 149 | {:trace, ^pid, ^tag, info} -> 150 | mem_after = total_memory(info) 151 | tracer_loop(pid, acc + mem_before - mem_after) 152 | end 153 | end 154 | 155 | defp total_memory(info) do 156 | # `:heap_size` seems to only contain the memory size of the youngest 157 | # generation `:old_heap_size` has the old generation. There is also 158 | # `:recent_size` but that seems to already be accounted for. 
159 | Keyword.fetch!(info, :heap_size) + Keyword.fetch!(info, :old_heap_size) 160 | end 161 | end 162 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/collect/reductions.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.Collect.Reductions do 2 | @moduledoc false 3 | 4 | @behaviour Benchee.Benchmark.Collect 5 | 6 | def collect(fun) do 7 | parent = self() 8 | 9 | spawn_link(fn -> 10 | start = get_reductions() 11 | output = fun.() 12 | send(parent, {:reductions, get_reductions() - start, output}) 13 | end) 14 | 15 | receive do 16 | {:reductions, reductions, output} -> {reductions, output} 17 | end 18 | end 19 | 20 | defp get_reductions do 21 | {:reductions, reductions} = Process.info(self(), :reductions) 22 | reductions 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/collect/time.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.Collect.Time do 2 | @moduledoc false 3 | 4 | # Measure the time elapsed while executing a given function. 5 | # 6 | # In contrast to `:timer.tc/1` it always returns the result in nano seconds instead of micro 7 | # seconds. This helps us avoid losing precision as both Linux and MacOSX seem to be able to 8 | # measure in nano seconds. `:timer.tc/n` 9 | # [forfeits this precision]( 10 | # https://github.com/erlang/otp/blob/main/lib/stdlib/src/timer.erl#L164-L169). 
11 | 12 | @behaviour Benchee.Benchmark.Collect 13 | 14 | @spec collect((-> any)) :: {non_neg_integer, any} 15 | def collect(function) do 16 | start = :erlang.monotonic_time() 17 | result = function.() 18 | finish = :erlang.monotonic_time() 19 | 20 | duration_nano_seconds = :erlang.convert_time_unit(finish - start, :native, :nanosecond) 21 | 22 | {duration_nano_seconds, result} 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/function_call_overhead.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.FunctionCallOverhead do 2 | @moduledoc false 3 | 4 | alias Benchee.Benchmark.Collect.Time 5 | alias Benchee.Conversion 6 | 7 | @overhead_determination_time Conversion.Duration.convert_value({0.01, :second}, :nanosecond) 8 | 9 | # Compute the function call overhead on the current system 10 | # 11 | # You might wonder why this isn't done simply through using our existing infrastructure 12 | # and just run it with Scenario, Context etc. - in fact that's how it used to be, 13 | # but it meant that we'd pass half-baked scenarios to functions causing dialyzer to complain, 14 | # as well as running for a lot more code than we strictly need to like the 15 | # `determine_n_times` code that we should rather not go through as it changes 16 | # what this function does. 17 | # This also gives us a way to make sure we definitely take at least one measurement. 18 | @spec measure() :: non_neg_integer() 19 | def measure do 20 | # just the fastest function one can think of... 
21 | overhead_function = fn -> nil end 22 | 23 | _ = warmup(overhead_function) 24 | run_times = run(overhead_function) 25 | 26 | Statistex.minimum(run_times) 27 | end 28 | 29 | defp warmup(function) do 30 | run_for(function, @overhead_determination_time / 2) 31 | end 32 | 33 | defp run(function) do 34 | run_for(function, @overhead_determination_time) 35 | end 36 | 37 | defp run_for(function, run_time) do 38 | end_time = current_time() + run_time 39 | 40 | do_run(function, [], end_time) 41 | end 42 | 43 | @spec do_run((-> any), [number], number) :: [number, ...] 44 | defp do_run(function, durations, end_time) do 45 | {duration, _} = Time.collect(function) 46 | 47 | if current_time() < end_time do 48 | do_run(function, [duration | durations], end_time) 49 | else 50 | durations 51 | end 52 | end 53 | 54 | defp current_time, do: :erlang.system_time(:nano_seconds) 55 | end 56 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/hooks.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.Hooks do 2 | @moduledoc """ 3 | Internal module to support hooks functionality. 4 | """ 5 | 6 | # Non Benchee code should not rely on this module. 
7 | 8 | alias Benchee.Benchmark.ScenarioContext 9 | alias Benchee.Scenario 10 | 11 | @type hook_function :: (any -> any) 12 | 13 | @spec run_before_scenario(Scenario.t(), ScenarioContext.t()) :: any 14 | def run_before_scenario( 15 | %Scenario{ 16 | before_scenario: local_before_scenario, 17 | input: input 18 | }, 19 | %ScenarioContext{ 20 | config: %{before_scenario: global_before_scenario} 21 | } 22 | ) do 23 | input 24 | |> run_before_function(global_before_scenario) 25 | |> run_before_function(local_before_scenario) 26 | end 27 | 28 | defp run_before_function(input, nil), do: input 29 | defp run_before_function(input, function), do: function.(input) 30 | 31 | @spec run_before_each(Scenario.t(), ScenarioContext.t()) :: any 32 | def run_before_each( 33 | %{ 34 | before_each: local_before_each 35 | }, 36 | %{ 37 | config: %{before_each: global_before_each}, 38 | scenario_input: input 39 | } 40 | ) do 41 | input 42 | |> run_before_function(global_before_each) 43 | |> run_before_function(local_before_each) 44 | end 45 | 46 | @spec run_after_each(any, Scenario.t(), ScenarioContext.t()) :: any 47 | def run_after_each( 48 | return_value, 49 | %{ 50 | after_each: local_after_each 51 | }, 52 | %{ 53 | config: %{after_each: global_after_each} 54 | } 55 | ) do 56 | if local_after_each, do: local_after_each.(return_value) 57 | if global_after_each, do: global_after_each.(return_value) 58 | end 59 | 60 | @spec run_after_scenario(Scenario.t(), ScenarioContext.t()) :: any 61 | def run_after_scenario( 62 | %{ 63 | after_scenario: local_after_scenario 64 | }, 65 | %{ 66 | config: %{after_scenario: global_after_scenario}, 67 | scenario_input: input 68 | } 69 | ) do 70 | if local_after_scenario, do: local_after_scenario.(input) 71 | if global_after_scenario, do: global_after_scenario.(input) 72 | end 73 | end 74 | -------------------------------------------------------------------------------- /lib/benchee/benchmark/scenario_context.ex: 
-------------------------------------------------------------------------------- 1 | defmodule Benchee.Benchmark.ScenarioContext do 2 | @moduledoc """ 3 | Internal struct the runner & related modules deal with to run a scenario. 4 | 5 | Practically bundles information the runner needs to be aware of while running 6 | a scenario such as the current_time, end_time, printer, input, function call 7 | overhead etc. 8 | """ 9 | 10 | # This struct holds the context in which any scenario is run. 11 | 12 | defstruct [ 13 | :config, 14 | :printer, 15 | :system, 16 | :current_time, 17 | :end_time, 18 | # before_scenario can alter the original input 19 | :scenario_input, 20 | num_iterations: 1, 21 | function_call_overhead: 0, 22 | sample_size: 0 23 | ] 24 | 25 | @type t :: %__MODULE__{ 26 | config: Benchee.Benchmark.BenchmarkConfig.t(), 27 | printer: module, 28 | current_time: pos_integer | nil, 29 | end_time: pos_integer | nil, 30 | scenario_input: any, 31 | num_iterations: pos_integer, 32 | function_call_overhead: non_neg_integer, 33 | sample_size: non_neg_integer() 34 | } 35 | end 36 | -------------------------------------------------------------------------------- /lib/benchee/collection_data.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.CollectionData do 2 | @moduledoc """ 3 | The unified data structure for a given collection of data. 4 | 5 | Consists of the recorded `samples` and the statistics computed from them. 6 | """ 7 | 8 | alias Benchee.Statistics 9 | 10 | defstruct statistics: %Statistics{}, samples: [] 11 | 12 | @typedoc """ 13 | Samples and statistics. 14 | 15 | Statistics might only come later when they are computed. 
16 | """ 17 | @type t :: %__MODULE__{ 18 | samples: [float | non_neg_integer], 19 | statistics: Statistics.t() 20 | } 21 | end 22 | -------------------------------------------------------------------------------- /lib/benchee/conversion.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion do 2 | @moduledoc """ 3 | Integration of the conversion of multiple units with Benchee. 4 | 5 | Can be used by plugins to use Benchee unit scaling logic. 6 | """ 7 | 8 | alias Benchee.Conversion.{Count, Duration, Memory} 9 | 10 | @doc """ 11 | Takes scenarios and a given scaling_strategy, returns the best units for the 12 | given scaling strategy. The return value changes based on whether you want 13 | units for run time or memory usage. 14 | 15 | The units can then be passed on to the appropriate `format` calls to format 16 | the output of arbitrary values with the right unit. 17 | 18 | ## Examples 19 | 20 | iex> statistics = %Benchee.Statistics{average: 1_000_000.0, ips: 1000.0} 21 | ...> 22 | ...> scenario = %Benchee.Scenario{ 23 | ...> run_time_data: %Benchee.CollectionData{statistics: statistics}, 24 | ...> memory_usage_data: %Benchee.CollectionData{statistics: statistics}, 25 | ...> reductions_data: %Benchee.CollectionData{statistics: statistics} 26 | ...> } 27 | ...> 28 | ...> Benchee.Conversion.units([scenario], :best) 29 | %{ 30 | ips: %Benchee.Conversion.Unit{ 31 | label: "K", 32 | long: "Thousand", 33 | magnitude: 1000, 34 | name: :thousand 35 | }, 36 | run_time: %Benchee.Conversion.Unit{ 37 | label: "ms", 38 | long: "Milliseconds", 39 | magnitude: 1_000_000, 40 | name: :millisecond 41 | }, 42 | memory: %Benchee.Conversion.Unit{ 43 | label: "KB", 44 | long: "Kilobytes", 45 | magnitude: 1024, 46 | name: :kilobyte 47 | }, 48 | reduction_count: %Benchee.Conversion.Unit{ 49 | label: "M", 50 | long: "Million", 51 | magnitude: 1_000_000, 52 | name: :million 53 | } 54 | } 55 | """ 56 | def units(scenarios, 
scaling_strategy) do 57 | run_time_measurements = measurements_for(scenarios, :run_time_data) 58 | reductions_measurements = measurements_for(scenarios, :reductions_data) 59 | memory_measurements = measurements_for(scenarios, :memory_usage_data) 60 | 61 | %{ 62 | run_time: Duration.best(run_time_measurements.average, strategy: scaling_strategy), 63 | ips: Count.best(run_time_measurements.ips, strategy: scaling_strategy), 64 | memory: Memory.best(memory_measurements.average, strategy: scaling_strategy), 65 | reduction_count: Count.best(reductions_measurements.average, strategry: scaling_strategy) 66 | } 67 | end 68 | 69 | defp measurements_for(scenarios, path) do 70 | paths = [Access.key(path), Access.key(:statistics)] 71 | 72 | scenarios 73 | |> Enum.flat_map(fn scenario -> scenario |> get_in(paths) |> Map.to_list() end) 74 | |> Enum.group_by(fn {stat_name, _} -> stat_name end, fn {_, value} -> value end) 75 | end 76 | end 77 | -------------------------------------------------------------------------------- /lib/benchee/conversion/count.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.Count do 2 | @moduledoc """ 3 | Unit scaling for counts, such that 1000000 can be converted to 1 Million. 4 | 5 | Only Benchee plugins should use this code. 
6 | """ 7 | 8 | alias Benchee.Conversion.{Format, Scale, Unit} 9 | 10 | @behaviour Scale 11 | @behaviour Format 12 | 13 | @one_billion 1_000_000_000 14 | @one_million 1_000_000 15 | @one_thousand 1_000 16 | 17 | @units_map %{ 18 | billion: %Unit{ 19 | name: :billion, 20 | magnitude: @one_billion, 21 | label: "B", 22 | long: "Billion" 23 | }, 24 | million: %Unit{ 25 | name: :million, 26 | magnitude: @one_million, 27 | label: "M", 28 | long: "Million" 29 | }, 30 | thousand: %Unit{ 31 | name: :thousand, 32 | magnitude: @one_thousand, 33 | label: "K", 34 | long: "Thousand" 35 | }, 36 | one: %Unit{ 37 | name: :one, 38 | magnitude: 1, 39 | label: "", 40 | long: "" 41 | } 42 | } 43 | 44 | @units Map.values(@units_map) 45 | 46 | @type unit_atoms :: :one | :thousand | :million | :billion 47 | @type units :: unit_atoms | Unit.t() 48 | 49 | @doc """ 50 | Scales a value representing a count in ones into a larger unit if appropriate 51 | 52 | ## Examples 53 | 54 | iex> {value, unit} = scale(4_321.09) 55 | ...> value 56 | 4.32109 57 | iex> unit.name 58 | :thousand 59 | 60 | iex> {value, unit} = scale(0.0045) 61 | ...> value 62 | 0.0045 63 | iex> unit.name 64 | :one 65 | 66 | """ 67 | def scale(count) when count >= @one_billion do 68 | scale_with_unit(count, :billion) 69 | end 70 | 71 | def scale(count) when count >= @one_million do 72 | scale_with_unit(count, :million) 73 | end 74 | 75 | def scale(count) when count >= @one_thousand do 76 | scale_with_unit(count, :thousand) 77 | end 78 | 79 | def scale(count) do 80 | scale_with_unit(count, :one) 81 | end 82 | 83 | # Helper function for returning a tuple of {value, unit} 84 | defp scale_with_unit(count, unit) do 85 | {scale(count, unit), unit_for(unit)} 86 | end 87 | 88 | @doc """ 89 | Get a unit by its atom representation. If handed already a %Unit{} struct it 90 | just returns it. 
91 | 92 | ## Examples 93 | 94 | iex> unit_for(:thousand) 95 | %Benchee.Conversion.Unit{ 96 | name: :thousand, 97 | magnitude: 1_000, 98 | label: "K", 99 | long: "Thousand" 100 | } 101 | 102 | iex> unit_for(%Benchee.Conversion.Unit{ 103 | ...> name: :thousand, 104 | ...> magnitude: 1_000, 105 | ...> label: "K", 106 | ...> long: "Thousand" 107 | ...> }) 108 | %Benchee.Conversion.Unit{ 109 | name: :thousand, 110 | magnitude: 1_000, 111 | label: "K", 112 | long: "Thousand" 113 | } 114 | """ 115 | def unit_for(unit) do 116 | Scale.unit_for(@units_map, unit) 117 | end 118 | 119 | def units do 120 | @units 121 | end 122 | 123 | @doc """ 124 | Scales a value representing a count in ones into a specified unit 125 | 126 | ## Examples 127 | 128 | iex> scale(12345, :one) 129 | 12345.0 130 | 131 | iex> scale(12345, :thousand) 132 | 12.345 133 | 134 | iex> scale(12345, :billion) 135 | 1.2345e-5 136 | 137 | iex> scale(12345, :million) 138 | 0.012345 139 | 140 | """ 141 | def scale(count, unit) do 142 | Scale.scale(count, unit, __MODULE__) 143 | end 144 | 145 | @doc """ 146 | Converts a value for a specified %Unit or unit atom and converts it to the equivalent of another unit of measure. 147 | 148 | ## Examples 149 | 150 | iex> {value, unit} = convert({2500, :thousand}, :million) 151 | ...> value 152 | 2.5 153 | iex> unit.name 154 | :million 155 | """ 156 | def convert(number_and_unit, desired_unit) do 157 | Scale.convert(number_and_unit, desired_unit, __MODULE__) 158 | end 159 | 160 | @doc """ 161 | Finds the best unit for a list of counts. By default, chooses the most common 162 | unit. In case of tie, chooses the largest of the most common units. 163 | 164 | Pass `[strategy: :smallest]` to always return the smallest unit in the list. 165 | Pass `[strategy: :largest]` to always return the largest unit in the list. 
166 | 167 | ## Examples 168 | 169 | iex> best([23, 23_000, 34_000, 2_340_000]).name 170 | :thousand 171 | 172 | iex> best([23, 23_000, 34_000, 2_340_000, 3_450_000]).name 173 | :million 174 | 175 | iex> best([23, 23_000, 34_000, 2_340_000], strategy: :smallest).name 176 | :one 177 | 178 | iex> best([23, 23_000, 34_000, 2_340_000], strategy: :largest).name 179 | :million 180 | 181 | """ 182 | def best(list, opts \\ [strategy: :best]) 183 | 184 | def best(list, opts) do 185 | Scale.best_unit(list, __MODULE__, opts) 186 | end 187 | 188 | @doc """ 189 | The raw count, unscaled. 190 | 191 | ## Examples 192 | 193 | iex> base_unit().name 194 | :one 195 | 196 | """ 197 | def base_unit, do: unit_for(:one) 198 | 199 | @doc """ 200 | Formats a number as a string, with a unit label. 201 | 202 | To specify the unit, pass a tuple of `{value, unit_atom}` like `{1_234, :million}` 203 | 204 | ## Examples 205 | 206 | iex> format(45_678.9) 207 | "45.68 K" 208 | 209 | iex> format(45.6789) 210 | "45.68" 211 | 212 | iex> format({45.6789, :thousand}) 213 | "45.68 K" 214 | 215 | iex> format( 216 | ...> {45.6789, %Benchee.Conversion.Unit{long: "Thousand", magnitude: "1_000", label: "K"}} 217 | ...> ) 218 | "45.68 K" 219 | """ 220 | def format(count) do 221 | Format.format(count, __MODULE__) 222 | end 223 | 224 | @doc """ 225 | Formats in a more "human" way separating by units. 
226 | 227 | ## Examples 228 | 229 | iex> format_human(45_678.9) 230 | "45 K 678.90" 231 | 232 | iex> format_human(1_000_000) 233 | "1 M" 234 | 235 | iex> format_human(1_001_000) 236 | "1 M 1 K" 237 | """ 238 | def format_human(count) do 239 | Format.format_human(count, __MODULE__) 240 | end 241 | end 242 | -------------------------------------------------------------------------------- /lib/benchee/conversion/deviation_percent.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.DeviationPercent do 2 | @moduledoc """ 3 | Helps with formatting for the standard deviation ratio converting it into the 4 | more common percent form. 5 | 6 | Only Benchee plugins should use this code. 7 | """ 8 | 9 | alias Benchee.Conversion.Format 10 | 11 | @behaviour Format 12 | 13 | @doc """ 14 | Formats the standard deviation ratio to an equivalent percent number including special signs. 15 | 16 | The ± is an important part of it as it shows that the deviation might be up but also might be 17 | down. 18 | 19 | ## Examples 20 | 21 | iex> format(0.12345) 22 | "±12.35%" 23 | 24 | iex> format(1) 25 | "±100.00%" 26 | """ 27 | def format(std_dev_ratio) do 28 | "~ts~.2f%" 29 | |> :io_lib.format(["±", std_dev_ratio * 100.0]) 30 | |> to_string 31 | end 32 | 33 | @doc """ 34 | Formats standard deviation percent, same as `format/1`. 35 | 36 | Implemented for consistency. 37 | 38 | ## Examples 39 | 40 | iex> format_human(0.1) 41 | "±10.00%" 42 | """ 43 | def format_human(std_dev_ratio) do 44 | format(std_dev_ratio) 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /lib/benchee/conversion/format.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.Format do 2 | @moduledoc """ 3 | Functions for formatting values and their unit labels. Different domains 4 | handle this task differently, for example durations and counts. 
5 | 6 | See `Benchee.Conversion.Count` and `Benchee.Conversion.Duration` for examples. 7 | """ 8 | 9 | alias Benchee.Conversion.Unit 10 | 11 | @doc """ 12 | Formats a number as a string, with a unit label. See `Benchee.Conversion.Count` 13 | and `Benchee.Conversion.Duration` for examples 14 | """ 15 | @callback format(number) :: String.t() 16 | 17 | @doc """ 18 | Formats in a more "human" way, one biggest unit at a time. 19 | 20 | So instead of 1.5h it says 1h 30min 21 | """ 22 | @callback format_human(number) :: String.t() 23 | 24 | # Generic formatting functions 25 | 26 | @doc """ 27 | Formats a unit value with specified label and separator 28 | """ 29 | def format(number, label, separator) do 30 | separator = separator(label, separator) 31 | "#{number_format(number)}#{separator}#{label}" 32 | end 33 | 34 | defp number_format(number) when is_float(number) do 35 | number 36 | |> :erlang.float_to_list(decimals: float_precision(number)) 37 | |> to_string 38 | end 39 | 40 | defp number_format(number) when is_integer(number) do 41 | to_string(number) 42 | end 43 | 44 | @doc """ 45 | Formats a unit value in the domain described by `module`. The module should 46 | provide a `units/0` function that returns a Map like 47 | 48 | %{ :unit_name => %Benchee.Conversion.Unit{ ... } } 49 | 50 | Additionally, `module` may specify a `separator/0` function, which provides a 51 | custom separator string that will appear between the value and label in the 52 | formatted output. If no `separator/0` function exists, the default separator 53 | (a single space) will be used. 
54 | 55 | iex> format({1.0, :kilobyte}, Benchee.Conversion.Memory) 56 | "1 KB" 57 | 58 | """ 59 | def format({number, unit = %Unit{}}) do 60 | format(number, label(unit), separator()) 61 | end 62 | 63 | def format({number, unit = %Unit{}}, _module) do 64 | format({number, unit}) 65 | end 66 | 67 | def format({number, unit_atom}, module) do 68 | format({number, module.unit_for(unit_atom)}) 69 | end 70 | 71 | def format(number, module) when is_number(number) do 72 | number 73 | |> module.scale() 74 | |> format 75 | end 76 | 77 | @doc """ 78 | Human friendly duration format for time as a string. 79 | 80 | The output is a sequence of values and unit labels separated by a space. 81 | Only units whose value is non-zero are included in the output. 82 | The passed number is duration in the base unit - nanoseconds. 83 | """ 84 | def format_human(0, module) do 85 | format(0, module) 86 | end 87 | 88 | def format_human(+0.0, module) do 89 | format(0, module) 90 | end 91 | 92 | def format_human(number, module) when is_number(number) do 93 | number 94 | |> split_into_place_values(module) 95 | |> Enum.map_join(" ", &format/1) 96 | end 97 | 98 | # Returns a list of place vaules with corresponding units for the `number`. 99 | # The output is sorted descending by magnitude of units and excludes tuples with place value 0. 100 | # Place values are `non_neg_integer` for non-base units, 101 | # however base unit may also be `float` becuase the decimals can't be split further. 
102 | @spec split_into_place_values(number, module) :: [{number, Unit.t()}] 103 | defp split_into_place_values(number, module) do 104 | descending_units = units_descending(module) 105 | 106 | place_values(number, descending_units) 107 | end 108 | 109 | defp units_descending(module) do 110 | Enum.sort(module.units(), &(&1.magnitude >= &2.magnitude)) 111 | end 112 | 113 | @spec place_values(number, [Unit.t()]) :: [{number, Unit.t()}] 114 | defp place_values(0, _units), do: [] 115 | defp place_values(+0.0, _units), do: [] 116 | 117 | # smalles unit, carries the decimal 118 | defp place_values(number, [base_unit = %Unit{magnitude: 1}]), do: [{number, base_unit}] 119 | 120 | defp place_values(number, [unit | units]) do 121 | integer_number = trunc(number) 122 | decimal_carry = number - integer_number 123 | int_carry = rem(integer_number, unit.magnitude) 124 | carry = decimal_carry + int_carry 125 | 126 | place_value = div(integer_number, unit.magnitude) 127 | 128 | case place_value do 129 | 0 -> place_values(carry, units) 130 | place_value -> [{place_value, unit} | place_values(carry, units)] 131 | end 132 | end 133 | 134 | @default_separator " " 135 | # should we need it again, a customer separator could be returned 136 | # per module here 137 | defp separator do 138 | @default_separator 139 | end 140 | 141 | # Returns the separator, or an empty string if there isn't a label 142 | defp separator(label, _separator) when label == "" or label == nil, do: "" 143 | defp separator(_label, separator), do: separator 144 | 145 | # Fetches the label for the given unit 146 | defp label(%Unit{label: label}) do 147 | label 148 | end 149 | 150 | defp float_precision(float) when trunc(float) == float, do: 0 151 | defp float_precision(float) when float < 0.01, do: 5 152 | defp float_precision(float) when float < 0.1, do: 4 153 | defp float_precision(float) when float < 0.2, do: 3 154 | defp float_precision(_float), do: 2 155 | end 156 | 
-------------------------------------------------------------------------------- /lib/benchee/conversion/unit.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.Unit do 2 | @moduledoc """ 3 | A representation of the different units used in `Benchee.Conversion.Format` 4 | and `Benchee.Conversion.Scale` as well as the modules implementing these 5 | behaviours. 6 | 7 | A unit is characterized by: 8 | 9 | * name - an atom representation of the unit for easy access (`:microseconds`, 10 | `thousand`) 11 | * magnitude - compared to he base unit (the smallest unit) what's the factor 12 | you had to multiply it by to get back to the base unit. E.g. the thousand 13 | unit has a magnitude of `1_000`. 14 | * label - a string that is used as a unit label (`"K"` for a thousand f.ex.) 15 | * long - a string giving the long version of the label (`"Thousand"`) 16 | """ 17 | 18 | defstruct [:name, :magnitude, :label, :long] 19 | 20 | @type t :: %Benchee.Conversion.Unit{ 21 | name: atom, 22 | magnitude: non_neg_integer, 23 | label: String.t(), 24 | long: String.t() 25 | } 26 | end 27 | -------------------------------------------------------------------------------- /lib/benchee/errors.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.UnknownProfilerError do 2 | defexception message: "error" 3 | end 4 | 5 | defmodule Benchee.PreCheckError do 6 | defexception message: "error" 7 | end 8 | -------------------------------------------------------------------------------- /lib/benchee/formatters/console.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Formatters.Console do 2 | @moduledoc """ 3 | Formatter to print out the results of benchmarking suite to the console. 
4 | 5 | Example: 6 | 7 | Name ips average deviation median 99th % 8 | flat_map 2.40 K 417.00 μs ±9.40% 411.45 μs 715.21 μs 9 | map.flatten 1.24 K 806.89 μs ±16.62% 768.02 μs 1170.67 μs 10 | 11 | Comparison: 12 | flat_map 2.40 K 13 | map.flatten 1.24 K - 1.93x slower 14 | 15 | Memory usage statistics: 16 | 17 | Name Memory usage 18 | flat_map 624.97 KB 19 | map.flatten 781.25 KB - 1.25x memory usage 20 | 21 | **All measurements for memory usage were the same** 22 | 23 | Reduction count statistics: 24 | 25 | Name average deviation median 99th % 26 | flat_map 417.00 ±9.40 411.45 715.21 27 | map.flatten 806.89 ±16.62 768.02 1170.67 28 | 29 | Comparison: 30 | flat_map 417.00 31 | map.flatten 806.89 - 1.93x more reductions 32 | """ 33 | 34 | @behaviour Benchee.Formatter 35 | 36 | alias Benchee.Suite 37 | alias Benchee.Formatters.Console.{Memory, Reductions, RunTime} 38 | 39 | @doc """ 40 | Formats the benchmark statistics to a report suitable for output on the CLI. 41 | 42 | Returns a list of lists, where each list element is a group belonging to one 43 | specific input. So if there only was one (or no) input given through `:inputs` 44 | then there's just one list inside. 
45 | 46 | ## Examples 47 | 48 | ``` 49 | iex> scenarios = [ 50 | ...> %Benchee.Scenario{ 51 | ...> name: "My Job", 52 | ...> input_name: "My input", 53 | ...> run_time_data: %Benchee.CollectionData{ 54 | ...> statistics: %Benchee.Statistics{ 55 | ...> average: 200.0, 56 | ...> ips: 5000.0, 57 | ...> std_dev_ratio: 0.1, 58 | ...> median: 190.0, 59 | ...> percentiles: %{99 => 300.1}, 60 | ...> sample_size: 200 61 | ...> } 62 | ...> }, 63 | ...> memory_usage_data: %Benchee.CollectionData{statistics: %Benchee.Statistics{}} 64 | ...> }, 65 | ...> %Benchee.Scenario{ 66 | ...> name: "Job 2", 67 | ...> input_name: "My input", 68 | ...> run_time_data: %Benchee.CollectionData{ 69 | ...> statistics: %Benchee.Statistics{ 70 | ...> average: 400.0, 71 | ...> ips: 2500.0, 72 | ...> std_dev_ratio: 0.2, 73 | ...> median: 390.0, 74 | ...> percentiles: %{99 => 500.1}, 75 | ...> sample_size: 200 76 | ...> } 77 | ...> }, 78 | ...> memory_usage_data: %Benchee.CollectionData{statistics: %Benchee.Statistics{}} 79 | ...> } 80 | ...> ] 81 | ...> 82 | ...> suite = %Benchee.Suite{ 83 | ...> scenarios: scenarios, 84 | ...> configuration: %Benchee.Configuration{ 85 | ...> unit_scaling: :best 86 | ...> } 87 | ...> } 88 | ...> 89 | ...> format(suite, %{comparison: false, extended_statistics: false}) 90 | [ 91 | [ 92 | "\n##### With input My input #####", 93 | "\nName ips average deviation median 99th %\n", 94 | "My Job 5 K 200 ns ±10.00% 190 ns 300.10 ns\n", 95 | "Job 2 2.50 K 400 ns ±20.00% 390 ns 500.10 ns\n" 96 | ] 97 | ] 98 | 99 | ``` 100 | 101 | """ 102 | @impl true 103 | @spec format(Suite.t(), map) :: [any] 104 | def format(%Suite{scenarios: scenarios, configuration: config}, options \\ %{}) do 105 | config = 106 | config 107 | |> Map.take([:unit_scaling, :title]) 108 | |> Map.merge(options) 109 | 110 | scenarios 111 | |> Enum.reduce([], &update_grouped_list/2) 112 | |> Enum.map(fn {input, scenarios} -> 113 | generate_output(scenarios, config, input) 114 | end) 115 | end 116 | 117 | # 
Normally one would prepend to lists and not append. In this case this lead to 2 118 | # `Enum.reverse` scattered around. As these lists are usually very small (mostly less 119 | # than 10 elements) I opted for `++` here. 120 | defp update_grouped_list(scenario, grouped_scenarios) do 121 | case List.keyfind(grouped_scenarios, scenario.input_name, 0) do 122 | {_, group} -> 123 | new_tuple = {scenario.input_name, group ++ [scenario]} 124 | List.keyreplace(grouped_scenarios, scenario.input_name, 0, new_tuple) 125 | 126 | _ -> 127 | grouped_scenarios ++ [{scenario.input_name, [scenario]}] 128 | end 129 | end 130 | 131 | @doc """ 132 | Takes the output of `format/1` and writes that to the console. 133 | """ 134 | @impl true 135 | @spec write(any, map) :: :ok | {:error, String.t()} 136 | def write(output, _options \\ %{}) do 137 | IO.write(output) 138 | rescue 139 | _ -> {:error, "Unknown Error"} 140 | end 141 | 142 | defp generate_output(scenarios, config, input) do 143 | [ 144 | suite_header(input, config) 145 | | RunTime.format_scenarios(scenarios, config) ++ 146 | Memory.format_scenarios(scenarios, config) ++ 147 | Reductions.format_scenarios(scenarios, config) 148 | ] 149 | end 150 | 151 | defp suite_header(input, config) do 152 | "#{title_header(config)}#{input_header(input)}" 153 | end 154 | 155 | defp title_header(%{title: nil}), do: "" 156 | defp title_header(%{title: title}), do: "\n*** #{title} ***\n" 157 | 158 | @no_input_marker Benchee.Benchmark.no_input() 159 | defp input_header(input) when input == @no_input_marker, do: "" 160 | defp input_header(input), do: "\n##### With input #{input} #####" 161 | end 162 | -------------------------------------------------------------------------------- /lib/benchee/formatters/console/helpers.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Formatters.Console.Helpers do 2 | @moduledoc false 3 | 4 | # These are common functions shared between the formatting of the 
run time and 5 | # memory usage statistics. 6 | 7 | alias Benchee.Conversion.{Count, DeviationPercent, Format, Scale, Unit} 8 | alias Benchee.Scenario 9 | alias Benchee.Statistics 10 | 11 | @type unit_per_statistic :: %{atom => Unit.t()} 12 | 13 | # Length of column header 14 | @default_label_width 4 15 | 16 | @spec mode_out(Statistics.mode(), Benchee.Conversion.Unit.t()) :: String.t() 17 | def mode_out(modes, _run_time_unit) when is_nil(modes) do 18 | "None" 19 | end 20 | 21 | def mode_out(modes, run_time_unit) when is_list(modes) do 22 | Enum.map_join(modes, ", ", fn mode -> unit_output(mode, run_time_unit) end) 23 | end 24 | 25 | def mode_out(mode, run_time_unit) when is_number(mode) do 26 | unit_output(mode, run_time_unit) 27 | end 28 | 29 | defp unit_output(value, unit) do 30 | Format.format({Scale.scale(value, unit), unit}) 31 | end 32 | 33 | @spec label_width([Scenario.t()]) :: number 34 | def label_width(scenarios) do 35 | max_label_width = 36 | scenarios 37 | |> Enum.map(fn scenario -> String.length(scenario.name) end) 38 | |> Stream.concat([@default_label_width]) 39 | |> Enum.max() 40 | 41 | max_label_width + 1 42 | end 43 | 44 | @spec count_output(number, Count.units()) :: binary 45 | def count_output(count, unit) do 46 | Count.format({Count.scale(count, unit), unit}) 47 | end 48 | 49 | @spec deviation_output(number) :: binary 50 | def deviation_output(std_dev_ratio) do 51 | DeviationPercent.format(std_dev_ratio) 52 | end 53 | 54 | @spec descriptor(String.t()) :: String.t() 55 | def descriptor(header_str), do: "\n#{header_str}: \n" 56 | 57 | def format_comparison( 58 | name, 59 | statistics, 60 | display_value, 61 | comparison_name, 62 | display_unit, 63 | label_width, 64 | column_width 65 | ) do 66 | "~*s~*s ~ts" 67 | |> :io_lib.format([ 68 | -label_width, 69 | name, 70 | column_width, 71 | display_value, 72 | comparison_display(statistics, comparison_name, display_unit) 73 | ]) 74 | |> to_string 75 | end 76 | 77 | defp 
comparison_display(%Statistics{relative_more: nil, absolute_difference: nil}, _, _), do: "" 78 | 79 | defp comparison_display(statistics, comparison_name, unit) do 80 | "- #{comparison_text(statistics, comparison_name)} #{absolute_difference_text(statistics, unit)}\n" 81 | end 82 | 83 | defp comparison_text(%Statistics{relative_more: :infinity}, name), do: "∞ x #{name}" 84 | defp comparison_text(%Statistics{relative_more: nil}, _), do: "N/A" 85 | 86 | defp comparison_text(statistics, comparison_name) do 87 | "~.2fx ~s" 88 | |> :io_lib.format([statistics.relative_more, comparison_name]) 89 | |> to_string 90 | end 91 | 92 | defp absolute_difference_text(statistics, unit) do 93 | formatted_value = Format.format({Scale.scale(statistics.absolute_difference, unit), unit}) 94 | 95 | if statistics.absolute_difference >= 0 do 96 | "+#{formatted_value}" 97 | else 98 | formatted_value 99 | end 100 | end 101 | end 102 | -------------------------------------------------------------------------------- /lib/benchee/formatters/tagged_save.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Formatters.TaggedSave do 2 | @moduledoc """ 3 | Store the whole suite in the Erlang `ExternalTermFormat` while tagging the 4 | scenarios of the current run with a specified tag - can be used for storing 5 | and later loading the results of previous runs with `Benchee.ScenarioLoader`. 6 | 7 | Automatically configured as the last formatter to run when specifying the 8 | `save` option in the configuration - see `Benchee.Configuration`. 9 | """ 10 | 11 | @behaviour Benchee.Formatter 12 | 13 | alias Benchee.Scenario 14 | alias Benchee.Suite 15 | alias Benchee.Utility.FileCreation 16 | 17 | @doc """ 18 | Tags all scenario with the desired tag and returns it in term_to_binary along with save path. 
19 | """ 20 | @impl true 21 | @spec format(Suite.t(), map) :: {binary, String.t()} 22 | def format(suite = %Suite{scenarios: scenarios}, formatter_config) do 23 | tag = determine_tag(scenarios, formatter_config) 24 | tagged_scenarios = tag_scenarios(scenarios, tag) 25 | tagged_suite = %Suite{suite | scenarios: tagged_scenarios} 26 | 27 | {:erlang.term_to_binary(tagged_suite), formatter_config.path} 28 | end 29 | 30 | defp determine_tag(scenarios, %{tag: desired_tag}) do 31 | scenarios 32 | |> Enum.map(fn scenario -> scenario.tag end) 33 | |> Enum.uniq() 34 | |> Enum.filter(fn tag -> 35 | tag != nil && tag =~ ~r/#{Regex.escape(desired_tag)}/ 36 | end) 37 | |> choose_tag(desired_tag) 38 | end 39 | 40 | defp choose_tag([], desired_tag), do: desired_tag 41 | 42 | defp choose_tag(tags, desired_tag) do 43 | max = get_maximum_tag_increaser(tags, desired_tag) 44 | "#{desired_tag}-#{max + 1}" 45 | end 46 | 47 | defp get_maximum_tag_increaser(tags, desired_tag) do 48 | tags 49 | |> Enum.map(fn tag -> String.replace(tag, ~r/#{Regex.escape(desired_tag)}-?/, "") end) 50 | |> Enum.map(&tag_increaser/1) 51 | |> Enum.max() 52 | end 53 | 54 | defp tag_increaser(""), do: 1 55 | defp tag_increaser(string_number), do: String.to_integer(string_number) 56 | 57 | defp tag_scenarios(scenarios, tag) do 58 | Enum.map(scenarios, fn scenario -> 59 | scenario 60 | |> tagged_scenario(tag) 61 | |> update_name 62 | end) 63 | end 64 | 65 | defp tagged_scenario(scenario = %Scenario{tag: nil}, desired_tag) do 66 | %Scenario{scenario | tag: desired_tag} 67 | end 68 | 69 | defp tagged_scenario(scenario, _desired_tag) do 70 | scenario 71 | end 72 | 73 | defp update_name(scenario) do 74 | %Scenario{scenario | name: Scenario.display_name(scenario)} 75 | end 76 | 77 | @doc """ 78 | Writes the binary returned by `format/2` to the indicated location, telling you where that is. 
79 |   """
80 |   @spec write({binary, String.t()}, map) :: :ok
81 |   @impl true
82 |   def write({term_binary, filename}, _) do
83 |     FileCreation.ensure_directory_exists(filename)
84 |     return_value = File.write(filename, term_binary)
85 | 
86 |     IO.puts("Suite saved in external term format at #{filename}")
87 | 
88 |     return_value
89 |   end
90 | end
91 | 
--------------------------------------------------------------------------------
/lib/benchee/output/benchmark_printer.ex:
--------------------------------------------------------------------------------
1 | defmodule Benchee.Output.BenchmarkPrinter do
2 |   @moduledoc false
3 | 
4 |   alias Benchee.Benchmark
5 |   alias Benchee.Conversion.Duration
6 |   alias Benchee.System
7 | 
8 |   @doc """
9 |   Shown when you try benchmark an evaluated function.
10 | 
11 |   Compiled functions should be preferred as they are less likely to introduce additional overhead to your benchmark timing.
12 |   """
13 |   def evaluated_function_warning(job_name) do
14 |     IO.puts("""
15 |     Warning: the benchmark #{job_name} is using an evaluated function.
16 |     Evaluated functions perform slower than compiled functions.
17 |     You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
18 |     Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs
19 |     """)
20 |   end
21 | 
22 |   @doc """
23 |   Shown when you try to define a benchmark with the same name twice.
24 | 
25 |   How would you want to discern those anyhow?
26 |   """
27 |   def duplicate_benchmark_warning(name) do
28 |     IO.puts(
29 |       "You already have a job defined with the name \"#{name}\", you can't add two jobs with the same name!"
30 |     )
31 |   end
32 | 
33 |   @doc """
34 |   Prints general information such as system information and estimated
35 |   benchmarking time.
36 | """ 37 | def configuration_information(%{configuration: %{print: %{configuration: false}}}) do 38 | nil 39 | end 40 | 41 | def configuration_information(%{scenarios: scenarios, system: sys, configuration: config}) do 42 | system_information(sys) 43 | suite_information(scenarios, config) 44 | end 45 | 46 | defp system_information(%System{ 47 | erlang: erlang_version, 48 | elixir: elixir_version, 49 | jit_enabled?: jit_enabled?, 50 | os: os, 51 | num_cores: num_cores, 52 | cpu_speed: cpu_speed, 53 | available_memory: available_memory 54 | }) do 55 | IO.puts(""" 56 | Operating System: #{os} 57 | CPU Information: #{cpu_speed} 58 | Number of Available Cores: #{num_cores} 59 | Available memory: #{available_memory} 60 | Elixir #{elixir_version} 61 | Erlang #{erlang_version} 62 | JIT enabled: #{jit_enabled?} 63 | """) 64 | end 65 | 66 | defp suite_information(scenarios, %{ 67 | parallel: parallel, 68 | time: time, 69 | warmup: warmup, 70 | inputs: inputs, 71 | memory_time: memory_time, 72 | reduction_time: reduction_time 73 | }) do 74 | scenario_count = length(scenarios) 75 | exec_time = warmup + time + memory_time + reduction_time 76 | total_time = scenario_count * exec_time 77 | 78 | IO.puts(""" 79 | Benchmark suite executing with the following configuration: 80 | warmup: #{Duration.format_human(warmup)} 81 | time: #{Duration.format_human(time)} 82 | memory time: #{Duration.format_human(memory_time)} 83 | reduction time: #{Duration.format_human(reduction_time)} 84 | parallel: #{parallel} 85 | inputs: #{inputs_out(inputs)} 86 | Estimated total run time: #{Duration.format_human(total_time)} 87 | """) 88 | end 89 | 90 | defp inputs_out(nil), do: "none specified" 91 | 92 | defp inputs_out(inputs) do 93 | Enum.map_join(inputs, ", ", fn {name, _} -> name end) 94 | end 95 | 96 | @doc """ 97 | Prints a notice which job is currently being benchmarked. 
98 | """ 99 | def benchmarking(_, _, %{print: %{benchmarking: false}}), do: nil 100 | 101 | def benchmarking(name, input_name, config) do 102 | time_configs = [config.time, config.warmup, config.memory_time, config.reduction_time] 103 | 104 | if Enum.all?(time_configs, fn time -> time == 0 end) do 105 | nil 106 | else 107 | IO.puts("Benchmarking #{name}#{input_information(input_name)} ...") 108 | end 109 | end 110 | 111 | @no_input Benchmark.no_input() 112 | defp input_information(@no_input), do: "" 113 | defp input_information(input_name), do: " with input #{input_name}" 114 | 115 | @doc """ 116 | Prints a warning about accuracy of benchmarks when the function is super fast. 117 | """ 118 | def fast_warning do 119 | IO.puts(""" 120 | Warning: The function you are trying to benchmark is super fast, making measurements more unreliable! 121 | This holds especially true for memory measurements or when running with hooks. 122 | 123 | See: https://github.com/bencheeorg/benchee/wiki/Benchee-Warnings#fast-execution-warning 124 | 125 | You may disable this warning by passing print: [fast_warning: false] as configuration options. 126 | """) 127 | end 128 | 129 | @doc """ 130 | Print the measured function call overhead. 131 | """ 132 | @spec function_call_overhead(non_neg_integer()) :: :ok 133 | def function_call_overhead(overhead) do 134 | scaled_overhead = Duration.scale(overhead) 135 | 136 | IO.puts("Measured function call overhead as: #{Duration.format(scaled_overhead)}") 137 | end 138 | end 139 | -------------------------------------------------------------------------------- /lib/benchee/output/profile_printer.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Output.ProfilePrinter do 2 | @moduledoc false 3 | 4 | @doc """ 5 | Prints a notification of which job is being profiled. 
6 | """ 7 | def profiling(name, profiler) do 8 | IO.puts("\nProfiling #{name} with #{profiler}...") 9 | end 10 | end 11 | -------------------------------------------------------------------------------- /lib/benchee/output/progress_printer.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Output.ProgressPrinter do 2 | @moduledoc false 3 | 4 | def calculating_statistics(%{print: %{benchmarking: false}}), do: nil 5 | 6 | def calculating_statistics(_config) do 7 | IO.puts("Calculating statistics...") 8 | end 9 | 10 | def formatting(%{print: %{benchmarking: false}}), do: nil 11 | 12 | def formatting(_config) do 13 | IO.puts("Formatting results...") 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /lib/benchee/profile.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Profile do 2 | @moduledoc """ 3 | Profiles each scenario after benchmarking them if the `profile_after` option is either set to: 4 | * `true`, 5 | * a valid `profiler`, 6 | * a tuple of a valid `profiler` and a list of options to pass to it, e.g., `{:fprof, [sort: :own]}`. 7 | 8 | The profiler that will be used is either the one set by the `profiler_after` option or, if set to `true`, 9 | the default one (`:eprof`). It accepts however the following profilers: 10 | * `:cprof` will profile with [`Mix.Task.Profile.Cprof`](https://hexdocs.pm/mix/Mix.Tasks.Profile.Cprof.html). 11 | It provides information related to the number of function calls. 12 | * `:eprof` will profile with [`Mix.Task.Profile.Eprof`](https://hexdocs.pm/mix/Mix.Tasks.Profile.Eprof.html). 13 | It provides information related to the time spent on each function in regard to the total execution time. 14 | * `:fprof` will profile with [`Mix.Task.Profile.Fprof`](https://hexdocs.pm/mix/Mix.Tasks.Profile.Cprof.html). 
15 | It provides information related to the time spent on each function, both the *total* time spent on it and the time spent on it, 16 | *excluding* the time of called functions. 17 | """ 18 | 19 | alias Benchee.Benchmark.BenchmarkConfig 20 | alias Benchee.Benchmark.Runner 21 | alias Benchee.Benchmark.ScenarioContext 22 | alias Benchee.Output.ProfilePrinter, as: Printer 23 | alias Benchee.Suite 24 | 25 | @default_profiler :eprof 26 | @builtin_profilers [:cprof, :eprof, :fprof] 27 | # https://hexdocs.pm/mix/1.17.0/Mix.Tasks.Profile.Tprof.html 28 | # Tprof was introduced in elixir 1.17.0 and requires OTP 27 29 | # Elixir errors out fine, but our test kind of put their trust into `builtin_profilers` 30 | # being runnable. 31 | if Version.match?(System.version(), ">= 1.17.0") && 32 | String.to_integer(System.otp_release()) >= 27 do 33 | @builtin_profilers [:tprof | @builtin_profilers] 34 | end 35 | 36 | # we run the function a bunch already, no need for further warmup 37 | @default_profiler_opts [warmup: false] 38 | 39 | @doc """ 40 | Returns the atom corresponding to the default profiler. 41 | """ 42 | @spec default_profiler() :: unquote(@default_profiler) 43 | def default_profiler, do: @default_profiler 44 | 45 | @doc """ 46 | List of supported builtin profilers as atoms. 47 | """ 48 | def builtin_profilers, do: @builtin_profilers 49 | 50 | @doc """ 51 | Runs for each scenario found in the suite the `profile/2` function from the given profiler. 
52 | """ 53 | @spec profile(Suite.t(), module) :: Suite.t() 54 | def profile(suite, printer \\ Printer) 55 | def profile(suite = %{configuration: %{profile_after: false}}, _printer), do: suite 56 | 57 | def profile( 58 | suite = %{ 59 | scenarios: scenarios, 60 | configuration: config = %{profile_after: true} 61 | }, 62 | printer 63 | ) do 64 | do_profile(scenarios, {@default_profiler, @default_profiler_opts}, config, printer) 65 | 66 | suite 67 | end 68 | 69 | def profile( 70 | suite = %{ 71 | scenarios: scenarios, 72 | configuration: config = %{profile_after: {profiler, profiler_opts}} 73 | }, 74 | printer 75 | ) do 76 | profiler_opts = Keyword.merge(@default_profiler_opts, profiler_opts) 77 | do_profile(scenarios, {profiler, profiler_opts}, config, printer) 78 | 79 | suite 80 | end 81 | 82 | def profile( 83 | suite = %{ 84 | scenarios: scenarios, 85 | configuration: config = %{profile_after: profiler} 86 | }, 87 | printer 88 | ) do 89 | do_profile(scenarios, {profiler, @default_profiler_opts}, config, printer) 90 | 91 | suite 92 | end 93 | 94 | defp do_profile(scenarios, {profiler, profiler_opts}, config, printer) do 95 | profiler_module = profiler_to_module(profiler) 96 | 97 | Enum.each(scenarios, fn scenario -> 98 | run(scenario, {profiler, profiler_module, profiler_opts}, config, printer) 99 | end) 100 | end 101 | 102 | defp run( 103 | scenario, 104 | {profiler, profiler_module, profiler_opts}, 105 | config, 106 | printer 107 | ) do 108 | printer.profiling(scenario.name, profiler) 109 | 110 | profiler_module.profile( 111 | fn -> 112 | Runner.run_once(scenario, %ScenarioContext{config: BenchmarkConfig.from(config)}) 113 | end, 114 | profiler_opts 115 | ) 116 | end 117 | 118 | # If given a builtin profiler the function will return its proper module. 119 | # In the case of an unknown profiler, it will raise an `UnknownProfilerError` exception. 
120 | defp profiler_to_module(profiler) do 121 | if Enum.member?(@builtin_profilers, profiler) do 122 | profiler = 123 | profiler 124 | |> Atom.to_string() 125 | |> String.capitalize() 126 | 127 | Module.concat(Mix.Tasks.Profile, profiler) 128 | else 129 | raise Benchee.UnknownProfilerError, 130 | message: "Got an unknown '#{inspect(profiler)}' built-in profiler." 131 | end 132 | end 133 | end 134 | -------------------------------------------------------------------------------- /lib/benchee/relative_statistics.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.RelativeStatistics do 2 | @moduledoc """ 3 | Statistics that are relative from one scenario to another. 4 | 5 | Such as how much slower/faster something is or what the absolute difference is in the measured 6 | values. 7 | Is its own step because it has to be executed after scenarios have been loaded via 8 | `Benchee.ScenarioLoader` to include them in the calculation, while `Benchee.Statistics` 9 | has to happen before they are loaded to avoid recalculating their statistics. 10 | """ 11 | 12 | alias Benchee.{Scenario, Statistics, Suite} 13 | 14 | @doc """ 15 | Calculate the statistics of scenarios relative to each other and sorts scenarios. 16 | 17 | Such as `relative_more`, `relative_less` and `absolute_difference`, 18 | see `t:Benchee.Statistics.t/0` for more. 19 | 20 | The sorting of scenarios is important so that they always have the same order in 21 | all formatters. Scenarios are sorted first by run time average, then by memory average. 
22 | """ 23 | @spec relative_statistics(Suite.t()) :: Suite.t() 24 | def relative_statistics(suite) do 25 | %Suite{suite | scenarios: calculate_relative_statistics(suite.scenarios)} 26 | end 27 | 28 | defp calculate_relative_statistics([]), do: [] 29 | 30 | defp calculate_relative_statistics(scenarios) do 31 | scenarios 32 | |> scenarios_by_input() 33 | |> Enum.flat_map(fn scenarios_with_same_input -> 34 | sorted_scenarios = sort(scenarios_with_same_input) 35 | {reference, others} = split_reference_scenario(sorted_scenarios) 36 | others_with_relative = statistics_relative_to(others, reference) 37 | [reference | others_with_relative] 38 | end) 39 | end 40 | 41 | @spec sort([Scenario.t()]) :: [Scenario.t()] 42 | defp sort(scenarios) do 43 | Enum.sort_by(scenarios, fn scenario -> 44 | {scenario.run_time_data.statistics.average, scenario.memory_usage_data.statistics.average, 45 | scenario.reductions_data.statistics.average} 46 | end) 47 | end 48 | 49 | # we can't just group_by `input_name` because that'd lose the order of inputs which might 50 | # be important 51 | # Thanks past Tobi, I was just about to do that. 
- Tobi, 5 years later 52 | defp scenarios_by_input(scenarios) do 53 | original_input_order = scenarios |> Enum.map(& &1.input_name) |> Enum.uniq() 54 | 55 | grouped_scenarios = Enum.group_by(scenarios, & &1.input_name) 56 | 57 | Enum.map(original_input_order, fn input_name -> 58 | Map.fetch!(grouped_scenarios, input_name) 59 | end) 60 | end 61 | 62 | # right now we take the first scenario as we sorted them and it is the fastest, 63 | # whenever we implement #179 though this becomes more involved 64 | defp split_reference_scenario(scenarios) do 65 | [reference | others] = scenarios 66 | {reference, others} 67 | end 68 | 69 | defp statistics_relative_to(scenarios, reference) do 70 | Enum.map(scenarios, fn scenario -> 71 | scenario 72 | |> update_in([Access.key!(:run_time_data), Access.key!(:statistics)], fn statistics -> 73 | add_relative_statistics(statistics, reference.run_time_data.statistics) 74 | end) 75 | |> update_in([Access.key!(:memory_usage_data), Access.key!(:statistics)], fn statistics -> 76 | add_relative_statistics(statistics, reference.memory_usage_data.statistics) 77 | end) 78 | |> update_in([Access.key!(:reductions_data), Access.key!(:statistics)], fn statistics -> 79 | add_relative_statistics(statistics, reference.reductions_data.statistics) 80 | end) 81 | end) 82 | end 83 | 84 | # we might not run time/memory --> we shouldn't crash then ;) 85 | defp add_relative_statistics(statistics = %{average: nil}, _reference), do: statistics 86 | defp add_relative_statistics(statistics, %{average: nil}), do: statistics 87 | 88 | defp add_relative_statistics(statistics, reference_statistics) do 89 | %Statistics{ 90 | statistics 91 | | relative_more: zero_safe_division(statistics.average, reference_statistics.average), 92 | relative_less: zero_safe_division(reference_statistics.average, statistics.average), 93 | absolute_difference: statistics.average - reference_statistics.average 94 | } 95 | end 96 | 97 | defp zero_safe_division(+0.0, +0.0), do: 1.0 98 | defp 
zero_safe_division(_, 0), do: :infinity 99 | defp zero_safe_division(_, +0.0), do: :infinity 100 | defp zero_safe_division(a, b), do: a / b 101 | end 102 | -------------------------------------------------------------------------------- /lib/benchee/scenario_loader.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.ScenarioLoader do 2 | @moduledoc """ 3 | Load scenarios that were saved using the saved option to be included. 4 | 5 | Usually this is done right before the formatters run (that's when it happens 6 | in `Benchee.run/2`) as all measurements and statistics should be there. 7 | However, if you want to recompute statistics or others you can load them at 8 | any time. Just be aware that if you load them before `Benchee.collect/1` then 9 | they'll be rerun and measurements overridden. 10 | """ 11 | 12 | alias Benchee.Suite 13 | 14 | @doc """ 15 | Load the file(s) specified as `load_path` and add the scenarios to the list of the 16 | current scenarios in the suite. 
17 | """ 18 | def load(suite = %{configuration: %{load: load_path}, scenarios: scenarios}) do 19 | loaded = load_scenarios(load_path) 20 | %Suite{suite | scenarios: scenarios ++ loaded} 21 | end 22 | 23 | defp load_scenarios(false), do: [] 24 | defp load_scenarios(path) when is_binary(path), do: load_scenarios([path]) 25 | 26 | defp load_scenarios(paths) do 27 | Enum.flat_map(paths, fn path_or_glob -> 28 | Enum.flat_map(Path.wildcard(path_or_glob), &load_scenario/1) 29 | end) 30 | end 31 | 32 | defp load_scenario(path) do 33 | loaded_suite = 34 | path 35 | |> File.read!() 36 | |> :erlang.binary_to_term() 37 | 38 | loaded_suite.scenarios 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /lib/benchee/suite.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Suite do 2 | @moduledoc """ 3 | Main Benchee data structure that aggregates the results from every step. 4 | 5 | Different layers of the benchmarking rely on different data being present 6 | here. For instance for `Benchee.Statistics.statistics/1` to work the 7 | `run_time_data` key of each scenario needs to be filled with the samples 8 | collected by `Benchee.Benchmark.collect/1`. 9 | 10 | Formatters can then use the data to display all of the results and the 11 | configuration. 12 | """ 13 | defstruct [ 14 | :system, 15 | configuration: %Benchee.Configuration{}, 16 | scenarios: [] 17 | ] 18 | 19 | @typedoc """ 20 | Valid key for either input or benchmarking job names. 21 | """ 22 | @type key :: String.t() | atom 23 | 24 | @typedoc """ 25 | The main suite consisting of the configuration data, information about the system and most 26 | importantly a list of `t:Benchee.Scenario.t/0`. 
27 | """ 28 | @type t :: %__MODULE__{ 29 | configuration: Benchee.Configuration.t() | nil, 30 | system: Benchee.System.t() | nil, 31 | scenarios: [] | [Benchee.Scenario.t()] 32 | } 33 | end 34 | 35 | defimpl DeepMerge.Resolver, for: Benchee.Suite do 36 | def resolve(original, override = %Benchee.Suite{}, resolver) do 37 | cleaned_override = 38 | override 39 | |> Map.from_struct() 40 | |> Enum.reject(fn {_key, value} -> is_nil(value) end) 41 | |> Map.new() 42 | 43 | Map.merge(original, cleaned_override, resolver) 44 | end 45 | 46 | def resolve(original, override, resolver) when is_map(override) do 47 | Map.merge(original, override, resolver) 48 | end 49 | end 50 | 51 | if Code.ensure_loaded?(Table.Reader) do 52 | defimpl Table.Reader, for: Benchee.Suite do 53 | alias Benchee.CollectionData 54 | alias Benchee.Scenario 55 | 56 | def init(suite) do 57 | measurements_processed = map_measurements_processed(suite) 58 | columns = get_columns_from_suite(suite, measurements_processed) 59 | {rows, count} = extract_rows_from_suite(suite, measurements_processed) 60 | 61 | {:rows, %{columns: columns, count: count}, rows} 62 | end 63 | 64 | defp map_measurements_processed(suite) do 65 | Enum.filter(Scenario.measurement_types(), fn type -> 66 | Enum.any?(suite.scenarios, fn scenario -> Scenario.data_processed?(scenario, type) end) 67 | end) 68 | end 69 | 70 | @run_time_fields [ 71 | "samples", 72 | "ips", 73 | "average", 74 | "std_dev", 75 | "median", 76 | "minimum", 77 | "maximum", 78 | "mode", 79 | "sample_size" 80 | ] 81 | 82 | @non_run_time_fields List.delete(@run_time_fields, "ips") 83 | 84 | defp get_columns_from_suite(suite, measurements_processed) do 85 | config_percentiles = suite.configuration.percentiles 86 | 87 | percentile_labels = 88 | Enum.map(config_percentiles, fn percentile -> 89 | "p_#{percentile}" 90 | end) 91 | 92 | measurement_headers = 93 | Enum.flat_map(measurements_processed, fn measurement_type -> 94 | fields = fields_for(measurement_type) ++ 
percentile_labels 95 | 96 | Enum.map(fields, fn field -> "#{measurement_type}_#{field}" end) 97 | end) 98 | 99 | ["job_name" | measurement_headers] 100 | end 101 | 102 | defp fields_for(:run_time), do: @run_time_fields 103 | defp fields_for(_), do: @non_run_time_fields 104 | 105 | defp extract_rows_from_suite(suite, measurements_processed) do 106 | config_percentiles = suite.configuration.percentiles 107 | 108 | Enum.map_reduce(suite.scenarios, 0, fn %Scenario{} = scenario, count -> 109 | secenario_data = 110 | Enum.flat_map(measurements_processed, fn measurement_type -> 111 | scenario 112 | |> Scenario.measurement_data(measurement_type) 113 | |> get_stats_from_collection_data(measurement_type, config_percentiles) 114 | end) 115 | 116 | row = [scenario.job_name | secenario_data] 117 | 118 | {row, count + 1} 119 | end) 120 | end 121 | 122 | defp get_stats_from_collection_data( 123 | %CollectionData{statistics: statistics, samples: samples}, 124 | measurement_type, 125 | percentiles 126 | ) do 127 | percentile_data = 128 | Enum.map(percentiles, fn percentile -> statistics.percentiles[percentile] end) 129 | 130 | Enum.concat([ 131 | [samples], 132 | maybe_ips(statistics, measurement_type), 133 | [ 134 | statistics.average, 135 | statistics.std_dev, 136 | statistics.median, 137 | statistics.minimum, 138 | statistics.maximum, 139 | statistics.mode, 140 | statistics.sample_size 141 | ], 142 | percentile_data 143 | ]) 144 | end 145 | 146 | defp maybe_ips(statistics, :run_time), do: [statistics.ips] 147 | defp maybe_ips(_, _not_run_time), do: [] 148 | end 149 | end 150 | -------------------------------------------------------------------------------- /lib/benchee/utility/deep_convert.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.DeepConvert do 2 | @moduledoc false 3 | 4 | @doc """ 5 | Converts a deep keyword list to the corresponding deep map. 
6 | 7 | Exclusions can be provided for key names whose value should not be converted. 8 | 9 | ## Examples 10 | 11 | iex> to_map(a: 1, b: 2) 12 | %{a: 1, b: 2} 13 | 14 | iex> to_map(a: [b: 2], c: [d: 3, e: 4, e: 5]) 15 | %{a: %{b: 2}, c: %{d: 3, e: 5}} 16 | 17 | iex> to_map(a: [b: 2], c: [1, 2, 3], d: []) 18 | %{a: %{b: 2}, c: [1, 2, 3], d: []} 19 | 20 | iex> to_map(%{a: %{b: 2}, c: %{d: 3, e: 5}}) 21 | %{a: %{b: 2}, c: %{d: 3, e: 5}} 22 | 23 | iex> to_map([]) 24 | %{} 25 | 26 | iex> to_map([a: [b: [f: 5]]], [:a]) 27 | %{a: [b: [f: 5]]} 28 | 29 | iex> to_map([a: [b: [f: 5]], c: [d: 3]], [:b]) 30 | %{a: %{b: [f: 5]}, c: %{d: 3}} 31 | """ 32 | def to_map(structure, exclusions \\ []) 33 | def to_map([], _exclusions), do: %{} 34 | def to_map(structure, exclusions), do: do_to_map(structure, exclusions) 35 | 36 | defp do_to_map(kwlist = [{_key, _value} | _tail], exclusions) do 37 | kwlist 38 | |> Enum.map(fn tuple -> to_map_element(tuple, exclusions) end) 39 | |> Map.new() 40 | end 41 | 42 | defp do_to_map(no_list, _exclusions), do: no_list 43 | 44 | defp to_map_element({key, value}, exclusions) do 45 | if Enum.member?(exclusions, key) do 46 | {key, value} 47 | else 48 | {key, do_to_map(value, exclusions)} 49 | end 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /lib/benchee/utility/erlang_version.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.ErlangVersion do 2 | @moduledoc false 3 | 4 | # Internal module to deal with erlang version parsing oddity 5 | 6 | @doc """ 7 | Was the given version before the reference version? 8 | 9 | Used to check if a bugfix has already landed. 10 | 11 | Applies some manual massaging, as erlang likes to report versions number not compatible with 12 | SemVer. If we can't parse the version, to minimize false positives, we assume it's newer. 13 | 14 | Only the `version_to_check` is treated loosely. 
The `reference_version` must be SemVer compatible, 15 |   as it is assumed to be provided by project maintainers. 16 | 17 |   ## Examples 18 | 19 |       iex> includes_fixes_from?("22.0.0", "22.0.0") 20 |       true 21 | 22 |       iex> includes_fixes_from?("22.0.1", "22.0.0") 23 |       true 24 | 25 |       iex> includes_fixes_from?("22.0.0", "22.0.1") 26 |       false 27 | 28 |       iex> includes_fixes_from?("22.0.4", "22.0.5") 29 |       false 30 | 31 |       iex> includes_fixes_from?("22.0.4", "22.0.4") 32 |       true 33 | 34 |       iex> includes_fixes_from?("22.0.5", "22.0.4") 35 |       true 36 | 37 |       iex> includes_fixes_from?("21.999.9999", "22.0.0") 38 |       false 39 | 40 |       iex> includes_fixes_from?("23.0.0", "22.0.0") 41 |       true 42 | 43 |       # weird longer version numbers work 44 |       iex> includes_fixes_from?("22.0.0.0", "22.0.0") 45 |       true 46 | 47 |       iex> includes_fixes_from?("22.0.0.14", "22.0.0") 48 |       true 49 | 50 |       iex> includes_fixes_from?("23.3.5.14", "22.0.0") 51 |       true 52 | 53 |       iex> includes_fixes_from?("21.3.5.14", "22.0.0") 54 |       false 55 | 56 |       # weird shorter version numbers work 57 |       iex> includes_fixes_from?("22.0", "22.0.0") 58 |       true 59 | 60 |       iex> includes_fixes_from?("22.0", "22.0.1") 61 |       false 62 | 63 |       iex> includes_fixes_from?("22.1", "22.0.0") 64 |       true 65 | 66 |       iex> includes_fixes_from?("21.3", "22.0.0") 67 |       false 68 | 69 |       # rc version numbers work 70 |       iex> includes_fixes_from?("22.0-rc3", "22.0.0") 71 |       false 72 |       iex> includes_fixes_from?("23.0-rc0", "22.0.0") 73 |       true 74 | 75 |       # since we are falling back to general OTP versions now, test those as well 76 |       iex> includes_fixes_from?("21", "22.0.0") 77 |       false 78 |       iex> includes_fixes_from?("22", "22.0.0") 79 |       true 80 |       iex> includes_fixes_from?("23.0", "22.0.0") 81 |       true 82 | 83 |       # completely broken versions are assumed to be good to avoid false positives 84 |       # as this is not a main functionality but code to potentially work around an older erlang 85 |       # bug. 
86 | iex> includes_fixes_from?("super erlang", "22.0.0") 87 | true 88 | iex> includes_fixes_from?("", "22.0.0") 89 | true 90 | """ 91 | def includes_fixes_from?(version_to_check, reference_version) do 92 | erlang_version = parse_erlang_version(version_to_check) 93 | 94 | case erlang_version do 95 | {:ok, version} -> Version.compare(version, reference_version) != :lt 96 | # we do not know which version this is, so don't trust it? 97 | _ -> true 98 | end 99 | end 100 | 101 | # `Version` only supports full SemVer, Erlang loves version numbers like `22.3.4.24` or `22.0` 102 | # which makes `Version` error out so we gotta manually alter them so that it's `22.3.4` 103 | defp parse_erlang_version(erlang_version) do 104 | last_version_segment = ~r/\.\d+$/ 105 | 106 | # dot count is a heuristic but it should work 107 | dot_count = 108 | erlang_version 109 | |> String.graphemes() 110 | |> Enum.count(&(&1 == ".")) 111 | 112 | version = 113 | case dot_count do 114 | 3 -> Regex.replace(last_version_segment, erlang_version, "") 115 | 1 -> deal_with_major_minor(erlang_version) 116 | 0 -> "#{erlang_version}.0.0" 117 | _ -> erlang_version 118 | end 119 | 120 | Version.parse(version) 121 | end 122 | 123 | # Only major/minor seem to get the rc treatment 124 | # but if it is major/minor/patch `Version` handles it correctly. 125 | # For the 4 digit versions we don't really care right now/normally does not happen. 
126 | defp deal_with_major_minor(erlang_version) do 127 | # -rc and other weird versions contain - 128 | if String.contains?(erlang_version, "-") do 129 | String.replace(erlang_version, "-", ".0-") 130 | else 131 | "#{erlang_version}.0" 132 | end 133 | end 134 | end 135 | -------------------------------------------------------------------------------- /lib/benchee/utility/file_creation.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.FileCreation do 2 | @moduledoc """ 3 | Methods to easily handle file creation used in plugins. 4 | """ 5 | 6 | alias Benchee.Benchmark 7 | 8 | @doc """ 9 | Open a file for write for all key/value pairs, interleaves the file name with 10 | the key and calls the given function with file, content and filename. 11 | 12 | Uses `interleave/2` to get the base filename and 13 | the given keys together to one nice file name, then creates these files and 14 | calls the function with the file and the content from the given map so that 15 | data can be written to the file. 16 | 17 | If a directory is specified, it creates the directory. 18 | 19 | Expects: 20 | 21 | * names_to_content - a map from input name to contents that should go into 22 | the corresponding file 23 | * filename - the base file name as desired by the user 24 | * function - a function that is then called for every file with the associated 25 | file content from the map, defaults to just writing the file content via 26 | `IO.write/2` and printing where it put the file. 
27 | 28 | ## Examples 29 | 30 | # Just writes the contents to a file 31 | Benchee.Utility.FileCreation.each(%{"My Input" => "_awesome html content_"}, 32 | "my.html", 33 | fn(file, content) -> IO.write(file, content) end) 34 | """ 35 | @spec each(%{(String.t() | list(String.t())) => String.t()}, String.t(), fun()) :: :ok 36 | def each(names_to_content, filename, function \\ &default_each/3) do 37 | ensure_directory_exists(filename) 38 | 39 | Enum.each(names_to_content, fn {input_name, content} -> 40 | input_filename = interleave(filename, input_name) 41 | 42 | File.open!(input_filename, [:write, :utf8], fn file -> 43 | function.(file, content, input_filename) 44 | end) 45 | end) 46 | end 47 | 48 | defp default_each(file, content, input_filename) do 49 | :ok = IO.write(file, content) 50 | IO.puts("Generated #{input_filename}") 51 | end 52 | 53 | @doc """ 54 | Make sure the directory for the given file name exists. 55 | """ 56 | def ensure_directory_exists(filename) do 57 | directory = Path.dirname(filename) 58 | File.mkdir_p!(directory) 59 | end 60 | 61 | @doc """ 62 | Gets file name/path, the input name and others together. 63 | 64 | Takes a list of values to interleave or just a single value. 65 | Handles the special no_input key to do no work at all. 
66 | 67 | ## Examples 68 | 69 | iex> interleave("abc.csv", "hello") 70 | "abc_hello.csv" 71 | 72 | iex> interleave("abc.csv", "Big Input") 73 | "abc_big_input.csv" 74 | 75 | iex> interleave("abc.csv", "String.length/1") 76 | "abc_string_length_1.csv" 77 | 78 | iex> interleave("bench/abc.csv", "Big Input") 79 | "bench/abc_big_input.csv" 80 | 81 | iex> interleave( 82 | ...> "bench/abc.csv", 83 | ...> ["Big Input"] 84 | ...> ) 85 | "bench/abc_big_input.csv" 86 | 87 | iex> interleave("abc.csv", []) 88 | "abc.csv" 89 | 90 | iex> interleave( 91 | ...> "bench/abc.csv", 92 | ...> ["Big Input", "Comparison"] 93 | ...> ) 94 | "bench/abc_big_input_comparison.csv" 95 | 96 | iex> interleave( 97 | ...> "bench/A B C.csv", 98 | ...> ["Big Input", "Comparison"] 99 | ...> ) 100 | "bench/A B C_big_input_comparison.csv" 101 | 102 | iex> interleave( 103 | ...> "bench/abc.csv", 104 | ...> ["Big Input", "Comparison", "great Stuff"] 105 | ...> ) 106 | "bench/abc_big_input_comparison_great_stuff.csv" 107 | 108 | iex> marker = Benchee.Benchmark.no_input() 109 | ...> interleave("abc.csv", marker) 110 | "abc.csv" 111 | iex> interleave("abc.csv", [marker]) 112 | "abc.csv" 113 | iex> interleave( 114 | ...> "abc.csv", 115 | ...> [marker, "Comparison"] 116 | ...> ) 117 | "abc_comparison.csv" 118 | iex> interleave( 119 | ...> "abc.csv", 120 | ...> ["Something cool", marker, "Comparison"] 121 | ...> ) 122 | "abc_something_cool_comparison.csv" 123 | """ 124 | @spec interleave(String.t(), String.t() | list(String.t())) :: String.t() 125 | def interleave(filename, names) when is_list(names) do 126 | file_names = 127 | names 128 | |> Enum.map(&to_filename/1) 129 | |> prepend(Path.rootname(filename)) 130 | |> Enum.reject(fn string -> String.length(string) < 1 end) 131 | |> Enum.join("_") 132 | 133 | file_names <> Path.extname(filename) 134 | end 135 | 136 | def interleave(filename, name) do 137 | interleave(filename, [name]) 138 | end 139 | 140 | defp prepend(list, item) do 141 | [item | list] 142 | end 
143 | 144 | defp to_filename(name_string) do 145 | no_input = Benchmark.no_input() 146 | 147 | case name_string do 148 | ^no_input -> 149 | "" 150 | 151 | _ -> 152 | String.downcase(String.replace(name_string, ~r/[^0-9A-Z]/i, "_")) 153 | end 154 | end 155 | end 156 | -------------------------------------------------------------------------------- /lib/benchee/utility/parallel.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.Parallel do 2 | @moduledoc false 3 | 4 | @doc """ 5 | A utility function for mapping over an enumerable collection in parallel. 6 | 7 | Take note that this spawns a process for every element in the collection 8 | which is only advisable if the function does some heavy lifting. 9 | """ 10 | @spec map(Enum.t(), fun) :: list 11 | def map(collection, func) do 12 | collection 13 | |> Enum.map(fn element -> Task.async(fn -> func.(element) end) end) 14 | |> Enum.map(&Task.await(&1, :infinity)) 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /lib/benchee/utility/repeat_n.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.RepeatN do 2 | @moduledoc false 3 | 4 | @doc """ 5 | Calls the given function n times. 
6 | """ 7 | def repeat_n(_function, 0) do 8 | # noop 9 | end 10 | 11 | def repeat_n(function, 1) do 12 | function.() 13 | end 14 | 15 | def repeat_n(function, count) do 16 | function.() 17 | repeat_n(function, count - 1) 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Mixfile do 2 | use Mix.Project 3 | 4 | @source_url "https://github.com/bencheeorg/benchee" 5 | @version "1.4.0" 6 | 7 | def project do 8 | [ 9 | app: :benchee, 10 | version: @version, 11 | elixir: "~> 1.6", 12 | elixirc_paths: elixirc_paths(Mix.env()), 13 | consolidate_protocols: true, 14 | build_embedded: Mix.env() == :prod, 15 | start_permanent: Mix.env() == :prod, 16 | deps: deps(), 17 | docs: docs(), 18 | package: package(), 19 | test_coverage: [tool: ExCoveralls], 20 | preferred_cli_env: [ 21 | coveralls: :test, 22 | "coveralls.detail": :test, 23 | "coveralls.post": :test, 24 | "coveralls.html": :test, 25 | "coveralls.travis": :test, 26 | "safe_coveralls.travis": :test 27 | ], 28 | dialyzer: [ 29 | flags: [:underspecs], 30 | plt_file: {:no_warn, "tools/plts/benchee.plt"}, 31 | plt_add_apps: [:table] 32 | ], 33 | name: "Benchee", 34 | description: """ 35 | Versatile (micro) benchmarking that is extensible. Get statistics such as: 36 | average, iterations per second, standard deviation and the median. 
37 |     """ 38 |     ] 39 |   end 40 | 41 |   defp elixirc_paths(:test), do: ["lib", "test/support", "mix"] 42 |   defp elixirc_paths(_), do: ["lib"] 43 | 44 |   defp deps do 45 |     deps = [ 46 |       {:deep_merge, "~> 1.0"}, 47 |       {:statistex, "~> 1.0"}, 48 |       {:ex_guard, "~> 1.3", only: :dev}, 49 |       {:credo, "~> 1.7.7-rc.0", only: :dev, runtime: false}, 50 |       {:ex_doc, ">= 0.0.0", only: :dev, runtime: false}, 51 |       {:excoveralls, "~> 0.13", only: :test}, 52 |       {:dialyxir, "~> 1.0", only: :dev, runtime: false}, 53 |       {:doctest_formatter, "~> 0.2", only: :dev, runtime: false} 54 |     ] 55 | 56 |     # table relies on __STACKTRACE__ which was introduced in 1.7, we still support ~>1.6 though 57 |     # as it's optional, this does not affect the function of Benchee 58 |     if Version.compare(System.version(), "1.7.0") != :lt do 59 |       [{:table, "~> 0.1.0", optional: true} | deps] 60 |     else 61 |       deps 62 |     end 63 |   end 64 | 65 |   defp package do 66 |     [ 67 |       maintainers: ["Tobias Pfeiffer", "Devon Estes"], 68 |       licenses: ["MIT"], 69 |       links: %{ 70 |         "Changelog" => "https://hexdocs.pm/benchee/changelog.html", 71 |         "GitHub" => @source_url, 72 |         "Blog posts" => "https://pragtob.wordpress.com/tag/benchee/" 73 |       } 74 |     ] 75 |   end 76 | 77 |   defp docs do 78 |     [ 79 |       extras: [ 80 |         "CHANGELOG.md": [], 81 |         "LICENSE.md": [title: "License"], 82 |         "README.md": [title: "Readme"] 83 |       ], 84 |       main: "readme", 85 |       source_url: @source_url, 86 |       source_ref: @version, 87 |       api_reference: false, 88 |       skip_undefined_reference_warnings_on: ["CHANGELOG.md"] 89 |     ] 90 |   end 91 | end 92 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 |   "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, 3 |   "credo": {:hex, :credo, "1.7.12", 
"9e3c20463de4b5f3f23721527fcaf16722ec815e70ff6c60b86412c695d426c1", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8493d45c656c5427d9c729235b99d498bd133421f3e0a683e5c1b561471291e5"}, 4 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 5 | "dialyxir": {:hex, :dialyxir, "1.4.5", "ca1571ac18e0f88d4ab245f0b60fa31ff1b12cbae2b11bd25d207f865e8ae78a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b0fb08bb8107c750db5c0b324fa2df5ceaa0f9307690ee3c1f6ba5b9eb5d35c3"}, 6 | "doctest_formatter": {:hex, :doctest_formatter, "0.3.1", "a3fd87c1f75e8a78e7737ec4a4494800ddda705998a59320b87fe4c59c030794", [:mix], [], "hexpm", "3c092540d8b73ffc526a92daa2dc2ecd50714f14325eeacbc7b4e790f890443a"}, 7 | "earmark_parser": {:hex, :earmark_parser, "1.4.44", "f20830dd6b5c77afe2b063777ddbbff09f9759396500cdbe7523efd58d7a339c", [:mix], [], "hexpm", "4778ac752b4701a5599215f7030989c989ffdc4f6df457c5f36938cc2d2a2750"}, 8 | "erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"}, 9 | "ex_doc": {:hex, :ex_doc, "0.37.3", "f7816881a443cd77872b7d6118e8a55f547f49903aef8747dbcb345a75b462f9", [:mix], [{:earmark_parser, "~> 1.4.42", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: 
:makeup_html, repo: "hexpm", optional: true]}], "hexpm", "e6aebca7156e7c29b5da4daa17f6361205b2ae5f26e5c7d8ca0d3f7e18972233"}, 10 | "ex_guard": {:hex, :ex_guard, "1.6.1", "adcb30f379c8048b2cb1474052753adecfe9635b9db3bac1a4a4458c5826f735", [:mix], [{:fs, "~> 8.6.1", [hex: :fs, repo: "hexpm", optional: false]}], "hexpm", "14a4d261c9ce951f9ec2487730afc1a008c0f70c95b7fcc2f446b9d7a5346718"}, 11 | "excoveralls": {:hex, :excoveralls, "0.18.5", "e229d0a65982613332ec30f07940038fe451a2e5b29bce2a5022165f0c9b157e", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "523fe8a15603f86d64852aab2abe8ddbd78e68579c8525ae765facc5eae01562"}, 12 | "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"}, 13 | "fs": {:hex, :fs, "8.6.1", "7c9c0d0211e8c520e4e9eda63b960605c2711839f47285e6166c332d973be8ea", [:rebar3], [], "hexpm", "61ea2bdaedae4e2024d0d25c63e44dccf65622d4402db4a2df12868d1546503f"}, 14 | "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 15 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, 16 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.1", "e928a4f984e795e41e3abd27bfc09f51db16ab8ba1aebdba2b3a575437efafc2", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", 
"7284900d412a3e5cfd97fdaed4f5ed389b8f2b4cb49efc0eb3bd10e2febf9507"}, 17 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.2", "03e1804074b3aa64d5fad7aa64601ed0fb395337b982d9bcf04029d68d51b6a7", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "af33ff7ef368d5893e4a267933e7744e46ce3cf1f61e2dccf53a111ed3aa3727"}, 18 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.2", "8efba0122db06df95bfaa78f791344a89352ba04baedd3849593bfce4d0dc1c6", [:mix], [], "hexpm", "4b21398942dda052b403bbe1da991ccd03a053668d147d53fb8c4e0efe09c973"}, 19 | "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, 20 | "table": {:hex, :table, "0.1.2", "87ad1125f5b70c5dea0307aa633194083eb5182ec537efc94e96af08937e14a8", [:mix], [], "hexpm", "7e99bc7efef806315c7e65640724bf165c3061cdc5d854060f74468367065029"}, 21 | } 22 | -------------------------------------------------------------------------------- /run_samples.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | set -e 4 | 5 | for sample in samples/* 6 | do 7 | echo "running $sample" 8 | echo "" 9 | mix run "$sample" 10 | echo "" 11 | echo "------------------------------------------" 12 | echo "" 13 | done 14 | -------------------------------------------------------------------------------- /samples/deactivate_output.exs: -------------------------------------------------------------------------------- 1 | # Deactivate the fast warnings if they annoy you 2 | # You can also deactivate the comparison report 3 | Benchee.run( 4 | %{ 5 | "something" => fn -> Enum.map([1, 2, 3], fn i -> i * i end) end 6 | }, 7 | time: 2, 8 | warmup: 1, 9 | print: [ 10 | benchmarking: false, 11 | configuration: false, 12 | fast_warning: false 13 | ], 14 | formatters: [{Benchee.Formatters.Console, comparison: false}] 15 | ) 16 | 17 | # Name ips average deviation median 99th % 18 | # something 6.62 M 151.08 ns ±11000.02% 100 ns 253 ns 19 | -------------------------------------------------------------------------------- /samples/descending_sort.exs: -------------------------------------------------------------------------------- 1 | list = 1..10_000 |> Enum.to_list() |> Enum.shuffle() 2 | 3 | Benchee.run(%{ 4 | "sort |> reverse" => fn -> list |> Enum.sort() |> Enum.reverse() end, 5 | "sort(fun)" => fn -> Enum.sort(list, &(&1 > &2)) end, 6 | "sort_by(-value)" => fn -> Enum.sort_by(list, fn val -> -val end) end 7 | }) 8 | 9 | # Operating System: Linux 10 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 11 | # Number of Available Cores: 8 12 | # Available memory: 15.61 GB 13 | # Elixir 1.8.1 14 | # Erlang 21.2.7 15 | 16 | # Benchmark suite executing with the following configuration: 17 | # warmup: 2 s 18 | # time: 5 s 19 | # memory time: 0 ns 20 | # parallel: 1 21 | # inputs: none specified 22 | # Estimated total run time: 21 s 23 | 24 | # Benchmarking sort |> reverse... 25 | # Benchmarking sort(fun)... 26 | # Benchmarking sort_by(-value)... 
27 | 28 | # Name ips average deviation median 99th % 29 | # sort |> reverse 719.44 1.39 ms ±8.83% 1.35 ms 1.94 ms 30 | # sort(fun) 322.91 3.10 ms ±6.55% 3.06 ms 4.21 ms 31 | # sort_by(-value) 184.07 5.43 ms ±6.81% 5.34 ms 6.49 ms 32 | 33 | # Comparison: 34 | # sort |> reverse 719.44 35 | # sort(fun) 322.91 - 2.23x slower +1.71 ms 36 | # sort_by(-value) 184.07 - 3.91x slower +4.04 ms 37 | -------------------------------------------------------------------------------- /samples/fast.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | warmup: 0.1, 10 | time: 0.3, 11 | memory_time: 0.3 12 | ) 13 | 14 | # Operating System: Linux 15 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 16 | # Number of Available Cores: 8 17 | # Available memory: 15.61 GB 18 | # Elixir 1.8.1 19 | # Erlang 21.3.2 20 | 21 | # Benchmark suite executing with the following configuration: 22 | # warmup: 100 ms 23 | # time: 300 ms 24 | # memory time: 300 ms 25 | # parallel: 1 26 | # inputs: none specified 27 | # Estimated total run time: 1.40 s 28 | 29 | # Benchmarking flat_map... 30 | # Benchmarking map.flatten... 
31 | 32 | # Name ips average deviation median 99th % 33 | # flat_map 1.95 K 514.08 μs ±29.68% 475.22 μs 754.17 μs 34 | # map.flatten 1.22 K 819.85 μs ±21.26% 769.04 μs 1469.22 μs 35 | 36 | # Comparison: 37 | # flat_map 1.95 K 38 | # map.flatten 1.22 K - 1.59x slower +305.77 μs 39 | 40 | # Memory usage statistics: 41 | 42 | # Name Memory usage 43 | # flat_map 624.97 KB 44 | # map.flatten 781.25 KB - 1.25x memory usage +156.28 KB 45 | 46 | # **All measurements for memory usage were the same** 47 | -------------------------------------------------------------------------------- /samples/fast_functions.exs: -------------------------------------------------------------------------------- 1 | # This benchmark is here to showcase behaviour with too fast functions. 2 | # You can see a lot of it reads _(wrong)_ as the compiler optimizes these cases to return 3 | # constants and thereby doesn't benchmark what you think it does. 4 | 5 | range = 1..10 6 | integer1 = :rand.uniform(100) 7 | integer2 = :rand.uniform(100) 8 | 9 | Benchee.run( 10 | %{ 11 | "Integer addition (wrong)" => fn -> 1 + 1 end, 12 | "Integer addition" => fn -> integer1 + integer2 end, 13 | "String concatention (wrong)" => fn -> "1" <> "1" end, 14 | "adding a head to an array (wrong)" => fn -> [1 | [1]] end, 15 | "++ array concat (wrong)" => fn -> [1] ++ [1] end, 16 | "noop" => fn -> 0 end, 17 | "Enum.map(10)" => fn -> Enum.map(range, fn i -> i end) end 18 | }, 19 | time: 1, 20 | warmup: 1, 21 | memory_time: 1, 22 | formatters: [{Benchee.Formatters.Console, extended_statistics: true}] 23 | ) 24 | 25 | # See how the median of almost all options is 0 or 1 because they essentially do the same thing. 
26 | # Randomizing values prevents these optimizations but is still very fast (see the high standard 27 | # deviation) 28 | # 29 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 30 | # Number of Available Cores: 8 31 | # Available memory: 15.61 GB 32 | # Elixir 1.8.1 33 | # Erlang 21.2.7 34 | 35 | # Benchmark suite executing with the following configuration: 36 | # warmup: 1 s 37 | # time: 1 s 38 | # memory time: 1 s 39 | # parallel: 1 40 | # inputs: none specified 41 | # Estimated total run time: 21 s 42 | 43 | # Benchmarking ++ array concat (wrong)... 44 | # Benchmarking Enum.map(10)... 45 | # Benchmarking Integer addition... 46 | # Benchmarking Integer addition (wrong)... 47 | # Benchmarking String concatention (wrong)... 48 | # Benchmarking adding a head to an array (wrong)... 49 | # Benchmarking noop... 50 | 51 | # Name ips average deviation median 99th % 52 | # String concatention (wrong) 1008.75 M 0.99 ns ±3006.13% 0 ns 23 ns 53 | # ++ array concat (wrong) 715.26 M 1.40 ns ±1900.44% 0 ns 28 ns 54 | # adding a head to an array (wrong) 681.71 M 1.47 ns ±1760.70% 0 ns 34 ns 55 | # noop 598.00 M 1.67 ns ±7354.09% 0 ns 22 ns 56 | # Integer addition (wrong) 560.71 M 1.78 ns ±6908.19% 0 ns 28 ns 57 | # Integer addition 361.27 M 2.77 ns ±1187.75% 0 ns 43 ns 58 | # Enum.map(10) 2.23 M 448.05 ns ±3255.01% 351 ns 760 ns 59 | 60 | # Comparison: 61 | # String concatention (wrong) 1008.75 M 62 | # ++ array concat (wrong) 715.26 M - 1.41x slower +0.41 ns 63 | # adding a head to an array (wrong) 681.71 M - 1.48x slower +0.48 ns 64 | # noop 598.00 M - 1.69x slower +0.68 ns 65 | # Integer addition (wrong) 560.71 M - 1.80x slower +0.79 ns 66 | # Integer addition 361.27 M - 2.79x slower +1.78 ns 67 | # Enum.map(10) 2.23 M - 451.97x slower +447.06 ns 68 | 69 | # Extended statistics: 70 | 71 | # Name minimum maximum sample size mode 72 | # String concatention (wrong) 0 ns 9236 ns 1.55 M 0 ns 73 | # ++ array concat (wrong) 0 ns 9246 ns 1.55 M 0 ns 74 | # adding a head 
to an array (wrong) 0 ns 9019 ns 1.55 M 0 ns 75 | # noop 0 ns 62524 ns 1.55 M 0 ns 76 | # Integer addition (wrong) 0 ns 67609 ns 1.55 M 0 ns 77 | # Integer addition 0 ns 9297 ns 1.55 M 0 ns 78 | # Enum.map(10) 330 ns 9091442 ns 942.59 K 348 ns 79 | 80 | # Memory usage statistics: 81 | 82 | # Name Memory usage 83 | # String concatention (wrong) 0 B 84 | # ++ array concat (wrong) 0 B - 1.00x memory usage +0 B 85 | # adding a head to an array (wrong) 0 B - 1.00x memory usage +0 B 86 | # noop 0 B - 1.00x memory usage +0 B 87 | # Integer addition (wrong) 0 B - 1.00x memory usage +0 B 88 | # Integer addition 0 B - 1.00x memory usage +0 B 89 | # Enum.map(10) 424 B - ∞ x memory usage +424 B 90 | 91 | # **All measurements for memory usage were the same** 92 | -------------------------------------------------------------------------------- /samples/formatters.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | format_fun = fn %{scenarios: scenarios} -> 5 | IO.puts("") 6 | 7 | Enum.each(scenarios, fn scenario -> 8 | sample_size = scenario.run_time_data.statistics.sample_size 9 | IO.puts("Benchee recorded #{sample_size} run times for #{scenario.job_name}!") 10 | end) 11 | end 12 | 13 | Benchee.run( 14 | %{ 15 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 16 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 17 | }, 18 | formatters: [ 19 | format_fun, 20 | Benchee.Formatters.Console, 21 | {Benchee.Formatters.Console, extended_statistics: true} 22 | ] 23 | ) 24 | 25 | # Operating System: Linux 26 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 27 | # Number of Available Cores: 8 28 | # Available memory: 15.61 GB 29 | # Elixir 1.8.1 30 | # Erlang 21.3.2 31 | 32 | # Benchmark suite executing with the following configuration: 33 | # warmup: 2 s 34 | # time: 5 s 35 | # memory time: 0 ns 36 | # parallel: 1 37 | # inputs: none 
specified 38 | # Estimated total run time: 14 s 39 | 40 | # Benchmarking flat_map... 41 | # Benchmarking map.flatten... 42 | 43 | # Name ips average deviation median 99th % 44 | # flat_map 2.41 K 415.64 μs ±12.00% 406.27 μs 714.89 μs 45 | # map.flatten 1.28 K 781.62 μs ±18.60% 743.39 μs 1166.22 μs 46 | 47 | # Comparison: 48 | # flat_map 2.41 K 49 | # map.flatten 1.28 K - 1.88x slower +365.98 μs 50 | 51 | # Name ips average deviation median 99th % 52 | # flat_map 2.41 K 415.64 μs ±12.00% 406.27 μs 714.89 μs 53 | # map.flatten 1.28 K 781.62 μs ±18.60% 743.39 μs 1166.22 μs 54 | 55 | # Comparison: 56 | # flat_map 2.41 K 57 | # map.flatten 1.28 K - 1.88x slower +365.98 μs 58 | 59 | # Extended statistics: 60 | 61 | # Name minimum maximum sample size mode 62 | # flat_map 345.15 μs 1169.01 μs 12.00 K406.39 μs, 405.98 μs, 406 63 | # map.flatten 492.45 μs 1780.14 μs 6.38 K 741.75 μs 64 | 65 | # Benchee recorded 12001 run times for flat_map! 66 | # Benchee recorded 6385 run times for map.flatten! 67 | -------------------------------------------------------------------------------- /samples/macro_benchmark.exs: -------------------------------------------------------------------------------- 1 | # Intentionally not a real fibonacci to make it both slower and more memory hungry 2 | defmodule Fib do 3 | def fib(0), do: 0 4 | def fib(1), do: 1 5 | def fib(n), do: "#{fib(n - 1)} #{fib(n - 2)}" 6 | end 7 | 8 | Benchee.run( 9 | %{ 10 | "35 fibonacci numbers" => fn -> Fib.fib(35) end, 11 | "43 fibonacci numbers" => fn -> Fib.fib(43) end 12 | }, 13 | time: 10, 14 | warmup: 0, 15 | memory_time: 10 16 | ) 17 | 18 | # Operating System: Linux 19 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 20 | # Number of Available Cores: 8 21 | # Available memory: 15.61 GB 22 | # Elixir 1.8.1 23 | # Erlang 21.3.2 24 | 25 | # Benchmark suite executing with the following configuration: 26 | # warmup: 0 ns 27 | # time: 10 s 28 | # memory time: 10 s 29 | # parallel: 1 30 | # inputs: none 
specified 31 | # Estimated total run time: 40 s 32 | 33 | # Benchmarking 35 fibonacci numbers... 34 | # Benchmarking 43 fibonacci numbers... 35 | 36 | # Name ips average deviation median 99th % 37 | # 35 fibonacci numbers 0.32 0.0525 min ±1.49% 0.0525 min 0.0534 min 38 | # 43 fibonacci numbers 0.00674 2.47 min ±0.00% 2.47 min 2.47 min 39 | 40 | # Comparison: 41 | # 35 fibonacci numbers 0.32 42 | # 43 fibonacci numbers 0.00674 - 47.06x slower +2.42 min 43 | 44 | # Memory usage statistics: 45 | 46 | # Name Memory usage 47 | # 35 fibonacci numbers 1.14 GB 48 | # 43 fibonacci numbers 53.78 GB - 46.98x memory usage +52.64 GB 49 | 50 | # **All measurements for memory usage were the same** 51 | -------------------------------------------------------------------------------- /samples/measure_memory.exs: -------------------------------------------------------------------------------- 1 | map_fun = fn i -> [i, i * i] end 2 | 3 | Benchee.run( 4 | %{ 5 | "flat_map" => fn input -> Enum.flat_map(input, map_fun) end, 6 | "map.flatten" => fn input -> input |> Enum.map(map_fun) |> List.flatten() end 7 | }, 8 | inputs: %{ 9 | "Small" => Enum.to_list(1..1000), 10 | "Bigger" => Enum.to_list(1..100_000) 11 | }, 12 | time: 0, 13 | warmup: 0, 14 | memory_time: 2 15 | ) 16 | 17 | # Operating System: Linux 18 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 19 | # Number of Available Cores: 8 20 | # Available memory: 15.61 GB 21 | # Elixir 1.8.1 22 | # Erlang 21.3.2 23 | 24 | # Benchmark suite executing with the following configuration: 25 | # warmup: 0 ns 26 | # time: 0 ns 27 | # memory time: 2 s 28 | # parallel: 1 29 | # inputs: Bigger, Small 30 | # Estimated total run time: 8 s 31 | 32 | # Benchmarking flat_map with input Bigger... 33 | # Benchmarking flat_map with input Small... 34 | # Benchmarking map.flatten with input Bigger... 35 | # Benchmarking map.flatten with input Small... 
36 | 37 | # ##### With input Bigger ##### 38 | # Memory usage statistics: 39 | 40 | # Name Memory usage 41 | # flat_map 6.10 MB 42 | # map.flatten 7.63 MB - 1.25x memory usage +1.53 MB 43 | 44 | # **All measurements for memory usage were the same** 45 | 46 | # ##### With input Small ##### 47 | # Memory usage statistics: 48 | 49 | # Name Memory usage 50 | # flat_map 62.47 KB 51 | # map.flatten 78.13 KB - 1.25x memory usage +15.66 KB 52 | 53 | # **All measurements for memory usage were the same** 54 | -------------------------------------------------------------------------------- /samples/measure_reductions.exs: -------------------------------------------------------------------------------- 1 | map_fun = fn i -> [i, i * i] end 2 | 3 | Benchee.run( 4 | %{ 5 | "flat_map" => fn input -> 6 | # We need randomness here so we have differing reduction sizes. Otherwise, 7 | # we only get the output when all measurements are the same, which isn't 8 | # very helpful for testing. 9 | if rem(Enum.random(1..5), 2) == 0 do 10 | _ = Enum.random(1..10) + Enum.random(1..10) 11 | Enum.flat_map(input, map_fun) 12 | else 13 | Enum.flat_map(input, map_fun) 14 | end 15 | end, 16 | "map.flatten" => fn input -> 17 | if rem(Enum.random(1..5), 2) == 0 do 18 | _ = Enum.random(1..10) + Enum.random(1..10) 19 | input |> Enum.map(map_fun) |> List.flatten() 20 | else 21 | input |> Enum.map(map_fun) |> List.flatten() 22 | end 23 | end 24 | }, 25 | inputs: %{ 26 | "Small" => Enum.to_list(1..10), 27 | "Bigger" => Enum.to_list(1..100) 28 | }, 29 | time: 0.1, 30 | warmup: 0.1, 31 | reduction_time: 0.1 32 | ) 33 | -------------------------------------------------------------------------------- /samples/memory_changing.exs: -------------------------------------------------------------------------------- 1 | # random by design so that we get some memory statistics 2 | Benchee.run( 3 | %{ 4 | "Enum.to_list" => fn range -> Enum.to_list(range) end, 5 | "Enum.into" => fn range -> Enum.into(range, []) end 6 | 
}, 7 | # formatters: [{Benchee.Formatters.Console, extended_statistics: true}], 8 | before_each: fn _ -> 0..(:rand.uniform(1_000) + 1000) end, 9 | warmup: 0.1, 10 | time: 0.1, 11 | memory_time: 1 12 | ) 13 | -------------------------------------------------------------------------------- /samples/multiple_inputs.exs: -------------------------------------------------------------------------------- 1 | map_fun = fn i -> [i, i * i] end 2 | 3 | Benchee.run( 4 | %{ 5 | "flat_map" => fn input -> Enum.flat_map(input, map_fun) end, 6 | "map.flatten" => fn input -> input |> Enum.map(map_fun) |> List.flatten() end 7 | }, 8 | inputs: %{ 9 | "Small" => Enum.to_list(1..1_000), 10 | "Medium" => Enum.to_list(1..10_000), 11 | "Bigger" => Enum.to_list(1..100_000) 12 | } 13 | ) 14 | 15 | # Operating System: Linux 16 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 17 | # Number of Available Cores: 8 18 | # Available memory: 15.61 GB 19 | # Elixir 1.8.1 20 | # Erlang 21.3.2 21 | 22 | # Benchmark suite executing with the following configuration: 23 | # warmup: 2 s 24 | # time: 5 s 25 | # memory time: 0 ns 26 | # parallel: 1 27 | # inputs: Bigger, Medium, Small 28 | # Estimated total run time: 42 s 29 | 30 | # Benchmarking flat_map with input Bigger... 31 | # Benchmarking flat_map with input Medium... 32 | # Benchmarking flat_map with input Small... 33 | # Benchmarking map.flatten with input Bigger... 34 | # Benchmarking map.flatten with input Medium... 35 | # Benchmarking map.flatten with input Small... 
36 | 37 | # ##### With input Bigger ##### 38 | # Name ips average deviation median 99th % 39 | # flat_map 148.45 6.74 ms ±16.22% 6.47 ms 10.14 ms 40 | # map.flatten 111.47 8.97 ms ±19.02% 8.61 ms 14.02 ms 41 | 42 | # Comparison: 43 | # flat_map 148.45 44 | # map.flatten 111.47 - 1.33x slower +2.23 ms 45 | 46 | # ##### With input Medium ##### 47 | # Name ips average deviation median 99th % 48 | # flat_map 2.34 K 426.50 μs ±17.11% 405.51 μs 817.00 μs 49 | # map.flatten 1.79 K 558.80 μs ±19.87% 523.28 μs 1064.58 μs 50 | 51 | # Comparison: 52 | # flat_map 2.34 K 53 | # map.flatten 1.79 K - 1.31x slower +132.31 μs 54 | 55 | # ##### With input Small ##### 56 | # Name ips average deviation median 99th % 57 | # flat_map 24.42 K 40.95 μs ±31.34% 34.88 μs 77.32 μs 58 | # map.flatten 18.06 K 55.36 μs ±26.46% 49.52 μs 105.45 μs 59 | 60 | # Comparison: 61 | # flat_map 24.42 K 62 | # map.flatten 18.06 K - 1.35x slower +14.41 μs 63 | -------------------------------------------------------------------------------- /samples/parallel_process.exs: -------------------------------------------------------------------------------- 1 | # When passing a flag parallel with value >1 then multiple processes 2 | # will be handled for benchmarking provided function. 
3 | 4 | Benchee.run(%{"one" => fn -> :timer.sleep(10) end}, parallel: 1, time: 1) 5 | Benchee.run(%{"three" => fn -> :timer.sleep(10) end}, parallel: 3, time: 1) 6 | Benchee.run(%{"five" => fn -> :timer.sleep(10) end}, parallel: 5, time: 1) 7 | 8 | # output doesn't matter here 9 | -------------------------------------------------------------------------------- /samples/pre_check.exs: -------------------------------------------------------------------------------- 1 | a = 7 2 | 3 | Benchee.run(%{"a*2" => fn -> a * 2 end, "a+a" => fn -> a + a end}, 4 | time: 0, 5 | warmup: 0, 6 | pre_check: true 7 | ) 8 | 9 | Benchee.run(%{"a*2" => fn -> a * 2 end, "a+a" => fn -> a + a end}, 10 | time: 0, 11 | warmup: 0, 12 | pre_check: :all_same 13 | ) 14 | 15 | # this one is expected to fail, commented out by default so that `run_samples.sh` doesn't quit here 16 | # Benchee.run(%{"a*2" => fn -> a * 2 end, "a+a (wrong)" => fn -> a + a + 1 end}, 17 | # time: 0, 18 | # warmup: 0, 19 | # pre_check: :all_same 20 | # ) 21 | -------------------------------------------------------------------------------- /samples/reduction_run.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | time: 0, 10 | reduction_time: 2 11 | ) 12 | 13 | # tobi@qiqi:~/github/benchee(docs++)$ mix run samples/reduction_run.exs 14 | # Operating System: Linux 15 | # CPU Information: AMD Ryzen 9 5900X 12-Core Processor 16 | # Number of Available Cores: 24 17 | # Available memory: 31.27 GB 18 | # Elixir 1.13.3 19 | # Erlang 24.2.1 20 | 21 | # Benchmark suite executing with the following configuration: 22 | # warmup: 2 s 23 | # time: 0 ns 24 | # memory time: 0 ns 25 | # reduction time: 2 s 26 | # parallel: 1 27 | # inputs: none specified 28 | # 
Estimated total run time: 8 s 29 | 30 | # Benchmarking flat_map ... 31 | # Benchmarking map.flatten ... 32 | 33 | # Reduction count statistics: 34 | 35 | # Name Reduction count 36 | # flat_map 65.01 K 37 | # map.flatten 124.52 K - 1.92x reduction count +59.51 K 38 | -------------------------------------------------------------------------------- /samples/repeat_n.exs: -------------------------------------------------------------------------------- 1 | n = 1_000 2 | range = 1..n 3 | list = Enum.to_list(range) 4 | fun = fn -> 0 end 5 | 6 | Benchee.run(%{ 7 | "Enum.each (range)" => fn -> Enum.each(range, fn _ -> fun.() end) end, 8 | "List comprehension (range)" => fn -> for _ <- range, do: fun.() end, 9 | "Enum.each (list)" => fn -> Enum.each(list, fn _ -> fun.() end) end, 10 | "List comprehension (list)" => fn -> for _ <- list, do: fun.() end, 11 | "Recursion" => fn -> Benchee.Utility.RepeatN.repeat_n(fun, n) end 12 | }) 13 | 14 | # Operating System: Linux 15 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 16 | # Number of Available Cores: 8 17 | # Available memory: 15.61 GB 18 | # Elixir 1.8.1 19 | # Erlang 21.3.2 20 | 21 | # Benchmark suite executing with the following configuration: 22 | # warmup: 2 s 23 | # time: 5 s 24 | # memory time: 0 ns 25 | # parallel: 1 26 | # inputs: none specified 27 | # Estimated total run time: 35 s 28 | 29 | # Benchmarking Enum.each (list)... 30 | # Benchmarking Enum.each (range)... 31 | # Benchmarking List comprehension (list)... 32 | # Benchmarking List comprehension (range)... 33 | # Benchmarking Recursion... 
34 | 35 | # Name ips average deviation median 99th % 36 | # Recursion 80.33 K 12.45 μs ±12.46% 12.37 μs 15.02 μs 37 | # Enum.each (list) 45.83 K 21.82 μs ±19.54% 20.57 μs 34.33 μs 38 | # List comprehension (list) 43.23 K 23.13 μs ±12.07% 22.72 μs 33.55 μs 39 | # List comprehension (range) 35.26 K 28.36 μs ±10.99% 27.88 μs 36.29 μs 40 | # Enum.each (range) 30.09 K 33.24 μs ±11.21% 32.83 μs 48.55 μs 41 | 42 | # Comparison: 43 | # Recursion 80.33 K 44 | # Enum.each (list) 45.83 K - 1.75x slower +9.37 μs 45 | # List comprehension (list) 43.23 K - 1.86x slower +10.69 μs 46 | # List comprehension (range) 35.26 K - 2.28x slower +15.91 μs 47 | # Enum.each (range) 30.09 K - 2.67x slower +20.79 μs 48 | -------------------------------------------------------------------------------- /samples/run.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | time: 10, 10 | memory_time: 2 11 | ) 12 | 13 | # Operating System: Linux 14 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 15 | # Number of Available Cores: 8 16 | # Available memory: 15.61 GB 17 | # Elixir 1.8.1 18 | # Erlang 21.2.7 19 | 20 | # Benchmark suite executing with the following configuration: 21 | # warmup: 2 s 22 | # time: 10 s 23 | # memory time: 2 s 24 | # parallel: 1 25 | # inputs: none specified 26 | # Estimated total run time: 28 s 27 | 28 | # Benchmarking flat_map... 29 | # Benchmarking map.flatten... 
30 | 31 | # Name ips average deviation median 99th % 32 | # flat_map 2.34 K 426.84 μs ±9.88% 418.72 μs 720.20 μs 33 | # map.flatten 1.18 K 844.08 μs ±19.73% 778.10 μs 1314.87 μs 34 | 35 | # Comparison: 36 | # flat_map 2.34 K 37 | # map.flatten 1.18 K - 1.98x slower +417.24 μs 38 | 39 | # Memory usage statistics: 40 | 41 | # Name Memory usage 42 | # flat_map 624.97 KB 43 | # map.flatten 781.25 KB - 1.25x memory usage +156.28 KB 44 | 45 | # **All measurements for memory usage were the same** 46 | -------------------------------------------------------------------------------- /samples/run_all.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | warmup: 1, 10 | time: 5, 11 | memory_time: 2, 12 | reduction_time: 2 13 | ) 14 | 15 | # tobi@qiqi:~/github/benchee(docs++)$ mix run samples/run_all.exs 16 | # Operating System: Linux 17 | # CPU Information: AMD Ryzen 9 5900X 12-Core Processor 18 | # Number of Available Cores: 24 19 | # Available memory: 31.27 GB 20 | # Elixir 1.13.3 21 | # Erlang 24.2.1 22 | 23 | # Benchmark suite executing with the following configuration: 24 | # warmup: 1 s 25 | # time: 5 s 26 | # memory time: 2 s 27 | # reduction time: 2 s 28 | # parallel: 1 29 | # inputs: none specified 30 | # Estimated total run time: 20 s 31 | 32 | # Benchmarking flat_map ... 33 | # Benchmarking map.flatten ... 
34 | 35 | # Name ips average deviation median 99th % 36 | # flat_map 3.61 K 276.99 μs ±10.39% 273.61 μs 490.68 μs 37 | # map.flatten 2.25 K 444.22 μs ±21.30% 410.09 μs 703.06 μs 38 | 39 | # Comparison: 40 | # flat_map 3.61 K 41 | # map.flatten 2.25 K - 1.60x slower +167.22 μs 42 | 43 | # Memory usage statistics: 44 | 45 | # Name Memory usage 46 | # flat_map 625 KB 47 | # map.flatten 781.25 KB - 1.25x memory usage +156.25 KB 48 | 49 | # **All measurements for memory usage were the same** 50 | 51 | # Reduction count statistics: 52 | 53 | # Name Reduction count 54 | # flat_map 65.01 K 55 | # map.flatten 124.52 K - 1.92x reduction count +59.51 K 56 | 57 | # **All measurements for reduction count were the same** 58 | -------------------------------------------------------------------------------- /samples/run_defaults.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run(%{ 5 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 6 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 7 | }) 8 | 9 | # Operating System: Linux 10 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 11 | # Number of Available Cores: 8 12 | # Available memory: 15.61 GB 13 | # Elixir 1.8.1 14 | # Erlang 21.3.2 15 | 16 | # Benchmark suite executing with the following configuration: 17 | # warmup: 2 s 18 | # time: 5 s 19 | # memory time: 0 ns 20 | # parallel: 1 21 | # inputs: none specified 22 | # Estimated total run time: 14 s 23 | 24 | # Benchmarking flat_map... 25 | # Benchmarking map.flatten... 
26 | 27 | # Name ips average deviation median 99th % 28 | # flat_map 2.36 K 423.20 μs ±16.84% 405.89 μs 771.85 μs 29 | # map.flatten 1.26 K 795.99 μs ±20.06% 745.23 μs 1260.17 μs 30 | 31 | # Comparison: 32 | # flat_map 2.36 K 33 | # map.flatten 1.26 K - 1.88x slower +372.79 μs 34 | -------------------------------------------------------------------------------- /samples/run_extended_statistics.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | time: 10, 10 | formatters: [{Benchee.Formatters.Console, extended_statistics: true}] 11 | ) 12 | 13 | # Operating System: Linux 14 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 15 | # Number of Available Cores: 8 16 | # Available memory: 15.61 GB 17 | # Elixir 1.8.1 18 | # Erlang 21.3.2 19 | 20 | # Benchmark suite executing with the following configuration: 21 | # warmup: 2 s 22 | # time: 10 s 23 | # memory time: 0 ns 24 | # parallel: 1 25 | # inputs: none specified 26 | # Estimated total run time: 24 s 27 | 28 | # Benchmarking flat_map... 29 | # Benchmarking map.flatten... 
30 | 31 | # Name ips average deviation median 99th % 32 | # flat_map 2.37 K 421.74 μs ±14.17% 406.44 μs 758.14 μs 33 | # map.flatten 1.24 K 807.37 μs ±20.22% 747.94 μs 1311.65 μs 34 | 35 | # Comparison: 36 | # flat_map 2.37 K 37 | # map.flatten 1.24 K - 1.91x slower +385.64 μs 38 | 39 | # Extended statistics: 40 | 41 | # Name minimum maximum sample size mode 42 | # flat_map 345.15 μs 1182.74 μs 23.64 K 406.28 μs 43 | # map.flatten 492.78 μs 1925.29 μs 12.36 K741.79 μs, 739.21 μs, 738 44 | -------------------------------------------------------------------------------- /samples/run_verbose.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | [time: 3] 5 | |> Benchee.init() 6 | |> Benchee.system() 7 | |> Benchee.benchmark("flat_map", fn -> Enum.flat_map(list, map_fun) end) 8 | |> Benchee.benchmark( 9 | "map.flatten", 10 | fn -> list |> Enum.map(map_fun) |> List.flatten() end 11 | ) 12 | |> Benchee.collect() 13 | |> Benchee.statistics() 14 | |> Benchee.relative_statistics() 15 | |> Benchee.Formatter.output(Benchee.Formatters.Console) 16 | 17 | # Operating System: Linux 18 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 19 | # Number of Available Cores: 8 20 | # Available memory: 15.61 GB 21 | # Elixir 1.8.1 22 | # Erlang 21.2.7 23 | 24 | # Benchmark suite executing with the following configuration: 25 | # warmup: 2 s 26 | # time: 3 s 27 | # memory time: 0 ns 28 | # parallel: 1 29 | # inputs: none specified 30 | # Estimated total run time: 10 s 31 | 32 | # Benchmarking flat_map... 33 | # Benchmarking map.flatten... 
34 | 35 | # Name ips average deviation median 99th % 36 | # flat_map 2.32 K 430.44 μs ±16.15% 411.38 μs 732.80 μs 37 | # map.flatten 1.18 K 846.04 μs ±20.56% 779.01 μs 1237.69 μs 38 | 39 | # Comparison: 40 | # flat_map 2.32 K 41 | # map.flatten 1.18 K - 1.97x slower +415.60 μs 42 | -------------------------------------------------------------------------------- /samples/save_and_load.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | time: 5, 10 | save: [path: "save.benchee", tag: "first-try"] 11 | ) 12 | 13 | IO.puts("----------------------------------------------") 14 | 15 | Benchee.run( 16 | %{ 17 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 18 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 19 | }, 20 | time: 5, 21 | load: "save.benchee", 22 | formatters: [{Benchee.Formatters.Console, extended_statistics: true}] 23 | ) 24 | 25 | # Operating System: Linux 26 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 27 | # Number of Available Cores: 8 28 | # Available memory: 15.61 GB 29 | # Elixir 1.8.1 30 | # Erlang 21.3.2 31 | 32 | # Benchmark suite executing with the following configuration: 33 | # warmup: 2 s 34 | # time: 5 s 35 | # memory time: 0 ns 36 | # parallel: 1 37 | # inputs: none specified 38 | # Estimated total run time: 14 s 39 | 40 | # Benchmarking flat_map... 41 | # Benchmarking map.flatten... 
42 | 43 | # Name ips average deviation median 99th % 44 | # flat_map 2.40 K 417.31 μs ±12.98% 406.10 μs 728.39 μs 45 | # map.flatten 1.27 K 787.55 μs ±18.48% 743.45 μs 1172.37 μs 46 | 47 | # Comparison: 48 | # flat_map 2.40 K 49 | # map.flatten 1.27 K - 1.89x slower +370.24 μs 50 | # Suite saved in external term format at save.benchee 51 | # ---------------------------------------------- 52 | # Operating System: Linux 53 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 54 | # Number of Available Cores: 8 55 | # Available memory: 15.61 GB 56 | # Elixir 1.8.1 57 | # Erlang 21.3.2 58 | 59 | # Benchmark suite executing with the following configuration: 60 | # warmup: 2 s 61 | # time: 5 s 62 | # memory time: 0 ns 63 | # parallel: 1 64 | # inputs: none specified 65 | # Estimated total run time: 14 s 66 | 67 | # Benchmarking flat_map... 68 | # Benchmarking map.flatten... 69 | 70 | # Name ips average deviation median 99th % 71 | # flat_map 2.42 K 414.01 μs ±11.90% 406.17 μs 712.99 μs 72 | # flat_map (first-try) 2.40 K 417.31 μs ±12.98% 406.10 μs 728.39 μs 73 | # map.flatten 1.27 K 784.75 μs ±18.42% 743.95 μs 1190.36 μs 74 | # map.flatten (first-try) 1.27 K 787.55 μs ±18.48% 743.45 μs 1172.37 μs 75 | 76 | # Comparison: 77 | # flat_map 2.42 K 78 | # flat_map (first-try) 2.40 K - 1.01x slower +3.30 μs 79 | # map.flatten 1.27 K - 1.90x slower +370.74 μs 80 | # map.flatten (first-try) 1.27 K - 1.90x slower +373.54 μs 81 | 82 | # Extended statistics: 83 | 84 | # Name minimum maximum sample size mode 85 | # flat_map 345.33 μs 1180.66 μs 12.05 K 405.29 μs 86 | # flat_map (first-try) 345.32 μs 1205.74 μs 11.95 K 405.98 μs, 406.09 μs 87 | # map.flatten 494.22 μs 1811.82 μs 6.36 K 738.26 μs 88 | # map.flatten (first-try) 493.95 μs 1882.94 μs 6.34 K 740.59 μs 89 | -------------------------------------------------------------------------------- /samples/save_and_report.exs: -------------------------------------------------------------------------------- 1 | list = 
Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | # report will give us the output 10 | formatters: [], 11 | time: 5, 12 | save: [path: "save.benchee", tag: "save-me"] 13 | ) 14 | 15 | IO.puts("----------------------------------------------") 16 | 17 | Benchee.report( 18 | load: "save.benchee", 19 | formatters: [{Benchee.Formatters.Console, extended_statistics: true}] 20 | ) 21 | 22 | # Operating System: Linux 23 | # CPU Information: AMD Ryzen 9 5900X 12-Core Processor 24 | # Number of Available Cores: 24 25 | # Available memory: 31.25 GB 26 | # Elixir 1.16.0-rc.0 27 | # Erlang 26.1.2 28 | # JIT enabled: true 29 | 30 | # Benchmark suite executing with the following configuration: 31 | # warmup: 2 s 32 | # time: 5 s 33 | # memory time: 0 ns 34 | # reduction time: 0 ns 35 | # parallel: 1 36 | # inputs: none specified 37 | # Estimated total run time: 14 s 38 | 39 | # Benchmarking flat_map ... 40 | # Benchmarking map.flatten ... 41 | # Calculating statistics... 42 | # Formatting results... 43 | # Suite saved in external term format at save.benchee 44 | # ---------------------------------------------- 45 | # Formatting results... 
46 | 47 | # Name ips average deviation median 99th % 48 | # flat_map (save-me) 3.69 K 270.69 μs ±21.13% 259.71 μs 703.98 μs 49 | # map.flatten (save-me) 1.88 K 530.50 μs ±45.74% 410.19 μs 1227.46 μs 50 | 51 | # Comparison: 52 | # flat_map (save-me) 3.69 K 53 | # map.flatten (save-me) 1.88 K - 1.96x slower +259.82 μs 54 | 55 | # Extended statistics: 56 | 57 | # Name minimum maximum sample size mode 58 | # flat_map (save-me) 215.30 μs 1066.77 μs 18.44 K 257.25 μs 59 | # map.flatten (save-me) 203.19 μs 1522.54 μs 9.41 K406.65 μs, 406.35 μs, 377 60 | -------------------------------------------------------------------------------- /samples/sort_performance.exs: -------------------------------------------------------------------------------- 1 | list_10k = 1..10_000 |> Enum.to_list() |> Enum.shuffle() 2 | list_100k = 1..100_000 |> Enum.to_list() |> Enum.shuffle() 3 | list_1M = 1..1_000_000 |> Enum.to_list() |> Enum.shuffle() 4 | 5 | Benchee.run( 6 | [ 7 | {"10k", fn -> Enum.sort(list_10k) end}, 8 | {"100k", fn -> Enum.sort(list_100k) end}, 9 | {"1M", fn -> Enum.sort(list_1M) end} 10 | ], 11 | memory_time: 2 12 | ) 13 | 14 | # tobi@qiqi:~/github/benchee(max-sample-size)$ mix run samples/sort_performance.exs 15 | # Operating System: Linux 16 | # CPU Information: AMD Ryzen 9 5900X 12-Core Processor 17 | # Number of Available Cores: 24 18 | # Available memory: 31.26 GB 19 | # Elixir 1.18.3 20 | # Erlang 27.3.2 21 | # JIT enabled: true 22 | 23 | # Benchmark suite executing with the following configuration: 24 | # warmup: 2 s 25 | # time: 5 s 26 | # memory time: 2 s 27 | # reduction time: 0 ns 28 | # parallel: 1 29 | # inputs: none specified 30 | # Estimated total run time: 27 s 31 | 32 | # Benchmarking 10k ... 33 | # Benchmarking 100k ... 34 | # Benchmarking 1M ... 35 | # Calculating statistics... 36 | # Formatting results... 
37 | 38 | # Name ips average deviation median 99th % 39 | # 10k 1237.97 0.81 ms ±28.70% 0.73 ms 1.42 ms 40 | # 100k 74.50 13.42 ms ±40.13% 10.96 ms 31.42 ms 41 | # 1M 6.17 162.05 ms ±34.38% 146.08 ms 328.83 ms 42 | 43 | # Comparison: 44 | # 10k 1237.97 45 | # 100k 74.50 - 16.62x slower +12.62 ms 46 | # 1M 6.17 - 200.61x slower +161.24 ms 47 | 48 | # Memory usage statistics: 49 | 50 | # Name Memory usage 51 | # 10k 1.42 MB 52 | # 100k 18.59 MB - 13.09x memory usage +17.17 MB 53 | # 1M 218.22 MB - 153.62x memory usage +216.80 MB 54 | 55 | # **All measurements for memory usage were the same** 56 | -------------------------------------------------------------------------------- /samples/statistics_performance.exs: -------------------------------------------------------------------------------- 1 | list_10k = 1..10_000 |> Enum.to_list() |> Enum.shuffle() 2 | list_100k = 1..100_000 |> Enum.to_list() |> Enum.shuffle() 3 | list_1M = 1..1_000_000 |> Enum.to_list() |> Enum.shuffle() 4 | 5 | Benchee.run( 6 | [ 7 | {"10k", fn -> Statistex.statistics(list_10k) end}, 8 | {"100k", fn -> Statistex.statistics(list_100k) end}, 9 | {"1M", fn -> Statistex.statistics(list_1M) end} 10 | ], 11 | memory_time: 2 12 | ) 13 | 14 | # tobi@qiqi:~/github/benchee(max-sample-size)$ mix run samples/sort_performance.exs 15 | # Operating System: Linux 16 | # CPU Information: AMD Ryzen 9 5900X 12-Core Processor 17 | # Number of Available Cores: 24 18 | # Available memory: 31.26 GB 19 | # Elixir 1.18.3 20 | # Erlang 27.3.2 21 | # JIT enabled: true 22 | 23 | # Benchmark suite executing with the following configuration: 24 | # warmup: 2 s 25 | # time: 5 s 26 | # memory time: 2 s 27 | # reduction time: 0 ns 28 | # parallel: 1 29 | # inputs: none specified 30 | # Estimated total run time: 27 s 31 | 32 | # Benchmarking 10k ... 33 | # Benchmarking 100k ... 34 | # Benchmarking 1M ... 35 | # Calculating statistics... 36 | # Formatting results... 
37 | 38 | # Name ips average deviation median 99th % 39 | # 10k 1237.97 0.81 ms ±28.70% 0.73 ms 1.42 ms 40 | # 100k 74.50 13.42 ms ±40.13% 10.96 ms 31.42 ms 41 | # 1M 6.17 162.05 ms ±34.38% 146.08 ms 328.83 ms 42 | 43 | # Comparison: 44 | # 10k 1237.97 45 | # 100k 74.50 - 16.62x slower +12.62 ms 46 | # 1M 6.17 - 200.61x slower +161.24 ms 47 | 48 | # Memory usage statistics: 49 | 50 | # Name Memory usage 51 | # 10k 1.42 MB 52 | # 100k 18.59 MB - 13.09x memory usage +17.17 MB 53 | # 1M 218.22 MB - 153.62x memory usage +216.80 MB 54 | 55 | # **All measurements for memory usage were the same** 56 | -------------------------------------------------------------------------------- /samples/title.exs: -------------------------------------------------------------------------------- 1 | list = Enum.to_list(1..10_000) 2 | map_fun = fn i -> [i, i * i] end 3 | 4 | Benchee.run( 5 | %{ 6 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 7 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 8 | }, 9 | time: 2, 10 | title: "Comparing map.flatten and flat_map" 11 | ) 12 | 13 | # Operating System: Linux 14 | # CPU Information: Intel(R) Core(TM) i7-4790 CPU @ 3.60GHz 15 | # Number of Available Cores: 8 16 | # Available memory: 15.61 GB 17 | # Elixir 1.8.1 18 | # Erlang 21.3.2 19 | 20 | # Benchmark suite executing with the following configuration: 21 | # warmup: 2 s 22 | # time: 2 s 23 | # memory time: 0 ns 24 | # parallel: 1 25 | # inputs: none specified 26 | # Estimated total run time: 8 s 27 | 28 | # Benchmarking flat_map... 29 | # Benchmarking map.flatten... 
30 | 31 | # *** Comparing map.flatten and flat_map *** 32 | 33 | # Name ips average deviation median 99th % 34 | # flat_map 2.34 K 427.87 μs ±19.45% 405.68 μs 769.93 μs 35 | # map.flatten 1.25 K 801.46 μs ±19.65% 751.36 μs 1202.12 μs 36 | 37 | # Comparison: 38 | # flat_map 2.34 K 39 | # map.flatten 1.25 K - 1.87x slower +373.59 μs 40 | -------------------------------------------------------------------------------- /samples/unit_scaling.exs: -------------------------------------------------------------------------------- 1 | list_10 = 1..10 |> Enum.to_list() |> Enum.shuffle() 2 | list_100 = 1..100 |> Enum.to_list() |> Enum.shuffle() 3 | list_1k = 1..1_000 |> Enum.to_list() |> Enum.shuffle() 4 | list_10k = 1..10_000 |> Enum.to_list() |> Enum.shuffle() 5 | list_100k = 1..100_000 |> Enum.to_list() |> Enum.shuffle() 6 | list_1M = 1..1_000_000 |> Enum.to_list() |> Enum.shuffle() 7 | list_5M = 1..5_000_000 |> Enum.to_list() |> Enum.shuffle() 8 | 9 | # options documented in README 10 | Benchee.run( 11 | %{ 12 | "10" => fn -> Enum.sort(list_10) end, 13 | "100" => fn -> Enum.sort(list_100) end, 14 | "1k" => fn -> Enum.sort(list_1k) end, 15 | "10k" => fn -> Enum.sort(list_10k) end, 16 | "100k" => fn -> Enum.sort(list_100k) end, 17 | "1M" => fn -> Enum.sort(list_1M) end, 18 | "5M" => fn -> Enum.sort(list_5M) end 19 | }, 20 | warmup: 1, 21 | time: 2, 22 | unit_scaling: :none 23 | ) 24 | 25 | # With :best scaling (default) 26 | # Name ips average deviation median 99th % 27 | # 10 54339465.83 0.00002 ms ±59170.09% 0 ms 0 ms 28 | # 100 274669.84 0.00364 ms ±71.69% 0.00353 ms 0.00618 ms 29 | # 1k 10610.90 0.0942 ms ±7.50% 0.0924 ms 0.133 ms 30 | # 10k 728.48 1.37 ms ±3.88% 1.36 ms 1.56 ms 31 | # 100k 54.92 18.21 ms ±6.44% 17.93 ms 24.40 ms 32 | # 1M 3.33 300.22 ms ±3.56% 299.84 ms 321.05 ms 33 | # 5M 0.62 1601.88 ms ±1.81% 1601.88 ms 1622.44 ms 34 | 35 | # Comparison: 36 | # 10 54339465.83 37 | # 100 274669.84 - 197.84x slower +0.00362 ms 38 | # 1k 10610.90 - 5121.10x slower 
+0.0942 ms 39 | # 10k 728.48 - 74592.88x slower +1.37 ms 40 | # 100k 54.92 - 989461.84x slower +18.21 ms 41 | # 1M 3.33 - 16313992.34x slower +300.22 ms 42 | # 5M 0.62 - 87045544.33x slower +1601.88 ms 43 | 44 | # With :largest scaling 45 | # Name ips average deviation median 99th % 46 | # 10 26.50 M 0.00000 s ±31888.41% 0 s 0.00000 s 47 | # 100 0.26 M 0.00000 s ±65.08% 0.00000 s 0.00001 s 48 | # 1k 0.0106 M 0.00009 s ±7.10% 0.00009 s 0.00011 s 49 | # 10k 0.00072 M 0.00139 s ±5.50% 0.00136 s 0.00162 s 50 | # 100k 0.00005 M 0.0184 s ±7.38% 0.0180 s 0.0259 s 51 | # 1M 0.00000 M 0.29 s ±2.55% 0.29 s 0.30 s 52 | # 5M 0.00000 M 1.57 s ±0.00% 1.57 s 1.57 s 53 | 54 | # Comparison: 55 | # 10 26.50 M 56 | # 100 0.26 M - 102.14x slower +0.00000 s 57 | # 1k 0.0106 M - 2502.96x slower +0.00009 s 58 | # 10k 0.00072 M - 36778.23x slower +0.00139 s 59 | # 100k 0.00005 M - 488121.76x slower +0.0184 s 60 | # 1M 0.00000 M - 7682423.21x slower +0.29 s 61 | # 5M 0.00000 M - 41519805.39x slower +1.57 s 62 | 63 | # With :smallest scaling 64 | # Name ips average deviation median 99th % 65 | # 10 28235291.15 35.42 ns ±41613.65% 0 ns 0 ns 66 | # 100 265789.35 3762.38 ns ±150.11% 3600 ns 5725.10 ns 67 | # 1k 10852.43 92145.25 ns ±6.48% 91483.50 ns 110076.41 ns 68 | # 10k 727.98 1373671.11 ns ±6.10% 1349783.50 ns 1852423.85 ns 69 | # 100k 57.06 17526359.86 ns ±4.17% 17360028 ns 21152683.36 ns 70 | # 1M 3.35298758918.29 ns ±1.99% 300828679 ns 304052431 ns 71 | # 5M 0.631594076577.50 n ±1.37%1594076577.50 n 1609545725 ns 72 | 73 | # Comparison: 74 | # 10 28235291.15 75 | # 100 265789.35 - 106.23x slower +3726.96 ns 76 | # 1k 10852.43 - 2601.75x slower +92109.83 ns 77 | # 10k 727.98 - 38786.00x slower +1373635.70 ns 78 | # 100k 57.06 - 494861.87x slower +17526324.44 ns 79 | # 1M 3.35 - 8435545.04x slower +298758882.87 ns 80 | # 5M 0.63 - 45009216.28x slower +1594076542.08 ns 81 | 82 | # With :none scaling 83 | # Name ips average deviation median 99th % 84 | # 10 22559861.17 44.33 ns ±36545.13% 
0 ns 0 ns 85 | # 100 274192.56 3647.07 ns ±72.97% 3556 ns 5734 ns 86 | # 1k 10602.13 94320.70 ns ±11.69% 92365 ns 108881.82 ns 87 | # 10k 747.65 1337530.97 ns ±9.24% 1271344 ns 1777959.76 ns 88 | # 100k 55.18 18123591.72 ns ±6.20% 17783950 ns 23599979.00 ns 89 | # 1M 3.36297466609.43 ns ±2.22% 296495374 ns 311766061 ns 90 | # 5M 0.63 1582891081 ns ±0.20% 1582891081 ns 1585160748 ns 91 | 92 | # Comparison: 93 | # 10 22559861.17 94 | # 100 274192.56 - 82.28x slower +3602.75 ns 95 | # 1k 10602.13 - 2127.86x slower +94276.37 ns 96 | # 10k 747.65 - 30174.51x slower +1337486.64 ns 97 | # 100k 55.18 - 408865.71x slower +18123547.39 ns 98 | # 1M 3.36 - 6710805.41x slower +297466565.10 ns 99 | # 5M 0.63 - 35709803.03x slower +1582891036.67 ns 100 | -------------------------------------------------------------------------------- /samples/unit_scaling_smallest_vs_none.exs: -------------------------------------------------------------------------------- 1 | # Due to the wide spread of values in unit_scaling.exs there is no difference 2 | # between :none and :smallest -let's fix that here! 
3 | 4 | list_10k = 1..10_000 |> Enum.to_list() |> Enum.shuffle() 5 | list_100k = 1..100_000 |> Enum.to_list() |> Enum.shuffle() 6 | 7 | # options document in README 8 | Benchee.run( 9 | %{ 10 | "10k" => fn -> Enum.sort(list_10k) end, 11 | "100k" => fn -> Enum.sort(list_100k) end 12 | }, 13 | unit_scaling: :smallest 14 | ) 15 | 16 | # :smallest 17 | # Name ips average deviation median 99th % 18 | # 10k 794.29 1.26 ms ±2.71% 1.25 ms 1.35 ms 19 | # 100k 57.50 17.39 ms ±3.37% 17.32 ms 19.69 ms 20 | 21 | # Comparison: 22 | # 10k 794.29 23 | # 100k 57.50 - 13.81x slower +16.13 ms 24 | 25 | # :none 26 | # Name ips average deviation median 99th % 27 | # 10k 699.28 1430032.96 ns ±12.25% 1367991 ns 1830810.09 ns 28 | # 100k 57.61 17358597.81 ns ±3.14% 17264780.50 ns 20125840.48 ns 29 | 30 | # Comparison: 31 | # 10k 699.28 32 | # 100k 57.61 - 12.14x slower +15928564.85 ns 33 | -------------------------------------------------------------------------------- /test/benchee/benchmark/collect/memory_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Collect.MemoryTest do 2 | # We cannot use async: true because of the test that we're running to ensure 3 | # there aren't any leaked processes if functions fail while we're tracing 4 | # them. 5 | use ExUnit.Case 6 | alias Benchee.Benchmark.Collect.Memory 7 | import ExUnit.CaptureIO 8 | 9 | @moduletag :memory_measure 10 | 11 | describe "collect/1" do 12 | test "returns the result of the function and the memory used (in bytes)" do 13 | fun_to_run = fn -> Enum.to_list(1..10) end 14 | assert {memory_used, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} = Memory.collect(fun_to_run) 15 | # We need to have some wiggle room here because memory used varies from 16 | # system to system. It's consistent in an environment, but changes 17 | # between environments. 
18 | assert memory_used > 150 19 | assert memory_used < 400 20 | end 21 | 22 | test "doesn't return broken values" do 23 | fun = fn -> BenchKeyword.delete_v0(Enum.map(1..100, &{:"k#{&1}", &1}), :k100) end 24 | assert {memory_used, _} = Memory.collect(fun) 25 | 26 | assert memory_used >= 8_000 27 | assert memory_used <= 14_000 28 | end 29 | 30 | test "will not leak processes if the applied function raises an exception" do 31 | starting_processes = Enum.count(Process.list()) 32 | 33 | # We're capturing the IO here so the error output doesn't clog up our 34 | # tests. We don't want stuff in our green dots!! 35 | # We also need to sleep for a bit because the error output is coming from 36 | # a separate process, so we need to wait for that to emit so we can 37 | # capture it. 38 | capture_io(fn -> 39 | Memory.collect(fn -> exit(:kill) end) 40 | end) 41 | 42 | Process.sleep(50) 43 | 44 | assert Enum.count(Process.list()) == starting_processes 45 | end 46 | end 47 | end 48 | -------------------------------------------------------------------------------- /test/benchee/benchmark/collect/reductions_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Collect.ReductionsTest do 2 | use ExUnit.Case, async: true 3 | alias Benchee.Benchmark.Collect.Reductions 4 | 5 | describe "collect/1" do 6 | test "returns the reduction count and result of the function" do 7 | assert {reductions, [1, 2, 3, 4, 5, 6, 7, 8, 9]} = 8 | Reductions.collect(fn -> Enum.to_list(1..9) end) 9 | 10 | # it seems that the JIT may interfere here with the values as we get flakiness 11 | # on higher OTP versions and so we're loosening up the boundaries here, sadly 12 | assert reductions >= 13 13 | assert reductions <= 250 14 | 15 | assert {reductions, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} = 16 | Reductions.collect(fn -> Enum.to_list(1..10) end) 17 | 18 | assert reductions >= 14 19 | assert reductions <= 250 20 | 21 | assert {reductions, [1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11]} = 22 | Reductions.collect(fn -> Enum.to_list(1..11) end) 23 | 24 | assert reductions >= 14 25 | assert reductions <= 250 26 | end 27 | end 28 | end 29 | -------------------------------------------------------------------------------- /test/benchee/benchmark/scenario_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.ScenarioTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Scenario, import: true 4 | end 5 | -------------------------------------------------------------------------------- /test/benchee/benchmark_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.BenchmarkTest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Benchee.{ 5 | Benchmark, 6 | Configuration, 7 | Scenario, 8 | Suite 9 | } 10 | 11 | alias Benchee.Benchmark.BenchmarkConfig 12 | 13 | alias Benchee.Benchmark.ScenarioContext 14 | alias Benchee.Test.FakeBenchmarkPrinter, as: TestPrinter 15 | alias Benchee.Test.FakeBenchmarkRunner, as: TestRunner 16 | 17 | describe ".benchmark" do 18 | test "can add jobs with atom keys but converts them to string" do 19 | suite = 20 | %Suite{} 21 | |> Benchmark.benchmark("one job", fn -> 1 end) 22 | |> Benchmark.benchmark(:something, fn -> 2 end) 23 | 24 | job_names = Enum.map(suite.scenarios, fn scenario -> scenario.job_name end) 25 | assert job_names == ["one job", "something"] 26 | names = Enum.map(suite.scenarios, fn scenario -> scenario.name end) 27 | assert names == ["one job", "something"] 28 | end 29 | 30 | test "warns when adding the same job again even if it's atom and string" do 31 | one_fun = fn -> 1 end 32 | scenario = %Scenario{job_name: "one", function: one_fun, name: "one"} 33 | suite = %Suite{scenarios: [scenario]} 34 | new_suite = Benchmark.benchmark(suite, :one, fn -> 42 end, TestPrinter) 35 | 36 | assert new_suite == %Suite{scenarios: [scenario]} 37 | 38 | assert_receive {:duplicate, "one"} 39
| end 40 | 41 | test "adds a %Scenario{} to the suite for a job with no inputs" do 42 | job_name = "one job" 43 | function = fn -> 1 end 44 | config = %Configuration{inputs: nil} 45 | suite = Benchmark.benchmark(%Suite{configuration: config}, job_name, function) 46 | 47 | expected_scenario = %Scenario{ 48 | name: job_name, 49 | job_name: job_name, 50 | function: function, 51 | input: Benchmark.no_input(), 52 | input_name: Benchmark.no_input() 53 | } 54 | 55 | assert suite.scenarios == [expected_scenario] 56 | end 57 | 58 | test "adds a %Scenario{} to the suite for each input of a job" do 59 | config = %Configuration{inputs: %{"large" => 100_000, "small" => 10}} 60 | suite = Benchmark.benchmark(%Suite{configuration: config}, "one_job", fn -> 1 end) 61 | input_names = Enum.map(suite.scenarios, fn scenario -> scenario.input_name end) 62 | inputs = Enum.map(suite.scenarios, fn scenario -> scenario.input end) 63 | 64 | assert length(suite.scenarios) == 2 65 | assert input_names == ["large", "small"] 66 | assert inputs == [100_000, 10] 67 | end 68 | 69 | test "can deal with the options tuple" do 70 | function = fn -> 1 end 71 | before = fn -> 2 end 72 | after_scenario = fn -> 3 end 73 | 74 | suite = 75 | %Suite{} 76 | |> Benchmark.benchmark( 77 | "job", 78 | {function, before_each: before, after_scenario: after_scenario} 79 | ) 80 | 81 | [scenario] = suite.scenarios 82 | 83 | assert %{ 84 | job_name: "job", 85 | function: ^function, 86 | before_each: ^before, 87 | after_scenario: ^after_scenario 88 | } = scenario 89 | end 90 | 91 | test "doesn't treat tagged scenarios as duplicates" do 92 | suite = %Suite{scenarios: [%Scenario{job_name: "job", tag: "what"}]} 93 | new_suite = Benchmark.benchmark(suite, "job", fn -> 1 end) 94 | 95 | assert length(new_suite.scenarios) == 2 96 | end 97 | end 98 | 99 | describe "collect/3" do 100 | test "prints the configuration information" do 101 | Benchmark.collect(%Suite{}, TestPrinter, TestRunner) 102 | 103 | assert_receive 
:configuration_information 104 | end 105 | 106 | test "sends the correct data to the benchmark runner" do 107 | scenarios = [%Scenario{job_name: "job_one"}] 108 | config = %Configuration{} 109 | benchmark_config = BenchmarkConfig.from(config) 110 | suite = %Suite{scenarios: scenarios, configuration: config} 111 | scenario_context = %ScenarioContext{config: benchmark_config, printer: TestPrinter} 112 | 113 | Benchmark.collect(suite, TestPrinter, TestRunner) 114 | 115 | assert_receive {:run_scenarios, ^scenarios, ^scenario_context} 116 | end 117 | 118 | test "returns a suite with scenarios returned from the runner" do 119 | scenarios = [%Scenario{job_name: "one", function: fn -> 1 end}] 120 | suite = %Suite{scenarios: scenarios} 121 | 122 | run_times = 123 | suite 124 | |> Benchmark.collect(TestPrinter, TestRunner) 125 | |> Map.fetch!(:scenarios) 126 | |> Enum.map(fn scenario -> scenario.run_time_data.samples end) 127 | 128 | assert length(run_times) == 1 129 | refute Enum.any?(run_times, fn run_time -> Enum.empty?(run_time) end) 130 | end 131 | end 132 | end 133 | -------------------------------------------------------------------------------- /test/benchee/configuration_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.ConfigurationTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Configuration, import: true 4 | 5 | alias Benchee.{Configuration, Suite} 6 | 7 | import DeepMerge 8 | import Benchee.Configuration 9 | 10 | @default_config %Configuration{} 11 | 12 | describe "init/1" do 13 | test "crashes for values that are going to be ignored" do 14 | assert_raise KeyError, fn -> init(runntime: 2) end 15 | end 16 | 17 | test "converts inputs map to a list and input keys to strings" do 18 | assert %Suite{configuration: %{inputs: [{"list", []}, {"map", %{}}]}} = 19 | init(inputs: %{"map" => %{}, list: []}) 20 | end 21 | 22 | test "doesn't convert input lists to maps and retains the order of input 
lists" do 23 | assert %Suite{configuration: %{inputs: [{"map", %{}}, {"list", []}]}} = 24 | init(inputs: [{"map", %{}}, {:list, []}]) 25 | end 26 | 27 | test "loses duplicated inputs keys after normalization" do 28 | assert %Suite{configuration: %{inputs: [{"map", %{}}]}} = 29 | init(inputs: %{"map" => %{}, map: %{}}) 30 | end 31 | 32 | test "keeps ordered inputs basically as is" do 33 | input_list = [{"map", %{}}, {"A", 1}] 34 | 35 | assert %Suite{configuration: %{inputs: ^input_list}} = 36 | init(inputs: input_list) 37 | end 38 | 39 | test "documents input_names" do 40 | assert %Suite{configuration: %{input_names: ["A", "B"]}} = 41 | init(inputs: %{"A" => 1, "B" => 2}) 42 | end 43 | 44 | test "input_names are normalized" do 45 | assert %Suite{configuration: %{input_names: ["a"]}} = 46 | init(inputs: %{a: 1}) 47 | end 48 | 49 | test "no inputs, no input_names" do 50 | assert %Suite{configuration: %{input_names: []}} = init() 51 | end 52 | 53 | test "uses information from :save to setup the external term formatter" do 54 | assert %Suite{ 55 | configuration: %{ 56 | formatters: [ 57 | Benchee.Formatters.Console, 58 | {Benchee.Formatters.TaggedSave, %{path: "save_one.benchee", tag: "main"}} 59 | ] 60 | } 61 | } = init(save: [path: "save_one.benchee", tag: "main"]) 62 | end 63 | 64 | test ":save tag defaults to date" do 65 | assert %Suite{configuration: %{formatters: [_, {_, %{tag: tag, path: "save_one.benchee"}}]}} = 66 | init(save: [path: "save_one.benchee"]) 67 | 68 | assert tag =~ ~r/\d\d\d\d-\d\d?-\d\d?--\d\d?-\d\d?-\d\d?/ 69 | end 70 | end 71 | 72 | describe ".deep_merge behaviour" do 73 | test "it can be adjusted with a map" do 74 | user_options = %{ 75 | time: 10, 76 | print: %{ 77 | configuration: false, 78 | fast_warning: false 79 | } 80 | } 81 | 82 | result = deep_merge(@default_config, user_options) 83 | 84 | expected = %Configuration{ 85 | time: 10, 86 | print: %{ 87 | configuration: false, 88 | fast_warning: false, 89 | benchmarking: true 90 | } 91 | }
92 | 93 | assert expected == result 94 | end 95 | 96 | test "it just replaces when given another configuration" do 97 | other_config = %Configuration{print: %{some: %{value: true}}} 98 | result = deep_merge(@default_config, other_config) 99 | expected = %Configuration{print: %{some: %{value: true}}} 100 | 101 | assert ^expected = result 102 | end 103 | end 104 | end 105 | -------------------------------------------------------------------------------- /test/benchee/conversion/count_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.CountTest do 2 | use ExUnit.Case, async: true 3 | import Benchee.Conversion.Count 4 | doctest Benchee.Conversion.Count, import: true 5 | 6 | describe ".scale" do 7 | test "123_456_789_012 scales to :billion" do 8 | assert scale(123_456_789_012) == {123.456789012, unit_for(:billion)} 9 | end 10 | 11 | test "12_345_678_901 scales to :billion" do 12 | assert scale(12_345_678_901) == {12.345678901, unit_for(:billion)} 13 | end 14 | 15 | test "1_234_567_890 scales to :billion" do 16 | assert scale(1_234_567_890) == {1.23456789, unit_for(:billion)} 17 | end 18 | 19 | test "123_456_789 scales to :million" do 20 | assert scale(123_456_789) == {123.456789, unit_for(:million)} 21 | end 22 | 23 | test "12_345_678 scales to :million" do 24 | assert scale(12_345_678) == {12.345678, unit_for(:million)} 25 | end 26 | 27 | test "1_234_567 scales to :million" do 28 | assert scale(1_234_567) == {1.234567, unit_for(:million)} 29 | end 30 | 31 | test "123_456.7 scales to :thousand" do 32 | assert scale(123_456.7) == {123.4567, unit_for(:thousand)} 33 | end 34 | 35 | test "12_345.67 scales to :thousand" do 36 | assert scale(12_345.67) == {12.34567, unit_for(:thousand)} 37 | end 38 | 39 | test "1_234.567 scales to :thousand" do 40 | assert scale(1_234.567) == {1.234567, unit_for(:thousand)} 41 | end 42 | 43 | test "123.4567 scales to :one" do 44 | assert scale(123.4567) == {123.4567, 
unit_for(:one)} 45 | end 46 | 47 | test "12.34567 scales to :one" do 48 | assert scale(12.34567) == {12.34567, unit_for(:one)} 49 | end 50 | 51 | test "1.234567 scales to :one" do 52 | assert scale(1.234567) == {1.234567, unit_for(:one)} 53 | end 54 | 55 | test "0.001234567 scales to :one" do 56 | assert scale(0.001234567) == {0.001234567, unit_for(:one)} 57 | end 58 | end 59 | 60 | describe ".format" do 61 | test "1_000_000" do 62 | assert format(1_000_000) == "1 M" 63 | end 64 | 65 | test "1_000.1234" do 66 | assert format(1_000.1234) == "1.00 K" 67 | end 68 | 69 | test "123.4" do 70 | assert format(123.4) == "123.40" 71 | end 72 | 73 | test "1.234" do 74 | assert format(1.234) == "1.23" 75 | end 76 | end 77 | 78 | describe ".best" do 79 | @list_with_mostly_ones [1, 100, 1_000] 80 | 81 | test "when list is mostly ones" do 82 | assert best(@list_with_mostly_ones) == unit_for(:one) 83 | end 84 | 85 | test "when list is mostly ones, strategy: :smallest" do 86 | assert best(@list_with_mostly_ones, strategy: :smallest) == unit_for(:one) 87 | end 88 | 89 | test "when list is mostly ones, strategy: :largest" do 90 | assert best(@list_with_mostly_ones, strategy: :largest) == unit_for(:thousand) 91 | end 92 | 93 | @list_with_thousands_and_millions_tied_for_most [ 94 | 0.0001, 95 | 1, 96 | 1_000, 97 | 100_000, 98 | 1_000_000, 99 | 10_000_000, 100 | 1_000_000_000 101 | ] 102 | 103 | test "when list has thousands and millions tied for most, billions highest" do 104 | assert best(@list_with_thousands_and_millions_tied_for_most) == unit_for(:million) 105 | end 106 | 107 | test "when list has thousands and millions tied for most, billions highest, strategy: :smallest" do 108 | best_unit = best(@list_with_thousands_and_millions_tied_for_most, strategy: :smallest) 109 | assert best_unit == unit_for(:one) 110 | end 111 | 112 | test "when list has thousands and millions tied for most, billions highest, strategy: :largest" do 113 | best_unit = 
best(@list_with_thousands_and_millions_tied_for_most, strategy: :largest) 114 | assert best_unit == unit_for(:billion) 115 | end 116 | 117 | @list_with_mostly_thousands [1_000, 2_000, 30_000, 999] 118 | 119 | test "when list is mostly thousands" do 120 | assert best(@list_with_mostly_thousands) == unit_for(:thousand) 121 | end 122 | 123 | test "when list is mostly thousands, strategy: :smallest" do 124 | assert best(@list_with_mostly_thousands, strategy: :smallest) == unit_for(:one) 125 | end 126 | 127 | test "when list is mostly thousands, strategy: :largest" do 128 | assert best(@list_with_mostly_thousands, strategy: :largest) == unit_for(:thousand) 129 | end 130 | 131 | test "when list is mostly thousands, strategy: :none" do 132 | assert best(@list_with_mostly_thousands, strategy: :none) == unit_for(:one) 133 | end 134 | end 135 | end 136 | -------------------------------------------------------------------------------- /test/benchee/conversion/deviation_percent_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.DeviationPercentTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Conversion.DeviationPercent, import: true 4 | end 5 | -------------------------------------------------------------------------------- /test/benchee/conversion/duration_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.DurationTest do 2 | use ExUnit.Case, async: true 3 | import Benchee.Conversion.Duration 4 | doctest Benchee.Conversion.Duration, import: true 5 | 6 | describe ".format" do 7 | test ".format(98.7654321)" do 8 | assert format(98.7654321) == "98.77 ns" 9 | end 10 | 11 | test ".format(987.654321)" do 12 | assert format(987.654321) == "987.65 ns" 13 | end 14 | 15 | test ".format(9_876.54321)" do 16 | assert format(9_876.54321) == "9.88 μs" 17 | end 18 | 19 | test ".format(98_765.4321)" do 20 | assert format(98_765.4321) == 
"98.77 μs" 21 | end 22 | 23 | test ".format(987_654.321)" do 24 | assert format(987_654.321) == "987.65 μs" 25 | end 26 | 27 | test ".format(9_876_543_210)" do 28 | assert format(9_876_543_210) == "9.88 s" 29 | end 30 | 31 | test ".format(98_765_432_190)" do 32 | assert format(98_765_432_190) == "1.65 min" 33 | end 34 | 35 | test ".format(987_654_321_987.6)" do 36 | assert format(987_654_321_987.6) == "16.46 min" 37 | end 38 | 39 | test ".format(9_876_543_219_876.5)" do 40 | assert format(9_876_543_219_876.5) == "2.74 h" 41 | end 42 | 43 | test ".format(523.0)" do 44 | assert format(523.0) == "523 ns" 45 | end 46 | 47 | test ".format(0)" do 48 | assert format(0) == "0 ns" 49 | end 50 | end 51 | 52 | describe ".format_human" do 53 | test ".format_human(0)" do 54 | assert format_human(0) == "0 ns" 55 | end 56 | 57 | test ".format_human(0.00)" do 58 | assert format_human(0.00) == "0 ns" 59 | end 60 | 61 | test ".format_human(98.7654321)" do 62 | assert format_human(98.7654321) == "98.77 ns" 63 | end 64 | 65 | test ".format_human(523.0)" do 66 | assert format_human(523.0) == "523 ns" 67 | end 68 | 69 | test ".format_human(987.654321)" do 70 | assert format_human(987.654321) == "987.65 ns" 71 | end 72 | 73 | test ".format_human(9_008)" do 74 | assert format_human(9_008) == "9 μs 8 ns" 75 | end 76 | 77 | # particularly nasty bug 78 | test ".format_human()" do 79 | assert format_human(2_000_000_000.0) == "2 s" 80 | end 81 | 82 | test ".format_human(9_876.54321)" do 83 | assert format_human(9_876.54321) == "9 μs 876.54 ns" 84 | end 85 | 86 | test ".format_human(98_765.4321)" do 87 | assert format_human(98_765.4321) == "98 μs 765.43 ns" 88 | end 89 | 90 | test ".format_human(987_654.321)" do 91 | assert format_human(987_654.321) == "987 μs 654.32 ns" 92 | end 93 | 94 | test ".format_human(9_008_000_000)" do 95 | assert format_human(9_008_000_000) == "9 s 8 ms" 96 | end 97 | 98 | test ".format_human(9_876_543_210)" do 99 | assert format_human(9_876_543_210) == "9 s 876 ms 
543 μs 210 ns" 100 | end 101 | 102 | test ".format_human(90_000_000_000)" do 103 | assert format_human(90_000_000_000) == "1 min 30 s" 104 | end 105 | 106 | test ".format_human(98_765_432_190)" do 107 | assert format_human(98_765_432_190) == "1 min 38 s 765 ms 432 μs 190 ns" 108 | end 109 | 110 | test ".format_human(987_654_321_987.6)" do 111 | assert format_human(987_654_321_987.6) == "16 min 27 s 654 ms 321 μs 987.60 ns" 112 | end 113 | 114 | test ".format_human(3_900_000_000_000)" do 115 | assert format_human(3_900_000_000_000) == "1 h 5 min" 116 | end 117 | 118 | test ".format_human(9_876_543_219_876.5)" do 119 | assert format_human(9_876_543_219_876.5) == "2 h 44 min 36 s 543 ms 219 μs 876.50 ns" 120 | end 121 | end 122 | 123 | @list_with_mostly_microseconds [1, 200, 3_000, 4_000, 500_000, 6_000_000, 7_777_000_000_000] 124 | 125 | describe ".best" do 126 | test "when list is mostly microseconds" do 127 | assert best(@list_with_mostly_microseconds) == unit_for(:microsecond) 128 | end 129 | 130 | test "when list is mostly microseconds, strategy: :smallest" do 131 | assert best(@list_with_mostly_microseconds, strategy: :smallest) == unit_for(:nanosecond) 132 | end 133 | 134 | test "when list is mostly microseconds, strategy: :largest" do 135 | assert best(@list_with_mostly_microseconds, strategy: :largest) == unit_for(:hour) 136 | end 137 | 138 | test "when list is mostly microseconds, strategy: :none" do 139 | assert best(@list_with_mostly_microseconds, strategy: :none) == unit_for(:nanosecond) 140 | end 141 | end 142 | end 143 | -------------------------------------------------------------------------------- /test/benchee/conversion/format_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.FormatTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Conversion.Format, import: true 4 | end 5 | -------------------------------------------------------------------------------- 
/test/benchee/conversion/memory_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.MemoryTest do 2 | use ExUnit.Case, async: true 3 | import Benchee.Conversion.Memory 4 | doctest Benchee.Conversion.Memory, import: true 5 | 6 | describe ".format" do 7 | test ".format(1_023)" do 8 | assert format(1_023) == "1023 B" 9 | end 10 | 11 | test ".format(1025)" do 12 | assert format(1_025) == "1.00 KB" 13 | end 14 | 15 | test ".format(876_543_219.8765)" do 16 | assert format(876_543_219.8765) == "835.94 MB" 17 | end 18 | 19 | test ".format(9_876_543_219.8765)" do 20 | assert format(9_876_543_219.8765) == "9.20 GB" 21 | end 22 | 23 | test ".format(14_569_876_543_219.8765)" do 24 | assert format(14_569_876_543_219.8765) == "13.25 TB" 25 | end 26 | 27 | test ".format(523.0)" do 28 | assert format(523.0) == "523 B" 29 | end 30 | 31 | test ".format(0)" do 32 | assert format(0) == "0 B" 33 | end 34 | end 35 | 36 | @list_with_mostly_megabytes [ 37 | 1, 38 | 200, 39 | 3_000_000, 40 | 4_000_000, 41 | 50_000_000, 42 | 50_000_000, 43 | 77_000_000_000 44 | ] 45 | 46 | describe ".best" do 47 | test "when list is mostly megabytes" do 48 | assert best(@list_with_mostly_megabytes) == unit_for(:megabyte) 49 | end 50 | 51 | test "when list is mostly megabytes, strategy: :smallest" do 52 | assert best(@list_with_mostly_megabytes, strategy: :smallest) == unit_for(:byte) 53 | end 54 | 55 | test "when list is mostly megabytes, strategy: :largest" do 56 | assert best(@list_with_mostly_megabytes, strategy: :largest) == unit_for(:gigabyte) 57 | end 58 | 59 | test "when list is mostly megabytes, strategy: :none" do 60 | assert best(@list_with_mostly_megabytes, strategy: :none) == unit_for(:byte) 61 | end 62 | end 63 | 64 | @kilobyte_unit unit_for(:kilobyte) 65 | @megabyte_unit unit_for(:megabyte) 66 | @terabyte_unit unit_for(:terabyte) 67 | 68 | describe ".convert" do 69 | test "convert kb to kb returns same value" do 70 | 
actual_value = convert({8, @kilobyte_unit}, @kilobyte_unit) 71 | assert actual_value == {8, @kilobyte_unit} 72 | end 73 | 74 | test "convert kb to mb returns correct result" do 75 | actual_value = convert({8, @kilobyte_unit}, @megabyte_unit) 76 | expected_value = 8 / 1024 77 | assert actual_value == {expected_value, @megabyte_unit} 78 | end 79 | 80 | test "convert mb to kb returns correct result" do 81 | actual_value = convert({8, @megabyte_unit}, @kilobyte_unit) 82 | expected_value = 8 * 1024 83 | assert actual_value == {expected_value, @kilobyte_unit} 84 | end 85 | 86 | test "convert kb to tb returns correct result" do 87 | actual_value = convert({800_000_000, @kilobyte_unit}, @terabyte_unit) 88 | expected_value = 0.7450580596923828 89 | assert actual_value == {expected_value, @terabyte_unit} 90 | end 91 | 92 | test "convert mb to kb returns correct result for atom" do 93 | actual_value = convert({8, :megabyte}, :kilobyte) 94 | expected_value = 8 * 1024 95 | assert actual_value == {expected_value, @kilobyte_unit} 96 | end 97 | end 98 | end 99 | -------------------------------------------------------------------------------- /test/benchee/conversion/scale_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Conversion.ScaleTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Conversion.Scale, import: true 4 | end 5 | -------------------------------------------------------------------------------- /test/benchee/conversion_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.ConversionTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Conversion, import: true 4 | end 5 | -------------------------------------------------------------------------------- /test/benchee/formatter_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.FormatterTest do 2 | use ExUnit.Case, async: 
true 3 | alias Benchee.{Configuration, Formatter, Suite, Test.FakeFormatter} 4 | 5 | @no_print_assigns %{test: %{progress_printer: Benchee.Test.FakeProgressPrinter}} 6 | 7 | describe "output/1" do 8 | test "calls `write/1` with the output of `format/1` on each module" do 9 | Formatter.output(%Suite{ 10 | configuration: %Configuration{ 11 | formatters: [{FakeFormatter, %{}}], 12 | assigns: @no_print_assigns 13 | } 14 | }) 15 | 16 | assert_receive {:write, "output of `format/1` with %{}", %{}} 17 | end 18 | 19 | test "works with just modules without option tuple, defaults to empty map" do 20 | Formatter.output(%Suite{ 21 | configuration: %Configuration{formatters: [FakeFormatter], assigns: @no_print_assigns} 22 | }) 23 | 24 | assert_receive {:write, "output of `format/1` with %{}", %{}} 25 | end 26 | 27 | test "options are passed on correctly" do 28 | Formatter.output(%Suite{ 29 | configuration: %Configuration{ 30 | formatters: [{FakeFormatter, %{a: :b}}], 31 | assigns: @no_print_assigns 32 | } 33 | }) 34 | 35 | assert_receive {:write, "output of `format/1` with %{a: :b}", %{a: :b}} 36 | end 37 | 38 | test "keyword list options are deep converted to maps" do 39 | Formatter.output(%Suite{ 40 | configuration: %Configuration{ 41 | formatters: [{FakeFormatter, [a: [b: :c]]}], 42 | assigns: @no_print_assigns 43 | } 44 | }) 45 | 46 | assert_receive {:write, "output of `format/1` with %{a: %{b: :c}}", %{a: %{b: :c}}} 47 | end 48 | 49 | test "mixing functions and formatters works" do 50 | suite = %Suite{ 51 | configuration: %Configuration{ 52 | formatters: [ 53 | {FakeFormatter, %{}}, 54 | fn suite -> send(self(), {:fun, suite, "me"}) end 55 | ], 56 | assigns: @no_print_assigns 57 | } 58 | } 59 | 60 | Formatter.output(suite) 61 | 62 | assert_receive {:write, "output of `format/1` with %{}", %{}} 63 | assert_receive {:fun, ^suite, "me"} 64 | end 65 | 66 | test "mixing functions and formatters works, even with multiple ones" do 67 | suite = %Suite{ 68 | configuration: 
%Configuration{ 69 | formatters: [ 70 | {FakeFormatter, %{}}, 71 | fn suite -> send(self(), {:fun, suite, "me"}) end, 72 | fn _suite -> send(self(), :more_fun) end, 73 | {FakeFormatter, %{other: :one}} 74 | ], 75 | assigns: @no_print_assigns 76 | } 77 | } 78 | 79 | Formatter.output(suite) 80 | 81 | assert_receive {:write, "output of `format/1` with %{}", %{}} 82 | assert_receive {:fun, ^suite, "me"} 83 | assert_receive :more_fun 84 | assert_receive {:write, "output of `format/1` with %{other: :one}", %{}} 85 | end 86 | 87 | test "no formatters, no problem" do 88 | suite = %Suite{ 89 | configuration: %Configuration{ 90 | formatters: [], 91 | assigns: @no_print_assigns 92 | } 93 | } 94 | 95 | assert suite == Formatter.output(suite) 96 | end 97 | 98 | test "returns the suite passed in as the first argument unchanged" do 99 | suite = %Suite{ 100 | configuration: %Configuration{ 101 | formatters: [{FakeFormatter, %{}}], 102 | assigns: @no_print_assigns 103 | } 104 | } 105 | 106 | assert Formatter.output(suite) == suite 107 | end 108 | 109 | test "lets you know it starts formatting now" do 110 | Formatter.output(%Suite{ 111 | configuration: %Configuration{formatters: [], assigns: @no_print_assigns} 112 | }) 113 | 114 | assert_received :formatting 115 | end 116 | end 117 | end 118 | -------------------------------------------------------------------------------- /test/benchee/formatters/tagged_save_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Formatters.TaggedSaveTest do 2 | use ExUnit.Case 3 | 4 | alias Benchee.{ 5 | Formatter, 6 | Formatters.TaggedSave, 7 | Scenario, 8 | Statistics, 9 | Suite 10 | } 11 | 12 | import Benchee.Formatters.TaggedSave 13 | import Benchee.Benchmark, only: [no_input: 0] 14 | import ExUnit.CaptureIO 15 | import Benchee.TestHelpers, only: [suite_without_scenario_tags: 1] 16 | 17 | @filename "test/tmp/some_file.etf" 18 | @benchee_tag "benchee-tag" 19 | @suite %Suite{ 20 | 
scenarios: [ 21 | %Scenario{ 22 | name: "Second", 23 | job_name: "Second", 24 | input_name: no_input(), 25 | input: no_input(), 26 | run_time_data: %{ 27 | statistics: %Statistics{ 28 | average: 200.0, 29 | ips: 5_000.0, 30 | std_dev_ratio: 0.1, 31 | median: 195.5, 32 | percentiles: %{99 => 400.1} 33 | } 34 | } 35 | }, 36 | %Scenario{ 37 | name: "First", 38 | job_name: "First", 39 | input_name: no_input(), 40 | input: no_input(), 41 | run_time_data: %{ 42 | statistics: %Statistics{ 43 | average: 100.0, 44 | ips: 10_000.0, 45 | std_dev_ratio: 0.1, 46 | median: 90.0, 47 | percentiles: %{99 => 300.1} 48 | } 49 | } 50 | } 51 | ] 52 | } 53 | 54 | @options %{ 55 | path: @filename, 56 | tag: @benchee_tag 57 | } 58 | 59 | describe ".format/2" do 60 | test "able to restore the original just fine" do 61 | {binary, path} = format(@suite, @options) 62 | 63 | loaded_suite = 64 | binary 65 | |> :erlang.binary_to_term() 66 | |> suite_without_scenario_tags() 67 | 68 | assert loaded_suite == @suite 69 | assert path == @filename 70 | end 71 | 72 | test "tags the scenarios and adds it to the name" do 73 | {binary, _path} = format(@suite, @options) 74 | 75 | loaded_suite = :erlang.binary_to_term(binary) 76 | 77 | Enum.each(loaded_suite.scenarios, fn scenario -> 78 | assert scenario.tag == @benchee_tag 79 | assert scenario.name =~ ~r/#{@benchee_tag}/ 80 | end) 81 | end 82 | 83 | test "doesn't tag scenarios that already have a tag" do 84 | tagged_scenario = %Scenario{tag: "some-tag"} 85 | suite = %Suite{@suite | scenarios: [tagged_scenario | @suite.scenarios]} 86 | 87 | tags = 88 | suite 89 | |> scenarios_from_formatted 90 | |> sorted_tags 91 | 92 | assert tags == [@benchee_tag, "some-tag"] 93 | end 94 | 95 | test "when duplicating tags for the same job the second gets -2" do 96 | tagged_scenario = %Scenario{job_name: "foo", tag: @benchee_tag} 97 | scenario = %Scenario{job_name: "foo"} 98 | suite = %Suite{@suite | scenarios: [scenario, tagged_scenario]} 99 | 100 | scenarios = 
scenarios_from_formatted(suite) 101 | tags = sorted_tags(scenarios) 102 | names = sorted_names(scenarios) 103 | 104 | assert tags == [@benchee_tag, @benchee_tag <> "-2"] 105 | assert names == ["foo (#{@benchee_tag})", "foo (#{@benchee_tag}-2)"] 106 | end 107 | 108 | test "when there's already a -2 and -3 tag we end up with -4" do 109 | scenario_1 = %Scenario{job_name: "foo", tag: @benchee_tag} 110 | scenario_2 = %Scenario{job_name: "foo", tag: "#{@benchee_tag}-2"} 111 | scenario_3 = %Scenario{job_name: "foo", tag: "#{@benchee_tag}-3"} 112 | new_scenario = %Scenario{job_name: "foo"} 113 | 114 | suite = %Suite{@suite | scenarios: [scenario_1, new_scenario, scenario_2, scenario_3]} 115 | 116 | scenarios = scenarios_from_formatted(suite) 117 | tags = sorted_tags(scenarios) 118 | names = sorted_names(scenarios) 119 | 120 | assert tags == 121 | [@benchee_tag, @benchee_tag <> "-2", @benchee_tag <> "-3", @benchee_tag <> "-4"] 122 | 123 | assert names == [ 124 | "foo (#{@benchee_tag})", 125 | "foo (#{@benchee_tag}-2)", 126 | "foo (#{@benchee_tag}-3)", 127 | "foo (#{@benchee_tag}-4)" 128 | ] 129 | end 130 | 131 | defp scenarios_from_formatted(suite) do 132 | {binary, _path} = format(suite, @options) 133 | loaded_suite = :erlang.binary_to_term(binary) 134 | loaded_suite.scenarios 135 | end 136 | 137 | defp sorted_tags(scenarios) do 138 | scenarios 139 | |> Enum.map(fn scenario -> scenario.tag end) 140 | |> Enum.uniq() 141 | |> Enum.sort() 142 | end 143 | 144 | defp sorted_names(scenarios) do 145 | scenarios 146 | |> Enum.map(fn scenario -> scenario.name end) 147 | |> Enum.uniq() 148 | |> Enum.sort() 149 | end 150 | end 151 | 152 | describe "Integrating with Formatter.output/3" do 153 | test "able to restore fully from file" do 154 | capture_io(fn -> Formatter.output(@suite, TaggedSave, @options) end) 155 | 156 | etf_data = File.read!(@filename) 157 | 158 | loaded_suite = 159 | etf_data 160 | |> :erlang.binary_to_term() 161 | |> suite_without_scenario_tags() 162 | 163 |
assert loaded_suite == @suite 164 | after 165 | if File.exists?(@filename), do: File.rm!(@filename) 166 | end 167 | end 168 | end 169 | -------------------------------------------------------------------------------- /test/benchee/output/benchmark_printer_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Output.BenchmarkPrintertest do 2 | use ExUnit.Case, async: true 3 | 4 | alias Benchee.Benchmark 5 | alias Benchee.Benchmark.BenchmarkConfig 6 | alias Benchee.Configuration 7 | alias Benchee.Scenario 8 | alias Benchee.System 9 | 10 | import ExUnit.CaptureIO 11 | import Benchee.Output.BenchmarkPrinter 12 | 13 | @system_info %System{ 14 | elixir: "1.15.7", 15 | erlang: "26.1", 16 | jit_enabled?: true, 17 | os: :macOS, 18 | num_cores: 4, 19 | cpu_speed: "Intel(R) Core(TM) i5-4260U CPU @ 1.40GHz", 20 | available_memory: 8_568_392_814 21 | } 22 | 23 | test ".duplicate_benchmark_warning" do 24 | output = 25 | capture_io(fn -> 26 | duplicate_benchmark_warning("Something") 27 | end) 28 | 29 | assert output =~ "same name" 30 | assert output =~ "Something" 31 | end 32 | 33 | describe ".configuration_information" do 34 | test "sys information" do 35 | output = 36 | capture_io(fn -> 37 | %{ 38 | configuration: %Configuration{parallel: 2, time: 10_000, warmup: 0, inputs: nil}, 39 | scenarios: [%Scenario{job_name: "one"}, %Scenario{job_name: "two"}], 40 | system: @system_info 41 | } 42 | |> configuration_information 43 | end) 44 | 45 | assert output =~ "Erlang 26.1" 46 | assert output =~ "Elixir 1.15.7" 47 | assert output =~ ~r/jit.*.true/i 48 | assert output =~ "Intel" 49 | assert output =~ "Cores: 4" 50 | assert output =~ "macOS" 51 | assert output =~ "8568392814" 52 | assert output =~ ~r/following configuration/i 53 | assert output =~ "warmup: 0 ns" 54 | assert output =~ "time: 10 μs" 55 | assert output =~ "memory time: 0 ns" 56 | assert output =~ "parallel: 2" 57 | assert output =~ "Estimated total run time: 20 
μs" 58 | end 59 | 60 | test "it scales times appropriately" do 61 | output = 62 | capture_io(fn -> 63 | %{ 64 | configuration: %Configuration{ 65 | parallel: 1, 66 | time: 60_000_000_000, 67 | warmup: 10_000_000_000, 68 | memory_time: 5_000_000_000, 69 | inputs: nil 70 | }, 71 | scenarios: [%Scenario{job_name: "one"}, %Scenario{job_name: "two"}], 72 | system: @system_info 73 | } 74 | |> configuration_information 75 | end) 76 | 77 | assert output =~ "warmup: 10 s" 78 | assert output =~ "time: 1 min" 79 | assert output =~ "memory time: 5 s" 80 | assert output =~ "parallel: 1" 81 | assert output =~ "Estimated total run time: 2 min 30 s" 82 | end 83 | 84 | @inputs %{"Arg 1" => 1, "Arg 2" => 2} 85 | test "multiple inputs" do 86 | output = 87 | capture_io(fn -> 88 | %{ 89 | configuration: %{ 90 | parallel: 2, 91 | time: 10_000, 92 | warmup: 0, 93 | memory_time: 1_000, 94 | reduction_time: 0, 95 | inputs: @inputs 96 | }, 97 | scenarios: [ 98 | %Scenario{job_name: "one", input_name: "Arg 1", input: 1}, 99 | %Scenario{job_name: "one", input_name: "Arg 2", input: 2}, 100 | %Scenario{job_name: "two", input_name: "Arg 1", input: 1}, 101 | %Scenario{job_name: "two", input_name: "Arg 2", input: 2} 102 | ], 103 | system: @system_info 104 | } 105 | |> configuration_information 106 | end) 107 | 108 | assert output =~ "time: 10 μs" 109 | assert output =~ "memory time: 1 μs" 110 | assert output =~ "reduction time: 0 ns" 111 | assert output =~ "parallel: 2" 112 | assert output =~ "inputs: Arg 1, Arg 2" 113 | assert output =~ "Estimated total run time: 44 μs" 114 | end 115 | 116 | test "does not print if disabled" do 117 | output = 118 | capture_io(fn -> 119 | %{configuration: %{print: %{configuration: false}}} 120 | |> configuration_information 121 | end) 122 | 123 | assert output == "" 124 | end 125 | end 126 | 127 | describe ".benchmarking" do 128 | @no_input Benchmark.no_input() 129 | test "prints information that it's currently benchmarking without input" do 130 | output = 131 | 
capture_io(fn -> 132 | benchmarking("Something", @no_input, %BenchmarkConfig{}) 133 | end) 134 | 135 | assert output =~ ~r/Benchmarking.+Something/i 136 | end 137 | 138 | test "prints information that it's currently benchmarking with input" do 139 | output = 140 | capture_io(fn -> 141 | benchmarking("Something", "great input", %BenchmarkConfig{}) 142 | end) 143 | 144 | assert output =~ ~r/Benchmarking.+Something with input great input/i 145 | end 146 | 147 | test "doesn't print if it's deactivated" do 148 | output = 149 | capture_io(fn -> 150 | benchmarking("A", "some", %BenchmarkConfig{print: %{benchmarking: false}}) 151 | end) 152 | 153 | assert output == "" 154 | end 155 | 156 | test "doesn't print if all times are set to 0.0" do 157 | output = 158 | capture_io(fn -> 159 | benchmarking("Never", "don't care", %BenchmarkConfig{ 160 | time: 0.0, 161 | warmup: 0.0, 162 | memory_time: 0.0, 163 | reduction_time: 0.0 164 | }) 165 | end) 166 | 167 | assert output == "" 168 | end 169 | 170 | test "doesn't print if all times are set to 0" do 171 | output = 172 | capture_io(fn -> 173 | benchmarking("Never", "don't care", %BenchmarkConfig{ 174 | time: 0, 175 | warmup: 0, 176 | memory_time: 0, 177 | reduction_time: 0 178 | }) 179 | end) 180 | 181 | assert output == "" 182 | end 183 | end 184 | 185 | test ".fast_warning warns with reference to more information" do 186 | output = 187 | capture_io(fn -> 188 | fast_warning() 189 | end) 190 | 191 | assert output =~ ~r/fast/i 192 | assert output =~ ~r/unreliable/i 193 | assert output =~ "benchee/wiki" 194 | end 195 | end 196 | -------------------------------------------------------------------------------- /test/benchee/scenario_loader_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.ScenarioLoaderTest do 2 | use ExUnit.Case 3 | import Benchee.ScenarioLoader 4 | alias Benchee.{Configuration, Scenario, Suite} 5 | 6 | test "`load` indeed loads scenarios into the suite" 
do 7 | scenarios = [%Scenario{tag: "old"}] 8 | suite = %Suite{scenarios: scenarios} 9 | 10 | File.write!("save.benchee", :erlang.term_to_binary(suite)) 11 | configuration = %Configuration{load: "save.benchee"} 12 | new_suite = load(%Suite{configuration: configuration}) 13 | 14 | assert new_suite.scenarios == scenarios 15 | after 16 | if File.exists?("save.benchee"), do: File.rm!("save.benchee") 17 | end 18 | 19 | test "`load` with a list of files" do 20 | scenarios1 = [%Scenario{tag: "one"}] 21 | scenarios2 = [%Scenario{tag: "two"}] 22 | suite1 = %Suite{scenarios: scenarios1} 23 | suite2 = %Suite{scenarios: scenarios2} 24 | 25 | File.write!("save1.benchee", :erlang.term_to_binary(suite1)) 26 | File.write!("save2.benchee", :erlang.term_to_binary(suite2)) 27 | 28 | configuration = %Configuration{load: ["save1.benchee", "save2.benchee"]} 29 | new_suite = load(%Suite{configuration: configuration}) 30 | 31 | assert new_suite.scenarios == scenarios1 ++ scenarios2 32 | after 33 | if File.exists?("save1.benchee"), do: File.rm!("save1.benchee") 34 | if File.exists?("save2.benchee"), do: File.rm!("save2.benchee") 35 | end 36 | 37 | test "`load` with a glob" do 38 | scenarios1 = [%Scenario{tag: "one"}] 39 | scenarios2 = [%Scenario{tag: "two"}] 40 | suite1 = %Suite{scenarios: scenarios1} 41 | suite2 = %Suite{scenarios: scenarios2} 42 | 43 | File.write!("save1.benchee", :erlang.term_to_binary(suite1)) 44 | File.write!("save2.benchee", :erlang.term_to_binary(suite2)) 45 | 46 | configuration = %Configuration{load: "save*.benchee"} 47 | new_suite = load(%Suite{configuration: configuration}) 48 | 49 | new_tags = Enum.map(new_suite.scenarios, fn scenario -> scenario.tag end) 50 | assert Enum.sort(new_tags) == ["one", "two"] 51 | after 52 | if File.exists?("save1.benchee"), do: File.rm!("save1.benchee") 53 | if File.exists?("save2.benchee"), do: File.rm!("save2.benchee") 54 | end 55 | end 56 | -------------------------------------------------------------------------------- 
/test/benchee/utility/deep_convert_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.DeepConvertTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Utility.DeepConvert, import: true 4 | end 5 | -------------------------------------------------------------------------------- /test/benchee/utility/erlang_version_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.ErlangVersionTest do 2 | use ExUnit.Case, async: true 3 | 4 | doctest Benchee.Utility.ErlangVersion, import: true 5 | end 6 | -------------------------------------------------------------------------------- /test/benchee/utility/file_creation_integration_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.FileCreationIntegrationTest do 2 | use ExUnit.Case 3 | import Benchee.Utility.FileCreation 4 | import ExUnit.CaptureIO 5 | 6 | @directory "testing/files" 7 | @filename "#{@directory}/test.txt" 8 | @file_name_1 "#{@directory}/test_small_input.txt" 9 | @file_name_2 "#{@directory}/test_big_list.txt" 10 | @input_to_contents %{ 11 | "small input" => "abc", 12 | "Big list" => "ABC" 13 | } 14 | 15 | describe ".each" do 16 | test "writes file contents just fine" do 17 | try do 18 | each(@input_to_contents, @filename, fn file, content, _ -> 19 | :ok = IO.write(file, content) 20 | end) 21 | 22 | assert_correct_files() 23 | after 24 | File.rm_rf!(@directory) 25 | File.rm_rf!("testing") 26 | end 27 | end 28 | 29 | test "by default writes files" do 30 | try do 31 | capture_io(fn -> each(@input_to_contents, @filename) end) 32 | assert_correct_files() 33 | after 34 | File.rm_rf!(@directory) 35 | File.rm_rf!("testing") 36 | end 37 | end 38 | 39 | test "by default prints out filenames" do 40 | try do 41 | output = capture_io(fn -> each(@input_to_contents, @filename) end) 42 | 43 | assert output =~ @file_name_1 
44 | assert output =~ @file_name_2 45 | after 46 | File.rm_rf!(@directory) 47 | File.rm_rf!("testing") 48 | end 49 | end 50 | 51 | test "with String.length/1 as a name it writes the correct file" do 52 | to_contents = %{ 53 | "String.length/1" => "abc" 54 | } 55 | 56 | capture_io(fn -> each(to_contents, @filename) end) 57 | assert File.exists?("#{@directory}/test_string_length_1.txt") 58 | after 59 | File.rm_rf!(@directory) 60 | File.rm_rf!("testing") 61 | end 62 | 63 | defp assert_correct_files do 64 | assert File.exists?(@file_name_1) 65 | assert File.exists?(@file_name_2) 66 | refute File.exists?("#{@directory}/test") 67 | 68 | assert File.read!(@file_name_1) == "abc" 69 | assert File.read!(@file_name_2) == "ABC" 70 | end 71 | 72 | test "is passed the filenames" do 73 | try do 74 | {:ok, agent} = Agent.start(fn -> [] end) 75 | 76 | each(@input_to_contents, @filename, fn file, content, filename -> 77 | :ok = IO.write(file, content) 78 | Agent.update(agent, fn state -> [filename | state] end) 79 | end) 80 | 81 | file_names = Agent.get(agent, fn state -> state end) 82 | assert Enum.member?(file_names, @file_name_1) 83 | assert Enum.member?(file_names, @file_name_2) 84 | after 85 | File.rm_rf!(@directory) 86 | File.rm_rf!("testing") 87 | end 88 | end 89 | end 90 | end 91 | -------------------------------------------------------------------------------- /test/benchee/utility/file_creation_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.FileCreationTest do 2 | use ExUnit.Case, async: true 3 | doctest Benchee.Utility.FileCreation, import: true 4 | end 5 | -------------------------------------------------------------------------------- /test/benchee/utility/repeat_n_test.exs: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Utility.RepeatNTest do 2 | use ExUnit.Case, async: true 3 | import Benchee.Utility.RepeatN 4 | 5 | test "calls it n times" 
do 6 | assert_called_n(10) 7 | end 8 | 9 | test "calls it only one time when 1 is specified" do 10 | assert_called_n(1) 11 | end 12 | 13 | test "calls it 0 times when 0 is specified" do 14 | assert_called_n(0) 15 | end 16 | 17 | defp assert_called_n(n) do 18 | repeat_n(fn -> send(self(), :called) end, n) 19 | 20 | assert_called_exactly_times(n) 21 | end 22 | 23 | defp assert_called_exactly_times(n) when n <= 0 do 24 | refute_receive :called 25 | end 26 | 27 | defp assert_called_exactly_times(n) do 28 | Enum.each(Enum.to_list(1..n), fn _ -> assert_receive :called end) 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /test/fixtures/escript/.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where third-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Ignore package tarball (built via "mix hex.build"). 23 | escript-*.tar 24 | 25 | # Temporary files, for example, from tests. 
26 | /tmp/ 27 | 28 | /escript -------------------------------------------------------------------------------- /test/fixtures/escript/README.md: -------------------------------------------------------------------------------- 1 | # Escript 2 | 3 | **TODO: Add description** 4 | 5 | ## Installation 6 | 7 | If [available in Hex](https://hex.pm/docs/publish), the package can be installed 8 | by adding `escript` to your list of dependencies in `mix.exs`: 9 | 10 | ```elixir 11 | def deps do 12 | [ 13 | {:escript, "~> 0.1.0"} 14 | ] 15 | end 16 | ``` 17 | 18 | Documentation can be generated with [ExDoc](https://github.com/elixir-lang/ex_doc) 19 | and published on [HexDocs](https://hexdocs.pm). Once published, the docs can 20 | be found at . 21 | 22 | -------------------------------------------------------------------------------- /test/fixtures/escript/lib/escript.ex: -------------------------------------------------------------------------------- 1 | defmodule Escript do 2 | # is just testdummy 3 | @moduledoc false 4 | def main(_args \\ []) do 5 | list = Enum.to_list(1..10_000) 6 | map_fun = fn i -> [i, i * i] end 7 | 8 | Benchee.run( 9 | %{ 10 | "flat_map" => fn -> Enum.flat_map(list, map_fun) end, 11 | "map.flatten" => fn -> list |> Enum.map(map_fun) |> List.flatten() end 12 | }, 13 | time: 0.01, 14 | warmup: 0.005 15 | ) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /test/fixtures/escript/mix.exs: -------------------------------------------------------------------------------- 1 | defmodule Escript.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :escript, 7 | version: "0.1.0", 8 | elixir: "~> 1.6", 9 | start_permanent: Mix.env() == :prod, 10 | deps: deps(), 11 | escript: [main_module: Escript] 12 | ] 13 | end 14 | 15 | # Run "mix help compile.app" to learn about applications. 
16 | def application do 17 | [ 18 | extra_applications: [:logger] 19 | ] 20 | end 21 | 22 | # Run "mix help deps" to learn about dependencies. 23 | defp deps do 24 | [ 25 | {:benchee, "~> 1.0", path: "../../.."} 26 | # {:dep_from_hexpm, "~> 0.3.0"}, 27 | # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"} 28 | ] 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /test/fixtures/escript/mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"}, 3 | "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"}, 4 | } 5 | -------------------------------------------------------------------------------- /test/fixtures/escript/test.sh: -------------------------------------------------------------------------------- 1 | mix deps.get && mix escript.build && ./escript -------------------------------------------------------------------------------- /test/support/bench_keyword.ex: -------------------------------------------------------------------------------- 1 | # Original code by Michal Muskala 2 | # https://gist.github.com/michalmuskala/5ff53581b4b53adec2fff7fb4d69fd52 3 | defmodule BenchKeyword do 4 | @compile :inline_list_funcs 5 | 6 | @moduledoc """ 7 | Together with the benchmark illustrated multiple problems in the memory measurement code. 8 | """ 9 | 10 | def delete_v0(keywords, key) when is_list(keywords) and is_atom(key) do 11 | :lists.filter(fn {k, _} -> k != key end, keywords) 12 | end 13 | 14 | def delete_v1(keywords, key) when is_list(keywords) and is_atom(key) do 15 | do_delete(keywords, key, _deleted? 
= false) 16 | catch 17 | :not_deleted -> keywords 18 | end 19 | 20 | defp do_delete([{key, _} | rest], key, _deleted?), 21 | do: do_delete(rest, key, true) 22 | 23 | defp do_delete([{_, _} = pair | rest], key, deleted?), 24 | do: [pair | do_delete(rest, key, deleted?)] 25 | 26 | defp do_delete([], _key, _deleted? = true), 27 | do: [] 28 | 29 | defp do_delete([], _key, _deleted? = false), 30 | do: throw(:not_deleted) 31 | 32 | def delete_v2(keywords, key) when is_list(keywords) and is_atom(key) do 33 | delete_v2_key(keywords, key, []) 34 | end 35 | 36 | defp delete_v2_key([{key, _} | tail], key, heads) do 37 | delete_v2_key(tail, key, heads) 38 | end 39 | 40 | defp delete_v2_key([{_, _} = pair | tail], key, heads) do 41 | delete_v2_key(tail, key, [pair | heads]) 42 | end 43 | 44 | defp delete_v2_key([], _key, heads) do 45 | :lists.reverse(heads) 46 | end 47 | 48 | def delete_v3(keywords, key) when is_list(keywords) and is_atom(key) do 49 | case :lists.keymember(key, 1, keywords) do 50 | true -> delete_v3_key(keywords, key, []) 51 | _ -> keywords 52 | end 53 | end 54 | 55 | defp delete_v3_key([{key, _} | tail], key, heads) do 56 | delete_v3_key(tail, key, heads) 57 | end 58 | 59 | defp delete_v3_key([{_, _} = pair | tail], key, heads) do 60 | delete_v3_key(tail, key, [pair | heads]) 61 | end 62 | 63 | defp delete_v3_key([], _key, heads) do 64 | :lists.reverse(heads) 65 | end 66 | end 67 | -------------------------------------------------------------------------------- /test/support/fake_benchmark_printer.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Test.FakeBenchmarkPrinter do 2 | @moduledoc false 3 | 4 | def duplicate_benchmark_warning(name) do 5 | send(self(), {:duplicate, name}) 6 | end 7 | 8 | def configuration_information(_) do 9 | send(self(), :configuration_information) 10 | end 11 | 12 | def benchmarking(name, input_information, _) do 13 | send(self(), {:benchmarking, name, input_information}) 14 | 
end 15 | 16 | def fast_warning do 17 | send(self(), :fast_warning) 18 | end 19 | 20 | def function_call_overhead(overhead) do 21 | send(self(), {:function_call_overhead, overhead}) 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /test/support/fake_benchmark_runner.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Test.FakeBenchmarkRunner do 2 | @moduledoc false 3 | 4 | def run_scenarios(scenarios, scenario_context) do 5 | send(self(), {:run_scenarios, scenarios, scenario_context}) 6 | 7 | Enum.map(scenarios, fn scenario -> 8 | %{scenario | run_time_data: %{scenario.run_time_data | samples: [1.0]}} 9 | end) 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /test/support/fake_formatter.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Test.FakeFormatter do 2 | @moduledoc false 3 | 4 | @behaviour Benchee.Formatter 5 | 6 | def format(_, options) do 7 | "output of `format/1` with #{inspect(options)}" 8 | end 9 | 10 | def write(output, options) do 11 | send(self(), {:write, output, options}) 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /test/support/fake_profile_printer.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Test.FakeProfilePrinter do 2 | @moduledoc false 3 | 4 | def profiling(name, profiler) do 5 | send(self(), {:profiling, name, profiler}) 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /test/support/fake_progress_printer.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.Test.FakeProgressPrinter do 2 | @moduledoc false 3 | 4 | def calculating_statistics(_) do 5 | send(self(), :calculating_statistics) 6 | end 7 | 8 | 
def formatting(_) do 9 | send(self(), :formatting) 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /test/support/test_helpers.ex: -------------------------------------------------------------------------------- 1 | defmodule Benchee.TestHelpers do 2 | @moduledoc false 3 | 4 | import ExUnit.Assertions 5 | 6 | @default_retries 10 7 | 8 | # retry tests that are doing actual benchmarking and are flaky 9 | # on overloaded and/or slower systems 10 | def retrying(asserting_function, n \\ @default_retries) 11 | 12 | def retrying(asserting_function, 1) do 13 | asserting_function.() 14 | end 15 | 16 | def retrying(asserting_function, n) do 17 | asserting_function.() 18 | rescue 19 | # The profile tests have been too flakey due to process not being started 20 | some_error -> 21 | # credo:disable-for-next-line Credo.Check.Warning.IoInspect 22 | IO.inspect(some_error, label: "Error being retried:") 23 | retrying(asserting_function, n - 1) 24 | end 25 | 26 | def assert_received_exactly(expected) do 27 | Enum.each(expected, fn message -> assert_received ^message end) 28 | 29 | expected 30 | |> Enum.uniq() 31 | |> Enum.each(fn message -> refute_received(^message) end) 32 | end 33 | 34 | def suite_without_scenario_tags(suite) do 35 | scenarios = 36 | Enum.map(suite.scenarios, fn scenario -> 37 | %Benchee.Scenario{scenario | tag: nil, name: scenario.job_name} 38 | end) 39 | 40 | %Benchee.Suite{suite | scenarios: scenarios} 41 | end 42 | 43 | @doc """ 44 | Get a `:timer.sleep/1` time, that does not run into danger of triggering repeated measurements. 45 | 46 | `:timer.sleep/1` measures time in milliseconds. If the resolution of the native clock is in 47 | nanoseconds then 1 is fine (1_000_000 nanoseconds), whereas if the resolution is milliseconds or 48 | less it becomes hard to hit the limit we've setup in `Benchee.Benchmark.RepeatedMeasurement`, 49 | which is to hit at least 10 time units of measurement. 
50 | 51 | We can do this at compile time, as the system clock should not change. 52 | 53 | Specifically this used on Windows CI, which for whatever reason that I do not understand 54 | seems to have a resolution of 100 which is... 10 milliseconds. Which is... way too little. 55 | """ 56 | @clock_resolution Access.get(:erlang.system_info(:os_monotonic_time_source), :resolution) 57 | @milliseconds Benchee.Conversion.Duration.convert_value({1, :second}, :millisecond) 58 | @minimum_measured_time 10 59 | @min_sleep_time 1 60 | @safe_test_sleep_time_float max( 61 | @minimum_measured_time / (@clock_resolution / @milliseconds), 62 | @min_sleep_time 63 | ) 64 | @safe_test_sleep_time trunc(@safe_test_sleep_time_float) 65 | def sleep_safe_time do 66 | :timer.sleep(@safe_test_sleep_time) 67 | end 68 | 69 | def safe_sleep_time do 70 | @safe_test_sleep_time 71 | end 72 | end 73 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | # OTP 18 doesn't support the memory measurement things we need 2 | otp_release = List.to_integer(:erlang.system_info(:otp_release)) 3 | exclusions = if otp_release > 18, do: [], else: [memory_measure: true] 4 | 5 | # On Windows we have by far worse time measurements (millisecond level) 6 | # see: https://github.com/bencheeorg/benchee/pull/195#issuecomment-377010006 7 | {_, os} = :os.type() 8 | 9 | # mac and windows just aren't as fast 10 | exclusions = 11 | case os do 12 | :nt -> 13 | [{:performance, true} | exclusions] 14 | 15 | :darwin -> 16 | [{:performance, true} | exclusions] 17 | 18 | _ -> 19 | exclusions 20 | end 21 | 22 | # Somehow at some point the resolution on the Windows CI got very bad to 100 (which is 10ms) 23 | # and so we gotta get some level in here not to work around them too much. 
24 | clock_resolution = Access.get(:erlang.system_info(:os_monotonic_time_source), :resolution) 25 | 26 | minimum_millisecond_resolution = 1000 27 | 28 | exclusions = 29 | if clock_resolution < minimum_millisecond_resolution do 30 | [{:minimum_millisecond_resolution_clock, true} | exclusions] 31 | else 32 | exclusions 33 | end 34 | 35 | # to trigger fast function repetition we'd need to have a clock with at most a resolution of 36 | # ~100ns 37 | ns_100_resolution = 10_000_000 38 | 39 | exclusions = 40 | if clock_resolution > ns_100_resolution do 41 | [{:needs_fast_function_repetition, true} | exclusions] 42 | else 43 | exclusions 44 | end 45 | 46 | # tests tagged :nanosecond_resolution_clock need a clock with full nanosecond resolution, 47 | # i.e. a resolution of at least 1_000_000_000 48 | ns_resolution = 1_000_000_000 49 | 50 | exclusions = 51 | if clock_resolution < ns_resolution do 52 | [{:nanosecond_resolution_clock, true} | exclusions] 53 | else 54 | exclusions 55 | end 56 | 57 | # somehow on CI macos doesn't have the JIT enabled installed via brew: https://github.com/bencheeorg/benchee/pull/426 58 | exclusions = 59 | case os do 60 | :darwin -> 61 | [{:guaranteed_jit, true} | exclusions] 62 | 63 | _ -> 64 | exclusions 65 | end 66 | 67 | ExUnit.start(exclude: exclusions) 68 | -------------------------------------------------------------------------------- /tools/plts/.gitignore: -------------------------------------------------------------------------------- 1 | # Dialyzer needs the directory to exist beforehand --------------------------------------------------------------------------------