├── .github ├── ISSUE_TEMPLATE.md ├── dependabot.yml └── workflows │ ├── main.yml │ ├── mypy.yml │ └── publish.yml ├── .gitignore ├── .readthedocs.yaml ├── AUTHORS ├── BENCHMARKS_FORMAT.md ├── COPYING ├── MANIFEST.in ├── README.rst ├── TODO.rst ├── azure-pipelines.yml ├── benchmarks ├── dev.py ├── doc ├── Makefile ├── benchmark.conf ├── benchmark.conf.sample ├── benchmarks.rst ├── changelog.rst ├── conf.py ├── cpython_results_2017.rst ├── custom_benchmarks.rst ├── html5lib.json.gz ├── images │ ├── bm_chaos.png │ ├── bm_raytrace.jpg │ ├── html5lib.png │ └── sympy_sum.png ├── index.rst ├── make.bat ├── sympy_sum.json.gz └── usage.rst ├── pyperformance ├── __init__.py ├── __main__.py ├── _benchmark.py ├── _benchmark_metadata.py ├── _benchmark_selections.py ├── _manifest.py ├── _pip.py ├── _pyproject_toml.py ├── _python.py ├── _pythoninfo.py ├── _utils.py ├── _venv.py ├── cli.py ├── commands.py ├── compare.py ├── compile.py ├── data-files │ └── benchmarks │ │ ├── MANIFEST │ │ ├── bm_2to3 │ │ ├── data │ │ │ └── 2to3 │ │ │ │ ├── README.txt │ │ │ │ ├── __init__.py.txt │ │ │ │ ├── context_processors.py.txt │ │ │ │ ├── exceptions.py.txt │ │ │ │ ├── mail.py.txt │ │ │ │ ├── paginator.py.txt │ │ │ │ ├── signals.py.txt │ │ │ │ ├── template_loader.py.txt │ │ │ │ ├── urlresolvers.py.txt │ │ │ │ └── xheaders.py.txt │ │ ├── pyproject.toml │ │ ├── run_benchmark.py │ │ └── vendor │ │ │ ├── LICENSE.txt │ │ │ ├── pyproject.toml │ │ │ └── src │ │ │ └── lib2to3 │ │ │ ├── Grammar.txt │ │ │ ├── PatternGrammar.txt │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ ├── btm_matcher.py │ │ │ ├── btm_utils.py │ │ │ ├── fixer_base.py │ │ │ ├── fixer_util.py │ │ │ ├── fixes │ │ │ ├── __init__.py │ │ │ ├── fix_apply.py │ │ │ ├── fix_asserts.py │ │ │ ├── fix_basestring.py │ │ │ ├── fix_buffer.py │ │ │ ├── fix_dict.py │ │ │ ├── fix_except.py │ │ │ ├── fix_exec.py │ │ │ ├── fix_execfile.py │ │ │ ├── fix_exitfunc.py │ │ │ ├── fix_filter.py │ │ │ ├── fix_funcattrs.py │ │ │ ├── fix_future.py │ │ │ ├── fix_getcwdu.py │ │ │ ├── fix_has_key.py │ │ │ ├── fix_idioms.py │ │ │ ├── fix_import.py │ │ │ ├── fix_imports.py │ │ │ ├── fix_imports2.py │ │ │ ├── fix_input.py │ │ │ ├── fix_intern.py │ │ │ ├── fix_isinstance.py │ │ │ ├── fix_itertools.py │ │ │ ├── fix_itertools_imports.py │ │ │ ├── fix_long.py │ │ │ ├── fix_map.py │ │ │ ├── fix_metaclass.py │ │ │ ├── fix_methodattrs.py │ │ │ ├── fix_ne.py │ │ │ ├── fix_next.py │ │ │ ├── fix_nonzero.py │ │ │ ├── fix_numliterals.py │ │ │ ├── fix_operator.py │ │ │ ├── fix_paren.py │ │ │ ├── fix_print.py │ │ │ ├── fix_raise.py │ │ │ ├── fix_raw_input.py │ │ │ ├── fix_reduce.py │ │ │ ├── fix_reload.py │ │ │ ├── fix_renames.py │ │ │ ├── fix_repr.py │ │ │ ├── fix_set_literal.py │ │ │ ├── fix_standarderror.py │ │ │ ├── fix_sys_exc.py │ │ │ ├── fix_throw.py │ │ │ ├── fix_tuple_params.py │ │ │ ├── fix_types.py │ │ │ ├── fix_unicode.py │ │ │ ├── fix_urllib.py │ │ │ ├── fix_ws_comma.py │ │ │ ├── fix_xrange.py │ │ │ ├── fix_xreadlines.py │ │ │ └── fix_zip.py │ │ │ ├── main.py │ │ │ ├── patcomp.py │ │ │ ├── pgen2 │ │ │ ├── __init__.py │ │ │ ├── conv.py │ │ │ ├── driver.py │ │ │ ├── grammar.py │ │ │ ├── literals.py │ │ │ ├── parse.py │ │ │ ├── pgen.py │ │ │ ├── token.py │ │ │ └── tokenize.py │ │ │ ├── pygram.py │ │ │ ├── pytree.py │ │ │ └── refactor.py │ │ ├── bm_argparse │ │ ├── bm_argparse_subparsers.toml │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_async_generators │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_async_tree │ │ ├── bm_async_tree_cpu_io_mixed.toml │ │ ├── 
bm_async_tree_cpu_io_mixed_tg.toml │ │ ├── bm_async_tree_eager.toml │ │ ├── bm_async_tree_eager_cpu_io_mixed.toml │ │ ├── bm_async_tree_eager_cpu_io_mixed_tg.toml │ │ ├── bm_async_tree_eager_io.toml │ │ ├── bm_async_tree_eager_io_tg.toml │ │ ├── bm_async_tree_eager_memoization.toml │ │ ├── bm_async_tree_eager_memoization_tg.toml │ │ ├── bm_async_tree_eager_tg.toml │ │ ├── bm_async_tree_io.toml │ │ ├── bm_async_tree_io_tg.toml │ │ ├── bm_async_tree_memoization.toml │ │ ├── bm_async_tree_memoization_tg.toml │ │ ├── bm_async_tree_tg.toml │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_asyncio_tcp │ │ ├── bm_asyncio_tcp_ssl.toml │ │ ├── pyproject.toml │ │ ├── run_benchmark.py │ │ ├── ssl_cert.pem │ │ └── ssl_key.pem │ │ ├── bm_asyncio_websockets │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_barnes_hut │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_bpe_tokeniser │ │ ├── data │ │ │ └── frankenstein_intro.txt │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_chameleon │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_chaos │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_comprehensions │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_concurrent_imap │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_coroutines │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_coverage │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_crypto_pyaes │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_dask │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_decimal_factorial │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_decimal_pi │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_deepcopy │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_deltablue │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_django_template │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_docutils │ │ ├── data │ │ │ └── docs │ │ │ │ ├── api │ │ │ │ ├── publisher.txt │ │ │ │ ├── runtime-settings.txt │ │ │ │ └── transforms.txt │ │ │ │ ├── dev │ │ │ │ ├── distributing.txt │ │ │ │ ├── enthought-plan.txt │ │ │ │ ├── enthought-rfp.txt │ │ │ │ ├── hacking.txt │ │ │ │ ├── policies.txt │ │ │ │ ├── pysource.txt │ │ │ │ ├── release.txt │ │ │ │ ├── repository.txt │ │ │ │ ├── rst │ │ │ │ │ ├── alternatives.txt │ │ │ │ │ └── problems.txt │ │ │ │ ├── runtime-settings-processing.txt │ │ │ │ ├── semantics.txt │ │ │ │ ├── testing.txt │ │ │ │ ├── todo.txt │ │ │ │ └── website.txt │ │ │ │ ├── howto │ │ │ │ ├── cmdline-tool.txt │ │ │ │ ├── html-stylesheets.txt │ │ │ │ ├── i18n.txt │ │ │ │ ├── rst-directives.txt │ │ │ │ ├── rst-roles.txt │ │ │ │ └── security.txt │ │ │ │ ├── index.txt │ │ │ │ ├── peps │ │ │ │ ├── pep-0256.txt │ │ │ │ ├── pep-0257.txt │ │ │ │ ├── pep-0258.txt │ │ │ │ └── pep-0287.txt │ │ │ │ ├── ref │ │ │ │ ├── doctree.txt │ │ │ │ └── rst │ │ │ │ │ ├── definitions.txt │ │ │ │ │ ├── directives.txt │ │ │ │ │ ├── introduction.txt │ │ │ │ │ ├── mathematics.txt │ │ │ │ │ ├── restructuredtext.txt │ │ │ │ │ └── roles.txt │ │ │ │ └── user │ │ │ │ ├── config.txt │ │ │ │ ├── emacs.txt │ │ │ │ ├── html.txt │ │ │ │ ├── images │ │ │ │ ├── big-black.png │ │ │ │ ├── big-white.png │ │ │ │ ├── default.png │ │ │ │ ├── happy_monkey.png │ │ │ │ ├── medium-black.png │ │ │ │ ├── medium-white.png │ │ │ │ ├── rsp-all.png │ │ │ │ ├── rsp-breaks.png │ │ │ │ ├── 
rsp-covers.png │ │ │ │ ├── rsp-cuts.png │ │ │ │ ├── rsp-empty.png │ │ │ │ ├── rsp-objects.png │ │ │ │ ├── rsp.svg │ │ │ │ ├── s5-files.png │ │ │ │ ├── s5-files.svg │ │ │ │ ├── small-black.png │ │ │ │ └── small-white.png │ │ │ │ ├── latex.txt │ │ │ │ ├── links.txt │ │ │ │ ├── mailing-lists.txt │ │ │ │ ├── manpage.txt │ │ │ │ ├── odt.txt │ │ │ │ ├── rst │ │ │ │ ├── cheatsheet.txt │ │ │ │ ├── demo.txt │ │ │ │ ├── images │ │ │ │ │ ├── biohazard-bitmap-scaling.svg │ │ │ │ │ ├── biohazard-bitmap.svg │ │ │ │ │ ├── biohazard-scaling.svg │ │ │ │ │ ├── biohazard.png │ │ │ │ │ ├── biohazard.svg │ │ │ │ │ ├── biohazard.swf │ │ │ │ │ ├── pens.mp4 │ │ │ │ │ ├── title-scaling.svg │ │ │ │ │ ├── title.png │ │ │ │ │ └── title.svg │ │ │ │ └── quickstart.txt │ │ │ │ ├── smartquotes.txt │ │ │ │ └── tools.txt │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_dulwich_log │ │ ├── data │ │ │ └── asyncio.git │ │ │ │ ├── COMMIT_EDITMSG │ │ │ │ ├── FETCH_HEAD │ │ │ │ ├── HEAD │ │ │ │ ├── ORIG_HEAD │ │ │ │ ├── config │ │ │ │ ├── description │ │ │ │ ├── hooks │ │ │ │ ├── applypatch-msg.sample │ │ │ │ ├── commit-msg.sample │ │ │ │ ├── post-update.sample │ │ │ │ ├── pre-applypatch.sample │ │ │ │ ├── pre-commit.sample │ │ │ │ ├── pre-push.sample │ │ │ │ ├── pre-rebase.sample │ │ │ │ ├── prepare-commit-msg.sample │ │ │ │ └── update.sample │ │ │ │ ├── index │ │ │ │ ├── info │ │ │ │ ├── exclude │ │ │ │ └── refs │ │ │ │ ├── logs │ │ │ │ ├── HEAD │ │ │ │ └── refs │ │ │ │ │ ├── heads │ │ │ │ │ └── master │ │ │ │ │ └── remotes │ │ │ │ │ └── origin │ │ │ │ │ ├── HEAD │ │ │ │ │ ├── bind_modules │ │ │ │ │ ├── master │ │ │ │ │ └── zero_timeout │ │ │ │ ├── objects │ │ │ │ ├── info │ │ │ │ │ └── packs │ │ │ │ └── pack │ │ │ │ │ ├── pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx │ │ │ │ │ └── pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack │ │ │ │ ├── packed-refs │ │ │ │ └── refs │ │ │ │ └── remotes │ │ │ │ └── origin │ │ │ │ └── HEAD │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_fannkuch │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_float │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_gc_collect │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_gc_traversal │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_generators │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_genshi │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_go │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_hexiom │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_hg_startup │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_html5lib │ │ ├── data │ │ │ └── w3_tr_html5.html │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_json_dumps │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_json_loads │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_logging │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_mako │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_mdp │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_meteor_contest │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_nbody │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_networkx │ │ ├── bm_networkx_connected_components.toml │ │ ├── bm_networkx_k_core.toml │ │ ├── data │ │ │ └── amazon0302.txt.gz │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── 
bm_nqueens │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_pathlib │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_pickle │ │ ├── bm_pickle_dict.toml │ │ ├── bm_pickle_list.toml │ │ ├── bm_pickle_pure_python.toml │ │ ├── bm_unpickle.toml │ │ ├── bm_unpickle_list.toml │ │ ├── bm_unpickle_pure_python.toml │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_pidigits │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_pprint │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_pyflate │ │ ├── data │ │ │ └── interpreter.tar.bz2 │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_python_startup │ │ ├── bm_python_startup_no_site.toml │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_raytrace │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_regex_compile │ │ ├── bm_regex_effbot.py │ │ ├── bm_regex_v8.py │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_regex_dna │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_regex_effbot │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_regex_v8 │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_richards │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_richards_super │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_scimark │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_spectral_norm │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_sphinx │ │ ├── data │ │ │ └── Doc │ │ │ │ ├── about.rst │ │ │ │ ├── bugs.rst │ │ │ │ ├── conf.py │ │ │ │ ├── constraints.txt │ │ │ │ ├── contents.rst │ │ │ │ ├── copyright.rst │ │ │ │ ├── glossary.rst │ │ │ │ ├── howto │ │ │ │ ├── annotations.rst │ │ │ │ ├── argparse.rst │ │ │ │ ├── clinic.rst │ │ │ │ ├── cporting.rst │ │ │ │ ├── curses.rst │ │ │ │ ├── descriptor.rst │ │ │ │ ├── enum.rst │ │ │ │ ├── free-threading-extensions.rst │ │ │ │ ├── functional.rst │ │ │ │ ├── gdb_helpers.rst │ │ │ │ ├── index.rst │ │ │ │ ├── instrumentation.rst │ │ │ │ ├── ipaddress.rst │ │ │ │ └── isolating-extensions.rst │ │ │ │ ├── license.rst │ │ │ │ └── tools │ │ │ │ └── extensions │ │ │ │ └── pyspecific.py │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_sqlalchemy_declarative │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_sqlalchemy_imperative │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_sqlglot_v2 │ │ ├── bm_sqlglot_v2_optimize.toml │ │ ├── bm_sqlglot_v2_parse.toml │ │ ├── bm_sqlglot_v2_transpile.toml │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_sqlite_synth │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_sympy │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_telco │ │ ├── data │ │ │ └── telco-bench.b │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_tomli_loads │ │ ├── data │ │ │ └── tomli-bench-data.toml │ │ ├── generate_data.py │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_tornado_http │ │ ├── pyproject.toml │ │ ├── requirements.txt │ │ └── run_benchmark.py │ │ ├── bm_typing_runtime_protocols │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ ├── bm_unpack_sequence │ │ ├── pyproject.toml │ │ └── run_benchmark.py │ │ └── bm_xml_etree │ │ ├── pyproject.toml │ │ └── run_benchmark.py ├── requirements │ └── requirements.txt ├── run.py ├── tests │ ├── __init__.py │ ├── __main__.py │ ├── data │ │ ├── MANIFEST │ │ ├── bm_local_wheel │ │ │ ├── pyproject.toml │ │ │ ├── 
requirements.txt │ │ │ ├── run_benchmark.py │ │ │ └── this_is-1.0.2-py2.py3-none-any.whl │ │ ├── find-pyperformance.py │ │ ├── mem1.json │ │ ├── mem2.json │ │ ├── py36.json │ │ ├── py38.json │ │ └── py3_performance03.json │ ├── test_commands.py │ ├── test_python.py │ ├── test_pythoninfo.py │ └── test_venv.py └── venv.py ├── pyproject.toml ├── requirements.in ├── requirements.txt ├── runtests.py └── tox.ini /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Hi! :wave: Thanks for using the Python Performance Benchmark Suite. 2 | 3 | 4 | ## Step 1 - Questions 5 | 6 | If you have a general question about how to use something or how to run benchmarks, 7 | please ask on the https://mail.python.org/pipermail/speed/ mailing list. The 8 | GitHub issue tracker should only be used to report bugs. Thanks. 9 | 10 | 11 | ## Step 2 - Use the search tool 12 | 13 | If you are reporting an issue, please take a moment to check GitHub and our 14 | documentation to see if we've already answered a similar issue or have available 15 | help in our documentation: 16 | 17 | - Please use the [GitHub issue search feature](https://help.github.com/articles/searching-issues/) 18 | to check if your issue has been filed already. If it has, please add your 19 | comments to the existing issue. 20 | [GitHub Help on Search](https://help.github.com/articles/searching-issues/) 21 | 22 | - [Search documentation](http://pyperformance.readthedocs.io/). 23 | 24 | 25 | ## Step 3 - File an issue 26 | 27 | Where applicable, please fill out the details below to help us troubleshoot the 28 | issue that you are facing. :cake: 29 | 30 | **How to reproduce the issue** 31 | 32 | **What you expected to happen** 33 | 34 | **What actually happens** 35 | 36 | 37 | ### Step 4 - Thanks 38 | 39 | Thanks for filling this out! The community can be more helpful with this 40 | information. :+1: 41 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/pyperformance/requirements" 5 | schedule: 6 | interval: "monthly" 7 | -------------------------------------------------------------------------------- /.github/workflows/mypy.yml: -------------------------------------------------------------------------------- 1 | name: mypy 2 | 3 | on: [push, pull_request, workflow_dispatch] 4 | 5 | permissions: 6 | contents: read 7 | 8 | env: 9 | FORCE_COLOR: 1 10 | TERM: xterm-256color # needed for FORCE_COLOR to work on mypy on Ubuntu, see https://github.com/python/mypy/issues/13817 11 | 12 | jobs: 13 | mypy: 14 | name: Check code with mypy 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: actions/setup-python@v5 19 | with: 20 | cache: "pip" 21 | cache-dependency-path: "pyproject.toml" 22 | python-version: "3.11" 23 | - run: pip install -e .[dev] 24 | - run: pip freeze --all 25 | - run: mypy 26 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | # For more information, see the "Release a new version" notes in pyproject.toml. 
2 | 3 | name: Upload Python Package 4 | 5 | on: 6 | release: 7 | types: [published] 8 | 9 | jobs: 10 | deploy: 11 | runs-on: ubuntu-latest 12 | 13 | permissions: 14 | # IMPORTANT: this permission is mandatory for trusted publishing 15 | id-token: write 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Set up Python 20 | uses: actions/setup-python@v5 21 | with: 22 | python-version: '3.x' 23 | cache: pip 24 | cache-dependency-path: pyproject.toml 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade build 28 | - name: Build 29 | run: | 30 | python -m build 31 | - name: Publish distribution 📦 to PyPI 32 | if: startsWith(github.event.ref, 'refs/tags') || github.event_name == 'release' 33 | uses: pypa/gh-action-pypi-publish@release/v1 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python cached bytecode files 2 | *.pyc 3 | *.pyo 4 | *.pyd 5 | 6 | # Temporary files, usually created by text editors (vim) 7 | *.swp 8 | *~ 9 | 10 | # Created by setup.py sdist 11 | build/ 12 | dist/ 13 | pyperformance.egg-info/ 14 | 15 | # Created by the pyperformance script 16 | venv/ 17 | .venvs/ 18 | 19 | # Created during tests 20 | pyperformance/tests/data/cpython/ 21 | 22 | # Created by the tox program 23 | .tox/ 24 | 25 | # coverage 26 | .coverage 27 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | version: 2 5 | 6 | build: 7 | os: ubuntu-22.04 8 | tools: 9 | python: "3.11" 10 | 11 | sphinx: 12 | configuration: doc/conf.py 13 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Alex Gaynor 2 | Alexandre Vassalotti 3 | Antoine Pitrou 4 | Anuj Gupta 5 | Benjamin Peterson 6 | Bobby Impollonia 7 | Brett Cannon 8 | Collin Winter 9 | David Laing 10 | David Malcolm 11 | Dmitry Jemerov 12 | Florin Papa 13 | Georg Brandl 14 | James Abbatiello 15 | Jeffrey Yasskin 16 | Maciej Fijalkowski 17 | Reid Kleckner 18 | Robert Grimm 19 | Skip Montanaro 20 | Stefan Behnel 21 | Thomas Wouters 22 | Victor Stinner 23 | Zachary Ware 24 | -------------------------------------------------------------------------------- /BENCHMARKS_FORMAT.md: -------------------------------------------------------------------------------- 1 | See doc/custom_benchmarks.rst. 2 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Permission is hereby granted, free of charge, to any person 4 | obtaining a copy of this software and associated documentation 5 | files (the "Software"), to deal in the Software without 6 | restriction, including without limitation the rights to use, 7 | copy, modify, merge, publish, distribute, sublicense, and/or 8 | sell copies of the Software, and to permit persons to whom the 9 | Software is furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included 12 | in all copies or substantial portions of the Software.
13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 | DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include AUTHORS 2 | include COPYING 3 | include MANIFEST.in 4 | include README.rst 5 | include TODO.rst 6 | include requirements.in 7 | include requirements.txt 8 | 9 | include doc/*.rst doc/images/*.png doc/images/*.jpg 10 | include doc/conf.py doc/Makefile doc/make.bat 11 | 12 | include pyperformance/*.py 13 | include pyperformance/requirements/requirements.txt 14 | include pyperformance/data-files/benchmarks/MANIFEST 15 | include pyperformance/data-files/benchmarks/bm_*/*.toml 16 | include pyperformance/data-files/benchmarks/bm_*/*.py 17 | include pyperformance/data-files/benchmarks/bm_*/requirements.txt 18 | include pyperformance/data-files/benchmarks/bm_*/*.pem 19 | recursive-include pyperformance/data-files/benchmarks/bm_*/vendor * 20 | recursive-include pyperformance/data-files/benchmarks/bm_*/data * 21 | recursive-exclude pyperformance/tests * 22 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ########################## 2 | The Python Benchmark Suite 3 | ########################## 4 | 5 | .. image:: https://img.shields.io/pypi/v/pyperformance.svg 6 | :alt: Latest pyperformance release on the Python Cheeseshop (PyPI) 7 | :target: https://pypi.python.org/pypi/pyperformance 8 | 9 | .. image:: https://github.com/python/pyperformance/actions/workflows/main.yml/badge.svg 10 | :alt: Build status of pyperformance on GitHub Actions 11 | :target: https://github.com/python/pyperformance/actions 12 | 13 | The ``pyperformance`` project is intended to be an authoritative source of 14 | benchmarks for all Python implementations. The focus is on real-world 15 | benchmarks, rather than synthetic benchmarks, using whole applications when 16 | possible. 17 | 18 | * `pyperformance documentation <http://pyperformance.readthedocs.io/>`_ 19 | * `pyperformance GitHub project <https://github.com/python/pyperformance>`_ 20 | (source code, issues) 21 | * `Download pyperformance on PyPI <https://pypi.python.org/pypi/pyperformance>`_ 22 | 23 | pyperformance is not tuned for PyPy yet: use the `PyPy benchmarks project 24 | <https://foss.heptapod.net/pypy/benchmarks>`_ instead to measure PyPy 25 | performance. 26 | 27 | pyperformance is distributed under the MIT license.
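
A typical session looks something like this (a sketch, not a complete
reference; see the `pyperformance documentation
<http://pyperformance.readthedocs.io/>`_ for the full command set)::

    python3 -m pip install pyperformance
    python3 -m pyperformance run -o py38.json
    python3 -m pyperformance compare py36.json py38.json

The result files are pyperf JSON; the names above are placeholders
(``tests/data`` in this repository ships sample ``py36.json`` and
``py38.json`` files in the same format).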
28 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - job: win32 3 | timeoutInMinutes: 60 4 | strategy: 5 | matrix: 6 | Py36-x86-win2016: 7 | python.version: '3.6' 8 | python.arch: 'x86' 9 | vmImage: vs2017-win2016 10 | Py37-x64-win2016: 11 | python.version: '3.7' 12 | python.arch: 'x64' 13 | vmImage: vs2017-win2016 14 | pool: 15 | vmImage: $(vmImage) 16 | steps: 17 | - task: UsePythonVersion@0 18 | inputs: 19 | versionSpec: '$(python.version)' 20 | architecture: '$(python.arch)' 21 | - script: | 22 | pip install --user -U setuptools pip 23 | displayName: Install Dependencies 24 | - script: | 25 | pip install -e . 26 | displayName: Build 27 | - script: | 28 | python runtests.py 29 | displayName: Run tests 30 | -------------------------------------------------------------------------------- /benchmarks: -------------------------------------------------------------------------------- 1 | pyperformance/data-files/benchmarks -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = PythonPerformanceBenchmarkSuite 8 | SOURCEDIR = . 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
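# For example, "make html" runs sphinx-build in make mode and leaves the
# rendered HTML documentation under the build/ directory.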
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /doc/benchmark.conf: -------------------------------------------------------------------------------- 1 | [config] 2 | json_dir = ~/prog/python/bench_json 3 | 4 | [scm] 5 | repo_dir = ~/prog/python/master 6 | update = True 7 | 8 | [compile] 9 | bench_dir = ~/prog/python/bench_tmpdir 10 | 11 | [run_benchmark] 12 | system_tune = True 13 | affinity = 2,3 14 | -------------------------------------------------------------------------------- /doc/html5lib.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/doc/html5lib.json.gz -------------------------------------------------------------------------------- /doc/images/bm_chaos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/doc/images/bm_chaos.png -------------------------------------------------------------------------------- /doc/images/bm_raytrace.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/doc/images/bm_raytrace.jpg -------------------------------------------------------------------------------- /doc/images/html5lib.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/doc/images/html5lib.png -------------------------------------------------------------------------------- /doc/images/sympy_sum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/doc/images/sympy_sum.png -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | ###################################### 2 | The Python Performance Benchmark Suite 3 | ###################################### 4 | 5 | The ``pyperformance`` project is intended to be an authoritative source of 6 | benchmarks for all Python implementations. The focus is on real-world 7 | benchmarks, rather than synthetic benchmarks, using whole applications when 8 | possible. 9 | 10 | * `pyperformance documentation <http://pyperformance.readthedocs.io/>`_ 11 | * `pyperformance GitHub project <https://github.com/python/pyperformance>`_ 12 | (source code, issues) 13 | * `Download pyperformance on PyPI <https://pypi.python.org/pypi/pyperformance>`_ 14 | 15 | pyperformance is distributed under the MIT license. 16 | 17 | Documentation: 18 | 19 | .. toctree:: 20 | :maxdepth: 2 21 | 22 | usage 23 | benchmarks 24 | custom_benchmarks 25 | cpython_results_2017 26 | changelog 27 | 28 | Other Python Benchmarks: 29 | 30 | * CPython: `speed.python.org <https://speed.python.org/>`_ uses pyperf, 31 | pyperformance and `Codespeed <https://github.com/tobami/codespeed/>`_ (Django 32 | web application) 33 | * PyPy: `speed.pypy.org <https://speed.pypy.org/>`_ 34 | uses `PyPy benchmarks <https://foss.heptapod.net/pypy/benchmarks>`_ 35 | * Pyston: `pyston-perf <https://github.com/dropbox/pyston-perf>`_ 36 | and `speed.pyston.org <http://speed.pyston.org/>`_ 37 | * `Numba benchmarks <https://numba.pydata.org/numba-benchmark/>`_ 38 | * Cython: `Cython Demos/benchmarks 39 | <https://github.com/cython/cython/tree/master/Demos/benchmarks>`_ 40 | * pythran: `numpy-benchmarks 41 | <https://github.com/serge-sans-paille/numpy-benchmarks>`_ 42 | 43 | See also the `Python speed mailing list 44 | <https://mail.python.org/mailman/listinfo/speed>`_ and the `Python pyperf module 45 | <https://pyperf.readthedocs.io/>`_ (used by pyperformance).
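
Each benchmark is a small pyperf script. Stripped of metadata and argument
handling, a ``run_benchmark.py`` reduces to roughly the following sketch
(illustrative only; compare real scripts such as
``bm_2to3/run_benchmark.py``)::

    import pyperf

    def workload():
        # code under measurement
        sum(range(10_000))

    runner = pyperf.Runner()
    runner.bench_func('example_workload', workload)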
46 | 47 | pyperformance is not tuned for PyPy yet: use the `PyPy benchmarks project 48 | <https://foss.heptapod.net/pypy/benchmarks>`_ instead to measure PyPy 49 | performance. 50 | 51 | Image generated by bm_raytrace (pure Python raytrace): 52 | 53 | .. image:: images/bm_raytrace.jpg 54 | :alt: Pure Python raytracer 55 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=build 12 | set SPHINXPROJ=PythonPerformanceBenchmarkSuite 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /doc/sympy_sum.json.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/doc/sympy_sum.json.gz -------------------------------------------------------------------------------- /pyperformance/__init__.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os.path 3 | import sys 4 | from importlib.metadata import distribution 5 | 6 | 7 | VERSION = (1, 11, 0) 8 | __version__ = '.'.join(map(str, VERSION)) 9 | 10 | 11 | PKG_ROOT = os.path.dirname(__file__) 12 | DATA_DIR = os.path.join(PKG_ROOT, 'data-files') 13 | 14 | 15 | def is_installed(): 16 | if not is_dev(): 17 | return True 18 | if _is_venv(): 19 | return True 20 | return _is_devel_install() 21 | 22 | 23 | def is_dev(): 24 | parent = os.path.dirname(PKG_ROOT) 25 | return os.path.exists(os.path.join(parent, 'pyproject.toml')) 26 | 27 | 28 | def _is_venv(): 29 | if sys.base_prefix == sys.prefix: 30 | return False 31 | return True 32 | 33 | 34 | def _is_devel_install(): 35 | # pip install -e will do a "devel" install. 36 | # This means it creates a link back to the checkout instead 37 | # of copying the files. 38 | 39 | direct_url = distribution("pyperformance").read_text("direct_url.json") 40 | if direct_url: 41 | return json.loads(direct_url).get("dir_info", {}).get("editable", False) 42 | return False 43 | -------------------------------------------------------------------------------- /pyperformance/__main__.py: -------------------------------------------------------------------------------- 1 | import pyperformance.cli 2 | pyperformance.cli.main() 3 | -------------------------------------------------------------------------------- /pyperformance/_python.py: -------------------------------------------------------------------------------- 1 | # Generic helpers for working with a Python executable.
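# get_id() below condenses those details -- the executable path, sys.version,
# implementation name and version, API version, and the pyc magic number --
# into a short, stable hash, so two interpreter builds can be told apart.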
2 | 3 | import hashlib 4 | 5 | from pyperformance import _pythoninfo 6 | 7 | 8 | def get_id(python=None, prefix=None, *, short=True): 9 | """Return a string that uniquely identifies the given Python executable.""" 10 | if isinstance(python, str): 11 | python = _pythoninfo.get_info(python) 12 | 13 | data = [ 14 | # "executable" represents the install location 15 | # (and build, to an extent). 16 | python.sys.executable, 17 | # sys.version encodes version, git info, build_date, and build_tool. 18 | python.sys.version, 19 | python.sys.implementation.name.lower(), 20 | '.'.join(str(v) for v in python.sys.implementation.version), 21 | str(python.sys.api_version), 22 | python.pyc_magic_number.hex(), 23 | ] 24 | # XXX Add git info if a dev build. 25 | 26 | h = hashlib.sha256() 27 | for value in data: 28 | h.update(value.encode('utf-8')) 29 | # XXX Also include the sorted output of "python -m pip freeze"? 30 | py_id = h.hexdigest() 31 | if short: 32 | py_id = py_id[:12] 33 | 34 | if prefix: 35 | if prefix is True: 36 | major, minor = python.sys.version_info[:2] 37 | py_id = f'{python.sys.implementation.name}{major}.{minor}-{py_id}' 38 | else: 39 | py_id = prefix + py_id 40 | 41 | return py_id 42 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/data/2to3/README.txt: -------------------------------------------------------------------------------- 1 | Copy of django/core/*.py files of Django 1.1.4. 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/data/2to3/__init__.py.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_2to3/data/2to3/__init__.py.txt -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/data/2to3/exceptions.py.txt: -------------------------------------------------------------------------------- 1 | "Global Django exceptions" 2 | 3 | class ObjectDoesNotExist(Exception): 4 | "The requested object does not exist" 5 | silent_variable_failure = True 6 | 7 | class MultipleObjectsReturned(Exception): 8 | "The query returned multiple objects when only one was expected." 
9 | pass 10 | 11 | class SuspiciousOperation(Exception): 12 | "The user did something suspicious" 13 | pass 14 | 15 | class PermissionDenied(Exception): 16 | "The user did not have permission to do that" 17 | pass 18 | 19 | class ViewDoesNotExist(Exception): 20 | "The requested view does not exist" 21 | pass 22 | 23 | class MiddlewareNotUsed(Exception): 24 | "This middleware is not used in this server configuration" 25 | pass 26 | 27 | class ImproperlyConfigured(Exception): 28 | "Django is somehow improperly configured" 29 | pass 30 | 31 | class FieldError(Exception): 32 | """Some kind of problem with a model field.""" 33 | pass 34 | 35 | class ValidationError(Exception): 36 | """An error while validating data.""" 37 | pass 38 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/data/2to3/signals.py.txt: -------------------------------------------------------------------------------- 1 | from django.dispatch import Signal 2 | 3 | request_started = Signal() 4 | request_finished = Signal() 5 | got_request_exception = Signal(providing_args=["request"]) 6 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/data/2to3/template_loader.py.txt: -------------------------------------------------------------------------------- 1 | # This module is DEPRECATED! 2 | # 3 | # You should no longer be using django.template_loader. 4 | # 5 | # Use django.template.loader instead. 6 | 7 | from django.template.loader import * 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/data/2to3/xheaders.py.txt: -------------------------------------------------------------------------------- 1 | """ 2 | Pages in Django can are served up with custom HTTP headers containing useful 3 | information about those pages -- namely, the content type and object ID. 4 | 5 | This module contains utility functions for retrieving and doing interesting 6 | things with these special "X-Headers" (so called because the HTTP spec demands 7 | that custom headers are prefixed with "X-"). 8 | 9 | Next time you're at slashdot.org, watch out for X-Fry and X-Bender. :) 10 | """ 11 | 12 | def populate_xheaders(request, response, model, object_id): 13 | """ 14 | Adds the "X-Object-Type" and "X-Object-Id" headers to the given 15 | HttpResponse according to the given model and object_id -- but only if the 16 | given HttpRequest object has an IP address within the INTERNAL_IPS setting 17 | or if the request is from a logged in staff member. 
18 | """ 19 | from django.conf import settings 20 | if (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS 21 | or (hasattr(request, 'user') and request.user.is_authenticated() 22 | and request.user.is_staff)): 23 | response['X-Object-Type'] = "%s.%s" % (model._meta.app_label, model._meta.object_name.lower()) 24 | response['X-Object-Id'] = str(object_id) 25 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "pyperformance_bm_2to3" 7 | requires-python = ">=3.8" 8 | dependencies = ["pyperf"] 9 | urls = {repository = "https://github.com/python/pyperformance"} 10 | dynamic = ["version"] 11 | 12 | [tool.pyperformance] 13 | name = "2to3" 14 | tags = "apps" 15 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/run_benchmark.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os.path 3 | import sys 4 | import subprocess 5 | 6 | import pyperf 7 | 8 | 9 | if __name__ == "__main__": 10 | runner = pyperf.Runner() 11 | 12 | runner.metadata['description'] = "Performance of the Python 2to3 program" 13 | args = runner.parse_args() 14 | 15 | datadir = os.path.join(os.path.dirname(__file__), 'data', '2to3') 16 | pyfiles = glob.glob(os.path.join(datadir, '*.py.txt')) 17 | command = [sys.executable, "-m", "lib2to3", "-f", "all"] + pyfiles 18 | 19 | try: 20 | import lib2to3 21 | except ModuleNotFoundError: 22 | vendor = os.path.join(os.path.dirname(__file__), 'vendor') 23 | subprocess.run([sys.executable, "-m", "pip", "install", vendor], check=True) 24 | 25 | runner.bench_command('2to3', command) 26 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "lib2to3" 7 | version = "3.12" 8 | description = "lib2to3 for Python 3.13+" 9 | requires-python = ">=3.13.0a0" 10 | license = {text = "PSF-2.0"} 11 | classifiers = [ 12 | "License :: OSI Approved :: Python Software Foundation License", 13 | ] 14 | 15 | [tool.setuptools.package-data] 16 | "*" = ["*.txt"] 17 | 18 | [tool.setuptools.packages.find] 19 | where = ["src"] 20 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/PatternGrammar.txt: -------------------------------------------------------------------------------- 1 | # Copyright 2006 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | # A grammar to describe tree matching patterns. 5 | # Not shown here: 6 | # - 'TOKEN' stands for any token (leaf node) 7 | # - 'any' stands for any node (leaf or interior) 8 | # With 'any' we can still specify the sub-structure. 9 | 10 | # The start symbol is 'Matcher'. 
11 | 12 | Matcher: Alternatives ENDMARKER 13 | 14 | Alternatives: Alternative ('|' Alternative)* 15 | 16 | Alternative: (Unit | NegatedUnit)+ 17 | 18 | Unit: [NAME '='] ( STRING [Repeater] 19 | | NAME [Details] [Repeater] 20 | | '(' Alternatives ')' [Repeater] 21 | | '[' Alternatives ']' 22 | ) 23 | 24 | NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')') 25 | 26 | Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}' 27 | 28 | Details: '<' Alternatives '>' 29 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/__init__.py -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from .main import main 3 | 4 | sys.exit(main("lib2to3.fixes")) 5 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/__init__.py: -------------------------------------------------------------------------------- 1 | # Dummy file to make this directory a package. 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_asserts.py: -------------------------------------------------------------------------------- 1 | """Fixer that replaces deprecated unittest method names.""" 2 | 3 | # Author: Ezio Melotti 4 | 5 | from ..fixer_base import BaseFix 6 | from ..fixer_util import Name 7 | 8 | NAMES = dict( 9 | assert_="assertTrue", 10 | assertEquals="assertEqual", 11 | assertNotEquals="assertNotEqual", 12 | assertAlmostEquals="assertAlmostEqual", 13 | assertNotAlmostEquals="assertNotAlmostEqual", 14 | assertRegexpMatches="assertRegex", 15 | assertRaisesRegexp="assertRaisesRegex", 16 | failUnlessEqual="assertEqual", 17 | failIfEqual="assertNotEqual", 18 | failUnlessAlmostEqual="assertAlmostEqual", 19 | failIfAlmostEqual="assertNotAlmostEqual", 20 | failUnless="assertTrue", 21 | failUnlessRaises="assertRaises", 22 | failIf="assertFalse", 23 | ) 24 | 25 | 26 | class FixAsserts(BaseFix): 27 | 28 | PATTERN = """ 29 | power< any+ trailer< '.' meth=(%s)> any* > 30 | """ % '|'.join(map(repr, NAMES)) 31 | 32 | def transform(self, node, results): 33 | name = results["meth"][0] 34 | name.replace(Name(NAMES[str(name)], prefix=name.prefix)) 35 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_basestring.py: -------------------------------------------------------------------------------- 1 | """Fixer for basestring -> str.""" 2 | # Author: Christian Heimes 3 | 4 | # Local imports 5 | from .. 
import fixer_base 6 | from ..fixer_util import Name 7 | 8 | class FixBasestring(fixer_base.BaseFix): 9 | BM_compatible = True 10 | 11 | PATTERN = "'basestring'" 12 | 13 | def transform(self, node, results): 14 | return Name("str", prefix=node.prefix) 15 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_buffer.py: -------------------------------------------------------------------------------- 1 | # Copyright 2007 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer that changes buffer(...) into memoryview(...).""" 5 | 6 | # Local imports 7 | from .. import fixer_base 8 | from ..fixer_util import Name 9 | 10 | 11 | class FixBuffer(fixer_base.BaseFix): 12 | BM_compatible = True 13 | 14 | explicit = True # The user must ask for this fixer 15 | 16 | PATTERN = """ 17 | power< name='buffer' trailer< '(' [any] ')' > any* > 18 | """ 19 | 20 | def transform(self, node, results): 21 | name = results["name"] 22 | name.replace(Name("memoryview", prefix=name.prefix)) 23 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_exec.py: -------------------------------------------------------------------------------- 1 | # Copyright 2006 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer for exec. 5 | 6 | This converts usages of the exec statement into calls to a built-in 7 | exec() function. 8 | 9 | exec code in ns1, ns2 -> exec(code, ns1, ns2) 10 | """ 11 | 12 | # Local imports 13 | from .. import fixer_base 14 | from ..fixer_util import Comma, Name, Call 15 | 16 | 17 | class FixExec(fixer_base.BaseFix): 18 | BM_compatible = True 19 | 20 | PATTERN = """ 21 | exec_stmt< 'exec' a=any 'in' b=any [',' c=any] > 22 | | 23 | exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any > 24 | """ 25 | 26 | def transform(self, node, results): 27 | assert results 28 | syms = self.syms 29 | a = results["a"] 30 | b = results.get("b") 31 | c = results.get("c") 32 | args = [a.clone()] 33 | args[0].prefix = "" 34 | if b is not None: 35 | args.extend([Comma(), b.clone()]) 36 | if c is not None: 37 | args.extend([Comma(), c.clone()]) 38 | 39 | return Call(Name("exec"), args, prefix=node.prefix) 40 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_funcattrs.py: -------------------------------------------------------------------------------- 1 | """Fix function attribute names (f.func_x -> f.__x__).""" 2 | # Author: Collin Winter 3 | 4 | # Local imports 5 | from .. import fixer_base 6 | from ..fixer_util import Name 7 | 8 | 9 | class FixFuncattrs(fixer_base.BaseFix): 10 | BM_compatible = True 11 | 12 | PATTERN = """ 13 | power< any+ trailer< '.' 
attr=('func_closure' | 'func_doc' | 'func_globals' 14 | | 'func_name' | 'func_defaults' | 'func_code' 15 | | 'func_dict') > any* > 16 | """ 17 | 18 | def transform(self, node, results): 19 | attr = results["attr"][0] 20 | attr.replace(Name(("__%s__" % attr.value[5:]), 21 | prefix=attr.prefix)) 22 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_future.py: -------------------------------------------------------------------------------- 1 | """Remove __future__ imports 2 | 3 | from __future__ import foo is replaced with an empty line. 4 | """ 5 | # Author: Christian Heimes 6 | 7 | # Local imports 8 | from .. import fixer_base 9 | from ..fixer_util import BlankLine 10 | 11 | class FixFuture(fixer_base.BaseFix): 12 | BM_compatible = True 13 | 14 | PATTERN = """import_from< 'from' module_name="__future__" 'import' any >""" 15 | 16 | # This should be run last -- some things check for the import 17 | run_order = 10 18 | 19 | def transform(self, node, results): 20 | new = BlankLine() 21 | new.prefix = node.prefix 22 | return new 23 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_getcwdu.py: -------------------------------------------------------------------------------- 1 | """ 2 | Fixer that changes os.getcwdu() to os.getcwd(). 3 | """ 4 | # Author: Victor Stinner 5 | 6 | # Local imports 7 | from .. import fixer_base 8 | from ..fixer_util import Name 9 | 10 | class FixGetcwdu(fixer_base.BaseFix): 11 | BM_compatible = True 12 | 13 | PATTERN = """ 14 | power< 'os' trailer< dot='.' name='getcwdu' > any* > 15 | """ 16 | 17 | def transform(self, node, results): 18 | name = results["name"] 19 | name.replace(Name("getcwd", prefix=name.prefix)) 20 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_imports2.py: -------------------------------------------------------------------------------- 1 | """Fix incompatible imports and module references that must be fixed after 2 | fix_imports.""" 3 | from . import fix_imports 4 | 5 | 6 | MAPPING = { 7 | 'whichdb': 'dbm', 8 | 'anydbm': 'dbm', 9 | } 10 | 11 | 12 | class FixImports2(fix_imports.FixImports): 13 | 14 | run_order = 7 15 | 16 | mapping = MAPPING 17 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_input.py: -------------------------------------------------------------------------------- 1 | """Fixer that changes input(...) into eval(input(...)).""" 2 | # Author: Andre Roberge 3 | 4 | # Local imports 5 | from .. import fixer_base 6 | from ..fixer_util import Call, Name 7 | from .. import patcomp 8 | 9 | 10 | context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >") 11 | 12 | 13 | class FixInput(fixer_base.BaseFix): 14 | BM_compatible = True 15 | PATTERN = """ 16 | power< 'input' args=trailer< '(' [any] ')' > > 17 | """ 18 | 19 | def transform(self, node, results): 20 | # If we're already wrapped in an eval() call, we're done. 
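        # ("context", compiled at module level above, matches an enclosing
        # eval(...) call; node.parent.parent climbs from this input() node
        # through its trailer up to the surrounding power node.)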
21 | if context.match(node.parent.parent): 22 | return 23 | 24 | new = node.clone() 25 | new.prefix = "" 26 | return Call(Name("eval"), [new], prefix=node.prefix) 27 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_intern.py: -------------------------------------------------------------------------------- 1 | # Copyright 2006 Georg Brandl. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer for intern(). 5 | 6 | intern(s) -> sys.intern(s)""" 7 | 8 | # Local imports 9 | from .. import fixer_base 10 | from ..fixer_util import ImportAndCall, touch_import 11 | 12 | 13 | class FixIntern(fixer_base.BaseFix): 14 | BM_compatible = True 15 | order = "pre" 16 | 17 | PATTERN = """ 18 | power< 'intern' 19 | trailer< lpar='(' 20 | ( not(arglist | argument<any '=' any>) obj=any 21 | | obj=arglist<(not argument<any '=' any>) any ','> ) 22 | rpar=')' > 23 | after=any* 24 | > 25 | """ 26 | 27 | def transform(self, node, results): 28 | if results: 29 | # I feel like we should be able to express this logic in the 30 | # PATTERN above but I don't know how to do it so... 31 | obj = results['obj'] 32 | if obj: 33 | if (obj.type == self.syms.argument and 34 | obj.children[0].value in {'**', '*'}): 35 | return # Make no change. 36 | names = ('sys', 'intern') 37 | new = ImportAndCall(node, results, names) 38 | touch_import(None, 'sys', node) 39 | return new 40 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_isinstance.py: -------------------------------------------------------------------------------- 1 | # Copyright 2008 Armin Ronacher. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer that cleans up a tuple argument to isinstance after the tokens 5 | in it were fixed. This is mainly used to remove double occurrences of 6 | tokens as a leftover of the long -> int / unicode -> str conversion. 7 | 8 | eg. isinstance(x, (int, long)) -> isinstance(x, (int, int)) 9 | -> isinstance(x, int) 10 | """ 11 | 12 | from ..
import fixer_base 13 | from ..fixer_util import token 14 | 15 | 16 | class FixIsinstance(fixer_base.BaseFix): 17 | BM_compatible = True 18 | PATTERN = """ 19 | power< 20 | 'isinstance' 21 | trailer< '(' arglist< any ',' atom< '(' 22 | args=testlist_gexp< any+ > 23 | ')' > > ')' > 24 | > 25 | """ 26 | 27 | run_order = 6 28 | 29 | def transform(self, node, results): 30 | names_inserted = set() 31 | testlist = results["args"] 32 | args = testlist.children 33 | new_args = [] 34 | iterator = enumerate(args) 35 | for idx, arg in iterator: 36 | if arg.type == token.NAME and arg.value in names_inserted: 37 | if idx < len(args) - 1 and args[idx + 1].type == token.COMMA: 38 | next(iterator) 39 | continue 40 | else: 41 | new_args.append(arg) 42 | if arg.type == token.NAME: 43 | names_inserted.add(arg.value) 44 | if new_args and new_args[-1].type == token.COMMA: 45 | del new_args[-1] 46 | if len(new_args) == 1: 47 | atom = testlist.parent 48 | new_args[0].prefix = atom.prefix 49 | atom.replace(new_args[0]) 50 | else: 51 | args[:] = new_args 52 | node.changed() 53 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_itertools.py: -------------------------------------------------------------------------------- 1 | """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and 2 | itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363) 3 | 4 | imports from itertools are fixed in fix_itertools_import.py 5 | 6 | If itertools is imported as something else (ie: import itertools as it; 7 | it.izip(spam, eggs)) method calls will not get fixed. 8 | """ 9 | 10 | # Local imports 11 | from .. import fixer_base 12 | from ..fixer_util import Name 13 | 14 | class FixItertools(fixer_base.BaseFix): 15 | BM_compatible = True 16 | it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')" 17 | PATTERN = """ 18 | power< it='itertools' 19 | trailer< 20 | dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > > 21 | | 22 | power< func=%(it_funcs)s trailer< '(' [any] ')' > > 23 | """ %(locals()) 24 | 25 | # Needs to be run after fix_(map|zip|filter) 26 | run_order = 6 27 | 28 | def transform(self, node, results): 29 | prefix = None 30 | func = results['func'][0] 31 | if ('it' in results and 32 | func.value not in ('ifilterfalse', 'izip_longest')): 33 | dot, it = (results['dot'], results['it']) 34 | # Remove the 'itertools' 35 | prefix = it.prefix 36 | it.remove() 37 | # Replace the node which contains ('.', 'function') with the 38 | # function (to be consistent with the second part of the pattern) 39 | dot.remove() 40 | func.parent.replace(func) 41 | 42 | prefix = prefix or func.prefix 43 | func.replace(Name(func.value[1:], prefix=prefix)) 44 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_long.py: -------------------------------------------------------------------------------- 1 | # Copyright 2006 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer that turns 'long' into 'int' everywhere. 
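e.g. x = long(1) -> x = int(1), but only where 'long' is probably the
builtin (see the is_probably_builtin() check below).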
5 | """ 6 | 7 | # Local imports 8 | from lib2to3 import fixer_base 9 | from lib2to3.fixer_util import is_probably_builtin 10 | 11 | 12 | class FixLong(fixer_base.BaseFix): 13 | BM_compatible = True 14 | PATTERN = "'long'" 15 | 16 | def transform(self, node, results): 17 | if is_probably_builtin(node): 18 | node.value = "int" 19 | node.changed() 20 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_methodattrs.py: -------------------------------------------------------------------------------- 1 | """Fix bound method attributes (method.im_? -> method.__?__). 2 | """ 3 | # Author: Christian Heimes 4 | 5 | # Local imports 6 | from .. import fixer_base 7 | from ..fixer_util import Name 8 | 9 | MAP = { 10 | "im_func" : "__func__", 11 | "im_self" : "__self__", 12 | "im_class" : "__self__.__class__" 13 | } 14 | 15 | class FixMethodattrs(fixer_base.BaseFix): 16 | BM_compatible = True 17 | PATTERN = """ 18 | power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > 19 | """ 20 | 21 | def transform(self, node, results): 22 | attr = results["attr"][0] 23 | new = MAP[attr.value] 24 | attr.replace(Name(new, prefix=attr.prefix)) 25 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_ne.py: -------------------------------------------------------------------------------- 1 | # Copyright 2006 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer that turns <> into !=.""" 5 | 6 | # Local imports 7 | from .. import pytree 8 | from ..pgen2 import token 9 | from .. import fixer_base 10 | 11 | 12 | class FixNe(fixer_base.BaseFix): 13 | # This is so simple that we don't need the pattern compiler. 14 | 15 | _accept_type = token.NOTEQUAL 16 | 17 | def match(self, node): 18 | # Override 19 | return node.value == "<>" 20 | 21 | def transform(self, node, results): 22 | new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix) 23 | return new 24 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_nonzero.py: -------------------------------------------------------------------------------- 1 | """Fixer for __nonzero__ -> __bool__ methods.""" 2 | # Author: Collin Winter 3 | 4 | # Local imports 5 | from .. import fixer_base 6 | from ..fixer_util import Name 7 | 8 | class FixNonzero(fixer_base.BaseFix): 9 | BM_compatible = True 10 | PATTERN = """ 11 | classdef< 'class' any+ ':' 12 | suite< any* 13 | funcdef< 'def' name='__nonzero__' 14 | parameters< '(' NAME ')' > any+ > 15 | any* > > 16 | """ 17 | 18 | def transform(self, node, results): 19 | name = results["name"] 20 | new = Name("__bool__", prefix=name.prefix) 21 | name.replace(new) 22 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_numliterals.py: -------------------------------------------------------------------------------- 1 | """Fixer that turns 1L into 1, 0755 into 0o755. 2 | """ 3 | # Copyright 2007 Georg Brandl. 4 | # Licensed to PSF under a Contributor Agreement. 5 | 6 | # Local imports 7 | from ..pgen2 import token 8 | from .. 
import fixer_base 9 | from ..fixer_util import Number 10 | 11 | 12 | class FixNumliterals(fixer_base.BaseFix): 13 | # This is so simple that we don't need the pattern compiler. 14 | 15 | _accept_type = token.NUMBER 16 | 17 | def match(self, node): 18 | # Override 19 | return (node.value.startswith("0") or node.value[-1] in "Ll") 20 | 21 | def transform(self, node, results): 22 | val = node.value 23 | if val[-1] in 'Ll': 24 | val = val[:-1] 25 | elif val.startswith('0') and val.isdigit() and len(set(val)) > 1: 26 | val = "0o" + val[1:] 27 | 28 | return Number(val, prefix=node.prefix) 29 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_paren.py: -------------------------------------------------------------------------------- 1 | """Fixer that adds parentheses where they are required 2 | 3 | This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``.""" 4 | 5 | # By Taek Joo Kim and Benjamin Peterson 6 | 7 | # Local imports 8 | from .. import fixer_base 9 | from ..fixer_util import LParen, RParen 10 | 11 | # XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2] 12 | class FixParen(fixer_base.BaseFix): 13 | BM_compatible = True 14 | 15 | PATTERN = """ 16 | atom< ('[' | '(') 17 | (listmaker< any 18 | comp_for< 19 | 'for' NAME 'in' 20 | target=testlist_safe< any (',' any)+ [','] 21 | > 22 | [any] 23 | > 24 | > 25 | | 26 | testlist_gexp< any 27 | comp_for< 28 | 'for' NAME 'in' 29 | target=testlist_safe< any (',' any)+ [','] 30 | > 31 | [any] 32 | > 33 | >) 34 | (']' | ')') > 35 | """ 36 | 37 | def transform(self, node, results): 38 | target = results["target"] 39 | 40 | lparen = LParen() 41 | lparen.prefix = target.prefix 42 | target.prefix = "" # Make it hug the parentheses 43 | target.insert_child(0, lparen) 44 | target.append_child(RParen()) 45 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_raw_input.py: -------------------------------------------------------------------------------- 1 | """Fixer that changes raw_input(...) into input(...).""" 2 | # Author: Andre Roberge 3 | 4 | # Local imports 5 | from .. import fixer_base 6 | from ..fixer_util import Name 7 | 8 | class FixRawInput(fixer_base.BaseFix): 9 | 10 | BM_compatible = True 11 | PATTERN = """ 12 | power< name='raw_input' trailer< '(' [any] ')' > any* > 13 | """ 14 | 15 | def transform(self, node, results): 16 | name = results["name"] 17 | name.replace(Name("input", prefix=name.prefix)) 18 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_reduce.py: -------------------------------------------------------------------------------- 1 | # Copyright 2008 Armin Ronacher. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer for reduce(). 5 | 6 | Makes sure reduce() is imported from the functools module if reduce is 7 | used in that module. 
8 | """
9 |
10 | from lib2to3 import fixer_base
11 | from lib2to3.fixer_util import touch_import
12 |
13 |
14 |
15 | class FixReduce(fixer_base.BaseFix):
16 |
17 |     BM_compatible = True
18 |     order = "pre"
19 |
20 |     PATTERN = """
21 |     power< 'reduce'
22 |         trailer< '('
23 |             arglist< (
24 |                 (not(argument<any '=' any>) any ','
25 |                  not(argument<any '=' any>) any) |
26 |                 (not(argument<any '=' any>) any ','
27 |                  not(argument<any '=' any>) any ','
28 |                  not(argument<any '=' any>) any)
29 |             ) >
30 |         ')' >
31 |     >
32 |     """
33 |
34 |     def transform(self, node, results):
35 |         touch_import('functools', 'reduce', node)
36 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_reload.py:
--------------------------------------------------------------------------------
1 | """Fixer for reload().
2 |
3 | reload(s) -> importlib.reload(s)"""
4 |
5 | # Local imports
6 | from .. import fixer_base
7 | from ..fixer_util import ImportAndCall, touch_import
8 |
9 |
10 | class FixReload(fixer_base.BaseFix):
11 |     BM_compatible = True
12 |     order = "pre"
13 |
14 |     PATTERN = """
15 |     power< 'reload'
16 |            trailer< lpar='('
17 |                     ( not(arglist | argument<any '=' any>) obj=any
18 |                       | obj=arglist<(not argument<any '=' any>) any ','> )
19 |                     rpar=')' >
20 |            after=any*
21 |     >
22 |     """
23 |
24 |     def transform(self, node, results):
25 |         if results:
26 |             # I feel like we should be able to express this logic in the
27 |             # PATTERN above but I don't know how to do it so...
28 |             obj = results['obj']
29 |             if obj:
30 |                 if (obj.type == self.syms.argument and
31 |                         obj.children[0].value in {'**', '*'}):
32 |                     return  # Make no change.
33 |         names = ('importlib', 'reload')
34 |         new = ImportAndCall(node, results, names)
35 |         touch_import(None, 'importlib', node)
36 |         return new
37 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_repr.py:
--------------------------------------------------------------------------------
1 | # Copyright 2006 Google, Inc. All Rights Reserved.
2 | # Licensed to PSF under a Contributor Agreement.
3 |
4 | """Fixer that transforms `xyzzy` into repr(xyzzy)."""
5 |
6 | # Local imports
7 | from .. import fixer_base
8 | from ..fixer_util import Call, Name, parenthesize
9 |
10 |
11 | class FixRepr(fixer_base.BaseFix):
12 |
13 |     BM_compatible = True
14 |     PATTERN = """
15 |     atom < '`' expr=any '`' >
16 |     """
17 |
18 |     def transform(self, node, results):
19 |         expr = results["expr"].clone()
20 |
21 |         if expr.type == self.syms.testlist1:
22 |             expr = parenthesize(expr)
23 |         return Call(Name("repr"), [expr], prefix=node.prefix)
24 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_set_literal.py:
--------------------------------------------------------------------------------
1 | """
2 | Optional fixer to transform set() calls to set literals.
3 | """ 4 | 5 | # Author: Benjamin Peterson 6 | 7 | from lib2to3 import fixer_base, pytree 8 | from lib2to3.fixer_util import token, syms 9 | 10 | 11 | 12 | class FixSetLiteral(fixer_base.BaseFix): 13 | 14 | BM_compatible = True 15 | explicit = True 16 | 17 | PATTERN = """power< 'set' trailer< '(' 18 | (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > 19 | | 20 | single=any) ']' > 21 | | 22 | atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > 23 | ) 24 | ')' > > 25 | """ 26 | 27 | def transform(self, node, results): 28 | single = results.get("single") 29 | if single: 30 | # Make a fake listmaker 31 | fake = pytree.Node(syms.listmaker, [single.clone()]) 32 | single.replace(fake) 33 | items = fake 34 | else: 35 | items = results["items"] 36 | 37 | # Build the contents of the literal 38 | literal = [pytree.Leaf(token.LBRACE, "{")] 39 | literal.extend(n.clone() for n in items.children) 40 | literal.append(pytree.Leaf(token.RBRACE, "}")) 41 | # Set the prefix of the right brace to that of the ')' or ']' 42 | literal[-1].prefix = items.next_sibling.prefix 43 | maker = pytree.Node(syms.dictsetmaker, literal) 44 | maker.prefix = node.prefix 45 | 46 | # If the original was a one tuple, we need to remove the extra comma. 47 | if len(maker.children) == 4: 48 | n = maker.children[2] 49 | n.remove() 50 | maker.children[-1].prefix = n.prefix 51 | 52 | # Finally, replace the set call with our shiny new literal. 53 | return maker 54 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_standarderror.py: -------------------------------------------------------------------------------- 1 | # Copyright 2007 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer for StandardError -> Exception.""" 5 | 6 | # Local imports 7 | from .. import fixer_base 8 | from ..fixer_util import Name 9 | 10 | 11 | class FixStandarderror(fixer_base.BaseFix): 12 | BM_compatible = True 13 | PATTERN = """ 14 | 'StandardError' 15 | """ 16 | 17 | def transform(self, node, results): 18 | return Name("Exception", prefix=node.prefix) 19 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_sys_exc.py: -------------------------------------------------------------------------------- 1 | """Fixer for sys.exc_{type, value, traceback} 2 | 3 | sys.exc_type -> sys.exc_info()[0] 4 | sys.exc_value -> sys.exc_info()[1] 5 | sys.exc_traceback -> sys.exc_info()[2] 6 | """ 7 | 8 | # By Jeff Balogh and Benjamin Peterson 9 | 10 | # Local imports 11 | from .. import fixer_base 12 | from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms 13 | 14 | class FixSysExc(fixer_base.BaseFix): 15 | # This order matches the ordering of sys.exc_info(). 16 | exc_info = ["exc_type", "exc_value", "exc_traceback"] 17 | BM_compatible = True 18 | PATTERN = """ 19 | power< 'sys' trailer< dot='.' 
attribute=(%s) > > 20 | """ % '|'.join("'%s'" % e for e in exc_info) 21 | 22 | def transform(self, node, results): 23 | sys_attr = results["attribute"][0] 24 | index = Number(self.exc_info.index(sys_attr.value)) 25 | 26 | call = Call(Name("exc_info"), prefix=sys_attr.prefix) 27 | attr = Attr(Name("sys"), call) 28 | attr[1].children[0].prefix = results["dot"].prefix 29 | attr.append(Subscript(index)) 30 | return Node(syms.power, attr, prefix=node.prefix) 31 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_throw.py: -------------------------------------------------------------------------------- 1 | """Fixer for generator.throw(E, V, T). 2 | 3 | g.throw(E) -> g.throw(E) 4 | g.throw(E, V) -> g.throw(E(V)) 5 | g.throw(E, V, T) -> g.throw(E(V).with_traceback(T)) 6 | 7 | g.throw("foo"[, V[, T]]) will warn about string exceptions.""" 8 | # Author: Collin Winter 9 | 10 | # Local imports 11 | from .. import pytree 12 | from ..pgen2 import token 13 | from .. import fixer_base 14 | from ..fixer_util import Name, Call, ArgList, Attr, is_tuple 15 | 16 | class FixThrow(fixer_base.BaseFix): 17 | BM_compatible = True 18 | PATTERN = """ 19 | power< any trailer< '.' 'throw' > 20 | trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > 21 | > 22 | | 23 | power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > > 24 | """ 25 | 26 | def transform(self, node, results): 27 | syms = self.syms 28 | 29 | exc = results["exc"].clone() 30 | if exc.type is token.STRING: 31 | self.cannot_convert(node, "Python 3 does not support string exceptions") 32 | return 33 | 34 | # Leave "g.throw(E)" alone 35 | val = results.get("val") 36 | if val is None: 37 | return 38 | 39 | val = val.clone() 40 | if is_tuple(val): 41 | args = [c.clone() for c in val.children[1:-1]] 42 | else: 43 | val.prefix = "" 44 | args = [val] 45 | 46 | throw_args = results["args"] 47 | 48 | if "tb" in results: 49 | tb = results["tb"].clone() 50 | tb.prefix = "" 51 | 52 | e = Call(exc, args) 53 | with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])] 54 | throw_args.replace(pytree.Node(syms.power, with_tb)) 55 | else: 56 | throw_args.replace(Call(exc, args)) 57 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_types.py: -------------------------------------------------------------------------------- 1 | # Copyright 2007 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Fixer for removing uses of the types module. 5 | 6 | These work for only the known names in the types module. The forms above 7 | can include types. or not. ie, It is assumed the module is imported either as: 8 | 9 | import types 10 | from types import ... # either * or specific types 11 | 12 | The import statements are not modified. 13 | 14 | There should be another fixer that handles at least the following constants: 15 | 16 | type([]) -> list 17 | type(()) -> tuple 18 | type('') -> str 19 | 20 | """ 21 | 22 | # Local imports 23 | from .. 
import fixer_base 24 | from ..fixer_util import Name 25 | 26 | _TYPE_MAPPING = { 27 | 'BooleanType' : 'bool', 28 | 'BufferType' : 'memoryview', 29 | 'ClassType' : 'type', 30 | 'ComplexType' : 'complex', 31 | 'DictType': 'dict', 32 | 'DictionaryType' : 'dict', 33 | 'EllipsisType' : 'type(Ellipsis)', 34 | #'FileType' : 'io.IOBase', 35 | 'FloatType': 'float', 36 | 'IntType': 'int', 37 | 'ListType': 'list', 38 | 'LongType': 'int', 39 | 'ObjectType' : 'object', 40 | 'NoneType': 'type(None)', 41 | 'NotImplementedType' : 'type(NotImplemented)', 42 | 'SliceType' : 'slice', 43 | 'StringType': 'bytes', # XXX ? 44 | 'StringTypes' : '(str,)', # XXX ? 45 | 'TupleType': 'tuple', 46 | 'TypeType' : 'type', 47 | 'UnicodeType': 'str', 48 | 'XRangeType' : 'range', 49 | } 50 | 51 | _pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING] 52 | 53 | class FixTypes(fixer_base.BaseFix): 54 | BM_compatible = True 55 | PATTERN = '|'.join(_pats) 56 | 57 | def transform(self, node, results): 58 | new_value = _TYPE_MAPPING.get(results["name"].value) 59 | if new_value: 60 | return Name(new_value, prefix=node.prefix) 61 | return None 62 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_unicode.py: -------------------------------------------------------------------------------- 1 | r"""Fixer for unicode. 2 | 3 | * Changes unicode to str and unichr to chr. 4 | 5 | * If "...\u..." is not unicode literal change it into "...\\u...". 6 | 7 | * Change u"..." into "...". 8 | 9 | """ 10 | 11 | from ..pgen2 import token 12 | from .. import fixer_base 13 | 14 | _mapping = {"unichr" : "chr", "unicode" : "str"} 15 | 16 | class FixUnicode(fixer_base.BaseFix): 17 | BM_compatible = True 18 | PATTERN = "STRING | 'unicode' | 'unichr'" 19 | 20 | def start_tree(self, tree, filename): 21 | super(FixUnicode, self).start_tree(tree, filename) 22 | self.unicode_literals = 'unicode_literals' in tree.future_features 23 | 24 | def transform(self, node, results): 25 | if node.type == token.NAME: 26 | new = node.clone() 27 | new.value = _mapping[node.value] 28 | return new 29 | elif node.type == token.STRING: 30 | val = node.value 31 | if not self.unicode_literals and val[0] in '\'"' and '\\' in val: 32 | val = r'\\'.join([ 33 | v.replace('\\u', r'\\u').replace('\\U', r'\\U') 34 | for v in val.split(r'\\') 35 | ]) 36 | if val[0] in 'uU': 37 | val = val[1:] 38 | if val == node.value: 39 | return node 40 | new = node.clone() 41 | new.value = val 42 | return new 43 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_ws_comma.py: -------------------------------------------------------------------------------- 1 | """Fixer that changes 'a ,b' into 'a, b'. 2 | 3 | This also changes '{a :b}' into '{a: b}', but does not touch other 4 | uses of colons. It does not touch other uses of whitespace. 5 | 6 | """ 7 | 8 | from .. import pytree 9 | from ..pgen2 import token 10 | from .. 
import fixer_base 11 | 12 | class FixWsComma(fixer_base.BaseFix): 13 | 14 | explicit = True # The user must ask for this fixers 15 | 16 | PATTERN = """ 17 | any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]> 18 | """ 19 | 20 | COMMA = pytree.Leaf(token.COMMA, ",") 21 | COLON = pytree.Leaf(token.COLON, ":") 22 | SEPS = (COMMA, COLON) 23 | 24 | def transform(self, node, results): 25 | new = node.clone() 26 | comma = False 27 | for child in new.children: 28 | if child in self.SEPS: 29 | prefix = child.prefix 30 | if prefix.isspace() and "\n" not in prefix: 31 | child.prefix = "" 32 | comma = True 33 | else: 34 | if comma: 35 | prefix = child.prefix 36 | if not prefix: 37 | child.prefix = " " 38 | comma = False 39 | return new 40 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_xreadlines.py: -------------------------------------------------------------------------------- 1 | """Fix "for x in f.xreadlines()" -> "for x in f". 2 | 3 | This fixer will also convert g(f.xreadlines) into g(f.__iter__).""" 4 | # Author: Collin Winter 5 | 6 | # Local imports 7 | from .. import fixer_base 8 | from ..fixer_util import Name 9 | 10 | 11 | class FixXreadlines(fixer_base.BaseFix): 12 | BM_compatible = True 13 | PATTERN = """ 14 | power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > 15 | | 16 | power< any+ trailer< '.' no_call='xreadlines' > > 17 | """ 18 | 19 | def transform(self, node, results): 20 | no_call = results.get("no_call") 21 | 22 | if no_call: 23 | no_call.replace(Name("__iter__", prefix=no_call.prefix)) 24 | else: 25 | node.replace([x.clone() for x in results["call"]]) 26 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/fixes/fix_zip.py: -------------------------------------------------------------------------------- 1 | """ 2 | Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) 3 | unless there exists a 'from future_builtins import zip' statement in the 4 | top-level namespace. 5 | 6 | We avoid the transformation if the zip() call is directly contained in 7 | iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. 8 | """ 9 | 10 | # Local imports 11 | from .. 
import fixer_base 12 | from ..pytree import Node 13 | from ..pygram import python_symbols as syms 14 | from ..fixer_util import Name, ArgList, in_special_context 15 | 16 | 17 | class FixZip(fixer_base.ConditionalFix): 18 | 19 | BM_compatible = True 20 | PATTERN = """ 21 | power< 'zip' args=trailer< '(' [any] ')' > [trailers=trailer*] 22 | > 23 | """ 24 | 25 | skip_on = "future_builtins.zip" 26 | 27 | def transform(self, node, results): 28 | if self.should_skip(node): 29 | return 30 | 31 | if in_special_context(node): 32 | return None 33 | 34 | args = results['args'].clone() 35 | args.prefix = "" 36 | 37 | trailers = [] 38 | if 'trailers' in results: 39 | trailers = [n.clone() for n in results['trailers']] 40 | for n in trailers: 41 | n.prefix = "" 42 | 43 | new = Node(syms.power, [Name("zip"), args], prefix="") 44 | new = Node(syms.power, [Name("list"), ArgList([new])] + trailers) 45 | new.prefix = node.prefix 46 | return new 47 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/pgen2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """The pgen2 package.""" 5 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/pgen2/literals.py: -------------------------------------------------------------------------------- 1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Safely evaluate Python string literals without using eval().""" 5 | 6 | import re 7 | 8 | simple_escapes = {"a": "\a", 9 | "b": "\b", 10 | "f": "\f", 11 | "n": "\n", 12 | "r": "\r", 13 | "t": "\t", 14 | "v": "\v", 15 | "'": "'", 16 | '"': '"', 17 | "\\": "\\"} 18 | 19 | def escape(m): 20 | all, tail = m.group(0, 1) 21 | assert all.startswith("\\") 22 | esc = simple_escapes.get(tail) 23 | if esc is not None: 24 | return esc 25 | if tail.startswith("x"): 26 | hexes = tail[1:] 27 | if len(hexes) < 2: 28 | raise ValueError("invalid hex string escape ('\\%s')" % tail) 29 | try: 30 | i = int(hexes, 16) 31 | except ValueError: 32 | raise ValueError("invalid hex string escape ('\\%s')" % tail) from None 33 | else: 34 | try: 35 | i = int(tail, 8) 36 | except ValueError: 37 | raise ValueError("invalid octal string escape ('\\%s')" % tail) from None 38 | return chr(i) 39 | 40 | def evalString(s): 41 | assert s.startswith("'") or s.startswith('"'), repr(s[:1]) 42 | q = s[0] 43 | if s[:3] == q*3: 44 | q = q*3 45 | assert s.endswith(q), repr(s[-len(q):]) 46 | assert len(s) >= 2*len(q) 47 | s = s[len(q):-len(q)] 48 | return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) 49 | 50 | def test(): 51 | for i in range(256): 52 | c = chr(i) 53 | s = repr(c) 54 | e = evalString(s) 55 | if e != c: 56 | print(i, c, s, e) 57 | 58 | 59 | if __name__ == "__main__": 60 | test() 61 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/pgen2/token.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python3 2 | 3 | """Token constants (from "token.h").""" 4 | 5 | # Taken from Python (r53757) and modified to include some tokens 6 | # originally monkeypatched in by pgen2.tokenize 7 | 8 | #--start constants-- 9 | ENDMARKER = 0 10 | NAME = 1 11 | NUMBER = 2 12 | STRING = 3 13 | NEWLINE = 4 14 | INDENT = 5 15 | DEDENT = 6 16 | LPAR = 7 17 | RPAR = 8 18 | LSQB = 9 19 | RSQB = 10 20 | COLON = 11 21 | COMMA = 12 22 | SEMI = 13 23 | PLUS = 14 24 | MINUS = 15 25 | STAR = 16 26 | SLASH = 17 27 | VBAR = 18 28 | AMPER = 19 29 | LESS = 20 30 | GREATER = 21 31 | EQUAL = 22 32 | DOT = 23 33 | PERCENT = 24 34 | BACKQUOTE = 25 35 | LBRACE = 26 36 | RBRACE = 27 37 | EQEQUAL = 28 38 | NOTEQUAL = 29 39 | LESSEQUAL = 30 40 | GREATEREQUAL = 31 41 | TILDE = 32 42 | CIRCUMFLEX = 33 43 | LEFTSHIFT = 34 44 | RIGHTSHIFT = 35 45 | DOUBLESTAR = 36 46 | PLUSEQUAL = 37 47 | MINEQUAL = 38 48 | STAREQUAL = 39 49 | SLASHEQUAL = 40 50 | PERCENTEQUAL = 41 51 | AMPEREQUAL = 42 52 | VBAREQUAL = 43 53 | CIRCUMFLEXEQUAL = 44 54 | LEFTSHIFTEQUAL = 45 55 | RIGHTSHIFTEQUAL = 46 56 | DOUBLESTAREQUAL = 47 57 | DOUBLESLASH = 48 58 | DOUBLESLASHEQUAL = 49 59 | AT = 50 60 | ATEQUAL = 51 61 | OP = 52 62 | COMMENT = 53 63 | NL = 54 64 | RARROW = 55 65 | AWAIT = 56 66 | ASYNC = 57 67 | ERRORTOKEN = 58 68 | COLONEQUAL = 59 69 | N_TOKENS = 60 70 | NT_OFFSET = 256 71 | #--end constants-- 72 | 73 | tok_name = {} 74 | for _name, _value in list(globals().items()): 75 | if isinstance(_value, int): 76 | tok_name[_value] = _name 77 | 78 | 79 | def ISTERMINAL(x): 80 | return x < NT_OFFSET 81 | 82 | def ISNONTERMINAL(x): 83 | return x >= NT_OFFSET 84 | 85 | def ISEOF(x): 86 | return x == ENDMARKER 87 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_2to3/vendor/src/lib2to3/pygram.py: -------------------------------------------------------------------------------- 1 | # Copyright 2006 Google, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | """Export the Python grammar and symbols.""" 5 | 6 | # Python imports 7 | import os 8 | 9 | # Local imports 10 | from .pgen2 import token 11 | from .pgen2 import driver 12 | from . import pytree 13 | 14 | # The grammar file 15 | _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") 16 | _PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), 17 | "PatternGrammar.txt") 18 | 19 | 20 | class Symbols(object): 21 | 22 | def __init__(self, grammar): 23 | """Initializer. 24 | 25 | Creates an attribute for each grammar symbol (nonterminal), 26 | whose value is the symbol's type (an int >= 256). 
27 | """ 28 | for name, symbol in grammar.symbol2number.items(): 29 | setattr(self, name, symbol) 30 | 31 | 32 | python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE) 33 | 34 | python_symbols = Symbols(python_grammar) 35 | 36 | python_grammar_no_print_statement = python_grammar.copy() 37 | del python_grammar_no_print_statement.keywords["print"] 38 | 39 | python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy() 40 | del python_grammar_no_print_and_exec_statement.keywords["exec"] 41 | 42 | pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE) 43 | pattern_symbols = Symbols(pattern_grammar) 44 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_argparse/bm_argparse_subparsers.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_argparse" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "argparse_subparsers" 10 | extra_opts = ["subparsers"] 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_argparse/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_argparse" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "argparse_many_optionals" 10 | extra_opts = ["many_optionals"] 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_generators/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_async_generators" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "async_generators" 10 | tags = "asyncio" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_generators/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark recursive async generators implemented in python 3 | by traversing a binary tree. 
4 | 5 | Author: Kumar Aditya 6 | """ 7 | 8 | from __future__ import annotations 9 | 10 | from collections.abc import AsyncIterator 11 | 12 | import pyperf 13 | 14 | 15 | class Tree: 16 | def __init__(self, left: Tree | None, value: int, right: Tree | None) -> None: 17 | self.left = left 18 | self.value = value 19 | self.right = right 20 | 21 | async def __aiter__(self) -> AsyncIterator[int]: 22 | if self.left: 23 | async for i in self.left: 24 | yield i 25 | yield self.value 26 | if self.right: 27 | async for i in self.right: 28 | yield i 29 | 30 | 31 | def tree(input: range) -> Tree | None: 32 | n = len(input) 33 | if n == 0: 34 | return None 35 | i = n // 2 36 | return Tree(tree(input[:i]), input[i], tree(input[i + 1:])) 37 | 38 | async def bench_async_generators() -> None: 39 | async for _ in tree(range(100000)): 40 | pass 41 | 42 | if __name__ == "__main__": 43 | runner = pyperf.Runner() 44 | runner.metadata['description'] = "Benchmark async generators" 45 | runner.bench_async_func('async_generators', bench_async_generators) 46 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "async_tree_cpu_io_mixed" 3 | extra_opts = ["cpu_io_mixed"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_cpu_io_mixed_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.11" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_cpu_io_mixed_tg" 7 | extra_opts = ["cpu_io_mixed", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager" 7 | extra_opts = ["eager"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager_cpu_io_mixed" 7 | extra_opts = ["eager_cpu_io_mixed"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_cpu_io_mixed_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager_cpu_io_mixed_tg" 7 | extra_opts = ["eager_cpu_io_mixed", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager_io" 7 | extra_opts = ["eager_io"] 8 | 
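Note on the bm_async_tree variant files: each bm_async_tree_*.toml in this directory reuses the shared run_benchmark.py and pyproject.toml, overriding only the benchmark name and the extra_opts that pyperformance appends to the script's command line. A minimal sketch of how such options could be consumed, assuming a hypothetical argparse setup (the real bm_async_tree/run_benchmark.py may define its modes and flags differently, typically via pyperf's runner.argparser):

import argparse

# Hypothetical parser sketch: "mode" and "--task-groups" mirror the
# extra_opts seen in the variant TOML files, e.g. ["eager_io", "--task-groups"].
parser = argparse.ArgumentParser()
parser.add_argument("mode",
                    choices=["none", "io", "memoization", "cpu_io_mixed",
                             "eager", "eager_io", "eager_memoization",
                             "eager_cpu_io_mixed"])
parser.add_argument("--task-groups", action="store_true",
                    help="use asyncio.TaskGroup instead of asyncio.gather")

# Simulate what pyperformance would pass for bm_async_tree_eager_io_tg.toml:
args = parser.parse_args(["eager_io", "--task-groups"])
assert args.mode == "eager_io" and args.task_groups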
-------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_io_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager_io_tg" 7 | extra_opts = ["eager_io", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager_memoization" 7 | extra_opts = ["eager_memoization"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_memoization_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager_memoization_tg" 7 | extra_opts = ["eager_memoization", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_eager_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.12" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_eager_tg" 7 | extra_opts = ["eager", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "async_tree_io" 3 | extra_opts = ["io"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_io_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.11" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_io_tg" 7 | extra_opts = ["io", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "async_tree_memoization" 3 | extra_opts = ["memoization"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_memoization_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.11" 3 | dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_memoization_tg" 7 | extra_opts = ["memoization", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/bm_async_tree_tg.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | requires-python = ">=3.11" 3 | 
dynamic = ["version"] 4 | 5 | [tool.pyperformance] 6 | name = "async_tree_tg" 7 | extra_opts = ["none", "--task-groups"] 8 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_async_tree/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_async_tree" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "async_tree" 10 | tags = "asyncio" 11 | extra_opts = ["none"] 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_asyncio_tcp/bm_asyncio_tcp_ssl.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "asyncio_tcp_ssl" 3 | extra_opts = ["--ssl"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_asyncio_tcp/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_asyncio_tcp" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "asyncio_tcp" 10 | tags = "asyncio" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_asyncio_tcp/ssl_cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEWTCCAsGgAwIBAgIJAJinz4jHSjLtMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV 3 | BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u 4 | IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xODA4 5 | MjkxNDIzMTVaFw0yODA4MjYxNDIzMTVaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH 6 | DA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9uIFNvZnR3YXJlIEZvdW5k 7 | YXRpb24xEjAQBgNVBAMMCWxvY2FsaG9zdDCCAaIwDQYJKoZIhvcNAQEBBQADggGP 8 | ADCCAYoCggGBALKUqUtopT6E68kN+uJNEt34i2EbmG/bwjcD8IaMsgJPSsMO2Bpd 9 | 3S6qWgkCeOyCfmAwBxK2kNbxGb63ouysEv7l8GCTJTWv3hG/HQcejJpnAEGi6K1U 10 | fDbyE/db6yZ12SoHVTGkadN4vYGCPd1Wj9ZO1F877SHQ8rDWX3xgTWkxN2ojBw44 11 | T8RHSDiG8D/CvG4uEy+VUszL+Uvny5y2poNSqvI3J56sptWSrh8nIIbkPZPBdUne 12 | LYMOHTFK3ZjXSmhlXgziTxK71nnzM3Y9K9gxPnRqoXbvu/wFo55hQCkETiRkYgmm 13 | jXcBMZ0TClQVnQWuLjMthRnWFZs4Lfmwqjs7FZD/61581R2BYehvpWbLvvuOJhwv 14 | DFzexL2sXcAl7SsxbzeQKRHqGbIDfbnQTXfs3/VC6Ye5P82P2ucj+XC32N9piRmO 15 | gCBP8L3ub+YzzdxikZN2gZXXE2jsb3QyE/R2LkWdWyshpKe+RsZP1SBRbHShUyOh 16 | yJ90baoiEwj2mwIDAQABoxgwFjAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJKoZI 17 | hvcNAQELBQADggGBAHRUO/UIHl3jXQENewYayHxkIx8t7nu40iO2DXbicSijz5bo 18 | 5//xAB6RxhBAlsDBehgQP1uoZg+WJW+nHu3CIVOU3qZNZRaozxiCl2UFKcNqLOmx 19 | R3NKpo1jYf4REQIeG8Yw9+hSWLRbshNteP6bKUUf+vanhg9+axyOEOH/iOQvgk/m 20 | b8wA8wNa4ujWljPbTQnj7ry8RqhTM0GcAN5LSdSvcKcpzLcs3aYwh+Z8e30sQWna 21 | F40sa5u7izgBTOrwpcDm/w5kC46vpRQ5fnbshVw6pne2by0mdMECASid/p25N103 22 | jMqTFlmO7kpf/jpCSmamp3/JSEE1BJKHwQ6Ql4nzRA2N1mnvWH7Zxcv043gkHeAu 23 | 0x8evpvwuhdIyproejNFlBpKmW8OX7yKTCPPMC/VkX8Q1rVkxU0DQ6hmvwZlhoKa 24 | 9Wc2uXpw9xF8itV4Uvcdr3dwqByvIqn7iI/gB+4l41e0u8OmH2MKOx4Nxlly5TNW 25 | HcVKQHyOeyvnINuBAQ== 26 | -----END CERTIFICATE----- 27 | -------------------------------------------------------------------------------- 
/pyperformance/data-files/benchmarks/bm_asyncio_websockets/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_asyncio_websockets" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf", "websockets"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "asyncio_websockets" 10 | tags = "asyncio" -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_asyncio_websockets/requirements.txt: -------------------------------------------------------------------------------- 1 | websockets==11.0.3 -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_asyncio_websockets/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark for asyncio websocket server and client performance 3 | transferring 1MB of data. 4 | 5 | Author: Kumar Aditya 6 | """ 7 | 8 | import pyperf 9 | import websockets.server 10 | import websockets.client 11 | import websockets.exceptions 12 | import asyncio 13 | 14 | CHUNK_SIZE = 1024 ** 2 15 | DATA = b"x" * CHUNK_SIZE 16 | 17 | stop: asyncio.Event 18 | 19 | 20 | async def handler(websocket) -> None: 21 | for _ in range(100): 22 | await websocket.recv() 23 | 24 | stop.set() 25 | 26 | 27 | async def send(ws): 28 | try: 29 | await ws.send(DATA) 30 | except websockets.exceptions.ConnectionClosedOK: 31 | pass 32 | 33 | 34 | async def main() -> None: 35 | global stop 36 | t0 = pyperf.perf_counter() 37 | stop = asyncio.Event() 38 | async with websockets.server.serve(handler, "", 8001): 39 | async with websockets.client.connect("ws://localhost:8001") as ws: 40 | await asyncio.gather(*[send(ws) for _ in range(100)]) 41 | await stop.wait() 42 | return pyperf.perf_counter() - t0 43 | 44 | 45 | if __name__ == "__main__": 46 | runner = pyperf.Runner() 47 | runner.metadata['description'] = "Benchmark asyncio websockets" 48 | runner.bench_async_func('asyncio_websockets', main) 49 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_barnes_hut/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_barnes_hut" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "barnes_hut" 10 | tags = "math" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_bpe_tokeniser/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_bpe_tokeniser" 3 | requires-python = ">=3.7" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "bpe_tokeniser" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_chameleon/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_chameleon" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | 
"Chameleon", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "chameleon" 13 | tags = "apps" 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_chameleon/requirements.txt: -------------------------------------------------------------------------------- 1 | chameleon==3.9.1 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_chameleon/run_benchmark.py: -------------------------------------------------------------------------------- 1 | import functools 2 | 3 | import pyperf 4 | 5 | from chameleon import PageTemplate 6 | 7 | 8 | BIGTABLE_ZPT = """\ 9 | 11 | 12 | 17 | 18 |
13 | 16 |
""" 19 | 20 | 21 | def main(): 22 | runner = pyperf.Runner() 23 | runner.metadata['description'] = "Chameleon template" 24 | 25 | tmpl = PageTemplate(BIGTABLE_ZPT) 26 | table = [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10) 27 | for x in range(500)] 28 | options = {'table': table} 29 | 30 | func = functools.partial(tmpl, options=options) 31 | runner.bench_func('chameleon', func) 32 | 33 | 34 | if __name__ == '__main__': 35 | main() 36 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_chaos/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_chaos" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "chaos" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_comprehensions/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_comprehensions" 3 | requires-python = ">=3.7" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "comprehensions" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_concurrent_imap/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_concurrent_imap" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "concurrent_imap" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_concurrent_imap/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark for concurrent model communication. 
3 | """ 4 | import pyperf 5 | 6 | from multiprocessing.pool import Pool, ThreadPool 7 | 8 | 9 | def f(x: int) -> int: 10 | return x 11 | 12 | 13 | def bench_mp_pool(p: int, n: int, chunk: int) -> None: 14 | with Pool(p) as pool: 15 | for _ in pool.imap(f, range(n), chunk): 16 | pass 17 | 18 | 19 | def bench_thread_pool(c: int, n: int, chunk: int) -> None: 20 | with ThreadPool(c) as pool: 21 | for _ in pool.imap(f, range(n), chunk): 22 | pass 23 | 24 | 25 | if __name__ == "__main__": 26 | runner = pyperf.Runner() 27 | runner.metadata["description"] = "concurrent model communication benchmark" 28 | count = 1000 29 | chunk = 10 30 | num_core = 2 31 | runner.bench_func("bench_mp_pool", bench_mp_pool, num_core, count, chunk) 32 | runner.bench_func("bench_thread_pool", bench_thread_pool, num_core, count, chunk) 33 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_coroutines/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_coroutines" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "coroutines" 10 | tags = "asyncio" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_coroutines/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark for recursive coroutines. 3 | 4 | Author: Kumar Aditya 5 | """ 6 | 7 | import pyperf 8 | 9 | 10 | async def fibonacci(n: int) -> int: 11 | if n <= 1: 12 | return n 13 | return await fibonacci(n - 1) + await fibonacci(n - 2) 14 | 15 | 16 | def bench_coroutines(loops: int) -> float: 17 | range_it = range(loops) 18 | t0 = pyperf.perf_counter() 19 | for _ in range_it: 20 | coro = fibonacci(25) 21 | try: 22 | while True: 23 | coro.send(None) 24 | except StopIteration: 25 | pass 26 | return pyperf.perf_counter() - t0 27 | 28 | 29 | if __name__ == "__main__": 30 | runner = pyperf.Runner() 31 | runner.metadata['description'] = "Benchmark coroutines" 32 | runner.bench_time_func('coroutines', bench_coroutines) 33 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_coverage/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_coverage" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "coverage", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "coverage" 13 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_coverage/requirements.txt: -------------------------------------------------------------------------------- 1 | coverage==7.3.2 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_coverage/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark coverage performance with a recursive fibonacci function. 
3 | """
4 |
5 | import coverage
6 | import pyperf
7 |
8 |
9 | def fibonacci(n: int) -> int:
10 |     if n <= 1:
11 |         return n
12 |     return fibonacci(n - 1) + fibonacci(n - 2)
13 |
14 |
15 | def bench_coverage(loops: int) -> float:
16 |     range_it = range(loops)
17 |     cov = coverage.Coverage()
18 |     cov.start()
19 |     t0 = pyperf.perf_counter()
20 |     for _ in range_it:
21 |         fibonacci(25)
22 |     cov.stop()
23 |     return pyperf.perf_counter() - t0
24 |
25 |
26 | if __name__ == "__main__":
27 |     runner = pyperf.Runner()
28 |     runner.metadata['description'] = "Benchmark coverage"
29 |     runner.bench_time_func('coverage', bench_coverage)
30 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_crypto_pyaes/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_crypto_pyaes"
3 | requires-python = ">=3.8"
4 | dependencies = [
5 |     "pyperf",
6 |     "pyaes",
7 | ]
8 | urls = {repository = "https://github.com/python/pyperformance"}
9 | dynamic = ["version"]
10 |
11 | [tool.pyperformance]
12 | name = "crypto_pyaes"
13 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_crypto_pyaes/requirements.txt:
--------------------------------------------------------------------------------
1 | pyaes==1.6.1
2 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_crypto_pyaes/run_benchmark.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Pure-Python Implementation of the AES block-cipher.
4 |
5 | Benchmark AES in CTR mode using the pyaes module.
6 | """
7 |
8 | import pyperf
9 |
10 | import pyaes
11 |
12 | # 23,000 bytes
13 | CLEARTEXT = b"This is a test. What could possibly go wrong?
" * 500 14 | 15 | # 128-bit key (16 bytes) 16 | KEY = b'\xa1\xf6%\x8c\x87}_\xcd\x89dHE8\xbf\xc9,' 17 | 18 | 19 | def bench_pyaes(loops): 20 | range_it = range(loops) 21 | t0 = pyperf.perf_counter() 22 | 23 | for loops in range_it: 24 | aes = pyaes.AESModeOfOperationCTR(KEY) 25 | ciphertext = aes.encrypt(CLEARTEXT) 26 | 27 | # need to reset IV for decryption 28 | aes = pyaes.AESModeOfOperationCTR(KEY) 29 | plaintext = aes.decrypt(ciphertext) 30 | 31 | # explicitly destroy the pyaes object 32 | aes = None 33 | 34 | dt = pyperf.perf_counter() - t0 35 | if plaintext != CLEARTEXT: 36 | raise Exception("decrypt error!") 37 | 38 | return dt 39 | 40 | 41 | if __name__ == "__main__": 42 | runner = pyperf.Runner() 43 | runner.metadata['description'] = ("Pure-Python Implementation " 44 | "of the AES block-cipher") 45 | runner.bench_time_func('crypto_pyaes', bench_pyaes) 46 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dask/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_dask" 3 | requires-python = ">=3.12" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "dask" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dask/requirements.txt: -------------------------------------------------------------------------------- 1 | dask[distributed]==2024.10.0 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dask/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark the Dask scheduler running a large number of simple jobs. 3 | 4 | Author: Matt Rocklin, Michael Droettboom 5 | """ 6 | 7 | from dask.distributed import Client, Worker, Scheduler, wait 8 | from dask import distributed 9 | 10 | import pyperf 11 | 12 | 13 | def inc(x): 14 | return x + 1 15 | 16 | 17 | async def benchmark(): 18 | async with Scheduler() as scheduler: 19 | async with Worker(scheduler.address): 20 | async with Client(scheduler.address, asynchronous=True) as client: 21 | 22 | futures = client.map(inc, range(100)) 23 | for _ in range(10): 24 | futures = client.map(inc, futures) 25 | 26 | await wait(futures) 27 | 28 | 29 | if __name__ == "__main__": 30 | runner = pyperf.Runner() 31 | runner.metadata['description'] = "Benchmark dask" 32 | runner.bench_async_func('dask', benchmark) 33 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_decimal_factorial/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_decimal_factorial" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "decimal_factorial" 10 | tags = "decimal" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_decimal_factorial/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculate `factorial` using the decimal module. 
3 | 4 | - 2024-06-14: Michael Droettboom copied this from 5 | Modules/_decimal/tests/bench.py in the CPython source and adapted to use 6 | pyperf. 7 | """ 8 | 9 | # Original copyright notice in CPython source: 10 | 11 | # 12 | # Copyright (C) 2001-2012 Python Software Foundation. All Rights Reserved. 13 | # Modified and extended by Stefan Krah. 14 | # 15 | 16 | 17 | import decimal 18 | 19 | 20 | import pyperf 21 | 22 | 23 | def factorial(n, m): 24 | if n > m: 25 | return factorial(m, n) 26 | elif m == 0: 27 | return 1 28 | elif n == m: 29 | return n 30 | else: 31 | return factorial(n, (n + m) // 2) * factorial((n + m) // 2 + 1, m) 32 | 33 | 34 | def bench_decimal_factorial(): 35 | c = decimal.getcontext() 36 | c.prec = decimal.MAX_PREC 37 | c.Emax = decimal.MAX_EMAX 38 | c.Emin = decimal.MIN_EMIN 39 | 40 | for n in [10000, 100000]: 41 | # C version of decimal 42 | _ = factorial(decimal.Decimal(n), 0) 43 | 44 | 45 | if __name__ == "__main__": 46 | runner = pyperf.Runner() 47 | runner.metadata["description"] = "decimal_factorial benchmark" 48 | 49 | args = runner.parse_args() 50 | runner.bench_func("decimal_factorial", bench_decimal_factorial) 51 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_decimal_pi/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_decimal_pi" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "decimal_pi" 10 | tags = "decimal" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_decimal_pi/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Calculate `pi` using the decimal module. 3 | 4 | The `pidigits` benchmark does a similar thing using regular (long) ints. 5 | 6 | - 2024-06-14: Michael Droettboom copied this from 7 | Modules/_decimal/tests/bench.py in the CPython source and adapted to use 8 | pyperf. 9 | """ 10 | 11 | # Original copyright notice in CPython source: 12 | 13 | # 14 | # Copyright (C) 2001-2012 Python Software Foundation. All Rights Reserved. 15 | # Modified and extended by Stefan Krah. 
16 | #
17 |
18 |
19 | import decimal
20 |
21 |
22 | import pyperf
23 |
24 |
25 | def pi_decimal():
26 |     """decimal"""
27 |     D = decimal.Decimal
28 |     lasts, t, s, n, na, d, da = D(0), D(3), D(3), D(1), D(0), D(0), D(24)
29 |     while s != lasts:
30 |         lasts = s
31 |         n, na = n + na, na + 8
32 |         d, da = d + da, da + 32
33 |         t = (t * n) / d
34 |         s += t
35 |     return s
36 |
37 |
38 | def bench_decimal_pi():
39 |     for prec in [9, 19]:
40 |         decimal.getcontext().prec = prec
41 |         for _ in range(10000):
42 |             _ = pi_decimal()
43 |
44 |
45 | if __name__ == "__main__":
46 |     runner = pyperf.Runner()
47 |     runner.metadata["description"] = "decimal_pi benchmark"
48 |
49 |     args = runner.parse_args()
50 |     runner.bench_func("decimal_pi", bench_decimal_pi)
51 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_deepcopy/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_deepcopy"
3 | requires-python = ">=3.8"
4 | dependencies = ["pyperf"]
5 | urls = {repository = "https://github.com/python/pyperformance"}
6 | dynamic = ["version"]
7 |
8 | [tool.pyperformance]
9 | name = "deepcopy"
10 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_deltablue/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_deltablue"
3 | requires-python = ">=3.8"
4 | dependencies = ["pyperf"]
5 | urls = {repository = "https://github.com/python/pyperformance"}
6 | dynamic = ["version"]
7 |
8 | [tool.pyperformance]
9 | name = "deltablue"
10 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_django_template/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_django_template"
3 | requires-python = ">=3.10"
4 | dependencies = [
5 |     "pyperf",
6 |     "django",
7 |     "legacy-cgi",
8 | ]
9 | urls = {repository = "https://github.com/python/pyperformance"}
10 | dynamic = ["version"]
11 |
12 | [tool.pyperformance]
13 | name = "django_template"
14 | tags = "template"
15 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_django_template/requirements.txt:
--------------------------------------------------------------------------------
1 | asgiref==3.3.4
2 | django==3.2.4
3 | pytz==2021.1
4 | sqlparse==0.4.1
5 | legacy-cgi==2.6
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_django_template/run_benchmark.py:
--------------------------------------------------------------------------------
1 | """Test the performance of the Django template system.
2 |
3 | This will have Django generate a 150x150-cell HTML table.
4 | """
5 |
6 | import pyperf
7 |
8 | import django.conf
9 | from django.template import Context, Template
10 |
11 |
12 | # 2016-10-10: Python 3.6 takes 380 ms
13 | DEFAULT_SIZE = 100
14 |
15 |
16 | def bench_django_template(runner, size):
17 |     template = Template("""<table>
18 | {% for row in table %}
19 | <tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
20 | {% endfor %}
21 | </table>
22 | """) 23 | table = [range(size) for _ in range(size)] 24 | context = Context({"table": table}) 25 | 26 | runner.bench_func('django_template', template.render, context) 27 | 28 | 29 | def prepare_cmd(runner, cmd): 30 | cmd.append("--table-size=%s" % runner.args.table_size) 31 | 32 | 33 | if __name__ == "__main__": 34 | django.conf.settings.configure(TEMPLATES=[{ 35 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 36 | }]) 37 | django.setup() 38 | 39 | runner = pyperf.Runner() 40 | cmd = runner.argparser 41 | cmd.add_argument("--table-size", 42 | type=int, default=DEFAULT_SIZE, 43 | help="Size of the HTML table, height and width " 44 | "(default: %s)" % DEFAULT_SIZE) 45 | 46 | args = runner.parse_args() 47 | runner.metadata['description'] = "Django template" 48 | runner.metadata['django_version'] = django.__version__ 49 | runner.metadata['django_table_size'] = args.table_size 50 | 51 | bench_django_template(runner, args.table_size) 52 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/big-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/big-black.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/big-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/big-white.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/default.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/default.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/happy_monkey.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/happy_monkey.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/medium-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/medium-black.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/medium-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/medium-white.png 
-------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-all.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-breaks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-breaks.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-covers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-covers.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-cuts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-cuts.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-empty.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-empty.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-objects.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp-objects.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/rsp.svg -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/s5-files.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/s5-files.png -------------------------------------------------------------------------------- 
/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/s5-files.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/s5-files.svg -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/small-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/small-black.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/small-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/images/small-white.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard-bitmap-scaling.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard-bitmap-scaling.svg -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard-bitmap.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard-bitmap.svg -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard-scaling.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard-scaling.svg -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard.png -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard.svg -------------------------------------------------------------------------------- 
/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard.swf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/biohazard.swf
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/pens.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/pens.mp4
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/title-scaling.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/title-scaling.svg
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/title.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/title.png
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/title.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_docutils/data/docs/user/rst/images/title.svg
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_docutils/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_docutils"
3 | requires-python = ">=3.8"
4 | dependencies = [
5 |     "pyperf",
6 |     "docutils",
7 | ]
8 | urls.repository = "https://github.com/python/pyperformance"
9 | dynamic = ["version"]
10 |
11 | [tool.pyperformance]
12 | name = "docutils"
13 | tags = "apps"
14 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_docutils/requirements.txt:
--------------------------------------------------------------------------------
1 | docutils==0.18.1
2 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_docutils/run_benchmark.py:
--------------------------------------------------------------------------------
1 | """
2 | Convert Docutils' documentation from reStructuredText to HTML.
3 | """
4 |
5 | import contextlib
6 | from pathlib import Path
7 |
8 | import docutils
9 | from docutils import core
10 | import pyperf
11 |
12 | try:
13 |     from docutils.utils.math.math2html import Trace
14 | except ImportError:
15 |     pass
16 | else:
17 |     Trace.show = lambda message, channel: ...  # don't print to console
18 |
19 | DOC_ROOT = (Path(__file__).parent / "data" / "docs").resolve()
20 |
21 |
22 | def build_html(doc_root):
23 |     elapsed = 0
24 |     for file in doc_root.rglob("*.txt"):
25 |         file_contents = file.read_text(encoding="utf-8")
26 |         t0 = pyperf.perf_counter()
27 |         with contextlib.suppress(docutils.ApplicationError):
28 |             core.publish_string(source=file_contents,
29 |                                 reader_name="standalone",
30 |                                 parser_name="restructuredtext",
31 |                                 writer_name="html5",
32 |                                 settings_overrides={
33 |                                     "input_encoding": "unicode",
34 |                                     "output_encoding": "unicode",
35 |                                     "report_level": 5,
36 |                                 })
37 |         elapsed += pyperf.perf_counter() - t0
38 |     return elapsed
39 |
40 |
41 | def bench_docutils(loops, doc_root):
42 |     runs_total = 0
43 |     for _ in range(loops):
44 |         runs_total += build_html(doc_root)
45 |     return runs_total
46 |
47 |
48 | if __name__ == "__main__":
49 |     runner = pyperf.Runner()
50 |
51 |     runner.metadata['description'] = "Render documentation with Docutils"
52 |     args = runner.parse_args()
53 |
54 |     runner.bench_time_func("docutils", bench_docutils, DOC_ROOT)
55 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/COMMIT_EDITMSG:
--------------------------------------------------------------------------------
1 | fix ResourceWarning related to subprocesses
2 |
3 | Python issue #26741: BaseSubprocessTransport._process_exited() now
4 | copies the return code from the child watcher to the returncode
5 | attribute of the Popen object. On Python 3.6, it is required to avoid
6 | a ResourceWarning.
7 |
8 | # Please enter the commit message for your changes. Lines starting
9 | # with '#' will be ignored, and an empty message aborts the commit.
10 | # On branch master
11 | # Your branch is up to date with 'origin/master'.
12 | #
13 | # Changes to be committed:
14 | # modified:   asyncio/base_subprocess.py
15 | #
16 | # Untracked files not shown
17 | # ------------------------ >8 ------------------------
18 | # Do not touch the line above.
19 | # Everything that follows will be removed.
20 | diff --git a/asyncio/base_subprocess.py b/asyncio/base_subprocess.py
21 | index 08080bd..8fc253c 100644
22 | --- a/asyncio/base_subprocess.py
23 | +++ b/asyncio/base_subprocess.py
24 | @@ -210,6 +210,10 @@ def _process_exited(self, returncode):
25 |              logger.info('%r exited with return code %r',
26 |                          self, returncode)
27 |          self._returncode = returncode
28 | +        if self._proc.returncode is None:
29 | +            # asyncio uses a child watcher: copy the status into the Popen
30 | +            # object. On Python 3.6, it is required to avoid a ResourceWarning.
31 | + self._proc.returncode = returncode 32 | self._call(self._protocol.process_exited) 33 | self._try_finish() 34 | 35 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/FETCH_HEAD: -------------------------------------------------------------------------------- 1 | bea3a4247a450be7fb82dec111429bb2752aac4d branch 'master' of github.com:python/asyncio 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/HEAD: -------------------------------------------------------------------------------- 1 | ref: refs/heads/master 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/ORIG_HEAD: -------------------------------------------------------------------------------- 1 | 38fdb004db674f32b7a38ee294db5546bae0a432 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/config: -------------------------------------------------------------------------------- 1 | [core] 2 | repositoryformatversion = 0 3 | filemode = true 4 | bare = false 5 | logallrefupdates = true 6 | [remote "origin"] 7 | url = git@github.com:python/asyncio.git 8 | fetch = +refs/heads/*:refs/remotes/origin/* 9 | [branch "master"] 10 | remote = origin 11 | merge = refs/heads/master 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/description: -------------------------------------------------------------------------------- 1 | Unnamed repository; edit this file 'description' to name the repository. 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/applypatch-msg.sample: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # An example hook script to check the commit log message taken by 4 | # applypatch from an e-mail message. 5 | # 6 | # The hook should exit with non-zero status after issuing an 7 | # appropriate message if it wants to stop the commit. The hook is 8 | # allowed to edit the commit message file. 9 | # 10 | # To enable this hook, rename this file to "applypatch-msg". 11 | 12 | . git-sh-setup 13 | test -x "$GIT_DIR/hooks/commit-msg" && 14 | exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"} 15 | : 16 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/commit-msg.sample: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # An example hook script to check the commit log message. 4 | # Called by "git commit" with one argument, the name of the file 5 | # that has the commit message. The hook should exit with non-zero 6 | # status after issuing an appropriate message if it wants to stop the 7 | # commit. The hook is allowed to edit the commit message file. 8 | # 9 | # To enable this hook, rename this file to "commit-msg". 10 | 11 | # Uncomment the below to add a Signed-off-by line to the message. 12 | # Doing this in a hook is a bad idea in general, but the prepare-commit-msg 13 | # hook is more suited to it. 
14 | # 15 | # SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') 16 | # grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" 17 | 18 | # This example catches duplicate Signed-off-by lines. 19 | 20 | test "" = "$(grep '^Signed-off-by: ' "$1" | 21 | sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { 22 | echo >&2 Duplicate Signed-off-by lines. 23 | exit 1 24 | } 25 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/post-update.sample: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # An example hook script to prepare a packed repository for use over 4 | # dumb transports. 5 | # 6 | # To enable this hook, rename this file to "post-update". 7 | 8 | exec git update-server-info 9 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-applypatch.sample: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # An example hook script to verify what is about to be committed 4 | # by applypatch from an e-mail message. 5 | # 6 | # The hook should exit with non-zero status after issuing an 7 | # appropriate message if it wants to stop the commit. 8 | # 9 | # To enable this hook, rename this file to "pre-applypatch". 10 | 11 | . git-sh-setup 12 | test -x "$GIT_DIR/hooks/pre-commit" && 13 | exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"} 14 | : 15 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-commit.sample: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # An example hook script to verify what is about to be committed. 4 | # Called by "git commit" with no arguments. The hook should 5 | # exit with non-zero status after issuing an appropriate message if 6 | # it wants to stop the commit. 7 | # 8 | # To enable this hook, rename this file to "pre-commit". 9 | 10 | if git rev-parse --verify HEAD >/dev/null 2>&1 11 | then 12 | against=HEAD 13 | else 14 | # Initial commit: diff against an empty tree object 15 | against=4b825dc642cb6eb9a060e54bf8d69288fbee4904 16 | fi 17 | 18 | # If you want to allow non-ASCII filenames set this variable to true. 19 | allownonascii=$(git config --bool hooks.allownonascii) 20 | 21 | # Redirect output to stderr. 22 | exec 1>&2 23 | 24 | # Cross platform projects tend to avoid non-ASCII filenames; prevent 25 | # them from being added to the repository. We exploit the fact that the 26 | # printable range starts at the space character and ends with tilde. 27 | if [ "$allownonascii" != "true" ] && 28 | # Note that the use of brackets around a tr range is ok here, (it's 29 | # even required, for portability to Solaris 10's /usr/bin/tr), since 30 | # the square bracket bytes happen to fall in the designated range. 31 | test $(git diff --cached --name-only --diff-filter=A -z $against | 32 | LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 33 | then 34 | cat <<\EOF 35 | Error: Attempt to add a non-ASCII file name. 36 | 37 | This can cause problems if you want to work with people on other platforms. 38 | 39 | To be portable it is advisable to rename the file. 
40 | 41 | If you know what you are doing you can disable this check using: 42 | 43 | git config hooks.allownonascii true 44 | EOF 45 | exit 1 46 | fi 47 | 48 | # If there are whitespace errors, print the offending file names and fail. 49 | exec git diff-index --check --cached $against -- 50 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/pre-push.sample: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # An example hook script to verify what is about to be pushed. Called by "git 4 | # push" after it has checked the remote status, but before anything has been 5 | # pushed. If this script exits with a non-zero status nothing will be pushed. 6 | # 7 | # This hook is called with the following parameters: 8 | # 9 | # $1 -- Name of the remote to which the push is being done 10 | # $2 -- URL to which the push is being done 11 | # 12 | # If pushing without using a named remote those arguments will be equal. 13 | # 14 | # Information about the commits which are being pushed is supplied as lines to 15 | # the standard input in the form: 16 | # 17 | # 18 | # 19 | # This sample shows how to prevent push of commits where the log message starts 20 | # with "WIP" (work in progress). 21 | 22 | remote="$1" 23 | url="$2" 24 | 25 | z40=0000000000000000000000000000000000000000 26 | 27 | while read local_ref local_sha remote_ref remote_sha 28 | do 29 | if [ "$local_sha" = $z40 ] 30 | then 31 | # Handle delete 32 | : 33 | else 34 | if [ "$remote_sha" = $z40 ] 35 | then 36 | # New branch, examine all commits 37 | range="$local_sha" 38 | else 39 | # Update to existing branch, examine new commits 40 | range="$remote_sha..$local_sha" 41 | fi 42 | 43 | # Check for WIP commit 44 | commit=`git rev-list -n 1 --grep '^WIP' "$range"` 45 | if [ -n "$commit" ] 46 | then 47 | echo >&2 "Found WIP commit in $local_ref, not pushing" 48 | exit 1 49 | fi 50 | fi 51 | done 52 | 53 | exit 0 54 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/hooks/prepare-commit-msg.sample: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # An example hook script to prepare the commit log message. 4 | # Called by "git commit" with the name of the file that has the 5 | # commit message, followed by the description of the commit 6 | # message's source. The hook's purpose is to edit the commit 7 | # message file. If the hook fails with a non-zero status, 8 | # the commit is aborted. 9 | # 10 | # To enable this hook, rename this file to "prepare-commit-msg". 11 | 12 | # This hook includes three examples. The first comments out the 13 | # "Conflicts:" part of a merge commit. 14 | # 15 | # The second includes the output of "git diff --name-status -r" 16 | # into the message, just before the "git status" output. It is 17 | # commented because it doesn't cope with --amend or with squashed 18 | # commits. 19 | # 20 | # The third example adds a Signed-off-by line to the message, that can 21 | # still be edited. This is rarely a good idea. 22 | 23 | case "$2,$3" in 24 | merge,) 25 | /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;; 26 | 27 | # ,|template,) 28 | # /usr/bin/perl -i.bak -pe ' 29 | # print "\n" . 
`git diff --cached --name-status -r` 30 | # if /^#/ && $first++ == 0' "$1" ;; 31 | 32 | *) ;; 33 | esac 34 | 35 | # SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') 36 | # grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" 37 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/index -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/exclude: -------------------------------------------------------------------------------- 1 | # git ls-files --others --exclude-from=.git/info/exclude 2 | # Lines that start with '#' are comments. 3 | # For a project mostly in C, the following would be a good set of 4 | # exclude patterns (uncomment them if you want to use them): 5 | # *.[oa] 6 | # *~ 7 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/info/refs: -------------------------------------------------------------------------------- 1 | bea3a4247a450be7fb82dec111429bb2752aac4d refs/heads/master 2 | bea3a4247a450be7fb82dec111429bb2752aac4d refs/remotes/origin/HEAD 3 | c2da455baf9870e06f54a153e48e0cbea3090d58 refs/remotes/origin/asyncio 4 | 441917fd241da911a188fc1ab96738ea3c6f4a12 refs/remotes/origin/bind_modules 5 | 1ca80cbecb37a90feedb9e26a95b6b0484e897ae refs/remotes/origin/iocp 6 | 18cc4f3dcbc5959a1346cc91591ca8cf9a7ffc5b refs/remotes/origin/issue-26 7 | bea3a4247a450be7fb82dec111429bb2752aac4d refs/remotes/origin/master 8 | 3494d4a2dc4a69104949c1c4ca67259a28cfaeca refs/remotes/origin/newcancel 9 | c47bdcc4718cb3daf9a75065dc62014267cf0f1d refs/remotes/origin/proactor 10 | 8422f4e4f91a986288070e1c518953c1c9b81259 refs/remotes/origin/remove-joinable-queue 11 | a16bc82f60238164e7cf150df6b2f12479d75348 refs/remotes/origin/subproces-stream 12 | 2f4f359d460c8895691d8bb91d3d6c582dc601f1 refs/remotes/origin/subprocess_stream 13 | 40dc88f8d3185a67e283e303ad519ad51418cc53 refs/remotes/origin/traceback 14 | f7f53aab4cc5f6e976e6b7533fdcff5876eaa776 refs/remotes/origin/uninterpolate 15 | c28372755c1bdbf814ec950c64ffa1d64edfcae2 refs/remotes/origin/wiki 16 | d3d44466999e5d73b03781583400230faf9cb9c8 refs/remotes/origin/zero_timeout 17 | 13d7f672626cb13bf9ec2ca3a4fb63d60a3bfaf6 refs/tags/0.1.1 18 | 036bbfb768de845b3495b99d212fffbf98ba5571 refs/tags/0.2.1 19 | 5e7da72c065e7d9c72a47cc1b091e705a65e3190 refs/tags/0.3.1 20 | 3783143135d1c33b8ca75149441441165b4f069b refs/tags/0.4.1 21 | 93dcad1500ec151f51521412cbc1ac86804150e4 refs/tags/3.4.1 22 | f9401d73b8e9d0973f86e1d7903b2b16b78b39c2 refs/tags/3.4.2 23 | 7b2d8abfce1d7ef18ef516f9b1b7032172630375 refs/tags/3.4.3 24 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/HEAD: -------------------------------------------------------------------------------- 1 | 38fdb004db674f32b7a38ee294db5546bae0a432 bea3a4247a450be7fb82dec111429bb2752aac4d Victor Stinner 1473666088 +0200 pull --rebase: checkout bea3a4247a450be7fb82dec111429bb2752aac4d 2 | bea3a4247a450be7fb82dec111429bb2752aac4d 
bea3a4247a450be7fb82dec111429bb2752aac4d Victor Stinner 1473666088 +0200 rebase finished: returning to refs/heads/master 3 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/heads/master: -------------------------------------------------------------------------------- 1 | 38fdb004db674f32b7a38ee294db5546bae0a432 bea3a4247a450be7fb82dec111429bb2752aac4d Victor Stinner 1473666088 +0200 rebase finished: refs/heads/master onto bea3a4247a450be7fb82dec111429bb2752aac4d 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/HEAD -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/bind_modules -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/master: -------------------------------------------------------------------------------- 1 | 38fdb004db674f32b7a38ee294db5546bae0a432 bea3a4247a450be7fb82dec111429bb2752aac4d Victor Stinner 1473666088 +0200 pull --rebase: fast-forward 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/logs/refs/remotes/origin/zero_timeout -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/info/packs: -------------------------------------------------------------------------------- 1 | P pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack 2 | 3 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.idx -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/objects/pack/pack-7e1b1ace85030071ca314cd565ae038bacc302a4.pack -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/packed-refs: -------------------------------------------------------------------------------- 1 | # pack-refs with: peeled fully-peeled 2 | bea3a4247a450be7fb82dec111429bb2752aac4d refs/heads/master 3 | c2da455baf9870e06f54a153e48e0cbea3090d58 refs/remotes/origin/asyncio 4 | 441917fd241da911a188fc1ab96738ea3c6f4a12 refs/remotes/origin/bind_modules 5 | 1ca80cbecb37a90feedb9e26a95b6b0484e897ae refs/remotes/origin/iocp 6 | 18cc4f3dcbc5959a1346cc91591ca8cf9a7ffc5b refs/remotes/origin/issue-26 7 | bea3a4247a450be7fb82dec111429bb2752aac4d refs/remotes/origin/master 8 | 3494d4a2dc4a69104949c1c4ca67259a28cfaeca refs/remotes/origin/newcancel 9 | c47bdcc4718cb3daf9a75065dc62014267cf0f1d refs/remotes/origin/proactor 10 | 8422f4e4f91a986288070e1c518953c1c9b81259 refs/remotes/origin/remove-joinable-queue 11 | a16bc82f60238164e7cf150df6b2f12479d75348 refs/remotes/origin/subproces-stream 12 | 2f4f359d460c8895691d8bb91d3d6c582dc601f1 refs/remotes/origin/subprocess_stream 13 | 40dc88f8d3185a67e283e303ad519ad51418cc53 refs/remotes/origin/traceback 14 | f7f53aab4cc5f6e976e6b7533fdcff5876eaa776 refs/remotes/origin/uninterpolate 15 | c28372755c1bdbf814ec950c64ffa1d64edfcae2 refs/remotes/origin/wiki 16 | d3d44466999e5d73b03781583400230faf9cb9c8 refs/remotes/origin/zero_timeout 17 | 13d7f672626cb13bf9ec2ca3a4fb63d60a3bfaf6 refs/tags/0.1.1 18 | 036bbfb768de845b3495b99d212fffbf98ba5571 refs/tags/0.2.1 19 | 5e7da72c065e7d9c72a47cc1b091e705a65e3190 refs/tags/0.3.1 20 | 3783143135d1c33b8ca75149441441165b4f069b refs/tags/0.4.1 21 | 93dcad1500ec151f51521412cbc1ac86804150e4 refs/tags/3.4.1 22 | f9401d73b8e9d0973f86e1d7903b2b16b78b39c2 refs/tags/3.4.2 23 | 7b2d8abfce1d7ef18ef516f9b1b7032172630375 refs/tags/3.4.3 24 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/data/asyncio.git/refs/remotes/origin/HEAD: -------------------------------------------------------------------------------- 1 | ref: refs/remotes/origin/master 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_dulwich_log" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "dulwich", # optional? 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "dulwich_log" 13 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/requirements.txt: -------------------------------------------------------------------------------- 1 | certifi==2021.5.30 2 | dulwich==0.20.23 3 | urllib3==1.26.5 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_dulwich_log/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Iterate on commits of the asyncio Git repository using the Dulwich module. 
3 | """ 4 | 5 | import os 6 | 7 | import pyperf 8 | 9 | import dulwich.repo 10 | 11 | 12 | def iter_all_commits(repo): 13 | # iterate on all changes on the Git repository 14 | for entry in repo.get_walker(head): 15 | pass 16 | 17 | 18 | if __name__ == "__main__": 19 | runner = pyperf.Runner() 20 | runner.metadata['description'] = ("Dulwich benchmark: " 21 | "iterate on all Git commits") 22 | 23 | repo_path = os.path.join(os.path.dirname(__file__), 'data', 'asyncio.git') 24 | 25 | repo = dulwich.repo.Repo(repo_path) 26 | head = repo.head() 27 | runner.bench_func('dulwich_log', iter_all_commits, repo) 28 | repo.close() 29 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_fannkuch/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_fannkuch" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "fannkuch" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_fannkuch/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | The Computer Language Benchmarks Game 3 | http://benchmarksgame.alioth.debian.org/ 4 | 5 | Contributed by Sokolov Yura, modified by Tupteq. 6 | """ 7 | 8 | import pyperf 9 | 10 | 11 | DEFAULT_ARG = 9 12 | 13 | 14 | def fannkuch(n): 15 | count = list(range(1, n + 1)) 16 | max_flips = 0 17 | m = n - 1 18 | r = n 19 | perm1 = list(range(n)) 20 | perm = list(range(n)) 21 | perm1_ins = perm1.insert 22 | perm1_pop = perm1.pop 23 | 24 | while 1: 25 | while r != 1: 26 | count[r - 1] = r 27 | r -= 1 28 | 29 | if perm1[0] != 0 and perm1[m] != m: 30 | perm = perm1[:] 31 | flips_count = 0 32 | k = perm[0] 33 | while k: 34 | perm[:k + 1] = perm[k::-1] 35 | flips_count += 1 36 | k = perm[0] 37 | 38 | if flips_count > max_flips: 39 | max_flips = flips_count 40 | 41 | while r != n: 42 | perm1_ins(r, perm1_pop(0)) 43 | count[r] -= 1 44 | if count[r] > 0: 45 | break 46 | r += 1 47 | else: 48 | return max_flips 49 | 50 | 51 | if __name__ == "__main__": 52 | runner = pyperf.Runner() 53 | arg = DEFAULT_ARG 54 | runner.bench_func('fannkuch', fannkuch, arg) 55 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_float/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_float" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "float" 10 | tags = "math" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_float/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Artificial, floating point-heavy benchmark originally used by Factor. 
3 | """ 4 | import pyperf 5 | 6 | from math import sin, cos, sqrt 7 | 8 | 9 | POINTS = 100000 10 | 11 | 12 | class Point(object): 13 | __slots__ = ('x', 'y', 'z') 14 | 15 | def __init__(self, i): 16 | self.x = x = sin(i) 17 | self.y = cos(i) * 3 18 | self.z = (x * x) / 2 19 | 20 | def __repr__(self): 21 | return "" % (self.x, self.y, self.z) 22 | 23 | def normalize(self): 24 | x = self.x 25 | y = self.y 26 | z = self.z 27 | norm = sqrt(x * x + y * y + z * z) 28 | self.x /= norm 29 | self.y /= norm 30 | self.z /= norm 31 | 32 | def maximize(self, other): 33 | self.x = self.x if self.x > other.x else other.x 34 | self.y = self.y if self.y > other.y else other.y 35 | self.z = self.z if self.z > other.z else other.z 36 | return self 37 | 38 | 39 | def maximize(points): 40 | next = points[0] 41 | for p in points[1:]: 42 | next = next.maximize(p) 43 | return next 44 | 45 | 46 | def benchmark(n): 47 | points = [None] * n 48 | for i in range(n): 49 | points[i] = Point(i) 50 | for p in points: 51 | p.normalize() 52 | return maximize(points) 53 | 54 | 55 | if __name__ == "__main__": 56 | runner = pyperf.Runner() 57 | runner.metadata['description'] = "Float benchmark" 58 | 59 | points = POINTS 60 | runner.bench_func('float', benchmark, points) 61 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_gc_collect/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_gc_collect" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "gc_collect" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_gc_collect/run_benchmark.py: -------------------------------------------------------------------------------- 1 | import pyperf 2 | import gc 3 | 4 | CYCLES = 100 5 | LINKS = 20 6 | 7 | 8 | class Node: 9 | def __init__(self): 10 | self.next = None 11 | self.prev = None 12 | 13 | def link_next(self, next): 14 | self.next = next 15 | self.next.prev = self 16 | 17 | 18 | def create_cycle(node, n_links): 19 | """Create a cycle of n_links nodes, starting with node.""" 20 | 21 | if n_links == 0: 22 | return 23 | 24 | current = node 25 | for i in range(n_links): 26 | next_node = Node() 27 | current.link_next(next_node) 28 | current = next_node 29 | 30 | current.link_next(node) 31 | 32 | 33 | def create_gc_cycles(n_cycles, n_links): 34 | """Create n_cycles cycles n_links+1 nodes each.""" 35 | 36 | cycles = [] 37 | for _ in range(n_cycles): 38 | node = Node() 39 | cycles.append(node) 40 | create_cycle(node, n_links) 41 | return cycles 42 | 43 | 44 | def benchamark_collection(loops, cycles, links): 45 | total_time = 0 46 | for _ in range(loops): 47 | gc.collect() 48 | all_cycles = create_gc_cycles(cycles, links) 49 | 50 | # Main loop to measure 51 | del all_cycles 52 | t0 = pyperf.perf_counter() 53 | collected = gc.collect() 54 | total_time += pyperf.perf_counter() - t0 55 | 56 | assert collected is None or collected >= cycles * (links + 1) 57 | 58 | return total_time 59 | 60 | 61 | if __name__ == "__main__": 62 | runner = pyperf.Runner() 63 | runner.metadata["description"] = "GC link benchmark" 64 | runner.bench_time_func("create_gc_cycles", benchamark_collection, CYCLES, LINKS) 65 | 
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_gc_traversal/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_gc_traversal"
3 | requires-python = ">=3.8"
4 | dependencies = ["pyperf"]
5 | urls = {repository = "https://github.com/python/pyperformance"}
6 | dynamic = ["version"]
7 |
8 | [tool.pyperformance]
9 | name = "gc_traversal"
10 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_gc_traversal/run_benchmark.py:
--------------------------------------------------------------------------------
1 | import pyperf
2 | import gc
3 |
4 | N_LEVELS = 1000
5 |
6 |
7 | def create_recursive_containers(n_levels):
8 |
9 |     current_list = []
10 |     for n in range(n_levels):
11 |         new_list = [None] * n
12 |         for index in range(n):
13 |             new_list[index] = current_list
14 |         current_list = new_list
15 |
16 |     return current_list
17 |
18 |
19 | def benchmark_collection(loops, n_levels):
20 |     total_time = 0
21 |     all_cycles = create_recursive_containers(n_levels)  # kept alive so every collection below must traverse it
22 |     for _ in range(loops):
23 |         gc.collect()
24 |         # Main loop to measure
25 |         t0 = pyperf.perf_counter()
26 |         collected = gc.collect()
27 |         total_time += pyperf.perf_counter() - t0
28 |
29 |         assert collected is None or collected == 0
30 |
31 |     return total_time
32 |
33 |
34 | if __name__ == "__main__":
35 |     runner = pyperf.Runner()
36 |     runner.metadata["description"] = "GC traversal benchmark"
37 |     runner.bench_time_func("gc_traversal", benchmark_collection, N_LEVELS)
38 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_generators/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_generators"
3 | requires-python = ">=3.8"
4 | dependencies = ["pyperf"]
5 | urls = {repository = "https://github.com/python/pyperformance"}
6 | dynamic = ["version"]
7 |
8 | [tool.pyperformance]
9 | name = "generators"
10 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_generators/run_benchmark.py:
--------------------------------------------------------------------------------
1 | """
2 | Benchmark recursive generators implemented in Python
3 | by traversing a binary tree.
4 |
5 | Author: Kumar Aditya
6 | """
7 |
8 | from __future__ import annotations
9 |
10 | from collections.abc import Iterator
11 |
12 | import pyperf
13 |
14 |
15 | class Tree:
16 |     def __init__(self, left: Tree | None, value: int, right: Tree | None) -> None:
17 |         self.left = left
18 |         self.value = value
19 |         self.right = right
20 |
21 |     def __iter__(self) -> Iterator[int]:
22 |         if self.left:
23 |             yield from self.left
24 |         yield self.value
25 |         if self.right:
26 |             yield from self.right
27 |
28 |
29 | def tree(input: range) -> Tree | None:
30 |     n = len(input)
31 |     if n == 0:
32 |         return None
33 |     i = n // 2
34 |     return Tree(tree(input[:i]), input[i], tree(input[i + 1:]))
35 |
36 | def bench_generators(loops: int) -> float:
37 |     assert list(tree(range(10))) == list(range(10))
38 |     range_it = range(loops)
39 |     iterable = tree(range(100000))
40 |     t0 = pyperf.perf_counter()
41 |     for _ in range_it:
42 |         for _ in iterable:
43 |             pass
44 |     return pyperf.perf_counter() - t0
45 |
46 | if __name__ == "__main__":
47 |     runner = pyperf.Runner()
48 |     runner.metadata['description'] = "Benchmark generators"
49 |     runner.bench_time_func('generators', bench_generators)
50 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_genshi/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_genshi"
3 | requires-python = ">=3.8"
4 | dependencies = [
5 |     "pyperf",
6 |     "Genshi",
7 | ]
8 | urls = {repository = "https://github.com/python/pyperformance"}
9 | dynamic = ["version"]
10 |
11 | [tool.pyperformance]
12 | name = "genshi"
13 | tags = "template"
14 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_genshi/requirements.txt:
--------------------------------------------------------------------------------
1 | genshi==0.7.7
2 | six==1.16.0
3 |
--------------------------------------------------------------------------------
/pyperformance/data-files/benchmarks/bm_genshi/run_benchmark.py:
--------------------------------------------------------------------------------
1 | """
2 | Render a template using Genshi module.
3 | """
4 |
5 | import pyperf
6 |
7 | from genshi.template import MarkupTemplate, NewTextTemplate
8 |
9 |
10 | BIGTABLE_XML = """\
11 | <table xmlns:py="http://genshi.edgewall.org/">
12 | <tr py:for="row in table">
13 | <td py:for="c in row.values()" py:content="c"/>
14 | </tr>
15 | </table>
16 | """ 17 | 18 | BIGTABLE_TEXT = """\ 19 | 20 | {% for row in table %} 21 | {% for c in row.values() %}{% end %} 22 | {% end %} 23 |
$c
24 | """ 25 | 26 | 27 | def bench_genshi(loops, tmpl_cls, tmpl_str): 28 | tmpl = tmpl_cls(tmpl_str) 29 | table = [dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9, j=10) 30 | for _ in range(1000)] 31 | range_it = range(loops) 32 | t0 = pyperf.perf_counter() 33 | 34 | for _ in range_it: 35 | stream = tmpl.generate(table=table) 36 | stream.render() 37 | 38 | return pyperf.perf_counter() - t0 39 | 40 | 41 | def add_cmdline_args(cmd, args): 42 | if args.benchmark: 43 | cmd.append(args.benchmark) 44 | 45 | 46 | BENCHMARKS = { 47 | 'xml': (MarkupTemplate, BIGTABLE_XML), 48 | 'text': (NewTextTemplate, BIGTABLE_TEXT), 49 | } 50 | 51 | 52 | if __name__ == "__main__": 53 | runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) 54 | runner.metadata['description'] = "Render a template using Genshi module" 55 | runner.argparser.add_argument("benchmark", nargs='?', 56 | choices=sorted(BENCHMARKS)) 57 | 58 | args = runner.parse_args() 59 | if args.benchmark: 60 | benchmarks = (args.benchmark,) 61 | else: 62 | benchmarks = sorted(BENCHMARKS) 63 | 64 | for bench in benchmarks: 65 | name = 'genshi_%s' % bench 66 | tmpl_cls, tmpl_str = BENCHMARKS[bench] 67 | runner.bench_time_func(name, bench_genshi, tmpl_cls, tmpl_str) 68 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_go/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_go" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "go" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_hexiom/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_hexiom" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "hexiom" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_hg_startup/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_hg_startup" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "mercurial", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "hg_startup" 13 | tags = "startup" 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_hg_startup/requirements.txt: -------------------------------------------------------------------------------- 1 | mercurial==5.8 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_hg_startup/run_benchmark.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import subprocess 3 | 4 | import pyperf 5 | from pyperformance.venv import get_venv_program 6 | 7 | 8 | def get_hg_version(hg_bin): 9 | # Fast-path: use directly the Python module 10 | try: 11 | from mercurial.__version__ import version 12 | if isinstance(version, bytes): 13 | return version.decode('utf8') 14 | 
else: 15 | return version 16 | except ImportError: 17 | pass 18 | 19 | # Slow-path: run the "hg --version" command 20 | proc = subprocess.Popen([sys.executable, hg_bin, "--version"], 21 | stdout=subprocess.PIPE, 22 | universal_newlines=True) 23 | stdout = proc.communicate()[0] 24 | if proc.returncode: 25 | print("ERROR: Mercurial command failed!") 26 | sys.exit(proc.returncode) 27 | return stdout.splitlines()[0] 28 | 29 | 30 | if __name__ == "__main__": 31 | runner = pyperf.Runner(values=25) 32 | 33 | runner.metadata['description'] = "Performance of the Python startup" 34 | args = runner.parse_args() 35 | 36 | hg_bin = get_venv_program('hg') 37 | runner.metadata['hg_version'] = get_hg_version(hg_bin) 38 | 39 | command = [sys.executable, hg_bin, "help"] 40 | runner.bench_command('hg_startup', command) 41 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_html5lib/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_html5lib" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "html5lib", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "html5lib" 13 | tags = "apps" 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_html5lib/requirements.txt: -------------------------------------------------------------------------------- 1 | html5lib==1.1 2 | six==1.16.0 3 | webencodings==0.5.1 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_html5lib/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """Wrapper script for testing the performance of the html5lib HTML 5 parser. 2 | 3 | The input data is the spec document for HTML 5, written in HTML 5. 4 | The spec was pulled from http://svn.whatwg.org/webapps/index. 5 | """ 6 | import io 7 | import os.path 8 | 9 | import html5lib 10 | import pyperf 11 | 12 | 13 | __author__ = "collinwinter@google.com (Collin Winter)" 14 | 15 | 16 | def bench_html5lib(html_file): 17 | html_file.seek(0) 18 | html5lib.parse(html_file) 19 | 20 | 21 | if __name__ == "__main__": 22 | runner = pyperf.Runner() 23 | runner.metadata['description'] = ( 24 | "Test the performance of the html5lib parser.") 25 | runner.metadata['html5lib_version'] = html5lib.__version__ 26 | 27 | # Get all our IO over with early. 
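# Reading the whole document into an in-memory BytesIO here means the timed
# bench_html5lib() calls parse from memory and never touch the disk.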
28 | filename = os.path.join(os.path.dirname(__file__), 29 | "data", "w3_tr_html5.html") 30 | with open(filename, "rb") as fp: 31 | html_file = io.BytesIO(fp.read()) 32 | 33 | runner.bench_func('html5lib', bench_html5lib, html_file) 34 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_json_dumps/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_json_dumps" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "json_dumps" 10 | tags = "serialize" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_json_dumps/run_benchmark.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | 4 | import pyperf 5 | 6 | 7 | EMPTY = ({}, 2000) 8 | SIMPLE_DATA = {'key1': 0, 'key2': True, 'key3': 'value', 'key4': 'foo', 9 | 'key5': 'string'} 10 | SIMPLE = (SIMPLE_DATA, 1000) 11 | NESTED_DATA = {'key1': 0, 'key2': SIMPLE[0], 'key3': 'value', 'key4': SIMPLE[0], 12 | 'key5': SIMPLE[0], 'key': '\u0105\u0107\u017c'} 13 | NESTED = (NESTED_DATA, 1000) 14 | HUGE = ([NESTED[0]] * 1000, 1) 15 | 16 | CASES = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE'] 17 | 18 | 19 | def bench_json_dumps(data): 20 | for obj, count_it in data: 21 | for _ in count_it: 22 | json.dumps(obj) 23 | 24 | 25 | def add_cmdline_args(cmd, args): 26 | if args.cases: 27 | cmd.extend(("--cases", args.cases)) 28 | 29 | 30 | def main(): 31 | runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) 32 | runner.argparser.add_argument("--cases", 33 | help="Comma separated list of cases. Available cases: %s. By default, run all cases." 
34 | % ', '.join(CASES)) 35 | runner.metadata['description'] = "Benchmark json.dumps()" 36 | 37 | args = runner.parse_args() 38 | if args.cases: 39 | cases = [] 40 | for case in args.cases.split(','): 41 | case = case.strip() 42 | if case: 43 | cases.append(case) 44 | if not cases: 45 | print("ERROR: empty list of cases") 46 | sys.exit(1) 47 | else: 48 | cases = CASES 49 | 50 | data = [] 51 | for case in cases: 52 | obj, count = globals()[case] 53 | data.append((obj, range(count))) 54 | 55 | runner.bench_func('json_dumps', bench_json_dumps, data) 56 | 57 | 58 | if __name__ == '__main__': 59 | main() 60 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_json_loads/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_json_loads" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "json_loads" 10 | tags = "serialize" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_logging/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_logging" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "logging" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_mako/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_mako" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "Mako", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "mako" 13 | tags = "template" 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_mako/requirements.txt: -------------------------------------------------------------------------------- 1 | mako==1.1.4 2 | markupsafe==2.0.1 3 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_mdp/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_mdp" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "mdp" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_meteor_contest/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_meteor_contest" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "meteor_contest" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_nbody/pyproject.toml: 
-------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_nbody" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "nbody" 10 | tags = "math" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_networkx/bm_networkx_connected_components.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "networkx_connected_components" 3 | extra_opts = ["connected_components"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_networkx/bm_networkx_k_core.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "networkx_k_core" 3 | extra_opts = ["k_core"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_networkx/data/amazon0302.txt.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_networkx/data/amazon0302.txt.gz -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_networkx/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_networkx" 3 | requires-python = ">=3.11" 4 | dependencies = [ 5 | "pyperf", 6 | "networkx", 7 | ] 8 | urls.repository = "https://github.com/python/pyperformance" 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "networkx_shortest_path" 13 | extra_opts = ["shortest_path"] 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_networkx/requirements.txt: -------------------------------------------------------------------------------- 1 | networkx==3.4.2 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_networkx/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Some graph algorithm benchmarks using networkx 3 | 4 | This uses the public domain Amazon data set from the SNAP benchmarks: 5 | 6 | https://snap.stanford.edu/data/amazon0302.html 7 | 8 | Choice of benchmarks inspired by Timothy Lin's work here: 9 | 10 | https://www.timlrx.com/blog/benchmark-of-popular-graph-network-packages 11 | """ 12 | 13 | import collections 14 | from pathlib import Path 15 | 16 | import networkx 17 | 18 | import pyperf 19 | 20 | 21 | DATA_FILE = Path(__file__).parent / "data" / "amazon0302.txt.gz" 22 | 23 | 24 | graph = networkx.read_adjlist(DATA_FILE) 25 | 26 | 27 | def bench_shortest_path(): 28 | collections.deque(networkx.shortest_path_length(graph, "0")) 29 | 30 | 31 | def bench_connected_components(): 32 | networkx.number_connected_components(graph) 33 | 34 | 35 | def bench_k_core(): 36 | networkx.k_core(graph) 37 | 38 | 39 | BENCHMARKS = { 40 | "shortest_path": bench_shortest_path, 41 | "connected_components": bench_connected_components, 42 | "k_core": bench_k_core, 43 | } 44 | 45 | 46 | def 
add_cmdline_args(cmd, args): 47 | cmd.append(args.benchmark) 48 | 49 | 50 | def add_parser_args(parser): 51 | parser.add_argument("benchmark", choices=BENCHMARKS, help="Which benchmark to run.") 52 | 53 | 54 | if __name__ == "__main__": 55 | runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) 56 | runner.metadata["description"] = "NetworkX benchmark" 57 | add_parser_args(runner.argparser) 58 | args = runner.parse_args() 59 | benchmark = args.benchmark 60 | 61 | runner.bench_func(args.benchmark, BENCHMARKS[args.benchmark]) 62 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_nqueens/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_nqueens" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "nqueens" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_nqueens/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """Simple, brute-force N-Queens solver.""" 2 | 3 | import pyperf 4 | 5 | __author__ = "collinwinter@google.com (Collin Winter)" 6 | 7 | 8 | # Pure-Python implementation of itertools.permutations(). 9 | def permutations(iterable, r=None): 10 | """permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)""" 11 | pool = tuple(iterable) 12 | n = len(pool) 13 | if r is None: 14 | r = n 15 | indices = list(range(n)) 16 | cycles = list(range(n - r + 1, n + 1))[::-1] 17 | yield tuple(pool[i] for i in indices[:r]) 18 | while n: 19 | for i in reversed(range(r)): 20 | cycles[i] -= 1 21 | if cycles[i] == 0: 22 | indices[i:] = indices[i + 1:] + indices[i:i + 1] 23 | cycles[i] = n - i 24 | else: 25 | j = cycles[i] 26 | indices[i], indices[-j] = indices[-j], indices[i] 27 | yield tuple(pool[i] for i in indices[:r]) 28 | break 29 | else: 30 | return 31 | 32 | 33 | # From http://code.activestate.com/recipes/576647/ 34 | def n_queens(queen_count): 35 | """N-Queens solver. 36 | 37 | Args: 38 | queen_count: the number of queens to solve for. This is also the 39 | board size. 40 | 41 | Yields: 42 | Solutions to the problem. Each yielded value looks like 43 | (3, 8, 2, 1, 4, ..., 6) where each number is the column position for the 44 | queen, and the index into the tuple indicates the row.
45 | """ 46 | cols = range(queen_count) 47 | for vec in permutations(cols): 48 | if (queen_count == len(set(vec[i] + i for i in cols)) 49 | == len(set(vec[i] - i for i in cols))): 50 | yield vec 51 | 52 | 53 | def bench_n_queens(queen_count): 54 | list(n_queens(queen_count)) 55 | 56 | 57 | if __name__ == "__main__": 58 | runner = pyperf.Runner() 59 | runner.metadata['description'] = "Simple, brute-force N-Queens solver" 60 | 61 | queen_count = 8 62 | runner.bench_func('nqueens', bench_n_queens, queen_count) 63 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pathlib/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pathlib" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pathlib" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pathlib/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the performance of pathlib operations. 3 | 4 | This benchmark stresses the creation of small objects, globbing, and system 5 | calls. 6 | """ 7 | 8 | # Python imports 9 | import os 10 | import pathlib 11 | import shutil 12 | import tempfile 13 | 14 | import pyperf 15 | 16 | 17 | NUM_FILES = 2000 18 | 19 | 20 | def generate_filenames(tmp_path, num_files): 21 | i = 0 22 | while num_files: 23 | for ext in [".py", ".txt", ".tar.gz", ""]: 24 | i += 1 25 | yield os.path.join(tmp_path, str(i) + ext) 26 | num_files -= 1 27 | 28 | 29 | def setup(num_files): 30 | tmp_path = tempfile.mkdtemp() 31 | for fn in generate_filenames(tmp_path, num_files): 32 | with open(fn, "wb") as f: 33 | f.write(b'benchmark') 34 | 35 | return tmp_path 36 | 37 | 38 | def bench_pathlib(loops, tmp_path): 39 | base_path = pathlib.Path(tmp_path) 40 | 41 | # Warm up the filesystem cache and keep some objects in memory. 42 | path_objects = list(base_path.iterdir()) 43 | # FIXME: does this code really cache anything? 44 | for p in path_objects: 45 | p.stat() 46 | assert len(path_objects) == NUM_FILES, len(path_objects) 47 | 48 | range_it = range(loops) 49 | t0 = pyperf.perf_counter() 50 | 51 | for _ in range_it: 52 | # Do something simple with each path. 
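        # (Editor's note: pyperf's bench_time_func() protocol, for readers of
        # this dump: the runner passes a calibrated `loops` count, the function
        # must return the total elapsed seconds for all iterations, and pyperf
        # divides by `loops` to report a per-iteration time. The body below
        # repeats the iterdir()/glob() pairs twice so each timed iteration
        # does a meaningful amount of filesystem work.)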
53 | for p in base_path.iterdir(): 54 | p.stat() 55 | for p in base_path.glob("*.py"): 56 | p.stat() 57 | for p in base_path.iterdir(): 58 | p.stat() 59 | for p in base_path.glob("*.py"): 60 | p.stat() 61 | 62 | return pyperf.perf_counter() - t0 63 | 64 | 65 | if __name__ == "__main__": 66 | runner = pyperf.Runner() 67 | runner.metadata['description'] = ("Test the performance of " 68 | "pathlib operations.") 69 | 70 | modname = pathlib.__name__ 71 | runner.metadata['pathlib_module'] = modname 72 | 73 | tmp_path = setup(NUM_FILES) 74 | try: 75 | runner.bench_time_func('pathlib', bench_pathlib, tmp_path) 76 | finally: 77 | shutil.rmtree(tmp_path) 78 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_dict.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pickle_dict" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pickle_dict" 10 | tags = "serialize" 11 | extra_opts = ["pickle_dict"] 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_list.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pickle_list" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pickle_list" 10 | tags = "serialize" 11 | extra_opts = ["pickle_list"] 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pickle/bm_pickle_pure_python.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pickle_pure_python" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pickle_pure_python" 10 | tags = "serialize" 11 | extra_opts = ["--pure-python", "pickle"] 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_unpickle" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "unpickle" 10 | tags = "serialize" 11 | extra_opts = ["unpickle"] 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_list.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_unpickle_list" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "unpickle_list" 10 | tags = "serialize" 11 | extra_opts = ["unpickle_list"] 12 | -------------------------------------------------------------------------------- 
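(Editor's note on the bm_pickle variant files above and below: the directory ships a single run_benchmark.py, and each small .toml file only overrides the benchmark name plus `extra_opts`, which pyperformance appends to the script's command line. Under that reading, a variant such as unpickle_list is invoked roughly as:

    python run_benchmark.py unpickle_list

while the `--pure-python` variants presumably select the pure-Python pickle implementation instead of the C accelerator.)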
/pyperformance/data-files/benchmarks/bm_pickle/bm_unpickle_pure_python.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_unpickle_pure_python" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "unpickle_pure_python" 10 | tags = "serialize" 11 | extra_opts = ["--pure-python", "unpickle"] 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pickle/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pickle" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pickle" 10 | tags = "serialize" 11 | extra_opts = ["pickle"] 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pidigits/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pidigits" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pidigits" 10 | tags = "math" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pidigits/run_benchmark.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | """ 3 | Calculating some of the digits of π. 4 | 5 | This benchmark stresses big integer arithmetic. 6 | 7 | Adapted from code on: 8 | http://benchmarksgame.alioth.debian.org/ 9 | """ 10 | 11 | import itertools 12 | 13 | import pyperf 14 | 15 | 16 | DEFAULT_DIGITS = 2000 17 | icount = itertools.count 18 | islice = itertools.islice 19 | 20 | 21 | def gen_x(): 22 | return map(lambda k: (k, 4 * k + 2, 0, 2 * k + 1), icount(1)) 23 | 24 | 25 | def compose(a, b): 26 | aq, ar, as_, at = a 27 | bq, br, bs, bt = b 28 | return (aq * bq, 29 | aq * br + ar * bt, 30 | as_ * bq + at * bs, 31 | as_ * br + at * bt) 32 | 33 | 34 | def extract(z, j): 35 | q, r, s, t = z 36 | return (q * j + r) // (s * j + t) 37 | 38 | 39 | def gen_pi_digits(): 40 | z = (1, 0, 0, 1) 41 | x = gen_x() 42 | while 1: 43 | y = extract(z, 3) 44 | while y != extract(z, 4): 45 | z = compose(z, next(x)) 46 | y = extract(z, 3) 47 | z = compose((10, -10 * y, 0, 1), z) 48 | yield y 49 | 50 | 51 | def calc_ndigits(n): 52 | return list(islice(gen_pi_digits(), n)) 53 | 54 | 55 | def add_cmdline_args(cmd, args): 56 | cmd.extend(("--digits", str(args.digits))) 57 | 58 | 59 | if __name__ == "__main__": 60 | runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) 61 | 62 | cmd = runner.argparser 63 | cmd.add_argument("--digits", type=int, default=DEFAULT_DIGITS, 64 | help="Number of computed pi digits (default: %s)" 65 | % DEFAULT_DIGITS) 66 | 67 | args = runner.parse_args() 68 | runner.metadata['description'] = "Compute digits of pi." 
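    # (Editor's note: pyperf re-executes this script in separate worker
    # processes; the add_cmdline_args hook registered above is what forwards
    # --digits to those workers, so every process computes the same number
    # of digits.)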
69 | runner.metadata['pidigits_ndigit'] = args.digits 70 | runner.bench_func('pidigits', calc_ndigits, args.digits) 71 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pprint/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pprint" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pprint" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pprint/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """Test the performance of pprint.PrettyPrinter. 2 | 3 | This benchmark was available as `python -m pprint` until Python 3.12. 4 | 5 | Authors: Fred Drake (original), Oleg Iarygin (pyperformance port). 6 | """ 7 | 8 | import pyperf 9 | from pprint import PrettyPrinter 10 | 11 | 12 | printable = [('string', (1, 2), [3, 4], {5: 6, 7: 8})] * 100_000 13 | p = PrettyPrinter() 14 | 15 | 16 | if __name__ == '__main__': 17 | runner = pyperf.Runner() 18 | runner.metadata['description'] = 'pprint benchmark' 19 | 20 | if hasattr(p, '_safe_repr'): 21 | runner.bench_func('pprint_safe_repr', p._safe_repr, 22 | printable, {}, None, 0) 23 | runner.bench_func('pprint_pformat', p.pformat, printable) 24 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pyflate/data/interpreter.tar.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_pyflate/data/interpreter.tar.bz2 -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_pyflate/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_pyflate" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "pyflate" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_python_startup/bm_python_startup_no_site.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_python_startup_no_site" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "python_startup_no_site" 10 | extra_opts = ["--no-site"] 11 | tags = "startup" 12 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_python_startup/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_python_startup" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "python_startup" 10 | tags = "startup" 11 | 
-------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_python_startup/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Benchmark Python startup. 3 | """ 4 | import sys 5 | 6 | import pyperf 7 | 8 | 9 | def add_cmdline_args(cmd, args): 10 | if args.no_site: 11 | cmd.append("--no-site") 12 | if args.exit: 13 | cmd.append("--exit") 14 | 15 | 16 | if __name__ == "__main__": 17 | runner = pyperf.Runner(values=10, add_cmdline_args=add_cmdline_args) 18 | runner.argparser.add_argument("--no-site", action="store_true") 19 | runner.argparser.add_argument("--exit", action="store_true") 20 | 21 | runner.metadata['description'] = "Performance of the Python startup" 22 | args = runner.parse_args() 23 | name = 'python_startup' 24 | if args.no_site: 25 | name += "_no_site" 26 | if args.exit: 27 | name += "_exit" 28 | 29 | command = [sys.executable] 30 | if args.no_site: 31 | command.append("-S") 32 | if args.exit: 33 | command.extend(("-c", "import os; os._exit(0)")) 34 | else: 35 | command.extend(("-c", "pass")) 36 | 37 | runner.bench_command(name, command) 38 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_raytrace/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_raytrace" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "raytrace" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_regex_compile/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_regex_compile" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "regex_compile" 10 | tags = "regex" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_regex_compile/run_benchmark.py: -------------------------------------------------------------------------------- 1 | 2 | """Benchmark how quickly Python's regex implementation can compile regexes. 3 | 4 | We bring in all the regexes used by the other regex benchmarks, capture them by 5 | stubbing out the re module, then compile those regexes repeatedly. We muck with 6 | the re module's caching to force it to recompile every regex we give it. 
7 | """ 8 | 9 | # Python imports 10 | import re 11 | 12 | # Local imports 13 | import pyperf 14 | 15 | 16 | def capture_regexes(): 17 | regexes = [] 18 | 19 | real_compile = re.compile 20 | real_search = re.search 21 | real_sub = re.sub 22 | 23 | def capture_compile(regex, flags=0): 24 | regexes.append((regex, flags)) 25 | return real_compile(regex, flags) 26 | 27 | def capture_search(regex, target, flags=0): 28 | regexes.append((regex, flags)) 29 | return real_search(regex, target, flags) 30 | 31 | def capture_sub(regex, *args): 32 | regexes.append((regex, 0)) 33 | return real_sub(regex, *args) 34 | 35 | re.compile = capture_compile 36 | re.search = capture_search 37 | re.sub = capture_sub 38 | try: 39 | import bm_regex_effbot 40 | bm_regex_effbot.bench_regex_effbot(1) 41 | 42 | import bm_regex_v8 43 | bm_regex_v8.bench_regex_v8(1) 44 | finally: 45 | re.compile = real_compile 46 | re.search = real_search 47 | re.sub = real_sub 48 | return regexes 49 | 50 | 51 | def bench_regex_compile(loops, regexes): 52 | range_it = range(loops) 53 | t0 = pyperf.perf_counter() 54 | 55 | for _ in range_it: 56 | for regex, flags in regexes: 57 | re.purge() 58 | # ignore result (compiled regex) 59 | re.compile(regex, flags) 60 | 61 | return pyperf.perf_counter() - t0 62 | 63 | 64 | if __name__ == "__main__": 65 | runner = pyperf.Runner() 66 | runner.metadata['description'] = "Test regex compilation performance" 67 | 68 | regexes = capture_regexes() 69 | runner.bench_time_func('regex_compile', bench_regex_compile, regexes) 70 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_regex_dna/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_regex_dna" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "regex_dna" 10 | tags = "regex" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_regex_effbot/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_regex_effbot" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "regex_effbot" 10 | tags = "regex" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_regex_v8/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_regex_v8" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "regex_v8" 10 | tags = "regex" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_richards/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_richards" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = 
"richards" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_richards_super/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_richards_super" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "richards_super" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_scimark/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_scimark" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "scimark" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_spectral_norm/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_spectral_norm" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "spectral_norm" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_spectral_norm/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | MathWorld: "Hundred-Dollar, Hundred-Digit Challenge Problems", Challenge #3. 
3 | http://mathworld.wolfram.com/Hundred-DollarHundred-DigitChallengeProblems.html 4 | 5 | The Computer Language Benchmarks Game 6 | http://benchmarksgame.alioth.debian.org/u64q/spectralnorm-description.html#spectralnorm 7 | 8 | Contributed by Sebastien Loisel 9 | Fixed by Isaac Gouy 10 | Sped up by Josh Goldfoot 11 | Dirtily sped up by Simon Descarpentries 12 | Concurrency by Jason Stitt 13 | """ 14 | 15 | import pyperf 16 | 17 | 18 | DEFAULT_N = 130 19 | 20 | 21 | def eval_A(i, j): 22 | return 1.0 / ((i + j) * (i + j + 1) // 2 + i + 1) 23 | 24 | 25 | def eval_times_u(func, u): 26 | return [func((i, u)) for i in range(len(list(u)))] 27 | 28 | 29 | def eval_AtA_times_u(u): 30 | return eval_times_u(part_At_times_u, eval_times_u(part_A_times_u, u)) 31 | 32 | 33 | def part_A_times_u(i_u): 34 | i, u = i_u 35 | partial_sum = 0 36 | for j, u_j in enumerate(u): 37 | partial_sum += eval_A(i, j) * u_j 38 | return partial_sum 39 | 40 | 41 | def part_At_times_u(i_u): 42 | i, u = i_u 43 | partial_sum = 0 44 | for j, u_j in enumerate(u): 45 | partial_sum += eval_A(j, i) * u_j 46 | return partial_sum 47 | 48 | 49 | def bench_spectral_norm(loops): 50 | range_it = range(loops) 51 | t0 = pyperf.perf_counter() 52 | 53 | for _ in range_it: 54 | u = [1] * DEFAULT_N 55 | 56 | for dummy in range(10): 57 | v = eval_AtA_times_u(u) 58 | u = eval_AtA_times_u(v) 59 | 60 | vBv = vv = 0 61 | 62 | for ue, ve in zip(u, v): 63 | vBv += ue * ve 64 | vv += ve * ve 65 | 66 | return pyperf.perf_counter() - t0 67 | 68 | 69 | if __name__ == "__main__": 70 | runner = pyperf.Runner() 71 | runner.metadata['description'] = ( 72 | 'MathWorld: "Hundred-Dollar, Hundred-Digit Challenge Problems", ' 73 | 'Challenge #3.') 74 | runner.bench_time_func('spectral_norm', bench_spectral_norm) 75 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/about.rst: -------------------------------------------------------------------------------- 1 | ===================== 2 | About these documents 3 | ===================== 4 | 5 | 6 | These documents are generated from `reStructuredText`_ sources by `Sphinx`_, a 7 | document processor specifically written for the Python documentation. 8 | 9 | .. _reStructuredText: https://docutils.sourceforge.io/rst.html 10 | .. _Sphinx: https://www.sphinx-doc.org/ 11 | 12 | .. In the online version of these documents, you can submit comments and suggest 13 | changes directly on the documentation pages. 14 | 15 | Development of the documentation and its toolchain is an entirely volunteer 16 | effort, just like Python itself. If you want to contribute, please take a 17 | look at the :ref:`reporting-bugs` page for information on how to do so. New 18 | volunteers are always welcome! 19 | 20 | Many thanks go to: 21 | 22 | * Fred L. Drake, Jr., the creator of the original Python documentation toolset 23 | and writer of much of the content; 24 | * the `Docutils <https://docutils.sourceforge.io/>`_ project for creating 25 | reStructuredText and the Docutils suite; 26 | * Fredrik Lundh for his Alternative Python Reference project from which Sphinx 27 | got many good ideas. 28 | 29 | 30 | Contributors to the Python Documentation 31 | ---------------------------------------- 32 | 33 | Many people have contributed to the Python language, the Python standard 34 | library, and the Python documentation. See :source:`Misc/ACKS` in the Python 35 | source distribution for a partial list of contributors.
36 | 37 | It is only with the input and contributions of the Python community 38 | that Python has such wonderful documentation -- Thank You! 39 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/conf.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.abspath('tools/extensions')) 5 | 6 | extensions = [ 7 | 'pyspecific', 8 | 'sphinx.ext.extlinks', 9 | ] 10 | 11 | manpages_url = 'https://manpages.debian.org/{path}' 12 | 13 | # General substitutions. 14 | project = 'Python' 15 | copyright = f"2001, Python Software Foundation" 16 | 17 | version = release = sys.version.split(" ", 1)[0] 18 | 19 | rst_epilog = f""" 20 | .. |python_version_literal| replace:: ``Python {version}`` 21 | .. |python_x_dot_y_literal| replace:: ``python{version}`` 22 | .. |usr_local_bin_python_x_dot_y_literal| replace:: ``/usr/local/bin/python{version}`` 23 | """ 24 | 25 | # There are two options for replacing |today|: either, you set today to some 26 | # non-false value, then it is used: 27 | today = '' 28 | # Else, today_fmt is used as the format for a strftime call. 29 | today_fmt = '%B %d, %Y' 30 | 31 | # By default, highlight as Python 3. 32 | highlight_language = 'python3' 33 | 34 | # Minimum version of sphinx required 35 | needs_sphinx = '6.2.1' 36 | 37 | # Create table of contents entries for domain objects (e.g. functions, classes, 38 | # attributes, etc.). Default is True. 39 | toc_object_entries = False 40 | 41 | # Disable Docutils smartquotes for several translations 42 | smartquotes_excludes = { 43 | 'languages': ['ja', 'fr', 'zh_TW', 'zh_CN'], 44 | 'builders': ['man', 'text'], 45 | } 46 | 47 | # Avoid a warning with Sphinx >= 4.0 48 | root_doc = 'contents' 49 | 50 | extlinks = { 51 | "cve": ("https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s", "CVE-%s"), 52 | "cwe": ("https://cwe.mitre.org/data/definitions/%s.html", "CWE-%s"), 53 | "pypi": ("https://pypi.org/project/%s/", "%s"), 54 | "source": ('https://github.com/python/cpython/tree/3.13/%s', "%s"), 55 | } 56 | extlinks_detect_hardcoded_links = True 57 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/constraints.txt: -------------------------------------------------------------------------------- 1 | # We have upper bounds on our transitive dependencies here 2 | # To avoid new releases unexpectedly breaking our build. 3 | # This file can be updated on an ad-hoc basis, 4 | # though it will probably have to be updated 5 | # whenever Doc/requirements.txt is updated. 
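# (Editor's note, hedged: a constraints file pins upper bounds without
# installing anything by itself; it is typically applied alongside the
# requirements file, e.g.:
#
#   pip install -r requirements.txt -c constraints.txt
# )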
6 | 7 | # Direct dependencies of Sphinx 8 | babel<3 9 | colorama<0.5 10 | imagesize<2 11 | Jinja2<4 12 | packaging<25 13 | Pygments<3 14 | requests<3 15 | snowballstemmer<3 16 | # keep lower-bounds until Sphinx 8.1 is released 17 | # https://github.com/sphinx-doc/sphinx/pull/12756 18 | sphinxcontrib-applehelp>=1.0.7,<3 19 | sphinxcontrib-devhelp>=1.0.6,<3 20 | sphinxcontrib-htmlhelp>=2.0.6,<3 21 | sphinxcontrib-jsmath>=1.0.1,<2 22 | sphinxcontrib-qthelp>=1.0.6,<3 23 | sphinxcontrib-serializinghtml>=1.1.9,<3 24 | 25 | # Direct dependencies of Jinja2 (Jinja is a dependency of Sphinx, see above) 26 | MarkupSafe<3 27 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/contents.rst: -------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | Python Documentation contents 3 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 4 | 5 | .. toctree:: 6 | 7 | whatsnew/index.rst 8 | tutorial/index.rst 9 | using/index.rst 10 | reference/index.rst 11 | library/index.rst 12 | extending/index.rst 13 | c-api/index.rst 14 | installing/index.rst 15 | howto/index.rst 16 | faq/index.rst 17 | deprecations/index.rst 18 | glossary.rst 19 | 20 | about.rst 21 | bugs.rst 22 | copyright.rst 23 | license.rst 24 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/copyright.rst: -------------------------------------------------------------------------------- 1 | ********* 2 | Copyright 3 | ********* 4 | 5 | Python and this documentation is: 6 | 7 | Copyright © 2001-2024 Python Software Foundation. All rights reserved. 8 | 9 | Copyright © 2000 BeOpen.com. All rights reserved. 10 | 11 | Copyright © 1995-2000 Corporation for National Research Initiatives. All rights 12 | reserved. 13 | 14 | Copyright © 1991-1995 Stichting Mathematisch Centrum. All rights reserved. 15 | 16 | ------- 17 | 18 | See :ref:`history-and-license` for complete license and permissions information. 19 | 20 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/howto/clinic.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | .. This page is retained solely for existing links to /howto/clinic.html. 4 | Direct readers to the devguide. 5 | 6 | ********************** 7 | Argument Clinic How-To 8 | ********************** 9 | 10 | 11 | .. note:: 12 | 13 | The Argument Clinic How-TO has been moved to the `Python Developer's Guide 14 | `__. 15 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/howto/cporting.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: c 2 | 3 | .. _cporting-howto: 4 | 5 | ************************************* 6 | Porting Extension Modules to Python 3 7 | ************************************* 8 | 9 | We recommend the following resources for porting extension modules to Python 3: 10 | 11 | * The `Migrating C extensions`_ chapter from 12 | *Supporting Python 3: An in-depth guide*, a book on moving from Python 2 13 | to Python 3 in general, guides the reader through porting an extension 14 | module. 15 | * The `Porting guide`_ from the *py3c* project provides opinionated 16 | suggestions with supporting code. 
17 | * The `Cython`_ and `CFFI`_ libraries offer abstractions over 18 | Python's C API. 19 | Extensions generally need to be re-written to use one of them, 20 | but the library then handles differences between various Python 21 | versions and implementations. 22 | 23 | .. _Migrating C extensions: http://python3porting.com/cextensions.html 24 | .. _Porting guide: https://py3c.readthedocs.io/en/latest/guide.html 25 | .. _Cython: https://cython.org/ 26 | .. _CFFI: https://cffi.readthedocs.io/en/latest/ 27 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/data/Doc/howto/index.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Python HOWTOs 3 | *************** 4 | 5 | Python HOWTOs are documents that cover a specific topic in-depth. 6 | Modeled on the Linux Documentation Project's HOWTO collection, this collection is an 7 | effort to foster documentation that's more detailed than the 8 | Python Library Reference. 9 | 10 | .. toctree:: 11 | :maxdepth: 1 12 | :hidden: 13 | 14 | cporting.rst 15 | curses.rst 16 | descriptor.rst 17 | gdb_helpers.rst 18 | enum.rst 19 | functional.rst 20 | logging.rst 21 | logging-cookbook.rst 22 | regex.rst 23 | sockets.rst 24 | sorting.rst 25 | unicode.rst 26 | urllib2.rst 27 | argparse.rst 28 | ipaddress.rst 29 | instrumentation.rst 30 | perf_profiling.rst 31 | annotations.rst 32 | isolating-extensions.rst 33 | timerfd.rst 34 | mro.rst 35 | free-threading-extensions.rst 36 | 37 | General: 38 | 39 | * :ref:`annotations-howto` 40 | * :ref:`argparse-tutorial` 41 | * :ref:`descriptorhowto` 42 | * :ref:`enum-howto` 43 | * :ref:`functional-howto` 44 | * :ref:`ipaddress-howto` 45 | * :ref:`logging-howto` 46 | * :ref:`logging-cookbook` 47 | * :ref:`regex-howto` 48 | * :ref:`sortinghowto` 49 | * :ref:`unicode-howto` 50 | * :ref:`urllib-howto` 51 | 52 | Advanced development: 53 | 54 | * :ref:`curses-howto` 55 | * :ref:`freethreading-extensions-howto` 56 | * :ref:`isolating-extensions-howto` 57 | * :ref:`python_2.3_mro` 58 | * :ref:`socket-howto` 59 | * :ref:`timerfd-howto` 60 | * :ref:`cporting-howto` 61 | 62 | Debugging and profiling: 63 | 64 | * :ref:`gdb` 65 | * :ref:`instrumentation` 66 | * :ref:`perf_profiling` 67 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_sphinx" 3 | requires-python = ">=3.11" 4 | dependencies = [ 5 | "pyperf", 6 | "sphinx", 7 | ] 8 | urls.repository = "https://github.com/python/pyperformance" 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "sphinx" 13 | tags = "apps" 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sphinx/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx==7.3.7 2 | python-docs-theme==2024.6 3 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_sqlalchemy_declarative" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "SQLAlchemy", 7 | ] 8 | urls = {repository = 
"https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "sqlalchemy_declarative" 13 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlalchemy_declarative/requirements.txt: -------------------------------------------------------------------------------- 1 | greenlet==3.1.0 2 | sqlalchemy==1.4.19 3 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_sqlalchemy_imperative" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "SQLAlchemy", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "sqlalchemy_imperative" 13 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlalchemy_imperative/requirements.txt: -------------------------------------------------------------------------------- 1 | greenlet==3.1.0 2 | sqlalchemy==1.4.19 3 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlglot_v2/bm_sqlglot_v2_optimize.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "sqlglot_v2_optimize" 3 | extra_opts = ["optimize"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlglot_v2/bm_sqlglot_v2_parse.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "sqlglot_v2_parse" 3 | extra_opts = ["parse"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlglot_v2/bm_sqlglot_v2_transpile.toml: -------------------------------------------------------------------------------- 1 | [tool.pyperformance] 2 | name = "sqlglot_v2_transpile" 3 | extra_opts = ["transpile"] 4 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlglot_v2/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_sqlglot_v2" 3 | requires-python = ">=3.7" 4 | dependencies = [ 5 | "pyperf", 6 | "sqlglot_v2", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "sqlglot_v2" 13 | extra_opts = ["normalize"] 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlglot_v2/requirements.txt: -------------------------------------------------------------------------------- 1 | sqlglot==4.6.0 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlite_synth/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_sqlite_synth" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | 
[tool.pyperformance] 9 | name = "sqlite_synth" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sqlite_synth/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | SQLite benchmark. 3 | 4 | The goal of the benchmark is to test CFFI performance and the cost of going 5 | back and forth between SQLite and Python a lot. Therefore the queries 6 | themselves are really simple. 7 | """ 8 | 9 | import sqlite3 10 | import math 11 | 12 | import pyperf 13 | 14 | 15 | class AvgLength(object): 16 | 17 | def __init__(self): 18 | self.sum = 0 19 | self.count = 0 20 | 21 | def step(self, x): 22 | if x is not None: 23 | self.count += 1 24 | self.sum += len(x) 25 | 26 | def finalize(self): 27 | return self.sum / float(self.count) 28 | 29 | 30 | def bench_sqlite(loops): 31 | t0 = pyperf.perf_counter() 32 | 33 | conn = sqlite3.connect(":memory:") 34 | conn.execute('create table cos (x, y, z);') 35 | for i in range(loops): 36 | cos_i = math.cos(i) 37 | conn.execute('insert into cos values (?, ?, ?)', 38 | [i, cos_i, str(i)]) 39 | 40 | conn.create_function("cos", 1, math.cos) 41 | for x, cosx1, cosx2 in conn.execute("select x, cos(x), y from cos"): 42 | assert math.cos(x) == cosx1 == cosx2 43 | 44 | conn.create_aggregate("avglength", 1, AvgLength) 45 | cursor = conn.execute("select avglength(z) from cos;") 46 | cursor.fetchone()[0] 47 | 48 | conn.execute("delete from cos;") 49 | conn.close() 50 | 51 | return pyperf.perf_counter() - t0 52 | 53 | 54 | if __name__ == "__main__": 55 | runner = pyperf.Runner() 56 | runner.metadata['description'] = "Benchmark Python aggregate for SQLite" 57 | runner.bench_time_func('sqlite_synth', bench_sqlite) 58 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sympy/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_sympy" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "sympy", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "sympy" 13 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sympy/requirements.txt: -------------------------------------------------------------------------------- 1 | mpmath==1.2.1 2 | sympy==1.8 3 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_sympy/run_benchmark.py: -------------------------------------------------------------------------------- 1 | import pyperf 2 | 3 | from sympy import expand, symbols, integrate, tan, summation 4 | from sympy.core.cache import clear_cache 5 | 6 | 7 | def bench_expand(): 8 | x, y, z = symbols('x y z') 9 | expand((1 + x + y + z) ** 20) 10 | 11 | 12 | def bench_integrate(): 13 | x, y = symbols('x y') 14 | f = (1 / tan(x)) ** 10 15 | return integrate(f, x) 16 | 17 | 18 | def bench_sum(): 19 | x, i = symbols('x i') 20 | summation(x ** i / i, (i, 1, 400)) 21 | 22 | 23 | def bench_str(): 24 | x, y, z = symbols('x y z') 25 | str(expand((x + 2 * y + 3 * z) ** 30)) 26 | 27 | 28 | def bench_sympy(loops, func): 29 | timer = pyperf.perf_counter 30 | dt = 0 31 | 32 | for _ in range(loops): 33 | # Don't benchmark clear_cache(); exclude it from the benchmark 34 | clear_cache() 35 | 36 | t0 =
timer() 37 | func() 38 | dt += (timer() - t0) 39 | 40 | return dt 41 | 42 | 43 | BENCHMARKS = ("expand", "integrate", "sum", "str") 44 | 45 | 46 | def add_cmdline_args(cmd, args): 47 | if args.benchmark: 48 | cmd.append(args.benchmark) 49 | 50 | 51 | if __name__ == "__main__": 52 | runner = pyperf.Runner(add_cmdline_args=add_cmdline_args) 53 | runner.metadata['description'] = "SymPy benchmark" 54 | runner.argparser.add_argument("benchmark", nargs='?', 55 | choices=BENCHMARKS) 56 | 57 | import gc 58 | gc.disable() 59 | 60 | args = runner.parse_args() 61 | if args.benchmark: 62 | benchmarks = (args.benchmark,) 63 | else: 64 | benchmarks = BENCHMARKS 65 | 66 | for bench in benchmarks: 67 | name = 'sympy_%s' % bench 68 | func = globals()['bench_' + bench] 69 | runner.bench_time_func(name, bench_sympy, func) 70 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_telco/data/telco-bench.b: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/data-files/benchmarks/bm_telco/data/telco-bench.b -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_telco/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_telco" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "telco" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_tomli_loads/generate_data.py: -------------------------------------------------------------------------------- 1 | from urllib.request import urlopen 2 | import json 3 | import toml 4 | 5 | BASE_URL = "https://api.github.com/repos/python/cpython/pulls?per_page=1000&state=all" 6 | 7 | def main(): 8 | all_issues = [] 9 | for page in range(1, 11): 10 | with urlopen(f"{BASE_URL}&page={page}") as response: 11 | issues = json.loads(response.read()) 12 | if not issues: 13 | break 14 | all_issues.extend(issues) 15 | print(f"Page: {page} Total Issues: {len(all_issues)}") 16 | with open("issues.toml", "w") as f: 17 | f.write(toml.dumps({"data": all_issues})) 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_tomli_loads/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_tomli_loads" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf", "tomli"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "tomli_loads" 10 | tags = "serialize" 11 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_tomli_loads/requirements.txt: -------------------------------------------------------------------------------- 1 | tomli==2.0.1 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_tomli_loads/run_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | 
Benchmark the ``loads()`` function of the ``tomli`` module 3 | on a large TOML file of real-world GitHub data generated by 4 | the ``generate_data.py`` script. 5 | 6 | It heavily exercises string operations such as concatenation, 7 | subscripting and iteration. 8 | 9 | Author: Kumar Aditya 10 | """ 11 | 12 | from pathlib import Path 13 | 14 | import pyperf 15 | import tomli 16 | 17 | DATA_FILE = Path(__file__).parent / "data" / "tomli-bench-data.toml" 18 | 19 | def bench_tomli_loads(loops: int) -> float: 20 | data = DATA_FILE.read_text('utf-8') 21 | range_it = range(loops) 22 | t0 = pyperf.perf_counter() 23 | for _ in range_it: 24 | tomli.loads(data) 25 | return pyperf.perf_counter() - t0 26 | 27 | if __name__ == "__main__": 28 | runner = pyperf.Runner() 29 | runner.metadata['description'] = "Benchmark tomli.loads()" 30 | runner.bench_time_func('tomli_loads', bench_tomli_loads) 31 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_tornado_http/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_tornado_http" 3 | requires-python = ">=3.8" 4 | dependencies = [ 5 | "pyperf", 6 | "tornado", 7 | ] 8 | urls = {repository = "https://github.com/python/pyperformance"} 9 | dynamic = ["version"] 10 | 11 | [tool.pyperformance] 12 | name = "tornado_http" 13 | tags = "apps" 14 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_tornado_http/requirements.txt: -------------------------------------------------------------------------------- 1 | tornado==6.2 2 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_typing_runtime_protocols/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_typing_runtime_protocols" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "typing_runtime_protocols" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_unpack_sequence/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_unpack_sequence" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "unpack_sequence" 10 | -------------------------------------------------------------------------------- /pyperformance/data-files/benchmarks/bm_xml_etree/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "pyperformance_bm_xml_etree" 3 | requires-python = ">=3.8" 4 | dependencies = ["pyperf"] 5 | urls = {repository = "https://github.com/python/pyperformance"} 6 | dynamic = ["version"] 7 | 8 | [tool.pyperformance] 9 | name = "xml_etree" 10 | tags = "serialize" 11 | -------------------------------------------------------------------------------- /pyperformance/requirements/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.11 3 | # by
3 | # by the following command:
4 | #
5 | #    pip-compile --output-file=pyperformance/requirements/requirements.txt requirements.in
6 | #
7 | packaging==23.1
8 |     # via -r requirements.in
9 | psutil==5.9.5
10 |     # via
11 |     #   -r requirements.in
12 |     #   pyperf
13 | pyperf==2.9.0
14 |     # via -r requirements.in
15 | 
--------------------------------------------------------------------------------
/pyperformance/tests/__main__.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | from pyperformance import tests
4 | 
5 | 
6 | def load_tests(loader, standard_tests, pattern):
7 |     pkgtests = loader.discover(
8 |         start_dir=tests.TESTS_ROOT,
9 |         top_level_dir=tests.TESTS_ROOT,
10 |         pattern=pattern or 'test*',
11 |     )
12 |     standard_tests.addTests(pkgtests)
13 |     return standard_tests
14 | 
15 | 
16 | if __name__ == "__main__":
17 |     unittest.main()
18 | 
--------------------------------------------------------------------------------
/pyperformance/tests/data/MANIFEST:
--------------------------------------------------------------------------------
1 | [benchmarks]
2 | 
3 | name	metafile
4 | local_wheel	<local>
--------------------------------------------------------------------------------
/pyperformance/tests/data/bm_local_wheel/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pyperformance_bm_local_wheel"
3 | requires-python = ">=3.7"
4 | dependencies = ["pyperf"]
5 | urls = {repository = "https://github.com/python/pyperformance"}
6 | version = "1.0"
7 | 
8 | [tool.pyperformance]
9 | name = "local_wheel"
10 | 
--------------------------------------------------------------------------------
/pyperformance/tests/data/bm_local_wheel/requirements.txt:
--------------------------------------------------------------------------------
1 | this_is-1.0.2-py2.py3-none-any.whl#egg=this_is
2 | 
--------------------------------------------------------------------------------
/pyperformance/tests/data/bm_local_wheel/run_benchmark.py:
--------------------------------------------------------------------------------
1 | """
2 | A dummy benchmark that uses a local wheel.
3 | """
4 | 
5 | import pyperf
6 | 
7 | 
8 | def bench():
9 |     return 1.0
10 | 
11 | 
12 | if __name__ == "__main__":
13 |     runner = pyperf.Runner()
14 |     runner.metadata['description'] = "A dummy benchmark that has a local wheel dependency"
15 | 
16 |     args = runner.parse_args()
17 |     runner.bench_func('local_wheel', bench)
18 | 
--------------------------------------------------------------------------------
/pyperformance/tests/data/bm_local_wheel/this_is-1.0.2-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/python/pyperformance/ec9e2913f2169aa6e937c5e5a0b867aa76e68906/pyperformance/tests/data/bm_local_wheel/this_is-1.0.2-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/pyperformance/tests/data/find-pyperformance.py:
--------------------------------------------------------------------------------
1 | import sys
2 | 
3 | try:
4 |     import pyperformance
5 | except ModuleNotFoundError:
6 |     sys.exit(0)
7 | else:
8 |     print(pyperformance.PKG_ROOT)
9 | 
10 | # Make sure pyperformance.PKG_ROOT matches expectations.
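11 | # The dirname chain below walks up from this file's directory:
12 | # data/ -> tests/ -> pyperformance/ (the package) -> the repo checkout,
13 | # whose top-level pyproject.toml marks an editable install.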
14 | import os.path
15 | datadir = os.path.dirname(os.path.abspath(__file__))
16 | testsroot = os.path.dirname(datadir)
17 | pkgroot = os.path.dirname(testsroot)
18 | reporoot = os.path.realpath(os.path.dirname(pkgroot))
19 | marker = os.path.join(reporoot, 'pyproject.toml')
20 | if not os.path.exists(marker):
21 |     sys.exit(f'ERROR: pyperformance is not an editable install ({reporoot})')
22 | actual = os.path.realpath(os.path.abspath(pyperformance.PKG_ROOT))
23 | if actual != os.path.join(reporoot, 'pyperformance'):
24 |     print('ERROR: mismatch on pyperformance repo root:')
25 |     print(f'  actual: {actual}')
26 |     print(f'  expected: {os.path.join(reporoot, "pyperformance")}')
27 |     sys.exit(1)
--------------------------------------------------------------------------------
/pyperformance/tests/data/mem1.json:
--------------------------------------------------------------------------------
1 | {"benchmarks":[{"runs":[{"values":[8085504.0]}]}],"metadata":{"name":"call_simple","performance_version":"0.2","unit":"byte"},"version":"1.0"}
2 | 
--------------------------------------------------------------------------------
/pyperformance/tests/data/mem2.json:
--------------------------------------------------------------------------------
1 | {"benchmarks":[{"runs":[{"values":[8089600.0]}]}],"metadata":{"name":"call_simple","performance_version":"0.2","unit":"byte"},"version":"1.0"}
2 | 
--------------------------------------------------------------------------------
/pyperformance/tests/data/py3_performance03.json:
--------------------------------------------------------------------------------
1 | {"benchmarks":[{"runs":[{"values":[0.013479645,0.013512941]},{"values":[0.013684981,0.016157783]},{"values":[0.01363128,0.01363188]},{"values":[0.013653313,0.013630584]},{"values":[0.013273289,0.013267363]},{"values":[0.013293472,0.013285883]},{"values":[0.018995846,0.013574852]},{"values":[0.013909884,0.013909393]},{"values":[0.013637751,0.013639002]},{"values":[0.014413738,0.01436785]}]}],"metadata":{"name":"call_simple","performance_version":"0.3"},"version":"1.0"}
2 | 
--------------------------------------------------------------------------------
/pyperformance/tests/test_python.py:
--------------------------------------------------------------------------------
1 | import types
2 | import unittest
3 | 
4 | from pyperformance import _python
5 | 
6 | 
7 | class GetIDTests(unittest.TestCase):
8 | 
9 |     def _dummy_info(self):
10 |         info = types.SimpleNamespace(
11 |             sys=types.SimpleNamespace(
12 |                 executable='/a/b/c/bin/spam-python',
13 |                 version='3.8.10 (default, May 5 2021, 03:01:07) \n[GCC 7.5.0]',
14 |                 version_info=(3, 8, 10, 'final', 0),
15 |                 api_version=1013,
16 |                 implementation=types.SimpleNamespace(
17 |                     name='cpython',
18 |                     version=(3, 8, 10, 'final', 0),
19 |                 ),
20 |             ),
21 |             pyc_magic_number=b'U\r\r\n',
22 |         )
23 |         base_id = 'b14d92fd0e6f'
24 |         return info, base_id
25 | 
26 |     def test_no_prefix(self):
27 |         info, expected = self._dummy_info()
28 | 
29 |         pyid = _python.get_id(info)
30 | 
31 |         self.assertEqual(pyid, expected)
32 | 
33 |     def test_default_prefix(self):
34 |         info, expected = self._dummy_info()
35 |         expected = f'cpython3.8-{expected}'
36 | 
37 |         pyid = _python.get_id(info, prefix=True)
38 | 
39 |         self.assertEqual(pyid, expected)
40 | 
41 |     def test_given_prefix(self):
42 |         info, expected = self._dummy_info()
43 |         expected = f'spam-{expected}'
44 | 
45 |         pyid = _python.get_id(info, prefix='spam-')
46 | 
47 |         self.assertEqual(pyid, expected)
48 | 
--------------------------------------------------------------------------------
/requirements.in:
--------------------------------------------------------------------------------
1 | # When one of these dependencies is upgraded, the pyperformance major version
2 | # should be increased to respect semantic versioning. Comparing results
3 | # from two different pyperformance major versions is not reliable.
4 | #
5 | # To rebuild requirements.txt:
6 | #
7 | #   pip-compile --upgrade -o requirements.txt requirements.in
8 | 
9 | # pyperformance dependencies
10 | # --------------------------
11 | 
12 | pyperf
13 | 
14 | # for benchmark metadata:
15 | packaging
16 | tomli; python_version < '3.11'
17 | 
18 | 
19 | # Optional dependencies
20 | # ---------------------
21 | #
22 | # The list of optional dependencies is hardcoded in pyperformance/_venv.py
23 | 
24 | # XXX Do we still need this?
25 | psutil
26 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
pyperformance/requirements/requirements.txt
--------------------------------------------------------------------------------
/runtests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os.path
3 | import subprocess
4 | import sys
5 | 
6 | from dev import ensure_venv_ready
7 | 
8 | 
9 | def main():
10 |     venvroot, python = ensure_venv_ready(kind='tests')
11 |     if python != sys.executable:
12 |         # Now re-run using the venv.
13 |         os.execv(python, [python, *sys.argv])
14 |         #
15 | 
16 |     # Now run the tests.
17 |     proc = subprocess.run(
18 |         [sys.executable, '-u', '-m', 'pyperformance.tests'],
19 |         cwd=os.path.dirname(__file__) or None,
20 |         env=dict(
21 |             os.environ,
22 |             PYPERFORMANCE_TESTS_VENV=venvroot,
23 |         )
24 |     )
25 |     sys.exit(proc.returncode)
26 | 
27 | 
28 | if __name__ == "__main__":
29 |     main()
30 | 
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py3, pypy3, doc, pep8, mypy
3 | isolated_build = True
4 | 
5 | [testenv]
6 | commands = python runtests.py
7 | # don't override basepython to python3 here, to use pypy3 in testenv:pypy3
8 | 
9 | [testenv:py3]
10 | basepython = python3
11 | 
12 | [testenv:doc]
13 | basepython = python3
14 | deps=
15 |     sphinx
16 | allowlist_externals = make
17 | commands=
18 |     make -C doc clean html
19 | 
20 | [testenv:pep8]
21 | basepython = python3
22 | deps = flake8
23 | commands = flake8 pyperformance runtests.py setup.py
24 | 
25 | [flake8]
26 | # E501 line too long (88 > 79 characters)
27 | # E741 ambiguous variable name 'l' (don't modify benchmarks just for that)
28 | # W503 line break before binary operator
29 | ignore = E501,E741,W503
30 | 
31 | [testenv:mypy]
32 | basepython = python3
33 | deps=
34 |     mypy
35 |     tomli
36 | commands = mypy
37 | 
--------------------------------------------------------------------------------
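A note on the pattern shared by the run_benchmark.py files above: the timed function receives a `loops` count from pyperf, performs its setup outside the timed region, runs the workload `loops` times between two `pyperf.perf_counter()` calls, and returns the total elapsed seconds to `runner.bench_time_func()`, which converts the result into per-loop timings. The following is a minimal, self-contained sketch of that protocol; the `json_loads` workload and payload are placeholders for illustration, not benchmarks from this repo.

import json

import pyperf

# Placeholder payload; the real benchmarks read checked-in data files instead.
PAYLOAD = json.dumps({"values": list(range(1000))})


def bench_json_loads(loops: int) -> float:
    # Hoist lookups and build the range object before timing starts,
    # mirroring bm_tomli_loads/run_benchmark.py.
    loads = json.loads
    range_it = range(loops)
    t0 = pyperf.perf_counter()
    for _ in range_it:
        loads(PAYLOAD)
    return pyperf.perf_counter() - t0


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.metadata['description'] = "Sketch: json.loads() timed via the pyperf loops protocol"
    runner.bench_time_func('json_loads', bench_json_loads)

Run directly, such a script lets pyperf spawn worker processes, calibrate the loop count, and report the mean and standard deviation per loop iteration.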