├── .gitignore ├── FoMA_Eval.py ├── LICENSE ├── README.md ├── assets ├── FormalMATH.pdf ├── domain-pie.png ├── logo.png ├── performance_compare_v2.png ├── pipeline.png └── star-history-202556.png ├── evaluate_results.py ├── generate_answers.py ├── requirements.txt └── verify_answers.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | # Ruff stuff: 171 | .ruff_cache/ 172 | 173 | # PyPI configuration file 174 | .pypirc 175 | -------------------------------------------------------------------------------- /FoMA_Eval.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | from generate_answers import process_data 5 | from verify_answers import verify_answers 6 | from evaluate_results import monte_carlo_evaluate 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser(description="Pipeline for Lean theorem proving (generation, verification, and evaluation)") 10 | 11 | # File paths 12 | parser.add_argument("--input_file", default=None, 13 | help="Path to the initial input file") 14 | parser.add_argument("--generated_file", default=None, 15 | help="Path to the output file containing generated answers") 16 | parser.add_argument("--verification_file", default=None, 17 | help="Path to the output file containing verification results") 18 | parser.add_argument("--evaluation_file", default=None, 19 | help="Path to the output file containing evaluation results") 20 | 21 | # Task control 22 | parser.add_argument("--auto_dl", action=argparse.BooleanOptionalAction, default=True, 23 | help="Automatically download the dataset (disable with --no-auto_dl)") 24 | parser.add_argument("--generate", action="store_true", default=False, 25 | help="Enable generation of answers") 26 | parser.add_argument("--verify", action="store_true", default=False, 27 | help="Enable verification of generated answers") 28 | parser.add_argument("--evaluate", action="store_true", default=False, 29 | help="Enable evaluation of verification results") 30 | parser.add_argument("--datasets", default="FormalMATH-All", 31 | help="Choose dataset version: FormalMATH-All or FormalMATH-Lite") 32 | 33 | # Generation parameters 34 | parser.add_argument("--model", default=None, 35 | help="Path to the model used for generating answers.") 36 | parser.add_argument("--n", type=int, default=200, 37 | help="Number of answers to generate per vLLM sampling batch.") 38 | parser.add_argument("--nums_answer", type=int, default=3200, 39 | help="Number of answers to generate per question.") 40 | 41 | # Verification parameters 42 | parser.add_argument("--repl_path", default="./repl", 43 | help="Path to the Lean REPL used for verification") 44 | parser.add_argument("--lean_env_path", default="./repl/test/Mathlib", 45 | help="Path to the Lean environment used for verification") 46 | parser.add_argument("--num_batches", default=32, type=int, 47 | help="Number of parallel batches for verification") 48 | parser.add_argument("--session_timeout", default=600, type=int, 49 | help="Timeout for interactive sessions in seconds") 50 | parser.add_argument("--expect_timeout", default=120, type=int, 51 | help="Timeout for the expect command in seconds") 52 | 53 | # Evaluation parameters 54 | parser.add_argument("--n_simulations", default=50, type=int, 55 | help="Number of Monte Carlo simulations") 56 | parser.add_argument("--n_processes", default=50, type=int, 57 | help="Number of parallel processes for Monte Carlo simulation") 58 | parser.add_argument("--custom_sample_sizes", default=None, type=str, 59 | help="Custom sampling sizes as a comma-separated list (e.g., '1,5,10,50,100')") 60 | 61 | return parser.parse_args() 62 | 63 | def set_up_logging(level=logging.INFO): 64 | """Set up logging with the specified level.""" 65 | logging.basicConfig(level=level, format='%(asctime)s - %(levelname)s - %(message)s') 66 | 67 | def main(): 68 | args = parse_args() 69 | set_up_logging() 70 | # Ensure at least one task is selected 71 | if not (args.generate or args.verify or args.evaluate): 72 | print("Please select at least one task (--generate, --verify, or --evaluate)") 73 | return 74 | 75 | # Step 0: Download the dataset 76 | if args.auto_dl: 77 | from datasets import load_dataset 78 | 79 | if args.datasets == "FormalMATH-All": 80 | input_dataset_id = "SphereLab/FormalMATH-All" 81 | elif args.datasets == "FormalMATH-Lite": 82 | input_dataset_id = "SphereLab/FormalMATH-Lite" 83 | else: 84 | raise ValueError(f"Unknown dataset: {args.datasets}") 85 | 86 | input_dataset_branch = "main" 87 | local_dataset_path = "./data/" 88 | 89 | os.makedirs(local_dataset_path, exist_ok=True) 90 | 91 | try: 92 | args.input_file = os.path.join(local_dataset_path, "FormalMATH.json") 93 | args.generated_file = os.path.join(local_dataset_path, "FormalMATH_generated.json") 94 | args.verification_file = os.path.join(local_dataset_path, "FormalMATH_verification.json") 95 | args.evaluation_file = os.path.join(local_dataset_path, "FormalMATH_evaluation.json") 96 | 97 | ds = load_dataset(input_dataset_id, split="train", revision=input_dataset_branch) 98 | ds.to_json(args.input_file) 99 | print(f"Dataset has been saved to: {local_dataset_path}") 100 | 101 | except Exception as e: 102 | print(f"Error occurred while downloading dataset: {e}") 103 | 104 | # Step 1: Generate answers 105 | if args.generate: 106 | try: 107 | print(f"Generating answers using model {args.model}") 108 | process_data( 109 | model_path=args.model, 110 | input_file=args.input_file, 111 | output_file=args.generated_file, 112 | batch_size=args.n, 113 | num_answers=args.nums_answer 114 | ) 115 | print(f"Answers have been generated and saved to {args.generated_file}") 116 | except Exception as e: 117 | logging.error(f"Error during answer generation: {e}") 118 | return 119 | 120 | # Step 2: Verify answers 121 | if args.verify: 122 | try: 123 | print("Starting verification of answers") 124 | # The generated file is the verification input 125 | verification_input = args.generated_file 126 | # Check if the input file exists; if not, skip verification instead of crashing 127 | if not os.path.exists(verification_input): 128 | print(f"Verification input file {verification_input} does not exist. Please check the path or generate answers first.") 129 | if not args.evaluate: 130 | return 131 | print("Skipping verification and proceeding to evaluation") 132 | else: 133 | verify_answers( 134 | input_file=verification_input, 135 | output_file=args.verification_file, 136 | repl_path=args.repl_path, 137 | lean_env_path=args.lean_env_path, 138 | num_batches=args.num_batches, 139 | session_timeout=args.session_timeout, 140 | expect_timeout=args.expect_timeout 141 | ) 142 | print(f"Verification complete. Results have been saved to {args.verification_file}") 143 | except Exception as e: 144 | logging.error(f"Error during answer verification: {e}") 145 | return 146 | 147 | # Step 3: Evaluate verification results 148 | if args.evaluate: 149 | try: 150 | print("Starting evaluation of verification results") 151 | # Check if the verification result file exists 152 | if not os.path.exists(args.verification_file): 153 | print(f"Verification result file {args.verification_file} does not exist. Please check the path or verify answers first.") 154 | return 155 | 156 | # Parse custom sampling sizes 157 | sample_sizes = None 158 | if args.custom_sample_sizes: 159 | sample_sizes = [int(size) for size in args.custom_sample_sizes.split(',')] 160 | 161 | monte_carlo_evaluate( 162 | input_filepath=args.verification_file, 163 | output_filepath=args.evaluation_file, 164 | sample_sizes=sample_sizes, 165 | n_simulations=args.n_simulations, 166 | n_processes=args.n_processes 167 | ) 168 | print(f"Evaluation complete. Results have been saved to {args.evaluation_file}") 169 | except Exception as e: 170 | logging.error(f"Error during evaluation: {e}") 171 | return 172 | 173 | print(f"All requested tasks have been completed successfully! You can check your success rate in {args.evaluation_file}") 174 | 175 | if __name__ == "__main__": 176 | main() -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types.
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FormalMATH 2 | 3 | > **[Arxiv] FormalMATH: Benchmarking Formal Mathematical Reasoning of Large Language Models**. 4 | [Paper Link](https://arxiv.org/abs/2505.02735) 5 |

6 | 7 | ### Open-Source Links 8 | 9 | 10 | | Datasets | Paper | Project Page | 11 | |:-----------------:|:----------------:|:--------------:| 12 | |[![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/SphereLab)|[![arXiv](https://img.shields.io/badge/arXiv-2505.02735-b31b1b.svg)](https://arxiv.org/abs/2505.02735)|Project Page| 13 | ## 📊 Introduction 14 | FormalMATH is a large-scale benchmark dataset for formal mathematical reasoning, consisting of 5,560 formally verified mathematical statements in Lean 4 that span a wide range of domains and difficulty levels. It provides a comprehensive, reliable testbed for evaluating AI theorem-proving systems, and it was built with a human-in-the-loop pipeline that combines language models and automated checking to formalize mathematical statements efficiently. 15 |
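The benchmark itself is hosted on the Hugging Face Hub under the dataset IDs that `FoMA_Eval.py` downloads from (`SphereLab/FormalMATH-All` and `SphereLab/FormalMATH-Lite`). As a minimal standalone sketch of direct access — assuming only the `datasets` package pinned in `requirements.txt`:

```python
# Minimal sketch: pull FormalMATH straight from the Hugging Face Hub.
# The dataset IDs and the "autoformalization" field are the ones the
# pipeline scripts in this repo rely on; other fields may also be present.
from datasets import load_dataset

ds = load_dataset("SphereLab/FormalMATH-Lite", split="train")
print(len(ds))                      # number of formal statements
print(ds[0]["autoformalization"])   # a Lean 4 statement to be proved
```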

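For orientation, these are the record fields that the pipeline scripts in this repository actually read. This is inferred from the code (`generate_answers.py`, `verify_answers.py`), not an authoritative schema, and the dataset may carry additional columns:

```python
# Fields consumed by the pipeline scripts (inferred from the code; the
# values shown are illustrative placeholders, not real dataset entries).
record = {
    "source": "...",             # provenance tag; part of the checkpoint de-duplication key
    "refined_statement": "...",  # informal statement; the other half of the checkpoint key
    "theorem_names": "...",      # key under which verification results are stored
    "autoformalization": "...",  # the Lean 4 statement the prover must complete
}
```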
16 | 17 | ## 🗼 Pipeline of FormalMATH Construction 18 | The FormalMATH pipeline combines fine-tuned large language models with a best-of-N sampling approach to automatically generate formal mathematical statements. It then applies a multi-step automated validation process, including compiler checking, semantic verification by multiple LLMs, logical filtering using a pre-trained prover, and final human review to ensure correctness. 19 |

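The same best-of-N idea reappears at evaluation time: `generate_answers.py` samples many candidate proofs per statement with vLLM, `n` per call, until `nums_answer` completions exist. A minimal sketch of that sampling configuration, mirroring the parameters hard-coded in the script (the model path and prompt are placeholders):

```python
# Best-of-N proof sampling with vLLM, as configured in generate_answers.py
# (temperature=1.0, top_p=0.95, max_tokens=2048, n candidates per call).
from vllm import LLM, SamplingParams

llm = LLM(model="your_model_path", max_model_len=8192, trust_remote_code=True)
params = SamplingParams(temperature=1.0, top_p=0.95, max_tokens=2048, n=32)
outputs = llm.generate(["theorem demo : 1 + 1 = 2 := by"], params)
candidates = [o.text for o in outputs[0].outputs]  # 32 candidate proof completions
```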
20 | 21 | ## 📰 News 22 | * [5/04/2025] **Datasets open-sourced.** For the specific steps, refer to Get Started below. 23 | 24 | ## 🏆 Prover Performance 25 | Performance comparison of theorem-prover LLMs on **FormalMATH-All**. 26 | 27 | | Method | Sampling budget | Pass@K (%) | 28 | | --------- | :-------: | :-------: | 29 | | DeepSeek-V2-671B | $32$ | $28.31$ | 30 | | DeepSeek-V2-7B | $32$ | $22.41$ | 31 | | Kimina-Prover-7B | $32$ | $16.46$ | 32 | | STP | $32$ | $13.87$ | 33 | | Goedel-Prover | $32$ | $13.53$ | 34 | | DeepSeek-V1.5-RL | $32$ | $10.18$ | 35 | | DeepSeek-V1.5-SFT | $32$ | $8.97$ | 36 | | InternLM-Prover | $32$ | $11.13$ | 37 | | BFS-Prover | $32$ | $1.16$ | 38 | 39 | Performance comparison of theorem-prover LLMs on **FormalMATH-Lite**. 40 | 41 | **Best-First Tree Search Methods** 42 | | Method | Sampling budget | Pass@K (%) | 43 | | --------- | :-------: | :-------: | 44 | | BFS(DeepSeek-V1.5-RL) | $32\times32\times100$ | $17.41$ | 45 | | BFS(InternLM-V2.5) | $32\times32\times100$ | $25.65$ | 46 | | BFS(BFS-Prover) | $32\times32\times100$ | $45.88$ | 47 | 48 | **Single-Pass Generation Methods** 49 | | Method | Sampling budget | Pass@K (%) | 50 | | --------- | :-------: | :-------: | 51 | | Kimina-Prover-7B | $3200$ | $48.94$ | 52 | | STP | $3200$ | $53.17$ | 53 | | DeepSeek-V1.5-SFT | $3200$ | $46.82$ | 54 | | DeepSeek-V1.5-RL | $3200$ | $50.35$ | 55 | | Goedel-Prover | $3200$ | $49.41$ | 56 | 57 | 58 |
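The Pass@K figures above are estimated empirically rather than computed in closed form: `evaluate_results.py` repeatedly draws K of the stored verification outcomes for each statement, counts the statement as solved if any sampled attempt verified, and averages over many Monte Carlo runs. A condensed sketch of a single run (the input path is illustrative; the JSON layout is the one `verify_answers.py` produces):

```python
import json
import random

# One Monte Carlo estimate of Pass@K, following the logic in evaluate_results.py:
# a theorem counts as solved if any of K randomly sampled attempts verified.
def pass_at_k(results, k):
    solved, applicable = 0, 0
    for attempts in results.values():   # theorem -> list of {"answer": ..., "answer_bool": ...}
        if len(attempts) < k:
            continue                    # skip theorems with fewer than k stored attempts
        applicable += 1
        if any(a["answer_bool"] for a in random.sample(attempts, k)):
            solved += 1
    return solved / applicable if applicable else 0.0

with open("data/FormalMATH_verification.json") as f:
    print(pass_at_k(json.load(f), k=32))
```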
59 | ## 🔧 Installation 60 | ### Step 1: Install the Evaluation Environment on the Host Machine 61 | - Python 3 62 | - PyTorch 63 | - Install the required dependency packages 64 | ```bash 65 | pip install -r requirements.txt 66 | ``` 67 | ### Step 2: Install the Lean 4 & REPL Environment on the Host Machine 68 | Lean installation 69 | ``` 70 | cd ~ 71 | curl https://raw.githubusercontent.com/leanprover/elan/master/elan-init.sh -sSf | sh 72 | source $HOME/.elan/env 73 | ``` 74 | 75 | REPL installation 76 | ``` 77 | git clone https://github.com/leanprover-community/repl.git && cd repl && git checkout adbbfcb9d4e61c12db96c45d227de92f21cc17dd 78 | lake build 79 | cd .. 80 | ``` 81 | 82 | Mathlib installation 83 | ``` 84 | cd ~/repl/test/Mathlib 85 | bash test.sh 86 | ``` 87 | 88 | 89 | ## 🏃 Get Started 90 | ### 📌 Core Configuration Parameters 91 | 92 | Please make sure you have correctly configured the following key parameters for generating answers, verifying answers, and evaluating results. 93 | | Parameter | Description | Default | 94 | | --------- | ----------- | ------- | 95 | | `--auto_dl` | Automatically download the dataset. | `True` | 96 | | `--datasets` | Choose dataset version: FormalMATH-All or FormalMATH-Lite. | `FormalMATH-All` | 97 | | `--generate` | Enable generation of answers. | `False` | 98 | | `--verify` | Enable verification of generated answers. | `False` | 99 | | `--evaluate` | Enable evaluation of verification results. | `False` | 100 | | `--input_file` | Path to the input file containing the questions. | `None` | 101 | | `--generated_file` | Path to the output file for generated answers. | `None` | 102 | | `--verification_file` | Path to the output file for verification results. | `None` | 103 | | `--evaluation_file` | Path to the output file for evaluation results. | `None` | 104 | | `--model` | Path to the model used for generating answers. | `None` | 105 | | `--repl_path` | Path to the REPL environment. | `./repl` | 106 | | `--lean_env_path` | Path to the Mathlib4 environment. | `./repl/test/Mathlib` | 107 | | `--n` | Number of answers to generate per vLLM sampling batch. | `200` | 108 | | `--nums_answer` | Number of answers to generate per question. | `3200` | 109 | | `--num_batches` | Number of parallel batches used to verify answers. | `32` | 110 | 111 | For more parameter settings, please refer to `FoMA_Eval.py`. 112 | 113 | 114 | Note 1: If `--auto_dl` is enabled (the default), the dataset is automatically downloaded to `./data`, and the paths for `--input_file`, `--generated_file`, `--verification_file`, and `--evaluation_file` are preset automatically. If you want to customize these paths, pass `--no-auto_dl`. 115 | 116 | Note 2: If you encounter the error `"RuntimeError: Aborted due to the lack of CPU swap space. Please increase the swap space to avoid this error."`, try reducing the parameter `--n`. 117 | 118 | ### 📌 Quick Evaluation 119 | If you want to obtain a model's results on FormalMATH directly, we provide a one-shot evaluation tool, `FoMA_Eval.py`. Please run the following: 120 | ```bash 121 | # If you want to automatically download the dataset FormalMATH-All 122 | python FoMA_Eval.py --auto_dl --generate --verify --evaluate \ 123 | --datasets FormalMATH-All \ 124 | --model your_model_path \ 125 | --n 32 \ 126 | --nums_answer 32 \ 127 | --num_batches 1 128 | 129 | # If you want to customize file paths 130 | python FoMA_Eval.py --no-auto_dl --generate --verify --evaluate \ 131 | --input_file your_datasets_path \ 132 | --generated_file your_generated_file_path \ 133 | --verification_file your_verification_file_path \ 134 | --evaluation_file your_evaluation_file_path \ 135 | --model your_model_path \ 136 | --repl_path your_repl_path \ 137 | --lean_env_path your_mathlib_path \ 138 | --n 200 \ 139 | --nums_answer 3200 \ 140 | --num_batches 128 141 | ```
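For programmatic use, the three stages that `FoMA_Eval.py` drives can also be called directly from Python. The sketch below condenses its control flow using the same public functions; all paths and the model location are placeholders:

```python
# Condensed version of FoMA_Eval.py's pipeline, calling the same functions.
from generate_answers import process_data
from verify_answers import verify_answers
from evaluate_results import monte_carlo_evaluate

process_data(model_path="your_model_path",
             input_file="data/FormalMATH.json",
             output_file="data/FormalMATH_generated.json",
             batch_size=32,        # answers per vLLM call (the --n flag)
             num_answers=32)       # total answers per statement (--nums_answer)
verify_answers(input_file="data/FormalMATH_generated.json",
               output_file="data/FormalMATH_verification.json",
               repl_path="./repl",
               lean_env_path="./repl/test/Mathlib",
               num_batches=32)
monte_carlo_evaluate(input_filepath="data/FormalMATH_verification.json",
                     output_filepath="data/FormalMATH_evaluation.json")
```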
142 | ### 📌 Detailed Evaluation 143 | `FoMA_Eval.py` can perform the generation, verification, and evaluation tasks independently, saving intermediate results to suit different downstream needs. Please refer to the following instructions for details: 144 | 145 | - If you only want to generate answers, please run the following: 146 | ```bash 147 | python generate_answers.py \ 148 | --input_file your_datasets_path \ 149 | --generated_file your_generated_file_path \ 150 | --model your_model_path \ 151 | --n 200 \ 152 | --nums_answer 3200 153 | ``` 154 | - If you only want to verify the generated answers, please run the following: 155 | ```bash 156 | python verify_answers.py \ 157 | --input_file your_generated_file_path \ 158 | --output_file your_verification_file_path \ 159 | --num_batches 128 \ 160 | --expect_timeout 120 161 | ``` 162 | - If you only want to evaluate the verification results, please run the following: 163 | ```bash 164 | python evaluate_results.py \ 165 | --input_file your_verification_file_path \ 166 | --output_file your_evaluation_file_path 167 | ``` 168 | ## 📋 Citation 169 | If you find our project interesting, please cite us 😊 170 | ```bibtex 171 | @misc{yu2025formalmathbenchmarkingformalmathematical, 172 | title={FormalMATH: Benchmarking Formal Mathematical Reasoning of Large Language Models}, 173 | author={Zhouliang Yu and Ruotian Peng and Keyi Ding and Yizhe Li and Zhongyuan Peng and Minghao Liu and Yifan Zhang and Zheng Yuan and Huajian Xin and Wenhao Huang and Yandong Wen and Ge Zhang and Weiyang Liu}, 174 | year={2025}, 175 | eprint={2505.02735}, 176 | archivePrefix={arXiv}, 177 | primaryClass={cs.AI}, 178 | url={https://arxiv.org/abs/2505.02735}, 179 | } 180 | ``` 181 | ## 📈 Star Rising 182 | [![Star History Chart](https://api.star-history.com/svg?repos=Sphere-AI-Lab/FormalMATH-Bench&type=Date)](https://www.star-history.com/#Sphere-AI-Lab/FormalMATH-Bench&Date) 183 | -------------------------------------------------------------------------------- /assets/FormalMATH.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sphere-AI-Lab/FormalMATH-Bench/317f2e40936f8588bb17d7cf6213e7ece039825c/assets/FormalMATH.pdf -------------------------------------------------------------------------------- /assets/domain-pie.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sphere-AI-Lab/FormalMATH-Bench/317f2e40936f8588bb17d7cf6213e7ece039825c/assets/domain-pie.png -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sphere-AI-Lab/FormalMATH-Bench/317f2e40936f8588bb17d7cf6213e7ece039825c/assets/logo.png -------------------------------------------------------------------------------- /assets/performance_compare_v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sphere-AI-Lab/FormalMATH-Bench/317f2e40936f8588bb17d7cf6213e7ece039825c/assets/performance_compare_v2.png -------------------------------------------------------------------------------- /assets/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Sphere-AI-Lab/FormalMATH-Bench/317f2e40936f8588bb17d7cf6213e7ece039825c/assets/pipeline.png -------------------------------------------------------------------------------- /assets/star-history-202556.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/Sphere-AI-Lab/FormalMATH-Bench/317f2e40936f8588bb17d7cf6213e7ece039825c/assets/star-history-202556.png -------------------------------------------------------------------------------- /evaluate_results.py: -------------------------------------------------------------------------------- 1 | import json 2 | import random 3 | import argparse 4 | from multiprocessing import Pool 5 | from tqdm import tqdm 6 | 7 | def check_correct(answers, sample_size): 8 | # Randomly sample a subset of answers and check if any of them has 'answer_bool' set to True 9 | sampled_answers = random.sample(answers, sample_size) 10 | return any(answer['answer_bool'] for answer in sampled_answers) 11 | 12 | def simulate_single(args): 13 | data, sample_sizes = args 14 | all_theorems = list(data.keys()) 15 | correct_counts = {size: 0 for size in sample_sizes} 16 | applicable_counts = {size: 0 for size in sample_sizes} 17 | 18 | for theorem in all_theorems: 19 | answers = data[theorem] 20 | num_answers = len(answers) 21 | 22 | for size in sample_sizes: 23 | # Skip sample sizes larger than the number of available answers 24 | if size > num_answers: 25 | continue 26 | applicable_counts[size] += 1 27 | if check_correct(answers, size): 28 | correct_counts[size] += 1 29 | 30 | # Calculate the success rate for each sample size 31 | aggregate_rates = {} 32 | for size in sample_sizes: 33 | rate = correct_counts[size] / applicable_counts[size] if applicable_counts[size] > 0 else 0 34 | aggregate_rates[str(size)] = rate 35 | print(f"{size},{correct_counts[size]}")  # log the sample size and raw correct count 36 | return aggregate_rates 37 | 38 | def monte_carlo_evaluate( 39 | input_filepath, 40 | output_filepath, 41 | sample_sizes=None, 42 | n_simulations=50, 43 | n_processes=50 44 | ): 45 | """ 46 | Evaluate the verification results using Monte Carlo simulation. 47 | 48 | Args: 49 | input_filepath (str): Path to the verification results file 50 | output_filepath (str): Path to save the evaluation results 51 | sample_sizes (list, optional): List of sample sizes to evaluate. Defaults to None. 52 | n_simulations (int, optional): Number of Monte Carlo simulations. Defaults to 50. 53 | n_processes (int, optional): Number of processes for parallel computation. Defaults to 50.
54 | """ 55 | # Default sample sizes if not provided 56 | if sample_sizes is None: 57 | sample_sizes = sorted(list(range(1, 3200, 5)) + [32, 64, 128, 328, 648, 1024, 2048, 3200]) 58 | 59 | # Load input data file 60 | with open(input_filepath, 'r', encoding='utf-8') as f: 61 | data = json.load(f) 62 | 63 | aggregate_results = {} 64 | 65 | # Perform Monte Carlo simulation using multiprocessing 66 | with Pool(processes=n_processes) as pool: 67 | tasks = [(data, sample_sizes) for _ in range(n_simulations)] 68 | results = list(tqdm(pool.imap(simulate_single, tasks), total=n_simulations, desc="Monte Carlo in process")) 69 | 70 | # Aggregate results from each simulation run 71 | for sim, result in enumerate(results, start=1): 72 | aggregate_key = f"Aggregate_{sim}" 73 | aggregate_results[aggregate_key] = result 74 | 75 | # Save results to the output file 76 | with open(output_filepath, 'w', encoding='utf-8') as f: 77 | json.dump(aggregate_results, f, ensure_ascii=False, indent=4) 78 | 79 | print(f"\nMonte Carlo simulation finished, results saved to {output_filepath}") 80 | return aggregate_results 81 | 82 | def parse_args(): 83 | parser = argparse.ArgumentParser(description="Evaluate theorem proof verification results") 84 | 85 | # File paths 86 | parser.add_argument("--input_file", default="/workspace/ky_ding/math/verify/0411/verified_stp_3200.json", 87 | help="Path to verification results file") 88 | parser.add_argument("--output_file", default="/workspace/ky_ding/math/verify/0411/verified_stp_3200_success_rate_0414.json", 89 | help="Path to evaluation results file") 90 | 91 | # Evaluation parameters 92 | parser.add_argument("--n_simulations", default=50, type=int, 93 | help="Number of Monte Carlo simulations") 94 | parser.add_argument("--n_processes", default=50, type=int, 95 | help="Number of processes for Monte Carlo simulation") 96 | parser.add_argument("--custom_sample_sizes", default=None, type=str, 97 | help="Comma-separated list of custom sample sizes (e.g., '1,5,10,50,100')") 98 | 99 | return parser.parse_args() 100 | 101 | def main(): 102 | args = parse_args() 103 | 104 | # Parse custom sample sizes if provided 105 | sample_sizes = None 106 | if args.custom_sample_sizes: 107 | sample_sizes = [int(size) for size in args.custom_sample_sizes.split(',')] 108 | 109 | monte_carlo_evaluate( 110 | input_filepath=args.input_file, 111 | output_filepath=args.output_file, 112 | sample_sizes=sample_sizes, 113 | n_simulations=args.n_simulations, 114 | n_processes=args.n_processes 115 | ) 116 | 117 | if __name__ == "__main__": 118 | main() -------------------------------------------------------------------------------- /generate_answers.py: -------------------------------------------------------------------------------- 1 | import re 2 | import json 3 | import torch 4 | from transformers import AutoTokenizer 5 | from vllm import LLM, SamplingParams 6 | import random 7 | from tqdm import tqdm 8 | import os 9 | import argparse 10 | import math 11 | from multiprocessing import Pool, cpu_count, Manager 12 | from pathlib import Path 13 | import jsonlines 14 | def init_worker(model_path, gpu_id): 15 | """Initialize worker process, set GPU environment, and initialize the model""" 16 | os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) 17 | print(f"Process {os.getpid()} using GPU {gpu_id}") 18 | 19 | global model 20 | model = LLM( 21 | model=model_path, 22 | max_num_batched_tokens=8192, 23 | max_model_len=8192, 24 | seed=1, 25 | trust_remote_code=True, 26 | tensor_parallel_size=1 27 | ) 28 | 29 | def 
process_single_item(item, sampling_params, num_batches): 30 | """Process a single data item""" 31 | global model 32 | item['autoformalization'] = "\nComplete the following Lean 4 code:\n```lean4\n"+item['autoformalization'] 33 | prompt = item['autoformalization'] 34 | try: 35 | all_answers = [] 36 | for _ in tqdm(range(num_batches), desc=f"Processing item {item.get('source', 'unknown')}", leave=False): 37 | # Generate batch answers 38 | model_outputs = model.generate( 39 | [prompt], # Only pass one prompt 40 | sampling_params, 41 | use_tqdm=False 42 | ) 43 | batch_answers = [output.text for output in model_outputs[0].outputs] 44 | all_answers.extend(batch_answers) 45 | 46 | # Update item 47 | item['answers'] = all_answers 48 | item['autoformalization'] = prompt 49 | return item 50 | 51 | except Exception as e: 52 | print(f"Error processing item: {str(e)}") 53 | item['answers'] = [] 54 | item['error'] = str(e) 55 | return item 56 | 57 | def load_checkpoint(checkpoint_file): 58 | """Load checkpoint file""" 59 | try: 60 | with open(checkpoint_file, 'r') as file: 61 | return json.load(file) 62 | except (FileNotFoundError, json.JSONDecodeError): 63 | return [] 64 | 65 | def get_processed_items(results): 66 | """Get set of identifiers for processed items""" 67 | return {(item.get('source', ''), item.get('refined_statement', '')) for item in results} 68 | 69 | def process_batch(args): 70 | """Process a batch of data""" 71 | start_idx, end_idx, data, sampling_params, process_id, num_batches, checkpoint_dir = args 72 | 73 | # Create a unique checkpoint file for each process 74 | checkpoint_file = os.path.join(checkpoint_dir, f'checkpoint_process_{process_id}.json') 75 | batch_results = [] 76 | 77 | # Load this process's checkpoint 78 | existing_results = load_checkpoint(checkpoint_file) 79 | processed_items = get_processed_items(existing_results) 80 | 81 | for i in tqdm(range(start_idx, end_idx), desc=f"Process {os.getpid()} progress"): 82 | item = data[i] 83 | # Check if already processed 84 | if (item.get('source', ''), item.get('refined_statement', '')) in processed_items: 85 | continue 86 | 87 | result = process_single_item(item, sampling_params, num_batches) 88 | if result: 89 | batch_results.append(result) 90 | 91 | # Periodically save checkpoint 92 | if len(batch_results) % 10 == 0: # Save every 10 items 93 | existing_results.extend(batch_results) 94 | with open(checkpoint_file, 'w') as f: 95 | json.dump(existing_results, f, ensure_ascii=False, indent=2) 96 | batch_results = [] # Clear saved results 97 | 98 | # Save remaining results 99 | if batch_results: 100 | existing_results.extend(batch_results) 101 | with open(checkpoint_file, 'w') as f: 102 | json.dump(existing_results, f, ensure_ascii=False, indent=2) 103 | 104 | return checkpoint_file 105 | 106 | def merge_checkpoints(checkpoint_files, output_file): 107 | """Merge results from all checkpoint files""" 108 | all_results = [] 109 | for checkpoint_file in checkpoint_files: 110 | if os.path.exists(checkpoint_file): 111 | results = load_checkpoint(checkpoint_file) 112 | all_results.extend(results) 113 | # Optionally delete temporary checkpoint files 114 | # os.remove(checkpoint_file) 115 | 116 | # Save merged results 117 | with open(output_file, 'w') as f: 118 | json.dump(all_results, f, ensure_ascii=False, indent=2) 119 | 120 | return all_results 121 | 122 | def process_data( 123 | model_path, 124 | input_file, 125 | output_file, 126 | api_port=8012, # Not used but kept for compatibility 127 | num_processes=96, # Not used but kept for 
compatibility 128 | batch_size=200, # This will be used as 'n' (answers per batch) 129 | save_interval=16, # Not used but kept for compatibility 130 | resume=True, # Will be handled via checkpoint mechanism 131 | mode=None, # Not used but kept for compatibility 132 | num_answers=3200 # This will be used as 'nums_answer' (total answers) 133 | ): 134 | """ 135 | Process data using vLLM to generate answers. 136 | This function provides compatibility with the original pipeline interface. 137 | 138 | Args: 139 | model_path (str): Path to the model 140 | input_file (str): Path to input JSON file 141 | output_file (str): Path to output JSON file 142 | api_port (int): Not used with vLLM, kept for compatibility 143 | num_processes (int): Not used with vLLM, kept for compatibility 144 | batch_size (int): Used as 'n' - number of answers per batch 145 | save_interval (int): Not used with vLLM, kept for compatibility 146 | resume (bool): Will use checkpoint mechanism 147 | mode (str): Not used with vLLM, kept for compatibility 148 | num_answers (int): Total number of answers to generate per theorem 149 | 150 | Returns: 151 | list: The processed data 152 | """ 153 | # Setup checkpoint directory 154 | current_directory = os.getcwd() 155 | checkpoint_dir = os.path.join(current_directory, 'checkpoint_mp') 156 | os.makedirs(checkpoint_dir, exist_ok=True) 157 | 158 | # Read data 159 | print(f"Reading data from {input_file}...") 160 | data = [] 161 | with jsonlines.open(input_file) as reader: 162 | for obj in reader: 163 | data.append(obj) 164 | 165 | # Calculate num_batches 166 | n = batch_size # Use batch_size as 'n' 167 | nums_answer = num_answers 168 | num_batches = math.ceil(nums_answer / n) 169 | 170 | # Set sampling parameters 171 | sampling_params = SamplingParams( 172 | temperature=1.0, 173 | max_tokens=2048, 174 | top_p=0.95, 175 | n=n, 176 | ) 177 | 178 | # Get available GPUs 179 | available_gpus = os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") 180 | 181 | if not available_gpus[0]: 182 | import torch 183 | available_gpus = list(range(torch.cuda.device_count())) 184 | else: 185 | available_gpus = [int(gpu) for gpu in available_gpus] 186 | 187 | num_gpus = len(available_gpus) 188 | if num_gpus == 0: 189 | raise RuntimeError("No available GPUs") 190 | 191 | print(f"Using {num_gpus} GPUs: {available_gpus}") 192 | 193 | # Calculate range of data for each process 194 | batch_size_per_gpu = len(data) // num_gpus 195 | if batch_size_per_gpu == 0: 196 | batch_size_per_gpu = 1 197 | num_gpus = len(data) 198 | 199 | # Prepare arguments for the process pool 200 | pool_args = [] 201 | for i in range(num_gpus): 202 | start_idx = i * batch_size_per_gpu 203 | end_idx = start_idx + batch_size_per_gpu if i < num_gpus - 1 else len(data) 204 | pool_args.append((start_idx, end_idx, data, sampling_params, i, num_batches, checkpoint_dir)) 205 | 206 | # Create process pool and assign tasks 207 | pools = [] 208 | tasks = [] 209 | 210 | for gpu_id in available_gpus[:num_gpus]: 211 | 212 | pool = Pool( 213 | processes=1, 214 | initializer=init_worker, 215 | initargs=(model_path, gpu_id) 216 | ) 217 | pools.append(pool) 218 | 219 | task = pool.apply_async(process_batch, args=[pool_args[len(tasks)]]) 220 | tasks.append(task) 221 | 222 | # Wait for all tasks to complete and collect checkpoint file paths 223 | checkpoint_files = [] 224 | for task in tqdm(tasks, desc="Waiting for tasks to complete"): 225 | checkpoint_file = task.get() 226 | checkpoint_files.append(checkpoint_file) 227 | 228 | # Close process pools 229 | 
for pool in pools: 230 | pool.close() 231 | pool.join() 232 | 233 | # Merge results from all checkpoint files 234 | print("Merging results...") 235 | final_results = merge_checkpoints(checkpoint_files, output_file) 236 | 237 | print(f"Processing complete! Total of {len(final_results)} items processed") 238 | print(f"Final results saved to: {output_file}") 239 | 240 | return final_results 241 | 242 | def parse_arguments(): 243 | parser = argparse.ArgumentParser(description='Generate answers using vLLM') 244 | parser.add_argument('--model', type=str, default=None, 245 | help='Path to the model') 246 | parser.add_argument('--input_file', type=str, default=None, 247 | help='Path to the input data file') 248 | parser.add_argument('--generated_file', type=str, default=None, 249 | help='Path to the final output file') 250 | parser.add_argument('--n', type=int, default=200, 251 | help='Number of answers generated per sample') 252 | parser.add_argument('--nums_answer', type=int, default=3200, 253 | help='Total number of answers to generate per input') 254 | 255 | return parser.parse_args() 256 | 257 | def main(): 258 | args = parse_arguments() 259 | 260 | return process_data( 261 | model_path=args.model, 262 | input_file=args.input_file, 263 | output_file=args.generated_file, 264 | batch_size=args.n, 265 | num_answers=args.nums_answer 266 | ) 267 | 268 | if __name__ == "__main__": 269 | main() -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | tqdm==4.66.5 2 | torch==2.5.1 3 | transformers==4.51.3 4 | vllm==0.7.3 5 | pexpect==4.9.0 6 | datasets==2.17.1 7 | jsonlines==4.0.0 -------------------------------------------------------------------------------- /verify_answers.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import pexpect 3 | import json 4 | import os 5 | import time 6 | import tempfile 7 | import re 8 | import pdb 9 | import heapq 10 | import argparse 11 | import math 12 | from tqdm import tqdm 13 | from concurrent.futures import ThreadPoolExecutor 14 | import concurrent.futures 15 | import gc 16 | import logging 17 | 18 | # Interactive thread class 19 | class InteractiveThread(threading.Thread): 20 | def __init__(self, session_id, repl_path, lean_env_path, initial_context=None, 21 | timeout=600, expect_timeout=120): 22 | super().__init__() 23 | self.session_id = session_id 24 | self.repl_path = repl_path 25 | self.lean_env_path = lean_env_path 26 | self.context = initial_context 27 | self.session = None 28 | self.expect_timeout = expect_timeout 29 | 30 | self.cmd_response_condition = threading.Event() 31 | self.cmd_query_condition = threading.Event() 32 | self.init_complete = threading.Event() 33 | self.response = None 34 | 35 | self.stop_flag = False 36 | self.timer = threading.Timer(timeout, self.stop) 37 | 38 | def initialize_check(self): 39 | try: 40 | if self.context == None: 41 | initialize_check = {"cmd": "def init_check : Nat := 42"} 42 | self.send_cmd(initialize_check) 43 | self.session.expect('"env": 0}\r\n\r\n', timeout=self.expect_timeout) # If the context contains 'sorries', it will have more keys other than 'env' 44 | self.init_complete.set() 45 | except: 46 | self.init_complete.set() 47 | print(f"Session {self.session_id}: Failed to initialize Lean REPL") 48 | print(self.context) 49 | print(self.session.before) 50 | self.stop() 51 | 52 | def send_cmd(self, cmd): 53 | cmd_str = 
json.dumps(cmd, ensure_ascii=False) 54 | self.session.sendline(cmd_str + '\n') 55 | 56 | def submit_and_receive(self, cmd): 57 | if self.stop_flag: 58 | return None 59 | 60 | self.init_complete.wait() 61 | 62 | self.send_cmd(cmd) 63 | 64 | self.cmd_query_condition.set() 65 | 66 | self.cmd_response_condition.wait() # Wait for the response 67 | self.cmd_response_condition.clear() 68 | if self.response: 69 | output = self.response 70 | self.response = None 71 | return output 72 | return None 73 | 74 | def process_responses(self): 75 | while not self.stop_flag: 76 | self.cmd_query_condition.wait() # Wait for input 77 | self.cmd_query_condition.clear() 78 | 79 | if self.stop_flag: # Terminate session 80 | break 81 | 82 | try: 83 | self.session.expect('\r\n\r\n', timeout=self.expect_timeout) # Filter out input; pexpect prints the input twice for unknown reasons 84 | self.session.expect(['\r\n\r\n', pexpect.EOF], timeout=self.expect_timeout) 85 | output = self.session.before.strip() 86 | output_dict = json.loads(output) 87 | self.response = output_dict 88 | self.cmd_response_condition.set() 89 | 90 | except pexpect.TIMEOUT: 91 | print("Output timeout") 92 | self.cmd_response_condition.set() # Prevent thread deadlock 93 | break # Terminate session 94 | except pexpect.EOF: 95 | print("Session ended unexpectedly.") 96 | self.cmd_response_condition.set() # Prevent thread deadlock 97 | break 98 | except json.JSONDecodeError as e: 99 | self.cmd_response_condition.set() # Prevent thread deadlock 100 | print(output) 101 | break 102 | 103 | except Exception as e: 104 | print(f"Error in process_responses: {e}") 105 | self.cmd_response_condition.set() 106 | break 107 | 108 | def remove_last_comment(self): 109 | pattern = r'/--[^/]*?-/(\n*)$' 110 | self.context = re.sub(pattern, '', self.context, flags=re.DOTALL) 111 | 112 | def run(self): 113 | self.timer.start() 114 | try: 115 | self.session = pexpect.spawn('bash', encoding='utf-8', cwd=self.lean_env_path) 116 | if self.context != None: 117 | self.remove_last_comment() 118 | with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp: 119 | json.dump({"cmd": self.context}, temp, ensure_ascii=False) 120 | temp.write("\n\n") 121 | temp.flush() 122 | command = f'lake env {self.repl_path}/.lake/build/bin/repl < <(cat {temp.name} -)' 123 | else: 124 | command = f'lake env {self.repl_path}/.lake/build/bin/repl' 125 | 126 | self.session.sendline(command) 127 | self.initialize_check() 128 | self.process_responses() # Continuously process responses 129 | self.stop() 130 | 131 | except Exception as e: 132 | print(f"Session {self.session_id}: An error occurred: {e}") 133 | self.init_complete.set() 134 | self.stop() 135 | 136 | def stop(self): 137 | self.stop_flag = True 138 | self.init_complete.set() 139 | self.cmd_query_condition.set() 140 | self.cmd_response_condition.set() 141 | self.timer.cancel() 142 | # Terminate the session 143 | if hasattr(self, 'session') and self.session: 144 | try: 145 | self.session.close(force=True) 146 | del self.session 147 | except: 148 | pass 149 | 150 | # Process a proof batch 151 | def process_batch(batch_id, item, batch_answers, context, autoformalization, 152 | repl_path, lean_env_path, session_timeout, expect_timeout): 153 | # Initialize interactive thread 154 | thread = InteractiveThread( 155 | batch_id, 156 | repl_path=repl_path, 157 | lean_env_path=lean_env_path, 158 | initial_context=context, 159 | timeout=session_timeout, 160 | expect_timeout=expect_timeout 161 | ) 162 | thread.start() 163 | 
thread.init_complete.wait() # Wait for initialization to complete 164 | 165 | results = [] 166 | try: 167 | for answer in batch_answers: 168 | # Verify each answer in the batch 169 | verified_answer, answer_bool = process_answer(item, answer, autoformalization, thread) 170 | results.append({"answer": verified_answer, "answer_bool": answer_bool}) # Collect results 171 | finally: 172 | thread.stop() 173 | thread.join() 174 | 175 | return results 176 | 177 | def process_answer(item, answer, autoformalization, thread): 178 | answer = answer.split("```")[0] 179 | try: 180 | outcome = thread.submit_and_receive({"cmd": autoformalization + answer, "env": 0}) 181 | if outcome is None: 182 | return answer, False 183 | 184 | # Check for errors or incomplete content in the result 185 | if "messages" in outcome: 186 | is_error = False 187 | is_sorries = False 188 | for i in range(len(outcome["messages"])): 189 | if outcome["messages"][i]["severity"] == "error": 190 | is_error = True 191 | elif outcome["messages"][i]["severity"] == "sorries" or 'sorries' in outcome.keys(): 192 | is_sorries = True 193 | if is_error or is_sorries: 194 | return answer, False 195 | else: 196 | return answer, True 197 | return answer, True 198 | except Exception as e: 199 | print(f"Error in process_answer: {e}") 200 | return answer, False 201 | 202 | # Load existing progress (if available) 203 | def load_progress_from_file(filepath): 204 | """ 205 | Load a JSON progress file, attempting to recover data from incomplete JSON 206 | 207 | Args: 208 | filepath: The path to the JSON file 209 | 210 | Returns: 211 | dict: The loaded data dictionary, or an empty dictionary if loading fails 212 | """ 213 | if os.path.exists(filepath): 214 | try: 215 | with open(filepath, "r", encoding="utf-8") as f: 216 | data = json.load(f) 217 | print(f"Loaded progress from {filepath}") 218 | return data 219 | except Exception as e: 220 | print(f"Error loading file {filepath}: {e}") 221 | return {} 222 | 223 | # Save data to a file 224 | def save_to_file(filepath, data): 225 | try: 226 | with open(filepath, "w", encoding="utf-8") as f: 227 | json.dump(data, f, ensure_ascii=False, indent=4) 228 | print(f"Progress saved to {filepath}") 229 | except Exception as e: 230 | print(f"Error saving to file {filepath}: {e}") 231 | 232 | def verify_answers( 233 | input_file, 234 | output_file, 235 | repl_path="/workspace/ky_ding/math/minictx-eval/repl", 236 | lean_env_path="/workspace/ky_ding/math/minictx-eval/repl/test/Mathlib", 237 | num_batches=32, 238 | session_timeout=600, 239 | expect_timeout=120 240 | ): 241 | """ 242 | Verify answers and save the results 243 | 244 | Args: 245 | input_file (str): Path to the input file containing answers to be verified 246 | output_file (str): Path to the output file to save verification results 247 | repl_path (str): Path to Lean REPL 248 | lean_env_path (str): Path to Lean environment 249 | num_batches (int): Number of parallel verification batches 250 | session_timeout (int): Timeout for interactive sessions (in seconds) 251 | expect_timeout (int): Timeout for expect commands (in seconds) 252 | 253 | Returns: 254 | dict: Verification results 255 | """ 256 | # Set up logging 257 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 258 | 259 | # Load existing data 260 | final_proof_dict = load_progress_from_file(output_file) 261 | 262 | # Load theorems to be processed 263 | with open(input_file, "r") as f: 264 | data = json.load(f) 265 | 266 | # Initialize thread lock 267 | lock 
= threading.Lock() 268 | 269 | for item in data: 270 | theorem_name = item["theorem_names"] 271 | 272 | # Skip if theorem has already been processed 273 | if theorem_name in final_proof_dict: 274 | print(f"Theorem {theorem_name} already processed. Skipping...") 275 | continue 276 | 277 | # item["autoformalization"]= "\nComplete the following Lean 4 code:\n```lean4\n"+item['autoformalization'].replace("sorry", "\n") 278 | 279 | autoformalization = item["autoformalization"].split("```lean4\n")[1] 280 | context = autoformalization.split("theorem")[0] or autoformalization.split("def")[0] 281 | autoformalization = autoformalization.replace(context, "", 1) 282 | # Allocate resources according to thread count 283 | answers = item["answers"] 284 | 285 | batch_size = math.ceil(len(answers) / num_batches) 286 | batches = [answers[i:i+batch_size] for i in range(0, len(answers), batch_size)] 287 | print(f"Processing {len(answers)} answers for theorem {theorem_name} in {len(batches)} batches") 288 | 289 | all_results = [] 290 | 291 | # Process batches in parallel using thread pool 292 | with ThreadPoolExecutor(max_workers=num_batches) as executor: 293 | futures = [] 294 | for batch_id, batch in enumerate(batches): 295 | futures.append(executor.submit( 296 | process_batch, 297 | batch_id, 298 | item, 299 | batch, 300 | context, 301 | autoformalization, 302 | repl_path, 303 | lean_env_path, 304 | session_timeout, 305 | expect_timeout 306 | )) 307 | 308 | # Collect results for each batch 309 | for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)): 310 | batch_results = future.result() 311 | all_results.extend(batch_results) 312 | 313 | # Save new theorem results to the final dictionary 314 | with lock: 315 | final_proof_dict[theorem_name] = all_results 316 | 317 | # Save progress 318 | save_to_file(output_file, final_proof_dict) 319 | 320 | save_to_file(output_file, final_proof_dict) 321 | return final_proof_dict 322 | 323 | def parse_args(): 324 | parser = argparse.ArgumentParser(description="Verify Lean theorem proofs") 325 | 326 | # File paths 327 | parser.add_argument("--input_file", required=True, 328 | help="Path to the input file containing answers to be verified") 329 | parser.add_argument("--output_file", required=True, 330 | help="Path to the output file to save verification results") 331 | 332 | # Verification parameters 333 | parser.add_argument("--repl_path", default="/workspace/ky_ding/math/minictx-eval/repl", 334 | help="Path to Lean REPL") 335 | parser.add_argument("--lean_env_path", default="/workspace/ky_ding/math/minictx-eval/repl/test/Mathlib", 336 | help="Path to Lean environment") 337 | parser.add_argument("--num_batches", default=96, type=int, 338 | help="Number of parallel verification batches") 339 | 340 | # Timeout parameters 341 | parser.add_argument("--session_timeout", default=600, type=int, 342 | help="Timeout for interactive sessions (in seconds)") 343 | parser.add_argument("--expect_timeout", default=120, type=int, 344 | help="Timeout for the expect command (in seconds)") 345 | 346 | return parser.parse_args() 347 | 348 | def main(): 349 | args = parse_args() 350 | 351 | try: 352 | print("Starting answer verification...") 353 | verify_answers( 354 | input_file=args.input_file, 355 | output_file=args.output_file, 356 | repl_path=args.repl_path, 357 | lean_env_path=args.lean_env_path, 358 | num_batches=args.num_batches, 359 | session_timeout=args.session_timeout, 360 | expect_timeout=args.expect_timeout 361 | ) 362 | print(f"Verification complete. 
Results have been saved to {args.output_file}") 363 | except Exception as e: 364 | logging.error(f"Error during verification: {e}") 365 | 366 | if __name__ == "__main__": 367 | main() --------------------------------------------------------------------------------