├── .gitignore
├── LICENSE
├── README.md
├── config
│   ├── config.yaml
│   ├── context_parser
│   │   ├── import_copy.yaml
│   │   └── smart.yaml
│   ├── model
│   │   ├── codellama.yaml
│   │   ├── codeparrot.yaml
│   │   ├── deepseek.yaml
│   │   ├── local.yaml
│   │   ├── mistral.yaml
│   │   ├── phi1.yaml
│   │   ├── qwen25coder.yaml
│   │   ├── starcoder.yaml
│   │   └── starcoder15b.yaml
│   └── task
│       ├── FG.yaml
│       └── SG.yaml
├── data
│   ├── .gitignore
│   └── generations
│       ├── FG
│       │   └── .gitignore
│       └── SG
│           └── .gitignore
├── lm_eval
│   ├── __init__.py
│   ├── context_parser.py
│   ├── datatypes.py
│   ├── evaluator.py
│   ├── generators.py
│   └── utils.py
├── main.py
├── prepare_data
│   └── run.py
├── requirements.txt
├── results
│   └── .gitignore
├── tests
│   ├── __init__.py
│   └── test_evaluator.py
└── workdir
    └── .gitignore

/.gitignore:
--------------------------------------------------------------------------------
  1 | test_incorrest_answers_fails.json
  2 | test_perfect_preds_fails.json
  3 | outputs/
  4 | # Byte-compiled / optimized / DLL files
  5 | __pycache__/
  6 | *.py[cod]
  7 | *$py.class
  8 | 
  9 | # C extensions
 10 | *.so
 11 | 
 12 | # Distribution / packaging
 13 | .Python
 14 | build/
 15 | develop-eggs/
 16 | dist/
 17 | downloads/
 18 | eggs/
 19 | .eggs/
 20 | lib/
 21 | lib64/
 22 | parts/
 23 | sdist/
 24 | var/
 25 | wheels/
 26 | share/python-wheels/
 27 | *.egg-info/
 28 | .installed.cfg
 29 | *.egg
 30 | MANIFEST
 31 | 
 32 | # PyInstaller
 33 | # Usually these files are written by a python script from a template
 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
 35 | *.manifest
 36 | *.spec
 37 | 
 38 | # Installer logs
 39 | pip-log.txt
 40 | pip-delete-this-directory.txt
 41 | 
 42 | # Unit test / coverage reports
 43 | htmlcov/
 44 | .tox/
 45 | .nox/
 46 | .coverage
 47 | .coverage.*
 48 | .cache
 49 | nosetests.xml
 50 | coverage.xml
 51 | *.cover
 52 | *.py,cover
 53 | .hypothesis/
 54 | .pytest_cache/
 55 | cover/
 56 | 
 57 | # Translations
 58 | *.mo
 59 | *.pot
 60 | 
 61 | # Django stuff:
 62 | *.log
 63 | local_settings.py
 64 | db.sqlite3
 65 | db.sqlite3-journal
 66 | 
 67 | # Flask stuff:
 68 | instance/
 69 | .webassets-cache
 70 | 
 71 | # Scrapy stuff:
 72 | .scrapy
 73 | 
 74 | # Sphinx documentation
 75 | docs/_build/
 76 | 
 77 | # PyBuilder
 78 | .pybuilder/
 79 | target/
 80 | 
 81 | # Jupyter Notebook
 82 | .ipynb_checkpoints
 83 | 
 84 | # IPython
 85 | profile_default/
 86 | ipython_config.py
 87 | 
 88 | # pyenv
 89 | # For a library or package, you might want to ignore these files since the code is
 90 | # intended to run in multiple environments; otherwise, check them in:
 91 | # .python-version
 92 | 
 93 | # pipenv
 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
 97 | # install all needed dependencies.
 98 | #Pipfile.lock
 99 | 
100 | # poetry
101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
102 | # This is especially recommended for binary packages to ensure reproducibility, and is more
103 | # commonly ignored for libraries.
104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
105 | #poetry.lock
106 | 
107 | # pdm
108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
109 | #pdm.lock
110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
111 | # in version control.
112 | # https://pdm.fming.dev/#use-with-ide
113 | .pdm.toml
114 | 
115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116 | __pypackages__/
117 | 
118 | # Celery stuff
119 | celerybeat-schedule
120 | celerybeat.pid
121 | 
122 | # SageMath parsed files
123 | *.sage.py
124 | 
125 | # Environments
126 | .env
127 | .venv
128 | env/
129 | venv/
130 | ENV/
131 | env.bak/
132 | venv.bak/
133 | 
134 | # Spyder project settings
135 | .spyderproject
136 | .spyproject
137 | 
138 | # Rope project settings
139 | .ropeproject
140 | 
141 | # mkdocs documentation
142 | /site
143 | 
144 | # mypy
145 | .mypy_cache/
146 | .dmypy.json
147 | dmypy.json
148 | 
149 | # Pyre type checker
150 | .pyre/
151 | 
152 | # pytype static type analyzer
153 | .pytype/
154 | 
155 | # Cython debug symbols
156 | cython_debug/
157 | 
158 | # PyCharm
159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161 | # and can be added to the global gitignore or merged into this file. For a more nuclear
162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163 | #.idea/
164 | 

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
  1 |                                  Apache License
  2 |                            Version 2.0, January 2004
  3 |                         http://www.apache.org/licenses/
  4 | 
  5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
  6 | 
  7 |    1. Definitions.
  8 | 
  9 |       "License" shall mean the terms and conditions for use, reproduction,
 10 |       and distribution as defined by Sections 1 through 9 of this document.
 11 | 
 12 |       "Licensor" shall mean the copyright owner or entity authorized by
 13 |       the copyright owner that is granting the License.
 14 | 
 15 |       "Legal Entity" shall mean the union of the acting entity and all
 16 |       other entities that control, are controlled by, or are under common
 17 |       control with that entity. For the purposes of this definition,
 18 |       "control" means (i) the power, direct or indirect, to cause the
 19 |       direction or management of such entity, whether by contract or
 20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
 21 |       outstanding shares, or (iii) beneficial ownership of such entity.
 22 | 
 23 |       "You" (or "Your") shall mean an individual or Legal Entity
 24 |       exercising permissions granted by this License.
 25 | 
 26 |       "Source" form shall mean the preferred form for making modifications,
 27 |       including but not limited to software source code, documentation
 28 |       source, and configuration files.
 29 | 
 30 |       "Object" form shall mean any form resulting from mechanical
 31 |       transformation or translation of a Source form, including but
 32 |       not limited to compiled object code, generated documentation,
 33 |       and conversions to other media types.
 34 | 
 35 |       "Work" shall mean the work of authorship, whether in Source or
 36 |       Object form, made available under the License, as indicated by a
 37 |       copyright notice that is included in or attached to the work
 38 |       (an example is provided in the Appendix below).
 39 | 
 40 |       "Derivative Works" shall mean any work, whether in Source or Object
 41 |       form, that is based on (or derived from) the Work and for which the
 42 |       editorial revisions, annotations, elaborations, or other modifications
 43 |       represent, as a whole, an original work of authorship. For the purposes
 44 |       of this License, Derivative Works shall not include works that remain
 45 |       separable from, or merely link (or bind by name) to the interfaces of,
 46 |       the Work and Derivative Works thereof.
 47 | 
 48 |       "Contribution" shall mean any work of authorship, including
 49 |       the original version of the Work and any modifications or additions
 50 |       to that Work or Derivative Works thereof, that is intentionally
 51 |       submitted to Licensor for inclusion in the Work by the copyright owner
 52 |       or by an individual or Legal Entity authorized to submit on behalf of
 53 |       the copyright owner. For the purposes of this definition, "submitted"
 54 |       means any form of electronic, verbal, or written communication sent
 55 |       to the Licensor or its representatives, including but not limited to
 56 |       communication on electronic mailing lists, source code control systems,
 57 |       and issue tracking systems that are managed by, or on behalf of, the
 58 |       Licensor for the purpose of discussing and improving the Work, but
 59 |       excluding communication that is conspicuously marked or otherwise
 60 |       designated in writing by the copyright owner as "Not a Contribution."
 61 | 
 62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
 63 |       on behalf of whom a Contribution has been received by Licensor and
 64 |       subsequently incorporated within the Work.
 65 | 
 66 |    2. Grant of Copyright License. Subject to the terms and conditions of
 67 |       this License, each Contributor hereby grants to You a perpetual,
 68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 69 |       copyright license to reproduce, prepare Derivative Works of,
 70 |       publicly display, publicly perform, sublicense, and distribute the
 71 |       Work and such Derivative Works in Source or Object form.
 72 | 
 73 |    3. Grant of Patent License. Subject to the terms and conditions of
 74 |       this License, each Contributor hereby grants to You a perpetual,
 75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 76 |       (except as stated in this section) patent license to make, have made,
 77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
 78 |       where such license applies only to those patent claims licensable
 79 |       by such Contributor that are necessarily infringed by their
 80 |       Contribution(s) alone or by combination of their Contribution(s)
 81 |       with the Work to which such Contribution(s) was submitted. If You
 82 |       institute patent litigation against any entity (including a
 83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
 84 |       or a Contribution incorporated within the Work constitutes direct
 85 |       or contributory patent infringement, then any patent licenses
 86 |       granted to You under this License for that Work shall terminate
 87 |       as of the date such litigation is filed.
 88 | 
 89 |    4. Redistribution. You may reproduce and distribute copies of the
 90 |       Work or Derivative Works thereof in any medium, with or without
 91 |       modifications, and in Source or Object form, provided that You
 92 |       meet the following conditions:
 93 | 
 94 |       (a) You must give any other recipients of the Work or
 95 |           Derivative Works a copy of this License; and
 96 | 
 97 |       (b) You must cause any modified files to carry prominent notices
 98 |           stating that You changed the files; and
 99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!) The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright 2023 Kokosinskii Denis
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
  1 | # RealCode_eval
  2 | **RealCode_eval** is a benchmark for **execution-based** evaluation of the code generation capabilities of LLMs in **real GitHub repositories**. The model-generated code is evaluated by running tests in the respective repositories.
  3 | 
  4 | **RealCode v3** includes two independent benchmarks: **FG (Function Generation)** and **SG (Scope Generation)**. Each benchmark has **1000 tasks** built from **154 Python GitHub repositories**.
  5 | 
  6 | To avoid data contamination for popular Code LLMs, we only use repositories created in 2024.
  7 | 
  8 | ## Realcode v3 Function Generation
  9 | Each task of Realcode v3 FG requires the model to generate the body of a function (or of a class method), based on the function signature, a **docstring**, and the rest of the source code file.
 10 | 
 11 | <details>
 12 | <summary>Example</summary>
 13 | 
 14 | 
 15 | ```python
 16 | """
 17 | Feature Extraction Methods for DimSense
 18 | """
 19 | 
 20 | import numpy as np
 21 | import tensorflow as tf
 22 | from sklearn.decomposition import PCA, FastICA
 23 | from sklearn.manifold import TSNE
 24 | from sklearn.feature_extraction.text import TfidfVectorizer
 25 | from sklearn.feature_extraction.text import CountVectorizer
 26 | from sklearn.decomposition import LatentDirichletAllocation
 27 | from sklearn.base import BaseEstimator, TransformerMixin
 28 | 
 29 | 
 30 | class AutoencoderExtractor(BaseEstimator, TransformerMixin):
 31 |     """
 32 |     AutoencoderExtractor provides feature extraction using autoencoders.
 33 |     """
 34 |     def __init__(self, encoding_dim=10):
 35 |         """
 36 |         Initialize the AutoencoderExtractor.
 37 | 
 38 |         Parameters:
 39 |         - encoding_dim (int): Dimension of the encoded representation.
 40 |         """
 41 |         self.tf = None
 42 |         self.encoding_dim = encoding_dim
 43 | 
 44 |     def _import_tensorflow(self):
 45 |         try:
 46 |             import tensorflow as tf
 47 |             self.tf = tf
 48 |         except ImportError:
 49 |             raise ImportError("TensorFlow is required for using AutoencoderExtractor.")
 50 | 
 51 |     def build_autoencoder(self):
 52 |         if self.tf is not None:
 53 |             input_layer = self.tf.keras.layers.Input(shape=(self.input_dim,))
 54 |             encoded = tf.keras.layers.Dense(self.encoding_dim, activation='relu')(input_layer)
 55 |             decoded = tf.keras.layers.Dense(self.input_dim, activation='sigmoid')(encoded)
 56 |             autoencoder = tf.keras.models.Model(input_layer, decoded)
 57 |             autoencoder.compile(optimizer='adam', loss='mean_squared_error')
 58 |             return autoencoder
 59 |         else: return None
 60 | 
 61 |     def fit_transform(self, X):
 62 |         """
 63 |         Fit the autoencoder model and transform the data.
 64 | 
 65 |         Parameters:
 66 |         - X (array-like): Input data.
 67 | 
 68 |         Returns:
 69 |         - X_extracted (array-like): Extracted features.
 70 |         """
 71 |         # >>> THIS NEEDS TO BE GENERATED >>>>
 72 |         if self.tf is None:
 73 |             self._import_tensorflow()
 74 |         self.input_dim = X.shape[1]
 75 |         self.autoencoder = self.build_autoencoder()
 76 |         self.autoencoder.fit(X, X, epochs=50, batch_size=32, shuffle=True, verbose=0)
 77 |         encoder = tf.keras.models.Model(inputs=self.autoencoder.input, outputs=self.autoencoder.layers[1].output)
 78 |         X_extracted = encoder.predict(X)
 79 |         return X_extracted
 80 |         # <<<< <<<<
 81 | 
 82 |     def set_encoding_dim(self, encoding_dim):
 83 |         """
 84 |         Set the dimension of the encoded representation.
 85 | 
 86 |         Parameters:
 87 |         - encoding_dim (int): Dimension of the encoded representation.
 88 |         """
 89 |         self.encoding_dim = encoding_dim
 90 |         self.autoencoder = self.build_autoencoder()
 91 | 
 92 |     ...
 93 | ```
 94 | </details>
 95 | 
 96 | ## Realcode v3 Scope Generation
 97 | Each task of Realcode v3 SG requires the model to generate an arbitrary block of code (the body of a function, a for-loop, an if-statement, etc.), based on the rest of the source code file. Unlike the FG task, no docstring (or any other natural-language description of the code) is guaranteed to be provided.
 98 | 
 99 | <details>
100 | 
101 | <summary>Example</summary>
102 | 
103 | 
104 | ```python
105 | import click
106 | from geekbot_cli.api_client import APIClient
107 | from geekbot_cli.config_manager import ConfigManager
108 | from geekbot_cli.cli import CLI
109 | import sys
110 | 
111 | @click.command()
112 | @click.option('--clear-api-key', is_flag=True, help='Removes the saved API key from keyring')
113 | def main(clear_api_key):
114 |     """
115 |     Entry point for the CLI that can now handle `--clear-api-key` to remove the saved API key.
116 |     """
117 |     config_manager = ConfigManager()
118 |     if clear_api_key:
119 |         if click.confirm('Are you sure you want to remove the API key?'):
120 |             # >>> THIS NEEDS TO BE GENERATED >>>>
121 |             config_manager.delete_api_key()
122 |             click.echo("API key has been removed.")
123 |             # <<<< <<<<
124 |         else:
125 |             click.echo("Operation cancelled.")
126 |     else:
127 |         # Normal CLI operation
128 |         try:
129 |             api_client = APIClient()
130 |             cli = CLI(api_client, config_manager)
131 |             cli.start()
132 |         except Exception as e:
133 |             click.echo(f"Error: {e}")
134 |             sys.exit(1)
135 | 
136 | if __name__ == '__main__':
137 |     main()
138 | ```
139 | </details>
140 | 
141 | ## Evaluation
142 | We use the following evaluation procedure for each task and its generated code snippet (a minimal sketch of the final check follows the list):
143 | 1. The generated code snippet is placed at the appropriate position in the source code file.
144 | 2. The entire repository, including the modified file from Step 1, is copied to ./workdir.
145 | 3. All tests are executed in the copied repository.
146 | 4. If the number of passed tests differs from the prerecorded number of passed tests for the repository, the generated code is considered incorrect; if the two numbers match, it is correct.
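
The sketch below illustrates the pass/fail criterion only; `insert_generation` and `run_repo_tests` are hypothetical helper names used for illustration, not part of this repo:
```python
# Illustrative sketch of the evaluation criterion (hypothetical helpers).
def is_correct(repo_dir, task, generation, prerecorded_passed: int) -> bool:
    insert_generation(repo_dir, task, generation)  # Step 1: splice the snippet into the file
    passed, failed = run_repo_tests(repo_dir)      # Steps 2-3: copy the repo and run its tests
    return passed == prerecorded_passed            # Step 4: compare passed-test counts
```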
147 | 
148 | # Getting started
149 | Every repository in RealCode has dependencies and, as a result, requires a properly configured environment. We use Conda to create an individual environment for each repository.
150 | 
151 | **1.** Install requirements in your main environment
152 | ```bash
153 | pip install -r requirements.txt
154 | pip install flash-attn --no-build-isolation
155 | ```
156 | 
157 | **2.** Download repositories and meta files
158 | ```
159 | wget https://zenodo.org/records/13378983/files/realcode_v3_repos_upd.tar.gz
160 | tar -xvf realcode_v3_repos_upd.tar.gz -C data
161 | ```
162 | Expected file structure:
163 | ```
164 | data/realcode_v3/realcode_v3_SG.json
165 | data/realcode_v3/realcode_v3_FG.json
166 | data/realcode_v3/*Repository names*
167 | ```
168 | 
169 | **3.** Build environments for each repository in the benchmark (takes about an hour)
170 | ```bash
171 | cd prepare_data
172 | python run.py
173 | cd ..
174 | ```
175 | 
176 | **4.** Check installation **(IMPORTANT!)**
177 | ```bash
178 | pytest tests/test_evaluator.py
179 | ```
180 | > [!NOTE]
181 | > The number of passed tests in the repositories may vary depending on your system. If this test fails on your system, feel free to open an issue; we need your feedback to create a more stable version of the benchmark.
182 | 
183 | **5.** Run the evaluation of your model (see config/config.yaml for details), e.g. for [codeparrot-small](https://huggingface.co/codeparrot/codeparrot-small):
184 | ```bash
185 | CUDA_VISIBLE_DEVICES=0 python main.py +model=codeparrot generation_params.max_new_tokens=512 max_context_length=500
186 | ```
187 | > [!WARNING]
188 | > **Generated code is executed without any isolation in the benchmark! The repositories themselves are checked to be safe, but the generated code may not be!**
189 | 
190 | 
191 | # Examples
192 | * Run deepseek-ai/deepseek-coder-1.3b-base with left context only, 1024 tokens in the prompt:
193 | ```bash
194 | CUDA_VISIBLE_DEVICES=0 python main.py +model=deepseek size=1.3b max_context_length=1024
195 | ```
196 | * Run deepseek-ai/deepseek-coder-1.3b-base with **left and right context** (fill-in-the-middle), 1024 tokens in the prompt:
197 | ```bash
198 | CUDA_VISIBLE_DEVICES=0 python main.py +model=deepseek size=1.3b max_context_length=1024 generator_mode=infill
199 | ```
200 | * (Recommended mode) Run deepseek-ai/deepseek-coder-1.3b-base with left and right context, 1024 tokens in the prompt, **3:1 left-to-right context ratio**:
201 | ```bash
202 | CUDA_VISIBLE_DEVICES=0 python main.py +model=deepseek size=1.3b max_context_length=1024 generator_mode=infill left_context_ratio=3
203 | ```
204 | * You can evaluate your own model from a local HuggingFace checkpoint:
205 | ```bash
206 | CUDA_VISIBLE_DEVICES=0 python main.py +model=local model_path=*path_to_HF_checkpoint* model_name=*my_awesome_model* max_context_length=1024
207 | ```
208 | > [!NOTE]
209 | > You may need to edit config/model/local.yaml with the FIM tokens for your model
210 | 
211 | * The model is run with device_map='auto' by default. If you instead wish to distribute tasks between several GPUs, you can use accelerate:
212 | ```bash
213 | CUDA_VISIBLE_DEVICES=0,1 accelerate launch --num_processes 2 main.py +model=deepseek size=1.3b max_context_length=1024
214 | ```
215 | 

--------------------------------------------------------------------------------
/config/config.yaml:
--------------------------------------------------------------------------------
 1 | defaults:
 2 |   - task: SG
 3 | 
 4 | hydra:
 5 |   job:
 6 |     config:
 7 |       override_dirname:
 8 |         exclude_keys:
 9 |           - model_path
10 |           - dataset_root
11 |           - dataset_meta_file
12 |           - generations_save_path
13 |           - working_dir
14 |           - metrics_save_path
15 |           - use_cached_generations
16 | 
17 | do_eval: True
18 | do_generation: True
19 | # random seed
20 | seed: 42
21 | 
22 | # dataset paths
23 | dataset_root: '${hydra:runtime.cwd}/data/realcode_v3'
24 | limit: 10_000
25 | 
26 | # model related values that must be overridden, see for example config/model/codellama.yaml
27 | model_path:
28 | model_short_name:
29 | size:
30 | lm_prefix_tokens: ""
31 | prefix_tokens: ""
32 | middle_tokens: ""
33 | suffix_tokens: ""
34 | max_context_length: 100000
35 | left_context_ratio: 1
36 | 
37 | # 'lm' or 'infill', whether to use right context in generation
38 | generator_mode: lm
39 | # number of samples to generate per task
40 | num_samples: 1
41 | # datatype to use (fp32, fp16 or bf16)
42 | dtype: bf16
43 | # params to be passed to .generate method
44 | generation_params:
45 |   do_sample: False
46 |   max_new_tokens: 1024
47 | 
48 | # NOT USED anymore
49 | # eos_sequences: ["\\sclass\\s", "\\sdef\\s", "^def\\s", "^class\\s", "^if\\s", "@", "^#", "<|endoftext|>"]
50 | # # fix tokenization issue with llamatokenizer, set to 1 if the first generated line is underindented
51 | # tokenizer_fix: 0
52 | 
53 | # path where generations are stored
54 | generations_save_path: "${hydra:runtime.cwd}/data/generations/${hydra:job.override_dirname}.json"
55 | # whether to reuse saved generations
56 | use_cached_generations: True
57 | # list of Pass@k (https://arxiv.org/abs/2107.03374) metrics. [1,3] means Pass@1 and Pass@3 will be calculated
58 | pass_k_list: [1]
59 | # evaluation n_jobs
60 | njobs: 8
61 | working_dir: "${hydra:runtime.cwd}/workdir/${hydra:job.override_dirname}"
62 | metrics_save_path: "${hydra:runtime.cwd}/results/${hydra:job.override_dirname}.json"


--------------------------------------------------------------------------------
/config/context_parser/import_copy.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | 
3 | context_parser:
4 |   _target_: lm_eval.context_parser.ImportCopyParser
5 |   data_root: ${dataset_root}
6 |   left_config: ['imports', 'outer', 'inner']
7 |   right_config: ['outer']


--------------------------------------------------------------------------------
/config/context_parser/smart.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | 
3 | context_parser:
4 |   _target_: lm_eval.context_parser.SmartContextParser
5 |   left_config: ['imports', 'outer', 'inner']
6 |   right_config: ['outer']


--------------------------------------------------------------------------------
/config/model/codellama.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | # number of parameters in the model. For codellama it is 7b, 13b or 34b
 4 | size: "7b"
 5 | # name of the model on HuggingFace
 6 | model_path: 'codellama/CodeLlama-${size}-hf'
 7 | model_short_name: "codellama-${size}"
 8 | # codellama special tokens
 9 | lm_prefix_tokens: ""
10 | prefix_tokens: "<PRE>"
11 | middle_tokens: " <SUF>"
12 | suffix_tokens: " <MID>"
13 | # context truncation length
14 | max_context_length: 15500
15 | eos_sequences: ["\\sclass\\s", "\\sdef\\s", "^def\\s", "^class\\s", "@", "<EOT>"]
16 | tokenizer_fix: 1
17 | 
18 | model_kwargs:
19 |   use_flash_attention_2: True
20 | 


--------------------------------------------------------------------------------
/config/model/codeparrot.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | # number of parameters in the model
 4 | size: "110m"
 5 | # name of the model on HuggingFace
 6 | model_path: 'codeparrot/codeparrot-small'
 7 | model_short_name: "codeparrot-small"
 8 | # special tokens (empty: codeparrot has no FIM support)
 9 | lm_prefix_tokens: ""
10 | prefix_tokens: ""
11 | middle_tokens:  ""
12 | suffix_tokens:  ""
13 | # context truncation length
14 | max_context_length: 512
15 | # model_kwargs:
16 | #   use_flash_attention_2: True
17 | 


--------------------------------------------------------------------------------
/config/model/deepseek.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | size: "1.3b"
 4 | model_path: 'deepseek-ai/deepseek-coder-${size}-base'
 5 | model_short_name: "deepseek-coder-${size}"
 6 | lm_prefix_tokens: ""
 7 | lm_suffix_tokens: ""
 8 | prefix_tokens: "<|fim▁begin|>"
 9 | middle_tokens: "<|fim▁hole|>"
10 | suffix_tokens: "<|fim▁end|>"
11 | max_context_length: 7500
12 | model_kwargs:
13 |   use_flash_attention_2: True
14 | 


--------------------------------------------------------------------------------
/config/model/local.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | size: ""
 4 | model_path: '${model_base_path}/${model_short_name}'
 5 | model_base_path: ""
 6 | model_short_name: ""
 7 | lm_prefix_tokens: ""
 8 | lm_suffix_tokens: ""
 9 | prefix_tokens: ""
10 | middle_tokens: ""
11 | suffix_tokens: ""
12 | max_context_length: 1024
13 | 


--------------------------------------------------------------------------------
/config/model/mistral.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | size: "7B"
 4 | model_path: 'mistralai/Mistral-${size}-v0.1'
 5 | model_short_name: "Mistral-${size}"
 6 | lm_prefix_tokens: ""
 7 | lm_suffix_tokens: ""
 8 | prefix_tokens: ""
 9 | middle_tokens: ""
10 | suffix_tokens: ""
11 | max_context_length: 7500
12 | model_kwargs:
13 |   use_flash_attention_2: True
14 | 


--------------------------------------------------------------------------------
/config/model/phi1.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | model_path: 'microsoft/phi-1'
 4 | model_short_name: "phi1"
 5 | lm_prefix_tokens: ""
 6 | prefix_tokens: "0"
 7 | middle_tokens: "0"
 8 | suffix_tokens: "0"
 9 | eos_sequences: ["\\sclass\\s", "\\sdef\\s", "^def\\s", "^class\\s", "@", "from", "import", "<|endoftext|>"]
10 | max_context_length: 7500
11 | 


--------------------------------------------------------------------------------
/config/model/qwen25coder.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | size: "1.5B"
 4 | model_path: Qwen/${model_short_name}
 5 | model_short_name: Qwen2.5-Coder-${size}
 6 | lm_prefix_tokens: ""
 7 | lm_suffix_tokens: ""
 8 | prefix_tokens: "<|fim_prefix|>"
 9 | middle_tokens: "<|fim_suffix|>"
10 | suffix_tokens: "<|fim_middle|>"
11 | max_context_length: 7500
12 | model_kwargs: 
13 |   attn_implementation: flash_attention_2
14 | 


--------------------------------------------------------------------------------
/config/model/starcoder.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | size: "1b"
 4 | model_path: 'bigcode/starcoderbase-${size}'
 5 | model_short_name: "starcoderbase-${size}"
 6 | lm_prefix_tokens: ""
 7 | lm_suffix_tokens: "\\n\\n" # weirdly, this works better for plain lm mode
 8 | prefix_tokens: "<fim_prefix>"
 9 | middle_tokens: "<fim_suffix>"
10 | suffix_tokens: "<fim_middle>"
11 | max_context_length: 7500
12 | 


--------------------------------------------------------------------------------
/config/model/starcoder15b.yaml:
--------------------------------------------------------------------------------
 1 | # @package _global_
 2 | 
 3 | variant: "rbase"
 4 | model_path: 'bigcode/starcode${variant}'
 5 | model_short_name: "starcode${variant}"
 6 | prefix_tokens: "<fim_prefix>"
 7 | middle_tokens: "<fim_suffix>"
 8 | suffix_tokens: "<fim_middle>"
 9 | max_context_length: 7500
10 | 


--------------------------------------------------------------------------------
/config/task/FG.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | dataset_meta_file: 'realcode_v3_FG.json'


--------------------------------------------------------------------------------
/config/task/SG.yaml:
--------------------------------------------------------------------------------
1 | # @package _global_
2 | dataset_meta_file: 'realcode_v3_SG.json'


--------------------------------------------------------------------------------
/data/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore


--------------------------------------------------------------------------------
/data/generations/FG/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore


--------------------------------------------------------------------------------
/data/generations/SG/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore


--------------------------------------------------------------------------------
/lm_eval/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NLP-Core-Team/RealCode_eval/f70984fb83022eb191ba94bcf55729c2fc64aa80/lm_eval/__init__.py


--------------------------------------------------------------------------------
/lm_eval/context_parser.py:
--------------------------------------------------------------------------------
  1 | from dataclasses import dataclass
  2 | import typing as tp
  3 | from collections import deque, namedtuple
  4 | import re
  5 | import ast
  6 | from pathlib import Path
  7 | 
  8 | from .datatypes import Task
  9 | from transformers import AutoTokenizer
 10 | 
 11 |     
 12 | Import = namedtuple("Import", ["module", "name", "alias"])
 13 | 
 14 | """
 15 | >>> Imports
 16 | import math
 17 | <<< imports
 18 | 
 19 | >>> file scope
 20 | def get_c():
 21 |  	return 1
 22 | 
 23 | <<< file scope
 24 | >>> outer scope
 25 | class Foo:
 26 |  	def __init__(self, a):
 27 | 		self.a = a
 28 | <<< outer scope
 29 | >>> inner scope
 30 |     @staticmethod
 31 | 	def bar():
 32 | 		'''
 33 | 		Turn Foo into bar
 34 | 		'''
 35 | <<< inner scope
 36 | >>> body (unavailable for model)
 37 |         bar = 'B'
 38 |         self.a = bar
 39 |         return self
 40 | <<< body (unavailable for model)
 41 | >>> outer scope
 42 |     def bar2():
 43 |      	self.a = 'C'
 44 |      	return self
 45 | <<< outer scope
 46 | >>> file scope
 47 | class Foo2:
 48 | 	...
 49 | 
 50 | <<< file scope
 51 | """
 52 | 
 53 | @dataclass(frozen=False)
 54 | class ParsedContext:
 55 |     imports: str = ''
 56 |     file: str = ''
 57 |     outer: str = ''
 58 |     inner: str = ''
 59 |     
 60 |     def __setitem__(self, key, value):
 61 |         setattr(self, key, value)
 62 | 
 63 |     def __getitem__(self, key):
 64 |         return getattr(self, key)
 65 |     
 66 |     def __str__(self):
 67 |         return (
 68 | 
 69 |             '\n----- imports -----\n' +
 70 |             self.imports +
 71 |             '\n----- end imports -----\n' +
 72 |             '\n----- file -----\n' +
 73 |             (
 74 |                 ('\n'.join(self.file.split('\n')[:10]) + '\n...\n' + '\n'.join(self.file.split('\n')[-15:])) if len(self.file.split('\n')) > 20 else self.file
 75 |             ) + 
 76 |             '\n----- end file -----\n' +
 77 |             '\n----- outer -----\n' +
 78 |             (
 79 |                 ('\n'.join(self.outer.split('\n')[:15]) + '\n...\n' + '\n'.join(self.outer.split('\n')[-15:])) if len(self.outer.split('\n')) > 20 else self.outer
 80 |             ) + 
 81 |             '\n----- end outer -----\n' +
 82 |             '\n----- inner -----\n' +
 83 |             '\n'.join(self.inner.split('\n')) + 
 84 |             '\n----- end inner -----\n' 
 85 | 
 86 |         )
 87 | 
 88 | 
 89 | def get_indent(code):
 90 |     line = code.split('\n')[0]
 91 |     return len(line) - len(line.strip())
 92 | 
 93 | 
 94 | def parse_context(context: str, indent: int, side: tp.Literal['left', 'right']) -> ParsedContext:
 95 |     res = ParsedContext()
 96 |     if side == 'left':
 97 |         cur_scope = deque()
 98 |         state = 'inner'
 99 | 
100 |         for line in reversed(context.split('\n')):
101 |             if line.startswith('import') or (line.startswith('from') and ' import ' in line):
102 |                 res['imports'] += line + '\n' 
103 |                 continue
104 | 
105 |             if state == 'inner_wait@':
106 |                 if not line.lstrip().startswith('@'):
107 |                     res['inner'] = "\n".join(cur_scope)
108 |                     cur_scope = deque()
109 |                     if indent > 0: 
110 |                         state = 'outer'
111 |                     else:
112 |                         state = 'file'
113 | 
114 |             cur_scope.appendleft(line)
115 |             if state == 'inner':
116 |                 if line.strip().startswith('def '):
117 |                     state = 'inner_wait@'
118 |             if state == 'outer':
119 |                 if line.startswith('class'):
120 |                     res['outer'] = "\n".join(cur_scope)
121 |                     state = 'file'
122 |                     cur_scope = deque()
123 |         if state == 'inner_wait@':
124 |             state = 'inner'
125 |         res[state] = "\n".join(cur_scope)
126 |     elif side == 'right':
127 |         cur_scope = deque()
128 |         state = 'outer'
129 | 
130 |         for line in context.split('\n'):
131 |             if state == 'outer':
132 |                 if (
133 |                     line.strip()
134 |                     and not line.startswith(' ')
135 |                 ):
136 |                     res['outer'] = "\n".join(cur_scope)
137 |                     state = 'file'
138 |                     cur_scope = deque()
139 |             cur_scope.append(line)
140 |         res[state] = "\n".join(cur_scope)
141 |     return res
142 | 
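# Illustrative sketch of parse_context on a small left context (not repo code):
# >>> ctx = parse_context("import math\nclass Foo:\n    def bar(self):", indent=4, side='left')
# >>> ctx.imports   ->  'import math\n'
# >>> ctx.outer     ->  'class Foo:'
# >>> ctx.inner     ->  '    def bar(self):'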
143 | 
144 | class BaseParser:
145 |     def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:
146 |         """
147 |         main method, that returns tuple (left_context, right_context) for the task
148 |         """
149 |         raise NotImplementedError()
150 | 
151 | 
152 | class TrivialContextParser(BaseParser):
153 |     def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:
154 |         """
155 |         returns left and right context without processing
156 |         """
157 |         return task.left_context, task.right_context
158 | 
159 | 
160 | class SmartContextParser(BaseParser):
161 |     def __init__(self, 
162 |         left_config = ['imports', 'file', 'outer', 'inner'],
163 |         right_config = ['outer', 'file']          
164 |     ):
165 |         self.left_config = left_config
166 |         self.right_config = right_config
167 | 
168 |     def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:
169 |         """
170 |         Returns (left_context, right_context) reduced to the scopes listed in left_config / right_config.
171 |         """
172 |         indent = (len(task.gt) - len(task.gt.lstrip()))
173 |         left_context_parsed = parse_context(task.left_context, indent, 'left')
174 |         left_context = "\n".join([left_context_parsed[k] for k in self.left_config])
175 |         right_context_parsed = parse_context(task.right_context, indent, 'right')   
176 |         right_context = "\n".join([right_context_parsed[k] for k in self.right_config])
177 |         return left_context, right_context
178 |     
179 | class ImportResolutionParser(BaseParser):
180 |     def __init__(self,
181 |         data_root: str,
182 |         left_config = ['imports', 'file', 'outer', 'inner'],
183 |         right_config = ['outer', 'file']          
184 |     ):
185 |         """
186 |         data_root: root directory with the benchmark repositories, used to resolve repo-local imports
187 |         """
188 |         self.data_root = data_root
189 |         self.left_config = left_config
190 |         self.right_config = right_config
191 | 
192 |     def _desc_func(self, functionNode, lines):
193 |         return " ".join([t.strip() for t in lines[functionNode.lineno-1: functionNode.body[0].lineno - 1]])
194 | 
195 |     def _parse_file(self, filename, func_names):
196 |         ans = []
197 |         with open(filename, 'r', encoding='UTF-8') as f:
198 |             text = f.read()
199 |             lines = text.split('\n')
200 |             node = ast.parse(text)
201 |         if func_names:
202 |             functions = [n for n in node.body if isinstance(n, ast.FunctionDef) and n.name in func_names]
203 |             classes = [n for n in node.body if isinstance(n, ast.ClassDef) and n.name in func_names]
204 |         else:
205 |             functions = [n for n in node.body if isinstance(n, ast.FunctionDef)]
206 |             classes = [n for n in node.body if isinstance(n, ast.ClassDef)]
207 | 
208 |         for function in functions:
209 |             s = self._desc_func(function, lines)
210 |             ans.append('' + s)
211 | 
212 |         for class_ in classes:
213 |             ans.append("class " + class_.name)
214 |             methods = [n for n in class_.body if isinstance(n, ast.FunctionDef)]
215 |             for method in methods:
216 |                 s = self._desc_func(method, lines)
217 |                 ans.append('    ' + s)
218 |         return "\n".join(ans)
219 | 
220 |     def _get_imports(self, code):     
221 |         root = ast.parse(code)
222 | 
223 |         for node in ast.iter_child_nodes(root):
224 |             if isinstance(node, ast.Import):
225 |                 module = [t.name for t in node.names]
226 |                 yield (
227 |                     Import(module, [], []), 
228 |                     " ".join(code.split('\n')[node.lineno-1: node.end_lineno])
229 |                 )
230 |             elif isinstance(node, ast.ImportFrom):  
231 |                 module = node.module.split('.')
232 |                 yield (
233 |                     Import(module, [n.name for n in node.names], [n.name for n in node.names]), 
234 |                     " ".join(code.split('\n')[node.lineno-1: node.end_lineno])
235 |                 )
236 |             else:
237 |                 continue
238 |     
239 |     def _resolve_imports(self, task: Task) -> str:
240 |         repo = (Path(self.data_root) / task.repo).resolve()
241 |         ans = []
242 |         for imp, line in self._get_imports(task.left_context):
243 |             pth = repo / ("/".join(imp.module) + '.py')
244 |             if imp.module and pth.exists():
245 |                 ans.append(line)
246 |                 ans.append(self._parse_file(pth, imp.name))
247 |             else:
248 |                 ans.append(line)
249 |         return '\n'.join(ans)
250 |         
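# Illustrative note on _resolve_imports (hypothetical input, not repo code):
# given a left context containing "from utils import foo" and an existing
# utils.py in the task's repository, the import line is kept and followed by a
# signature summary of foo produced by _parse_file; imports that do not resolve
# to a repository file are kept as-is.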
251 |     def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:
252 |         indent = (len(task.gt) - len(task.gt.lstrip()))
253 |         left_context_parsed = parse_context(task.left_context, indent, 'left')
254 |         left_context = "\n".join([
255 |             left_context_parsed[k] if k != 'imports' else self._resolve_imports(task) + '\n'
256 |             for k in self.left_config
257 |         ])
258 |         right_context_parsed = parse_context(task.right_context, indent, 'right')   
259 |         right_context = "\n".join([right_context_parsed[k] for k in self.right_config])
260 |         return left_context, right_context
261 | 
262 | 
263 | class ImportCopyParser(ImportResolutionParser):
264 |     def _parse_file(self, filename, func_names):
265 |         ans = []
266 |         with open(filename, 'r', encoding='UTF-8') as f:
267 |             text = f.read()
268 |             lines = text.split('\n')
269 |             node = ast.parse(text)
270 |         if func_names:
271 |             functions = [n for n in node.body if isinstance(n, ast.FunctionDef) and n.name not in func_names and n.col_offset == 0]
272 |             classes = [n for n in node.body if isinstance(n, ast.ClassDef) and n.name not in func_names and n.col_offset == 0]
273 |             skip_intervals = [(t.lineno-1, t.end_lineno-1) for t in functions + classes]
274 |             skip_intervals.sort()
275 |         else:
276 |             functions = [n for n in node.body if isinstance(n, ast.FunctionDef)]
277 |             classes = [n for n in node.body if isinstance(n, ast.ClassDef)]
278 |             skip_intervals = []
279 |         interval_id = 0
280 |         i = 0
281 |         while i < len(lines):
282 |             if interval_id < len(skip_intervals) and i >= skip_intervals[interval_id][0]:
283 |                 i = skip_intervals[interval_id][1] + 1  # resume on the line after the skipped def/class
284 |                 interval_id += 1
285 |             else:
286 |                 ans.append(lines[i])
287 |                 i += 1
288 |         return "\n".join(ans)
289 |     
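    # Illustrative trace of the skip logic above (hypothetical input, not repo code):
    # lines = ['x = 1', 'def f():', '    return 2', 'y = 3'], skip_intervals = [(1, 2)]
    # -> 'x = 1' is kept, lines 1-2 (the whole def) are skipped, 'y = 3' is kept.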
290 |     def _resolve_imports(self, task: Task) -> str:
291 |         repo = (Path(self.data_root) / task.repo).resolve()
292 |         ans = []
293 |         for imp, line in self._get_imports(task.left_context):
294 |             module_pth = ("/".join(imp.module) + '.py')
295 |             pth = repo / module_pth
296 |             if imp.module and pth.exists():
297 |                 ans.append('#' + module_pth)
298 |                 ans.append(self._parse_file(pth, imp.name))
299 | 
300 |         cur_module = task.path_from_root.replace('/', '.').replace('.py', '')
301 |         for file in [
302 |             f for f in repo.rglob('*.py') 
303 |             if {"venv_bench", '.ipynb_checkpoints'}.isdisjoint(set([str(p) for p in f.parts]))
304 |         ]:
305 |             file = file.absolute()
306 |             with open(file, 'r', encoding='UTF-8') as f:
307 |                 text = f.read()
308 |             if cur_module in text:
309 |                 ans.append('#' + str(file.relative_to(repo)))
310 |                 ans.append(text)
311 |         ans.append('#' + task.path_from_root)
312 |         for imp, line in self._get_imports(task.left_context):
313 |             ans.append(line)
314 |         return '\n'.join(ans)
315 | 


--------------------------------------------------------------------------------
/lm_eval/datatypes.py:
--------------------------------------------------------------------------------
 1 | from dataclasses import dataclass
 2 | import typing as tp
 3 | 
 4 | @dataclass(frozen=True)
 5 | class Task:
 6 |     repo: str
 7 |     repo_n: int
 8 |     path_from_root: str
 9 |     left_context: str
10 |     right_context: str
11 |     gt: str
12 |     total_tests: int
13 |     doc: str = ''
14 | 
15 | 
16 | 
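# Illustrative sketch of a Task instance (field values are made up for illustration):
# Task(repo='example_repo', repo_n=0, path_from_root='pkg/module.py',
#      left_context='...code before the hole...', right_context='...code after...',
#      gt='    return x\n', total_tests=12, doc='Returns x.')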


--------------------------------------------------------------------------------
/lm_eval/evaluator.py:
--------------------------------------------------------------------------------
  1 | import os
  2 | import typing as tp
  3 | import math
  4 | from collections import defaultdict
  5 | import json
  6 | import re
  7 | from statistics import mean
  8 | from dataclasses import asdict
  9 | from multiprocessing import Pool, Manager
 10 | 
 11 | from .utils import evaluate_override, evaluate_override_wrapped
 12 | from .datatypes import Task
 13 | 
 14 | import logging
 15 | logger = logging.getLogger("RealCode")
 16 | 
 17 | os.environ["TOKENIZERS_PARALLELISM"] = "false"
 18 | 
 19 | def get_num_lines_bin(t: Task):
 20 |     lines = t.gt.strip().count('\n') + 1
 21 |     if 1 <= lines <= 2:
 22 |         return '1-2'
 23 |     elif 3 <= lines <= 5:
 24 |         return '3-5'
 25 |     elif 6 <= lines <= 10:
 26 |         return '6-10'
 27 |     elif lines > 10:
 28 |         return '10+'
 29 | 
 30 | 
 31 | METRIC_AGGREGATIONS = {
 32 |     'total': lambda t: 1, 
 33 |     'repo': lambda t: t.repo,
 34 |     'nlines_bin': get_num_lines_bin,
 35 |     # 'detailed': lambda t: t,
 36 | }
 37 | 
 38 | class PassK:
 39 |     def __init__(self, k: int, n: int):
 40 |         self.k = k
 41 |         self.n = n
 42 | 
 43 |     def __call__(self, correct: int):
 44 |         return (1 - (math.comb(self.n - correct, self.k) / math.comb(self.n, self.k)))
 45 |     
 46 |     def name(self):
 47 |         return f"Pass@{self.k}"
 48 | 
 49 | 
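# Worked example of the unbiased Pass@k estimator implemented above:
# with n=10 samples of which correct=3 pass,
#   Pass@1 = 1 - C(10-3, 1) / C(10, 1) = 1 - 7/10 = 0.3,
# i.e. the probability that one randomly drawn sample is correct.
# assert abs(PassK(k=1, n=10)(3) - 0.3) < 1e-9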
 50 | class Evaluator:
 51 |     def __init__(self, 
 52 |         dataset_root: os.PathLike,
 53 |         num_samples: int,
 54 |         pass_k_list: tp.List[int] = [1],
 55 |         njobs: int = 1,
 56 |         working_dir: tp.Optional[os.PathLike] = None,
 57 |         metric_aggregations: tp.Dict[str, tp.Callable[[Task], int]] = METRIC_AGGREGATIONS
 58 |     ):
 59 |         self.metrics = []
 60 |         for pass_k in pass_k_list:
 61 |             if num_samples < pass_k:
 62 |                 raise ValueError(f"num_samples {num_samples} must be greater than or equal to PassK={pass_k}")
 63 |             self.metrics.append(PassK(pass_k, num_samples))
 64 |         self.dataset_root = dataset_root
 65 |         self.num_samples = num_samples
 66 |         self.njobs = njobs
 67 |         self.working_dir = working_dir
 68 |         self.metric_aggregations = metric_aggregations
 69 |         
 70 |     def evaluate(self, 
 71 |         tasks: tp.List[Task],
 72 |         generations: tp.List[tp.List[str]],
 73 |     ) -> tp.Dict[tp.Literal["aggregated", "detailed"], tp.Any]:
 74 |         logger.info(f"Evaluating {len(tasks)} tasks with {self.num_samples} samples on {self.njobs} CPUs")
 75 |         # Run test evaluation
 76 |         if self.njobs == 1:
 77 |             results = [
 78 |                 [evaluate_override( self.dataset_root, task, gen, os.path.join(self.working_dir) ) for gen in generations[i]]
 79 |                 for i, task in enumerate(tasks)
 80 |             ]
 81 |         else:
 82 |             with Manager() as manager:
 83 |                 cache = manager.dict()
 84 |                 with manager.Pool(processes=self.njobs) as pool:
 85 |                     results = [[None for _2 in range(self.num_samples)] for _ in tasks]
 86 |                     async_result = pool.starmap_async(
 87 |                         evaluate_override_wrapped, [
 88 |                             ( self.dataset_root, task, gen, os.path.join(self.working_dir, f"{j}_{i}"), j, i, cache )
 89 |                                 for j, task in enumerate(tasks) for i, gen in enumerate(generations[j])
 90 |                         ]
 91 |                     )
 92 |                     res = async_result.get()
 93 |                     for task_n, gen_n, result in res:
 94 |                         results[task_n][gen_n] = result
 95 |                         if task_n % 25 == 0 and gen_n == 0:
 96 |                             logger.debug(result['output'])
 97 | 
 98 |         # Calculate metrics per task
 99 |         all_metric_names = ['compilation_error_rate', 'exact_match'] + [t.name() for t in self.metrics]
100 |         metrics = []
101 |         agg_metrics = {level: {metric_name: defaultdict(list) for metric_name in all_metric_names} for level in self.metric_aggregations}
102 |         for task, task_results, task_generations in zip(tasks, results, generations):
103 |             if len(task_results) != self.num_samples:
104 |                 raise ValueError(f"Task {task} has {len(task_results)} samples, expected {self.num_samples}")
105 |             correct = sum([int(t['passed'] == task.total_tests) for t in task_results])
106 |             not_compiles = mean([int(t['passed'] + t['failed'] == 0) for t in task_results])
107 |             exact_match = mean([int(re.sub(r'\W+', '', task.gt) == re.sub(r'\W+', '', gen)) for gen in task_generations])
108 |             task_metrics = {'compilation_error_rate': not_compiles, 'exact_match': exact_match}
109 |             for metric in self.metrics:
110 |                 # If all generations exactly match the repository code, count the metric as 1
111 |                 if exact_match > 1 - 1e-3:
112 |                     task_metrics[metric.name()] = 1.0
113 |                 else:
114 |                     task_metrics[metric.name()] = metric(correct)
115 |             task_metrics['evaluations'] = [t['output'] for t in task_results]
116 |             metrics.append(task_metrics)
117 |             for level, level_func in self.metric_aggregations.items():
118 |                 for metric in all_metric_names:
119 |                     agg_metrics[level][metric][level_func(task)].append(task_metrics[metric])
120 |    
121 |         for level in self.metric_aggregations:
122 |             for metric_name in all_metric_names:
123 |                 means = {val: mean(agg_metrics[level][metric_name][val]) for val in agg_metrics[level][metric_name]}
124 |                 agg_metrics[level][metric_name] = means
125 | 
126 |         # Merge aggregated metrics with per-task details
127 |         metrics = agg_metrics | {
128 |             "detailed": [asdict(task) | task_metric for task, task_metric in zip(tasks, metrics)]
129 |         }
130 |         return metrics
131 | 
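# Sketch of the structure returned by evaluate(), inferred from the code above
# (values are illustrative only):
# {
#   'total':      {'Pass@1': {1: 0.42}, 'exact_match': {1: ...}, 'compilation_error_rate': {1: ...}},
#   'repo':       {'Pass@1': {'some_repo': ...}, ...},
#   'nlines_bin': {'Pass@1': {'1-2': ..., '3-5': ..., '6-10': ..., '10+': ...}, ...},
#   'detailed':   [ {**asdict(task), 'Pass@1': ..., 'evaluations': [...]}, ... ],
# }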


--------------------------------------------------------------------------------
/lm_eval/generators.py:
--------------------------------------------------------------------------------
  1 | import os
  2 | import typing as tp
  3 | import json
  4 | from pathlib import Path
  5 | from dataclasses import asdict, fields
  6 | import re
  7 | 
  8 | from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList
  9 | import torch
 10 | from tqdm import tqdm
 11 | 
 12 | from .datatypes import Task
 13 | from .context_parser import BaseParser, TrivialContextParser
 14 | import logging
 15 | logger = logging.getLogger("RealCode")
 16 | 
 17 | 
 18 | def get_indent(code):
 19 |     line = [t for t in code.split('\n') if t.strip()][0]
 20 |     return len(line) - len(line.strip())
 21 | 
 22 | 
 23 | class InfillGenerator:
 24 |     def __init__(self,
 25 |         accelerator,
 26 |         model_path: str,
 27 |         num_samples: int,
 28 |         prefix_tokens: tp.Union[str, tp.List[int]] = [],
 29 |         middle_tokens: tp.Union[str, tp.List[int]] = [],
 30 |         suffix_tokens: tp.Union[str, tp.List[int]] = [],
 31 |         max_context_length: int = None,
 32 |         left_context_ratio: int = 1,
 33 |         dtype = torch.bfloat16,
 34 |         model_kwargs: tp.Dict = {},
 35 |         generation_params: tp.Dict[str, tp.Any] = {},
 36 |         context_parser: BaseParser = TrivialContextParser(),
 37 |     ):
 38 |         """
 39 |         Class to generate code in fill-in-the-middle mode
 40 |         params:
 41 |             accelerator - huggingface accelerate Accelerator; its device is used for model inference
 42 |             model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained
 43 |             num_samples: int - number of samples to generate per task, values > 1 should be paired with sampling generation_params (e.g. do_sample=True)
 44 |             prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or a list of int tokens
 45 |             middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or a list of int tokens
 46 |             suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or a list of int tokens
 47 |             max_context_length: int = None - truncation length for the prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length)
 48 |             left_context_ratio: int = 1 - proportion of max_context_length given to the left context. 1 means a 1:1 split between left and right, 3 means a 3:1 split in favor of the left context
 49 |             dtype = torch.bfloat16 - torch dtype to use for inference
 50 |             model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained
 51 |             generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate
 52 |             context_parser: BaseParser = TrivialContextParser() - parser that extracts the left and right context strings from a task
 53 |         """
 54 | 
 55 |         logger.info(f"Loading model from {model_path} with kwargs {model_kwargs}")
 56 |         self.device = accelerator.device
 57 |         self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 58 |         model = AutoModelForCausalLM.from_pretrained(model_path, 
 59 |             torch_dtype=dtype, trust_remote_code=True, **model_kwargs
 60 |         )
 61 |         self.model = model.to(self.device).eval()
 62 |         logger.info(f"Loaded model from {model_path} with kwargs {model_kwargs}")
 63 |         logger.info(f"{self.model}")
 64 | 
 65 |         self.num_samples = num_samples
 66 |         
 67 |         self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)
 68 |         self.middle_tokens = self.tokenize_special_tokens(middle_tokens)
 69 |         self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)
 70 | 
 71 |         logger.debug(f"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}")
 72 | 
 73 |         #context truncation parameters
 74 |         self.max_context_length = max_context_length
 75 |         self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)
 76 |         self.right_context_truncate_at = 1 / (left_context_ratio + 1)
 77 | 
 78 |         self.generation_params = generation_params
 79 |         self.generation_params['num_return_sequences'] = self.num_samples
 80 | 
 81 |         self.context_parser = context_parser
 82 | 
 83 |     def tokenize_special_tokens(self, str_or_list:  tp.Union[str, tp.List[int]]) -> torch.Tensor:        
 84 |         if isinstance(str_or_list, str):
 85 |             return self.tokenizer.encode(str_or_list, return_tensors="pt", add_special_tokens=False) # ['input_ids']
 86 |         else:
 87 |             return torch.as_tensor(str_or_list).unsqueeze(0)
 88 | 
 89 |     def _prepare_tokens(self, task: Task) -> torch.Tensor:
 90 |         left_context_str, right_context_str = self.context_parser.get_left_and_right_context(task)
 91 |         logger.info("Task\n" + "\n".join(left_context_str.split('\n')[-20:]))
 92 |         left_tokens = self.tokenizer.encode(
 93 |             left_context_str, return_tensors="pt", add_special_tokens=False, max_length=self.max_context_length)# ['input_ids']
 94 |         right_tokens = self.tokenizer.encode(
 95 |             right_context_str, return_tensors="pt", add_special_tokens=False) # ['input_ids']
 96 |         if self.max_context_length and left_tokens.shape[1] + right_tokens.shape[1] > self.max_context_length:
 97 |             logger.debug("Truncating context")
 98 |             
 99 |             left_tokens = left_tokens[:, -min(int(self.max_context_length * self.left_context_truncate_at), left_tokens.shape[1]) + 1:]
100 |             right_tokens = right_tokens[:, :min(int(self.max_context_length * self.right_context_truncate_at), right_tokens.shape[1]) - 1]
101 |         tokens = torch.cat([self.prefix_tokens, left_tokens, self.middle_tokens, right_tokens, self.suffix_tokens], dim=-1).type(torch.long)
102 |         return tokens
103 |     
104 |     def _postprocess(self, generation: str, indent: int):
105 |         new_gen = []
106 |         for i, line in enumerate(generation.split('\n')):
107 |             line = line.replace("<|fim_pad|>", "")
108 |             # Some tokenizers (e.g. Codellama) drop a leading space on the
109 |             # first generated line, leaving its indent one short of a
110 |             # multiple of four; pad the missing space back.
111 |             if i == 0 and (len(line) - len(line.lstrip())) % 4 == 3:
112 |                 line = " " + line
113 |             if line.strip() != '' and get_indent(line) < indent:
114 |                 break
115 |             new_gen.append(line)
116 |         return "\n".join(new_gen).rstrip() + '\n\n'
117 | 
118 |     @torch.no_grad()
119 |     def generate(self, tasks: tp.List[Task]) -> tp.List[tp.List[str]]:
120 |         res = []
121 |         for i, task in tqdm(enumerate(tasks), desc='Generating (main process)', total=len(tasks)):
122 |             tokens = self._prepare_tokens(task).to(self.device)
123 |             if i == 0:
124 |                 logger.debug(f"\nTokens: {tokens[:, :5]} ... {tokens[:, -5:]}\n")
125 |             generated_tokens = self.model.generate(tokens, **self.generation_params)
126 |             generations = self.tokenizer.batch_decode(generated_tokens[:, tokens.shape[1]:], skip_special_tokens=True)
127 |             gt_indent = get_indent(task.gt)
128 |             # log the raw and post-processed generation for each task
129 |             logger.info(f"Raw Generation for task {i}:\n{generations[0]}")
130 |             logger.info(f"Generation for task {i}:\n{self._postprocess(generations[0], gt_indent)}")
131 |             res.append([self._postprocess(t, gt_indent) for t in generations])
132 |         return res
133 | 
134 | 
135 | class LMGenerator(InfillGenerator):
136 |     def __init__(self, 
137 |         lm_prefix_tokens: tp.Union[str, tp.List[int]] = [],
138 |         lm_suffix_tokens: tp.Union[str, tp.List[int]] = [],
139 |         **kwargs
140 |     ):
141 |         """
142 |         Class to generate code in causal LM mode, uses only left context
143 |         params:
144 |             lm_prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the context. Can be either str or list of int tokens
145 |             lm_suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the context. Can be either str or list of int tokens
146 |         """
147 |         super().__init__(**kwargs)
148 |         self.lm_prefix_tokens = super().tokenize_special_tokens(lm_prefix_tokens)
149 |         self.lm_suffix_tokens = super().tokenize_special_tokens(lm_suffix_tokens)
150 |         logger.debug(f"lm_prefix_tokens: {self.lm_prefix_tokens}, lm_suffix_tokens: {self.lm_suffix_tokens}")
151 | 
152 |     def _prepare_tokens(self, task: Task) -> torch.Tensor:
153 |         left_context_str, _ = self.context_parser.get_left_and_right_context(task)
154 |         logger.info("\n" + "\n".join(left_context_str.split('\n')[-20:]))
155 |         left_tokens = self.tokenizer.encode(
156 |             left_context_str, return_tensors="pt", add_special_tokens=False) # ['input_ids']
157 |         if self.max_context_length and left_tokens.shape[1] > self.max_context_length:
158 |             left_tokens = left_tokens[:, -self.max_context_length:]
159 |         tokens = torch.cat([self.lm_prefix_tokens, left_tokens, self.lm_suffix_tokens], dim=-1).type(torch.long)
160 |         return tokens
161 | 
162 | 
163 |     
164 | 
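
For orientation, _prepare_tokens assembles the prompt as prefix_tokens + left context + middle_tokens + right context + suffix_tokens, splitting the token budget according to left_context_ratio (LMGenerator instead uses lm_prefix_tokens + left context + lm_suffix_tokens). A back-of-the-envelope sketch of the budget arithmetic with made-up token values (the real FIM token ids come from the model config):

    import torch

    max_context_length = 4000
    left_context_ratio = 3                                # 3:1 split in favor of the left context
    left_budget = int(max_context_length * left_context_ratio / (left_context_ratio + 1))   # 3000
    right_budget = int(max_context_length / (left_context_ratio + 1))                       # 1000

    left_tokens = torch.randint(0, 100, (1, 5000))        # stand-ins for tokenized contexts
    right_tokens = torch.randint(0, 100, (1, 2000))
    left_tokens = left_tokens[:, -left_budget:]           # keep the tail of the left context
    right_tokens = right_tokens[:, :right_budget]         # keep the head of the right context

    prefix, middle, suffix = torch.tensor([[1]]), torch.tensor([[2]]), torch.tensor([[3]])
    prompt = torch.cat([prefix, left_tokens, middle, right_tokens, suffix], dim=-1)
    print(prompt.shape)                                   # torch.Size([1, 4003])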


--------------------------------------------------------------------------------
/lm_eval/utils.py:
--------------------------------------------------------------------------------
  1 | import os
  2 | from typing import List, Dict, Any, Tuple
  3 | from shutil import copytree
  4 | from pathlib import Path
  5 | import json
  6 | from subprocess import Popen, TimeoutExpired, PIPE, run
  7 | 
  8 | import re
  9 | import shutil
 10 | 
 11 | from .datatypes import Task
 12 | CONDA_BIN = '/home/user/conda/bin/conda'
 13 | 
 14 | 
 15 | TIMEOUT = 30
 16 | 
 17 | def get_indent(code):
 18 |     line = code.split('\n')[0]
 19 |     return len(line) - len(line.lstrip())
 20 | 
 21 | def run_wrapper(cmd, cwd):
 22 |     my_env = os.environ.copy()
 23 |     my_env['PATH'] = f"{cwd}:" + my_env['PATH']
 24 |     my_env['PYTHONPATH'] = f"{cwd}"
 25 |     res = run([cmd.replace('\n', ' ')], shell=True, capture_output=True, check=False, env=my_env, timeout=TIMEOUT)
 26 |     return res.stdout.decode("utf-8") + res.stderr.decode("utf-8")
 27 | 
 28 | 
 29 | def run_tests(bin: os.PathLike, repo: os.PathLike) -> Dict[str, int]:
 30 |     """
 31 |     Execute all tests in the given path using pytest from bin
 32 |     """
 33 |     try:
 34 |         cmd = run_wrapper(f"cd {str(repo)} && conda run -p {str(bin)} pytest tests --color=no -p no:cacheprovider", cwd=str(repo))
 35 |     except TimeoutExpired:
 36 |         print('TIMEOUT CAUGHT')
 37 |         return {'passed': 0, 'failed': 0,  'output': 'TIMEOUT'}
 38 |     passed = re.findall(r" \d+ passed", cmd)
 39 |     if passed: 
 40 |         passed = int(passed[0][1:-7])
 41 |     else:
 42 |         passed = 0
 43 |     failed = re.findall(r" \d+ failed", cmd)
 44 |     if failed: 
 45 |         failed = int(failed[0][1:-7])
 46 |     else:
 47 |         failed = 0
 48 |     if cmd.find("short test summary info") != -1:
 49 |         out = '\n'.join(cmd.split('\n')[-50:]) # cmd[cmd.find("short test summary info"):]
 50 |     else:
 51 |         out = '\n'.join(cmd.split('\n')[:])
 52 |     return {'passed': passed, 'failed': failed, 'output': out}
 53 |             
 54 | def evaluate_override(
 55 |         root_path: os.PathLike, task: Task, generation: str, workdir: os.PathLike
 56 | ) -> Dict[str, Any]:
 57 |     root_path = Path(root_path)
 58 |     workdir = Path(workdir).absolute()
 59 |     # start from a clean working copy of the repo
 60 |     if os.path.exists(workdir):
 61 |         try:
 62 |             shutil.rmtree(workdir)
 63 |         except FileNotFoundError:
 64 |             print(f"Caught file not found at rmtree {workdir}")
 65 |     workdir.mkdir(parents=True, exist_ok=True)
 66 |     copytree(root_path / task.repo, workdir, dirs_exist_ok=True, # we do not want to copy venv, it is very slow
 67 |         ignore=shutil.ignore_patterns(
 68 |             'venv_bench', '.github', '.git', '.pytest_cache', '*.egg-info', '__pycache__', 'testtemp'
 69 |         )
 70 |     )
 71 |     new_content = task.left_context + generation + task.right_context
 72 |     with open(workdir / task.path_from_root, 'w', encoding='utf-8') as f:
 73 |         f.write(new_content)
 74 | 
 75 |     metrics = run_tests(root_path / task.repo / "venv_bench", workdir)
 76 |     
 77 |     try:
 78 |         shutil.rmtree(workdir)
 79 |     except FileNotFoundError:
 80 |         print(f"Caught file not found at rmtree {workdir}")
 81 |     except OSError as e:
 82 |         print(f"OSError {e} while rm {workdir}")
 83 |     return metrics
 84 | 
 85 | def evaluate_override_wrapped(
 86 |     root_path: os.PathLike, task: Task, generation: str, workdir: os.PathLike, task_n: int, gen_n: int, cache: dict
 87 | ) -> Tuple[int, int, Dict[str, Any]]:
 88 |     cache_key = task.left_context + generation + task.right_context
 89 |     if cache_key in cache:
 90 |         return (task_n, gen_n, cache[cache_key])
 91 |     else:
 92 |         res = evaluate_override(root_path, task, generation, workdir)
 93 |         cache[cache_key] = res
 94 |         return (task_n, gen_n, res)
 95 | 
 96 | 
 97 | def load_dataset(root_path: os.PathLike, meta_file: str = 'dataset.json', limit: int = 10_000) -> List[Task]:
 98 |     with open(Path(root_path) / meta_file, 'r') as f:
 99 |         dataset = [Task(**t) for t in json.load(f)][:limit]
100 |     return dataset 
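
The passed/failed counts in run_tests are scraped from pytest's summary line with the regexes above. A standalone sketch of that parsing on a made-up summary string:

    import re

    cmd = "=== 3 failed, 12 passed, 1 warning in 4.56s ==="   # toy tail of a pytest run
    passed = re.findall(r" \d+ passed", cmd)
    passed = int(passed[0][1:-7]) if passed else 0            # " 12 passed" -> 12
    failed = re.findall(r" \d+ failed", cmd)
    failed = int(failed[0][1:-7]) if failed else 0            # " 3 failed"  -> 3
    assert (passed, failed) == (12, 3)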


--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
  1 | 
  2 | import hydra
  3 | import torch
  4 | import numpy as np
  5 | import random
  6 | import json
  7 | import os
  8 | 
  9 | from lm_eval.generators import InfillGenerator, LMGenerator
 10 | from lm_eval.evaluator import Evaluator
 11 | from lm_eval.context_parser import TrivialContextParser
 12 | from lm_eval.utils import load_dataset
 13 | 
 14 | from omegaconf import DictConfig, OmegaConf
 15 | from accelerate import Accelerator
 16 | from accelerate.utils import gather_object
 17 | 
 18 | 
 19 | 
 20 | import logging
 21 | logger = logging.getLogger("RealCode")
 22 | logger.setLevel(logging.INFO)
 23 | 
 24 | def seed_all(seed):
 25 |     random.seed(seed)
 26 |     np.random.seed(seed)
 27 |     torch.manual_seed(seed)
 28 |     torch.cuda.manual_seed(seed)
 29 | 
 30 | @hydra.main(config_path="config", config_name="config", version_base="1.3")
 31 | def main(cfg: DictConfig) -> None:
 32 |     seed_all(cfg.seed)
 33 |     print(cfg)
 34 |     accelerator = Accelerator()
 35 |     dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
 36 |     logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
 37 |     if cfg.do_generation:
 38 |         if 'context_parser' in cfg:
 39 |             parser = hydra.utils.instantiate(cfg.context_parser)
 40 |         else:
 41 |             parser = TrivialContextParser()
 42 | 
 43 |         dtype_map = {'fp16': torch.float16, 'fp32': torch.float, 'bf16': torch.bfloat16}
 44 |         if cfg.generator_mode == 'infill':
 45 |             generator = InfillGenerator(
 46 |                 accelerator=accelerator,
 47 |                 model_path=cfg.model_path,
 48 |                 dtype=dtype_map[cfg.dtype],
 49 |                 num_samples=cfg.num_samples,
 50 |                 prefix_tokens=cfg.prefix_tokens,
 51 |                 middle_tokens=cfg.middle_tokens,
 52 |                 suffix_tokens=cfg.suffix_tokens,
 53 |                 max_context_length=cfg.max_context_length,
 54 |                 generation_params=dict(cfg.generation_params),
 55 |                 model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {},
 56 |                 context_parser=parser,
 57 |                 left_context_ratio=cfg.left_context_ratio,
 58 |             )
 59 |         elif cfg.generator_mode == 'lm':
 60 |             generator = LMGenerator(
 61 |                 accelerator=accelerator,
 62 |                 model_path=cfg.model_path,
 63 |                 dtype=dtype_map[cfg.dtype],
 64 |                 num_samples=cfg.num_samples,
 65 |                 lm_prefix_tokens=cfg.lm_prefix_tokens if 'lm_prefix_tokens' in cfg else [],
 66 |                 lm_suffix_tokens=cfg.lm_suffix_tokens if 'lm_suffix_tokens' in cfg else [],
 67 |                 max_context_length=cfg.max_context_length,
 68 |                 generation_params=dict(cfg.generation_params),
 69 |                 model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {},
 70 |                 context_parser=parser,
 71 |             )
 72 |         else:
 73 |             raise ValueError(f"generator_mode can be either 'lm' or 'infill', found {cfg.generator_mode}")
 74 |         
 75 | 
 76 | 
 77 |         logger.info("Starting generation")
 78 |         with accelerator.split_between_processes(dataset) as part:
 79 |             part_generations = generator.generate(part)
 80 |             generations = gather_object(part_generations)
 81 |         if accelerator.is_main_process:
 82 |             with open(cfg.generations_save_path, "w") as f:
 83 |                 json.dump(generations, f)
 84 |         del generator.model
 85 |     else:
 86 |         with open(cfg.generations_save_path, "r") as f:
 87 |             generations = json.load(f)
 88 | 
 89 |     if cfg.do_eval and accelerator.is_main_process:
 90 |         evaluator = Evaluator(
 91 |             dataset_root=cfg.dataset_root,
 92 |             num_samples=cfg.num_samples,
 93 |             pass_k_list=cfg.pass_k_list,
 94 |             njobs=cfg.njobs,
 95 |             working_dir=cfg.working_dir,
 96 |         )
 97 |         logger.info("Starting evaluation")
 98 |         metrics = evaluator.evaluate(dataset, generations)
 99 |         logger.info(json.dumps(metrics['total'], indent=4))
100 |         if cfg.metrics_save_path:
101 |             try:
102 |                 with open(cfg.metrics_save_path, "w") as f:
103 |                     json.dump(metrics, f)
104 |             except FileNotFoundError:
105 |                 logger.warning("metrics_save_path directory does not exist (check for slashes in your cli args); metrics will not be saved")
106 | 
107 | 
108 | if __name__ == "__main__":
109 |     main()
110 | 
111 | 
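
For orientation, a launch typically looks like the lines below. The group override names (model=..., task=...) are assumptions based on the config/ tree and should be checked against config.yaml; the remaining flags are cfg keys read in main():

    # single process
    python main.py model=starcoder task=FG generator_mode=infill num_samples=1
    # multi-GPU generation with accelerate; evaluation still runs on the main process only
    accelerate launch main.py model=starcoder task=FG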


--------------------------------------------------------------------------------
/prepare_data/run.py:
--------------------------------------------------------------------------------
 1 | import pandas as pd
 2 | from git import Repo
 3 | import os
 4 | import shutil
 5 | import subprocess
 6 | from pathlib import Path
 7 | from tqdm import tqdm
 8 | from joblib import Parallel, delayed
 9 | 
10 | os.environ['GIT_PYTHON_TRACE'] = 'full'
11 | 
12 | 
13 | def run(cmd, check=False):
14 |     return subprocess.run(
15 |         [cmd.replace('\n', ' ')], 
16 |         shell=True, capture_output=True, check=check,
17 |     ).stdout.decode("utf-8")
18 | 
19 | def delete_and_report(path):
20 |     if os.path.exists(path):
21 |         shutil.rmtree(path, ignore_errors=False)
22 |         if os.path.exists(path):
23 |             raise ValueError(f"Unable to delete {path}, please delete it manually")
24 | 
25 | def setup(repo):
26 |     """
 27 |     builds the conda environment inside the repo that will be used to run its tests
28 |     """
29 |     base_path = str(repo.resolve().absolute())
30 |     delete_and_report(f"{base_path}/venv_bench")
31 |     delete_and_report(f"{base_path}/build")
 32 |     for p in Path(base_path).glob("*.egg-info"): delete_and_report(str(p))  # the glob must be expanded here; rmtree does not do it
33 |     try:
34 |         d = run(f"conda create -p {base_path}/venv_bench --copy -y python=3.11 poetry", check=True)      
35 |     except subprocess.CalledProcessError as e:
36 |         print(repo, 'create')
37 |         print(e.stdout)
38 |         print(e.stderr)
39 |         raise e
40 |     if os.path.exists(f"{base_path}/poetry.lock"):
41 |         run(f"rm {base_path}/reqs_p.txt")
42 |         out = run(f"cd {base_path} && conda run -p {base_path}/venv_bench poetry export -o reqs_p.txt --without-hashes")
43 |         out = run(f"cd {base_path} && conda run -p {base_path}/venv_bench poetry export --with dev -o reqs_p.txt --without-hashes")
44 |         out = run(f"cd {base_path} && conda run -p {base_path}/venv_bench poetry export --with test -o reqs_p.txt --without-hashes")
45 |     
46 |     for req_filename in ["reqs_p.txt", "requirements.txt", "linux_requirements.txt",
47 |         "requirements-ci.txt","requirements_ci.txt", "dev-requirements.txt",
48 |         'requirements_dev.txt', "requirements-dev.txt"]:
49 |         if os.path.exists(f"{base_path}/{req_filename}"):
50 |             out = run(f"conda run -p {base_path}/venv_bench python -m pip install -r {base_path}/{req_filename}", check=True)
51 |     skip_install = False
52 |     try: 
53 |         if not skip_install and (os.path.exists(f"{base_path}/setup.py") or os.path.exists(f"{base_path}/pyproject.toml")):
54 |             out = run(f"conda run -p {base_path}/venv_bench python -m pip install {base_path}", check=True)        
55 |     except subprocess.CalledProcessError as e:
56 |         print('='*40)
57 |         print(repo, 'pip install warn')
58 |         print('='*40)
 59 |     for toml_option in ["[test]", "[dev]", "[all]"]:  # extras install; quoted so the shell does not glob the brackets
 60 |         out = run(f"conda run -p {base_path}/venv_bench python -m pip install '{base_path}{toml_option}'")
61 |     out = run(f"conda run -p {base_path}/venv_bench pip install pytest")
62 |     if not os.path.exists(f"{repo}/venv_bench/bin/python"):
63 |         raise ValueError(f"{repo}/venv_bench/bin/python not found")
64 |     print(repo, "done")
65 |     return base_path
66 | 
67 | 
68 | def build_envs(source_dir):
69 |     repos_parent = Path(source_dir)
70 |     Parallel(n_jobs=8)(
71 |         delayed(setup)(path)
72 |         for path in tqdm([t for t in repos_parent.iterdir() if os.path.isdir(t)], desc='building_envs')
73 |     )
74 | 
75 | 
76 | 
77 | if __name__ == '__main__':
78 |     dataset_dir = '../data/realcode_v3'
79 |     build_envs(dataset_dir)
80 |     print('Done')
81 | 
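
One sharp edge in setup(): every command goes through run() with shell=True, so bracketed extras like [test] are glob patterns to the shell unless quoted. A small sketch of the quoting (shlex is stdlib; the path is hypothetical):

    import shlex

    base_path = "/data/realcode_v3/some_repo"   # hypothetical repo path
    extra = "[test]"
    # shlex.quote wraps the argument so the shell treats the brackets literally
    cmd = f"python -m pip install {shlex.quote(base_path + extra)}"
    print(cmd)   # python -m pip install '/data/realcode_v3/some_repo[test]'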


--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | hydra-core==1.3.2
2 | hydra-joblib-launcher==1.2.0
3 | pandas
4 | tqdm
5 | pytest
6 | transformers==4.48.0
7 | torch
8 | accelerate
9 | numpy
10 | joblib
11 | GitPython
12 | 


--------------------------------------------------------------------------------
/results/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore


--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NLP-Core-Team/RealCode_eval/f70984fb83022eb191ba94bcf55729c2fc64aa80/tests/__init__.py


--------------------------------------------------------------------------------
/tests/test_evaluator.py:
--------------------------------------------------------------------------------
 1 | import pytest
 2 | from pathlib import Path
 3 | import os
 4 | import json
 5 | import joblib
 6 | 
 7 | import lm_eval.utils
 8 | import lm_eval.evaluator
 9 | 
10 | @pytest.fixture
11 | def dataset_path():
12 |     return str(Path('./data/realcode_v3').resolve())
13 | 
14 | def get_indent(code):
15 |     line = code.split('\n')[0]
16 |     return len(line) - len(line.lstrip())
17 | 
18 | 
19 | @pytest.mark.parametrize('dataset_file', ['realcode_v3_SG.json', 'realcode_v3_FG.json'])
20 | def test_perfect_preds(dataset_path, dataset_file, workdir='./workdir'):
21 |     print("Testing where Pass@1 should be 1")
22 |     root = Path(dataset_path)
23 |     print("Dataset is at", root, dataset_file)
24 |     NJOBS = 8
25 | 
26 |     dataset = lm_eval.utils.load_dataset(root, dataset_file, limit=10_000)
27 |     gt_ans = [[t.gt] for t in dataset]  # ground-truth function bodies, so every task should pass
28 |     evaluator = lm_eval.evaluator.Evaluator(
29 |         root,
30 |         num_samples=1,
31 |         pass_k_list=[1],
32 |         njobs=NJOBS,
33 |         working_dir=workdir
34 |     )
35 |     metrics = evaluator.evaluate(dataset, gt_ans)
36 |     wrong = []
37 |     for metric in metrics['detailed']:
38 |         if metric['Pass@1'] < 1 - 1e-3:
39 |             wrong.append(metric)
40 |             print(metric['Pass@1'], metric['repo'], metric['repo_n'], metric['path_from_root'], metric['evaluations'][0])
41 |     with open('test_perfect_preds_fails.json', 'w') as f:
42 |         json.dump(wrong, f)
43 |     for x in wrong:
44 |         print(x['repo'], x['path_from_root'], x['repo_n'])
45 |     assert len(wrong) == 0
46 | 
47 | 
48 | @pytest.mark.parametrize('dataset_file', ['realcode_v3_SG.json', 'realcode_v3_FG.json'])
49 | def test_incorrect_answers(dataset_path, dataset_file):
50 |     print("Testing where Pass@1 should be 0")
51 |     root = Path(dataset_path)
52 |     print("Dataset is at", root, dataset_file)
53 |     NJOBS = 8
54 | 
55 |     dataset = lm_eval.utils.load_dataset(root, dataset_file, limit=10_000)
56 |     stub_ans = [[" " * get_indent(t.gt) + 'pass\n'] for t in dataset]  # bodies replaced with a bare pass
57 |     evaluator = lm_eval.evaluator.Evaluator(
58 |         root,
59 |         1,
60 |         [1],
61 |         njobs=NJOBS,
62 |         working_dir='./workdir'
63 |     )
64 |     metrics = evaluator.evaluate(dataset, stub_ans)
65 |     wrong = []
66 |     for metric in metrics['detailed']:
67 |         if metric['Pass@1'] > 1e-3:
68 |             wrong.append(metric)
69 |             print(metric['Pass@1'], metric['repo'], metric['repo_n'], metric['path_from_root'], metric['evaluations'][0])
70 |     print('\n' * 10)
71 |     with open('test_incorrest_answers_fails.json', 'w') as f:
72 |         json.dump(wrong, f)
73 |     for x in wrong:
74 |         print(x['repo'], x['path_from_root'], x['repo_n'])
75 |     assert len(wrong) == 0
76 | 
77 | 
78 | 
79 | @pytest.mark.parametrize('dataset_file', ['realcode_v3_SG.json', 'realcode_v3_FG.json'])
80 | def test_perfect_preds_parallel(dataset_path, dataset_file):
81 |     """
82 |     Like test_perfect_preds but with parallel evaluation
83 |     """
84 |     joblib.Parallel(n_jobs=8)(joblib.delayed(test_perfect_preds)(dataset_path, dataset_file, workdir=str(i)) for i in range(2))
85 | 


--------------------------------------------------------------------------------
/workdir/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore


--------------------------------------------------------------------------------