├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug-report.md
│   │   ├── feature-request.md
│   │   └── usage-question.md
│   └── stale.yml
├── .gitignore
├── CITATION.cff
├── CONTRIBUTING.md
├── DISCLAIMER
├── LICENSE
├── README.md
├── README_EN.md
├── codeassist
│   ├── __init__.py
│   ├── create_dataset.py
│   ├── gpt2_coder.py
│   └── wizard_coder.py
├── docs
│   ├── 36-text-rep-examples.md
│   ├── api.png
│   ├── codeassist.png
│   ├── hf_model.png
│   ├── source_code_datasets
│   │   ├── source_code_dataset.md
│   │   └── upload_dataset.py
│   └── wechat.jpeg
├── examples
│   ├── data
│   │   ├── code_alpaca_20k_50.jsonl
│   │   └── train_code_5k.txt
│   ├── distilgpt2_demo.py
│   ├── gpt2_demo.py
│   ├── gradio_demo.py
│   ├── inference_demo.py
│   ├── original_gpt2_demo.py
│   ├── prepare_code_data.py
│   ├── server.py
│   ├── training_gpt2_mydata.py
│   ├── training_wizardcoder_mydata.py
│   ├── use_transformers_gpt2.py
│   └── wizardcoder_demo.py
├── requirements.txt
├── setup.cfg
├── setup.py
└── tests
    ├── test.txt
    ├── test_issue.py
    └── test_qps.py
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | ### Describe the bug
11 | Please provide a clear and concise description of what the bug is. If applicable, add screenshots to help explain your problem, especially for visualization related problems.
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | - [ ] I checked to make sure that this is not a duplicate issue
11 |
12 | ### Describe the solution you'd like
13 | A clear and concise description of what you want to happen.
14 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/usage-question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Usage Question
3 | about: Ask a question about usage
4 | title: ''
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 | ### Describe the Question
11 | Please provide a clear and concise description of what the question is.
12 |
13 |
--------------------------------------------------------------------------------
/.github/stale.yml:
--------------------------------------------------------------------------------
1 | # Number of days of inactivity before an issue becomes stale
2 | daysUntilStale: 60
3 | # Number of days of inactivity before a stale issue is closed
4 | daysUntilClose: 7
5 | # Issues with these labels will never be considered stale
6 | exemptLabels:
7 | - pinned
8 | - security
9 | # Label to use when marking an issue as stale
10 | staleLabel: wontfix
11 | # Comment to post when marking an issue as stale. Set to `false` to disable
12 | markComment: >
13 | This issue has been automatically marked as stale because it has not had
14 | recent activity. It will be closed if no further activity occurs. Thank you
15 |   for your contributions. (This issue was automatically closed by the bot due to long inactivity; feel free to ask again if needed.)
16 | # Comment to post when closing a stale issue. Set to `false` to disable
17 | closeComment: false
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 | .idea
106 | .github
107 | *.zip
108 | download/
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: "Xu"
5 | given-names: "Ming"
6 | title: "code-autocomplete: Code AutoComplete with GPT2 model"
7 | url: "https://github.com/shibing624/code-autocomplete"
8 | date-released: 2022-03-01
9 | version: 0.0.4
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | We are happy to accept your contributions to make `codeassist` better and more awesome! To avoid unnecessary work on either
4 | side, please stick to the following process:
5 |
6 | 1. Check if there is already [an issue](https://github.com/shibing624/codeassist/issues) for your concern.
7 | 2. If there is not, open a new one to start a discussion. We hate to close finished PRs!
8 | 3. If we decide your concern needs code changes, we would be happy to accept a pull request. Please consider the
9 | commit guidelines below.
--------------------------------------------------------------------------------
/DISCLAIMER:
--------------------------------------------------------------------------------
1 | The software project, data, and models provided by our GitHub project are provided "as is," without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and non-infringement.
2 |
3 | In no event shall the project owners or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software project, data, or models, even if advised of the possibility of such damage.
4 |
5 | Users of this software project, data, and models are solely responsible for any consequences of their use. The project owners and contributors shall not be held responsible for any subsequent or potential harm caused by the use of this software project, data, or models.
6 |
7 | By using this software project, data, or models, users accept and agree to this disclaimer. If users do not agree to the terms of this disclaimer, they should not use this software project, data, or models.
8 |
9 | It is important to note that this software project, data, and models are still in the research phase and are provided for experimental purposes only. As such, the project owners and contributors do not guarantee the accuracy, completeness, or usefulness of the software project, data, or models.
10 |
11 | Furthermore, due to the experimental nature of this software project, data, and models, it is possible that they may contain or generate inappropriate responses, errors, or inconsistencies. Users should exercise caution when using this software project, data, or models, and should not rely solely on them for any critical or sensitive tasks.
12 |
13 | The project owners and contributors shall not be held responsible for any damages, losses, or liabilities arising from the use of this software project, data, or models, including but not limited to, any inappropriate responses generated by the software project, data, or models.
14 |
15 | By using this software project, data, or models, users acknowledge and accept the experimental nature of the software project, data, and models, and understand the potential risks and limitations associated with their use. If users do not agree to the terms of this disclaimer, they should not use this software project, data, or models.
16 |
17 | The software project, data, and models provided by our GitHub project are intended for research purposes only. They should not be used for any commercial, business, or legal purposes, and should not be relied upon as a substitute for professional advice or judgment.
18 |
19 | Users of this software project, data, and models are strictly prohibited from using them for any commercial purposes, including but not limited to, selling, licensing, or distributing the software project, data, or models to third parties.
20 |
21 | The project owners and contributors shall not be held responsible for any damages, losses, or liabilities arising from the use of this software project, data, or models for any commercial or business purposes.
22 |
23 | By using this software project, data, or models, users agree to use them for research purposes only, and not for any commercial or business purposes. If users do not agree to the terms of this disclaimer, they should not use this software project, data, or models.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [**🇨🇳中文**](https://github.com/shibing624/codeassist/blob/main/README.md) | [**🌐English**](https://github.com/shibing624/codeassist/blob/main/README_EN.md) | [**📖文档/Docs**](https://github.com/shibing624/codeassist/wiki) | [**🤖模型/Models**](https://huggingface.co/shibing624)
2 |
3 |
8 |
9 | -----------------
10 |
11 | # CodeAssist: Advanced Code Completion Tool
12 | [](https://badge.fury.io/py/CodeAssist)
13 | [](CONTRIBUTING.md)
14 | [](https://github.com/shibing624/CodeAssist/graphs/contributors)
15 | [](LICENSE)
16 | [](requirements.txt)
17 | [](https://github.com/shibing624/CodeAssist/issues)
18 | [](#Contact)
19 |
20 | ## Introduction
21 |
22 | **CodeAssist** is an advanced code completion tool that intelligently provides high-quality code completions for Python, Java, C++, and other languages.
23 |
25 |
26 |
27 | ## Features
28 |
29 | - GPT-based code completion
30 | - Code completion for `Python`, `Java`, `C++`, `JavaScript`, and more
31 | - Line-level and block-level code completion
32 | - Train (fine-tune) and predict with your own data
33 |
34 | ### Release Models
35 |
36 | | Arch | BaseModel | Model | Model Size |
37 | |:-------|:------------------|:------------------------------------------------------------------------------------------------------------------------|:----------:|
38 | | GPT | gpt2 | [shibing624/code-autocomplete-gpt2-base](https://huggingface.co/shibing624/code-autocomplete-gpt2-base) | 487MB |
39 | | GPT | distilgpt2 | [shibing624/code-autocomplete-distilgpt2-python](https://huggingface.co/shibing624/code-autocomplete-distilgpt2-python) | 319MB |
40 | | GPT | bigcode/starcoder | [WizardLM/WizardCoder-15B-V1.0](https://huggingface.co/WizardLM/WizardCoder-15B-V1.0) | 29GB |
41 |
42 |
43 |
44 | ## Install
45 |
46 | ```shell
47 | pip install torch # conda install pytorch
48 | pip install -U codeassist
49 | ```
50 |
51 | or
52 |
53 | ```shell
54 | git clone https://github.com/shibing624/codeassist.git
55 | cd codeassist
56 | python setup.py install
57 | ```
58 |
59 | ## Usage
60 |
61 | ### WizardCoder model
62 |
63 | WizardCoder-15B is `bigcode/starcoder` fine-tuned on Alpaca code data. You can use the following code to generate code:
64 |
65 | example: [examples/wizardcoder_demo.py](https://github.com/shibing624/CodeAssist/blob/main/examples/wizardcoder_demo.py)
66 |
67 | ```python
68 | import sys
69 |
70 | sys.path.append('..')
71 | from codeassist import WizardCoder
72 |
73 | m = WizardCoder("WizardLM/WizardCoder-15B-V1.0")
74 | print(m.generate('def load_csv_file(file_path):')[0])
75 | ```
76 |
77 | output:
78 |
79 |
80 | ```python
81 | import csv
82 |
83 | def load_csv_file(file_path):
84 | """
85 | Load data from a CSV file and return a list of dictionaries.
86 | """
87 | # Open the file in read mode
88 | with open(file_path, 'r') as file:
89 | # Create a CSV reader object
90 | csv_reader = csv.DictReader(file)
91 | # Initialize an empty list to store the data
92 | data = []
93 | # Iterate over each row of data
94 | for row in csv_reader:
95 | # Append the row of data to the list
96 | data.append(row)
97 | # Return the list of data
98 | return data
99 | ```
100 |
101 | The model output is impressively effective. It currently supports English and Chinese input; you can enter instructions or code prefixes as needed.
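
Since `generate` takes a free-form prompt, instruction-style input works as well; a minimal sketch (the instruction text below is an illustrative example, not a required format):

```python
import sys

sys.path.append('..')
from codeassist import WizardCoder

m = WizardCoder("WizardLM/WizardCoder-15B-V1.0")
# Pass an instruction instead of a code prefix
print(m.generate('Write a Python function that merges two sorted lists into one sorted list.')[0])
```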
102 |
103 | ### distilgpt2 model
104 |
105 |
106 | A code autocomplete model fine-tuned from distilgpt2; you can use it with the following code:
107 |
108 | example: [examples/distilgpt2_demo.py](https://github.com/shibing624/CodeAssist/blob/main/examples/distilgpt2_demo.py)
109 |
110 | ```python
111 | import sys
112 |
113 | sys.path.append('..')
114 | from codeassist import GPT2Coder
115 |
116 | m = GPT2Coder("shibing624/code-autocomplete-distilgpt2-python")
117 | print(m.generate('import torch.nn as')[0])
118 | ```
119 |
120 | output:
121 |
122 | ```shell
123 | import torch.nn as nn
124 | import torch.nn.functional as F
125 | ```
126 |
127 | ### Use with huggingface/transformers
128 |
129 | example: [examples/use_transformers_gpt2.py](https://github.com/shibing624/CodeAssist/blob/main/examples/use_transformers_gpt2.py)
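
If you prefer plain transformers without the codeassist wrapper, the model loads with the standard GPT-2 classes; a minimal sketch (the generation parameters are illustrative, see the example file for the full version):

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("shibing624/code-autocomplete-distilgpt2-python")
model = GPT2LMHeadModel.from_pretrained("shibing624/code-autocomplete-distilgpt2-python")

prompt = "import torch.nn as"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_length=64,
    do_sample=True,
    top_k=50,
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```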
130 |
131 | ### Train Model
132 | #### Train WizardCoder model
133 | example: [examples/training_wizardcoder_mydata.py](https://github.com/shibing624/CodeAssist/blob/main/examples/training_wizardcoder_mydata.py)
134 |
135 | ```shell
136 | cd examples
137 | CUDA_VISIBLE_DEVICES=0,1 python training_wizardcoder_mydata.py --do_train --do_predict --num_epochs 1 --output_dir outputs-wizard --model_name WizardLM/WizardCoder-15B-V1.0
138 | ```
139 |
140 | - GPU memory: 31GB
141 | - fine-tuning needs 2 x V100 (32GB)
142 | - inference needs 1 x V100 (32GB)
143 |
144 | #### Train distilgpt2 model
145 | example: [examples/training_gpt2_mydata.py](https://github.com/shibing624/CodeAssist/blob/main/examples/training_gpt2_mydata.py)
146 |
147 | ```shell
148 | cd examples
149 | python training_gpt2_mydata.py --do_train --do_predict --num_epochs 15 --output_dir outputs-gpt2 --model_name gpt2
150 | ```
151 |
152 | PS: the resulting fine-tuned model is GPT2-python: [shibing624/code-autocomplete-gpt2-base](https://huggingface.co/shibing624/code-autocomplete-gpt2-base);
153 | fine-tuning took about 24 hours on a V100.
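
You can also drive training from Python directly; a minimal sketch using `GPT2Coder.train_model` (the file paths are placeholders for your own data):

```python
from codeassist import GPT2Coder

model = GPT2Coder("gpt2")  # or "distilgpt2"
# train_file/eval_file are plain text files of source code
model.train_model(
    train_file="download/python/train.txt",
    output_dir="outputs-gpt2",
    eval_file="download/python/valid.txt",
    num_epochs=15,
)
print(model.generate("import torch.nn as")[0])
```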
154 |
155 |
156 | ### Server
157 |
158 | Start the FastAPI server:
159 |
160 | example: [examples/server.py](https://github.com/shibing624/CodeAssist/blob/main/examples/server.py)
161 |
162 | ```shell
163 | cd examples
164 | python server.py
165 | ```
166 |
167 | Open the URL: http://0.0.0.0:8001/docs
168 |
169 | ![api](docs/api.png)
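
Besides the interactive docs page, FastAPI serves a machine-readable schema at `/openapi.json`, which you can use to discover the actual routes; a small sketch (this relies only on FastAPI's built-in endpoints, not on any project-specific route):

```python
import requests

# FastAPI exposes the OpenAPI schema by default; list the available routes.
schema = requests.get("http://0.0.0.0:8001/openapi.json").json()
print(list(schema["paths"].keys()))
```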
170 |
171 |
172 |
173 | ## Dataset
174 |
175 | CodeAssist also lets you build a custom dataset. Below is an example of the building process.
176 |
177 | Let's use Python code from [Awesome-pytorch-list](https://github.com/bharathgs/Awesome-pytorch-list):
178 |
179 | 1. We want the model to help auto-complete code at a general level, and the code collected from this list suits the need.
180 | 2. The code in these projects is well written (high quality).
181 |
182 | dataset tree:
183 |
184 | ```shell
185 | examples/download/python
186 | ├── train.txt
187 | ├── valid.txt
188 | └── test.txt
189 | ```
190 |
191 | There are three ways to build the dataset (a sketch for dumping a loaded split to a training text file follows this list):
192 | 1. Use the huggingface/datasets library to load the dataset:
193 | huggingface datasets [https://huggingface.co/datasets/shibing624/source_code](https://huggingface.co/datasets/shibing624/source_code)
194 |
195 | ```python
196 | from datasets import load_dataset
197 | dataset = load_dataset("shibing624/source_code", "python") # python or java or cpp
198 | print(dataset)
199 | print(dataset['test'][0:10])
200 | ```
201 |
202 | output:
203 | ```shell
204 | DatasetDict({
205 | train: Dataset({
206 | features: ['text'],
207 | num_rows: 5215412
208 | })
209 | validation: Dataset({
210 | features: ['text'],
211 | num_rows: 10000
212 | })
213 | test: Dataset({
214 | features: ['text'],
215 | num_rows: 10000
216 | })
217 | })
218 | {'text': [
219 | " {'max_epochs': [1, 2]},\n",
220 | ' refit=False,\n', ' cv=3,\n',
221 | " scoring='roc_auc',\n", ' )\n',
222 | ' search.fit(*data)\n',
223 | '',
224 | ' def test_module_output_not_1d(self, net_cls, data):\n',
225 | ' from skorch.toy import make_classifier\n',
226 | ' module = make_classifier(\n'
227 | ]}
228 | ```
229 |
230 | 2. Download the dataset from the cloud:
231 |
232 | | Name | Source | Download | Size |
233 | | :------- | :--------- | :---------: | :---------: |
234 | | Python+Java+CPP source code | Awesome-pytorch-list (5.22 million lines) | [github_source_code.zip](https://github.com/shibing624/codeassist/releases/download/0.0.4/source_code.zip) | 105M |
235 |
236 | Download the dataset, unzip it, and put it under `examples/`.
237 |
238 | 3. Get the source code from scratch and build the dataset:
239 |
240 | [prepare_code_data.py](https://github.com/shibing624/CodeAssist/blob/main/examples/prepare_code_data.py)
241 |
242 | ```shell
243 | cd examples
244 | python prepare_code_data.py --num_repos 260
245 | ```
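
As referenced above, to turn a split loaded with huggingface/datasets (way 1) into the plain-text train file that `GPT2Coder.train_model` expects, write the `text` rows out to disk; a minimal sketch (the output file name is a placeholder):

```python
from datasets import load_dataset

dataset = load_dataset("shibing624/source_code", "python")
# Each row's "text" field already ends with a newline, so plain concatenation works.
with open("train.txt", "w", encoding="utf8") as f:
    for row in dataset["validation"]:
        f.write(row["text"])
```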
246 |
247 |
248 | ## Contact
249 |
250 | - Issues (suggestions):
251 |   [](https://github.com/shibing624/CodeAssist/issues)
252 | - Email me: xuming, xuming624@qq.com
253 | - WeChat me: add my WeChat ID *xuming624* with the note *name-company-NLP* to join the NLP discussion group.
254 |
255 |
256 |
257 | ## Citation
258 |
259 | If you use codeassist in your research, please cite it in the following format:
260 |
261 | APA:
262 | ```latex
263 | Xu, M. codeassist: Code AutoComplete with GPT model (Version 1.0.0) [Computer software]. https://github.com/shibing624/codeassist
264 | ```
265 |
266 | BibTeX:
267 | ```latex
268 | @software{Xu_codeassist,
269 | author = {Ming Xu},
270 | title = {CodeAssist: Code AutoComplete with Generation model},
271 | url = {https://github.com/shibing624/codeassist},
272 | version = {1.0.0}
273 | }
274 | ```
275 |
276 | ## License
277 | This repository is licensed under the [The Apache License 2.0](LICENSE).
278 |
279 | Please follow the [Attribution-NonCommercial 4.0 International](https://github.com/nlpxucan/WizardLM/blob/main/WizardCoder/MODEL_WEIGHTS_LICENSE) license when using the WizardCoder model.
280 |
281 |
282 | ## Contribute
283 |
284 | The project code is still rough. If you have any improvements, you are welcome to submit them back to this project. Before submitting, please note the following two points:
285 |
286 | - Add corresponding unit tests in `tests`
287 | - Run all unit tests with `python setup.py test` and make sure they all pass
288 |
289 | After that, you can submit a PR.
290 |
291 | ## Reference
292 |
293 | - [gpt-2-simple](https://github.com/minimaxir/gpt-2-simple)
294 | - [galois-autocompleter](https://github.com/galois-autocompleter/galois-autocompleter)
295 | - [WizardLM/WizardCoder-15B-V1.0](https://huggingface.co/WizardLM/WizardCoder-15B-V1.0)
296 |
--------------------------------------------------------------------------------
/README_EN.md:
--------------------------------------------------------------------------------
1 | [**🇨🇳中文**](https://github.com/shibing624/CodeAssist/blob/main/README.md) | [**🌐English**](https://github.com/shibing624/CodeAssist/blob/main/README_EN.md) | [**📖文档/Docs**](https://github.com/shibing624/CodeAssist/wiki) | [**🤖模型/Models**](https://huggingface.co/shibing624)
2 |
3 |
8 |
9 | -----------------
10 |
11 | # CodeAssist: Advanced Code Completion Tool
12 | [](https://badge.fury.io/py/CodeAssist)
13 | [](CONTRIBUTING.md)
14 | [](https://github.com/shibing624/CodeAssist/graphs/contributors)
15 | [](LICENSE)
16 | [](requirements.txt)
17 | [](https://github.com/shibing624/CodeAssist/issues)
18 | [](#Contact)
19 |
20 | ## Introduction
21 |
22 | **CodeAssist** is an advanced code completion tool that intelligently provides high-quality code completions for Python, Java, C++, and other languages.
23 |
25 |
26 |
27 | **Guide**
28 |
29 | - [Feature](#Feature)
30 | - [Install](#install)
31 | - [Usage](#usage)
32 | - [Contact](#Contact)
33 | - [Citation](#Citation)
34 | - [Reference](#reference)
35 |
36 | ## Feature
37 |
38 | - GPT-based code completion
39 | - Code completion for `Python`, `Java`, `C++`, `JavaScript`, and more
40 | - Line-level and block-level code completion
41 | - Train (fine-tune) and predict with your own data
42 |
43 | ### Release Models
44 |
45 | | Arch | BaseModel | Model | Model Size |
46 | |:-------|:------------------|:------------------------------------------------------------------------------------------------------------------------|:----------:|
47 | | GPT | gpt2 | [shibing624/code-autocomplete-gpt2-base](https://huggingface.co/shibing624/code-autocomplete-gpt2-base) | 487MB |
48 | | GPT | distilgpt2 | [shibing624/code-autocomplete-distilgpt2-python](https://huggingface.co/shibing624/code-autocomplete-distilgpt2-python) | 319MB |
49 | | GPT | bigcode/starcoder | [WizardLM/WizardCoder-15B-V1.0](https://huggingface.co/WizardLM/WizardCoder-15B-V1.0) | 29GB |
50 |
51 |
52 | ### Demo
53 |
54 | HuggingFace Demo: https://huggingface.co/spaces/shibing624/code-autocomplete
55 |
56 | backend model: `shibing624/code-autocomplete-gpt2-base`
57 |
58 | ## Install
59 |
60 | ```shell
61 | pip install torch # conda install pytorch
62 | pip install -U codeassist
63 | ```
64 |
65 | or
66 |
67 | ```shell
68 | git clone https://github.com/shibing624/codeassist.git
69 | cd codeassist
70 | python setup.py install
71 | ```
72 |
73 | ## Usage
74 |
75 | ### WizardCoder model
76 |
77 | WizardCoder-15B is `bigcode/starcoder` fine-tuned on Alpaca code data. You can use the following code to generate code:
78 |
79 | example: [examples/wizardcoder_demo.py](https://github.com/shibing624/CodeAssist/blob/main/examples/wizardcoder_demo.py)
80 |
81 | ```python
82 | import sys
83 |
84 | sys.path.append('..')
85 | from codeassist import WizardCoder
86 |
87 | m = WizardCoder("WizardLM/WizardCoder-15B-V1.0")
88 | print(m.generate('def load_csv_file(file_path):')[0])
89 | ```
90 |
91 | output:
92 |
93 |
94 | ```python
95 | import csv
96 |
97 | def load_csv_file(file_path):
98 | """
99 | Load data from a CSV file and return a list of dictionaries.
100 | """
101 | # Open the file in read mode
102 | with open(file_path, 'r') as file:
103 | # Create a CSV reader object
104 | csv_reader = csv.DictReader(file)
105 | # Initialize an empty list to store the data
106 | data = []
107 | # Iterate over each row of data
108 | for row in csv_reader:
109 | # Append the row of data to the list
110 | data.append(row)
111 | # Return the list of data
112 | return data
113 | ```
114 |
115 | The model output is impressively effective. It currently supports English input only; you can enter instructions or code prefixes as needed.
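
Since `generate` takes a free-form prompt, instruction-style input works as well; a minimal sketch (the instruction text below is an illustrative example, not a required format):

```python
import sys

sys.path.append('..')
from codeassist import WizardCoder

m = WizardCoder("WizardLM/WizardCoder-15B-V1.0")
# Pass an instruction instead of a code prefix
print(m.generate('Write a Python function that merges two sorted lists into one sorted list.')[0])
```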
116 |
117 | ### distilgpt2 model
118 |
119 |
120 | A code autocomplete model fine-tuned from distilgpt2; you can use it with the following code:
121 |
122 | example: [examples/distilgpt2_demo.py](https://github.com/shibing624/CodeAssist/blob/main/examples/distilgpt2_demo.py)
123 |
124 | ```python
125 | import sys
126 |
127 | sys.path.append('..')
128 | from codeassist import GPT2Coder
129 |
130 | m = GPT2Coder("shibing624/code-autocomplete-distilgpt2-python")
131 | print(m.generate('import torch.nn as')[0])
132 | ```
133 |
134 | output:
135 |
136 | ```shell
137 | import torch.nn as nn
138 | import torch.nn.functional as F
139 | ```
140 |
141 | ### Use with huggingface/transformers
142 |
143 | example: [examples/use_transformers_gpt2.py](https://github.com/shibing624/CodeAssist/blob/main/examples/use_transformers_gpt2.py)
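
If you prefer plain transformers without the codeassist wrapper, the model loads with the standard GPT-2 classes; a minimal sketch (the generation parameters are illustrative, see the example file for the full version):

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("shibing624/code-autocomplete-distilgpt2-python")
model = GPT2LMHeadModel.from_pretrained("shibing624/code-autocomplete-distilgpt2-python")

prompt = "import torch.nn as"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_length=64,
    do_sample=True,
    top_k=50,
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```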
144 |
145 | ### Train Model
146 | #### Train WizardCoder model
147 | example: [examples/training_wizardcoder_mydata.py](https://github.com/shibing624/CodeAssist/blob/main/examples/training_wizardcoder_mydata.py)
148 |
149 | ```shell
150 | cd examples
151 | python training_wizardcoder_mydata.py --do_train --do_predict --num_epochs 1 --output_dir outputs-wizard --model_name WizardLM/WizardCoder-15B-V1.0
152 | ```
153 |
154 | #### Train distilgpt2 model
155 | example: [examples/training_gpt2_mydata.py](https://github.com/shibing624/CodeAssist/blob/main/examples/training_gpt2_mydata.py)
156 |
157 | ```shell
158 | cd examples
159 | python training_gpt2_mydata.py --do_train --do_predict --num_epochs 15 --output_dir outputs-gpt2 --model_name gpt2
160 | ```
161 |
162 | PS: the resulting fine-tuned model is GPT2-python: [shibing624/code-autocomplete-gpt2-base](https://huggingface.co/shibing624/code-autocomplete-gpt2-base);
163 | fine-tuning took about 24 hours on a V100.
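
You can also drive training from Python directly; a minimal sketch using `GPT2Coder.train_model` (the file paths are placeholders for your own data):

```python
from codeassist import GPT2Coder

model = GPT2Coder("gpt2")  # or "distilgpt2"
# train_file/eval_file are plain text files of source code
model.train_model(
    train_file="download/python/train.txt",
    output_dir="outputs-gpt2",
    eval_file="download/python/valid.txt",
    num_epochs=15,
)
print(model.generate("import torch.nn as")[0])
```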
164 |
165 |
166 | ### Server
167 |
168 | Start the FastAPI server:
169 |
170 | example: [examples/server.py](https://github.com/shibing624/CodeAssist/blob/main/examples/server.py)
171 |
172 | ```shell
173 | cd examples
174 | python server.py
175 | ```
176 |
177 | Open the URL: http://0.0.0.0:8001/docs
178 |
179 | ![api](docs/api.png)
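
Besides the interactive docs page, FastAPI serves a machine-readable schema at `/openapi.json`, which you can use to discover the actual routes; a small sketch (this relies only on FastAPI's built-in endpoints, not on any project-specific route):

```python
import requests

# FastAPI exposes the OpenAPI schema by default; list the available routes.
schema = requests.get("http://0.0.0.0:8001/openapi.json").json()
print(list(schema["paths"].keys()))
```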
180 |
181 |
182 |
183 | ## Dataset
184 |
185 | CodeAssist also lets you build a custom dataset. Below is an example of the building process.
186 |
187 | Let's use Python code from [Awesome-pytorch-list](https://github.com/bharathgs/Awesome-pytorch-list):
188 |
189 | 1. We want the model to help auto-complete code at a general level, and the code collected from this list suits the need.
190 | 2. The code in these projects is well written (high quality).
191 |
192 | dataset tree:
193 |
194 | ```shell
195 | examples/download/python
196 | ├── train.txt
197 | ├── valid.txt
198 | └── test.txt
199 | ```
200 |
201 | There are three ways to build the dataset (a sketch for dumping a loaded split to a training text file follows this list):
202 | 1. Use the huggingface/datasets library to load the dataset:
203 | huggingface datasets [https://huggingface.co/datasets/shibing624/source_code](https://huggingface.co/datasets/shibing624/source_code)
204 |
205 | ```python
206 | from datasets import load_dataset
207 | dataset = load_dataset("shibing624/source_code", "python") # python or java or cpp
208 | print(dataset)
209 | print(dataset['test'][0:10])
210 | ```
211 |
212 | output:
213 | ```shell
214 | DatasetDict({
215 | train: Dataset({
216 | features: ['text'],
217 | num_rows: 5215412
218 | })
219 | validation: Dataset({
220 | features: ['text'],
221 | num_rows: 10000
222 | })
223 | test: Dataset({
224 | features: ['text'],
225 | num_rows: 10000
226 | })
227 | })
228 | {'text': [
229 | " {'max_epochs': [1, 2]},\n",
230 | ' refit=False,\n', ' cv=3,\n',
231 | " scoring='roc_auc',\n", ' )\n',
232 | ' search.fit(*data)\n',
233 | '',
234 | ' def test_module_output_not_1d(self, net_cls, data):\n',
235 | ' from skorch.toy import make_classifier\n',
236 | ' module = make_classifier(\n'
237 | ]}
238 | ```
239 |
240 | 2. Download the dataset from the cloud:
241 |
242 | | Name | Source | Download | Size |
243 | | :------- | :--------- | :---------: | :---------: |
244 | | Python+Java+CPP source code | Awesome-pytorch-list (5.22 million lines) | [github_source_code.zip](https://github.com/shibing624/CodeAssist/releases/download/0.0.4/source_code.zip) | 105M |
245 |
246 | Download the dataset, unzip it, and put it under `examples/`.
247 |
248 | 3. Get the source code from scratch and build the dataset:
249 |
250 | [prepare_code_data.py](https://github.com/shibing624/CodeAssist/blob/main/examples/prepare_code_data.py)
251 |
252 | ```shell
253 | cd examples
254 | python prepare_code_data.py --num_repos 260
255 | ```
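
As referenced above, to turn a split loaded with huggingface/datasets (way 1) into the plain-text train file that `GPT2Coder.train_model` expects, write the `text` rows out to disk; a minimal sketch (the output file name is a placeholder):

```python
from datasets import load_dataset

dataset = load_dataset("shibing624/source_code", "python")
# Each row's "text" field already ends with a newline, so plain concatenation works.
with open("train.txt", "w", encoding="utf8") as f:
    for row in dataset["validation"]:
        f.write(row["text"])
```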
256 |
257 |
258 | ## Contact
259 |
260 | - Issues (suggestions):
261 |   [](https://github.com/shibing624/CodeAssist/issues)
262 | - Email me: xuming, xuming624@qq.com
263 | - WeChat me: add my WeChat ID *xuming624* with the note *name-company-NLP* to join the NLP discussion group.
264 |
265 |
266 |
267 | ## Citation
268 |
269 | If you use CodeAssist in your research, please cite it in the following format:
270 |
271 | APA:
272 | ```latex
273 | Xu, M. CodeAssist: Code AutoComplete with GPT model (Version 1.0.0) [Computer software]. https://github.com/shibing624/CodeAssist
274 | ```
275 |
276 | BibTeX:
277 | ```latex
278 | @software{Xu_CodeAssist,
279 | author = {Ming Xu},
280 | title = {CodeAssist: Code AutoComplete with Generation model},
281 | url = {https://github.com/shibing624/CodeAssist},
282 | version = {1.0.0}
283 | }
284 | ```
285 |
286 | ## License
287 |
288 | This project is licensed under [The Apache License 2.0](/LICENSE) and is free for commercial use. Please include a link to CodeAssist and the license agreement in your product documentation.
289 |
290 | ## Contribute
291 |
292 | The project code is still rough. If you have any improvements, you are welcome to submit them back to this project. Before submitting, please note the following two points:
293 |
294 | - Add corresponding unit tests in `tests`
295 | - Run all unit tests with `python setup.py test` and make sure they all pass
296 |
297 | After that, you can submit a PR.
298 |
299 | ## Reference
300 |
301 | - [gpt-2-simple](https://github.com/minimaxir/gpt-2-simple)
302 | - [galois-autocompleter](https://github.com/galois-autocompleter/galois-autocompleter)
303 | - [WizardLM/WizardCoder-15B-V1.0](https://huggingface.co/WizardLM/WizardCoder-15B-V1.0)
304 |
--------------------------------------------------------------------------------
/codeassist/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description: Package entry point; exposes GPT2Coder and WizardCoder
5 | """
6 |
7 | __version__ = "1.0.0"
8 |
9 | from codeassist.gpt2_coder import GPT2Coder
10 | from codeassist.wizard_coder import WizardCoder
11 |
--------------------------------------------------------------------------------
/codeassist/create_dataset.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:https://github.com/labmlai
4 | @description: Parse all files and write to a single file
5 |
6 | part of code refer: https://github.com/labmlai/python_autocomplete/blob/master/python_autocomplete/create_dataset.py
7 | """
8 | import glob
9 | import os
10 | import re
11 | import ssl
12 | import string
13 | import urllib.error
14 | import urllib.request
15 | import zipfile
16 | from pathlib import Path
17 | from typing import Optional
18 | from typing import Set
19 |
20 | from loguru import logger
21 |
22 | ssl._create_default_https_context = ssl._create_unverified_context
23 | PRINTABLE = set(string.printable)
24 |
25 |
26 | def read_file(path: str) -> str:
27 | """
28 | Read a file
29 | """
30 | with open(path, 'r', encoding='utf8') as f:
31 | try:
32 | content = f.read()
33 |         except UnicodeDecodeError:
34 |             logger.warning(f"UnicodeDecodeError: {path}, skipping file")
35 | content = ""
36 | content = ''.join(filter(lambda x: x in PRINTABLE, content))
37 |
38 | return content
39 |
40 |
41 | def save_file(content, file_path: str):
42 | with open(file_path, 'w', encoding='utf8') as f:
43 | f.write(str(content))
44 |
45 |
46 | def merge_and_save(source_files, path):
47 | with open(path, 'w', encoding='utf8') as f:
48 | for src in source_files:
49 | f.write(read_file(src) + "\n\n")
50 |
51 |
52 | def get_repos_from_readme(readme_content):
53 | link_pattern = re.compile(r"""
54 |         \[(?P<title>[^\]]*)\] # title
55 |         \((?P<url>[^\)]*)\) # url
56 | """, re.VERBOSE)
57 |
58 | res = link_pattern.findall(readme_content)
59 |
60 | github_repos = []
61 |     repo_pattern = re.compile(r'https://github.com/(?P<org>[^/]*)/(?P<repo>[^/#]*)$')
62 | for title, url in res:
63 | repos = repo_pattern.findall(url)
64 | for r in repos:
65 | github_repos.append((r[0], r[1]))
66 |
67 | return github_repos
68 |
69 |
70 | def download_repo(save_dir: str, org: str, repo: str, idx: Optional[int]):
71 | zip_file = Path(f'{save_dir}/{org}_{repo}.zip')
72 |
73 | if zip_file.exists():
74 | return zip_file
75 |
76 | if idx is not None:
77 | idx_str = f"{idx:03}: "
78 | else:
79 | idx_str = ""
80 |
81 | try:
82 |         response = urllib.request.urlopen(f'https://github.com/{org}/{repo}/archive/master.zip')
83 |     except urllib.error.HTTPError as e:
84 |         print(e)
85 |         return
86 |     content = response.read()
87 |
88 | size = len(content) // 1024
89 | logger.debug(f"{idx_str} {org}/{repo} {size :,}KB")
90 |
91 | with open(str(zip_file), 'wb') as f:
92 | f.write(content)
93 |
94 | return zip_file
95 |
96 |
97 | def extract_zip(source_dir, file_path: Path):
98 | source = Path(source_dir)
99 | logger.debug(f"Extract {file_path}")
100 | repo_source = source / file_path.stem
101 | if repo_source.exists():
102 | logger.debug(f"Exists: {repo_source}")
103 | return repo_source
104 | try:
105 | with zipfile.ZipFile(file_path, 'r') as repo_zip:
106 | repo_zip.extractall(repo_source)
107 | except zipfile.BadZipfile as e:
108 | print(file_path, e)
109 |
110 | return repo_source
111 |
112 |
113 | def remove_files(path: Path, keep: Set[str]):
114 | """
115 | Remove files
116 | """
117 | for p in path.iterdir():
118 | if p.is_symlink():
119 | p.unlink()
120 | continue
121 | if p.is_dir():
122 | remove_files(p, keep)
123 | else:
124 | if p.suffix not in keep:
125 | p.unlink()
126 |
127 |
128 | def get_source_code_by_language(code_languages=("python", "java", "cpp"),
129 | save_dir='download/',
130 | each_limit_repos=3):
131 | sources = dict()
132 | Path(save_dir).mkdir(parents=True, exist_ok=True)
133 | if isinstance(code_languages, str):
134 | code_languages = [code_languages]
135 | logger.info(f"Get source code by language: {code_languages}")
136 |
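    # NOTE: this nested helper reads `suffix` from the enclosing function's scope;
    # it is set per language ('.py', '.java', '.cpp') right before each call below.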
137 | def get_source_files_by_readme(readme_content, sub_save_dir, limit_size):
138 | zip_dir = sub_save_dir + "/zip"
139 | src_dir = sub_save_dir + "/src"
140 | Path(zip_dir).mkdir(parents=True, exist_ok=True)
141 | Path(src_dir).mkdir(parents=True, exist_ok=True)
142 | repos = get_repos_from_readme(readme_content)
143 | if limit_size:
144 | repos = repos[:limit_size]
145 | # Download repos
146 | for i, r in enumerate(repos):
147 | zip_file = download_repo(zip_dir, r[0], r[1], i)
148 | if not zip_file:
149 | continue
150 | extracted = extract_zip(src_dir, zip_file)
151 | remove_files(extracted, keep={suffix})
152 | source_files = glob.glob(f"{src_dir}/**/*{suffix}", recursive=True)
153 | logger.info(f"Path: {src_dir}/**/*{suffix}, file size: {len(source_files)}")
154 | return source_files
155 |
156 | if "python" in code_languages:
157 | logger.debug('Get awesome-python')
158 | suffix = '.py'
159 | sub_save_dir = os.path.join(save_dir, 'python')
160 | readme_file = sub_save_dir + '/README.md'
161 | if os.path.exists(readme_file):
162 | readme_content = read_file(readme_file)
163 | else:
164 | readme_content = urllib.request.urlopen(
165 | 'https://raw.githubusercontent.com/bharathgs/Awesome-pytorch-list/master/README.md').read()
166 | readme_content = str(readme_content)
167 | save_file(readme_content, readme_file)
168 | sources['python'] = get_source_files_by_readme(readme_content, sub_save_dir, each_limit_repos)
169 | logger.info(f"Get source code by language: python done")
170 | if "java" in code_languages:
171 | logger.debug('Get awesome-java')
172 | suffix = '.java'
173 | sub_save_dir = os.path.join(save_dir, 'java')
174 | readme_file = sub_save_dir + '/README.md'
175 | if os.path.exists(readme_file):
176 | readme_content = read_file(readme_file)
177 | else:
178 | readme_content = urllib.request.urlopen(
179 | 'https://raw.githubusercontent.com/akullpp/awesome-java/master/README.md').read()
180 | readme_content = str(readme_content)
181 | save_file(readme_content, readme_file)
182 | sources['java'] = get_source_files_by_readme(readme_content, sub_save_dir, each_limit_repos)
183 | logger.info(f"Get source code by language: java done")
184 | if "cpp" in code_languages:
185 | logger.debug('Get awesome-cpp')
186 | suffix = '.cpp'
187 | sub_save_dir = os.path.join(save_dir, 'cpp')
188 | readme_file = sub_save_dir + '/README.md'
189 | if os.path.exists(readme_file):
190 | readme_content = read_file(readme_file)
191 | else:
192 | readme_content = urllib.request.urlopen(
193 | 'https://raw.githubusercontent.com/fffaraz/awesome-cpp/master/README.md').read()
194 | readme_content = str(readme_content)
195 | save_file(readme_content, readme_file)
196 | sources['cpp'] = get_source_files_by_readme(readme_content, sub_save_dir, each_limit_repos)
197 | logger.info(f"Get source code by language: cpp done")
198 |
199 | return sources
200 |
--------------------------------------------------------------------------------
/codeassist/gpt2_coder.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description: Rewrite the original gpt2 model to support the autocomplete
5 | """
6 |
7 | import math
8 | import os
9 | import random
10 | from typing import Dict, List
11 |
12 | import numpy as np
13 | import pandas as pd
14 | import torch
15 | from loguru import logger
16 | from torch.nn.utils.rnn import pad_sequence
17 | from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
18 | from torch.utils.data.distributed import DistributedSampler
19 | from tqdm import tqdm, trange
20 | from transformers import GPT2LMHeadModel, GPT2Tokenizer
21 | from transformers.data.datasets.language_modeling import TextDataset
22 | from transformers.optimization import AdamW, get_linear_schedule_with_warmup
23 |
24 | os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
25 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
26 |
27 |
28 | class GPT2Coder:
29 | def __init__(
30 | self,
31 | model_name_or_path: str = "shibing624/code-autocomplete-gpt2-base",
32 | max_length: int = 128,
33 | do_lower_case: bool = False,
34 | special_words_dict: Dict = None
35 | ):
36 | """
37 | Initializes a GPT2 LanguageModelingModel.
38 |
39 | Args:
40 |             model_name_or_path: Default Transformer model name or path to a directory containing the Transformer model file (pytorch_model.bin).
41 | max_length: The maximum total input sequence length after tokenization.
42 | do_lower_case: Set this flag if you are using an uncased model.
43 | special_words_dict: A dictionary of special words and their token ids.
44 | """
45 | self.model_name_or_path = model_name_or_path
46 | self.do_lower_case = do_lower_case
47 | if max_length > 1024:
48 | logger.warning("GPT only allows a max_length of 1024. Value will be set to 1024")
49 | max_length = 1024
50 | self.max_length = max_length
51 | self.model = GPT2LMHeadModel.from_pretrained(model_name_or_path)
52 | self.model.to(device)
53 | self.tokenizer = GPT2Tokenizer.from_pretrained(model_name_or_path, do_lower_case=do_lower_case)
54 | if special_words_dict is not None:
55 | self.add_special_words(special_words_dict)
56 | self.results = {}
57 |
58 | def set_seed(self, seed):
59 | logger.debug(f"Set seed for random, numpy and torch: {seed}")
60 | random.seed(seed)
61 | np.random.seed(seed)
62 | torch.manual_seed(seed)
63 | if torch.cuda.is_available():
64 | torch.cuda.manual_seed_all(seed)
65 |
66 | def add_special_words(self, special_words_dict):
67 | origin_num_tokens = len(self.tokenizer)
68 | num_added_tokens = self.tokenizer.add_special_tokens(special_words_dict)
69 | if num_added_tokens > 0:
70 | self.model.resize_token_embeddings(new_num_tokens=origin_num_tokens + num_added_tokens)
71 |
72 | def train_model(
73 | self,
74 | train_file: str,
75 | output_dir: str,
76 | eval_file: str = None,
77 | verbose: bool = True,
78 | batch_size: int = 8,
79 | num_epochs: int = 1,
80 | weight_decay: float = 0.01,
81 | seed: int = 42,
82 | warmup_ratio: float = 0.1,
83 | lr: float = 5e-5,
84 | eps: float = 1e-6,
85 | gradient_accumulation_steps: int = 1,
86 | max_grad_norm: float = 1.0,
87 | max_steps: int = -1
88 | ):
89 | """
90 | Trains the model on 'train_file'
91 |
92 | Args:
93 | train_file: Path to text file containing the text to train the language model on.
94 |             output_dir: The directory where model files will be saved.
95 | eval_file (optional): Path to eval file containing the text to evaluate the language model on.
96 | verbose (optional): Print logger or not.
97 | batch_size (optional): Batch size for training.
98 | num_epochs (optional): Number of epochs for training.
99 | weight_decay (optional): Weight decay for optimization.
100 | seed (optional): Seed for initialization.
101 | warmup_ratio (optional): Warmup ratio for learning rate.
102 | lr (optional): Learning rate.
103 | eps (optional): Adam epsilon.
104 | gradient_accumulation_steps (optional): Number of updates steps to accumulate before performing a backward/update pass.
105 | max_grad_norm (optional): Max gradient norm.
106 | max_steps (optional): If > 0: set total number of training steps to perform. Override num_epochs.
107 | Returns:
108 | global_step: Number of global steps trained
109 |             training_details: Dictionary of training progress scores (global_step, perplexity, eval_loss, train_loss)
110 | """
111 | os.makedirs(output_dir, exist_ok=True)
112 | self.model.to(device)
113 | train_dataset = TextDataset(self.tokenizer, train_file, self.max_length, overwrite_cache=True,
114 | cache_dir=output_dir)
115 |
116 | global_step, training_details = self.train(
117 | train_dataset,
118 | output_dir,
119 | eval_file=eval_file,
120 | verbose=verbose,
121 | batch_size=batch_size,
122 | num_epochs=num_epochs,
123 | weight_decay=weight_decay,
124 | seed=seed,
125 | warmup_ratio=warmup_ratio,
126 | lr=lr,
127 | eps=eps,
128 | gradient_accumulation_steps=gradient_accumulation_steps,
129 | max_grad_norm=max_grad_norm,
130 | max_steps=max_steps
131 | )
132 | logger.info(f" Training model done. Saved to {output_dir}.")
133 |
134 | return global_step, training_details
135 |
136 | def train(
137 | self,
138 | train_dataset: Dataset,
139 | output_dir: str,
140 | eval_file: str = None,
141 | verbose: bool = True,
142 | batch_size: int = 8,
143 | num_epochs: int = 1,
144 | weight_decay: float = 0.01,
145 | seed: int = 42,
146 | warmup_ratio: float = 0.1,
147 | lr: float = 5e-5,
148 | eps: float = 1e-6,
149 | gradient_accumulation_steps: int = 1,
150 | max_grad_norm: float = 1.0,
151 | max_steps: int = -1
152 | ):
153 | """
154 | Trains the model on train_dataset.
155 |
156 | Utility function to be used by the train_model() method. Not intended to be used directly.
157 | """
158 | self.set_seed(seed)
159 |
160 | def collate(examples: List[torch.Tensor]):
161 | if self.tokenizer._pad_token is None:
162 | return pad_sequence(examples, batch_first=True)
163 | return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)
164 |
165 | train_sampler = RandomSampler(train_dataset)
166 | train_dataloader = DataLoader(
167 | train_dataset,
168 | batch_size=batch_size,
169 | sampler=train_sampler,
170 | collate_fn=collate,
171 | )
172 |
173 | total_steps = len(train_dataloader) * num_epochs
174 | param_optimizer = list(self.model.named_parameters())
175 | no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
176 | optimizer_grouped_parameters = [
177 | {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
178 | 'weight_decay': weight_decay},
179 | {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
180 | ]
181 |
182 | warmup_steps = math.ceil(total_steps * warmup_ratio) # by default 10% of train data for warm-up
183 | optimizer = AdamW(optimizer_grouped_parameters, lr=lr, eps=eps, correct_bias=False)
184 | scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
185 | num_training_steps=total_steps)
186 | logger.info("***** Running training *****")
187 | logger.info(f" Num examples = {len(train_dataset)}")
188 | logger.info(f" Batch size = {batch_size}")
189 | logger.info(f" Num steps = {total_steps}")
190 | logger.info(f" Warmup-steps: {warmup_steps}")
191 |
192 | logger.info(" Training started")
193 | global_step = 0
194 | tr_loss, logging_loss = 0.0, 0.0
195 | self.model.zero_grad()
196 | epoch_number = 0
197 | best_eval_metric = 1e3
198 | steps_trained_in_current_epoch = 0
199 | epochs_trained = 0
200 |
201 | if self.model_name_or_path and os.path.exists(self.model_name_or_path):
202 | try:
203 | # set global_step to global_step of last saved checkpoint from model path
204 | checkpoint_suffix = self.model_name_or_path.split("/")[-1].split("-")
205 | if len(checkpoint_suffix) > 2:
206 | checkpoint_suffix = checkpoint_suffix[1]
207 | else:
208 | checkpoint_suffix = checkpoint_suffix[-1]
209 | global_step = int(checkpoint_suffix)
210 | epochs_trained = global_step // (len(train_dataloader) // gradient_accumulation_steps)
211 | steps_trained_in_current_epoch = global_step % (len(train_dataloader) // gradient_accumulation_steps)
212 | logger.info(" Continuing training from checkpoint, will skip to saved global_step")
213 | logger.info(" Continuing training from epoch %d" % epochs_trained)
214 | logger.info(" Continuing training from global step %d" % global_step)
215 | logger.info(" Will skip the first %d steps in the current epoch" % steps_trained_in_current_epoch)
216 | except ValueError:
217 | logger.info(" Starting fine-tuning.")
218 |
219 | training_progress_scores = {
220 | "global_step": [],
221 | "perplexity": [],
222 | "eval_loss": [],
223 | "train_loss": [],
224 | }
225 | train_iterator = trange(int(num_epochs), desc="Epoch", disable=False, mininterval=0)
226 | for current_epoch in train_iterator:
227 | self.model.train()
228 | current_loss = 0
229 | if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
230 | train_dataloader.sampler.set_epoch(current_epoch)
231 | if epochs_trained > 0:
232 | epochs_trained -= 1
233 | continue
234 | train_iterator.set_description(f"Epoch {epoch_number + 1} of {num_epochs}")
235 | batch_iterator = tqdm(train_dataloader,
236 |                                   desc=f"Running Epoch {epoch_number + 1} of {num_epochs}",
237 | disable=False,
238 | mininterval=0)
239 | for step, batch in enumerate(batch_iterator):
240 | if steps_trained_in_current_epoch > 0:
241 | steps_trained_in_current_epoch -= 1
242 | continue
243 |
244 | inputs = batch.to(device)
245 | outputs = self.model(inputs, labels=inputs)
246 | loss = outputs[0]
247 | current_loss = loss.item()
248 | if verbose:
249 | batch_iterator.set_description(
250 |                         f"Epochs {epoch_number + 1}/{num_epochs}. Running Loss: {current_loss:9.4f}")
251 |
252 | if gradient_accumulation_steps > 1:
253 | loss = loss / gradient_accumulation_steps
254 |
255 | loss.backward()
256 | tr_loss += loss.item()
257 | if (step + 1) % gradient_accumulation_steps == 0:
258 | torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
259 | optimizer.step()
260 | scheduler.step() # Update learning rate schedule
261 | self.model.zero_grad()
262 | global_step += 1
263 | epoch_number += 1
264 | output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
265 | results = self.eval_model(eval_file, output_dir_current, verbose=verbose, batch_size=batch_size)
266 | self.save_model(output_dir_current, model=self.model, results=results)
267 | training_progress_scores["global_step"].append(global_step)
268 | training_progress_scores["train_loss"].append(current_loss)
269 | for key in results:
270 | training_progress_scores[key].append(results[key])
271 | report = pd.DataFrame(training_progress_scores)
272 | report.to_csv(os.path.join(output_dir, "training_progress_scores.csv"), index=False)
273 |
274 | eval_loss = results["eval_loss"]
275 | if eval_loss < best_eval_metric:
276 | best_eval_metric = eval_loss
277 | self.save_model(output_dir, model=self.model, results=results)
278 |
279 | if 0 < max_steps < global_step:
280 | return global_step, training_progress_scores
281 |
282 | return global_step, training_progress_scores
283 |
284 | def eval_model(self, eval_file: str, output_dir: str = None, verbose: bool = True, batch_size: int = 16):
285 | """
286 |         Evaluates the model on eval_file. Saves results to output_dir.
287 |         Returns: Dictionary containing the evaluation results.
288 | """
289 | self.model.to(device)
290 | eval_dataset = TextDataset(self.tokenizer, eval_file, self.max_length, overwrite_cache=True)
291 | result = self.evaluate(eval_dataset, output_dir, batch_size=batch_size)
292 | self.results.update(result)
293 |
294 | if verbose:
295 | logger.info(self.results)
296 |
297 | return result
298 |
299 | def evaluate(self, eval_dataset, output_dir: str = None, batch_size: int = 16):
300 | """
301 | Evaluates the model on eval_dataset.
302 |
303 | Utility function to be used by the eval_model() method. Not intended to be used directly.
304 | """
305 | results = {}
306 |
307 | def collate(examples: List[torch.Tensor]):
308 | if self.tokenizer._pad_token is None:
309 | return pad_sequence(examples, batch_first=True)
310 | return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)
311 |
312 | eval_sampler = SequentialSampler(eval_dataset)
313 | eval_dataloader = DataLoader(eval_dataset,
314 | batch_size=batch_size,
315 | sampler=eval_sampler,
316 | collate_fn=collate)
317 | eval_loss = 0.0
318 | nb_eval_steps = 0
319 | self.model.eval()
320 |
321 | for batch in tqdm(eval_dataloader, disable=False, desc="Running Evaluation"):
322 | inputs = batch.to(device)
323 | with torch.no_grad():
324 | outputs = self.model(inputs, labels=inputs)
325 | lm_loss = outputs[0]
326 | eval_loss += lm_loss.item()
327 | nb_eval_steps += 1
328 |
329 | eval_loss = eval_loss / nb_eval_steps
330 | perplexity = torch.exp(torch.tensor(eval_loss))
331 |
332 | results["eval_loss"] = eval_loss
333 | results["perplexity"] = perplexity
334 | if output_dir:
335 | os.makedirs(output_dir, exist_ok=True)
336 | with open(os.path.join(output_dir, "eval_results.txt"), "w") as writer:
337 | for key in sorted(results.keys()):
338 | writer.write("{} = {}\n".format(key, str(results[key])))
339 |
340 | return results
341 |
342 | def save_model(self, output_dir, model, results=None):
343 | """
344 | Saves the model to output_dir.
345 | :param output_dir:
346 | :param model:
347 | :param results:
348 | :return:
349 | """
350 | logger.info("Saving model checkpoint to %s", output_dir)
351 | os.makedirs(output_dir, exist_ok=True)
352 | model_to_save = model.module if hasattr(model, "module") else model
353 | model_to_save.save_pretrained(output_dir)
354 | self.tokenizer.save_pretrained(output_dir)
355 | if results:
356 | output_eval_file = os.path.join(output_dir, "eval_results.txt")
357 | with open(output_eval_file, "w") as writer:
358 | for key in sorted(results.keys()):
359 | writer.write("{} = {}\n".format(key, str(results[key])))
360 |
361 | def generate(
362 | self,
363 | prompt: str,
364 | is_add_prompt: bool = True,
365 | max_length: int = 128,
366 |             temperature: float = 1.0,
367 | top_k: int = 50,
368 | top_p: float = 0.95,
369 | repetition_penalty: float = 1.0,
370 | do_sample: bool = True,
371 | num_return_sequences: int = 1,
372 | length_penalty: float = 2.0,
373 | early_stopping: bool = True,
374 | stop_word: str = "\n\n",
375 | bad_words: list = None,
376 | **kwargs,
377 | ):
378 | """
379 | Generate text using a GPT2 LanguageGenerationModel
380 |
381 | Args:
382 | prompt: A prompt text for the model.
383 | is_add_prompt: Whether to add the prompt to the returned text.
384 | max_length: The maximum length of the sequence to be generated.
385 | temperature: The sampling temperature.
386 |             top_k: The number of highest-probability tokens kept for top-k sampling.
387 |             top_p: The cumulative probability threshold for nucleus (top-p) sampling.
388 | repetition_penalty: The repetition penalty parameter.
389 | do_sample: Boolean value indicating whether to sample or greedy generate.
390 | num_return_sequences: The number of samples to return.
391 | length_penalty: The length penalty parameter.
392 | early_stopping: Boolean value indicating whether to do early stopping or not.
393 | stop_word: A stop word to stop generation.
394 |             bad_words: A list of words that should not be generated.
395 | Returns:
396 | generated_sequences: list, Sequences of text generated by the model.
397 | """
398 | encoded_prompt = self.tokenizer(prompt, return_tensors="pt").to(device)
399 | encoded_prompt_ids = encoded_prompt.input_ids
400 | # Get tokens of words that should not be generated
401 | bad_words_ids = [self.tokenizer(bad_word, add_prefix_space=True).input_ids for bad_word in
402 | bad_words] if bad_words else None
403 | output_sequences = self.model.generate(
404 | input_ids=encoded_prompt_ids,
405 | max_length=max_length if max_length is not None else self.max_length,
406 | temperature=temperature,
407 | top_k=top_k,
408 | top_p=top_p,
409 | repetition_penalty=repetition_penalty,
410 | do_sample=do_sample,
411 | num_return_sequences=num_return_sequences,
412 | length_penalty=length_penalty,
413 | early_stopping=early_stopping,
414 |             bad_words_ids=bad_words_ids,
415 | bos_token_id=self.tokenizer.bos_token_id,
416 |             pad_token_id=self.tokenizer.eos_token_id,  # tokenizer.pad_token_id is None
417 | eos_token_id=self.tokenizer.eos_token_id,
418 | **kwargs
419 | )
420 |
421 | # Remove the batch dimension when returning multiple sequences
422 | if len(output_sequences.shape) > 2:
423 | output_sequences.squeeze_()
424 |
425 | generated_sequences = []
426 | for generated_sequence in output_sequences:
427 | generated_sequence = generated_sequence.tolist()
428 | # Decode text
429 | text = self.tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
430 | # Remove all text after the stop word token
431 | text = text[: text.find(stop_word) if stop_word else None]
432 | # Remove the excess text that was used for pre-processing
433 | total_sequence = text[len(self.tokenizer.decode(encoded_prompt_ids[0], clean_up_tokenization_spaces=True)):]
434 | # Add the prompt at the beginning of the sequence.
435 | if is_add_prompt:
436 | total_sequence = prompt + total_sequence
437 | generated_sequences.append(total_sequence)
438 |
439 | return generated_sequences
440 |
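# Usage sketch (illustrative; the checkpoint name, prompt and sampling
# settings below are assumptions, any GPT2Coder-compatible model works):
#
#   coder = GPT2Coder("shibing624/code-autocomplete-gpt2-base")
#   completions = coder.generate(
#       "def load_csv_file(file_path):",
#       max_length=64,
#       num_return_sequences=2,
#       temperature=0.8,
#   )
#   for code in completions:
#       print(code)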
--------------------------------------------------------------------------------
/codeassist/wizard_coder.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description: Rewrite the original WizardCoder to support code completion.
5 | """
6 |
7 | import math
8 | import os
9 | import random
10 | from copy import deepcopy
11 | from dataclasses import dataclass
12 | from typing import List, Optional, Dict, Sequence, Union
13 |
14 | import numpy as np
15 | import torch
16 | from datasets import load_dataset
17 | from loguru import logger
18 | from peft import (
19 | PeftModel,
20 | LoraConfig,
21 | get_peft_model,
22 | prepare_model_for_int8_training,
23 | )
24 | from tqdm import tqdm
25 | from transformers import (
26 | PreTrainedTokenizer,
27 | PreTrainedModel,
28 | AutoModelForCausalLM,
29 | AutoTokenizer,
30 | Trainer,
31 | TrainingArguments,
32 | )
33 |
34 | os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
35 | has_cuda = torch.cuda.is_available()
36 | IGNORE_INDEX = -100
37 | PROMPT_DICT = {
38 | "prompt_input": (
39 | "Below is an instruction that describes a task, paired with an input that provides further context. "
40 | "Write a response that appropriately completes the request.\n\n"
41 | "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
42 | ),
43 | "prompt_no_input": (
44 | "Below is an instruction that describes a task. "
45 | "Write a response that appropriately completes the request.\n\n"
46 | "### Instruction:\n{instruction}\n\n### Response:"
47 | ),
48 | }
49 |
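# Rendered example of the "prompt_no_input" template (the instruction text is
# illustrative):
#   PROMPT_DICT["prompt_no_input"].format_map(dict(instruction="Sort a list in Python"))
#   -> "Below is an instruction that describes a task. Write a response that
#       appropriately completes the request.\n\n### Instruction:\nSort a list
#       in Python\n\n### Response:"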
50 |
51 | class WizardCoder:
52 | def __init__(
53 | self,
54 | model_name_or_path: str = "WizardLM/WizardCoder-15B-V1.0",
55 | peft_name: Optional[str] = None,
56 | special_words_dict: Dict = None,
57 | use_cuda: Optional[bool] = has_cuda,
58 | cuda_device: Optional[int] = -1,
59 | fp16: bool = True,
60 | bf16: bool = False,
61 | **kwargs,
62 | ):
63 | """
64 |         Initializes an AutoModelForCausalLM
65 |
66 | Args:
67 |             model_name_or_path: Default Transformer model name or path to a directory containing Transformer model file (pytorch_model.bin).
68 | peft_name: The name of the PEFT model to use.
69 |             special_words_dict: A dictionary of special tokens to add to the tokenizer.
70 | use_cuda: Use GPU if available.
71 | cuda_device: Which cuda device to use.
72 |             fp16: Use float16 (mixed) precision instead of 32-bit.
73 |             bf16: Use bfloat16 (mixed) precision instead of 32-bit.
74 | **kwargs: Additional kwargs for the Transformers `PreTrainedModel` and `PreTrainedTokenizer` classes.
75 | """
76 | self.model_name_or_path = model_name_or_path
77 | self.fp16 = fp16
78 | self.bf16 = bf16
79 | self.device_map = "auto"
80 | if use_cuda:
81 | if torch.cuda.is_available():
82 | if cuda_device == -1:
83 | self.device = torch.device("cuda")
84 | else:
85 | self.device = torch.device(f"cuda:{cuda_device}")
86 | self.device_map = {"": int(cuda_device)}
87 | else:
88 | raise ValueError(
89 |                     "'use_cuda' set to True when cuda is unavailable. "
90 | "Make sure CUDA is available or set `use_cuda=False`."
91 | )
92 | else:
93 | if torch.backends.mps.is_available():
94 | self.device = torch.device("mps")
95 | self.device_map = {"": "mps"}
96 | else:
97 | self.device = "cpu"
98 | self.device_map = {"": "cpu"}
99 | logger.debug(f"Device: {self.device}")
100 | if not use_cuda:
101 | self.fp16 = False
102 | self.bf16 = False
103 | world_size = int(os.environ.get("WORLD_SIZE", 1))
104 | self.ddp = world_size != 1
105 | if self.ddp:
106 | self.device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
107 |         if torch.cuda.is_available() and torch.cuda.is_bf16_supported() and not self.bf16:
108 | logger.warning("GPU supports bf16, you can enable bf16.")
109 | self.torch_dtype = torch.bfloat16 if self.bf16 else (torch.float16 if self.fp16 else torch.float32)
110 | self.model = AutoModelForCausalLM.from_pretrained(
111 | model_name_or_path,
112 | torch_dtype=self.torch_dtype,
113 | device_map=self.device_map,
114 | **kwargs,
115 | )
116 | if peft_name:
117 |             # Load the PEFT model for inference by default; set is_trainable=True to continue training it
118 | self.model = PeftModel.from_pretrained(
119 | self.model,
120 | peft_name,
121 | torch_dtype=self.torch_dtype,
122 | device_map=self.device_map,
123 | is_trainable=False,
124 | )
125 | logger.info(f"Loaded peft model from {peft_name}")
126 |
127 | self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
128 | if self.tokenizer.pad_token_id is None:
129 | self.tokenizer.pad_token_id = 0
130 |         # Left-pad batches so that batched generation works correctly for decoder-only models
131 | self.tokenizer.padding_side = "left"
132 |
133 | if "starcoder" in model_name_or_path:
134 | self.tokenizer.add_special_tokens({
135 | "eos_token": "<|endoftext|>",
136 | "bos_token": "<|endoftext|>",
137 | "unk_token": "<|endoftext|>",
138 | })
139 | if special_words_dict is not None:
140 | self.tokenizer.add_special_tokens(special_words_dict)
141 | self.results = {}
142 |
143 | def set_seed(self, seed):
144 | logger.debug(f"Set seed for random, numpy and torch: {seed}")
145 | random.seed(seed)
146 | np.random.seed(seed)
147 | torch.manual_seed(seed)
148 | if torch.cuda.is_available():
149 | torch.cuda.manual_seed_all(seed)
150 |
151 | def train_model(
152 | self,
153 | train_file: str,
154 | output_dir: str,
155 | eval_file: str = None,
156 | batch_size: int = 8,
157 | num_epochs: int = 1,
158 | lr: float = 5e-5,
159 | gradient_accumulation_steps: int = 1,
160 | max_steps: int = -1,
161 | logging_steps: int = 50,
162 | gradient_checkpointing: bool = True,
163 | torch_compile: bool = False,
164 | warmup_steps: int = 200,
165 | save_steps: int = 400,
166 | eval_steps: int = 200,
167 | optimizer: str = "adamw_torch",
168 | save_strategy: str = "steps",
169 | save_total_limit: int = 10,
170 |             report_to: Optional[Union[str, List[str]]] = "tensorboard",
171 | overwrite_output_dir: bool = True,
172 | use_peft: bool = True,
173 | int8: bool = False,
174 | max_eval_samples: int = 20,
175 | **kwargs,
176 | ):
177 | """
178 | Trains the model on 'train_file'
179 |
180 | Args:
181 | train_file: Path to text file containing the text to train the language model on.
182 | output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
183 | eval_file (optional): Path to eval file containing the text to evaluate the language model on.
184 | batch_size (optional): Batch size for training.
185 | num_epochs (optional): Number of epochs for training.
186 | lr (optional): Learning rate.
187 | gradient_accumulation_steps (optional): Number of updates steps to accumulate before performing a backward/update pass.
188 |             max_steps (optional): If > 0, sets the total number of training steps to perform, overriding num_epochs.
189 | logging_steps (optional): Number of steps between logging.
190 | gradient_checkpointing (optional): If True, use gradient checkpointing to save memory at the expense of slower backward pass.
191 | torch_compile (optional): If True, use torch's new experimental just-in-time (JIT) compiler to compile the model for faster runtime.
192 | warmup_steps (optional): Number of steps for the warmup in the lr scheduler.
193 | save_steps (optional): Number of steps between saving.
194 | eval_steps (optional): Number of steps between evaluations.
195 |             optimizer (optional): Name of the optimizer, passed to the `optim` field of TrainingArguments, e.g. 'adamw_torch'.
196 | save_strategy (optional): Strategy to save checkpoints. Can be 'steps' or 'epoch'.
197 | save_total_limit (optional): Maximum number of checkpoints to keep.
198 | report_to (optional): The list of integrations to report the results and logs to.
199 | overwrite_output_dir (optional): Overwrite the content of the output directory.
200 |             use_peft (optional): If True, fine-tune a LoRA adapter with PEFT instead of updating all model parameters.
201 | int8 (optional): If True, use int8 quantization for the model.
202 | max_eval_samples (optional): Maximum number of samples to use for evaluation.
203 | kwargs (optional): Optional model specific arguments.
204 |
205 | Returns:
206 | global_step: Number of global steps trained
207 | metrics: Dictionary containing the evaluation results.
208 | """
209 | os.makedirs(output_dir, exist_ok=True)
210 | self.set_seed(42)
211 | logger.debug(f"Tokenizer: {self.tokenizer}")
212 | logger.debug(f"Model: {self.model}")
213 |
214 | training_args = TrainingArguments(
215 | output_dir=output_dir,
216 | dataloader_drop_last=True,
217 | learning_rate=lr,
218 | num_train_epochs=num_epochs,
219 | max_steps=max_steps,
220 | logging_dir=f"{output_dir}/logs",
221 | logging_steps=logging_steps,
222 | per_device_train_batch_size=batch_size,
223 | per_device_eval_batch_size=batch_size,
224 | gradient_checkpointing=gradient_checkpointing,
225 | torch_compile=torch_compile,
226 | gradient_accumulation_steps=gradient_accumulation_steps,
227 | warmup_steps=warmup_steps,
228 | save_steps=save_steps,
229 | optim=optimizer,
230 | save_strategy=save_strategy,
231 | evaluation_strategy='steps' if eval_file is not None else 'no',
232 | eval_steps=eval_steps if eval_file is not None else None,
233 | load_best_model_at_end=True if eval_file is not None else False,
234 | ddp_find_unused_parameters=False,
235 | save_total_limit=save_total_limit,
236 | fp16=self.fp16,
237 | bf16=self.bf16,
238 | report_to=report_to,
239 | overwrite_output_dir=overwrite_output_dir,
240 | no_cuda=True if self.device == "cpu" else False,
241 | **kwargs
242 | )
243 | # update model train config
244 | if training_args.gradient_checkpointing:
245 | self.model.gradient_checkpointing_enable()
246 | self.model.config.use_cache = False
247 | else:
248 | self.model.config.use_cache = True
249 | self.model.enable_input_require_grads()
250 |
251 | # Tell Trainer not to attempt DataParallel
252 | self.model.is_parallelizable = True
253 | self.model.model_parallel = True
254 | # Setup peft
255 | if use_peft:
256 | peft_config = LoraConfig(
257 | task_type="CAUSAL_LM",
258 | inference_mode=False,
259 | r=16,
260 | lora_alpha=32,
261 | lora_dropout=0.05,
262 | target_modules=["c_proj", "c_attn", "q_attn"],
263 | bias="none",
264 | )
265 | if int8:
266 | self.model = prepare_model_for_int8_training(self.model)
267 | self.model = get_peft_model(self.model, peft_config)
268 | self.model.print_trainable_parameters() # Be more transparent about the % of trainable params.
269 | else:
270 |             logger.warning("Fine-tuning all model parameters now, which is slow; set `use_peft=True` for LoRA fine-tuning.")
271 | logger.debug(f"Tokenizer: {self.tokenizer}")
272 | logger.debug(f"Model: {self.model}")
273 |
274 | # load dataset
275 | raw_train_datasets = load_dataset('json', data_files=train_file, split="train")
276 | logger.debug(f"Example train_dataset[0]: {raw_train_datasets[0]}")
277 | with training_args.main_process_first(desc="Train dataset tokenization"):
278 | train_dataset = raw_train_datasets.map(
279 | self.train_tokenize_function,
280 | batched=True,
281 | num_proc=1,
282 | remove_columns=raw_train_datasets.column_names,
283 | desc="Running tokenizer on train dataset",
284 | fn_kwargs={"tokenizer": self.tokenizer}
285 | )
286 | logger.debug(f"Train dataset size: {len(train_dataset)}")
287 | logger.debug(f"First sample of the training set: {train_dataset[0]}.")
288 | eval_dataset = None
289 | if eval_file is not None:
290 | raw_eval_datasets = load_dataset('json', data_files=eval_file, split="train")
291 | if max_eval_samples is not None and max_eval_samples > 0:
292 | max_eval_samples = min(len(raw_eval_datasets), max_eval_samples)
293 | raw_eval_datasets = raw_eval_datasets.select(range(max_eval_samples))
294 | with training_args.main_process_first(desc="Eval dataset tokenization"):
295 | eval_dataset = raw_eval_datasets.map(
296 | self.train_tokenize_function,
297 | batched=True,
298 | num_proc=1,
299 | remove_columns=raw_eval_datasets.column_names,
300 |                     desc="Running tokenizer on eval dataset",
301 | fn_kwargs={"tokenizer": self.tokenizer}
302 | )
303 | logger.debug(f"Eval dataset size: {len(eval_dataset)}")
304 | logger.debug(f"First sample of the eval set: {eval_dataset[0]}.")
305 |
306 | # Log on each process the small summary:
307 | logger.warning(
308 | f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
309 | + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
310 | )
311 | if training_args.local_rank <= 0:
312 | logger.info(f"Training/evaluation parameters {training_args}")
313 |
314 | data_collator = DataCollatorForSupervisedDataset(tokenizer=self.tokenizer)
315 | trainer = Trainer(
316 | model=self.model,
317 | tokenizer=self.tokenizer,
318 | args=training_args,
319 | train_dataset=train_dataset,
320 | eval_dataset=eval_dataset,
321 | data_collator=data_collator,
322 | )
323 | logger.info("*** Train ***")
324 | logger.debug(f"Train dataloader example: {next(iter(trainer.get_train_dataloader()))}")
325 | (global_step, training_loss, metrics) = trainer.train()
326 | self.results.update(metrics)
327 | trainer.log_metrics("train", metrics)
328 | trainer.save_metrics("train", metrics)
329 | trainer.save_state()
330 | self.save_model(output_dir=output_dir)
331 | logger.info(f" Training model done. Saved to {output_dir}.")
332 |
333 | if eval_dataset is not None:
334 | logger.info("*** Evaluate ***")
335 | metrics = trainer.evaluate(metric_key_prefix="eval")
336 | metrics['eval_samples'] = len(eval_dataset)
337 | try:
338 | perplexity = math.exp(metrics["eval_loss"])
339 | except OverflowError:
340 | perplexity = float("inf")
341 | metrics["perplexity"] = perplexity
342 | logger.debug(f"eval metrics: {metrics}")
343 | self.results.update(metrics)
344 | trainer.log_metrics("eval", metrics)
345 | trainer.save_metrics("eval", metrics)
346 |
347 | if training_args.local_rank <= 0:
348 | logger.debug(f"metrics: {self.results}")
349 | logger.info(
350 | " Training of {} model complete. Saved to {}.".format(
351 | self.model_name_or_path, output_dir
352 | )
353 | )
354 |
355 | return global_step, metrics
356 |
357 | def save_model(self, output_dir):
358 | """Save the model and the tokenizer."""
359 | os.makedirs(output_dir, exist_ok=True)
360 | model = self.model
361 | # Take care of distributed/parallel training
362 | model_to_save = model.module if hasattr(model, "module") else model
363 | model_to_save.save_pretrained(output_dir)
364 | self.tokenizer.save_pretrained(output_dir)
365 |
366 | def smart_tokenizer_and_embedding_resize(
367 | self,
368 | special_tokens_dict: Dict,
369 | tokenizer: PreTrainedTokenizer,
370 | model: PreTrainedModel,
371 | ):
372 | """Resize tokenizer and embedding.
373 |
374 | Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
375 | """
376 | num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
377 | model.resize_token_embeddings(len(tokenizer))
378 |
379 | if num_new_tokens > 0:
380 | input_embeddings = model.get_input_embeddings().weight.data
381 | output_embeddings = model.get_output_embeddings().weight.data
382 |
383 | input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
384 | output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
385 |
386 | input_embeddings[-num_new_tokens:] = input_embeddings_avg
387 | output_embeddings[-num_new_tokens:] = output_embeddings_avg
388 |
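# Illustration (numbers are made up): adding 2 new special tokens to a
# 50257-token vocabulary grows both embedding matrices to 50259 rows; the two
# new rows are initialized to the mean of the existing embeddings, which is a
# more stable starting point than random initialization.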
389 | def _tokenize_fn(self, strings: Sequence[str], tokenizer: PreTrainedTokenizer) -> Dict:
390 | """Tokenize a list of strings."""
391 | tokenized_list = [
392 | tokenizer(
393 | text,
394 | return_tensors="pt",
395 | padding="longest",
396 | max_length=tokenizer.model_max_length,
397 | truncation=True,
398 | )
399 | for text in strings
400 | ]
401 | input_ids = [tokenized.input_ids[0] for tokenized in tokenized_list]
402 | input_ids_lens = [
403 | tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
404 | ]
405 | return dict(
406 | input_ids=input_ids,
407 | input_ids_lens=input_ids_lens,
408 | )
409 |
410 | def preprocess(
411 | self,
412 | sources: Sequence[str],
413 | targets: Sequence[str],
414 | tokenizer: PreTrainedTokenizer,
415 | ) -> Dict:
416 | """Preprocess the data by tokenizing."""
417 | examples = [s + t for s, t in zip(sources, targets)]
418 | examples_tokenized, sources_tokenized = [
419 | self._tokenize_fn(strings, tokenizer) for strings in (examples, sources)
420 | ]
421 | input_ids = examples_tokenized["input_ids"]
422 | labels = deepcopy(input_ids)
423 | for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]):
424 | label[:source_len] = IGNORE_INDEX
425 | return dict(input_ids=input_ids, labels=labels)
426 |
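# Illustration of the masking above (token ids are made up): with source
# tokens [s1, s2] and target tokens [t1, t2, eos], input_ids becomes
# [s1, s2, t1, t2, eos] and labels becomes [IGNORE_INDEX, IGNORE_INDEX, t1, t2, eos],
# so the loss is computed only on the response tokens.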
427 | def train_tokenize_function(self, examples, tokenizer):
428 | prompt_input, prompt_no_input = PROMPT_DICT["prompt_input"], PROMPT_DICT["prompt_no_input"]
429 | if 'input' in examples:
430 | sources = [
431 | prompt_input.format_map(dict(instruction=instruction, input=input)) if input != ""
432 | else prompt_no_input.format_map(dict(instruction=instruction))
433 | for instruction, input in zip(examples['instruction'], examples['input'])
434 | ]
435 | else:
436 | sources = [
437 | prompt_no_input.format_map(dict(instruction=instruction))
438 | for instruction in examples['instruction']
439 | ]
440 | targets = [f"{output}{tokenizer.eos_token}" for output in examples['output']]
441 | data_dict = self.preprocess(sources, targets, tokenizer)
442 | return data_dict
443 |
444 | @torch.inference_mode()
445 | def generate(
446 | self,
447 | sentences: Union[List[str], str],
448 | keep_prompt: bool = False,
449 | add_system_prompt: bool = True,
450 | eval_batch_size: int = 4,
451 | max_length: int = 256,
452 |             temperature: float = 1.0,
453 | top_k: int = 50,
454 | top_p: float = 0.95,
455 | do_sample: bool = True,
456 | num_beams: int = 1,
457 | repetition_penalty: float = 1.0,
458 | length_penalty: float = 1.2,
459 | early_stopping: bool = True,
460 | verbose: bool = True,
461 | **kwargs
462 | ) -> List[str]:
463 | """
464 | Performs predictions on a list of text.
465 |
466 | Args:
467 | sentences: A prompt text for the model.
468 | keep_prompt: Whether to keep the prompt in the generated text.
469 | add_system_prompt: Whether to add the system prompt to the prompt text.
470 | eval_batch_size: The batch size for evaluation.
471 | max_length: The maximum length of the generated text.
472 | temperature: The sampling temperature.
473 |             top_k: The number of highest-probability tokens kept for top-k sampling.
474 |             top_p: The cumulative probability threshold for nucleus (top-p) sampling.
475 | num_beams: The number of beams for beam search.
476 | repetition_penalty: The repetition penalty parameter.
477 | do_sample: Boolean value indicating whether to sample or greedy generate.
478 | length_penalty: The length penalty parameter.
479 | early_stopping: Boolean value indicating whether to do early stopping or not.
480 | verbose: Boolean value indicating whether to print the progress bar.
481 | **kwargs: Additional arguments for the generate method of the model.
482 | Returns:
483 | generated_sequences: list, Sequences of text generated by the model.
484 | """
485 | all_outputs = []
486 | if isinstance(sentences, str):
487 | sentences = [sentences]
488 | for batch in tqdm(
489 | [sentences[i: i + eval_batch_size] for i in range(0, len(sentences), eval_batch_size)],
490 | desc="Generating outputs",
491 | disable=not verbose,
492 | ):
493 | if add_system_prompt:
494 | batch = [PROMPT_DICT['prompt_no_input'].format(instruction=s) for s in batch]
495 | inputs = self.tokenizer(
496 | batch,
497 | return_tensors="pt",
498 | max_length=max_length,
499 | truncation=True,
500 | padding=True,
501 | )
502 | outputs = self.model.generate(
503 | input_ids=inputs['input_ids'].to(self.device),
504 | max_new_tokens=max_length,
505 | temperature=temperature,
506 | top_k=top_k,
507 | top_p=top_p,
508 | num_beams=num_beams,
509 | repetition_penalty=repetition_penalty,
510 | do_sample=do_sample,
511 | length_penalty=length_penalty,
512 | early_stopping=early_stopping,
513 | bos_token_id=self.tokenizer.bos_token_id,
514 | pad_token_id=self.tokenizer.pad_token_id,
515 | eos_token_id=self.tokenizer.eos_token_id,
516 | return_dict_in_generate=True,
517 | output_scores=True,
518 | **kwargs
519 | )
520 | for idx, (prompt_text, generated_sequence) in enumerate(zip(batch, outputs.sequences)):
521 | # Decode text
522 | text = self.tokenizer.decode(generated_sequence, skip_special_tokens=True)
523 | prompt_len = len(prompt_text)
524 | gen_text = text[prompt_len:]
525 | if keep_prompt:
526 | total_sequence = prompt_text + gen_text
527 | else:
528 | total_sequence = gen_text
529 | all_outputs.append(total_sequence)
530 | return all_outputs
531 |
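# Usage sketch (illustrative; the instruction text is an assumption):
#
#   coder = WizardCoder("WizardLM/WizardCoder-15B-V1.0")
#   print(coder.generate("Write a Python function that reverses a string.")[0])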
532 |
533 | @dataclass
534 | class DataCollatorForSupervisedDataset(object):
535 | """Collate examples for supervised fine-tuning."""
536 |
537 | tokenizer: PreTrainedTokenizer
538 |
539 | def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
540 | input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
541 | input_ids = [torch.tensor(x) for x in input_ids]
542 | input_ids = torch.nn.utils.rnn.pad_sequence(
543 | input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
544 | )
545 | labels = [torch.tensor(x) for x in labels]
546 | labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
547 | return dict(
548 | input_ids=input_ids,
549 | labels=labels,
550 | attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
551 | )
552 |
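# Illustration (lengths are made up): given two instances with input_ids of
# lengths 3 and 5, __call__ right-pads input_ids with pad_token_id and labels
# with IGNORE_INDEX to a (2, 5) batch, and attention_mask is True exactly at
# the non-pad positions.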
--------------------------------------------------------------------------------
/docs/36-text-rep-examples.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Text Representation Examples
3 | permalink: /docs/text-rep-examples/
4 | excerpt: "Text Representation Examples"
5 | last_modified_at: 2020/07/26 23:16:38
6 | toc: true
7 | ---
8 |
9 | ### Minimal example for generating word embeddings
10 | Generate a list of contextual word embeddings for every sentence in a list
11 | ```python
12 | from simpletransformers.language_representation import RepresentationModel
13 |
14 | sentences = ["Example sentence 1", "Example sentence 2"]
15 | model = RepresentationModel(
16 | model_type="bert",
17 | model_name="bert-base-uncased",
18 | use_cuda=False
19 | )
20 | word_vectors = model.encode_sentences(sentences, combine_strategy=None)
21 | assert word_vectors.shape == (2, 5, 768) # token vector for every token in each sentence; BERT-based models add 2 tokens per sentence by default ([CLS] & [SEP])
22 | ```
23 |
24 | ### Minimal example for generating sentence embeddings
25 | The code is the same as for generating word embeddings; the only difference is that we pass the `combine_strategy="mean"` parameter
26 | ```python
27 | from simpletransformers.language_representation import RepresentationModel
28 | sentences = ["Example sentence 1", "Example sentence 2"]
29 | model = RepresentationModel(
30 | model_type="bert",
31 | model_name="bert-base-uncased",
32 | use_cuda=False
33 | )
34 | word_vectors = model.encode_sentences(sentences, combine_strategy="mean")
35 | assert word_vectors.shape == (2, 768) # one sentence embedding per sentence
36 | ```
37 |
--------------------------------------------------------------------------------
/docs/api.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shibing624/CodeAssist/c5885d2d20e3dbea7d653da17e2a635951f522a5/docs/api.png
--------------------------------------------------------------------------------
/docs/codeassist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shibing624/CodeAssist/c5885d2d20e3dbea7d653da17e2a635951f522a5/docs/codeassist.png
--------------------------------------------------------------------------------
/docs/hf_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shibing624/CodeAssist/c5885d2d20e3dbea7d653da17e2a635951f522a5/docs/hf_model.png
--------------------------------------------------------------------------------
/docs/source_code_datasets/source_code_dataset.md:
--------------------------------------------------------------------------------
1 | ---
2 | annotations_creators:
3 | - no-annotation
4 | language_creators:
5 | - crowdsourced
6 | languages:
7 | - en
8 | licenses:
9 | - cc-by-4-0
10 | - gfdl-1-3-or-later
11 | multilinguality:
12 | - monolingual
13 | size_categories:
14 | - 100M<n<1B
--------------------------------------------------------------------------------
/examples/data/code_alpaca_20k_50.jsonl:
--------------------------------------------------------------------------------
7 | max_diff:\n max_diff = diff\n return max_diff"}
8 | {"instruction": "Write a function to generate the nth Fibonacci number.", "input": "< noinput >", "output": "def fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n-2) + fib(n-1)"}
9 | {"instruction": "Write a class to represent a 2D point with x and y coordinates.", "input": "", "output": "class Point:\n def __init__(self, x, y):\n self.x = x \n self.y = y \n \n def __str__(self): \n return \"({0},{1})\".format(self.x, self.y)"}
10 | {"instruction": "Write code that removes spaces from a given string.", "input": "string = \" A B C D \"", "output": "def remove_spaces(string): \n return \"\".join(string.split())"}
11 | {"instruction": "Write a function that checks if a give number is even or odd.", "input": "", "output": "def is_even(n):\n if n%2 == 0:\n return True \n else:\n return False"}
12 | {"instruction": "Write code to create a list of all prime numbers between 2 and 100.", "input": "", "output": "def find_primes(n): \n prime_list = [2] \n for number in range(2, n + 1): \n is_prime = True\n for k in range(2, number): \n if number % k == 0: \n is_prime = False \n if is_prime: \n prime_list.append(number) \n return prime_list"}
13 | {"instruction": "Edit the following code to print out all even numbers from 1 to 10.", "input": "for i in range(1,11):\n print(i)", "output": "for i in range(1,11):\n if i % 2 == 0:\n print(i)"}
14 | {"instruction": "Write code to find the sum of all numbers between 1 and 10.", "input": "", "output": "sum = 0\nfor i in range(1,11):\n sum += i\nprint(sum)"}
15 | {"instruction": "Create a function to calculate the area of a given circle.", "input": "", "output": "def area_circle(radius):\n return 3.14 * (radius**2)"}
16 | {"instruction": "Follow the comments in the code to complete the program which prints a conversion table from Fahrenheit to Celsius.", "input": "# This program prints a conversion table from Fahrenheit to Celsius\n\n# Print a heading for the table\n# TODO\n\n# Print the column headings\n# TODO\n\n# Calculate and print the table body\n# TODO", "output": "# This program prints a conversion table from Fahrenheit to Celsius\n\n# Print a heading for the table\nprint(\"Fahrenheit to Celsius Conversion Table\")\n\n# Print the column headings\nprint(\"Fahrenheit \\t Celsius\")\n\n# Calculate and print the table body\nfor fahr in range(-50 , 121):\n celsius = (fahr - 32) * 5 / 9\n print(\"%3d \\t\\t %.2f\" % (fahr, celsius))"}
17 | {"instruction": "Develop a classification algorithm in Python to predict whether a bird is a hawk or a falcon.", "input": "", "output": "import pandas as pd\nimport numpy as np\n\n# Define features\nfeatures = [\"wing_shape\", \"size\", \"tails_length\", \"color_pattern\"] \n\n# Load the data\ndata = pd.read_csv(\"birds.csv\")\n\n# Preprocess the data to generate feature vectors\nX = np.array(data[features]) \n\n# Make a target vector \ny = np.array(data[\"species\"]) \n\n# Divide the data into training and test sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n# Train a classification model\nfrom sklearn.tree import DecisionTreeClassifier\nmodel = DecisionTreeClassifier()\nmodel.fit(X_train, y_train)\n\n# Make predictions\ny_pred = model.predict(X_test)\n\n# Generate evaluation metrics\nfrom sklearn.metrics import accuracy_score\naccuracy = accuracy_score(y_test, y_pred)\n\nprint(\"Model accuracy: %.2f\" % accuracy)"}
18 | {"instruction": "Create a function in Java for computing the factorial of a positive integer number.", "input": "", "output": "public static long factorial(int n) {\n if (n == 0) {\n return 1;\n }\n long result = 1;\n for (int i = 1; i <= n; i++) {\n result *= i;\n }\n return result;\n}"}
19 | {"instruction": "Create a C++ program for randomly shuffling an array of integers.", "input": "", "output": "#include \n#include \n#include \n\nvoid shuffleArray(int array[], int size) {\n\tstd::random_device rd;\n\tstd::mt19937 g(rd()); \n\tstd::shuffle(array, array+size, g); \n}\n\nint main() \n{ \n\tint array[] = {1, 2, 3, 4, 5, 6, 7, 8}; \n\tint size = sizeof(array) / sizeof(array[0]); \n\n // Print original array\n std::cout << \"Original array: \";\n for (int i : array)\n std::cout << i << \" \";\n std::cout << \"\\n\";\n\n // Shuffle array\n\tshuffleArray(array, size); \n\n // Print shuffled array\n std::cout << \"Shuffled array: \";\n for (int i : array)\n std::cout << i << \" \";\n std::cout << \"\\n\";\n\n\treturn 0; \n}"}
20 | {"instruction": "Use the function to debug the given program and prevent the segmentation fault.", "input": "#include \n\nint main() {\n int array[] = {1, 2, 3, 4, 5};\n int n = 5;\n int i;\n // TODO\n for (i = 0; i <= n; i++) {\n printf(\"%d\\n\", array[i]);\n }\n return 0;\n}", "output": "#include \n\nint main() {\n int array[] = {1, 2, 3, 4, 5};\n int n = 5;\n int i;\n // Limit loop size to the length of the array\n for (i = 0; i < n; i++) {\n printf(\"%d\\n\", array[i]);\n }\n return 0;\n}"}
21 | {"instruction": "Replace the placeholder with a for loop for iterating over the numbers from 1 to n and printing them.", "input": "for i in range(n):\n # TODO", "output": "for i in range(1, n+1):\n print(i)"}
22 | {"instruction": "Use try-except statements to detect and handle exceptions in the code.", "input": "for i in range(n):\n x = 1/i", "output": "try:\n for i in range(n):\n x = 1/i\nexcept ZeroDivisionError:\n print(\"Zero division error encountered!\")"}
23 | {"instruction": "Re-write the code segment as a list comprehension.", "input": "numbers = []\nfor i in range(10):\n if i % 2 == 0:\n numbers.append(i)", "output": "numbers = [i for i in range(10) if i % 2 == 0]"}
24 | {"instruction": "Remove the boilerplate code from the program for printing 10 random numbers.", "input": "import random\n\n# Generate 10 random numbers\nfor i in range(10):\n # TODO", "output": "import random\n\n# Generate 10 random numbers\nfor i in range(10):\n print(random.randrange(1, 101))"}
25 | {"instruction": "Modify the code to perform the mathematical expression “x to the power of y”.", "input": "# Calculate x to the power of y\n# TODO", "output": "# Calculate x to the power of y\nx_pow_y = x**y"}
26 | {"instruction": "Create a dictionary for counting the occurrence of each letter in the given string.", "input": "string = \"hello world\"\n#TODO", "output": "string = \"hello world\"\nletter_counts = {}\n\nfor letter in string:\n if letter in letter_counts:\n letter_counts[letter] += 1\n else:\n letter_counts[letter] = 1\n\nprint(letter_counts)"}
27 | {"instruction": "Follow the comments to complete the code that finds the maximum value in an array.", "input": "# Find the maximum value in an array\narr = [4, 5, 2, 8, 7]\n# TODO", "output": "# Find the maximum value in an array\narr = [4, 5, 2, 8, 7]\n\nmax_value = arr[0]\nfor i in range(1, len(arr)):\n if arr[i] > max_value:\n max_value = arr[i]\n\nprint(max_value)"}
28 | {"instruction": "Write a function in HTML for creating a table of n rows and m columns.", "input": "", "output": "\n \n <% for (let i = 0; i < n; i++) { %>\n \n <% for (let j = 0; j < m; j++) { %>\n | Cell Content | \n <% } %>\n
\n <% } %>\n \n
"}
29 | {"instruction": "Create a CSS script that centers the login form on the webpage.", "input": "", "output": ".login-form {\n position: absolute;\n top: 50%;\n left: 50%;\n transform: translate(-50%, -50%);\n}"}
30 | {"instruction": "Create a Java program that prints out the sum of the numbers in the array.", "input": "int[] numbers = {1, 2, 3 , 4, 5};", "output": "public class SumOfArray {\n public static void main(String[] args) {\n int[] numbers = {1, 2, 3 , 4, 5};\n int sum = 0;\n for(int i=0; i < numbers.length;i++) {\n sum = sum + numbers[i];\n }\n System.out.println(\"The sum of numbers in the given array is: \" + sum);\n }\n}"}
31 | {"instruction": "Write a SQL query to calculate the total number of orders in the \"orders\" table.", "input": "", "output": "SELECT COUNT(*) AS \"Total Number of Orders\" \nFROM orders;"}
32 | {"instruction": "Write a Python function to convert a given string to lowercase.", "input": "string = \"FOO\"", "output": "def to_lowercase(string):\n return string.lower()\n\nstring = \"FOO\"\nprint(to_lowercase(string))"}
33 | {"instruction": "Create a C program that prints out the Fibonacci sequence.", "input": "", "output": "#include \n\nint main()\n{\n int i, num, first=0, second=1, next;\n printf(\"Enter the number of elements: \");\n scanf(\"%d\", &num);\n printf(\"Fibonacci Series: \");\n for (i = 0; i < num; i++)\n {\n if (i <= 1)\n next = i;\n else\n {\n next = first + second;\n first = second;\n second = next;\n }\n printf(\"%d \", next);\n }\n return 0;\n}"}
34 | {"instruction": "Write a function in JavaScript to count the number of vowels in a given string.", "input": "const string = \"abcdefg\"", "output": "function countVowels(string) {\n let vowelCount = 0;\n const vowels = [\"a\",\"e\",\"i\",\"o\",\"u\"];\n \n for (let i = 0; i < string.length; i++) {\n if (vowels.includes(string[i])) {\n vowelCount++;\n }\n }\n \n return vowelCount;\n}\n\nconst string = \"abcdefg\";\nconsole.log(countVowels(string));"}
35 | {"instruction": "Create a JSX code that renders a \"Hello, World!\" message in the browser.", "input": "", "output": "import React from \"react\";\n\nfunction App() {\n return (\n \n
Hello, World!
\n \n );\n}\n\nexport default App;"}
36 | {"instruction": "Write an SQL query to calculate the average order price for each product category in the \"orders\" table.", "input": "", "output": "SELECT category, AVG(price) as \"Average Order Price\"\nFROM orders\nGROUP BY category;"}
37 | {"instruction": "Create a Java program that prints out the contents of a given two-dimensional array.", "input": "int[][] arr = {{1,2},{3,4},{5,6}}", "output": "public class TwoDArray {\n\n public static void main(String[] args) {\n int[][] arr = {{1,2},{3,4},{5,6}};\n\n for(int i=0;i\nusing namespace std;\n \nint main()\n{\n int array[] = {6, 3, 8, 12, 9, 4};\n int arraySize = sizeof(array)/sizeof(array[0]);\n \n //ascending order sorting\n for(int i = 0; i < arraySize; i++)\n {\n // find the position of smallest element\n int small = i;\n for(int j = i+1; j < arraySize; j++)\n {\n if(array[j] > array[small])\n small = j;\n }\n \n // swap the position of smallest element\n int temp = array[i];\n array[i] = array[small];\n array[small] = temp;\n }\n \n // print the sorted array\n cout << \"Sorted array : \";\n for(int i = 0; i < arraySize; i++)\n cout << array[i] << \" \";\n \n return 0;\n}"}
39 | {"instruction": "Edit the Python code so that it prints out the total number of elements in the list.", "input": "list = [1, 2, 3, 4, 5]", "output": "list = [1, 2, 3, 4, 5]\nprint(\"Total number of elements in the list: \", len(list))"}
40 | {"instruction": "Create a Swift code that show the number of days in the given month.", "input": "int month = 5", "output": "let month = 5\nlet daysEndingInth = [3, 0, 3, 2, 3, 2, 3, 3, 2, 3, 2, 3]\nlet daysInMonth = daysEndingInth[month - 1] \nprint(\"The number of days in the month of \\(month) is \\(daysInMonth).\")"}
41 | {"instruction": "Write a HTML code that create a form for user to fill out their current address.", "input": "", "output": ""}
42 | {"instruction": "Write a SQL query to find the total number of orders placed between two given dates.", "input": "start_date = '2020-01-01'\nend_date = '2020-02-01'", "output": "SELECT COUNT(*)\nFROM orders\nWHERE order_date BETWEEN '2020-01-01' AND '2020-02-01';"}
43 | {"instruction": "Create a Ruby code that prints out the median of a given array of numbers.", "input": "nums = [1, 7, 3, 6]", "output": "nums = [1, 7, 3, 6]\nsorted_nums = nums.sort\n\n# Calculation of median \nif sorted_nums.length % 2 == 0\n mid_index = sorted_nums.length/2\n median = (sorted_nums[mid_index] + sorted_nums[mid_index - 1])/2.to_f\nelse\n mid_index = sorted_nums.length/2\n median = sorted_nums[mid_index]\nend\n\nputs median"}
44 | {"instruction": "Create a PHP code to check if an array contains a given item.", "input": "$input_array = array(1,2,3,4,5);\n$item_to_check = 3;", "output": ""}
45 | {"instruction": "Edit the code to create a try-except block to handle the exception.", "input": "a = 5\nb = 0\n\nans = a/b", "output": "try:\n a = 5\n b = 0\n\n ans = a/b\nexcept ZeroDivisionError:\n print(\"Cannot divide by zero.\")"}
46 | {"instruction": "Write a script using Bash that replaces all vowels with a number.", "input": "", "output": "#!/bin/bash\n\nstring=\"Hello World!\"\nvowels=(\"a\" \"e\" \"i\" \"o\" \"u\" \"A\" \"E\" \"I\" \"O\" \"U\")\n\nfor i in \"${vowels[@]}\"\ndo\n string=\"${string//$i/0}\"\ndone\n\necho $string #H0ll0 W0rld!"}
47 | {"instruction": "Change the variable `x` to the data type desired.", "input": "x = 'Hello World!'", "output": "x = \"Hello World!\""}
48 | {"instruction": "Create a function that takes an array as an argument and returns the sum of all the elements in the array.", "input": "arr = [1, 2, 3, 4, 5]", "output": "def arraySum(arr):\n total = 0\n for num in arr:\n total += num\n return total"}
49 | {"instruction": "Sort the array in ascending order.", "input": "arr = [3, 2, 1, 5, 4]", "output": "arr.sort() # [1, 2, 3, 4, 5]"}
50 | {"instruction": "Generate a random integer between 4 and 8 (inclusively).", "input": "", "output": "import random\nx = random.randint(4, 8)"}
51 |
--------------------------------------------------------------------------------
/examples/distilgpt2_demo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import sys
7 |
8 | sys.path.append('..')
9 | from codeassist import GPT2Coder
10 |
11 | m = GPT2Coder("shibing624/code-autocomplete-distilgpt2-python")
12 | print(m.generate('def load_csv_file(file_path):')[0])
13 |
--------------------------------------------------------------------------------
/examples/gpt2_demo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import sys
7 |
8 | sys.path.append('..')
9 | from codeassist.gpt2_coder import GPT2Coder
10 |
11 | m = GPT2Coder("shibing624/code-autocomplete-gpt2-base")
12 | print(m.generate('def load_csv_file(file_path):')[0])
13 |
--------------------------------------------------------------------------------
/examples/gradio_demo.py:
--------------------------------------------------------------------------------
1 | """
2 | @author:XuMing(xuming624@qq.com)
3 | @description:
4 | """
5 |
6 | import sys
7 |
8 | import gradio as gr
9 |
10 | sys.path.append('..')
11 | from codeassist.gpt2_coder import GPT2Coder
12 |
13 | model = GPT2Coder("shibing624/code-autocomplete-gpt2-base")
14 |
15 |
16 | def ai_text(text):
17 | gen_text = model.generate(text)[0]
18 | print(text, ' => ', gen_text)
19 | return gen_text
20 |
21 |
22 | if __name__ == '__main__':
23 | print(ai_text('import torch.nn as'))
24 |
25 | examples = [
26 | ['def load_csv_file(file_path):'],
27 | ['import torch.nn as'],
28 | ['parser.add_argument("--num_train_epochs",'],
29 | ['torch.device('],
30 | ['def set_seed('],
31 | ]
32 |
33 | output_text = gr.outputs.Textbox()
34 | gr.Interface(ai_text, "textbox", output_text,
35 | # theme="grass",
36 | title="Code Autocomplete Model shibing624/code-autocomplete-gpt2-base",
37 | description="Copy or input python code here. Submit and the machine will generate code.",
38 | article="Link to Github REPO",
39 | examples=examples).launch()
40 |
--------------------------------------------------------------------------------
/examples/inference_demo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import argparse
7 | import sys
8 |
9 | sys.path.append('..')
10 | from codeassist import GPT2Coder, WizardCoder
11 |
12 | if __name__ == '__main__':
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument('--model_type', type=str, default="wizard", help='wizard or gpt2')
15 | parser.add_argument('--model_name', type=str, default="WizardLM/WizardCoder-15B-V1.0", help='model name or path')
16 | parser.add_argument('--max_length', type=int, default=128, help='maximum length for code generation')
17 | parser.add_argument('--temperature', type=float, default=1.0, help='temperature for sampling-based code generation')
18 | args = parser.parse_args()
19 | print(args)
20 | if args.model_type == 'wizard':
21 | model = WizardCoder(args.model_name)
22 | else:
23 | model = GPT2Coder(args.model_name)
24 |
25 | # generate code
26 | while True:
27 |         print('Enter the context code ("exit" to quit):')
28 | context = input(">>> ")
29 | if context == "exit":
30 | break
31 | generated_codes = model.generate(context, temperature=args.temperature, max_length=args.max_length)
32 | print("Generated code:")
33 | for i, code in enumerate(generated_codes):
34 | print("{}:\n {}".format(i + 1, code))
35 | print("=" * 20)
36 |
--------------------------------------------------------------------------------
/examples/original_gpt2_demo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 |
7 | import sys
8 |
9 | sys.path.append('..')
10 | from codeassist.gpt2_coder import GPT2Coder
11 |
12 | if __name__ == '__main__':
13 | prompts = [
14 | "def load_csv_file(file_path):",
15 | "import numpy as np",
16 | "import torch.nn as",
17 | 'parser.add_argument("--num_train_epochs",',
18 | "def set_seed(",
19 | "def factorial",
20 | ]
21 |
22 | m = GPT2Coder("gpt2")
23 | for i in m.generate('import torch.nn as', num_return_sequences=3):
24 | print(i)
25 |
26 | for prompt in prompts:
27 | decoded = m.generate(prompt, num_return_sequences=1)
28 | print("Input :", prompt)
29 | print("Output:", decoded[0])
30 | print("=" * 20)
31 |
--------------------------------------------------------------------------------
/examples/prepare_code_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import argparse
7 | import sys
8 |
9 | from sklearn.model_selection import train_test_split
10 |
11 | sys.path.append("..")
12 | from codeassist import create_dataset
13 |
14 |
15 | def main():
16 | parser = argparse.ArgumentParser()
17 | parser.add_argument("--save_dir", type=str, default="download", help="Save dataset directory")
18 | parser.add_argument("--num_repos", type=int, default=3, help="Number of repos to use")
19 | parser.add_argument("--code", default="python", const='python', nargs='?',
20 | choices=['python', 'java', 'cpp'], help="Download code language source code dataset")
21 | args = parser.parse_args()
22 | print(args)
23 | sources = dict()
24 | try:
25 | sources = create_dataset.get_source_code_by_language(code_languages=args.code,
26 | save_dir=args.save_dir,
27 | each_limit_repos=args.num_repos
28 | )
29 | except KeyboardInterrupt:
30 | pass
31 | X = sources[f"{args.code}"]
32 | X_train, X_test = train_test_split(X, test_size=0.2, random_state=1)
33 | X_train, X_val = train_test_split(X_train, test_size=0.25, random_state=1) # 0.25 x 0.8 = 0.2
34 | train_file = f'{args.save_dir}/{args.code}/train.txt'
35 | valid_file = f'{args.save_dir}/{args.code}/valid.txt'
36 | test_file = f'{args.save_dir}/{args.code}/test.txt'
37 | create_dataset.merge_and_save(X_train, train_file)
38 | create_dataset.merge_and_save(X_val, valid_file)
39 | create_dataset.merge_and_save(X_test, test_file)
40 | print(f'Save train file: {train_file}, valid file: {valid_file}, test file: {test_file}')
41 |
42 |
43 | if __name__ == '__main__':
44 | main()
45 |
--------------------------------------------------------------------------------
/examples/server.py:
--------------------------------------------------------------------------------
1 | """
2 | @author:XuMing(xuming624@qq.com)
3 | @description: pip install fastapi uvicorn
4 | """
5 | import argparse
6 | import os
7 | import sys
8 |
9 | import torch
10 | import uvicorn
11 | from fastapi import FastAPI, Query
12 | from loguru import logger
13 | from starlette.middleware.cors import CORSMiddleware
14 |
15 | sys.path.append('..')
16 | from codeassist import GPT2Coder
17 |
18 | pwd_path = os.path.abspath(os.path.dirname(__file__))
19 | use_cuda = torch.cuda.is_available()
20 | # Use finetuned GPT model
21 | parser = argparse.ArgumentParser()
22 | parser.add_argument("--model_name_or_path", type=str, default="shibing624/code-autocomplete-gpt2-base",
23 | help="Model save dir or model name")
24 | args = parser.parse_args()
25 | model = GPT2Coder(args.model_name_or_path)
26 |
27 | # define the app
28 | app = FastAPI()
29 | app.add_middleware(
30 | CORSMiddleware,
31 | allow_origins=["*"],
32 | allow_credentials=True,
33 | allow_methods=["*"],
34 | allow_headers=["*"])
35 |
36 |
37 | @app.get('/')
38 | async def index():
39 | return {"message": "index, docs url: /docs"}
40 |
41 |
42 | @app.get('/autocomplete')
43 | async def autocomplete(q: str = Query(..., min_length=1, max_length=512, title='query')):
44 | try:
45 |         # Generate code completions for the query using the model
46 | generated = model.generate(q)
47 | result_dict = generated[0]
48 | logger.debug(f"Successfully autocomplete, q:{q}, res:{result_dict}")
49 | return result_dict
50 | except Exception as e:
51 | logger.error(e)
52 |         return {'status': False, 'msg': str(e)}
53 |
54 |
55 | if __name__ == '__main__':
56 | uvicorn.run(app=app, host='0.0.0.0', port=8001)
57 |
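# Example request against the running server (host/port as configured above;
# the prompt in the query string is illustrative):
#   curl "http://0.0.0.0:8001/autocomplete?q=def%20add("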
--------------------------------------------------------------------------------
/examples/training_gpt2_mydata.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import argparse
7 | import sys
8 |
9 | sys.path.append('..')
10 | from codeassist import GPT2Coder
11 |
12 | if __name__ == '__main__':
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--model_name", type=str, default="gpt2", help="Model arch, gpt2, gpt2-medium or distilgpt2")
15 | parser.add_argument("--train_file", type=str, default="data/train_code_5k.txt", help="Train file path")
16 | parser.add_argument("--valid_file", type=str, default="data/train_code_5k.txt", help="Valid file path")
17 | parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
18 | parser.add_argument("--do_predict", action="store_true", help="Whether to run predict.")
19 | parser.add_argument("--output_dir", type=str, default="./outputs-fine-tuned/", help="Output dir")
20 | parser.add_argument("--num_epochs", type=int, default=5, help="Num of training epochs")
21 | parser.add_argument("--batch_size", type=int, default=8, help="Batch size")
22 | args = parser.parse_args()
23 | print(args)
24 |
25 | if args.do_train:
26 | model = GPT2Coder(model_name_or_path=args.model_name)
27 | model.train_model(
28 | args.train_file,
29 | args.output_dir,
30 | eval_file=args.valid_file,
31 | num_epochs=args.num_epochs,
32 | batch_size=args.batch_size
33 | )
34 | print(f"model saved to {args.output_dir}")
35 | if args.do_predict:
36 | model = GPT2Coder(model_name_or_path=args.output_dir)
37 | prompts = [
38 | "def load_csv_file(file_path):",
39 | "import numpy as np",
40 | "import torch.nn as",
41 | 'parser.add_argument("--num_train_epochs",',
42 | "def set_seed(",
43 | "def factorial",
44 | ]
45 | for prompt in prompts:
46 | outputs = model.generate(prompt)
47 | print("Input :", prompt)
48 | print("Output:", outputs[0])
49 | print("=" * 20)
50 |
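A typical invocation, assuming the script is run from the examples/ directory with the bundled sample data (all flags are the ones defined above):

python training_gpt2_mydata.py --do_train --do_predict --num_epochs 5 --batch_size 8

Because training and prediction are gated separately, --do_predict can be rerun on its own once ./outputs-fine-tuned/ exists.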
--------------------------------------------------------------------------------
/examples/training_wizardcoder_mydata.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import argparse
7 | import sys
8 |
9 | sys.path.append('..')
10 | from codeassist import WizardCoder
11 |
12 | if __name__ == '__main__':
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--model_name", type=str, default="WizardLM/WizardCoder-15B-V1.0",
15 | help="Model arch, gpt2, gpt2-medium, distilgpt2 or WizardLM/WizardCoder-15B-V1.0")
16 | parser.add_argument("--train_file", type=str, default="data/code_alpaca_20k_50.jsonl", help="Train file path")
17 | parser.add_argument("--valid_file", type=str, default="data/code_alpaca_20k_50.jsonl", help="Valid file path")
18 | parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
19 | parser.add_argument("--do_predict", action="store_true", help="Whether to run predict.")
20 | parser.add_argument("--output_dir", type=str, default="./outputs-finetuned-wizardcoder/", help="output dir")
21 | parser.add_argument("--num_epochs", type=int, default=5, help="Num of training epochs")
22 | parser.add_argument("--batch_size", type=int, default=2, help="Batch size")
23 | args = parser.parse_args()
24 | print(args)
25 |
26 | if args.do_train:
27 | model = WizardCoder(model_name_or_path=args.model_name)
28 | model.train_model(
29 | args.train_file,
30 | args.output_dir,
31 | eval_file=args.valid_file,
32 | num_epochs=args.num_epochs,
33 | batch_size=args.batch_size
34 | )
35 | print(f"model saved to {args.output_dir}")
36 | if args.do_predict:
37 | model = WizardCoder(model_name_or_path=args.model_name, peft_name=args.output_dir)
38 | prompts = [
39 | "def load_csv_file(file_path):",
40 | "write a C++ code to sum 1 to 12.",
41 | "写个python的快排算法",
42 | "生成4到400之间的随机数,用java和python写代码",
43 | ]
44 | for prompt in prompts:
45 | outputs = model.generate(prompt)
46 | print("Input :", prompt)
47 | print("Output:", outputs[0])
48 | print("=" * 20)
49 |
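A typical invocation, assuming a GPU with enough memory for the 15B base model (the small default batch size reflects this):

python training_wizardcoder_mydata.py --do_train --do_predict --batch_size 2

Note that prediction reloads the base model and applies the fine-tuned adapter from --output_dir via the peft_name argument, rather than loading full fine-tuned weights.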
--------------------------------------------------------------------------------
/examples/use_transformers_gpt2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 |
7 | import os
8 |
9 | import torch
10 | from transformers import GPT2Tokenizer, GPT2LMHeadModel
11 |
12 | os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
13 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
14 |
15 | tokenizer = GPT2Tokenizer.from_pretrained("shibing624/code-autocomplete-gpt2-base")
16 | model = GPT2LMHeadModel.from_pretrained("shibing624/code-autocomplete-gpt2-base")
17 | model.to(device)
18 | prompts = [
19 | "def load_csv_file(file_path):",
20 | "import numpy as np",
21 | "import torch.nn as",
22 | 'parser.add_argument("--num_train_epochs",',
23 | "def set_seed(",
24 | "def factorial",
25 | ]
26 | for prompt in prompts:
27 | input_ids = tokenizer(prompt, return_tensors='pt').to(device).input_ids
28 | outputs = model.generate(
29 | input_ids=input_ids,
30 | max_length=64 + len(input_ids[0]),
31 | temperature=1.0,
32 | top_k=50,
33 | top_p=0.95,
34 | repetition_penalty=1.0,
35 | do_sample=True,
36 | num_return_sequences=1,
37 | length_penalty=2.0,
38 | early_stopping=True,
39 | pad_token_id=tokenizer.eos_token_id,
40 | eos_token_id=tokenizer.eos_token_id,
41 | )
42 | decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
43 | print("Input :", prompt)
44 | print("Output:", decoded)
45 | print("=" * 20)
46 |
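The max_length=64 + len(input_ids[0]) arithmetic above reserves a budget of 64 new tokens beyond the prompt. Recent transformers releases express the same intent more directly with max_new_tokens; a minimal sketch of the equivalent call:

outputs = model.generate(
    input_ids=input_ids,
    max_new_tokens=64,  # same budget as max_length=64 + len(input_ids[0])
    do_sample=True,
    top_k=50,
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,
)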
--------------------------------------------------------------------------------
/examples/wizardcoder_demo.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import sys
7 |
8 | sys.path.append('..')
9 | from codeassist import WizardCoder
10 |
11 | m = WizardCoder("WizardLM/WizardCoder-15B-V1.0")
12 | print(m.generate('def load_csv_file(file_path):')[0])
13 |
14 | prompts = [
15 | "write a function to load csv file and return a dataframe",
16 | "give me odd numbers from 1 to 10",
17 | "write a function to show fibonacci sequence and return a list",
18 | "write a Java code to sum 1 to 10.",
19 | "write a python code to sum 1 to 10.",
20 | "write a C++ code to sum 1 to 12.",
21 | "写个python的快排算法",
22 | "生成4到400之间的随机数,用java和python写代码",
23 | "写java代码,从1累加到100",
24 | "写python代码,从1累加到10",
25 | "写个斐波拉契数列,返回一个列表,python代码",
26 | "给出所有1-20的偶数列表,C++代码",
27 | "给出所有1-20的奇数列表,python代码",
28 | # the prompts below exercise the model's general chat ability
29 | "tell me about beijing",
30 | "give me a plan to NewYork city for three days trip",
31 | "详细介绍下南京",
32 | "列个南京的详细三天旅游计划",
33 | "失眠怎么办",
34 | ]
35 | for prompt in prompts:
36 | print('input :', prompt)
37 | print('output:', m.generate(prompt)[0])
38 | print()
39 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | loguru
2 | transformers
3 | datasets
4 | pandas
5 | tqdm
6 | torch
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | python_functions=test_
3 |
4 | codestyle_max_line_length = 119
5 |
6 | log_cli = true
7 | log_cli_level = WARNING
8 |
9 | [metadata]
10 | description-file = README.md
11 | license_file = LICENSE
12 |
13 | [pycodestyle]
14 | max-line-length = 119
15 |
16 | [flake8]
17 | max-line-length = 119
18 | ignore = E203, W503, F401
19 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from setuptools import setup, find_packages
3 |
4 | __version__ = "1.0.0"
5 |
6 | with open('README.md', 'r', encoding='utf-8') as f:
7 | readme = f.read()
8 |
9 | setup(
10 | name='codeassist',
11 | version=__version__,
12 | description='Code AutoComplete',
13 | long_description=readme,
14 | long_description_content_type='text/markdown',
15 | author='XuMing',
16 | author_email='xuming624@qq.com',
17 | url='https://github.com/shibing624/autocoder',
18 | license='Apache License 2.0',
19 | zip_safe=False,
20 | python_requires='>=3.6',
21 | classifiers=[
22 | 'Intended Audience :: Developers',
23 | 'Operating System :: OS Independent',
24 | 'Natural Language :: Chinese (Simplified)',
25 | 'Natural Language :: Chinese (Traditional)',
26 | 'Programming Language :: Python',
27 | 'Programming Language :: Python :: 3',
28 | 'Topic :: Text Processing',
29 | 'Topic :: Text Processing :: Indexing',
30 | 'Topic :: Text Processing :: Linguistic',
31 | ],
32 | keywords='CodeGenie,autocomplete,code-autocomplete',
33 | install_requires=[
34 | "loguru",
35 | "transformers",
36 | "pandas",
37 | "datasets",
38 | "tqdm",
39 | ],
40 | packages=find_packages(exclude=['tests']),
41 | package_dir={'autocoder': 'autocoder'},
42 | package_data={'autocoder': ['*.*']}
43 | )
44 |
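For development, the package can be installed in editable mode from the repo root (a standard setuptools workflow, not specific to this project):

pip install -e .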
--------------------------------------------------------------------------------
/tests/test.txt:
--------------------------------------------------------------------------------
1 | # PROJECT: pytorch_audio FILE: torchaudio/functional/functional.py
2 | # -*- coding: utf-8 -*-
3 |
4 | from collections.abc import Sequence
5 | import io
6 | import math
7 | import warnings
8 | from typing import Optional, Tuple
9 |
10 | import torch
11 | from torch import Tensor
12 | from torchaudio._internal import module_utils as _mod_utils
13 | import torchaudio
14 |
15 | __all__ = [
16 | "spectrogram",
17 | "inverse_spectrogram",
18 | "griffinlim",
19 | "amplitude_to_DB",
20 | "DB_to_amplitude",
21 | "compute_deltas",
22 | "compute_kaldi_pitch",
23 | "melscale_fbanks",
24 | "linear_fbanks",
25 | "create_dct",
26 | "compute_deltas",
27 | "detect_pitch_frequency",
28 | "DB_to_amplitude",
29 | "mu_law_encoding",
30 | "mu_law_decoding",
31 | "phase_vocoder",
32 | 'mask_along_axis',
33 | 'mask_along_axis_iid',
34 | 'sliding_window_cmn',
35 | "spectral_centroid",
36 | "apply_codec",
37 | "resample",
38 | "edit_distance",
39 | "pitch_shift",
40 | "rnnt_loss",
41 | ]
42 |
43 |
44 | def spectrogram(
45 | waveform: Tensor,
46 | pad: int,
47 | window: Tensor,
48 | n_fft: int,
49 | hop_length: int,
50 | win_length: int,
51 | power: Optional[float],
52 | normalized: bool,
53 | center: bool = True,
54 | pad_mode: str = "reflect",
55 | onesided: bool = True,
56 | return_complex: Optional[bool] = None,
57 | ) -> Tensor:
58 | r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
59 | The spectrogram can be either magnitude-only or complex.
60 |
61 | Args:
62 | waveform (Tensor): Tensor of audio of dimension `(..., time)`
63 | pad (int): Two sided padding of signal
64 | window (Tensor): Window tensor that is applied/multiplied to each frame/window
65 | n_fft (int): Size of FFT
66 | hop_length (int): Length of hop between STFT windows
67 | win_length (int): Window size
68 | power (float or None): Exponent for the magnitude spectrogram,
69 | (must be > 0) e.g., 1 for energy, 2 for power, etc.
70 | If None, then the complex spectrum is returned instead.
71 | normalized (bool): Whether to normalize by magnitude after stft
72 | center (bool, optional): whether to pad :attr:`waveform` on both sides so
73 | that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
74 | Default: ``True``
75 | pad_mode (string, optional): controls the padding method used when
76 | :attr:`center` is ``True``. Default: ``"reflect"``
77 | onesided (bool, optional): controls whether to return half of results to
78 | avoid redundancy. Default: ``True``
79 | return_complex (bool, optional):
80 | Deprecated and not used.
81 |
82 | Returns:
83 | Tensor: Dimension `(..., freq, time)`, freq is
84 | ``n_fft // 2 + 1`` and ``n_fft`` is the number of
85 | Fourier bins, and time is the number of window hops (n_frame).
86 | """
87 | if return_complex is not None:
88 | warnings.warn(
89 | "`return_complex` argument is now deprecated and is not effective."
90 | "`torchaudio.functional.spectrogram(power=None)` always returns a tensor with "
91 | "complex dtype. Please remove the argument in the function call."
92 | )
93 |
94 | if pad > 0:
95 | # TODO add "with torch.no_grad():" back when JIT supports it
96 | waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
97 |
98 | # pack batch
99 | shape = waveform.size()
100 | waveform = waveform.reshape(-1, shape[-1])
101 |
102 | # default values are consistent with librosa.core.spectrum._spectrogram
103 | spec_f = torch.stft(
104 | input=waveform,
105 | n_fft=n_fft,
106 | hop_length=hop_length,
107 | win_length=win_length,
108 | window=window,
109 | center=center,
110 | pad_mode=pad_mode,
111 | normalized=False,
112 | onesided=onesided,
113 | return_complex=True,
114 | )
115 |
116 | # unpack batch
117 | spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
118 |
119 | if normalized:
120 | spec_f /= window.pow(2.).sum().sqrt()
121 | if power is not None:
122 | if power == 1.0:
123 | return spec_f.abs()
124 | return spec_f.abs().pow(power)
125 | return spec_f
126 |
127 |
128 | def inverse_spectrogram(
129 | spectrogram: Tensor,
130 | length: Optional[int],
131 | pad: int,
132 | window: Tensor,
133 | n_fft: int,
134 | hop_length: int,
135 | win_length: int,
136 | normalized: bool,
137 | center: bool = True,
138 | pad_mode: str = "reflect",
139 | onesided: bool = True,
140 | ) -> Tensor:
141 | r"""Create an inverse spectrogram or a batch of inverse spectrograms from the provided
142 | complex-valued spectrogram.
143 |
144 | Args:
145 | spectrogram (Tensor): Complex tensor of audio of dimension (..., freq, time).
146 | length (int or None): The output length of the waveform.
147 | pad (int): Two sided padding of signal. It is only effective when ``length`` is provided.
148 | window (Tensor): Window tensor that is applied/multiplied to each frame/window
149 | n_fft (int): Size of FFT
150 | hop_length (int): Length of hop between STFT windows
151 | win_length (int): Window size
152 | normalized (bool): Whether the stft output was normalized by magnitude
153 | center (bool, optional): whether the waveform was padded on both sides so
154 | that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
155 | Default: ``True``
156 | pad_mode (string, optional): controls the padding method used when
157 | :attr:`center` is ``True``. This parameter is provided for compatibility with the
158 | spectrogram function and is not used. Default: ``"reflect"``
159 | onesided (bool, optional): controls whether spectrogram was done in onesided mode.
160 | Default: ``True``
161 |
162 | Returns:
163 | Tensor: Dimension `(..., time)`. Least squares estimation of the original signal.
164 | """
165 |
166 | if not spectrogram.is_complex():
167 | raise ValueError("Expected `spectrogram` to be complex dtype.")
168 |
169 | if normalized:
170 | spectrogram = spectrogram * window.pow(2.).sum().sqrt()
171 |
172 | # pack batch
173 | shape = spectrogram.size()
174 | spectrogram = spectrogram.reshape(-1, shape[-2], shape[-1])
175 |
176 | # default values are consistent with librosa.core.spectrum._spectrogram
177 | waveform = torch.istft(
178 | input=spectrogram,
179 | n_fft=n_fft,
180 | hop_length=hop_length,
181 | win_length=win_length,
182 | window=window,
183 | center=center,
184 | normalized=False,
185 | onesided=onesided,
186 | length=length + 2 * pad if length is not None else None,
187 | return_complex=False,
188 | )
189 |
190 | if length is not None and pad > 0:
191 | # remove padding from front and back
192 | waveform = waveform[:, pad:-pad]
193 |
194 | # unpack batch
195 | waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
196 |
197 | return waveform
198 |
199 |
200 | def _get_complex_dtype(real_dtype: torch.dtype):
201 | if real_dtype == torch.double:
202 | return torch.cdouble
203 | if real_dtype == torch.float:
204 | return torch.cfloat
205 | if real_dtype == torch.half:
206 | return torch.complex32
207 | raise ValueError(f'Unexpected dtype {real_dtype}')
208 |
209 |
210 | def griffinlim(
211 | specgram: Tensor,
212 | window: Tensor,
213 | n_fft: int,
214 | hop_length: int,
215 | win_length: int,
216 | power: float,
217 | n_iter: int,
218 | momentum: float,
219 | length: Optional[int],
220 | rand_init: bool
221 | ) -> Tensor:
222 | r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
223 |
224 | Implementation ported from
225 | *librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`]
226 | and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`].
227 |
228 | Args:
229 | specgram (Tensor): A magnitude-only STFT spectrogram of dimension `(..., freq, frames)`
230 | where freq is ``n_fft // 2 + 1``.
231 | window (Tensor): Window tensor that is applied/multiplied to each frame/window
232 | n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
233 | hop_length (int): Length of hop between STFT windows.
234 | (Default: ``win_length // 2``)
235 | win_length (int): Window size. (Default: ``n_fft``)
236 | power (float): Exponent for the magnitude spectrogram,
237 | (must be > 0) e.g., 1 for energy, 2 for power, etc.
238 | n_iter (int): Number of iterations for the phase recovery process.
239 | momentum (float): The momentum parameter for fast Griffin-Lim.
240 | Setting this to 0 recovers the original Griffin-Lim method.
241 | Values near 1 can lead to faster convergence, but above 1 may not converge.
242 | length (int or None): Array length of the expected output.
243 | rand_init (bool): Initializes phase randomly if True, and to zero otherwise.
244 |
245 | Returns:
246 | Tensor: waveform of `(..., time)`, where time equals the ``length`` parameter if given.
247 | """
248 | assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
249 | assert momentum >= 0, 'momentum={} < 0'.format(momentum)
250 |
251 | # pack batch
252 | shape = specgram.size()
253 | specgram = specgram.reshape([-1] + list(shape[-2:]))
254 |
255 | specgram = specgram.pow(1 / power)
256 |
257 | # initialize the phase
258 | if rand_init:
259 | angles = torch.rand(
260 | specgram.size(),
261 | dtype=_get_complex_dtype(specgram.dtype), device=specgram.device)
262 | else:
263 | angles = torch.full(
264 | specgram.size(), 1,
265 | dtype=_get_complex_dtype(specgram.dtype), device=specgram.device)
266 |
267 | # And initialize the previous iterate to 0
268 | tprev = torch.tensor(0., dtype=specgram.dtype, device=specgram.device)
269 | for _ in range(n_iter):
270 | # Invert with our current estimate of the phases
271 | inverse = torch.istft(specgram * angles,
272 | n_fft=n_fft,
273 | hop_length=hop_length,
274 | win_length=win_length,
275 | window=window,
276 | length=length)
277 |
278 | # Rebuild the spectrogram
279 | rebuilt = torch.stft(
280 | input=inverse,
281 | n_fft=n_fft,
282 | hop_length=hop_length,
283 | win_length=win_length,
284 | window=window,
285 | center=True,
286 | pad_mode='reflect',
287 | normalized=False,
288 | onesided=True,
289 | return_complex=True,
290 | )
291 |
292 | # Update our phase estimates
293 | angles = rebuilt
294 | if momentum:
295 | angles = angles - tprev.mul_(momentum / (1 + momentum))
296 | angles = angles.div(angles.abs().add(1e-16))
297 |
298 | # Store the previous iterate
299 | tprev = rebuilt
300 |
301 | # Return the final phase estimates
302 | waveform = torch.istft(specgram * angles,
303 | n_fft=n_fft,
304 | hop_length=hop_length,
305 | win_length=win_length,
306 | window=window,
307 | length=length)
308 |
309 | # unpack batch
310 | waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
311 |
312 | return waveform
313 |
314 |
315 | def amplitude_to_DB(
316 | x: Tensor,
317 | multiplier: float,
318 | amin: float,
319 | db_multiplier: float,
320 | top_db: Optional[float] = None
321 | ) -> Tensor:
322 | r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
323 |
324 | The output of each tensor in a batch depends on the maximum value of that tensor,
325 | and so may return different values for an audio clip split into snippets vs. a full clip.
326 |
327 | Args:
328 |
329 | x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
330 | the form `(..., freq, time)`. Batched inputs should include a channel dimension and
331 | have the form `(batch, channel, freq, time)`.
332 | multiplier (float): Use 10. for power and 20. for amplitude
333 | amin (float): Number to clamp ``x``
334 | db_multiplier (float): Log10(max(reference value and amin))
335 | top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
336 | is 80. (Default: ``None``)
337 |
338 | Returns:
339 | Tensor: Output tensor in decibel scale
340 | """
341 | x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
342 | x_db -= multiplier * db_multiplier
343 |
344 | if top_db is not None:
345 | # Expand batch
346 | shape = x_db.size()
347 | packed_channels = shape[-3] if x_db.dim() > 2 else 1
348 | x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
349 |
350 | x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
351 |
352 | # Repack batch
353 | x_db = x_db.reshape(shape)
354 |
355 | return x_db
356 |
357 |
358 | def DB_to_amplitude(
359 | x: Tensor,
360 | ref: float,
361 | power: float
362 | ) -> Tensor:
363 | r"""Turn a tensor from the decibel scale to the power/amplitude scale.
364 |
365 | Args:
366 | x (Tensor): Input tensor before being converted to power/amplitude scale.
367 | ref (float): Reference which the output will be scaled by.
368 | power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
369 |
370 | Returns:
371 | Tensor: Output tensor in power/amplitude scale.
372 | """
373 | return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
374 |
375 |
376 | def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float:
377 | r"""Convert Hz to Mels.
378 |
379 | Args:
380 | freq (float): Frequency in Hz
381 | mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
382 |
383 | Returns:
384 | mels (float): Frequency in Mels
385 | """
386 |
387 | if mel_scale not in ['slaney', 'htk']:
388 | raise ValueError('mel_scale should be one of "htk" or "slaney".')
389 |
390 | if mel_scale == "htk":
391 | return 2595.0 * math.log10(1.0 + (freq / 700.0))
392 |
393 | # Fill in the linear part
394 | f_min = 0.0
395 | f_sp = 200.0 / 3
396 |
397 | mels = (freq - f_min) / f_sp
398 |
399 | # Fill in the log-scale part
400 | min_log_hz = 1000.0
401 | min_log_mel = (min_log_hz - f_min) / f_sp
402 | logstep = math.log(6.4) / 27.0
403 |
404 | if freq >= min_log_hz:
405 | mels = min_log_mel + math.log(freq / min_log_hz) / logstep
406 |
407 | return mels
408 |
409 |
410 | def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor:
411 | """Convert mel bin numbers to frequencies.
412 |
413 | Args:
414 | mels (Tensor): Mel frequencies
415 | mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
416 |
417 | Returns:
418 | freqs (Tensor): Mels converted to Hz
419 | """
420 |
421 | if mel_scale not in ['slaney', 'htk']:
422 | raise ValueError('mel_scale should be one of "htk" or "slaney".')
423 |
424 | if mel_scale == "htk":
425 | return 700.0 * (10.0**(mels / 2595.0) - 1.0)
426 |
427 | # Fill in the linear scale
428 | f_min = 0.0
429 | f_sp = 200.0 / 3
430 | freqs = f_min + f_sp * mels
431 |
432 | # And now the nonlinear scale
433 | min_log_hz = 1000.0
434 | min_log_mel = (min_log_hz - f_min) / f_sp
435 | logstep = math.log(6.4) / 27.0
436 |
437 | log_t = (mels >= min_log_mel)
438 | freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))
439 |
440 | return freqs
441 |
442 |
443 | def _create_triangular_filterbank(
444 | all_freqs: Tensor,
445 | f_pts: Tensor,
446 | ) -> Tensor:
447 | """Create a triangular filter bank.
448 |
449 | Args:
450 | all_freqs (Tensor): STFT freq points of size (`n_freqs`).
451 | f_pts (Tensor): Filter mid points of size (`n_filter`).
452 |
453 | Returns:
454 | fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`).
455 | """
456 | # Adopted from Librosa
457 | # calculate the difference between each filter mid point and each stft freq point in hertz
458 | f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1)
459 | slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_filter + 2)
460 | # create overlapping triangles
461 | zero = torch.zeros(1)
462 | down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter)
463 | up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter)
464 | fb = torch.max(zero, torch.min(down_slopes, up_slopes))
465 |
466 | return fb
467 |
468 |
469 | def melscale_fbanks(
470 | n_freqs: int,
471 | f_min: float,
472 | f_max: float,
473 | n_mels: int,
474 | sample_rate: int,
475 | norm: Optional[str] = None,
476 | mel_scale: str = "htk",
477 | ) -> Tensor:
478 | r"""Create a frequency bin conversion matrix.
479 |
480 | Note:
481 | For the sake of numerical compatibility with librosa, not all the coefficients
482 | in the resulting filter bank have a magnitude of 1.
483 |
484 | .. image:: https://download.pytorch.org/torchaudio/doc-assets/mel_fbanks.png
485 | :alt: Visualization of generated filter bank
486 |
487 | Args:
488 | n_freqs (int): Number of frequencies to highlight/apply
489 | f_min (float): Minimum frequency (Hz)
490 | f_max (float): Maximum frequency (Hz)
491 | n_mels (int): Number of mel filterbanks
492 | sample_rate (int): Sample rate of the audio waveform
493 | norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band
494 | (area normalization). (Default: ``None``)
495 | mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
496 |
497 | Returns:
498 | Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
499 | meaning number of frequencies to highlight/apply to x the number of filterbanks.
500 | Each column is a filterbank so that assuming there is a matrix A of
501 | size (..., ``n_freqs``), the applied result would be
502 | ``A * melscale_fbanks(A.size(-1), ...)``.
503 |
504 | """
505 |
506 | if norm is not None and norm != "slaney":
507 | raise ValueError("norm must be one of None or 'slaney'")
508 |
509 | # freq bins
510 | all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
511 |
512 | # calculate mel freq bins
513 | m_min = _hz_to_mel(f_min, mel_scale=mel_scale)
514 | m_max = _hz_to_mel(f_max, mel_scale=mel_scale)
515 |
516 | m_pts = torch.linspace(m_min, m_max, n_mels + 2)
517 | f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)
518 |
519 | # create filterbank
520 | fb = _create_triangular_filterbank(all_freqs, f_pts)
521 |
522 | if norm is not None and norm == "slaney":
523 | # Slaney-style mel is scaled to be approx constant energy per channel
524 | enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
525 | fb *= enorm.unsqueeze(0)
526 |
527 | if (fb.max(dim=0).values == 0.).any():
528 | warnings.warn(
529 | "At least one mel filterbank has all zero values. "
530 | f"The value for `n_mels` ({n_mels}) may be set too high. "
531 | f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
532 | )
533 |
534 | return fb
535 |
536 |
537 | def linear_fbanks(
538 | n_freqs: int,
539 | f_min: float,
540 | f_max: float,
541 | n_filter: int,
542 | sample_rate: int,
543 | ) -> Tensor:
544 | r"""Creates a linear triangular filterbank.
545 |
546 | Note:
547 | For the sake of numerical compatibility with librosa, not all the coefficients
548 | in the resulting filter bank have a magnitude of 1.
549 |
550 | .. image:: https://download.pytorch.org/torchaudio/doc-assets/lin_fbanks.png
551 | :alt: Visualization of generated filter bank
552 |
553 | Args:
554 | n_freqs (int): Number of frequencies to highlight/apply
555 | f_min (float): Minimum frequency (Hz)
556 | f_max (float): Maximum frequency (Hz)
557 | n_filter (int): Number of (linear) triangular filters
558 | sample_rate (int): Sample rate of the audio waveform
559 |
560 | Returns:
561 | Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_filter``)
562 | meaning number of frequencies to highlight/apply to x the number of filterbanks.
563 | Each column is a filterbank so that assuming there is a matrix A of
564 | size (..., ``n_freqs``), the applied result would be
565 | ``A * linear_fbanks(A.size(-1), ...)``.
566 | """
567 | # freq bins
568 | all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
569 |
570 | # filter mid-points
571 | f_pts = torch.linspace(f_min, f_max, n_filter + 2)
572 |
573 | # create filterbank
574 | fb = _create_triangular_filterbank(all_freqs, f_pts)
575 |
576 | return fb
577 |
578 |
579 | def create_dct(
580 | n_mfcc: int,
581 | n_mels: int,
582 | norm: Optional[str]
583 | ) -> Tensor:
584 | r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
585 | normalized depending on norm.
586 |
587 | Args:
588 | n_mfcc (int): Number of mfc coefficients to retain
589 | n_mels (int): Number of mel filterbanks
590 | norm (str or None): Norm to use (either 'ortho' or None)
591 |
592 | Returns:
593 | Tensor: The transformation matrix, to be right-multiplied to
594 | row-wise data of size (``n_mels``, ``n_mfcc``).
595 | """
596 | # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
597 | n = torch.arange(float(n_mels))
598 | k = torch.arange(float(n_mfcc)).unsqueeze(1)
599 | dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
600 | if norm is None:
601 | dct *= 2.0
602 | else:
603 | assert norm == "ortho"
604 | dct[0] *= 1.0 / math.sqrt(2.0)
605 | dct *= math.sqrt(2.0 / float(n_mels))
606 | return dct.t()
607 |
608 |
609 | def mu_law_encoding(
610 | x: Tensor,
611 | quantization_channels: int
612 | ) -> Tensor:
613 | r"""Encode signal based on mu-law companding. For more info see the
614 | `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
615 |
616 | This algorithm assumes the signal has been scaled to between -1 and 1 and
617 | returns a signal encoded with values from 0 to quantization_channels - 1.
618 |
619 | Args:
620 | x (Tensor): Input tensor
621 | quantization_channels (int): Number of channels
622 |
623 | Returns:
624 | Tensor: Input after mu-law encoding
625 | """
626 | mu = quantization_channels - 1.0
627 | if not x.is_floating_point():
628 | x = x.to(torch.float)
629 | mu = torch.tensor(mu, dtype=x.dtype)
630 | x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
631 | x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
632 | return x_mu
633 |
634 |
635 | def mu_law_decoding(
636 | x_mu: Tensor,
637 | quantization_channels: int
638 | ) -> Tensor:
639 | r"""Decode mu-law encoded signal. For more info see the
640 | `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
641 |
642 | This expects an input with values between 0 and quantization_channels - 1
643 | and returns a signal scaled between -1 and 1.
644 |
645 | Args:
646 | x_mu (Tensor): Input tensor
647 | quantization_channels (int): Number of channels
648 |
649 | Returns:
650 | Tensor: Input after mu-law decoding
651 | """
652 | mu = quantization_channels - 1.0
653 | if not x_mu.is_floating_point():
654 | x_mu = x_mu.to(torch.float)
655 | mu = torch.tensor(mu, dtype=x_mu.dtype)
656 | x = ((x_mu) / mu) * 2 - 1.0
657 | x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
658 | return x
659 |
660 |
661 | def phase_vocoder(
662 | complex_specgrams: Tensor,
663 | rate: float,
664 | phase_advance: Tensor
665 | ) -> Tensor:
666 | r"""Given a STFT tensor, speed up in time without modifying pitch by a
667 | factor of ``rate``.
668 |
669 | Args:
670 | complex_specgrams (Tensor):
671 | A tensor of dimension `(..., freq, num_frame)` with complex dtype.
672 | rate (float): Speed-up factor
673 | phase_advance (Tensor): Expected phase advance in each bin. Dimension of `(freq, 1)`
674 |
675 | Returns:
676 | Tensor:
677 | Stretched spectrogram. The resulting tensor is of the same dtype as the input
678 | spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``.
679 |
680 | Example
681 | >>> freq, hop_length = 1025, 512
682 | >>> # (channel, freq, time)
683 | >>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat)
684 | >>> rate = 1.3 # Speed up by 30%
685 | >>> phase_advance = torch.linspace(
686 | >>> 0, math.pi * hop_length, freq)[..., None]
687 | >>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
688 | >>> x.shape # with 231 == ceil(300 / 1.3)
689 | torch.Size([2, 1025, 231])
690 | """
691 | if rate == 1.0:
692 | return complex_specgrams
693 |
694 | # pack batch
695 | shape = complex_specgrams.size()
696 | complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-2:]))
697 |
698 | # Figures out the corresponding real dtype, i.e. complex128 -> float64, complex64 -> float32
699 | # Note torch.real is a view so it does not incur any memory copy.
700 | real_dtype = torch.real(complex_specgrams).dtype
701 | time_steps = torch.arange(
702 | 0,
703 | complex_specgrams.size(-1),
704 | rate,
705 | device=complex_specgrams.device,
706 | dtype=real_dtype)
707 |
708 | alphas = time_steps % 1.0
709 | phase_0 = complex_specgrams[..., :1].angle()
710 |
711 | # Time Padding
712 | complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 2])
713 |
714 | # (new_bins, freq, 2)
715 | complex_specgrams_0 = complex_specgrams.index_select(-1, time_steps.long())
716 | complex_specgrams_1 = complex_specgrams.index_select(-1, (time_steps + 1).long())
717 |
718 | angle_0 = complex_specgrams_0.angle()
719 | angle_1 = complex_specgrams_1.angle()
720 |
721 | norm_0 = complex_specgrams_0.abs()
722 | norm_1 = complex_specgrams_1.abs()
723 |
724 | phase = angle_1 - angle_0 - phase_advance
725 | phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
726 |
727 | # Compute Phase Accum
728 | phase = phase + phase_advance
729 | phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
730 | phase_acc = torch.cumsum(phase, -1)
731 |
732 | mag = alphas * norm_1 + (1 - alphas) * norm_0
733 |
734 | complex_specgrams_stretch = torch.polar(mag, phase_acc)
735 |
736 | # unpack batch
737 | complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-2] + complex_specgrams_stretch.shape[1:])
738 | return complex_specgrams_stretch
739 |
740 |
741 | def mask_along_axis_iid(
742 | specgrams: Tensor,
743 | mask_param: int,
744 | mask_value: float,
745 | axis: int
746 | ) -> Tensor:
747 | r"""
748 | Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
749 | ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
750 |
751 | Args:
752 | specgrams (Tensor): Real spectrograms `(batch, channel, freq, time)`
753 | mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
754 | mask_value (float): Value to assign to the masked columns
755 | axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
756 |
757 | Returns:
758 | Tensor: Masked spectrograms of dimensions `(batch, channel, freq, time)`
759 | """
760 |
761 | if axis not in [2, 3]:
762 | raise ValueError('Only Frequency and Time masking are supported')
763 |
764 | device = specgrams.device
765 | dtype = specgrams.dtype
766 |
767 | value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
768 | min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
769 |
770 | # Create broadcastable mask
771 | mask_start = min_value[..., None, None]
772 | mask_end = (min_value + value)[..., None, None]
773 | mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
774 |
775 | # Per batch example masking
776 | specgrams = specgrams.transpose(axis, -1)
777 | specgrams = specgrams.masked_fill((mask >= mask_start) & (mask < mask_end), mask_value)
778 | specgrams = specgrams.transpose(axis, -1)
779 |
780 | return specgrams
781 |
782 |
783 | def mask_along_axis(
784 | specgram: Tensor,
785 | mask_param: int,
786 | mask_value: float,
787 | axis: int
788 | ) -> Tensor:
789 | r"""
790 | Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
791 | ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
792 | All examples will have the same mask interval.
793 |
794 | Args:
795 | specgram (Tensor): Real spectrogram `(channel, freq, time)`
796 | mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
797 | mask_value (float): Value to assign to the masked columns
798 | axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
799 |
800 | Returns:
801 | Tensor: Masked spectrogram of dimensions `(channel, freq, time)`
802 | """
803 | if axis not in [1, 2]:
804 | raise ValueError('Only Frequency and Time masking are supported')
805 |
806 | # pack batch
807 | shape = specgram.size()
808 | specgram = specgram.reshape([-1] + list(shape[-2:]))
809 | value = torch.rand(1) * mask_param
810 | min_value = torch.rand(1) * (specgram.size(axis) - value)
811 |
812 | mask_start = (min_value.long()).squeeze()
813 | mask_end = (min_value.long() + value.long()).squeeze()
814 | mask = torch.arange(0, specgram.shape[axis], device=specgram.device, dtype=specgram.dtype)
815 | mask = (mask >= mask_start) & (mask < mask_end)
816 | if axis == 1:
817 | mask = mask.unsqueeze(-1)
818 |
819 | assert mask_end - mask_start < mask_param
820 |
821 | specgram = specgram.masked_fill(mask, mask_value)
822 |
823 | # unpack batch
824 | specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
825 |
826 | return specgram
827 |
828 |
829 | def compute_deltas(
830 | specgram: Tensor,
831 | win_length: int = 5,
832 | mode: str = "replicate"
833 | ) -> Tensor:
834 | r"""Compute delta coefficients of a tensor, usually a spectrogram:
835 |
836 | .. math::
837 | d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
838 |
839 | where :math:`d_t` is the deltas at time :math:`t`,
840 | :math:`c_t` is the spectrogram coefficients at time :math:`t`,
841 | :math:`N` is ``(win_length-1)//2``.
842 |
843 | Args:
844 | specgram (Tensor): Tensor of audio of dimension `(..., freq, time)`
845 | win_length (int, optional): The window length used for computing delta (Default: ``5``)
846 | mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
847 |
848 | Returns:
849 | Tensor: Tensor of deltas of dimension `(..., freq, time)`
850 |
851 | Example
852 | >>> specgram = torch.randn(1, 40, 1000)
853 | >>> delta = compute_deltas(specgram)
854 | >>> delta2 = compute_deltas(delta)
855 | """
856 | device = specgram.device
857 | dtype = specgram.dtype
858 |
859 | # pack batch
860 | shape = specgram.size()
861 | specgram = specgram.reshape(1, -1, shape[-1])
862 |
863 | assert win_length >= 3
864 |
865 | n = (win_length - 1) // 2
866 |
867 | # twice sum of integer squared
868 | denom = n * (n + 1) * (2 * n + 1) / 3
869 |
870 | specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
871 |
872 | kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
873 |
874 | output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
875 |
876 | # unpack batch
877 | output = output.reshape(shape)
878 |
879 | return output
880 |
881 |
882 | def _compute_nccf(
883 | waveform: Tensor,
884 | sample_rate: int,
885 | frame_time: float,
886 | freq_low: int
887 | ) -> Tensor:
888 | r"""
889 | Compute Normalized Cross-Correlation Function (NCCF).
890 |
891 | .. math::
892 | \phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
893 |
894 | where
895 | :math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
896 | :math:`w` is the waveform,
897 | :math:`N` is the length of a frame,
898 | :math:`b_i` is the beginning of frame :math:`i`,
899 | :math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
900 | """
901 |
902 | EPSILON = 10 ** (-9)
903 |
904 | # Number of lags to check
905 | lags = int(math.ceil(sample_rate / freq_low))
906 |
907 | frame_size = int(math.ceil(sample_rate * frame_time))
908 |
909 | waveform_length = waveform.size()[-1]
910 | num_of_frames = int(math.ceil(waveform_length / frame_size))
911 |
912 | p = lags + num_of_frames * frame_size - waveform_length
913 | waveform = torch.nn.functional.pad(waveform, (0, p))
914 |
915 | # Compute lags
916 | output_lag = []
917 | for lag in range(1, lags + 1):
918 | s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
919 | s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
920 |
921 | output_frames = (
922 | (s1 * s2).sum(-1)
923 | / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
924 | / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
925 | )
926 |
927 | output_lag.append(output_frames.unsqueeze(-1))
928 |
929 | nccf = torch.cat(output_lag, -1)
930 |
931 | return nccf
932 |
933 |
934 | def _combine_max(
935 | a: Tuple[Tensor, Tensor],
936 | b: Tuple[Tensor, Tensor],
937 | thresh: float = 0.99
938 | ) -> Tuple[Tensor, Tensor]:
939 | """
940 | Take value from first if bigger than a multiplicative factor of the second, elementwise.
941 | """
942 | mask = (a[0] > thresh * b[0])
943 | values = mask * a[0] + ~mask * b[0]
944 | indices = mask * a[1] + ~mask * b[1]
945 | return values, indices
946 |
947 |
948 | def _find_max_per_frame(
949 | nccf: Tensor,
950 | sample_rate: int,
951 | freq_high: int
952 | ) -> Tensor:
953 | r"""
954 | For each frame, take the highest value of NCCF,
955 | apply centered median smoothing, and convert to frequency.
956 |
957 | Note: If the max over all lags is very close to the max over
958 | the first half of the lags, then the latter is taken.
959 | """
960 |
961 | lag_min = int(math.ceil(sample_rate / freq_high))
962 |
963 | # Prefer the smaller lag when its NCCF value is close enough to the global max
964 |
965 | best = torch.max(nccf[..., lag_min:], -1)
966 |
967 | half_size = nccf.shape[-1] // 2
968 | half = torch.max(nccf[..., lag_min:half_size], -1)
969 |
970 | best = _combine_max(half, best)
971 | indices = best[1]
972 |
973 | # Add back minimal lag
974 | indices += lag_min
975 | # Add 1 empirical calibration offset
976 | indices += 1
977 |
978 | return indices
979 |
980 |
981 | def _median_smoothing(
982 | indices: Tensor,
983 | win_length: int
984 | ) -> Tensor:
985 | r"""
986 | Apply median smoothing to the 1D tensor over the given window.
987 | """
988 |
989 | # Centered windowed
990 | pad_length = (win_length - 1) // 2
991 |
992 | # "replicate" padding in any dimension
993 | indices = torch.nn.functional.pad(
994 | indices, (pad_length, 0), mode="constant", value=0.
995 | )
996 |
997 | indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
998 | roll = indices.unfold(-1, win_length, 1)
999 |
1000 | values, _ = torch.median(roll, -1)
1001 |
--------------------------------------------------------------------------------
/tests/test_issue.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import sys
7 | import unittest
8 |
9 | import torch
10 |
11 | sys.path.append('..')
12 | from codeassist.gpt2_coder import GPT2Coder
13 |
14 |
15 | class IssueTestCase(unittest.TestCase):
16 |
17 | def test_code_predict(self):
18 | prompts = [
19 | """from torch import nn
20 | class LSTM(Module):
21 | def __init__(self, *,
22 | n_tokens: int,
23 | embedding_size: int,
24 | hidden_size: int,
25 | n_layers: int):""",
26 | """import numpy as np
27 | import torch
28 | import torch.nn as""",
29 | "import java.util.ArrayList;",
30 | ]
31 | infer = GPT2Coder("shibing624/code-autocomplete-gpt2-base")
32 | results = []
33 | for prompt in prompts:
34 | res = infer.generate(prompt)
35 | print("Query:", prompt)
36 | print("Result:", res[0])
37 | print("=" * 20)
38 | results.append(res[0])
39 | self.assertEqual(len(results), 3)
40 |
41 |
42 | if __name__ == '__main__':
43 | unittest.main()
44 |
--------------------------------------------------------------------------------
/tests/test_qps.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | @author:XuMing(xuming624@qq.com)
4 | @description:
5 | """
6 | import os
7 | import sys
8 | import unittest
9 | from time import time
10 |
11 | sys.path.append('..')
12 | from codeassist.gpt2_coder import GPT2Coder
13 |
14 | pwd_path = os.path.abspath(os.path.dirname(__file__))
15 |
16 | test_file = os.path.join(pwd_path, 'test.txt')
17 | model = GPT2Coder("shibing624/code-autocomplete-gpt2-base")
18 |
19 |
20 | def load_data(file_path):
21 | res = []
22 | with open(file_path, 'r', encoding='utf-8') as f:
23 | for line in f:
24 | line = line.strip()
25 | res.append(line)
26 | if len(res) >= 200:
27 | break
28 | return res
29 |
30 |
31 | class QPSTestCase(unittest.TestCase):
32 | def test_code_infer_speed(self):
33 | """Test code_infer_speed"""
34 | codes = load_data(test_file)
35 | codes = codes[100:110]
36 | t1 = time()
37 | for prompt in codes:
38 | res = model.generate(prompt)
39 | print("Query:", prompt)
40 | print("Result:", res[0])
41 | print("=" * 20)
42 | spend_time = time() - t1
43 | print('spent time:', spend_time, 'seconds')
44 | print('size:', len(codes), 'qps:', len(codes) / spend_time)
45 |
46 |
47 | if __name__ == '__main__':
48 | unittest.main()
49 |
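Both test modules can be run with the pytest configuration from setup.cfg; the -s flag keeps the printed prompts and completions visible:

python -m pytest tests/ -s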
--------------------------------------------------------------------------------