├── .gitignore
├── CITATION.cff
├── LICENSE
├── README.md
├── cff-version 1.2.0.cliff.txt
├── data
│   └── alpaca_data.json
├── requirements.txt
└── src
    ├── configs
    │   ├── deepspeed_config.json
    │   └── hostfile
    ├── environment_ChatPath.yml
    ├── generate.py
    ├── imgs
    │   ├── chatpath_logo.png
    │   ├── data_process.png
    │   ├── pathgpt_instruction.png
    │   └── pathgpt_logo.png
    ├── train.py
    └── utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | .idea/
161 |
162 | transformers/
163 | .DS_Store
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | # This CITATION.cff file was generated with cffinit.
2 | # Visit https://bit.ly/cffinit to generate yours today!
3 |
4 | cff-version: 1.2.0
5 | title: 'PathGPT: A Knowledgeable GPT Model for Pathology'
6 | message: >-
7 | If you use this software, please cite it using the
8 | metadata from this file.
9 | type: software
10 | authors:
11 | - given-names: Yuxuan
12 | family-names: Sun
13 | email: sunyuxuan@westlake.edu.cn
14 | repository-code: 'https://github.com/superjamessyx/PathGPT'
15 | abstract: >-
16 |   PathGPT is a specialized language model tailored for the
17 |   field of pathology. It was developed by fine-tuning the
18 |   Llama-7B model on a dataset of 13,000 pathology-specific
19 |   questions and answers that we collected.
20 | license: Apache-2.0
21 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | [](https://github.com/tatsu-lab/stanford_alpaca/blob/main/LICENSE)
10 | [](https://github.com/tatsu-lab/stanford_alpaca/blob/main/DATA_LICENSE)
11 |
12 | ## PathGPT: A Knowledgeable GPT Model for Pathology
13 |
14 | Welcome to the PathGPT repository! PathGPT is a specialized language model tailored for the field of pathology. It was developed by fine-tuning the Llama-7B model on a dataset of 13,000 pathology-specific questions and answers that we collected. We're excited to announce the release of the PathGPT checkpoint (the weight diff of LLaMA), with the full 13k dataset to follow shortly. The data processing pipeline is illustrated below:
15 |
16 |
17 | ![Data processing pipeline](src/imgs/data_process.png)
18 |
19 |
20 | But we're not stopping there! In the future, we plan to expand the dataset to over 100,000 entries, encompassing a diverse range of pathology-related instruction data. We believe that PathGPT will become a valuable tool for pathologists and the entire pathology community.
21 |
22 | ### **Authors**
23 |
24 | This project was completed by **Yuxuan Sun** and **Chenglu Zhu** from the **Artificial Intelligence and Biomedical Image Analysis Lab** of the School of Engineering at Westlake University. We would like to thank **Kai Zhang** (Ohio State University) for participating in the discussion and collaboration, as well as the following individuals who contributed to the annotation process: **Xinheng Lv** and **Ruojia Zhao**.
25 |
26 |
27 |
28 | ## Get your demo experience!!
29 |
30 | We have deployed PathGPT on an A100 server and opened it up for user experience. You can follow the instructions illustrated below. The demo website is: https://f86fb98a1b7bbd2a.gradio.app. Please feel free to point out any problems with our model.
31 |
32 |
33 | ![Demo instructions](src/imgs/pathgpt_instruction.png)
34 |
35 |
36 |
37 |
38 | ## Usage
39 |
40 | - Setup. Install the conda environment:
41 | ```bash
42 | conda create -n pathgpt python=3.10
43 | conda activate pathgpt
44 | git clone https://github.com/superjamessyx/PathGPT.git
45 | cd PathGPT/src
46 | conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch
47 | git clone https://github.com/huggingface/transformers.git
48 | cd transformers
49 | pip install -e .
50 | cd ../..
51 | pip install -r requirements.txt
52 | ```
53 |
54 | - Training data
55 |
56 | Because our present data collection focuses solely on pathology-related QA pairs, an exclusive emphasis on QA training could hinder PathGPT's ability to carry out other tasks. To address this limitation, we first fine-tune the model on Stanford [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html) data, followed by further fine-tuning on our pathology-specific data. This approach enables PathGPT to handle a diverse range of questioning styles. The training data is located at the path below (an example record follows the path):
57 | ```bash
58 | PathGPT/src/data/pathology_alpaca.json
59 | ```
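Each record in this file follows the Alpaca-style instruction format that `train.py` consumes: an `instruction`, an optional `input` (an empty string when there is no extra context, which selects the no-input prompt template), and an `output`. A hypothetical record, for illustration only:
```json
[
  {
    "instruction": "Assume the role of a pathologist and answer the question.",
    "input": "What does nuclear pleomorphism indicate on an H&E slide?",
    "output": "Nuclear pleomorphism describes variation in nuclear size and shape, often seen in malignant cells."
  }
]
```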
60 |
61 | - Convert LLaMA checkpoint to HuggingFace format:
62 | ```bash
63 | cd PathGPT/src
64 | python transformers/src/transformers/models/llama/convert_llama_weights_to_hf.py \
65 | --input_dir /path/to/llama-7B/ \
66 | --model_size 7B \
67 | --output_dir /path/to/llama-7B/hf
68 | ```
69 |
70 | - To train LLaMA-7B with DeepSpeed, select either ZeRO-2 or ZeRO-3 by passing `--deepspeed configs/ds_config_zero2.json` or `--deepspeed configs/ds_config_zero3.json` (a sketch of a minimal ZeRO-2 config follows the command below):
71 | ```bash
72 | deepspeed train.py \
73 | --model_name_or_path /path/to/llama-7B/hf \
74 | --data_path /path/to/pathology_alpaca.json \
75 | --output_dir /path/to/llama-7B/hf/ft \
76 | --num_train_epochs 3 \
77 | --model_max_length 512 \
78 | --per_device_train_batch_size 64 \
79 | --per_device_eval_batch_size 1 \
80 | --gradient_accumulation_steps 1 \
81 | --evaluation_strategy "no" \
82 | --save_strategy "steps" \
83 | --save_steps 100 \
84 | --save_total_limit 2 \
85 | --learning_rate 2e-5 \
86 | --warmup_steps 2 \
87 | --logging_steps 2 \
88 | --lr_scheduler_type "cosine" \
89 | --report_to "tensorboard" \
90 | --gradient_checkpointing True \
91 | --deepspeed configs/ds_config_zero2.json \
92 | --fp16 True
93 | ```
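Note that the repository ships `src/configs/deepspeed_config.json` (a ZeRO stage-3 config with CPU offload) rather than the `ds_config_zero2.json` / `ds_config_zero3.json` files referenced above. As an assumption of what a minimal ZeRO-2 config could look like, following DeepSpeed's documented schema (the `"auto"` values are filled in by the HuggingFace Trainer integration):
```json
{
  "zero_optimization": {
    "stage": 2,
    "offload_optimizer": { "device": "cpu", "pin_memory": true },
    "overlap_comm": true,
    "contiguous_gradients": true,
    "reduce_bucket_size": "auto"
  },
  "fp16": { "enabled": true, "loss_scale": 0, "initial_scale_power": 16 },
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto"
}
```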
94 | - Train LLaMA-7B with DeepSpeed across multiple nodes
95 | ```bash
96 | deepspeed --num_gpus num_of_gpus_in_each_node \
97 | --num_nodes num_of_nodes \
98 | --master_addr ip_address_of_main_node \
99 | --master_port 34545 \
100 | --hostfile configs/hostfile \
101 | train.py \
102 | --model_name_or_path /path/to/llama-7B/hf \
103 | --data_path /path/to/pathology_alpaca.json \
104 | --output_dir /path/to/llama-7B/hf/ft \
105 | --num_train_epochs 3 \
106 | --model_max_length 512 \
107 | --per_device_train_batch_size 64 \
108 | --per_device_eval_batch_size 4 \
109 | --gradient_accumulation_steps 1 \
110 | --evaluation_strategy "no" \
111 | --save_strategy "steps" \
112 | --save_steps 100 \
113 | --save_total_limit 2 \
114 | --learning_rate 2e-5 \
115 | --warmup_steps 2 \
116 | --logging_steps 2 \
117 | --lr_scheduler_type "cosine" \
118 | --report_to "tensorboard" \
119 | --gradient_checkpointing True \
120 | --deepspeed configs/ds_config_zero2.json \
121 | --fp16 True
122 | ```
123 |
124 |
125 |
126 | - Training Cost
127 |
128 | Training the 7B model on `13k examples` for `3 epochs` with `batch_size=32` takes about `1 hour` on `4 x A100` GPUs.
129 |
130 | | LLaMA | Batch Size | A100s | Time (h) |
131 | | ----- | ---------- | ----- | -------- |
132 | | 7B    | 32         | 4     | 1        |
133 |
134 |
135 | - Inference
136 | ```bash
137 | # web demo inference
138 | python generate.py
139 | ```
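Because `generate.py` wraps `main` with `fire.Fire`, the hard-coded checkpoint path can be overridden from the command line (the path below is a placeholder):
```bash
python generate.py \
    --base_model /path/to/llama-7B/hf/ft/checkpoint-300 \
    --load_8bit False
```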
140 |
141 | ## Download fine-tuned checkpoint
142 |
143 | We have released the weight difference ([huggingface](https://huggingface.co/jamessyx/ChatPath)) between the fine-tuned weights and the original LLaMA weights. By simply adding this difference to the latter, you can recover the fine-tuned weights.
144 |
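As a minimal sketch of that recovery step, assuming the diff is distributed as an ordinary HuggingFace checkpoint whose tensors store `finetuned - original` (the exact diff format is not documented here, so treat this as illustrative only):

```python
import torch
from transformers import LlamaForCausalLM

# Hypothetical paths; point these at your converted base model and the downloaded diff.
base = LlamaForCausalLM.from_pretrained("/path/to/llama-7B/hf")
diff = LlamaForCausalLM.from_pretrained("jamessyx/ChatPath")

base_sd = base.state_dict()
with torch.no_grad():
    for name, delta in diff.state_dict().items():
        # finetuned = original + diff, tensor by tensor. Shapes must match, so
        # resize the base embeddings first if the fine-tune added a pad token.
        base_sd[name].add_(delta)

base.save_pretrained("/path/to/llama-7B/hf/ft-recovered")
```
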
145 | ## Example outputs
146 |
147 | *How to distinguish the distribution pattern of immune cells in solid tumors of H&E staining images?*
148 |
149 | **Alpaca**
150 |
151 | The distribution pattern of immune cells in solid tumors can be determined by examining H&E stained slides. Immunohistochemical stains such as CD3, CD8, CD4, and CD20 can be used to identify T-cells, B-cells, and antigen-presenting cells, respectively. Additionally, macrophages can be identified with CD68 staining.
152 |
153 | **PathGPT**
154 |
155 | To distinguish the distribution pattern of immune cells in solid tumors of H&E staining images, the following steps can be followed:
156 |
157 | 1. Select the area of interest in the H&E staining image.
158 |
159 | 2. Identify the location of immune cells in the selected area.
160 |
161 | 3. Count the number of immune cells in the selected area.
162 |
163 | 4. Calculate the percentage of immune cells in the selected area.
164 |
165 | 5. Compare the percentage of immune cells in the selected area with the percentage of immune cells in normal tissues.
166 |
167 | 6. Analyze the distribution pattern of immune cells in the selected area.
168 |
169 | 7. Compare the distribution pattern of immune cells in the selected area with the distribution pattern of immune cells in normal tissues.
170 |
171 | 8. Evaluate the significance of the distribution pattern of immune cells in the selected area.
172 |
173 | By following these steps, it is possible to distinguish the distribution pattern of immune cells in solid tumors of H&E staining images.
174 |
175 |
176 |
177 |
178 |
179 |
180 | ## Acknowledgements
181 |
182 | This project is based on
183 |
184 | [Llama-X](https://github.com/AetherCortex/Llama-X)
185 |
186 | and also relates to multiple open-source projects:
187 |
188 | [Meta AI LLaMA](https://arxiv.org/abs/2302.13971v1)
189 |
190 | [Huggingface Transformers Llama](https://github.com/huggingface/transformers/tree/main/src/transformers/models/llama)
191 |
192 | [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html) and [Alpaca-LoRA](https://github.com/tloen/alpaca-lora)
193 |
194 |
195 | ## Disclaimer
196 |
197 | The use of resources (e.g., code, data, and model weights) related to this project is limited to academic research and is prohibited for commercial purposes. The content generated by PathGPT is subject to factors such as randomness and is not fully controllable, and this project cannot guarantee its accuracy. This project does not assume any legal responsibility for the content of the model output, nor does it assume any responsibility for losses that may arise from the use of the related resources and output results.
198 |
199 |
200 |
201 |
--------------------------------------------------------------------------------
/cff-version 1.2.0.cliff.txt:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: "Sun"
5 |   given-names: "Yuxuan"
6 | - family-names: "Zhu"
7 |   given-names: "Chenglu"
8 | title: "PathGPT: A Knowledgeable GPT Model for Pathology"
9 | version: 1.0.0
10 | date-released: 2023-04-22
11 | url: "https://github.com/superjamessyx/PathGPT"
12 |
13 |
14 |
15 |
16 | However, it is important to note that the image-text pairs found in scientific papers (e.g., on PubMed) tend to carry succinct captions, offering a narrow view of the pathological images rather than a comprehensive analysis of all discernible content. This limitation constrains the capacity of trained models to provide detailed and accurate descriptions of the images, typically resulting in brief responses. We have therefore invested considerable effort in collating data from books, pathology atlases, and manual annotations to address this issue. At the same time, researching how to automatically and correctly augment the level of detail of the captions within scientific papers is of utmost importance.
17 |
18 |
19 | python -m training.main \
20 |     --save-frequency 1 \
21 |     --zeroshot-frequency 1 \
22 |     --report-to wandb \
23 |     --train-data="/data1/syx/dataset/Final_project_data/train/pathclip.csv" \
24 |     --csv-img-key img \
25 |     --csv-caption-key caption \
26 |     --warmup 10000 \
27 |     --batch-size=128 \
28 |     --pretrained=openai \
29 |     --lr=1e-3 \
30 |     --wd=0.1 \
31 |     --epochs=30 \
32 |     --workers=8 \
33 |     --model openai/clip-vit-base-patch16
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | rouge_score
3 | fire
4 | openai
5 | sentencepiece
6 | wandb
7 | gradio==3.9
8 | deepspeed
9 | accelerate
10 | tensorboardX
11 |
--------------------------------------------------------------------------------
/src/configs/deepspeed_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "zero_optimization": {
3 | "stage": 3,
4 | "offload_optimizer": {
5 | "device": "cpu",
6 | "pin_memory": true
7 | },
8 | "offload_param": {
9 | "device": "cpu",
10 | "pin_memory": true
11 | },
12 | "overlap_comm": true,
13 | "contiguous_gradients": true,
14 | "sub_group_size": 0,
15 | "reduce_bucket_size": "auto",
16 | "stage3_prefetch_bucket_size": "auto",
17 | "stage3_param_persistence_threshold": "auto",
18 | "stage3_max_live_parameters": 0,
19 | "stage3_max_reuse_distance": 0,
20 | "stage3_gather_16bit_weights_on_model_save": true
21 | },
22 | "fp16": {
23 | "enabled": true,
24 | "auto_cast": false,
25 | "loss_scale": 0,
26 | "initial_scale_power": 32,
27 | "loss_scale_window": 1000,
28 | "hysteresis": 2,
29 | "min_loss_scale": 1
30 | },
31 | "optimizer": {
32 | "type": "AdamW",
33 | "params": {
34 | "lr": 2e-5,
35 | "betas": [
36 | 0.9,
37 | 0.999
38 | ],
39 | "eps": 1e-8,
40 | "weight_decay": 0
41 | }
42 | },
43 | "train_batch_size": "auto",
44 | "train_micro_batch_size_per_gpu": "auto",
45 | "wall_clock_breakdown": false
46 | }
47 |
--------------------------------------------------------------------------------
/src/configs/hostfile:
--------------------------------------------------------------------------------
1 | ip_address_of_main_node slots=num_of_gpus_in_each_node
2 | ip_address_of_sub_node1 slots=num_of_gpus_in_each_node
--------------------------------------------------------------------------------
/src/environment_ChatPath.yml:
--------------------------------------------------------------------------------
1 | name: chatpath
2 | channels:
3 | - pytorch
4 | - defaults
5 | dependencies:
6 | - _libgcc_mutex=0.1=main
7 | - _openmp_mutex=5.1=1_gnu
8 | - blas=1.0=mkl
9 | - bzip2=1.0.8=h7b6447c_0
10 | - ca-certificates=2023.01.10=h06a4308_0
11 | - charset-normalizer=2.0.4=pyhd3eb1b0_0
12 | - cudatoolkit=11.3.1=h2bc3f7f_2
13 | - ffmpeg=4.3=hf484d3e_0
14 | - freetype=2.12.1=h4a9f257_0
15 | - giflib=5.2.1=h5eee18b_3
16 | - gmp=6.2.1=h295c915_3
17 | - gnutls=3.6.15=he1e5248_0
18 | - intel-openmp=2021.4.0=h06a4308_3561
19 | - jpeg=9e=h5eee18b_1
20 | - lame=3.100=h7b6447c_0
21 | - lcms2=2.12=h3be6417_0
22 | - ld_impl_linux-64=2.38=h1181459_1
23 | - lerc=3.0=h295c915_0
24 | - libdeflate=1.17=h5eee18b_0
25 | - libffi=3.4.2=h6a678d5_6
26 | - libgcc-ng=11.2.0=h1234567_1
27 | - libgomp=11.2.0=h1234567_1
28 | - libiconv=1.16=h7f8727e_2
29 | - libidn2=2.3.2=h7f8727e_0
30 | - libpng=1.6.39=h5eee18b_0
31 | - libstdcxx-ng=11.2.0=h1234567_1
32 | - libtasn1=4.16.0=h27cfd23_0
33 | - libtiff=4.5.0=h6a678d5_2
34 | - libunistring=0.9.10=h27cfd23_0
35 | - libuuid=1.41.5=h5eee18b_0
36 | - libwebp=1.2.4=h11a3e52_1
37 | - libwebp-base=1.2.4=h5eee18b_1
38 | - lz4-c=1.9.4=h6a678d5_0
39 | - mkl=2021.4.0=h06a4308_640
40 | - mkl_fft=1.3.1=py310hd6ae3a3_0
41 | - mkl_random=1.2.2=py310h00e6091_0
42 | - ncurses=6.4=h6a678d5_0
43 | - nettle=3.7.3=hbbd107a_1
44 | - numpy-base=1.23.5=py310h8e6c178_0
45 | - openh264=2.1.1=h4ff587b_0
46 | - openssl=1.1.1t=h7f8727e_0
47 | - pycparser=2.21=pyhd3eb1b0_0
48 | - python=3.10.10=h7a1cb2a_2
49 | - pytorch=1.12.0=py3.10_cuda11.3_cudnn8.3.2_0
50 | - pytorch-mutex=1.0=cuda
51 | - readline=8.2=h5eee18b_0
52 | - six=1.16.0=pyhd3eb1b0_1
53 | - sqlite=3.41.1=h5eee18b_0
54 | - tk=8.6.12=h1ccaba5_0
55 | - typing_extensions=4.4.0=py310h06a4308_0
56 | - tzdata=2022g=h04d1e81_0
57 | - xz=5.2.10=h5eee18b_1
58 | - zlib=1.2.13=h5eee18b_0
59 | - zstd=1.5.2=ha4553b6_0
60 | - pip:
61 | - absl-py==1.4.0
62 | - accelerate==0.18.0
63 | - aiofiles==23.1.0
64 | - aiohttp==3.8.4
65 | - aiosignal==1.3.1
66 | - altair==4.2.2
67 | - anyio==3.6.2
68 | - appdirs==1.4.4
69 | - async-timeout==4.0.2
70 | - attrs==22.2.0
71 | - bcrypt==4.0.1
72 | - beartype==0.12.0
73 | - brotlipy==0.7.0
74 | - cachetools==5.3.0
75 | - certifi==2022.12.7
76 | - cffi==1.15.1
77 | - chatllama-py==0.0.3
78 | - click==8.1.3
79 | - cmake==3.26.1
80 | - contourpy==1.0.7
81 | - cryptography==39.0.1
82 | - cycler==0.11.0
83 | - dataclasses-json==0.5.7
84 | - datasets==2.10.1
85 | - deepspeed==0.8.3
86 | - dill==0.3.6
87 | - docker-pycreds==0.4.0
88 | - einops==0.6.0
89 | - entrypoints==0.4
90 | - fairscale==0.4.13
91 | - fastapi==0.95.0
92 | - ffmpy==0.3.0
93 | - filelock==3.10.5
94 | - fire==0.5.0
95 | - flit-core==3.8.0
96 | - fonttools==4.39.2
97 | - frozenlist==1.3.3
98 | - fsspec==2023.3.0
99 | - gitdb==4.0.10
100 | - gitpython==3.1.31
101 | - google-auth==2.16.3
102 | - google-auth-oauthlib==0.4.6
103 | - gradio==3.9
104 | - greenlet==2.0.2
105 | - grpcio==1.51.3
106 | - h11==0.12.0
107 | - hjson==3.1.0
108 | - httpcore==0.15.0
109 | - httpx==0.23.3
110 | - huggingface-hub==0.13.3
111 | - idna==3.4
112 | - jinja2==3.1.2
113 | - joblib==1.2.0
114 | - jsonschema==4.17.3
115 | - kiwisolver==1.4.4
116 | - langchain==0.0.123
117 | - linkify-it-py==2.0.0
118 | - lit==16.0.0
119 | - markdown==3.4.3
120 | - markdown-it-py==2.2.0
121 | - markupsafe==2.1.2
122 | - marshmallow==3.19.0
123 | - marshmallow-enum==1.5.1
124 | - matplotlib==3.7.1
125 | - mdit-py-plugins==0.3.3
126 | - mdurl==0.1.2
127 | - mkl-fft==1.3.1
128 | - mkl-random==1.2.2
129 | - mkl-service==2.4.0
130 | - multidict==6.0.4
131 | - multiprocess==0.70.14
132 | - mypy-extensions==1.0.0
133 | - ninja==1.11.1
134 | - nltk==3.8.1
135 | - numpy==1.23.5
136 | - oauthlib==3.2.2
137 | - openai==0.27.2
138 | - orjson==3.8.8
139 | - packaging==23.0
140 | - pandas==1.5.3
141 | - paramiko==3.1.0
142 | - pathtools==0.1.2
143 | - pillow==9.4.0
144 | - pip==23.0.1
145 | - plotly==5.13.1
146 | - protobuf==4.22.1
147 | - psutil==5.9.4
148 | - py-cpuinfo==9.0.0
149 | - pyarrow==11.0.0
150 | - pyasn1==0.4.8
151 | - pyasn1-modules==0.2.8
152 | - pycryptodome==3.17
153 | - pydantic==1.10.7
154 | - pydub==0.25.1
155 | - pynacl==1.5.0
156 | - pyopenssl==23.0.0
157 | - pyparsing==3.0.9
158 | - pyrsistent==0.19.3
159 | - pysocks==1.7.1
160 | - python-dateutil==2.8.2
161 | - python-multipart==0.0.6
162 | - pytz==2023.2
163 | - pyyaml==6.0
164 | - regex==2023.3.23
165 | - requests==2.28.1
166 | - requests-oauthlib==1.3.1
167 | - responses==0.18.0
168 | - rfc3986==1.5.0
169 | - rouge-score==0.1.2
170 | - rsa==4.9
171 | - semantic-version==2.10.0
172 | - sentencepiece==0.1.97
173 | - sentry-sdk==1.17.0
174 | - setproctitle==1.3.2
175 | - setuptools==65.6.3
176 | - smmap==5.0.0
177 | - sniffio==1.3.0
178 | - sqlalchemy==1.4.47
179 | - starlette==0.26.1
180 | - tenacity==8.2.2
181 | - tensorboard==2.12.0
182 | - tensorboard-data-server==0.7.0
183 | - tensorboard-plugin-wit==1.8.1
184 | - termcolor==2.2.0
185 | - tokenizers==0.12.1
186 | - toolz==0.12.0
187 | - torch==1.12.0
188 | - torchaudio==0.12.0
189 | - torchvision==0.13.0
190 | - tqdm==4.65.0
191 | - transformers==4.28.0.dev0
192 | - typing-extensions==4.4.0
193 | - typing-inspect==0.8.0
194 | - uc-micro-py==1.0.1
195 | - urllib3==1.26.14
196 | - uvicorn==0.21.1
197 | - wandb==0.14.0
198 | - websockets==10.4
199 | - werkzeug==2.2.3
200 | - wheel==0.38.4
201 | - xxhash==3.2.0
202 | - yarl==1.8.2
203 | prefix: /home/yourname/.conda/envs/chatpath
204 |
--------------------------------------------------------------------------------
/src/generate.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import fire
4 | import torch
5 | # from peft import PeftModel
6 | import transformers
7 | import gradio as gr
8 |
9 | assert (
10 | "LlamaTokenizer" in transformers._import_structure["models.llama"]
11 | ), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
12 | from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
13 |
14 | if torch.cuda.is_available():
15 | device = "cuda"
16 | else:
17 | device = "cpu"
18 |
19 | try:
20 | if torch.backends.mps.is_available():
21 | device = "mps"
22 | except Exception:  # older torch builds without MPS support lack torch.backends.mps
23 | pass
24 |
25 |
26 | def main(
27 | load_8bit: bool = False,
28 | base_model: str = "/path/to/llama-7B/hf/ft/checkpoint-300",
29 | # lora_weights: str = "tloen/alpaca-lora-7b",
30 | ):
31 | assert base_model, (
32 | "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
33 | )
34 |
35 | tokenizer = LlamaTokenizer.from_pretrained(base_model)
36 | if device == "cuda":
37 | model = LlamaForCausalLM.from_pretrained(
38 | base_model,
39 | load_in_8bit=load_8bit,
40 | torch_dtype=torch.float16,
41 | device_map="auto",
42 | )
43 |     else:  # "mps" or "cpu"; without this fallback `model` would be undefined on CPU
44 |         model = LlamaForCausalLM.from_pretrained(
45 |             base_model,
46 |             device_map={"": device},
47 |             torch_dtype=torch.float16,
48 |         )
49 |
50 | # unwind broken decapoda-research config
51 | model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
52 | model.config.bos_token_id = 1
53 | model.config.eos_token_id = 2
54 |
55 | if not load_8bit:
56 | model.half() # seems to fix bugs for some users.
57 |
58 | model.eval()
59 | if torch.__version__ >= "2" and sys.platform != "win32":
60 | model = torch.compile(model)
61 |
62 | def evaluate(
63 | instruction,
64 | input=None,
65 | temperature=0.6,
66 | top_p=0.9,
67 | top_k=40,
68 | num_beams=4,
69 | max_new_tokens=512,
70 | **kwargs,
71 | ):
72 | prompt = generate_prompt(instruction, input)
73 | inputs = tokenizer(prompt, return_tensors="pt")
74 | input_ids = inputs["input_ids"].to(device)
75 | generation_config = GenerationConfig(
76 | temperature=temperature,
77 | top_p=top_p,
78 | top_k=top_k,
79 | num_beams=num_beams,
80 | **kwargs,
81 | )
82 | with torch.no_grad():
83 | generation_output = model.generate(
84 | input_ids=input_ids,
85 | generation_config=generation_config,
86 | return_dict_in_generate=True,
87 | output_scores=True,
88 | max_new_tokens=max_new_tokens,
89 | )
90 | s = generation_output.sequences[0]
91 | output = tokenizer.decode(s)
92 | return output.split("### Response:")[1].strip()
93 |
94 | gr.Interface(
95 | fn=evaluate,
96 | inputs=[
97 | gr.components.Textbox(
98 | lines=2, label="Instruction", placeholder="For pathology questions, you can use: In a genuine and professional manner, please assume the role of a pathologist and respond to the provided questions."
99 | ),
100 | gr.components.Textbox(lines=2, label="Input", placeholder="none"),
101 | gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
102 | gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
103 | gr.components.Slider(
104 | minimum=0, maximum=100, step=1, value=40, label="Top k"
105 | ),
106 | gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
107 | gr.components.Slider(
108 | minimum=1, maximum=2000, step=1, value=128, label="Max tokens"
109 | ),
110 | ],
111 | outputs=[
112 |             gr.components.Textbox(
113 | lines=5,
114 | label="Output",
115 | )
116 | ],
117 | title="ChatPath",
118 | description="A knowledgeable Llama-based Chat Model for Pathology.",
119 | ).launch(share=True)
120 |
121 |
122 | def generate_prompt(instruction, input=None):
123 | if input:
124 | return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
125 |
126 | ### Instruction:
127 | {instruction}
128 |
129 | ### Input:
130 | {input}
131 |
132 | ### Response:
133 | """
134 | else:
135 | return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
136 |
137 | ### Instruction:
138 | {instruction}
139 |
140 | ### Response:
141 | """
142 |
143 |
144 | if __name__ == "__main__":
145 | fire.Fire(main)
--------------------------------------------------------------------------------
/src/imgs/chatpath_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/superjamessyx/PathGPT/dc170d26d1df8d7cac0492de7f02c84cf4520728/src/imgs/chatpath_logo.png
--------------------------------------------------------------------------------
/src/imgs/data_process.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/superjamessyx/PathGPT/dc170d26d1df8d7cac0492de7f02c84cf4520728/src/imgs/data_process.png
--------------------------------------------------------------------------------
/src/imgs/pathgpt_instruction.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/superjamessyx/PathGPT/dc170d26d1df8d7cac0492de7f02c84cf4520728/src/imgs/pathgpt_instruction.png
--------------------------------------------------------------------------------
/src/imgs/pathgpt_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/superjamessyx/PathGPT/dc170d26d1df8d7cac0492de7f02c84cf4520728/src/imgs/pathgpt_logo.png
--------------------------------------------------------------------------------
/src/train.py:
--------------------------------------------------------------------------------
1 | # Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import copy
16 | import logging
17 | import random
18 | from dataclasses import dataclass, field
19 | from typing import Optional, Dict, Sequence
20 |
21 | import torch
22 | import torch.distributed
23 | import transformers
24 | from torch.utils.data import Dataset
25 | from transformers import Trainer
26 | from datasets import load_dataset
27 | import utils
28 |
29 | IGNORE_INDEX = -100
30 | DEFAULT_PAD_TOKEN = "[PAD]"
31 | DEFAULT_EOS_TOKEN = "</s>"
32 | DEFAULT_BOS_TOKEN = "<s>"
33 | DEFAULT_UNK_TOKEN = "<unk>"
34 | PROMPT_DICT = {
35 | "prompt_input": (
36 | "Below is an instruction that describes a task, paired with an input that provides further context. "
37 | "Write a response that appropriately completes the request.\n\n"
38 | "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:"
39 | ),
40 | "prompt_no_input": (
41 | "Below is an instruction that describes a task. "
42 | "Write a response that appropriately completes the request.\n\n"
43 | "### Instruction:\n{instruction}\n\n### Response:"
44 | ),
45 | }
46 |
47 |
48 | @dataclass
49 | class ModelArguments:
50 | model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
51 |
52 |
53 | @dataclass
54 | class DataArguments:
55 | data_path: str = field(default=None, metadata={"help": "Path to the training data."})
56 |
57 |
58 | @dataclass
59 | class TrainingArguments(transformers.TrainingArguments):
60 | cache_dir: Optional[str] = field(default=None)
61 | optim: str = field(default="adamw_torch")
62 | model_max_length: int = field(
63 | default=512,
64 | metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
65 | )
66 |
67 |
68 | def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str):
69 | """Collects the state dict and dump to disk."""
70 | state_dict = trainer.model.state_dict()
71 | if trainer.args.should_save:
72 | cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}
73 | del state_dict
74 | trainer._save(output_dir, state_dict=cpu_state_dict) # noqa
75 |
76 |
77 | def smart_tokenizer_and_embedding_resize(
78 | special_tokens_dict: Dict,
79 | tokenizer: transformers.PreTrainedTokenizer,
80 | model: transformers.PreTrainedModel,
81 | ):
82 | """Resize tokenizer and embedding.
83 |
84 | Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
85 | """
86 | num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
87 | model.resize_token_embeddings(len(tokenizer))
88 |
89 | if num_new_tokens > 0:
90 | input_embeddings = model.get_input_embeddings().weight.data
91 | output_embeddings = model.get_output_embeddings().weight.data
92 |
93 | input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
94 | output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
95 |
96 | input_embeddings[-num_new_tokens:] = input_embeddings_avg
97 | output_embeddings[-num_new_tokens:] = output_embeddings_avg
98 |
99 |
100 | def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
101 | """Tokenize a list of strings."""
102 | tokenized_list = [
103 | tokenizer(
104 | text,
105 | return_tensors="pt",
106 | padding="longest",
107 | max_length=tokenizer.model_max_length,
108 | truncation=True,
109 | )
110 | for text in strings
111 | ]
112 | input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
113 | input_ids_lens = labels_lens = [
114 | tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
115 | ]
116 | return dict(
117 | input_ids=input_ids,
118 | labels=labels,
119 | input_ids_lens=input_ids_lens,
120 | labels_lens=labels_lens,
121 | )
122 |
123 |
124 | def preprocess(
125 | sources: Sequence[str],
126 | targets: Sequence[str],
127 | tokenizer: transformers.PreTrainedTokenizer,
128 | ) -> Dict:
129 | """Preprocess the data by tokenizing."""
130 | examples = [s + t for s, t in zip(sources, targets)]
131 | examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
132 | input_ids = examples_tokenized["input_ids"]
133 | labels = copy.deepcopy(input_ids)
134 | for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]):
135 |         label[:source_len] = IGNORE_INDEX  # mask prompt tokens so loss is computed only on the response
136 | return dict(input_ids=input_ids, labels=labels)
137 |
138 |
139 | @dataclass
140 | class DataCollatorForSupervisedDataset(object):
141 | """Collate examples for supervised fine-tuning."""
142 |
143 | tokenizer: transformers.PreTrainedTokenizer
144 |
145 | def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
146 | input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
147 | input_ids = [torch.tensor(x) for x in input_ids]
148 | input_ids = torch.nn.utils.rnn.pad_sequence(
149 | input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id
150 | )
151 | labels = [torch.tensor(x) for x in labels]
152 | labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
153 | return dict(
154 | input_ids=input_ids,
155 | labels=labels,
156 | attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
157 | )
158 |
159 | def train_tokenize_function(examples, tokenizer):
160 | prompt_input, prompt_no_input = PROMPT_DICT["prompt_input"], PROMPT_DICT["prompt_no_input"]
161 | if 'input' in examples:
162 | sources = [
163 | prompt_input.format_map(dict(instruction=instruction, input=input)) if input != "" \
164 | else prompt_no_input.format_map(dict(instruction=instruction)) \
165 | for instruction, input in zip(examples['instruction'], examples['input'])
166 | ]
167 | else:
168 | sources = [
169 | prompt_no_input.format_map(dict(instruction=instruction)) \
170 | for instruction in examples['instruction']
171 | ]
172 | targets = [f"{output}{tokenizer.eos_token}" for output in examples['output']]
173 | data_dict = preprocess(sources, targets, tokenizer)
174 | return data_dict
175 |
176 | def train():
177 | parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
178 | model_args, data_args, training_args = parser.parse_args_into_dataclasses()
179 |
180 | model = transformers.AutoModelForCausalLM.from_pretrained(
181 | model_args.model_name_or_path,
182 | cache_dir=training_args.cache_dir,
183 | )
184 |
185 | tokenizer = transformers.AutoTokenizer.from_pretrained(
186 | model_args.model_name_or_path,
187 | cache_dir=training_args.cache_dir,
188 | model_max_length=training_args.model_max_length,
189 | padding_side="right",
190 | use_fast=True,
191 | )
192 | if tokenizer.pad_token is None:
193 | smart_tokenizer_and_embedding_resize(
194 | special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN),
195 | tokenizer=tokenizer,
196 | model=model,
197 | )
198 | if "llama" in model_args.model_name_or_path:
199 | tokenizer.add_special_tokens(
200 | {
201 | "eos_token": DEFAULT_EOS_TOKEN,
202 | "bos_token": DEFAULT_BOS_TOKEN,
203 | "unk_token": DEFAULT_UNK_TOKEN,
204 | }
205 | )
206 |
207 | raw_train_datasets = load_dataset('json', data_files=data_args.data_path, split="train", cache_dir=training_args.cache_dir)
208 | if training_args.local_rank > 0:
209 |         torch.distributed.barrier()  # non-zero ranks wait here so rank 0 tokenizes and caches the dataset first
210 |
211 | train_dataset = raw_train_datasets.map(
212 | train_tokenize_function,
213 | batched=True,
214 | batch_size=3000,
215 | num_proc=32,
216 | remove_columns=raw_train_datasets.column_names,
217 | load_from_cache_file=True, # not args.overwrite_cache
218 | desc="Running tokenizer on train dataset",
219 | fn_kwargs={"tokenizer": tokenizer}
220 | )
221 |
222 | if training_args.local_rank == 0:
223 |         torch.distributed.barrier()  # rank 0 releases the other ranks, which now load the cached dataset
224 |
225 | if training_args.local_rank == 0:
226 | print(len(train_dataset))
227 | for index in random.sample(range(len(train_dataset)), 3):
228 | print(f"Sample {index} of the training set: {train_dataset[index]}.")
229 |
230 | data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
231 | data_module = dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
232 |
233 |     # Tell Trainer not to attempt DataParallel
234 | model.is_parallelizable = True
235 | model.model_parallel = True
236 |
237 | trainer = Trainer(model=model, tokenizer=tokenizer, args=training_args, **data_module)
238 | model.config.use_cache = False
239 |
240 | trainer.train()
241 | trainer.save_state()
242 | safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)
243 |
244 |
245 | if __name__ == "__main__":
246 | train()
247 |
--------------------------------------------------------------------------------
/src/utils.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | import logging
3 | import math
4 | import os
5 | import io
6 | import sys
7 | import time
8 | import json
9 | from typing import Optional, Sequence, Union
10 |
11 | import openai
12 | import tqdm
13 | from openai import openai_object
14 | import copy
15 |
16 | StrOrOpenAIObject = Union[str, openai_object.OpenAIObject]
17 |
18 | openai_org = os.getenv("OPENAI_ORG")
19 | if openai_org is not None:
20 | openai.organization = openai_org
21 | logging.warning(f"Switching to organization: {openai_org} for OAI API key.")
22 |
23 |
24 | @dataclasses.dataclass
25 | class OpenAIDecodingArguments(object):
26 | max_tokens: int = 1800
27 | temperature: float = 0.2
28 | top_p: float = 1.0
29 | n: int = 1
30 | stream: bool = False
31 | stop: Optional[Sequence[str]] = None
32 | presence_penalty: float = 0.0
33 | frequency_penalty: float = 0.0
34 | suffix: Optional[str] = None
35 | logprobs: Optional[int] = None
36 | echo: bool = False
37 |
38 |
39 | def openai_completion(
40 | prompts: Union[str, Sequence[str], Sequence[dict[str, str]], dict[str, str]],
41 | decoding_args: OpenAIDecodingArguments,
42 | model_name="text-davinci-003",
43 | sleep_time=2,
44 | batch_size=1,
45 | max_instances=sys.maxsize,
46 | max_batches=sys.maxsize,
47 | return_text=False,
48 | **decoding_kwargs,
49 | ) -> Union[Union[StrOrOpenAIObject], Sequence[StrOrOpenAIObject], Sequence[Sequence[StrOrOpenAIObject]],]:
50 | """Decode with OpenAI API.
51 |
52 | Args:
53 | prompts: A string or a list of strings to complete. If it is a chat model the strings should be formatted
54 | as explained here: https://github.com/openai/openai-python/blob/main/chatml.md. If it is a chat model
55 | it can also be a dictionary (or list thereof) as explained here:
56 | https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
57 | decoding_args: Decoding arguments.
58 | model_name: Model name. Can be either in the format of "org/model" or just "model".
59 | sleep_time: Time to sleep once the rate-limit is hit.
60 | batch_size: Number of prompts to send in a single request. Only for non chat model.
61 | max_instances: Maximum number of prompts to decode.
62 | max_batches: Maximum number of batches to decode. This argument will be deprecated in the future.
63 | return_text: If True, return text instead of full completion object (which contains things like logprob).
64 | decoding_kwargs: Additional decoding arguments. Pass in `best_of` and `logit_bias` if you need them.
65 |
66 | Returns:
67 | A completion or a list of completions.
68 | Depending on return_text, return_openai_object, and decoding_args.n, the completion type can be one of
69 | - a string (if return_text is True)
70 | - an openai_object.OpenAIObject object (if return_text is False)
71 | - a list of objects of the above types (if decoding_args.n > 1)
72 | """
73 | is_single_prompt = isinstance(prompts, (str, dict))
74 | if is_single_prompt:
75 | prompts = [prompts]
76 |
77 | if max_batches < sys.maxsize:
78 | logging.warning(
79 | "`max_batches` will be deprecated in the future, please use `max_instances` instead."
80 | "Setting `max_instances` to `max_batches * batch_size` for now."
81 | )
82 | max_instances = max_batches * batch_size
83 |
84 | prompts = prompts[:max_instances]
85 | num_prompts = len(prompts)
86 | prompt_batches = [
87 | prompts[batch_id * batch_size : (batch_id + 1) * batch_size]
88 | for batch_id in range(int(math.ceil(num_prompts / batch_size)))
89 | ]
90 |
91 | completions = []
92 | for batch_id, prompt_batch in tqdm.tqdm(
93 | enumerate(prompt_batches),
94 | desc="prompt_batches",
95 | total=len(prompt_batches),
96 | ):
97 | batch_decoding_args = copy.deepcopy(decoding_args) # cloning the decoding_args
98 |
99 | while True:
100 | try:
101 | shared_kwargs = dict(
102 | model=model_name,
103 | **batch_decoding_args.__dict__,
104 | **decoding_kwargs,
105 | )
106 | completion_batch = openai.Completion.create(prompt=prompt_batch, **shared_kwargs)
107 | choices = completion_batch.choices
108 |
109 | for choice in choices:
110 | choice["total_tokens"] = completion_batch.usage.total_tokens
111 | completions.extend(choices)
112 | break
113 | except openai.error.OpenAIError as e:
114 | logging.warning(f"OpenAIError: {e}.")
115 | if "Please reduce your prompt" in str(e):
116 | batch_decoding_args.max_tokens = int(batch_decoding_args.max_tokens * 0.8)
117 | logging.warning(f"Reducing target length to {batch_decoding_args.max_tokens}, Retrying...")
118 | else:
119 | logging.warning("Hit request rate limit; retrying...")
120 | time.sleep(sleep_time) # Annoying rate limit on requests.
121 |
122 | if return_text:
123 | completions = [completion.text for completion in completions]
124 | if decoding_args.n > 1:
125 | # make completions a nested list, where each entry is a consecutive decoding_args.n of original entries.
126 | completions = [completions[i : i + decoding_args.n] for i in range(0, len(completions), decoding_args.n)]
127 | if is_single_prompt:
128 | # Return non-tuple if only 1 input and 1 generation.
129 | (completions,) = completions
130 | return completions
131 |
132 |
133 | def _make_w_io_base(f, mode: str):
134 | if not isinstance(f, io.IOBase):
135 | f_dirname = os.path.dirname(f)
136 | if f_dirname != "":
137 | os.makedirs(f_dirname, exist_ok=True)
138 | f = open(f, mode=mode)
139 | return f
140 |
141 |
142 | def _make_r_io_base(f, mode: str):
143 | if not isinstance(f, io.IOBase):
144 | f = open(f, mode=mode)
145 | return f
146 |
147 |
148 | def jdump(obj, f, mode="w", indent=4, default=str):
149 | """Dump a str or dictionary to a file in json format.
150 |
151 | Args:
152 | obj: An object to be written.
153 | f: A string path to the location on disk.
154 | mode: Mode for opening the file.
155 | indent: Indent for storing json dictionaries.
156 | default: A function to handle non-serializable entries; defaults to `str`.
157 | """
158 | f = _make_w_io_base(f, mode)
159 | if isinstance(obj, (dict, list)):
160 | json.dump(obj, f, indent=indent, default=default)
161 | elif isinstance(obj, str):
162 | f.write(obj)
163 | else:
164 | raise ValueError(f"Unexpected type: {type(obj)}")
165 | f.close()
166 |
167 |
168 | def jload(f, mode="r"):
169 | """Load a .json file into a dictionary."""
170 | f = _make_r_io_base(f, mode)
171 | jdict = json.load(f)
172 | f.close()
173 | return jdict
174 |
--------------------------------------------------------------------------------