├── mobilellm.png ├── configs ├── 1.5B │ ├── tokenizer.model │ ├── generation_config.json │ ├── special_tokens_map.json │ ├── config.json │ └── tokenizer_config.json ├── 125M │ ├── tokenizer.model │ ├── generation_config.json │ ├── special_tokens_map.json │ ├── config.json │ └── tokenizer_config.json ├── 1B │ ├── tokenizer.model │ ├── generation_config.json │ ├── special_tokens_map.json │ ├── config.json │ └── tokenizer_config.json ├── 350M │ ├── tokenizer.model │ ├── generation_config.json │ ├── special_tokens_map.json │ ├── config.json │ └── tokenizer_config.json └── 600M │ ├── tokenizer.model │ ├── generation_config.json │ ├── special_tokens_map.json │ ├── config.json │ └── tokenizer_config.json ├── requirement.txt ├── local_debug.sh ├── pretrain.sh ├── CONTRIBUTING.md ├── utils ├── process_args.py ├── pretrain_trainer.py └── modeling_llama.py ├── CODE_OF_CONDUCT.md ├── README.md ├── pretrain.py └── LICENSE /mobilellm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/howsam/MobileLLM/main/mobilellm.png -------------------------------------------------------------------------------- /configs/1.5B/tokenizer.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/howsam/MobileLLM/main/configs/1.5B/tokenizer.model -------------------------------------------------------------------------------- /configs/125M/tokenizer.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/howsam/MobileLLM/main/configs/125M/tokenizer.model -------------------------------------------------------------------------------- /configs/1B/tokenizer.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/howsam/MobileLLM/main/configs/1B/tokenizer.model -------------------------------------------------------------------------------- /configs/350M/tokenizer.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/howsam/MobileLLM/main/configs/350M/tokenizer.model -------------------------------------------------------------------------------- /configs/600M/tokenizer.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/howsam/MobileLLM/main/configs/600M/tokenizer.model -------------------------------------------------------------------------------- /requirement.txt: -------------------------------------------------------------------------------- 1 | transformers==4.41.2 2 | accelerate==0.31.0 3 | datasets==2.20.0 4 | sentencepiece 5 | tensorboardX 6 | -------------------------------------------------------------------------------- /configs/1B/generation_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "_from_model_config": true, 3 | "bos_token_id": 1, 4 | "eos_token_id": 2, 5 | "transformers_version": "4.34.1" 6 | } 7 | -------------------------------------------------------------------------------- /configs/1.5B/generation_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "_from_model_config": true, 3 | "bos_token_id": 1, 4 | "eos_token_id": 2, 5 | "transformers_version": "4.34.1" 6 | } 7 | -------------------------------------------------------------------------------- /configs/125M/generation_config.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "_from_model_config": true, 3 | "bos_token_id": 1, 4 | "eos_token_id": 2, 5 | "transformers_version": "4.34.1" 6 | } 7 | -------------------------------------------------------------------------------- /configs/350M/generation_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "_from_model_config": true, 3 | "bos_token_id": 1, 4 | "eos_token_id": 2, 5 | "transformers_version": "4.34.1" 6 | } 7 | -------------------------------------------------------------------------------- /configs/600M/generation_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "_from_model_config": true, 3 | "bos_token_id": 1, 4 | "eos_token_id": 2, 5 | "transformers_version": "4.34.1" 6 | } 7 | -------------------------------------------------------------------------------- /configs/1.5B/special_tokens_map.json: -------------------------------------------------------------------------------- 1 | { 2 | "bos_token": { 3 | "content": "", 4 | "lstrip": false, 5 | "normalized": false, 6 | "rstrip": false, 7 | "single_word": false 8 | }, 9 | "eos_token": { 10 | "content": "", 11 | "lstrip": false, 12 | "normalized": false, 13 | "rstrip": false, 14 | "single_word": false 15 | }, 16 | "unk_token": { 17 | "content": "", 18 | "lstrip": false, 19 | "normalized": false, 20 | "rstrip": false, 21 | "single_word": false 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /configs/125M/special_tokens_map.json: -------------------------------------------------------------------------------- 1 | { 2 | "bos_token": { 3 | "content": "", 4 | "lstrip": false, 5 | "normalized": false, 6 | "rstrip": false, 7 | "single_word": false 8 | }, 9 | "eos_token": { 10 | "content": "", 11 | "lstrip": false, 12 | "normalized": false, 13 | "rstrip": false, 14 | "single_word": false 15 | }, 16 | "unk_token": { 17 | "content": "", 18 | "lstrip": false, 19 | "normalized": false, 20 | "rstrip": false, 21 | "single_word": false 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /configs/1B/special_tokens_map.json: -------------------------------------------------------------------------------- 1 | { 2 | "bos_token": { 3 | "content": "", 4 | "lstrip": false, 5 | "normalized": false, 6 | "rstrip": false, 7 | "single_word": false 8 | }, 9 | "eos_token": { 10 | "content": "", 11 | "lstrip": false, 12 | "normalized": false, 13 | "rstrip": false, 14 | "single_word": false 15 | }, 16 | "unk_token": { 17 | "content": "", 18 | "lstrip": false, 19 | "normalized": false, 20 | "rstrip": false, 21 | "single_word": false 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /configs/350M/special_tokens_map.json: -------------------------------------------------------------------------------- 1 | { 2 | "bos_token": { 3 | "content": "", 4 | "lstrip": false, 5 | "normalized": false, 6 | "rstrip": false, 7 | "single_word": false 8 | }, 9 | "eos_token": { 10 | "content": "", 11 | "lstrip": false, 12 | "normalized": false, 13 | "rstrip": false, 14 | "single_word": false 15 | }, 16 | "unk_token": { 17 | "content": "", 18 | "lstrip": false, 19 | "normalized": false, 20 | "rstrip": false, 21 | "single_word": false 22 | } 23 | } 24 | -------------------------------------------------------------------------------- 
/configs/600M/special_tokens_map.json: -------------------------------------------------------------------------------- 1 | { 2 | "bos_token": { 3 | "content": "", 4 | "lstrip": false, 5 | "normalized": false, 6 | "rstrip": false, 7 | "single_word": false 8 | }, 9 | "eos_token": { 10 | "content": "", 11 | "lstrip": false, 12 | "normalized": false, 13 | "rstrip": false, 14 | "single_word": false 15 | }, 16 | "unk_token": { 17 | "content": "", 18 | "lstrip": false, 19 | "normalized": false, 20 | "rstrip": false, 21 | "single_word": false 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /configs/1.5B/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "architectures": [ 3 | "LlamaForCausalLM" 4 | ], 5 | "attention_bias": false, 6 | "bos_token_id": 1, 7 | "eos_token_id": 2, 8 | "hidden_act": "silu", 9 | "hidden_size": 1600, 10 | "initializer_range": 0.02, 11 | "intermediate_size": 4352, 12 | "max_position_embeddings": 2048, 13 | "model_type": "llama", 14 | "num_attention_heads": 25, 15 | "num_hidden_layers": 54, 16 | "num_key_value_heads": 5, 17 | "pretraining_tp": 1, 18 | "rms_norm_eps": 1e-05, 19 | "rope_scaling": null, 20 | "rope_theta": 10000.0, 21 | "tie_word_embeddings": false, 22 | "torch_dtype": "float16", 23 | "transformers_version": "4.34.1", 24 | "use_cache": true, 25 | "vocab_size": 32000 26 | } 27 | -------------------------------------------------------------------------------- /configs/125M/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "architectures": [ 3 | "LlamaForCausalLM" 4 | ], 5 | "attention_bias": false, 6 | "bos_token_id": 1, 7 | "eos_token_id": 2, 8 | "hidden_act": "silu", 9 | "hidden_size": 576, 10 | "initializer_range": 0.02, 11 | "intermediate_size": 1536, 12 | "max_position_embeddings": 2048, 13 | "model_type": "llama", 14 | "num_attention_heads": 9, 15 | "num_hidden_layers": 30, 16 | "num_key_value_heads": 3, 17 | "pretraining_tp": 1, 18 | "rms_norm_eps": 1e-05, 19 | "rope_scaling": null, 20 | "rope_theta": 10000.0, 21 | "tie_word_embeddings": false, 22 | "torch_dtype": "float16", 23 | "transformers_version": "4.34.1", 24 | "use_cache": true, 25 | "vocab_size": 32000 26 | } 27 | -------------------------------------------------------------------------------- /configs/1B/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "architectures": [ 3 | "LlamaForCausalLM" 4 | ], 5 | "attention_bias": false, 6 | "bos_token_id": 1, 7 | "eos_token_id": 2, 8 | "hidden_act": "silu", 9 | "hidden_size": 1280, 10 | "initializer_range": 0.02, 11 | "intermediate_size": 3584, 12 | "max_position_embeddings": 2048, 13 | "model_type": "llama", 14 | "num_attention_heads": 20, 15 | "num_hidden_layers": 54, 16 | "num_key_value_heads": 5, 17 | "pretraining_tp": 1, 18 | "rms_norm_eps": 1e-05, 19 | "rope_scaling": null, 20 | "rope_theta": 10000.0, 21 | "tie_word_embeddings": false, 22 | "torch_dtype": "float16", 23 | "transformers_version": "4.34.1", 24 | "use_cache": true, 25 | "vocab_size": 32000 26 | } 27 | -------------------------------------------------------------------------------- /configs/350M/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "architectures": [ 3 | "LlamaForCausalLM" 4 | ], 5 | "attention_bias": false, 6 | "bos_token_id": 1, 7 | "eos_token_id": 2, 8 | "hidden_act": "silu", 9 | "hidden_size": 960, 10 | 
"initializer_range": 0.02, 11 | "intermediate_size": 2560, 12 | "max_position_embeddings": 2048, 13 | "model_type": "llama", 14 | "num_attention_heads": 15, 15 | "num_hidden_layers": 32, 16 | "num_key_value_heads": 5, 17 | "pretraining_tp": 1, 18 | "rms_norm_eps": 1e-05, 19 | "rope_scaling": null, 20 | "rope_theta": 10000.0, 21 | "tie_word_embeddings": false, 22 | "torch_dtype": "float16", 23 | "transformers_version": "4.34.1", 24 | "use_cache": true, 25 | "vocab_size": 32000 26 | } 27 | -------------------------------------------------------------------------------- /configs/600M/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "architectures": [ 3 | "LlamaForCausalLM" 4 | ], 5 | "attention_bias": false, 6 | "bos_token_id": 1, 7 | "eos_token_id": 2, 8 | "hidden_act": "silu", 9 | "hidden_size": 1152, 10 | "initializer_range": 0.02, 11 | "intermediate_size": 3072, 12 | "max_position_embeddings": 2048, 13 | "model_type": "llama", 14 | "num_attention_heads": 18, 15 | "num_hidden_layers": 40, 16 | "num_key_value_heads": 6, 17 | "pretraining_tp": 1, 18 | "rms_norm_eps": 1e-05, 19 | "rope_scaling": null, 20 | "rope_theta": 10000.0, 21 | "tie_word_embeddings": false, 22 | "torch_dtype": "float16", 23 | "transformers_version": "4.34.1", 24 | "use_cache": true, 25 | "vocab_size": 32000 26 | } 27 | -------------------------------------------------------------------------------- /configs/1.5B/tokenizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "add_bos_token": true, 3 | "add_eos_token": false, 4 | "added_tokens_decoder": { 5 | "0": { 6 | "content": "", 7 | "lstrip": false, 8 | "normalized": false, 9 | "rstrip": false, 10 | "single_word": false, 11 | "special": true 12 | }, 13 | "1": { 14 | "content": "", 15 | "lstrip": false, 16 | "normalized": false, 17 | "rstrip": false, 18 | "single_word": false, 19 | "special": true 20 | }, 21 | "2": { 22 | "content": "", 23 | "lstrip": false, 24 | "normalized": false, 25 | "rstrip": false, 26 | "single_word": false, 27 | "special": true 28 | } 29 | }, 30 | "bos_token": "", 31 | "clean_up_tokenization_spaces": false, 32 | "eos_token": "", 33 | "legacy": true, 34 | "model_max_length": 1000000000000000019884624838656, 35 | "pad_token": null, 36 | "sp_model_kwargs": {}, 37 | "spaces_between_special_tokens": false, 38 | "tokenizer_class": "LlamaTokenizer", 39 | "unk_token": "", 40 | "use_default_system_prompt": true 41 | } 42 | -------------------------------------------------------------------------------- /configs/125M/tokenizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "add_bos_token": true, 3 | "add_eos_token": false, 4 | "added_tokens_decoder": { 5 | "0": { 6 | "content": "", 7 | "lstrip": false, 8 | "normalized": false, 9 | "rstrip": false, 10 | "single_word": false, 11 | "special": true 12 | }, 13 | "1": { 14 | "content": "", 15 | "lstrip": false, 16 | "normalized": false, 17 | "rstrip": false, 18 | "single_word": false, 19 | "special": true 20 | }, 21 | "2": { 22 | "content": "", 23 | "lstrip": false, 24 | "normalized": false, 25 | "rstrip": false, 26 | "single_word": false, 27 | "special": true 28 | } 29 | }, 30 | "bos_token": "", 31 | "clean_up_tokenization_spaces": false, 32 | "eos_token": "", 33 | "legacy": true, 34 | "model_max_length": 1000000000000000019884624838656, 35 | "pad_token": null, 36 | "sp_model_kwargs": {}, 37 | "spaces_between_special_tokens": false, 38 | 
"tokenizer_class": "LlamaTokenizer", 39 | "unk_token": "", 40 | "use_default_system_prompt": true 41 | } 42 | -------------------------------------------------------------------------------- /configs/1B/tokenizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "add_bos_token": true, 3 | "add_eos_token": false, 4 | "added_tokens_decoder": { 5 | "0": { 6 | "content": "", 7 | "lstrip": false, 8 | "normalized": false, 9 | "rstrip": false, 10 | "single_word": false, 11 | "special": true 12 | }, 13 | "1": { 14 | "content": "", 15 | "lstrip": false, 16 | "normalized": false, 17 | "rstrip": false, 18 | "single_word": false, 19 | "special": true 20 | }, 21 | "2": { 22 | "content": "", 23 | "lstrip": false, 24 | "normalized": false, 25 | "rstrip": false, 26 | "single_word": false, 27 | "special": true 28 | } 29 | }, 30 | "bos_token": "", 31 | "clean_up_tokenization_spaces": false, 32 | "eos_token": "", 33 | "legacy": true, 34 | "model_max_length": 1000000000000000019884624838656, 35 | "pad_token": null, 36 | "sp_model_kwargs": {}, 37 | "spaces_between_special_tokens": false, 38 | "tokenizer_class": "LlamaTokenizer", 39 | "unk_token": "", 40 | "use_default_system_prompt": true 41 | } 42 | -------------------------------------------------------------------------------- /configs/350M/tokenizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "add_bos_token": true, 3 | "add_eos_token": false, 4 | "added_tokens_decoder": { 5 | "0": { 6 | "content": "", 7 | "lstrip": false, 8 | "normalized": false, 9 | "rstrip": false, 10 | "single_word": false, 11 | "special": true 12 | }, 13 | "1": { 14 | "content": "", 15 | "lstrip": false, 16 | "normalized": false, 17 | "rstrip": false, 18 | "single_word": false, 19 | "special": true 20 | }, 21 | "2": { 22 | "content": "", 23 | "lstrip": false, 24 | "normalized": false, 25 | "rstrip": false, 26 | "single_word": false, 27 | "special": true 28 | } 29 | }, 30 | "bos_token": "", 31 | "clean_up_tokenization_spaces": false, 32 | "eos_token": "", 33 | "legacy": true, 34 | "model_max_length": 1000000000000000019884624838656, 35 | "pad_token": null, 36 | "sp_model_kwargs": {}, 37 | "spaces_between_special_tokens": false, 38 | "tokenizer_class": "LlamaTokenizer", 39 | "unk_token": "", 40 | "use_default_system_prompt": true 41 | } 42 | -------------------------------------------------------------------------------- /configs/600M/tokenizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "add_bos_token": true, 3 | "add_eos_token": false, 4 | "added_tokens_decoder": { 5 | "0": { 6 | "content": "", 7 | "lstrip": false, 8 | "normalized": false, 9 | "rstrip": false, 10 | "single_word": false, 11 | "special": true 12 | }, 13 | "1": { 14 | "content": "", 15 | "lstrip": false, 16 | "normalized": false, 17 | "rstrip": false, 18 | "single_word": false, 19 | "special": true 20 | }, 21 | "2": { 22 | "content": "", 23 | "lstrip": false, 24 | "normalized": false, 25 | "rstrip": false, 26 | "single_word": false, 27 | "special": true 28 | } 29 | }, 30 | "bos_token": "", 31 | "clean_up_tokenization_spaces": false, 32 | "eos_token": "", 33 | "legacy": true, 34 | "model_max_length": 1000000000000000019884624838656, 35 | "pad_token": null, 36 | "sp_model_kwargs": {}, 37 | "spaces_between_special_tokens": false, 38 | "tokenizer_class": "LlamaTokenizer", 39 | "unk_token": "", 40 | "use_default_system_prompt": true 41 | } 42 | 
-------------------------------------------------------------------------------- /local_debug.sh: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright (c) Meta Platforms, Inc. and affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | torchrun --nnodes=1 --nproc_per_node=8 pretrain.py \ 9 | --input_model_filename "./configs/125M/" \ 10 | --train_data_local_path "basepath" \ 11 | --output_dir "output_path" \ 12 | --do_train True \ 13 | --do_eval False \ 14 | --model_max_length 2048 \ 15 | --fp16 False \ 16 | --bf16 True \ 17 | --log_on_each_node False \ 18 | --ddp_find_unused_parameters False \ 19 | --logging_dir "logging_path" \ 20 | --per_device_train_batch_size 1 \ 21 | --per_device_eval_batch_size 1 \ 22 | --gradient_accumulation_steps 1 \ 23 | --save_steps 1000 \ 24 | --eval_steps 1000 \ 25 | --logging_steps 10 \ 26 | --evaluation_strategy "no" \ 27 | --save_strategy "steps" \ 28 | --report_to "tensorboard" \ 29 | --save_total_limit 1 \ 30 | --learning_rate 1e-3 \ 31 | --weight_decay 0.1 \ 32 | --adam_beta1 0.9 \ 33 | --adam_beta2 0.95 \ 34 | --adam_epsilon 1e-8 \ 35 | --lr_scheduler_type "cosine" \ 36 | --gradient_checkpointing False \ 37 | --save_safetensors False \ 38 | --max_steps 10000 \ 39 | --warmup_step 1000 \ 40 | --share_embedding True 41 | -------------------------------------------------------------------------------- /pretrain.sh: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright (c) Meta Platforms, Inc. and affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | torchrun --nnodes=1 --nproc_per_node=8 pretrain.py \ 9 | --input_model_filename "./configs/125M/" \ 10 | --train_data_local_path "basepath" \ 11 | --output_dir "output_path" \ 12 | --do_train True \ 13 | --do_eval False \ 14 | --model_max_length 2048 \ 15 | --fp16 False \ 16 | --bf16 True \ 17 | --log_on_each_node False \ 18 | --ddp_find_unused_parameters False \ 19 | --logging_dir "logging_path" \ 20 | --per_device_train_batch_size 32 \ 21 | --per_device_eval_batch_size 32 \ 22 | --gradient_accumulation_steps 1 \ 23 | --save_steps 1000 \ 24 | --eval_steps 5000 \ 25 | --logging_steps 10 \ 26 | --evaluation_strategy "no" \ 27 | --save_strategy "steps" \ 28 | --report_to "tensorboard" \ 29 | --save_total_limit 1 \ 30 | --learning_rate 5e-4 \ 31 | --weight_decay 0.1 \ 32 | --adam_beta1 0.9 \ 33 | --adam_beta2 0.95 \ 34 | --adam_epsilon 1e-8 \ 35 | --lr_scheduler_type "cosine" \ 36 | --gradient_checkpointing False \ 37 | --save_safetensors False \ 38 | --max_steps 480000 \ 39 | --warmup_step 1000 \ 40 | --share_embedding True 41 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to MobileLLM 2 | We want to make contributing to this project as easy and transparent as 3 | possible. 4 | 5 | ## Pull Requests 6 | We actively welcome your pull requests. 7 | 8 | 1. Fork the repo and create your branch from `main`. 9 | 2. If you've added code that should be tested, add tests. 10 | 3. If you've changed APIs, update the documentation. 11 | 4. Ensure the test suite passes. 12 | 5. Make sure your code lints. 
13 | 6. If you haven't already, complete the Contributor License Agreement ("CLA"). 14 | 15 | ## Contributor License Agreement ("CLA") 16 | In order to accept your pull request, we need you to submit a CLA. You only need 17 | to do this once to work on any of Facebook's open source projects. 18 | 19 | Complete your CLA here: 20 | 21 | ## Issues 22 | We use GitHub issues to track public bugs. Please ensure your description is 23 | clear and has sufficient instructions to be able to reproduce the issue. 24 | 25 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe 26 | disclosure of security bugs. In those cases, please go through the process 27 | outlined on that page and do not file a public issue. 28 | 29 | ## License 30 | By contributing to MobileLLM, you agree that your contributions will be licensed 31 | under the LICENSE file in the root directory of this source tree. -------------------------------------------------------------------------------- /utils/process_args.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright (c) Meta Platforms, Inc. and affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import os 9 | from dataclasses import dataclass, field 10 | from typing import Optional 11 | 12 | import transformers 13 | 14 | 15 | @dataclass 16 | class ModelArguments: 17 | local_dir: str = field( 18 | default=None, metadata={"help": "Local Path of storing inputs and outputs "} 19 | ) 20 | input_model_filename: Optional[str] = field( 21 | default="test-input", metadata={"help": "Input model relative path"} 22 | ) 23 | output_model_filename: Optional[str] = field( 24 | default="test-output", metadata={"help": "Output model relative path"} 25 | ) 26 | share_embedding: Optional[bool] = field( 27 | default=True, metadata={"help": "whether to share input/output embedding"} 28 | ) 29 | layer_sharing: Optional[bool] = field( 30 | default=True, metadata={"help": "whether to do layer sharing"} 31 | ) 32 | 33 | 34 | @dataclass 35 | class DataArguments: 36 | train_data_local_path: Optional[str] = field( 37 | default=None, metadata={"help": "Train data local path"} 38 | ) 39 | eval_data_local_path: Optional[str] = field( 40 | default=None, metadata={"help": "Eval data local path"} 41 | ) 42 | 43 | 44 | 45 | @dataclass 46 | class TrainingArguments(transformers.TrainingArguments): 47 | cache_dir: Optional[str] = field(default=None) 48 | optim: Optional[str] = field(default="adamw_torch") 49 | output_dir: Optional[str] = field(default="/tmp/output/") 50 | model_max_length: Optional[int] = field( 51 | default=512, 52 | metadata={ 53 | "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated). 
512 or 1024" 54 | }, 55 | ) 56 | 57 | 58 | def process_args(): 59 | parser = transformers.HfArgumentParser( 60 | (ModelArguments, DataArguments, TrainingArguments) 61 | ) 62 | model_args, data_args, training_args = parser.parse_args_into_dataclasses() 63 | 64 | return model_args, data_args, training_args 65 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to make participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies within all project spaces, and it also applies when 49 | an individual is representing the project or its community in public spaces. 50 | Examples of representing a project or community include using an official 51 | project e-mail address, posting via an official social media account, or acting 52 | as an appointed representative at an online or offline event. Representation of 53 | a project may be further defined and clarified by project maintainers. 54 | 55 | This Code of Conduct also applies outside the project spaces when there is a 56 | reasonable belief that an individual's behavior may have a negative impact on 57 | the project or its community. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported by contacting the project team at . 
All 63 | complaints will be reviewed and investigated and will result in a response that 64 | is deemed necessary and appropriate to the circumstances. The project team is 65 | obligated to maintain confidentiality with regard to the reporter of an incident. 66 | Further details of specific enforcement policies may be posted separately. 67 | 68 | Project maintainers who do not follow or enforce the Code of Conduct in good 69 | faith may face temporary or permanent repercussions as determined by other 70 | members of the project's leadership. 71 | 72 | ## Attribution 73 | 74 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 75 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 76 | 77 | [homepage]: https://www.contributor-covenant.org 78 | 79 | For answers to common questions about this code of conduct, see 80 | https://www.contributor-covenant.org/faq 81 | -------------------------------------------------------------------------------- /utils/pretrain_trainer.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright (c) Meta Platforms, Inc. and affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | import math 9 | from functools import partial 10 | from typing import Optional 11 | 12 | import torch 13 | from transformers import Trainer 14 | from torch.optim import Optimizer 15 | from torch.optim.lr_scheduler import LambdaLR 16 | from torch.utils.data import DataLoader 17 | 18 | 19 | def _get_cosine_schedule_with_warmup_lr_lambda( 20 | current_step: int, 21 | *, 22 | num_warmup_steps: int, 23 | num_training_steps: int, 24 | num_cycles: float, 25 | min_ratio: float = 0.1, 26 | theta: float = 1, 27 | ) -> float: 28 | if current_step < num_warmup_steps: 29 | return float(current_step) / float(max(1, num_warmup_steps)) 30 | elif current_step <= num_training_steps: 31 | progress = float(current_step - num_warmup_steps) / float( 32 | max(1, num_training_steps - num_warmup_steps) 33 | ) 34 | lr = min_ratio + 0.5 * (1 - min_ratio) * ( 35 | math.cos(math.pi * progress**theta / num_cycles) + 1 36 | ) 37 | else: 38 | lr = min_ratio 39 | return lr 40 | 41 | 42 | def get_cosine_schedule_with_warmup( 43 | optimizer: Optimizer, 44 | num_warmup_steps: int, 45 | num_training_steps: int, 46 | num_cycles: float = 1.0, 47 | last_epoch: int = -1, 48 | ) -> LambdaLR: 49 | """ 50 | Create a schedule with a learning rate that decreases following the values of the cosine function between the 51 | initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the 52 | initial lr set in the optimizer. 53 | 54 | Args: 55 | optimizer ([`~torch.optim.Optimizer`]): 56 | The optimizer for which to schedule the learning rate. 57 | num_warmup_steps (`int`): 58 | The number of steps for the warmup phase. 59 | num_training_steps (`int`): 60 | The total number of training steps. 61 | num_cycles (`float`, *optional*, defaults to 0.5): 62 | The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 63 | following a half-cosine). 64 | last_epoch (`int`, *optional*, defaults to -1): 65 | The index of the last epoch when resuming training. 66 | 67 | Return: 68 | `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
69 | """ 70 | 71 | lr_lambda = partial( 72 | _get_cosine_schedule_with_warmup_lr_lambda, 73 | num_warmup_steps=num_warmup_steps, 74 | num_training_steps=num_training_steps, 75 | num_cycles=num_cycles, 76 | ) 77 | return LambdaLR(optimizer, lr_lambda, last_epoch) 78 | 79 | 80 | class PretrainMixin: 81 | def __init__( 82 | self, 83 | manifold_ckpt_dir: Optional[str] = None, 84 | max_parallel_files: int = 5, 85 | resume: bool = False, 86 | **kwargs, 87 | ) -> None: 88 | super().__init__(**kwargs) 89 | self.manifold_ckpt_dir = manifold_ckpt_dir 90 | self.max_parallel_files = max_parallel_files 91 | self.resume = resume 92 | 93 | def create_scheduler( 94 | self, 95 | num_training_steps: int, 96 | optimizer: Optional[torch.optim.Optimizer] = None, 97 | ) -> LambdaLR: 98 | """ 99 | Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or 100 | passed as an argument. 101 | 102 | Args: 103 | num_training_steps (int): The number of training steps to do. 104 | """ 105 | if self.lr_scheduler is None: 106 | self.lr_scheduler = get_cosine_schedule_with_warmup( 107 | optimizer=self.optimizer if optimizer is None else optimizer, 108 | num_warmup_steps=self.args.get_warmup_steps(num_training_steps), 109 | num_training_steps=num_training_steps, 110 | ) 111 | self._created_lr_scheduler = True 112 | return self.lr_scheduler 113 | 114 | 115 | class PretrainTrainer(PretrainMixin, Trainer): 116 | def get_train_dataloader(self) -> DataLoader: 117 | """ 118 | Returns the training [`~torch.utils.data.DataLoader`]. 119 | 120 | Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed 121 | training if necessary) otherwise. 122 | 123 | Subclass and override this method if you want to inject some custom behavior. 124 | """ 125 | if self.train_dataset is None: 126 | raise ValueError("Trainer: training requires a train_dataset.") 127 | return self.train_dataset 128 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MobileLLM 2 | 3 | This repository contains the training code of MobileLLM introduced in our work: "[MobileLLM: Optimizing Sub-billion Parameter Language Models for On-Device Use Cases](https://arxiv.org/abs/2402.14905)", published in ICML 2024. 4 | 5 | In this work, we comprehensively consider multiple design factors to obtain high-quality LLMs with fewer than a billion parameters. We integrated (1) SwiGLU activation function, (2) deep and thin architectures, (3) embedding sharing, (4) grouped-query attention to build MobileLLM. MobileLLM-125M/350M attains a remarkable 2.7%/4.3% accuracy boost over preceding 125M/350M SoTA models on zero-shot commonsense reasoning tasks. In our updated version, we further demonstrate that our design philosophy scales effectively to larger models, with SoTA results for MobileLLM-600M/1B/1.5B. 6 | 7 |
8 | ![MobileLLM](mobilellm.png) 9 |
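For orientation, these design choices map directly onto the released configs and the training code in this repo. The following minimal sketch (the choice of the 125M config and the parameter-count print are illustrative) instantiates the architecture the same way `pretrain.py` does:

```python
from transformers import AutoConfig

from utils.modeling_llama import LlamaForCausalLM  # this repo's modeling file

# ./configs/125M/: 30 layers at hidden_size 576 (deep-and-thin), 9 attention heads
# sharing 3 KV heads (grouped-query attention), and a SwiGLU ("silu") feed-forward.
config = AutoConfig.from_pretrained("./configs/125M/")
config.share_embedding = True   # tie input/output embeddings (embedding sharing)
config.layer_sharing = False    # set True for the layer-sharing "LS" variants
model = LlamaForCausalLM(config=config)

print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")
```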
10 | 11 | 12 | ## Citation 13 | 14 | If you find our code useful for your research, please consider citing: 15 | 16 | @article{liu2024mobilellm, 17 | title={MobileLLM: Optimizing Sub-billion Parameter Language Models for On-Device Use Cases}, 18 | author={Liu, Zechun and Zhao, Changsheng and Iandola, Forrest and Lai, Chen and Tian, Yuandong and Fedorov, Igor and Xiong, Yunyang and Chang, Ernie and Shi, Yangyang and Krishnamoorthi, Raghuraman and others}, 19 | journal={arXiv preprint arXiv:2402.14905}, 20 | year={2024} 21 | } 22 | 23 | ## Run 24 | 25 | ### Step 1. Requirements: 26 | * Python 3.9, PyTorch >= 2.0 27 | * pip install -r requirement.txt 28 | 29 | ### Step 2. Data preprocessing 30 | Divide a pre-tokenized dataset, or tokenize your own, and distribute it evenly across the total number of training nodes, where each node comprises 1x8 GPUs. Next, organize the data into the following structure: 31 | - basepath 32 | - 1 33 | - xxx.jsonl 34 | - 2 35 | - xxx.jsonl 36 | - ... 37 | - #nodes 38 | - xxx.jsonl 39 | 40 | Each line of a jsonl file is a JSON object with the tokenized data, e.g. {"token_ids": [1,2,3,4,...]}. 41 | 42 | Our training code is compatible with the data pre-processing method in https://github.com/LLM360/amber-data-prep. 43 | 44 | 45 | ### Step 3. Training script 46 | The script `pretrain.sh` is provided to initiate training on a 1x8 node setup using torchrun. This script can be modified to adjust the `--nnodes` parameter and other settings to suit different multi-node configurations, such as those using slurm or torchx. The learning rate in the script is for a single 1x8 node with a per-device batch size of 32. If you increase the number of nodes or the batch size, you need to increase the learning rate linearly. 47 | 48 | Steps to run: 49 | * In the `pretrain.sh` file, set `--train_data_local_path` to the pre-processed data from Step 2 and `--input_model_filename` to `./configs/{model_size}/`. 50 | * Run `bash pretrain.sh` 51 | 52 | ### Others 53 | The model weights are still under legal review. If you have any questions, feel free to email (zechunliu at meta dot com) or (cszhao at meta dot com). 54 | 55 | 56 | ## Training cost 57 | It takes the following number of days to train MobileLLM on 1T tokens using 32 NVIDIA A100 80G GPUs. 58 | | 125M | 350M | 600M | 1B | 1.5B | 59 | | --- | --- | --- | --- | --- | 60 | | ~3 days | ~6 days | ~8 days | ~12 days | ~18 days | 61 | 62 | 63 | ## Results on Zero-shot Common Sense Reasoning tasks 64 | 65 | ### MobileLLM-125M 66 | 67 | | model | boolq | piqa | siqa | hellaswag | winogrande | arc_easy | arc_challenge | obqa | avg. | 68 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 69 | | OPT-125M | 41.3 | 25.2 | 57.5 | 62.0 | 41.9 | 31.1 | 31.2 | 50.8 | 42.6 | 70 | | GPT-neo-125M | 40.7 | 24.8 | 61.3 | 62.5 | 41.9 | 29.7 | 31.6 | 50.7 | 42.9 | 71 | | Pythia-160M | 40.0 | 25.3 | 59.5 | 62.0 | 41.5 | 29.9 | 31.2 | 50.9 | 42.5 | 72 | | **MobileLLM-125M** | 43.9 | 27.1 | 60.2 | 65.3 | 42.4 | 38.9 | 39.5 | 53.1 | **46.3** | 73 | | **MobileLLM-LS-125M** | 45.8 | 28.7 | 60.4 | 65.7 | 42.9 | 39.5 | 41.1 | 52.1 | **47.0** | 74 | 75 | ### MobileLLM-350M 76 | 77 | | model | boolq | piqa | siqa | hellaswag | winogrande | arc_easy | arc_challenge | obqa | avg.
| 78 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 79 | | OPT-350M | 41.9 | 25.7 | 54.0 | 64.8 | 42.6 | 36.2 | 33.3 | 52.4 | 43.9 | 80 | | Pythia-410M | 47.1 | 30.3 | 55.3 | 67.2 | 43.1 | 40.1 | 36.2 | 53.4 | 46.6 | 81 | | **MobileLLM-350M** | 53.8 | 33.5 | 62.4 | 68.6 | 44.7 | 49.6 | 40.0 | 57.6 | **51.3** | 82 | | **MobileLLM-LS-350M** | 54.4 | 32.5 | 62.8 | 69.8 | 44.1 | 50.6 | 45.8 | 57.2 | **52.1** | 83 | 84 | ### MobileLLM-600M 85 | 86 | | model | boolq | piqa | siqa | hellaswag | winogrande | arc_easy | arc_challenge | obqa | avg. | 87 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 88 | | Qwen1.5-500M | 54.7 | 32.1 | 46.9 | 68.9 | 46.0 | 48.8 | 37.7 | 55.0 | 48.8 | 89 | | BLOOM-560M | 43.7 | 27.5 | 53.7 | 65.1 | 42.5 | 36.5 | 32.6 | 52.2 | 44.2 | 90 | | MobiLlama-800M | 52.0 | 31.7 | 54.6 | 73.0 | 43.3 | 52.3 | 42.5 | 56.3 | 50.7 | 91 | | **MobileLLM-600M** | 58.1 | 35.8 | 61.0 | 72.3 | 44.9 | 55.9 | 47.9 | 58.6 | **54.3** | 92 | 93 | ### MobileLLM-1B 94 | 95 | | model | boolq | piqa | siqa | hellaswag | winogrande | arc_easy | arc_challenge | obqa | avg. | 96 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 97 | | Pythia-1B | 49.9 | 30.4 | 58.7 | 69.2 | 43.3 | 47.4 | 38.6 | 52.2 | 48.7 | 98 | | MobiLlama-1B | 59.7 | 38.4 | 59.2 | 74.5 | 44.9 | 62.0 | 43.7 | 59.0 | 55.2 | 99 | | Falcon-1B | 59.5 | 38.4 | 63.9 | 74.6 | 44.6 | 62.9 | 45.6 | 60.9 | 56.3 | 100 | | BLOOM-1.1B | 47.6 | 27.3 | 58.6 | 67.0 | 42.4 | 42.2 | 36.6 | 53.8 | 46.9 | 101 | | TinyLlama-1.1B | 59.2 | 37.1 | 58.1 | 72.9 | 43.9 | 59.1 | 44.7 | 58.8 | 54.2 | 102 | | **MobileLLM-1B** | 63.0 | 39.0 | 66.7 | 74.4 | 45.0 | 61.4 | 46.8 | 62.3 | **57.3** | 103 | 104 | ### MobileLLM-1.5B 105 | 106 | | model | boolq | piqa | siqa | hellaswag | winogrande | arc_easy | arc_challenge | obqa | avg. | 107 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 108 | | GPT-neo-1.3B | 51.3 | 33.0 | 61.8 | 70.9 | 43.7 | 48.6 | 41.2 | 54.5 | 50.6 | 109 | | OPT-1.3B | 54.4 | 31.7 | 58.4 | 71.5 | 44.7 | 53.7 | 44.6 | 59.1 | 52.3 | 110 | | BLOOM-1.7B | 50.9 | 31.2 | 61.7 | 70.0 | 43.2 | 47.2 | 36.2 | 56.1 | 49.6 | 111 | | Qwen1.5-1.8B | 61.1 | 36.5 | 68.3 | 74.1 | 47.2 | 60.4 | 42.9 | 61.2 | 56.5 | 112 | | GPT-neo-2.7B | 55.8 | 34.3 | 62.4 | 72.9 | 43.6 | 55.6 | 40.0 | 57.9 | 52.8 | 113 | | OPT-2.7B | 56.6 | 34.6 | 61.8 | 74.5 | 45.6 | 60.2 | 48.2 | 59.6 | 55.1 | 114 | | Pythia-2.8B | 59.4 | 38.9 | 66.1 | 73.8 | 44.5 | 59.6 | 45.0 | 59.4 | 55.8 | 115 | | BLOOM-3B | 55.1 | 33.6 | 62.1 | 70.5 | 43.2 | 53.9 | 41.6 | 58.2 | 52.3 | 116 | | **MobileLLM-1.5B** | 67.5 | 40.9 | 65.7 | 74.8 | 46.4 | 64.5 | 50.5 | 64.7 | **59.4** | 117 | 118 | ## Acknowledgement 119 | 120 | This code is partially based on the HuggingFace Transformers repo. 121 | 122 | ## Contact 123 | 124 | Zechun Liu, Meta Inc (zechunliu at meta dot com) 125 | 126 | Changsheng Zhao, Meta Inc (cszhao at meta dot com) 127 | 128 | ## License 129 | 130 | MobileLLM is CC-BY-NC 4.0 licensed as of now. 131 | 132 | -------------------------------------------------------------------------------- /pretrain.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright (c) Meta Platforms, Inc. and affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree.
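# Overview: this script parses ModelArguments/DataArguments/TrainingArguments via
# utils/process_args.py, builds LlamaForCausalLM from a config under ./configs/<size>/
# with optional embedding sharing and layer sharing, streams fixed-length token_id
# sequences from per-node jsonl shards through JSONLIterator, and runs distributed
# pretraining with PretrainTrainer under torchrun.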
7 | 8 | import json 9 | import logging 10 | import os 11 | from logging import Logger 12 | import re 13 | import sys 14 | from typing import Dict, Iterator, List, Optional 15 | import datetime 16 | 17 | import torch 18 | import transformers 19 | 20 | from utils.modeling_llama import LlamaForCausalLM 21 | from utils.pretrain_trainer import PretrainTrainer 22 | from utils.process_args import process_args 23 | from torch import distributed as dist 24 | from torch.utils.data import Dataset, DataLoader 25 | from transformers import AutoConfig, default_data_collator 26 | 27 | # Define a utility method for setting the logging parameters of a logger 28 | def get_logger(logger_name: Optional[str]) -> logging.Logger: 29 | # Get the logger with the specified name 30 | logger = logging.getLogger(logger_name) 31 | 32 | # Set the logging level of the logger to INFO 33 | logger.setLevel(logging.INFO) 34 | 35 | # Define a formatter for the log messages 36 | formatter = logging.Formatter( 37 | "%(asctime)s - %(name)s - %(levelname)s - %(message)s" 38 | ) 39 | 40 | # Create a console handler for outputting log messages to the console 41 | console_handler = logging.StreamHandler() 42 | console_handler.setFormatter(formatter) 43 | 44 | # Add the console handler to the logger 45 | logger.addHandler(console_handler) 46 | 47 | return logger 48 | 49 | 50 | log: Logger = get_logger("mobileLLM") 51 | 52 | 53 | def get_local_rank() -> int: 54 | if os.environ.get("LOCAL_RANK"): 55 | return int(os.environ["LOCAL_RANK"]) 56 | else: 57 | logging.warning( 58 | "LOCAL_RANK from os.environ is None, fall back to get rank from torch distributed" 59 | ) 60 | return torch.distributed.get_rank() 61 | 62 | def get_global_rank() -> int: 63 | """ 64 | Get rank using torch.distributed if available. Otherwise, the RANK env var instead if initialized. 65 | Returns 0 if neither condition is met. 
66 | """ 67 | if torch.distributed.is_available() and torch.distributed.is_initialized(): 68 | return torch.distributed.get_rank() 69 | 70 | environ_rank = os.environ.get("RANK", "") 71 | if environ_rank.isdecimal(): 72 | return int(os.environ["RANK"]) 73 | 74 | return 0 75 | 76 | 77 | def get_folder_paths(directory: str) -> List[str]: 78 | folder_paths = [ 79 | os.path.join(directory, item) 80 | for item in os.listdir(directory) 81 | if os.path.isdir(os.path.join(directory, item)) 82 | ] 83 | return folder_paths 84 | 85 | def get_iterable_dataloader(iterator, batch_size): 86 | def custom_collate_fn(batch): 87 | return dict(input_ids=torch.stack(batch), labels=torch.stack(batch)) 88 | class IteratorDataset(Dataset): 89 | def __init__(self, iterator): 90 | self.iterator = iterator 91 | def __len__(self): 92 | # Return an arbitrarily large number 93 | return sys.maxsize 94 | def __getitem__(self, index): 95 | try: 96 | ids = next(self.iterator) 97 | return torch.tensor(ids) 98 | except StopIteration: 99 | raise IndexError 100 | # Create dataset 101 | dataset = IteratorDataset(iterator) 102 | # Create DataLoader with custom collate function 103 | dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=custom_collate_fn) 104 | return dataloader 105 | 106 | class JSONLIterator: 107 | def __init__( 108 | self, 109 | fpath: str, 110 | world_size: int, 111 | world_rank: int, 112 | infinite: bool, 113 | ) -> None: 114 | assert 0 <= world_rank < world_size, (world_rank, world_size) 115 | self.f = open(fpath, "r", encoding="utf-8", errors="ignore") 116 | self.fpath = fpath 117 | self.world_size = world_size 118 | self.world_rank = world_rank 119 | self.line_num = 0 120 | self.iter = iter(self.gen(infinite)) 121 | self.iter_id = 0 122 | 123 | def __iter__(self) -> "JSONLIterator": 124 | return self 125 | 126 | def __next__(self): 127 | return next(self.iter) 128 | 129 | def gen(self, infinite: bool) -> Iterator[Dict]: 130 | while True: 131 | log.info(f"Starting iteration {self.iter_id} over {self.fpath} ...") 132 | self.iter_id += 1 133 | while True: 134 | try: 135 | line, self.line_num = self.f.readline(), self.line_num + 1 136 | if not line: 137 | break 138 | if (self.line_num - 1) % self.world_size == self.world_rank: 139 | try: 140 | yield json.loads(line)['token_ids'] 141 | except json.JSONDecodeError as e: 142 | print("Failed to parse JSON:", e) 143 | except Exception as e: 144 | print(f"Unexpected Jsonl error: {e}") 145 | continue # Skip to the next iteration 146 | except Exception as e: 147 | print(f"Unexpected error while reading line: {e}") 148 | continue 149 | if not infinite: 150 | break 151 | self.f.seek(0) 152 | self.line_num = 0 153 | self.f.close() 154 | 155 | def train() -> None: 156 | dist.init_process_group( 157 | backend="cpu:gloo,cuda:nccl", timeout=datetime.timedelta(hours=8) 158 | ) 159 | model_args, data_args, training_args = process_args() 160 | 161 | global_rank = get_global_rank() 162 | local_rank = get_local_rank() 163 | 164 | log.info(f"Global Rank: {global_rank}") 165 | log.info(f"Local Rank: {local_rank}") 166 | config = AutoConfig.from_pretrained(model_args.input_model_filename) 167 | config.share_embedding = model_args.share_embedding 168 | config.layer_sharing = model_args.layer_sharing 169 | model = LlamaForCausalLM( 170 | config=config, 171 | ) 172 | log.info( 173 | "model size is " 174 | + str(sum(param.numel() for param in model.model.parameters()) / 1024 / 1024) 175 | ) 176 | log.info("Start to load tokenizer...") 177 | tokenizer = 
transformers.AutoTokenizer.from_pretrained( 178 | pretrained_model_name_or_path=model_args.input_model_filename, 179 | cache_dir=training_args.cache_dir, 180 | model_max_length=training_args.model_max_length, 181 | padding_side="right", 182 | use_fast=False, 183 | ) 184 | log.info("Complete tokenizer loading...") 185 | 186 | # go to current node's data rank 187 | local_data_folder = os.path.join(data_args.train_data_local_path, str(global_rank//8+1)) 188 | 189 | # Data load locally from shard total data, so world_size is 8 and rank is the current node's local rank 190 | log.info("world_rank for data loader is " + str(local_rank)) 191 | log.info("world_size for data loader is " + str(8)) 192 | assert os.path.isdir(local_data_folder), local_data_folder 193 | fname_match_re: str = r"\.jsonl$" 194 | 195 | # get the jsonl file name. Currently only support 1 file/folder per node 196 | fnames = [x for x in os.listdir(local_data_folder) if re.search(fname_match_re, x)][0] 197 | data_iter = JSONLIterator( 198 | fpath=os.path.join(local_data_folder,fnames), 199 | world_rank=local_rank, 200 | world_size=8, 201 | infinite=True, 202 | ) 203 | trainer = PretrainTrainer( 204 | model=model, 205 | tokenizer=tokenizer, 206 | args=training_args, 207 | train_dataset=get_iterable_dataloader(data_iter, training_args.per_device_train_batch_size) if training_args.do_train else None, 208 | eval_dataset=None, 209 | data_collator=default_data_collator, 210 | ) 211 | torch.distributed.barrier(device_ids=[local_rank]) 212 | 213 | if training_args.do_train: 214 | _ = trainer.train() 215 | trainer.save_state() 216 | 217 | torch.distributed.barrier(device_ids=[local_rank]) 218 | 219 | 220 | if __name__ == "__main__": 221 | train() 222 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 
30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More_considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial 4.0 International Public 58 | License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial 4.0 International Public License ("Public 63 | License"). To the extent this Public License may be interpreted as a 64 | contract, You are granted the Licensed Rights in consideration of Your 65 | acceptance of these terms and conditions, and the Licensor grants You 66 | such rights in consideration of benefits the Licensor receives from 67 | making the Licensed Material available under these terms and 68 | conditions. 69 | 70 | Section 1 -- Definitions. 71 | 72 | a. Adapted Material means material subject to Copyright and Similar 73 | Rights that is derived from or based upon the Licensed Material 74 | and in which the Licensed Material is translated, altered, 75 | arranged, transformed, or otherwise modified in a manner requiring 76 | permission under the Copyright and Similar Rights held by the 77 | Licensor. For purposes of this Public License, where the Licensed 78 | Material is a musical work, performance, or sound recording, 79 | Adapted Material is always produced where the Licensed Material is 80 | synched in timed relation with a moving image. 81 | 82 | b. Adapter's License means the license You apply to Your Copyright 83 | and Similar Rights in Your contributions to Adapted Material in 84 | accordance with the terms and conditions of this Public License. 85 | 86 | c. Copyright and Similar Rights means copyright and/or similar rights 87 | closely related to copyright including, without limitation, 88 | performance, broadcast, sound recording, and Sui Generis Database 89 | Rights, without regard to how the rights are labeled or 90 | categorized. For purposes of this Public License, the rights 91 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 92 | Rights. 93 | d. 
Effective Technological Measures means those measures that, in the 94 | absence of proper authority, may not be circumvented under laws 95 | fulfilling obligations under Article 11 of the WIPO Copyright 96 | Treaty adopted on December 20, 1996, and/or similar international 97 | agreements. 98 | 99 | e. Exceptions and Limitations means fair use, fair dealing, and/or 100 | any other exception or limitation to Copyright and Similar Rights 101 | that applies to Your use of the Licensed Material. 102 | 103 | f. Licensed Material means the artistic or literary work, database, 104 | or other material to which the Licensor applied this Public 105 | License. 106 | 107 | g. Licensed Rights means the rights granted to You subject to the 108 | terms and conditions of this Public License, which are limited to 109 | all Copyright and Similar Rights that apply to Your use of the 110 | Licensed Material and that the Licensor has authority to license. 111 | 112 | h. Licensor means the individual(s) or entity(ies) granting rights 113 | under this Public License. 114 | 115 | i. NonCommercial means not primarily intended for or directed towards 116 | commercial advantage or monetary compensation. For purposes of 117 | this Public License, the exchange of the Licensed Material for 118 | other material subject to Copyright and Similar Rights by digital 119 | file-sharing or similar means is NonCommercial provided there is 120 | no payment of monetary compensation in connection with the 121 | exchange. 122 | 123 | j. Share means to provide material to the public by any means or 124 | process that requires permission under the Licensed Rights, such 125 | as reproduction, public display, public performance, distribution, 126 | dissemination, communication, or importation, and to make material 127 | available to the public including in ways that members of the 128 | public may access the material from a place and at a time 129 | individually chosen by them. 130 | 131 | k. Sui Generis Database Rights means rights other than copyright 132 | resulting from Directive 96/9/EC of the European Parliament and of 133 | the Council of 11 March 1996 on the legal protection of databases, 134 | as amended and/or succeeded, as well as other essentially 135 | equivalent rights anywhere in the world. 136 | 137 | l. You means the individual or entity exercising the Licensed Rights 138 | under this Public License. Your has a corresponding meaning. 139 | 140 | Section 2 -- Scope. 141 | 142 | a. License grant. 143 | 144 | 1. Subject to the terms and conditions of this Public License, 145 | the Licensor hereby grants You a worldwide, royalty-free, 146 | non-sublicensable, non-exclusive, irrevocable license to 147 | exercise the Licensed Rights in the Licensed Material to: 148 | 149 | a. reproduce and Share the Licensed Material, in whole or 150 | in part, for NonCommercial purposes only; and 151 | 152 | b. produce, reproduce, and Share Adapted Material for 153 | NonCommercial purposes only. 154 | 155 | 2. Exceptions and Limitations. For the avoidance of doubt, where 156 | Exceptions and Limitations apply to Your use, this Public 157 | License does not apply, and You do not need to comply with 158 | its terms and conditions. 159 | 160 | 3. Term. The term of this Public License is specified in Section 161 | 6(a). 162 | 163 | 4. Media and formats; technical modifications allowed. 
The 164 | Licensor authorizes You to exercise the Licensed Rights in 165 | all media and formats whether now known or hereafter created, 166 | and to make technical modifications necessary to do so. The 167 | Licensor waives and/or agrees not to assert any right or 168 | authority to forbid You from making technical modifications 169 | necessary to exercise the Licensed Rights, including 170 | technical modifications necessary to circumvent Effective 171 | Technological Measures. For purposes of this Public License, 172 | simply making modifications authorized by this Section 2(a) 173 | (4) never produces Adapted Material. 174 | 175 | 5. Downstream recipients. 176 | 177 | a. Offer from the Licensor -- Licensed Material. Every 178 | recipient of the Licensed Material automatically 179 | receives an offer from the Licensor to exercise the 180 | Licensed Rights under the terms and conditions of this 181 | Public License. 182 | 183 | b. No downstream restrictions. You may not offer or impose 184 | any additional or different terms or conditions on, or 185 | apply any Effective Technological Measures to, the 186 | Licensed Material if doing so restricts exercise of the 187 | Licensed Rights by any recipient of the Licensed 188 | Material. 189 | 190 | 6. No endorsement. Nothing in this Public License constitutes or 191 | may be construed as permission to assert or imply that You 192 | are, or that Your use of the Licensed Material is, connected 193 | with, or sponsored, endorsed, or granted official status by, 194 | the Licensor or others designated to receive attribution as 195 | provided in Section 3(a)(1)(A)(i). 196 | 197 | b. Other rights. 198 | 199 | 1. Moral rights, such as the right of integrity, are not 200 | licensed under this Public License, nor are publicity, 201 | privacy, and/or other similar personality rights; however, to 202 | the extent possible, the Licensor waives and/or agrees not to 203 | assert any such rights held by the Licensor to the limited 204 | extent necessary to allow You to exercise the Licensed 205 | Rights, but not otherwise. 206 | 207 | 2. Patent and trademark rights are not licensed under this 208 | Public License. 209 | 210 | 3. To the extent possible, the Licensor waives any right to 211 | collect royalties from You for the exercise of the Licensed 212 | Rights, whether directly or through a collecting society 213 | under any voluntary or waivable statutory or compulsory 214 | licensing scheme. In all other cases the Licensor expressly 215 | reserves any right to collect such royalties, including when 216 | the Licensed Material is used other than for NonCommercial 217 | purposes. 218 | 219 | Section 3 -- License Conditions. 220 | 221 | Your exercise of the Licensed Rights is expressly made subject to the 222 | following conditions. 223 | 224 | a. Attribution. 225 | 226 | 1. If You Share the Licensed Material (including in modified 227 | form), You must: 228 | 229 | a. retain the following if it is supplied by the Licensor 230 | with the Licensed Material: 231 | 232 | i. identification of the creator(s) of the Licensed 233 | Material and any others designated to receive 234 | attribution, in any reasonable manner requested by 235 | the Licensor (including by pseudonym if 236 | designated); 237 | 238 | ii. a copyright notice; 239 | 240 | iii. a notice that refers to this Public License; 241 | 242 | iv. a notice that refers to the disclaimer of 243 | warranties; 244 | 245 | v. 
a URI or hyperlink to the Licensed Material to the 246 | extent reasonably practicable; 247 | 248 | b. indicate if You modified the Licensed Material and 249 | retain an indication of any previous modifications; and 250 | 251 | c. indicate the Licensed Material is licensed under this 252 | Public License, and include the text of, or the URI or 253 | hyperlink to, this Public License. 254 | 255 | 2. You may satisfy the conditions in Section 3(a)(1) in any 256 | reasonable manner based on the medium, means, and context in 257 | which You Share the Licensed Material. For example, it may be 258 | reasonable to satisfy the conditions by providing a URI or 259 | hyperlink to a resource that includes the required 260 | information. 261 | 262 | 3. If requested by the Licensor, You must remove any of the 263 | information required by Section 3(a)(1)(A) to the extent 264 | reasonably practicable. 265 | 266 | 4. If You Share Adapted Material You produce, the Adapter's 267 | License You apply must not prevent recipients of the Adapted 268 | Material from complying with this Public License. 269 | 270 | Section 4 -- Sui Generis Database Rights. 271 | 272 | Where the Licensed Rights include Sui Generis Database Rights that 273 | apply to Your use of the Licensed Material: 274 | 275 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 276 | to extract, reuse, reproduce, and Share all or a substantial 277 | portion of the contents of the database for NonCommercial purposes 278 | only; 279 | 280 | b. if You include all or a substantial portion of the database 281 | contents in a database in which You have Sui Generis Database 282 | Rights, then the database in which You have Sui Generis Database 283 | Rights (but not its individual contents) is Adapted Material; and 284 | 285 | c. You must comply with the conditions in Section 3(a) if You Share 286 | all or a substantial portion of the contents of the database. 287 | 288 | For the avoidance of doubt, this Section 4 supplements and does not 289 | replace Your obligations under this Public License where the Licensed 290 | Rights include other Copyright and Similar Rights. 291 | 292 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 293 | 294 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 295 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 296 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 297 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 298 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 299 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 300 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 301 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 302 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 303 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 304 | 305 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 306 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 307 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 308 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 309 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 310 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 311 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 312 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 313 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 
314 | 315 | c. The disclaimer of warranties and limitation of liability provided 316 | above shall be interpreted in a manner that, to the extent 317 | possible, most closely approximates an absolute disclaimer and 318 | waiver of all liability. 319 | 320 | Section 6 -- Term and Termination. 321 | 322 | a. This Public License applies for the term of the Copyright and 323 | Similar Rights licensed here. However, if You fail to comply with 324 | this Public License, then Your rights under this Public License 325 | terminate automatically. 326 | 327 | b. Where Your right to use the Licensed Material has terminated under 328 | Section 6(a), it reinstates: 329 | 330 | 1. automatically as of the date the violation is cured, provided 331 | it is cured within 30 days of Your discovery of the 332 | violation; or 333 | 334 | 2. upon express reinstatement by the Licensor. 335 | 336 | For the avoidance of doubt, this Section 6(b) does not affect any 337 | right the Licensor may have to seek remedies for Your violations 338 | of this Public License. 339 | 340 | c. For the avoidance of doubt, the Licensor may also offer the 341 | Licensed Material under separate terms or conditions or stop 342 | distributing the Licensed Material at any time; however, doing so 343 | will not terminate this Public License. 344 | 345 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 346 | License. 347 | 348 | Section 7 -- Other Terms and Conditions. 349 | 350 | a. The Licensor shall not be bound by any additional or different 351 | terms or conditions communicated by You unless expressly agreed. 352 | 353 | b. Any arrangements, understandings, or agreements regarding the 354 | Licensed Material not stated herein are separate from and 355 | independent of the terms and conditions of this Public License. 356 | 357 | Section 8 -- Interpretation. 358 | 359 | a. For the avoidance of doubt, this Public License does not, and 360 | shall not be interpreted to, reduce, limit, restrict, or impose 361 | conditions on any use of the Licensed Material that could lawfully 362 | be made without permission under this Public License. 363 | 364 | b. To the extent possible, if any provision of this Public License is 365 | deemed unenforceable, it shall be automatically reformed to the 366 | minimum extent necessary to make it enforceable. If the provision 367 | cannot be reformed, it shall be severed from this Public License 368 | without affecting the enforceability of the remaining terms and 369 | conditions. 370 | 371 | c. No term or condition of this Public License will be waived and no 372 | failure to comply consented to unless expressly agreed to by the 373 | Licensor. 374 | 375 | d. Nothing in this Public License constitutes or may be interpreted 376 | as a limitation upon, or waiver of, any privileges and immunities 377 | that apply to the Licensor or You, including from the legal 378 | processes of any jurisdiction or authority. 379 | 380 | ======================================================================= 381 | 382 | Creative Commons is not a party to its public 383 | licenses. Notwithstanding, Creative Commons may elect to apply one of 384 | its public licenses to material it publishes and in those instances 385 | will be considered the “Licensor.” The text of the Creative Commons 386 | public licenses is dedicated to the public domain under the CC0 Public 387 | Domain Dedication. 
Except for the limited purpose of indicating that 388 | material is shared under a Creative Commons public license or as 389 | otherwise permitted by the Creative Commons policies published at 390 | creativecommons.org/policies, Creative Commons does not authorize the 391 | use of the trademark "Creative Commons" or any other trademark or logo 392 | of Creative Commons without its prior written consent including, 393 | without limitation, in connection with any unauthorized modifications 394 | to any of its public licenses or any other arrangements, 395 | understandings, or agreements concerning use of licensed material. For 396 | the avoidance of doubt, this paragraph does not form part of the 397 | public licenses. 398 | 399 | Creative Commons may be contacted at creativecommons.org. -------------------------------------------------------------------------------- /utils/modeling_llama.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | # Copyright (c) Meta Platforms, Inc. and affiliates. 3 | # All rights reserved. 4 | # 5 | # This source code is licensed under the license found in the 6 | # LICENSE file in the root directory of this source tree. 7 | 8 | # coding=utf-8 9 | # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. 10 | # 11 | # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX 12 | # and OPT implementations in this library. It has been modified from its 13 | # original forms to accommodate minor architectural differences compared 14 | # to GPT-NeoX and OPT used by the Meta AI team that trained the model. 15 | # 16 | # Licensed under the Apache License, Version 2.0 (the "License"); 17 | # you may not use this file except in compliance with the License. 18 | # You may obtain a copy of the License at 19 | # 20 | # http://www.apache.org/licenses/LICENSE-2.0 21 | # 22 | # Unless required by applicable law or agreed to in writing, software 23 | # distributed under the License is distributed on an "AS IS" BASIS, 24 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 25 | # See the License for the specific language governing permissions and 26 | # limitations under the License. 
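#
# Editorial note (not part of the upstream header): this file follows the Hugging Face
# `modeling_llama.py` implementation, with two MobileLLM-specific behaviors that appear
# further below in this file:
#   * `config.layer_sharing`   -- when true, `LlamaModel.forward` runs each decoder layer
#     twice in immediate succession (block-wise weight sharing), so effective depth grows
#     without adding parameters.
#   * `config.share_embedding` -- when true, `LlamaForCausalLM` does not create a separate
#     `lm_head` and instead reuses `embed_tokens.weight` for the output projection.
#
# A minimal, hypothetical construction sketch (kept commented out so the module itself is
# unchanged; the repo's real entry point is pretrain.py / utils/process_args.py, and the
# config path and flag values below are illustrative assumptions only):
#
#     from transformers import LlamaConfig
#     from utils.modeling_llama import LlamaForCausalLM
#
#     config = LlamaConfig.from_pretrained("configs/125M")  # repo-provided config dir
#     config.layer_sharing = True       # assumed flag: repeat each decoder layer once
#     config.share_embedding = True     # assumed flag: tie input/output embeddings
#     model = LlamaForCausalLM(config)  # randomly initialized, ready for pretraining
#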
27 | import math 28 | from typing import List, Optional, Tuple, Union 29 | 30 | import torch 31 | import torch.nn.functional as F 32 | import torch.utils.checkpoint 33 | from torch import nn 34 | from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss 35 | 36 | from transformers.activations import ACT2FN 37 | from transformers.cache_utils import Cache, DynamicCache, StaticCache 38 | from transformers.modeling_attn_mask_utils import AttentionMaskConverter 39 | from transformers.modeling_outputs import ( 40 | BaseModelOutputWithPast, 41 | CausalLMOutputWithPast, 42 | QuestionAnsweringModelOutput, 43 | SequenceClassifierOutputWithPast, 44 | TokenClassifierOutput, 45 | ) 46 | from transformers.modeling_utils import PreTrainedModel 47 | from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS 48 | from transformers.utils import ( 49 | add_start_docstrings, 50 | add_start_docstrings_to_model_forward, 51 | is_flash_attn_2_available, 52 | is_flash_attn_greater_or_equal_2_10, 53 | logging, 54 | replace_return_docstrings, 55 | ) 56 | from transformers.models.llama.configuration_llama import LlamaConfig 57 | 58 | 59 | if is_flash_attn_2_available(): 60 | from flash_attn import flash_attn_func, flash_attn_varlen_func 61 | from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa 62 | 63 | 64 | logger = logging.get_logger(__name__) 65 | 66 | _CONFIG_FOR_DOC = "LlamaConfig" 67 | 68 | 69 | def _get_unpad_data(attention_mask): 70 | seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) 71 | indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() 72 | max_seqlen_in_batch = seqlens_in_batch.max().item() 73 | cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) 74 | return ( 75 | indices, 76 | cu_seqlens, 77 | max_seqlen_in_batch, 78 | ) 79 | 80 | 81 | class LlamaRMSNorm(nn.Module): 82 | def __init__(self, hidden_size, eps=1e-6): 83 | """ 84 | LlamaRMSNorm is equivalent to T5LayerNorm 85 | """ 86 | super().__init__() 87 | self.weight = nn.Parameter(torch.ones(hidden_size)) 88 | self.variance_epsilon = eps 89 | 90 | def forward(self, hidden_states): 91 | input_dtype = hidden_states.dtype 92 | hidden_states = hidden_states.to(torch.float32) 93 | variance = hidden_states.pow(2).mean(-1, keepdim=True) 94 | hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) 95 | return self.weight * hidden_states.to(input_dtype) 96 | 97 | 98 | ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm) 99 | 100 | 101 | class LlamaRotaryEmbedding(nn.Module): 102 | def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): 103 | super().__init__() 104 | self.scaling_factor = scaling_factor 105 | self.dim = dim 106 | self.max_position_embeddings = max_position_embeddings 107 | self.base = base 108 | inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) 109 | self.register_buffer("inv_freq", inv_freq, persistent=False) 110 | # For BC we register cos and sin cached 111 | self.max_seq_len_cached = max_position_embeddings 112 | 113 | @torch.no_grad() 114 | def forward(self, x, position_ids): 115 | # x: [bs, num_attention_heads, seq_len, head_size] 116 | inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) 117 | position_ids_expanded = position_ids[:, None, :].float() 118 | # Force float32 since bfloat16 loses precision on long contexts 119 | # See 
https://github.com/huggingface/transformers/pull/29285 120 | device_type = x.device.type 121 | device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" 122 | with torch.autocast(device_type=device_type, enabled=False): 123 | freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) 124 | emb = torch.cat((freqs, freqs), dim=-1) 125 | cos = emb.cos() 126 | sin = emb.sin() 127 | return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) 128 | 129 | 130 | class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): 131 | """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" 132 | 133 | def forward(self, x, position_ids): 134 | # difference to the original RoPE: a scaling factor is aplied to the position ids 135 | position_ids = position_ids.float() / self.scaling_factor 136 | cos, sin = super().forward(x, position_ids) 137 | return cos, sin 138 | 139 | 140 | class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): 141 | """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla""" 142 | 143 | def forward(self, x, position_ids): 144 | # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length 145 | seq_len = torch.max(position_ids) + 1 146 | if seq_len > self.max_position_embeddings: 147 | base = self.base * ( 148 | (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) 149 | ) ** (self.dim / (self.dim - 2)) 150 | inv_freq = 1.0 / ( 151 | base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim) 152 | ) 153 | self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: this may break with compilation 154 | 155 | cos, sin = super().forward(x, position_ids) 156 | return cos, sin 157 | 158 | 159 | def rotate_half(x): 160 | """Rotates half the hidden dims of the input.""" 161 | x1 = x[..., : x.shape[-1] // 2] 162 | x2 = x[..., x.shape[-1] // 2 :] 163 | return torch.cat((-x2, x1), dim=-1) 164 | 165 | 166 | def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): 167 | """Applies Rotary Position Embedding to the query and key tensors. 168 | 169 | Args: 170 | q (`torch.Tensor`): The query tensor. 171 | k (`torch.Tensor`): The key tensor. 172 | cos (`torch.Tensor`): The cosine part of the rotary embedding. 173 | sin (`torch.Tensor`): The sine part of the rotary embedding. 174 | position_ids (`torch.Tensor`, *optional*): 175 | Deprecated and unused. 176 | unsqueeze_dim (`int`, *optional*, defaults to 1): 177 | The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and 178 | sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note 179 | that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and 180 | k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes 181 | cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have 182 | the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. 183 | Returns: 184 | `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
185 | """ 186 | cos = cos.unsqueeze(unsqueeze_dim) 187 | sin = sin.unsqueeze(unsqueeze_dim) 188 | q_embed = (q * cos) + (rotate_half(q) * sin) 189 | k_embed = (k * cos) + (rotate_half(k) * sin) 190 | return q_embed, k_embed 191 | 192 | 193 | class LlamaMLP(nn.Module): 194 | def __init__(self, config): 195 | super().__init__() 196 | self.config = config 197 | self.hidden_size = config.hidden_size 198 | self.intermediate_size = config.intermediate_size 199 | self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) 200 | self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias) 201 | self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias) 202 | self.act_fn = ACT2FN[config.hidden_act] 203 | 204 | def forward(self, x): 205 | if self.config.pretraining_tp > 1: 206 | slice = self.intermediate_size // self.config.pretraining_tp 207 | gate_proj_slices = self.gate_proj.weight.split(slice, dim=0) 208 | up_proj_slices = self.up_proj.weight.split(slice, dim=0) 209 | down_proj_slices = self.down_proj.weight.split(slice, dim=1) 210 | 211 | gate_proj = torch.cat( 212 | [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1 213 | ) 214 | up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1) 215 | 216 | intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2) 217 | down_proj = [ 218 | F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp) 219 | ] 220 | down_proj = sum(down_proj) 221 | else: 222 | down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) 223 | 224 | return down_proj 225 | 226 | 227 | def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: 228 | """ 229 | This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, 230 | num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) 231 | """ 232 | batch, num_key_value_heads, slen, head_dim = hidden_states.shape 233 | if n_rep == 1: 234 | return hidden_states 235 | hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) 236 | return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) 237 | 238 | 239 | class LlamaAttention(nn.Module): 240 | """Multi-headed attention from 'Attention Is All You Need' paper""" 241 | 242 | def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None): 243 | super().__init__() 244 | self.config = config 245 | self.layer_idx = layer_idx 246 | if layer_idx is None: 247 | logger.warning_once( 248 | f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " 249 | "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " 250 | "when creating this class." 
251 | ) 252 | 253 | self.attention_dropout = config.attention_dropout 254 | self.hidden_size = config.hidden_size 255 | self.num_heads = config.num_attention_heads 256 | self.head_dim = self.hidden_size // self.num_heads 257 | self.num_key_value_heads = config.num_key_value_heads 258 | self.num_key_value_groups = self.num_heads // self.num_key_value_heads 259 | self.max_position_embeddings = config.max_position_embeddings 260 | self.rope_theta = config.rope_theta 261 | self.is_causal = True 262 | 263 | if (self.head_dim * self.num_heads) != self.hidden_size: 264 | raise ValueError( 265 | f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" 266 | f" and `num_heads`: {self.num_heads})." 267 | ) 268 | 269 | self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) 270 | self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) 271 | self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) 272 | self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) 273 | self._init_rope() 274 | 275 | def _init_rope(self): 276 | if self.config.rope_scaling is None: 277 | self.rotary_emb = LlamaRotaryEmbedding( 278 | self.head_dim, 279 | max_position_embeddings=self.max_position_embeddings, 280 | base=self.rope_theta, 281 | ) 282 | else: 283 | scaling_type = self.config.rope_scaling["type"] 284 | scaling_factor = self.config.rope_scaling["factor"] 285 | if scaling_type == "linear": 286 | self.rotary_emb = LlamaLinearScalingRotaryEmbedding( 287 | self.head_dim, 288 | max_position_embeddings=self.max_position_embeddings, 289 | scaling_factor=scaling_factor, 290 | base=self.rope_theta, 291 | ) 292 | elif scaling_type == "dynamic": 293 | self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding( 294 | self.head_dim, 295 | max_position_embeddings=self.max_position_embeddings, 296 | scaling_factor=scaling_factor, 297 | base=self.rope_theta, 298 | ) 299 | else: 300 | raise ValueError(f"Unknown RoPE scaling type {scaling_type}") 301 | 302 | def forward( 303 | self, 304 | hidden_states: torch.Tensor, 305 | attention_mask: Optional[torch.Tensor] = None, 306 | position_ids: Optional[torch.LongTensor] = None, 307 | past_key_value: Optional[Cache] = None, 308 | output_attentions: bool = False, 309 | use_cache: bool = False, 310 | cache_position: Optional[torch.LongTensor] = None, 311 | ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: 312 | bsz, q_len, _ = hidden_states.size() 313 | 314 | if self.config.pretraining_tp > 1: 315 | key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp 316 | query_slices = self.q_proj.weight.split( 317 | (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0 318 | ) 319 | key_slices = self.k_proj.weight.split(key_value_slicing, dim=0) 320 | value_slices = self.v_proj.weight.split(key_value_slicing, dim=0) 321 | 322 | query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)] 323 | query_states = torch.cat(query_states, dim=-1) 324 | 325 | key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)] 326 | key_states = torch.cat(key_states, dim=-1) 327 | 328 | value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)] 329 | value_states = torch.cat(value_states, dim=-1) 330 | 331 | 
else: 332 | query_states = self.q_proj(hidden_states) 333 | key_states = self.k_proj(hidden_states) 334 | value_states = self.v_proj(hidden_states) 335 | 336 | query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) 337 | key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) 338 | value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) 339 | 340 | cos, sin = self.rotary_emb(value_states, position_ids) 341 | query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) 342 | 343 | if past_key_value is not None: 344 | # sin and cos are specific to RoPE models; cache_position needed for the static cache 345 | cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} 346 | key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) 347 | 348 | key_states = repeat_kv(key_states, self.num_key_value_groups) 349 | value_states = repeat_kv(value_states, self.num_key_value_groups) 350 | 351 | attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) 352 | 353 | if attention_mask is not None: # no matter the length, we just slice it 354 | causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] 355 | attn_weights = attn_weights + causal_mask 356 | 357 | # upcast attention to fp32 358 | attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) 359 | attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) 360 | attn_output = torch.matmul(attn_weights, value_states) 361 | 362 | if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): 363 | raise ValueError( 364 | f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" 365 | f" {attn_output.size()}" 366 | ) 367 | 368 | attn_output = attn_output.transpose(1, 2).contiguous() 369 | 370 | attn_output = attn_output.reshape(bsz, q_len, -1) 371 | 372 | if self.config.pretraining_tp > 1: 373 | attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2) 374 | o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1) 375 | attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)]) 376 | else: 377 | attn_output = self.o_proj(attn_output) 378 | 379 | if not output_attentions: 380 | attn_weights = None 381 | 382 | return attn_output, attn_weights, past_key_value 383 | 384 | 385 | class LlamaFlashAttention2(LlamaAttention): 386 | """ 387 | Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays 388 | untouched. The only required change would be on the forward pass where it needs to correctly call the public API of 389 | flash attention and deal with padding tokens in case the input contains any of them. 390 | """ 391 | 392 | def __init__(self, *args, **kwargs): 393 | super().__init__(*args, **kwargs) 394 | 395 | # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. 396 | # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. 
397 | # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 398 | self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() 399 | 400 | def forward( 401 | self, 402 | hidden_states: torch.Tensor, 403 | attention_mask: Optional[torch.LongTensor] = None, 404 | position_ids: Optional[torch.LongTensor] = None, 405 | past_key_value: Optional[Cache] = None, 406 | output_attentions: bool = False, 407 | use_cache: bool = False, 408 | cache_position: Optional[torch.LongTensor] = None, 409 | ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: 410 | if isinstance(past_key_value, StaticCache): 411 | raise ValueError( 412 | "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " 413 | "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" 414 | ) 415 | 416 | output_attentions = False 417 | 418 | bsz, q_len, _ = hidden_states.size() 419 | 420 | query_states = self.q_proj(hidden_states) 421 | key_states = self.k_proj(hidden_states) 422 | value_states = self.v_proj(hidden_states) 423 | 424 | # Flash attention requires the input to have the shape 425 | # batch_size x seq_length x head_dim x hidden_dim 426 | # therefore we just need to keep the original shape 427 | query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) 428 | key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) 429 | value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) 430 | 431 | cos, sin = self.rotary_emb(value_states, position_ids) 432 | query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) 433 | 434 | if past_key_value is not None: 435 | # sin and cos are specific to RoPE models; cache_position needed for the static cache 436 | cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} 437 | key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) 438 | 439 | # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache 440 | # to be able to avoid many of these transpose/reshape/view. 441 | query_states = query_states.transpose(1, 2) 442 | key_states = key_states.transpose(1, 2) 443 | value_states = value_states.transpose(1, 2) 444 | 445 | dropout_rate = self.attention_dropout if self.training else 0.0 446 | 447 | # In PEFT, usually we cast the layer norms in float32 for training stability reasons 448 | # therefore the input hidden states gets silently casted in float32. Hence, we need 449 | # cast them back in the correct dtype just to be sure everything works as expected. 450 | # This might slowdown training & inference so it is recommended to not cast the LayerNorms 451 | # in fp32. 
(LlamaRMSNorm handles it correctly) 452 | 453 | input_dtype = query_states.dtype 454 | if input_dtype == torch.float32: 455 | if torch.is_autocast_enabled(): 456 | target_dtype = torch.get_autocast_gpu_dtype() 457 | # Handle the case where the model is quantized 458 | elif hasattr(self.config, "_pre_quantization_dtype"): 459 | target_dtype = self.config._pre_quantization_dtype 460 | else: 461 | target_dtype = self.q_proj.weight.dtype 462 | 463 | logger.warning_once( 464 | f"The input hidden states seems to be silently casted in float32, this might be related to" 465 | f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" 466 | f" {target_dtype}." 467 | ) 468 | 469 | query_states = query_states.to(target_dtype) 470 | key_states = key_states.to(target_dtype) 471 | value_states = value_states.to(target_dtype) 472 | 473 | attn_output = self._flash_attention_forward( 474 | query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate 475 | ) 476 | 477 | attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() 478 | attn_output = self.o_proj(attn_output) 479 | 480 | if not output_attentions: 481 | attn_weights = None 482 | 483 | return attn_output, attn_weights, past_key_value 484 | 485 | def _flash_attention_forward( 486 | self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None 487 | ): 488 | """ 489 | Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token 490 | first unpad the input, then computes the attention scores and pad the final attention scores. 491 | 492 | Args: 493 | query_states (`torch.Tensor`): 494 | Input query states to be passed to Flash Attention API 495 | key_states (`torch.Tensor`): 496 | Input key states to be passed to Flash Attention API 497 | value_states (`torch.Tensor`): 498 | Input value states to be passed to Flash Attention API 499 | attention_mask (`torch.Tensor`): 500 | The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the 501 | position of padding tokens and 1 for the position of non-padding tokens. 502 | dropout (`float`): 503 | Attention dropout 504 | softmax_scale (`float`, *optional*): 505 | The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) 506 | """ 507 | if not self._flash_attn_uses_top_left_mask: 508 | causal = self.is_causal 509 | else: 510 | # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. 
511 | causal = self.is_causal and query_length != 1 512 | 513 | # Contains at least one padding token in the sequence 514 | if attention_mask is not None: 515 | batch_size = query_states.shape[0] 516 | query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( 517 | query_states, key_states, value_states, attention_mask, query_length 518 | ) 519 | 520 | cu_seqlens_q, cu_seqlens_k = cu_seq_lens 521 | max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens 522 | 523 | attn_output_unpad = flash_attn_varlen_func( 524 | query_states, 525 | key_states, 526 | value_states, 527 | cu_seqlens_q=cu_seqlens_q, 528 | cu_seqlens_k=cu_seqlens_k, 529 | max_seqlen_q=max_seqlen_in_batch_q, 530 | max_seqlen_k=max_seqlen_in_batch_k, 531 | dropout_p=dropout, 532 | softmax_scale=softmax_scale, 533 | causal=causal, 534 | ) 535 | 536 | attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) 537 | else: 538 | attn_output = flash_attn_func( 539 | query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal 540 | ) 541 | 542 | return attn_output 543 | 544 | def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): 545 | indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) 546 | batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape 547 | 548 | key_layer = index_first_axis( 549 | key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k 550 | ) 551 | value_layer = index_first_axis( 552 | value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k 553 | ) 554 | if query_length == kv_seq_len: 555 | query_layer = index_first_axis( 556 | query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k 557 | ) 558 | cu_seqlens_q = cu_seqlens_k 559 | max_seqlen_in_batch_q = max_seqlen_in_batch_k 560 | indices_q = indices_k 561 | elif query_length == 1: 562 | max_seqlen_in_batch_q = 1 563 | cu_seqlens_q = torch.arange( 564 | batch_size + 1, dtype=torch.int32, device=query_layer.device 565 | ) # There is a memcpy here, that is very bad. 566 | indices_q = cu_seqlens_q[:-1] 567 | query_layer = query_layer.squeeze(1) 568 | else: 569 | # The -q_len: slice assumes left padding. 570 | attention_mask = attention_mask[:, -query_length:] 571 | query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) 572 | 573 | return ( 574 | query_layer, 575 | key_layer, 576 | value_layer, 577 | indices_q, 578 | (cu_seqlens_q, cu_seqlens_k), 579 | (max_seqlen_in_batch_q, max_seqlen_in_batch_k), 580 | ) 581 | 582 | 583 | class LlamaSdpaAttention(LlamaAttention): 584 | """ 585 | Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from 586 | `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to 587 | SDPA API. 
588 | """ 589 | 590 | # Adapted from LlamaAttention.forward 591 | def forward( 592 | self, 593 | hidden_states: torch.Tensor, 594 | attention_mask: Optional[torch.Tensor] = None, 595 | position_ids: Optional[torch.LongTensor] = None, 596 | past_key_value: Optional[Cache] = None, 597 | output_attentions: bool = False, 598 | use_cache: bool = False, 599 | cache_position: Optional[torch.LongTensor] = None, 600 | ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: 601 | if output_attentions: 602 | # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. 603 | logger.warning_once( 604 | "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 605 | 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 606 | ) 607 | return super().forward( 608 | hidden_states=hidden_states, 609 | attention_mask=attention_mask, 610 | position_ids=position_ids, 611 | past_key_value=past_key_value, 612 | output_attentions=output_attentions, 613 | use_cache=use_cache, 614 | cache_position=cache_position, 615 | ) 616 | 617 | bsz, q_len, _ = hidden_states.size() 618 | 619 | query_states = self.q_proj(hidden_states) 620 | key_states = self.k_proj(hidden_states) 621 | value_states = self.v_proj(hidden_states) 622 | 623 | query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) 624 | key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) 625 | value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) 626 | 627 | cos, sin = self.rotary_emb(value_states, position_ids) 628 | query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) 629 | 630 | if past_key_value is not None: 631 | # sin and cos are specific to RoPE models; cache_position needed for the static cache 632 | cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} 633 | key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) 634 | 635 | key_states = repeat_kv(key_states, self.num_key_value_groups) 636 | value_states = repeat_kv(value_states, self.num_key_value_groups) 637 | 638 | causal_mask = attention_mask 639 | if attention_mask is not None: 640 | causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] 641 | 642 | # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, 643 | # Reference: https://github.com/pytorch/pytorch/issues/112577. 644 | if query_states.device.type == "cuda" and causal_mask is not None: 645 | query_states = query_states.contiguous() 646 | key_states = key_states.contiguous() 647 | value_states = value_states.contiguous() 648 | 649 | # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment 650 | # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. 
651 | is_causal = True if causal_mask is None and q_len > 1 else False 652 | 653 | attn_output = torch.nn.functional.scaled_dot_product_attention( 654 | query_states, 655 | key_states, 656 | value_states, 657 | attn_mask=causal_mask, 658 | dropout_p=self.attention_dropout if self.training else 0.0, 659 | is_causal=is_causal, 660 | ) 661 | 662 | attn_output = attn_output.transpose(1, 2).contiguous() 663 | attn_output = attn_output.view(bsz, q_len, -1) 664 | 665 | attn_output = self.o_proj(attn_output) 666 | 667 | return attn_output, None, past_key_value 668 | 669 | 670 | LLAMA_ATTENTION_CLASSES = { 671 | "eager": LlamaAttention, 672 | "flash_attention_2": LlamaFlashAttention2, 673 | "sdpa": LlamaSdpaAttention, 674 | } 675 | 676 | 677 | class LlamaDecoderLayer(nn.Module): 678 | def __init__(self, config: LlamaConfig, layer_idx: int): 679 | super().__init__() 680 | self.hidden_size = config.hidden_size 681 | 682 | self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) 683 | 684 | self.mlp = LlamaMLP(config) 685 | self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) 686 | self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) 687 | 688 | def forward( 689 | self, 690 | hidden_states: torch.Tensor, 691 | attention_mask: Optional[torch.Tensor] = None, 692 | position_ids: Optional[torch.LongTensor] = None, 693 | past_key_value: Optional[Cache] = None, 694 | output_attentions: Optional[bool] = False, 695 | use_cache: Optional[bool] = False, 696 | cache_position: Optional[torch.LongTensor] = None, 697 | ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: 698 | """ 699 | Args: 700 | hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` 701 | attention_mask (`torch.FloatTensor`, *optional*): 702 | attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, 703 | query_sequence_length, key_sequence_length)` if default attention is used. 704 | output_attentions (`bool`, *optional*): 705 | Whether or not to return the attentions tensors of all attention layers. See `attentions` under 706 | returned tensors for more detail. 707 | use_cache (`bool`, *optional*): 708 | If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding 709 | (see `past_key_values`). 
710 | past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states 711 | """ 712 | residual = hidden_states 713 | 714 | hidden_states = self.input_layernorm(hidden_states) 715 | 716 | # Self Attention 717 | hidden_states, self_attn_weights, present_key_value = self.self_attn( 718 | hidden_states=hidden_states, 719 | attention_mask=attention_mask, 720 | position_ids=position_ids, 721 | past_key_value=past_key_value, 722 | output_attentions=output_attentions, 723 | use_cache=use_cache, 724 | cache_position=cache_position, 725 | ) 726 | hidden_states = residual + hidden_states 727 | 728 | # Fully Connected 729 | residual = hidden_states 730 | hidden_states = self.post_attention_layernorm(hidden_states) 731 | hidden_states = self.mlp(hidden_states) 732 | hidden_states = residual + hidden_states 733 | 734 | outputs = (hidden_states,) 735 | 736 | if output_attentions: 737 | outputs += (self_attn_weights,) 738 | 739 | if use_cache: 740 | outputs += (present_key_value,) 741 | 742 | return outputs 743 | 744 | 745 | LLAMA_START_DOCSTRING = r""" 746 | This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the 747 | library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads 748 | etc.) 749 | 750 | This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 751 | Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage 752 | and behavior. 753 | 754 | Parameters: 755 | config ([`LlamaConfig`]): 756 | Model configuration class with all the parameters of the model. Initializing with a config file does not 757 | load the weights associated with the model, only the configuration. Check out the 758 | [`~PreTrainedModel.from_pretrained`] method to load the model weights. 759 | """ 760 | 761 | 762 | @add_start_docstrings( 763 | "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", 764 | LLAMA_START_DOCSTRING, 765 | ) 766 | class LlamaPreTrainedModel(PreTrainedModel): 767 | config_class = LlamaConfig 768 | base_model_prefix = "model" 769 | supports_gradient_checkpointing = True 770 | _no_split_modules = ["LlamaDecoderLayer"] 771 | _skip_keys_device_placement = ["past_key_values"] 772 | _supports_flash_attn_2 = True 773 | _supports_sdpa = True 774 | _supports_cache_class = True 775 | _supports_quantized_cache = True 776 | _supports_static_cache = True 777 | 778 | def _init_weights(self, module): 779 | std = self.config.initializer_range 780 | if isinstance(module, nn.Linear): 781 | module.weight.data.normal_(mean=0.0, std=std) 782 | if module.bias is not None: 783 | module.bias.data.zero_() 784 | elif isinstance(module, nn.Embedding): 785 | module.weight.data.normal_(mean=0.0, std=std) 786 | if module.padding_idx is not None: 787 | module.weight.data[module.padding_idx].zero_() 788 | 789 | 790 | LLAMA_INPUTS_DOCSTRING = r""" 791 | Args: 792 | input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): 793 | Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide 794 | it. 795 | 796 | Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and 797 | [`PreTrainedTokenizer.__call__`] for details. 
798 | 799 | [What are input IDs?](../glossary#input-ids) 800 | attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): 801 | Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: 802 | 803 | - 1 for tokens that are **not masked**, 804 | - 0 for tokens that are **masked**. 805 | 806 | [What are attention masks?](../glossary#attention-mask) 807 | 808 | Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and 809 | [`PreTrainedTokenizer.__call__`] for details. 810 | 811 | If `past_key_values` is used, optionally only the last `input_ids` have to be input (see 812 | `past_key_values`). 813 | 814 | If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] 815 | and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more 816 | information on the default strategy. 817 | 818 | - 1 indicates the head is **not masked**, 819 | - 0 indicates the head is **masked**. 820 | position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): 821 | Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, 822 | config.n_positions - 1]`. 823 | 824 | [What are position IDs?](../glossary#position-ids) 825 | past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): 826 | Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention 827 | blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` 828 | returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. 829 | 830 | Two formats are allowed: 831 | - a [`~cache_utils.Cache`] instance; 832 | - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of 833 | shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy 834 | cache format. 835 | 836 | The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the 837 | legacy cache format will be returned. 838 | 839 | If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't 840 | have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` 841 | of shape `(batch_size, sequence_length)`. 842 | inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): 843 | Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This 844 | is useful if you want more control over how to convert `input_ids` indices into associated vectors than the 845 | model's internal embedding lookup matrix. 846 | use_cache (`bool`, *optional*): 847 | If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see 848 | `past_key_values`). 849 | output_attentions (`bool`, *optional*): 850 | Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned 851 | tensors for more detail. 852 | output_hidden_states (`bool`, *optional*): 853 | Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for 854 | more detail. 
855 | return_dict (`bool`, *optional*): 856 | Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 857 | cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): 858 | Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, 859 | this tensor is not affected by padding. It is used to update the cache in the correct position and to infer 860 | the complete sequence length. 861 | """ 862 | 863 | 864 | @add_start_docstrings( 865 | "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", 866 | LLAMA_START_DOCSTRING, 867 | ) 868 | class LlamaModel(LlamaPreTrainedModel): 869 | """ 870 | Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] 871 | 872 | Args: 873 | config: LlamaConfig 874 | """ 875 | 876 | def __init__(self, config: LlamaConfig): 877 | super().__init__(config) 878 | self.padding_idx = config.pad_token_id 879 | self.vocab_size = config.vocab_size 880 | 881 | self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) 882 | self.layers = nn.ModuleList( 883 | [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] 884 | ) 885 | self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) 886 | self.gradient_checkpointing = False 887 | 888 | self.layer_sharing = config.layer_sharing 889 | # Initialize weights and apply final processing 890 | self.post_init() 891 | 892 | def get_input_embeddings(self): 893 | return self.embed_tokens 894 | 895 | def set_input_embeddings(self, value): 896 | self.embed_tokens = value 897 | 898 | @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) 899 | def forward( 900 | self, 901 | input_ids: torch.LongTensor = None, 902 | attention_mask: Optional[torch.Tensor] = None, 903 | position_ids: Optional[torch.LongTensor] = None, 904 | past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, 905 | inputs_embeds: Optional[torch.FloatTensor] = None, 906 | use_cache: Optional[bool] = None, 907 | output_attentions: Optional[bool] = None, 908 | output_hidden_states: Optional[bool] = None, 909 | return_dict: Optional[bool] = None, 910 | cache_position: Optional[torch.LongTensor] = None, 911 | ) -> Union[Tuple, BaseModelOutputWithPast]: 912 | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions 913 | output_hidden_states = ( 914 | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 915 | ) 916 | use_cache = use_cache if use_cache is not None else self.config.use_cache 917 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 918 | 919 | if (input_ids is None) ^ (inputs_embeds is not None): 920 | raise ValueError( 921 | "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" 922 | ) 923 | 924 | if self.gradient_checkpointing and self.training and use_cache: 925 | logger.warning_once( 926 | "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
927 | ) 928 | use_cache = False 929 | 930 | if inputs_embeds is None: 931 | inputs_embeds = self.embed_tokens(input_ids) 932 | 933 | return_legacy_cache = False 934 | if use_cache and not isinstance(past_key_values, Cache): # kept for BC (non `Cache` `past_key_values` inputs) 935 | return_legacy_cache = True 936 | past_key_values = DynamicCache.from_legacy_cache(past_key_values) 937 | 938 | if cache_position is None: 939 | past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 940 | cache_position = torch.arange( 941 | past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device 942 | ) 943 | if position_ids is None: 944 | position_ids = cache_position.unsqueeze(0) 945 | 946 | causal_mask = self._update_causal_mask( 947 | attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions 948 | ) 949 | 950 | # embed positions 951 | hidden_states = inputs_embeds 952 | 953 | # decoder layers 954 | all_hidden_states = () if output_hidden_states else None 955 | all_self_attns = () if output_attentions else None 956 | next_decoder_cache = None 957 | 958 | for decoder_layer in self.layers: 959 | if output_hidden_states: 960 | all_hidden_states += (hidden_states,) 961 | 962 | if self.gradient_checkpointing and self.training: 963 | layer_outputs = self._gradient_checkpointing_func( 964 | decoder_layer.__call__, 965 | hidden_states, 966 | causal_mask, 967 | position_ids, 968 | past_key_values, 969 | output_attentions, 970 | use_cache, 971 | cache_position, 972 | ) 973 | else: 974 | layer_outputs = decoder_layer( 975 | hidden_states, 976 | attention_mask=causal_mask, 977 | position_ids=position_ids, 978 | past_key_value=past_key_values, 979 | output_attentions=output_attentions, 980 | use_cache=use_cache, 981 | cache_position=cache_position, 982 | ) 983 | 984 | hidden_states = layer_outputs[0] 985 | 986 | if use_cache: 987 | next_decoder_cache = layer_outputs[2 if output_attentions else 1] 988 | 989 | if output_attentions: 990 | all_self_attns += (layer_outputs[1],) 991 | 992 | # Repeat current layer if layer_sharing is enabled 993 | if self.layer_sharing: 994 | if output_hidden_states: 995 | all_hidden_states += (hidden_states,) 996 | 997 | if self.gradient_checkpointing and self.training: 998 | layer_outputs = self._gradient_checkpointing_func( 999 | decoder_layer.__call__, 1000 | hidden_states, 1001 | causal_mask, 1002 | position_ids, 1003 | past_key_values, 1004 | output_attentions, 1005 | use_cache, 1006 | cache_position, 1007 | ) 1008 | else: 1009 | layer_outputs = decoder_layer( 1010 | hidden_states, 1011 | attention_mask=causal_mask, 1012 | position_ids=position_ids, 1013 | past_key_value=past_key_values, 1014 | output_attentions=output_attentions, 1015 | use_cache=use_cache, 1016 | cache_position=cache_position, 1017 | ) 1018 | 1019 | hidden_states = layer_outputs[0] 1020 | 1021 | if use_cache: 1022 | next_decoder_cache = layer_outputs[2 if output_attentions else 1] 1023 | 1024 | if output_attentions: 1025 | all_self_attns += (layer_outputs[1],) 1026 | 1027 | hidden_states = self.norm(hidden_states) 1028 | 1029 | # add hidden states from the last decoder layer 1030 | if output_hidden_states: 1031 | all_hidden_states += (hidden_states,) 1032 | 1033 | next_cache = next_decoder_cache if use_cache else None 1034 | if return_legacy_cache: 1035 | next_cache = next_cache.to_legacy_cache() 1036 | 1037 | if not return_dict: 1038 | return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is 
not None) 1039 | return BaseModelOutputWithPast( 1040 | last_hidden_state=hidden_states, 1041 | past_key_values=next_cache, 1042 | hidden_states=all_hidden_states, 1043 | attentions=all_self_attns, 1044 | ) 1045 | 1046 | def _update_causal_mask( 1047 | self, 1048 | attention_mask: torch.Tensor, 1049 | input_tensor: torch.Tensor, 1050 | cache_position: torch.Tensor, 1051 | past_key_values: Cache, 1052 | output_attentions: bool, 1053 | ): 1054 | # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static 1055 | # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. 1056 | # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using 1057 | # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114 1058 | 1059 | if self.config._attn_implementation == "flash_attention_2": 1060 | if attention_mask is not None and 0.0 in attention_mask: 1061 | return attention_mask 1062 | return None 1063 | 1064 | # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in 1065 | # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail 1066 | # to infer the attention mask. 1067 | past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 1068 | using_static_cache = isinstance(past_key_values, StaticCache) 1069 | 1070 | # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward 1071 | if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: 1072 | if AttentionMaskConverter._ignore_causal_mask_sdpa( 1073 | attention_mask, 1074 | inputs_embeds=input_tensor, 1075 | past_key_values_length=past_seen_tokens, 1076 | is_training=self.training, 1077 | ): 1078 | return None 1079 | 1080 | dtype, device = input_tensor.dtype, input_tensor.device 1081 | min_dtype = torch.finfo(dtype).min 1082 | sequence_length = input_tensor.shape[1] 1083 | if using_static_cache: 1084 | target_length = past_key_values.get_max_length() 1085 | else: 1086 | target_length = ( 1087 | attention_mask.shape[-1] 1088 | if isinstance(attention_mask, torch.Tensor) 1089 | else past_seen_tokens + sequence_length + 1 1090 | ) 1091 | 1092 | if attention_mask is not None and attention_mask.dim() == 4: 1093 | # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing 1094 | if attention_mask.max() != 0: 1095 | raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0`") 1096 | causal_mask = attention_mask 1097 | else: 1098 | causal_mask = torch.full( 1099 | (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device 1100 | ) 1101 | if sequence_length != 1: 1102 | causal_mask = torch.triu(causal_mask, diagonal=1) 1103 | causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) 1104 | causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) 1105 | if attention_mask is not None: 1106 | causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit 1107 | mask_length = attention_mask.shape[-1] 1108 | padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] 1109 | 
padding_mask = padding_mask == 0 1110 | causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( 1111 | padding_mask, min_dtype 1112 | ) 1113 | if ( 1114 | self.config._attn_implementation == "sdpa" 1115 | and attention_mask is not None 1116 | and attention_mask.device.type == "cuda" 1117 | and not output_attentions 1118 | ): 1119 | # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when 1120 | # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 1121 | # Details: https://github.com/pytorch/pytorch/issues/110213 1122 | causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) 1123 | 1124 | return causal_mask 1125 | 1126 | 1127 | class LlamaForCausalLM(LlamaPreTrainedModel): 1128 | _tied_weights_keys = ["lm_head.weight"] 1129 | 1130 | def __init__(self, config): 1131 | super().__init__(config) 1132 | self.model = LlamaModel(config) 1133 | self.vocab_size = config.vocab_size 1134 | if not getattr(self.config, "share_embedding", False): 1135 | self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) 1136 | 1137 | # Initialize weights and apply final processing 1138 | self.post_init() 1139 | 1140 | def get_input_embeddings(self): 1141 | return self.model.embed_tokens 1142 | 1143 | def set_input_embeddings(self, value): 1144 | self.model.embed_tokens = value 1145 | 1146 | def get_output_embeddings(self): 1147 | return ( 1148 | self.lm_head 1149 | if not getattr(self.config, "share_embedding", False) 1150 | else self.get_input_embeddings() 1151 | ) 1152 | 1153 | def set_output_embeddings(self, new_embeddings): 1154 | if not getattr(self.config, "share_embedding", False): 1155 | self.lm_head = new_embeddings 1156 | else: 1157 | self.set_input_embeddings(new_embeddings) 1158 | 1159 | def set_decoder(self, decoder): 1160 | self.model = decoder 1161 | 1162 | def get_decoder(self): 1163 | return self.model 1164 | 1165 | @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) 1166 | @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) 1167 | def forward( 1168 | self, 1169 | input_ids: torch.LongTensor = None, 1170 | attention_mask: Optional[torch.Tensor] = None, 1171 | position_ids: Optional[torch.LongTensor] = None, 1172 | past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, 1173 | inputs_embeds: Optional[torch.FloatTensor] = None, 1174 | labels: Optional[torch.LongTensor] = None, 1175 | use_cache: Optional[bool] = None, 1176 | output_attentions: Optional[bool] = None, 1177 | output_hidden_states: Optional[bool] = None, 1178 | return_dict: Optional[bool] = None, 1179 | cache_position: Optional[torch.LongTensor] = None, 1180 | ) -> Union[Tuple, CausalLMOutputWithPast]: 1181 | r""" 1182 | Args: 1183 | labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): 1184 | Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., 1185 | config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored 1186 | (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
1187 | 1188 | Returns: 1189 | 1190 | Example: 1191 | 1192 | ```python 1193 | >>> from transformers import AutoTokenizer, LlamaForCausalLM 1194 | 1195 | >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf") 1196 | >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") 1197 | 1198 | >>> prompt = "Hey, are you conscious? Can you talk to me?" 1199 | >>> inputs = tokenizer(prompt, return_tensors="pt") 1200 | 1201 | >>> # Generate 1202 | >>> generate_ids = model.generate(inputs.input_ids, max_length=30) 1203 | >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 1204 | "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 1205 | ```""" 1206 | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions 1207 | output_hidden_states = ( 1208 | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 1209 | ) 1210 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 1211 | 1212 | # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) 1213 | outputs = self.model( 1214 | input_ids=input_ids, 1215 | attention_mask=attention_mask, 1216 | position_ids=position_ids, 1217 | past_key_values=past_key_values, 1218 | inputs_embeds=inputs_embeds, 1219 | use_cache=use_cache, 1220 | output_attentions=output_attentions, 1221 | output_hidden_states=output_hidden_states, 1222 | return_dict=return_dict, 1223 | cache_position=cache_position, 1224 | ) 1225 | 1226 | hidden_states = outputs[0] 1227 | if self.config.pretraining_tp > 1: 1228 | lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0) 1229 | logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)] 1230 | logits = torch.cat(logits, dim=-1) 1231 | else: 1232 | if not getattr(self.config, "share_embedding", False): 1233 | logits = self.lm_head(hidden_states) 1234 | else: 1235 | logits = F.linear(hidden_states, self.model.embed_tokens.weight) 1236 | logits = logits.float() 1237 | 1238 | loss = None 1239 | if labels is not None: 1240 | # Shift so that tokens < n predict n 1241 | shift_logits = logits[..., :-1, :].contiguous() 1242 | shift_labels = labels[..., 1:].contiguous() 1243 | # Flatten the tokens 1244 | loss_fct = CrossEntropyLoss() 1245 | shift_logits = shift_logits.view(-1, self.config.vocab_size) 1246 | shift_labels = shift_labels.view(-1) 1247 | # Enable model parallelism 1248 | shift_labels = shift_labels.to(shift_logits.device) 1249 | loss = loss_fct(shift_logits, shift_labels) 1250 | 1251 | if not return_dict: 1252 | output = (logits,) + outputs[1:] 1253 | return (loss,) + output if loss is not None else output 1254 | 1255 | return CausalLMOutputWithPast( 1256 | loss=loss, 1257 | logits=logits, 1258 | past_key_values=outputs.past_key_values, 1259 | hidden_states=outputs.hidden_states, 1260 | attentions=outputs.attentions, 1261 | ) 1262 | 1263 | def prepare_inputs_for_generation( 1264 | self, 1265 | input_ids, 1266 | past_key_values=None, 1267 | attention_mask=None, 1268 | inputs_embeds=None, 1269 | cache_position=None, 1270 | use_cache=True, 1271 | **kwargs, 1272 | ): 1273 | past_length = 0 1274 | if past_key_values is not None: 1275 | # Past key values are always initialized with a `Cache` object -> no need for if-else anymore 1276 | past_length = cache_position[0] if cache_position is not None else 
past_key_values.get_seq_length() 1277 | max_cache_length = ( 1278 | torch.tensor(past_key_values.get_max_length(), device=input_ids.device) 1279 | if past_key_values.get_max_length() is not None 1280 | else None 1281 | ) 1282 | cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length) 1283 | 1284 | # Keep only the unprocessed tokens: 1285 | # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where 1286 | # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as input) 1287 | if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: 1288 | input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] 1289 | # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard 1290 | # input_ids based on the past_length. 1291 | elif past_length < input_ids.shape[1]: 1292 | input_ids = input_ids[:, past_length:] 1293 | # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. 1294 | 1295 | # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. 1296 | if ( 1297 | max_cache_length is not None 1298 | and attention_mask is not None 1299 | and cache_length + input_ids.shape[1] > max_cache_length 1300 | ): 1301 | attention_mask = attention_mask[:, -max_cache_length:] 1302 | 1303 | position_ids = kwargs.get("position_ids", None) 1304 | if attention_mask is not None and position_ids is None: 1305 | # create position_ids on the fly for batch generation 1306 | position_ids = attention_mask.long().cumsum(-1) - 1 1307 | position_ids.masked_fill_(attention_mask == 0, 1) 1308 | if past_key_values: 1309 | position_ids = position_ids[:, -input_ids.shape[1] :] 1310 | 1311 | # if `inputs_embeds` are passed, we only want to use them in the 1st generation step 1312 | if inputs_embeds is not None and past_length == 0: 1313 | model_inputs = {"inputs_embeds": inputs_embeds} 1314 | else: 1315 | # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise 1316 | # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114 1317 | # TODO: use `next_tokens` directly instead. 1318 | model_inputs = {"input_ids": input_ids.contiguous()} 1319 | 1320 | input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1] 1321 | if cache_position is None: 1322 | cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device) 1323 | elif use_cache: 1324 | cache_position = cache_position[-input_length:] 1325 | 1326 | model_inputs.update( 1327 | { 1328 | "position_ids": position_ids, 1329 | "cache_position": cache_position, 1330 | "past_key_values": past_key_values, 1331 | "use_cache": use_cache, 1332 | "attention_mask": attention_mask, 1333 | } 1334 | ) 1335 | return model_inputs 1336 | 1337 | @staticmethod 1338 | def _reorder_cache(past_key_values, beam_idx): 1339 | reordered_past = () 1340 | for layer_past in past_key_values: 1341 | reordered_past += ( 1342 | tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), 1343 | ) 1344 | return reordered_past 1345 | 1346 | 1347 | @add_start_docstrings( 1348 | """ 1349 | The LLaMa Model transformer with a sequence classification head on top (linear layer). 
1350 | 1351 | [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models 1352 | (e.g. GPT-2) do. 1353 | 1354 | Since it does classification on the last token, it requires to know the position of the last token. If a 1355 | `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If 1356 | no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the 1357 | padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in 1358 | each row of the batch). 1359 | """, 1360 | LLAMA_START_DOCSTRING, 1361 | ) 1362 | class LlamaForSequenceClassification(LlamaPreTrainedModel): 1363 | def __init__(self, config): 1364 | super().__init__(config) 1365 | self.num_labels = config.num_labels 1366 | self.model = LlamaModel(config) 1367 | self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) 1368 | 1369 | # Initialize weights and apply final processing 1370 | self.post_init() 1371 | 1372 | def get_input_embeddings(self): 1373 | return self.model.embed_tokens 1374 | 1375 | def set_input_embeddings(self, value): 1376 | self.model.embed_tokens = value 1377 | 1378 | @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) 1379 | def forward( 1380 | self, 1381 | input_ids: torch.LongTensor = None, 1382 | attention_mask: Optional[torch.Tensor] = None, 1383 | position_ids: Optional[torch.LongTensor] = None, 1384 | past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, 1385 | inputs_embeds: Optional[torch.FloatTensor] = None, 1386 | labels: Optional[torch.LongTensor] = None, 1387 | use_cache: Optional[bool] = None, 1388 | output_attentions: Optional[bool] = None, 1389 | output_hidden_states: Optional[bool] = None, 1390 | return_dict: Optional[bool] = None, 1391 | ) -> Union[Tuple, SequenceClassifierOutputWithPast]: 1392 | r""" 1393 | labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): 1394 | Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., 1395 | config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If 1396 | `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
1397 | """ 1398 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 1399 | 1400 | transformer_outputs = self.model( 1401 | input_ids, 1402 | attention_mask=attention_mask, 1403 | position_ids=position_ids, 1404 | past_key_values=past_key_values, 1405 | inputs_embeds=inputs_embeds, 1406 | use_cache=use_cache, 1407 | output_attentions=output_attentions, 1408 | output_hidden_states=output_hidden_states, 1409 | return_dict=return_dict, 1410 | ) 1411 | hidden_states = transformer_outputs[0] 1412 | logits = self.score(hidden_states) 1413 | 1414 | if input_ids is not None: 1415 | batch_size = input_ids.shape[0] 1416 | else: 1417 | batch_size = inputs_embeds.shape[0] 1418 | 1419 | if self.config.pad_token_id is None and batch_size != 1: 1420 | raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") 1421 | if self.config.pad_token_id is None: 1422 | sequence_lengths = -1 1423 | else: 1424 | if input_ids is not None: 1425 | # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility 1426 | sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 1427 | sequence_lengths = sequence_lengths % input_ids.shape[-1] 1428 | sequence_lengths = sequence_lengths.to(logits.device) 1429 | else: 1430 | sequence_lengths = -1 1431 | 1432 | pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] 1433 | 1434 | loss = None 1435 | if labels is not None: 1436 | labels = labels.to(logits.device) 1437 | if self.config.problem_type is None: 1438 | if self.num_labels == 1: 1439 | self.config.problem_type = "regression" 1440 | elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): 1441 | self.config.problem_type = "single_label_classification" 1442 | else: 1443 | self.config.problem_type = "multi_label_classification" 1444 | 1445 | if self.config.problem_type == "regression": 1446 | loss_fct = MSELoss() 1447 | if self.num_labels == 1: 1448 | loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) 1449 | else: 1450 | loss = loss_fct(pooled_logits, labels) 1451 | elif self.config.problem_type == "single_label_classification": 1452 | loss_fct = CrossEntropyLoss() 1453 | loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) 1454 | elif self.config.problem_type == "multi_label_classification": 1455 | loss_fct = BCEWithLogitsLoss() 1456 | loss = loss_fct(pooled_logits, labels) 1457 | if not return_dict: 1458 | output = (pooled_logits,) + transformer_outputs[1:] 1459 | return ((loss,) + output) if loss is not None else output 1460 | 1461 | return SequenceClassifierOutputWithPast( 1462 | loss=loss, 1463 | logits=pooled_logits, 1464 | past_key_values=transformer_outputs.past_key_values, 1465 | hidden_states=transformer_outputs.hidden_states, 1466 | attentions=transformer_outputs.attentions, 1467 | ) 1468 | 1469 | 1470 | @add_start_docstrings( 1471 | """ 1472 | The Llama Model transformer with a span classification head on top for extractive question-answering tasks like 1473 | SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
1474 | """, 1475 | LLAMA_START_DOCSTRING, 1476 | ) 1477 | class LlamaForQuestionAnswering(LlamaPreTrainedModel): 1478 | base_model_prefix = "transformer" 1479 | 1480 | # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama 1481 | def __init__(self, config): 1482 | super().__init__(config) 1483 | self.transformer = LlamaModel(config) 1484 | self.qa_outputs = nn.Linear(config.hidden_size, 2) 1485 | 1486 | # Initialize weights and apply final processing 1487 | self.post_init() 1488 | 1489 | def get_input_embeddings(self): 1490 | return self.transformer.embed_tokens 1491 | 1492 | def set_input_embeddings(self, value): 1493 | self.transformer.embed_tokens = value 1494 | 1495 | @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) 1496 | def forward( 1497 | self, 1498 | input_ids: Optional[torch.LongTensor] = None, 1499 | attention_mask: Optional[torch.FloatTensor] = None, 1500 | position_ids: Optional[torch.LongTensor] = None, 1501 | past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, 1502 | inputs_embeds: Optional[torch.FloatTensor] = None, 1503 | start_positions: Optional[torch.LongTensor] = None, 1504 | end_positions: Optional[torch.LongTensor] = None, 1505 | output_attentions: Optional[bool] = None, 1506 | output_hidden_states: Optional[bool] = None, 1507 | return_dict: Optional[bool] = None, 1508 | ) -> Union[Tuple, QuestionAnsweringModelOutput]: 1509 | r""" 1510 | start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): 1511 | Labels for position (index) of the start of the labelled span for computing the token classification loss. 1512 | Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence 1513 | are not taken into account for computing the loss. 1514 | end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): 1515 | Labels for position (index) of the end of the labelled span for computing the token classification loss. 1516 | Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence 1517 | are not taken into account for computing the loss. 
1518 | """ 1519 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 1520 | 1521 | outputs = self.transformer( 1522 | input_ids, 1523 | attention_mask=attention_mask, 1524 | position_ids=position_ids, 1525 | past_key_values=past_key_values, 1526 | inputs_embeds=inputs_embeds, 1527 | output_attentions=output_attentions, 1528 | output_hidden_states=output_hidden_states, 1529 | return_dict=return_dict, 1530 | ) 1531 | 1532 | sequence_output = outputs[0] 1533 | 1534 | logits = self.qa_outputs(sequence_output) 1535 | start_logits, end_logits = logits.split(1, dim=-1) 1536 | start_logits = start_logits.squeeze(-1).contiguous() 1537 | end_logits = end_logits.squeeze(-1).contiguous() 1538 | 1539 | total_loss = None 1540 | if start_positions is not None and end_positions is not None: 1541 | # If we are on multi-GPU, split add a dimension 1542 | if len(start_positions.size()) > 1: 1543 | start_positions = start_positions.squeeze(-1).to(start_logits.device) 1544 | if len(end_positions.size()) > 1: 1545 | end_positions = end_positions.squeeze(-1).to(end_logits.device) 1546 | # sometimes the start/end positions are outside our model inputs, we ignore these terms 1547 | ignored_index = start_logits.size(1) 1548 | start_positions = start_positions.clamp(0, ignored_index) 1549 | end_positions = end_positions.clamp(0, ignored_index) 1550 | 1551 | loss_fct = CrossEntropyLoss(ignore_index=ignored_index) 1552 | start_loss = loss_fct(start_logits, start_positions) 1553 | end_loss = loss_fct(end_logits, end_positions) 1554 | total_loss = (start_loss + end_loss) / 2 1555 | 1556 | if not return_dict: 1557 | output = (start_logits, end_logits) + outputs[2:] 1558 | return ((total_loss,) + output) if total_loss is not None else output 1559 | 1560 | return QuestionAnsweringModelOutput( 1561 | loss=total_loss, 1562 | start_logits=start_logits, 1563 | end_logits=end_logits, 1564 | hidden_states=outputs.hidden_states, 1565 | attentions=outputs.attentions, 1566 | ) 1567 | 1568 | 1569 | @add_start_docstrings( 1570 | """ 1571 | The Llama Model transformer with a token classification head on top (a linear layer on top of the hidden-states 1572 | output) e.g. for Named-Entity-Recognition (NER) tasks. 
1573 | """, 1574 | LLAMA_START_DOCSTRING, 1575 | ) 1576 | class LlamaForTokenClassification(LlamaPreTrainedModel): 1577 | def __init__(self, config): 1578 | super().__init__(config) 1579 | self.num_labels = config.num_labels 1580 | self.model = LlamaModel(config) 1581 | if getattr(config, "classifier_dropout", None) is not None: 1582 | classifier_dropout = config.classifier_dropout 1583 | elif getattr(config, "hidden_dropout", None) is not None: 1584 | classifier_dropout = config.hidden_dropout 1585 | else: 1586 | classifier_dropout = 0.1 1587 | self.dropout = nn.Dropout(classifier_dropout) 1588 | self.score = nn.Linear(config.hidden_size, config.num_labels) 1589 | 1590 | # Initialize weights and apply final processing 1591 | self.post_init() 1592 | 1593 | def get_input_embeddings(self): 1594 | return self.model.embed_tokens 1595 | 1596 | def set_input_embeddings(self, value): 1597 | self.model.embed_tokens = value 1598 | 1599 | @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) 1600 | def forward( 1601 | self, 1602 | input_ids: Optional[torch.LongTensor] = None, 1603 | attention_mask: Optional[torch.Tensor] = None, 1604 | position_ids: Optional[torch.LongTensor] = None, 1605 | past_key_values: Optional[List[torch.FloatTensor]] = None, 1606 | inputs_embeds: Optional[torch.FloatTensor] = None, 1607 | labels: Optional[torch.LongTensor] = None, 1608 | use_cache: Optional[bool] = None, 1609 | output_attentions: Optional[bool] = None, 1610 | output_hidden_states: Optional[bool] = None, 1611 | return_dict: Optional[bool] = None, 1612 | ) -> Union[Tuple, TokenClassifierOutput]: 1613 | r""" 1614 | labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): 1615 | Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., 1616 | config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If 1617 | `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 1618 | """ 1619 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 1620 | 1621 | outputs = self.model( 1622 | input_ids, 1623 | attention_mask=attention_mask, 1624 | position_ids=position_ids, 1625 | past_key_values=past_key_values, 1626 | inputs_embeds=inputs_embeds, 1627 | use_cache=use_cache, 1628 | output_attentions=output_attentions, 1629 | output_hidden_states=output_hidden_states, 1630 | return_dict=return_dict, 1631 | ) 1632 | sequence_output = outputs[0] 1633 | sequence_output = self.dropout(sequence_output) 1634 | logits = self.score(sequence_output) 1635 | 1636 | loss = None 1637 | if labels is not None: 1638 | loss_fct = CrossEntropyLoss() 1639 | loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) 1640 | 1641 | if not return_dict: 1642 | output = (logits,) + outputs[2:] 1643 | return ((loss,) + output) if loss is not None else output 1644 | 1645 | return TokenClassifierOutput( 1646 | loss=loss, 1647 | logits=logits, 1648 | hidden_states=outputs.hidden_states, 1649 | attentions=outputs.attentions, 1650 | ) 1651 | --------------------------------------------------------------------------------
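The two MobileLLM-specific departures from the stock Llama implementation in the `modeling_llama.py` dump above are the `layer_sharing` branch in `LlamaModel.forward` (each decoder layer is immediately executed a second time with the same weights) and the `share_embedding` flag in `LlamaForCausalLM` (no separate `lm_head`; logits are computed against the tied input-embedding matrix). The standalone sketch below illustrates both mechanisms on toy modules; `TinySharedDecoder` and its sizes are hypothetical stand-ins for illustration only, not classes from this repository.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinySharedDecoder(nn.Module):
    """Toy decoder illustrating immediate block-wise layer sharing and tied embeddings."""

    def __init__(self, vocab_size=100, hidden_size=32, num_layers=2,
                 layer_sharing=True, share_embedding=True):
        super().__init__()
        self.layer_sharing = layer_sharing
        self.share_embedding = share_embedding
        self.embed_tokens = nn.Embedding(vocab_size, hidden_size)
        # Stand-in for LlamaDecoderLayer: one linear block per layer.
        self.layers = nn.ModuleList(nn.Linear(hidden_size, hidden_size) for _ in range(num_layers))
        if not share_embedding:
            self.lm_head = nn.Linear(hidden_size, vocab_size, bias=False)

    def forward(self, input_ids):
        hidden_states = self.embed_tokens(input_ids)
        for layer in self.layers:
            hidden_states = torch.relu(layer(hidden_states))
            if self.layer_sharing:
                # Immediate block-wise sharing: run the same layer once more
                # before moving on, mirroring the repeated decoder_layer call
                # inside LlamaModel.forward above.
                hidden_states = torch.relu(layer(hidden_states))
        if self.share_embedding:
            # Tied output projection, mirroring
            # F.linear(hidden_states, self.model.embed_tokens.weight) in LlamaForCausalLM.
            return F.linear(hidden_states, self.embed_tokens.weight)
        return self.lm_head(hidden_states)


logits = TinySharedDecoder()(torch.randint(0, 100, (1, 8)))
print(logits.shape)  # torch.Size([1, 8, 100])
```

In the real models these behaviours are toggled by the `share_embedding` attribute read via `getattr(self.config, "share_embedding", False)` in `LlamaForCausalLM` and the `layer_sharing` attribute checked in `LlamaModel.forward`.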
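`LlamaForSequenceClassification.forward` pools the logit row of the last non-padding token in each sequence, locating it with an argmax-plus-modulo trick rather than reverse indexing (kept for ONNX compatibility). A small hypothetical check of that indexing arithmetic, assuming `pad_token_id = 0` and a toy batch:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([
    [5, 7, 9, 0, 0],  # three real tokens -> last real position is index 2
    [3, 4, 6, 8, 2],  # no padding        -> wraps to the final position, index 4
])

# Same arithmetic as in LlamaForSequenceClassification.forward:
# index of the first pad token minus one; rows without padding give -1,
# which the modulo maps to the last position in the sequence.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])

# pooled_logits = logits[torch.arange(batch_size), sequence_lengths] then feeds the loss.
```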