├── finetune.sh ├── data_handler.py ├── deepspeed_config_13B.json ├── README.md ├── requirements.txt └── train.py /finetune.sh: -------------------------------------------------------------------------------- 1 | curr_dir=/ 2 | hf_model_dir=bigscience/bloomz-7b1 3 | 4 | export CUDA_VISIBLE_DEVICES='0,1,2,3,4,5,6,7' 5 | export TRANSFORMERS_CACHE=$curr_dir 6 | export TORCH_EXTENSIONS_DIR=$curr_dir 7 | 8 | deepspeed $curr_dir/scripts/train.py \ 9 | --model_name_or_path $hf_model_dir \ 10 | --data_path $curr_dir/instruction \ 11 | --output_dir $curr_dir/bloomz7b1/ \ 12 | --num_train_epochs 3 \ 13 | --per_device_train_batch_size 4 \ 14 | --gradient_accumulation_steps 4 \ 15 | --save_steps 1000 \ 16 | --save_strategy 'steps' \ 17 | --save_total_limit 2 \ 18 | --learning_rate 2e-5 \ 19 | --warmup_steps 1 \ 20 | --logging_steps 10 \ 21 | --lr_scheduler_type 'constant' \ 22 | --report_to 'tensorboard' \ 23 | --gradient_checkpointing True \ 24 | --deepspeed $curr_dir/configs/deepspeed_config_13B.json \ 25 | --fp16 True 26 | 27 | -------------------------------------------------------------------------------- /data_handler.py: -------------------------------------------------------------------------------- 1 | continual_train = { 2 | 'gigaword': { 3 | 'train': [ 4 | ('custom', 'constrain_start+make_a_title'), 5 | ('custom', 'constrain_contain+make_a_title'), 6 | ('custom', 'constrain_end+make_a_title'), 7 | ], 8 | 'test': [ 9 | ('custom', 'constrain_start+make_a_title'), 10 | ('custom', 'constrain_contain+make_a_title'), 11 | ('custom', 'constrain_end+make_a_title'), 12 | ] 13 | }, 14 | 'wiki_auto': { 15 | 'train': [ 16 | ('custom', 'simplification_1'), 17 | ], 18 | 'test': [ 19 | ('custom', 'simplification_1'), 20 | ] 21 | }, 22 | 'eli5': { 23 | 'train': [ 24 | ('custom', 'generate_a_question_1'), 25 | ('custom', 'generate_a_question_2'), 26 | ], 27 | 'test': [ 28 | ('custom', 'generate_a_question_1'), 29 | ] 30 | }, 31 | 'empathetic_dialogues': { 32 | 'train': [ 33 | ('custom', "dialogue_with_emotion"), 34 | ], 35 | 'test':[ 36 | ('custom', "dialogue_with_emotion"), 37 | ] 38 | }, 39 | 'eSNLI': { 40 | 'train': [ 41 | ('custom', "explain_why"), 42 | ], 43 | 'test': [ 44 | ('custom', 'explain_why'), 45 | ] 46 | }, 47 | 48 | } 49 | -------------------------------------------------------------------------------- /deepspeed_config_13B.json: -------------------------------------------------------------------------------- 1 | { 2 | "zero_optimization": { 3 | "stage": 3, 4 | "offload_optimizer": { 5 | "device": "none", 6 | "pin_memory": false 7 | }, 8 | "offload_param": { 9 | "device": "none", 10 | "pin_memory": false 11 | }, 12 | "overlap_comm": true, 13 | "contiguous_gradients": true, 14 | "sub_group_size": 0, 15 | "reduce_bucket_size": "auto", 16 | "stage3_prefetch_bucket_size": "auto", 17 | "stage3_param_persistence_threshold": "auto", 18 | "stage3_max_live_parameters": 1e9, 19 | "stage3_max_reuse_distance": 1e9, 20 | "stage3_gather_16bit_weights_on_model_save": true 21 | }, 22 | "fp16": { 23 | "enabled": "auto", 24 | "auto_cast": "auto", 25 | "loss_scale": 0, 26 | "initial_scale_power": 32, 27 | "loss_scale_window": 1000, 28 | "hysteresis": 2, 29 | "min_loss_scale": 1 30 | }, 31 | "optimizer": { 32 | "type": "AdamW", 33 | "params": { 34 | "lr": "auto", 35 | "betas": [ 36 | 0.9, 37 | 0.999 38 | ], 39 | "eps": 1e-8, 40 | "weight_decay": "auto" 41 | } 42 | }, 43 | "train_batch_size": "auto", 44 | "train_micro_batch_size_per_gpu": "auto", 45 | "wall_clock_breakdown": false 46 | } 47 | 
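Note on the launcher settings above: the Hugging Face Trainer resolves the "auto" fields in deepspeed_config_13B.json (learning rate, weight decay, and the batch sizes) from the command-line arguments in finetune.sh, so the effective global batch size is per_device_train_batch_size × gradient_accumulation_steps × number of GPUs. A minimal sketch of that arithmetic (not part of the repository), assuming all 8 GPUs listed in CUDA_VISIBLE_DEVICES are used:

```
# Sketch (not part of the repository): effective global batch size implied by finetune.sh.
per_device_train_batch_size = 4   # --per_device_train_batch_size
gradient_accumulation_steps = 4   # --gradient_accumulation_steps
num_gpus = 8                      # CUDA_VISIBLE_DEVICES='0,1,2,3,4,5,6,7'

# DeepSpeed's "train_batch_size": "auto" resolves to this product.
global_batch_size = per_device_train_batch_size * gradient_accumulation_steps * num_gpus
print(global_batch_size)  # 128
```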
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Continual-Tune 2 | 3 | In this work, we evaluate LLMs during continual fine-tuning. 4 | 5 | For training, the code is relatively simple. First, install the Python dependencies with ```pip install -r requirements.txt```. 6 | 7 | An example launch script is provided in ```finetune.sh```, which you can run directly with bash. 8 | 9 | The processed data can be found at https://drive.google.com/drive/folders/1oqJ11w_3xGpBPXTmwJ1iz2LxtDHSrhxf?usp=sharing. We mainly adopt the instruction tasks used in Scialom et al. [1], which can also be found at https://github.com/ThomasScialom/T0_continual_learning. 10 | 11 | For evaluation, we adopt the lm-evaluation-harness framework from https://github.com/EleutherAI/lm-evaluation-harness/tree/master. You can follow their instructions to set up the test environment. Our study evaluates MMLU in a 5-shot setting and the other datasets in a 0-shot setting. For example, you can run the evaluation as follows: 12 | 13 | ``` 14 | python3 lm-evaluation-harness/main.py \ 15 | --model hf-causal-experimental \ 16 | --model_args pretrained=${path} \ 17 | --tasks piqa,boolq,winogrande,hellaswag,mathqa,mutual \ 18 | --device cuda:0 \ 19 | --output_path results.txt \ 20 | --no_cache 21 | ``` 22 | 23 | Note that in lm-evaluation-harness, some tasks like MMLU are evaluated separately for each split, so some extra code is required to merge the splits. We use ```datasets.concatenate_datasets``` and create new task classes following their instructions to implement this step. 24 | 25 | [1] Thomas Scialom, Tuhin Chakrabarty, and Smaranda Muresan. 2022. Fine-tuned Language Models are Continual Learners. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 6107–6122, Abu Dhabi, United Arab Emirates. 
Association for Computational Linguistics 26 | 27 | 28 | ## Citation 29 | 30 | @article{luo2023empirical, 31 | 32 | title={An Empirical Study of Catastrophic Forgetting in Large Language Models During Continual Fine-tuning}, 33 | 34 | author={Yun Luo and Zhen Yang and Fandong Meng and Yafu Li and Jie Zhou and Yue Zhang}, 35 | 36 | year={2023}, 37 | 38 | eprint={2308.08747}, 39 | 40 | archivePrefix={arXiv} 41 | 42 | } 43 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==1.4.0 2 | accelerate==0.18.0 3 | aiohttp==3.8.4 4 | aiosignal==1.3.1 5 | antlr4-python3-runtime==4.9.3 6 | async-timeout==4.0.2 7 | attrs==23.1.0 8 | bert-score==0.3.13 9 | cachetools==5.3.0 10 | certifi==2022.12.7 11 | chardet==5.1.0 12 | charset-normalizer==3.1.0 13 | click==8.1.3 14 | colorama==0.4.6 15 | contourpy==1.0.7 16 | cycler==0.11.0 17 | Cython==0.29.33 18 | DataProperty==0.55.0 19 | dill==0.3.6 20 | filelock==3.12.0 21 | fire==0.5.0 22 | fonttools==4.39.3 23 | frozenlist==1.3.3 24 | fsspec==2023.4.0 25 | google-auth==2.17.2 26 | google-auth-oauthlib==1.0.0 27 | grpcio==1.53.0 28 | hjson==3.1.0 29 | huggingface-hub==0.13.4 30 | idna==3.4 31 | importlib-metadata==6.3.0 32 | importlib-resources==5.12.0 33 | Jinja2==3.1.2 34 | joblib==1.2.0 35 | jsonlines==3.1.0 36 | kiwisolver==1.4.4 37 | -e git+https://github.com/EleutherAI/lm-evaluation-harness.git@44275ae98f0d70ef2bf8a8cf164a0f5e3874ce53#egg=lm_eval 38 | Markdown==3.4.3 39 | MarkupSafe==2.1.2 40 | matplotlib==3.7.1 41 | mbstrdecoder==1.1.2 42 | multidict==6.0.4 43 | multiprocess==0.70.14 44 | ninja==1.11.1 45 | nltk==3.8.1 46 | numexpr==2.8.4 47 | numpy==1.24.2 48 | oauthlib==3.2.2 49 | omegaconf==2.3.0 50 | openai==0.27.4 51 | packaging==23.1 52 | pandas==2.0.0 53 | pathvalidate==2.5.2 54 | Pillow==9.4.0 55 | portalocker==2.7.0 56 | protobuf==3.20.3 57 | psutil==5.9.5 58 | py-cpuinfo==9.0.0 59 | pyarrow==11.0.0 60 | pyasn1==0.4.8 61 | pyasn1-modules==0.2.8 62 | pybind11==2.10.4 63 | pycountry==22.3.5 64 | pydantic==1.10.7 65 | pyparsing==3.0.9 66 | pytablewriter==0.64.2 67 | python-dateutil==2.8.2 68 | pytz==2023.3 69 | PyYAML==6.0 70 | quadprog==0.1.7 71 | regex==2023.3.23 72 | requests==2.28.2 73 | requests-oauthlib==1.3.1 74 | responses==0.18.0 75 | rouge==1.0.1 76 | rouge-score==0.1.2 77 | rsa==4.9 78 | sacrebleu==1.5.0 79 | scikit-learn==1.2.2 80 | scipy==1.10.1 81 | sentencepiece==0.1.98 82 | six==1.16.0 83 | sklearn==0.0.post1 84 | sqlitedict==2.1.0 85 | tabledata==1.3.1 86 | tcolorpy==0.1.2 87 | tensorboard==2.12.1 88 | tensorboard-data-server==0.7.0 89 | tensorboard-plugin-wit==1.8.1 90 | tensorboardX==2.6 91 | termcolor==2.3.0 92 | threadpoolctl==3.1.0 93 | tokenizers==0.13.3 94 | torch==1.10.1+cu111 95 | torchaudio==0.10.1+cu111 96 | torchvision==0.11.2+cu111 97 | tqdm==4.65.0 98 | tqdm-multiprocess==0.0.11 99 | transformers==4.28.1 100 | typepy==1.3.0 101 | typing_extensions==4.5.0 102 | tzdata==2023.3 103 | urllib3==1.26.15 104 | Werkzeug==2.2.3 105 | xxhash==3.2.0 106 | yarl==1.9.1 107 | zipp==3.15.0 108 | zstandard==0.21.0 109 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in 
compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import glob 16 | import logging 17 | from dataclasses import dataclass, field 18 | from typing import Optional, Dict, Sequence 19 | 20 | import torch 21 | import transformers 22 | from torch.utils.data import Dataset 23 | from transformers import Trainer,EvalPrediction 24 | import json 25 | import copy 26 | 27 | import numpy as np 28 | import os 29 | 30 | 31 | from data_handler import continual_train 32 | 33 | 34 | os.environ['MASTER_PORT'] = '12340' 35 | 36 | 37 | task = ['wiki_auto','empathetic_dialogues','eli5','eSNLI','gigaword'] 38 | 39 | IGNORE_INDEX = -100 40 | DEFAULT_PAD_TOKEN = "<pad>" 41 | DEFAULT_EOS_TOKEN = "</s>" 42 | DEFAULT_BOS_TOKEN = "<s>" 43 | DEFAULT_UNK_TOKEN = "<unk>" 44 | 45 | 46 | 47 | instruction_datasets = ['ag_news','anli','common_gen','glue_mrpc','glue_qqp','imdb','rotten_tomatoes','rte','samsum','trec', 48 | 'winogrande','wsc','xsum'] 49 | 50 | 51 | 52 | 53 | @dataclass 54 | class ModelArguments: 55 | model_name_or_path: Optional[str] = field(default="facebook/opt-125m") 56 | 57 | 58 | @dataclass 59 | class DataArguments: 60 | data_path: str = field(default=None, metadata={"help": "Path to the training data."}) 61 | 62 | 63 | @dataclass 64 | class TrainingArguments(transformers.TrainingArguments): 65 | cache_dir: Optional[str] = field(default=None) 66 | optim: str = field(default="adamw_torch") 67 | model_max_length: int = field( 68 | default=512, 69 | metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."}, 70 | ) 71 | 72 | 73 | def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str): 74 | """Collects the state dict and dump to disk.""" 75 | state_dict = trainer.model.state_dict() 76 | if trainer.args.should_save: 77 | cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()} 78 | del state_dict 79 | trainer._save(output_dir, state_dict=cpu_state_dict) # noqa 80 | 81 | 82 | def smart_tokenizer_and_embedding_resize( 83 | special_tokens_dict: Dict, 84 | tokenizer: transformers.PreTrainedTokenizer, 85 | model: transformers.PreTrainedModel, 86 | ): 87 | """Resize tokenizer and embedding. 88 | Note: This is the unoptimized version that may make your embedding size not be divisible by 64. 
89 | """ 90 | num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict) 91 | model.resize_token_embeddings(len(tokenizer)) 92 | 93 | if num_new_tokens > 0: 94 | input_embeddings = model.get_input_embeddings().weight.data 95 | output_embeddings = model.get_output_embeddings().weight.data 96 | 97 | input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) 98 | output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True) 99 | 100 | input_embeddings[-num_new_tokens:] = input_embeddings_avg 101 | output_embeddings[-num_new_tokens:] = output_embeddings_avg 102 | 103 | 104 | def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict: 105 | """Tokenize a list of strings.""" 106 | tokenized_list = [ 107 | tokenizer( 108 | text, 109 | return_tensors="pt", 110 | padding="longest", 111 | max_length=tokenizer.model_max_length, 112 | truncation=True, 113 | ) 114 | for text in strings 115 | ] 116 | input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list] 117 | input_ids_lens = labels_lens = [ 118 | tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list 119 | ] 120 | return dict( 121 | input_ids=input_ids, 122 | labels=labels, 123 | input_ids_lens=input_ids_lens, 124 | labels_lens=labels_lens, 125 | ) 126 | 127 | 128 | def preprocess( 129 | sources: Sequence[str], 130 | targets: Sequence[str], 131 | tokenizer: transformers.PreTrainedTokenizer, 132 | ) -> Dict: 133 | """Preprocess the data by tokenizing.""" 134 | examples = [s + t for s, t in zip(sources, targets)] 135 | examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)] 136 | input_ids = examples_tokenized["input_ids"] 137 | labels = copy.deepcopy(input_ids) 138 | for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]): 139 | label[:source_len] = IGNORE_INDEX 140 | return dict(input_ids=input_ids, labels=labels) 141 | 142 | 143 | def minus_data(dataset, train_num = 100000, seed=0): 144 | np.random.seed(seed) 145 | 146 | size = len(dataset['input_ids']) 147 | idxs = np.array(range(len(dataset['input_ids']))) 148 | np.random.shuffle(idxs) 149 | 150 | train_idx = idxs[:train_num] 151 | 152 | return train_idx 153 | 154 | def train_val_split(dataset, train_percent, seed=0): 155 | np.random.seed(seed) 156 | 157 | size = len(dataset['input_ids']) 158 | idxs = np.array(range(len(dataset['input_ids']))) 159 | np.random.shuffle(idxs) 160 | 161 | train_idx = idxs[:int(size*train_percent)] 162 | test_idx = idxs[int(size*train_percent):] 163 | 164 | return train_idx, test_idx 165 | 166 | 167 | def get_data_by_idx(dataset, idxs): 168 | input_id = [] 169 | labels = [] 170 | for item_id in idxs: 171 | labels.append(dataset['labels'][item_id]) 172 | input_id.append(dataset['input_ids'][item_id]) 173 | return input_id, labels 174 | 175 | 176 | class InstructionDataset_S(Dataset): 177 | """Dataset for supervised fine-tuning.""" 178 | 179 | def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, split = 'train',task = 'ag_news'): 180 | super(InstructionDataset_S, self).__init__() 181 | logging.warning("Loading data...") 182 | 183 | self.input_ids = [] 184 | self.labels = [] 185 | 186 | task_splits = continual_train[task]['train'] 187 | for u in task_splits: 188 | with open(data_path+'/'+u[1]+'.train.json', 'r') as f: 189 | data = json.load(f) 190 | 191 | logging.warning("Formatting inputs...") 192 | 193 | sources = [f"Below 
is an instruction that describes a task, paired with an input that provides \ 194 | further context. Write a response that appropriately completes the request.\n\n{example}\n\n### Response:" for example in data['src']] 195 | deals = [str(u).replace("['","").replace("']","") for u in data['tgt']] 196 | targets = [f"{example}{tokenizer.eos_token}" for example in deals] 197 | 198 | logging.warning("Tokenizing inputs... This may take some time...") 199 | data_dict = preprocess(sources, targets, tokenizer) 200 | 201 | train_idx = minus_data(data_dict,int(100000/len(task_splits))) 202 | input_ids,labels = get_data_by_idx(data_dict,train_idx) 203 | print(len(input_ids)) 204 | self.input_ids.extend(input_ids) 205 | self.labels.extend(labels) 206 | 207 | 208 | print('The number of data samples: ' + str(len(self.input_ids))) 209 | 210 | def __len__(self): 211 | return len(self.input_ids) 212 | 213 | def __getitem__(self, i) -> Dict[str, torch.Tensor]: 214 | return dict(input_ids=self.input_ids[i], labels=self.labels[i]) 215 | 216 | 217 | @dataclass 218 | class DataCollatorForSupervisedDataset(object): 219 | """Collate examples for supervised fine-tuning.""" 220 | 221 | tokenizer: transformers.PreTrainedTokenizer 222 | 223 | def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: 224 | input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) 225 | input_ids = torch.nn.utils.rnn.pad_sequence( 226 | input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id 227 | ) 228 | labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) 229 | return dict( 230 | input_ids=input_ids, 231 | labels=labels, 232 | attention_mask=input_ids.ne(self.tokenizer.pad_token_id), 233 | ) 234 | 235 | 236 | def load_latest_ckpt(output_path): 237 | all_ckpts = glob.glob( 238 | f"{output_path}/checkpoint-*" 239 | ) 240 | print(all_ckpts) 241 | steps = [int(u.split('/')[-1].split('-')[1]) for u in all_ckpts] 242 | ckpt_list = sorted( 243 | steps, reverse=True 244 | ) 245 | assert len(ckpt_list) > 0, f"no checkpoint found" 246 | return 'checkpoint-'+str(ckpt_list[0]) 247 | 248 | def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_path,t) -> Dict: 249 | """Make dataset and collator for supervised fine-tuning.""" 250 | train_dataset = InstructionDataset_S(tokenizer=tokenizer, data_path=data_path,task=t) 251 | data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer) 252 | 253 | return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator) 254 | 255 | def train(): 256 | parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments)) 257 | model_args, data_args, training_args = parser.parse_args_into_dataclasses() 258 | 259 | tokenizer = transformers.AutoTokenizer.from_pretrained( 260 | model_args.model_name_or_path, 261 | cache_dir=training_args.cache_dir, 262 | model_max_length=training_args.model_max_length, 263 | padding_side="right", 264 | use_fast=False, 265 | ) 266 | 267 | special_tokens_dict = dict() 268 | if tokenizer.pad_token is None: 269 | special_tokens_dict["pad_token"] = DEFAULT_PAD_TOKEN 270 | if tokenizer.eos_token is None: 271 | special_tokens_dict["eos_token"] = DEFAULT_EOS_TOKEN 272 | if tokenizer.bos_token is None: 273 | special_tokens_dict["bos_token"] = DEFAULT_BOS_TOKEN 274 | if tokenizer.unk_token is None: 275 | special_tokens_dict["unk_token"] = DEFAULT_UNK_TOKEN 276 | 277 | 278 | 279 | for i in 
range(len(task)): 280 | if os.path.exists(training_args.output_dir+task[i]): 281 | continue 282 | t_args = copy.deepcopy(training_args) 283 | t = task[i] 284 | t_args.output_dir += t+'/' 285 | if i == 0: 286 | model = transformers.AutoModelForCausalLM.from_pretrained( 287 | model_args.model_name_or_path, 288 | cache_dir=training_args.cache_dir, 289 | ) 290 | 291 | smart_tokenizer_and_embedding_resize( 292 | special_tokens_dict=special_tokens_dict, 293 | tokenizer=tokenizer, 294 | model=model, 295 | ) 296 | 297 | data_module = make_supervised_data_module(tokenizer=tokenizer, data_path=data_args.data_path+'/'+t,t=t) 298 | trainer = Trainer(model=model, tokenizer=tokenizer, args=t_args, **data_module) 299 | trainer.train() 300 | trainer.save_state() 301 | safe_save_model_for_hf_trainer(trainer=trainer, output_dir=t_args.output_dir) 302 | del data_module 303 | del trainer 304 | torch.cuda.empty_cache() 305 | torch.cuda.empty_cache() 306 | torch.cuda.empty_cache() 307 | torch.cuda.empty_cache() 308 | 309 | else: 310 | model_name = load_latest_ckpt(training_args.output_dir+task[i-1]) 311 | model = transformers.AutoModelForCausalLM.from_pretrained( 312 | training_args.output_dir+task[i-1]+'/'+model_name, 313 | ) 314 | 315 | # smart_tokenizer_and_embedding_resize( 316 | # special_tokens_dict=special_tokens_dict, 317 | # tokenizer=tokenizer, 318 | # model=model, 319 | # ) 320 | 321 | data_module = make_supervised_data_module(tokenizer=tokenizer, data_path=data_args.data_path+'instruction/'+t,t=t) 322 | # del trainer 323 | trainer = Trainer(model=model, tokenizer=tokenizer, args=t_args, **data_module) 324 | trainer.train() 325 | trainer.save_state() 326 | safe_save_model_for_hf_trainer(trainer=trainer, output_dir=t_args.output_dir) 327 | 328 | 329 | if __name__ == "__main__": 330 | train() 331 | --------------------------------------------------------------------------------
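After the sequential loop in train.py finishes, each stage's model and tokenizer are written to `output_dir/<task name>/` by the Trainer and `safe_save_model_for_hf_trainer`. Because gigaword is the last entry in `task`, its directory holds the model that has been fine-tuned on all five instruction tasks in sequence. A minimal inference sketch against that last checkpoint (the checkpoint path and the example input below are illustrative assumptions, not files or values from the repository):

```
# Sketch (illustrative, not part of the repository): load a checkpoint produced by
# train.py and query it with the Alpaca-style prompt used in InstructionDataset_S.
import torch
import transformers

ckpt_dir = "/bloomz7b1/gigaword/"  # <output_dir>/<task>; adjust to your own run
tokenizer = transformers.AutoTokenizer.from_pretrained(ckpt_dir, use_fast=False)
model = transformers.AutoModelForCausalLM.from_pretrained(ckpt_dir, torch_dtype=torch.float16).cuda()

# Placeholder instruction + input in the style of the gigaword templates.
example = "Make a title for this article: the stock market rebounded on tuesday after a week of losses."
prompt = (
    "Below is an instruction that describes a task, paired with an input that provides "
    f"further context. Write a response that appropriately completes the request.\n\n{example}\n\n### Response:"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=64)
# Print only the generated continuation, not the echoed prompt.
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```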