├── mup_examples ├── requirements.txt ├── coord_check_shakespeare_char │ ├── sp │ │ └── run.sh │ ├── mup │ │ └── run.sh │ ├── sp_with_mup_hidden_init │ │ └── run.sh │ ├── sp_with_mup_hidden_init_and_lr │ │ └── run.sh │ ├── sp_with_mup_hidden_init_and_lr_output_logits │ │ └── run.sh │ └── sp_with_mup_hidden_init_and_lr_partial_output_logits │ │ └── run.sh ├── mutransfer_lr_shakespeare_char │ ├── sp │ │ └── run.sh │ ├── mup │ │ └── run.sh │ └── plot.ipynb ├── mutransfer_lr_owt │ ├── sp │ │ └── run.sh │ └── mup │ │ └── run.sh └── README.md ├── assets ├── nanogpt.jpg ├── coord_check_sp.png ├── gpt2_124M_loss.png ├── coord_check_mup.png ├── mutransfer_lr_owt.png └── mutransfer_lr_shakespeare_char.png ├── .gitignore ├── data ├── shakespeare │ ├── readme.md │ └── prepare.py ├── shakespeare_char │ ├── readme.md │ └── prepare.py └── openwebtext │ ├── readme.md │ └── prepare.py ├── .gitattributes ├── config ├── eval_gpt2.py ├── eval_gpt2_xl.py ├── eval_gpt2_large.py ├── eval_gpt2_medium.py ├── finetune_shakespeare.py ├── train_gpt2.py └── train_shakespeare_char.py ├── LICENSE ├── configurator.py ├── sample.py ├── bench.py ├── csv_logging.py ├── transformer_sizing.ipynb ├── README.md ├── train.py └── model.py /mup_examples/requirements.txt: -------------------------------------------------------------------------------- 1 | datasets==2.13.0 2 | seaborn 3 | -------------------------------------------------------------------------------- /assets/nanogpt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EleutherAI/nanoGPT-mup/HEAD/assets/nanogpt.jpg -------------------------------------------------------------------------------- /assets/coord_check_sp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EleutherAI/nanoGPT-mup/HEAD/assets/coord_check_sp.png -------------------------------------------------------------------------------- /assets/gpt2_124M_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EleutherAI/nanoGPT-mup/HEAD/assets/gpt2_124M_loss.png -------------------------------------------------------------------------------- /assets/coord_check_mup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EleutherAI/nanoGPT-mup/HEAD/assets/coord_check_mup.png -------------------------------------------------------------------------------- /assets/mutransfer_lr_owt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EleutherAI/nanoGPT-mup/HEAD/assets/mutransfer_lr_owt.png -------------------------------------------------------------------------------- /assets/mutransfer_lr_shakespeare_char.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EleutherAI/nanoGPT-mup/HEAD/assets/mutransfer_lr_shakespeare_char.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea 3 | .ipynb_checkpoints/ 4 | .vscode 5 | __pycache__/ 6 | *.bin 7 | *.pkl 8 | *.pt 9 | *.pyc 10 | input.txt 11 | env/ 12 | venv/ 13 | mup_examples/*/*/out/* -------------------------------------------------------------------------------- /data/shakespeare/readme.md: 
-------------------------------------------------------------------------------- 1 | 2 | # tiny shakespeare 3 | 4 | Tiny shakespeare, of the good old char-rnn fame :) 5 | 6 | After running `prepare.py`: 7 | 8 | - train.bin has 301,966 tokens 9 | - val.bin has 36,059 tokens 10 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Override jupyter in Github language stats for more accurate estimate of repo code languages 2 | # reference: https://github.com/github/linguist/blob/master/docs/overrides.md#generated-code 3 | *.ipynb linguist-generated 4 | -------------------------------------------------------------------------------- /config/eval_gpt2.py: -------------------------------------------------------------------------------- 1 | # evaluate the base gpt2 2 | # n_layer=12, n_head=12, n_embd=768 3 | # 124M parameters 4 | batch_size = 8 5 | eval_iters = 500 # use more iterations to get good estimate 6 | eval_only = True 7 | wandb_log = False 8 | init_from = 'gpt2' 9 | -------------------------------------------------------------------------------- /config/eval_gpt2_xl.py: -------------------------------------------------------------------------------- 1 | # evaluate the base gpt2 2 | # n_layer=48, n_head=25, n_embd=1600 3 | # 1558M parameters 4 | batch_size = 8 5 | eval_iters = 500 # use more iterations to get good estimate 6 | eval_only = True 7 | wandb_log = False 8 | init_from = 'gpt2-xl' 9 | -------------------------------------------------------------------------------- /config/eval_gpt2_large.py: -------------------------------------------------------------------------------- 1 | # evaluate the base gpt2 2 | # n_layer=36, n_head=20, n_embd=1280 3 | # 774M parameters 4 | batch_size = 8 5 | eval_iters = 500 # use more iterations to get good estimate 6 | eval_only = True 7 | wandb_log = False 8 | init_from = 'gpt2-large' 9 | -------------------------------------------------------------------------------- /config/eval_gpt2_medium.py: -------------------------------------------------------------------------------- 1 | # evaluate the base gpt2 2 | # n_layer=24, n_head=16, n_embd=1024 3 | # 350M parameters 4 | batch_size = 8 5 | eval_iters = 500 # use more iterations to get good estimate 6 | eval_only = True 7 | wandb_log = False 8 | init_from = 'gpt2-medium' 9 | -------------------------------------------------------------------------------- /data/shakespeare_char/readme.md: -------------------------------------------------------------------------------- 1 | 2 | # tiny shakespeare, character-level 3 | 4 | Tiny shakespeare, of the good old char-rnn fame :) Treated on character-level. 5 | 6 | After running `prepare.py`: 7 | 8 | - train.bin has 1,003,854 tokens 9 | - val.bin has 111,540 tokens 10 | -------------------------------------------------------------------------------- /data/openwebtext/readme.md: -------------------------------------------------------------------------------- 1 | 2 | ## openwebtext dataset 3 | 4 | after running `prepare.py` (preprocess) we get: 5 | 6 | - train.bin is ~17GB, val.bin ~8.5MB 7 | - train has ~9B tokens (9,035,582,198) 8 | - val has ~4M tokens (4,434,897) 9 | 10 | this came from 8,013,769 documents in total. 
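To sanity-check the bins after preprocessing: they are flat arrays of uint16 GPT-2 BPE token ids, as noted at the end of `prepare.py`. A minimal sketch (assuming it is run from this directory after `prepare.py` has finished):

```python
import numpy as np
import tiktoken

# train.bin / val.bin are flat arrays of GPT-2 BPE token ids stored as uint16
train = np.memmap('train.bin', dtype=np.uint16, mode='r')
val = np.memmap('val.bin', dtype=np.uint16, mode='r')
print(f"train: {len(train):,} tokens, val: {len(val):,} tokens")

# decode a small slice to eyeball the text
enc = tiktoken.get_encoding("gpt2")
print(enc.decode(train[:100].tolist()))
```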
11 | 12 | references: 13 | 14 | - OpenAI's WebText dataset is discussed in [GPT-2 paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) 15 | - [OpenWebText](https://skylion007.github.io/OpenWebTextCorpus/) dataset 16 | -------------------------------------------------------------------------------- /config/finetune_shakespeare.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | out_dir = 'out-shakespeare' 4 | eval_interval = 5 5 | eval_iters = 40 6 | wandb_log = False # feel free to turn on 7 | wandb_project = 'shakespeare' 8 | wandb_run_name = 'ft-' + str(time.time()) 9 | 10 | dataset = 'shakespeare' 11 | init_from = 'gpt2-xl' # this is the largest GPT-2 model 12 | 13 | # only save checkpoints if the validation loss improves 14 | always_save_checkpoint = False 15 | 16 | # the number of examples per iter: 17 | # 1 batch_size * 32 grad_accum * 1024 tokens = 32,768 tokens/iter 18 | # shakespeare has 301,966 tokens, so 1 epoch ~= 9.2 iters 19 | batch_size = 1 20 | gradient_accumulation_steps = 32 21 | max_iters = 20 22 | 23 | # finetune at constant LR 24 | learning_rate = 3e-5 25 | decay_lr = False 26 | -------------------------------------------------------------------------------- /config/train_gpt2.py: -------------------------------------------------------------------------------- 1 | # config for training GPT-2 (124M) down to very nice loss of ~2.85 on 1 node of 8X A100 40GB 2 | # launch as the following (e.g. in a screen session) and wait ~5 days: 3 | # $ torchrun --standalone --nproc_per_node=8 train.py config/train_gpt2.py 4 | 5 | wandb_log = True 6 | wandb_project = 'owt' 7 | wandb_run_name='gpt2-124M' 8 | 9 | # these make the total batch size be ~0.5M 10 | # 12 batch size * 1024 block size * 5 gradaccum * 8 GPUs = 491,520 11 | batch_size = 12 12 | block_size = 1024 13 | gradient_accumulation_steps = 5 * 8 14 | 15 | # this makes total number of tokens be 300B 16 | max_iters = 600000 17 | lr_decay_iters = 600000 18 | 19 | # eval stuff 20 | eval_interval = 1000 21 | eval_iters = 200 22 | log_interval = 10 23 | 24 | # weight decay 25 | weight_decay = 1e-1 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Andrej Karpathy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /data/shakespeare/prepare.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | import tiktoken 4 | import numpy as np 5 | 6 | # download the tiny shakespeare dataset 7 | input_file_path = os.path.join(os.path.dirname(__file__), 'input.txt') 8 | if not os.path.exists(input_file_path): 9 | data_url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt' 10 | with open(input_file_path, 'w', encoding='utf-8') as f: 11 | f.write(requests.get(data_url).text) 12 | 13 | with open(input_file_path, 'r', encoding='utf-8') as f: 14 | data = f.read() 15 | n = len(data) 16 | train_data = data[:int(n*0.9)] 17 | val_data = data[int(n*0.9):] 18 | 19 | # encode with tiktoken gpt2 bpe 20 | enc = tiktoken.get_encoding("gpt2") 21 | train_ids = enc.encode_ordinary(train_data) 22 | val_ids = enc.encode_ordinary(val_data) 23 | print(f"train has {len(train_ids):,} tokens") 24 | print(f"val has {len(val_ids):,} tokens") 25 | 26 | # export to bin files 27 | train_ids = np.array(train_ids, dtype=np.uint16) 28 | val_ids = np.array(val_ids, dtype=np.uint16) 29 | train_ids.tofile(os.path.join(os.path.dirname(__file__), 'train.bin')) 30 | val_ids.tofile(os.path.join(os.path.dirname(__file__), 'val.bin')) 31 | 32 | # train.bin has 301,966 tokens 33 | # val.bin has 36,059 tokens 34 | -------------------------------------------------------------------------------- /config/train_shakespeare_char.py: -------------------------------------------------------------------------------- 1 | # train a miniature character-level shakespeare model 2 | # good for debugging and playing on macbooks and such 3 | 4 | out_dir = 'out-shakespeare-char' 5 | eval_interval = 250 # keep frequent because we'll overfit 6 | eval_iters = 200 7 | log_interval = 10 # don't print too too often 8 | 9 | # we expect to overfit on this small dataset, so only save when val improves 10 | always_save_checkpoint = False 11 | 12 | wandb_log = False # override via command line if you like 13 | wandb_project = 'shakespeare-char' 14 | wandb_run_name = 'mini-gpt' 15 | 16 | dataset = 'shakespeare_char' 17 | gradient_accumulation_steps = 1 18 | batch_size = 64 19 | block_size = 256 # context of up to 256 previous characters 20 | 21 | # baby GPT model :) 22 | n_layer = 6 23 | n_head = 6 24 | n_embd = 384 25 | dropout = 0.2 26 | 27 | learning_rate = 1e-3 # with baby networks can afford to go a bit higher 28 | max_iters = 5000 29 | lr_decay_iters = 5000 # make equal to max_iters usually 30 | min_lr = 1e-4 # learning_rate / 10 usually 31 | beta2 = 0.99 # make a bit bigger because number of tokens per iter is small 32 | 33 | warmup_iters = 100 # not super necessary potentially 34 | 35 | # on macbook also add 36 | # device = 'cpu' # run on cpu only 37 | # compile = False # do not torch compile the model 38 | -------------------------------------------------------------------------------- /mup_examples/coord_check_shakespeare_char/sp/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 4096 2 | do 3 | for seed in 1 2 3 4 5 4 | do 5 | 
head_size=64 6 | n_heads=$((width / head_size)) 7 | out_dir="mup_examples/coord_check_shakespeare_char/sp/out/width${width}_depth2_seed${seed}" 8 | python train.py \ 9 | --out_dir=$out_dir \ 10 | --eval_interval=1 \ 11 | --log_interval=1 \ 12 | --eval_iters=1 \ 13 | --eval_only=False \ 14 | --always_save_checkpoint=False \ 15 | --never_save_checkpoint=True \ 16 | --init_from='scratch' \ 17 | --wandb_log=False \ 18 | --csv_log=True \ 19 | --dataset='shakespeare_char' \ 20 | --gradient_accumulation_steps=4 \ 21 | --batch_size=2 \ 22 | --block_size=1024 \ 23 | --n_layer=2 \ 24 | --n_head=$n_heads \ 25 | --n_embd=$width \ 26 | --dropout=0.0 \ 27 | --bias=False \ 28 | --init_std=0.02 \ 29 | --learning_rate=1e-2 \ 30 | --max_iters=10 \ 31 | --weight_decay=1e-1 \ 32 | --beta1=0.9 \ 33 | --beta2=0.95 \ 34 | --grad_clip=1.0 \ 35 | --decay_lr=False \ 36 | --seed=$seed \ 37 | --backend='nccl' \ 38 | --device='mps' \ 39 | --dtype='float32' \ 40 | --compile=False \ 41 | --mup_enable_coord_check_logging=True 42 | done 43 | done 44 | -------------------------------------------------------------------------------- /mup_examples/coord_check_shakespeare_char/mup/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 4096 2 | do 3 | for seed in 1 2 3 4 5 4 | do 5 | head_size=64 6 | n_heads=$((width / head_size)) 7 | mup_base_width=256 8 | mup_width_multiplier=$(echo "scale=8; $width/$mup_base_width" | bc -l) 9 | out_dir="mup_examples/coord_check_shakespeare_char/mup/out/width${width}_depth2_seed${seed}" 10 | python train.py \ 11 | --out_dir=$out_dir \ 12 | --eval_interval=1 \ 13 | --log_interval=1 \ 14 | --eval_iters=1 \ 15 | --eval_only=False \ 16 | --always_save_checkpoint=False \ 17 | --never_save_checkpoint=True \ 18 | --init_from='scratch' \ 19 | --wandb_log=False \ 20 | --csv_log=True \ 21 | --dataset='shakespeare_char' \ 22 | --gradient_accumulation_steps=4 \ 23 | --batch_size=2 \ 24 | --block_size=1024 \ 25 | --n_layer=2 \ 26 | --n_head=$n_heads \ 27 | --n_embd=$width \ 28 | --dropout=0.0 \ 29 | --bias=False \ 30 | --init_std=0.02 \ 31 | --learning_rate=1e-2 \ 32 | --max_iters=10 \ 33 | --weight_decay=1e-1 \ 34 | --beta1=0.9 \ 35 | --beta2=0.95 \ 36 | --grad_clip=1.0 \ 37 | --decay_lr=False \ 38 | --mup_enabled=True \ 39 | --mup_width_multiplier=$mup_width_multiplier \ 40 | --mup_input_alpha=1.0 \ 41 | --mup_output_alpha=1.0 \ 42 | --mup_enable_coord_check_logging=True \ 43 | --seed=$seed \ 44 | --backend='nccl' \ 45 | --device='mps' \ 46 | --dtype='float32' \ 47 | --compile=False 48 | done 49 | done 50 | -------------------------------------------------------------------------------- /mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 4096 2 | do 3 | for seed in 1 2 3 4 5 4 | do 5 | head_size=64 6 | n_heads=$((width / head_size)) 7 | mup_base_width=256 8 | mup_width_multiplier=$(echo "scale=8; $width/$mup_base_width" | bc -l) 9 | out_dir="mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init/out/width${width}_depth2_seed${seed}" 10 | python train.py \ 11 | --out_dir=$out_dir \ 12 | --eval_interval=1 \ 13 | --log_interval=1 \ 14 | --eval_iters=1 \ 15 | --eval_only=False \ 16 | --always_save_checkpoint=False \ 17 | --never_save_checkpoint=True \ 18 | --init_from='scratch' \ 19 | --wandb_log=False \ 20 | --csv_log=True \ 21 | --dataset='shakespeare_char' \ 22 | 
--gradient_accumulation_steps=4 \ 23 | --batch_size=2 \ 24 | --block_size=1024 \ 25 | --n_layer=2 \ 26 | --n_head=$n_heads \ 27 | --n_embd=$width \ 28 | --dropout=0.0 \ 29 | --bias=False \ 30 | --init_std=0.02 \ 31 | --learning_rate=1e-2 \ 32 | --max_iters=10 \ 33 | --weight_decay=1e-1 \ 34 | --beta1=0.9 \ 35 | --beta2=0.95 \ 36 | --grad_clip=1.0 \ 37 | --decay_lr=False \ 38 | --mup_enabled=True \ 39 | --mup_disable_attention_scaling=True \ 40 | --mup_disable_hidden_lr_scaling=True \ 41 | --mup_width_multiplier=$mup_width_multiplier \ 42 | --mup_input_alpha=1.0 \ 43 | --mup_output_alpha=$mup_width_multiplier \ 44 | --mup_enable_coord_check_logging=True \ 45 | --seed=$seed \ 46 | --backend='nccl' \ 47 | --device='mps' \ 48 | --dtype='float32' \ 49 | --compile=False 50 | done 51 | done 52 | -------------------------------------------------------------------------------- /mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init_and_lr/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 4096 2 | do 3 | for seed in 1 2 3 4 5 4 | do 5 | head_size=64 6 | n_heads=$((width / head_size)) 7 | mup_base_width=256 8 | mup_width_multiplier=$(echo "scale=8; $width/$mup_base_width" | bc -l) 9 | out_dir="mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init_and_lr/out/width${width}_depth2_seed${seed}" 10 | python train.py \ 11 | --out_dir=$out_dir \ 12 | --eval_interval=1 \ 13 | --log_interval=1 \ 14 | --eval_iters=1 \ 15 | --eval_only=False \ 16 | --always_save_checkpoint=False \ 17 | --never_save_checkpoint=True \ 18 | --init_from='scratch' \ 19 | --wandb_log=False \ 20 | --csv_log=True \ 21 | --dataset='shakespeare_char' \ 22 | --gradient_accumulation_steps=4 \ 23 | --batch_size=2 \ 24 | --block_size=1024 \ 25 | --n_layer=2 \ 26 | --n_head=$n_heads \ 27 | --n_embd=$width \ 28 | --dropout=0.0 \ 29 | --bias=False \ 30 | --init_std=0.02 \ 31 | --learning_rate=1e-2 \ 32 | --max_iters=10 \ 33 | --weight_decay=1e-1 \ 34 | --beta1=0.9 \ 35 | --beta2=0.95 \ 36 | --grad_clip=1.0 \ 37 | --decay_lr=False \ 38 | --mup_enabled=True \ 39 | --mup_disable_attention_scaling=True \ 40 | --mup_disable_hidden_lr_scaling=False \ 41 | --mup_width_multiplier=$mup_width_multiplier \ 42 | --mup_input_alpha=1.0 \ 43 | --mup_output_alpha=$mup_width_multiplier \ 44 | --mup_enable_coord_check_logging=True \ 45 | --seed=$seed \ 46 | --backend='nccl' \ 47 | --device='mps' \ 48 | --dtype='float32' \ 49 | --compile=False 50 | done 51 | done 52 | -------------------------------------------------------------------------------- /mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init_and_lr_output_logits/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 4096 2 | do 3 | for seed in 1 2 3 4 5 4 | do 5 | head_size=64 6 | n_heads=$((width / head_size)) 7 | mup_base_width=256 8 | mup_width_multiplier=$(echo "scale=8; $width/$mup_base_width" | bc -l) 9 | out_dir="mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init_and_lr_output_logits/out/width${width}_depth2_seed${seed}" 10 | python train.py \ 11 | --out_dir=$out_dir \ 12 | --eval_interval=1 \ 13 | --log_interval=1 \ 14 | --eval_iters=1 \ 15 | --eval_only=False \ 16 | --always_save_checkpoint=False \ 17 | --never_save_checkpoint=True \ 18 | --init_from='scratch' \ 19 | --wandb_log=False \ 20 | --csv_log=True \ 21 | --dataset='shakespeare_char' \ 22 | --gradient_accumulation_steps=4 \ 23 | --batch_size=2 \ 
24 | --block_size=1024 \ 25 | --n_layer=2 \ 26 | --n_head=$n_heads \ 27 | --n_embd=$width \ 28 | --dropout=0.0 \ 29 | --bias=False \ 30 | --init_std=0.02 \ 31 | --learning_rate=1e-2 \ 32 | --max_iters=10 \ 33 | --weight_decay=1e-1 \ 34 | --beta1=0.9 \ 35 | --beta2=0.95 \ 36 | --grad_clip=1.0 \ 37 | --decay_lr=False \ 38 | --mup_enabled=True \ 39 | --mup_disable_attention_scaling=True \ 40 | --mup_disable_hidden_lr_scaling=False \ 41 | --mup_width_multiplier=$mup_width_multiplier \ 42 | --mup_input_alpha=1.0 \ 43 | --mup_output_alpha=1.0 \ 44 | --mup_enable_coord_check_logging=True \ 45 | --seed=$seed \ 46 | --backend='nccl' \ 47 | --device='mps' \ 48 | --dtype='float32' \ 49 | --compile=False 50 | done 51 | done 52 | -------------------------------------------------------------------------------- /configurator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Poor Man's Configurator. Probably a terrible idea. Example usage: 3 | $ python train.py config/override_file.py --batch_size=32 4 | this will first run config/override_file.py, then override batch_size to 32 5 | 6 | The code in this file will be run as follows from e.g. train.py: 7 | >>> exec(open('configurator.py').read()) 8 | 9 | So it's not a Python module, it's just shuttling this code away from train.py 10 | The code in this script then overrides the globals() 11 | 12 | I know people are not going to love this, I just really dislike configuration 13 | complexity and having to prepend config. to every single variable. If someone 14 | comes up with a better simple Python solution I am all ears. 15 | """ 16 | 17 | import sys 18 | from ast import literal_eval 19 | 20 | for arg in sys.argv[1:]: 21 | if '=' not in arg: 22 | # assume it's the name of a config file 23 | assert not arg.startswith('--') 24 | config_file = arg 25 | print(f"Overriding config with {config_file}:") 26 | with open(config_file) as f: 27 | print(f.read()) 28 | exec(open(config_file).read()) 29 | else: 30 | # assume it's a --key=value argument 31 | assert arg.startswith('--') 32 | key, val = arg.split('=') 33 | key = key[2:] 34 | if key in globals(): 35 | try: 36 | # attempt to eval it it (e.g. 
if bool, number, or etc) 37 | attempt = literal_eval(val) 38 | except (SyntaxError, ValueError): 39 | # if that goes wrong, just use the string 40 | attempt = val 41 | # ensure the types match ok 42 | assert type(attempt) == type(globals()[key]) 43 | # cross fingers 44 | print(f"Overriding: {key} = {attempt}") 45 | globals()[key] = attempt 46 | else: 47 | raise ValueError(f"Unknown config key: {key}") 48 | -------------------------------------------------------------------------------- /mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init_and_lr_partial_output_logits/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 4096 2 | do 3 | for seed in 1 2 3 4 5 4 | do 5 | head_size=64 6 | n_heads=$((width / head_size)) 7 | mup_base_width=256 8 | mup_width_multiplier=$(echo "scale=8; $width/$mup_base_width" | bc -l) 9 | out_dir="mup_examples/coord_check_shakespeare_char/sp_with_mup_hidden_init_and_lr_partial_output_logits/out/width${width}_depth2_seed${seed}" 10 | mup_output_alpha=$(echo "scale=8; sqrt($mup_width_multiplier)" | bc -l) 11 | python train.py \ 12 | --out_dir=$out_dir \ 13 | --eval_interval=1 \ 14 | --log_interval=1 \ 15 | --eval_iters=1 \ 16 | --eval_only=False \ 17 | --always_save_checkpoint=False \ 18 | --never_save_checkpoint=True \ 19 | --init_from='scratch' \ 20 | --wandb_log=False \ 21 | --csv_log=True \ 22 | --dataset='shakespeare_char' \ 23 | --gradient_accumulation_steps=4 \ 24 | --batch_size=2 \ 25 | --block_size=1024 \ 26 | --n_layer=2 \ 27 | --n_head=$n_heads \ 28 | --n_embd=$width \ 29 | --dropout=0.0 \ 30 | --bias=False \ 31 | --init_std=0.02 \ 32 | --learning_rate=1e-2 \ 33 | --max_iters=10 \ 34 | --weight_decay=1e-1 \ 35 | --beta1=0.9 \ 36 | --beta2=0.95 \ 37 | --grad_clip=1.0 \ 38 | --decay_lr=False \ 39 | --mup_enabled=True \ 40 | --mup_disable_attention_scaling=True \ 41 | --mup_disable_hidden_lr_scaling=False \ 42 | --mup_width_multiplier=$mup_width_multiplier \ 43 | --mup_input_alpha=1.0 \ 44 | --mup_output_alpha=$mup_output_alpha \ 45 | --mup_enable_coord_check_logging=True \ 46 | --seed=$seed \ 47 | --backend='nccl' \ 48 | --device='mps' \ 49 | --dtype='float32' \ 50 | --compile=False 51 | done 52 | done 53 | -------------------------------------------------------------------------------- /mup_examples/mutransfer_lr_shakespeare_char/sp/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 2 | do 3 | for lr in 0.00390625 0.001953125 0.0009765625 0.00048828125 0.000244140625 0.0001220703125 0.00006103515625 0.00003051757812 0.00048828125 0.000244140625 0.0001220703125 0.00006103515625 0.00003051757812 0.00001525878906 0.000007629394531 0.000003814697266 4 | do 5 | for seed in 1 2 3 6 | do 7 | head_size=64 8 | n_heads=$((width / head_size)) 9 | out_dir="mup_examples/mutransfer_lr_shakespeare_char/sp/out/width${width}_depth2_seed${seed}_lr${lr}" 10 | python train.py \ 11 | --out_dir=$out_dir \ 12 | --eval_interval=1 \ 13 | --log_interval=1 \ 14 | --eval_iters=1 \ 15 | --eval_only=False \ 16 | --skip_val_loss=True \ 17 | --always_save_checkpoint=False \ 18 | --never_save_checkpoint=True \ 19 | --init_from='scratch' \ 20 | --wandb_log=False \ 21 | --csv_log=True \ 22 | --dataset='shakespeare_char' \ 23 | --gradient_accumulation_steps=8 \ 24 | --batch_size=1 \ 25 | --block_size=1024 \ 26 | --n_layer=2 \ 27 | --n_head=$n_heads \ 28 | --n_embd=$width \ 29 | --dropout=0.0 \ 30 | --bias=False \ 31 | --init_std=0.02 \ 
32 | --learning_rate=$lr \ 33 | --max_iters=122 \ 34 | --weight_decay=1e-1 \ 35 | --beta1=0.9 \ 36 | --beta2=0.95 \ 37 | --grad_clip=1.0 \ 38 | --decay_lr=False \ 39 | --seed=$seed \ 40 | --backend='nccl' \ 41 | --device='mps' \ 42 | --dtype='float32' \ 43 | --compile=False 44 | done 45 | done 46 | done 47 | -------------------------------------------------------------------------------- /mup_examples/mutransfer_lr_owt/sp/run.sh: -------------------------------------------------------------------------------- 1 | # Single-GPU Launching 2 | LAUNCHER=python 3 | 4 | # Multi-GPU Launching (single node) 5 | #GPU=2 6 | #LAUNCHER=torchrun --standalone --nproc_per_node=$GPU 7 | 8 | LAYERS=2 9 | 10 | for width in 256 512 1024 2048 11 | do 12 | for lr in 0.125 0.0625 0.03125 0.015625 0.0078125 0.00390625 0.001953125 0.0009765625 0.00048828125 0.000244140625 0.0001220703125 0.00006103515625 13 | do 14 | for seed in 1 2 3 15 | do 16 | head_size=64 17 | n_heads=$((width / head_size)) 18 | min_lr=$(awk "BEGIN {print $lr/10}") 19 | out_dir="mup_examples/mutransfer_lr_owt/sp/out/width${width}_depth${LAYERS}_seed${seed}_lr${lr}" 20 | $LAUNCHER train.py \ 21 | --out_dir=$out_dir \ 22 | --eval_interval=1 \ 23 | --log_interval=1 \ 24 | --eval_iters=1 \ 25 | --eval_only=False \ 26 | --skip_val_loss=True \ 27 | --always_save_checkpoint=False \ 28 | --never_save_checkpoint=True \ 29 | --init_from='scratch' \ 30 | --wandb_log=False \ 31 | --csv_log=True \ 32 | --dataset='openwebtext' \ 33 | --gradient_accumulation_steps=1 \ 34 | --batch_size=32 \ 35 | --block_size=1024 \ 36 | --n_layer=2 \ 37 | --n_head=$n_heads \ 38 | --n_embd=$width \ 39 | --dropout=0.0 \ 40 | --bias=False \ 41 | --init_std=0.02 \ 42 | --learning_rate=$lr \ 43 | --lr_decay_iters=1000 \ 44 | --min_lr=$min_lr \ 45 | --max_iters=1000 \ 46 | --weight_decay=1e-1 \ 47 | --beta1=0.9 \ 48 | --beta2=0.95 \ 49 | --grad_clip=1.0 \ 50 | --decay_lr=True \ 51 | --seed=$seed \ 52 | --backend='nccl' \ 53 | --device='cuda' \ 54 | --dtype='bfloat16' \ 55 | --compile=True 56 | done 57 | done 58 | done 59 | -------------------------------------------------------------------------------- /mup_examples/mutransfer_lr_shakespeare_char/mup/run.sh: -------------------------------------------------------------------------------- 1 | for width in 256 512 1024 2048 2 | do 3 | for lr in 0.125 0.0625 0.03125 0.015625 0.0078125 0.00390625 0.001953125 0.0009765625 0.00048828125 0.000244140625 0.0001220703125 0.00006103515625 4 | do 5 | for seed in 1 2 3 6 | do 7 | head_size=64 8 | n_heads=$((width / head_size)) 9 | mup_base_width=256 10 | mup_width_multiplier=$(echo "scale=8; $width/$mup_base_width" | bc -l) 11 | out_dir="mup_examples/mutransfer_lr_shakespeare_char/mup/out/width${width}_depth2_seed${seed}_lr${lr}" 12 | python train.py \ 13 | --out_dir=$out_dir \ 14 | --eval_interval=1 \ 15 | --log_interval=1 \ 16 | --eval_iters=1 \ 17 | --eval_only=False \ 18 | --skip_val_loss=True \ 19 | --always_save_checkpoint=False \ 20 | --never_save_checkpoint=True \ 21 | --init_from='scratch' \ 22 | --wandb_log=False \ 23 | --csv_log=True \ 24 | --dataset='shakespeare_char' \ 25 | --gradient_accumulation_steps=8\ 26 | --batch_size=1 \ 27 | --block_size=1024 \ 28 | --n_layer=2 \ 29 | --n_head=$n_heads \ 30 | --n_embd=$width \ 31 | --dropout=0.0 \ 32 | --bias=False \ 33 | --init_std=0.02 \ 34 | --learning_rate=$lr \ 35 | --max_iters=122 \ 36 | --weight_decay=1e-1 \ 37 | --beta1=0.9 \ 38 | --beta2=0.95 \ 39 | --grad_clip=1.0 \ 40 | --decay_lr=False \ 41 | --mup_enabled=True \ 42 | 
--mup_width_multiplier=$mup_width_multiplier \ 43 | --mup_input_alpha=1.0 \ 44 | --mup_output_alpha=1.0 \ 45 | --seed=$seed \ 46 | --backend='nccl' \ 47 | --device='mps' \ 48 | --dtype='float32' \ 49 | --compile=False 50 | done 51 | done 52 | done 53 | -------------------------------------------------------------------------------- /mup_examples/mutransfer_lr_owt/mup/run.sh: -------------------------------------------------------------------------------- 1 | # Single-GPU Launching 2 | LAUNCHER=python 3 | 4 | # Multi-GPU Launching (single node) 5 | #GPU=2 6 | #LAUNCHER=torchrun --standalone --nproc_per_node=$GPU 7 | 8 | LAYERS=2 9 | 10 | for width in 256 512 1024 2048 11 | do 12 | for lr in 0.125 0.0625 0.03125 0.015625 0.0078125 0.00390625 0.001953125 0.0009765625 0.00048828125 0.000244140625 0.0001220703125 0.00006103515625 13 | do 14 | for seed in 1 2 3 15 | do 16 | head_size=64 17 | n_heads=$((width / head_size)) 18 | min_lr=$(awk "BEGIN {print $lr/10}") 19 | mup_base_width=256 20 | mup_width_multiplier=$(echo "scale=8; $width/$mup_base_width" | bc -l) 21 | out_dir="mup_examples/mutransfer_lr_owt/mup/out/width${width}_depth${LAYERS}_seed${seed}_lr${lr}" 22 | $LAUNCHER train.py \ 23 | --out_dir=$out_dir \ 24 | --eval_interval=1 \ 25 | --log_interval=1 \ 26 | --eval_iters=1 \ 27 | --eval_only=False \ 28 | --skip_val_loss=True \ 29 | --always_save_checkpoint=False \ 30 | --never_save_checkpoint=True \ 31 | --init_from='scratch' \ 32 | --wandb_log=False \ 33 | --csv_log=True \ 34 | --dataset='openwebtext' \ 35 | --gradient_accumulation_steps=1 \ 36 | --batch_size=32 \ 37 | --block_size=1024 \ 38 | --n_layer=2 \ 39 | --n_head=$n_heads \ 40 | --n_embd=$width \ 41 | --dropout=0.0 \ 42 | --bias=False \ 43 | --init_std=0.02 \ 44 | --learning_rate=$lr \ 45 | --lr_decay_iters=1000 \ 46 | --min_lr=$min_lr \ 47 | --max_iters=1000 \ 48 | --weight_decay=1e-1 \ 49 | --beta1=0.9 \ 50 | --beta2=0.95 \ 51 | --grad_clip=1.0 \ 52 | --decay_lr=True \ 53 | --mup_enabled=True \ 54 | --mup_width_multiplier=$mup_width_multiplier \ 55 | --mup_input_alpha=1.0 \ 56 | --mup_output_alpha=1.0 \ 57 | --seed=$seed \ 58 | --backend='nccl' \ 59 | --device='cuda' \ 60 | --dtype='bfloat16' \ 61 | --compile=True 62 | done 63 | done 64 | done 65 | -------------------------------------------------------------------------------- /data/shakespeare_char/prepare.py: -------------------------------------------------------------------------------- 1 | """ 2 | Prepare the Shakespeare dataset for character-level language modeling. 3 | So instead of encoding with GPT-2 BPE tokens, we just map characters to ints. 4 | Will save train.bin, val.bin containing the ids, and meta.pkl containing the 5 | encoder and decoder and some other related info. 
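(sample.py later loads this meta.pkl to recover the stoi/itos mappings when encoding prompts and decoding generated samples.)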
6 | """ 7 | import os 8 | import pickle 9 | import requests 10 | import numpy as np 11 | 12 | # download the tiny shakespeare dataset 13 | input_file_path = os.path.join(os.path.dirname(__file__), 'input.txt') 14 | if not os.path.exists(input_file_path): 15 | data_url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt' 16 | with open(input_file_path, 'w') as f: 17 | f.write(requests.get(data_url).text) 18 | 19 | with open(input_file_path, 'r') as f: 20 | data = f.read() 21 | print(f"length of dataset in characters: {len(data):,}") 22 | 23 | # get all the unique characters that occur in this text 24 | chars = sorted(list(set(data))) 25 | vocab_size = len(chars) 26 | print("all the unique characters:", ''.join(chars)) 27 | print(f"vocab size: {vocab_size:,}") 28 | 29 | # create a mapping from characters to integers 30 | stoi = { ch:i for i,ch in enumerate(chars) } 31 | itos = { i:ch for i,ch in enumerate(chars) } 32 | def encode(s): 33 | return [stoi[c] for c in s] # encoder: take a string, output a list of integers 34 | def decode(l): 35 | return ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string 36 | 37 | # create the train and test splits 38 | n = len(data) 39 | train_data = data[:int(n*0.9)] 40 | val_data = data[int(n*0.9):] 41 | 42 | # encode both to integers 43 | train_ids = encode(train_data) 44 | val_ids = encode(val_data) 45 | print(f"train has {len(train_ids):,} tokens") 46 | print(f"val has {len(val_ids):,} tokens") 47 | 48 | # export to bin files 49 | train_ids = np.array(train_ids, dtype=np.uint16) 50 | val_ids = np.array(val_ids, dtype=np.uint16) 51 | train_ids.tofile(os.path.join(os.path.dirname(__file__), 'train.bin')) 52 | val_ids.tofile(os.path.join(os.path.dirname(__file__), 'val.bin')) 53 | 54 | # save the meta information as well, to help us encode/decode later 55 | meta = { 56 | 'vocab_size': vocab_size, 57 | 'itos': itos, 58 | 'stoi': stoi, 59 | } 60 | with open(os.path.join(os.path.dirname(__file__), 'meta.pkl'), 'wb') as f: 61 | pickle.dump(meta, f) 62 | 63 | # length of dataset in characters: 1115394 64 | # all the unique characters: 65 | # !$&',-.3:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz 66 | # vocab size: 65 67 | # train has 1003854 tokens 68 | # val has 111540 tokens 69 | -------------------------------------------------------------------------------- /mup_examples/README.md: -------------------------------------------------------------------------------- 1 | # Experiment Reproduction 2 | 3 | Install the minimal dataset and plotting requirements with `pip install -r requirements.txt`. We used the PyTorch NGC container for GPU-based runs, but any environment containing the dependencies from [the main README](https://github.com/EleutherAI/nanoGPT-mup?tab=readme-ov-file#install) will suffice. 4 | 5 | To download the tiny shakespeare dataset, run `python data/shakespeare_char/prepare.py`. For OpenWebText (OWT), run `python data/openwebtext/prepare.py`. 6 | 7 | 8 | # Coordinate Checks 9 | 10 | The lowest-overhead correctness check of a mutransfer implementation is a [coordinate check](https://github.com/microsoft/mup?tab=readme-ov-file#checking-correctness-of-parametrization). 
11 | 12 | To run coordinate checks in our implementation using the tiny shakespeare dataset, use the following scripts for Standard Parameterization (SP): 13 | 14 | ``` 15 | bash mup_examples/coord_check_shakespeare_char/sp/run.sh 16 | ``` 17 | 18 | And muP: 19 | 20 | ``` 21 | bash mup_examples/coord_check_shakespeare_char/mup/run.sh 22 | ``` 23 | 24 | These scripts populate the `out/` subdirectories with your coord check data, which you can then plot with `mup_examples/coord_check_shakespeare_char/plot.ipynb` 25 | 26 | 27 | # Learning Rate muTransfer 28 | 29 | To actually test transferring hyperparameters, you need to run training for a set number of steps on a chosen dataset. 30 | 31 | 1. Tiny Shakespeare is small and simple enough to see stable training loss with few iterations and small batch sizes, so we recommend it to test transfer quickly or on compute-constrained systems (e.g. laptop/desktop CPU). 32 | 2. OpenWebText is comparatively large, but more representative of the massive webcrawl-based datasets used to train most models today. 33 | 34 | The default values chosen in each `run.sh` reflect this. 35 | 36 | ## Tiny Shakespeare 37 | 38 | To sweep over seeds, model widths, and learning rates on the tiny shakespeare dataset with muP: 39 | 40 | ``` 41 | bash mup_examples/mutransfer_lr_shakespeare_char/mup/run.sh 42 | ``` 43 | 44 | and SP: 45 | 46 | ``` 47 | bash mup_examples/mutransfer_lr_shakespeare_char/sp/run.sh 48 | ``` 49 | 50 | ## OpenWebText 51 | 52 | To sweep over seeds, model widths, and learning rates on the OpenWebText (OWT) dataset with muP: 53 | 54 | ``` 55 | bash mup_examples/mutransfer_lr_owt/mup/run.sh 56 | ``` 57 | 58 | and SP: 59 | 60 | ``` 61 | bash mup_examples/mutransfer_lr_owt/sp/run.sh 62 | ``` 63 | 64 | These scripts populate the `out/` subdirectories with your train loss data, which you can then plot with `mup_examples/mutransfer_lr/plot.ipynb` -------------------------------------------------------------------------------- /data/openwebtext/prepare.py: -------------------------------------------------------------------------------- 1 | # saves the openwebtext dataset to a binary file for training. following was helpful: 2 | # https://github.com/HazyResearch/flash-attention/blob/main/training/src/datamodules/language_modeling_hf.py 3 | 4 | import os 5 | from tqdm import tqdm 6 | import numpy as np 7 | import tiktoken 8 | from datasets import load_dataset # huggingface datasets 9 | 10 | # number of workers in .map() call 11 | # good number to use is ~order number of cpu cores // 2 12 | num_proc = 8 13 | 14 | # number of workers in load_dataset() call 15 | # best number might be different from num_proc above as it also depends on NW speed.
16 | # it is better than 1 usually though 17 | num_proc_load_dataset = num_proc 18 | 19 | enc = tiktoken.get_encoding("gpt2") 20 | 21 | if __name__ == '__main__': 22 | # takes 54GB in huggingface .cache dir, about 8M documents (8,013,769) 23 | dataset = load_dataset("openwebtext", num_proc=num_proc_load_dataset) 24 | 25 | # owt by default only contains the 'train' split, so create a test split 26 | split_dataset = dataset["train"].train_test_split(test_size=0.0005, seed=2357, shuffle=True) 27 | split_dataset['val'] = split_dataset.pop('test') # rename the test split to val 28 | 29 | # this results in: 30 | # >>> split_dataset 31 | # DatasetDict({ 32 | # train: Dataset({ 33 | # features: ['text'], 34 | # num_rows: 8009762 35 | # }) 36 | # val: Dataset({ 37 | # features: ['text'], 38 | # num_rows: 4007 39 | # }) 40 | # }) 41 | 42 | # we now want to tokenize the dataset. first define the encoding function (gpt2 bpe) 43 | def process(example): 44 | ids = enc.encode_ordinary(example['text']) # encode_ordinary ignores any special tokens 45 | ids.append(enc.eot_token) # add the end of text token, e.g. 50256 for gpt2 bpe 46 | # note: I think eot should be prepended not appended... hmm. it's called "eot" though... 47 | out = {'ids': ids, 'len': len(ids)} 48 | return out 49 | 50 | # tokenize the dataset 51 | tokenized = split_dataset.map( 52 | process, 53 | remove_columns=['text'], 54 | desc="tokenizing the splits", 55 | num_proc=num_proc, 56 | ) 57 | 58 | # concatenate all the ids in each dataset into one large file we can use for training 59 | for split, dset in tokenized.items(): 60 | arr_len = np.sum(dset['len'], dtype=np.uint64) 61 | filename = os.path.join(os.path.dirname(__file__), f'{split}.bin') 62 | dtype = np.uint16 # (can do since enc.max_token_value == 50256 is < 2**16) 63 | arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,)) 64 | total_batches = 1024 65 | 66 | idx = 0 67 | for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'): 68 | # Batch together samples for faster write 69 | batch = dset.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy') 70 | arr_batch = np.concatenate(batch['ids']) 71 | # Write into mmap 72 | arr[idx : idx + len(arr_batch)] = arr_batch 73 | idx += len(arr_batch) 74 | arr.flush() 75 | 76 | # train.bin is ~17GB, val.bin ~8.5MB 77 | # train has ~9B tokens (9,035,582,198) 78 | # val has ~4M tokens (4,434,897) 79 | 80 | # to read the bin files later, e.g. with numpy: 81 | # m = np.memmap('train.bin', dtype=np.uint16, mode='r') 82 | -------------------------------------------------------------------------------- /sample.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sample from a trained model 3 | """ 4 | import os 5 | import pickle 6 | from contextlib import nullcontext 7 | import torch 8 | import tiktoken 9 | from model import GPTConfig, GPT 10 | 11 | # ----------------------------------------------------------------------------- 12 | init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl') 13 | out_dir = 'out' # ignored if init_from is not 'resume' 14 | start = "\n" # or "<|endoftext|>" or etc. 
Can also specify a file, use as: "FILE:prompt.txt" 15 | num_samples = 10 # number of samples to draw 16 | max_new_tokens = 500 # number of tokens generated in each sample 17 | temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions 18 | top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability 19 | seed = 1337 20 | device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc. 21 | dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32' or 'bfloat16' or 'float16' 22 | compile = False # use PyTorch 2.0 to compile the model to be faster 23 | exec(open('configurator.py').read()) # overrides from command line or config file 24 | # ----------------------------------------------------------------------------- 25 | 26 | torch.manual_seed(seed) 27 | torch.cuda.manual_seed(seed) 28 | torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul 29 | torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn 30 | device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast 31 | ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype] 32 | ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype) 33 | 34 | # model 35 | if init_from == 'resume': 36 | # init from a model saved in a specific directory 37 | ckpt_path = os.path.join(out_dir, 'ckpt.pt') 38 | checkpoint = torch.load(ckpt_path, map_location=device) 39 | gptconf = GPTConfig(**checkpoint['model_args']) 40 | model = GPT(gptconf) 41 | state_dict = checkpoint['model'] 42 | unwanted_prefix = '_orig_mod.' 43 | for k,v in list(state_dict.items()): 44 | if k.startswith(unwanted_prefix): 45 | state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k) 46 | model.load_state_dict(state_dict) 47 | elif init_from.startswith('gpt2'): 48 | # init from a given GPT-2 model 49 | model = GPT.from_pretrained(init_from, dict(dropout=0.0)) 50 | 51 | model.eval() 52 | model.to(device) 53 | if compile: 54 | model = torch.compile(model) # requires PyTorch 2.0 (optional) 55 | 56 | # look for the meta pickle in case it is available in the dataset folder 57 | load_meta = False 58 | if init_from == 'resume' and 'config' in checkpoint and 'dataset' in checkpoint['config']: # older checkpoints might not have these... 
59 | meta_path = os.path.join('data', checkpoint['config']['dataset'], 'meta.pkl') 60 | load_meta = os.path.exists(meta_path) 61 | if load_meta: 62 | print(f"Loading meta from {meta_path}...") 63 | with open(meta_path, 'rb') as f: 64 | meta = pickle.load(f) 65 | # TODO want to make this more general to arbitrary encoder/decoder schemes 66 | stoi, itos = meta['stoi'], meta['itos'] 67 | encode = lambda s: [stoi[c] for c in s] 68 | decode = lambda l: ''.join([itos[i] for i in l]) 69 | else: 70 | # ok let's assume gpt-2 encodings by default 71 | print("No meta.pkl found, assuming GPT-2 encodings...") 72 | enc = tiktoken.get_encoding("gpt2") 73 | encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"}) 74 | decode = lambda l: enc.decode(l) 75 | 76 | # encode the beginning of the prompt 77 | if start.startswith('FILE:'): 78 | with open(start[5:], 'r', encoding='utf-8') as f: 79 | start = f.read() 80 | start_ids = encode(start) 81 | x = (torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]) 82 | 83 | # run generation 84 | with torch.no_grad(): 85 | with ctx: 86 | for k in range(num_samples): 87 | y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k) 88 | print(decode(y[0].tolist())) 89 | print('---------------') 90 | -------------------------------------------------------------------------------- /bench.py: -------------------------------------------------------------------------------- 1 | """ 2 | A much shorter version of train.py for benchmarking 3 | """ 4 | import os 5 | from contextlib import nullcontext 6 | import numpy as np 7 | import time 8 | import torch 9 | from model import GPTConfig, GPT 10 | 11 | # ----------------------------------------------------------------------------- 12 | batch_size = 12 13 | block_size = 1024 14 | bias = False 15 | real_data = True 16 | seed = 1337 17 | device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc. 18 | dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32' or 'bfloat16' or 'float16' 19 | compile = True # use PyTorch 2.0 to compile the model to be faster 20 | profile = False # use pytorch profiler, or just simple benchmarking? 
21 | exec(open('configurator.py').read()) # overrides from command line or config file 22 | # ----------------------------------------------------------------------------- 23 | 24 | torch.manual_seed(seed) 25 | torch.cuda.manual_seed(seed) 26 | torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul 27 | torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn 28 | device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast 29 | ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype] 30 | ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype) 31 | 32 | # data loading init 33 | if real_data: 34 | dataset = 'openwebtext' 35 | data_dir = os.path.join('data', dataset) 36 | train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r') 37 | def get_batch(split): 38 | data = train_data # note ignore split in benchmarking script 39 | ix = torch.randint(len(data) - block_size, (batch_size,)) 40 | x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix]) 41 | y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix]) 42 | x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True) 43 | return x, y 44 | else: 45 | # alternatively, if fixed data is desired to not care about data loading 46 | x = torch.randint(50304, (batch_size, block_size), device=device) 47 | y = torch.randint(50304, (batch_size, block_size), device=device) 48 | get_batch = lambda split: (x, y) 49 | 50 | # model init 51 | gptconf = GPTConfig( 52 | block_size = block_size, # how far back does the model look? i.e. context size 53 | n_layer = 12, n_head = 12, n_embd = 768, # size of the model 54 | dropout = 0, # for determinism 55 | bias = bias, 56 | ) 57 | model = GPT(gptconf) 58 | model.to(device) 59 | 60 | optimizer = model.configure_optimizers(weight_decay=1e-2, learning_rate=1e-4, betas=(0.9, 0.95), device_type=device_type) 61 | 62 | if compile: 63 | print("Compiling model...") 64 | model = torch.compile(model) # pytorch 2.0 65 | 66 | if profile: 67 | # useful docs on pytorch profiler: 68 | # - tutorial https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html 69 | # - api https://pytorch.org/docs/stable/profiler.html#torch.profiler.profile 70 | wait, warmup, active = 5, 5, 5 71 | num_steps = wait + warmup + active 72 | with torch.profiler.profile( 73 | activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], 74 | schedule=torch.profiler.schedule(wait=wait, warmup=warmup, active=active, repeat=1), 75 | on_trace_ready=torch.profiler.tensorboard_trace_handler('./bench_log'), 76 | record_shapes=False, 77 | profile_memory=False, 78 | with_stack=False, # incurs an additional overhead, disable if not needed 79 | with_flops=True, 80 | with_modules=False, # only for torchscript models atm 81 | ) as prof: 82 | 83 | X, Y = get_batch('train') 84 | for k in range(num_steps): 85 | with ctx: 86 | logits, loss = model(X, Y) 87 | X, Y = get_batch('train') 88 | optimizer.zero_grad(set_to_none=True) 89 | loss.backward() 90 | optimizer.step() 91 | lossf = loss.item() 92 | print(f"{k}/{num_steps} loss: {lossf:.4f}") 93 | 94 | prof.step() # notify the profiler at end of each step 95 | 96 | else: 97 | 98 | # simple benchmarking 99 | torch.cuda.synchronize() 100 | for stage, num_steps in enumerate([10, 20]): # burnin, then benchmark 101 | t0 = 
time.time() 102 | X, Y = get_batch('train') 103 | for k in range(num_steps): 104 | with ctx: 105 | logits, loss = model(X, Y) 106 | X, Y = get_batch('train') 107 | optimizer.zero_grad(set_to_none=True) 108 | loss.backward() 109 | optimizer.step() 110 | lossf = loss.item() 111 | print(f"{k}/{num_steps} loss: {lossf:.4f}") 112 | torch.cuda.synchronize() 113 | t1 = time.time() 114 | dt = t1-t0 115 | mfu = model.estimate_mfu(batch_size * 1 * num_steps, dt) 116 | if stage == 1: 117 | print(f"time per iteration: {dt/num_steps*1000:.4f}ms, MFU: {mfu*100:.2f}%") 118 | -------------------------------------------------------------------------------- /csv_logging.py: -------------------------------------------------------------------------------- 1 | """ 2 | Authored by Gavia Gray (https://github.com/gngdb) 3 | 4 | Wrapper for wandb logging with efficient CSV logging and correct config JSON writing. 5 | The CSV structure maintains a consistent order of keys based on their first appearance, 6 | using a simple list for ordering. This ensures data integrity and allows for graceful 7 | failure and manual recovery if needed. 8 | 9 | Example usage: 10 | run = wandb.init(config=your_config) 11 | wrapper = LogWrapper(run, out_dir='path/to/output') 12 | 13 | ... 14 | # in train loop 15 | wrapper.log({"train/loss": 0.5, "train/accuracy": 0.9, "val/loss": 0.6, "val/accuracy": 0.85}) 16 | wrapper.print("Train: {loss=:.4f}, {accuracy=:.2%}", prefix="train/") 17 | wrapper.print("Val: {loss=:.4f}, {accuracy=:.2%}", prefix="val/") 18 | wrapper.step() 19 | 20 | ... 21 | # at the end of your script 22 | wrapper.close() 23 | 24 | # If the script terminates unexpectedly, you can still recover the CSV using bash: 25 | # cat path/to/output/log_header.csv.tmp path/to/output/log_data.csv.tmp > path/to/output/log.csv 26 | """ 27 | 28 | import re 29 | import os 30 | import csv 31 | import json 32 | import atexit 33 | 34 | 35 | def exists(x): return x is not None 36 | 37 | def transform_format_string(s): 38 | """ 39 | Transforms a string containing f-string-like expressions to a format 40 | compatible with str.format(). 41 | 42 | This function converts expressions like '{var=}' or '{var=:formatting}' 43 | to 'var={var}' or 'var={var:formatting}' respectively. This allows 44 | for f-string-like syntax to be used with str.format(). 45 | 46 | Args: 47 | s (str): The input string containing f-string-like expressions. 48 | 49 | Returns: 50 | str: The transformed string, compatible with str.format(). 
51 | 52 | Examples: 53 | >>> transform_format_string("Value is {x=}") 54 | "Value is x={x}" 55 | >>> transform_format_string("Formatted value is {x=:.2f}") 56 | "Formatted value is x={x:.2f}" 57 | """ 58 | pattern = r'\{(\w+)=(:.[^}]*)?\}' 59 | return re.sub(pattern, lambda m: f"{m.group(1)}={{{m.group(1)}{m.group(2) or ''}}}", s) 60 | 61 | class CSVLogWrapper: 62 | def __init__(self, logf=None, config={}, out_dir=None, flush_every: int = 100): 63 | self.logf = logf 64 | self.config = config 65 | self.log_dict = {} 66 | self.out_dir = out_dir 67 | self.csv_data_file = None 68 | self.csv_header_file = None 69 | self.csv_writer = None 70 | self.step_count = 0 71 | self.flush_every = flush_every # how often to flush; 0 = never flush mid-run 72 | self.ordered_keys = [] 73 | self.header_updated = False 74 | self.is_finalized = False 75 | self.no_sync_keyword = 'no_sync' # Keyword to prevent syncing to wandb 76 | 77 | if self.out_dir: 78 | os.makedirs(self.out_dir, exist_ok=True) 79 | self.setup_csv_writer() 80 | self.write_config() 81 | 82 | atexit.register(self.close) 83 | 84 | def setup_csv_writer(self): 85 | self.csv_data_path = os.path.join(self.out_dir, 'log_data.csv.tmp') 86 | self.csv_header_path = os.path.join(self.out_dir, 'log_header.csv.tmp') 87 | self.csv_data_file = open(self.csv_data_path, 'w', newline='') 88 | self.csv_header_file = open(self.csv_header_path, 'w', newline='') 89 | self.csv_writer = csv.writer(self.csv_data_file) 90 | 91 | def write_config(self): 92 | if self.config: 93 | config_path = os.path.join(self.out_dir, 'config.json') 94 | with open(config_path, 'w') as f: 95 | json.dump(dict(**self.config), f, indent=2) 96 | 97 | def log(self, data): 98 | self.log_dict.update(data) 99 | for key in data: 100 | if key not in self.ordered_keys: 101 | self.ordered_keys.append(key) 102 | self.header_updated = True 103 | 104 | def update_header(self): 105 | if self.header_updated: 106 | header = ['step'] + self.ordered_keys 107 | with open(self.csv_header_path, 'w', newline='') as header_file: 108 | csv.writer(header_file).writerow(header) 109 | self.header_updated = False 110 | 111 | def print(self, format_string, prefix=None): 112 | format_string = transform_format_string(format_string) 113 | 114 | if prefix: 115 | # Filter keys with the given prefix and remove the prefix 116 | filtered_dict = {k.replace(prefix, ''): v for k, v in self.log_dict.items() if k.startswith(prefix)} 117 | else: 118 | filtered_dict = self.log_dict 119 | # replace any '/' in keys with '_' 120 | filtered_dict = {k.replace('/', '_'): v for k, v in filtered_dict.items()} 121 | 122 | try: 123 | print(format_string.format(**filtered_dict)) 124 | except KeyError as e: 125 | print(f"KeyError: {e}. 
Available keys: {', '.join(filtered_dict.keys())}") 126 | raise e 127 | 128 | def step(self): 129 | if exists(self.logf) and self.log_dict: 130 | self.logf({k: v for k, v in self.log_dict.items() if self.no_sync_keyword not in k}) 131 | 132 | if self.csv_writer and self.log_dict: 133 | self.update_header() 134 | 135 | # Prepare the row data 136 | row_data = [self.step_count] + [self.log_dict.get(key, '') for key in self.ordered_keys] 137 | self.csv_writer.writerow(row_data) 138 | if self.flush_every and (self.step_count % self.flush_every == 0): 139 | self.csv_data_file.flush() 140 | 141 | self.step_count += 1 142 | self.log_dict.clear() 143 | 144 | def close(self): 145 | if self.csv_data_file: 146 | self.csv_data_file.close() 147 | 148 | self.finalize_csv() 149 | 150 | def finalize_csv(self): 151 | if self.is_finalized: 152 | return 153 | 154 | csv_final_path = os.path.join(self.out_dir, 'log.csv') 155 | 156 | with open(csv_final_path, 'w', newline='') as final_csv: 157 | # Copy header 158 | with open(self.csv_header_path, 'r') as header_file: 159 | final_csv.write(header_file.read()) 160 | 161 | # Copy data 162 | with open(self.csv_data_path, 'r') as data_file: 163 | final_csv.write(data_file.read()) 164 | self.is_finalized = True 165 | 166 | # Remove the temporary files 167 | os.remove(self.csv_header_path) 168 | os.remove(self.csv_data_path) 169 | -------------------------------------------------------------------------------- /transformer_sizing.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "### Transformer Theoretical Model\n", 9 | "\n", 10 | "This notebook stores a bunch of analysis about a Transformer, e.g. estimates the number of FLOPs, parameters, peak memory footprint, checkpoint size, etc." 
11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "from collections import OrderedDict" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": 2, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "# config_args = {\n", 29 | "# 'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params\n", 30 | "# 'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params\n", 31 | "# 'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params\n", 32 | "# 'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params\n", 33 | "# }[model_type]\n", 34 | "\n", 35 | "block_size = 1024\n", 36 | "vocab_size = 50257\n", 37 | "n_layer = 12\n", 38 | "n_head = 12\n", 39 | "n_embd = 768\n", 40 | "bias = False\n", 41 | "assert not bias, \"this notebook assumes bias=False just for simplicity\"" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 3, 47 | "metadata": {}, 48 | "outputs": [ 49 | { 50 | "name": "stdout", 51 | "output_type": "stream", 52 | "text": [ 53 | "we see: 124337664, expected: 124337664, match: True\n", 54 | "name params ratio (%) \n", 55 | "emebedding/position 786432 0.6325\n", 56 | "embedding/token 38597376 31.0424\n", 57 | "embedding 39383808 31.6749\n", 58 | "attention/ln 768 0.0006\n", 59 | "attention/kqv 1769472 1.4231\n", 60 | "attention/proj 589824 0.4744\n", 61 | "attention 2360064 1.8981\n", 62 | "mlp/ln 768 0.0006\n", 63 | "mlp/ffw 2359296 1.8975\n", 64 | "mlp/proj 2359296 1.8975\n", 65 | "mlp 4719360 3.7956\n", 66 | "block 7079424 5.6937\n", 67 | "transformer 84953088 68.3245\n", 68 | "ln_f 768 0.0006\n", 69 | "dense 0 0.0000\n", 70 | "total 124337664 100.0000\n" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "def params():\n", 76 | " \"\"\" estimates the number of parameters in the model\"\"\"\n", 77 | " out = OrderedDict()\n", 78 | "\n", 79 | " # token and position embeddings\n", 80 | " out['emebedding/position'] = n_embd * block_size\n", 81 | " out['embedding/token'] = n_embd * vocab_size\n", 82 | " out['embedding'] = out['emebedding/position'] + out['embedding/token']\n", 83 | "\n", 84 | " # attention blocks\n", 85 | " out['attention/ln'] = n_embd # note, bias=False in our LN\n", 86 | " out['attention/kqv'] = n_embd * 3*n_embd\n", 87 | " out['attention/proj'] = n_embd**2\n", 88 | " out['attention'] = out['attention/ln'] + out['attention/kqv'] + out['attention/proj']\n", 89 | "\n", 90 | " # MLP blocks\n", 91 | " ffw_size = 4*n_embd # feed forward size\n", 92 | " out['mlp/ln'] = n_embd\n", 93 | " out['mlp/ffw'] = n_embd * ffw_size\n", 94 | " out['mlp/proj'] = ffw_size * n_embd\n", 95 | " out['mlp'] = out['mlp/ln'] + out['mlp/ffw'] + out['mlp/proj']\n", 96 | " \n", 97 | " # the transformer and the rest of it\n", 98 | " out['block'] = out['attention'] + out['mlp']\n", 99 | " out['transformer'] = n_layer * out['block']\n", 100 | " out['ln_f'] = n_embd # final layernorm\n", 101 | " out['dense'] = 0 # 0 because of parameter sharing. 
This layer uses the weights from the embedding layer\n", 102 | "\n", 103 | " # total\n", 104 | " out['total'] = out['embedding'] + out['transformer'] + out['ln_f'] + out['dense']\n", 105 | "\n", 106 | " return out\n", 107 | "\n", 108 | "# compare our param count to that reported by PyTorch\n", 109 | "p = params()\n", 110 | "params_total = p['total']\n", 111 | "print(f\"we see: {params_total}, expected: {124337664}, match: {params_total == 124337664}\")\n", 112 | "# create a header\n", 113 | "print(f\"{'name':20s} {'params':10s} {'ratio (%)':10s}\")\n", 114 | "for k,v in p.items():\n", 115 | " print(f\"{k:20s} {v:10d} {v/params_total*100:10.4f}\")\n", 116 | " " 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 4, 122 | "metadata": {}, 123 | "outputs": [ 124 | { 125 | "name": "stdout", 126 | "output_type": "stream", 127 | "text": [ 128 | "est checkpoint size: 1.49 GB\n", 129 | "measured with wc -c ckpt.pt: 1542470366\n", 130 | "fluff ratio: 103.38%\n" 131 | ] 132 | } 133 | ], 134 | "source": [ 135 | "# we can now calculate the size of each checkpoint\n", 136 | "# params are stored in fp32, and the AdamW optimizer has 2 additional buffers per param for statistics\n", 137 | "params_bytes = params_total*4\n", 138 | "params_and_buffers_bytes = params_bytes + 2*params_bytes\n", 139 | "print(f\"est checkpoint size: {params_and_buffers_bytes/1e9:.2f} GB\")\n", 140 | "measured_bytes = 1542470366 # from wc -c ckpt.pt\n", 141 | "print(f\"measured with wc -c ckpt.pt: {measured_bytes}\")\n", 142 | "print(f\"fluff ratio: {measured_bytes/params_and_buffers_bytes*100:.2f}%\")" 143 | ] 144 | }, 145 | { 146 | "attachments": {}, 147 | "cell_type": "markdown", 148 | "metadata": {}, 149 | "source": [ 150 | "We can also estimate the ratio of our GPU memory that will be taken up just by the weights and the buffers inside the AdamW optimizer" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 5, 156 | "metadata": {}, 157 | "outputs": [ 158 | { 159 | "name": "stdout", 160 | "output_type": "stream", 161 | "text": [ 162 | "memory ratio taken up just for parameters: 3.73%\n" 163 | ] 164 | } 165 | ], 166 | "source": [ 167 | "gpu_memory = 40e9 # 40 GB A100 GPU, roughly\n", 168 | "print(f\"memory ratio taken up just for parameters: {params_and_buffers_bytes / gpu_memory * 100:.2f}%\")" 169 | ] 170 | }, 171 | { 172 | "attachments": {}, 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "i.e. not that much of the memory for this tiny model, most of the memory is activations (forward and backward). This of course changes dramatically for larger and larger models." 177 | ] 178 | }, 179 | { 180 | "attachments": {}, 181 | "cell_type": "markdown", 182 | "metadata": {}, 183 | "source": [ 184 | "Let's estimate FLOPs for a single forward pass." 
185 | ] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "execution_count": 6, 190 | "metadata": {}, 191 | "outputs": [ 192 | { 193 | "name": "stdout", 194 | "output_type": "stream", 195 | "text": [ 196 | "name flops ratio (%) \n", 197 | "attention/kqv 3623878656 1.2426\n", 198 | "attention/scores 1610612736 0.5522\n", 199 | "attention/reduce 1610612736 0.5522\n", 200 | "attention/proj 1207959552 0.4142\n", 201 | "attention 8053063680 2.7612\n", 202 | "mlp/ffw1 4831838208 1.6567\n", 203 | "mlp/ffw2 4831838208 1.6567\n", 204 | "mlp 9663676416 3.3135\n", 205 | "block 17716740096 6.0747\n", 206 | "transformer 212600881152 72.8963\n", 207 | "dense 79047426048 27.1037\n", 208 | "forward_total 291648307200 100.0000\n", 209 | "backward_total 583296614400 200.0000\n", 210 | "total 874944921600 300.0000\n" 211 | ] 212 | } 213 | ], 214 | "source": [ 215 | "def flops():\n", 216 | " # we only count Weight FLOPs, all other layers (LayerNorm, Softmax, etc) are effectively irrelevant\n", 217 | " # we count actual FLOPs, not MACs. Hence 2* all over the place\n", 218 | " # basically for any matrix multiply A (BxC) @ B (CxD) -> (BxD) flops are 2*B*C*D\n", 219 | "\n", 220 | " out = OrderedDict()\n", 221 | " head_size = n_embd // n_head\n", 222 | "\n", 223 | " # attention blocks\n", 224 | " # 1) the projection to key, query, values\n", 225 | " out['attention/kqv'] = 2 * block_size * (n_embd * 3*n_embd)\n", 226 | " # 2) calculating the attention scores\n", 227 | " out['attention/scores'] = 2 * block_size * block_size * n_embd\n", 228 | " # 3) the reduction of the values (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n", 229 | " out['attention/reduce'] = 2 * n_head * (block_size * block_size * head_size)\n", 230 | " # 4) the final linear projection\n", 231 | " out['attention/proj'] = 2 * block_size * (n_embd * n_embd)\n", 232 | " out['attention'] = sum(out['attention/'+k] for k in ['kqv', 'scores', 'reduce', 'proj'])\n", 233 | "\n", 234 | " # MLP blocks\n", 235 | " ffw_size = 4*n_embd # feed forward size\n", 236 | " out['mlp/ffw1'] = 2 * block_size * (n_embd * ffw_size)\n", 237 | " out['mlp/ffw2'] = 2 * block_size * (ffw_size * n_embd)\n", 238 | " out['mlp'] = out['mlp/ffw1'] + out['mlp/ffw2']\n", 239 | "\n", 240 | " # the transformer and the rest of it\n", 241 | " out['block'] = out['attention'] + out['mlp']\n", 242 | " out['transformer'] = n_layer * out['block']\n", 243 | " out['dense'] = 2 * block_size * (n_embd * vocab_size)\n", 244 | "\n", 245 | " # forward,backward,total\n", 246 | " out['forward_total'] = out['transformer'] + out['dense']\n", 247 | " out['backward_total'] = 2 * out['forward_total'] # use common estimate of bwd = 2*fwd\n", 248 | " out['total'] = out['forward_total'] + out['backward_total']\n", 249 | "\n", 250 | " return out\n", 251 | " \n", 252 | "# compare our param count to that reported by PyTorch\n", 253 | "f = flops()\n", 254 | "flops_total = f['forward_total']\n", 255 | "print(f\"{'name':20s} {'flops':14s} {'ratio (%)':10s}\")\n", 256 | "for k,v in f.items():\n", 257 | " print(f\"{k:20s} {v:14d} {v/flops_total*100:10.4f}\")\n", 258 | " " 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": 7, 264 | "metadata": {}, 265 | "outputs": [ 266 | { 267 | "name": "stdout", 268 | "output_type": "stream", 269 | "text": [ 270 | "palm_flops: 875062886400, flops: 874944921600, ratio: 1.0001\n" 271 | ] 272 | } 273 | ], 274 | "source": [ 275 | "# now here is an estimate copy pasted from the PaLM paper\n", 276 | "# this formula is often used to calculate MFU (model flops 
utilization)\n", 277 | "def palm_flops():\n", 278 | " \"\"\"estimate of the model flops following PaLM paper formula\"\"\"\n", 279 | " # non-embedding model parameters. note that we do not subtract the\n", 280 | " # embedding/token params because those are tied and get used in the last layer.\n", 281 | " N = params()['total'] - params()['emebedding/position']\n", 282 | " L, H, Q, T = n_layer, n_head, n_embd//n_head, block_size\n", 283 | " mf_per_token = 6*N + 12*L*H*Q*T\n", 284 | " mf = mf_per_token * block_size\n", 285 | " return mf\n", 286 | "\n", 287 | "print(f\"palm_flops: {palm_flops():d}, flops: {flops()['total']:d}, ratio: {palm_flops()/flops()['total']:.4f}\")" 288 | ] 289 | }, 290 | { 291 | "attachments": {}, 292 | "cell_type": "markdown", 293 | "metadata": {}, 294 | "source": [ 295 | "Ok they are quite similar, giving some confidence that my math in flops() function was ~ok. Now, A100 is cited at 312TFLOPS bfloat16 on tensor cores. So what is our model flops utilization (MFU)? I trained the model above with a batch_size of 20 and grad_accum of 5, which runs in about 755ms on a single A100 GPU. We get:" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": 8, 301 | "metadata": {}, 302 | "outputs": [ 303 | { 304 | "name": "stdout", 305 | "output_type": "stream", 306 | "text": [ 307 | "fraction of A100 used: 37.14%\n" 308 | ] 309 | } 310 | ], 311 | "source": [ 312 | "# here is what we currently roughly measure\n", 313 | "batch_size = 20 * 5 # 5 is grad_accum, so total batch size is 100\n", 314 | "measured_time = 0.755 # in seconds per iteration\n", 315 | "measured_throughput = batch_size / measured_time\n", 316 | "flops_achieved = f['total'] * measured_throughput\n", 317 | "\n", 318 | "# A100 is cited to be 312 TFLOPS of bloat16 running on tensor cores\n", 319 | "a100_flops_promised = 312e12\n", 320 | "\n", 321 | "# the fraction of the A100 that we are using:\n", 322 | "print(f\"fraction of A100 used: {flops_achieved / a100_flops_promised * 100:.2f}%\")" 323 | ] 324 | }, 325 | { 326 | "attachments": {}, 327 | "cell_type": "markdown", 328 | "metadata": {}, 329 | "source": [ 330 | "For reference, we'd prefer to be somewhere around 50%+, and not just for a single GPU but for an entire DDP run. So we still have some work to do, but at least we're within a factor of ~2X of what is achievable with this GPU." 
331 | ] 332 | }, 333 | { 334 | "cell_type": "code", 335 | "execution_count": 9, 336 | "metadata": {}, 337 | "outputs": [ 338 | { 339 | "name": "stdout", 340 | "output_type": "stream", 341 | "text": [ 342 | "time needed to train the model: 3.46 days\n" 343 | ] 344 | } 345 | ], 346 | "source": [ 347 | "# Finally let's check out the 6ND approximation as total cost of training in FLOPs\n", 348 | "model_size = params()['total'] # this is number of parameters, N\n", 349 | "tokens_num = 300e9 # 300B tokens, this is dataset size in tokens, D\n", 350 | "a100_flops = 312e12 # 312 TFLOPS\n", 351 | "assumed_mfu = 0.3 # assume this model flops utilization (take the current 37% from above and add some DDP overhead)\n", 352 | "flops_throughput = a100_flops * 8 * assumed_mfu # assume an 8XA100 node at 30% utilization\n", 353 | "flops_needed = 6 * model_size * tokens_num # 6ND\n", 354 | "time_needed_s = flops_needed / flops_throughput # in seconds\n", 355 | "print(f\"time needed to train the model: {time_needed_s/3600/24:.2f} days\")" 356 | ] 357 | }, 358 | { 359 | "attachments": {}, 360 | "cell_type": "markdown", 361 | "metadata": {}, 362 | "source": [ 363 | "This is not a bad estimate at all. I trained this model and it converged in roughly 4 days. Btw as a good reference for where 6ND comes from and some intuition around it I recommend [Dzmitry's post](https://medium.com/@dzmitrybahdanau/the-flops-calculus-of-language-model-training-3b19c1f025e4)." 364 | ] 365 | }, 366 | { 367 | "attachments": {}, 368 | "cell_type": "markdown", 369 | "metadata": {}, 370 | "source": [ 371 | "Now, FLOPs are just one constraint, the other that we have to keep a close track of is the memory bandwidth. TODO estimate LOAD/STORE costs of our model later." 372 | ] 373 | } 374 | ], 375 | "metadata": { 376 | "kernelspec": { 377 | "display_name": "pytorch2", 378 | "language": "python", 379 | "name": "python3" 380 | }, 381 | "language_info": { 382 | "codemirror_mode": { 383 | "name": "ipython", 384 | "version": 3 385 | }, 386 | "file_extension": ".py", 387 | "mimetype": "text/x-python", 388 | "name": "python", 389 | "nbconvert_exporter": "python", 390 | "pygments_lexer": "ipython3", 391 | "version": "3.10.8" 392 | }, 393 | "orig_nbformat": 4, 394 | "vscode": { 395 | "interpreter": { 396 | "hash": "7f5833218766b48e6e35e4452ee875aac0e2188d05bbe5298f2c62b79f08b222" 397 | } 398 | } 399 | }, 400 | "nbformat": 4, 401 | "nbformat_minor": 2 402 | } 403 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # nanoGPT-mup 3 | 4 | This repository is a fork of [nanoGPT](https://github.com/karpathy/nanoGPT) that provides a minimal implementation of the [maximal update parameterization](https://arxiv.org/abs/2203.03466) ([muP](https://github.com/microsoft/mup)). 5 | 6 | Branches 7 | - The [master](https://github.com/EleutherAI/nanoGPT-mup) branch acts as supplementary material for ["The Practitioner’s Guide to the Maximal Update Parameterization"](https://www.cerebras.ai/blog/the-practitioners-guide-to-the-maximal-update-parameterization). 8 | - The [supar](https://github.com/EleutherAI/nanoGPT-mup/tree/supar) branch contains a minimal implementation of sparse maximal update parameterization (SuPar) introduced in [Sparse maximal update parameterization: A holistic approach to sparse training dynamics](https://arxiv.org/abs/2405.15743). 
9 | - The [completep](https://github.com/EleutherAI/nanoGPT-mup/tree/completep) branch contains a minimal implementation of CompleteP introduced in [Don't be lazy: CompleteP enables compute-efficient deep transformers](https://arxiv.org/abs/2505.01618). 10 | 11 | The [mup_examples](https://github.com/EleutherAI/nanoGPT-mup/tree/master/mup_examples) folder contains scripts to reproduce the plots in ["The Practitioner’s Guide to the Maximal Update Parameterization"](https://www.cerebras.ai/blog/the-practitioners-guide-to-the-maximal-update-parameterization) (see [mup_examples/README.md](https://github.com/EleutherAI/nanoGPT-mup/blob/master/mup_examples/README.md) for instructions to reproduce). 12 | 13 | Each of the critical muP changes are marked with 14 | ``` 15 | ### Begin muP code ### 16 | 17 | ### End muP code ### 18 | ``` 19 | to make everything easily searchable. 20 | 21 | | Parameterization | SP | **μP** | Code | 22 | |------------------|----|----|----| 23 | | Embedding Init. Var. | $σ_{base}^2$ | $σ_{base}^2$ | | 24 | | Embedding LR | $η_{base}$ | $η_{base}$ | | 25 | | Embedding Fwd. | $x W_{\text{emb}}$ | $\mathbf{α_{input}} · x W_{\text{emb}}$ | [Code](https://github.com/EleutherAI/nanoGPT-mup/blob/bcadbc3c7a44138525eca8a799764afba7dca2b3/model.py#L208) | 26 | | Hidden Init. Var. | $σ_{base}^2$ | $σ_{base}^2 / \mathbf{m_d}$ | [Code](https://github.com/EleutherAI/nanoGPT-mup/blob/bcadbc3c7a44138525eca8a799764afba7dca2b3/model.py#L163-L169) | 27 | | Hidden LR (Adam) | $η_{base}$ | $η_{base} / \mathbf{m_d}$ | [Code](https://github.com/EleutherAI/nanoGPT-mup/blob/bcadbc3c7a44138525eca8a799764afba7dca2b3/model.py#L306-L329) | 28 | | Output Logit Fwd. | $x W_{\text{emb}}^\top$ | $\mathbf{α_{output}} · x W_{\text{emb}}^\top / \mathbf{m_d}$ | [Code](https://github.com/EleutherAI/nanoGPT-mup/blob/bcadbc3c7a44138525eca8a799764afba7dca2b3/model.py#L219) | 29 | | Attention logits | $Q^\top K / \sqrt{d_{\text{head}}}$ | $Q^\top K / \mathbf{d_{\text{head}}}$ | [Code](https://github.com/EleutherAI/nanoGPT-mup/blob/bcadbc3c7a44138525eca8a799764afba7dca2b3/model.py#L65) | 30 | 31 | 32 | ## Implementation Validation 33 | 34 | ### Coordinate Checks 35 | 36 | Standard Parameterization: 37 | 38 | SP 39 | 40 | muTransfer: 41 | 42 | muP 43 | 44 | 45 | ### Learning Rate muTransfer 46 | 47 | **Tiny Shakespeare** | **OpenWebText** 48 | :-------------------------:|:-------------------------: 49 | mup-shakespeare | mup-owt 50 | 51 | 52 | ## Citation 53 | 54 | If ["The Practitioner’s Guide to the Maximal Update Parameterization"](https://www.cerebras.ai/blog/the-practitioners-guide-to-the-maximal-update-parameterization) or this repository was useful to you, please cite: 55 | ``` 56 | @misc{cerebras2024mupguide, 57 | author = {Dey, Nolan and Anthony, Quentin and Hestness, Joel}, 58 | title = {{The practitioner’s guide to the maximal update parameterization}}, 59 | month = September, 60 | year = 2024, 61 | howpublished = {\url{https://www.cerebras.ai/blog/the-practitioners-guide-to-the-maximal-update-parameterization}}, 62 | url = \url{https://www.cerebras.ai/blog/the-practitioners-guide-to-the-maximal-update-parameterization}, 63 | } 64 | ``` 65 | 66 | # nanoGPT (Original README) 67 | 68 | ![nanoGPT](assets/nanogpt.jpg) 69 | 70 | The simplest, fastest repository for training/finetuning medium-sized GPTs. It is a rewrite of [minGPT](https://github.com/karpathy/minGPT) that prioritizes teeth over education. 
Still under active development, but currently the file `train.py` reproduces GPT-2 (124M) on OpenWebText, running on a single 8XA100 40GB node in about 4 days of training. The code itself is plain and readable: `train.py` is a ~300-line boilerplate training loop and `model.py` a ~300-line GPT model definition, which can optionally load the GPT-2 weights from OpenAI. That's it. 71 | 72 | ![repro124m](assets/gpt2_124M_loss.png) 73 | 74 | Because the code is so simple, it is very easy to hack to your needs, train new models from scratch, or finetune pretrained checkpoints (e.g. biggest one currently available as a starting point would be the GPT-2 1.3B model from OpenAI). 75 | 76 | ## install 77 | 78 | ``` 79 | pip install torch numpy transformers datasets tiktoken wandb tqdm 80 | ``` 81 | 82 | Dependencies: 83 | 84 | - [pytorch](https://pytorch.org) <3 85 | - [numpy](https://numpy.org/install/) <3 86 | - `transformers` for huggingface transformers <3 (to load GPT-2 checkpoints) 87 | - `datasets` for huggingface datasets <3 (if you want to download + preprocess OpenWebText) 88 | - `tiktoken` for OpenAI's fast BPE code <3 89 | - `wandb` for optional logging <3 90 | - `tqdm` for progress bars <3 91 | 92 | ## quick start 93 | 94 | If you are not a deep learning professional and you just want to feel the magic and get your feet wet, the fastest way to get started is to train a character-level GPT on the works of Shakespeare. First, we download it as a single (1MB) file and turn it from raw text into one large stream of integers: 95 | 96 | ```sh 97 | python data/shakespeare_char/prepare.py 98 | ``` 99 | 100 | This creates a `train.bin` and `val.bin` in that data directory. Now it is time to train your GPT. The size of it very much depends on the computational resources of your system: 101 | 102 | **I have a GPU**. Great, we can quickly train a baby GPT with the settings provided in the [config/train_shakespeare_char.py](config/train_shakespeare_char.py) config file: 103 | 104 | ```sh 105 | python train.py config/train_shakespeare_char.py 106 | ``` 107 | 108 | If you peek inside it, you'll see that we're training a GPT with a context size of up to 256 characters, 384 feature channels, and it is a 6-layer Transformer with 6 heads in each layer. On one A100 GPU this training run takes about 3 minutes and the best validation loss is 1.4697. Based on the configuration, the model checkpoints are being written into the `--out_dir` directory `out-shakespeare-char`. So once the training finishes we can sample from the best model by pointing the sampling script at this directory: 109 | 110 | ```sh 111 | python sample.py --out_dir=out-shakespeare-char 112 | ``` 113 | 114 | This generates a few samples, for example: 115 | 116 | ``` 117 | ANGELO: 118 | And cowards it be strawn to my bed, 119 | And thrust the gates of my threats, 120 | Because he that ale away, and hang'd 121 | An one with him. 122 | 123 | DUKE VINCENTIO: 124 | I thank your eyes against it. 125 | 126 | DUKE VINCENTIO: 127 | Then will answer him to save the malm: 128 | And what have you tyrannous shall do this? 129 | 130 | DUKE VINCENTIO: 131 | If you have done evils of all disposition 132 | To end his power, the day of thrust for a common men 133 | That I leave, to fight with over-liking 134 | Hasting in a roseman. 135 | ``` 136 | 137 | lol `¯\_(ツ)_/¯`. Not bad for a character-level model after 3 minutes of training on a GPU. 
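
If a training run gets interrupted partway through, it does not have to start over: `train.py` also accepts `--init_from=resume`, which reloads the `ckpt.pt` checkpoint from the output directory and continues from the saved iteration. A minimal sketch, assuming the default output directory from the config above:

```sh
python train.py config/train_shakespeare_char.py --init_from=resume
```
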
Better results are quite likely obtainable by instead finetuning a pretrained GPT-2 model on this dataset (see finetuning section later). 138 | 139 | **I only have a macbook** (or other cheap computer). No worries, we can still train a GPT but we want to dial things down a notch. I recommend getting the bleeding edge PyTorch nightly ([select it here](https://pytorch.org/get-started/locally/) when installing) as it is currently quite likely to make your code more efficient. But even without it, a simple train run could look as follows: 140 | 141 | ```sh 142 | python train.py config/train_shakespeare_char.py --device=cpu --compile=False --eval_iters=20 --log_interval=1 --block_size=64 --batch_size=12 --n_layer=4 --n_head=4 --n_embd=128 --max_iters=2000 --lr_decay_iters=2000 --dropout=0.0 143 | ``` 144 | 145 | Here, since we are running on CPU instead of GPU we must set both `--device=cpu` and also turn off PyTorch 2.0 compile with `--compile=False`. Then when we evaluate we get a bit more noisy but faster estimate (`--eval_iters=20`, down from 200), our context size is only 64 characters instead of 256, and the batch size only 12 examples per iteration, not 64. We'll also use a much smaller Transformer (4 layers, 4 heads, 128 embedding size), and decrease the number of iterations to 2000 (and correspondingly usually decay the learning rate to around max_iters with `--lr_decay_iters`). Because our network is so small we also ease down on regularization (`--dropout=0.0`). This still runs in about ~3 minutes, but gets us a loss of only 1.88 and therefore also worse samples, but it's still good fun: 146 | 147 | ```sh 148 | python sample.py --out_dir=out-shakespeare-char --device=cpu 149 | ``` 150 | Generates samples like this: 151 | 152 | ``` 153 | GLEORKEN VINGHARD III: 154 | Whell's the couse, the came light gacks, 155 | And the for mought you in Aut fries the not high shee 156 | bot thou the sought bechive in that to doth groan you, 157 | No relving thee post mose the wear 158 | ``` 159 | 160 | Not bad for ~3 minutes on a CPU, for a hint of the right character gestalt. If you're willing to wait longer, feel free to tune the hyperparameters, increase the size of the network, the context length (`--block_size`), the length of training, etc. 161 | 162 | Finally, on Apple Silicon Macbooks and with a recent PyTorch version make sure to add `--device=mps` (short for "Metal Performance Shaders"); PyTorch then uses the on-chip GPU that can *significantly* accelerate training (2-3X) and allow you to use larger networks. See [Issue 28](https://github.com/karpathy/nanoGPT/issues/28) for more. 163 | 164 | ## reproducing GPT-2 165 | 166 | A more serious deep learning professional may be more interested in reproducing GPT-2 results. So here we go - we first tokenize the dataset, in this case the [OpenWebText](https://openwebtext2.readthedocs.io/en/latest/), an open reproduction of OpenAI's (private) WebText: 167 | 168 | ```sh 169 | python data/openwebtext/prepare.py 170 | ``` 171 | 172 | This downloads and tokenizes the [OpenWebText](https://huggingface.co/datasets/openwebtext) dataset. It will create a `train.bin` and `val.bin` which holds the GPT2 BPE token ids in one sequence, stored as raw uint16 bytes. Then we're ready to kick off training. 
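
Before kicking off a long run it can be reassuring to peek at what `prepare.py` produced. The `.bin` files are just flat arrays of uint16 token ids, the same format `train.py` later reads back with `np.memmap`; a small illustrative snippet (not part of the repo) to inspect them:

```python
import numpy as np
import tiktoken

enc = tiktoken.get_encoding("gpt2")  # the GPT-2 BPE tokenizer used to build the .bin files
data = np.memmap("data/openwebtext/train.bin", dtype=np.uint16, mode="r")
print(f"{len(data):,} tokens")
print(enc.decode(data[:64].tolist()))  # decode the first few tokens back into text
```
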
To reproduce GPT-2 (124M) you'll want at least an 8X A100 40GB node and run: 173 | 174 | ```sh 175 | torchrun --standalone --nproc_per_node=8 train.py config/train_gpt2.py 176 | ``` 177 | 178 | This will run for about 4 days using PyTorch Distributed Data Parallel (DDP) and go down to loss of ~2.85. Now, a GPT-2 model just evaluated on OWT gets a val loss of about 3.11, but if you finetune it it will come down to ~2.85 territory (due to an apparent domain gap), making the two models ~match. 179 | 180 | If you're in a cluster environment and you are blessed with multiple GPU nodes you can make GPU go brrrr e.g. across 2 nodes like: 181 | 182 | ```sh 183 | # Run on the first (master) node with example IP 123.456.123.456: 184 | torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=123.456.123.456 --master_port=1234 train.py 185 | # Run on the worker node: 186 | torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=123.456.123.456 --master_port=1234 train.py 187 | ``` 188 | 189 | It is a good idea to benchmark your interconnect (e.g. iperf3). In particular, if you don't have Infiniband then also prepend `NCCL_IB_DISABLE=1` to the above launches. Your multinode training will work, but most likely _crawl_. By default checkpoints are periodically written to the `--out_dir`. We can sample from the model by simply `python sample.py`. 190 | 191 | Finally, to train on a single GPU simply run the `python train.py` script. Have a look at all of its args, the script tries to be very readable, hackable and transparent. You'll most likely want to tune a number of those variables depending on your needs. 192 | 193 | ## baselines 194 | 195 | OpenAI GPT-2 checkpoints allow us to get some baselines in place for openwebtext. We can get the numbers as follows: 196 | 197 | ```sh 198 | $ python train.py config/eval_gpt2.py 199 | $ python train.py config/eval_gpt2_medium.py 200 | $ python train.py config/eval_gpt2_large.py 201 | $ python train.py config/eval_gpt2_xl.py 202 | ``` 203 | 204 | and observe the following losses on train and val: 205 | 206 | | model | params | train loss | val loss | 207 | | ------| ------ | ---------- | -------- | 208 | | gpt2 | 124M | 3.11 | 3.12 | 209 | | gpt2-medium | 350M | 2.85 | 2.84 | 210 | | gpt2-large | 774M | 2.66 | 2.67 | 211 | | gpt2-xl | 1558M | 2.56 | 2.54 | 212 | 213 | However, we have to note that GPT-2 was trained on (closed, never released) WebText, while OpenWebText is just a best-effort open reproduction of this dataset. This means there is a dataset domain gap. Indeed, taking the GPT-2 (124M) checkpoint and finetuning on OWT directly for a while reaches loss down to ~2.85. This then becomes the more appropriate baseline w.r.t. reproduction. 214 | 215 | ## finetuning 216 | 217 | Finetuning is no different than training, we just make sure to initialize from a pretrained model and train with a smaller learning rate. For an example of how to finetune a GPT on new text go to `data/shakespeare` and run `prepare.py` to download the tiny shakespeare dataset and render it into a `train.bin` and `val.bin`, using the OpenAI BPE tokenizer from GPT-2. Unlike OpenWebText this will run in seconds. Finetuning can take very little time, e.g. on a single GPU just a few minutes. Run an example finetuning like: 218 | 219 | ```sh 220 | python train.py config/finetune_shakespeare.py 221 | ``` 222 | 223 | This will load the config parameter overrides in `config/finetune_shakespeare.py` (I didn't tune them much though). 
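
These config files are plain Python that simply reassigns the defaults declared at the top of `train.py` (they are applied through `configurator.py`, the same mechanism that handles `--key=value` overrides on the command line). A finetuning config therefore boils down to a handful of assignments, roughly along these lines (illustrative values only, not the exact contents of `config/finetune_shakespeare.py`):

```python
# illustrative finetuning overrides; see config/finetune_shakespeare.py for the real settings
out_dir = 'out-shakespeare'
dataset = 'shakespeare'
init_from = 'gpt2'       # start from a pretrained OpenAI GPT-2 checkpoint
learning_rate = 3e-5     # much smaller than the pretraining learning rate
max_iters = 200          # far fewer iterations than pretraining
```
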
Basically, we initialize from a GPT2 checkpoint with `init_from` and train as normal, except shorter and with a small learning rate. If you're running out of memory try decreasing the model size (they are `{'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}`) or possibly decreasing the `block_size` (context length). The best checkpoint (lowest validation loss) will be in the `out_dir` directory, e.g. in `out-shakespeare` by default, per the config file. You can then run the code in `sample.py --out_dir=out-shakespeare`: 224 | 225 | ``` 226 | THEODORE: 227 | Thou shalt sell me to the highest bidder: if I die, 228 | I sell thee to the first; if I go mad, 229 | I sell thee to the second; if I 230 | lie, I sell thee to the third; if I slay, 231 | I sell thee to the fourth: so buy or sell, 232 | I tell thee again, thou shalt not sell my 233 | possession. 234 | 235 | JULIET: 236 | And if thou steal, thou shalt not sell thyself. 237 | 238 | THEODORE: 239 | I do not steal; I sell the stolen goods. 240 | 241 | THEODORE: 242 | Thou know'st not what thou sell'st; thou, a woman, 243 | Thou art ever a victim, a thing of no worth: 244 | Thou hast no right, no right, but to be sold. 245 | ``` 246 | 247 | Whoa there, GPT, entering some dark place over there. I didn't really tune the hyperparameters in the config too much, feel free to try! 248 | 249 | ## sampling / inference 250 | 251 | Use the script `sample.py` to sample either from pre-trained GPT-2 models released by OpenAI, or from a model you trained yourself. For example, here is a way to sample from the largest available `gpt2-xl` model: 252 | 253 | ```sh 254 | python sample.py \ 255 | --init_from=gpt2-xl \ 256 | --start="What is the answer to life, the universe, and everything?" \ 257 | --num_samples=5 --max_new_tokens=100 258 | ``` 259 | 260 | If you'd like to sample from a model you trained, use the `--out_dir` to point the code appropriately. You can also prompt the model with some text from a file, e.g. ```python sample.py --start=FILE:prompt.txt```. 261 | 262 | ## efficiency notes 263 | 264 | For simple model benchmarking and profiling, `bench.py` might be useful. It's identical to what happens in the meat of the training loop of `train.py`, but omits much of the other complexities. 265 | 266 | Note that the code by default uses [PyTorch 2.0](https://pytorch.org/get-started/pytorch-2.0/). At the time of writing (Dec 29, 2022) this makes `torch.compile()` available in the nightly release. The improvement from the one line of code is noticeable, e.g. cutting down iteration time from ~250ms / iter to 135ms / iter. Nice work PyTorch team! 267 | 268 | ## todos 269 | 270 | - Investigate and add FSDP instead of DDP 271 | - Eval zero-shot perplexities on standard evals (e.g. LAMBADA? HELM? etc.) 272 | - Finetune the finetuning script, I think the hyperparams are not great 273 | - Schedule for linear batch size increase during training 274 | - Incorporate other embeddings (rotary, alibi) 275 | - Separate out the optim buffers from model params in checkpoints I think 276 | - Additional logging around network health (e.g. gradient clip events, magnitudes) 277 | - Few more investigations around better init etc. 278 | 279 | ## troubleshooting 280 | 281 | Note that by default this repo uses PyTorch 2.0 (i.e. `torch.compile`). This is fairly new and experimental, and not yet available on all platforms (e.g. Windows). If you're running into related error messages try to disable this by adding `--compile=False` flag. 
This will slow down the code but at least it will run. 282 | 283 | For some context on this repository, GPT, and language modeling it might be helpful to watch my [Zero To Hero series](https://karpathy.ai/zero-to-hero.html). Specifically, the [GPT video](https://www.youtube.com/watch?v=kCc8FmEb1nY) is popular if you have some prior language modeling context. 284 | 285 | For more questions/discussions feel free to stop by **#nanoGPT** on Discord: 286 | 287 | [![](https://dcbadge.vercel.app/api/server/3zy8kqD9Cp?compact=true&style=flat)](https://discord.gg/3zy8kqD9Cp) 288 | 289 | ## acknowledgements 290 | 291 | All nanoGPT experiments are powered by GPUs on [Lambda labs](https://lambdalabs.com), my favorite Cloud GPU provider. Thank you Lambda labs for sponsoring nanoGPT! 292 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | """ 2 | This training script can be run both on a single gpu in debug mode, 3 | and also in a larger training run with distributed data parallel (ddp). 4 | 5 | To run on a single GPU, example: 6 | $ python train.py --batch_size=32 --compile=False 7 | 8 | To run with DDP on 4 gpus on 1 node, example: 9 | $ torchrun --standalone --nproc_per_node=4 train.py 10 | 11 | To run with DDP on 4 gpus across 2 nodes, example: 12 | - Run on the first (master) node with example IP 123.456.123.456: 13 | $ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=123.456.123.456 --master_port=1234 train.py 14 | - Run on the worker node: 15 | $ torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=123.456.123.456 --master_port=1234 train.py 16 | (If your cluster does not have Infiniband interconnect prepend NCCL_IB_DISABLE=1) 17 | """ 18 | 19 | import os 20 | import time 21 | import math 22 | import pickle 23 | from contextlib import nullcontext 24 | from functools import partial 25 | 26 | import numpy as np 27 | import torch 28 | from torch.nn.parallel import DistributedDataParallel as DDP 29 | from torch.distributed import init_process_group, destroy_process_group 30 | 31 | from model import GPTConfig, GPT 32 | 33 | # ----------------------------------------------------------------------------- 34 | # default config values designed to train a gpt2 (124M) on OpenWebText 35 | # I/O 36 | out_dir = 'out' 37 | eval_interval = 2000 38 | log_interval = 1 39 | eval_iters = 200 40 | eval_only = False # if True, script exits right after the first eval 41 | skip_val_loss = False # If True, will only measure train loss 42 | always_save_checkpoint = True # if True, always save a checkpoint after each eval 43 | never_save_checkpoint = False # if True, never save a checkpoint 44 | init_from = 'scratch' # 'scratch' or 'resume' or 'gpt2*' 45 | # wandb logging 46 | wandb_log = False # disabled by default 47 | wandb_project = 'owt' 48 | wandb_run_name = 'gpt2' # 'run' + str(time.time()) 49 | # csv logging 50 | csv_log = False # If enabled, logs stats to a csv file 51 | flush_every = 100 # how often to flush, set to 0 to only flush on close 52 | # data 53 | dataset = 'openwebtext' 54 | gradient_accumulation_steps = 5 * 8 # used to simulate larger batch sizes 55 | batch_size = 12 # if gradient_accumulation_steps > 1, this is the micro-batch size 56 | block_size = 1024 57 | # model 58 | n_layer = 12 59 | n_head = 12 60 | n_embd = 768 61 | dropout = 0.0 # for pretraining 0 is good, for finetuning try 0.1+ 62 | bias = False # do we use bias inside LayerNorm and 
Linear layers? 63 | init_std = 0.02 # Initialization standard deviation for weights 64 | # adamw optimizer 65 | learning_rate = 6e-4 # max learning rate 66 | max_iters = 600000 # total number of training iterations 67 | weight_decay = 1e-1 68 | beta1 = 0.9 69 | beta2 = 0.95 70 | grad_clip = 1.0 # clip gradients at this value, or disable if == 0.0 71 | # learning rate decay settings 72 | decay_lr = True # whether to decay the learning rate 73 | warmup_iters = 2000 # how many steps to warm up for 74 | lr_decay_iters = 600000 # should be ~= max_iters per Chinchilla 75 | min_lr = 6e-5 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla 76 | # mup settings 77 | mup_enabled = False # Whether to use muP. If False then all other mup variables are ignored 78 | mup_disable_attention_scaling = False # Uses 1/sqrt(d_head) attn scaling instead of 1/d_head (Only needed for the step-by-step coord check in the blog) 79 | mup_disable_hidden_lr_scaling = False # Disables muP hidden LR adjustment (Only needed for the step-by-step coord check in the blog) 80 | mup_width_multiplier = 1.0 # mup_width_multiplier = width / base_width where base_width is typically 256 81 | mup_input_alpha = 1.0 # Optional tunable multiplier applied to input embedding forward pass output 82 | mup_output_alpha = 1.0 # Optional tunable multiplier applied to output unembedding forward pass output 83 | mup_enable_coord_check_logging = False # If True will track the output.abs().mean() of various layers throughout training 84 | # seed 85 | seed = 1337 86 | # DDP settings 87 | backend = 'nccl' # 'nccl', 'gloo', etc. 88 | # system 89 | device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks 90 | dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32', 'bfloat16', or 'float16', the latter will auto implement a GradScaler 91 | compile = True # use PyTorch 2.0 to compile the model to be faster 92 | # ----------------------------------------------------------------------------- 93 | config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))] 94 | exec(open('configurator.py').read()) # overrides from command line or config file 95 | config = {k: globals()[k] for k in config_keys} # will be useful for logging 96 | # ----------------------------------------------------------------------------- 97 | 98 | assert not (never_save_checkpoint and always_save_checkpoint) 99 | 100 | # various inits, derived attributes, I/O setup 101 | ddp = int(os.environ.get('RANK', -1)) != -1 # is this a ddp run? 102 | if ddp: 103 | init_process_group(backend=backend) 104 | ddp_rank = int(os.environ['RANK']) 105 | ddp_local_rank = int(os.environ['LOCAL_RANK']) 106 | ddp_world_size = int(os.environ['WORLD_SIZE']) 107 | device = f'cuda:{ddp_local_rank}' 108 | torch.cuda.set_device(device) 109 | master_process = ddp_rank == 0 # this process will do logging, checkpointing etc. 
110 | seed_offset = ddp_rank # each process gets a different seed 111 | # world_size number of processes will be training simultaneously, so we can scale 112 | # down the desired gradient accumulation iterations per process proportionally 113 | assert gradient_accumulation_steps % ddp_world_size == 0 114 | gradient_accumulation_steps //= ddp_world_size 115 | else: 116 | # if not ddp, we are running on a single gpu, and one process 117 | master_process = True 118 | seed_offset = 0 119 | ddp_world_size = 1 120 | tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * block_size 121 | print(f"tokens per iteration will be: {tokens_per_iter:,}") 122 | 123 | if master_process: 124 | os.makedirs(out_dir, exist_ok=True) 125 | torch.manual_seed(seed + seed_offset) 126 | torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul 127 | torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn 128 | device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast 129 | # note: float16 data type will automatically use a GradScaler 130 | ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype] 131 | ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype) 132 | 133 | # poor man's data loader 134 | data_dir = os.path.join('data', dataset) 135 | def get_batch(split): 136 | # We recreate np.memmap every batch to avoid a memory leak, as per 137 | # https://stackoverflow.com/questions/45132940/numpy-memmap-memory-usage-want-to-iterate-once/61472122#61472122 138 | if split == 'train': 139 | data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r') 140 | else: 141 | data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r') 142 | ix = torch.randint(len(data) - block_size, (batch_size,)) 143 | x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix]) 144 | y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix]) 145 | if device_type == 'cuda': 146 | # pin arrays x,y, which allows us to move them to GPU asynchronously (non_blocking=True) 147 | x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True) 148 | else: 149 | x, y = x.to(device), y.to(device) 150 | return x, y 151 | 152 | # init these up here, can override if init_from='resume' (i.e. 
from a checkpoint) 153 | iter_num = 0 154 | best_val_loss = 1e9 155 | 156 | # attempt to derive vocab_size from the dataset 157 | meta_path = os.path.join(data_dir, 'meta.pkl') 158 | meta_vocab_size = None 159 | if os.path.exists(meta_path): 160 | with open(meta_path, 'rb') as f: 161 | meta = pickle.load(f) 162 | meta_vocab_size = meta['vocab_size'] 163 | print(f"found vocab_size = {meta_vocab_size} (inside {meta_path})") 164 | 165 | # model init 166 | model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=block_size, 167 | bias=bias, vocab_size=None, dropout=dropout, mup_enabled=mup_enabled, 168 | mup_disable_attention_scaling=mup_disable_attention_scaling, 169 | mup_disable_hidden_lr_scaling=mup_disable_hidden_lr_scaling, 170 | mup_width_multiplier=mup_width_multiplier, mup_input_alpha=mup_input_alpha, 171 | mup_output_alpha=mup_output_alpha) # start with model_args from command line 172 | 173 | if init_from == 'scratch': 174 | # init a new model from scratch 175 | print("Initializing a new model from scratch") 176 | # determine the vocab size we'll use for from-scratch training 177 | if meta_vocab_size is None: 178 | print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)") 179 | model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304 180 | gptconf = GPTConfig(**model_args) 181 | model = GPT(gptconf) 182 | elif init_from == 'resume': 183 | print(f"Resuming training from {out_dir}") 184 | # resume training from a checkpoint. 185 | ckpt_path = os.path.join(out_dir, 'ckpt.pt') 186 | checkpoint = torch.load(ckpt_path, map_location=device) 187 | checkpoint_model_args = checkpoint['model_args'] 188 | # force these config attributes to be equal otherwise we can't even resume training 189 | # the rest of the attributes (e.g. dropout) can stay as desired from command line 190 | for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']: 191 | model_args[k] = checkpoint_model_args[k] 192 | # create the model 193 | gptconf = GPTConfig(**model_args) 194 | model = GPT(gptconf) 195 | state_dict = checkpoint['model'] 196 | # fix the keys of the state dictionary :( 197 | # honestly no idea how checkpoints sometimes get this prefix, have to debug more 198 | unwanted_prefix = '_orig_mod.' 199 | for k,v in list(state_dict.items()): 200 | if k.startswith(unwanted_prefix): 201 | state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k) 202 | model.load_state_dict(state_dict) 203 | iter_num = checkpoint['iter_num'] 204 | best_val_loss = checkpoint['best_val_loss'] 205 | elif init_from.startswith('gpt2'): 206 | print(f"Initializing from OpenAI GPT-2 weights: {init_from}") 207 | # initialize from OpenAI GPT-2 weights 208 | override_args = dict(dropout=dropout) 209 | model = GPT.from_pretrained(init_from, override_args) 210 | # read off the created config params, so we can store them into checkpoint correctly 211 | for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']: 212 | model_args[k] = getattr(model.config, k) 213 | # crop down the model block size if desired, using model surgery 214 | if block_size < model.config.block_size: 215 | model.crop_block_size(block_size) 216 | model_args['block_size'] = block_size # so that the checkpoint will have the right value 217 | model.to(device) 218 | 219 | # initialize a GradScaler. 
If enabled=False scaler is a no-op 220 | scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16')) 221 | 222 | # optimizer 223 | optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type) 224 | if init_from == 'resume': 225 | optimizer.load_state_dict(checkpoint['optimizer']) 226 | checkpoint = None # free up memory 227 | 228 | # compile the model 229 | if compile: 230 | print("compiling the model... (takes a ~minute)") 231 | unoptimized_model = model 232 | model = torch.compile(model) # requires PyTorch 2.0 233 | 234 | # wrap model into DDP container 235 | if ddp: 236 | model = DDP(model, device_ids=[ddp_local_rank]) 237 | 238 | # helps estimate an arbitrarily accurate loss over either split using many batches 239 | @torch.no_grad() 240 | def estimate_loss(): 241 | out = {} 242 | model.eval() 243 | splits = ['train'] if skip_val_loss else ['train', 'val'] 244 | for split in splits: 245 | losses = torch.zeros(eval_iters) 246 | for k in range(eval_iters): 247 | X, Y = get_batch(split) 248 | with ctx: 249 | logits, loss = model(X, Y) 250 | losses[k] = loss.item() 251 | out[split] = losses.mean().item() 252 | if skip_val_loss: 253 | out['val'] = -1 254 | model.train() 255 | return out 256 | 257 | # learning rate decay scheduler (cosine with warmup) 258 | def get_lr(it): 259 | # 1) linear warmup for warmup_iters steps 260 | if it < warmup_iters: 261 | return learning_rate * it / warmup_iters 262 | # 2) if it > lr_decay_iters, return min learning rate 263 | if it > lr_decay_iters: 264 | return min_lr 265 | # 3) in between, use cosine decay down to min learning rate 266 | decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters) 267 | assert 0 <= decay_ratio <= 1 268 | coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1 269 | return min_lr + coeff * (learning_rate - min_lr) 270 | 271 | # logging 272 | if master_process: 273 | if wandb_log: 274 | import wandb 275 | wandb_run = wandb.init(project=wandb_project, name=wandb_run_name, config=config) 276 | if csv_log: 277 | from csv_logging import CSVLogWrapper 278 | def log(log_dict): 279 | pass 280 | csv_logger = CSVLogWrapper(log, config=config, out_dir=out_dir, flush_every=flush_every) 281 | 282 | # training loop 283 | X, Y = get_batch('train') # fetch the very first batch 284 | t0 = time.time() 285 | local_iter_num = 0 # number of iterations in the lifetime of this process 286 | raw_model = model.module if ddp else model # unwrap DDP container if needed 287 | running_mfu = -1.0 288 | coord_check_dict = None 289 | while True: 290 | 291 | # determine and set the learning rate for this iteration 292 | lr = get_lr(iter_num) if decay_lr else learning_rate 293 | for param_group in optimizer.param_groups: 294 | param_group['lr'] = lr * param_group.get('lr_scale', 1.0) 295 | 296 | # evaluate the loss on train/val sets and write checkpoints 297 | if iter_num % eval_interval == 0 and master_process: 298 | losses = estimate_loss() 299 | if np.isnan(losses['train']): 300 | raise Exception('NaN loss') 301 | print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}") 302 | log_dict = { 303 | "iter": iter_num, 304 | "train/loss": losses['train'], 305 | "val/loss": losses['val'], 306 | "lr": lr, 307 | "mfu": running_mfu*100, # convert to percentage 308 | } 309 | if mup_enable_coord_check_logging and coord_check_dict is not None: 310 | for key in coord_check_dict: 311 | log_dict[key + '_act_abs_mean'] = np.mean(coord_check_dict[key]) 312 | if wandb_log: 313 
| wandb_run.log(log_dict) 314 | if csv_log: 315 | csv_logger.log(log_dict) 316 | csv_logger.step() 317 | if (not never_save_checkpoint) and (losses['val'] < best_val_loss or always_save_checkpoint): 318 | best_val_loss = losses['val'] 319 | if iter_num > 0: 320 | checkpoint = { 321 | 'model': raw_model.state_dict(), 322 | 'optimizer': optimizer.state_dict(), 323 | 'model_args': model_args, 324 | 'iter_num': iter_num, 325 | 'best_val_loss': best_val_loss, 326 | 'config': config, 327 | } 328 | print(f"saving checkpoint to {out_dir}") 329 | torch.save(checkpoint, os.path.join(out_dir, 'ckpt.pt')) 330 | if iter_num == 0 and eval_only: 331 | break 332 | 333 | if mup_enable_coord_check_logging: 334 | coord_check_dict = { 335 | 'token_embedding': [], 336 | 'attn': [], 337 | 'mlp': [], 338 | 'lm_head': [], 339 | } 340 | def hook(module, input, output, key): 341 | with torch.no_grad(): 342 | coord_check_dict[key].append(output.abs().mean().item()) 343 | coord_check_handles = [] 344 | for module_name, module in model.named_modules(): 345 | if module_name == 'transformer.wte': 346 | coord_check_handles.append(module.register_forward_hook(partial(hook, key='token_embedding'))) 347 | elif module_name.endswith('.attn'): 348 | coord_check_handles.append(module.register_forward_hook(partial(hook, key='attn'))) 349 | elif module_name.endswith('.mlp'): 350 | coord_check_handles.append(module.register_forward_hook(partial(hook, key='mlp'))) 351 | elif module_name == 'lm_head': 352 | coord_check_handles.append(module.register_forward_hook(partial(hook, key='lm_head'))) 353 | else: 354 | coord_check_dict = None 355 | 356 | # forward backward update, with optional gradient accumulation to simulate larger batch size 357 | # and using the GradScaler if data type is float16 358 | for micro_step in range(gradient_accumulation_steps): 359 | if ddp: 360 | # in DDP training we only need to sync gradients at the last micro step. 361 | # the official way to do this is with model.no_sync() context manager, but 362 | # I really dislike that this bloats the code and forces us to repeat code 363 | # looking at the source of that context manager, it just toggles this variable 364 | model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1) 365 | with ctx: 366 | logits, loss = model(X, Y) 367 | loss = loss / gradient_accumulation_steps # scale the loss to account for gradient accumulation 368 | # immediately async prefetch next batch while model is doing the forward pass on the GPU 369 | X, Y = get_batch('train') 370 | # backward pass, with gradient scaling if training in fp16 371 | scaler.scale(loss).backward() 372 | # clip the gradient 373 | if grad_clip != 0.0: 374 | scaler.unscale_(optimizer) 375 | torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip) 376 | # step the optimizer and scaler if training in fp16 377 | scaler.step(optimizer) 378 | scaler.update() 379 | # flush the gradients as soon as we can, no need for this memory anymore 380 | optimizer.zero_grad(set_to_none=True) 381 | 382 | # timing and logging 383 | t1 = time.time() 384 | dt = t1 - t0 385 | t0 = t1 386 | if iter_num % log_interval == 0 and master_process: 387 | # get loss as float. 
note: this is a CPU-GPU sync point 388 | # scale up to undo the division above, approximating the true total loss (exact would have been a sum) 389 | lossf = loss.item() * gradient_accumulation_steps 390 | if local_iter_num >= 5: # let the training loop settle a bit 391 | mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt) 392 | running_mfu = mfu if running_mfu == -1.0 else 0.9*running_mfu + 0.1*mfu 393 | print(f"iter {iter_num}: loss {lossf:.4f}, time {dt*1000:.2f}ms, mfu {running_mfu*100:.2f}%") 394 | iter_num += 1 395 | local_iter_num += 1 396 | 397 | if mup_enable_coord_check_logging: 398 | for handle in coord_check_handles: 399 | handle.remove() 400 | 401 | # termination conditions 402 | if iter_num > max_iters: 403 | break 404 | 405 | if ddp: 406 | destroy_process_group() 407 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | """ 2 | Full definition of a GPT Language Model, all of it in this single file. 3 | References: 4 | 1) the official GPT-2 TensorFlow implementation released by OpenAI: 5 | https://github.com/openai/gpt-2/blob/master/src/model.py 6 | 2) huggingface/transformers PyTorch implementation: 7 | https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py 8 | """ 9 | 10 | import math 11 | import inspect 12 | from dataclasses import dataclass 13 | 14 | import torch 15 | import torch.nn as nn 16 | from torch.nn import functional as F 17 | 18 | class LayerNorm(nn.Module): 19 | """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """ 20 | 21 | def __init__(self, ndim, bias): 22 | super().__init__() 23 | self.weight = nn.Parameter(torch.ones(ndim)) 24 | self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None 25 | 26 | def forward(self, input): 27 | return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5) 28 | 29 | class CausalSelfAttention(nn.Module): 30 | 31 | def __init__(self, config): 32 | super().__init__() 33 | assert config.n_embd % config.n_head == 0 34 | # key, query, value projections for all heads, but in a batch 35 | self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias) 36 | # output projection 37 | self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias) 38 | # regularization 39 | self.attn_dropout = nn.Dropout(config.dropout) 40 | self.resid_dropout = nn.Dropout(config.dropout) 41 | self.n_head = config.n_head 42 | self.n_embd = config.n_embd 43 | self.dropout = config.dropout 44 | self.mup_enabled = config.mup_enabled 45 | self.mup_disable_attention_scaling = config.mup_disable_attention_scaling 46 | # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0 47 | self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') 48 | if not self.flash: 49 | print("WARNING: using slow attention. 
Flash Attention requires PyTorch >= 2.0") 50 | # causal mask to ensure that attention is only applied to the left in the input sequence 51 | self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size)) 52 | .view(1, 1, config.block_size, config.block_size)) 53 | 54 | def forward(self, x): 55 | B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) 56 | 57 | # calculate query, key, values for all heads in batch and move head forward to be the batch dim 58 | q, k, v = self.c_attn(x).split(self.n_embd, dim=2) 59 | k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 60 | q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 61 | v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs) 62 | 63 | if self.mup_enabled and not self.mup_disable_attention_scaling: 64 | ### Begin muP code ### 65 | attention_scale = 1.0 / k.size(-1) 66 | ### End muP code ### 67 | else: 68 | attention_scale = 1.0 / math.sqrt(k.size(-1)) 69 | 70 | # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) 71 | if self.flash: 72 | # efficient attention using Flash Attention CUDA kernels 73 | y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, 74 | dropout_p=self.dropout if self.training else 0, 75 | is_causal=True, scale=attention_scale) 76 | else: 77 | # manual implementation of attention 78 | att = (q @ k.transpose(-2, -1)) * attention_scale 79 | att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf')) 80 | att = F.softmax(att, dim=-1) 81 | att = self.attn_dropout(att) 82 | y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) 83 | y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side 84 | 85 | # output projection 86 | y = self.resid_dropout(self.c_proj(y)) 87 | return y 88 | 89 | class MLP(nn.Module): 90 | 91 | def __init__(self, config): 92 | super().__init__() 93 | self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias) 94 | self.gelu = nn.GELU() 95 | self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias) 96 | self.dropout = nn.Dropout(config.dropout) 97 | 98 | def forward(self, x): 99 | x = self.c_fc(x) 100 | x = self.gelu(x) 101 | x = self.c_proj(x) 102 | x = self.dropout(x) 103 | return x 104 | 105 | class Block(nn.Module): 106 | 107 | def __init__(self, config): 108 | super().__init__() 109 | self.ln_1 = LayerNorm(config.n_embd, bias=config.bias) 110 | self.attn = CausalSelfAttention(config) 111 | self.ln_2 = LayerNorm(config.n_embd, bias=config.bias) 112 | self.mlp = MLP(config) 113 | 114 | def forward(self, x): 115 | x = x + self.attn(self.ln_1(x)) 116 | x = x + self.mlp(self.ln_2(x)) 117 | return x 118 | 119 | @dataclass 120 | class GPTConfig: 121 | block_size: int = 1024 122 | vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency 123 | n_layer: int = 12 124 | n_head: int = 12 125 | n_embd: int = 768 126 | dropout: float = 0.0 127 | bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster 128 | init_std: float = 0.02 129 | mup_enabled: bool = False # Whether to use muP. 
If False then all other mup variables are ignored 130 | mup_disable_attention_scaling: bool = False # Disables mup attention scaling 131 | mup_disable_hidden_lr_scaling: bool = False # Disables mup hidden LR scaling 132 | mup_width_multiplier: float = 1 # `mup_width_multiplier = width / base_width` where base_width is typically 256 133 | mup_input_alpha: float = 1 # Optional tunable multiplier applied to input embedding forward pass output 134 | mup_output_alpha: float = 1 # Optional tunable multiplier applied to output unembedding forward pass output 135 | 136 | class GPT(nn.Module): 137 | 138 | def __init__(self, config): 139 | super().__init__() 140 | assert config.vocab_size is not None 141 | assert config.block_size is not None 142 | self.config = config 143 | 144 | self.transformer = nn.ModuleDict(dict( 145 | wte = nn.Embedding(config.vocab_size, config.n_embd), 146 | wpe = nn.Embedding(config.block_size, config.n_embd), 147 | drop = nn.Dropout(config.dropout), 148 | h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]), 149 | ln_f = LayerNorm(config.n_embd, bias=config.bias), 150 | )) 151 | self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) 152 | # with weight tying when using torch.compile() some warnings get generated: 153 | # "UserWarning: functional_call was passed multiple values for tied weights. 154 | # This behavior is deprecated and will be an error in future versions" 155 | # not 100% sure what this is, so far seems to be harmless. TODO investigate 156 | self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying 157 | 158 | # init all weights 159 | self.apply(self._init_weights) 160 | # apply special scaled init to the residual projections, per GPT-2 paper 161 | for pn, p in self.named_parameters(): 162 | if config.mup_enabled: 163 | ### Begin muP code ### 164 | # Adjust hidden weight initialization variance by 1 / mup_width_multiplier 165 | if pn.endswith('c_attn.weight') or pn.endswith('c_fc.weight'): 166 | torch.nn.init.normal_(p, mean=0.0, std=config.init_std / math.sqrt(config.mup_width_multiplier)) 167 | elif pn.endswith('c_proj.weight'): 168 | torch.nn.init.normal_(p, mean=0.0, std=config.init_std / math.sqrt(2 * config.n_layer * config.mup_width_multiplier)) 169 | ### End muP code ### 170 | elif pn.endswith('c_proj.weight'): 171 | torch.nn.init.normal_(p, mean=0.0, std=config.init_std / math.sqrt(2 * config.n_layer)) 172 | 173 | # report number of parameters 174 | print("number of parameters: %.2fM" % (self.get_num_params()/1e6,)) 175 | 176 | def get_num_params(self, non_embedding=True): 177 | """ 178 | Return the number of parameters in the model. 179 | For non-embedding count (default), the position embeddings get subtracted. 180 | The token embeddings would too, except due to the parameter sharing these 181 | params are actually used as weights in the final layer, so we include them. 
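For the default GPT-2 small config shown above (n_layer=12, n_head=12,
n_embd=768, block_size=1024, vocab_size=50304) the total is roughly 124M
parameters, and the subtracted position embeddings account for only
block_size * n_embd = 1024 * 768 ≈ 0.8M of that; estimate_mfu() below uses
this count as the N in its 6*N flops-per-token estimate.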
182 | """ 183 | n_params = sum(p.numel() for p in self.parameters()) 184 | if non_embedding: 185 | n_params -= self.transformer.wpe.weight.numel() 186 | return n_params 187 | 188 | def _init_weights(self, module): 189 | if isinstance(module, nn.Linear): 190 | torch.nn.init.normal_(module.weight, mean=0.0, std=self.config.init_std) 191 | if module.bias is not None: 192 | torch.nn.init.zeros_(module.bias) 193 | elif isinstance(module, nn.Embedding): 194 | torch.nn.init.normal_(module.weight, mean=0.0, std=self.config.init_std) 195 | 196 | def forward(self, idx, targets=None): 197 | device = idx.device 198 | b, t = idx.size() 199 | assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}" 200 | pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t) 201 | 202 | # forward the GPT model itself 203 | tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd) 204 | pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd) 205 | x = self.transformer.drop(tok_emb + pos_emb) 206 | if self.config.mup_enabled: 207 | ### Begin muP code ### 208 | x *= self.config.mup_input_alpha 209 | ### End muP code ### 210 | for block in self.transformer.h: 211 | x = block(x) 212 | x = self.transformer.ln_f(x) 213 | 214 | if targets is not None: 215 | # if we are given some desired targets also calculate the loss 216 | if self.config.mup_enabled: 217 | ### Begin muP code ### 218 | # Scaling `x` instead of `logits` allows coord check to log change 219 | x *= self.config.mup_output_alpha / self.config.mup_width_multiplier 220 | ### End muP code ### 221 | logits = self.lm_head(x) 222 | loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1) 223 | else: 224 | # inference-time mini-optimization: only forward the lm_head on the very last position 225 | logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim 226 | loss = None 227 | 228 | return logits, loss 229 | 230 | def crop_block_size(self, block_size): 231 | # model surgery to decrease the block size if necessary 232 | # e.g. 
we may load the GPT2 pretrained model checkpoint (block size 1024) 233 | # but want to use a smaller block size for some smaller, simpler model 234 | assert block_size <= self.config.block_size 235 | self.config.block_size = block_size 236 | self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size]) 237 | for block in self.transformer.h: 238 | if hasattr(block.attn, 'bias'): 239 | block.attn.bias = block.attn.bias[:,:,:block_size,:block_size] 240 | 241 | @classmethod 242 | def from_pretrained(cls, model_type, override_args=None): 243 | assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'} 244 | override_args = override_args or {} # default to empty dict 245 | # only dropout can be overridden see more notes below 246 | assert all(k == 'dropout' for k in override_args) 247 | from transformers import GPT2LMHeadModel 248 | print("loading weights from pretrained gpt: %s" % model_type) 249 | 250 | # n_layer, n_head and n_embd are determined from model_type 251 | config_args = { 252 | 'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params 253 | 'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params 254 | 'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params 255 | 'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params 256 | }[model_type] 257 | print("forcing vocab_size=50257, block_size=1024, bias=True") 258 | config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints 259 | config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints 260 | config_args['bias'] = True # always True for GPT model checkpoints 261 | # we can override the dropout rate, if desired 262 | if 'dropout' in override_args: 263 | print(f"overriding dropout rate to {override_args['dropout']}") 264 | config_args['dropout'] = override_args['dropout'] 265 | # create a from-scratch initialized minGPT model 266 | config = GPTConfig(**config_args) 267 | model = GPT(config) 268 | sd = model.state_dict() 269 | sd_keys = sd.keys() 270 | sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param 271 | 272 | # init a huggingface/transformers model 273 | model_hf = GPT2LMHeadModel.from_pretrained(model_type) 274 | sd_hf = model_hf.state_dict() 275 | 276 | # copy while ensuring all of the parameters are aligned and match in names and shapes 277 | sd_keys_hf = sd_hf.keys() 278 | sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer 279 | sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer) 280 | transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight'] 281 | # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear 282 | # this means that we have to transpose these weights when we import them 283 | assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}" 284 | for k in sd_keys_hf: 285 | if any(k.endswith(w) for w in transposed): 286 | # special treatment for the Conv1D weights we need to transpose 287 | assert sd_hf[k].shape[::-1] == sd[k].shape 288 | with torch.no_grad(): 289 | sd[k].copy_(sd_hf[k].t()) 290 | else: 291 | # vanilla copy over the other parameters 292 | assert sd_hf[k].shape == sd[k].shape 293 | with torch.no_grad(): 294 | sd[k].copy_(sd_hf[k]) 295 | 296 | return model 297 | 298 | def configure_optimizers(self, weight_decay, learning_rate, betas, 
device_type): 299 | # start with all of the candidate parameters 300 | param_dict = {pn: p for pn, p in self.named_parameters()} 301 | # filter out those that do not require grad 302 | param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad} 303 | # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no. 304 | # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't. 305 | if self.config.mup_enabled and not self.config.mup_disable_hidden_lr_scaling: 306 | ### Begin muP code ### 307 | mup_decay_params = [] 308 | decay_params = [] 309 | nodecay_params = [] 310 | for n, p in param_dict.items(): 311 | if p.dim() >= 2: 312 | if n.endswith('c_attn.weight') or n.endswith('c_fc.weight') or n.endswith('c_proj.weight'): 313 | mup_decay_params.append(p) 314 | else: 315 | decay_params.append(p) 316 | else: 317 | nodecay_params.append(p) 318 | optim_groups = [ 319 | {'params': mup_decay_params, 'weight_decay': weight_decay, 'lr_scale': 1/self.config.mup_width_multiplier}, 320 | {'params': decay_params, 'weight_decay': weight_decay, 'lr_scale': 1}, 321 | {'params': nodecay_params, 'weight_decay': 0.0, 'lr_scale': 1} 322 | ] 323 | num_mup_decay_params = sum(p.numel() for p in mup_decay_params) 324 | num_decay_params = sum(p.numel() for p in decay_params) 325 | num_nodecay_params = sum(p.numel() for p in nodecay_params) 326 | print(f"num mup decayed parameter tensors: {len(mup_decay_params)}, with {num_mup_decay_params:,} parameters") 327 | print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters") 328 | print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters") 329 | ### End muP code ### 330 | else: 331 | decay_params = [p for n, p in param_dict.items() if p.dim() >= 2] 332 | nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2] 333 | optim_groups = [ 334 | {'params': decay_params, 'weight_decay': weight_decay}, 335 | {'params': nodecay_params, 'weight_decay': 0.0} 336 | ] 337 | num_decay_params = sum(p.numel() for p in decay_params) 338 | num_nodecay_params = sum(p.numel() for p in nodecay_params) 339 | print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters") 340 | print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters") 341 | # Create AdamW optimizer and use the fused version if it is available 342 | fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters 343 | use_fused = fused_available and device_type == 'cuda' 344 | extra_args = dict(fused=True) if use_fused else dict() 345 | optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args) 346 | print(f"using fused AdamW: {use_fused}") 347 | 348 | return optimizer 349 | 350 | def estimate_mfu(self, fwdbwd_per_iter, dt): 351 | """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """ 352 | # first estimate the number of flops we do per iteration. 
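# rough illustrative numbers for the default 124M config (N ≈ 1.24e8, L=12, H=12, Q=64, T=1024):
# 6*N ≈ 7.4e8 and 12*L*H*Q*T ≈ 1.1e8, i.e. on the order of 8.6e8 flops per token before the factor of T below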
353 | # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311 354 | N = self.get_num_params() 355 | cfg = self.config 356 | L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size 357 | flops_per_token = 6*N + 12*L*H*Q*T 358 | flops_per_fwdbwd = flops_per_token * T 359 | flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter 360 | # express our flops throughput as ratio of A100 bfloat16 peak flops 361 | flops_achieved = flops_per_iter * (1.0/dt) # per second 362 | flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS 363 | mfu = flops_achieved / flops_promised 364 | return mfu 365 | 366 | @torch.no_grad() 367 | def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None): 368 | """ 369 | Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete 370 | the sequence max_new_tokens times, feeding the predictions back into the model each time. 371 | Most likely you'll want to make sure to be in model.eval() mode of operation for this. 372 | """ 373 | for _ in range(max_new_tokens): 374 | # if the sequence context is growing too long we must crop it at block_size 375 | idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:] 376 | # forward the model to get the logits for the index in the sequence 377 | logits, _ = self(idx_cond) 378 | # pluck the logits at the final step and scale by desired temperature 379 | logits = logits[:, -1, :] / temperature 380 | # optionally crop the logits to only the top k options 381 | if top_k is not None: 382 | v, _ = torch.topk(logits, min(top_k, logits.size(-1))) 383 | logits[logits < v[:, [-1]]] = -float('Inf') 384 | # apply softmax to convert logits to (normalized) probabilities 385 | probs = F.softmax(logits, dim=-1) 386 | # sample from the distribution 387 | idx_next = torch.multinomial(probs, num_samples=1) 388 | # append sampled index to the running sequence and continue 389 | idx = torch.cat((idx, idx_next), dim=1) 390 | 391 | return idx 392 | -------------------------------------------------------------------------------- /mup_examples/mutransfer_lr_shakespeare_char/plot.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 17, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "data": { 10 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAxAAAAE9CAYAAACbVIQ8AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd5wV1dnA8d/cfrf33oClLkvvCAJ2BbvGXqKxJho1xpI3xtiI3diwJRprFFsUjUQRpEjvZVlgl+293N7vnPePC6srbReWref7+aDszNyZZ7j3zs4z55znKEIIgSRJkiRJkiRJUhtoujoASZIkSZIkSZJ6DplASJIkSZIkSZLUZjKBkCRJkiRJkiSpzWQCIUmSJEmSJElSm8kEQpIkSZIkSZKkNpMJhCRJkiRJkiRJbSYTCEmSJEmSJEmS2kwmEJIkSZIkSZIktZlMICRJkiRJkiRJajOZQEiSJEmSJEmS1GYygZAkSZIkSZIkqc10XR2AJEkdY9euXcybN481a9ZgtVqJiYlh3Lhx3HTTTQwZMgSAe++9l88++6zV63Q6HbGxsUyePJk777yT1NTUrghfkiSpT7jyyitZs2ZNq2V6vZ6EhARmzpzJ73//e6Kjo7soOklqG5lASFIvsHv3bn71q18xatQo/u///o/4+Hhqamp49913ufjii3n77bcZNWoUAImJibz44ostrw0EAuzdu5ennnqKjRs3smDBAkwmUxediSRJUu83bNgw/vKXv7T87Pf72b59O8888wwFBQV88MEHKIrShRFK0uHJBEKSeoE333yT2NhYXn/9dXS6n77WJ598Mqeffjovv/wyr732GgAGg6Elmdhv3Lhx6PV67rnnHhYtWsRZZ53VmeFLkiT1CieddBKjR4/mqaeearX8yiuvRAjBu+++C0BERMQB1+Hx48fjdDp5/vnn2bx58wHrJak7kWMgJKkXaGhoQAiBqqqtloeFhXH//fdzxhlnHHEf+fn5AFRWVh6XGCVJknozp9NJZWVlS5fRn9u1axeDBw8+4j6GDx8OQFVVVYfHJ0kdSSYQktQLzJgxg6qqKi655BLee+89ioqKEEIAcPrpp3PeeecdcR979+4FICsr67jGKkmS1Bvt3r0bIcQBCURNTQ0Wi6VNCcT+63BmZuZxiVGSOorswiRJvcBll11GfX09//jHP3jooYcAiI2N5YQTTuCqq65ixIgRrbYPBAItf3c4HGzdupW5c+eSkZHBjBkzOjN0SZKkXmH37t0AByQQO3fuBGiVQAghWl2HrVYra9asYd68eYwePbqlJUKSuiuZQEhSL3H77bdzzTXXsGzZMlauXMnq1av58ssvWbBgAffffz9XXXUVEOqilJeXd8DrR44cyUMPPSQHUEuSJB2FwsJCEhISSEhIOGC5RqNh4MCBLcvWrl17wHVYo9EwZcoUHnroITmAWur2ZAIhSb1IdHQ0s2fPZvbs2QDs2LGDu+++myeffJI5c+YAoSpM8+bNa3mNwWAgJSVFlg2UJEk6Brt37z7o+IeCggIyMzMJCwtrWZaXl8df//pXABRFwWg0kpqaSkRERKfFK0nHQo6BkKQerra2lhNOOIH58+cfsG7YsGHccccd+Hw+ysvLgVDCkJ+f3/Jn8ODBMnmQJEk6Rrt27SInJ6fVMlVVWbVq1QHjH8LDw1uuwcOHD2fgwIEyeZB6FJlASFIPl5CQgE6n4/3338fr9R6wvri4GKPRSHZ2dhdEJ0mS1Ps1NjbS1NREfX19q+Vvv/02zc3NDBo0qIsik6TjQ3ZhkqQeTqvV8uCDD3LrrbdywQUXcPnllzNgwADcbjcrVqzgvffe4/bbb5etDJIkScfJrl27AFi+fDkPPvgg/fv3Z/PmzSxfvhyA7du3s3nzZkaOHNmVYUpSh5EJhCT1AjNmzOCjjz7iH//4B6+88gpNTU0YDAaGDRvGs88+y6mnntrVIUqSJPVahYWFaLVann32WR555BE+/fRTxo0bxzvvvMOtt95KQUFBq0k+JamnU8T+YvGSJEmSJElSu/3pT39i3bp1LFy4sKtDkaROIcdASJIkSZIkHYNdu3YxYMCArg5DkjqNTCAkSZIkSZKOkhCCPXv2kJub29WhSFKnkQmEJEmSJEnSUaqoqMDlcskWCKlPkWMgJEmSJEmSJElqM9kCIUmSJEmSJElSm8kEQpIkSZIkSZKkNpNFiTvIxo0bEUKg1+u7OhRJkqQexe/3oygKo0ePPqb9yOuwJEnS0WvPtVgmEB1ECEFPHE4ihMDv96PX61EUpavD6RTynOU591Y99Zw76topr8M9R188Z+ib5324cw6IIO6AD43qw+dTcXk1JEU50WuDWAJGXG4tSnOQ8Hg9QZ0ebVBBhwZzhKlb//v11Pe5PddPmUB0kP1PvPLz87s4kvZxuVwUFBSQm5tLWFhYV4fTKeQ5y3PurXrqOW/durVD9iOvwz1HXzxn6JvnfbhzXlyznW11lSQ7l/HSJwlohI9v734LjQKzt52F43k7yevtnPHaLPYGTCQVapgxZRz5I4Z20dm0TU99n9tzLZZjICRJkiRJkqROZfG5qHY3o7isFO7VYndpmTG0HI0Cu93RNATM6Nc4GXZqJNaAGY1HEGuIIGNgaleHLiETCEmSJEmSJKmT1bibqXPYwdPEiq3RAJw6vBiAVbZkNKVeNHUB+s1IwSE0iGY/2dmpxCRFd2XY0j6yC5MkSZIkSZLUaVShstdRj9Phx1tjp85iwmRQGZ5eCcBKewq6tU7ShurQJ6fidfuJ9ujJGZbZo8YU9GayBUKSJEmSJEnqNA1eO2X2RgJOF1t2hm5FzxhdSbjBhzuoZYszAf0aF8NPDcfpj8Tj9JIcE0dyTlIXRy7tJxMISZIkSZIkqdNUu5ups9lxNFgoqzWg1QimDy4CYK0jiYBdoNvlpv+JidgCBoQzyNBB/TAYZYnm7kImEJIkSZIkSVKn8KsBCpurcdh9FBV5AMjr72dAXC0Aq2wp6De4yB2jJxCWjM3rJlJvJjc3syvDln5BJhCSJEmSJElSp6jz2Njb3IijPkBJhUBBMHpgEylRzcC+8Q9rnOSfHobLH4nV5aR/YiqxSTFdG7jUikwgJEmSJEmSpE5RYquj3uqgptQOwOBsP8MSKtEoghJPJNWeMMzbXPSbHIPdbQYBg/tnd3HU0i/JBEKSpF5HiCAa/xqSIr5E41+BEMGuDkmSJKnPcwW8bKmrxG0JUlrmAmDkYDeZETUArLIno93lYdhoHW5NIk02DzER4eTmpHdl2NJByDKukiT1KsKzEGF7FINaQ3o04H4f4U2BqD+hmE7r6vAkSZL6rCpXMyVNDdSWeFBV6J/qISXGT3p0HRAa/6Bb62LkaSYcvmjsfjdD0voTGxbZxZFLvyRbICRJ6jWEZyHCchuoNa1XqLUIy20Iz8KuCUySJEliU00ZVruH8hIHABOGOYjX2Qg3evCqGjY6EokscJM5NoomqxElXMvgzEw0irxd7W7kOyJJUq8gRBBhexQQB1sb+q/tMdmdSZIkqQtYfS521FfTVB
LA71dJivWTnaqQZgpNHrfBkYivXjB6kIIrGI2lKUBkfASZCXLuh+6o1yQQjY2N3H333UyaNInRo0dzww03UFRUdMTXqarK9ddfzwsvvNAJUUqSdNz41h3Y8tCKALU6tJ0kSZLUqQoba6mxWqncGxr7MD2/Ca1GT0rkvvKt9hR065yMOsOAxRVDQCtISown1hDelWFLh9BrEohbb72V0tJSXnvtNT7++GNMJhPXXHMNbrf7kK/x+Xzcf//9LFu2rBMjlSTpeBCB7W3bUK0/voFIkiRJrahCsLG2jIYyH253gIgwwcgBLsxBP4lRjQCssiUTu9dL0iAz9Y1adLFG0hITiNKbuzh66WB6RQJhtVpJT0/nkUceYcSIEQwYMIBbbrmFuro6du/efdDXbNiwgfPPP59169YRFRXVyRFLktSRRKAcAhVt21iTeHyDkSRJklqpdjvYa22gZm9o4rhJeXb0xkhilAq0GpVqXxiljnDGpYM3EI7Fqic8JYK08Fg5/qGb6hXvSnR0NE8//TSDBg0CoKmpibfeeouUlBRyc3MP+poffviBadOm8fnnnxMZKUf3S1JPJYI1CO9SUBI4fGE5BTSpYBjXWaFJkiRJwG5bEzWVLhw2P3q9wpShTWhEGInh1QCstKWg2+ph3Ck66pqjMIabiIiLIN4k78+6q15XxvXPf/4zH330EQaDgXnz5hEWFnbQ7e64444OP7YQApfL1eH7PZ72d/E6XFev3kaecy+iNqAJLEFRrWgCK9ASaBlCrfxsM7HvJ7/xD6hub6eH2Vl66vsshEBRlCNv2MZ9yetw99cXzxn65nnXNTdT5Gqibq8PgCH9BbHhKi57kOS00PiHlbYU4sp9RM/UULhdgzk5DLNBjymg6XHfZ+i573N7rsW9LoG4+uqr+dWvfsV7773Hrbfeyvvvv09eXl6nHNvv91NQUNApx+poJSUlXR1Cp5Pn3LNpNTaijRvQaS2E6fcSG7YBIRQs7vFEGneg0zpatvUH4qiwXoXVkwb0zO9oe/TE99lgMHTIfuR1uGfpi+cMfee8VVXwTWEJRRUObE2hxzsj+1Xj9wvczSVEDnAREArrHYlMimqk0aKjqkElOsWBtk6h0lZKdQc9XOgKPfF9buu1uNclEPu7LD366KNs3ryZd999l7lz53bKsfV6/SG7THVXbrebkpIScnJyMJv7xkAlec694JyFDY1/D4qqQ1GD6AJrAAgaLiA8bCJqsAG3fzcNzeHExE9CHzmJtDgtaV0c9vHWU9/nPXv2dNi+5HW4Z+iL5wx967x/2FLMM5+toNH2UwuCVgtq0ESYMZzouM0AbHYk4CkTzJxpoN4ZR0pWFpH9ExgWlU5eXP+uCv+Y9NT3uT3X4l6RQDQ1NbFy5UpOO+00dLrQKWk0GnJzc6mrq+u0OBRFOWSXqe7ObDb32NiPljznnkmoDoR3LWgbQBME16ehFcZT0ZunI1QnaP14dRfSUKGQmDW0x59ze/W097mjui/t31dPOvef62nvW0foi+cMvf+8F23czZ/+9e0By4NBeP/bGPRBHxcMD1XEW2VPJr7CR2S+hl21EaQPTsVthPToxB7/b9TT3uf2XIt7xSDqhoYG7rzzTlauXNmyzO/3s2PHDgYMGNCFkUndTTAYZOvSAjYt3MHWpQUEg3JSsZ5EqC6EdxkESkHRg+tNQAX9WDCdhRA+CFaDPh+hHdLV4UqSJPU5QVXliY8WH3abBWsUkqIagND4h5ExCk6XloA2AVN8GEaNXs7/0M31ihaIQYMGMX36dB555BEeeeQRoqOjefXVV7HZbFxzzTUEg0GampqIjIzEZDJ1dbhSF1n26Wpe/v2bNFQ07lvyBQkZ8dzy3LVMO39il8YmHZkQHoRvBQT2gCYWHH8HvKDNhbDLAAGBMtDlohjGQzDQ1SFLkiT1OcsLiqm3OA+zhUL/xDp02iANfhNFTZFcPcpNs8VAZEoaAYNKhMYo53/o5npFCwTAM888w+TJk7njjju46KKLsFgsvPfee6SlpVFdXc0JJ5zA119/3dVhSl1k2aereeiip36WPIQ0VDby0EVPsezT1V0UmdQWQvgQ3pUQKARtMjj/AcIKmmQIvx5F0UOwHLSpKMYpKErHDMiVJEmS2q7BY2NZ+a4jbjc5twzYN3lcVYBwow+rJ564lDicAS8p5hh0Gu3xDlc6Br2iBQIgMjKSBx98kAcffPCAdRkZGRQWFh7ytd9///1xjEzqasFgkJd//yYt9T1/TgAKzLvjTaacMw6tVl6wuhshAgjvKvDvAG0quP4FaiUokRB+E4omDBGsA8UcSh40cmJISZKkzlblamZVwy5swSOXXZ0yoByAVfYUhocruF0CJTyZqLgIml0eEo3yOt7d9ZoWCEk6lG3Ldh7Q8tCKgPryRrYt29l5QUltIkQQ4VsL/q2gSQHPlxDYCRgg/AYUbTxCtYFwoxgmomhTuzpkSZKkPqfEUceyugIaXQ4iTUaMpkM/jEuOspOb3ERQwBp7MtMGBHG4DUQkZ+MTAYwaPdFy/EO3JxMIqddrrG7u0O2kziGEivBtAP/GULcl/wrw/QgoEH41ii4bIbwQrAP9aNAN6uqQJUmS+hQhBLtsVayoLySgqnht4PL4iIo8VDdSwaR9rQ/bXXGIWg0xuHAFk4hOCnVfCtcbidH3nMpFfZVMIKReLz41tkO3k44/IQTCvxn860GTEGp18CwIrTRfgKLPR4ggBCpAPwTFMLpDS4FKkiRJh6cKla2WMlY27Mag0aF4tVQ32nBYg9TXh2ZgNhhbt0REm1UuGFMEwCpbCkOMGnxeP4a4LAwmPc6Al2RTtBz/0AP0mjEQknQow6cNISEjnobKxoOPg1AgMSOe4dNk2c/uQvi3g28NaGJArQPXe6EVxpkoxukIISBYBrp0FMOk0CBqSZIkqVME1CAbm0rYai0jRh+GAQO7aqoQQcGGdbUADB0WT1b/CMxCB85ShiTVoDh99E+sAULjH6YnBPD7zZjiswEICpVEkxz/0BPIFgip19Nqtdzy3LWhHw7xkPrmZ6+VA6i7CeEvBN8qUCJAeMH5BhAE/UgwnRPaSK0BJRLFMAVFE9Gl8UqSJPUl3qCf1Y172GwpJcEYSZQujNKaJhwuD3sKrbjdAaKiDGT0CycpNoIxealMHGwnJ95DrKkBkzGAJWBgb1M06UErQUMy5th4vEE/Bo2OWL0c/9ATyARC6hOmnT+RB+b/gYT0+NYrFDj75tOYcva4rglMakUEihDeFaAYQhPFOeeBcIE2B8KuRFE0CNUCIohinIyiTerqkCVJkvoMV8DLj/W7KLBWkGqOJlxnpM7qoKrJhtcp2LPHAsDosUmYjXqyk2PRafyYtVYszSoJpjoAVtuT6a/RoFMC6GL6oyihfYfrjMTIAdQ9gkwgpD5j2vkTeXfvSzzy9b2ceftMjGEGEKHB0zUldV0dXp8nAmUI73JQFNBEg/M1UJtCYyDCf4OiGBDCDcFG0I9B0clZ5iVJkjqLze9med1O9jhqSA+Lw6Q14PYFKKltAhXWrQ11TRo4K
BZjuIaMxGiiw00YsaENOLDaNCSY93VfsqUwxBgAXRiG2EwAHEE5/qEnkQmE1KdotVrypw9l6iXjmXjWGAC2ryikeEtpqF+91CVEsBLhXQoEQpPDOf8VGuOghO2b6yESIQIQrAB9HophRFeHLEmS1Gc0eu0sq9tBuauRrLB4DBodQoXSmiZsTg8lRTYcDj9hYTr6DYwiNjKM9IRoAAxKMwGfD0V1kZZuB2CNNYmBwoYuIgWMMQAEVJVEU3RXnaLUTjKBkPokrU7D7JtPQW/UYamzsubrjTRWNXV1WH2SCNYivMtCXZU0aeD+FALbAN2+uR6Sfho0re2HYpiAosj6D5IkSZ2hxm3hh7oC6tw2MsPjW1oI9nddUn0KOwtCvz/HjEtGr9OQkxyHQacQQRExyh5sVohWQy39ha4YwgMmYk0BiOoHaPCpAQwaLbGy+1KPIRMIqc/qNyKLEdOHAbB5yXZKd1R0cUR9jwg2hloeVAtoM8G7BHxLQyvDrkTR9Q/9Xa0CTdy+maZlfXBJkqTOUOpsYGldAXa/i4zweLRK6LZxf9clDQrr1tQgBOT0iyY8SkdafBSJUVpi2UQ8m/B5VRqbDMTpQtWZVtpS6C8CGMLC0ESkAeAMeInQmYgxyOt7TyETCKnPMpgMnPO7M1E0CtXFtWz4dgu2JntXh9VnCLUZ4VsKaj1os8C/BTyfh1aazkExjA5tF2wEoeyruBTXdQFLkiT1EUIIdturWVG3E78aICMsHs2+uXZ+3nWpvMSBxeLFaNQyJC+WCLORAckKicpqotiDhzisdhMBb4ABA0OtFCvtyQzVuNFHJuJXYgBwBjwkmqLRa2Trck8hEwipTxt54jAGju4HwLr/baZ8Z2UXR9Q3CNUeankIVIUqLAVLwfU2IMBwAhhn7dvOCcIKhvEouqwujVmSJKkvUIXKdks5K+t3odVoSDHHtFq/v+uSRtWybWsDAGPGJoNGMCLdRaZhDSbqcJOG36/H1uRA02whKiaAI6ijwhFHvwg/SkQWgtAcPkFVJVnO/9CjyARC6tPCIs2cddOpAJRsK2PL0h24nZ4ujqp3E6pzX/JQDrocUBtDFZfwgy4vNNO0oiCED4LVoB+Bos/r6rAlSZJ6vYAaZGNzCWubigjXGUkwRrZav7/rklZRWL+2BlUVpKdHEBOvMCalisGRO1BQcZOGqirUljawe3kDke5Q96W19mSygoLImDAC+mQA/GoAnUYry7f2MDKBkPq8qeeMJ21AMqoqWP3VRqr21HR1SL2WEO7QgOlACeiyQXjA+QoIJ2gzIPwaFEWLEOq+BGMAimE8iiIvVZIkSceTTw2wtnEPG5v2EmeIOOCGfn/XJbvTS02Fi/p6N3q9hgljzYxIKGRofBV+TTQ+4gGFH9/fyyvnbeebvzYwZLgFgFW2ZKILnegjYvESC4TGP4TJ+R96HPlbWerzohOiOPnKEwHYvb6IglW7CPgDXRxV7yOEF+H9EYJ7QBeq+43z9dAYCCUWwm9EUYyh5cEK0CaFBk3vXyZJkiQdF+6Aj5X1hWy3VpJiiiFCbzpgmzqrg+omO3pFy8aNoYpKU8aayYvdwoA4O6ohgyChQdCrPyzl0z8W424OMPFkK0PHugBYU5/Ihvvq2bJYIUgoYXAGvCSZojDI8Q89ikwgJAk49ZoZRCdG4fP4WfnlOmr2yonlOpIQfoR3JfgLQtWW0IHrXQgWA2aIuAlFE6r/LYL1oBj2VVySNcElSZKOJ7vfzfL6neyy1ZBujsWsMxywjccXoLS2CY0C69fVEgwKMlIUTs4rICZcoDX3QxBKABwWN189WsTUMyy8vaaAh94uQbPvbvPVwUs44dRmPn9gD2pQBcCvBkk2xXTW6UodpMsSCDlpl9SdJGUmMO2CSQDs+HEXRZtK5Ge0gwgRQPhWg38b6NJDLQqeL8G/EdBC+HUo2tTQtqodhAvFMAlFm961gUuSJPVyTV4HS+t2UuqsJys8HoP2wFYAoUJpbTM2p5fGOi/V1U50WsEVs8rxY8IUkYVWG5obwu/xs+mrUkaNb+TPr5eSkOJvta+EMC9/fq2U4SOrKF9dhl8NhsY/6GX51p6m3e1FQgjmz5/P4sWLcbvdqKraar2iKPzrX/864n7mzJnDXXfdxcyZM9sbgiR1OEVROPvW0/junR9wWJz8+MVahk4eRFJmQleH1qMJIRC+jeDbDNpUFMWM8C4H76LQBmGXougH7dvWC8FaMIwH3eAujFqSJKn3q/VYWVm/iyafg6zwhJY5Hn6p3uagqtGGUatjw7oyAM6caCGoDSPMHE2EOdTNNOgPUltaj6XKxc0PV4EC+yq/ttBoQFXhpoeq+HiNDWfAQ7hWjn/oidrdAvH000/zwAMPsHv3bgKBQOgG4Wd/fplQHEp1dTVms7ndAUvS8ZI1OJ1xp40CQhPLlWwr69qAeoNAIfg3gDYRRROO8G8H9/zQOtMZKIYJAAgRDA2a1g9GMYxG+eVvHUmSJKnDlDsbWVq7A6vPSWZY/CGTB48vQElNEzqNYOfmErw+QUaij8G5WoxGE/FRoRt/oarUVzRia7KTN8ZNYpr/gORhP40GktL99B9YhyvgJdEUhVGrP16nKh0n7W6B+Pzzz7n22mu55557junAc+bM4a233qJ///4kJSUd074kqSNodVrOv/0sVn6xjsaqZtb8dyODx+cSnSBrUx8NEagIjXsI1oLagFA94PmU0FwPE8F4+k8bB8tD3ZsMk1GUA/vfSpIkSR2jyF7DmsYigiJIeljcIR/YCBHquuRx2zC6qthVokGjCM6YJhBAQnQken2o61JTjZXmWivhkSbyBrVtDGH6UD3V7iBJJjnWrSdqdwLhcDiYMWPGMR+4pKSEdevWceKJJxITE0NYWOv+b4qi8N133x3zcSSpPQaPH0DelMFsWbqDDd9u4cSLp5B/gkwg2kuozQjn66GxDsLReqUmDcyXtPzSEsEaUCL2zTQd0QXRSpIk9X5CCAqsFaxv2oteoyXVHHvY7eutDuy2cvJiSnj5uzBAw9RRQSLDvUSHm4kOC1VqsjU6aKyqJzetikEpOwk3OA673/1KPKBVNMTK7ks9UrsTiLFjx7JhwwYmTpx4TAdOTU1lzpw5x7QPSepoBpOBc353BluW7aC8sIqN320hd1QO5gjZ3a6thHAjHK+A+4ODb6BWhQZUG0YiVCsIH4rpBBRtcucGKkmS1EcEhcrm5lI2N5cSqTcd8abd4/PhbN5KXsxuvv4xGptLR0KMYFK+F61OS1JMBIoGPHYnZucaTsvfSbgxVKq12alDZ1II1/jRHKRxQxVQ5zezzZXMoCg5/qGnancCcf3113P33XcTCAQYOXLkQccxjB8//oj7mTt3bnsPLUmdYtypI+k3PIu9W8tY/fUGppwzgdzR/bo6rB5BiADCs/KncQ6H4v4EocuFYAMYJoF2QOcEKEmS1AeVOxvY3FxCrCGcSP3hH4gpwkvAspp0QwEldfGs2hG6wZ8z3UdQBEmJisJkUIj0b6C/cRVhWaHEweI38nbtID5tGsCkyFrm5qxE
FbRKIlQBCvBc5ShiI3XEGyMxyfEPPVK7E4hrr70WgJdeegmgVd85IQSKolBQUNDm/TU2NuLz+VpKZqqqitvtZt26dVx66aXtDU+SjllYpJkzf3MSL932JkWbStm6rIDsvAz0BnmRO5xQxaXN4P32wG5LB2xsAd8aMJ2OYhgpB01LkiQdRzVuC8ARkwc9FjTOdYQH92ALxvPxktD4hAl5QeJjvESZdeREFJIQXEO4PnSdb/CbeLduEJ819McrdOjLffyYkc59JZO5I30TyQZ3y/7r/GaeqxzFRtdALkqIIsUcc1zOVzr+2p1AvP322x1y4J07d/KHP/yBoqKig65XFEUmEFKXmfGrqcx/6kvqyhr48T9rGXvqSLKGyHkJDiuwB/zr4RDVPA6ghKEYJqIocvZRSZKk48WnBqh0NxOpO3B26Z8Iwqggwr+FBlc1Tb5EftgQQbNNITpCMGG0k4iwQibF7SBOG2pxqPebeKd2MF829sdYFED5uomEHW6ufCybSqOWjyxpLLWmMSqinnidh8aAiU2ORFQULskehUGjleMferB2/+aeMGFChxz4iSeewGq1cs8997B48WIMBgMzZ85k6dKlLF26tN2JSmNjI3/7299YtmwZXq+X8ePHc8899zBgwMG7RjQ3N/PII4+wdOlSFEXhrLPO4o9//KMsLSsBEJMYzazLpvHvv33GzjW7KVxXRMagVDQaOXn7wYhgNcK3EhQjaFLa9iLDFBSNnDxIkiTpeGr02rH73Yd82q/gJ4pCosQuah1eKu2xWO0mVpWoGAe7uGjqds5M3knSvpaEOp+JD2sHs6u5PwN0Rqb9p5HVr9Rh1sI5T+UQnWpA73BznlbDYq2WDY6fKm1G68OYnT6GrPB4/KpKjF4mED3VUT3627t3L88//zxr1qzBZrMRGxvLuHHjuPXWWw95w/5Lmzdv5r777uPCCy/EbDbz5Zdfctlll3HZZZdx22238c477zBu3Lg2x3TrrbeiqiqvvfYa4eHh/P3vf+eaa67hf//730GTgttuuw23281bb72FzWbjT3/6Ey6Xi8cff7zNx5R6t7NuOJkFr/4PR7OTFZ+tZsS0oSRnJ3Z1WN2OUK0I748g3Ci67FC51iPRJKKYZh3/4CRJkvq4WreFclcDjT47UTozORGJaPa1FOuwE8M2wimnxhHG8nqV0qBgh9fCpRcVcWVyIYn60DW9wR/GKutwKkpTyNQaGGg2UPSjndWvhsq2zrgtlcxR4fg8PkRQMC0nkTPiwimw2miy1WCImEBO7BA0ioZqdzMp5ljMOlm2u6dqdwKxZ88eLrnkErRaLbNmzSIhIYH6+noWL17MkiVLmD9/fpuSCJ/PR05ODgA5OTns3LmzZd3555/PX/7ylzbHZLVaSU9P58Ybb2TQoNCstrfccgvnnHMOu3fvZsSIEa2237hxI2vWrOHrr79uifWhhx7i+uuv58477yQ5WVaDkSA5O5Gp505g4ZuL2bqsgOItpTKB+AUhPAjvCgjWgK4fQrWA640jvk6JegBF0R7/ACVJkvqwRTVbmbv9c2z+n8YhROvNzE4fy/gYHYp/C+tsFtZY9Gx32FEIcF5CMQ/l7iJhX+Jg8YdR4R+HJ5hHoKKOJFRMZgMNez3895EKEDDi7FhGnh1HIBDE6/aRlJVAVHyoLPfwcD9NfgOu8JyWxMUbDJB2hDKyUvfW7gTiqaeeIiMjg3feeYfIyMiW5Xa7nauvvppnn32WF1988Yj7SUtLo7y8nHHjxpGTk4PD4aCiooKMjAwMBgNWq7XNMUVHR/P000+3/NzU1MRbb71FSkoKubm5B2y/bt06EhMTWyU6EyZMQFEU1q9fz5lnntnmY0u9l6IonPu7M1j8wXJsDXZ+/GItQyYOJDZJTnoDodmjhXcNBItBlw34wfkaCGuoG5PpFHB/GRowvZ8SB1F/QTGd1lVhS5Ik9QmLa7Zx36YDy2lb/W7eK1nOIoNCrS80KZxJ4+TChGKuSCokTu8FwOKJoNA2DKsynMTICGr21hLwBIiIDcNtDfDF/5Xhd6tkjg5nxu9SUVUVl9VFbGoMccnRaPCixY1WsWH3J6DZd8sZFCoaRUO0XnZh7cnanUCsXbuWRx99tFXyABAZGckNN9zQ5paDU089laeffpqwsDBOO+00+vfvz3PPPcdvfvMb/vnPf5KZmdne0AD485//zEcffYTBYGDevHkHTFAHUFtbS2pqaqtlBoOBmJgYqqurj+q4Uu/Ub3gWo08eweoF69m4aCtlV1TIBGIf4d8CgW2hieHQguttCFaAEg7hN6BoExD6sRDYDYEi0A2A8OvRaGSfV0mSpOMpKFSeLlgAgAZx0IHMNT6BWRPg18l7OTd+J5G6UOJQ2RzJ1wWjiU3PQKPRk5kQQW15Iy6bi8jYCIIB+OqvFVir/ESl6jnrLxnodCoeRzMpKTpSM71otdUEMRLEjEXNpdlnJn5fbM6Al3CtUQ6g7uHanUDodDqMRuNB1xkMBnw+X5v289vf/pbS0lI+/vhjTjvtNO677z5++9vf8tVXX6HVannmmWfaGxoAV199Nb/61a947733uPXWW3n//ffJy8trtY3b7cZgOLDfndFoxOv1HtVxIVTG0uVyHfXru4Lb7W71/76gved81o0nsebrDdSW1LPiP2tIH5xCWFTPGmzf0e+zEixGE1iJUMJB0aL1/wdtYDMCLQH91YhAJARC3yVF1SGUCajKLPAoQOd8R+Rnu+fYXwK8o/Ylr8PdX188Z+i8895oKaHOa2NGdOUBpVRrfWZerhpOksHNr1P2YNaEuio1uiJ56duxfFcwkOvP9eP1e0iNM9JU1YilzkJYVBiqUFnyYg3lG50YzAqXPhFJclIDLocfs8mMOSGdRjUVvy8Kn4gkQDheXxCPWtdyf2jxOEgyRoIviMvXs76rbdVTP9/tuRYrYv8EDG10/fXXoygKr7322gFzQFx//fX4fD7eeeedNu/P7/ej14fq65eVlbF9+3by8vLIyspqT1gHUFWV2bNnM3LkyAMmrXv44YfZsmUL8+e3nuxq8uTJ3HjjjVxzzTXtPt7WrVvbnDy1ilMI9vgbsKoeojUmcvUJaGRN/G7F7w3w+s3vU7a1iuQBCVz0wFlkDEs98gt7Kb2miWjTWhQliD8YT4RxO4kR3wJQZz8Np29oq22FomD1jMUflONHpEMzGAzk5+cf0z6O9josSb3NWk85xZrPmJuzEkHrydz23/Xtv9WwesNZVzWMBz4Yhdev4+RxNgZkOQg36DAHVGw1FiIiFcymIDu+cfLtc35Q4MwHIkkcGUtDsxG700BMv2xMsXFHjK0mYGeIPon+hvgjbit1vrZei9vdAnH77bdz6aWXcvbZZ3P66aeTmJhIfX0933zzDXv37uXNN99s1/72Jw8AWVlZR5U4NDU1sXLlSk477TR0utApaTQacnNzqaurO2D7lJQUvvvuu1bLfD4fFouFpKSkA7ZvK71ef9AxF4eytGEnL+z5H/U+e8uyREMkv8s9jekJQ446jvZwu92UlJSQk5PTZ0rYHs0
5X3jH2Tzz61eo29uIrcxN7uxc9MaeM7Fch73PwobGvwtFRCKUTBR1Lzrf9wAEdScRk3QyMS3bWlAEqLoTSNS2/XvRUeRnu+ec8549ezpsX+29DncHPfV9OxZ98Zyh887bYzFwTmDTAckD/JQ4BITCNzXj8fkG8sGicLx+LdmpQSYMD6JTFJJMftz1jcSm60Afxp618N0LFgCGXzYQTd5wKm1BHA4X/fKzSOl38Psnn89HfX0diYlJaPU6VHczo5PyevUg6p76+W7PtbjdCUR+fj5vvPEGTz/9NC+++GJLc8fw4cN5/fXXGT9+fJv243a7eeGFF1i1ahV2ux1VVVutVxTlgJv8Q2loaODOO+/kjTfeYNq0aUCoZWPHjh3MmnVgqcjx48fz1FNPUVpaSnZ2NgBr1qwBYOzYsW065sEoinLQMRcHs7hmGw/s+PiA5fU+Ow/s+JjHR13GzJThRx1Le5nN5jbH3lu055xPvGAKHz3+BRWFVaz772amnzeJ7GFHN06nKx3L+yyEF+HZBNrG0HgGtREc/wKCoB+FNmwOun0VNoTqBNUJhqloDCMOu9/jTX62u7+OnIm8Pdfh7qanvW8doS+eMxz/856kdaA0H777jE4RhClRbC3Vs7dSi16rcuVJtZg0PkzGSKoqBTb7QLQRiTRWChb95QdEENJPyKLfeWNQgyoum5OswRlkDUpH+WWm8gsGgwGfJkiMOYLU6ATCdAfvDt+b9LTPd3uuxUc1D8SkSZOYP38+brcbm81GVFRUuzOsuXPn8tFHHzF27FgGDhx4TBN0DRo0iOnTp/PII4/wyCOPEB0dzauvvorNZuOaa64hGAzS1NREZGQkJpOJkSNHMmbMGO644w4efPBBXC4XDzzwAOeee26nlHD9+eCmQ3lm51dMTx6Gtq2z+krHVVikmTOum8Xrf3yX3euL2bFqFxmD09Bq+0YpUiFUhG9taLZpXRYIDzhfBeECbRaEXYGyP3kQ3lBZV8MoFP2xdUmRJEmS2k8E62nLrWCsromvfwxVpJw2VkulfwAiEEGgSGBv8hKXGkPAEmD5o9/hs/uIGRDHqFtCEwo311pJzIgna+iRk4f9nAEviaYowvtA8tDbHVUCsZ/ZbD7qppmFCxdy++23c/PNNx9LCC2eeeYZnn76ae644w7sdjvjxo3jvffeIy0tjYqKCk466STmzp3L+eefj6IovPjii/z1r3/l6quvxmg0cvrpp3Pfffd1SCxHsqkpNLjpcGo9VjY1lTA2vn+nxCQd2alXz+DjZxbQXGNh+SerGTVzOKn9+sacIcK/DfxbQJtKqOLS66DWgRID4b9BUUJFCYQIQKAc9ENQDONbkgpJkiSp89iDEbSlXuAX6/rh9mqIizMRm51JjctPhNOHp8FKfGocQoUNf1+FvcyKMcbE+HtOQGvU0VxjISo+gn75Wej0bX+Q5gn6STHFHPV5Sd3HMSUQx8Lv9zNmzJgO219kZCQPPvggDz744AHrMjIyKCwsbLUsPj6e559/vsOO3x4NXvuRN2rHdlLniEmMZtalJ/DJswvYtqKQok0lpOQkdWj3i+5IBPaCby1oYkAJA/dHECgEDBBxI4om9GtKCBUCpaDLRjFMbkkqpL6hqtFGQVktU/P6YTJ02a8WSZKA6uAAdGoMYYqFg/2KEgIs3mi+WpOAosDkyWnY3R6MrgDuGhtxKTEoGoWC97dQs7YSjV7DhHumYY4Pw9HsRG/S0y8/G1N421sSVCHQKApxxogOPFOpq3TZ48Fp06axZMmSrjp8l0owRh55o3ZsJ3Wes285DXOkCZfNxbJPVtFca+nqkI4rEaxHeH8ERUHRxIF3CfhWAAqEX42iTf9p42AFaJNQjCegyLke+pSyOgtLtxZT02THHwh0dTiS1KcJIahwW9jgv2zfz79cH/r/U/+dgio0DB+egNYIwukjWGcjOiESrU5L5Yoydn+yA4CRN08gdlA8HpcXvy9ATl4m0Qntu0dxBb2YtUZi5PwPvUKnPib6/PPPW/6el5fH888/T11dHWPHjj3oIJNzzz2384LrRKPickgyRh22G1OSMYpRcTmdF5TUJqn9k5k0exyLP1jOpu+3UbqjgriU3llJQqgOhG8FCBtoc0LdmDyfh1aazmk1vkEEq0Ex70seeue/h3RwxdWNrCooxenxEWaUrU6S1NXsAQ+NXgc+dRQrK0bxx8xNrdbbAtG88eMsFm5JISrKwJChcdTVWzE0OomMCMNgMmApamLji6sByD13CJkn5hDwBXA0O8kelkFiZkK743IFfaSYYwnXyvEPvUGHJBCBQACHw0FMTMxht7v33nsPWPbVV1/x1VdfHbBcUZRem0BoFQ13DZ3NPZveP+Q2l+RMlQOouyFFUbjg92ex7OOVNNVYWPH5agaO6U9ETO96oiKED+FbCYEK0PUDtQqc/wIEGKaAceZP26pNIIIopuko2r47P0ZfI4Rgd2UDqwpK0Wk1JMdGYncd/USckiR1jAavDWfAS427Gf2+odT2YCw/1MymyR3Bhuokli8JlbgfOTGe2mYrapOTCJ2WsCgznmY3a/62DNUXJHlsGkMvG4GqqljqrCTnJJExKO2g3aKOxBP0kWaO7fXdfvuKdicQgUCAV155hezsbObMmcPq1au57bbbsNlsTJgwgeeff57o6IMP3Vm0aNExB9xbzEwZzuOjLuPpggUHtESMjMkmQmfEHfBh1sknet1N7uh+jJiRx4Zvt7Bu4RZOvuJEhkwY2NVhdZhQxaX14C8EXSYIJzheA7ygGwTmi1p+AQjVDqoNDFNQdAO6NnCp0wgh2FFay9rCcsxGHfFR4bi8/q4OS5IkoMZtQaso7LJXc118DQDragaxcN0QhiSlsHX1VgCGD0nEFKahqrqBCJ0KCbG4vF7WP7EcT5ObiIwoxvx+MopWoanaQkxyDP3yM9Hq2v9wUxUCBWT3pV6k3Z+C559/nnnz5mGzhW56H3nkEWJiYrjvvvsoKyvj6aefPuRr09PTW/2JjIykqKio5WchBEuWLCEqKor09PRD7qe3mJkynP/M+CPzxl/P1f2mMywqdM57nXXUuK0UO2q7OELpYLQ6LRfcMRsUqNxdzdpvNuF1954nr8JfAP7NoE0BFHC+DqIZNEkQ9msUJVRxQwgPBGtBPwpF33lzlkhdK6iqbC6qYlVBGeFmA/FR8oZAkroLb9BPlbuZMK2BUkcVYyNCLQ2vf5nFyk0VvPm/dVidXsJMBs4eNRztdoUB5eFMiulPuGpk62vraN7ViC5Cz8h7J6MN02FrcGCOMDFgRDYG09E91PSKAGatgViZQPQa7U4gvvrqK+68804uv/xyioqK2L17NzfffDNXXXUVd9xxB99//32b9lNUVMRZZ53VqmpSeXk5c+fO5YILLqCqqqq9ofVIWkXD2Pj+XN5vGienjCBSZ8bmd1PkqGGnrRJHwNPVIUoHMWpGHrmj+wGw8ou1VBX1jmRPBMrAtwaUSFDCwfU+BEtD1ZfCb0DRhMYqCeGHYDno81AMY2W51j4iEFRZv7uSdbsqiI00ER
sRKuOtqoK9NU0UlNWxcU8lwV9MDCpJUudo8Nqx+93Ue20MDasjTBukwWFmd218q+1cHh+LVuxAaXAxKacf/YjH959aaheXo2gUJt51Avo0M5XuJhr1LuKGJmCMOvqxCx7hJ0ofRoTOdKynKHUT7f6tX1dXx8iRIwFYsmQJGo2G6dOnA5CSkoLd3rbSo08++STJycl88MEHLcsmT57MDz/8QExMDE888UR7Q+vRYgzhDI/JZMy+gdNrGouocVsostd0bWDSQRlMBs6+5XQAijaXsuWH7QQDwS6O6tiIYCPC9yMoKoo2Hjz/Bf8GQAvh16Fok0LbCRUCZaAdgGKYiKLouzZwqVP4A0HW7ixj855KEqLDiAoL3Qhs3VvN3H9/z7/+t46v1xTw+3lfcNaf/sGijbu7OGJJ6nsavDZUIdhtq2FSZOjB1qo9mYiDTCu3qaKOYZlJhIUZ2bNxL9+/uwwIzXk0K280+bYksqrCmTxgGPHJsVS7LZQ46qnz2PAE29dl0SuCpJhi5PiHXqTdCURSUhIVFRUAfP/99wwdOpS4uDgANm7cSEpKSpv2s2HDBn73u98dMPNzfHw8N910E6tWrWpvaD3ewKhUJsYPJN4QgTvoo9BWRaGtGpv/8NPRS11j+oWTSOmXhBpUWfbpampL67s6pKMmVGeo4pLaBJo0hG8deL8JrTT/CkUXGuMhhIBgGWhTUIxTWlokpN7N4wuwckcpW0tqSI6LJMIcehK5dW8173y3AauzdUtpvcXBH19bIJMISepEqlCpcDVi1hrY1lzBpKjQA8iVRZkH3d4rBB69QkNlE58+9xVCCEaflM/400cR8AewlVqYOHwY50+ZxuyMsZyaOpJx8f2J0pto9jrZ66ij2t2Mw+9B/WWt2FZxhdbF6uXvi96k3QnE7NmzmTt3Ltdddx3r16/nggsuAODRRx/lhRdeYM6cOW3aj6IouN0HvzEOBAL4/X1vQF64zkheTAZj40KzT69v2kuVu5k99uoujkw6mPCoME67JlSNaOeq3RSuLwrdYPcwQvgRvlX7WhWyIbgXXO+FVhpPQjFO+mljtRqUCBTjVBRNTJfEK3Uut9fPj9tLKCivIy0+ijBjqMVJVQVfrNxx0Nfs/xY8NX+J7M4kSZ2k2eek2eciIAIYNE0MMltRBawuPngCAdBgdfHhE5/jdfvIGprOGdfNAgFVRbVkDE5nxInD0Gq1GDQ60sJiGRmbw+lpozk9fRTTkoaSYo7BHfRR5mygzNmAxeckoLZujfcEfRgVHTEygehV2p1A/P73v+fXv/41iqJw1113cdlloYlKtm7dyq9//WtuueWWNu1n/PjxvPTSSzQ1NbVabrFYeOWVV5gwYUJ7Q+sV+kckMykhl1RTDH4RpMBawS5bDRafs6tDkw7izBtOJio+Eq/bxw8f/khjdXNXh9QuQgiEbwP4C0CXAaoFnG8AQdCPANNPDwREsBEEoeRB27aWRqlnc7i9LNtazJ6qejITo1vNML23pumAloefE0Bts4ONeyo7IVJJkhq9djxBP3sd9Uzc132poCoRi8t8yNds+mIdTdUWohOjuPCuOWh1WmpK6ohLiWHMSfkYzQeOe9AqGhKMkQyOSuPklBGclT6GmSl5DIpKQwioclsocTTQ4LXjDfpxBr2Eawxy/EMv0+4yroqicOONN3LjjTe2Wv7vf/+7Xfu56667uPjiiznppJMYNWoUcXFxNDc3s2nTJgwGw2GrOfVmRq2eYTGZTEjI5T8V69jcXMqgyDR22aqZkJDb1eFJvxCXHMOJF0/my3n/Y8sPOyjZWkZCWlxXh9V2gZ3g3wTaJECA8zUQDtBmQNiVLYOjhWoDYQfDNBRdvy4NWeocVqeHH7fvpbzeSmZiDHqdttV6m6ttBR4arPLhhyR1hkpXEwaNlkJ7NVfG7Rv/cIjuSwCmgKB+xW4MRj2/+uM5hEeF0VxrQWfQMebkEUTFt22m6Ui9mUi9mf4RyXiCfhq9dmo9FspdTTR47dj8bhI0YXL8Qy9zVKVT1qxZw6ZNmwCoqqripptuYs6cObz00ktt3ke/fv1YsGABl1xyCS6Xi23btmGz2bj44ov5/PPP6dev796kZIUlMCE+l+zwRFQEWyyl7HHU0OBt2wB1qXOdd/tZGM0G7E0Ofpi/Enuzo6tDahMRKEf4VocqLCnhoYni1GpQoiD8NyhK6MmTEG4I1oN+DIp+WBdHLXWGJpuLpVuKqKi3kp18YPIAEN7Gco4J0bJsoyQdb86AlzqPDaNGT6mjpqUF4sc9WYd8je77QhQB5/7udJKzE3HaXDhtbkbOyCMlJ+mo4jBp9aSHxTEmrj9npY3mjLTRTEsYTIqubcmI1HO0O4H4/PPPufrqq/n2228BeOCBB1i9ejXZ2dm88sorvPbaa23eV3JyMvfccw8ffvghCxcu5OOPP+bee+9tNRBbCMF9993XZ8q6Aug0WoZFZzA5ITRwtcBWSZmjgUJbVY/sY9/bZQxMZfwZowFY/90Wygu7/2dVqE2hikvCj6JNBPfnENgB6PeVa40NbSf8EKzcV651jHyC1AfUWRz8sKWIWouD7ORYtJoDf01UN9lYsKrgsPtRgOTYCEbn9v45fSSpqzV47TgDHmo8FnJNTcTofPhVHcXNqQdsG6HREvVdIca9TZx48RSGTBiI3+unobKJYZMG0X9EdofEpNNoSTRFMSgylSiN7L7U27Q7gXjrrbc477zzuPvuu6mvr+fHH3/kt7/9LS+++CJ33HEHn3zySYcGqKoqn3/+Oc3NPatv+bFKM8cyLm4AgyJDX/6NzSXsddQdMGu11PUUReHCO2ej0WqoL2tgxWer8bi678RyQnUhvD+C2gDadIR3Gfh+CK0MuwpFF3piJUTwp3KtxkkoSrt7PEo9TFWjlaVbimi2u8lKikGjaZ0wqqpgyeYinv9sBTXNdgwHaZkAWgpG/uGiGQdNQCRJ6lh1bgsKCrvt1UyKCrU+7GlOxe4A/EEi/ruDiO92EfXFNoyvLkdf1MjQSYOYdsFE1KBKVXEt/YZnkTd1MBr5nZXaoN2fkuLiYs4991wAfvjhB4QQnHTSSQDk5+dTXd3xFYP64lN3RVEYGp3O1MRBaBUNe511FDtqKbTKVojuaMiEgeRNHQLA6q82ULWnm87fIQKhbkvBvaGKS4FCcO9L+k1zUAyhOV5ayrXq0kKDphX59Ki3K6trZumWvTg8PjKTotH8orWp0ebilQUr+XrNToKqytCsJO751QyuPHkM0eGtPx9JsRE8ccNsTho9sDNPQZL6pIAapMLdRJjOQKGtikmRod8/36wOjX8wFdRiLLNgLGpAX20DNXQPMXj8ABRFobq4luSsREbPGo7eIOf1kdqm3Y8Uo6KicDhCfbyXLVtGWloaOTk5AJSVlREbG9uhAfZliaYoxsYNYG1jMdus5axvLCY7LIHcyBTSwuS/c3ei1Wm54PdnsXXpDsoKKln/7Wayhqaj03enp/YCJbgV1B37Ki7Vg/NNQAX9BDCe/NOmahVoolEMJ6BoorosYqlzFFc3snJHK
apQyUiIbrVOCMHqneUsWLUDXyCIUa/j7MnDGDcoA0VRyO+XSl52CgXlddRbnJw6diBTh/eTLQ+S1EkafQ5sfjdCCIKqlbzwUHXLRbv6gyowbT34g93v319Ock4iYZFmxpycT7gcryS1Q7uv8BMnTuTFF1/ktddeY9GiRZx55pkALFy4kL///e9MnTq1w4PsywZHpTE9cSgGjY5qj4Vd9moKbZWoQtZW727Gnz6K7LxMhBAs+3Q1NSXda2I5k64CTXAjaBNABEIVl/CAdgCE/aplfIMI1gPaUPKgTezSmKXjSwjBzrJalm/bi6JAalzrZNHm8vDPhWv5dPlWfIEg/VPjuOOCaYwfnNlqPIxGo9AvJY6hWUmMzk2XyYMkdaIGjw2/GqTYUcv4iDp0iqCiMYoaaySGoga0joN3qbU12infWcWoWcNJSI/v5Kilnq7dV/k//elPxMbG8uKLLzJ58uSWcq5z584lLS2Nu+66q8OD7MuiDWGMie9HfkyoKXJ90172OuupcvetMSE9gcFkYM7NpwKwe30xO34s7D7dzdRaIgzbEZhCVZec/wC1ETTxEH4dihJqthaqFYQLxTCxZSyE1DupqmB7SQ2rCkox6nUkxUS0Wr+5qIpnPl5KYXk9Oq2G2ZOGcsNZk4iLlJNBSVJ3IYSgwtWEWatnW3NFS/WlpbtDA6HNmw9f1CM+LZasoRnHPU6p92l3/4q4uDj+8Y9/HLD8/fffJy0trUOCklrLjUxhetIwdlgrafI52GGtIDMsnhRTDDrNwQcxSl3jpMun8cHcz2isbGLJRysYfVI+iRld+2RHVZ1offMJNxaiqCZwfg/BYsAM4TehaEI3jkJ1QbABDBNBN6RLY5aOr6CqsqW4mg27K4gONxET8dNEUy6Pj89/3M6motCNR3pCFJfMGEVyrCzDKEndjdXvptFnRwlCpbuBSVGh8Q8r92Shr7Cgazz8PCzDTxgiq+tJR+WoO2gvXbqUNWvWYLPZiI2NZdy4cTKBOE7CdEbGxvVjdGwOPzbsYkPTXgZFpjIoMpXsCNnFpDuJiA7n1Ktn8MFjn7Jt+U72bCrp0gRCdS8E2wPoRTNxYYB/1b41CoT/GkWbDIAQvtC4B/0IFMMo+QulFwsEVTbsrmBzcTUJUWYiw34aAF1YXs/8pZuxubxoFIVZo3M5aXSu7JIkSd1Uo9eOw+Nmx+4SsiPspBjceP1aNpSmEbmn9LCvTUiPY+SMvE6KVOpt2p1A+Hw+brnlFpYvX45WqyU2Npbm5mZee+01Jk2axKuvvorB0LYJhtpK3sxATkQSs5KHs81ajs3vZru1nKzwBNLD4mQrRDdz9i2n8cVL3+C0ulj8wXLyJg9q84yeHUl4FoL1d4daCyI0k3BLuVZdLopxoizX2ov5/AHW7apgW0kNybERLZPB+fwBFqwuYFVBGQCJ0eH8asYospJiujBaSZKOpMxaR01RHbuctZycFmp92FCahkFvYuD4/uwprD34CxW45e+/RquV9w/S0Wn3Y6UXXniB9evX88QTT7BlyxaWL1/O5s2bmTt3Lps2bWLevHkdHmS36UfehQwaHSNisxgfNwCAjU0lFNtrKXU2dHFk0i8lpMVxwvkTAdj0/VZKCyo6PQYhggjbw4ffyP0JQg1CsBR0GSjGE1pmn5Z6H48vwKqCMraV1JAaF9mSPJTUNvHsp8takocThudw+/nT2pU8CCFwe/3HI2xJkg7B7nSydtMOmkubsUR6WsY/rCzKZFBCJMJ/8GIrcakxPDD/D0zb93tKko5Gux81LliwgN/+9recffbZP+1Ep+Pcc8+lsbGRDz74gNtvv73N+ysqKmLFihXU1dVx5ZVXUl5ezpAhQ4iICPXL1mq17Ny5s71h9kqZ4QnMTBnOpuZSGn12tljKyI5IIDM8HoNGPjXuTi68cw7fv7eM5lorP3z0I7mjcjD/rJ/58SZ8q0GtO8JGFvCtAkMeimFay1gIqfdxeXysKihjT2U9aQnRmAw6AsEg367fzZItRQgBMeEmLj5xJLnpCW3eb1BVsTjcWJ0eIsxG+qfGYTLKOvKSdLz5PD4WL1pLRW0dwSg9Wp2fMRGhyn8bSjPJ1hvY+s1WAGZeMpWk7ARqiusYOXM4Z/7mJNnyIB2zdt91NjU1MWzYsIOuGzZsGLW1h2gu+wVVVXnggQf45JNPEEKgKApnnHEGL7/8MmVlZbz77rukpKS0N7xeTatoGB6dyZTEgXxZuYEtllKGRqUzMLKeQVEHTlcvdZ3sYRmMOWUEq7/awJr/buT0X88id1S/zgvAt7GNG3pQDFNRtLKEX29ld3tZub2EvbXNZCbFYNBpqW608e8lm6husgMwdmAGZ08ZhrmNk0j5AkEabU7c3gBxkWFMHJJNTkrcARPKSZLU8bxuL+u/3ULBnmLMWWZ2qRZGRdRj1KjUWCNQtalUL96F1+oiJjGKSbPHUltaz6zLpzFp9lg507TUIdr9KcrKymL9+vUHXbd27VpSU9t2I/vyyy/z5Zdf8sgjj7BixYqWbkp33303qqry7LPPtje0PiHVHMOJScNINcUSECqbLCUUWCvwBGX3ge5EURQuvGtOaJbPolpWfbkOv69z3iMRrAa1jV3bDBNRdJnHNyCpy1gcbpZuKaaktpnspGh0Gg2LNxXx/OfLqW6yE24ycNXJY/nVjJFtSh5cHh/ldRaqG23ERYQxY+QAzpo0lJED0mTyIEmdYH/ysHvTXkSmETWopdnoaJl9enVxBolBPdVLQz03Zl0+DZRQN8OsoRkyeZA6TLs/SZdccgmvvvoqb7zxBtXV1fj9fqqrq3n99dd5/fXXueCCC9q0n08++YTbbruNCy64gJiYmJblQ4cO5bbbbmPFihXtDa1PUBSFYTGZTE8OldncYa1kl72KvY62tfxInSf/hKEMnpALwIrP11BdfIQuRR1AqK5Q9yVNEihH6JKkxIPp3OMek9Q1Gm1Olm4ppqrRSnZyDBaHh3kLVvLftTsJqoK87GTuvGA6w/sdvqVXCIHF4WZvdRMWp4eclDhOGTuY08YPYVBGYptbLSRJOjZet5d1/9tM8eYSogbE4NYFaXb7cBl8TIoK3QMUWdKp+3Inqi9IxuA0hk0ehLXeTmxyDElZbe+eKElH0u4uTJdeeik7duzgqaee4umnn25ZLoTgvPPO44YbbmjTfhoaGhg6dOhB1yUnJ2Oz2doVl8Vi4ZlnnmHJkiU4HA4GDx7MXXfdxbhx4w66/ZYtW3jiiSfYvn07CQkJXHnllVx11VXtOmZXSTBGMj1xGKsb9lDirGdDUwkDIlLIDk8kTCcHwXYXWp2W828/k8cu+zvFW8rY8sN2MgalHrcnQEIIhH8DBCtCCcSRZiuPegCNHDvTawRVlQ17qthSVEe114AnILA43WQlxrBmZzkLVhfgDwQx6nWcM2UYYwdmHLbCXVBVabK5sLu8RIWbyO+fyoDUeBKiw2VlPEnqZD8lD6Wk9k+m3uzCEvDRrHOSbHDRz2QnoCrsLUqmaf12AE69+kQURcFpdTJ4Qi4GOT5J6kDtvnvQ
aDQ8+uijXHvttS3zQERHRzNhwgQGDBjQ5v1kZ2fzww8/MGXKlAPWrVmzhuzs7HbFdeedd1JfX88zzzxDfHw877zzDtdddx2fffYZ/fv3b7VtWVkZV155JTNmzODDDz+kurqaP/3pT9jtdm699dZ2HberDI5OY1ZyHm8WL6HIUUuBtZJBkakMj5WzB3cnU8+dQPrAVCp3V/PlK//D7wvQPz+b4dOGdPwgtmAR+LeDkgzu9wEXKFGE2q+tP22nxIaSB/MZHXt8qcss2ribJz9aQp3FsW/JTiJMBk4dN4hFG3ZTWBEaXDkgNZ6LTxxB7GFmk/b5AzTYXHj9AeKjwhneL5Wc5NhW80X8UjAYZNuynTRWNxOfGnt8Pt/HWW84B6l38ri8rP/2p+TBYNJTjwOnO4Aj3M30fd2XiuoTKP9XaALI4ScMIT03FZfNjTnCTGr/5K48BakXOurHj7m5ueTm5h71ga+++moeeOAB/H4/M2fORFEUSktLWb16Nf/85z+5995727yv0tJSVqxYwfvvv8/YsWMB+POf/8yyZcv48ssvD6gK9c477xAXF8eTTz6JwWBg0KBB/PGPf+TPf/4z1113HSZT9+/LG6U3c0LiEJbXF7LLXs365mIGRaaSE5FEhL77x99XGEwGhk0eROXuaoo3l/Ly7W8CkJARzy3PXdthZfSE2ozwrQXFAP51ENgO6CD8JtCm4XftwGHdQkTMcPTRN6LRRnfIcaWut2jjbv742gJ+Weza4fHx6fJtAOi0Gs4YP4Spw3PQHKL1wOnx0WB1odEopMRGMDA9kYzEGEyGw/+aWPbpal7+/Zs0VDS2LOvoz/fx1hvOQeqdPC4v6xZuYu/WspbkwYOfyoADn1fFFu9u6b5UsCced1EzOoOOWZdNA8BSZyVjcBoxiVFdeRpSL9SmBGLWrFltbrJWFIXvvvvuiNtddNFFNDU1MW/ePD744AOEENx5553o9Xquv/56Lr300jYdDyA2NpbXXnuN/Pz8VnEoinLQrlClpaXk5+e3mvBu2LBheDwetm7dyvjx49t87K40IDKFU1NHUOSopcLVxFZrOQOjUhkVl9PVoUn7LPt0Nd++88MByxsqG3nooqc6pBa3EH6Edw0EmwAteL4IrTCfh6LLCP1dE47dOxyz4XKMMnnoNYKqypMfLTkgefg5rUbhd+dOJTXuwBsIVQisTg8WuxuzUc+AtHhy0+JJjY9q0+zTyz5dzUMXPcUvA+jIz/fx1hvOQeqdfp48pA1IRr+vC5IFDw0+FwF9ALQBxkWExtctnRf6zk6aM5bohEiCgSDBoErW0MN3V5Sko9GmBGLChAkd/uGz2+3ceOONXH755WzcuBGLxUJUVBQjR45sNai6LaKiojjxxBNbLVu4cCGlpaXcf//9B2yflJREYWFhq2WVlZUANDY2HrB9WwkhcLlcR/36ozE6IouhkWlss1WwrrGIgeYkUrSRROnbNueA2+1u9f++oLPOORhUeen2fxxwYwKElinw8u//yahT8tBqj35chBLYjCZQgFDi0fteQiGIqsknIMaD1wvCQsAfwO7NI9JrAk3nfka7Sl/4bG/YU/WzbksHF1QFVruLuJ9VSQoEVZodbpweH1FhRoZmxpOdFEtcpBlFUfB6PEc8dkd+vveX8u4I7bkOd9Z39Ej6wmf1l/riOUPbz9vj9LJx0VZKt1eQ0i8JFYHX6wOgWG3E6ffjDPOQF95EpM6P3aWnYIkOc3QY404fidfro7nWijnaRFRSeKffm/xcX3yve+o5t+da3KYE4m9/+9sxBXQwZ555Jvfddx9nnnkm06ZN69B9b9iwgfvuu49TTz2VGTNmHLD+nHPO4YorruCNN97gqquuora2lueeew5FUfD7j77Upt/vp6Cg4Bgib7+AUMlXEymkigafnWUV29A2uhlsSGrXfkpKSo5PgN3Y8T7novWlNFY2H3oDAQ0VTfz3g/8xYGz7xvzsp9fWE2NagyoMxIX9G4OxCX8wiqqmqaiiCgUfBl09dm8evmCKfJ97mS1FbavsVVpZjSHowhcIYnP78QdVosMMZMaFkxxpwOy3UVdpoz11wjr68/3zFuFj0Z7rcGd8R9ujN39WD6UvnjMc/rx9bh+7VhZTU9RAfEYMdQ0/VVkMoFIQXk9QVWmMcnLOvvEP6xeFo6oK+acMpr4x9E2u3dvAgHHZFO0tOq7n0lZ98b3uiefc1mtxl5Vg8fl8xMbGdvh+v/vuO/7whz8wZswYnnrqqYNuM378eB555BGeeOIJnn76aWJjY7n77ru57777iIyMPOpj6/X6YxoXcrRiXCns3mNhnWUve9VmJscZSEnOItYQfsTXut1uSkpKyMnJwWzuvJmSu1JnnXP9VkubtosyRB+yItlhCRca/x4UEYeiVqDz70aggbCrSY3IAqGiqCUI7VTM/rGUlFbI97mXceujYfHOI26XlJCIajBgMGkYnhnBgNR40hOiMOiOfpBw5br6Nm3Xls/3nj17jjqOX2rPdfi4f0fbqC98Vn+pL54zHPm8PU4vG77dQtCmkD9xOPpfjEEq9Vvxq81ECS2lEU1MjAwlF2u/iyQmM44T50xB0Sh4nB4M2UYmzZxAXEpMZ5zaIfXF97qnnnN7rsVdlkBcddVVPPfcc5hMJoYMGdIh/8Dvvvsujz76KKeffjqPP/74YbOoiy66iAsvvJC6ujri4+MpKSlBCEFm5tFPqqUoCmFhh65ucrzkmk2c7RvPDnsl1oCbLfZyhsZlkh6T2OZ9mM3mLom9Kx3vc07NadtM6qk5Ke2OQwgV4VsPog4wgSM07kExnY3BNDC0TaAUNFkopumonlD3C/k+9y7J8dGEmww4Pb5DbhNuMpCSEE1OciwD0uJJiY1Cozm27kJuhxtLTdtKbbfl892RXWTbcx0+nt/Ro9GbP6uH0hfPGQ5+3m6nh+1LC6neU0f20Az0v5hjRQhBhdeF0EBNwE201svQsFAL2oYfIjntxhmYzKFS7k2VzaQPSCO9X2q3Gf/QnvdaCEEwGCQQCBznqI6P/eXaNRpNt5m8T6/XH7GyXHs+K12WQPznP/+hqqqKyy677KDrFUVhx44dbd7f+++/z8MPP8yVV17Jn/70p8P+IyxcuJCvvvqK559/nuTk5JZlaWlp7SpF211oFQ2jYnOYGJ/L4rodbGwuYWRsNrmRKSSYZOWFrjJ82hASMuJpqGw8eB9rQpVehk8b0v6dB/bsK9kaD855QAB0eWCcAYAINoJiRDFOQtFEAH1j3ENfUt1kY9WOUkx63WETiKtOGctZE4YSF9UxN2luh5u132xCCEFYlBmX7RB9fBVIPNrPdyc54ne0B5yD1Dvs/16V7qggLTf5gOQBwOL1UanaCNPoKNY3c3JkLRoFirab0GRkMyg/BwA1qBLwBcgeltltkoe2EkJgsVior68nGAx2dThHTVVVdDodVVVV3SaBAIiJiSElJaVDPhddlkCcffbZHbavvXv38thjj3HKKadw44030tDQ0LLOZDJhNBqxWq1ER0djMBjIzc3l+++/54033uD0009
n9erVzJs3j8cee6zDYupsyaZozsoYy4bmEqx+F6sbi8iNTGWqMbLHXUB6C61Wyy3PXRuq8KJw0BuUC35/VrtrzQu1aV/JVjN4vga1FpRoCLscRdEgVFdo3gfDdBRtesecjNStWBxuftxWwperdtBod6HXaTDqdTjcPyUS8VFh3HHBdM6c0HFdb35+kxOfEoNQD5EZ77vk3Pzstd16LoWff0eFBgLJUahhBjQuH7oaG4ro/ucg9Xz7v1dlBYdOHgBKXFbcOh/CokXN/Wn26fVLI5l++dSW7WyNdqLiI0nOaXsvhO6ipqampahOVFQUOp2uR97DBINBvF4vRqOxW1w/9heXqKsLjY9JTU095n12WQLx29/+tsP2tXDhQvx+P99++y3ffvttq3XnnXce5513HldddRVvv/02EydOZMCAATz//PM8++yzvPDCC2RkZPDYY491aFLT2RRFYXh0JtMSh7CgagNbLWXssFQwMDKFZHNMV4fXZ007fyIPzP/DATXm97M3O3A7QhP9tIUQPoR3dShBCNaDfw2gQPjVKJoIhAiCWgW6fBT98euzLXUdt9fPyh0lfLFqO3uqGtFqNPz6tAlkJESzcsde/G4np08eyfRRA9tUirXNx/1Z8pDaL4l/P/E5boeHiJhwUMDR7GzZNjEjnpuf7RlzKEw7fyLnvHgtb6/eRsD8042bxuElu8zG6FnDuzA6qbf7+fcqPTcF3SHmXbH7/JT77GjNCnvdDjQ6lUkRoQHUe13ZTMhP+2nbJgfDpw3BHN6z5oQKBoNYrVYSExNJSEjo6nCOyf7WE5PJ1C0SCKBlqEBdXR1JSUnHHNdRJRArVqxg8eLFuN1uVFVttU5RlDY/yfd6vRQWFuLz+RAi9CRLVVXcbjfr1q3jD3/4Q5v2c9NNN3HTTTcddptflm2dNWsWs2bNatP+e4o4YwRnpY9mVeNuGrx2VjfuZnB0KommKDRK92lC62umnT+RKeeMa5nlNizSxNt/nc/u9cUsencZ404bRf4JbbvZF76tECgGxQju+aGFpjNRdPsGjQbLQJuJYhiPonSPi5bUcfzBIGsLy/ly1Q627g3dPPxqxkiyk2Mpq7MwZVg2MYqLUUMyj1vykJ6bwuJ/r6B0ewUGk54rHriQqPgINi/eQWr/JIafMLRHzeK8aONu3txSiDDrUOJ9YFLBo0EVevYOTeC1N7/lzjvO7eowpV7IZXez9puNlBVUHjZ5AKh2uLBq3fgdCq5ID4PNVuINXtxODfq8WegJfd+8Li96o560Acf+hLmz+f1+hBCEhx+5AIx0dPaPQfH7/Z2fQPzzn//kiSeewGg0EhcXd0DTUlubmlavXs3tt9+O1Wo96Prw8PA2JxDSTwZHpTMreTgfla1ku62CrZYyciNTSQ+L6+rQ+jStVsvIGXktPyuKwl/Of5La0noWvrmYrCHpRCccfryKCJSDfzNoosD5D8AHukFgPCW0PlgHSgSKYRKKpu8NSuzthBBsLqriy1XbWVVQBsDsSUPJ75dCaa2FrORYxvVPprSDSza67G7WLdzUcpNTsGY3qxasB+DsW08nMSOe8sIqJpw5minnjEen77KG7XbbPxEfqV60+Q4U808PxIRbg7olnA+37OaKykaS0uO7LlCp13HZ3Wz7oZCKwqojJg9Of4AKlx0RE6R+TxBlpI9J+8q3FlfGkhGZ0dJFtrnORmJmPPFpHV/lsrP0xC5LPUVH/tu2+0r/7rvvMmfOHB599NFjqtv97LPPEhsby8MPP8wXX3yBRqPh/PPPZ+nSpXzwwQe8/vrrR73vvixCb+LMtFGsqC+k0t3EqoY9DInOIMUcg1a2QnQbo08ewcQzx/Djf9ay4vM1TD13PJNmjzvkl1uoDoRvDRAA77JQNyUlEsKu3DfuwQHChWKciaJN7tyTkTrFzvI6FqzaweLNoQRh2vB+nJDXj9K6ZtLjo5g6LBst6hH20j4uu5s1/91IRWEVaQOSaahqYsEroW6iU8+bwNCJA7HUWQmLNJE3dUiPSh4ANu6ppN7ciGb8QSpKmVQ0E+wElCjemPc19z50ebcaDCn1XF6nlw3/20JdSSNpA5IPmzwA1DhdWPDiDQZpdHvRRQaZtK98a41hINEi1FVJVVV8bh/ZwzLlZ7Wb6cjJMruLdn/CGhoauPDCC4950p/CwkJ++9vfcsoppzBz5kyqq6s58cQT+fOf/8yFF17IvHnzjmn/fVn/yGROTxsJwC57NZuaSqh0NXVxVNLPGYx6rnzgIsKizDianSx49TvqyxsOum1LydZgNaj14FsBKKHkQRONEH5Qa0A/ItQiIfU6ZXXNfLNmJ9+sLURVBSP6p3LGxCGU1VtIiolgyvB+RIZ1bH/nXyYPPq+f+U99ScAXYMCoHGb8agp+nx9ro4MhEwcRn9rznnjWWexo8kMzef/yd/v+nzXDHSz73yaq9tR0cnRSb+Syudn5YzEVu6pJyz1y8uAJBKiyu1BMQeoqgyjJPsI0fkaEhX5fVNtGEEbofsze5CQyLoKUfu2bSLYvuf3225k48cCxWVu3bmXw4MGMGTPmgAmFt23bxuDBg3nppZcYPHgwn3766WGPMWvWLO6///6Wn+fPn8/jjz/e8vOnn37K4MGDqaioOMaz6VrtTiCGDRvG7t27j/nAqqq2lFDNzs5utc/TTjutXSVcpdZMWgMnp+TTPyL077u6cQ87rZUE1J5bEq036j8ym9OumQnAxkVbWP/d1oOXrQvsgkBBaNyD68PQMuPJKPohobFDwXLQ9kcxjO11TzgkqLc6+N/6Qj5dsQ1fIEj/1Dh+deIIqhtsxEWGMTWvH7FtHITfVvuTh/KdlaQNSEaj0/DZ37/GUmclNjma8247E41GQ21JA5mD08gdndOhx+8sFqMNxayiKKBBMCaijlNiyhgTUYcGgaKAEqbiiHDy0VNfEPD3zJr0UvcQDAbZ8N0W6ksaSR2Q0qYWu1qnG7vfj1vno7E2iJLkY2xEPXqtwO4zozgGo+wre2ZtsJE5OI2wyJ4zcVlnmzx5MhaLheLi4lbLly1bRkxMDE6nk40bN7Zat27dOgDOPfdcPvzwQ2bMmNGuY86bNw+LxXIsYXdL7U4g7r//fv75z3/y6aefUlRURFVV1QF/2iIrK6tlYHO/fv1wu90tb2ggEMDpdB7u5dIRZIcncnb6GDQolDrrWdtURLnrwCpAUtfRaDRc/MezScyIx+8N8NWr31K5u/VTThFsRPjWAUZwfwh4QNsfTGeGNlBrQBMTmu9BMXb6OUjHl93t5fuNe/jg+004PT5SYiO5+pRx1FmcRIQZmZqXQ0J0xw44/HnykD4w1Dd78QcrKN5Sit6o46K7z8YcYcLaYMMYZmD41MGHLDvZ3cUmhL4zM6Ir+WzY17ycu5SHc9bwcu5SPhv2NTOiKwHQ4GXxv1ewe33Hji+R+pbmGgt1pQ3EpUej0x95AKsvGKTS7kJnUqms96AiUBJ8LbNPl9kziRSh8W4+jw+dXkfGoJ43eLozTZ48GYANGza0Wr58+XJOP/100tLSWLZsWat1a9
euZdCgQaSnpzNq1Cji4uSYUjiKBOLSSy+lurqa+++/n9mzZ3PSSScd8Kct5syZw1NPPcW7775LXFwcw4cP5+GHH+b777/npZdeIjc3t90nI/1Ep9EyLWkYQ6ND8wCsbtjDDksFflU+QetOEtLiuejuUPngXeuKWPbxKnz7JgUTwofwrQZhA9/KUIUlJSxUslXRIlQbiACKYSKKRl7QehuvP8CKrcX863/raHa4iQ438evTx2N1ejDqdUwZlkNybGSHHtNpc7VOHvQ6dqzcxY//WQvAnJtPIzkrkYAvgLXexpAJuST04MHFSaZoZkRXMjdnJYn61hPiJerdzM1ZyYzoSiL8RjwOD+8+/Alup6eLopV6uvqKJvzeAHpj2xLuOpcHm9+PMKrUVgdQEnwoOpgcUQ1AuWUQkSKUBFvqbCSkxxGfLn8XHE52djbp6emtEgi73c7mzZuZMmUKkydPZvny5a1es379eqZOnUpFRcUBXZh27tzJtddey+jRo5k5cyZffPFFq9eefPLJVFZW8tlnnx3QbWnz5s1ccskl5OfnM2PGDN54443jdNbHR7tHvD388MMd0k3i+uuvp7m5mc2bN3PFFVfwl7/8hd/85jfccsstREREyDEQHSA9LJZzMyZQaKuixmNhVeMuhkSnMSAypatDk37m1Ktn8O2/lrB7w14WvrWYcaePZPC4XIRvCwT2grCDb0lo47DLUTSxCOGDYB0YJoZaJKReJaiqrN1Zzuv/XUNNsx2TQcd1p0/AFwiiKAqTh+WQnhDdocd02lys/WYT5YU/JQ91ZQ188fJCACafPY68KYMBqC2tJ31gKgPHDujQGDrbqNhMMjM2IwDNL36taRRQBdyVsYUvpt3BRyu+ZN23m1m3cBPTzp/UJfFKPZeqqlQUVmKOMOHBdcTtA6pKhc2JSaNhh8VOwA/aODcZBgfpZheqUKhrGkl/tAhV4HZ6GTU8s8eUT+5KkyZNapVArFy5EiEEkydPJhgM8umnn9LQ0EBCQgJ79uyhubmZqVOnHrCf2tparrjiCnJycnjyySdxOBw89dRTNDb+1Nvj+eef5+abb2bYsGHccsstJCX9ND7lwQcf5LbbbuP222/no48+4sknn2TAgAHMnDnz+P4DdJB2JxDnn39+hxxYo9Fwzz33tPycn5/Pd999R3FxMf379yciIqJDjtOXaRQNkxIHMiq2H+uailjbWMzYuAFkhMVj1PbMLge9UXhUGFc/dAkPnPM41cW1LHxzMZm5QUy6TaDowfXv0IbGGSj6fIRQIVAOuoEohpFy3EMvI4Rga3E1r329ir01TWg1Gq4+dRwGnRaPP8DUvByykzt2wPL+loeKXftKSup1eJwe5j/1BX6vn375Wcy69AQgNMutwWQgb+oQDG18ktpdafwbSNAf+mZOo0Ci3smFt6SwfH4yVUW1fDD3M0aemEdUfMe2/ki9W3OtleY6KxFxEXiajpxA1Ls8WH0+Is1aysrdgEBJ9DIpKtTNtd6dgNkfehhotziIiAkjVQ6ebpPJkyfzySef0NTURFxcHMuWLWPEiBFERUUxZcoUFEVh+fLlnHvuuaxduxaDwcD48eNpaGhd6OStt94iGAzy2muvtXRr6tevHxdffHHLNsOGDcNgMBAXF8eoUaNavf7OO+/k0ksvBWDUqFF8++23rFq1qsckEG3qwvT555/T3Nzc8vcj/WkPq9XKokWL+OCDD/D5fERERMhJRDpQsimaC7MmYNToafI5WFG/k1JnfVeHJf3CmJPzmXDmaAB+mL+Cml1fgwiC+1MQLtBmgWnfTOnBKtAm7hv3cGzV0KTup7i6kX98s4ZtJaEbhUtmjCQ+MgyX18eEwZkMSOvYGVqdNhdrvt7QKnkQquCz5/9LU42F6MQozr/9LDRaDQF/gOZaK4PGDyAps2fPFAuEqpq1gT7CwUV/CH3/dq8v5rt3lx7PqKReqL68AZ/bj9F85Gt2UFWpsDvRazSUue14vALF4IM4Tcv8D6XWHCLVUPcla72djIGphHfweKjeav84iP2DpZcvX84JJ4QekMTExJCXl8ePP/4IhAZQjxkzBpPpwCp369evP2BMxMiRI0lLSztg24MZN25cy9/NZjMJCQnYbAcpKd1NtakF4t577+Wjjz4iNjaWe++997DbKorCueee26aDz5s3j1dffRWPx4OiKIwYMYLnnnuO5uZm/vnPfxIVdfiJtaS2GR3Xj4nxuSytL2BD0162NJeRFZ6ASStvPrsLvUHPtQ9dwqbF27A1OPnvm1u57i86DBQDRgi7GkXRIdRmQLNvsriO7cIidb2qRhtvLlzLiu0lAMyZNIz+qXE02d1MHJLF4MyOfcLYkjzsrm5JHgB+mP8jezbuRafXctEfziYsKlTVpa60gbTcFAaP69ldl1poEtu02Scrqjn/4qv539s/ULByF588u4Cp500gOattr5f6NlVVqdhVjSmibYUuGtxemj0+4kwGlhRbANBrmtAqCuMiQklvedNwEjHg9/pRNAoZg9t20ypBQkICgwYNYsOGDeTk5FBVVcW0adNa1k+dOrXlYfj69eu57LLLDrofq9VKRkbGAcsTE9t2XTCbW1fL0mg0ocqKPUSbWiAWLVrEkCFDWv5+uD/fffddmw787rvv8sILL3Dttdfy0UcftfyjXXHFFZSXl/P3v//9KE9J+qVYQwQXZU8iXGfEHvCwtK6AYnttV4cl/UK/EdnM+c0IAKqLq9GLJaEVYZeiaBMRwgPBJjCMRdFldV2g0nHR7HDzwfcb+GZtqDrdtPx+jBmYTqPNzZiBGeTlpHRod7WfWh6qyRiY2pI87Fyzh2WfrAbgrBtPaekWYW9yoDNoGT51CAZTL3n4YBgHmhTg4P+uqoAaTxjzvvFSbnNwxf9dgM4QGhvy2d+/PnjZZUn6BWu9jeZaS5u6valCUOlwolWgwePB5lEhqKLtDyPCGzFpg3gCRtzWYWhQsNTbSEiL6x0tgp1o0qRJbN68mZUrVxITE0N+fn7LuhNOOIGamhpWrVpFdXX1Qcc/AMTGxh7QrQnolSVbD6ZNCUR6enrLxHHp6emH/dPWppt33nmHG264gdtvv528vLyW5SeeeCK///3v+f7774/idHoeIYII72qEe0Ho/+L4/ELKi8lkeuJQADZZStjUXIoz4D0ux5KOktrAJXdFM2SMwh1PlaIo4A2OQzGMCX0uAhWgH4qizzvyvqQexeXx8dmyLXy0dAuqEIzsn8rMUbnUNTsYMSCNEf1Tj0/ysLuajEGpaHWhgZcNlU3856VvAJhw5mhGTB8GQDAQpKnGwsCxA0jO7j1P3RVFixL1p/0/tVon9i15rnok/kwPj3+4hPzpw5h41lgAvnnze0q2lXVqvFLPVF/RiNflwxR25BaIJreXJreXKIOBDeWh7kqGsnoCA3Qt3ZcqHOlEigiEELjtbnKGZ7Z8h6W2mTJlCtu3b2f16tVMnjy51czdo0aNIjw8nPfff5/Y2FiGDRt20H1MmjSJjRs3Ulv70wPZPXv2UF5e3mq73joreLsHUQN8/fXXr
FmzBp/P19JyIITA5XKxadMmli49cv/QqqoqJkyYcNB1/fv3P2hW19sIz0KE7dFQLf/9NCkQ9ScU02kdeqwInYmLsiexsmE3Fr+TH+p2MDwmk1xj77kZ6MmE8CJ8a4iI9PPAm3XEJgbYW2CirGwo0y9UUUQF6NJRDBNQlKP62krdlD8Q5Jt1hfxz4Vr8+yaKO2fKcOqaHeTnpDAmNx1tB/4CclqdoQHTu0MtD/tvPLwuLx89+R98bh/ZwzI4+YrpLa+pLa0ntX8yQyb0vvLaiuk0iHn+gGuxAuz1ZbDEmo5mqJOt31WycNMeLvvT+Wz8fitOi4v3H/uMe97+XY8fTC4dP0IIKndXYww/cqud2Nf6AOAKBKjzBUBRiM4RNGtgalToRrXUkkuUMOK0ugiLCpMzTx+F8ePH4/P5WLx4MQ8++GCrdXq9ngkTJvD9999z6qmnHvLhzdVXX83HH3/Mddddx+9+9zuCwSDPPvssen3r60FUVBQ7duxgzZo1jBgx4nidUqdr92+lF198kTvvvJOvvvqK//73vyxatIgffviBzz//nO+++67No8dTU1MPmO1vv23btpGa2rsnQxGehQjLba2TBwC1FmG5DeFZ2OHHHBiZyimpoWa6bZZy1jYU8WPjLtZ6ytloKSEo1A4/pnRkQgiEb3OoZGtgN/GJVXg9Gh69MZuPniumqboIFOO++R5kdbLeRFUFy7eX8OJ/luPy+kmJjeTSGaOotTgYkpnEuMGZ6LTHP3kQquDzF7+hsaqZqPgILrhjdss6h8WJRqslb+pgjObeOVmhYjoNJXExSuw7EPUkGOcAkGOsZFK0E0Uv0Axz8tIXK0gamMJpV88A4Mf/rGXbsoIujFzq7qwNttD3Ku7I3ZcsXh8NLi9RBj1rd5aBoqAvb0YzPYoEnZv+ZitCQE3TKPRosdTZSM9NadO+pdYiIiLIz8/H7/e3DKD+uWnTpuH3+5kyZcoh9xEbG8sHH3xARkYG9957L4899hiXX355S5f//X7961/T0NDAddddx7Zt2zr8XLpKux9lfvbZZ5x77rnMnTuX559/nqqqKh5//HG2bdvGDTfcwMCBA9u0nwsvvJAXXngBk8nUMi24y+Vi4cKFvPrqq1x77bXtDa3HECIYetrFwQbLhBrOhe0xMJ6MonRcs6RRq+f8zAksrSug1mPlH8XfE9ifNGxZQ1JhFHcNnc3MlOEddkypDYKl4N8SqrbkDXUfqas/hcq9tahBBys+28kZN16PUSsHyfU2W4qreOqjxVgcHqLDTVx1ylga7S4GpicycWgW+g7sluC0Oln99UYq97ROHgCWfbqaXeuK0Oq1XHTX2YRHh2a3DQaCNFY1M+LEoaT2S+6wWLojRdGCcSIKIIwTEc27UAKFPJi1njO2TkOT7aGx1MorC1ZxzR1nsfyz1dSXN/LOQ/MZPCGX8Kiwrj4FqRtqqGjC4/SQlHX4MQpCCCrtLoJC4PcFqRRBQENyuKAhysNp+1ofmryxGDyZBHyhSWHl4Omj9+9///uQ6y6//HIuv/zyVssyMjIoLCxstSwzM5NXXnml1bJrrrmGYDCIxxOadHL27NnMnj27Zf24ceMOOiVCT+u63+5HW7W1tcyZMwdFURg6dGhLK8Lw4cO56aabmD9/fpv285vf/IbzzjuPp556quUf9qqrruL3v/89M2bM4MYbb2xvaD2Hb92BLQ+tCFCrQ9t1sOzwRIZHZwL8lDzsU+e1cc+m91lc03sy5O5OqHaEby0IN7jnAwL048kYdgbTzg3V+P73323s2dY7n/z2ZcXVjTz2wSKqm+yYDTquOXUcNpeHfinxTB6ajVHfcV3VDpc87N5QzA/zQyULz7z+JNJyf5posq6sgZScRAaPb9uDod5C0aZA+C2AnhhtHbdlVAGgHeHg0x+3YNMqnP/7swDYvmInyz5Z1YXRSt2VEIKKXVUY2tByZ/P5qXe5iTLoWb1pD0KnQdfkJOGUZAK6IJMjQwlE2b7yrZYGG7Ep0UdMTCTpeGn3b6iwsLCW/mDZ2dlUVFTg8XgwmUwMHTq01TTdh6MoCg899BDXXnstq1evxmKxEBkZyfjx4xk0aFB7w+pZ2lh7vM3btYOiKGxqLjnsNs/s/IrpycPQKr1z4E93IUQwlDwEasC3CIQVNEkQdjEKCr/+Sxpr/1dHY5WHz/7+DVlDs4iMlV2YeoO6ZjuPvb+IPVWNaDUarjh5DL6ASlZiDJOHZWPuwD71+5OHqj01ByQPjdXNfPb8f0HAuFNHMmrm8J+9zoWiKORNHdKmwZ+9jWKcjjCeBN5vuDhhA+/XJlEfA8EMF098uJinrj6dRe8sZc+mEj58/HMmnDmGuOSYrg5b6kbsTQ4aq0PdAo+k2uHCp6oYHF7K992ZJcZBY4QbDYJJ+xOI5qHEYqDRWs/QiYPQG+T4G6lrtPsOMT8/v6U+br9+/dBqtaxcuRKAoqKilmpNbdWvXz8mTZrE5MmTmT59eu9PHqDNtcfbvF07bGoqodHnOOw2tR4rm5pKOvzYUmvCXwCBnaAWQ2AHoIPwa1EUI6h1pOSkc+rVof6XqxasZ6vsa90r2F0envr4BzbsqUQBfjVjJDqtjpS4SKbk9SOiA8cZOCxOVn21gao9NaQPTGmVPPg8Pj568gu8Li+Zg9M49ZoZLevUoEpDZRMDx/YntX/v7rp0KIomHCJuAU08Wrz8rX/o+6cZ6mRDWTk/FlVy2f9diEaroWJXNQte+V+PquEuHX/1FY247B7MEQdOQvZzDp+fGqebSL2eVasLEWY9WrePzPFJWE0uhoQ1E6Hz4Q/qsDePwG1zExZpJrW/HDwtdZ12JxA33XQTX3/9NTfddBMGg4Gzzz6be+65h9/97nc8/vjjBx2Mcij//ve/mT59OmeccQaXXHIJp556KieddBJff/11e8PqWY5QexwIrTeMO/T6o9TgtXfodtLREcFa8G8A1Q6e0LgHzOehaNMRqh2EB8U4kcv+7yriUmLwun188uwCGqqaujZw6Zh4fAFe/uJHvtuwG4DZk4YSGxFGYnQ4J+TlEBV++BuN9nBYnKz+egPVRbUHJA9CCL54eSENFY1ExIZzwZ2zW62vK28gKSuBIRNyO7R8bE+j6AaC+QoAhpl3MT3GimIIDah+/vPl5M0YxthTQlVVvnx5IZV7Dtc1VeprKvfUYDDpj/gdqnG68ASC+OtsVIaHvoeJCQoBVcVl9LaUb61yphKpRmGps5LSL4mYRDmZqNR12p1AjB8/no8//pgzzjgDgAceeIDTTjuN4uJiTj/9dP7v//6vTfv54IMPePDBB8nPz+dvf/sbr7/+OnPnzmXgwIHcddddbZ6Qric6XO3xFuZLOIq354gSjG2r1tDW7aT2E8KD8K0BtRE8nwNB0I8CwwkI4YdgLehHgG4gsUkxXPzHcwHYtmwnP/5nLaoqq2X1REFV5b1FG5i/dAsQmiguJyWOmAgTU4fnEBvZcYNwD5c8QKh6UMGq3Wi0Gi66a06rrnEumxtVFeRNHYI5wvzLXfcpiqJFCbsIdMNRgD9nbUCD
QJPtoU5p5q3vN3DlXy7CFG7EUm/jg7mfEQzIyeUksDc7aKhoPGL3JZc/QJXDTYROy+plO1BjzGgCQZLzw2k2ukCBE/YNoC6z5BLm1yOCgqwh6Z1xGpJ0SO2+Q3355ZfR6/Wcc845ABiNRh5++GG++uor5s6dS3R02zLit956i8suu4yXXnqJc845hxNOOIFzzz2XV155hYsuuoiXXnqpvaH1KIrpNJSY50FziO4BwRII7u3w446KyyHJGHXYbZJN0YyKy+nwY0v7S7ZugkAJeFeEkghN/L6EEQiWga4/imFMy1Or2TeeTL/8LFRV5YuXvqG6WM4ifiRBobLRUtJtShQLIViwcgevfbUyNFHcgDTG5KYRbjQwNa8fidEdN7bFbfewfuGWQyYPRZtLWPzBCgBO//UsMgb9VMVFVVXqKxoZOLof6T8bTN2XKdqkfQOqDURqGrkzMzR5nHaEgw+XbkKfHsvMS0Mz1f7w0QoK1+7pwmil7qKhohHXvq5Gh1PjdOH2B3DsqqUuIbRtcoIOrarFFeYhUutjSFgjANVNI3E3OIlOiupVEzpKPVO7E4hXX321zQOlD6empoaTTjrpoOv2t2j0dj+vPa5EPwNRc0E3OrTS81+E5xuE2rFdVrSKhruGzj7sNpfnnCAHUHewlhnHXf8E94JQAhHYAmgg7BoUTVio8pYmDsU4KTQOYh+j2ci1j16GolEo3VHBwrcW4/f5u+xcurvFNds4Z8kT3LHlXf5pX8MdW97lnCVPdGl1sZU7Snly/hL8QZUBqfHMGNEfg07PlLwcUjq4hnvxhjKqiw+ePDTXWvj0ua8QQjB61nDGnJzfan19eSOJmfEMnTSoT3dd+iXFOAX2Te55fvxmUgxelJgAgXQnT3+ylF/98Vxik6Pxuny8/df5eN3eLo5Y6mqVRbXojbrDfo88gSCVdhcmjYaNS7cTSIlCEYLEgUaCfoHN7GZcRB0aBWy+CHD1x9HsIntYJgZT+8abSlJHa/ddYm5uLnv3HvuT8fz8fJYtW3bQdRs3bmTw4MHHfIyeQFG0KMaJKObZKObzIeJ3+8ZHeMH1PsKzHCF8HXrMmSnDeXzUZQdtiYjRh6FRFJqPMNBaajvhWYion4lovhLsj4PnI/B+GVppOhtFl41QrSDUfZPFxR2wjwlnjGLcqSMB+OafiyndfuxJfG+0uGYb92x6nzqvrdXyrixRvLO8lgf+9U1oori4SM6aOBStRsOkYdlkJMZ06LFsDXaaKq3Ep8cdkDz4PH4+euoLPE4v6QNTOP26Wa1ubtwON0F/kLwpg4/41LSvUTRhEH4DaJLQ4OPx/tuB0IDq1cUlFFjtnPPbULfejd9tZe1/Dz5JqtQ3OCxO6svqiYw7fMtirdOF0x+gcXURzVkxAKTHmlD0KgFjEL8m2DL7dJktG70DTOEG2ToodQvtLuM6c+ZMnnnmGZYtW8bgwYMJC2vdb1dRFG699dYj7ufmm2/mzjvvxOl0cs4555CcnExzczOLFi3izTff5P7772ft2rUt248fP769ofY4iqKAcQIi/Caw/w3UOnC9hdAmg2FChz4RnJkynOnJw1hVvZM1xduxhqt8V7cNi9/Ft9XbiDNGMj1pKAZNx9Wi74taZhw/6KSBgCYOIbwQrAfDJBRd/4NuptVque5vV7D5hx0011j45LkF3PbS9X2+j/rPBYXK0wULDrtNZ5QoDqoqG/dU0mB1olEUXvh8OU12NzHhJi44IR9FUZg4NJt+KQcmiseqtqwBn9N7QNUXIQQLXv0fdaUNhEeHceGdc9D9bJ4JVVWpK2tkyITcVl2apJ8ouoGIsKvB8SSDTcWcFJPDIkscmqFOnv1sKf+4+Vy+f38ZZQWVvPPwx4yalU9ETHhXhy11gYbKJpw2N3EpsYfcxh9UqXS40PmC7FheiO+cPABSsww4Ax68kV5AMGXfAOry5iH4a9ykD0glJkkOnpa6XrvvDl988UUAVqxYwYoVKw5Y39YE4rrrrgNg/vz5fPzxxy3L95fB++tf/9rys6IoFBT0jRKWimIA85mIYDG43obANnB9ANp40OV26LG0iobRMTmYzG7MGQmoGoVvqjex2VJCWm0McYYIxsT169Bj9iWHn3F8H/enwJWgH4xiGHHY/Q0Ykc2pV89gwSv/Y/mnqzn5immMPWVUR4bco21qKjmg5eGX9pcoHht/8ETtWC3auJsnP1pCnaV1C55ep+HiE0ei0WgYPziTgekdP/lTMBikvKACY7jhgIcNq7/awPYVhWi0Gi64YzZR8a27TTVUNBGfFsuwKYNl16VDUBQNmM9DeL8D/0buz1rPEstJiGwP1aVNzF9TwGX3n8/jV71I8eZSFr61mAt+f/juolLvVFVUg06nRdEc+rtU7/Zg9/mp/34H9oEJoCikRZgR4UH0Ph11pmb6mWzEGdwEVQ1NDaPQ+INkDU2X31GpW2h3ArFz584OOfDbb7/dIfvZz2Kx8Mwzz7BkyRIcDgeDBw/mrrvuYty4g5dCLSsr47HHHmPdunWYTCZmzZrF3XffTWRk11cfUjQxEHYlwr8H/D+C91uELgfCrzpo95aOkB2WwJz0MZS56tlhreSHuh2kmWNIMEaSFS5nujwqR5xxHBAWwLVv3MOR+7Re9eDFLP90FZY6Gx8++QUDxww44Gawr+rqEsWLNu7mj68tOGi66A+olNVZuWTGSIZmHZ/a7Y2VTTTVWgmPbd0qvHdbGd+9uxSAU68+kexhGa3Wux0e/L4AeVOHEB7VcZWgeiNFm4AIuwWsvyNcY+WPWcXMLctFO8LB+0s28q87LmbEiUPZvGQHHz/zJdMvmkxienxXhy11IqfNRW1pPZGHqb4UEIJ6h4tgo4PiNXvwXjoGgP6p4TQKK1F6AxadmzP2TR5X60oiWKcnNiGC5Bw590N7BINBti3bSWN1M/GpsQyfNgStVnvkF3YAq9XK3/72N3744YeD3pdee+21/Pjjj61eM2HCBN55552Wn//xj3/w/vvvU19fT25uLn/84x+ZNGlSp8R/JO1ux3/xxReprT14FZiKigoeeuihNu1nwoQJ7fpzJHfeeScbN27kmWee4ZNPPmHo0KFcd911Bx2M7ff7+c1vfoNOp+PDDz/kueeeY/Xq1W0uQdsZFF32vvEQmYAfXB8gPD+Eurscj+MpCvmx2VyYOZl4QwSuoI+FNVtY11iMze8+Lsfs7USwvG0bapNRNIevjLVfbFI0v9pX1nXz4u2sXLBeTl61T1eWKA6qKk9+tORwbU2sLixlaHbycXt6WFVUi+oPojP89FzI2mDj02e/QqiCEdOHMe60Ua1eI1RBXXkD/UdmkzlYdl1qC8U0EcyhloU5cdvIMLpRYgP4Uh28sGAlV/7lYvRGPQ0VTf/P3nmHR1GtDfw3s32z6b13QkLoCU2kKE2xYe/lWgB7wYLt8qmICnq99l65NqyACALSpPdOCIT0tunJ9t2Z74+FYAwlgTRwf8+TR5k5c857dndmznvexpyZcz1pl/9hVBRVYa41Y/A9vvtarcNFrd1BwYIdWFLDQKk
[base64-encoded image/png output omitted: learning-rate sweep figure — mean train loss on shakespeare_char versus learning rate (log base 2) for the SP and μP parameterizations at widths 256–2048, with the optimum of each curve marked]", 11 | "text/plain": [ 12 | "
" 13 | ] 14 | }, 15 | "metadata": {}, 16 | "output_type": "display_data" 17 | } 18 | ], 19 | "source": [ 20 | "import os\n", 21 | "import pandas as pd\n", 22 | "import numpy as np\n", 23 | "from tqdm import tqdm\n", 24 | "import matplotlib.pyplot as plt\n", 25 | "import matplotlib as mpl\n", 26 | "from matplotlib import cm\n", 27 | "import seaborn as sns\n", 28 | "sns.set(style='whitegrid')\n", 29 | "\n", 30 | "parameterizations = [\n", 31 | " ('sp', r'SP'),\n", 32 | " ('mup', r'$\\mu$P'),\n", 33 | "]\n", 34 | "seeds = [1,2,3]\n", 35 | "widths = [\n", 36 | " 256,\n", 37 | " 512,\n", 38 | " 1024,\n", 39 | " 2048,\n", 40 | "]\n", 41 | "lrs = [\n", 42 | " # 0.125,\n", 43 | " 0.0625,\n", 44 | " 0.03125,\n", 45 | " 0.015625,\n", 46 | " 0.0078125,\n", 47 | " 0.00390625,\n", 48 | " 0.001953125,\n", 49 | " 0.0009765625,\n", 50 | " 0.00048828125,\n", 51 | " 0.000244140625,\n", 52 | " 0.0001220703125,\n", 53 | " 0.00006103515625,\n", 54 | " 0.00003051757812,\n", 55 | " 0.00001525878906,\n", 56 | " 0.000007629394531,\n", 57 | " 0.000003814697266,\n", 58 | "]\n", 59 | "class MplColorHelper:\n", 60 | "\n", 61 | " def __init__(self, cmap_name, start_val, stop_val):\n", 62 | " self.cmap_name = cmap_name\n", 63 | " self.cmap = plt.get_cmap(cmap_name)\n", 64 | " self.norm = mpl.colors.Normalize(vmin=start_val, vmax=stop_val)\n", 65 | " self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)\n", 66 | "\n", 67 | " def get_rgb(self, val):\n", 68 | " return self.scalarMap.to_rgba(val)\n", 69 | "\n", 70 | "\n", 71 | "color_helper = MplColorHelper('viridis', 0, len(widths)-1)\n", 72 | "n_cols = len(parameterizations)\n", 73 | "n_rows = 1\n", 74 | "fig, axes = plt.subplots(n_rows, n_cols, figsize=(4*n_cols, 3.33*n_rows))\n", 75 | "\n", 76 | "for parameterization_idx, (parameterization, parameterization_str) in enumerate(parameterizations):\n", 77 | " ax = axes[parameterization_idx]\n", 78 | " optimal_lrs = []\n", 79 | " optimal_losses = []\n", 80 | " for width_idx, width in enumerate(widths):\n", 81 | " mean_losses = []\n", 82 | " sem_losses = []\n", 83 | " lrs_to_plot = []\n", 84 | " for lr in lrs:\n", 85 | " losses = []\n", 86 | " for seed in seeds:\n", 87 | " job_name = f'width{width}_depth2_seed{seed}_lr{lr:.20f}'.rstrip('0')\n", 88 | " csv_path = os.path.join(parameterization, 'out', job_name, 'log.csv')\n", 89 | " if os.path.exists(csv_path):\n", 90 | " ckpt_df = pd.read_csv(csv_path)\n", 91 | " losses.append(ckpt_df['train/loss'].mean())\n", 92 | " # losses.append(ckpt_df['train/loss'].min())\n", 93 | " # losses.append(ckpt_df['train/loss'].ewm(alpha=0.9).mean().values[-1])\n", 94 | " # else:\n", 95 | " # print(f'Missing {csv_path}')\n", 96 | " if len(losses):\n", 97 | " mean_losses.append(np.mean(losses))\n", 98 | " sem_losses.append(np.std(losses, ddof=1) / np.sqrt(len(losses)))\n", 99 | " lrs_to_plot.append(lr)\n", 100 | " \n", 101 | " mean_losses = np.array(mean_losses)\n", 102 | " sem_losses = np.array(sem_losses)\n", 103 | " ax.plot(lrs_to_plot, mean_losses, label=width, marker='o', color=color_helper.get_rgb(width_idx))\n", 104 | " ax.fill_between(lrs_to_plot, mean_losses-sem_losses, mean_losses+sem_losses, color=color_helper.get_rgb(width_idx), alpha=0.33)\n", 105 | " \n", 106 | " if len(mean_losses):\n", 107 | " optimum_idx = np.argmin(mean_losses)\n", 108 | " optimal_lrs.append(lrs_to_plot[optimum_idx])\n", 109 | " optimal_losses.append(mean_losses[optimum_idx])\n", 110 | " \n", 111 | " ax.plot(optimal_lrs, optimal_losses, color='red', linestyle='none', marker='o')\n", 112 | " 
ax.set_xscale('log', base=2)\n", 113 | " ax.set_xlabel('Learning rate')\n", 114 | " ax.set_title(parameterization_str)\n", 115 | " ax.set_ylim(2.57, 3.15)\n", 116 | " # ax.set_ylim(2.3, 2.7)\n", 117 | " # ax.set_ylim(2.4, 2.8)\n", 118 | "\n", 119 | "axes[1].legend(title='Width')\n", 120 | "# axes[0].set_ylabel('Train loss on\\nshakespeare_char')\n", 121 | "axes[0].set_ylabel('Mean train loss on\\nshakespeare_char')\n", 122 | "axes[1].yaxis.set_ticklabels([])\n", 123 | "axes[1].tick_params(axis='y', length=0, width=0)\n", 124 | "\n", 125 | "plt.tight_layout()\n", 126 | "plt.show()\n", 127 | "plt.close()" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [] 136 | } 137 | ], 138 | "metadata": { 139 | "kernelspec": { 140 | "display_name": "nanogpt", 141 | "language": "python", 142 | "name": "python3" 143 | }, 144 | "language_info": { 145 | "codemirror_mode": { 146 | "name": "ipython", 147 | "version": 3 148 | }, 149 | "file_extension": ".py", 150 | "mimetype": "text/x-python", 151 | "name": "python", 152 | "nbconvert_exporter": "python", 153 | "pygments_lexer": "ipython3", 154 | "version": "3.9.19" 155 | } 156 | }, 157 | "nbformat": 4, 158 | "nbformat_minor": 2 159 | } 160 | --------------------------------------------------------------------------------