├── .gitignore ├── 1_img_recog └── 1_4_build_env.ipynb ├── 2_img_basics ├── 2_1_img_load.ipynb ├── 2_2_smoothing_filter.ipynb ├── 2_3_convolution.ipynb └── 2_4_attention.ipynb ├── 3_dnn_prep └── 3_1_train_eval.ipynb ├── 4_classification ├── 4_1_fnn │ ├── 4_1_fnn.ipynb │ ├── transform.py │ └── util.py ├── 4_2_cnn │ ├── 4_2_cnn.ipynb │ ├── eval.py │ └── util.py ├── 4_3_transformer │ ├── 4_3_transformer.ipynb │ ├── eval.py │ └── util.py └── 4_4_technique │ ├── 4_4_technique.ipynb │ ├── eval.py │ ├── model.py │ └── util.py ├── 5_object_detection ├── 5_1_object_detection_basics.ipynb ├── 5_2_dataset.ipynb ├── 5_3_retinanet │ ├── 5_3_retinanet.ipynb │ ├── dataset.py │ ├── transform.py │ └── util.py ├── 5_4_detr │ ├── 5_4_detr.ipynb │ ├── dataset.py │ ├── model.py │ ├── transform.py │ └── util.py └── model │ ├── .gitattributes │ ├── detr.pth │ └── retinanet.pth ├── 6_img_captioning ├── 6_2_dataset.ipynb ├── 6_3_show_and_tell │ ├── 6_3_show_and_tell.ipynb │ └── util.py ├── 6_4_show_attend_and_tell │ ├── 6_4_show_attend_and_tell.ipynb │ └── util.py ├── 6_5_transformer_captioning │ ├── 6_5_transformer_captioning.ipynb │ ├── model.py │ └── util.py └── model │ └── best │ ├── .gitattributes │ ├── 6-3_decoder_best.pth │ ├── 6-3_encoder_best.pth │ ├── 6-4_decoder_best.pth │ ├── 6-4_encoder_best.pth │ ├── 6-5_decoder_best.pth │ ├── 6-5_encoder_best.pth │ ├── id_to_word.pkl │ └── word_to_id.pkl ├── LICENSE.txt ├── README.md ├── appendix └── a_pytorch.ipynb ├── data ├── apple.jpg ├── classification │ ├── airplane.jpg │ ├── automobile.jpg │ ├── bird.jpg │ ├── cat.jpg │ ├── deer.jpg │ ├── dog.jpg │ ├── frog.jpg │ ├── horse.jpg │ ├── ship.jpg │ └── truck.jpg ├── coco2014 │ └── instances_val2014_small.json ├── coffee.jpg ├── coffee_noise.jpg ├── cosmos.jpg ├── image_captioning │ ├── adorable-1849992_1920.jpg │ ├── africa-1170179_1920.jpg │ ├── airplane-3702676_1920.jpg │ ├── automotive-1846910_1920.jpg │ ├── beach-1837030_1920.jpg │ ├── caravan-339564_1920.jpg │ ├── cat-4467818_1920.jpg 
│ ├── cherry-1468933_1920.jpg │ ├── couple-955926_1280.jpg │ ├── dog-7367949_1920.jpg │ ├── hit-1407826_1920.jpg │ ├── man-498473_1920.jpg │ ├── musician-743973_1920.jpg │ ├── port-5788261_1920.jpg │ ├── profile-7579739_1920.jpg │ ├── ural-owl-4808774_1920.jpg │ ├── wine-bar-2139973_1920.jpg │ ├── woman-3432069_1920.jpg │ └── zebras-1883654_1920.jpg └── object_detection │ ├── image1.jpg │ ├── image2.jpg │ ├── image3.jpg │ └── image4.jpg └── software_version.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | data/coco2014/val2014.zip 3 | data/coco2014/captions_val2014.json 4 | *.csv 5 | *.txt 6 | 6_img_captioning/model/id_to_word.pkl 7 | 6_img_captioning/model/word_to_id.pkl 8 | 6_img_captioning/model/6-3_decoder_best.pth 9 | 6_img_captioning/model/6-3_encoder_best.pth 10 | 6_img_captioning/model/6-4_decoder_best.pth 11 | 6_img_captioning/model/6-4_encoder_best.pth 12 | 6_img_captioning/model/6-3_decoder_best.pth 13 | 6_img_captioning/model/6-3_encoder_best.pth 14 | 6_img_captioning/model/6-4_decoder_best.pth 15 | 6_img_captioning/model/6-4_encoder_best.pth 16 | 6_img_captioning/model/6-5_decoder_best.pth 17 | 6_img_captioning/model/6-5_encoder_best.pth 18 | -------------------------------------------------------------------------------- /1_img_recog/1_4_build_env.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"authorship_tag":"ABX9TyMkJrcf0ucSJDSbc5Tb3c+h"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","gpuClass":"standard"},"cells":[{"cell_type":"markdown","source":["# Pythonで学ぶ画像認識 第1章 画像認識とは?\n","## 第1.4節 画像認識のための開発環境構築"],"metadata":{"id":"BIRrkAsCkGeK"}},{"cell_type":"markdown","source":["### 
環境構築結果の確認"],"metadata":{"id":"ePktBg2wkpoh"}},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"9ppJ0VQukAlK","executionInfo":{"status":"ok","timestamp":1673041865440,"user_tz":480,"elapsed":8128,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"8b93cefd-d223-4072-fe62-830aabc836cb"},"outputs":[{"output_type":"stream","name":"stdout","text":["Hello world!\n","CPU tensor([1, 2, 3, 4])\n","GPU tensor([1, 2, 3, 4], device='cuda:0')\n"]}],"source":["import torch\n","\n","print('Hello world!')\n","\n","x = torch.tensor([1, 2, 3, 4])\n","print('CPU', x)\n","\n","x = x.to('cuda')\n","print('GPU', x)"]}]} -------------------------------------------------------------------------------- /2_img_basics/2_4_attention.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[]},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","gpuClass":"standard"},"cells":[{"cell_type":"markdown","source":["#Pythonで学ぶ画像認識 第2章 画像処理の基礎知識\n","## 第2.4節 注意機構を使った特徴抽出"],"metadata":{"id":"PJHqNyQ4oGxC"}},{"cell_type":"markdown","source":["### モジュールのインポートとGoogleドライブのマウント"],"metadata":{"id":"F2MvMRsypQZK"}},{"cell_type":"code","execution_count":1,"metadata":{"id":"FnniPmx2oCdT","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1673796041991,"user_tz":-540,"elapsed":16857,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"163cd00a-7102-4488-aaae-5b37a486044d"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}],"source":["from PIL import Image\n","import numpy as np\n","\n","# Googleドライブをマウント\n","from google.colab import 
drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["###特徴空間への射影"],"metadata":{"id":"PnUvhuWPxpom"}},{"cell_type":"code","source":["# 画像の読み込み\n","img = Image.open('drive/MyDrive/python_image_recognition/data/cosmos.jpg')\n","\n","# NumPyを使うため画像をNumPy配列に変換\n","img = np.asarray(img, dtype='float32')\n","\n","# 画像を特徴空間に射影\n","w = np.array([[ 0.0065, -0.0045, -0.0018, 0.0075,\n"," 0.0095, 0.0075, -0.0026, 0.0022],\n"," [-0.0065, 0.0081, 0.0097, -0.0070,\n"," -0.0086, -0.0107, 0.0062, -0.0050],\n"," [ 0.0024, -0.0018, 0.0002, 0.0023,\n"," 0.0017, 0.0021, -0.0017, 0.0016]])\n","features = np.matmul(img, w)"],"metadata":{"id":"2L-LJ93Gxs_n","executionInfo":{"status":"ok","timestamp":1673796079996,"user_tz":-540,"elapsed":702,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}}},"execution_count":2,"outputs":[]},{"cell_type":"markdown","source":["###アテンションの計算"],"metadata":{"id":"3ixn17doWGWv"}},{"cell_type":"code","source":["# アテンション計算用の特徴を画像から抽出\n","feature_white = features[50, 50]\n","feature_pink = features[200, 200]\n","\n","# アテンションの計算\n","atten_white = np.matmul(features, feature_white)\n","atten_pink = np.matmul(features, feature_pink)\n","\n","# ソフトマックスの計算\n","atten_white = np.exp(atten_white) / np.sum(np.exp(atten_white))\n","atten_pink = np.exp(atten_pink) / np.sum(np.exp(atten_pink))"],"metadata":{"id":"oEF_OH2VWOSK","executionInfo":{"status":"ok","timestamp":1673796083155,"user_tz":-540,"elapsed":408,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}}},"execution_count":3,"outputs":[]},{"cell_type":"markdown","source":["###注意の表示"],"metadata":{"id":"1BtIz0SRwlLJ"}},{"cell_type":"code","source":["# 表示用に最大・最小値で正規化\n","atten_white = (atten_white - np.amin(atten_white)) / \\\n"," (np.amax(atten_white) - np.amin(atten_white))\n","atten_pink = (atten_pink - np.amin(atten_pink)) / \\\n"," (np.amax(atten_pink) - np.amin(atten_pink))\n","\n","# NumPy配列をPIL画像に変換\n","img_atten_white = 
Image.fromarray(\n"," (atten_white * 255).astype('uint8'))\n","img_atten_pink = Image.fromarray(\n"," (atten_pink * 255).astype('uint8'))\n","\n","print('白のコスモスに対するアテンション')\n","display(img_atten_white)\n","print('ピンクのコスモスに対するアテンション')\n","display(img_atten_pink)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":565},"id":"oi4qqFTnwvPa","executionInfo":{"status":"ok","timestamp":1673796086351,"user_tz":-540,"elapsed":435,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"e9ce4348-becc-4087-993f-c8e909dd49e0"},"execution_count":4,"outputs":[{"output_type":"stream","name":"stdout","text":["白のコスモスに対するアテンション\n"]},{"output_type":"display_data","data":{"text/plain":[""],"image/png":"iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAAAAAB5Gfe6AABoE0lEQVR4nLX9SZMkWZImiH38Nl1EzVTV3Mx8jfDwyFgzMiO3rsraZqqolunpQWNATTMX4ACAaIiGCCcQcMQB/wInEIEIB9xwAFEDQ40edBfQ3bVkVWTlGhl7hHuEr7apmpmKqMjbGIf3RFTV3NxjySrJDHMzVVFRYX78mD/+mN8Twtc8iEgqISEgGE7KresDG0+Ol94HJgAMpH8I4GdeZPOX7rxnfuBL7in/nwAQmMHrV+KNE/OtgYkJANQ3+0pQpMBMpnjzx7tV9cVnJ9WTReD2G9M3fKU7BwDqPvjNDqb0f85Kf5b87d/cqeubKIABCDAoMJnt/X/2u1ioqzfOqr//uIq8LsqaBmjzLr6Car7+bRHSt1/4Lr5w0up1+mb3QiCptIxRkR5ce+GVf/7C4/N9nMyOZ//m11XgyMyrU/N3XhwEeuqX39IEsvHn3zhf6JKrUXqRuqn6TaYAM8dIshhe/9b3dkbXNLbG0IMfVHJXFY8/PLA2fXsWv7u11e20ht+9xETfWPR8BdBqbrfz4NLzVgcBAH1tBTABPsji9Tde/f73p6VBYaAruOPrf7wjlf/w7sNHfl76+NQHaf0WnmF+9M1toLU04mcO/9rXUDcZvrYCiBFIqsk//+OXcfWgKBz23cK6s4EbXPGL/l45eOP1n/zi6NTlr3rGHTzrz29uCrxuaF9yFSIAzIRvoAAGADV8+Xd/3FtA28JqhxHck/PhsoZS0L0rfzI0H73rV594prjZ9InXBP8tJsNXiTvt97aSfBMfQGR2X/hf/BdkqTQoAe1sdQRf+boa+mk41U/Gf3pd/8SteZz0dUlaYDVACS1cCBFPzVN86YiuzvpqOiAVKabv/doKELI/uP473/9vAGcdDHQJx3rgGC40UOraNfXh+OhN/dnDuDHWQkjJIcSkBCIQiCN356ymf/fbN5kZ3Lr45x9SIkQCwF9TAWK49/J/vX1tZ9eOoK3RMIAtrIEeaN/rAf16hPB7n+nwnUO7FgKpd+vWD4rZw48PZgCp3sCMRjKef7pkdBFgJeQagMhCfZ2Z8RXOjCQj2DJ9XQug4r/8z1793cns0ZAAQ9AAoF1hiyHm1UTVqj/Ch73qn797+P2fHcV030pw6H/7f/7jN94/wMc//xmL3vbV3YnZ3Zmc/x8/DusaeOrm22DR4et/rCMICElg+poKkC//d99+9P7v7zhYSxZJAdA
wBpgvofr9Pqnx+JNvD18+2J8xwAw56vvm5r/8i9vmymD3zZe06E329vrqhufh/qv3l/xs3/UUkvxt0UJ3cDCAEOFrWgCJ/b/AL2p/Re+5cg6T5QcAa8qjh6VU8Iv+q+7RR+Pd7dtflIFB6vqf/O75u+ObNwh7rjC3/0SOpteGGjiqjhdvf/i5S3ZycfT/KdDy2hFYxSgify0fQGb3X9356V8oXaEAVdppOGhYVyxKh9Oz2kul/Kh2s2a5V/zZ5+efPA5k3v5f3TiWf/bqG1M4uPnwxTctHGwFYFkff+fk/3nUABnKXkzb1r/7H8/6AQDRGwqCmL+OBehXfrT16IXPJ1fPrsyx1NAYsrGwdlGdnnlVK6X6gFf1IFw5fLV8LTQnfuv2/+Qtdayu/cjA6Skw0qWxKCvnz9R5XRZv/NVZkx33uswZP/M/vuT54EBEQsSvMwVo9INXMNpvjvQVjbPtwmkAFtYUqM6WGv09p7xCvYV6crxdwg/3hv5bv/Nn5wMJZQANN4K1Mzg4553/pFeq8nysZMj3tOn7u/+Y/ikUEeq+IKKvMwXkze+8foyylMuzba1caYCCUaLS2mmvBk71vYYi76Gds8XL+MGs/r3ff+E3n33em1gDQDs4W+jSVf7ML3ASzOyR3hd1E57nA+ifxAyid8DXUQDJl/6Xf7Dt4WVfoRrDAdC2KFFVS+WXgIeH0wrqvN4auGWFN7Zf/u7pD3d58cD2bhsAgC5Le9OZCtVycVgtMDpz0z1f//KJT0I+Ox58JYT/dY7o2AhS4asqgIqX/jf/zbyCUs14yy/1GYbeD7XVtjqZKQD9c688oKDmZV+p81NMX7XfxejJuw/P9nZfxGK0cPbUOb9XVtWTWV2eLwOa0L/iR1fsLAZCm8aiZXjar/4Kgn8DEwksBRHHr6gA8c/+t2+WOOvXJTCslmdeKTVQunA4WpZeSeW9h/JQGJVNrQHvYIw1cI+O3datKdsnp6cn3kNtu7PlYVlaj4FpYrE4fjKT8jlS0PMlvECqffWDOQhi+qp8gP7B/+z+6VDffPDI1JVTSg211kVhjIavg1cBHvCAVzWCXyoFXRoYoOpHPb4yM3h0drgA+v3T5XJelrapw8B4gagQq3hJ2OcLlAFd6gk33v6aGogkwlcmRIq+nVTDa8MTxunJltIuo0BrASDkYOLRhweaM+W1dhYGT07Lshhtf7J095uzgF6vt/BV5S1CjUUzxNJcgzt1TxGXG3HxnygSRpbMX1EBdGX0P/7B6fiORa+SR1DLermNYQnr5ksfmh6gGga88lCqVx/2ByimtjSzd/1pvdXHrx4vYnDQfWNssNGSgJBlkLUb9t3RLDwl4gWKq8sP/zHJVQ5CfGUcMJpsza8bi6t3d9jPFVSNk8FgPHSVU9tlGPcXHvBQXnn0UAatpjAG2H7QO5v4+z9dRKFsr+cmp+fRAT2j4fSW1TIeffroLIKIO16AO/aeCR3HvwETW17tK1MglyogMkh+qQKS0m1xZQ8jYDw+hqpVX6naL6ZDQOstKN9XqkHDgFTKQ02uXtmH04BW6oko9KNK6OBrMdAILgKanIN2pijrYWNpVPtIa2K25BZtSt3ioRYY0FdiDJ6TRXLkrxAF0ocbPzQAUGwdN00P3ivUflkNAaW2lB8AihECpO8rr7YGhQGs085hrofuFOiFZfRSWOcgtQhA7IUgsYSPtLWo4vqXZQagHd8uWb7cD36JBrjV09PTLAbmFgcQ+Esc6dBYa6C3FEJoAPSVhzsFlh4KyqPnJQJCQBOodiX24Szb5RyDXt3bRS0gZHAuAlpICWDEWEa7OMXVsXsc+Cnx+IKF89ob6Za/8rFBSK9dMTBDrSip9a9P1icFcWCAIRRgAIyuFE3deAC19wvMoODh4ZXySsHAhxl6BWC0Y8C580O3XaD/cnViZT/EBfQwBBgtpRyY6shhtjCv/4FomnBpILx8yBlEl8z+i3WhtRlEEAzmC+xjZFzIBimzT5T
OlNeu0/yk9JFp6xYAa4DxdOaD99L7BgCUgof3GPhGQsEHb43s9YfaWK1teXR8Ut+64v1+HQ5lv1zYWGzLABhjJHrnlYcV/Z3JG58ehEsqSPzM+Zto3UvfWRMmzX8GMZEQHGPcuFryN5f4gFa5ovjRW/L+rx6cW5CeJZ4fZge+CUHVoQoIUvXrAO9DuQgSgQFI1Rtta21RWLizsoyT/RlunJd22a+lij0phTRmIGVolrFZsNp74cF4LJ5iBP4RDyYmEmCB+LQ+LyigC7pSyOIP//slfefV37z/+IT3tkyaA8WVpZIITQ+NxVAGNB7WW1lJiSABBajBcDKChSmrZROGV3Z9/3sPZjhHYLitGIw0RkoVzp2wNW1/+zv/Nph/OgIoKZWjESIEcFwzEcLTpTHO80C+ebW8/l/fHk7MT/7o/Xf/+r7Y2YcFHPREqw/PGyBAei9VAEJjAwNBShlMz0iphoV2BRZT+86xHr8x0VPcLH7tF66Ex8wGuTVUmDXL4M/8zVs/3h2esLpUA18jzF/Kpna/MqPp99jZgA1HwCC1GSkTXc/ovfUvTnfuOBh9xwxuFn/1GmZGG5RAoY/Q2OCHPQDBo4cgJYBlqCGkDYFUAskAcHQuJsMChTWTH5fzClieN6rp9XoIcnHqzrH7vbd2PjJ9+Yz66LM08AwPePHdVbGeol86SYoix7iBuC6qXghBMYrxzq2XZr/wN/boh/bK9u2dF1C5CYDSTUaabMByS5JEAPpAIwO8CAEhIgTfwAAoMbXucDEaAlMYGFMUdoqDez3A9JRvmtOFL3HzL779y4+CebqU+tzjMrWsF1a6f1s2GUDkKJUWnkVYL+FnJ7iKF1L02Em9Fe8XB7MtzMKDYTG/8Qd+mzWsM86WVksJKa2BQg8A0KvYQgYTLDRgyyWMY7NYzMt5s907HV6FNYADAL0MMD2J4JdOxGbwwvXiPEhbh2e6+6cEfSZx8iUulCMzhGQGcTYCRnaCq08Ko1WUve033i7628Ndf9r/+PZL5fRHd/eccSUmk7nTw9Gw6QEW6AFYBPSaECIkApwwsMtzDZSmOjqdLWFwAjN1BtC6AMpaoifhm1ADXu+/Mpn5YXV2Hp5525eU0HNd93INbLyzoRJmjkqpyIQYYvvuBScoeuTNZO/aW4XyI+UfVlPPDmUxAdK01toUW1sAEHzwQJBSAcEBIoTotDXRA2DYo4P5oR+EcO7GQJEu744WfVa+ac5DGWtXvPk25sviyb3z+Ex3x18D8m2eeUFLDDBz8EISoojMsbUAXv9IEOb2S9PhlZ7v97eXi964fDJXy29Z66Cndq5NAewU55WUCLZBwBYUfIDQ1oUAp5c03DIwbnY+P7kbe5Ln/c92hgUMAHdwXl/1mIfzRd0srbz+g5sPHqM+/nTOz+mNuNQIvsqx6ViJAQ4xCK2FomgDeQBQG0EBRIOdNyCLcznEltp+YeZl3WyfbA+dLvcL/QA3ATMcIYRBCBaQsul5BAeJGFxEqFEMdzRsceDKaiG0aNSpOt/fMwZOMzt1x57hiGPjoujf/o65v1RYnNT8DB/wHOG+1umrzzEjysEwcmYaxMbbZCZXb4x6ArJpasAt1WhUyGb+5FTjyAITbWAXGjYECyBKCYkQAkKARhRCFkB/aGAWwyO1aER/oBp1iuX7ZVmydZXu7cKjWQZhy/G1qT4A98/Pao9nt/VcfnSu/hmKYG5/PHXE4OoohBAEbPgABqlbfyTmC7U1Nj34R4eqVv1pv79oHi2XONmdGgAoAUiZgp6U+QJSGgshAYcIDcA4deLipNiChD9V5+7asADQn8J9UVbGnVXyxiuvjn5z2NtqHi2fpoR++2OdUtvMKgPZc0Mijb3KUJEAkr0b/9Pb99GTY8D3/IJ6UFBbqq5K7339BfaMnWGOChIRwUFKiUbCB0CGEBEkQoQcAjB3/cF53JkMfQ9nDeony+1tHJ97/M3jh8sQ69gvvvfDyaLy8uC9h/7LcvHnHZc
CqE0NtCro+k3C0hHHVLzvoALJ3Tf/i+Evy723zhrPCkENlYI/H2BU+ko1zaMBJjh0GGsMm+BCkBqAlQHQMCGEphciXOgboOTHJycV9nd8UL5RdRWO4M6PKvQen5UO0enRnR/eKWfovfiLz+bhy8n/ltO4JGe8XPyVBrqQTyDKUy16cGLdOkKE0H/lX/zO+6PxtZu+16uPpVQSfdQjBTU+rQ7R88tqiGo50MN+r2fLCAmEEIKBlECAc0ILLLcAwJawC2+2+/6sF0IYBsCfHx01Q17MSzFooth76w5ssX12Nls+EwR0slP321PCPs8ZXhZCBbecMzO6ZIgEEV390z8b1q/sLK9gsLwfAqQfBom6BkY+nJFsZno4XLqBc1BSRgdIAEFaCYRFDN7LKFyQIwBwvvJR+7ofGiXRhIHyi8OT2uiDBih9MX7re1iU++Nj+3jmWwf4PF5v7S/enODPO3iDIEEaaYCS8+cEhZmkFILFAAcvfV8X5XSsH9azJWSwvZ5a1FCjUaN8eHy2wMsKqBx6vfMQgKAl4KQ2srYREEJEMdy7Wlhj3OHMq134uofetLF+u1+fHM2dxHm/53xx+7U/vIW5mz367Bc/PwupwfsZwtClYf95I79BrG1+hIiyYRBFBgMKTCQEIoUn/+ben//5HICeFidDKwmAV+wbpRSkr46P7eTloQYw8M05YvTCaQ0JKaW1EEoIHcP4+nUDaJwsYHb7UD30+4tZrw8EGyHieIhF8dKPXpsABR787DcPHi9jahq7PGi1g9dOaOKOtPi6UICBQARBJAQ44WlWTClDAJ1/VondP8EMUzKDsV1gIHt9HFZQVc9Xcnp01p8fodrVlUO91Y9OeAGngzbDRhYuRLF1uhzt3XCzKcy4if2xgley3z8azUxdn52bEaCjWfb2X79z0wBTvneG8sgJ3dNDDk0dAAS/pghab6N71iBfIudz3mIAQpBgjkDyAUTEDGJuTu/euzs+3QdjvNvASgTvQwgBAVbKQbU8PH6hqrTTSvVMnTGUlBJSyhAQAYjploE1cOdB6OCBvlo0QK/xjYUUUkbE/v6LN401wEdPQrVoSE/HkxfM2ZNjJxTNz+waa/N0r8yXBswvcQqgSNEn5N1mg5Q5eHf+2U/2bzti4IbWh4338L4qLSBl0NjB8vFnV4dVBWjV61vEqHXWgAnGRYEo+tduTCyAoxmGfQRAoQyNHJ/MF3UUMNIFufut2wVQaozV5798hNHL39u++Tv64YdP0Je1/ptP53UEiJ8Fj6n1bN8MDjMxM+dKTK4NMgiRKNTHv9zxWwtoNzLYOfOP4K1dNg6i73rV7vTzhx/feb06cVvew5gQoQuEGADZa4AgAFnsTwwMZg8WcmskvVLwjcT26NGsdrInIGH6N1+awJnS4oOfvvco3vrP/neTo3ExXfy4GhYPn2zd+snH90tpTHnqvywTvOz9LwdUDOIQRTIBVgCYhYgMcODj+1f35to5pws9rtT5HAahiRrSVS9Mmocnh687V9fwHkJGbQZYIiyHyldSIqKHnYGBgb33eTOYbgUACj2pRr6qnNCSIPtX9l++aRxg8Mu//Gz75Z0//+8MrmKG0QjlgwePP5/JV98c3Rruffp/ue+ezRQ9ywi+EqBkArfgIykgFQ4AsF82p5o9dIXxRMH7MxnhYYSG2t4f18cfAN73oZSULiAgRCvt+FQG6IjeYEcDwMG9J9BbRRXglZeqP/pw5qLWMghsvfjKni5hDoBHM/Vf/Yl71T6YTFEA9qN3P3x0jp2bL33nW8XB4x989P+ab4hz6eS/iPO/4rGKId0UEMwgoSB9peGc84NxMVm6eiiFFCA5GPYxefGTe83+7sj3vRr6YENYoo4I8EpKrZswGI8nAOzR8Vns95QHapRQCg/PndRahmiv3b5NJcgUh1/UN8wrrwEHxRQwwPzee58tvHrp+29cByaY/M47Vdtz/1TH/eX8wdeRP51MUCmhFAkXSmPQnCvll+d
bA2OKgVY9raPTqq+3t7Cz/8mTMLw+gHYYelu7AFdDRw/ZCwMBaNPXAHB25gDlA9CEsoeRPvdQ/VFwkFd2RyVIozyajf50e1KaA9jZFFgc/uadj097O3/yoxt45K7vY/7W6wfL9aymrZN15GcW4evLv34oMAkBkI4hktqqjrbvyls1vHeHc11sO6Vm5zX6W1uvXNcTfPiosaPXTg77g2thjrgQ/QiHXtH35X0HAItyisXcz+e9F8tjc009no9evnH+i7qnt66EU7Pzwu9rFIWzZfHirp4YPTNsyr+f7j7+1b1P6+l3v/X22yC8CpSFHl4bz1zbIkRd+XYDJTyXMntOZXx1jkpXYgIJJmK4xdFV773Hcrnt4PzIw1gMt3dfGJtivDX0i0fX4T0gTd8FBBERGgCAjECABoxehjja9mYYGovtfb2sIIayCcBwTxdINSZjCnugtd43s62Tn/3m0OP2269cu5l6b6w2s9NiLD2nIL0a4raMvComb6yV2RQfz4dMyQLATIgCJADyMVSHV+coUav6HFAD9NEDJjvXboKht6ZnywfXdxWcV0PbdyFCAGiUB4Iwqd3L4KhGf1Rv75TnXm5vV0cL2R8E68L4yrXCALDACIxFqW/OisXDkwcf3Fts3/jhP7+a78o6wB0Pdgd21UzOG+ntRvMYP+UQvkLnRKcABkMwCCRBdeMiSq9qkk3Z62PkleqP9ODKuDBsUfWvYnb+xWiEAQGoRRAAEHwdfJCiMKE8MlOcPjkXxUj5/sEJdibV5/fORV/auj++dXvPALY1TjMpjLF/9Q9PqtLcvv3tH04XVVEk+W05Pdp/++jDMgKxhX9PF8zXG0suPb4UKymAY5QgMEmi+qxEmJth6IXKDxvl6xEG/eHuvgHIlGfYWjbVw503hm6ILXW+hHYAQqO8DygmIdRnu8D9h+fY2vJYzOvtW1sPPz6ohgD0lZdfvWkAOGhnFwCKwt53v/wPd5W58ur3rt0w9rDlzy3s5FXtXvt//+yg8ZGJiVPNLrEDvJL90gH+Gp5RAYhBEMAQktzyfKAeTYKU1iPAw0NpPSwMrIE9+PywwbA+e3BlD9eHFU6WTkQhJbxXDUR/u6kCDBaPZrbfWy4qqa5ee8u9/2ABOCO3Xvz2TVOysdpBQ8NZXf7s3Qcnp+PbL7zyewaL0uxrA6TVSDTffwu3v/vObz7+7K7DhZFvA8BX4g+ec7SMUAwSRCCSgqLyRxqwkApQvgk91XclrNPaHT98bPu6X5098HsTMzrdqc6DlhoSSgLS9FUjVYH5iRUDzBdeTm7cfumjo0pJDQyuvXLTlOUEgDYwAOzd9356rzbXf+cPJoWBda7IFTQDOO1gDV4cf3/75/+HB/E5nNlzuie/RPq2ONqhjCilMjD+rIYMGKjQoLGyBzhXVoBmV5UO/V5TH/enANTISCfToPVLAP1aYWxQWaFGEqqnihu7BZaxp/vBTF/ILdPpp7Uo73/88JS3XnrrpgFgiI3NbwLABKWZwkDf2X+UC1l00bdf7uAuLD145iRItLhihhQEhvSsrmyNNYYzoUa9kZdhFoaVqmce1Uyhrg+OFn4Upu4MoWDt7vjP0ZdGLhdbxaJXBtSj050hcHrqzYvFQa9QL7/p8MvD2Azr3b2r18iVpdtPrTY4OL179ODucvfqt36cFOPgSqA0BVIk1GVZ3sL045PH/gLL+aV54Obbl/eItYcqmJkEODJzpOJ2gWroa+937BYQlgFAXR/WJ1aiWVZRSDkKzcly9PCmNuOdZi6XxgRb92sB6b2aatiHy6KQanvUx+5o9s4DH7jZ2rp97SYOKj08gC6A2eHjf3iwOOu99b2XdqdYkDYPSo/xcKRLBwCF0TDQmN1bPvjlk7gSilted0PAL8eAz+zEVipxUawiB440+sHjUzGfW3vkzgbalWHSNL4MZwvEEFzjRdVX+/Nj++T+ZILJy0oeONOzVVkE6F7f965M7YPPcXOklOorPUT5s08bgfr
K1R+PcHDqdgs9s1Ms3v/0H+7G6ZXbf3Z7CsChLJ2vtB5pFEAJY02pHcP+6hefzD5fJNEv6wxdrbh9qj3gS1WS/q9E5gghwMQBkDtGoilj6Z3pawcwmmrpLFwMS6uCraaF8qcf7BTG7LrFJ7o3PLeVKgFTA2NrAIy2pUethoU9mp2L/rB384Yp4YZDHGhd4MnffnD4udu6fvsH3zawaenVWGMw0bAGKCzDAuzKz3/xmy9iiTzkzzT85zBIX6oCFfLOEwSS4PPDz8zNKzd8/fjANk0UMdayF+zyrGkABpSS0jbF/tny86vjW2bog5SyWdpGWThbhh2t9Y39z8+2x15h+oK5+8nMDkfXt95846422CvK+aTA3/9/3j9T+7df398eG8BYWxijS6tQAs4AMLYEHjy+e/L4i5NG6lEdWsTHl4f9jlNP8/2raIGyBlRIvwoGEXj5eO/bP9rVU3vvZyfzhZVxOQqNtPWi8QJCCKjQt3Z3ePzo0Qcwt4or5w/teZgjYHp+3gyLSYEPq7qYbE8eY/ri/t2//fiJKK7fevWFKYB5ieLG/P1ffXQQb0y+/f23AJTGAgYoy7JaAigKAJg9PFsePvpidm4DTdW13k9XxcOnl6Jn4VcAmb8KY5wpciIlOJdKGEQIbhaGI029l146OL3/pJ7DxAVc6aJChBCiB+Gaa/t78t6Ds/qP918dzz84kgGB+wOoK9Or+sFdx6Nrk77qb9+a/c0783r7hbdu3IabknWlPjj6H45O1K3ffXtvjgPtgENbDI1zZamHqI6q4RBVdXbw5Nwu5sswHI62rpmrH9995Dvxn9EcsvbzUkVd8IIEApEAkWIGESWSkNiVi7OzibVuilu3br93Uiy5Ll3wEBpOyQitpYMfTH05O8Hke7duvfCpk30YxYCcXh2UH9bX+WRxbbs/MJh//rg2N97+EfYflLYw+7j/D7/6eW/rxvd+fx+j+1M9c4Ubm3Kui+Gw1MDZE6WX56dn56e1DHEw0XJyQ2HvM3q2XN/8ICIigsp7CRBYIEpXHt9fjthqa4DRK+W7j5uy9s5DNcJHQAltZDi85q89uF+dfrR/C9eGfWzzyW0NyFePBnoh8bMHL14Z7m2P31FntVDXbpt9aA3DB3x694NYvPDmdwsLdwuYwunCAbows6l17uwooD5fWAsdMSq25PTaYjrAwq+P3rN18HzlbBbJiFI9kFQ7dyIAYhvNZ+9cuQHnysJgUjj/aV1FRHgvYlRQcjCCqo5Gk93dg/lH2y9evXFLLBjjGzN57dXy9Ini+bt/6UoZvjuZfvpe1e+/+Pq3DUrYEcpPPj94bH5w7dbVmzgs9ywcClMYsKvMrDpwZ7MnsyAXNsBIYByMhNITVenH57GL9dyuJOn08ZT8z++qX00XIiEIqt1RSaRyhD97+M4+79mMWu+Mt764Nzv1VgAeXuj+uJBKluhfPS+Oyrt/e1tN5PLj2ehgf0vyv75331w7/vDRpJiwtU8+/7tw88q3XzUo9T7K37zz6enwhX/2xvUCI7ba2MoC1sDBnT6u6rpuzs5rCJIDaXoGRShG0H1oDA6bCxW+jcr/U+P/rCpja+pAsn8hhCBWMZkDEwKRECKUX/yif7y9ZzGDntLOD6/0Dx4dVlFB+aggh7J/59zXam86+83D8v3Pf7w3uffF/AWc4PynP314ODgon9jiB9/yH/cHB+X171y9uQdoc/D+L++d6mu33nh9aUrn9EijmFoHaGtPq/Pl4hQ+yEmAVLLf6ys1gB9cG1YOj/ng7lNT4LJm+VY8Xp32rNFP8pMQEqw8EZHIWuPIwp0/+mx+DcszqBdGBqMb+n5PzJ3AUsQYQ1D9ay/idHdqHy4WNuCd64v3P6p2wqR3uDg8X/aXQfWKs0P168mtretv/dBfN7AGD3/xTl18+w9vAxNtK6eHUwDGWFuW1fFJ7b1s0OtNlPJQeqCAoTs7W265WqH+1eO4Ef67DXMuGehLtoq6VA0kkgYoqnQ
pSjxDJA62Ovm8quqF8r3toZliqjVKVzoAQvRkrz+a2mp8FWbvejkbbvn5g0/noayu3PxFyZGUkH0lPj2R709l8cKbr1QMgD/8xYPe9vUffs+U8/3UMFdqONhDV/nzwzPfgxxDjbaUh1dKaWi3RF0v61MU/otluutO2s2lNM/UwDMPWksrhWIAzKlESgACL+434/G97aEpHjzRV29AT+4sg66DdtDD/YLqObSzBsX3p++W6spHH3xeq3pRntcWpCXQJ3U8mx8v9m5dVxg6wH72bz8J37/y2lsG89NJoQvtZtaU1dn5rIH3oUEYjl6EoqpWGs7VCt5hu+8VgPvVsbuY+14mJRPWV99eFHlNZS0GBIGJFDMRswARExGYQnO8HAy37uyM4Ss5e6Ku3Lgznh7Om9kyYLzTL+vqpWIGwEz1gzO/OMdwLO390+NZVD21HXtG9s7Pjxppey/j7vDl+59+ce/u+F/9cASaVRVsgcLOHrvZ/GRul1IOzGQP/b2dIdzyfK5GA++9qn2NRXmqZOgN5z4+N6v/CseqxM5dDGyTy2wBkUgoAoEFR8vNsia7HF8ZNqdenZ9en7z5rdPHX5w1sofgVWXNSAMg2vKP5ah4cer98XlZn0YtjYyjYW20sn3tT/6q/s7uz/8mVON/9TasgzkFphaw1dn5k2phbTSD8c5EbY0dHcMta9UfOO/hfd00OGqwhaEZD8VTUe2ZTvB54ne/JxWkloyo0G5DB4YABKXldMEeWzcA5MnQ1/WtvasGS5zJYuQVloduCgCluTI+RwWx5R4740nCxOhkjEIoI3U4+/e/pnr88bx/Zf9bZqZhrNYOxpbzx+eLyppBWMrB9kgBzmHpoffgHbyHr5smnFWQ1TIMgxdfDQB+VRtpHQAzAay6Vkv2ggQpEkL1lNLLZjGvdrZ96Plysazmw/Gd7bNaKe2IzpbzvSlg9Y0n/vD+0RnLctQzkKyZrdCIrIv6/Of+oB67Rpg3bu9M4QpjHFkA1lbLRSO3pEKDXjMPvuir/kBhoFG5uvZACJDBGixK+DDUv9Vaoq62tAEZuuWpqjUKMEB60Btube3s6to9cstFeb/XH++MlVSquj55bVFWzkEDZ2dnp+M9Yxzu9E9mD8/ccLtnrBUsY9BoPAMUlrOl01KfX339L24+cXpaAGyhAVed1egpgNHr9b1vGqUUtMLyoAaOg4fy1lvUCGhqLO0lzUNfzSQIK//XfYDanDH5FdWeRGQGgyv7Zrp7feeFbVd9cXLw6LA+X5wejwcTqRbnV20xMpgX2lio86U/G+5iOBl+/gspq2Y7njWQMnDsRyDY4IMQWhU7e1fffn144GAM7AM3nIwWZXl2tvBQPjQA1KRuQm8EnMHPz3wIlYVEqIPT1anQlRPNF8vs3H+LbtL1X1sPyMxEpERrEWLr5vTVV47toJofTsfTP3bV+z9/dLqsw7K3dNUwzN2L02IGGLjhHuznB8rtGq1Ge41qIoLzDDgfJGmBxgY9ULJmX91+a/xr/bqemcLs2wI4qJbLGkF6yCHQV3N4qHrRBB+C6VWVdQ7BhRjdeYRYoIpPuo3pvoEGOgO/mCu13bkq9Y8KEoOrL+xe7RkpQzj3HhjvVcdy8Njbuo4QIQzPTo0p4Cy4mMKe1v6smvPRL4+F7ofKMEIAB/YyCGYoM+ijUaZfaPjtwlXO6KLA7KPjpfe1lwG9vgcUPBS8b5olmp7qIYTaergoRLRCxAbzxdFlfSLPWjn6NQ4GpRYZEoBQ46uv7BuJrWa7UP0aW48GQ3375vyjh8encXnstq+c/eaTK9+6PbVmYTUwv2mq+/clP37vEwoj9mzJBamUJArRWwjpXFNd/97rb+6O/NDi5kzbsqxOTx57QHmE4NPABuVDGTwGaqtpPEJ0TYwAYmOH/agg51UHb74kK/6yKtm67B2NphiRTVGMX/nW96c+QbBtaPvkQMF73//B/vxsflo9/sKMr+029ZOd65OicNZq/dIDqM8
eHJS9LZxVFCVUZKBBjCI6luQirmzV5VIfXB+dnN7T99xydlr6AED2pAICfIDvNbIHJYP3wTZBqtCXDkLLvgkzo/8Q/4+73Wa5/zhkCK2qyUyAIkgzvXP95SvbOIQvm0YWsmlCszQSQfbkfjHZPT2c1fVpfTLQezfc4Y2iIKut2d+7NhLNIz2I0ctsSWAfAQhQFArNw+MPfvPRm58fepzJ4KulDZBSyp7qKdkfKXgoDQcP7z3QeCgJIED2+ztuW2un/7YSEoTIfLFF8BuvGek0wMSAAgkzvX59UFWQMlQW1mBeCbljZQhBys8AaWFUfzY/19qNZ4fLNwqMYLEoip2xWdjGA0GzSG24nkkKYiLZE5WteHH0cGB70iKwswDOhSlkrz9S21eGcNAAKu2ACgCgMYSGNno0m98Byge/OHACAsEnb/YssP/NtJDCoFCD8Vguj3R/3NilrGsZFlaJnkS9XHrUAaSMMPWyjMr0Hp+a4aMXrzuMjVV8Oj89XTYaSngWEYIA7RCFiMyMwCTs/PT8zouDpQxQkICUpre1vbsz1MOigAVpwDq4tDMhAONShXx6aN1duvvR3EEIRFrN2mcRxE8dz3s/zStiKKGH49FpxTGqw7j08DWEC8DDiOB9ZBsDkVKSG8eurh+PpDbTq4OAyfa4d++TB8slG00OJMAgVoghsqAYLAcnYnMquJgMpZJKKgD9kdJbw/HEWBiYtJrSEApjAcBYmLY8Ojw4fqDe/aJ0LFKySswsiOMlS8C/lg54lTcwoAbD7Wk8cBg2jgmClAeCb+qaWJAQJJAKArKQaJbljBRY9kwsRoMRxeYUuie58UJHIEoQCYaQkl2MjGi9Hr/5u2/skz4bDIbkNPYAgjYwNomunYZJt2pRwhBTMgUang1+9u/uN0yREEFEDCKSgdol4F9KfT9z+LGiU5SQsGdLQT0loh4WgwKwi+Vi4UmYgVHCI+0zInoDWT16smApQoxOcFyWHkoMMKFlRUoQMwfEACmUiXCBpfIw1773F98mkL8+Tl0BefshU0Knv7SDJmsdAOt0umtbABOcffCf3q+6JZ6IDDBY5F8v1cCXG8eq5Sj9o5TWxmg9EOOJgLmy31N6dhjmx43ob02GaLaaJgQLGDuYhi/uni51LzRR9iZby9ms0dSXkwjlleQADmAvpBSCSQQnRITZ2gtn1/2k0kYDpYZOK1CtNbkobmEcylIbWOeAAiU0rLF47yd//8Gi2y6j3WaHSayWwD932cQzTlkvGzFAP966sb873Q7F1vZAO+g9jdK5o6XfGgK7xurDJ0u/YISe0lsD7c79lj+Bwo3t9/7jO4+i7N28sXx4LhoRiAITOyWFkggLJ3nY2N5QqVd3/9s3H+5rZ+GcuwHAwGCR78FYOJunQbonAoBCv/t//ev7p01cYSBOabwgxBBX5M5TQ7yugI0kYPXmGqem/vT6a/u7E1M6XbT3MMJCH12f5k6OR7Ma0Go0GA6KyciaskCpDc8md370f/q3x/VAn87Pm+BYmD77iEIghuAbT1oKDa5Olh9OTv/3rznW7LR1qaHE5m4oBwtTEGCRNmhoe2Ts7N/8+y9qn0HQigYFMQuRTeDLSyQ5Icw4asWprdSl/vzq9cJ0N2TLKcoCBvDOOAtb3XSndb9QW1vjPWgD41BawGAKW4XAXB1UDQakQoi1j2Ygoo8Q0gBWsNnRp2QmL+yc3geMhp4UAOAWFaC1MYAtnLUwtjR5zwVtkBzFcDzDci0N6LAbmER89myndTp4LSF+Rm+xurpr0qR0BYDSlSUsqjNyriwroLp53gQAfncvrQYDYFCAGB/99JNGSM2VV6IRBKHJw2kGMwtlSQgsA0szfftG+feqUP1Bf1tjYg30yMCaAnDWWleaoSkKB6thgAV0aiP6z4/fufdFMgFaq/7mpyg8J/Ohp/96Ri8RAKihKawr4OYaBBhjAJRVpTUc3BDDcukDGq2qKu10cqC1ATB76D754KgRvS0pmVy
QzJEFMZdSIDITRa/6ziHIqy8PPj3YOjc9VcyUX2KgJ0UBFIAtna4s4BZ59lkDcN5r4aXfDWFmU12c13IBXnUJfp2l9ZeLD6jToSsddGmTBboCupwfAxpwGlrbuoJXakdbB02wkwL2rjt+dBAOZ6InR1fm0QUn+pJAkWMMLKJjElJK2V8sAg/j3PWMV2iqEiHclVIVvYHaGQy10UVqCHA24z8LGE1wGuXk+zuT82XJmb3suLwWCtJvkyCtLELd0/tTAFPtCICzpS6tO1e6KFBYA3uwWJogR7u38idLi3ufnPpqaa6/dONuEx8/OvNUjLfFol54GCU5+MDEzF7GbbKhd/yfJm/90QuOyVX+HPcA1PUJPgFU0dtSO1oPDXQxM8kL62QEWpd4EQ8OrGNOW0DRynkzQPRVAOEaD7Zx8tof6pdQe4XBwrlq6OCqoXVAH9sa0MyGqzJg2B/oVMlEOXfuSdm7CdzRGod3f35QWS9DPLMMArsYmSGEBLuGFy/uWPRPz5d3rtwBYFFW7g8tnK0qvzxfeDSNvxsA2S/Ulh6MhzCYAgCsnc50sb+8W51Gz5woq/UhJ6LIz9hK44LwTx/rylCPr77h5gXcKYaY2GoIB4w1xgapd+WMzWTkz0/NvNLQcPcH2N4Z7k7LKYCxmv/HCC2pnge9pQX5aFlow0I0Nno+MGrw0snBsHd2MIHTzjitNeCmQAlrYKvq7NwffFJqOZBmS6pi2r9yw8I5hymAW28cflrXFuC0v08a/bQEFDKsy39hjOnCv2vvbxqOqs7Ohiisg9ZFYeBGi9S0gDTiZrsvlfKH7hhKww3djtMOGpgCFqPhREUp+3rpWErtiBBYKAVEIHBcetXb2t1RL24DgAOMdUhRbgIArih3qwrlIASExUJK82j78MmW1g4P5xqFe+HO+bFDy2qvYZkLoYB4cxHNZSzg5Ye6/sKVwlk4rYsCelpapw0mrrDGOQeUGGPsy7Kst3a0dlprOOjhVH9awEwX7/18Jkhq7Yi0kMyEwkduKPoA4lCzCOoPv4UrL89gQNCuSFlf/nZTFGaB229XqGazk+NlqOP+EzOUhVJ9BaUxfqPSoU79S+3mmpyWfhPRs0b14ug/VwHfe/0Wl04DGraEKUpXFgUZTUDpnHbjVxzunaAY7Fw3DtpoZ63Rhu8QbPn/+799WF+J0Qz2Gq4R4WPU7BsvRAxGhsjGfOfm3tsGKGCN09BYwKw1BMNgBEyBskD5oFqeH326sE4KI0fckwCFKcJZSDvCrcj8lcyX9kp8Dfmh+phpp6GBEocY63npqtJomzoYoSdTPX+gzJWtYWEAkE63TcyuLAY+NGPEXtHfLualgK/rxvnaaSO4p2oKo534q7l6w+0bx2VeK2JNub4zP4AShbam2IMrb946n8+qxTKeBb0tyaA3vnbfplh4gQ96DhqmZ76Di95CKdjCAGbuUHp2c1stB04Pp8YCQwMUjrG/UANolwQgmLKCAUxpBkLqRvQG4mR3iWhN0fNVjCGSNkv0tQgjcdDo+aHHLZCBYzinsfFkArIGGLEBMLJOj8euOj68u6zhXNTSQKrB8Dy09MXa8RwPuNIMgS97c10Bb5o9kxZxoNDAHMvzpdoeas1lAQMNC61H9XKQwnRprLNVTmnIW5Y9LRZ1uRzEpTAqBMsCwmwPTzHoDbwJD7z4+fFYF8AIAKA7UiAdFhZuVtj06sQ48KsP9h+WRwvr6hjjth6Mj0S4uFkaA0xifXe8DeFXqeAqG3qGAu6sfh8OcbisThboYwzAQGug1IZRL+qtNHMLC20BW4DgTmYN935Hzd+bh3Dqw1BUi1MIknLy8s57570hanYsiysj9eCTV3SlC2PhtDUWMLZ1BKU22gAGFto6gjX6zuQ7xt578ODJWdXEaPYfhLjaN4KSz2eQEIywQRNuDv76S20l4GkF/GYIh2FR7MNBY/7ew0pex2xwE2kHRavLEq6cKbxsyylmgHEGMCUMTz762eDGS3/
6+r8/mC+jt6xrexK3tptY3Ly6X87Fdv9xpW68+aNXXz+6u+UUiiKNf/cfAOgJWViHBYyD0YAtAdLG3N576cH8eC6Xg/NtH+KqNpA1ABL09Mxg6uRf0ehZelr/o1MA9BBOa5TGFrBHp+dh4KFQGmud08DMwS19t+GQsWX6V8/K6v3FzSu/9ztTd/ewYSIOronYHjlIpQffqkc7XzSm/+0//q4p4LCLyYbje8bh2gUlJYaTG9Xh5+70TEvZ8gE5t8v+b33oRXrS4pr06XTGmvTdH2u5gNUFTAmtZwY4eHhipYLaGZbaWVhjUDkHr3qyn1c7WsBY54yF+eLR4LUfvFHMcLUvOKoQI0kaby3PxFYfr/T2EGbl4I//rHgwn9y2pmC0K+bWDwJgSgCwBtbCoICDRYXxiKd7Y3f8K6fkxb1Q8tDH/EraBoRjzEaQFpltfAlvLDheXU/98uzaDaMTCWI/fbLgwXR3oHAKpGrFY8AtZU8NAMBZjKzTutS6hD2oXvrhDdDO9MFfPmGztajksDAvby0PB6++gu/p4RLbTe9FXR7q4UjDalhj4LAOA/KRbMN2VgBg6NwC2mH3Bj593FSh3W+OuoSYY86HCSAhBXGMIeOkNbGzvqgLCZvb76r/++6tH/1eoQE9sw8fPSmL4fUtaOccAOexnCmlfX+ErbTUp0xMLmA//vxg8va33YPXQFe/ZcULL7/zfv/qzvj13vkXwxe+d33qUI5f07ow5VAb1m3xY016e4kqYLEAQJguYCtoMzK/r/f/+p2Dtc2msgIIDMqUmZCSIGQUMaz7BLpAG3Qp9NoU+InZ+oX/UwAHp+7JiQ2y1x865+EBt0SNBkpNtVbDfJGyAHRhZ3/90fnNH942eA2wr/yvD7YG+LS4cl3MHt780cfN3+G/B4xZGKMBc1OnQXbGrbuBnF/CIhcJ8supZlI6Pcrm8vZ4ejiLnQZa+VsXmHByBEAgQXEVGsWa2N006LSQTlOnNDu/en18OHy0PF80MEoB2im9hFvW3kP2t4aDcWFKOAM9shYoC1N+/MtF/+b3UWpYY8zt3dPb98Z7t0ezI0d/fvOTL8af3THQU5fiaB5sAsqMBjaPdUPQ2hnAoGyzJi4dxldEW8xoybGW9c5/dTQpibX9eGidNaF29LMWcoNEgD/94OdX8a3leV3LgZXBD6EBByyBPjAYqmFhsH9gC0Bro2flEPPP/XhL2wzrD5wpHt1+u9mTg1pge6QnEwCLUVr+ZmBhysvMfU0F7fxHTkQxtbBwcMDIHjVgTqvI29GkjWXEDMHc8uYgWn/u6dpn2tao9TmgGFj+7Oy1OwcvLsqiqa72rr4w0SgrLD1Bbe1orYsCBqRpNsVspO3hjfLekw+2rr9+GygAA0xLFNr+yz9CMf9g8ar6o+HoNc0oZtNueG3r5lz7xbb9aQxWFcFOD7BGG6uhQU/o/kefh+TGYnZolPJBpIUulFnTTJyAEJhSukjEnBriqUOE1JLLjLRylKtP9fjNuRp7td1Tg+HUwjlNlerr7V2DwjjtrDWmLAuHGV4y770zD3fefAkAUGokghNFUZrJTQtcf+PqS2Y2bRdCJ5TnQICxevVaktbmIcwBcU0FrWWYX/+f33vgk5CEvK04txOiQzfcvSyIwJHSoqB2RVi2C6aV9YMAKBBxDNs3RqdqMscQcjCFKR2GS63VeJJ4OmfLamI0oO3UmvvvfWy/9fYdlGVRWO2gCbC2Aj06u7ZX3Ku+dwuwpTadeC65wEvs3mKVGLRKWPsTAMqH/+lnR1XnyTv525Ny01ee7DksiEh5QuTtFlqnkVSyag+ASqsGh6ilX3h/JgsPWAsq/BVoA5dAuwPm0AZFWRj77mOx/eYEdp7v3RUAFwbFQ5zqoUNppwsUNKsmhdMuqwCwF5JgpCd1bbywCk86L9mw5clPTurYznlqPSEhecA1+bOGwEypW4NzOtQB6HTCxrMMFJgAXQzuT88Oh1W1BWeNgwZ2nS60s9A
AQaOY2+mCDbC49+vTa3uvmYPSTNoSv0MJW7jbt501ty2OUBSlpta953l/GRK2sNBwl78JADgsZ58tPVHkNIxEaY/41ZOjV/JTaxJpmNe3EW8zQ6ZVBEw+AMQMcfpZfxnijg9FT5scimAMKAVr60pdALbcN0/+9qeHL/zgzp4ulwZkobVzJZy7x5UujDsaF7CVA1nAjEC2zBpIxfCn5HQ6PYwkd8tcPIx1Zw/LJDURkQA4pm3Q2iZ/2hz+NtRRerppXHEjl3NHqVWWpy8+mAtzjkFvsm1gnLHJH6ViCZx1zsBWQ/uffvnByYs/+t1R6aaFKVNPB1ACYzc0gB4MGcWVArOKDFLV3+G5hy31SMM6XZoLKmBjYZz3W3PECBIk0mMS0maYyWe0WCd9gNp8iWVqrNq4HFbE0poCmAA0C6vrYd9hgq0xYFNTcznXzu3BEKzVTrsF8Jt/9064+i/+FCjKCVw7q60BdDnSVus7sK7YLxx0igFFUeb07oIXbCe/K6shDMoLBqC7f184uXHoOAqSJCS3hfEVGuoEah0EAcRpFRDaoL+21ozWQDXaXWXj47NZLOc9nOMFDWBmtINxMFVltWNnbKWtsZPiP3xejV+7Azg9Akze9CUF8VHaaxy9HkYwZtR6gLWyX77zFusABjS5P9QljEZpCo0OKzuUriiMta+WV3tKgIQkEjHGuHpWMa2nxNTZOBMzApgRSYDajKAFiMy8hhXzrrLh0V8JMdS96c7+ALAoHDQsDCZ6CldWutibm4MJPvxp/fatH995js965vE0ELSJHJuWmLvC7mvAdmypQwnNFgZwbqgkCUGCEAOvDIA2NlXmVfrPIHCEiBwYYgWBEqmaXGNHJaTN1OJpo0avv7r74mvX901q3yotAKcnzsLA2dG+/fsRjo6/9aev3skK/+2P1Cajy8of86lzICi9awygnYU2RfKP29/5zIcghUAr/moCrPAArTJlAhOzUJ45Cl41DKTC0to+hQTOm6uzjZLijZf+dArk+N7CmJkbTovZ/FAfL079/Pp//t0p4PTF+P3VZb5gO5ZLODhXv+e9UkpiujXAcOigJzo3Duq33ilrR4QYExRYwbj24Fb8lYNnFiI1WOU3k+zd6Hd8QFZZ4Prxr5Y3ft+SdigcisR8wVWng5Euv5iVeMnPqpeuYGYKTc9PbZ4j/9rvBoB1KCtAqxreWongT/u9/tZg4IY5zwCgjFEcAY6c6NCMf/KOAdwSgSuGjAjEESIvi8ngkTcgc6eArE12J59UA63drkbpqrkuAOjq9COv3KTSWx6vfdr0bg8PiwJ0KZXxTY+lW2KAa27R+MYvQ9Mr69EASwYXTLCumscokJxY68C6+Z5vf31X2mwaaUkst8+sa/WzGRqJFbcaiqf+6PDd6WBvpwePPfRROyxOPir2+xgPtTmsDg+H16YHcOYbT4DLDrN9dlgXXkGVDSCDtbIsZb/vloOhJnyoP/vlQZ3y/TR4q7wXQJdCrOrGABgtclwvn7Tzf51LXXvMTrBcL+4bORz0pBiMdU9WAeUp7Q4WV83EwD6qe1cc9m1pRwaLy4iNr3JkWGRNSoWNHQ5xUHsu+74M6PVm3gYMema4OCTV9/6T8ODXj6uYGMC2WWIlfwJ61E0MILM96X8idfB27MlGAEiaIgAQUhCUJkGKjOZA/f4wCrjgw/bv/Mu39u/jFj78e7w2XOrrKIq829M3OdYSfuv0FADK0h598mi+wHiyrXxzuAhRjKQJ27JSOPvcH354Fji4AMTIiJGZIIjbm08OIHakxwYyJiGTvpg5JgiwOQnWH7VFDIaX1hPDV5WQHIBYFKisOZzMPwtXdjUA99s5AN0ZgUOqyRkUxisPuwS2IdXA1tJKu1wAqI8euaqOMUZ0TqArDyQJ2p8dDmxTxRwxCOvV9U7w9tMKyKw6IQJCUASBIhoiFlKKrYGrAHfv7Kj30k1HzuWNXn4LHawfqTdwgivTw+ODxfKoLoI0gAxl7WR
s5idHHGzkGEHMnJ6Vk5iOtVjGyddRB3XSi2vZD+cEsaVE18Mg8kygpFqPSJGU9GAYGXg4HQwdlmfoT2/ATNJGT7+1/AY27RwJAHB6Utws731w356eHPuegBTOlV7Yo+OqhgzpQXH56BDg2mLqNbOOK+kYxIirykBXGNnIBpM6IifSKCY+CSAioXoc5Gj7hSHm59i68ULhjJn/I0if7sEY2FIXQGkNDIyZjl9c3v/0sDyPsa9kqIM8PZwHFhQ4cuoVj5x5rpUGVgRRC4Q7MAAGWLRcAHec4MaRcgGOggg5dRDEQGDV065CMbq+Z+fHC6Xu3ELid/RvrwLNgAYWuU6aIc+dib5z5WduUfuqp+Hgl7WHQMqARIyc28LWBErKbOVfjfwqIPCay2CALqogP2qLo1wxS0TELAShWSo13nKHS4/+9j5gYIcORjtgFXS+qQ4AmNIAs7J9jBsKY9zxaclNBSkUN+eeBJhDzPfYzn+0GlhPhtf8IVoGjIljmyWvfehpBSCKzLYGQRRISpbs3NIxDOZnDqPBtQLQwM7JbyP15mEBq8v56VCjnEPbAjAvVX4wf/hoGaPh5dKBGCFldzGvHNtQ+4UpvZEZr+WNYp0du0wBzMxByShISKnAogdXBRdiHE23v1hia2d3NNcFULDJ/O4aOZ1ATU5kaRXqL5someu3sAUMivK96uFBcWtruAtjYKwxt189ePx3/+Pp0nFwgQT7EEFg9iGt9ZUZC7cd1G2BGB3IWasL54yBWgzdIqGnFABwQBQkpQAHMBbeRWaQ9Mo53x/riXauhEkdxGn4DECpl3C9srPJ6z/jyI3JxrmTR+8+MOPxeHwLfudKsQ9gv/hC+cYiRhB8euhrGn/RNsZ1A7xm/9yh4nX5W6i4VlndPNaAUGRET4LA7UMJhVAD72popZEKw4WBhknS29xEkkYW1plNBZhLM4bMoaW3tD5/dPdIP1bb27+Rcnz91uHuu1du4+C0aiIASQiRiNMEQOJE167FaOF/TvXWF8yvGLMISk+Y7XS2hhHUxvUiIVAHJUgo5ZTuY1BkyVzptNO2lTPXdc1a8+Oa/HhOzpQXaBh39OTxuQafmqI3sPDL4yO//OzvDpoQBYmMcFcB8JI2n5XDX0XDdS1kDWRqoNUAOiy4+bi9DVdBJJUGhlcwmKT+WAfnJlnMvMzHWmuABUzLI3fSA5dqwObFUfOh09bMZ3cfnisJQYPB6OjeYHt6tjh/9HBmU8KXn4zYMQC88j1oJ3yWvRU+y7+RBCI9UoWxqhUk7Mz0nIevCyGHxXivOXNDY5xxBtrBrvo8U+1e5zUORrvN0d/4d+0wFi5ZQIEZaTs/XUASQ8ueFFL3q6p2PiBtbZPsNjJHJiLRDnGX2edg176wivhr/GCrqjaX3ggafPGps2vyS6mK8U4PWHptNTRgDDQ0Q7eiWhSANToBepdeulzujcMBBgQ4C7jgOYDJi1oAUlifO1+iIISYRi0yhOiieVLDmr1zG/5an9dipUwRcIeVN3JBBi598HK6GClltFRROQ90hb38X+ff2kE36/UP89Qva0c2AIsCcA4IYI4Atz0eMY+UICG63KW76xb7dR4f6yNK1BEEKy2touOlqwuepQBhTK+vKjbeX8WwRLFR1zYatkAuCmkYOJTrAqe0OUeFTTW0qkpbBz+xgtoUrkMWSWhBDGYRUoVnLQCsKgG00kX6e238U2dE1h+jTQafhQQvHCQGA22MbKo51LgawnWbnqcM3mINDQCuzM0iFoAxnBeXMT0zELTrRmsfkqF2xpmaIElKStAvIUCRdXNhvFcFUFqbAascoeOJ+Kk8+HkKENJMCUD0qD69OdZTzFr5ndb53+5sZxw6ntyU2jCgUymMn10X1MDCaaCO6yw1sUhjR0IgRiAk+XOKmlYKtPInBbTOMHv+/DPNKe4I4s1M4UsUQEb3lI+BRSB3b9fv3dQGsA7Q0A7apL4WA+MIFsZZGJR5BugR4DiZg77UCxh
Kalk4bXzpY85bczcPi7zRDXPk9AM5AKwAPqE1gC74UxcC2mayZFOCBMVMkV6SDV+qAKF7mgViCMzN7DN78/pemUrbxqLU7dhbApDWwloYnZpc7QhcpsCgkdGG7fphsCqqOT0qgdlJlfbyXLGbIICIJCLHXOQnQbwG3phWds6b47/iP/PMhyQSeM6TrS9RAPX6mrwEIrPws8+q8MXQPdCTAigtCjhTWlfAlLAoAQLM3Lq9Iq97dFbrhIocjAXBbuyLwAB0ciEF5g8fL323O0g79Gm/Q2ZGjIG5i23r2R6liMFd+0uXJWe1ZPJIiLWSwaWEyCU62TIIISUhjFk5m9fnN6uBNtqV2piSU1+HSw9C0NYWDm7pjAEWNHEWDuX89DZgXAp5uCwvYADl6XntmEVnAEikDAlJFCJzDG0GkMnN7OOTT+C2RkLUqWg1/Ak8S3AbWYU0zn8FHyAGveipxVjwbnk2s3+yNSzgrJ6inLtxoWFs6TQcUMxPK+2gHDQW1QRal7Z8fO9412nX+kt7cQqktorZvApopWktmIiYKDV6cQQIYq3tbw3qtgQJcpKwiord8JMQYu2zYtjU6w/zu1wBJHvRB6kQAZZxaZQW/vFf96/evA2MDQo7RwFraTKfmAM8+uIKxlMcwFpj3aQAUBTTf//Z4tPfu+vuzJwbmhGAVPlvW6V0aUs7bfiGP54nS1uXK4H+GEM3/1cghjLeJQAxvZ3CQVsnzMxwZKQG6pbgZAJBDJ7egeQSBVBg4giwT8WE4OrDun/wZH5ta1xg5rQtCzwoJvtwpw4O4ynK8nQMALYA3Azu8Nh98bLTSEEhgWSHNaBsoGEMPXmy2jcd4PaBmNnrR7SFm4wU2r+oYzhSLyRSg3xKjLultkLKlW0Rgzn27VNg8KIChJDOAaDIHAlQiL62pdJmal+9Pj34QF85dUc3+GgPT1x1rM/V45fLAwcPwDldmllVjedf3G1eOdWAQ8GrNUJ5bVqnathffrZYPUmIkcMBCSm4DYCbULfrjeWYiezM+ib5QYwY00YjJKUAIJByYQDRjkPDF55xe0EBJAXXHLnljshooylUPgb14N1rQ/fkrOfNK//Vd7bnt67a0hw9Opk/uu5w51oFAI5QYTg5PJvVD59sb5faFMgcUhc8LQjOFgbAg988qjznbs4kITOkkDJwagbJ0C+3+LeMeFsgEesUIa0UAxKCSCS0QEDg5ESdKBq+sNBqUwEkJWLgVc89N65RgqKPjJO7WpN3gPpoe8s93uvB8cG7/4EW/+2to1O3a/L+KHqIg5PKndbbZ3umANZDvwWcLjsi6fjJ0se1zR7zAAvBHFoKeKMW1jYCpJb49PTsVgO5IpqdPwmiyCREahhMp4XYGwQfN2iVDQVQ2qFlXUUcOVWlWr4sAvTw87Pth9ZY+9l/+MlnYvL3v+/HDqxRWFPqoTm4v1j683qgV31vGUU6W5JO6YQGgNiGpbaAh7y3bYhpJNsAsLb5Tct/gvLj09fljxlGyw4XCXCIWYdse0YLeqYFEIRo6cCVBtpKG5gSoiI+//u9bzUf73347/7hk5OF+uTXr6v77jUDrdlqV/36H/7ui/P48OMXNZzpfJ9x0GzniVHSMAzMlv1Vm1/6lrTnN0KIYX3DpxwFUkcbsrKyjbeKaYMfRHqyPEcQgWWKFiluLIc9V/lnTwGhyG80mGJlYW2cJaIYDn8jThYnf/3+uY8xHvyNf/PtO62ve/fu3/364YnHo/dv3ZlMsvQu9RWVZbWcwBgYMDA79PnekSjunMwIQRyyj2tHoYP5zG08ECSI1/oBwcwREEJKygNHSXdpMjEx27BFi2atPLJBiZGQyvEzNmbISxRS7hUP/r//gEVjPQDEs0/tdGf24h0s5kb/x3/92UenIQY+Onm0O9cGD/ZQFtrpcq6LQj9+7+F3MaluOpR8+M7/8HdVZKT+9tbehFbS2RBzVBOMtr7Z2gM4WX9eDpDshCMzmARJKYgRA3JAJR9
ZIoUaXi6HvWK5muK0SYlx9OHyVZjtt3MaDfZn5+AWmsb6yU/kS3481NCfv/uzJ6c2Esf6IXaGxs4BuNJYW6VVuOr9eve1ApYPfvPXv3r/2Kfxb8NbdvGtF1pnfLrcJy2cv9Ainr1Cwv4MkIjgKBhCghA6v+dssb4Gj9coMeIEfS4b/vb7VzTs+qoTDtXnV+VoMNJu8Td/+/jcMzM4HJ8eDMz+/gNjHQycBgptlToJ/k4BdsXxx1+cuEg5tcmOhgnEIaymYUfnUQ7n6dmQops0BGJOoDC1U6+SovybZBVT1RwcAozYYNc7BWTzfoYCqF2hm6BaVlj7UXf0D4+334nlsjo+KzOCiqef3rmJA621tVprwBYaRkucHhwNDYrP/+H9g2WKtyQotUAyg2Ok6HO9K7e2d5lyapNuD84tgJzwbfKKIq2ZikwsiAgeHnKoYhPSnTJrJdazY5WFosuFX4cZvP43r6FMoHl0LGIIETHmWAQs39u5Oi6HhbVgbZAqIcMRjurt5fZ1HPziYeVzLiPS4KcRiAkDdYRPm+zG9LRwEl2DRLqPPPshICQxMThfSUAA3nrWkythvvQA2DfcU+vRM7fLY7Pv7in5acVEoQ1NaKE5CcB5cPaflKv4/tEv928Pb86dXbWUY7JjT88lruLkX394HgCwIEkitokdEyJi7OTP2W6b+gEkhFjdXVvyhKCcFXE2DMkkiMiHhsGl7I+p8gD7ejlUG3Pg2YWRTT085RxoRcyzQFq3lttT0uwMiwcfPnkNWps9bRcjWwLFSA9iyQdXevUv/v4spGgmu7UdubyRwNzaFxGydjNKaFWVxoGZBHIC3eYVTBCkYmZU4zIOd6QPDA5NqYUUa/nAmgKe5f++5GDKpI3InrElYO3849/cMYA2ZlHBlHAOpQvBLcqT6oPjdBNCSIT2OVLJt8YuO+4mXRt/BUG02eDGYKQOKW7XioKRwkGiQ0PDg4FuGBx944XcmAJrl3lKCSsc0ukWQLv2tJ0WuaZKINGxNMwEf/bJX76qT/VLVus9Y3V1fPYu5mVYnh8NDo9PPIhJqh75NqSDwekSomWvcjsYx5ggkkhzIplI2mIyW4VIfiM5fw4iChsFCcmROTKfTAvvfGRnG6m9Dx0D85WmAICNnlMASVp0JkdtNIuZqADYz//OfXfwg11jy6nFo/c+GEh5NmtsOQtV7WKeABGKIwmQiG1CjzbitouEUm+EECKlCgkb5fCYnpeUjCRzKSSI4ZogZL+ofIzMoaKtsS1DDM2yb3yTqlBg+soK6MLeil1uddIF8lydl7ENM/7wr355/dP5a9tVMXv3P75zX4wKN/fRV41NCEiKGCIUAgvKWmy9TQQBJHJvIJHIXi4HQHS9gSKHfgaDRaKTmYmdZUg5lkvrGByWYjKITWRXD3qh9sgsklCbudGl1Dk2N6Bojae9xubkEcyr+o1fLJfidn/r/PO/vPvpUa16vUhKcEyznoSUiEwkJTFxYLHyttzijWxQbfQTq5WjyGSQyAAy31EkphgRPDOio1HKBhFs0+tpD3ah15NdmiHUpsyXO8KNrJNIxDRRRVzVqto+bTAJptgasvf+ffposvjiN6cxQlYSQy185DSUSmlyHkILGVIsIU75RZvnRY5pGeQK5DFnLaW1wQItVhSpowCR2dvUXBtd2RtS5ACOror9oQvs/TZMwkUgUgoboO5ZRwcA27AEIPcVZued8VrKVJhjTCVfhNN3foYYAgAEEAUpBJFgCSJldHQgqSWxi1GFhPQzqwGiNXonzZBUCksGSaJdNdGOJkSMhGBDDG0CfK4HQ6CJzH6xLAaDJbOlQX+ZUYUcqLVVpM8Z+AtmwEDXtJi7k9vLCDALAYTAzEyMaNvPM8BUEwkJgEiKnhSRhFQABQ4Q8DFASI6xreozBEnRgcXWAjpvkLMB5Lw3cmDnY3sKA35W9fojcp45hBg0B/aN6g9iADFJs6U2dlW5nAlYf4E7XppzNrK+MLk9kZi
k5OgR25psFy+YEAMRhFRaeg8ptWLAeybpYoRQ2nOb0IOkTKKm0l67aD7N/lZ+keJCenBu47krnRFxdNHTUDS1ixFhKQexiZVCfxkZEKo/ULi4r8wllpAnQF6SmGY3MeeSNaNlpPN5uUpBhCA4YYKcZ+WMg0kQCSkFghCyp2JkH9IiZSaTcpVc2pSyBZ2EXOHMQIhyzVxk1xkjXKpndjcNgDj6ZtCntN4oWEESTmknAwhQ+quEwezjW3+f97HjVUrczoXu3pKwRCRSDO9wdH6TwCSUQgyBhNFSOB8ZTJGZEQVaOEUkRQZDXe8DZZPjDP+IBFGMwTGzj9l7MJB2YAYjoqkHBlyHGGMdpFACpLWPgFDCbTjBTTNYgz5d+sOr0xiC4oqs7GJjHn9mgAUBqck5tqlNGjsplFHBORZMTD1yCeSCEJ0Pvi1sCoqS2jneJYYMIkptdACBQww+ZvglBRMzRyKpRPQBQHQLKNM7rT3HEIWSkqTpuRAhe6Z5tgVQu2PnehpIADgiL0/PvDyvAgRa40fa8gEsBDGQG/6z/xZSCkkcPRPBeiH6zBEqBgoi2KSuJH92IZTjvMj5N4GUSHlDBPuQChlklCKOiJEDCSGJhY1gsK16agvCNZYjw3kZhVIcSfWoaYHQU05greO25SS7FZh5uodcte+qdW2bSmJj22iRVCPzqh2ASCpJ3AiOpLR0pJ0ehIYMc2Rf2zS5EumdG4IpR9+u1JcxM8fIHH1IpwsjQYJYCA5EYCEohAAwBx98L1pEB45uCSmFioGkYPdMC1hNgLZDZcVM59cjx7w6byNkMBG127oQITJFSrg5l2rTdWIkklogUnBCSiGI+jGIpp3sQhBRu+CBIAXlbkcSqV7MMYQQUy1EkJBS6hgTaApZR2knCXDw3mmp+zZEcGigpbGOhEJ4ZpvcpiKIiPO2pmkcUgvrikZr84JWbQQmQbFrT8syZ9jJPoKIhNJEQRCCh9TsWSBJjdQiAJHbA9PzIZPHkUyEGBgcQ4gcE0UohJKpFB7y7vsMjghIjfIcGsFiYHwMzNFG1StqIbT2Pmz2Cm9MgdZnIXuzzgkL4cm3KzI5T47UyYW2kE3U9m6EdrFam00RmAOBSGiKiIqAQAiuYSmZZMoKM/RP/G/2d9nzRMQQU5KbkLnUSnAIQaYqG0cwMTGzgIwqpkddCCGdMEMbHAdebveN9UqFJqjLIFC+U24V0QWytIkFSXhiMEQiQbK8eTUPdSUugASiZGai9IGYJ3NkJiayMbCUhGAhm9DAMMmY9UosJAlJAkgdYzFwjIwQMwmaAqVUIKFkjBEUCAjC57o5xzxhQIHhG9bE2LINe5BbVtoEaa0P605wwxFu0FLodJEK8ZIdMSBb1poImZLvWjXawr1CYE6tKtwC94SkCNGxQESkCEQXCIJtm+qmOkeEQEcKxjairoZDKgUktrg11sSSpR4zAJy21OHgpWCOhm1k4qWFVBRjXC2YeNoCnnojtbExIidajaFj69gJJEXb7djS/JSZnghExQyOFCI6dpcD54FC9DFw2ySWFgrHsFb8blODFpCBkpsQCWrG1FiabjI3hiJvv0aJugkseip4rXsIzPZ8a0CuCYHVMyVfl3+FYRLnL4QIQAx9UMRqrnZAGdx2LOSuj66Bg2JAYE7cXuK+iARicIGji+CupXOV5V08CIkcFwIcclqyccvJQ2daRggIRI6xjoVyUQ+U9S5W/WGxWIbICiuc+5TsbfgHIrWhPvlFoRzAYQ0Jtk+wjq0JiNzwFmJbdMitSxEMpiiImBAjhBAM7xhsSYDbVcCbY9ClW5kFEExMqcUwl6x5JUhSQMZQHFOywC7SSHIE9RQHv1R9ySHG54ZBbr+ZczEu6UAgKpWEErFT3mrxQjotpKgv0orXfEuMrmmRBCEgQEoEjpGZuGMUW2GxKhoDaaYJIbvOoVZLDOZu2Uz7Wp6OkZLvYUawYYi6xpYIy+iavmaOz8YBbd9Gh4dbG0AHihA
pFSvRYYHsExlMFJnBqdc7rhK8NqIQCWKKCAiuzZ5iVxdN7TArON4CTSIpBHeRFgxEJnQ1mQQ+N5IYpthuNxid0J6jVVKwr0dSUN5C43INtEsTs/ycevdyRw8TgBjAbSbAaMVP6s/ZT2v87ehwlypKcIjMBBcZIhJp9tyt9aXOr2CjSgJBqTcoA9RMEHPXHZhHnmnt3vPIULQWWoSKpAwetSHQpgWsx8Gu2z7/GQTJVizOWUl0RoXsu1vPzzkS5f+tkclMjNZJgjnkU2IIEYIghIrt81W77yVSLfWRcc9F8bJl5pJgN/c5K6S7HyYiyXMjhXRJaV7rvrPPyQYvJEgxtZxxeqRWYkYDqYBVKtCO9SoHwmo8u4b9VqOc2qE4pIq/SGifW64pDVziQolESi6odfKUdkzsQkWXu7WaSNLHBAuRy2psndFEaUaG2AO8Wpn5hSNj/zUjaDs1OEE6AEGoOp3KGSatAvUmkuqumM9rJwKnfkhmRCtC579bmB3JkyARJSBEG3NWzTqXBMocfoFUpY2i3TgCgODIkEQUSCQHGC7wAZtWfyEQc2Rq+9JyYhYiOBJffOxRmzTkmNlGZqy3H6Wrx2yjyfVlVawn4rnglZ57ByKW6Rsotr1zLQqkdaeDNlEXIHDacZkQwRytNoE5EEXJRCJvoLB26+ujvmkZzCIFttScxECsEWOOQa2hd/9DB3uT9LELAG2Ebhu4aDVIqac39x60HjRlFWk2RLTusYPcuQtsjdlNaqVW92kcmCKBo8NQlM5F9kGTUk/3Crd3eClRKrsYI6II4Ia5bdzKgUp0NpAmTRf7Om4wx3DO1p9126lKCJFJz/bFbB2UdgdscWD2fmIDL7fzrzMG5jUYFgWIA5pBn0Lgpi5M9M9YNIXLNUBChNY5iUjMwcmuOpY5yrazP/sQccHJtDlzzOCYO1nb75CpBsyt7KsMoN0KpPXqa+AUF+64bbHsttUE2jQqVmHYCzbGpR716q9cHAU27yWDsVTCiSu01t3Khf3N227FDtPyasAuzjQICO4wNXF2DSsRI3HLkcUOobWmDzCQ9hGjVpO0BpDYU1+pAHaWlVIXvp7Xb5qeureQh68jQlkaG0U7ru0N0yqGJNeXxGmbOxLD1PmEza+MYLCAFAnbtExk699o5VXTQGQKDiveeQ0OcXsz2WwAEAcshzpwsLWP8mI2eDGubE4DDpS292VAUIzMwavMh7UGifUbRguLkNgBZBKHiDiEEFspWi9KlKiMnFwjbxTOWMHb1l224lDGQpyZ+JX4a0gs3Xb2CMQ1CsWRm1Ojv3QKXEBDQWSzYEEiAuxjZsG427irs+vul7SjEbUTgySBo4+rvEhwAFryg8ARkEoQRMzQqp0BbVjNjhGZ/mnHewUyWg/ShhxiIK3RJWYspdLsQhXMl/uAi3gwc/NMabsZjgLtI5Ba+TvX3U34lV8lAIIoeu+78RFKBY5IGwCmNVKRmWTeMiIXVvLtUEajbSTJQSjNcaLEPfDGDWz+xqDIdU/JEL0IX6U0tq4B5twTliq2QIwKHQ6klcWnL6VWyAwMW6DFMa5OAwkZUwhJMT7N/UgSyREgxi6YpDyRIiEnlq1u1kAQcmYKMHUtV51SAIBtMgV+Rhh8zsF5p1IOuSc/doPbDkSSPc/o/NSLNDqUkF2idTuDEVJm1rmdsMwAAoDURx3bpY/UgX4CkHuyCMj0FyNSfgJNK24HLGndBIBggyZJ8dmFkY00YF0BCOnNSCQjg72X4LwsJyV8QFe7BtINtXdNFAGEGBFTS1sESCophIwkupOTKw8cpSRAiNh1xWd3giwTM0MwIDqulFfYK1ljchu5ogtqPWSI6EnGsxVAqyh4IRIgQjKQ+iCA6Hi1m12aCNSl8ivgQC2bBsq0l0gEIAklECET/k3hm1I9IYJZgqIQiLS+mShhNcDJi+QWQ0CAOlJlfT7S5oAy2GsBfvYGCu0/T5sBR4pIdcvkBUPepygrLEvfYdls1p0
bykQBUZviCwAkuMUqnOxHpIo5kBpKWvDELdJIbalrOklzh4iiyLww2nwAhG7tWHsXxAEsvrw09lTBAK2mEVIXW0iwKwOhbo4yd/RPHv7WGDhG5DQfiR/ulE6xPUlka+DUZdKmJ22zRNJNvmrr5PLkk4lYE1itkiSirk7QCeEhntkrfBEPbb7MTCEKkibtcxkkqMXGLThfW3mVKR20cTnX82QeahIgkfmPNYKDJIvU3yBi+/muMzHJ2nWmpXvKfGvI+QgAkUAViKhttl4fxkD07D1E8vdgZQJrvaKUPs6Uu1OCFGtRlwHkeknyht2gJZAemECpBzLlviREALW9Ry3YFiLtIJeiS6TuDrJF52eQJqfSguq0adDqPgSnlYgUMoW/FnzBkfkZOKD12R2nt8psVhfn6JRIZWKx3n+9AqvI3reFyF0sb4c0l9YFAOpcHCUMzxDobplJ5HuIeV5TZgw2elRy4rH+fRIRQtBlj6VjfpYCaKWDDZWs/k1uOMauc6DFo9QpOTc0rPxTGqq4fmlqfXoHYbrqITFiZh4jgRAJaXcJBidup3W1rate5QAxeVIQQEIy88YWLOsKeE5doJOc1yPBmloY4CAIII5etGC82w2l7SToEEi3pQuyXYDSjiGCuux9jZJLxTWRGn9BIrnL1rFyDrerGNcpodUEgyCIJSQix8ssABz4mQsmCGvOf8VtdyrIWWZMQxKAtjbTikttp8PmV2aSbwVlAOQJvj7ROvAgOCN+EMBph+kOb7RSU4pyK0+AzJMHIh+F5OjcpQqIG+3yfOFm8/XXWJbN/1oaksBRtGbS2jx1kx8MkOBIHCNHbh+ahphqOsQxlW9XKs/xDKkAqBA4jzCnWhSBROpX7u6bWrfLOedogU8ER6YAEnzh0RJpHOXmvsIrQddeW7nCVvYupnVncWKBWyzQwoHs/hP2afv+27nRGTuvTeR0vdweAjCLje6blTEAq/FuR3x1SQAgwUSsiGOE0CE/tXJNC0S0XhnaUE8XYdfPX/1MJAvn2yLmKLr3W2KUu418aY2P4FaBq982rs4rp8Htc2VkW4/qwkgWti21rGY+2nwzTdGEMmIk1iJPm3UxNxTQSnNBBRc+0QEdyvJ1N9uejuwL22jQxdFW/DYlACfgjtw5gjUYhOTpKa+PFC3DlW6Us89sWyk6U1iF2ZQGEVhQYBchSGiKcV1AABAXgdB6LTb/yHOgk7/16xmRZ2vj2KIP0dX/Wnlyi0QaUxB11MF6gkLtjXeKZESCBNrqc+slulCS/u7cO+erdkUKImYEn3YljlrqEDbBMJhEvNAis6ai9u7WXcCaf+j8W1J7XHP6kYhaqrvNjHJeRnmtVVv2TVX0lT23BpW1GZgkgHZDSLRD3noXAmTejK4bqjw6iFJ65pDnR7RKkcQ6DQNmiC4K0EUNtH7pcifQ+ay1sI3smtb00umW23W/qZkvDxJlvMZrV+xuhImTLol5Ndvb9Wvp76Rd0ZIua3ednEn2D0xgeBYkwOv7R3AQQm0YwGqEM++wsYVPPiF5h87K8z+tcYU17LPSTOej2viZPpc9KWUra2HgCt52Pl/kJw+3Lm91eW7TztWrIhHvPvDKrpiZhEzuuTszcCZF16dF/tm5tA3p23JMu2y/e4MR1Sr57riLHNBSaYA5LXPJ7SKinQetZXF36exCc8wT3ZdkP7/64g3Oo1NfOiNsqgqMwDIzue1LMVwojq6dflntGeigAG/6BqTl+u3F2lJh+wHOlaEVKO+Ce7YL4rX8lrv4SqspvybvyqxyUT77kbZzB7lg0mHMVgORhFAx8wQE4GlStDWBp+HOpg7WkEvrnTIa6URr2wRXnyMSqx0qaGVExGupJyXqnqkN3F31vYtwa2O4FlS51QyDwYJSQ9rq+xLZzEoK36krXiyNdXN008QvU9IFpTBzRzutSkXUFi2yAlIcX9PeWpDltpLJ65dFd4E1M1j3QtTNlvzVDHC7BduaEJyCKLMgmWyAqd1
e/4JwTCTECmk+86BclMifjGuURs54BGE1fJkrzbTxBtSkNlPsaBfOEbNdmt3eR+y84CpmZNG6JaUASIqUAq7fPgHM0adNZnJAvTwbZKEptXBd8l6Ggwzqvi5dPYhc/0yJgUyoOO/XwEmtWB/ydiUw8vMRgba02k6llv6ltQjbBR20pc/sONtYicQ0t45hJT+JwODIUQqdexMvWTSVildKcFj5tEuP9lm/qyOCQvb7QggwS9mNNPNq+NNNpoSH1not10Y1ufM2w25B7kWnzy1mbRfQtlNNyrR1wPpkAkikNn8OkCr342xupNR+gMBr+5hvhN0OBaT05kKKyUwkUkM5ok9P88vglSAkMne68vyp13SFL7sJ0cJ97mJCO88v4IBsKYojU+4Jy+hqY3yST5YILJmRGtgDVqxwi+/yZwTk+vfmkSEmsEjKIaauunVBByn+Bx8ptcPmwC9lN+upBb+ctk9uQQDltQ5Jy+t7wSYqZPVK5yyyfUkBHyX7VKgTomvIWWlAAmJQtdurMlJfcl4v0LUYtYrIy3s2Ct5ZwEwAIjX1XWQZmCITQbjApAVT7MZ7DSGt5a+tdXdkWnsKr1k7MsvVeoNsh9n+mUExID2ECUCqQl4wABKCI0fFLhGZkVIffX7YWstotp8RMsTVrW7Ink4TgmPASj0rU+D2HCKpJVa7IlGulba5YpZzFdgp7wmRncTmCLYtdLkGg5ZZI2Kft0OgkG2SBBOFzUoQCRDFWvVFw8yCwJEEtd3iJLre6DaG+bUg0EXYLsAzC/I5GHRfwWt/M0BSq9YSmYF2RQlW9ts6rlUdnXhdn607XBlBfonamIq0KCn91qbGJDrTyhcgEoIhg2NM5jZGlgLMQgZ0Cohr59PaI9w3I0l+jUlISX7lNTMxuTFZpBYQeWPQTqaVl80sUVy90Okxrz1YEUAr+QGASMh0uZgKLSul5/fB2KCBV7XaGGMvIDDHNA1SNkgk1/ceJhJSdjPwEvlBQg5IWL8ONDqf1FIlsmdE3hSrdWXo4G6ykdi5gFTrbr+vM7SnvVBqo2mZb0by/EBb/EoJ15r8OdgSgwMLCufDnbIKIVKqbiYFSAnfDYEgkuqyUkrLA0II3TPtA3C7YWk/Qu1EVKkAjtxKnOs9K2V1+uSO8FxjNtci37owaXs5Xo+H3S687RdTXO0KmSZ0KldGluBAu72wjKnZOitADHredvCMpFBis5KQR63lAUW/b0LdhA03Jbq9dtKFhJGMsIremxWWjmkgBkcJYoorC2jjPbdwM8VxRu4u5vV0PO3IEFYfzzOAuAuVeZ0xwCQolrTlHGdkpgCQGI3OF63ChCKVnmt1cajagSE96Ill5eMmXUZr5xMDnOr0Ym3Q21tMyDeVjZNARKF18vmCUXQG0HUNRE4NKrkoQtniRXocT0YXyZp4zdUmlxYBYkg0vb7X6dkVyDhAD4xqWQchVQbNK3k2ZwMJIUT0YdPNbJBHed4GapeRrSmy/TOfE0Exxgy8ef2sPKPza0IQ4ppht5A6O7c2TwBadLEq56Z0IbZXERADRchTWgEkh9vQipiISai+siSepsLW7kz2DJbNxTib3lwzAZKtO+zeT5zXeo7FkQgBMrYrT7o5n60kG0q7tiqD8RXplFxiXN0MtfWI7u8uKjAJQVqc2p3TOv3NrIjITAbWKGIwVG+7bz3aRRWrMcu/EUio/pCaysang8TKaojBIXQ9E22T2Eq29nVmQYgs0NLI663RKxcm0vRv91pec4CgLllq5d3AUNSSEAyQUEptiVMURRXy9ygBMRgPpNUEJjW+OgkH1IHKdbCfwYoUwyIsmjqs++E8Dq1sBAaiFaZNm1YeuSMpViGEwOyFaG2XV01ulIvqqYyYU+TWmeR4KgUJv+ayqS0h528ULQ1DxKSMMkN5GqqYOxsApQL1+n1SRATRe/GGWpyIyN0Gb2sjLABAKDPqnZYupBFb1/TKStIRvBEe7XbQlDdC4nZk15IZRgwtJbJ
yGUQkZAAQ2lLTmmEj6ZWEFry2z0SCE2vBliQjPWGBwKQlAcx+7rqFNUpB9CS7BoIhB1e2PdKy3vQVayogQRxJ9nomLpP8WDtotXY3/+C4WpqNlSdIY8etBGBAROaw3j6A7M7TplUxRupsA6vKIoMgpKKweSddxpgAc2ozEZzBtotLReyFkNlqlIIYSltWQRL00EDEyCQ8c0fRtiJIYpAaKF/bsP6Vmdq/oBEAwSl0bqA9+MIHk+1wDNw+CiClhUyRiJhDi3DSPKCVpQGCpETuOWkrDCvfCAKRlGmPZQCMyDHERU8jspS5qef/D4Mbbi6wmMgEAAAAAElFTkSuQmCC\n"},"metadata":{}},{"output_type":"stream","name":"stdout","text":["ピンクのコスモスに対するアテンション\n"]},{"output_type":"display_data","data":{"text/plain":[""],"image/png":"iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAAAAAB5Gfe6AABOYklEQVR4nM39W5McSdIlBp6jaubuEZEJoKr6Nt9MjyzJEdkViuzLyvKBz+R/5R/hI/dlhbfhfCOznOnpa10AZEaEu5mpKh/MPSISQKESKFT3WlcnMiIj3M3U1fRy9GLEzxwkAER87tff/fXdC3H9sX0wPvApAiBifT/e+/vHRnr+XD80CBEAERGfcNObEXzn9Yc+cf0MP3iX4PrFWC/4CVP5eQQgKGQwIuCfywTb+PD3AxsNGMD2890P8XqBT5wGf/ojH/kqhcoAERHhn02CD04iLm9z/R/6I17/vXxs3SU373/KPD6fAwihkIoA4WCA61745BHrBW/2w/rg379p8Mni+z98QpdPmsFnc4BQRAQUIBAREXBs0uBn7IanLL6KQK6yFpuIe1fQXbfAp976czmAIiJCUBgIR4DRHyHBsM8nwYe/yPX/21/flQTxYdnwjPGZBCCpCg2SBAjnuhuDQXV8vjz48Ah2At8u/emSP/d+n0cAUhPBEEKC8HUGq0AKKmGfOaEPjOhrXzfBRx/1p5NBPn0+pKgq2Z9+vCv4yEBQP+PCHxhx80sA/LjM+pyN96kcQIoAJIUCARBdMPMyWTJIuPjTLwqFERG+2nUE8FzbIRhcd3l0Cfgha+/zNsEnEoBJSCAoQoKAS0SnQPDCrJCgPGFViooEEG4AVsEB1J+c9KYKgxdrJ95Z68+TNp9IAEkMSIAgiQggHOh7YDNJwABJ2aQAAVBVGCA9wCsjy08zbWcuvrsZLq9/rrD9NAJIYgQCQtl8l4AzrsqZXQiA/R8AIgKnCEkIqN1/CiAC4s9YQLf0GVzN/M9S9z86PoUAZBYXAuhrib4KvLuKAAiN8ACgyoBSpbOCyM2nQLVnrKQvnE9FzZcan0IA0VUYyWVWV5l0My8C/UMGdmeJujL9tvz+vaD4cwyYq028+ZwXI/hnj08ggGjnbTAQsm3Fbv8CWDX0umNXLUnhjaMsT6bP4HP1VoCIJz4fvhQrfAIBKNttV5bcJvHeTBgMkhIQMrqdenXtVrWxac1PWMkXFP6X8XwCaCb9cmsXJ+DwG+7f+LLLKwoliEB0qceNeH3169ahINw/dL+n46JintzqC4znEoCS9QnzeXeDcLsfL64KgJDA6hiS7By8ujMREVw5moDE8xyZlXzPnPBzx3MJoFnWNW4bl77BcE8kUqDbRIT6qr43rcfgip0EYtsUnwalfenlP5sAlGQrKrU99Mv2f29Sff0IQBzd7A0E4N0CfmLLffkVfeJ4tgzwDfIB+0P2LpPfY8pgBAhZAeMuK5+wSfS3+xbogOI/bjyTAEKXJ9Dztv5NHdwurZOKcETQg4D3NXY1uro1z9aBv+x4thDcHPJNgW2PEFdPrY9VthEgVxsxHBcsI7ZLbVvI/7GEePYW6A4cL17uRoUO1dy+B5KUVf7drm4lYVxcpn+8BODzAZEN/
PqAOt4Ay+tWkCt6E3jiyUVcCLG+FVfA8+8++HxEaItQ4brYJ9b/BbnvL1dabB4iby7w5JIRAOUfRwHw41vg1k7tEkwcH+LcTQZcH26sJs9Km6uRcEOGLk4pPz+q9PnjowQQRXQZRcHqB13E17vEeTJWBdjtwE1ibm7E7RcCIOUftn5+lABUZZh30H81hK/wTPB9L3W76srfHusOZ3Tg+BLn6F9CF6PxAQr+vcbHtgBTBiFu7hcYi8H3JxvvSnTyunlIgKtPeKVaXEJ5FAb+QTKA+NEt0EN+ToFkt+pU7V6/GrBpdAY3j2C1gjYXlwKEMzxAJUgXjxU+uchHAAyKSvsHWQIk+T4HdI+NSu/PRUBF027IrGjozYSvIMVl/RewcF2qgGAIug8UT0QgRfAMZ/gXHO8SgAQDFFLcSTAFmKDAFZp75ytPvOQrk69an3AiKAgw6OFPPk3ReO+Cf8cR728BCrx7rf0DRoQo1gD4rby6EWV4IgK6IdCTRhhx1abCeMf3FxH77OySLzDe0wKk9ogfET1+YcLu1vZ3AldY+/rkrq7xjRkQ3errP7ZoEC8UIAIiT2G+v/OID2gBMqjEhQEA57bkTaxzTVNYf/wINNDDYOslCG6x3avBTFDogX+UPxwf1ALBJNFRmx7kWHHwTRHK1Sv8wML7nwhnxIr1RVzN7Vg9g/Xzoj009A/zB+MDvgBFPPxq/8Wa/xEe3DyX9+367XrrSvyK+0TA48Z+vib7gKIC8h+6/niHABTC3XxNcFijH+E9CyQQ2CDceHqh9dMbznmRAeik8Ih+Aax4MDeF8154/e863tUCFBVvPWCxJmQACIQEwwnAN4p9wHjdbFzihtFXw9kE9NiCad0FUKXHPxQVCMYTAlCUHpc9ucq3QMAZcCGCF8ftR8z3VWR2yXbxnYO+OslbIJEU6fvL/oG+4BMOIEVoWG2AbZ9f/dxwXrNhbi7xjjAgV6Rzc3YAAE5uaEj/CkWk24b/f2EJEqvzx7hAuU+hnMs/mzJ/92/XsQXNrvqh/3ZVrdj8BfwjrSAAKwG64ZMyA7AewVi9nQ3DANYcjR9fNvqVsEn5dyXb03dI9t32j9MBfXQCiBKelEHAIbEK9Kt0B4BY33yarflkdNf3CZL0xDRc3wlQ2P3DfzAm3AlAqkiEN2pH7d9zd3jRCU8h0XcIsZpBxBPW2fCgVShglTYMINz/sWERIAFkVggkFOErjHmR41e0jxcFvo6rAlt3BolgEtJtzZRcMfMnJGWQ2n2NS5L5Gj1dr/p3HQkXrnXbJNNT8X+Z1W1wBHjXLUTfK7xWULyjSLYP98ABGL5dW0i5REj+zhRI6OYfCUZEcqzwL/DuVr/AWuuL69sXJy/WZ/menciI64ahdBOAgfAgRCjCCA8GYH/fTZFACpUIorojObpNy9jSd65bNy5pKrwhDnEr8q4pYD+CcxA9ydq7XCQlrfTXYDjFV2v772MfdC2QBC3Iq1UewuBNEtuq3jbXbTV5g1033OyCbfmxBQO2DRXbVUgRASFwDwpVxUmXNRvag6IQ0OrfhQIJAIIK73N7its/jX0TvLHbrtA+L6+5fiP8VmU8EQMUoRAScIAqZDjhcALugaBQBB4pfjqP9AuMBAAekNwgikt+Z5fNgadKnytKgMv7caXAE1d3G6tkuIDAEBFuTqhAVrS1M4lHACJKQCJE/h5OQgIQbgoo3EVk9VBXDr4Vg9vieH3i74/NZuoC/SkCDLALwK5sQnoOXcA7ghAOMAkRQSL4br71LzK6L2DGgQspKRwRK3a5bQOsPn7fAyFEvEOep07TFgEOdpbpLtQaIhVC+t/S5gqHr84TRVQ2QlPt7xEvSgAirLHpGtPfsj8v4m4DNG9ivhcheNEQwO0DXx2+Ver1D692MrdcsVU3dl5xD4IqclEvjCD/DhRI/aGZJKq2uHAvcHmoxGUtVyTwZhVP46SIq39zIdlFb1yk7ProN2/IAyEqemtf4O+yBxKAoPS0V
jHjZffGk4ljDfauQPAKDAZvN3u3n7Fhh5dNdKGpUORiTwYiGKC7R6za8SpwAxCXZ+VS/0wCdKhXhAHVaquj/yTayRUJfar3eCHQrVNw6/asDsLN+rVHiOBEdLkXHgaAwrUSg9v6CVWRXxwuStvcRLgKsLjubmwSAFcjaBUCvMIDN3NG54prAcFN6J9UvXqWXdpYWGdA7Tr3qlABKqlmnUK/HAE6J3P1B9Y1XyOAtznatzgvCcqWCno7uAnPbknwAg52D+hqWzoi3D1AUZLS7e/LbQHAQZXwXxQ0EyFFKAK9vvlusOb9Ss2LdNjGBQ9+B+G69YOv1Z8kIhy93HjNERLh0y/3ZHNev/aLjLQPyDgmp7M1GfxG7Gxu3mU/dLNvqxQhZA0gRX90QkRoiGNLe6Cgw8hBEdWLq+h9+REQUGQLnHUyr1n5bAH8rCLU5xHgpTCLurcID6gUPBFpuOz0q13EiI7yXo39tZyna4vVUFg9w/Vrvc7supfCHUGRLTj1jvPBzTVaK29+OQIcZGAgwq0EIkQvoZ9bcHd9a0sDjUC4bN0jbhfQeXtNqcKWHAQR1TWrMkC6h0UnyqotQmL7Tg/A9Nu5/9JxozRhCkZ1IAsRkCd5n70q+PI6NkwIcO3J7wx2uHR7hHxizKwScS23XGWLu2FV/QA2AO421warIHL/pVHz1FLTvi2Zq8Mh9NgkPy9cfssJ3dJx4bZ+XP9yjX/3je3o7K9dBkYAMLcu+kiK41JFcAlHdPOwxxPx+YXhzyMAw5IzG2FDbYAg2HGx24KuJym/60aPEIhfkoHYn6k7YmNoRjfylJq2B7zGmSlKsIMCF+bqNkdclh9dYfyieeVJJBkrPEQkJacJujgLvQYun2hAAtJDWiT1plC+18hhjaxSGG4Bioh2My8cYRFYUeF1tdwIfMP6lyxbEvxFE0lTeKi4U4DBq7MKVq6k30jCD0QAwgUKOC6PiKvd2zNCFD3DUEQuJbLNo4eFQSc3i2kTIZudtTJEN59A/yU3QQq4LgFQBtShgEG5Cj4635VC3d5nBOgJvTBu+1N/nB5wpa47g0IBA2Lh4S4UUUYHhVYq42Iab47le4bWL0cAk9bUwJB0MG8WsRYIrghWrFzwBBtag2ThlxpiIjozOHyD1t0CEJVeL+nd7UtCuYES+i675FN1X2O1i7a7/qKKMDmIhlAhmVTlFBcMY9VpfisDVq5cN4HRueVPIqIFAHiQRJib90YzDpBh5kFh5golb1kYuCAIEUD0GmNuq7/gDr8YAcxcBE6N5sFc2W1cwj26f7IhAxc3piM1EhGw1RwWWc02AgGK9Kp6KoWkY3X6ZY0JQoDblMHbuCG2XbY5ob+sJZQaQx1gpbuQkE52WbPjSMia43SxfWXN+PMIoIcBHdeoAkFGOAhoWguE3RyQJGsMkLYCDHHJHVoJcHn61x3xC5vCpEhAIAkRHi4OAiK9MYys0F5seaKdARgSPXcU1zrIG6uhr4kiog1AuDlAEcWaMnyxCW73/mpFRlezq3D8pZPI0maEBqOXBtO73CNjzYkiIVurKK40AKIXha0gsK8lFV16OLYmEx37MQcpKhCsdsZGucv6t6cfAG+jYr+sIQykpnCCjJZEAiHNI0BXYIXsSdJvizpIiQuJNpwQfiHQuiQRRRBh7g7ZknDX0onwLfyONXr+BGK7Wfsv7AwkgTvU2apYdWvz+nh67HI1x65+DAAGSXcSSruUxq+T5yUnkkK6CSJI0e49c80VjI35u2C58TfWezzJ0fhlfYHo2dDiaNasXJXeqpJXFHcDq7hGwPseeArcx0V+AwQc7nQhQMq6I1bgIy5P//LzYhb0F5cqlF/aHkrOoFBoXoo1W0tiEVtpQ8fBu03UdZP0LDiucNUTwO4m6asjHgRFu9knW/bFk+VfcrBxvdt6KVwQiF+SANExKI+yNPO1QUwHq7tM3NSSQFYHWeDuvZiYsnlx7+7U2Lwd0SFa9xPsmnt884W47P/13
at5gPev+8UJMAVp1ppXAyAu0h2QW/0MAHBZi+cIrPgtIU6ubQFu174NAqIZESurWPfsnoZOL4/Yn3478IuvHkB6aVbMSqtBoQYQDgZDne63UklchJ2pAYEFNzvpilm85zYBol1gxpZPcSHAJeXoSQT+whcrTZ/faeXzRnpRz6XOpTn0EqpbFbNs+5ABhkuYbiAgJbrt57EVk8TT9W+Pt2fEY5WdfhWy3KJAN6u+lanbNvmlUeH7xwW1VGNPY75kSMbm3l9wcY+uyBlkj+BH9MT6VQS8s/5uCjCC9NiyjP2Gwy+S8LKB3lnrBXN9P/7y5QhwOj0eKzYBvSW6bOQnOwX6E2T4Jb4rBN3C45JFedkFF5FIABYdGMJm/V5WdtUDt5klVygi0LM2grehui9OgDfnJTLR7B2Ta/tdtpcduKFa9+ejFzxxKw65+fJNYoiHqa1FFquk2MyE1W5cXentTfRfYjU4GA1xSSv+JVgh/bA0uximH7j6ume5qTsnoheEGHui649PKRgB1wtpA8DmMnC1rrr6e5KDuypLUQGrAFR42yj3nPZrnzLS60DQw2NzwNbBy4unCm61hgLGXgJyk19/XXi/QmcMk+3vl+fOTgBwY4qb5a9gU4CkigkoiVt1LWn+wef0+QRYhIzmfuOWPl32jb3ScWyGyZoCEe5Ycbz3J7WaMbGhPH1tsjHAtugVgLiRAkR0i0RlWLTXMXRJK5LOzb8oEySA0cwuns8Hl9FnD2DFcXzbkB+dSWcNSurAYaz1mDeL7x7FxYtkIC696kRVhtYoVEHqrexkGKQUsy9IguTwMHsKSQWwRQHfoUDETTxwCwAAH+TK1bqXlAQd/hVeMf7rBpAbYoX0vScRQuYonqEkqEkU1CnVpKV8wTTadBHKN5YZ14VtoNw25d4lDjdK/+MPYtvgpCAkKBvgcbkLyC5Utjz0LWeSILy2xkR1ECJKSk4qOjyr+9qzCdCA2AoYoj+ha1AmGKuk6xu3VxVvMJjbygLxjn56YuB6BDNj9SKvgeN1rP0HrmyAVQpYCyadBjDcgWEad7pYkbEm+peLmacB0RMasDFAp4CsiQprX6j1QZLaQTBzUMKu2uJizb5LhoATTIHoJei8yNONTk99fvZwIVoN+kg6XAB6SyXRXSKQ05dLm0gKj1UQdawnum0nmwO3bQiSFJAri8gW1dsY+r0Z3TB7eFtNiUs2RKzqY73OJdeQGwwh4iLWMlyg4UGKhIeEi+b2rFaczyLABfpgrKBNN4OVIMS3JmqQjoavsyOfeIo/ljfclySMtvVWC123PNaQg+PdTMv1yxoA0TyEEhrRAA8zhRqT2pdCy1N/FiLcdmevmBAQkNDYtDaIrRgCDo9A+Ca5Pi4LKeh50Nwuf9kdG5PcaJrNWg5oMNwaPQcp5hGu8BwRyNnrF1l+R4T6E+m4Lw2OCAidsqVwb1PuMbBYq8u55dOvc39Chas7cCk3j4s7gFuqPqHfJcca6M0FrFLXVr4IJ6iNwsj+pXKHkq/ifYtQ2xrgh9yUDhCXYihsiTtCdVvhzTVZ5kqGq2Ag4ZBemEKA/aIbVTeDmFfaBLokcjCphxGK8FCwx8mDkAFC1C9CgXSTA+SE09esjR7bCGzgKCIIB9fqLlJEVNslrS5unjq3twSgwNEBFPCGithsz1gpcw0PdCp4dDQ9GI4ukt0RDqRIHIfj6YtQIIWsRp+DW95+nwHXuNQl6yl4MRCJWK2bjXrXxd9kewauOGJsYeGNRt1Vcuk5M9icwk3WBML6zRebkCgOC0YRl4GDShnsS8RM0kWGX/KEL2bg1WnfbLNVXhOBjg/e5FJc7KEbrR5gQBiyWdAXSxObqAus3Xq7YR3rdgt4IJRkeNATR85r8mBEDJCWcvkiHKC8RDjWuGR/Wp1bL8Bg36hdEnSArAdJY6uzvsiKd73oEPZyEz61Fter6rp3tu4KKySFVRYDgBXsxuQkxWkIuCdrFLkh9c8gwFWKOwLkavAL1
2wpgftqCUHAIEMQriBhzbAm+fTFXoy7dR+tEPJFWbwT5mDPSwwgtJMgPDZcsluODsBqJphgQYYDbYzWG1T9/D2QLigwOv1lc3U3WGOFQdffV5feJZyxtop/av126Gg7cwbh1hUmgUBcjP4+d1ndy556FoGQrt4uMFsTAm4lBRUCsUCgJqTyZbKo0yX8sT6ha6hqFWKI2xbwsSW9b611rvIRwE1KHy++TVxcwLU1940MWMMs/RrhhIt3Vlxnw17FZe7SRUJ1gUWLFZL52RZxWl2TSwKE9ziH+Gq6Slz7YG9rE4Qh9LLGCzB2TY2+TCwQztVYkA9w7Kb2uvyBsuuizULvnombuwTQ/REwmjVzfV5z/p8gwFYfuzLkFh2IbTH9x+2u6A5rL/3rMmLlVq5MxG7ErXKxJwQ7GbLtpfdZ92r/YeUKX3srBENIONRZIQwyCyrNIekLAANJFIYuc4Ncy/5j09gMA8LRg7zCoAAwBDUirIGaWNulED42bUhZNzVVVbqCNYDvNi66bDh2dAQhsskS7wFKBui1NmtI41BBejXz2KdlPr9jDH06QySs0bdY+R9bEG+zBHE18y+mAEB3hAdVJfS2D8b2hIVkGChJZFUF71SEfHhs3BEANUK8y51oCSC8DSYwhEMkt2sV4iqYPn1LpDVk1XH/LXK/ds6+bHxGr/e8dXKACPYDlcR58yS7MhX2tPPemeRdK/lDq74CS+uHJBgh64w8OjIi7IKJKVO0d7DvBId/DgVSlVV2rYLnJln1xsAnetT8ms3bvQTtOXHdNuifpIOi7EdRBYnwi0x83vSu1OrZ8hDVCAMQYHI4DMI8QrNrRaxuQ4T5p2NlaRGhwgnH1kN3W/4q/Nm3LnnVeliz3kXC17oHijlWIwH9UB1QtwXdKMefGFs+IreouxB5UgPDQz1yT0HNsjtUT6O71eYhKSGat+blE+VAaqDGClncpq5gM2k2r3UDibeKb4/eCIGArjY6iAjRgCP7mjhxsQxuzeT3Vs2N6BBc3IkeHgzIkF8+tCteZwHR6bBLKSJqO1eoqHrNLVcP+7TOPMkBj7T6IttU45Kuspp+5CVVpkNG1os+AoyOWInLqjpF3FfJIrHFdFbD6Mc44MmUn3yIDsBz71AL0tGYgHEamIeMMjc1FY3kS7GWvUmzT9EGKYAevY2br22yrP8uF0d+m+0l03/1kUICoqvJR6KnkkKi95S9+NGfM0g6A+5wiTAJckwcdvSch/RmVIEg75bIy0nA2SLiEwykBCDsErPYTJy+8gDYn77w4vt3R70ngm+8SjBCFXAPgW4OxGY3fdbaLxoIRD7VYFBooa5J9xkCR1KEccw5OYaqaR8txha0T4gh9x4iLpdU0M7sV83fHVmudn9PpzUPbMfNPVVwqoiQFb5ahf9qJHcJ8iPE+LH3ez7q4kpJUT1COQzDjtKKkVFbVUlkRK2ig1j2rGBr7x3/9XEC4MlDogRXNyPWlW94VY/zeDhERBxcs4qxeoAdXFm7BF2C4E8NiA8tfvO7nrzdXyoQ5jqManAKUr7f5+xHnKRWVMsKbbZ4VUcj896wJrl+EgHWhxVXluZFDK7R7VVSE2Frbexqf/T3OzbENdbXNwGfrJ0fsgO3ksMAn9Ig1vtJwCw0pXAPAaZ8uLuLZc4iUaLIiCilmbuJBxVTtRqGZ9cbXhoqxvrfalmtTu/W/qNPPwLwcO94YE+d7IJj9XtW6jl7Dglv/N7tNu885s0S/wAL9O6dEmERkpMHhGnQ3fRiEF1qq62ZJ1GzVqoDqoRBQodwf3bR/UaAW0uFa2rrLc4lKz+EuW/nTXZ4jBcNH7zSbNMPVwIEEB8Oa9+ADe++z5AwQFQtkDBMu8OdGvUw/DCf5xbD/hCzmbm5kMEBLqop6rM7sFxkwBNbbcOpr2/1X51mHqKisUUTtwutFApGrEGm9YBE8uYCHxqOrXTuHS6I7lWGiApCU2DY695q5J3UR1rRfNhJZbcNES0xZCrI5
ibPxQouW+ADWT4butFXAwTgzUDVdZNcPkpsbgjjKjEvtLw+4g/M4JIQ+aTp3vUwm0jZhLYbUdIQtoQO3+DU3jaTSVTMZg/rh1kGVoBd8OxTb9d7JtnQi5C11Uk3dqLJyuxOd1Rzak6fqdpXGjx5yC22rdMZx9bQdM8NjAgZ3EhOw258dSicZBwBLWUuYR6thMIiWqAGLICU6fOyzOfyLE2wcsBWnLnyrUtsGOF2yGDQCPdQJvl8s+79saZZBhG9Bg/9/rxmrUmIi+Q8SrWkLRhmc8PYpJRGssHdnIRIA8LEuvH6SVsgoqd/UOWK/WPl7GCQboBDMuU2y/dnL39Vj6srv7lLPWVhhYYHpadxHHfJqBiSmJ2dA4ZjCw84KkqQSKziFAqjKVWfpQmvfYUpAhG6RPAawupgjnuwH7aiQsYXiUhgXen6yyVs7tuBJNzQqBjF8jiNGkxppxgfjaFMBi8uMHcKNcQsQEVW+NA0x7MOQE/rKrvpIxIZ18rVvl8jwi0Cshp5/JLrX48e3gQhcJGqF5vMwzVpUnUMo2TPls9tb8o2l+pDoLloEqwAWRqmCilDhNdnGIRbHyEhe9nkGim6INgeEbikxOpn+zYfHRsmvwqDS9yMCIEQEEoaJA6jM3lrC330utQW0YWoSrLefU+HPAxiE4hA+2kmWDlAkoABOPXaSDIA84YN1e9a4pdYfk833WQBrsAEAaepYdSEGERTG9/WmKED53qqHuEhQhVItBCqRN5LIHEElWw/mUlzIUB2uIezUiNA6Ymw5r6Zs4HovfDiJwybn1jtO6+IXrF5U0YPYDUDSSB80ZEDJe19wKh+CkOGt3muFEZ4SpKjhFMpxG7Sko6CUbNSav2JqW6ttYcR0qqDiewNbiLCI8JTCkOXiyI/5rZ+zogNe6LQ4pqF/S48xAAlqStf+gS1WmO/k9NyrkKPipTzVFplSGZWV5pDK3VqbCHtJx7VhQMGza16UAPuHkx0h0nPGVoRv1VD/vxlXxYHdHdCaLjkFlzN8h6lDkmJuh9G7k0eKpKLldJC4DQQKtUNwjGJ5JKMLBWRU5bQdo6PQwMdEIHmnLMoE4kaZqDSKrjtAAS2/sfX119gbCl64vKknGrD2Dsak1NKd+kOIjx9e448ckZxDRMTQhEWnlT2OTSqknVuFRyytCj6ExjpqgXCgwlJJ52hJTRltxBFA7DlbQiv7TK+zPqvO54i3tPgt7xUACJgiKQ07vKAu5zkZOeafIAuPYAbimBmuKiOukuWzM+QtsyN4iE5i1xrfj9CgGhnb1U1hfb2BxzgqZYjuW1UiGyBvS9kCPcEpAtq3iNJ258YpGiAI8eU85gmRTo2qz7sdG6GGkQGYDJgAdI4JFADUR1Lbc7uIUPpHzWKVwJYVTMRJskRyas6lTi3yk0L8tb++UIbAKuXAQIQh6zdg1a7LCkgB+6QdNztU67zkSJDrrIsPZE0AQkCcuSggzeF0Atqo4hKNCMlPt6YcyWAW7OKEDeDBgaJJFHrYjlWeJeUNZfyi61+ywDAJQgL6TmlEQSFIim/CHWdhmmKWU5l0CHXqEtIc43UC5JaouQ0WWoFcXaDWggVEcIkTvw0B8DNmpp6tYUpQpFSW+bStslxO3PyC5pBm9uzoXHcjOCteCXANI4h8zAOcoqwmXnMomdENAF7QzMPNRkxSfVWw5cQUoOoCWErtvcRCmwcUK0M03JWWM70gNtic2m9gQilV04HEHrx2G7HrWl0odEH78pY84SiFyHWzfcSkXCHCujNSclpOvz25X/zcPTdi+Y/PJYH7l7dfYtTXehsauKafDRXGXYEm6fj7MagZvc8pijLYiEJsB9n200LwNyqahIUhbu1Zr42PF6p2Bf5LtDyTo7Q86yEniviKxLQg3LcjtvojXcCgAz7ob5xwVDm+c/mSb7Z0eHBXrUV8GoEU564O83FT2fz0EwhaWEjvLrg4/2Jb0DR5pfOot4LEshNUssF2XvnUlfL9b27f
Pima/Q3OtU2848IOBERKkr1IFXH6auvKJOXhvmHh+D0YoqlzGabp6xRWXXMO8mtLFbOBRiTIGpQbBzQElL4Dbzx4wTokC3telxijwrF5eHzvcqYj6/3x4neUZB18/eCoovfSpGUzAjJu8PXv7mfZPDmWM7mGlKint3o0rb2ezqlPO2jmVtp1bKralnCU845e0oxNpfncECf8maHAVtCFq6h4YuLuFkqV8zgPRfhI6tfA6aw2OolqOF+va8bhlAd93df/fqFSHpsHqVETno+hTUqYQDCA5rkBTLMS4pzXVoQwhb0JMxDJKWIpvqxbfmBQ1fXJYpqChfqioDEk79ePhxY0//49BI/Nq5Hcm6pye5JLi/hrkh5knE4vPrdy0O1NrTl9GbOk4rXgDSh1eihChv1ZQ3x2ZfZmpMmyZNTTVyCtZWyxid+lAc+fOpsgJI1pTA27bVtiK1DDOMdQb92frhC4R+1FGJNG18zQbQJs1gLQ282TRkOr14kS3f39zzFV1b48GAYDg1YeulVqUoJSNJRBVYNs7WgiNBDIACNJXldWuNPIGM/duwuJQ+ZDZ7U1n5B14yxK2J2473fHKv4sfVf4K71U6Ke9zmqNYdFUOTw1a9+9XJG+fplHG14Wf/WfMaQw2gLwAhFuLq4ZM0dMlvClwgkFjYG6A2uNdrSWuDjh5r+2MHLOk13yR9CAsl79tiqm67xn40cPWbuT6ykCHwYOr52D+8iMDVOd19bqaU30eb49W//y3+S05/vq8dp2QFtqfBh8pgb4EJvLSIUOQ1paGdfWtjigqBXNaGFuYP1xOIRFfbRpJkPEoDQ3f39vZyOxNrd8yZ7qB8xcwn4dTzbybhkOF5wrQ+Q4Ma97gJWIu3v7k5tDo/Bw8df/+ar3X432fL9w1GG+vZPbwsOU2vVnA2wsACdnO6VaHOxxYtDQWlqUeFoFrDUopTSGj6eTvphAuj41e5O80LosmKFK+teJNjWEHfTDbeKYDuU8P1Du25GRAglhNRhf7g/AU2GyR8Pv3s5jD5ULIuZv/1Pb47nNOrI4jXoLNpcqMJp2sOWpVRtFhJCC6mhZ4ohAhLWlqW1fo7wp3EA83jYD4jFUoM37d0vY01SXP9zXuuo1sX7FkXa6o7eP9EUeOJNUyFkHkfnIXvwZQK/vr+rfwrUXRHZtfObx1MdxxxmTOHhmBk6MqXxzpPVCl9AU+ciYTCYujeAEt6aOURh/hEKfIgAMux2eUxuG3eDsp6c283htSfKGtZhr3m5qrc1A//Dj/+GU0Rk9TM9E8uYUhqatDc/PArq1zH7uCvx5uiSZZqrY7SyWiQcJB9USyklAiZiLVC9wsWjH54lACigJjZrn6QGmafdQATdtpSHtfyhy4Fty6/2EIHe+PVmf3QNHzB+4FjblRBryY0hws+5Ypx5V86lvYnTuU7J9rthP/xw/r5qjuRGiajmiPAkKe05JFgpZiEQBBsaenmOOdUF5hYiSBofjWS9TwDq7jAmt8aIEKeLx3r8ChiQS335pf/0BcaJSxqMd9NxVXe393+CqvQgsBqOhpKnxxI5z6cfqk37BHmVlPvv8nRAzCXBzs1SpeqQ8u6wN5xO5+rZXYfwghbc+p0h3NWQUoMIPD3PF7gyQI6WxbS7QQxbD/9BbElT/QmuH9/q7K4mUIR7bCHUD26G2IQJQWVSYxPXsOrIWOoiXiOWU9JcBp0Oj9V6AmqAyBx1THk8nY6nGjoVCqSxeoBh4aB7uLumQRYTd340lvceATTdTYyFHvD8Qvg9HQ6r1tuE9mbZ2qtAQskU1XvWjKCXs4bQHC796IyKlf6X3RJBUYnwyBjp8uv/96u339VX//IOc8ju6zd//OrtOPzq/hBLvD7x3+ThNVEqWzhVRuyTgFK/e1jmpcpotDmiRrgMslg0cVP3NOwO93g8t4ca7ceX/z4BSKGFu5hFNEtAwA212vr0YzOsubZT6AmyF70n3b5xAUnXp832Nn9HNsS7WhKmq
b0ZXmj2rBNeP7azSspoJy6Vo6cFMK8SgUkVgPmQ6K3WWikqGto8whRVBJEj6I6eWZy82E8c3/ceB4hWF1n7YAozac3N3CEqhGtvNBxAQBNviwGI6MFViwAkBPSnzHeRFgBAoS9NNI/3+kMaD3evD3uxP70tLY0S1eyBMR7g7WhzYVNAJ2kaaO6+nI6LJ6q6WzSLJtET83aL0ck0Docox/Oy9MqT5xKAIlHg63eCksfkZs3CIbEdFk8JX6sttdp6tFxgzZRioDcRvhagXWIdAcVWQkmieZt00BE7Qyw7GaWdDfc5JdHlWG26++avx9JARDUMOqoHm7KV4zzHOIyqP9CLt4ANiABMKeKa826M8vhmqTXEP2YLv3P0Ngn3MGwwlVsR6eccwJ3CXkWm8BBxCpbiqdteF2P50pRGLqfVXOxfh17sJ5KwoN6Z57sz59inKt+m2ONVfq35XAtVbamPtbSogkFH0yHrbPPpvLRgmnaOsBbNCbSM8OaIhJAhAcv5Ya6t0b19xBh+lwMQl65S/ZnZetgFripu7SQX6H5JPyMoorcPNXpYPzdwXfiaMLvCH1segAMQSKQhyqNGW+o+Znk8y+4wjElKM75cRjmil+hHMo5p0B1s/+BLcYNEVI8CNHdoINzcKlwTgOpR56WZOz9eQfAOB6z9A66ff8eRJMRXS59J0Kr1tOkIhIMUaPcNL2V/W+8MkN2MuVQpA4nBIfNN1hrnefGhHM93OQ47OanNwjtp5vnwuACDpDEfhsyjwYuRo2X1ahBUKAKEW2uOpqo0k1aimptH+EdPq3p6+HqH5D/8cYpoEiRfLJykDijVIuAmG2SCQNLSs2ujOwwbA6D3FcRWjhigcM+2Tzx/Pw5LBe7L8W387ZAtv2l1bsNO/Hy+Lxhax4gPu3uX47FZcVHaIDRjarlF0EmDN8egk/LcKtACZD/L7MfX/y4H/Dh0QNE0KJRN6AiyQo0pGjySxnbsjr8oxSWbXSovcWP9MdwI53q2dZwi7wZhGiarddy9/PPj2/PQjvVP54z70S3S3Qt/rT6lBbu0O/ik4bVGlhS152lrqhFidLNomtXBFlYaslSgy6/nE+Aj66ekYRBoeLcKSU6EtXPXFgwSSvGhJTfpggJxabUT6+90rmU0EdKY8ysObCLT4+7w4jfzmzSfzndLSp6HvCS3lM+D3J3lbphkl8ZzfVuWpkMAdCkIUiQ1lzAkDyYUbQKmMFcPcbmguc8hQPz4hymqIgLmrcaV+iL7udVwkKkXk4t7bToWRA3XfkTPxlWr9dApQxFAyeHly7n6IdVZqf/iq9fjOKT9jrO3KdVU6Of7ue18GsbdQVpJ7fhoZT2Wsa7ba5yJCoGQEirhLl1jA+b2Uy2X3uGAH/sYKZp6x4INDKOmQzQfwrwfqQVzur32JAEwLFPWcAc2Y3ED0wgIIwvGvdvsp1EfT3JfcD8dvvqn33512lEkC81aedOQJ+Rpn2s5vq61yCAQkMZAMARurSIkqQ1RFEENi0oC3lqzn6gv/jFM8APrT6l30N28IE0vYqkHemtuYS0CAhjdA1QPI2kdH+heUVf/gQAUSpNxuj+cltnm7+vxtNs9/Kql4Z/+b78Z5e0whqKObxd7ux+mF3Ic/OF4OpZFxiGZz/Bq4UgRFl6aCZNypy15UhL0cGNrrfinccCPU0BUJCfzWNrm28n4zVdpAMXL27n0WlLKemQ1JUw6Pn3pObU2hugMYS5693KUNzia7k4Pre6+P3wn029+fXg48RDjOZUmKaFpcDf4H+fjqZXMYQo5o1V0U9ObFw9hHgeoSwuhqkNgLrXWn3r+zycAQMlJWhRfa0nS7jf/xTfDX4+utUzfz62iAcQAR24MdwckvFfwrAGRlQCAG5j3v3657L+PMtuxHIm3f2uNbeQPxyWNckrn8CzDQ0pJ5Hz6rpYaOQ3Vca7WtpIar96ozDolKc3cRUlJcwRq/Un+fy4BOteLSoitWpXg7utf/6s6/vGUh
TixCFIIeMe27OYw8bUtyoYirl/rI5yaX0wx/OafUZeozbAsPzBwyOf2bSTaeC4e6eX03U7gdT7WYpWjIrNZqNsajQkvJDSLUrVILlAVc7cw/7gF9CkEgORhyKN4cx9a73kZ9vDn//WH/T4mDPvxm3k2IsJKqrMEVY0GqoRghQl67wBSgjKkDIyPb9vwH/8oxDIvLcfyh7+08ff/1fJXP4/jOE+vcG61/LcP54fHb9+6cErwoycCFk2EbClUl0xhIhqWKItFnBGsoRFLfU730WdyAClJAGX4paEK7fxnn3/ICVhIJSWSqOONvjXj1qTOOn5+sX8DWFP67Rze6iJQbw3u7ZRdy/f+uKSvX533NtHHUKD99fvzjBfTgctslUkQTQVwJxCJEFEh4M09aGEWUVot9VlH1j6HAAQlJRWCIVv3noDPbxefl5bSkmHVGCnneFUFyxyN/bBQBtaDFsPRDyIRStJR3KuZRQEF5kB5naZB/vy7eZGvfvX7x7+Mh+GbOX/3vf/p9bee9i9eHN7CA8zKdKIFEkGgpuQEs3u06tLOjGpezJo9q7/G8zhAVHVtciPJ12Q5K+d6PuehpqH5Yj005QPmcRytBde8WhkI9wbvDnCIpDQd9vF2XprZlpULVNe2Y/xhjhcv9jz8ruo+9vP4l2//9m0RyXt1wdCGtnUoRShEqT0O4u4W8OYFZtZq+DOPLH4eATQraQHpTM0Awn05x3m+My02NGeIDipy5OC7YsG6GX9Dpi/NOhQoTdO0v3u1Py9zqxZb/w6GhTvt/Aef0ut/f/rtKw54PL3+9v/33YPvcuaiyG1Bb2GUiGjUENYJcQ5EhDkdXszCWms/kSD7aQQgVQcNukTmUL23RQhr5zjVPSShMjcROlQWMh2M+UiLcA/z8wJvFhQKREPz9OI3v8p/Tm7uEbIlyYdFHBNbfnGwf37z8F9/lYf27/73P3/X5MWU8pSXIKu5SqPoEGlRd9Y8LdaLFyUMtVWLVtsnNB5+BgEIERE1J1Qkh/QzwcMWqUtbBhEMCakA8DDYHD6k/TQ3NHeb3RwhpAoiUqrRLObX9ueH4h5X/yMccFb4uFuKPpa5vv3hP/67PzU5jINXaSNrsdrqYIpRsw6lamvC0pPNI6KVYrW2Vu2K0X0RAnQ7oIVBU3hedBWDVrU1Ly3tOKBXqkbBUYokcD+q87XOeFO67FBJvQDLcf7uUcsy16f1vWsJbZH5WOWr/+Lf8H////7tjfzLN0wNaG2ZsLhZxBg554NEbtVJqbBkGSgVbanVlvaJHSSe9amIou5EySLDMnQhEHABS5JFZyEcUZF5FqlURT4M+Pp8jqjeAnBqFtVmx0A7L2bXapZrukkEYLbUevfq3/zTvz36jK+/amGJsXAENCINCdnxKmFx0TGdYk7qsPBW51habZ/YQeN5MiDc4AFPTB7QoYOAhuxjQzRtdQHFAnR3CKoKx7Sb5hcFOpcaLYzEmA9yPNVm1lHDWN2DXpnXaQCrpcxW/0P59234lX/1q7cwZwmBuciBMmBohwCyNDefahbXFtWPcyu11ut5F89sWfMcAkR4C41AbU2m6tRuhmMU87MoEAVCB3OIuiXNg44yE3IPnQur+zKZL5pl/7vjw1LNbSu1vaYddJJEmx/+KnfTX7/+v//r8kN5PFjCOe1nShVJOwgxQTXNGIB8zubNai2tVV8gyXvCZQf1v5AhFPAmEmtf6GML6yioVw5Z/6ZJ+onFTXIMYNIh78ehITnqKfKYJCdb6tBKk4MdHvXh5Lam+a09Jm5n6stb9/Hl7/8fX8//bqx/Oe3dYa1Y6KjTK8yAi4ho6GJWW0E7LSWiwXVP8NFa7yQTfm2N9/MIAPSWEZ1lF0Trc/b5rGOSLDmLsCIJsuYJHPIQZuq+CJrQYI5h5IvH2v5WDjqeU10p21NMrg0UOje00tI0jn96/WZ+9S/+gqIcbVgyBz1MODscOLrwwZZSSrHTeXbRUchdhe+reYSFWafCT
5HgWVuAwZRzP4ch+qlJgYh63iEDVJKeWu+DlkFlybMUeEsqLDrHIDaIDYDP1SvW9lObror1Lh0+jAjz+S//UfwvGY174SkhlVd+1zwBgI8QWXyxujyclnOU6tA8DqNymTkMbjRHa83NzX6qp86zOCAgOowaVhtdaH4GgDDr9qaFmhhsYBtBEOFLOEWmECt1dn1UmadpDh/r69rrUHu87JIvsqmCDvXa8e2frZT73XQYZJqqWvIBnoreK7OUN+fzbMtpKUurQe7ymF/kkGrMSgMaJVCa11JPP8ECz1WDHKcsrXg0jBadh8PMRR1COlhEs2Qde/AuqBptkOwkmrvR5sF8Pi8Ra7enHmy6TcJd+zaQWNr3VSpz+/VdPt3ZEcMj5/Bc5uGtzafvHsrJyuIkUpLdtNdYECiWQqHeBqowR7MyV/usRMl3CWDnthuSpmH6LiW9O9VwQIacMSokcxQOeY9BIkNE73AObWaF0KnFkmQ5N8w+H+vsVjZjZU34EaK3a14PYIcf8TBIljQephOANMSQUkpDG07/+Twfz6ezRXFo2r9MlGk/2fIA8tAQZ0SECcUyRliTZS4fwwWfCYm5SUGIRwO0SdbKADDc6ciQLKIcPckYMMhBBXuksy17BbGYqbglqHkzDfg1i+7Smqa/ga2AxhoHhR52KpkCgZRFcjlF+Ol8qqUyQiSP968EQDuhZRhcEGML97WXmWpNCc38IxrxmRzgJo3uCbDUkLJ24yWP2NGz6BjQkfvB1QGZ4A7b171WFtGq4dwnl2FJ6QRbbXWs9gq23y6ZZmglzGtuyXEC0EDxWGKpNdC7eaeB47Qfx/FBxYt59CweiEwLPJPSkotObaoW/pEzSZ4nBMNNSAhmWGUgJXHAZz8ddoasdzU3HZLIfhkcUFA4CkQT/qASrBjMsvjU9LU/CVb1NDvGpZXIdsfmUuStpooIC4G5u4VDKUl03KXEKUtdjhSErQceABiLOlBzM1HLMRwKEMsHaqhXb+nZHEAqQtCyipoOaoCX5YyJKmPk3GBh+agRQBWMiyZ3N0GxurjZ4G6paBoKN0t1bcyCNYi2rp+A5N1+L6LiFgz3FqFeW4iJj1NSSftxYbSKxdF6bK5KrE3vm1U3a+I2IMsoEebvRrlxMb6fKQMiwq1JkDnt9JzyUAPws5YpkBVhShCLY3SIxM7TAd7gHmbnagtOLVu4Mw05VYm4pF3jkmC8PRpJu1ffvHw15SbzmzcxWhQynUq4pkj3BztrankBAwOOZkjusHAGLAS2WNCCGIZpBIeaczP5sb5Kz+QAuJFiTjClsU02nxiwRSWzhYxgLOw9rwfIAEZW0cXdm3l2qyaoVSLkVZRaW8R13fTbbquk5N2rf/rtr35/t8T8cKfelGeBTUOFTuks7tpzPzUtDioF5qwajnCVZr2TsahM6rtpGZakau8HSeNTOADhJkG6JwQGxlEdaHUW9UGnAbAMimcnsLc0pzm8wZuHnZrCBGYs0uJlXeYm1vtXBrpbfeMNkLr/5vf/6l/80+EwneOo/x93L6cloC3DorFZr6q0phJEgihEpKUIk8BdWarAmJgHHWCWSj/4Nj7sHj5/C0Baq0Fma8FhTA2wM7M2vU+4TyeYQ1vd0b4v2VhS6NE4PQDi+o34ZHRH/a//lKfvHpfWzylBrx7eOsgEQ3TcTf46+8uv2ry/+81uWB4hWEpzQiljFHd4Kh4hEjZ3fMpgKhPAIataNSTdHQ4vynmcs1K0QS5dyz6HAEAYKMpxEnhLehiKR5RBc7mTdDjIuOBtwNHc6tKsyinEB5SDEJA8+jR4LjH/9//2//g/vJn7Zgutom/dE5SUByzzaSx/g8g3Szmr12PxijCqR45cBK16hEUErFcsKWFaE0JmDlUiJtzvD0kju7bm3roce98ufjYBALdWc2i2UOzqoA6Ye0o65HF0LUjhEkuYWQsTb/tJHV9DxJMMr9IUjkf77/6Jp4dx3vKnOh7YG3AxQKiwVRnI2
SAeKvsmVnNJ1uAJ2QABHTCGcD2aDTCsFsUpKk0hopAoxlGQFNbI8Gjvt1h7PgECDVQ2ybkNvh/VDNa0hZ3K/bk4SgEoBmgIU8S95ox46fBdk+Hwisvg2lDvX6TeqOta0xtbMjZB1CXGnR5HlUf+7S3q4lWajaxgUl8cuem+1tK/bQGPhtCgRyCdI4CKbGzHmlJRTIedhC0S/iG34BM4INyaGyA6TRUv3laElThbYXo81DYL0kIdT+ChUVQAPwJvkntNO9QfDnzUc/yPf/jrAzTdwmA37dcIyeM4DDId7idfSmmtMaJhmBZkTiP+s8AD1GGk1NX3rLmXd7sGqgYyIepe2UpRGYc7iVOjGvT9IuJPIAAQHIassEHu0/Fvi0WUOGpwWrLLJC5jmv64Q0XQi/VDUsIY+9z0vJu15Gj/88Mxxukx1Yt1em07SJLjq2/GOy1tmWzkQeapFd0fUIqp+ePZbQGSR64JHKVyRy45KoJInsMjVIeEoYXFafE6pKTyqr4pTdj4fi+BTyIAMBx2JjbkAYeh935rY+i9DRSgJUxDNviCBQv7qWkn8XzGD7vKlsYXg/2nZYaM+1bd1/YUm/1HCkUPr35NPEp7VfK9aasPbWAaND228lhna6RwGAa0tQV8pgBtKIGht2PLHKZRB2txNnjZy5Bf5entcbkUrP0MAvTcKwecfSMzOLQ8pXHviwNu7pgFNRqXoIS4NgOKiw9Ieb/Xf36cZ827YrGFb64MIJqm/Z7F0TiehypN/D8N+yzfJOpylh901iQLAnNAUkVM5oCiSWaEILLlGFJO+2xHG9ocIbus9Sv9y19qj03+HAKEl29PrzhoouLum2OtCPtfXvDFSf88pNaOOtx/fYJYatBlQBMzpHHQV9NoRq3fv27e4vFxxj4djqW20tCdIYkAh2nc7+538lWlJEluhqXuY1mOf0Rkr2Gxx9ICpyQGzR5UKag0GXSYcsIZ5bXEXf7VN7+z7/70vUT103kZTw/tIe8SjuX9iPGncYDXckxtrAPA3SiMwHmcWLLoEKEkztIb8Uc4cnUgrMFaBqZ0P71eFh/uSp5t8dPx4VRWW0gTnLp7+fLFqxjaQ+Uw7NJ0DyxLcgfo5sPQzz/QCEUwYkaEaPVoopZDmlQ2q0xHYRzTw/k8h6hmnWTgy+/Pb619wCH4NCEIW35I44tyx3Te73MFopQxyi5wEkBa2RNw5HNIVGcKpBRKSZRJo72cHxbsDvb18VhLXbO8iDwdMEO//u39/X0blvDSZr+fhpSXWcTd3cwBwUBoAyyWEoTXCJPSHFRDy2HIc25WH2JJE2Yf46BTjPfIbdKyVGs/wxLsFPD2etjnN+N908OQyGBtZkhOtFpkpxBzM9eYHSAbs47D8DKzSbSidp5qyN7u+IYlMofaLOTwza/K39r4298N8tXjHXIsZs1lfCF3OdxdrNQGCMwQFCrH4girNeBDCydrSDUfkSk2t5YyhTK9fLH79ZsqzSSL12pbkf5NO7RP1ALws8kp3avOHLMY4AUqaYixnCyAc/Ww5gERUD3hBfR+EK1jTRn21g94eDHw9d153DeILw/V01e//s0PrzF+/Rv44Rj3j2Myj5bqvKsyONyrmwMOA+YQ8Z5TAor7VKs6JUmoN9LCq5XIWYZ8d3//2999/eb1Y9ijo7YViRJ4P/jhcwgQaEEZ6ym+XSxpA8IgjP0BbzSHL7Z4AEHuQHWlHDAlYZEqlnBOcRpeLnz0ZtPXSffDD384xe5f/ctxGXWXX5qfWyzDNFFpR7gfpupAGjwArAh3WIRQQclhtj+lGiJqLoglzLwUd2bNU2q7X7/4N38eX75e4tvWM/hJSXCPNZ3x0zkgPGbIBPmhFlUGos51HA75yGqWqy9BqqgOQhEivqoitadFVWvVfl00TvNxlv2//np8df5Dfpt++19qnH5dX/1W9un4qCrTYeRjOntdlsMMST44eOaAgkAwG8UhwBBu3JWdIIbwZfeDW2Nzm42Ju
cnh+Je4+/rxhzdvXp821EmGSbqVxs8iAAJteSPztJTaU7FtXnQcF5npqOEKSUlz3iVpjmYavvjrF5JjCcmP/sp4/x9OS7y8/3/+xqe3etr/5l/en5bxK/ntK91PlHjbmFVyaVa8eRHB4AMGeInRq1I6vuNBgwXDgzoO4+PkfnxMHGKJcFBbfTwe//SXt2/+87ffVtty9dL4zeH8UL18pgzou+DNcmBrvRTHy2m2BUVkaIlBSapjfpFE0IpKqQLBSUZJJ2BsX5/GP2qzYfrX//T749vpm9/c/+bXC3HguB/OBW/GokHNJlqa8WHJyDq49HPGWhWHUbwfiiYIWwxmieHZuVvGhtxUHQHzNp++wx/f/p/f//VYWukagBxe/f7td+flEb2o/NMJgPA4lSXDmwNBn9++HcaDTJy9it4jaR4GQXG0EhhdTIQDBeJw6K++4w+LpK//q9+/Gh/y/qu7FyjTMSoOdcyDMpoPpAzQEks2AgswqgFADRcHKIBVIkidw6XwtBvEpaUk7jo0eFBBe1jw73749s9vivnZvIcjRe+TPJ7DOk74GQRAoHnTtRgo0E6v5ZtDJqdSqwoogoZzAwSOswyD6DAqgMUFX313+uMbG/Zff/VrOfOcfzXNGM8pZdc0oMVQDIBGSmN5m5s3QU3igx2O02lceuadQ4CmvS6LNVddyqHBjREeaZeqgzZbSfif2tuHalaPrQNw3uY3L318cz7XRgQ/hwAIuhvXsvnw5btxfpsrhoOcrRtbzRYDNKmUlLxhTjvcC8wn/+EPf8Mdv9L4Y56/16++8xo7r9MyDQPstIzB5moMZDuc43EnGBBtHtrhtJ9ZIwwQIAE+wMMDbElbWrJJDpnDkSWjui/Lo+hjtGbiXUkHGOXtt22nOo21mX8eB6xI3lY9EX58/Ntxr/e7PKMcHGZLP4QGEJUkKEG7k1llD//ub9+dq+6Wh+8e6+kh351ft72P3/qvxh0UGJAZ0YAqwsEeIS5NzEMQu3PWsdZGgQdVIVicRQLF1LPtqpq2lBNZ2eZSW0NQ6Q7W1f0Or8fvg655KrUF4vMIsGE4AALwevz2RXnRtJ0twZt5wDVniGpKImf33Vgm1Xuvh3//7TzP6u3F6/35/Pabv55qSvvjDsMdJhwxNRGxcKJpjZIWRoEdUtJFbGzZBAhHcwSVRZyDRQ4K207Ds49UTbEULrWW5vAkYcEtNBRuyyOTmiQVDfu8LbBS4IKu2PG1V47fpYdmzc0ChDArUkonZIHUEQJJy37553I+W9V6eoij8WFAs9co2O2wR8ZRSgUAzyYPAuZ9ppVhcSDPWLz3dQw3Q8DUzEloUDKHpMHB610ZPdXjg7W5eMBM6Y7LgRPhVs7HlJ2qBD+bA57Qwss8iPJuOJW2orsyjOMAWfswO04TANwfy7G9CRlGYf1+dDmUE1yXtDun3ZT1OMtQ27gAqAREidHQ67jhaEMD3KHuFuEiBo9AVSRLSaqJ1JwGewGmWVozD1i4bGlTnXHdrEWK3iUbH8BIPmPo3e9+M7XF9DA+jsP9q30JujdR52yBkr+28evpxZT8+M9DtJqLTMOLYfr97+/+h+P9716YP1QdEBh4tGr527PjNGv1uR3u/VRfZRkAQWH6Pw1Vmpcqpj4QCHeZIkFTVqj9v978+bXvfvubN//h3/7pbTFXr+5PmrWNL14oJ5nfnE5LfKYQfHeEVWnBPO5eLpIyco4yxyCy5FP2hsETTE9gnKbwHDokuEOGlARlKTSBtDEMYTNxyt7tJ0GMljOIftZeSTGeIvtYxtzYNKIfq1JFVYaG0blU1JbqW9//6uyLGcs7lXMkRUOZVcmfRYAbiD3q+fV96DAchoMe8tjQwmgIMXVPSZKgLtwVP9ukzd39gDoY5uZeT/sJEAV4B5tRRxMRzOLjeTSVyqnWXRH4ySsxmVZRi1a1H9IrJEZyJ5BQLsdHLzEWR77zpeIt38kaDqimGBKzg5+rBt+lQLTTG3k1HTjeL
2peH6sDTHlQwRAZw84HdVtqcdOciw8i9PmrilMxc1WHAiqmmmc5Jfe0pObFxpezj/BkRwdEXDDYIPfidvYG1GOz7BGDR8wptca7h4fTydPRrR5bMubvansSDwpAhwHTXn8w/1lCkLcU8OUh5x1fjAcrx7I0D80SLcbIo4xtP82NzV67tEnTUaa6Ty2Jx1/zrrXTY8wLJl3uYDA9pvLgxYKljS6wRz2UquYQQnTXfNRBW8LRIw5HW6DW8iC1soUcl9O8ZDFrx6W+Wao/vps8HG4q+X4/mznw+WqwBzIul27nh+ntpPe8X+ZTdU9VPLk2zSmn5CWCQIGMp/PS4HvB3TmHlbPGom2ukLKToOLo1c8KODySjekHczmGnENcAG1qQ5vYWppTnesCN+/NgxsMVVFdd7ZMWv/czq/n2tpN5L1XtLfl+CKl+3NUB7+QEER4m893Neqr12Br1aGeTRZJ3wxpeJPldHDxt3pPotTYyx1cdCymLQrf7tpBAT3vkAA9ytIPwzVZahVZklv2mgwwal6m00tVX5Z5bos045wgNUUJZ4Emz0RZ/vpdOT1Wf1o4yADg81vK8mbFBz6TAJe+KRt1w+pymt82t2oawUEgYbI0d2/q0FM6oc72TdEyRoaLwIvUDFCy+nLnZVBFjPUN3N3hGB8lsSVBUamCmqsHUs3z3TImz0lqM59NRYplwmt4U6uZYm/m7x8el/qhDMHw8hhWU1mE9rlb4Go9XMKbXublNCYrC8iBA0SjGTg7JZVl8KUeyz6sCdtQd2P1F2M+nabFECUobTTF48vh5MmXNEulLrkYS2USOCTc0BqPPqbIRXie51IWlohkQyarMQxRYn7L8/J4nJcPFM6w56ObHYmmxJeyAwDA5/P8EE0jVElPma1KpNkEovYQ+Xw2w1dLdW338rCXnBf47O4OaTtZxmWnsGzw6kPTJtQR0qimay6FW+SHYRnrNKucj0s4qcikhDwi5iqOgjiW5dSrhp+s/2K7e3NX9PN4Pk8L8L0XAfhyft3m00GBMRFREQg+ZnUHi9tjLT74yarUoaTd6T4fsy/9/uo2YdmX8SwCuCAahja1JUz7Od7ucHWo2ZnRapqkKIKMARNVZJlhc7GAST3Otf5oL+lAPzQRa0ucn80Ba1OkCNqZ8Th+M4p6GhvdLFXRU8sWFbQ2Nw2bS1MJlGE6WdKT+/F89PO+UHZAbRMckN3ZxgXwyGgjSlRMMNPo6RHeoo42VneNRFLRsCzHaqU2Q6At1ewmB/Oy9C0fIXpGTm86/nMJcJP07jNsdD8yD3rKM6qE6xA+J28RVhcfcj0LUdiGkjUAlSwQ+EPb4Ty8GUqdxhMg46Obp0Xg2SOTYjQ1HxaEuoOeQiljGyjW1LicH49Lc2vuEX7pGfBuWtg1KbX3/iN/hh2Ay8UuFpHN7v4okT352YXJ4a7ezurSWqkMP5avtSU7aSoae8hX07eLhChySbUeR4HvcfKAs9YcaP20bVMMUSM8jEjg4GPloHsKX4+llsfl+FDN3SJ+vA3OhQLXapqg/hgBnlV2d7EwOmfBSrS/pmkXKUwRjpQnt1ZSRFssFB61DIDgLNBxqi/e5mmOaIZFfFE87n2RPD1idEF1cfjOFhcJuHlqHr1jCgQcOSAxva7NTqf5WJr1lksfrRS6HhGynZv0czjg/Tt5tUhBUzK1+YS7F9YsDCxhlDEzjHV0KeLt4YWaQXeP8NbEE1xFHndLhkBEdixNTWAxLO5B8RZuruHhlqLtJSN2KjTU1orhUiv10TlfDZeVG36MAB+9zFMBE1u7+AiGGefHrEIuNZLe+V9VqdOLY5SkPsNfp8dRlF5f5PPw/cMe+2WYc16ajzWX4XFplrg4j+aVE6MyTT6frYYcxsPXL/43BsLYjg7oax3cjo9vH5Zq9q7a+9DE12MM+yIYcP98SOyDWEowokUT0FtIhoHREtRRRFhlPblc3MVbpQPezKDuTU570ETPYqINg+NBKGlK0hDyq
O4l4s1CbYtDHxNUXBxzq8tcm9mzDtfrM78dP8MbvDrDl6sGI2jeul5USk3uyVFtkaDSrXlRAwTiMwaDH5YKx5xahmYvU8knExkVQW33zUPksIgN84lsWA5vvaIJanhCUrKU8w/H+txq8XgXHfgZBPjg/Xpb1c4bourVQRfAgkCVHp6as+QEzHugFoebPyCqS12qlliE55w1788lSm4vBRzPS+O0VM4y+9xakEt4PxT9FMdT+YmukU8neJl68GcCIk+u+84rApICEbF1U+HaeiQ3JeEzU1Yvi8rQzgpvLilLnVwK3DzrOXttzSmQJWoaj0RBxNyqV4nazD0INKvt+ewP4CoFiPg57vBFljxVmVsCMClDhkVOHlvPSXd3qQpIP/qgqFefEBGzY/IU84HIi9BPNoTL3AqrTS2Fu4kYjFGrz1ZbNe/h3ohnHzLd5xcicRHjP8cZukEE3xWH0SmgotFz92lDhQxLAA4Uz8uY/LG+APDq+zYPw2mPhce7jLM3Vy5pEbN981rhzQUVtZUoZj4vpfQGSdu6P2H16+xuawe+yBZ4bwpBgCkR4SIeaoLBGVAE0ALulQtRZ2CSVuYhYC443rHVEhWotVQJGtPiKFLTsnh7OLdmj62Z+9ad6DOnu9arEV9MBnxw+OpusN6pURTTEiPcpM7qjCOdw6Soy53t03CMuhO1E7FQDFLHyCPvfkAtSz2fyjKX81K9+hbj+NxpXdf/mfkBz75RWACQoK6tqCj9WHgy0FjnMZXHF0Nu9SjDmaoxVC8miCws490w+f7FsTScj/WhlWWp2+o/e/HrzNbGyL8gBwQR4RG6FciTAVT2lGAlTOA45nn4T7vJ3kz3UWQciX38uYqoWxXogPDyeJrnH07nY7Pa3sM4PntyffzMwMjHb4Iwi0axSBYFnhgO71BM+DyWUc+Hxb8f652/Hsc0jXepyrSYlhRa5qj+GDbxD9Ue385LsUt87wtMjtu/qxp8XrOJT71LW3QItCHUsUhEKjDzoUdzQiQ1P8yhrfF0vnt1/zL7pEIsxccKfXuKFlXbm9LOSzP7QOXf541LiVoHRX45GRBu7koa2ZgcXlzcHSqGpl7bkHCUek6SGh/by/HFyVOdhakl83k5zWWxKGf3Zl/u4V+n1yGR7g7/EgwAhLkiBeCCJuYWPdMDvQMLmHrJnO6Gurx+0CZtbE2SYYnj29NxOReLVhH+M1TeB6a1gpixHmf7xS78/p28mWjRqhqCIojKGt4bsJx2IbroOVdJo7bl8W/MWVord8eT2fz4cFxKKRZueMb52Z82bjz54P8F02xtqOso5FYAAAAASUVORK5CYII=\n"},"metadata":{}}]}]} -------------------------------------------------------------------------------- /4_classification/4_1_fnn/transform.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | 4 | 5 | ''' 6 | img : 整形対象の画像 7 | channel_mean: 各次元のデータセット全体の平均, [入力次元] 8 | channel_std : 各次元のデータセット全体の標準偏差, [入力次元] 9 | ''' 10 | def transform(img: Image.Image, channel_mean: np.ndarray=None, 11 | channel_std: np.ndarray=None): 12 | # PIL to numpy array, PyTorchでの処理用に単精度少数を使用 13 | img = np.asarray(img, dtype='float32') 14 | 15 | # [32, 32, 3]の画像を3072次元のベクトルに平坦化 16 | x = 
img.flatten() 17 | 18 | # 各次元をデータセット全体の平均と標準偏差で正規化 19 | if channel_mean is not None and channel_std is not None: 20 | x = (x - channel_mean) / channel_std 21 | 22 | return x 23 | -------------------------------------------------------------------------------- /4_classification/4_1_fnn/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | 4 | from torch.utils.data import Dataset 5 | 6 | 7 | ''' 8 | データセットを分割するための2つの排反なインデックス集合を生成する関数 9 | dataset : 分割対象のデータセット 10 | ratio : 1つ目のセットに含めるデータ量の割合 11 | random_seed: 分割結果を不変にするためのシード 12 | ''' 13 | def generate_subset(dataset: Dataset, ratio: float, 14 | random_seed: int=0): 15 | # サブセットの大きさを計算 16 | size = int(len(dataset) * ratio) 17 | 18 | indices = list(range(len(dataset))) 19 | 20 | # 二つのセットに分ける前にシャッフル 21 | random.seed(random_seed) 22 | random.shuffle(indices) 23 | 24 | # セット1とセット2のサンプルのインデックスに分割 25 | indices1, indices2 = indices[:size], indices[size:] 26 | 27 | return indices1, indices2 28 | 29 | 30 | ''' 31 | 各次元のデータセット全体の平均と標準偏差を計算する関数 32 | dataset: 平均と標準偏差を計算する対象のPyTorchのデータセット 33 | ''' 34 | def get_dataset_statistics(dataset: Dataset): 35 | data = [] 36 | for i in range(len(dataset)): 37 | # 3072次元のベクトルを取得 38 | img_flat = dataset[i][0] 39 | data.append(img_flat) 40 | # 第0軸を追加して第0軸でデータを連結 41 | data = np.stack(data) 42 | 43 | # データ全体の平均と標準偏差を計算 44 | channel_mean = np.mean(data, axis=0) 45 | channel_std = np.std(data, axis=0) 46 | 47 | return channel_mean, channel_std 48 | -------------------------------------------------------------------------------- /4_classification/4_2_cnn/eval.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import torch 4 | from torch import nn 5 | from torch.utils.data import Dataset 6 | 7 | 8 | ''' 9 | data_loader: 評価に使うデータを読み込むデータローダ 10 | model : 評価対象のモデル 11 | loss_func : 目的関数 12 | ''' 13 | def evaluate(data_loader: Dataset, model: 
nn.Module, 14 | loss_func: Callable): 15 | model.eval() 16 | 17 | losses = [] 18 | preds = [] 19 | for x, y in data_loader: 20 | with torch.no_grad(): 21 | x = x.to(model.get_device()) 22 | y = y.to(model.get_device()) 23 | 24 | y_pred = model(x) 25 | 26 | losses.append(loss_func(y_pred, y, reduction='none')) 27 | preds.append(y_pred.argmax(dim=1) == y) 28 | 29 | loss = torch.cat(losses).mean() 30 | accuracy = torch.cat(preds).float().mean() 31 | 32 | return loss, accuracy 33 | -------------------------------------------------------------------------------- /4_classification/4_2_cnn/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | import matplotlib.pyplot as plt 3 | from sklearn.manifold import TSNE 4 | 5 | import torch 6 | from torch import nn 7 | from torch.utils.data import Dataset 8 | 9 | 10 | ''' 11 | データセットを分割するための2つの排反なインデックス集合を生成する関数 12 | dataset : 分割対象のデータセット 13 | ratio : 1つ目のセットに含めるデータ量の割合 14 | random_seed: 分割結果を不変にするためのシード 15 | ''' 16 | def generate_subset(dataset: Dataset, ratio: float, 17 | random_seed: int=0): 18 | # サブセットの大きさを計算 19 | size = int(len(dataset) * ratio) 20 | 21 | indices = list(range(len(dataset))) 22 | 23 | # 二つのセットに分ける前にシャッフル 24 | random.seed(random_seed) 25 | random.shuffle(indices) 26 | 27 | # セット1とセット2のサンプルのインデックスに分割 28 | indices1, indices2 = indices[:size], indices[size:] 29 | 30 | return indices1, indices2 31 | 32 | 33 | ''' 34 | t-SNEのプロット関数 35 | data_loader: プロット対象のデータを読み込むデータローダ 36 | model : 特徴量抽出に使うモデル 37 | num_samples: t-SNEでプロットするサンプル数 38 | ''' 39 | def plot_t_sne(data_loader: Dataset, model: nn.Module, 40 | num_samples: int): 41 | model.eval() 42 | 43 | # t-SNEのためにデータを整形 44 | x = [] 45 | y = [] 46 | for imgs, labels in data_loader: 47 | with torch.no_grad(): 48 | imgs = imgs.to(model.get_device()) 49 | 50 | # 特徴量の抽出 51 | embeddings = model(imgs, return_embed=True) 52 | 53 | x.append(embeddings.to('cpu')) 54 | y.append(labels.clone()) 55 | 56 | x = torch.cat(x) 
57 | y = torch.cat(y) 58 | 59 | # NumPy配列に変換 60 | x = x.numpy() 61 | y = y.numpy() 62 | 63 | # 指定サンプル数だけ抽出 64 | x = x[:num_samples] 65 | y = y[:num_samples] 66 | 67 | # t-SNEを適用 68 | t_sne = TSNE(n_components=2, random_state=0) 69 | x_reduced = t_sne.fit_transform(x) 70 | 71 | # 各ラベルの色とマーカーを設定 72 | cmap = plt.get_cmap("tab10") 73 | markers = ['4', '8', 's', 'p', '*', 'h', 'H', '+', 'x', 'D'] 74 | 75 | # データをプロット 76 | plt.figure(figsize=(20, 15)) 77 | for i, cls in enumerate(data_loader.dataset.classes): 78 | plt.scatter(x_reduced[y == i, 0], x_reduced[y == i, 1], 79 | c=[cmap(i / len(data_loader.dataset.classes))], 80 | marker=markers[i], s=500, alpha=0.6, label=cls) 81 | plt.axis('off') 82 | plt.legend(bbox_to_anchor=(1, 1), fontsize=24, framealpha=0) 83 | plt.show() 84 | -------------------------------------------------------------------------------- /4_classification/4_3_transformer/eval.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import torch 4 | from torch import nn 5 | from torch.utils.data import Dataset 6 | 7 | 8 | ''' 9 | data_loader: 評価に使うデータを読み込むデータローダ 10 | model : 評価対象のモデル 11 | loss_func : 目的関数 12 | ''' 13 | def evaluate(data_loader: Dataset, model: nn.Module, 14 | loss_func: Callable): 15 | model.eval() 16 | 17 | losses = [] 18 | preds = [] 19 | for x, y in data_loader: 20 | with torch.no_grad(): 21 | x = x.to(model.get_device()) 22 | y = y.to(model.get_device()) 23 | 24 | y_pred = model(x) 25 | 26 | losses.append(loss_func(y_pred, y, reduction='none')) 27 | preds.append(y_pred.argmax(dim=1) == y) 28 | 29 | loss = torch.cat(losses).mean() 30 | accuracy = torch.cat(preds).float().mean() 31 | 32 | return loss, accuracy 33 | -------------------------------------------------------------------------------- /4_classification/4_3_transformer/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | import matplotlib.pyplot as 
plt 3 | from sklearn.manifold import TSNE 4 | 5 | import torch 6 | from torch import nn 7 | from torch.utils.data import Dataset 8 | 9 | 10 | ''' 11 | データセットを分割するための2つの排反なインデックス集合を生成する関数 12 | dataset : 分割対象のデータセット 13 | ratio : 1つ目のセットに含めるデータ量の割合 14 | random_seed: 分割結果を不変にするためのシード 15 | ''' 16 | def generate_subset(dataset: Dataset, ratio: float, 17 | random_seed: int=0): 18 | # サブセットの大きさを計算 19 | size = int(len(dataset) * ratio) 20 | 21 | indices = list(range(len(dataset))) 22 | 23 | # 二つのセットに分ける前にシャッフル 24 | random.seed(random_seed) 25 | random.shuffle(indices) 26 | 27 | # セット1とセット2のサンプルのインデックスに分割 28 | indices1, indices2 = indices[:size], indices[size:] 29 | 30 | return indices1, indices2 31 | 32 | 33 | ''' 34 | 各チャネルのデータセット全体の平均と標準偏差を計算する関数 35 | dataset: 平均と標準偏差を計算する対象のPyTorchのデータセット 36 | ''' 37 | def get_dataset_statistics(dataset: Dataset): 38 | data = [] 39 | for i in range(len(dataset)): 40 | # [チャネル数, 高さ, 幅]の画像を取得 41 | img = dataset[i][0] 42 | data.append(img) 43 | data = torch.stack(data) 44 | 45 | # 各チャネルの平均と標準偏差を計算 46 | channel_mean = data.mean(dim=(0, 2, 3)) 47 | channel_std = data.std(dim=(0, 2, 3)) 48 | 49 | return channel_mean, channel_std 50 | 51 | 52 | ''' 53 | t-SNEのプロット関数 54 | data_loader: プロット対象のデータを読み込むデータローダ 55 | model : 特徴量抽出に使うモデル 56 | num_samples: t-SNEでプロットするサンプル数 57 | ''' 58 | def plot_t_sne(data_loader: Dataset, model: nn.Module, 59 | num_samples: int): 60 | model.eval() 61 | 62 | # t-SNEのためにデータを整形 63 | x = [] 64 | y = [] 65 | for imgs, labels in data_loader: 66 | with torch.no_grad(): 67 | imgs = imgs.to(model.get_device()) 68 | 69 | # 特徴量の抽出 70 | embeddings = model(imgs, return_embed=True) 71 | 72 | x.append(embeddings.to('cpu')) 73 | y.append(labels.clone()) 74 | 75 | x = torch.cat(x) 76 | y = torch.cat(y) 77 | 78 | # NumPy配列に変換 79 | x = x.numpy() 80 | y = y.numpy() 81 | 82 | # 指定サンプル数だけ抽出 83 | x = x[:num_samples] 84 | y = y[:num_samples] 85 | 86 | # t-SNEを適用 87 | t_sne = TSNE(n_components=2, random_state=0) 88 | x_reduced = 
t_sne.fit_transform(x) 89 | 90 | # 各ラベルの色とマーカーを設定 91 | cmap = plt.get_cmap("tab10") 92 | markers = ['4', '8', 's', 'p', '*', 'h', 'H', '+', 'x', 'D'] 93 | 94 | # データをプロット 95 | plt.figure(figsize=(20, 15)) 96 | for i, cls in enumerate(data_loader.dataset.classes): 97 | plt.scatter(x_reduced[y == i, 0], x_reduced[y == i, 1], 98 | c=[cmap(i / len(data_loader.dataset.classes))], 99 | marker=markers[i], s=500, alpha=0.6, label=cls) 100 | plt.axis('off') 101 | plt.legend(bbox_to_anchor=(1, 1), fontsize=24, framealpha=0) 102 | plt.show() 103 | -------------------------------------------------------------------------------- /4_classification/4_4_technique/eval.py: -------------------------------------------------------------------------------- 1 | from typing import Callable 2 | 3 | import torch 4 | from torch import nn 5 | from torch.utils.data import Dataset 6 | 7 | 8 | ''' 9 | data_loader: 評価に使うデータを読み込むデータローダ 10 | model : 評価対象のモデル 11 | loss_func : 目的関数 12 | ''' 13 | def evaluate(data_loader: Dataset, model: nn.Module, 14 | loss_func: Callable): 15 | model.eval() 16 | 17 | losses = [] 18 | preds = [] 19 | for x, y in data_loader: 20 | with torch.no_grad(): 21 | x = x.to(model.get_device()) 22 | y = y.to(model.get_device()) 23 | 24 | y_pred = model(x) 25 | 26 | losses.append(loss_func(y_pred, y, reduction='none')) 27 | preds.append(y_pred.argmax(dim=1) == y) 28 | 29 | loss = torch.cat(losses).mean() 30 | accuracy = torch.cat(preds).float().mean() 31 | 32 | return loss, accuracy 33 | -------------------------------------------------------------------------------- /4_classification/4_4_technique/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | 5 | class BasicBlock(nn.Module): 6 | ''' 7 | ResNet18における残差ブロック 8 | in_channels : 入力チャネル数 9 | out_channels: 出力チャネル数 10 | stride : 畳み込み層のストライド 11 | ''' 12 | def __init__(self, in_channels: int, out_channels: int, 13 | stride: int=1): 14 | 
super().__init__() 15 | 16 | ''''' 残差接続 ''''' 17 | self.conv1 = nn.Conv2d(in_channels, out_channels, 18 | kernel_size=3, stride=stride, 19 | padding=1, bias=False) 20 | self.bn1 = nn.BatchNorm2d(out_channels) 21 | self.conv2 = nn.Conv2d(out_channels, out_channels, 22 | kernel_size=3, padding=1, bias=False) 23 | self.bn2 = nn.BatchNorm2d(out_channels) 24 | self.relu = nn.ReLU(inplace=True) 25 | '''''''''''''''''''' 26 | 27 | # strideが1より大きいときにスキップ接続と残差接続の高さと幅を 28 | # 合わせるため、別途畳み込み演算を用意 29 | self.downsample = None 30 | if stride > 1: 31 | self.downsample = nn.Sequential( 32 | nn.Conv2d(in_channels, out_channels, kernel_size=1, 33 | stride=stride, bias=False), 34 | nn.BatchNorm2d(out_channels) 35 | ) 36 | 37 | ''' 38 | 順伝播関数 39 | x: 入力, [バッチサイズ, 入力チャネル数, 高さ, 幅] 40 | ''' 41 | def forward(self, x: torch.Tensor): 42 | ''''' 残差接続 ''''' 43 | out = self.conv1(x) 44 | out = self.bn1(out) 45 | out = self.relu(out) 46 | out = self.conv2(out) 47 | out = self.bn2(out) 48 | '''''''''''''''''''' 49 | 50 | if self.downsample is not None: 51 | x = self.downsample(x) 52 | 53 | # 残差写像と恒等写像の要素毎の和を計算 54 | out += x 55 | 56 | out = self.relu(out) 57 | 58 | return out 59 | -------------------------------------------------------------------------------- /4_classification/4_4_technique/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | import matplotlib.pyplot as plt 3 | from sklearn.manifold import TSNE 4 | 5 | import torch 6 | from torch import nn 7 | from torch.utils.data import Dataset 8 | 9 | 10 | ''' 11 | データセットを分割するための2つの排反なインデックス集合を生成する関数 12 | dataset : 分割対象のデータセット 13 | ratio : 1つ目のセットに含めるデータ量の割合 14 | random_seed: 分割結果を不変にするためのシード 15 | ''' 16 | def generate_subset(dataset: Dataset, ratio: float, 17 | random_seed: int=0): 18 | # サブセットの大きさを計算 19 | size = int(len(dataset) * ratio) 20 | 21 | indices = list(range(len(dataset))) 22 | 23 | # 二つのセットに分ける前にシャッフル 24 | random.seed(random_seed) 25 | random.shuffle(indices) 26 | 27 | # 
セット1とセット2のサンプルのインデックスに分割 28 | indices1, indices2 = indices[:size], indices[size:] 29 | 30 | return indices1, indices2 31 | 32 | 33 | ''' 34 | 各チャネルのデータセット全体の平均と標準偏差を計算する関数 35 | dataset: 平均と標準偏差を計算する対象のPyTorchのデータセット 36 | ''' 37 | def get_dataset_statistics(dataset: Dataset): 38 | data = [] 39 | for i in range(len(dataset)): 40 | # [チャネル数, 高さ, 幅]の画像を取得 41 | img = dataset[i][0] 42 | data.append(img) 43 | data = torch.stack(data) 44 | 45 | # 各チャネルの平均と標準偏差を計算 46 | channel_mean = data.mean(dim=(0, 2, 3)) 47 | channel_std = data.std(dim=(0, 2, 3)) 48 | 49 | return channel_mean, channel_std 50 | 51 | 52 | ''' 53 | t-SNEのプロット関数 54 | data_loader: プロット対象のデータを読み込むデータローダ 55 | model : 特徴量抽出に使うモデル 56 | num_samples: t-SNEでプロットするサンプル数 57 | ''' 58 | def plot_t_sne(data_loader: Dataset, model: nn.Module, 59 | num_samples: int): 60 | model.eval() 61 | 62 | # t-SNEのためにデータを整形 63 | x = [] 64 | y = [] 65 | for imgs, labels in data_loader: 66 | with torch.no_grad(): 67 | imgs = imgs.to(model.get_device()) 68 | 69 | # 特徴量の抽出 70 | embeddings = model(imgs, return_embed=True) 71 | 72 | x.append(embeddings.to('cpu')) 73 | y.append(labels.clone()) 74 | 75 | x = torch.cat(x) 76 | y = torch.cat(y) 77 | 78 | # NumPy配列に変換 79 | x = x.numpy() 80 | y = y.numpy() 81 | 82 | # 指定サンプル数だけ抽出 83 | x = x[:num_samples] 84 | y = y[:num_samples] 85 | 86 | # t-SNEを適用 87 | t_sne = TSNE(n_components=2, random_state=0) 88 | x_reduced = t_sne.fit_transform(x) 89 | 90 | # 各ラベルの色とマーカーを設定 91 | cmap = plt.get_cmap("tab10") 92 | markers = ['4', '8', 's', 'p', '*', 'h', 'H', '+', 'x', 'D'] 93 | 94 | # データをプロット 95 | plt.figure(figsize=(20, 15)) 96 | for i, cls in enumerate(data_loader.dataset.classes): 97 | plt.scatter(x_reduced[y == i, 0], x_reduced[y == i, 1], 98 | c=[cmap(i / len(data_loader.dataset.classes))], 99 | marker=markers[i], s=500, alpha=0.6, label=cls) 100 | plt.axis('off') 101 | plt.legend(bbox_to_anchor=(1, 1), fontsize=24, framealpha=0) 102 | plt.show() 103 | 
-------------------------------------------------------------------------------- /5_object_detection/5_1_object_detection_basics.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"mount_file_id":"1RF8xXYnvOx0RIK0LxkgnvvlSU2w7MaZB","authorship_tag":"ABX9TyM7LnUMWQDOpd7IZ9s5WRDz"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"gpuClass":"premium"},"cells":[{"cell_type":"markdown","source":["#Pythonで学ぶ画像認識 第5章 画像分類\n","##第5.1節 物体検出の基礎"],"metadata":{"id":"8pGi7CLCXU01"}},{"cell_type":"markdown","source":["###モジュールのインポート"],"metadata":{"id":"Ye6RlSk6XffT"}},{"cell_type":"code","execution_count":1,"metadata":{"id":"-T2qBvOoXIol","executionInfo":{"status":"ok","timestamp":1673049994947,"user_tz":480,"elapsed":4258,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}}},"outputs":[],"source":["import torch"]},{"cell_type":"markdown","source":["###外接矩形の表現方法を変換する関数"],"metadata":{"id":"qpiTy4fWXlHq"}},{"cell_type":"code","source":["'''\n","矩形をxmin, ymin, xmax, ymaxからx, y, width, heightに変換する関数\n","boxes: 矩形集合, [矩形数 (任意の軸数), 4 (xmin, ymin, xmax, ymax)]\n","'''\n","def convert_to_xywh(boxes: torch.Tensor):\n"," wh = boxes[..., 2:] - boxes[..., :2]\n"," xy = boxes[..., :2] + wh / 2\n"," boxes = torch.cat((xy, wh), dim=-1)\n","\n"," return boxes\n","\n","'''\n","矩形をx, y, width, heightからxmin, ymin, xmax, ymaxに変換\n","boxes: 外接集合, [矩形数 (任意の軸数), 4 (x, y, width, height)]\n","'''\n","def convert_to_xyxy(boxes: torch.Tensor):\n"," xymin = boxes[..., :2] - boxes[..., 2:] / 2\n"," xymax = boxes[..., 2:] + xymin\n"," boxes = torch.cat((xymin, xymax), dim=-1)\n","\n"," return boxes"],"metadata":{"id":"oL0CWEFKXoNB","executionInfo":{"status":"ok","timestamp":1673049997362,"user_tz":480,"elapsed":189,"user":{"displayName":"Taro 
Python","userId":"05683982304053166545"}}},"execution_count":2,"outputs":[]},{"cell_type":"markdown","source":["###IoUを計算する関数"],"metadata":{"id":"dpjp9i_U6Qxr"}},{"cell_type":"code","source":["'''\n","boxes1: 矩形集合, [矩形数, 4 (xmin, ymin, xmax, ymax)]\n","boxes2: 矩形集合, [矩形数, 4 (xmin, ymin, xmax, ymax)]\n","'''\n","def calc_iou(boxes1: torch.Tensor, boxes2: torch.Tensor):\n"," # 第1軸をunsqueezeし、ブロードキャストを利用することで\n"," # [矩形数, 1, 2] と[矩形数, 2]の演算結果が\n"," # [boxes1の矩形数, boxes2の矩形数, 2] となる\n"," \n"," # 積集合の左上の座標を取得\n"," intersect_left_top = torch.maximum(\n"," boxes1[:, :2].unsqueeze(1), boxes2[:, :2])\n"," # 積集合の右下の座標を取得\n"," intersect_right_bottom = torch.minimum(\n"," boxes1[:, 2:].unsqueeze(1), boxes2[:, 2:])\n","\n"," # 積集合の幅と高さを算出し、面積を計算\n"," intersect_width_height = (\n"," intersect_right_bottom - intersect_left_top).clamp(min=0)\n"," intersect_areas = intersect_width_height.prod(dim=2)\n","\n"," # それぞれの矩形の面積を計算\n"," areas1 = (boxes1[:, 2] - boxes1[:, 0]) * \\\n"," (boxes1[:, 3] - boxes1[:, 1])\n"," areas2 = (boxes2[:, 2] - boxes2[:, 0]) * \\\n"," (boxes2[:, 3] - boxes2[:, 1])\n","\n"," # 和集合の面積を計算\n"," union_areas = areas1.unsqueeze(1) + areas2 - intersect_areas\n","\n"," ious = intersect_areas / union_areas\n","\n"," return ious, union_areas"],"metadata":{"id":"jckB6g-Z6UWA","executionInfo":{"status":"ok","timestamp":1673050000307,"user_tz":480,"elapsed":186,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}}},"execution_count":3,"outputs":[]}]} -------------------------------------------------------------------------------- /5_object_detection/5_2_dataset.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"authorship_tag":"ABX9TyNJEA3Ap62BI7hQMCy7ubOe"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"markdown","source":["#Pythonで学ぶ画像認識 第5章 画像分類\n","##第5.2節 
データセットの準備"],"metadata":{"id":"DjLIh1ZEV3UD"}},{"cell_type":"markdown","source":["###モジュールのインポート"],"metadata":{"id":"jKbIqBVPWpR-"}},{"cell_type":"code","source":["import random\n","import numpy as np\n","from PIL import Image\n","from typing import Sequence, Callable\n","\n","import torch\n","import torchvision\n","import torchvision.transforms as T\n","import torchvision.transforms.functional as F"],"metadata":{"id":"aFpcGXs2WrZa"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###物体検出用COCOデータセットを扱うCocoDetectionクラス"],"metadata":{"id":"IFSVkl3o8sQS"}},{"cell_type":"code","source":["class CocoDetection(torchvision.datasets.CocoDetection):\n"," '''\n"," 物体検出用COCOデータセット読み込みクラス\n"," img_directory: 画像ファイルが保存されてるディレクトリへのパス\n"," anno_file : アノテーションファイルのパス\n"," transform : データ拡張と整形を行うクラスインスタンス\n"," '''\n"," def __init__(self, img_directory: str, anno_file: str,\n"," transform: Callable=None):\n"," super().__init__(img_directory, anno_file)\n","\n"," self.transform = transform\n","\n"," # カテゴリーIDに欠番があるため、それを埋めてクラスIDを割り当て\n"," self.classes = []\n"," # 元々のクラスIDと新しく割り当てたクラスIDを相互に変換する\n"," # ためのマッピングを保持\n"," self.coco_to_pred = {}\n"," self.pred_to_coco = {}\n"," for i, category_id in enumerate(\n"," sorted(self.coco.cats.keys())):\n"," self.classes.append(self.coco.cats[category_id]['name'])\n"," self.coco_to_pred[category_id] = i\n"," self.pred_to_coco[i] = category_id\n","\n"," '''\n"," データ取得関数\n"," idx: サンプルを指すインデックス\n"," '''\n"," def __getitem__(self, idx: int):\n"," img, target = super().__getitem__(idx)\n","\n"," # 親クラスのコンストラクタでself.idsに画像IDが\n"," # 格納されているのでそれを取得\n"," img_id = self.ids[idx]\n","\n"," # 物体の集合を一つの矩形でアノテーションしているものを除外\n"," target = [obj for obj in target\n"," if 'iscrowd' not in obj or obj['iscrowd'] == 0]\n","\n"," # 学習用に当該画像に映る物体のクラスIDと矩形を取得\n"," # クラスIDはコンストラクタで新規に割り当てたIDに変換\n"," classes = torch.tensor([self.coco_to_pred[obj['category_id']]\n"," for obj in target], dtype=torch.int64)\n"," boxes = torch.tensor([obj['bbox'] for obj 
in target],\n"," dtype=torch.float32)\n","\n"," # 矩形が0個のとき、boxes.shape == [0]となってしまうため、\n"," # 第1軸に4を追加して軸数と第2軸の次元を合わせる\n"," if boxes.shape[0] == 0:\n"," boxes = torch.zeros((0, 4))\n","\n"," width, height = img.size\n"," # xmin, ymin, width, height -> xmin, ymin, xmax, ymax\n"," boxes[:, 2:] += boxes[:, :2]\n","\n"," # 矩形が画像領域内に収まるように値をクリップ\n"," boxes[:, ::2] = boxes[:, ::2].clamp(min=0, max=width)\n"," boxes[:, 1::2] = boxes[:, 1::2].clamp(min=0, max=height)\n","\n"," # 学習のための正解データを用意\n"," # クラスIDや矩形など渡すものが多岐にわたるため、辞書で用意\n"," target = {\n"," 'image_id': torch.tensor(img_id, dtype=torch.int64),\n"," 'classes': classes,\n"," 'boxes': boxes,\n"," 'size': torch.tensor((width, height), dtype=torch.int64),\n"," 'orig_size': torch.tensor((width, height),\n"," dtype=torch.int64),\n"," 'orig_img': torch.tensor(np.asarray(img))\n"," }\n","\n"," # データ拡張と整形\n"," if self.transform is not None:\n"," img, target = self.transform(img, target)\n","\n"," return img, target\n","\n"," '''\n"," モデルで予測されたクラスIDからCOCOのクラスIDに変換する関数\n"," label: 予測されたクラスID\n"," '''\n"," def to_coco_label(self, label: int):\n"," return self.pred_to_coco[label]"],"metadata":{"id":"L7q0Ov4T8x2E"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###無作為に画像を水平反転するクラス"],"metadata":{"id":"ZrXXSpiJ-2I2"}},{"cell_type":"code","source":["class RandomHorizontalFlip:\n"," '''\n"," 無作為に画像を水平反転するクラス\n"," prob: 水平反転する確率\n"," '''\n"," def __init__(self, prob: float=0.5):\n"," self.prob = prob\n","\n"," '''\n"," 無作為に画像を水平反転する関数\n"," img : 水平反転する画像\n"," target: 物体検出用のラベルを持つ辞書\n"," '''\n"," def __call__(self, img: Image, target: dict):\n"," if random.random() < self.prob:\n"," # 画像の水平反転\n"," img = F.hflip(img)\n","\n"," # 正解矩形をx軸方向に反転\n"," # xmin, xmaxは水平反転すると大小が逆転し、\n"," # width - xmax, width - xminとなる\n"," width = img.size[0]\n"," target['boxes'][:, [0, 2]] = width - \\\n"," target['boxes'][:, [2, 0]]\n","\n"," return img, 
target"],"metadata":{"id":"LNwH1Ztw-5ym"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###無作為に画像を切り抜くクラス"],"metadata":{"id":"36KyArnm_rMw"}},{"cell_type":"code","source":["class RandomSizeCrop:\n"," '''\n"," 無作為に画像を切り抜くクラス\n"," scale: 切り抜き前に対する切り抜き後の画像面積の下限と上限\n"," ratio: 切り抜き後の画像のアスペクト比の下限と上限\n"," '''\n"," def __init__(self, scale: Sequence[float],\n"," ratio: Sequence[float]):\n"," self.scale = scale\n"," self.ratio = ratio\n","\n"," '''\n"," 無作為に画像を切り抜く関数\n"," img : 切り抜きをする画像\n"," target: 物体検出用のラベルを持つ辞書\n"," '''\n"," def __call__(self, img: Image, target: dict):\n"," width, height = img.size\n","\n"," # 切り抜く領域の左上の座標と幅および高さを取得\n"," # 切り抜く領域はscaleとratioの下限と上限に従う\n"," top, left, cropped_height, cropped_width = \\\n"," T.RandomResizedCrop.get_params(\n"," img, self.scale, self.ratio)\n","\n"," # 左上の座標と幅および高さで指定した領域を切り抜き\n"," img = F.crop(img, top, left, cropped_height, cropped_width)\n","\n"," # 原点がx = left, y = topに移動し、合わせて矩形の座標も移動\n"," target['boxes'][:, ::2] -= left\n"," target['boxes'][:, 1::2] -= top\n","\n"," # 矩形の座標が切り抜き後に領域外に出る場合は座標をクリップ\n"," target['boxes'][:, ::2] = \\\n"," target['boxes'][:, ::2].clamp(min=0)\n"," target['boxes'][:, 1::2] = \\\n"," target['boxes'][:, 1::2].clamp(min=0)\n"," target['boxes'][:, ::2] = \\\n"," target['boxes'][:, ::2].clamp(max=cropped_width)\n"," target['boxes'][:, 1::2] = \\\n"," target['boxes'][:, 1::2].clamp(max=cropped_height)\n","\n"," # 幅と高さが0より大きくなる(矩形の面積が0でない)矩形のみ保持\n"," keep = (target['boxes'][:, 2] > target['boxes'][:, 0]) & \\\n"," (target['boxes'][:, 3] > target['boxes'][:, 1])\n"," target['classes'] = target['classes'][keep]\n"," target['boxes'] = target['boxes'][keep]\n","\n"," # 切り抜き後の画像の大きさを保持\n"," target['size'] = torch.tensor(\n"," [cropped_width, cropped_height], dtype=torch.int64)\n","\n"," return img, 
target"],"metadata":{"id":"TKYc2IUr_ulN"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###無作為に画像をリサイズするクラス"],"metadata":{"id":"EcyzJYyuAwHf"}},{"cell_type":"code","source":["class RandomResize:\n"," '''\n"," 無作為に画像をアスペクト比を保持してリサイズするクラス\n"," min_sizes: 短辺の長さの候補、この中から無作為に長さを抽出\n"," max_size : 長辺の長さの最大値\n"," '''\n"," def __init__(self, min_sizes: Sequence[int], max_size: int):\n"," self.min_sizes = min_sizes\n"," self.max_size = max_size\n","\n"," '''\n"," リサイズ後の短辺と長辺を計算する関数\n"," min_side: 短辺の長さ\n"," max_side: 長辺の長さ\n"," target : 目標となる短辺の長さ\n"," '''\n"," def _get_target_size(self, min_side: int, max_side:int,\n"," target: int):\n"," # アスペクト比を保持して短辺をtargetに合わせる\n"," max_side = int(max_side * target / min_side)\n"," min_side = target\n","\n"," # 長辺がmax_sizeを超えている場合、\n"," # アスペクト比を保持して長辺をmax_sizeに合わせる\n"," if max_side > self.max_size:\n"," min_side = int(min_side * self.max_size / max_side)\n"," max_side = self.max_size\n","\n"," return min_side, max_side\n","\n"," '''\n"," 無作為に画像をリサイズする関数\n"," img : リサイズする画像\n"," target: 物体検出用のラベルを持つ辞書\n"," '''\n"," def __call__(self, img: Image, target: dict):\n"," # 短辺の長さを候補の中から無作為に抽出\n"," min_size = random.choice(self.min_sizes)\n","\n"," width, height = img.size\n","\n"," # リサイズ後の大きさを取得\n"," # 幅と高さのどちらが短辺であるかで場合分け\n"," if width < height:\n"," resized_width, resized_height = self._get_target_size(\n"," width, height, min_size)\n"," else:\n"," resized_height, resized_width = self._get_target_size(\n"," height, width, min_size)\n","\n"," # 指定した大きさに画像をリサイズ\n"," img = F.resize(img, (resized_height, resized_width))\n","\n"," # 正解矩形をリサイズ前後のスケールに合わせて変更\n"," ratio = resized_width / width\n"," target['boxes'] *= ratio\n","\n"," # リサイズ後の画像の大きさを保持\n"," target['size'] = torch.tensor(\n"," [resized_width, resized_height], dtype=torch.int64)\n","\n"," return img, 
target"],"metadata":{"id":"R8NnhNStAwP9"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###PIL画像をテンソルに変換するクラス"],"metadata":{"id":"lO8NX061BvQu"}},{"cell_type":"code","source":["class ToTensor:\n"," '''\n"," PIL画像をテンソルに変換する関数\n"," img : テンソルに変換する画像\n"," target: 物体検出用のラベルを持つ辞書\n"," '''\n"," def __call__(self, img: Image, target: dict):\n"," img = F.to_tensor(img)\n","\n"," return img, target"],"metadata":{"id":"RTqRQrXXByPR"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###画像を標準化するクラス"],"metadata":{"id":"w-EAGmbbCEpJ"}},{"cell_type":"code","source":["class Normalize:\n"," '''\n"," 画像を標準化するクラス\n"," mean: R, G, Bチャネルそれぞれの平均値\n"," std : R, G, Bチャネルそれぞれの標準偏差\n"," '''\n"," def __init__(self, mean: Sequence[float], std: Sequence[float]):\n"," self.mean = mean\n"," self.std = std\n","\n"," '''\n"," 画像を標準化する関数\n"," img : 標準化する画像\n"," target: 物体検出用のラベルを持つ辞書\n"," '''\n"," def __call__(self, img: torch.Tensor, target: dict):\n"," img = F.normalize(img, mean=self.mean, std=self.std)\n","\n"," return img, target"],"metadata":{"id":"rfHi21e9CG3a"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###データ整形・拡張をまとめるクラス"],"metadata":{"id":"8JdeTcxyCycQ"}},{"cell_type":"code","source":["class Compose:\n"," '''\n"," データ整形・拡張をまとめて適用するためのクラス\n"," transforms: データ整形・拡張のクラスインスタンスのシーケンス\n"," '''\n"," def __init__(self, transforms: Sequence[Callable]):\n"," self.transforms = transforms\n","\n"," '''\n"," データ整形・拡張を連続して適用する関数\n"," img : データ整形・拡張する画像\n"," target: 物体検出用のラベルを持つ辞書\n"," '''\n"," def __call__(self, img: Image, target: dict):\n"," for transform in self.transforms:\n"," img, target = transform(img, target)\n","\n"," return img, target"],"metadata":{"id":"DirPK2mNC2K-"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["###2つのデータ拡張から無作為にどちらかを選択して適用する関数"],"metadata":{"id":"UTy9XBvxD0Av"}},{"cell_type":"code","source":["class RandomSelect:\n"," '''\n"," 2種類のデータ拡張を受け取り、無作為にどちらかを適用するクラス\n"," 
transform1: データ拡張1\n"," transform2: データ拡張2\n"," prob : データ拡張1が適用される確率\n"," '''\n"," def __init__(self, transform1: Callable, transform2: Callable,\n"," prob: float=0.5):\n"," self.transform1 = transform1\n"," self.transform2 = transform2\n"," self.prob = prob\n","\n"," '''\n"," データ拡張を無作為に選択して適用する関数\n"," img : データ整形・拡張する画像\n"," target: 物体検出用のラベルを持つ辞書\n"," '''\n"," def __call__(self, img: Image, target: dict):\n"," if random.random() < self.prob:\n"," return self.transform1(img, target)\n","\n"," return self.transform2(img, target)"],"metadata":{"id":"i5APzNHuD0k1"},"execution_count":null,"outputs":[]}]} -------------------------------------------------------------------------------- /5_object_detection/5_3_retinanet/dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Callable 3 | 4 | import torch 5 | import torchvision 6 | 7 | 8 | class CocoDetection(torchvision.datasets.CocoDetection): 9 | ''' 10 | 物体検出用COCOデータセット読み込みクラス 11 | img_directory: 画像ファイルが保存されてるディレクトリへのパス 12 | anno_file : アノテーションファイルのパス 13 | transform : データ拡張と整形を行うクラスインスタンス 14 | ''' 15 | def __init__(self, img_directory: str, anno_file: str, 16 | transform: Callable=None): 17 | super().__init__(img_directory, anno_file) 18 | 19 | self.transform = transform 20 | 21 | # カテゴリーIDに欠番があるため、それを埋めてクラスIDを割り当て 22 | self.classes = [] 23 | # 元々のクラスIDと新しく割り当てたクラスIDを相互に変換する 24 | # ためのマッピングを保持 25 | self.coco_to_pred = {} 26 | self.pred_to_coco = {} 27 | for i, category_id in enumerate( 28 | sorted(self.coco.cats.keys())): 29 | self.classes.append(self.coco.cats[category_id]['name']) 30 | self.coco_to_pred[category_id] = i 31 | self.pred_to_coco[i] = category_id 32 | 33 | ''' 34 | データ取得関数 35 | idx: サンプルを指すインデックス 36 | ''' 37 | def __getitem__(self, idx: int): 38 | img, target = super().__getitem__(idx) 39 | 40 | # 親クラスのコンストラクタでself.idsに画像IDが 41 | # 格納されているのでそれを取得 42 | img_id = self.ids[idx] 43 | 44 | # 物体の集合を一つの矩形でアノテーションしているものを除外 45 | 
target = [obj for obj in target 46 | if 'iscrowd' not in obj or obj['iscrowd'] == 0] 47 | 48 | # 学習用に当該画像に映る物体のクラスIDと矩形を取得 49 | # クラスIDはコンストラクタで新規に割り当てたIDに変換 50 | classes = torch.tensor([self.coco_to_pred[obj['category_id']] 51 | for obj in target], dtype=torch.int64) 52 | boxes = torch.tensor([obj['bbox'] for obj in target], 53 | dtype=torch.float32) 54 | 55 | # 矩形が0個のとき、boxes.shape == [0]となってしまうため、 56 | # 第1軸に4を追加して軸数と第2軸の次元を合わせる 57 | if boxes.shape[0] == 0: 58 | boxes = torch.zeros((0, 4)) 59 | 60 | width, height = img.size 61 | # xmin, ymin, width, height -> xmin, ymin, xmax, ymax 62 | boxes[:, 2:] += boxes[:, :2] 63 | 64 | # 矩形が画像領域内に収まるように値をクリップ 65 | boxes[:, ::2] = boxes[:, ::2].clamp(min=0, max=width) 66 | boxes[:, 1::2] = boxes[:, 1::2].clamp(min=0, max=height) 67 | 68 | # 学習のための正解データを用意 69 | # クラスIDや矩形など渡すものが多義にわたるため、辞書で用意 70 | target = { 71 | 'image_id': torch.tensor(img_id, dtype=torch.int64), 72 | 'classes': classes, 73 | 'boxes': boxes, 74 | 'size': torch.tensor((width, height), dtype=torch.int64), 75 | 'orig_size': torch.tensor((width, height), 76 | dtype=torch.int64), 77 | 'orig_img': torch.tensor(np.asarray(img)) 78 | } 79 | 80 | # データ拡張と整形 81 | if self.transform is not None: 82 | img, target = self.transform(img, target) 83 | 84 | return img, target 85 | 86 | ''' 87 | モデルで予測されたクラスIDからCOCOのクラスIDに変換する関数 88 | label: 予測されたクラスID 89 | ''' 90 | def to_coco_label(self, label: int): 91 | return self.pred_to_coco[label] 92 | -------------------------------------------------------------------------------- /5_object_detection/5_3_retinanet/transform.py: -------------------------------------------------------------------------------- 1 | import random 2 | from PIL import Image 3 | from typing import Sequence, Callable 4 | 5 | import torch 6 | import torchvision.transforms as T 7 | import torchvision.transforms.functional as F 8 | 9 | 10 | class RandomHorizontalFlip: 11 | ''' 12 | 無作為に画像を水平反転するクラス 13 | prob: 水平反転する確率 14 | ''' 15 | def __init__(self, prob: 
float=0.5): 16 | self.prob = prob 17 | 18 | ''' 19 | 無作為に画像を水平反転する関数 20 | img : 水平反転する画像 21 | target: 物体検出用のラベルを持つ辞書 22 | ''' 23 | def __call__(self, img: Image, target: dict): 24 | if random.random() < self.prob: 25 | # 画像の水平反転 26 | img = F.hflip(img) 27 | 28 | # 正解矩形をx軸方向に反転 29 | # xmin, xmaxは水平反転すると大小が逆転し、 30 | # width - xmax, width - xminとなる 31 | width = img.size[0] 32 | target['boxes'][:, [0, 2]] = width - \ 33 | target['boxes'][:, [2, 0]] 34 | 35 | return img, target 36 | 37 | 38 | class RandomSizeCrop: 39 | ''' 40 | 無作為に画像を切り抜くクラス 41 | scale: 切り抜き前に対する切り抜き後の画像面積の下限と上限 42 | ratio: 切り抜き後の画像のアスペクト比の下限と上限 43 | ''' 44 | def __init__(self, scale: Sequence[float], 45 | ratio: Sequence[float]): 46 | self.scale = scale 47 | self.ratio = ratio 48 | 49 | ''' 50 | 無作為に画像を切り抜く関数 51 | img : 切り抜きをする画像 52 | target: 物体検出用のラベルを持つ辞書 53 | ''' 54 | def __call__(self, img: Image, target: dict): 55 | width, height = img.size 56 | 57 | # 切り抜く領域の左上の座標と幅および高さを取得 58 | # 切り抜く領域はscaleとratioの下限と上限に従う 59 | top, left, cropped_height, cropped_width = \ 60 | T.RandomResizedCrop.get_params( 61 | img, self.scale, self.ratio) 62 | 63 | # 左上の座標と幅および高さで指定した領域を切り抜き 64 | img = F.crop(img, top, left, cropped_height, cropped_width) 65 | 66 | # 原点がx = left, y = topに移動し、合わせて矩形の座標も移動 67 | target['boxes'][:, ::2] -= left 68 | target['boxes'][:, 1::2] -= top 69 | 70 | # 矩形の座標が切り抜き後に領域外に出る場合は座標をクリップ 71 | target['boxes'][:, ::2] = \ 72 | target['boxes'][:, ::2].clamp(min=0) 73 | target['boxes'][:, 1::2] = \ 74 | target['boxes'][:, 1::2].clamp(min=0) 75 | target['boxes'][:, ::2] = \ 76 | target['boxes'][:, ::2].clamp(max=cropped_width) 77 | target['boxes'][:, 1::2] = \ 78 | target['boxes'][:, 1::2].clamp(max=cropped_height) 79 | 80 | # 幅と高さが0より大きくなる(矩形の面積が0でない)矩形のみ保持 81 | keep = (target['boxes'][:, 2] > target['boxes'][:, 0]) & \ 82 | (target['boxes'][:, 3] > target['boxes'][:, 1]) 83 | target['classes'] = target['classes'][keep] 84 | target['boxes'] = target['boxes'][keep] 85 | 86 | # 切り抜き後の画像の大きさを保持 87 | 
target['size'] = torch.tensor( 88 | [cropped_width, cropped_height], dtype=torch.int64) 89 | 90 | return img, target 91 | 92 | 93 | class RandomResize: 94 | ''' 95 | 無作為に画像をアスペクト比を保持してリサイズするクラス 96 | min_sizes: 短辺の長さの候補、この中から無作為に長さを抽出 97 | max_size : 長辺の長さの最大値 98 | ''' 99 | def __init__(self, min_sizes: Sequence[int], max_size: int): 100 | self.min_sizes = min_sizes 101 | self.max_size = max_size 102 | 103 | ''' 104 | リサイズ後の短辺と長辺を計算する関数 105 | min_side: 短辺の長さ 106 | max_side: 長辺の長さ 107 | target : 目標となる短辺の長さ 108 | ''' 109 | def _get_target_size(self, min_side: int, max_side:int, 110 | target: int): 111 | # アスペクト比を保持して短辺をtargetに合わせる 112 | max_side = int(max_side * target / min_side) 113 | min_side = target 114 | 115 | # 長辺がmax_sizeを超えている場合、 116 | # アスペクト比を保持して長辺をmax_sizeに合わせる 117 | if max_side > self.max_size: 118 | min_side = int(min_side * self.max_size / max_side) 119 | max_side = self.max_size 120 | 121 | return min_side, max_side 122 | 123 | ''' 124 | 無作為に画像をリサイズする関数 125 | img : リサイズする画像 126 | target: 物体検出用のラベルを持つ辞書 127 | ''' 128 | def __call__(self, img: Image, target: dict): 129 | # 短辺の長さを候補の中から無作為に抽出 130 | min_size = random.choice(self.min_sizes) 131 | 132 | width, height = img.size 133 | 134 | # リサイズ後の大きさを取得 135 | # 幅と高さのどちらが短辺であるかで場合分け 136 | if width < height: 137 | resized_width, resized_height = self._get_target_size( 138 | width, height, min_size) 139 | else: 140 | resized_height, resized_width = self._get_target_size( 141 | height, width, min_size) 142 | 143 | # 指定した大きさに画像をリサイズ 144 | img = F.resize(img, (resized_height, resized_width)) 145 | 146 | # 正解矩形をリサイズ前後のスケールに合わせて変更 147 | ratio = resized_width / width 148 | target['boxes'] *= ratio 149 | 150 | # リサイズ後の画像の大きさを保持 151 | target['size'] = torch.tensor( 152 | [resized_width, resized_height], dtype=torch.int64) 153 | 154 | return img, target 155 | 156 | 157 | class ToTensor: 158 | ''' 159 | PIL画像をテンソルに変換する関数 160 | img : テンソルに変換する画像 161 | target: 物体検出用のラベルを持つ辞書 162 | ''' 163 | def __call__(self, img: Image, 
target: dict): 164 | img = F.to_tensor(img) 165 | 166 | return img, target 167 | 168 | 169 | class Normalize: 170 | ''' 171 | 画像を標準化するクラス 172 | mean: R, G, Bチャネルそれぞれの平均値 173 | std : R, G, Bチャネルそれぞれの標準偏差 174 | ''' 175 | def __init__(self, mean: Sequence[float], std: Sequence[float]): 176 | self.mean = mean 177 | self.std = std 178 | 179 | ''' 180 | 画像を標準化する関数 181 | img : 標準化する画像 182 | target: 物体検出用のラベルを持つ辞書 183 | ''' 184 | def __call__(self, img: torch.Tensor, target: dict): 185 | img = F.normalize(img, mean=self.mean, std=self.std) 186 | 187 | return img, target 188 | 189 | 190 | class Compose: 191 | ''' 192 | データ整形・拡張をまとめて適用するためのクラス 193 | transforms: データ整形・拡張のクラスインスタンスのシーケンス 194 | ''' 195 | def __init__(self, transforms: Sequence[Callable]): 196 | self.transforms = transforms 197 | 198 | ''' 199 | データ整形・拡張を連続して適用する関数 200 | img : データ整形・拡張する画像 201 | target: 物体検出用のラベルを持つ辞書 202 | ''' 203 | def __call__(self, img: Image, target: dict): 204 | for transform in self.transforms: 205 | img, target = transform(img, target) 206 | 207 | return img, target 208 | 209 | 210 | class RandomSelect: 211 | ''' 212 | 2種類のデータ拡張を受け取り、無作為にどちらかを適用するクラス 213 | transform1: データ拡張1 214 | transform2: データ拡張2 215 | prob : データ拡張1が適用される確率 216 | ''' 217 | def __init__(self, transform1: Callable, transform2: Callable, 218 | prob: float=0.5): 219 | self.transform1 = transform1 220 | self.transform2 = transform2 221 | self.prob = prob 222 | 223 | ''' 224 | データ拡張を無作為に選択して適用する関数 225 | img : データ整形・拡張する画像 226 | target: 物体検出用のラベルを持つ辞書 227 | ''' 228 | def __call__(self, img: Image, target: dict): 229 | if random.random() < self.prob: 230 | return self.transform1(img, target) 231 | 232 | return self.transform2(img, target) 233 | -------------------------------------------------------------------------------- /5_object_detection/5_3_retinanet/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import torch 4 | from torch.utils.data import Dataset 5 
| import torchvision 6 | 7 | 8 | ''' 9 | データセットを分割するための2つの排反なインデックス集合を生成する関数 10 | dataset : 分割対称のデータセット 11 | ratio : 1つ目のセットに含めるデータ量の割合 12 | random_seed: 分割結果を不変にするためのシード 13 | ''' 14 | def generate_subset(dataset: Dataset, ratio: float, 15 | random_seed: int=0): 16 | # サブセットの大きさを計算 17 | size = int(len(dataset) * ratio) 18 | 19 | indices = list(range(len(dataset))) 20 | 21 | # 二つのセットに分ける前にシャッフル 22 | random.seed(random_seed) 23 | random.shuffle(indices) 24 | 25 | # セット1とセット2のサンプルのインデックスに分割 26 | indices1, indices2 = indices[:size], indices[size:] 27 | 28 | return indices1, indices2 29 | 30 | 31 | ''' 32 | 矩形をxmin, ymin, xmax, ymaxからx, y, width, heightに変換する関数 33 | boxes: 矩形集合, [矩形数 (任意の軸数), 4 (xmin, ymin, xmax, ymax)] 34 | ''' 35 | def convert_to_xywh(boxes: torch.Tensor): 36 | wh = boxes[..., 2:] - boxes[..., :2] 37 | xy = boxes[..., :2] + wh / 2 38 | boxes = torch.cat((xy, wh), dim=-1) 39 | 40 | return boxes 41 | 42 | 43 | ''' 44 | 矩形をx, y, width, heightからxmin, ymin, xmax, ymaxに変換 45 | boxes: 外接集合, [矩形数 (任意の軸数), 4 (x, y, width, height)] 46 | ''' 47 | def convert_to_xyxy(boxes: torch.Tensor): 48 | xymin = boxes[..., :2] - boxes[..., 2:] / 2 49 | xymax = boxes[..., 2:] + xymin 50 | boxes = torch.cat((xymin, xymax), dim=-1) 51 | 52 | return boxes 53 | 54 | 55 | ''' 56 | boxes1: 矩形集合, [矩形数, 4 (xmin, ymin, xmax, ymax)] 57 | boxes2: 矩形集合, [矩形数, 4 (xmin, ymin, xmax, ymax)] 58 | ''' 59 | def calc_iou(boxes1: torch.Tensor, boxes2: torch.Tensor): 60 | # 第1軸をunsqueezeし、ブロードキャストを利用することで 61 | # [矩形数, 1, 2] と[矩形数, 2]の演算結果が 62 | # [boxes1の矩形数, boxes2の矩形数, 2] となる 63 | 64 | # 積集合の左上の座標を取得 65 | intersect_left_top = torch.maximum( 66 | boxes1[:, :2].unsqueeze(1), boxes2[:, :2]) 67 | # 積集合の右下の座標を取得 68 | intersect_right_bottom = torch.minimum( 69 | boxes1[:, 2:].unsqueeze(1), boxes2[:, 2:]) 70 | 71 | # 積集合の幅と高さを算出し、面積を計算 72 | intersect_width_height = ( 73 | intersect_right_bottom - intersect_left_top).clamp(min=0) 74 | intersect_areas = intersect_width_height.prod(dim=2) 75 | 76 | # 
それぞれの矩形の面積を計算 77 | areas1 = (boxes1[:, 2] - boxes1[:, 0]) * \ 78 | (boxes1[:, 3] - boxes1[:, 1]) 79 | areas2 = (boxes2[:, 2] - boxes2[:, 0]) * \ 80 | (boxes2[:, 3] - boxes2[:, 1]) 81 | 82 | # 和集合の面積を計算 83 | union_areas = areas1.unsqueeze(1) + areas2 - intersect_areas 84 | 85 | ious = intersect_areas / union_areas 86 | 87 | return ious, union_areas 88 | -------------------------------------------------------------------------------- /5_object_detection/5_4_detr/dataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from typing import Callable 3 | 4 | import torch 5 | import torchvision 6 | 7 | 8 | class CocoDetection(torchvision.datasets.CocoDetection): 9 | ''' 10 | 物体検出用COCOデータセット読み込みクラス 11 | img_directory: 画像ファイルが保存されてるディレクトリへのパス 12 | anno_file : アノテーションファイルのパス 13 | transform : データ拡張と整形を行うクラスインスタンス 14 | ''' 15 | def __init__(self, img_directory: str, anno_file: str, 16 | transform: Callable=None): 17 | super().__init__(img_directory, anno_file) 18 | 19 | self.transform = transform 20 | 21 | # カテゴリーIDに欠番があるため、それを埋めてクラスIDを割り当て 22 | self.classes = [] 23 | # 元々のクラスIDと新しく割り当てたクラスIDを相互に変換する 24 | # ためのマッピングを保持 25 | self.coco_to_pred = {} 26 | self.pred_to_coco = {} 27 | for i, category_id in enumerate( 28 | sorted(self.coco.cats.keys())): 29 | self.classes.append(self.coco.cats[category_id]['name']) 30 | self.coco_to_pred[category_id] = i 31 | self.pred_to_coco[i] = category_id 32 | 33 | ''' 34 | データ取得関数 35 | idx: サンプルを指すインデックス 36 | ''' 37 | def __getitem__(self, idx: int): 38 | img, target = super().__getitem__(idx) 39 | 40 | # 親クラスのコンストラクタでself.idsに画像IDが 41 | # 格納されているのでそれを取得 42 | img_id = self.ids[idx] 43 | 44 | # 物体の集合を一つの矩形でアノテーションしているものを除外 45 | target = [obj for obj in target 46 | if 'iscrowd' not in obj or obj['iscrowd'] == 0] 47 | 48 | # 学習用に当該画像に映る物体のクラスIDと矩形を取得 49 | # クラスIDはコンストラクタで新規に割り当てたIDに変換 50 | classes = torch.tensor([self.coco_to_pred[obj['category_id']] 51 | for obj in target], 
dtype=torch.int64) 52 | boxes = torch.tensor([obj['bbox'] for obj in target], 53 | dtype=torch.float32) 54 | 55 | # 矩形が0個のとき、boxes.shape == [0]となってしまうため、 56 | # 第1軸に4を追加して軸数と第2軸の次元を合わせる 57 | if boxes.shape[0] == 0: 58 | boxes = torch.zeros((0, 4)) 59 | 60 | width, height = img.size 61 | # xmin, ymin, width, height -> xmin, ymin, xmax, ymax 62 | boxes[:, 2:] += boxes[:, :2] 63 | 64 | # 矩形が画像領域内に収まるように値をクリップ 65 | boxes[:, ::2] = boxes[:, ::2].clamp(min=0, max=width) 66 | boxes[:, 1::2] = boxes[:, 1::2].clamp(min=0, max=height) 67 | 68 | # 学習のための正解データを用意 69 | # クラスIDや矩形など渡すものが多義にわたるため、辞書で用意 70 | target = { 71 | 'image_id': torch.tensor(img_id, dtype=torch.int64), 72 | 'classes': classes, 73 | 'boxes': boxes, 74 | 'size': torch.tensor((width, height), dtype=torch.int64), 75 | 'orig_size': torch.tensor((width, height), 76 | dtype=torch.int64), 77 | 'orig_img': torch.tensor(np.asarray(img)) 78 | } 79 | 80 | # データ拡張と整形 81 | if self.transform is not None: 82 | img, target = self.transform(img, target) 83 | 84 | return img, target 85 | 86 | ''' 87 | モデルで予測されたクラスIDからCOCOのクラスIDに変換する関数 88 | label: 予測されたクラスID 89 | ''' 90 | def to_coco_label(self, label: int): 91 | return self.pred_to_coco[label] 92 | -------------------------------------------------------------------------------- /5_object_detection/5_4_detr/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from torchvision.ops.misc import FrozenBatchNorm2d 5 | 6 | 7 | class BasicBlock(nn.Module): 8 | ''' 9 | ResNet18における残差ブロック 10 | in_channels : 入力チャネル数 11 | out_channels: 出力チャネル数 12 | stride : 畳み込み層のストライド 13 | ''' 14 | def __init__(self, in_channels: int, out_channels: int, 15 | stride: int=1): 16 | super().__init__() 17 | 18 | ''''' 残差接続 ''''' 19 | self.conv1 = nn.Conv2d(in_channels, out_channels, 20 | kernel_size=3, stride=stride, 21 | padding=1, bias=False) 22 | self.bn1 = FrozenBatchNorm2d(out_channels) 23 | self.conv2 = 
nn.Conv2d(out_channels, out_channels, 24 | kernel_size=3, padding=1, bias=False) 25 | self.bn2 = FrozenBatchNorm2d(out_channels) 26 | self.relu = nn.ReLU(inplace=True) 27 | '''''''''''''''''''' 28 | 29 | # strideが1より大きいときにスキップ接続と残差接続の高さと幅を 30 | # 合わせるため、別途畳み込み演算を用意 31 | self.downsample = None 32 | if stride > 1: 33 | self.downsample = nn.Sequential( 34 | nn.Conv2d(in_channels, out_channels, kernel_size=1, 35 | stride=stride, bias=False), 36 | FrozenBatchNorm2d(out_channels) 37 | ) 38 | 39 | ''' 40 | 順伝播関数 41 | x: 入力, [バッチサイズ, チャネル数, 高さ, 幅] 42 | ''' 43 | def forward(self, x: torch.Tensor): 44 | ''''' 残差接続 ''''' 45 | out = self.conv1(x) 46 | out = self.bn1(out) 47 | out = self.relu(out) 48 | out = self.conv2(out) 49 | out = self.bn2(out) 50 | '''''''''''''''''''' 51 | 52 | if self.downsample is not None: 53 | x = self.downsample(x) 54 | 55 | # 残差写像と恒等写像の要素毎の和を計算 56 | out += x 57 | 58 | out = self.relu(out) 59 | 60 | return out 61 | 62 | 63 | class ResNet18(nn.Module): 64 | ''' 65 | ResNet18モデル 66 | ''' 67 | def __init__(self): 68 | super().__init__() 69 | 70 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, 71 | padding=3, bias=False) 72 | self.bn1 = FrozenBatchNorm2d(64) 73 | self.relu = nn.ReLU(inplace=True) 74 | 75 | self.max_pool = nn.MaxPool2d(kernel_size=3, 76 | stride=2, padding=1) 77 | 78 | self.layer1 = nn.Sequential( 79 | BasicBlock(64, 64), 80 | BasicBlock(64, 64), 81 | ) 82 | self.layer2 = nn.Sequential( 83 | BasicBlock(64, 128, stride=2), 84 | BasicBlock(128, 128), 85 | ) 86 | self.layer3 = nn.Sequential( 87 | BasicBlock(128, 256, stride=2), 88 | BasicBlock(256, 256), 89 | ) 90 | self.layer4 = nn.Sequential( 91 | BasicBlock(256, 512, stride=2), 92 | BasicBlock(512, 512), 93 | ) 94 | 95 | ''' 96 | 順伝播関数 97 | x: 入力, [バッチサイズ, チャネル数, 高さ, 幅] 98 | ''' 99 | def forward(self, x: torch.Tensor): 100 | x = self.conv1(x) 101 | x = self.bn1(x) 102 | x = self.relu(x) 103 | x = self.max_pool(x) 104 | 105 | x = self.layer1(x) 106 | c3 = self.layer2(x) 107 | c4 = 
self.layer3(c3) 108 | c5 = self.layer4(c4) 109 | 110 | return c3, c4, c5 111 | -------------------------------------------------------------------------------- /5_object_detection/5_4_detr/transform.py: -------------------------------------------------------------------------------- 1 | import random 2 | from PIL import Image 3 | from typing import Sequence, Callable 4 | 5 | import torch 6 | import torchvision.transforms as T 7 | import torchvision.transforms.functional as F 8 | 9 | 10 | class RandomHorizontalFlip: 11 | ''' 12 | 無作為に画像を水平反転するクラス 13 | prob: 水平反転する確率 14 | ''' 15 | def __init__(self, prob: float=0.5): 16 | self.prob = prob 17 | 18 | ''' 19 | 無作為に画像を水平反転する関数 20 | img : 水平反転する画像 21 | target: 物体検出用のラベルを持つ辞書 22 | ''' 23 | def __call__(self, img: Image, target: dict): 24 | if random.random() < self.prob: 25 | # 画像の水平反転 26 | img = F.hflip(img) 27 | 28 | # 正解矩形をx軸方向に反転 29 | # xmin, xmaxは水平反転すると大小が逆転し、 30 | # width - xmax, width - xminとなる 31 | width = img.size[0] 32 | target['boxes'][:, [0, 2]] = width - \ 33 | target['boxes'][:, [2, 0]] 34 | 35 | return img, target 36 | 37 | 38 | class RandomSizeCrop: 39 | ''' 40 | 無作為に画像を切り抜くクラス 41 | scale: 切り抜き前に対する切り抜き後の画像面積の下限と上限 42 | ratio: 切り抜き後の画像のアスペクト比の下限と上限 43 | ''' 44 | def __init__(self, scale: Sequence[float], 45 | ratio: Sequence[float]): 46 | self.scale = scale 47 | self.ratio = ratio 48 | 49 | ''' 50 | 無作為に画像を切り抜く関数 51 | img : 切り抜きをする画像 52 | target: 物体検出用のラベルを持つ辞書 53 | ''' 54 | def __call__(self, img: Image, target: dict): 55 | width, height = img.size 56 | 57 | # 切り抜く領域の左上の座標と幅および高さを取得 58 | # 切り抜く領域はscaleとratioの下限と上限に従う 59 | top, left, cropped_height, cropped_width = \ 60 | T.RandomResizedCrop.get_params( 61 | img, self.scale, self.ratio) 62 | 63 | # 左上の座標と幅および高さで指定した領域を切り抜き 64 | img = F.crop(img, top, left, cropped_height, cropped_width) 65 | 66 | # 原点がx = left, y = topに移動し、合わせて矩形の座標も移動 67 | target['boxes'][:, ::2] -= left 68 | target['boxes'][:, 1::2] -= top 69 | 70 | # 矩形の座標が切り抜き後に領域外に出る場合は座標をクリップ 71 | 
target['boxes'][:, ::2] = \ 72 | target['boxes'][:, ::2].clamp(min=0) 73 | target['boxes'][:, 1::2] = \ 74 | target['boxes'][:, 1::2].clamp(min=0) 75 | target['boxes'][:, ::2] = \ 76 | target['boxes'][:, ::2].clamp(max=cropped_width) 77 | target['boxes'][:, 1::2] = \ 78 | target['boxes'][:, 1::2].clamp(max=cropped_height) 79 | 80 | # 幅と高さが0より大きくなる(矩形の面積が0でない)矩形のみ保持 81 | keep = (target['boxes'][:, 2] > target['boxes'][:, 0]) & \ 82 | (target['boxes'][:, 3] > target['boxes'][:, 1]) 83 | target['classes'] = target['classes'][keep] 84 | target['boxes'] = target['boxes'][keep] 85 | 86 | # 切り抜き後の画像の大きさを保持 87 | target['size'] = torch.tensor( 88 | [cropped_width, cropped_height], dtype=torch.int64) 89 | 90 | return img, target 91 | 92 | 93 | class RandomResize: 94 | ''' 95 | 無作為に画像をアスペクト比を保持してリサイズするクラス 96 | min_sizes: 短辺の長さの候補、この中から無作為に長さを抽出 97 | max_size : 長辺の長さの最大値 98 | ''' 99 | def __init__(self, min_sizes: Sequence[int], max_size: int): 100 | self.min_sizes = min_sizes 101 | self.max_size = max_size 102 | 103 | ''' 104 | リサイズ後の短辺と長辺を計算する関数 105 | min_side: 短辺の長さ 106 | max_side: 長辺の長さ 107 | target : 目標となる短辺の長さ 108 | ''' 109 | def _get_target_size(self, min_side: int, max_side:int, 110 | target: int): 111 | # アスペクト比を保持して短辺をtargetに合わせる 112 | max_side = int(max_side * target / min_side) 113 | min_side = target 114 | 115 | # 長辺がmax_sizeを超えている場合、 116 | # アスペクト比を保持して長辺をmax_sizeに合わせる 117 | if max_side > self.max_size: 118 | min_side = int(min_side * self.max_size / max_side) 119 | max_side = self.max_size 120 | 121 | return min_side, max_side 122 | 123 | ''' 124 | 無作為に画像をリサイズする関数 125 | img : リサイズする画像 126 | target: 物体検出用のラベルを持つ辞書 127 | ''' 128 | def __call__(self, img: Image, target: dict): 129 | # 短辺の長さを候補の中から無作為に抽出 130 | min_size = random.choice(self.min_sizes) 131 | 132 | width, height = img.size 133 | 134 | # リサイズ後の大きさを取得 135 | # 幅と高さのどちらが短辺であるかで場合分け 136 | if width < height: 137 | resized_width, resized_height = self._get_target_size( 138 | width, height, min_size) 139 | 
else: 140 | resized_height, resized_width = self._get_target_size( 141 | height, width, min_size) 142 | 143 | # 指定した大きさに画像をリサイズ 144 | img = F.resize(img, (resized_height, resized_width)) 145 | 146 | # 正解矩形をリサイズ前後のスケールに合わせて変更 147 | ratio = resized_width / width 148 | target['boxes'] *= ratio 149 | 150 | # リサイズ後の画像の大きさを保持 151 | target['size'] = torch.tensor( 152 | [resized_width, resized_height], dtype=torch.int64) 153 | 154 | return img, target 155 | 156 | 157 | class ToTensor: 158 | ''' 159 | PIL画像をテンソルに変換する関数 160 | img : テンソルに変換する画像 161 | target: 物体検出用のラベルを持つ辞書 162 | ''' 163 | def __call__(self, img: Image, target: dict): 164 | img = F.to_tensor(img) 165 | 166 | return img, target 167 | 168 | 169 | class Normalize: 170 | ''' 171 | 画像を標準化するクラス 172 | mean: R, G, Bチャネルそれぞれの平均値 173 | std : R, G, Bチャネルそれぞれの標準偏差 174 | ''' 175 | def __init__(self, mean: Sequence[float], std: Sequence[float]): 176 | self.mean = mean 177 | self.std = std 178 | 179 | ''' 180 | 画像を標準化する関数 181 | img : 標準化する画像 182 | target: 物体検出用のラベルを持つ辞書 183 | ''' 184 | def __call__(self, img: torch.Tensor, target: dict): 185 | img = F.normalize(img, mean=self.mean, std=self.std) 186 | 187 | return img, target 188 | 189 | 190 | class Compose: 191 | ''' 192 | データ整形・拡張をまとめて適用するためのクラス 193 | transforms: データ整形・拡張のクラスインスタンスのシーケンス 194 | ''' 195 | def __init__(self, transforms: Sequence[Callable]): 196 | self.transforms = transforms 197 | 198 | ''' 199 | データ整形・拡張を連続して適用する関数 200 | img : データ整形・拡張する画像 201 | target: 物体検出用のラベルを持つ辞書 202 | ''' 203 | def __call__(self, img: Image, target: dict): 204 | for transform in self.transforms: 205 | img, target = transform(img, target) 206 | 207 | return img, target 208 | 209 | 210 | class RandomSelect: 211 | ''' 212 | 2種類のデータ拡張を受け取り、無作為にどちらかを適用するクラス 213 | transform1: データ拡張1 214 | transform2: データ拡張2 215 | prob : データ拡張1が適用される確率 216 | ''' 217 | def __init__(self, transform1: Callable, transform2: Callable, 218 | prob: float=0.5): 219 | self.transform1 = transform1 220 | self.transform2 
= transform2 221 | self.prob = prob 222 | 223 | ''' 224 | データ拡張を無作為に選択して適用する関数 225 | img : データ整形・拡張する画像 226 | target: 物体検出用のラベルを持つ辞書 227 | ''' 228 | def __call__(self, img: Image, target: dict): 229 | if random.random() < self.prob: 230 | return self.transform1(img, target) 231 | 232 | return self.transform2(img, target) 233 | -------------------------------------------------------------------------------- /5_object_detection/5_4_detr/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import torch 4 | from torch.utils.data import Dataset 5 | import torchvision 6 | 7 | 8 | ''' 9 | データセットを分割するための2つの排反なインデックス集合を生成する関数 10 | dataset : 分割対称のデータセット 11 | ratio : 1つ目のセットに含めるデータ量の割合 12 | random_seed: 分割結果を不変にするためのシード 13 | ''' 14 | def generate_subset(dataset: Dataset, ratio: float, 15 | random_seed: int=0): 16 | # サブセットの大きさを計算 17 | size = int(len(dataset) * ratio) 18 | 19 | indices = list(range(len(dataset))) 20 | 21 | # 二つのセットに分ける前にシャッフル 22 | random.seed(random_seed) 23 | random.shuffle(indices) 24 | 25 | # セット1とセット2のサンプルのインデックスに分割 26 | indices1, indices2 = indices[:size], indices[size:] 27 | 28 | return indices1, indices2 29 | 30 | 31 | ''' 32 | 矩形をxmin, ymin, xmax, ymaxからx, y, width, heightに変換する関数 33 | boxes: 矩形集合, [矩形数 (任意の軸数), 4 (xmin, ymin, xmax, ymax)] 34 | ''' 35 | def convert_to_xywh(boxes: torch.Tensor): 36 | wh = boxes[..., 2:] - boxes[..., :2] 37 | xy = boxes[..., :2] + wh / 2 38 | boxes = torch.cat((xy, wh), dim=-1) 39 | 40 | return boxes 41 | 42 | 43 | ''' 44 | 矩形をx, y, width, heightからxmin, ymin, xmax, ymaxに変換 45 | boxes: 外接集合, [矩形数 (任意の軸数), 4 (x, y, width, height)] 46 | ''' 47 | def convert_to_xyxy(boxes: torch.Tensor): 48 | xymin = boxes[..., :2] - boxes[..., 2:] / 2 49 | xymax = boxes[..., 2:] + xymin 50 | boxes = torch.cat((xymin, xymax), dim=-1) 51 | 52 | return boxes 53 | 54 | 55 | ''' 56 | boxes1: 矩形集合, [矩形数, 4 (xmin, ymin, xmax, ymax)] 57 | boxes2: 矩形集合, [矩形数, 4 (xmin, ymin, xmax, ymax)] 
58 | ''' 59 | def calc_iou(boxes1: torch.Tensor, boxes2: torch.Tensor): 60 | # 第1軸をunsqueezeし、ブロードキャストを利用することで 61 | # [矩形数, 1, 2] と[矩形数, 2]の演算結果が 62 | # [boxes1の矩形数, boxes2の矩形数, 2] となる 63 | 64 | # 積集合の左上の座標を取得 65 | intersect_left_top = torch.maximum( 66 | boxes1[:, :2].unsqueeze(1), boxes2[:, :2]) 67 | # 積集合の右下の座標を取得 68 | intersect_right_bottom = torch.minimum( 69 | boxes1[:, 2:].unsqueeze(1), boxes2[:, 2:]) 70 | 71 | # 積集合の幅と高さを算出し、面積を計算 72 | intersect_width_height = ( 73 | intersect_right_bottom - intersect_left_top).clamp(min=0) 74 | intersect_areas = intersect_width_height.prod(dim=2) 75 | 76 | # それぞれの矩形の面積を計算 77 | areas1 = (boxes1[:, 2] - boxes1[:, 0]) * \ 78 | (boxes1[:, 3] - boxes1[:, 1]) 79 | areas2 = (boxes2[:, 2] - boxes2[:, 0]) * \ 80 | (boxes2[:, 3] - boxes2[:, 1]) 81 | 82 | # 和集合の面積を計算 83 | union_areas = areas1.unsqueeze(1) + areas2 - intersect_areas 84 | 85 | ious = intersect_areas / union_areas 86 | 87 | return ious, union_areas 88 | -------------------------------------------------------------------------------- /5_object_detection/model/.gitattributes: -------------------------------------------------------------------------------- 1 | detr.pth filter=lfs diff=lfs merge=lfs -text 2 | retinanet.pth filter=lfs diff=lfs merge=lfs -text 3 | -------------------------------------------------------------------------------- /5_object_detection/model/detr.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:6a4dcee45815597550bcb347dab16853589e1711bb8e025b08bcdd9913133158 3 | size 115447628 4 | -------------------------------------------------------------------------------- /5_object_detection/model/retinanet.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:8426108f9ac671eed6da09de7c244bf3f47fc7cf1bb058a54a42cb0fdbd2a88b 3 | size 79254533 4 | 
-------------------------------------------------------------------------------- /6_img_captioning/6_2_dataset.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[]},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"gpuClass":"standard"},"cells":[{"cell_type":"markdown","source":["#第6章 画像キャプショニング\n","##第6.2節 データセットの準備"],"metadata":{"id":"oQUVJmRm7a2V"}},{"cell_type":"markdown","source":["###モジュールのインポートとGoogleドライブのマウント"],"metadata":{"id":"fIZCu47o_nCg"}},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"eS1kRMB4512I","executionInfo":{"status":"ok","timestamp":1677132421957,"user_tz":-540,"elapsed":22318,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"c7931ad2-005a-4c3e-f2a3-03fcc143bfde"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n"]}],"source":["import pickle\n","from pycocotools.coco import COCO\n","from collections import Counter\n","\n","# Googleドライブをマウント\n","from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["###辞書の作成"],"metadata":{"id":"PqJpT6tdBVFL"}},{"cell_type":"code","source":["# データの保存先\n","fp_train_caption = 'drive/MyDrive/python_image_recognition/' \\\n"," 'data/coco2014/captions_val2014.json'\n","fp_word_to_id = 'drive/MyDrive/python_image_recognition/' \\\n"," '6_img_captioning/model/word_to_id.pkl'\n","fp_id_to_word = 'drive/MyDrive/python_image_recognition/' \\\n"," '6_img_captioning/model/id_to_word.pkl'\n","\n","# キャプションを読み込み\n","coco = COCO(fp_train_caption)\n","anns_keys = coco.anns.keys()\n","\n","# 単語ーID対応表の作成\n","coco_token = []\n","for key in anns_keys:\n"," caption = coco.anns[key]['caption']\n"," tokens = caption.lower().split()\n"," coco_token.extend(tokens)\n","\n","# ピリオド、カンマを削除\n","table = 
str.maketrans({'.': '',\n"," ',': ''})\n","for k in range(len(coco_token)):\n"," coco_token[k] = coco_token[k].translate(table)\n","\n","# 単語ヒストグラムを作成\n","freq = Counter(coco_token)\n","\n","# 3回以上出現する単語に限定して辞書を作成\n","vocab = [token for token, count in freq.items() if count >= 3]\n","sorted(vocab)\n","\n","# 特殊トークンの追加\n","vocab.append('') # 文章の始まりを表すトークンを追加\n","vocab.append('') # 文章の終わりを表すトークンを追加\n","vocab.append('') # 辞書に無い単語を表すトークンを追加\n","vocab.append('') # 系列長を揃えるためのトークンを追加\n","\n","# 単語ー単語ID対応表の作成\n","word_to_id = {token: i for i, token in enumerate(vocab)}\n","id_to_word = {i: token for i, token in enumerate(vocab)}\n","\n","# ファイル出力\n","with open(fp_word_to_id, 'wb') as f:\n"," pickle.dump(word_to_id, f)\n","with open(fp_id_to_word, 'wb') as f:\n"," pickle.dump(id_to_word, f)\n","\n","print(f'単語数: {str(len(word_to_id))}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"dt03YvWpBVbo","executionInfo":{"status":"ok","timestamp":1677132447572,"user_tz":-540,"elapsed":4197,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"19bb016b-6664-4a4c-cd12-93e82478acc6"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["loading annotations into memory...\n","Done (t=1.66s)\n","creating index...\n","index created!\n","単語数: 8583\n"]}]}]} -------------------------------------------------------------------------------- /6_img_captioning/6_3_show_and_tell/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from torch.utils.data import Dataset 4 | 5 | 6 | ''' 7 | データセットを分割するための2つの排反なインデックス集合を生成する関数 8 | dataset : 分割対称のデータセット 9 | ratio : 1つ目のセットに含めるデータ量の割合 10 | random_seed: 分割結果を不変にするためのシード 11 | ''' 12 | def generate_subset(dataset: Dataset, ratio: float, 13 | random_seed: int=0): 14 | # サブセットの大きさを計算 15 | size = int(len(dataset) * ratio) 16 | 17 | indices = list(range(len(dataset))) 18 | 19 | # 二つのセットに分ける前にシャッフル 20 | 
random.seed(random_seed) 21 | random.shuffle(indices) 22 | 23 | # セット1とセット2のサンプルのインデックスに分割 24 | indices1, indices2 = indices[:size], indices[size:] 25 | 26 | return indices1, indices2 27 | -------------------------------------------------------------------------------- /6_img_captioning/6_4_show_attend_and_tell/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | from typing import Sequence, Dict, Tuple, Union 3 | 4 | import torch 5 | from torch.utils.data import Dataset 6 | 7 | 8 | ''' 9 | データセットを分割するための2つの排反なインデックス集合を生成する関数 10 | dataset : 分割対称のデータセット 11 | ratio : 1つ目のセットに含めるデータ量の割合 12 | random_seed: 分割結果を不変にするためのシード 13 | ''' 14 | def generate_subset(dataset: Dataset, ratio: float, 15 | random_seed: int=0): 16 | # サブセットの大きさを計算 17 | size = int(len(dataset) * ratio) 18 | 19 | indices = list(range(len(dataset))) 20 | 21 | # 二つのセットに分ける前にシャッフル 22 | random.seed(random_seed) 23 | random.shuffle(indices) 24 | 25 | # セット1とセット2のサンプルのインデックスに分割 26 | indices1, indices2 = indices[:size], indices[size:] 27 | 28 | return indices1, indices2 29 | 30 | 31 | ''' 32 | サンプルからミニバッチを生成するcollate関数 33 | batch : CocoCaptionsからサンプルした複数の画像とラベルをまとめたもの 34 | word_to_id: 単語->単語ID辞書 35 | ''' 36 | def collate_func(batch: Sequence[Tuple[Union[torch.Tensor, str]]], 37 | word_to_id: Dict[str, int]): 38 | imgs, captions = zip(*batch) 39 | 40 | # それぞれのサンプルの5個のキャプションの中から1つを選択してトークナイズ 41 | captions = [tokenize_caption( 42 | random.choice(cap), word_to_id) for cap in captions] 43 | 44 | # キャプションの長さが降順になるように並び替え 45 | batch = zip(imgs, captions) 46 | batch = sorted(batch, key=lambda x: len(x[1]), reverse=True) 47 | imgs, captions = zip(*batch) 48 | imgs = torch.stack(imgs) 49 | 50 | lengths = [cap.shape[0] for cap in captions] 51 | targets = torch.full((len(captions), max(lengths)), 52 | word_to_id[''], dtype=torch.int64) 53 | for i, cap in enumerate(captions): 54 | end = lengths[i] 55 | targets[i, :end] = cap[:end] 56 | 57 | return imgs, targets, 
lengths 58 | 59 | 60 | ''' 61 | トークナイザ - 文章(caption)を単語IDのリスト(tokens_id)に変換 62 | caption : 画像キャプション 63 | word_to_id: 単語->単語ID辞書 64 | ''' 65 | def tokenize_caption(caption: str, word_to_id: Dict[str, int]): 66 | tokens = caption.lower().split() 67 | 68 | tokens_temp = [] 69 | # 単語についたピリオド、カンマを削除 70 | for token in tokens: 71 | if token == '.' or token == ',': 72 | continue 73 | 74 | token = token.rstrip('.') 75 | token = token.rstrip(',') 76 | 77 | tokens_temp.append(token) 78 | 79 | tokens = tokens_temp 80 | 81 | # 文章(caption)を単語IDのリスト(tokens_id)に変換 82 | tokens_ext = [''] + tokens + [''] 83 | tokens_id = [] 84 | for k in tokens_ext: 85 | if k in word_to_id: 86 | tokens_id.append(word_to_id[k]) 87 | else: 88 | tokens_id.append(word_to_id['']) 89 | 90 | return torch.Tensor(tokens_id) 91 | -------------------------------------------------------------------------------- /6_img_captioning/6_5_transformer_captioning/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torchvision import models 4 | 5 | 6 | class CNNEncoder(nn.Module): 7 | ''' 8 | Transformer captioningのエンコーダ 9 | dim_embedding: 埋め込み次元 10 | ''' 11 | def __init__(self, dim_embedding: int): 12 | super().__init__() 13 | 14 | # ImageNetで事前学習された 15 | # ResNet152モデルをバックボーンネットワークとする 16 | resnet = models.resnet152(weights="IMAGENET1K_V2") 17 | 18 | # 特徴抽出器として使うため全結合層を削除 19 | modules = list(resnet.children())[:-1] 20 | self.backbone = nn.Sequential(*modules) 21 | 22 | # デコーダへの出力 23 | self.linear = nn.Linear(resnet.fc.in_features, dim_embedding) 24 | 25 | ''' 26 | エンコーダの順伝播 27 | imgs: 入力画像, [バッチサイズ, チャネル数, 高さ, 幅] 28 | ''' 29 | def forward(self, imgs: torch.Tensor): 30 | # 特徴抽出 -> [バッチサイズ, 2048] 31 | # 今回はバックボーンネットワークは学習させない 32 | with torch.no_grad(): 33 | features = self.backbone(imgs) 34 | features = features.flatten(1) 35 | 36 | # 全結合 37 | features = self.linear(features) 38 | 39 | return features 40 | 
-------------------------------------------------------------------------------- /6_img_captioning/6_5_transformer_captioning/util.py: -------------------------------------------------------------------------------- 1 | import random 2 | from typing import Sequence, Dict, Tuple, Union 3 | 4 | import torch 5 | from torch.utils.data import Dataset 6 | 7 | 8 | ''' 9 | データセットを分割するための2つの排反なインデックス集合を生成する関数 10 | dataset : 分割対称のデータセット 11 | ratio : 1つ目のセットに含めるデータ量の割合 12 | random_seed: 分割結果を不変にするためのシード 13 | ''' 14 | def generate_subset(dataset: Dataset, ratio: float, 15 | random_seed: int=0): 16 | # サブセットの大きさを計算 17 | size = int(len(dataset) * ratio) 18 | 19 | indices = list(range(len(dataset))) 20 | 21 | # 二つのセットに分ける前にシャッフル 22 | random.seed(random_seed) 23 | random.shuffle(indices) 24 | 25 | # セット1とセット2のサンプルのインデックスに分割 26 | indices1, indices2 = indices[:size], indices[size:] 27 | 28 | return indices1, indices2 29 | 30 | 31 | ''' 32 | サンプルからミニバッチを生成するcollate関数 33 | batch : CocoCaptionsからサンプルした複数の画像とラベルをまとめたもの 34 | word_to_id: 単語->単語ID辞書 35 | ''' 36 | def collate_func(batch: Sequence[Tuple[Union[torch.Tensor, str]]], 37 | word_to_id: Dict[str, int]): 38 | imgs, captions = zip(*batch) 39 | 40 | # それぞれのサンプルの5個のキャプションの中から1つを選択してトークナイズ 41 | captions = [tokenize_caption( 42 | random.choice(cap), word_to_id) for cap in captions] 43 | 44 | # キャプションの長さが降順になるように並び替え 45 | batch = zip(imgs, captions) 46 | batch = sorted(batch, key=lambda x: len(x[1]), reverse=True) 47 | imgs, captions = zip(*batch) 48 | imgs = torch.stack(imgs) 49 | 50 | lengths = [cap.shape[0] for cap in captions] 51 | targets = torch.full((len(captions), max(lengths)), 52 | word_to_id[''], dtype=torch.int64) 53 | for i, cap in enumerate(captions): 54 | end = lengths[i] 55 | targets[i, :end] = cap[:end] 56 | 57 | return imgs, targets, lengths 58 | 59 | 60 | ''' 61 | トークナイザ - 文章(caption)を単語IDのリスト(tokens_id)に変換 62 | caption : 画像キャプション 63 | word_to_id: 単語->単語ID辞書 64 | ''' 65 | def tokenize_caption(caption: str, word_to_id: 
Dict[str, int]): 66 | tokens = caption.lower().split() 67 | 68 | tokens_temp = [] 69 | # 単語についたピリオド、カンマを削除 70 | for token in tokens: 71 | if token == '.' or token == ',': 72 | continue 73 | 74 | token = token.rstrip('.') 75 | token = token.rstrip(',') 76 | 77 | tokens_temp.append(token) 78 | 79 | tokens = tokens_temp 80 | 81 | # 文章(caption)を単語IDのリスト(tokens_id)に変換 82 | tokens_ext = [''] + tokens + [''] 83 | tokens_id = [] 84 | for k in tokens_ext: 85 | if k in word_to_id: 86 | tokens_id.append(word_to_id[k]) 87 | else: 88 | tokens_id.append(word_to_id['']) 89 | 90 | return torch.Tensor(tokens_id) 91 | -------------------------------------------------------------------------------- /6_img_captioning/model/best/.gitattributes: -------------------------------------------------------------------------------- 1 | 6-3_encoder_best.pth filter=lfs diff=lfs merge=lfs -text 2 | 6-3_decoder_best.pth filter=lfs diff=lfs merge=lfs -text 3 | 6-4_decoder_best.pth filter=lfs diff=lfs merge=lfs -text 4 | 6-4_encoder_best.pth filter=lfs diff=lfs merge=lfs -text 5 | 6-5_encoder_best.pth filter=lfs diff=lfs merge=lfs -text 6 | 6-5_decoder_best.pth filter=lfs diff=lfs merge=lfs -text 7 | -------------------------------------------------------------------------------- /6_img_captioning/model/best/6-3_decoder_best.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:762e8185b42f54da08e3e754292ef82078a2b9cf23660e7b03dbd3e3e1f905d4 3 | size 16139797 4 | -------------------------------------------------------------------------------- /6_img_captioning/model/best/6-3_encoder_best.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:a44ad9fc1b38858e237607ebb7bfb708988d5c5ac906c4b0a28c6b297b2aac3b 3 | size 235959247 4 | -------------------------------------------------------------------------------- 
/6_img_captioning/model/best/6-4_decoder_best.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:ac4eb6a6205cd5578b801158c80cf47e480afc08ea31b9086b593019d3d67720 3 | size 17822365 4 | -------------------------------------------------------------------------------- /6_img_captioning/model/best/6-4_encoder_best.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:add4894c87d982ac48bf0698f4478278abf04c3ec3b9016843b495bd02de1d7c 3 | size 233499841 4 | -------------------------------------------------------------------------------- /6_img_captioning/model/best/6-5_decoder_best.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:52fdb48f5dd833ac9402ea1bfdb28fa7eece366ced29db109d033702fcde734e 3 | size 52331167 4 | -------------------------------------------------------------------------------- /6_img_captioning/model/best/6-5_encoder_best.pth: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:931e345d34a8839188885a3d1cb9e00ab33255928c92f1a3ba3e44f1a0688d48 3 | size 235959247 4 | -------------------------------------------------------------------------------- /6_img_captioning/model/best/id_to_word.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/6_img_captioning/model/best/id_to_word.pkl -------------------------------------------------------------------------------- /6_img_captioning/model/best/word_to_id.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/6_img_captioning/model/best/word_to_id.pkl -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Masato Tamura and Katuyuki Nakamura 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pythonで学ぶ画像認識 (機械学習実践シリーズ) 2 | 3 | ![License](https://img.shields.io/badge/License-MIT-green) 4 | ![Python](https://img.shields.io/badge/Python-3.8-orange) 5 | ![PyTorch](https://img.shields.io/badge/PyTorch-1.13.0-orange) 6 | Open In Colab 7 | 8 | 9 | 10 | 本リポジトリではインプレス社より出版されている[田村 雅人](https://tamtamz.github.io/ja/)・[中村 克行](https://scholar.google.com/citations?user=ZIxQ5zAAAAAJ&hl=en) 著の機械学習実践シリーズ「**[Pythonで学ぶ画像認識](https://book.impress.co.jp/books/1122101074)**」で扱うソースコードやデータ、学習済みパラメータを管理しています。ソースコードはJupyterノートブックにまとめられており、Google Colabで実行されることを想定しています。ソースコードの解説は書籍内に記載されており、本リポジトリのソースコードは補助教材となっています。 11 | 12 | ## 書籍の内容 13 | 14 | 書籍は以下のような構成になります。また、[CVMLエキスパートガイド](https://cvml-expertguide.net/)という学習支援サイトにて[紹介記事](https://cvml-expertguide.net/books/cv-dl-books/python-image-recognition/)を書いていただいており、記事にて本書の内容や長所を簡単に把握できます。本書ではカバーしきれなかった補完情報を載せた記事にすぐたどり着くことができるようになっておりますので、合わせてご活用ください。 15 | 16 | Jupyterノートブックの補助教材がある節には Open In Colab のバッジをつけています。バッジをクリックすると該当するノートブックをColabで開けます。ただし、この方法でノートブックを開いて画像やラベルデータの読み込みを行う処理を実行した場合、該当するデータがColab上にないためエラーが発生します。ノートブックの処理を実行したい場合には書籍の第1.4節で解説されている環境構築を行って実行してください。 17 | 18 | - **第1章 画像認識とは?** 19 | - 第1節 画像認識の概要 20 | 21 | - 第2節 コンピュータによる画像認識の仕組みを理解しよう 22 | 23 | - 第3節 実社会で使われている画像認識アプリケーション 24 | 25 | - 第4節 画像認識のための開発環境構築 Open In Colab 26 | - **第2章 画像処理の基礎知識** 27 | - 第1節 画像データを読み込んで表示してみよう Open In Colab 28 | 29 | - 第2節 画像に平滑化フィルタをかけてみよう Open In Colab 30 | 31 | - 第3節 畳み込み演算を使った特徴抽出Open In Colab 32 | 33 | - 第4節 アテンションを使った特徴抽出Open In Colab 34 | 35 | - **第3章 深層学習を使う準備** 36 | - 第1節 学習と評価の基礎 Open In Colab 37 | 38 | - 第2節 深層ニューラルネットワーク 39 | 40 | - **第4章 画像分類** 41 | - 第1節 順伝播型ニューラルネットワークによる手法 Open In Colab 42 | 43 | - 第2節 畳み込みニューラルネットワークによる手法ーResNet18を実装してみよう Open In Colab 44 | 45 | - 第3節 Transformerによる手法ーVision Transformerを実装してみよう Open In Colab 46 | 47 | - 第4節 
精度向上のテクニック Open In Colab 48 | 49 | - **第5章 物体検出** 50 | - 第1節 物体検出の基礎 Open In Colab 51 | 52 | - 第2節 データセットの準備 Open In Colab 53 | 54 | - 第3節 CNNによる手法ーRetinaNetを実装してみよう Open In Colab 55 | 56 | - 第4節 Transformerによる手法ーDETRを実装してみよう Open In Colab 57 | 58 | - **第6章 画像キャプショニング** 59 | - 第1節 画像キャプショニングの基礎 60 | 61 | - 第2節 データセットの準備 Open In Colab 62 | 63 | - 第3節 CNN-LSTMによる手法ーShow and tellを実装してみよう Open In Colab 64 | 65 | - 第4節 アテンション機構による手法ーShow, attend and tellを実装してみよう Open In Colab 66 | 67 | - 第5節 Transformerによる画像キャプショニングを実装してみよう Open In Colab 68 | 69 | ## 付録 70 | 71 | 書籍でカバーしきれなかった内容について付録を用意しました。付録はJupyterノートブックで作成されています。 72 | 73 |
74 |
付録A PyTorchの基礎 Open In Colab
75 |
PyTorchを使う上で最低限必要となる知識について解説しています。PyTorchを始めて使う方は第4章に入る前に本ノートブックを読むことをおすすめします。
76 |
77 | 78 | ## 関連リンク 79 | 80 | ### [CVMLエキスパートガイド](https://cvml-expertguide.net/) 81 | 82 | コンピュータビジョンやディープラーニングを専門とする研究開発者を対象とした学習支援サイトです。サイトの指定参考書に本書を入れていただいています。本書では分量の関係上、詳細を割愛したものやカバーしきれなかったものが詳しく解説されており、本書と合わせてサイトをご活用いただくと、より理解が深まるのでおススメのサイトです。 83 | 84 | ## 疑問点・修正点 85 | 86 | 疑問点や修正点はIssueにて管理しています。不明点などございましたら以下を確認し、解決方法が見つからない場合には新しくIssueを作成してください。 87 | 88 | https://github.com/py-img-recog/python_image_recognition/issues 89 | -------------------------------------------------------------------------------- /appendix/a_pytorch.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"authorship_tag":"ABX9TyPDtZFTJ7tNYqlLJyztRKXW"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","gpuClass":"standard"},"cells":[{"cell_type":"markdown","source":["#Pythonで学ぶ画像認識 付録A PyTorchの基礎"],"metadata":{"id":"aI7F4SRNLsQz"}},{"cell_type":"markdown","source":["本付録では本書で使う深層学習フレームワークのPyTorchについて、本書を読み進める上で必要となる最低限の知識を解説します。PyTorchではNumPyと同じように、複数の数値を1つにまとめた多次元配列を使って処理を進めます。多次元配列の使い方や多次元配列に適用する関数はNumPyと類似する部分が多くあります。NumPyの使い方をご存じの方はそれらの処理を思い浮かべながら共通する部分を確認していただければと思います。"],"metadata":{"id":"c5M_ug8Bnh6k"}},{"cell_type":"markdown","source":["##モジュールのインポート"],"metadata":{"id":"zeIShpEgLw5q"}},{"cell_type":"code","execution_count":1,"metadata":{"id":"Mz_fwzMLLkOd","executionInfo":{"status":"ok","timestamp":1677900422622,"user_tz":480,"elapsed":4734,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}}},"outputs":[],"source":["import torch\n","import torch.nn.functional as F\n","from torch import 
nn"]},{"cell_type":"markdown","source":["##テンソルとテンソルの操作"],"metadata":{"id":"-n79EMMFnlku"}},{"cell_type":"markdown","source":["###テンソルとは"],"metadata":{"id":"6_heDLJRqBSe"}},{"cell_type":"markdown","source":["####テンソルの生成"],"metadata":{"id":"jdQk8njyMAhB"}},{"cell_type":"markdown","source":["まずはPyTorchの多次元配列の基礎と多次元配列を使った簡単な処理を解説します。PyTorchでは多次元配列のことを**テンソル(Tensor)**と呼び、`Tensor`クラスで定義しています。以下にテンソルの生成例を示します。"],"metadata":{"id":"G0EvEfbZnzLe"}},{"cell_type":"code","source":["t1 = torch.tensor([1, 2, 3, 4])\n","t2 = torch.zeros((32, 3, 128, 128))\n","\n","print(f't1 = {t1}, t1.shape = {t1.shape}')\n","print(f't2.shape = {t2.shape}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"lxMPxuFeMC07","executionInfo":{"status":"ok","timestamp":1677900425665,"user_tz":480,"elapsed":133,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"6d011007-d804-40e6-fee1-0304cea74500"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["t1 = tensor([1, 2, 3, 4]), t1.shape = torch.Size([4])\n","t2.shape = torch.Size([32, 3, 128, 128])\n"]}]},{"cell_type":"markdown","source":["`t1`は`tensor`関数にリストを渡すことで生成されています。この生成結果は第0軸が4次元で1から4の値を持つテンソルです。`t2`は`zeros`関数により生成されています。`zeros`関数は各軸の次元を指定することで、その形を持つ全ての値が0のテンソルを生成します。このようにテンソルは任意の軸数および次元を持つことができます。テンソルの形は`Tensor`クラスのインスタンスが持つ`shape`変数で確認できます。"],"metadata":{"id":"vL8PbEB9oRy6"}},{"cell_type":"markdown","source":["####テンソルのGPUへの転送"],"metadata":{"id":"GkmJxwi8SWt-"}},{"cell_type":"markdown","source":["テンソルは通常だとCPUで処理するためにメインメモリ上に生成されますが、GPUメモリ上に生成したり、後からGPUメモリに転送したりできます。以下にGPUメモリにテンソルを配置するための例を示します。"],"metadata":{"id":"YRHMME8Go4wv"}},{"cell_type":"code","source":["t1 = torch.tensor([1, 2, 3, 4], device='cuda')\n","\n","t2 = torch.tensor([1, 2, 3, 4])\n","t2 = t2.to('cuda')"],"metadata":{"id":"GZQZnl1nSc9n","executionInfo":{"status":"ok","timestamp":1677900433279,"user_tz":480,"elapsed":5130,"user":{"displayName":"Taro 
Python","userId":"05683982304053166545"}}},"execution_count":3,"outputs":[]},{"cell_type":"markdown","source":["`t1`は生成時に`device`引数に`'cuda'`を指定しているので最初からGPUメモリ上に生成されます。一方で`t2`はまずメインメモリ上に生成され、`to`関数によりGPUメモリに転送されます。このようにテンソルをGPUメモリに配置してGPUで処理することにより高速に処理できるようになります。ただし、メインメモリと比較してGPUメモリは容量が限られていることが多いので、GPUメモリに配置するテンソルの数と大きさに気をつける必要があります。"],"metadata":{"id":"9hDO2aV8o_ce"}},{"cell_type":"markdown","source":["####Python演算子を使ったテンソルの演算"],"metadata":{"id":"hTLJXSpkWww2"}},{"cell_type":"markdown","source":["Pythonの演算子を使ってテンソルの演算をした場合、演算は要素ごとに行われます。以下にPython演算子によるテンソルの演算とその結果を示します。"],"metadata":{"id":"vlKmhSh3pYrg"}},{"cell_type":"code","source":["t1 = torch.tensor([1, 2, 3, 4])\n","t2 = torch.tensor([2, 4, 6, 8])\n","\n","t3 = t1 + t2\n","t4 = t1 ** 2\n","\n","print(f't3 = {t3}')\n","print(f't4 = {t4}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"BtLMa2q4W1km","executionInfo":{"status":"ok","timestamp":1677900435828,"user_tz":480,"elapsed":115,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"74990e18-1acf-4315-f491-567c96f44dc5"},"execution_count":4,"outputs":[{"output_type":"stream","name":"stdout","text":["t3 = tensor([ 3, 6, 9, 12])\n","t4 = tensor([ 1, 4, 9, 
16])\n"]}]},{"cell_type":"markdown","source":["`t3`は`t1`と`t2`の要素毎の加算により得られ、`t4`は`t1`の要素毎の2乗により得られていることがわかります。このような要素毎の演算をするため、Python演算子を使った2つのテンソルの演算では2つのテンソルは基本的に同じ形である必要があります。ただし、後ほど解説するブロードキャストが可能な条件に当てはまれば、異なる形のテンソル同士で演算可能です。"],"metadata":{"id":"NCtQjvrkpiN-"}},{"cell_type":"markdown","source":["以上がテンソルの解説になります。`Tensor`クラスにはテンソルを処理するための様々な関数が実装されています。次はそれらの中から頻繁に使うものをいくつか解説します。"],"metadata":{"id":"oOqSJHMcpx1Z"}},{"cell_type":"markdown","source":["###テンソルを処理する関数"],"metadata":{"id":"h5p8dH0BqmUi"}},{"cell_type":"markdown","source":["####view関数によるテンソルの形の変更"],"metadata":{"id":"tcudLLoZcakt"}},{"cell_type":"markdown","source":["PyTorchでデータを処理する際にテンソルの形を変更することが多くあります。そのようなときは`view`関数を使用します。以下に`view`関数の使用例と形を変更した結果を示します。"],"metadata":{"id":"6iOf7nuLqv9o"}},{"cell_type":"code","source":["t1 = torch.tensor([1, 2, 3, 4])\n","t1 = t1.view(2, 2)\n","\n","t2 = torch.tensor([1, 2, 3, 4, 5, 6])\n","t2 = t2.view(2, -1)\n","\n","print(f't1.shape = {t1.shape}')\n","print(f't2.shape = {t2.shape}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"l6SCg2yLcdnH","executionInfo":{"status":"ok","timestamp":1677900438225,"user_tz":480,"elapsed":111,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"ad44ddc1-c020-4ab9-f583-e3e2ff744587"},"execution_count":5,"outputs":[{"output_type":"stream","name":"stdout","text":["t1.shape = torch.Size([2, 2])\n","t2.shape = torch.Size([2, 3])\n"]}]},{"cell_type":"markdown","source":["`t1`は元々`[4]`の形のテンソルでしたが、`view`関数により`[2, 2]`の形に変更されています。`t2`の形は`view`関数により`[6]`から`[2, 
3]`に変更されています。`view`関数には1つだけ`-1`を渡すことができ、`-1`を指定された軸の次元は、元のテンソルの大きさと`view`関数に渡されたその他の軸の次元から自動的に計算されます。\n","\n","`view`関数は元のテンソルのデータをメモリ上で複製せずに、見かけ上の形を変更します。このような理由からメモリ上のデータ配置と見かけ上の形で整合性が取れないときは`view`関数を使えません。このようなときは`reshape`関数を使います。`reshape`関数は前述のような整合性がとれないときは、データを複製して形を変更します。多くの場合で`view`関数を使うことができるので、`view`関数でエラーが出るときのみ`reshape`関数を使えば問題ありません。"],"metadata":{"id":"UUw3edaEq4l9"}},{"cell_type":"markdown","source":["####transpose関数とpermute関数による軸の並び替え"],"metadata":{"id":"UlRMk54pqpNG"}},{"cell_type":"markdown","source":["`view`関数と同様にテンソルの処理で頻繁に登場するのが軸の順番を並び替える`transpose`関数や`permute`関数です。軸の並び替えというと想像しにくいですが、行列の行と列を入れ替える操作をより多くの軸で行うイメージです。`transpose`関数は任意の2軸を入れ替えます。一方で`permute`関数は`transpose`関数を拡張した関数で、全ての軸を一度に並べ替えることができます。以下に`transpose`関数と`permute`関数の使用例と軸を並び替えた結果を示します。"],"metadata":{"id":"FaQC98PnrkHu"}},{"cell_type":"code","source":["t1 = torch.zeros((32, 3, 128, 128))\n","t1 = t1.transpose(0, 2)\n","\n","t2 = torch.zeros((32, 3, 128, 128))\n","t2 = t2.permute(2, 0, 3, 1)\n","\n","print(f't1.shape = {t1.shape}')\n","print(f't2.shape = {t2.shape}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"SHN1nRCQqshs","executionInfo":{"status":"ok","timestamp":1677900440457,"user_tz":480,"elapsed":125,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"dc850e11-14a9-4906-ae23-4505299e7f78"},"execution_count":6,"outputs":[{"output_type":"stream","name":"stdout","text":["t1.shape = torch.Size([128, 3, 32, 128])\n","t2.shape = torch.Size([128, 32, 128, 3])\n"]}]},{"cell_type":"markdown","source":["`transpose`関数で第0軸と第2軸を入れ替えた結果、`[32, 3, 128, 128]`の形であったテンソル`t1`が`[128, 3, 32, 128]`の形になります。`permute`関数は軸の順番が第2軸、第0軸、第3軸、第1軸となるよう並べ替えており、軸を並べ替えられた`t2`は`[128, 32, 128, 
3]`の形になります。"],"metadata":{"id":"3imq7A8tr0Ha"}},{"cell_type":"markdown","source":["####cat関数とstack関数による複数テンソルの連結"],"metadata":{"id":"nJMzfO1fwGEi"}},{"cell_type":"markdown","source":["`view`関数や`transpose`関数は1つのテンソルに対する操作でしたが、複数のテンソルを組み合わせる操作が必要になることも多くあります。そのようなときに使用するのが`cat`関数と`stack`関数です。`cat`関数はテンソルが持つ既存の軸の1つで複数のテンソルを連結します。一方で`stack`関数は新しく軸を追加して、その軸で複数のテンソルを連結します。以下に`cat`関数と`stack`関数の使用例と複数のテンソルを連結した結果を示します。"],"metadata":{"id":"2xP3U8MnsAaF"}},{"cell_type":"code","source":["t1 = torch.tensor([1, 2, 3, 4, 5, 6]).view(2, 3)\n","t2 = torch.tensor([7, 8, 9]).view(1, 3)\n","t3 = torch.cat((t1, t2))\n","\n","t4 = torch.tensor([1, 2, 3])\n","t5 = torch.tensor([4, 5, 6])\n","t6 = torch.stack((t4, t5), dim=1)\n","\n","print(f't3 = {t3}')\n","print(f't6 = {t6}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"vYv8qYn9wKin","executionInfo":{"status":"ok","timestamp":1677900442375,"user_tz":480,"elapsed":173,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"cf31c61f-1a40-4264-a207-3124be9c65e6"},"execution_count":7,"outputs":[{"output_type":"stream","name":"stdout","text":["t3 = tensor([[1, 2, 3],\n"," [4, 5, 6],\n"," [7, 8, 9]])\n","t6 = tensor([[1, 4],\n"," [2, 5],\n"," [3, 6]])\n"]}]},{"cell_type":"markdown","source":["`cat`関数は`[2, 3]`の形の`t1`と`[1, 3]`の形の`t2`を第0軸で連結して、`[3, 3]`の形の`t3`を生成しています。`cat`関数はデフォルトで第0軸で連結しますが、連結する軸を`dim`引数で指定することもできます。`cat`関数を使うときは、連結されるテンソルは`dim`引数で指定する軸以外は同じ次元である必要があります。`stack`関数は`[3]`の形の`t4`と`[3]`の形の`t5`に第1軸を追加してその軸で連結し、`[3, 2]`の形の`t6`を生成しています。`stack`関数を使うときは、連結されるテンソルは全て同じ形である必要があります。"],"metadata":{"id":"9wOSVVUTsRTS"}},{"cell_type":"markdown","source":["###インデクシングによるテンソルの要素の抽出"],"metadata":{"id":"6w6kqKqj5U0u"}},{"cell_type":"markdown","source":["データを処理していると、テンソルから一部の要素を抽出する必要があるときがあります。そのようなときは、Pythonのリストと同じようにインデクシングにより要素を抽出できます。ただし、`Tensor`クラスにはより高度なインデクシングの方法が実装されています。以下にインデクシングの例とその結果を示します。"],"metadata":{"id":"t_1dcPoQscm-"}},{"cell_type":"code","source":["t1 = torch.tensor([1, 2, 3, 4, 
5, 6, 7, 8, 9]).view(3, 3)\n","t2 = t1[[0, 1]]\n","t3 = t1[:, [0, 2]]\n","t4 = t1[[0, 2, 1], [1, 2, 1]]\n","t5 = t1[[True, False, False]]\n","t6 = t1[t1 % 2 == 0]\n","\n","print(f't2 = {t2}')\n","print(f't3 = {t3}')\n","print(f't4 = {t4}')\n","print(f't5 = {t5}')\n","print(f't6 = {t6}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"sP6OWTze5aBN","executionInfo":{"status":"ok","timestamp":1677900444627,"user_tz":480,"elapsed":131,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"497acee0-fc0c-47c7-81b7-d4d73ad18e40"},"execution_count":8,"outputs":[{"output_type":"stream","name":"stdout","text":["t2 = tensor([[1, 2, 3],\n"," [4, 5, 6]])\n","t3 = tensor([[1, 3],\n"," [4, 6],\n"," [7, 9]])\n","t4 = tensor([2, 9, 5])\n","t5 = tensor([[1, 2, 3]])\n","t6 = tensor([2, 4, 6, 8])\n"]}]},{"cell_type":"markdown","source":["インデクシングの1つの方法は抽出したい次元を指定する方法です。`t2`や`t3`はそれぞれ`t1`の第0軸と第1軸の次元を複数のインデックスを使って指定し、その次元の値を抽出することで得られるテンソルになります。このように1つの軸に対して複数のインデックスを指定することで、複数の次元を抽出した新しいテンソルを生成できます。一方で`t4`は`t1`の2つの軸に同時にインデックスを指定して値を抽出したテンソルになります。このように複数の軸に同時にインデックスを指定した場合、インデックスを座標のように使った値の抽出ができます。`t4`の例では2軸を持つテンソルである`t1`から0行1列目、2行2列目および1行1列目の3つの値を抽出したテンソルになります。\n","\n","インデクシングのもう1つの方法は真偽値を使う方法です。`t5`の例では`t1`の第0軸に対して抽出対象の次元に`True`を、そうでない次元に`False`を設定して値を抽出しています。このように1つの軸の各次元に真偽値を設定することによって、その軸の必要な次元を抽出することができます。また、`t6`の例のように`t1`の全ての要素に対して真偽値を設定し、必要な要素を取り出すこともできます。`t6`は`t1`の要素が偶数かどうかを表す真偽値をインデックスとして使い、抽出されたテンソルです。このようなインデクシングにより、`t1`から偶数のみを抽出したテンソルを得られています。"],"metadata":{"id":"WYhrakzTtRDm"}},{"cell_type":"markdown","source":["###ブロードキャストを使った演算"],"metadata":{"id":"_CUKF2gZKNNj"}},{"cell_type":"markdown","source":["Python演算子を使ったテンソルの計算のところで解説したように、要素毎の演算において異なる形を持つテンソル同士がある一定の条件を満たすとき、それらを使って演算を行うことができます。これは一方のテンソルの形が他方に合わせて自動的に拡張して解釈されるためで、**ブロードキャスト(broadcast)**と呼ばれます。ブロードキャストが起きる条件は以下になります。\n","\n","* 2つのテンソルの軸数が1以上である\n","* 
2つのテンソルを最終軸から比較した場合、各軸の次元が同じであるか、どちらかが1であるか、どちらかの軸が存在しない\n","\n","以下にブロードキャストが起きる例とその演算結果を示します。"],"metadata":{"id":"YnGLIaWjuEgL"}},{"cell_type":"code","source":["t1 = torch.tensor([1, 2]).view(2, 1)\n","t2 = torch.tensor([3, 4, 5])\n","# t1 = [[1], -> [[1, 1, 1],\n","# [2]] broadcast [2, 2, 2]]\n","# t2 = [3, 4, 5] -> [[3, 4, 5],\n","# [3, 4, 5]]\n","t3 = t1 + t2\n","\n","t4 = torch.tensor([1, 2, 3, 4, 5, 6]).view(3, 2)\n","t5 = torch.tensor([3, 4])\n","# t4 = [[1, 2],\n","# [3, 4],\n","# [5, 6]] broadcast \n","# t5 = [3, 4] -> [[3, 4],\n","# [3, 4],\n","# [3, 4]]\n","t6 = t4 + t5\n","\n","print(f't3 = {t3}')\n","print(f't6 = {t6}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"YQvWndTfKRYi","executionInfo":{"status":"ok","timestamp":1677900447954,"user_tz":480,"elapsed":123,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"a490b9ca-cc48-4420-e9e8-b0b8213885a6"},"execution_count":9,"outputs":[{"output_type":"stream","name":"stdout","text":["t3 = tensor([[4, 5, 6],\n"," [5, 6, 7]])\n","t6 = tensor([[ 4, 6],\n"," [ 6, 8],\n"," [ 8, 10]])\n"]}]},{"cell_type":"markdown","source":["`t1`と`t2`の加算では`t1`の形`[2, 1]`と`t2`の形`[3]`が最終軸から比較されます。`t1`の最終軸の次元は1、`t2`の最終軸の次元は3であるので、`t1`の軸が拡張して解釈され、最終軸の次元が3であるかのように扱われます。`t1`の1つ前の軸の次元は2、`t2`は1つ前の軸がないので、`t2`の軸が拡張して解釈され、第0軸が追加されてその次元が2のように扱われます。その結果、`[2, 3]`の形の`t3`が得られます。`t4`と`t5`の加算では`t4`の形`[3, 2]`と`t5`の形`[2]`が最終軸から比較されます。`t4`と`t5`の最終軸の次元はともに2であるので、なにもせずに1つ前の軸が比較されます。`t4`の1つ前の軸の次元は3、`t5`は1つ前の軸がないので、`t5`の軸が拡張して解釈され、第0軸が追加されてその次元が3のように扱われます。その結果、`[3, 
2]`の形の`t6`が得られます。"],"metadata":{"id":"5Wnl2-zAuXWx"}},{"cell_type":"markdown","source":["以上がテンソルに関する解説になります。次はPyTorchを使ったモデルの実装について解説します。"],"metadata":{"id":"5UI3CVNpuaFv"}},{"cell_type":"markdown","source":["##モジュール"],"metadata":{"id":"DSxwYPdIu9tU"}},{"cell_type":"markdown","source":["PyTorchではパラメータを持つモデルを実装する際に決められたやり方に従って実装する必要があります。そのやり方に従うことによって、モデルが持つパラメータの抽出やパラメータのGPUへの転送、モデルを学習に使うのか評価に使うのかの切り替えなどの処理を簡単に実行することができます。以下ではその実装方法について解説します。"],"metadata":{"id":"PXM1CXIqmwxA"}},{"cell_type":"markdown","source":["###モジュールとは"],"metadata":{"id":"EBLd-quSvIZi"}},{"cell_type":"markdown","source":["####多クラスロジスティック回帰のPyTorchを使った実装"],"metadata":{"id":"tmMhwvy8M9yJ"}},{"cell_type":"markdown","source":["PyTorchでは1つ1つのモデルをクラスで定義し、PyTorchの`Module`クラスを継承して実装します。Pythonで`import`して使用するモジュールとPyTorchのモジュールは名称は同じですが異なる概念なので注意してください。以下に多クラスロジスティック回帰モデルを`Module`クラスを継承して実装した例を示します。"],"metadata":{"id":"-6XTOvUev8kh"}},{"cell_type":"code","source":["class MultiClassLogisticRegression(nn.Module):\n"," '''\n"," 多クラスロジスティック回帰\n"," dim_input : 入力次元\n"," num_classes: 分類対象の物体クラス数\n"," '''\n"," def __init__(self, dim_input: int, num_classes: int):\n"," super().__init__()\n"," \n"," self.linear = nn.Linear(dim_input, num_classes)\n","\n"," '''\n"," 順伝播関数\n"," x: 入力データ, [バッチサイズ, 入力次元]\n"," '''\n"," def forward(self, x: torch.Tensor):\n"," l = self.linear(x)\n"," y = l.softmax(dim=1)\n","\n"," return y"],"metadata":{"id":"30q0tlikNDTI","executionInfo":{"status":"ok","timestamp":1677900451789,"user_tz":480,"elapsed":122,"user":{"displayName":"Taro 
Python","userId":"05683982304053166545"}}},"execution_count":10,"outputs":[]},{"cell_type":"markdown","source":["`Module`クラスを継承したモデルクラスのコンストラクタでは、まず親クラスのコンストラクタを呼ぶ必要があります。そのあとにモデルに必要なものを用意します。\n","\n","PyTorchには`Tensor`クラスを拡張した`Parameter`クラスがあり、そのクラスがパラメータの実装になります。しかし、自分で実装するモデルクラスのコンストラクタの中で`Parameter`クラスのインスタンスを生成することはあまりありません。なぜならPyTorchには深層学習で必要となる多くの処理が既にクラスとして実装されており、それらの中でそれぞれに必要なパラメータが生成されるからです。例えば線形関数は`Linear`クラスとして実装されており、上記の`MultiClassLogisticRegression`クラスではそれを使用しています。`Linear`クラスもまた`Module`クラスを継承したクラスとなっており、このクラスのコンストラクタの中で重みやバイアスのパラメータが用意されています。\n","\n","モデルの実装でコンストラクタの他にもう1つ必要になるのが順伝播を行う`forward`関数です。`Module`クラスには`__call__`関数が定義されており、`Module`クラスのインスタンスを関数と同じように使えます。`__call__`関数を呼び出すと`forward`関数が呼ばれ、順伝播が行われます。上記の`forward`関数では線形関数の順伝播によりロジット`l`を得た後、`Tensor`クラスが持つ`softmax`関数を第1軸に適用することで予測確率を得ています。"],"metadata":{"id":"WDH3h59iwdy0"}},{"cell_type":"markdown","source":["####多クラスロジスティック回帰モデルの使用例"],"metadata":{"id":"MgLaieykN7YV"}},{"cell_type":"markdown","source":["以下に実装した多クラスロジスティック回帰モデルの使用例を示します。\n"],"metadata":{"id":"Wca9fjw8xRiS"}},{"cell_type":"code","source":["model = MultiClassLogisticRegression(32 * 32 * 3, 10)\n","\n","# 学習モードに設定\n","model.train()\n","\n","# 評価(推論)モードに設定\n","model.eval()\n","\n","x = torch.normal(0, 1, size=(1, 32 * 32 * 3))\n","y = model(x)\n","\n","for name, parameter in model.named_parameters():\n"," print(f'{name}: shape = {parameter.shape}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"Gsrwn4__OC54","executionInfo":{"status":"ok","timestamp":1677900454057,"user_tz":480,"elapsed":131,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"8dad1a01-cda2-42d8-8e6f-cec9d3a212ca"},"execution_count":11,"outputs":[{"output_type":"stream","name":"stdout","text":["linear.weight: shape = torch.Size([10, 3072])\n","linear.bias: shape = 
torch.Size([10])\n"]}]},{"cell_type":"markdown","source":["`Module`クラスには`train`関数と`eval`関数が実装されており、モデルの学習モードと評価モードを切り替えられるようになっています。今回の多クラスロジスティック回帰モデルでは学習モードと評価モードに違いはありませんが、モデルの中で学習と評価で異なる処理が必要となることもあるため、このような関数を用意して学習モードと評価モードを簡単に切り替えられるようになっています。\n","\n","`Module`クラスに実装されている`named_parameters`関数を使うことで、モデルが持つパラメータをその名前とともに抽出することができます。上記コードの実行結果には`MultiClassLogisticRegression`クラスのパラメータの名前と形が示されています。`MultiClassLogisticRegression`クラスで`Linear`クラスのインスタンスは`linear`変数に登録されており、`Linear`クラスで重みは`weight`変数に、バイアスは`bias`変数に登録されているため、パラメータの名前は上記結果のようになります。"],"metadata":{"id":"ScN28iWhxc_d"}},{"cell_type":"markdown","source":["以上がモジュールの解説になります。次は複数のモジュールを1つにまとめるために用意されているクラスを解説します。"],"metadata":{"id":"N416J6L-xgfL"}},{"cell_type":"markdown","source":["###`Sequential`クラスと`ModuleList`クラス"],"metadata":{"id":"gGS5GhaMygjD"}},{"cell_type":"markdown","source":["PyTorchには複数の`Module`クラスのインスタンスを1つにまとめるための`Sequential`クラスおよび`ModuleList`クラスが用意されています。`Sequential`クラスは複数の処理を直列にまとめて適用するためのクラスです。一方で`ModuleList`クラスはリストのように複数の処理をリストにまとめて保持しておくためのクラスです。ここでは`Sequential`クラスと`ModuleList`クラスを使いながら2つのクラスがどのように異なるかを確認します。"],"metadata":{"id":"oNuX-GwzyySW"}},{"cell_type":"markdown","source":["####Sequentialクラスを使ったモデルの実装"],"metadata":{"id":"kpLz7SPKs4ka"}},{"cell_type":"markdown","source":["以下に`Sequential`クラスを使った順伝播型ニューラルネットワークの実装例を示します。"],"metadata":{"id":"a2TPbZZhz9oD"}},{"cell_type":"code","source":["class FNNSequential(nn.Module):\n"," '''\n"," 順伝播型ニューラルネットワーク\n"," dim_input : 入力次元\n"," num_classes: 分類対象の物体クラス数\n"," '''\n"," def __init__(self, dim_input: int, num_classes: int):\n"," super().__init__()\n"," \n"," self.layers = nn.Sequential(\n"," nn.Linear(dim_input, 256),\n"," nn.ReLU(inplace=True),\n"," nn.Linear(256, 256),\n"," nn.ReLU(inplace=True),\n"," nn.Linear(256, 256),\n"," nn.ReLU(inplace=True),\n"," nn.Linear(256, num_classes)\n"," )\n","\n"," '''\n"," 順伝播関数\n"," x: 入力データ, [バッチサイズ, 入力次元]\n"," '''\n"," def forward(self, x):\n"," l = self.layers(x)\n"," y = l.softmax(dim=1)\n","\n"," return 
y\n"],"metadata":{"id":"EZsmWRWms8mA","executionInfo":{"status":"ok","timestamp":1677900462202,"user_tz":480,"elapsed":134,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}}},"execution_count":12,"outputs":[]},{"cell_type":"markdown","source":["`FNNSequential`クラスのコンストラクタの中で`Sequential`クラスを使用しています。`Sequential`クラスのコンストラクタには適用したい処理のクラスインスタンスを渡します。ここでは線形関数である`Linear`クラスとReLU関数である`ReLU`クラスのインスタンスを渡しています。これらの処理は引数で渡した順番で直列に適用されます。`forward`関数の実装に示すように、順伝播を行うには`Sequential`クラスのインスタンスに入力を渡します。得られる結果は`Sequential`クラスのコンストラクタに渡した全ての処理を適用したものになります。"],"metadata":{"id":"u43DLJhj0CKa"}},{"cell_type":"markdown","source":["####ModuleListクラスを使ったモデルの実装"],"metadata":{"id":"9ktyaLi1v-3F"}},{"cell_type":"markdown","source":["以下に`ModuleList`クラスを使った順伝播型ニューラルネットワークの実装例を示します。"],"metadata":{"id":"kU5H_bE50PMo"}},{"cell_type":"code","source":["class FNNModuleList(nn.Module):\n"," '''\n"," 順伝播型ニューラルネットワーク\n"," dim_input : 入力次元\n"," num_classes: 分類対象の物体クラス数\n"," '''\n"," def __init__(self, dim_input: int, num_classes: int):\n"," super().__init__()\n"," \n"," layers = [nn.Linear(dim_input, 256)]\n"," layers += [nn.Linear(256, 256) for _ in range(2)]\n"," layers.append(nn.Linear(256, num_classes))\n"," self.layers = nn.ModuleList(layers)\n","\n"," '''\n"," 順伝播関数\n"," x: 入力データ, [バッチサイズ, 入力次元]\n"," '''\n"," def forward(self, x):\n"," for layer in self.layers[:-1]:\n"," x = F.relu(layer(x))\n"," l = self.layers[-1](x)\n"," y = l.softmax(dim=1)\n"," \n"," return y"],"metadata":{"id":"q3w97GU6wBzb","executionInfo":{"status":"ok","timestamp":1677900464565,"user_tz":480,"elapsed":134,"user":{"displayName":"Taro 
Python","userId":"05683982304053166545"}}},"execution_count":13,"outputs":[]},{"cell_type":"markdown","source":["`ModuleList`クラスのコンストラクタには必要な処理のクラスを集めたリストを渡します。このように複数の処理をまとめたリストを使う場合にはPythonクラスのリストではなく`ModuleList`クラスを使う必要があります。`ModuleList`クラスを使わずにPythonのリストをそのままクラス変数に登録してしまった場合、モデルが持つパラメータの管理がうまくできなくなります。`ModuleList`クラスの使い方はPythonリストと同じで、順伝播時には`for`ループなどを使って要素を抽出して使います。"],"metadata":{"id":"HQu2eEFu0f2Q"}},{"cell_type":"markdown","source":["####`Sequential`クラスと`ModuleList`クラスを使ったFNNモデルの使用例"],"metadata":{"id":"Px2QMu0mw-zw"}},{"cell_type":"code","source":["model_sequential = FNNSequential(32 * 32 * 3, 10)\n","model_modulelist = FNNModuleList(32 * 32 * 3, 10)\n","\n","model_sequential.eval()\n","model_modulelist.eval()\n","\n","x = torch.normal(0, 1, size=(1, 32 * 32 * 3))\n","y_sequential = model_sequential(x)\n","y_modulelist = model_modulelist(x)\n","\n","print(f'y_sequential = {y_sequential}')\n","print(f'y_modulelist = {y_modulelist}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"q5njzERaxDCk","executionInfo":{"status":"ok","timestamp":1677900466443,"user_tz":480,"elapsed":112,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"5999ffb1-e850-4f53-d92e-668567f215e2"},"execution_count":14,"outputs":[{"output_type":"stream","name":"stdout","text":["y_sequential = tensor([[0.1015, 0.0933, 0.0947, 0.0955, 0.1072, 0.0904, 0.1054, 0.1059, 0.0952,\n"," 0.1109]], grad_fn=)\n","y_modulelist = tensor([[0.0947, 0.0904, 0.1014, 0.0958, 0.0994, 0.1121, 0.1010, 0.0981, 0.0946,\n"," 0.1125]], 
grad_fn=)\n"]}]},{"cell_type":"markdown","source":["上記に示すように、`Sequential`クラスを使った場合と`ModuleList`クラスを使った場合で、モデルの外から見た違いはありません。では2つのクラスをモデルの中でどのように使い分けるかというと、`Sequential`クラスはまとまった複数の処理を1つの処理として適用するときによく使われます。例えば線形関数とReLU関数は1セットの処理と考えられるので、`Sequential`クラスで`Linear`クラスと`ReLU`クラスをまとめます。一方で`ModuleList`クラスは同じ処理を複数回適用するようなときに使われます。例えば`Sequential`クラスでまとめた線形関数とReLU関数を3回適用したい場合などは、`Sequential`クラスのインスタンスを3つ`ModuleList`クラスでまとめます。"],"metadata":{"id":"DZ6MnvZr0kr-"}},{"cell_type":"markdown","source":["以上がPyTorchのモジュールの解説になります。最後にPyTorchの誤差逆伝播のために実装された自動微分について解説します。"],"metadata":{"id":"EzGn3g3b0lhM"}},{"cell_type":"markdown","source":["##自動微分"],"metadata":{"id":"TwNBjhRc0tR1"}},{"cell_type":"markdown","source":["PyTorchでは誤差逆伝播を簡単に行うために自動微分という仕組みが実装されています。第3.1節で実装した多クラスロジスティック回帰モデルでは勾配の計算を実装してパラメータの更新を行いましたが、自動微分があるPyTorchを使えば勾配の計算を実装する必要はありません。\n","\n","第3.2節で解説したように、DNNはグラフ構造として捉えられます。順伝播ではグラフを順方向に辿って出力を計算し、誤差逆伝播では順伝播とは逆方向に辿って勾配を計算します。また、誤差逆伝播では連鎖律により現在の処理の勾配と逆伝播されてきた勾配の積を計算すれば、目的関数のパラメータ方向の勾配を計算できました。これらを考慮すると、順伝播でグラフ構造を把握し、かつ個々の処理で勾配の計算が実装されていれば誤差逆伝播を行えることになります。\n","\n","PyTorchではこのような順伝播時のグラフ構造の記録と個々の処理の勾配計算の実装により自動微分を実現しています。PyTorchでは入力から出力までで、適用された関数や計算の途中結果がグラフを構築するように記録され、誤差逆伝播時にはそれを逆にたどることで全てのパラメータの勾配が計算されます。グラフの記録を漏れなく行うためには、テンソルに対する処理を全てPyTorchの関数で行う必要があります。PyTorchの関数にない独自の処理を行いたい場合には、PyTorchの関数の設計方針に従って関数を実装することも可能です。しかし、多くの場合はPyTorchの関数を組み合わせれば目的の処理を実現できます。"],"metadata":{"id":"iiNp8tCw01XW"}},{"cell_type":"markdown","source":["以下に自動微分を使った誤差逆伝播の例を示します。\n"],"metadata":{"id":"jcNUHLEO05-z"}},{"cell_type":"code","source":["linear = nn.Linear(32 * 32 * 3, 10)\n","\n","# 入力とラベルの用意\n","x = torch.normal(0, 1, size=(1, 32 * 32 * 3))\n","y = torch.tensor([0])\n","\n","# 目的関数(交差エントロピー誤差)の計算\n","y_pred = linear(x)\n","loss = F.cross_entropy(y_pred, y)\n","\n","# 誤差逆伝播\n","loss.backward()\n","\n","print(f'linear.weight.grad = {linear.weight.grad}')\n","print(f'linear.bias.grad = 
{linear.bias.grad}')"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"18a-IwS0CP6F","executionInfo":{"status":"ok","timestamp":1677900472339,"user_tz":480,"elapsed":112,"user":{"displayName":"Taro Python","userId":"05683982304053166545"}},"outputId":"570f3f3d-3c28-41ef-ccd3-af3c73471949"},"execution_count":15,"outputs":[{"output_type":"stream","name":"stdout","text":["linear.weight.grad = tensor([[ 0.0814, 0.1924, -1.3381, ..., -0.5630, -0.7319, 0.1235],\n"," [-0.0034, -0.0081, 0.0564, ..., 0.0237, 0.0308, -0.0052],\n"," [-0.0124, -0.0294, 0.2045, ..., 0.0860, 0.1119, -0.0189],\n"," ...,\n"," [-0.0074, -0.0174, 0.1209, ..., 0.0509, 0.0661, -0.0112],\n"," [-0.0208, -0.0491, 0.3417, ..., 0.1438, 0.1869, -0.0315],\n"," [-0.0080, -0.0189, 0.1316, ..., 0.0554, 0.0720, -0.0121]])\n","linear.bias.grad = tensor([-0.8701, 0.0367, 0.1330, 0.1623, 0.0346, 0.0345, 0.0828, 0.0786,\n"," 0.2222, 0.0856])\n"]}]},{"cell_type":"markdown","source":["誤差逆伝播を行う際は、まずは入力に対して順伝播を行い、目的関数を計算します。あとは`Tensor`クラスに実装された`backward`関数を呼ぶだけで誤差逆伝播が完了します。\n","\n","勾配は上記に示すように`Parameter`クラスのインスタンスの`grad`変数に格納されています。パラメータの更新はこの勾配を使って行われます。"],"metadata":{"id":"Ht5nIrHT09Pz"}},{"cell_type":"markdown","source":["以上がPyTorchの基礎の解説になります。"],"metadata":{"id":"S281tbQM1CP6"}}]} -------------------------------------------------------------------------------- /data/apple.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/apple.jpg -------------------------------------------------------------------------------- /data/classification/airplane.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/airplane.jpg -------------------------------------------------------------------------------- 
/data/classification/automobile.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/automobile.jpg -------------------------------------------------------------------------------- /data/classification/bird.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/bird.jpg -------------------------------------------------------------------------------- /data/classification/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/cat.jpg -------------------------------------------------------------------------------- /data/classification/deer.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/deer.jpg -------------------------------------------------------------------------------- /data/classification/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/dog.jpg -------------------------------------------------------------------------------- /data/classification/frog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/frog.jpg 
-------------------------------------------------------------------------------- /data/classification/horse.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/horse.jpg -------------------------------------------------------------------------------- /data/classification/ship.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/ship.jpg -------------------------------------------------------------------------------- /data/classification/truck.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/classification/truck.jpg -------------------------------------------------------------------------------- /data/coffee.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/coffee.jpg -------------------------------------------------------------------------------- /data/coffee_noise.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/coffee_noise.jpg -------------------------------------------------------------------------------- /data/cosmos.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/cosmos.jpg 
-------------------------------------------------------------------------------- /data/image_captioning/adorable-1849992_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/adorable-1849992_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/africa-1170179_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/africa-1170179_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/airplane-3702676_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/airplane-3702676_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/automotive-1846910_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/automotive-1846910_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/beach-1837030_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/beach-1837030_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/caravan-339564_1920.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/caravan-339564_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/cat-4467818_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/cat-4467818_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/cherry-1468933_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/cherry-1468933_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/couple-955926_1280.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/couple-955926_1280.jpg -------------------------------------------------------------------------------- /data/image_captioning/dog-7367949_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/dog-7367949_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/hit-1407826_1920.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/hit-1407826_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/man-498473_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/man-498473_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/musician-743973_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/musician-743973_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/port-5788261_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/port-5788261_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/profile-7579739_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/profile-7579739_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/ural-owl-4808774_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/ural-owl-4808774_1920.jpg 
-------------------------------------------------------------------------------- /data/image_captioning/wine-bar-2139973_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/wine-bar-2139973_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/woman-3432069_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/woman-3432069_1920.jpg -------------------------------------------------------------------------------- /data/image_captioning/zebras-1883654_1920.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/image_captioning/zebras-1883654_1920.jpg -------------------------------------------------------------------------------- /data/object_detection/image1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/object_detection/image1.jpg -------------------------------------------------------------------------------- /data/object_detection/image2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/object_detection/image2.jpg -------------------------------------------------------------------------------- /data/object_detection/image3.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/object_detection/image3.jpg -------------------------------------------------------------------------------- /data/object_detection/image4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-img-recog/python_image_recognition/60711d7e2b8d8427d50394f2b6bdf1a915eea834/data/object_detection/image4.jpg -------------------------------------------------------------------------------- /software_version.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"toc_visible":true,"authorship_tag":"ABX9TyPOZRUTzIPe9UbWWMiostfP"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"markdown","source":["# Pythonで学ぶ画像認識\n","## Colab実行環境の確認"],"metadata":{"id":"vfQvOnC9U9Hq"}},{"cell_type":"markdown","source":["### 確認日時"],"metadata":{"id":"ktmUldoUW59U"}},{"cell_type":"code","source":["import datetime\n","import pytz\n","\n","now = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))\n","print(now)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"b0qj0_vPW2fN","executionInfo":{"status":"ok","timestamp":1673232717453,"user_tz":-540,"elapsed":5,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"b0e75f0e-c517-42a3-fff3-4919bed150cd"},"execution_count":15,"outputs":[{"output_type":"stream","name":"stdout","text":["2023-01-09 11:51:57.245406+09:00\n"]}]},{"cell_type":"markdown","source":["### Pythonバージョン"],"metadata":{"id":"-XTTYtWqVuZZ"}},{"cell_type":"code","source":["! 
python -V"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"3QInpPaeVthL","executionInfo":{"status":"ok","timestamp":1673232723305,"user_tz":-540,"elapsed":252,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"6a9954e6-c3c4-4fdd-a84a-573040974c69"},"execution_count":16,"outputs":[{"output_type":"stream","name":"stdout","text":["Python 3.8.16\n"]}]},{"cell_type":"markdown","source":["### PyTorchバージョン"],"metadata":{"id":"5DXr5X8uVFIC"}},{"cell_type":"code","source":["import torch\n","print(torch.__version__)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"uxiX9A10VDn8","executionInfo":{"status":"ok","timestamp":1673232724728,"user_tz":-540,"elapsed":270,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"c706d5ce-4773-4c2d-a069-65e2b5cb2620"},"execution_count":17,"outputs":[{"output_type":"stream","name":"stdout","text":["1.13.0+cu116\n"]}]},{"cell_type":"markdown","source":["### Torchvisionバージョン"],"metadata":{"id":"K7p4MXwhWXUy"}},{"cell_type":"code","source":["import torchvision\n","print(torchvision.__version__)"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"MP2Wng6XWFBp","executionInfo":{"status":"ok","timestamp":1673232726590,"user_tz":-540,"elapsed":240,"user":{"displayName":"Katsuyuki Nakamura","userId":"06073403914299127731"}},"outputId":"02b73319-b1da-4265-af55-9e4b8531f82a"},"execution_count":18,"outputs":[{"output_type":"stream","name":"stdout","text":["0.14.0+cu116\n"]}]},{"cell_type":"markdown","source":["### CUDAバージョン"],"metadata":{"id":"A7mWoKTVVMgY"}},{"cell_type":"code","execution_count":19,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"4N8tI0cQUv7V","executionInfo":{"status":"ok","timestamp":1673232728114,"user_tz":-540,"elapsed":390,"user":{"displayName":"Katsuyuki 
Nakamura","userId":"06073403914299127731"}},"outputId":"b0d58f37-21b0-4022-e559-054202e50b08"},"outputs":[{"output_type":"stream","name":"stdout","text":["nvcc: NVIDIA (R) Cuda compiler driver\n","Copyright (c) 2005-2021 NVIDIA Corporation\n","Built on Sun_Feb_14_21:12:58_PST_2021\n","Cuda compilation tools, release 11.2, V11.2.152\n","Build cuda_11.2.r11.2/compiler.29618528_0\n"]}],"source":["!nvcc --version"]}]} --------------------------------------------------------------------------------