├── .gitignore
├── Output
│   ├── Images
│   │   ├── staying_open_curve.png
│   │   └── top2_accuracy_curve.png
│   └── output.txt
├── README.md
├── bot_table_draft.py
├── create_model.py
├── create_test_models.py
├── models.py
├── predict_from_log.py
├── preprocessing.py
├── requirements.txt
└── utils.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
Data/*
Saved_Models/*
__pycache__
.ipynb_checkpoints
.DS_Store

--------------------------------------------------------------------------------
/Output/Images/staying_open_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RyanSaxe/MagicDraftBot/bbf8bdbaf66bc421c54725777221a38479224d3f/Output/Images/staying_open_curve.png

--------------------------------------------------------------------------------
/Output/Images/top2_accuracy_curve.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RyanSaxe/MagicDraftBot/bbf8bdbaf66bc421c54725777221a38479224d3f/Output/Images/top2_accuracy_curve.png

--------------------------------------------------------------------------------
/Output/output.txt:
--------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------
DRAFTNET TRAINING OUTPUT
------------------------------------------------------------------------------------------------------------------------

Epoch: 0
Seconds = 19.957993984222412 Accuracy = 0.5504959873385346 Total Loss = 107.85201370716095
Epoch: 1
Seconds = 16.26480221748352 Accuracy = 0.6076770793366587 Total Loss = 73.61702418327332
Epoch: 2
Seconds = 17.346413135528564 Accuracy = 0.6097646757897432 Total Loss = 72.98482036590576
Epoch: 3
Seconds = 23.625234842300415 Accuracy = 0.6104281238009975 Total Loss = 72.53888547420502
Epoch: 4
Seconds = 19.451050996780396 Accuracy = 0.6097626774523596 Total Loss = 72.28247427940369
Epoch: 5
Seconds = 23.132797956466675 Accuracy = 0.6105200473206291 Total Loss = 71.72604870796204
Epoch: 6
Seconds = 23.776450634002686 Accuracy = 0.6111282079976126 Total Loss = 71.11808204650879
Epoch: 7
Seconds = 25.364643096923828 Accuracy = 0.6111328707848406 Total Loss = 70.6046531200409
Epoch: 8
Seconds = 21.630998849868774 Accuracy = 0.6117410314618237 Total Loss = 69.95587086677551
Epoch: 9
Seconds = 19.092872858047485 Accuracy = 0.6127202167796392 Total Loss = 69.32743668556213
Epoch: 10
Seconds = 18.844078063964844 Accuracy = 0.6120014814341138 Total Loss = 69.13177990913391
Epoch: 11
Seconds = 18.992830753326416 Accuracy = 0.613095238095238 Total Loss = 68.83444535732269
Epoch: 12
Seconds = 19.953551054000854 Accuracy = 0.6138579368631969 Total Loss = 68.62059080600739
Epoch: 13
Seconds = 18.14860510826111 Accuracy = 0.6145027337255402 Total Loss = 68.51463639736176
Epoch: 14
Seconds = 18.21041774749756 Accuracy = 0.6155838342499041 Total Loss = 68.38579654693604
Epoch: 15
Seconds = 17.898975133895874 Accuracy = 0.6156750916570746 Total Loss = 68.3488575220108
Epoch: 16
Seconds = 18.01970887184143 Accuracy = 0.6163432024555571 Total Loss = 68.28388357162476
Epoch: 17
Seconds = 17.16843819618225 Accuracy = 0.6173656850833439 Total Loss = 68.15571141242981
Epoch: 18
Seconds = 17.40714693069458 Accuracy = 0.6181370433132967 Total Loss = 68.03437793254852
Epoch: 19
Seconds = 17.467884063720703 Accuracy = 0.6176634373534554 Total Loss = 68.10264992713928
Epoch: 20
Seconds = 17.71033525466919 Accuracy = 0.6185999914737603 Total Loss = 68.02939522266388
Epoch: 21
Seconds = 17.88291621208191 Accuracy = 0.6187432056528969 Total Loss = 67.9691276550293
Epoch: 22
Seconds = 17.676794052124023 Accuracy = 0.6193673530289466 Total Loss = 67.85765540599823
Epoch: 23
Seconds = 17.699449062347412 Accuracy = 0.6199508675448694 Total Loss = 67.79436147212982
Epoch: 24
Seconds = 17.673818111419678 Accuracy = 0.6197989939037386 Total Loss = 67.83336758613586
Epoch: 25
Seconds = 17.58336305618286 Accuracy = 0.6199621914567082 Total Loss = 67.77263963222504
Epoch: 26
Seconds = 17.05748987197876 Accuracy = 0.6199082363473594 Total Loss = 67.77091360092163
Epoch: 27
Seconds = 17.215869188308716 Accuracy = 0.6202306347785309 Total Loss = 67.72248876094818
Epoch: 28
Seconds = 16.987221002578735 Accuracy = 0.6199262213838088 Total Loss = 67.76163053512573
Epoch: 29
Seconds = 17.4461932182312 Accuracy = 0.6206109849938184 Total Loss = 67.65526294708252
Epoch: 30
Seconds = 17.679396152496338 Accuracy = 0.6206542823037902 Total Loss = 67.70792353153229
Epoch: 31
Seconds = 16.59309482574463 Accuracy = 0.6205243903738753 Total Loss = 67.70479822158813
Epoch: 32
Seconds = 17.07687783241272 Accuracy = 0.6210839248411987 Total Loss = 67.62405252456665
Epoch: 33
Seconds = 17.101009845733643 Accuracy = 0.6211252238137868 Total Loss = 67.63189029693604
Epoch: 34
Seconds = 16.984009981155396 Accuracy = 0.6208447904676643 Total Loss = 67.59802067279816
Epoch: 35
Seconds = 16.66465401649475 Accuracy = 0.6210712687044377 Total Loss = 67.5817118883133
Epoch: 36
Seconds = 16.306640148162842 Accuracy = 0.6214736006309414 Total Loss = 67.53021693229675
Epoch: 37
Seconds = 16.54048728942871 Accuracy = 0.6214642750564863 Total Loss = 67.53648614883423
Epoch: 38
Seconds = 16.434791088104248 Accuracy = 0.6210146491452444 Total Loss = 67.59249341487885
Epoch: 39
Seconds = 16.323299884796143 Accuracy = 0.6215635258131901 Total Loss = 67.51976609230042
Epoch: 40
Seconds = 16.729887008666992 Accuracy = 0.621408987722215 Total Loss = 67.5118910074234
Epoch: 41
Seconds = 16.450814962387085 Accuracy = 0.6216068231231614 Total Loss = 67.49663174152374
Epoch: 42
Seconds = 16.71395492553711 Accuracy = 0.6221810120646288 Total Loss = 67.47971105575562
Epoch: 43
Seconds = 16.5125150680542 Accuracy = 0.6219465404783221 Total Loss = 67.45536267757416
Epoch: 44
Seconds = 16.304292917251587 Accuracy = 0.6220544506970199 Total Loss = 67.42323660850525
Epoch: 45
Seconds = 16.48198175430298 Accuracy = 0.6220238095238096 Total Loss = 67.40113258361816
Epoch: 46
Seconds = 16.686541080474854 Accuracy = 0.6220051583748988 Total Loss = 67.41931986808777
Epoch: 47
Seconds = 16.417071104049683 Accuracy = 0.6220044922624376 Total Loss = 67.4485434293747
Epoch: 48
Seconds = 16.344688177108765 Accuracy = 0.6217760156882807 Total Loss = 67.45842730998993
Epoch: 49
Seconds = 16.36019992828369 Accuracy = 0.6220138178368932 Total Loss = 67.43850433826447
Epoch: 50
Seconds = 16.723531007766724 Accuracy = 0.6224494553864517 Total Loss = 67.40670561790466
Epoch: 51
Seconds = 16.393635988235474 Accuracy = 0.622544709468389 Total Loss = 67.30758261680603
Epoch: 52
Seconds = 16.51868438720703 Accuracy = 0.6225453755808498 Total Loss = 67.34248566627502
Epoch: 53
Seconds = 16.57243275642395 Accuracy = 0.622135050304813 Total Loss = 67.3712830543518
Epoch: 54
Seconds = 16.122026920318604 Accuracy = 0.62213305196743 Total Loss = 67.37864625453949
Epoch: 55
Seconds = 16.296875953674316 Accuracy = 0.6223168990066932 Total Loss = 67.35871803760529
Epoch: 56
Seconds = 16.285767793655396 Accuracy = 0.6226699386110757 Total Loss = 67.3107739686966
Epoch: 57
Seconds = 16.67344617843628 Accuracy = 0.6224960832587288 Total Loss = 67.32120275497437
Epoch: 58
Seconds = 16.738306045532227 Accuracy = 0.6225566994926889 Total Loss = 67.34807646274567
Epoch: 59
Seconds = 16.877129793167114 Accuracy = 0.6225433772434668 Total Loss = 67.34267580509186
Epoch: 60
Seconds = 16.764015913009644 Accuracy = 0.6226399635503261 Total Loss = 67.32016730308533
Epoch: 61
Seconds = 17.176553964614868 Accuracy = 0.6225906712282048 Total Loss = 67.34142196178436
Epoch: 62
Seconds = 16.82524585723877 Accuracy = 0.6227811793920792 Total Loss = 67.26094186306
Epoch: 63
Seconds = 16.946338176727295 Accuracy = 0.6226306379758706 Total Loss = 67.30590987205505
Epoch: 64
Seconds = 16.12708878517151 Accuracy = 0.6231268917593896 Total Loss = 67.25750720500946
Epoch: 65
Seconds = 16.749572038650513 Accuracy = 0.6227205631581191 Total Loss = 67.30013382434845
Epoch: 66
Seconds = 16.909940004348755 Accuracy = 0.6230263087777637 Total Loss = 67.26832509040833
Epoch: 67
Seconds = 17.226252794265747 Accuracy = 0.6222529522104276 Total Loss = 67.35327887535095
Epoch: 68
Seconds = 16.77109408378601 Accuracy = 0.6224048258515584 Total Loss = 67.27689337730408
Epoch: 69
Seconds = 17.348408222198486 Accuracy = 0.6226486230123206 Total Loss = 67.25793409347534
Epoch: 70
Seconds = 16.702624797821045 Accuracy = 0.6226566163618535 Total Loss = 67.29876351356506
Epoch: 71
Seconds = 17.27109932899475 Accuracy = 0.6226366329880206 Total Loss = 67.25256371498108
Epoch: 72
Seconds = 17.180596113204956 Accuracy = 0.6230616127382018 Total Loss = 67.20068538188934
Epoch: 73
Seconds = 17.34205412864685 Accuracy = 0.6231175661849342 Total Loss = 67.2172224521637
Epoch: 74
Seconds = 17.273471117019653 Accuracy = 0.6230143187534638 Total Loss = 67.24772191047668
Epoch: 75
Seconds = 17.324405193328857 Accuracy = 0.6233567005584685 Total Loss = 67.19549763202667
Epoch: 76
Seconds = 17.525264978408813 Accuracy = 0.6233567005584687 Total Loss = 67.17989909648895
Epoch: 77
Seconds = 17.48654818534851 Accuracy = 0.6229310546958268 Total Loss = 67.21070766448975
Epoch: 78
Seconds = 17.304057121276855 Accuracy = 0.6229417124952039 Total Loss = 67.20252203941345
Epoch: 79
Seconds = 18.05297303199768 Accuracy = 0.6231981657927275 Total Loss = 67.16486728191376
Epoch: 80
Seconds = 17.196092128753662 Accuracy = 0.6231029117107898 Total Loss = 67.21009850502014
Epoch: 81
Seconds = 17.57267117500305 Accuracy = 0.6232001641301103 Total Loss = 67.21346139907837
Epoch: 82
Seconds = 16.853698253631592 Accuracy = 0.6229756842307198 Total Loss = 67.16357457637787
Epoch: 83
Seconds = 16.771862983703613 Accuracy = 0.6234992486251438 Total Loss = 67.1435956954956
Epoch: 84
Seconds = 16.914061069488525 Accuracy = 0.623479931363772 Total Loss = 67.16676318645477
Epoch: 85
Seconds = 16.346294164657593 Accuracy = 0.6228531195378781 Total Loss = 67.18925154209137
Epoch: 86
Seconds = 16.810386896133423 Accuracy = 0.6235818465703202 Total Loss = 67.15256237983704
Epoch: 87
Seconds = 16.828124046325684 Accuracy = 0.6234066589930514 Total Loss = 67.14895617961884
Epoch: 88
Seconds = 16.865461111068726 Accuracy = 0.6233307221724856 Total Loss = 67.16316771507263
Epoch: 89
Seconds = 16.946651220321655 Accuracy = 0.6232581159142262 Total Loss = 67.15129625797272
Epoch: 90
Seconds = 15.517289876937866 Accuracy = 0.6228964168478491 Total Loss = 67.17679846286774
Epoch: 91
Seconds = 16.123286962509155 Accuracy = 0.6230802638871126 Total Loss = 67.1955144405365
Epoch: 92
Seconds = 16.042288780212402 Accuracy = 0.6228198139148229 Total Loss = 67.16623449325562
Epoch: 93
Seconds = 15.25264286994934 Accuracy = 0.6230609466257407 Total Loss = 67.10761344432831
Epoch: 94
Seconds = 15.948694944381714 Accuracy = 0.6236284744425972 Total Loss = 67.0743043422699
Epoch: 95
Seconds = 15.639892816543579 Accuracy = 0.6236737700899521 Total Loss = 67.10946142673492
Epoch: 96
Seconds = 16.128363132476807 Accuracy = 0.6235891738073922 Total Loss = 67.09574687480927
Epoch: 97
Seconds = 15.428593158721924 Accuracy = 0.6238782666155094 Total Loss = 67.05200445652008
Epoch: 98
Seconds = 14.995767831802368 Accuracy = 0.6240547864177005 Total Loss = 67.06211280822754
Epoch: 99
Seconds = 15.476691961288452 Accuracy = 0.6236830956644073 Total Loss = 67.05612003803253

------------------------------------------------------------------------------------------------------------------------
TESTING ACCURACY: AVG = 0.6245763524747411
------------------------------------------------------------------------------------------------------------------------

Pick 1 Accuracy: 0.6955013428827216
Pick 2 Accuracy: 0.5017905102954342
Pick 3 Accuracy: 0.489816472694718
Pick 4 Accuracy: 0.4870188003581021
Pick 5 Accuracy: 0.5090644583706356
Pick 6 Accuracy: 0.5212623097582811
Pick 7 Accuracy: 0.5525962399283796
Pick 8 Accuracy: 0.5566248880931065
Pick 9 Accuracy: 0.5812444046553268
Pick 10 Accuracy: 0.5982542524619516
Pick 11 Accuracy: 0.6236571172784243
Pick 12 Accuracy: 0.6622649955237243
Pick 13 Accuracy: 0.7237018800358102
Pick 14 Accuracy: 1.0
Pick 15 Accuracy: 0.6074306177260519
Pick 16 Accuracy: 0.5783348254252462
Pick 17 Accuracy: 0.5719561324977619
Pick 18 Accuracy: 0.5662488809310654
Pick 19 Accuracy: 0.5732990152193375
Pick 20 Accuracy: 0.5736347358997315
Pick 21 Accuracy: 0.5865040286481648
Pick 22 Accuracy: 0.5942256042972247
Pick 23 Accuracy: 0.6026186213070726
Pick 24 Accuracy: 0.6150402864816473
Pick 25 Accuracy: 0.6417860340196956
Pick 26 Accuracy: 0.6678603401969562
Pick 27 Accuracy: 0.7082587287376902
Pick 28 Accuracy: 1.0
Pick 29 Accuracy: 0.5950089525514771
Pick 30 Accuracy: 0.5757609668755596
Pick 31 Accuracy: 0.5784467323187108
Pick 32 Accuracy: 0.57408236347359
Pick 33 Accuracy: 0.579341987466428
Pick 34 Accuracy: 0.5810205908683975
Pick 35 Accuracy: 0.581468218442256
Pick 36 Accuracy: 0.5913160250671441
Pick 37 Accuracy: 0.5964637421665174
Pick 38 Accuracy: 0.6117949865711728
Pick 39 Accuracy: 0.63272157564906
Pick 40 Accuracy: 0.6546553267681289
Pick 41 Accuracy: 0.690129811996419
Pick 42 Accuracy: 1.0

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# MagicDraftBot

My initial attempt at a Magic: the Gathering (MTG) Draft AI.

### Game Description

MTG Draft is a game where 8 players sit at a table and each open a pack of 14 MTG cards. Each player selects one card (hidden) and passes the remainder (hidden) to the person on their left. This repeats until no cards are left, and then happens two more times (with two more packs) until each player has picked 42 cards. Those 42 cards are then used to construct a deck to play with.

*Note: I currently don't have data corresponding to decks built or games played, so this agent only participates in the card selection (drafting) process.*

### Algorithmic Structure

1. Cluster the dataset into archetypes via KMeans
1. For each cluster, learn a pick order from the picks where drafters are likely committed (Pack 2 Pick 2 through Pack 3 Pick 14)
1. Initialize the draft model's weights with the normalized learned pick orders
1. Compute the bias towards each archetype given the cards in the pool
1. Elevate the archetypal bias by adding a decaying function (simulated "staying open")
1. Use this final archetypal bias to select the best card in the pack

I use L2 regularization on the weights in an attempt to avoid exploding weights. However, this doesn't appear to do enough to fight against an overly strong bias towards one color. I tried putting a ceiling on the bias, but that led to the bot refusing to commit to an archetype and/or never passing rares. I'm still working on a solution to this.

*Note: I cannot share the trained model nor any of the data used to train it.*

### Results (Accuracy on Human Picks)

The plot below displays the resulting accuracy on the test set. The blue line is the actual accuracy of the bot, while the yellow line is the accuracy if the bot's first or second choice was what the human selected.

![Accuracy Plot](https://raw.githubusercontent.com/RyanSaxe/MagicDraftBot/master/Output/Images/top2_accuracy_curve.png)

### External Content about this Agent:

* [Lords of Limited Podcast Episode (@16:25)](https://lordsoflimited.libsyn.com/lords-of-limited-129-bot-design-with-ryan-saxe)
* [Article explaining the math behind the algorithm and interpreting the results](https://draftsim.com/ryan-saxe-bot-model/)
* [Article with learned archetypal pick orders for Theros Beyond Death](https://draftsim.com/theros-beyond-death-early-analysis/)

### Next Step: Add More Features

Currently the bot does not look at features like converted mana cost (CMC). CMC is inherently encoded in the value of cards, but it is important for the bot to understand that even if a 5-CMC card is "better" than a 2-CMC card, it should take the cheaper card if its pool already has plenty of expensive cards.

Furthermore, the bot only knows synergy as it relates to archetypes, not cards. It takes Opt higher if it has Mad Ratter, but only because it knows Opt and Mad Ratter are both high picks in Izzet; it doesn't know that Opt is specifically good with Mad Ratter. Creating a card-to-card synergy matrix could help the bot take non-blue cards that draw additional cards when there is a Mad Ratter in the pool, which is not something it can currently do.
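As a rough sketch (not implemented in this repo; `final_pools` is a stand-in for the per-draft pool vectors that `aggregate_drafts` in preprocessing.py already computes), such a matrix could be estimated from co-occurrence in final pools:

```python
import torch

def synergy_matrix(final_pools: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """PMI-style synergy: how much more often two cards end up drafted
    together than their individual play rates would predict.

    final_pools: (n_drafts, n_cards) card counts in each finished pool.
    """
    presence = (final_pools > 0).float()
    n_drafts = presence.shape[0]
    joint = (presence.t() @ presence) / n_drafts      # P(i and j in same pool)
    marginal = presence.mean(dim=0, keepdim=True)     # P(i in a pool)
    independent = marginal.t() @ marginal
    # positive where a pair co-occurs more than chance (e.g. Opt + Mad Ratter)
    return torch.log((joint + eps) / (independent + eps))
```

A pool-conditioned bias like `torch.matmul(pool, synergy)` could then be added alongside the archetype pull inside `DraftNet.forward`.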
### Example Drafts

Below are some decks and drafts from the first iteration of this bot on MTGO. It used a different modeling of staying open and no bias ceiling, but a similar implementation. I will update this section as I continue to test the bot.

Draft: https://magic.flooey.org/draft/show?id=1_fn_hicUYtWQyvXSbjjhtsIy1k

![3-0](https://pbs.twimg.com/media/ELFSpb4XkAAYCrb?format=jpg&name=small)

Draft: https://magic.flooey.org/draft/show?id=9GymyrTy70YDYfHiKh2HRvNodao

![0-0](https://pbs.twimg.com/media/ELFUnzvWkAEFtGn?format=jpg&name=small)

Draft: https://magic.flooey.org/draft/show?id=xg0h8Jo41w2TcHNwwAQ-_UlxxFQ

![0-0](https://pbs.twimg.com/media/ELT3FbHW4AA3V9z?format=jpg&name=small)

### Full Table of Bots

Below are links to a draft where every seat at the table was my bot:

seat 1: https://magic.flooey.org/draft/show?id=Y66phkMHXy1Fkxinlv2_0mgnyFM

seat 2: https://magic.flooey.org/draft/show?id=ISj69cO7itncFby65oNGRxzSsSM

seat 3: https://magic.flooey.org/draft/show?id=7W_eV52n2LhQay9tywaEUaypoGw

seat 4: https://magic.flooey.org/draft/show?id=8S0w_5hMokFeH8rCvxcER_BKZJU

seat 5: https://magic.flooey.org/draft/show?id=PPy7SadVNNPVCuELgdkYNLWrocI

seat 6: https://magic.flooey.org/draft/show?id=b6Rcmynu0iEWBtsrC90gQgrtEV8

seat 7: https://magic.flooey.org/draft/show?id=Tyhg2f-8J08rOgaU6TaQdkq4i7Y

seat 8: https://magic.flooey.org/draft/show?id=jfjbVeeFaHzTuVde2za0axX2W20

### Future Goals

Right now this is modeled such that there is no integration with the whole table; there is no reading of signals. Eventually I would like to design a Long Short Term Memory (LSTM) network optimized for predicting what the people next to the bot are drafting; a rough sketch of this idea is at the end of this section. This could then be integrated into the current model to help bias away from archetypes that appear closed and towards archetypes that appear open!

Overall, a greedy-optimization system is not the best way to solve this problem. However, it's hard to do much more with the data I have. Eventually I would like to explore reinforcement learning in this space, but that will also require a deck evaluation function. It's far in the future, but something I will do when I get everything in place.
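As a hedged sketch of that signal-reading idea (nothing below exists in the repo yet; all names are hypothetical), an LSTM could consume the sequence of packs the bot has seen and emit a distribution over the archetypes that look open:

```python
import torch

class SignalNet(torch.nn.Module):
    """Hypothetical signal reader: packs seen so far -> openness per archetype."""
    def __init__(self, n_cards, n_archs, hidden=64):
        super(SignalNet, self).__init__()
        self.lstm = torch.nn.LSTM(input_size=n_cards, hidden_size=hidden, batch_first=True)
        self.head = torch.nn.Linear(hidden, n_archs)

    def forward(self, packs_seen):
        # packs_seen: (batch, picks_so_far, n_cards) one-hot pack contents
        out, _ = self.lstm(packs_seen)
        # the last hidden state summarizes what has been passed to this seat
        return torch.softmax(self.head(out[:, -1]), dim=1)
```

Its output is shaped exactly like the constant `self.arch_bias` placeholder in `DraftNet`, so it could slot in there to scale the pool pull towards archetypes that appear open.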
### File Descriptions

models.py --- the two models trained to create the Draft Agent

preprocessing.py --- data processing utilities (e.g., labeling drafts via KMeans)

create_model.py --- the script for creating the model

predict_from_log.py --- script for seeing how the bot would navigate a draft given an MTGO draft log

bot_table_draft.py --- script for generating a draft with 8 bots at the table

Output/ --- place to store images and files that show results

Data/ and Saved_Models/ --- folders in .gitignore, as their contents cannot be in this repo due to NDA.

--------------------------------------------------------------------------------
/bot_table_draft.py:
--------------------------------------------------------------------------------
from models import DraftNet  #needed so torch.load can unpickle the model
from utils import generate_pack
import pandas as pd
import torch
import sys
import datetime
import os
from preprocessing import get_format_features

#pass 'save' on the command line to write MTGO-style logs for each seat
save_logs = sys.argv[1] if len(sys.argv) > 1 else ''
#load in model and name map
model = torch.load('Saved_Models/draft_model.pkl')
card_df = get_format_features()
#define draft with 8 players, 3 rounds, and packs of 14 cards
seats = 8
n_rounds = 3
n_sub_rounds = 14
n_cards = len(card_df)
#index circular shuffle per iteration
pack_shuffle_right = [7,0,1,2,3,4,5,6]
pack_shuffle_left = [1,2,3,4,5,6,7,0]
#initialize
for_draft_logs = torch.zeros(size=(seats,n_rounds * n_sub_rounds,n_cards * 2))
names = []
picks = [torch.zeros(n_cards) for pack in range(seats)]
for larger_round in range(n_rounds):
    #generate packs for this round
    packs = [generate_pack(card_df) for pack in range(seats)]
    for smaller_round in range(n_sub_rounds):
        pick_n = n_sub_rounds * larger_round + smaller_round
        #get data for each bot
        data = torch.stack([torch.cat([picks[idx],packs[idx]]) for idx in range(seats)])
        #make pick
        bot_picks = model(data).argmax(1)
        bot_pick_names = [card_df.loc[bp.item()]['orig_name'] for bp in bot_picks]
        #store pick
        names.append(bot_pick_names)
        #update bot pools
        for idx,bot_pick in enumerate(bot_picks):
            bp = bot_pick.item()
            pick_encoded = torch.zeros(n_cards)
            pick_encoded[bp] = 1
            for_draft_logs[idx,pick_n] = torch.cat([packs[idx],pick_encoded])
            packs[idx][bp] = 0
            picks[idx][bp] += 1
        #pass the packs (left, right, left)
        if larger_round % 2 == 1:
            packs = [packs[idx] for idx in pack_shuffle_right]
        else:
            packs = [packs[idx] for idx in pack_shuffle_left]
#display the draft picks
print(pd.DataFrame(names))

#create logs
if save_logs.lower() == 'save':
    #timestamp to uniquely name this draft (no colons, for Windows-safe paths)
    unique = str(datetime.datetime.now()).replace(':','-')
    logs = ["" for x in range(seats)]
    bot_names = ['bot' + str(i) for i in range(seats)]
    event_header = 'Event #: 1\nTime: ' + unique + '\nPlayers:\n'
    for idx,bot in enumerate(for_draft_logs):
        pack_counter = 1
        #mark this seat's own name with an arrow, like MTGO does
        bot_name_head = ['    ' + name if i != idx else '--> ' + name for i,name in enumerate(bot_names)]
        header = event_header + '\n'.join(bot_name_head)
        logs[idx] += header
        for pick_n,data in enumerate(bot):
            if pick_n % n_sub_rounds == 0:
                pack_header = '\n\n------ Pack ' + str(pack_counter) + ': Throne of Eldraine ------'
                logs[idx] += pack_header
                pack_counter += 1
            m = (pick_n % n_sub_rounds) + 1
            n = (int(pick_n/n_sub_rounds)) + 1
            logs[idx] += "\n\n"
            logs[idx] += f"Pack {n} pick {m}:\n"
            pick,pack = data[n_cards:],data[:n_cards]
            #arrow marks the card this bot picked out of the pack
            pack_lines = ["--> " + card_df.loc[i]['orig_name'] if i == pick.argmax() else "    " + card_df.loc[i]['orig_name'] for i,c in enumerate(pack) if c == 1]
            logs[idx] += "\n".join(pack_lines)
    log_loc = 'Output/Logs/Generated_on_' + unique
    os.makedirs(log_loc)
    for i,log in enumerate(logs):
        fname = log_loc + '/bot' + str(i) + '_' + unique + '.txt'
        with open(fname,'w') as f:
            f.write(log)
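#usage note: `python bot_table_draft.py save` prints the table of picks and
#writes one MTGO-style log per seat under Output/Logs/; any other argument
#(or none) skips saving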
--------------------------------------------------------------------------------
/create_model.py:
--------------------------------------------------------------------------------
from preprocessing import create_dataset
from models import *
import torch
import matplotlib.pyplot as plt
import utils

#flags for train-test-split and saving the models
#potential update: have these be command line params
full_flag = False
save = True
#create dataset
train_packs,train_picks,test_packs,test_picks = create_dataset(full_dataset=full_flag,save_clusters=save)
#initialize model with 249 cards and 15 archetypes
rank_model = RankingNet(249,15)
optimizer = torch.optim.Adam(rank_model.parameters(), lr=0.1)
#cross entropy loss function
# --> this works well for this problem because we are optimizing
#     for a pick out of a set of options that can be described as
#     a probability distribution over the cards in the pack
loss_function = torch.nn.CrossEntropyLoss()
#only consider picks where the player has likely solidified their
#archetype (pick index 16 onward, i.e. early in pack 2)
train_x = torch.flatten(train_packs[:,16:,:],start_dim=0,end_dim=1)
train_y = torch.flatten(train_picks[:,16:,:],start_dim=0,end_dim=1)
#train the model
train_loss = utils.train(rank_model,loss_function,optimizer,train_x,train_y,epochs=5)
if save:
    torch.save(rank_model,'Saved_Models/rank_model_final.pkl')
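#example (hypothetical, commented out): inspect a learned pick order by sorting
#one archetype's column of the rank matrix; card names would come from
#preprocessing.get_format_features()['name']
#
#   from preprocessing import get_format_features
#   names = get_format_features()['name']
#   top = torch.argsort(rank_model.rank_matrix[:, 0], descending=True)[:10]
#   print(names.iloc[top.numpy()].tolist())  #ten highest-rated cards for archetype 0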
#initialize drafting model with learned weights from rank model
init_weights = rank_model.rank_matrix.clone().detach()
#normalize the weights such that 1 is the largest initial weight in each archetype column
smaller_init_weights = init_weights / init_weights.max(0, keepdim=True)[0]
draft_model = DraftNet(smaller_init_weights)
#add l2 regularization (weight_decay) to avoid exploding weights
#with regularization, also lower the learning rate and increase epochs
#note: with this regularization there is no need for a ceiling on pool bias
optimizer = torch.optim.Adam(draft_model.parameters(), lr=0.01,weight_decay=1e-5)
#flatten the drafts so that the algorithm only considers each pick
#individually, and remove the archetype label to avoid leakage
train_x = torch.flatten(train_packs,start_dim=0,end_dim=1)[:,1:]
train_y = torch.flatten(train_picks,start_dim=0,end_dim=1)
#train the model
losses = utils.train(draft_model,loss_function,optimizer,train_x,train_y,epochs=100)
if not full_flag:
    #flatten test data and remove archetype label to avoid leakage
    test_x = torch.flatten(test_packs,start_dim=0,end_dim=1)[:,1:]
    test_y = torch.flatten(test_picks,start_dim=0,end_dim=1)
    #make predictions, grouping the test picks by pick number
    npicks = 42
    accuracy = []
    for pick in range(npicks):
        idx = list(range(pick,test_x.shape[0],npicks))
        x = test_x[idx]
        y = test_y[idx]
        predictions = draft_model(x)
        y_pred = torch.argmax(predictions,axis=1)
        y_true = torch.argmax(y,axis=1)
        #evaluate predictions
        amount_right = int((y_pred == y_true).sum())
        acc = amount_right/y.shape[0]
        print("Pick",pick + 1,"Accuracy: ",acc)
        accuracy.append(acc)
    plt.plot(accuracy)
    plt.xlabel('Time (pick number)')
    plt.ylabel('Accuracy')
    plt.title('Avg Accuracy in Test Set')
    plt.savefig('Output/accuracy_curve_final.png')
if save:
    #save the model so it can be used to make decisions in a real draft
    torch.save(losses,'Data/train_loss_final.pkl')
    torch.save(draft_model,'Saved_Models/draft_model_final.pkl')
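#example (hypothetical, commented out): the README's "top-2" curve counts a pick
#as correct when the human's choice is among the bot's two highest-ranked cards.
#With `predictions` and `y_true` as in the loop above:
#
#   top2 = predictions.topk(2, dim=1).indices
#   top2_acc = float((top2 == y_true[:, None]).any(dim=1).float().mean())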
--------------------------------------------------------------------------------
/create_test_models.py:
--------------------------------------------------------------------------------
import sys
import torch
from models import *
from preprocessing import *
import utils
import matplotlib.pyplot as plt
#script for creating new models with the same initial specifications

def create_dataset_test():
    #get colors of each card in the set
    features = get_format_features()
    #create embedding from card_name to integer
    card_mapping = features['name'].to_dict()
    #gets the draft data and converts it to the following via one-hot-encoding:
    #   A X B X C matrix = Draft Packs
    #       A is number of drafts in the dataset
    #       B is number of picks in the draft (42)
    #       C is 2 * (number of cards in the set) + 1 (499)
    #   on the C axis, the first element is empty and
    #   will be populated via clustering. The next 249
    #   is the draft pool. And the next 249 is the current
    #   pack.
    #   A X 249 = Draft Picks --> binary vector for the correct pick
    draft_packs,draft_picks = create_drafts()
    #cluster the dataset via archetype
    clusters = torch.load('Saved_Models/clusters_final.pkl')
    #update the data to include the cluster
    draft_packs[:,:,0] = torch.tensor(clusters.labels_)[:,None]
    #note: currently I do not include the extra features in the
    #data, but that is one of the next steps I intend to take.
    #very important to divide train/test by full draft and not by
    #individual picks, to avoid leakage. Reuse the split saved by create_model.py.
    train_idx = torch.load('Data/train_idx_final.pkl')
    test_idx = torch.load('Data/test_idx_final.pkl')
    train_pack = draft_packs[train_idx,:,:]
    train_pick = draft_picks[train_idx,:,:]
    test_pack = draft_packs[test_idx,:,:]
    test_pick = draft_picks[test_idx,:,:]
    #note: no need for a validation set since there is no hyperparameter
    #tuning or feedback given by it at the moment. This is another aspect
    #to add in the future
    return train_pack,train_pick,test_pack,test_pick

#model name suffix from the command line
ending = "_" + sys.argv[1]
loss_function = torch.nn.CrossEntropyLoss()
train_packs,train_picks,test_packs,test_picks = create_dataset_test()
rank_model = torch.load('Saved_Models/rank_model_final.pkl')
#initialize drafting model with learned weights from rank model
init_weights = rank_model.rank_matrix.clone().detach()
#normalize the weights such that 1 is the largest initial weight in each archetype column
smaller_init_weights = init_weights / init_weights.max(0, keepdim=True)[0]
draft_model = DraftNet(smaller_init_weights)
#add l2 regularization (weight_decay) to avoid exploding weights
#with regularization, also lower the learning rate and increase epochs
#note: with this regularization there is no need for a ceiling on pool bias
optimizer = torch.optim.Adam(draft_model.parameters(), lr=0.01,weight_decay=1e-5)
#flatten the drafts so that the algorithm only considers each pick
#individually, and remove the archetype label to avoid leakage
train_x = torch.flatten(train_packs,start_dim=0,end_dim=1)[:,1:]
train_y = torch.flatten(train_picks,start_dim=0,end_dim=1)
#train the model
losses = utils.train(draft_model,loss_function,optimizer,train_x,train_y,epochs=100)
#flatten test data and remove archetype label to avoid leakage
test_x = torch.flatten(test_packs,start_dim=0,end_dim=1)[:,1:]
test_y = torch.flatten(test_picks,start_dim=0,end_dim=1)
#make predictions, grouping the test picks by pick number
npicks = 42
accuracy = []
for pick in range(npicks):
    idx = list(range(pick,test_x.shape[0],npicks))
    x = test_x[idx]
    y = test_y[idx]
    predictions = draft_model(x)
    y_pred = torch.argmax(predictions,axis=1)
    y_true = torch.argmax(y,axis=1)
    #evaluate predictions
    amount_right = int((y_pred == y_true).sum())
    acc = amount_right/y.shape[0]
    print("Pick",pick + 1,"Accuracy: ",acc)
    accuracy.append(acc)
plt.plot(accuracy)
plt.xlabel('Time (pick number)')
plt.ylabel('Accuracy')
plt.title('Avg Accuracy in Test Set')
plt.savefig('Output/accuracy_curve' + ending + '.png')
#save the model so it can be used to make decisions in a real draft
torch.save(losses,'Data/train_loss' + ending + '.pkl')
torch.save(draft_model,'Saved_Models/draft_model' + ending + '.pkl')
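#usage note: `python create_test_models.py v2` retrains the draft model on the
#saved train/test split and writes Saved_Models/draft_model_v2.pkl,
#Data/train_loss_v2.pkl, and Output/accuracy_curve_v2.png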
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
import torch

class RankingNet(torch.nn.Module):
    """
    Learn N pick orders (1 per archetypal cluster) for draft picks

    assumes the first item in the data is an archetype label.
    """
    def __init__(self,num_cards,num_archetypes):
        super(RankingNet, self).__init__()
        self.n_cards = num_cards
        self.n_archs = num_archetypes
        #goal is transparency: self.rank_matrix is the
        #parameter we're trying to learn
        self.rank_matrix = torch.nn.Parameter(
            torch.rand(
                (self.n_cards,self.n_archs),
                requires_grad=True,
                dtype=torch.float
            ))
    def forward(self,x):
        #get the archetype label
        arch_idx = x[:,0].type(torch.long)
        #get the current options in the pack
        pack = x[:,1 + self.n_cards:]
        return self.rank_matrix[:,arch_idx].t() * pack

class DraftNet(torch.nn.Module):
    """
    NN that learns how to navigate a draft with a given set of archetype initializations
    """
    def __init__(self,rank_matrix):
        """
        rank_matrix: pre-initialized m x n matrix where m is the number of cards
        in the set and n is the number of archetypes
        """
        super(DraftNet, self).__init__()
        self.n_cards,self.n_archs = rank_matrix.shape
        #m x n matrix where m is the number of cards in the set
        #and n is the number of archetypes. Conceptually, this
        #matrix helps dictate how to make decisions in order to
        #properly navigate a draft towards each archetype
        self.rank_matrix = torch.nn.Parameter(
            rank_matrix.clone().detach().type(torch.float)
        )
        #vector to express opposition to bias (staying open)
        #the initialization of 2 for open_base and 7 for lift is purposeful:
        #cumprod(sigmoid(open_base)) * 7 - minimum has a value of
        #tensor(1.0090) at Pack 2 Pick 1. I tested the algorithm without
        #this initialization and it did learn the same curve, but I see no
        #reason to change the initialization and I wanted to explain the numbers.
        self.open_base = torch.nn.Parameter(torch.tensor(
            [2.0 for x in range(42)],
            requires_grad=True,
            dtype=torch.float
        ))
        self.lift = torch.nn.Parameter(torch.tensor(7.0,requires_grad=True))
        #placeholder for future versions. Currently doesn't update, but I want to explore that.
        self.arch_bias = torch.ones(self.n_archs,requires_grad=False)
        self.relu = torch.nn.ReLU()
        self.sigmoid = torch.nn.Sigmoid()
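    #worked example of the staying-open curve with the defaults above:
    #sigmoid(2.0) ~= 0.8808, so at pick index 14 (Pack 2 Pick 1) the decay is
    #7 * 0.8808**15 - 7 * 0.8808**42 ~= 1.042 - 0.034 ~= 1.009, matching the
    #tensor(1.0090) quoted in __init__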
    def forward(self,x):
        #gets pool of cards drafted so far
        pool = x[:,:self.n_cards]
        #gets current pack to decide what to pick from
        pack = x[:,self.n_cards:]
        #computes where in the draft it is. This is used to index
        #into self.open_base. Clamp at 41 to enable using the model
        #as an oracle with oversized pools ('what if it had all the red cards').
        pick_n = torch.clamp(pool.sum(1),max=41)
        #squash open_base numbers between 0 and 1
        open_base = self.sigmoid(self.open_base)
        #enforce decaying structure over time
        open_decay = torch.cumprod(open_base,dim=0) * self.relu(self.lift)
        #enforce this decay to go to zero
        open_decay = open_decay - open_decay.min()
        #get the proper open bias for each pick
        open_factor = open_decay[pick_n.type(torch.long)]
        #compute bias towards cards drafted so far
        simple_pull = torch.matmul(pool,self.rank_matrix)
        pull_relu = self.relu(simple_pull)
        #I want to explore a version of this model where the pool informs
        #the pull as a probability distribution across archetypes, and then
        #has some memory of past cards seen to also update that distribution
        #based on what it models as open, but for now this doesn't exist
        pull_final = (pull_relu * self.arch_bias) + open_factor[:,None]
        #rank every card in the format according to bias
        pick_rankings = torch.matmul(pull_final,self.rank_matrix.t())
        pick_relu = self.relu(pick_rankings)
        #zero value for all cards that are not in the current pack
        return pick_relu * pack

--------------------------------------------------------------------------------
/predict_from_log.py:
--------------------------------------------------------------------------------
from models import DraftNet  #needed so torch.load can unpickle the model
from utils import read_log
from preprocessing import get_format_features
import sys
import torch

#get logfile name from command line
log_file = sys.argv[1]
#get model and card mapping
model = torch.load('Saved_Models/draft_model.pkl')
card_df = get_format_features()
card_map = card_df['name'].to_dict()
#read logfile
pools,picks,packs = read_log(log_file,card_df)
#turn the log into the model's input format (pool + pack per pick)
create_data = [torch.cat([torch.tensor(pools[i]),torch.tensor(packs[i])]) for i in range(len(packs))]
data = torch.stack(create_data)
#make prediction
prediction = model(data.type(torch.float32))
#compare prediction with actual user pick
for i,idx in enumerate(prediction.argsort(1,descending=False)):
    actual = picks[i].argmax()
    #only display top 3 if not at the end of the pack
    if 14 - ((i % 14) + 1) > 2:
        pred = idx[[-1,-2,-3]]
    else:
        pred = idx[[-1]]
    pstr = ", ".join([card_map[p.item()] for p in pred])
    print("Pick ",i,":")
    print("\t Actual: ",card_map[actual])
    print("\t Predicted: ",pstr)
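#usage note: `python predict_from_log.py my_mtgo_log.txt` prints, for every pick
#in the log, the human's actual pick next to the bot's top prediction(s)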
--------------------------------------------------------------------------------
/preprocessing.py:
--------------------------------------------------------------------------------
from sklearn.cluster import KMeans
import torch
import random

def add_clusters(features,drafts,n_archetypes,colors_only=False,save=True):
    """
    kmeans clustering of the draft data
    """
    draft_pool_vectors = aggregate_drafts(drafts[:,:,1:],features,colors_only=colors_only)
    kmeans = KMeans(n_clusters=n_archetypes).fit(draft_pool_vectors)
    if save:
        torch.save(kmeans,'Saved_Models/clusters_final.pkl')
    return kmeans

def aggregate_drafts(drafts,features,colors_only=False):
    """
    logic for aggregating all picks of a draft into one vector
    so that the clustering algorithm can consider a draft as a
    single data point
    """
    n_cards = features.index.size
    #this grabs the pool during the last pick
    last_picks = drafts[:,-1,:n_cards]
    #binary features describing the colors of a card
    colors = features[list('WUBRG')]
    #compute the color density of a draft pool
    color_density = torch.matmul(last_picks,torch.tensor(colors.values).type(torch.float))
    #return the draft pool with 5 additional features describing color density
    if colors_only:
        return color_density
    else:
        return torch.cat([last_picks,color_density],1)

def create_drafts():
    """
    this is the function for converting the main DraftSim data into one-hot-encoded vectors.

    The actual code and pickled files are excluded from the repo due to NDA, but
    this is the body replacing it in order to test that the pipeline works.
    """
    draft_picks = torch.load('Data/draft_picks.pkl')
    draft_packs = torch.load('Data/draft_packs.pkl')
    return draft_packs,draft_picks

def get_format_features():
    """
    this is the function for grabbing card features such as color and name.

    the code to create ft.pkl is replaced by loading it due to NDA
    """
    return torch.load('Data/ft_full.pkl')
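#example (hypothetical, commented out): peek at what KMeans found by listing the
#heaviest cards in a cluster centroid (the first n_cards entries are the pool):
#
#   kmeans = torch.load('Saved_Models/clusters_final.pkl')
#   features = get_format_features()
#   center = kmeans.cluster_centers_[0][:features.index.size]
#   print(features['name'].iloc[center.argsort()[::-1][:10]].tolist())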
def create_dataset(n_archetypes=15,full_dataset=False,save_clusters=True):
    #get colors of each card in the set
    features = get_format_features()
    #create embedding from card_name to integer
    card_mapping = features['name'].to_dict()
    #gets the draft data and converts it to the following via one-hot-encoding:
    #   A X B X C matrix = Draft Packs
    #       A is number of drafts in the dataset
    #       B is number of picks in the draft (42)
    #       C is 2 * (number of cards in the set) + 1 (499)
    #   on the C axis, the first element is empty and
    #   will be populated via clustering. The next 249
    #   is the draft pool. And the next 249 is the current
    #   pack.
    #   A X 249 = Draft Picks --> binary vector for the correct pick
    draft_packs,draft_picks = create_drafts()
    #cluster the dataset via archetype
    clusters = add_clusters(features,draft_packs,n_archetypes,save=save_clusters)
    #update the data to include the cluster
    draft_packs[:,:,0] = torch.tensor(clusters.labels_)[:,None]
    #note: currently I do not include the extra features in the
    #data, but that is one of the next steps I intend to take.
    if full_dataset:
        train_perc = 1
    else:
        train_perc = 0.8
    #very important to divide train/test by full draft and not by
    #individual picks, to avoid leakage.
    size = draft_packs.shape[0]
    train_size = int(size * train_perc)
    train_idx = random.sample(range(size),train_size)
    test_idx = list(set(range(size)) - set(train_idx))
    if not full_dataset:
        #store the train/test split for the current model
        torch.save(test_idx,'Data/test_idx_final.pkl')
        torch.save(train_idx,'Data/train_idx_final.pkl')
    train_pack = draft_packs[train_idx,:,:]
    train_pick = draft_picks[train_idx,:,:]
    test_pack = draft_packs[test_idx,:,:]
    test_pick = draft_picks[test_idx,:,:]
    #note: no need for a validation set since there is no hyperparameter
    #tuning or feedback given by it at the moment. This is another aspect
    #to add in the future
    return train_pack,train_pick,test_pack,test_pick

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
torch==1.2.0
numpy==1.16.4
pandas==0.24.2
scikit-learn==0.21.2

--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import torch
import numpy as np
import random
import re
import time

def train(model,loss_fn,optimizer,x_data,labels,n_batches=64,epochs=20):
    """
    function for training a model in batches. Currently no regularization
    option, but I will be exploring that soon.
    """
    losses = []
    batch_size = x_data.shape[0] // n_batches
    #create list of indices to shuffle for random batches
    idx_list = np.arange(len(x_data))
    for epoch in range(epochs):
        np.random.shuffle(idx_list)
        total_loss = 0
        accuracy = 0
        start_time = time.time()
        for i in range(n_batches):
            batch_idx = idx_list[batch_size * i:batch_size * (i+1)]
            #get batch data
            x = x_data[batch_idx,:]
            y = labels[batch_idx,:]
            #prepare model to train
            model.train()
            #make predictions
            predictions = model(x)
            #compute loss against the index of the correct pick
            y_true = y.argmax(axis=1)
            loss = loss_fn(predictions,y_true)
            #compute gradients
            loss.backward()
            #update parameters
            optimizer.step()
            #compute accuracy to print out during training
            y_pred = torch.argmax(predictions,axis=1)
            amount_right = int((y_pred == y_true).sum())
            accuracy += amount_right/len(batch_idx)
            total_loss += loss.item()
            #zero gradients for the next batch
            optimizer.zero_grad()
        losses.append(total_loss)
        print('Epoch: ',epoch)
        print('\tSeconds =',time.time() - start_time,' Accuracy = ',accuracy/n_batches," Total Loss = ",total_loss)
    return losses

def generate_pack(card_df):
    """
    generate a random pack of 14 MTG cards:
    1 rare or mythic, 3 uncommons, and 10 commons
    """
    p_m = 1/8  #1 in 8 packs upgrades the rare to a mythic
    if random.random() < p_m:
        rare = random.sample(card_df[card_df['rarity'] == 'mythic'].index.tolist(),1)
    else:
        rare = random.sample(card_df[card_df['rarity'] == 'rare'].index.tolist(),1)
    uncommons = random.sample(card_df[card_df['rarity'] == 'uncommon'].index.tolist(),3)
    commons = random.sample(card_df[card_df['rarity'] == 'common'].index.tolist(),10)
    idxs = rare + uncommons + commons
    pack = torch.zeros(len(card_df))
    pack[idxs] = 1
    return pack
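#example: every generated pack is a binary vector over the full card set with
#exactly 14 cards flagged:
#
#   pack = generate_pack(card_df)
#   assert int(pack.sum()) == 14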
def read_log(fname,card_df):
    """
    process an MTGO log file and convert it into tensors so the bot
    can say what it would have done
    """
    ignore_cards = ['Plains','Island','Swamp','Mountain','Forest']
    with open(fname,'r') as f:
        lines = f.readlines()
    set_lookup = {v:i for i,v in enumerate(card_df['name'])}
    packs = []
    picks = []
    pools = []
    in_pack = False
    cur_pack = np.zeros(len(set_lookup.keys()))
    cur_pick = np.zeros(len(set_lookup.keys()))
    pool = np.zeros(len(set_lookup.keys()))
    for line in lines:
        match = re.findall(r'Pack \d pick \d+',line)
        if len(match) == 1:
            in_pack = True
            continue
        if in_pack:
            if len(line.strip()) == 0:
                #a blank line ends the pack; store it if a pick was made
                in_pack = False
                if sum(cur_pick) != 0:
                    packs.append(cur_pack)
                    picks.append(cur_pick)
                    pools.append(pool.copy())
                    pool += cur_pick
                    cur_pack = np.zeros(len(set_lookup.keys()))
                    cur_pick = np.zeros(len(set_lookup.keys()))
                continue
            process = line.strip()
            if process.startswith("-"):
                #the arrow-marked line is the card the human picked
                cardname = process.split(' ',1)[1].replace(' ','_').replace(',','')
                if cardname in ignore_cards:
                    continue
                card_idx = set_lookup[cardname]
                cur_pick[card_idx] = 1
            else:
                cardname = process.replace(' ','_').replace(',','')
                if cardname in ignore_cards:
                    continue
                card_idx = set_lookup[cardname]
                cur_pack[card_idx] = 1
    return pools,picks,packs
--------------------------------------------------------------------------------