├── .gitignore ├── Env.py ├── LICENSE ├── Pipfile ├── Pipfile.lock ├── README.md ├── app.py ├── client.py ├── conf └── ConfigWrapper.py ├── default_imports.py ├── lichess-listener.py ├── modules ├── auth │ ├── Auth.py │ ├── Env.py │ ├── Priv.py │ ├── Token.py │ └── User.py ├── client │ ├── Api.py │ ├── Env.py │ └── Job.py ├── db │ └── DBManager.py ├── fishnet │ └── fishnet.py ├── game │ ├── AnalysedGame.py │ ├── AnalysedMove.py │ ├── AnalysedPosition.py │ ├── Api.py │ ├── Colour.py │ ├── EngineEval.py │ ├── EngineTools.py │ ├── Env.py │ ├── Game.py │ ├── GameStore.py │ └── Player.py ├── http │ └── __init__.py ├── irwin │ ├── AnalysedGameModel.py │ ├── AnalysisReport.py │ ├── BasicGameModel.py │ ├── Env.py │ ├── GameReport.py │ ├── Irwin.py │ ├── MoveReport.py │ ├── PlayerReport.py │ ├── models │ │ ├── analysedGame.h5 │ │ └── basicGame.h5 │ └── training │ │ ├── AnalysedGameActivation.py │ │ ├── AnalysedModelTraining.py │ │ ├── BasicGameActivation.py │ │ ├── BasicModelTraining.py │ │ ├── Evaluation.py │ │ └── Training.py ├── lichess │ ├── Api.py │ └── Request.py └── queue │ ├── EngineQueue.py │ ├── Env.py │ ├── IrwinQueue.py │ ├── Origin.py │ └── Queue.py ├── tools.py ├── utils ├── buildAnalysedPositionTable.py ├── buildAverageReport.py ├── mongodb │ ├── addDateToPlayer.py │ ├── addGameAnalysedBool.py │ ├── add_null_blurs.js │ ├── add_user_to_game.js │ ├── convert_playerAnalysis_to_player.js │ ├── createGameAnlysisPivotTable.js │ ├── partition_gameAnalysis.js │ └── split_game_pgn.js └── updatePlayerDatabase.py └── webapp ├── DefaultResponse.py ├── Env.py └── controllers └── api └── blueprint.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_config.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project config 86 | .spyderproject 87 | 88 | # Rope project config 89 | .ropeproject 90 | 91 | # Data Files 92 | *.csv 93 | 94 | # TensorFlow Files 95 | checkpoint 96 | *.meta 97 | model-* 98 | 99 | # Stockfish 100 | stockfish* 101 | 102 | # Config 103 | conf/*.json 104 | 105 | # Deployment Scripts 106 | deploymodels.sh 107 | 108 | .venv 109 | -------------------------------------------------------------------------------- /Env.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from pymongo import MongoClient 4 | 5 | from chess import uci 6 | 7 | from modules.fishnet.fishnet import stockfish_command 8 | 9 | from modules.lichess.Api import Api 10 | 11 | from modules.game.Game import GameDB 12 | from modules.game.AnalysedGame import AnalysedGameDB 13 | from modules.game.Player import PlayerDB 14 | from modules.game.AnalysedPosition import AnalysedPositionDB 15 | 16 | from modules.queue.IrwinQueue import IrwinQueueDB 17 | from modules.queue.EngineQueue import EngineQueueDB 18 | 19 | from modules.irwin.training.AnalysedGameActivation import AnalysedGameActivationDB 20 | from modules.irwin.training.BasicGameActivation import BasicGameActivationDB 21 | 22 | from modules.irwin.AnalysisReport import PlayerReportDB, GameReportDB 23 | 24 | from modules.irwin.Env import Env as IrwinEnv 25 | from modules.irwin.Irwin import Irwin 26 | 27 | class Env: 28 | def __init__(self, config, engine=True, newmodel: bool = False): 29 | logging.debug('newmodel') 30 | logging.debug(newmodel) 31 | self.config = config 32 | self.engine = engine 33 | 34 | if self.engine: 35 | self.engine = uci.popen_engine(stockfish_command(config['stockfish']['update'])) 36 | self.engine.setoption({'Threads': config['stockfish']['threads'], 'Hash': config['stockfish']['memory']}) 37 | self.engine.uci() 38 | self.infoHandler = uci.InfoHandler() 39 | self.engine.info_handlers.append(self.infoHandler) 40 | 41 | self.api = Api(config['api']['url'], config['api']['token']) 42 | 43 | # Set up mongodb 44 | self.client = MongoClient(config['db']['host']) 45 | self.db = self.client.irwin 46 | if config['db']['authenticate']: 47 | self.db.authenticate( 48 | config['db']['authentication']['username'], 49 | config['db']['authentication']['password'], mechanism='MONGODB-CR') 50 | 51 | # Irwin 52 | self.irwinEnv = IrwinEnv(config, self.db) 53 | self.irwin = Irwin(self.irwinEnv, newmodel) 54 | 55 | def restartEngine(self): 56 | if self.engine: 57 | self.engine.kill() 58 | self.engine = uci.popen_engine(stockfish_command(self.config['stockfish']['update'])) 59 | self.engine.setoption({'Threads': self.config['stockfish']['threads'], 'Hash': self.config['stockfish']['memory']}) 60 | 
self.engine.uci() 61 | self.infoHandler = uci.InfoHandler() 62 | self.engine.info_handlers.append(self.infoHandler) 63 | 64 | def __del__(self): 65 | logging.warning("Removing Env") 66 | self.engine.kill() 67 | try: 68 | del self.irwin 69 | except TypeError: 70 | pass -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | flask = "*" 8 | numpy = "*" 9 | pymongo = "*" 10 | python-chess = "*" 11 | keras = "*" 12 | requests = "*" 13 | tensorflow = "*" 14 | 15 | [dev-packages] 16 | 17 | [requires] 18 | python_version = "3.7" 19 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "01c686f9046ea3d607baa573b42d99ecb232816fc9cec61d7b240867d07624da" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.7" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "absl-py": { 20 | "hashes": [ 21 | "sha256:b943d1c567743ed0455878fcd60bc28ac9fae38d129d1ccfad58079da00b8951" 22 | ], 23 | "version": "==0.7.1" 24 | }, 25 | "astor": { 26 | "hashes": [ 27 | "sha256:95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d", 28 | "sha256:fb503b9e2fdd05609fbf557b916b4a7824171203701660f0c55bbf5a7a68713e" 29 | ], 30 | "version": "==0.7.1" 31 | }, 32 | "certifi": { 33 | "hashes": [ 34 | "sha256:59b7658e26ca9c7339e00f8f4636cdfe59d34fa37b9b04f6f9e9926b3cece1a5", 35 | "sha256:b26104d6835d1f5e49452a26eb2ff87fe7090b89dfcaee5ea2212697e1e1d7ae" 36 | ], 37 | "version": "==2019.3.9" 38 | }, 39 | "chardet": { 40 | "hashes": [ 41 | "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", 42 | "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" 43 | ], 44 | "version": "==3.0.4" 45 | }, 46 | "click": { 47 | "hashes": [ 48 | "sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13", 49 | "sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7" 50 | ], 51 | "version": "==7.0" 52 | }, 53 | "flask": { 54 | "hashes": [ 55 | "sha256:2271c0070dbcb5275fad4a82e29f23ab92682dc45f9dfbc22c02ba9b9322ce48", 56 | "sha256:a080b744b7e345ccfcbc77954861cb05b3c63786e93f2b3875e0913d44b43f05" 57 | ], 58 | "index": "pypi", 59 | "version": "==1.0.2" 60 | }, 61 | "gast": { 62 | "hashes": [ 63 | "sha256:fe939df4583692f0512161ec1c880e0a10e71e6a232da045ab8edd3756fbadf0" 64 | ], 65 | "version": "==0.2.2" 66 | }, 67 | "grpcio": { 68 | "hashes": [ 69 | "sha256:07c7f7b251b26ef94e29d2c19245e34d4d05897325a289b31de3b6a5e16fbd6c", 70 | "sha256:2ddbca16c2e7b3f2ffc6e34c7cfa6886fb01de9f156ad3f77b72ad652d632097", 71 | "sha256:30d84f9684b4c81ee37906bb303a84435948c2dd3db55d3ef38f8daf28bc6ea3", 72 | "sha256:316e6c79fb1585b23ae100ee26f6ffefa91a21e4d39588fa42efadd7f20c7225", 73 | "sha256:400abff9a772351fff72d5698c8758b837bec3d7f4ed93de70bae744d8f63f53", 74 | "sha256:4ed90a256f6f8690b5c95b9d4f2e9fe6513628f3674e9068e10637e50c2f93d6", 75 | "sha256:51fd87ff610ca2f483c668c3fa7f70d479bffb3c14805d2065b51194edea5e26", 76 | "sha256:5569aba69041530e04eff3d40536027db8851f4e11e6282849b9fc5b1855075d", 77 | "sha256:566b752e36cdcd5a4d38f292aca4c8e3095f13cfe82606e010d67749cacba341", 78 | 
"sha256:5817f970fbfed72a6203ff96349e796d8f6ff3ce85b58af241c4a14190d9f4d1", 79 | "sha256:5a97bb5a4af16f840f1211dbe66d61592f02110f286d96e67bf6006d7f96aab7", 80 | "sha256:5d57e41c913152b215eda070955b3544bdf20ed2327e5e5eed3005186220ebd0", 81 | "sha256:6cec17145978cef3d20093cdc05e88da597ce05076db566a66a35b9c55d416a3", 82 | "sha256:6ef7ab9b6ba09ce087ddb3b27f12504f50efdbf5d319b8b23173478765452301", 83 | "sha256:756c0d65e4ebce1c47787dbb48955864f2a768e1df76902f33d3e4062c209f3e", 84 | "sha256:828d13f0edd27f452af7fc23093c8a2d63d8fbd92595dbd0f698c78b13af9bdb", 85 | "sha256:8cf02c4e07520be61ad8b59b0043771ef2af666cb73066516eabfee562a28df4", 86 | "sha256:919dfe84d22ce2e2ae81d82238586d7c2a86714fb0b6cf9b437e336851e3c32d", 87 | "sha256:b04a061280b06cdc4e68c4147a0f46b98c395cf62f0c6df4fa2a30a083cdc333", 88 | "sha256:b2dbe7d2f9685bdbb4415f8e475dd96b1b1776193b7286705f90490c3f039037", 89 | "sha256:b60df7cbc3e77c39d5befe6a1e6e4213f3ca683d743ff7c1622b1d4412245a55", 90 | "sha256:b740681332b5a042b9e22246a3cdbfc3d644cf73d38e117f20ad9d8deab8f1a5", 91 | "sha256:ba434873945d5d4542589674cb60c43a1cf76b2b5f0c0f759aa76d499055722f", 92 | "sha256:bcb44cd53beccc92c730254ad3d50715b67a7432e693961b566d982f759b1787", 93 | "sha256:be1cbb6cad1d4242e3aaa4143eabcfbf383358f6c8e9951be2c497b65561b075", 94 | "sha256:c4e38326fcab5c52fd1a8c8e0f908bfe830629a5ffc60793ec5545ef913d62d2", 95 | "sha256:d03c0524d5953568f74269e0faebb1e880ba9f36ca8c773be397087c35bd8188", 96 | "sha256:ea897ffa80276565acdd92349ef82a768db0e3327aacd4aec82f79ca10989689", 97 | "sha256:edc50e8bcd10b165f34c3cf3e1d4f97e9c71b165b85a85b91cf3444000a17692", 98 | "sha256:f96a2e97df522b50da9cb3795f08199b110ceab4146bf70ea7f6a3a0213786cc", 99 | "sha256:fadb649a69e3b08e01f090c24f0c8cccc122e92c362c1a1727b695a63be8416b", 100 | "sha256:fbe4360ff1689a9753cbf1b27dad11e683d39117a32a64372a7c95c6abc81b81" 101 | ], 102 | "version": "==1.19.0" 103 | }, 104 | "h5py": { 105 | "hashes": [ 106 | "sha256:05750b91640273c69989c657eaac34b091abdd75efc8c4824c82aaf898a2da0a", 107 | "sha256:082a27208aa3a2286e7272e998e7e225b2a7d4b7821bd840aebf96d50977abbb", 108 | "sha256:08e2e8297195f9e813e894b6c63f79372582787795bba2014a2db6a2de95f713", 109 | "sha256:0dd2adeb2e9de5081eb8dcec88874e7fd35dae9a21557be3a55a3c7d491842a4", 110 | "sha256:0f94de7a10562b991967a66bbe6dda9808e18088676834c0a4dcec3fdd3bcc6f", 111 | "sha256:106e42e2e01e486a3d32eeb9ba0e3a7f65c12fa8998d63625fa41fb8bdc44cdb", 112 | "sha256:1606c66015f04719c41a9863c156fc0e6b992150de21c067444bcb82e7d75579", 113 | "sha256:1854c4beff9961e477e133143c5e5e355dac0b3ebf19c52cf7cc1b1ef757703c", 114 | "sha256:1e9fb6f1746500ea91a00193ce2361803c70c6b13f10aae9a33ad7b5bd28e800", 115 | "sha256:2cca17e80ddb151894333377675db90cd0279fa454776e0a4f74308376afd050", 116 | "sha256:30e365e8408759db3778c361f1e4e0fe8e98a875185ae46c795a85e9bafb9cdf", 117 | "sha256:3206bac900e16eda81687d787086f4ffd4f3854980d798e191a9868a6510c3ae", 118 | "sha256:3c23d72058647cee19b30452acc7895621e2de0a0bd5b8a1e34204b9ea9ed43c", 119 | "sha256:407b5f911a83daa285bbf1ef78a9909ee5957f257d3524b8606be37e8643c5f0", 120 | "sha256:4162953714a9212d373ac953c10e3329f1e830d3c7473f2a2e4f25dd6241eef0", 121 | "sha256:5fc7aba72a51b2c80605eba1c50dbf84224dcd206279d30a75c154e5652e1fe4", 122 | "sha256:713ac19307e11de4d9833af0c4bd6778bde0a3d967cafd2f0f347223711c1e31", 123 | "sha256:71b946d80ef3c3f12db157d7778b1fe74a517ca85e94809358b15580983c2ce2", 124 | "sha256:8cc4aed71e20d87e0a6f02094d718a95252f11f8ed143bc112d22167f08d4040", 125 | "sha256:9d41ca62daf36d6b6515ab8765e4c8c4388ee18e2a665701fef2b41563821002", 126 | 
"sha256:a744e13b000f234cd5a5b2a1f95816b819027c57f385da54ad2b7da1adace2f3", 127 | "sha256:b087ee01396c4b34e9dc41e3a6a0442158206d383c19c7d0396d52067b17c1cb", 128 | "sha256:b0f03af381d33306ce67d18275b61acb4ca111ced645381387a02c8a5ee1b796", 129 | "sha256:b9e4b8dfd587365bdd719ae178fa1b6c1231f81280b1375eef8626dfd8761bf3", 130 | "sha256:c5dd4ec75985b99166c045909e10f0534704d102848b1d9f0992720e908928e7", 131 | "sha256:d2b82f23cd862a9d05108fe99967e9edfa95c136f532a71cb3d28dc252771f50", 132 | "sha256:e58a25764472af07b7e1c4b10b0179c8ea726446c7141076286e41891bf3a563", 133 | "sha256:f3b49107fbfc77333fc2b1ef4d5de2abcd57e7ea3a1482455229494cf2da56ce" 134 | ], 135 | "version": "==2.9.0" 136 | }, 137 | "idna": { 138 | "hashes": [ 139 | "sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407", 140 | "sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c" 141 | ], 142 | "version": "==2.8" 143 | }, 144 | "itsdangerous": { 145 | "hashes": [ 146 | "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", 147 | "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" 148 | ], 149 | "version": "==1.1.0" 150 | }, 151 | "jinja2": { 152 | "hashes": [ 153 | "sha256:065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013", 154 | "sha256:14dd6caf1527abb21f08f86c784eac40853ba93edb79552aa1e4b8aef1b61c7b" 155 | ], 156 | "version": "==2.10.1" 157 | }, 158 | "keras": { 159 | "hashes": [ 160 | "sha256:794d0c92c6c4122f1f0fcf3a7bc2f49054c6a54ddbef8d8ffafca62795d760b6", 161 | "sha256:90b610a3dbbf6d257b20a079eba3fdf2eed2158f64066a7c6f7227023fd60bc9" 162 | ], 163 | "index": "pypi", 164 | "version": "==2.2.4" 165 | }, 166 | "keras-applications": { 167 | "hashes": [ 168 | "sha256:60607b2b98868983e5153bf1cc6aa468ba73adc93bc977a90edaa4bc595e69fa", 169 | "sha256:94b8acc84fb8b1e3d752e20ed4cafa8377c9ecf6e6c1aa09942d959dc02e439a" 170 | ], 171 | "version": "==1.0.7" 172 | }, 173 | "keras-preprocessing": { 174 | "hashes": [ 175 | "sha256:0170b799a7562f80ad7931d22d56de22cf4bdd502e11c48f31a46380137a70a8", 176 | "sha256:5e3700117981c2db762e512ed6586638124fac5842170701628088a11aeb51ac" 177 | ], 178 | "version": "==1.0.9" 179 | }, 180 | "markdown": { 181 | "hashes": [ 182 | "sha256:fc4a6f69a656b8d858d7503bda633f4dd63c2d70cf80abdc6eafa64c4ae8c250", 183 | "sha256:fe463ff51e679377e3624984c829022e2cfb3be5518726b06f608a07a3aad680" 184 | ], 185 | "version": "==3.1" 186 | }, 187 | "markupsafe": { 188 | "hashes": [ 189 | "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473", 190 | "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161", 191 | "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235", 192 | "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5", 193 | "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff", 194 | "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b", 195 | "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1", 196 | "sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e", 197 | "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183", 198 | "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66", 199 | "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1", 200 | "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1", 201 | "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e", 202 | 
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b", 203 | "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905", 204 | "sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735", 205 | "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d", 206 | "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e", 207 | "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d", 208 | "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c", 209 | "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21", 210 | "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2", 211 | "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5", 212 | "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b", 213 | "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6", 214 | "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f", 215 | "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f", 216 | "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7" 217 | ], 218 | "version": "==1.1.1" 219 | }, 220 | "mock": { 221 | "hashes": [ 222 | "sha256:5ce3c71c5545b472da17b72268978914d0252980348636840bd34a00b5cc96c1", 223 | "sha256:b158b6df76edd239b8208d481dc46b6afd45a846b7812ff0ce58971cf5bc8bba" 224 | ], 225 | "version": "==2.0.0" 226 | }, 227 | "numpy": { 228 | "hashes": [ 229 | "sha256:1980f8d84548d74921685f68096911585fee393975f53797614b34d4f409b6da", 230 | "sha256:22752cd809272671b273bb86df0f505f505a12368a3a5fc0aa811c7ece4dfd5c", 231 | "sha256:23cc40313036cffd5d1873ef3ce2e949bdee0646c5d6f375bf7ee4f368db2511", 232 | "sha256:2b0b118ff547fecabc247a2668f48f48b3b1f7d63676ebc5be7352a5fd9e85a5", 233 | "sha256:3a0bd1edf64f6a911427b608a894111f9fcdb25284f724016f34a84c9a3a6ea9", 234 | "sha256:3f25f6c7b0d000017e5ac55977a3999b0b1a74491eacb3c1aa716f0e01f6dcd1", 235 | "sha256:4061c79ac2230594a7419151028e808239450e676c39e58302ad296232e3c2e8", 236 | "sha256:560ceaa24f971ab37dede7ba030fc5d8fa173305d94365f814d9523ffd5d5916", 237 | "sha256:62be044cd58da2a947b7e7b2252a10b42920df9520fc3d39f5c4c70d5460b8ba", 238 | "sha256:6c692e3879dde0b67a9dc78f9bfb6f61c666b4562fd8619632d7043fb5b691b0", 239 | "sha256:6f65e37b5a331df950ef6ff03bd4136b3c0bbcf44d4b8e99135d68a537711b5a", 240 | "sha256:7a78cc4ddb253a55971115f8320a7ce28fd23a065fc33166d601f51760eecfa9", 241 | "sha256:80a41edf64a3626e729a62df7dd278474fc1726836552b67a8c6396fd7e86760", 242 | "sha256:893f4d75255f25a7b8516feb5766c6b63c54780323b9bd4bc51cdd7efc943c73", 243 | "sha256:972ea92f9c1b54cc1c1a3d8508e326c0114aaf0f34996772a30f3f52b73b942f", 244 | "sha256:9f1d4865436f794accdabadc57a8395bd3faa755449b4f65b88b7df65ae05f89", 245 | "sha256:9f4cd7832b35e736b739be03b55875706c8c3e5fe334a06210f1a61e5c2c8ca5", 246 | "sha256:adab43bf657488300d3aeeb8030d7f024fcc86e3a9b8848741ea2ea903e56610", 247 | "sha256:bd2834d496ba9b1bdda3a6cf3de4dc0d4a0e7be306335940402ec95132ad063d", 248 | "sha256:d20c0360940f30003a23c0adae2fe50a0a04f3e48dc05c298493b51fd6280197", 249 | "sha256:d3b3ed87061d2314ff3659bb73896e622252da52558f2380f12c421fbdee3d89", 250 | "sha256:dc235bf29a406dfda5790d01b998a1c01d7d37f449128c0b1b7d1c89a84fae8b", 251 | "sha256:fb3c83554f39f48f3fa3123b9c24aecf681b1c289f9334f8215c1d3c8e2f6e5b" 252 | ], 253 | "index": "pypi", 254 | "version": "==1.16.2" 255 | }, 256 | "pbr": { 257 | "hashes": [ 258 | 
"sha256:8257baf496c8522437e8a6cfe0f15e00aedc6c0e0e7c9d55eeeeab31e0853843", 259 | "sha256:8c361cc353d988e4f5b998555c88098b9d5964c2e11acf7b0d21925a66bb5824" 260 | ], 261 | "version": "==5.1.3" 262 | }, 263 | "protobuf": { 264 | "hashes": [ 265 | "sha256:21e395d7959551e759d604940a115c51c6347d90a475c9baf471a1a86b5604a9", 266 | "sha256:57e05e16955aee9e6a0389fcbd58d8289dd2420e47df1a1096b3a232c26eb2dd", 267 | "sha256:67819e8e48a74c68d87f25cad9f40edfe2faf278cdba5ca73173211b9213b8c9", 268 | "sha256:75da7d43a2c8a13b0bc7238ab3c8ae217cbfd5979d33b01e98e1f78defb2d060", 269 | "sha256:78e08371e236f193ce947712c072542ff19d0043ab5318c2ea46bbc2aaebdca6", 270 | "sha256:7ee5b595db5abb0096e8c4755e69c20dfad38b2d0bcc9bc7bafc652d2496b471", 271 | "sha256:86260ecfe7a66c0e9d82d2c61f86a14aa974d340d159b829b26f35f710f615db", 272 | "sha256:92c77db4bd33ea4ee5f15152a835273f2338a5246b2cbb84bab5d0d7f6e9ba94", 273 | "sha256:9c7b90943e0e188394b4f068926a759e3b4f63738190d1ab3d500d53b9ce7614", 274 | "sha256:a77f217ea50b2542bae5b318f7acee50d9fc8c95dd6d3656eaeff646f7cab5ee", 275 | "sha256:ad589ed1d1f83db22df867b10e01fe445516a5a4d7cfa37fe3590a5f6cfc508b", 276 | "sha256:b06a794901bf573f4b2af87e6139e5cd36ac7c91ac85d7ae3fe5b5f6fc317513", 277 | "sha256:bd8592cc5f8b4371d0bad92543370d4658dc41a5ccaaf105597eb5524c616291", 278 | "sha256:be48e5a6248a928ec43adf2bea037073e5da692c0b3c10b34f9904793bd63138", 279 | "sha256:cc5eb13f5ccc4b1b642cc147c2cdd121a34278b341c7a4d79e91182fff425836", 280 | "sha256:cd3b0e0ad69b74ee55e7c321f52a98effed2b4f4cc9a10f3683d869de00590d5", 281 | "sha256:d6e88c4920660aa75c0c2c4b53407aef5efd9a6e0ca7d2fc84d79aba2ccbda3a", 282 | "sha256:ec3c49b6d247152e19110c3a53d9bb4cf917747882017f70796460728b02722e" 283 | ], 284 | "version": "==3.7.1" 285 | }, 286 | "pymongo": { 287 | "hashes": [ 288 | "sha256:025f94fc1e1364f00e50badc88c47f98af20012f23317234e51a11333ef986e6", 289 | "sha256:02aa7fb282606331aefbc0586e2cf540e9dbe5e343493295e7f390936ad2738e", 290 | "sha256:057210e831573e932702cf332012ed39da78edf0f02d24a3f0b213264a87a397", 291 | "sha256:0d946b79c56187fe139276d4c8ed612a27a616966c8b9779d6b79e2053587c8b", 292 | "sha256:104790893b928d310aae8a955e0bdbaa442fb0ac0a33d1bbb0741c791a407778", 293 | "sha256:15527ef218d95a8717486106553b0d54ff2641e795b65668754e17ab9ca6e381", 294 | "sha256:1826527a0b032f6e20e7ac7f72d7c26dd476a5e5aa82c04aa1c7088a59fded7d", 295 | "sha256:22e3aa4ce1c3eebc7f70f9ca7fd4ce1ea33e8bdb7b61996806cd312f08f84a3a", 296 | "sha256:244e1101e9a48615b9a16cbd194f73c115fdfefc96894803158608115f703b26", 297 | "sha256:24b8c04fdb633a84829d03909752c385faef249c06114cc8d8e1700b95aae5c8", 298 | "sha256:2c276696350785d3104412cbe3ac70ab1e3a10c408e7b20599ee41403a3ed630", 299 | "sha256:2d8474dc833b1182b651b184ace997a7bd83de0f51244de988d3c30e49f07de3", 300 | "sha256:3119b57fe1d964781e91a53e81532c85ed1701baaddec592e22f6b77a9fdf3df", 301 | "sha256:3bee8e7e0709b0fcdaa498a3e513bde9ffc7cd09dbceb11e425bd91c89dbd5b6", 302 | "sha256:436c071e01a464753d30dbfc8768dd93aecf2a8e378e5314d130b95e77b4d612", 303 | "sha256:46635e3f19ad04d5a7d7cf23d232388ddbfccf46d9a3b7436b6abadda4e84813", 304 | "sha256:4772e0b679717e7ac4608d996f57b6f380748a919b457cb05bb941467b888b22", 305 | "sha256:4e2cd80e16f481a62c3175b607373200e714ed29025f21559ebf7524f295689f", 306 | "sha256:52732960efa0e003ca1c092dc0a3c65276e897681287a788a01ca78dda3b41f0", 307 | "sha256:55a7de51ec7d1731b2431886d0349146645f2816e5b8eb982d7c49f89472c9f3", 308 | "sha256:5f8ed5934197a2d4b2087646e98de3e099a237099dcf498b9e38dd3465f74ef4", 309 | 
"sha256:64b064124fcbc8eb04a155117dc4d9a336e3cda3f069958fbc44fe70c3c3d1e9", 310 | "sha256:65958b8e4319f992e85dad59d8081888b97fcdbde5f0d14bc28f2848b92d3ef1", 311 | "sha256:7683428862e20c6a790c19e64f8ccf487f613fbc83d47e3d532df9c81668d451", 312 | "sha256:78566d5570c75a127c2491e343dc006798a384f06be588fe9b0cbe5595711559", 313 | "sha256:7d1cb00c093dbf1d0b16ccf123e79dee3b82608e4a2a88947695f0460eef13ff", 314 | "sha256:8c74e2a9b594f7962c62cef7680a4cb92a96b4e6e3c2f970790da67cc0213a7e", 315 | "sha256:8e60aa7699170f55f4b0f56ee6f8415229777ac7e4b4b1aa41fc61eec08c1f1d", 316 | "sha256:9447b561529576d89d3bf973e5241a88cf76e45bd101963f5236888713dea774", 317 | "sha256:970055bfeb0be373f2f5299a3db8432444bad3bc2f198753ee6c2a3a781e0959", 318 | "sha256:a6344b8542e584e140dc3c651d68bde51270e79490aa9320f9e708f9b2c39bd5", 319 | "sha256:ce309ca470d747b02ba6069d286a17b7df8e9c94d10d727d9cf3a64e51d85184", 320 | "sha256:cfbd86ed4c2b2ac71bbdbcea6669bf295def7152e3722ddd9dda94ac7981f33d", 321 | "sha256:d7929c513732dff093481f4a0954ed5ff16816365842136b17caa0b4992e49d3" 322 | ], 323 | "index": "pypi", 324 | "version": "==3.7.2" 325 | }, 326 | "python-chess": { 327 | "hashes": [ 328 | "sha256:7053ad0870ec889cb558298278887cd8b977ca5e752f452f5f0f1de67f7d18cf", 329 | "sha256:8b5745ae1cd250247ba2a5a2cefc3b3a1015e8f04909aca5ac1686f6102c1e24" 330 | ], 331 | "index": "pypi", 332 | "version": "==0.27.3" 333 | }, 334 | "pyyaml": { 335 | "hashes": [ 336 | "sha256:1adecc22f88d38052fb787d959f003811ca858b799590a5eaa70e63dca50308c", 337 | "sha256:436bc774ecf7c103814098159fbb84c2715d25980175292c648f2da143909f95", 338 | "sha256:460a5a4248763f6f37ea225d19d5c205677d8d525f6a83357ca622ed541830c2", 339 | "sha256:5a22a9c84653debfbf198d02fe592c176ea548cccce47553f35f466e15cf2fd4", 340 | "sha256:7a5d3f26b89d688db27822343dfa25c599627bc92093e788956372285c6298ad", 341 | "sha256:9372b04a02080752d9e6f990179a4ab840227c6e2ce15b95e1278456664cf2ba", 342 | "sha256:a5dcbebee834eaddf3fa7366316b880ff4062e4bcc9787b78c7fbb4a26ff2dd1", 343 | "sha256:aee5bab92a176e7cd034e57f46e9df9a9862a71f8f37cad167c6fc74c65f5b4e", 344 | "sha256:c51f642898c0bacd335fc119da60baae0824f2cde95b0330b56c0553439f0673", 345 | "sha256:c68ea4d3ba1705da1e0d85da6684ac657912679a649e8868bd850d2c299cce13", 346 | "sha256:e23d0cc5299223dcc37885dae624f382297717e459ea24053709675a976a3e19" 347 | ], 348 | "version": "==5.1" 349 | }, 350 | "requests": { 351 | "hashes": [ 352 | "sha256:502a824f31acdacb3a35b6690b5fbf0bc41d63a24a45c4004352b0242707598e", 353 | "sha256:7bf2a778576d825600030a110f3c0e3e8edc51dfaafe1c146e39a2027784957b" 354 | ], 355 | "index": "pypi", 356 | "version": "==2.21.0" 357 | }, 358 | "scipy": { 359 | "hashes": [ 360 | "sha256:014cb900c003b5ac81a53f2403294e8ecf37aedc315b59a6b9370dce0aa7627a", 361 | "sha256:281a34da34a5e0de42d26aed692ab710141cad9d5d218b20643a9cb538ace976", 362 | "sha256:588f9cc4bfab04c45fbd19c1354b5ade377a8124d6151d511c83730a9b6b2338", 363 | "sha256:5a10661accd36b6e2e8855addcf3d675d6222006a15795420a39c040362def66", 364 | "sha256:628f60be272512ca1123524969649a8cb5ae8b31cca349f7c6f8903daf9034d7", 365 | "sha256:6dcc43a88e25b815c2dea1c6fac7339779fc988f5df8396e1de01610604a7c38", 366 | "sha256:70e37cec0ac0fe95c85b74ca4e0620169590fd5d3f44765f3c3a532cedb0e5fd", 367 | "sha256:7274735fb6fb5d67d3789ddec2cd53ed6362539b41aa6cc0d33a06c003aaa390", 368 | "sha256:78e12972e144da47326958ac40c2bd1c1cca908edc8b01c26a36f9ffd3dce466", 369 | "sha256:790cbd3c8d09f3a6d9c47c4558841e25bac34eb7a0864a9def8f26be0b8706af", 370 | "sha256:79792c8fe8e9d06ebc50fe23266522c8c89f20aa94ac8e80472917ecdce1e5ba", 371 | 
"sha256:865afedf35aaef6df6344bee0de391ee5e99d6e802950a237f9fb9b13e441f91", 372 | "sha256:870fd401ec7b64a895cff8e206ee16569158db00254b2f7157b4c9a5db72c722", 373 | "sha256:963815c226b29b0176d5e3d37fc9de46e2778ce4636a5a7af11a48122ef2577c", 374 | "sha256:9726791484f08e394af0b59eb80489ad94d0a53bbb58ab1837dcad4d58489863", 375 | "sha256:9de84a71bb7979aa8c089c4fb0ea0e2ed3917df3fb2a287a41aaea54bbad7f5d", 376 | "sha256:b2c324ddc5d6dbd3f13680ad16a29425841876a84a1de23a984236d1afff4fa6", 377 | "sha256:b86ae13c597fca087cb8c193870507c8916cefb21e52e1897da320b5a35075e5", 378 | "sha256:ba0488d4dbba2af5bf9596b849873102d612e49a118c512d9d302ceafa36e01a", 379 | "sha256:d78702af4102a3a4e23bb7372cec283e78f32f5573d92091aa6aaba870370fe1", 380 | "sha256:def0e5d681dd3eb562b059d355ae8bebe27f5cc455ab7c2b6655586b63d3a8ea", 381 | "sha256:e085d1babcb419bbe58e2e805ac61924dac4ca45a07c9fa081144739e500aa3c", 382 | "sha256:e2cfcbab37c082a5087aba5ff00209999053260441caadd4f0e8f4c2d6b72088", 383 | "sha256:e742f1f5dcaf222e8471c37ee3d1fd561568a16bb52e031c25674ff1cf9702d5", 384 | "sha256:f06819b028b8ef9010281e74c59cb35483933583043091ed6b261bb1540f11cc", 385 | "sha256:f15f2d60a11c306de7700ee9f65df7e9e463848dbea9c8051e293b704038da60", 386 | "sha256:f31338ee269d201abe76083a990905473987371ff6f3fdb76a3f9073a361cf37", 387 | "sha256:f6b88c8d302c3dac8dff7766955e38d670c82e0d79edfc7eae47d6bb2c186594" 388 | ], 389 | "version": "==1.2.1" 390 | }, 391 | "six": { 392 | "hashes": [ 393 | "sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", 394 | "sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73" 395 | ], 396 | "version": "==1.12.0" 397 | }, 398 | "tensorboard": { 399 | "hashes": [ 400 | "sha256:53d8f40589c903dae65f39a799c2bc49defae3703754984d90613d26ebd714a4", 401 | "sha256:b664fe7772be5670d8b04200342e681af7795a12cd752709aed565c06c0cc196" 402 | ], 403 | "version": "==1.13.1" 404 | }, 405 | "tensorflow": { 406 | "hashes": [ 407 | "sha256:0de5887495c20e1130ae4d9bcfaf80cec87f579a9c27a84141a588a46e5aa853", 408 | "sha256:0f305f3c461ed2ce5e0b65fccc7b7452f483c7935dd8a52a466d622e642fdea8", 409 | "sha256:4325f20b5a703b80a5f7a8807f07ad8735025bd2a947093ffff1c26fbdc7980b", 410 | "sha256:4c86be0e476b64cedf4ffa059d71b764e75b895effb697345687e3057929a7b5", 411 | "sha256:6b0a0a413390302ce7c22c98695983d6fb8406861cfb418b25536f57a96c0b89", 412 | "sha256:77eec2351d0a9b5312ea01ee4c78c13996f249cf1bead2e68256a65e533f45ef", 413 | "sha256:87bf719a564f11d63e4f614e933e5a612dd4e67c88266b774236e0982f5fcf69", 414 | "sha256:ba29e66331cd2a8f824e0fa937ce44bd624bc37739f2f083694e473051d89ace", 415 | "sha256:bc374f5a662b6e164cd1c4da61ccc752ec208a44893d2f9dcf47d2a0a2cef311", 416 | "sha256:bcf86966b7554e407bb7d73341f2e108df62a910d40b4cd2a914867f2a5de51c", 417 | "sha256:c3abffd51c168cfd62a557243c47a29ab48deb52a64465e6818060f20755ddb4", 418 | "sha256:c41862c65628261229db22e33f9e570d845eeb5cea66dcbaebe404405edaa69b", 419 | "sha256:d7341617aedd73c2c847755e87697e9c19eb625c73da26d6cd669220c5565119", 420 | "sha256:de0425b58cb34006e4500565239b4c3a3055b95bff132f097fa46c87d8e463c9", 421 | "sha256:f21fb65c8e874f40c654bc9b3ff3db3ec26f98f03fe64a541bc768f6f5c52ac2" 422 | ], 423 | "index": "pypi", 424 | "version": "==1.13.1" 425 | }, 426 | "tensorflow-estimator": { 427 | "hashes": [ 428 | "sha256:7cfdaa3e83e3532f31713713feb98be7ea9f3065722be4267e49b6c301271419" 429 | ], 430 | "version": "==1.13.0" 431 | }, 432 | "termcolor": { 433 | "hashes": [ 434 | "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b" 435 | ], 436 | "version": 
"==1.1.0" 437 | }, 438 | "urllib3": { 439 | "hashes": [ 440 | "sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39", 441 | "sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22" 442 | ], 443 | "version": "==1.24.1" 444 | }, 445 | "werkzeug": { 446 | "hashes": [ 447 | "sha256:0a73e8bb2ff2feecfc5d56e6f458f5b99290ef34f565ffb2665801ff7de6af7a", 448 | "sha256:7fad9770a8778f9576693f0cc29c7dcc36964df916b83734f4431c0e612a7fbc" 449 | ], 450 | "version": "==0.15.2" 451 | }, 452 | "wheel": { 453 | "hashes": [ 454 | "sha256:66a8fd76f28977bb664b098372daef2b27f60dc4d1688cfab7b37a09448f0e9d", 455 | "sha256:8eb4a788b3aec8abf5ff68d4165441bc57420c9f64ca5f471f58c3969fe08668" 456 | ], 457 | "markers": "python_version >= '3'", 458 | "version": "==0.33.1" 459 | } 460 | }, 461 | "develop": {} 462 | } 463 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # irwin 2 | irwin is the AI that learns cheating patterns, marks cheaters, and assists moderators in assessing potential cheaters. 3 | 4 | ![screenshot of Irwin report](https://i.imgur.com/UcVlDK3.png) 5 | 6 | ![screenshot of companion WebApp](https://i.imgur.com/LQtSQAh.png) 7 | 8 | ## Dependencies 9 | Compatible with Python 3.x 10 | 11 | ### Python Libraries 12 | ```sh 13 | pip3 install pymongo python-chess numpy requests 14 | ``` 15 | - **tensorflow** : [tensorflow installation guide](https://www.tensorflow.org/install) 16 | 17 | ### Database 18 | - **mongodb** : [mongodb installation guide](https://docs.mongodb.com/manual/installation/) 19 | 20 | ## Configuring 21 | ### Create `conf/config.json` 22 | ```javascript 23 | { 24 | "api": { 25 | "url": "https://lichess.org/", 26 | "token": "token" 27 | }, 28 | "stockfish": { 29 | "threads": 4, 30 | "memory": 2048, 31 | "nodes": 4500000, 32 | "update": false 33 | }, 34 | "db": { 35 | "host": "localhost", 36 | "port": 27017, 37 | "authenticate": false, 38 | "authentication": { 39 | "username": "username", 40 | "password": "password" 41 | } 42 | }, 43 | "irwin": { 44 | "train": { 45 | "batchSize": 5000, 46 | "cycles": 80 47 | } 48 | } 49 | } 50 | ``` 51 | 52 | `conf/config.json` contains config for stockfish, mongodb, tensorflow, lichess (authentication token and URL), etc... 53 | ### Build a database of analysed players 54 | If you do not already have a database of analysed players, it will be necessary to analyse 55 | a few hundred players to train the neural networks on. 56 | `python3 main.py --no-assess --no-report` 57 | 58 | ## About 59 | Irwin (named after Steve Irwin, the Crocodile Hunter) started as the name of the server that the original 60 | cheatnet ran on (now deprecated). This is the successor to cheatnet. 61 | 62 | Similar to cheatnet, it works on a similar concept of analysing the available PVs of a game to determine 63 | the odds of cheating occurring. 64 | 65 | This bot makes improvements over cheatnet by taking a dramatically more modular approach to software design. 66 | `modules/core` contains most of the generic datatypes, BSON serialisation handlers and database interface 67 | layers. It is also significantly faster due to a simplified approach to using stockfish analysis. 68 | 69 | `modules/irwin` contains the brains of irwin, this is where the tensorflow learning and application takes place. 70 | 71 | Irwin has been designed so that `modules/irwin` can be replaced with other approaches to player assessment. 
72 | 73 | `Env.py` contains all of the tools to interact with lichess, irwin, and the database handlers. 74 | 75 | `main.py` covers accessing the lichess API (`modules/Api.py`) via Env to get player data; pulling records from mongodb, 76 | analysing games using stockfish, assessing those games using tensorflow and then posting the final assessments. 77 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | import sys 4 | 5 | from conf.ConfigWrapper import ConfigWrapper 6 | 7 | from webapp.Env import Env 8 | 9 | from modules.db.DBManager import DBManager 10 | 11 | from flask import Flask 12 | 13 | from webapp.controllers.api.blueprint import buildApiBlueprint 14 | 15 | 16 | config = ConfigWrapper.new('conf/server_config.json') 17 | 18 | loglevels = { 19 | 'CRITICAL': logging.CRITICAL, 20 | 'ERROR': logging.ERROR, 21 | 'WARNING': logging.WARNING, 22 | 'INFO': logging.INFO, 23 | 'DEBUG': logging.DEBUG, 24 | 'NOTSET': logging.NOTSET 25 | } 26 | 27 | logging.basicConfig(format="%(message)s", level=loglevels[config.loglevel], stream=sys.stdout) 28 | logging.getLogger("requests.packages.urllib3").setLevel(logging.WARNING) 29 | logging.getLogger("chess.uci").setLevel(logging.WARNING) 30 | logging.getLogger("modules.fishnet.fishnet").setLevel(logging.INFO) 31 | 32 | ## Database 33 | dbManager = DBManager(config) 34 | 35 | ## Modules 36 | 37 | env = Env(config) 38 | 39 | app = Flask(__name__) 40 | 41 | apiBlueprint = buildApiBlueprint(env) 42 | 43 | app.register_blueprint(apiBlueprint) 44 | 45 | if __name__ == "__main__": 46 | app.run(host="0.0.0.0", threaded=True) 47 | -------------------------------------------------------------------------------- /client.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | import argparse 4 | import sys 5 | import time 6 | import json 7 | 8 | from conf.ConfigWrapper import ConfigWrapper 9 | 10 | from modules.game.Game import Game, GameDB 11 | from modules.game.AnalysedPosition import AnalysedPositionDB 12 | from modules.game.AnalysedGame import AnalysedGame 13 | from modules.game.EngineTools import EngineTools 14 | 15 | from modules.db.DBManager import DBManager 16 | 17 | from modules.client.Env import Env 18 | from modules.client.Api import Api 19 | 20 | 21 | conf = ConfigWrapper.new('conf/client_config.json') 22 | 23 | parser = argparse.ArgumentParser(description=__doc__) 24 | ## Training 25 | parser.add_argument("--token", dest="token", nargs="?", 26 | default=None, help="token to use with webserver") 27 | 28 | loglevels = { 29 | 'CRITICAL': logging.CRITICAL, 30 | 'ERROR': logging.ERROR, 31 | 'WARNING': logging.WARNING, 32 | 'INFO': logging.INFO, 33 | 'DEBUG': logging.DEBUG, 34 | 'NOTSET': logging.NOTSET 35 | } 36 | 37 | logging.basicConfig(format="%(message)s", level=loglevels[conf.loglevel], stream=sys.stdout) 38 | logging.getLogger("requests.packages.urllib3").setLevel(logging.WARNING) 39 | logging.getLogger("chess.uci").setLevel(logging.WARNING) 40 | logging.getLogger("modules.fishnet.fishnet").setLevel(logging.WARNING) 41 | 42 | args = parser.parse_args() 43 | 44 | env = Env(conf, token = args.token) 45 | api = Api(env) 46 | 47 | def analyseGames(games: List[Game], playerId: str) -> Iterable[AnalysedGame]: 48 | """ 49 | Iterate through list of games and return analysed games 50 | """ 51 | 52 | count = len(games) 53 | for i, 
game in enumerate(games): 54 | logging.warning(f'{playerId}: Analysing Game #{i+1} / {count}: {game.id}') 55 | analysedGame = env.engineTools.analyseGame(game, game.white == playerId, conf['stockfish nodes']) 56 | if analysedGame is not None: 57 | yield analysedGame 58 | 59 | while True: 60 | logging.info('getting new job') 61 | job = api.requestJob() 62 | 63 | if job is not None: 64 | logging.warning(f'Analysing Player: {job.playerId}') 65 | gameIds = [g.id for g in job.games] 66 | logging.warning(f'Analysing Games: {gameIds}') 67 | 68 | analysedGames = list(analyseGames(job.games, job.playerId)) 69 | 70 | response = api.completeJob(job, analysedGames) 71 | 72 | if response is not None: 73 | try: 74 | resJson = response.json() 75 | if response.status_code == 200: 76 | logging.info('SUCCESS. Posted completed job. Message: {}'.format(resJson.get('message'))) 77 | else: 78 | logging.warning('SOFT FAILURE. Failed to post completed job. Message: {}'.format(resJson.get('message'))) 79 | except json.decoder.JSONDecodeError: 80 | logging.warning(f'HARD FAILURE. Failed to post job. Bad response from server.') 81 | else: 82 | logging.warning('Job is None. Pausing') 83 | time.sleep(10) 84 | -------------------------------------------------------------------------------- /conf/ConfigWrapper.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | import json 4 | 5 | class ConfigWrapper: 6 | """ 7 | Used for loading and accessing values from a json config file. 8 | """ 9 | def __init__(self, d: Dict): 10 | self.d = d 11 | 12 | @staticmethod 13 | def new(filename: str): 14 | with open(filename) as confFile: 15 | return ConfigWrapper(json.load(confFile)) 16 | 17 | def __getitem__(self, key: str): 18 | """ 19 | allows for accessing like, conf["index items like this"] 20 | """ 21 | try: 22 | head, tail = key.split(' ', 1) 23 | return self.__getattr__(head)[tail] 24 | except ValueError: 25 | return self.__getattr__(key) 26 | 27 | def __getattr__(self, key: str): 28 | """ 29 | allows for accessing like, conf.index.like.this 30 | """ 31 | r = self.d.get(key) 32 | if isinstance(r, dict): 33 | return ConfigWrapper(r) 34 | return r 35 | 36 | def asdict(self) -> Dict: 37 | return self.d 38 | 39 | def __repr__(self): 40 | return "ConfigWrapper({})".format(self.d) -------------------------------------------------------------------------------- /default_imports.py: -------------------------------------------------------------------------------- 1 | ## Typing and enforcing of types 2 | from typing import NamedTuple, TypeVar, NewType, Iterable, List, Dict, Tuple 3 | from typing import Optional as Opt 4 | 5 | from numpy import float16, float32, float64 6 | from numpy import float as npfloat 7 | 8 | Number = TypeVar('Number', int, float, npfloat, float16, float32, float64) 9 | 10 | ## Logging 11 | import logging 12 | 13 | ## Tools 14 | notNone = lambda x: x is not None -------------------------------------------------------------------------------- /lichess-listener.py: -------------------------------------------------------------------------------- 1 | """Stream listener for Irwin. 
Acts on player status updates, and analysis requests""" 2 | from default_imports import * 3 | 4 | from conf.ConfigWrapper import ConfigWrapper 5 | 6 | import requests 7 | from requests.exceptions import ChunkedEncodingError, ConnectionError 8 | from requests.packages.urllib3.exceptions import NewConnectionError, ProtocolError, MaxRetryError 9 | from http.client import IncompleteRead 10 | from socket import gaierror 11 | 12 | from webapp.Env import Env 13 | 14 | from modules import http 15 | from modules.lichess.Request import Request 16 | from modules.queue.EngineQueue import EngineQueue 17 | 18 | import json 19 | import argparse 20 | import logging 21 | import sys 22 | from datetime import datetime, timedelta 23 | from time import sleep 24 | 25 | parser = argparse.ArgumentParser(description=__doc__) 26 | 27 | parser.add_argument("--quiet", dest="loglevel", 28 | default=logging.DEBUG, action="store_const", const=logging.INFO, 29 | help="reduce the number of logged messages") 30 | settings = parser.parse_args() 31 | 32 | logging.basicConfig(format="%(message)s", level=settings.loglevel, stream=sys.stdout) 33 | logging.getLogger("requests.packages.urllib3").setLevel(logging.WARNING) 34 | logging.getLogger("chess.uci").setLevel(logging.WARNING) 35 | logging.getLogger("modules.fishnet.fishnet").setLevel(logging.WARNING) 36 | 37 | 38 | config = ConfigWrapper.new('conf/server_config.json') 39 | 40 | env = Env(config) 41 | 42 | """ 43 | Possible messages that lichess will emit 44 | 45 | {'t':'request', 'origin': 'moderator', 'user': {'id': 'userId', 'titled': bool, 'engine': bool, 'games': int}, 'games': []} 46 | """ 47 | 48 | def handleLine(payload: Dict): 49 | request = Request.fromJson(payload) 50 | if request is not None: # check the request parsed before dereferencing it 51 | playerId = request.player.id 52 | logging.info(f'Processing request for {request.player}') 53 | # store user 54 | env.gameApi.writePlayer(request.player) 55 | # store games 56 | env.gameApi.writeGames(request.games) 57 | 58 | existingEngineQueue = env.queue.engineQueueById(playerId) 59 | 60 | newEngineQueue = EngineQueue.new( 61 | playerId=playerId, 62 | origin=request.origin, 63 | gamesAndPredictions=list(zip(request.games, env.irwin.basicGameModel.predict(playerId, request.games)))) 64 | 65 | if existingEngineQueue is not None and not existingEngineQueue.completed: 66 | newEngineQueue = EngineQueue.merge(existingEngineQueue, newEngineQueue) 67 | 68 | requiredGames = env.gameApi.gamesForAnalysis(playerId, newEngineQueue.requiredGameIds) 69 | if len(requiredGames) > 0: 70 | env.queue.queueEngineAnalysis(newEngineQueue) 71 | 72 | 73 | session = http.get_requests_session_with_keepalive() 74 | while True: 75 | try: 76 | r = session.get( 77 | config.api.url + 'api/stream/irwin', 78 | headers = { 79 | 'User-Agent': 'Irwin', 80 | 'Authorization': f'Bearer {config.api.token}' 81 | }, 82 | stream = True 83 | ) 84 | for line in r.iter_lines(): 85 | try: 86 | payload = json.loads(line.decode("utf-8")) 87 | handleLine(payload) 88 | except json.decoder.JSONDecodeError: 89 | logging.warning(f"Failed to decode: {line}") 90 | except (ChunkedEncodingError, ConnectionError, NewConnectionError, ProtocolError, MaxRetryError, IncompleteRead, gaierror): 91 | sleep(5) 92 | continue 93 | -------------------------------------------------------------------------------- /modules/auth/Auth.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.auth.Env import Env 4 | from modules.auth.User import User,
UserID, Username, Password 5 | from modules.auth.Token import Token, TokenID 6 | from modules.auth.Priv import Priv 7 | 8 | from webapp.DefaultResponse import BadRequest 9 | 10 | from flask import request, abort 11 | from functools import wraps 12 | 13 | Authable = TypeVar('Authable', User, Token) 14 | 15 | Authorised = NewType('Authorised', bool) 16 | 17 | AuthID = TypeVar('AuthID', UserID, TokenID) 18 | 19 | class Auth(NamedTuple('Auth', [('env', Env)])): 20 | def loginUser(self, username: Username, password: Password) -> Tuple[Opt[User], Authorised]: 21 | """ 22 | Attempts to log in a user. 23 | Returns (User, True) if successful, 24 | (User, False) if the user exists and the password is incorrect, 25 | (None, False) if the user does not exist. 26 | """ 27 | user = self.env.userDB.byId(username) 28 | if user is not None: 29 | return (user, user.checkPassword(password)) 30 | return (None, False) 31 | 32 | def registerUser(self, name: Username, password: Password, privs: List[Priv] = []) -> Opt[User]: 33 | """ 34 | Will attempt to register a user. 35 | Returns the User object if successful, otherwise None. 36 | """ 37 | user = User.new(name, password, privs) 38 | if self.env.userDB.byId(user.id) is None: 39 | self.env.userDB.write(user) 40 | return user 41 | return None 42 | 43 | def authoriseTokenId(self, tokenId: TokenID, priv: Priv) -> Tuple[Opt[Token], Authorised]: 44 | """ 45 | Given a tokenId, will check if the tokenId has priv. 46 | """ 47 | token = self.env.tokenDB.byId(tokenId) 48 | if token is not None: 49 | return (token, token.hasPriv(priv)) 50 | return (None, False) 51 | 52 | def authoriseUser(self, username: Username, password: Password, priv: Priv) -> Tuple[Opt[User], Authorised]: 53 | """ 54 | Checks if user has priv in list of privs. 55 | """ 56 | user, loggedIn = self.loginUser(username, password) 57 | if user is not None: 58 | return (user, loggedIn and user.hasPriv(priv)) 59 | return (None, False) 60 | 61 | def authoriseRequest(self, req: Opt[Dict], priv: Priv) -> Tuple[Opt[Authable], Authorised]: 62 | """ 63 | Checks if a request is verified with priv.
64 | """ 65 | if req is not None: 66 | # Attempt to authorise token first 67 | authReq = req.get('auth') 68 | if authReq is not None: 69 | tokenId = authReq.get('token') 70 | if tokenId is not None: 71 | return self.authoriseTokenId(tokenId, priv) 72 | 73 | # Then attempt to authorise user/password 74 | username = authReq.get('username') 75 | password = authReq.get('password') 76 | 77 | if None not in [username, password]: 78 | return self.authoriseUser(username, password, priv) 79 | 80 | return (None, False) 81 | 82 | def authoriseRoute(self, priv: Priv): 83 | """ 84 | Wrap around a flask route and check it is authorised 85 | """ 86 | def decorator(func): 87 | @wraps(func) 88 | def wrapper(*args, **kwargs): 89 | json_obj = request.get_json(silent=True) 90 | authable, authorised = self.authoriseRequest(json_obj, priv) 91 | if authorised: 92 | logging.info(f'{authable.name} has been authorised to {priv.permission}') 93 | args_ = (authable,) + args 94 | return func(*args_, **kwargs) 95 | if authable is not None: 96 | logging.warning(f'UNAUTHORISED: {authable.name} has tried to perform an action requiring {priv.permission}') 97 | abort(BadRequest) 98 | return wrapper 99 | return decorator -------------------------------------------------------------------------------- /modules/auth/Env.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from modules.auth.User import UserDB 6 | from modules.auth.Token import TokenDB 7 | 8 | from pymongo.database import Database 9 | 10 | class Env: 11 | def __init__(self, config: ConfigWrapper, db: Database): 12 | self.db = db 13 | self.config = config 14 | self.userDB = UserDB(self.db[self.config["auth coll user"]]) 15 | self.tokenDB = TokenDB(self.db[self.config["auth coll token"]]) -------------------------------------------------------------------------------- /modules/auth/Priv.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | Permission = NewType('Permission', str) 4 | 5 | Priv = NamedTuple('Priv', [ 6 | ('permission', Permission) 7 | ]) 8 | 9 | RequestJob = Priv('request_job') # client can request work 10 | CompleteJob = Priv('complete_job') # client can post results of work 11 | PostJob = Priv('post_job') # lichess can post a job for analysis -------------------------------------------------------------------------------- /modules/auth/Token.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.auth.Priv import Priv 4 | 5 | from pymongo.collection import Collection 6 | 7 | TokenID = NewType('TokenID', str) 8 | 9 | class Token(NamedTuple('Token', [ 10 | ('id', TokenID), 11 | ('name', str), 12 | ('privs', List[Priv]) 13 | ])): 14 | def hasPriv(self, priv: Priv) -> bool: 15 | return priv in self.privs 16 | 17 | class TokenBSONHandler: 18 | @staticmethod 19 | def reads(bson: Dict) -> Token: 20 | return Token( 21 | id = bson['_id'], 22 | name = bson['name'], 23 | privs = [Priv(p) for p in bson['privs']]) 24 | 25 | @staticmethod 26 | def writes(token: Token) -> Dict: 27 | return { 28 | '_id': token.id, 29 | 'name': token.name, 30 | 'privs': [p.permission for p in token.privs]} 31 | 32 | class TokenDB(NamedTuple('TokenDB', [ 33 | ('coll', Collection) 34 | ])): 35 | def write(self, token: Token): 36 | self.coll.update_one({'_id': token.id}, {'$set': 
TokenBSONHandler.writes(token)}, upsert=True) 37 | 38 | def byId(self, _id: TokenID) -> Opt[Token]: 39 | doc = self.coll.find_one({'_id': _id}) 40 | return None if doc is None else TokenBSONHandler.reads(doc) -------------------------------------------------------------------------------- /modules/auth/User.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.auth.Priv import Priv 4 | 5 | from pymongo.collection import Collection 6 | import hashlib, uuid 7 | 8 | Username = NewType('Username', str) 9 | UserID = NewType('UserID', str) 10 | Password = NewType('Password', str) 11 | Salt = NewType('Salt', str) 12 | 13 | class User(NamedTuple('User', [ 14 | ('id', UserID), 15 | ('name', Username), 16 | ('password', Password), 17 | ('salt', Salt), 18 | ('privs', List[Priv]) 19 | ])): 20 | @staticmethod 21 | def new(name: Username, password: Password, privs: List[Priv] = []): 22 | """ 23 | Creates a new User object. 24 | """ 25 | hashedPassword, salt = User.hashPassword(password) 26 | return User( 27 | id = name.lower().replace(' ', ''), 28 | name = name, 29 | password = hashedPassword, 30 | salt = salt, 31 | privs = privs 32 | ) 33 | 34 | @staticmethod 35 | def hashPassword(password: Password, salt: Opt[Salt] = None) -> Tuple[Password, Salt]: 36 | """ 37 | Given a string and a salt, this function will generate a hash of the password. 38 | If salt is not provided a new random salt is created. 39 | """ 40 | if salt is None: 41 | salt = uuid.uuid4().hex 42 | hashedPassword = hashlib.sha512((password + salt).encode('utf-8')).hexdigest()  # hashlib requires bytes, not str 43 | return hashedPassword, salt 44 | 45 | def checkPassword(self, password: Password) -> bool: 46 | """ 47 | Checks if a raw password matches the hashed password of the user.
48 | """ 49 | return self.hashPassword(password, self.salt) == self.password 50 | 51 | class UserBSONHandler: 52 | @staticmethod 53 | def reads(bson: Dict) -> User: 54 | return User( 55 | id = bson['_id'], 56 | name = bson['name'], 57 | password = bson['password'], 58 | salt = bson['salt'], 59 | privs = [Priv(p) for p in bson['privs']]) 60 | 61 | @staticmethod 62 | def writes(user: User) -> Dict: 63 | return { 64 | '_id': user.id, 65 | 'name': user.name, 66 | 'password': user.password, 67 | 'salt': user.salt, 68 | 'privs': [p.permission for p in user.privs] 69 | } 70 | 71 | class UserDB(NamedTuple('UserDB', [ 72 | ('coll', Collection) 73 | ])): 74 | def write(self, user: User): 75 | self.coll.update_one({'_id': user.id}, {'$set': UserBSONHandler.writes(user)}, upsert=True) 76 | 77 | def byId(self, _id: UserID) -> Opt[User]: 78 | doc = self.coll.find_one({'_id': _id}) 79 | return None if doc is None else UserBSONHandler.reads(doc) -------------------------------------------------------------------------------- /modules/client/Api.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | import json 4 | import requests 5 | import time 6 | 7 | from modules.game.Game import GameBSONHandler 8 | from modules.game.AnalysedGame import AnalysedGameBSONHandler, AnalysedGame 9 | from modules.client.Env import Env 10 | from modules.client.Job import Job 11 | 12 | from requests.models import Response 13 | 14 | class Api(NamedTuple('Api', [ 15 | ('env', Env) 16 | ])): 17 | def requestJob(self) -> Opt[Dict]: 18 | for i in range(5): 19 | try: 20 | result = requests.get(f'{self.env.url}/api/request_job', json={'auth': self.env.auth}) 21 | return Job.fromJson(result.json()) 22 | except (json.decoder.JSONDecodeError, requests.ConnectionError, requests.exceptions.SSLError): 23 | logging.warning(f"Error in request job. Trying again in 10 sec. Received: {result.text}") 24 | time.sleep(10) 25 | return None 26 | 27 | def completeJob(self, job: Job, analysedGames: List[AnalysedGame]) -> Opt[Response]: 28 | payload = { 29 | 'auth': self.env.auth, 30 | 'job': job.toJson(), 31 | 'analysedGames': [ag.toJson() for ag in analysedGames] 32 | } 33 | for i in range(5): 34 | try: 35 | result = requests.post(f'{self.env.url}/api/complete_job', json=payload) 36 | return result 37 | except (json.decoder.JSONDecodeError, requests.ConnectionError, requests.exceptions.SSLError): 38 | logging.warning('Error in completing job. 
Trying again in 10 sec') 39 | time.sleep(10) 40 | return None -------------------------------------------------------------------------------- /modules/client/Env.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from modules.game.EngineTools import EngineTools 6 | 7 | class Env: 8 | def __init__(self, config: ConfigWrapper, token: Opt[str] = None): 9 | self.config = config 10 | self.url = "{}://{}".format(self.config.server.protocol, self.config.server.domain) 11 | self.engineTools = EngineTools.new(self.config) 12 | if token is None: 13 | self.auth = self.config.auth.asdict() 14 | else: 15 | self.auth = {'token': token} -------------------------------------------------------------------------------- /modules/client/Job.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.Player import PlayerID 4 | from modules.game.Game import Game, GameBSONHandler 5 | from modules.game.AnalysedPosition import AnalysedPosition, AnalysedPositionBSONHandler 6 | 7 | class Job(NamedTuple('Job', [ 8 | ('playerId', PlayerID), 9 | ('games', List[Game]), 10 | ('analysedPositions', List[AnalysedPosition]) 11 | ])): 12 | @staticmethod 13 | def fromJson(json: Dict): 14 | try: 15 | return JobBSONHandler.reads(json) 16 | except KeyError as e: 17 | logging.warning(f'Failed convert {json} to Job: {e}') 18 | return None 19 | 20 | def toJson(self): 21 | return JobBSONHandler.writes(self) 22 | 23 | class JobBSONHandler: 24 | @staticmethod 25 | def reads(bson: Dict) -> Job: 26 | return Job( 27 | playerId = bson['playerId'], 28 | games = [GameBSONHandler.reads(g) for g in bson['games']], 29 | analysedPositions = [AnalysedPositionBSONHandler.reads(ap) for ap in bson['analysedPositions']]) 30 | 31 | @staticmethod 32 | def writes(job: Job) -> Dict: 33 | return { 34 | 'playerId': job.playerId, 35 | 'games': [g.toJson() for g in job.games], 36 | 'analysedPositions': [AnalysedPositionBSONHandler.writes(ap) for ap in job.analysedPositions] 37 | } -------------------------------------------------------------------------------- /modules/db/DBManager.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from pymongo import MongoClient 6 | from pymongo.database import Database 7 | 8 | class DBManager(NamedTuple('DBManager', [ 9 | ('config', 'ConfigWrapper') 10 | ])): 11 | def client(self) -> MongoClient: 12 | return MongoClient(self.config.db.host) 13 | 14 | def db(self) -> Database: 15 | client = self.client() 16 | db = client[self.config['db database']] 17 | if self.config['db authenticate']: 18 | db.authenticate( 19 | self.config.authentication.username, 20 | self.config.authentication.password, mechanism='MONGODB-CR') 21 | return db -------------------------------------------------------------------------------- /modules/fishnet/fishnet.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from __future__ import print_function 4 | from __future__ import division 5 | 6 | import json 7 | import time 8 | import contextlib 9 | import sys 10 | import os 11 | import stat 12 | import platform 13 | import ctypes 14 | 15 | try: 16 | import httplib 17 | except ImportError: 18 | import http.client as httplib 19 | 20 | try: 21 | import 
urlparse 22 | except ImportError: 23 | import urllib.parse as urlparse 24 | 25 | try: 26 | import urllib.request as urllib 27 | except ImportError: 28 | import urllib 29 | 30 | 31 | def stockfish_command(update=True): 32 | filename = stockfish_filename() 33 | 34 | if update: 35 | filename = update_stockfish(filename) 36 | 37 | return os.path.join(".", filename) 38 | 39 | 40 | def stockfish_filename(): 41 | machine = platform.machine().lower() 42 | 43 | modern, bmi2 = detect_cpu_capabilities() 44 | if modern and bmi2: 45 | suffix = "-bmi2" 46 | elif modern: 47 | suffix = "-modern" 48 | else: 49 | suffix = "" 50 | 51 | if os.name == "nt": 52 | return "stockfish-windows-%s%s.exe" % (machine, suffix) 53 | elif os.name == "os2" or sys.platform == "darwin": 54 | return "stockfish-osx-%s" % machine 55 | elif os.name == "posix": 56 | return "stockfish-%s%s" % (machine, suffix) 57 | 58 | 59 | def update_stockfish(filename): 60 | print("Looking up %s ..." % filename) 61 | 62 | headers = {} 63 | headers["User-Agent"] = "Python-Puzzle-Generator" 64 | 65 | # Only update to newer versions 66 | try: 67 | headers["If-Modified-Since"] = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(os.path.getmtime(filename))) 68 | except OSError: 69 | pass 70 | 71 | # Escape GitHub API rate limiting 72 | if "GITHUB_API_TOKEN" in os.environ: 73 | headers["Authorization"] = "token %s" % os.environ["GITHUB_API_TOKEN"] 74 | 75 | # Find latest release 76 | with http("GET", "https://api.github.com/repos/niklasf/Stockfish/releases/latest", headers=headers) as response: 77 | if response.status == 304: 78 | print("Local %s is newer than release" % filename) 79 | return filename 80 | 81 | release = json.loads(response.read().decode("utf-8")) 82 | 83 | print("Latest stockfish release is tagged", release["tag_name"]) 84 | 85 | for asset in release["assets"]: 86 | if asset["name"] == filename: 87 | print("Found", asset["browser_download_url"]) 88 | break 89 | else: 90 | raise ConfigError("No precompiled %s for your platform" % filename) 91 | 92 | # Download 93 | def reporthook(a, b, c): 94 | if sys.stderr.isatty(): 95 | sys.stderr.write("\rDownloading %s: %d/%d (%d%%)" % ( 96 | filename, min(a * b, c), c, 97 | round(min(a * b, c) * 100 / c))) 98 | sys.stderr.flush() 99 | 100 | urllib.urlretrieve(asset["browser_download_url"], filename, reporthook) 101 | 102 | sys.stderr.write("\n") 103 | sys.stderr.flush() 104 | 105 | # Make executable 106 | print("chmod +x", filename) 107 | st = os.stat(filename) 108 | os.chmod(filename, st.st_mode | stat.S_IEXEC) 109 | return filename 110 | 111 | 112 | @contextlib.contextmanager 113 | def make_cpuid(): 114 | # Loosely based on cpuid.py by Anders Høst, licensed MIT: 115 | # https://github.com/flababah/cpuid.py 116 | 117 | # Prepare system information 118 | is_windows = os.name == "nt" 119 | is_64bit = ctypes.sizeof(ctypes.c_void_p) == 8 120 | if platform.machine().lower() not in ["amd64", "x86_64", "x86", "i686"]: 121 | raise OSError("Got no CPUID opcodes for %s" % platform.machine()) 122 | 123 | # Struct for return value 124 | class CPUID_struct(ctypes.Structure): 125 | _fields_ = [("eax", ctypes.c_uint32), 126 | ("ebx", ctypes.c_uint32), 127 | ("ecx", ctypes.c_uint32), 128 | ("edx", ctypes.c_uint32)] 129 | 130 | # Select kernel32 or libc 131 | if is_windows: 132 | if is_64bit: 133 | libc = ctypes.CDLL("kernel32.dll") 134 | else: 135 | libc = ctypes.windll.kernel32 136 | else: 137 | libc = ctypes.pythonapi 138 | 139 | # Select opcodes 140 | if is_64bit: 141 | if is_windows: 142 | # Windows 
x86_64 143 | # Two first call registers : RCX, RDX 144 | # Volatile registers : RAX, RCX, RDX, R8-11 145 | opc = [ 146 | 0x53, # push %rbx 147 | 0x48, 0x89, 0xd0, # mov %rdx,%rax 148 | 0x49, 0x89, 0xc8, # mov %rcx,%r8 149 | 0x31, 0xc9, # xor %ecx,%ecx 150 | 0x0f, 0xa2, # cpuid 151 | 0x41, 0x89, 0x00, # mov %eax,(%r8) 152 | 0x41, 0x89, 0x58, 0x04, # mov %ebx,0x4(%r8) 153 | 0x41, 0x89, 0x48, 0x08, # mov %ecx,0x8(%r8) 154 | 0x41, 0x89, 0x50, 0x0c, # mov %edx,0xc(%r8) 155 | 0x5b, # pop %rbx 156 | 0xc3 # retq 157 | ] 158 | else: 159 | # Posix x86_64 160 | # Two first call registers : RDI, RSI 161 | # Volatile registers : RAX, RCX, RDX, RSI, RDI, R8-11 162 | opc = [ 163 | 0x53, # push %rbx 164 | 0x48, 0x89, 0xf0, # mov %rsi,%rax 165 | 0x31, 0xc9, # xor %ecx,%ecx 166 | 0x0f, 0xa2, # cpuid 167 | 0x89, 0x07, # mov %eax,(%rdi) 168 | 0x89, 0x5f, 0x04, # mov %ebx,0x4(%rdi) 169 | 0x89, 0x4f, 0x08, # mov %ecx,0x8(%rdi) 170 | 0x89, 0x57, 0x0c, # mov %edx,0xc(%rdi) 171 | 0x5b, # pop %rbx 172 | 0xc3 # retq 173 | ] 174 | else: 175 | # CDECL 32 bit 176 | # Two first call registers : Stack (%esp) 177 | # Volatile registers : EAX, ECX, EDX 178 | opc = [ 179 | 0x53, # push %ebx 180 | 0x57, # push %edi 181 | 0x8b, 0x7c, 0x24, 0x0c, # mov 0xc(%esp),%edi 182 | 0x8b, 0x44, 0x24, 0x10, # mov 0x10(%esp),%eax 183 | 0x31, 0xc9, # xor %ecx,%ecx 184 | 0x0f, 0xa2, # cpuid 185 | 0x89, 0x07, # mov %eax,(%edi) 186 | 0x89, 0x5f, 0x04, # mov %ebx,0x4(%edi) 187 | 0x89, 0x4f, 0x08, # mov %ecx,0x8(%edi) 188 | 0x89, 0x57, 0x0c, # mov %edx,0xc(%edi) 189 | 0x5f, # pop %edi 190 | 0x5b, # pop %ebx 191 | 0xc3 # ret 192 | ] 193 | 194 | code_size = len(opc) 195 | code = (ctypes.c_ubyte * code_size)(*opc) 196 | 197 | if is_windows: 198 | # Allocate executable memory 199 | addr = libc.VirtualAlloc(None, code_size, 0x1000, 0x40) 200 | if not addr: 201 | raise MemoryError("Could not VirtualAlloc RWX memory") 202 | else: 203 | # Allocate memory 204 | libc.valloc.restype = ctypes.c_void_p 205 | libc.valloc.argtypes = [ctypes.c_size_t] 206 | addr = libc.valloc(code_size) 207 | if not addr: 208 | raise MemoryError("Could not valloc memory") 209 | 210 | # Make executable 211 | libc.mprotect.restype = ctypes.c_int 212 | libc.mprotect.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int] 213 | if 0 != libc.mprotect(addr, code_size, 1 | 2 | 4): 214 | raise OSError("Failed to set RWX using mprotect") 215 | 216 | # Copy code to allocated executable memory. No need to flush instruction 217 | # cache for CPUID. 
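# memmove (below) copies the opcodes into the buffer that was just made executable;
# the buffer is then cast to a C function pointer via CFUNCTYPE so that
# cpuid(eax) can invoke it and read the register values back out of the struct.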
218 | ctypes.memmove(addr, code, code_size) 219 | 220 | # Create and yield callable 221 | result = CPUID_struct() 222 | func_type = ctypes.CFUNCTYPE(None, ctypes.POINTER(CPUID_struct), ctypes.c_uint32) 223 | func_ptr = func_type(addr) 224 | 225 | def cpuid(eax): 226 | func_ptr(result, eax) 227 | return result.eax, result.ebx, result.ecx, result.edx 228 | 229 | yield cpuid 230 | 231 | # Free 232 | if is_windows: 233 | libc.VirtualFree(addr, 0, 0x8000) 234 | else: 235 | libc.free.restype = None 236 | libc.free.argtypes = [ctypes.c_void_p] 237 | libc.free(addr) 238 | 239 | 240 | def detect_cpu_capabilities(): 241 | # Detects support for popcnt and pext instructions 242 | modern, bmi2 = False, False 243 | 244 | try: 245 | with make_cpuid() as cpuid: 246 | for eax in [0x0, 0x80000000]: 247 | highest, _, _, _ = cpuid(eax) 248 | for eax in range(eax, highest + 1): 249 | a, b, c, d = cpuid(eax) 250 | 251 | # popcnt 252 | if eax == 1 and c & (1 << 23): 253 | modern = True 254 | 255 | # pext 256 | if eax == 7 and b & (1 << 8): 257 | bmi2 = True 258 | except OSError: 259 | pass 260 | 261 | return modern, bmi2 262 | 263 | 264 | class HttpError(Exception): 265 | def __init__(self, status, reason, body): 266 | self.status = status 267 | self.reason = reason 268 | self.body = body 269 | 270 | def __str__(self): 271 | return "HTTP %d %s\n\n%s" % (self.status, self.reason, self.body) 272 | 273 | def __repr__(self): 274 | return "%s(%d, %r, %r)" % (type(self).__name__, self.status, 275 | self.reason, self.body) 276 | 277 | 278 | class HttpServerError(HttpError): 279 | pass 280 | 281 | 282 | class HttpClientError(HttpError): 283 | pass 284 | 285 | 286 | @contextlib.contextmanager 287 | def http(method, url, body=None, headers=None): 288 | url_info = urlparse.urlparse(url) 289 | if url_info.scheme == "https": 290 | con = httplib.HTTPSConnection(url_info.hostname, url_info.port or 443) 291 | else: 292 | con = httplib.HTTPConnection(url_info.hostname, url_info.port or 80) 293 | 294 | con.request(method, url_info.path, body, headers) 295 | response = con.getresponse() 296 | 297 | try: 298 | if 400 <= response.status < 500: 299 | raise HttpClientError(response.status, response.reason, 300 | response.read()) 301 | elif 500 <= response.status < 600: 302 | raise HttpServerError(response.status, response.reason, 303 | response.read()) 304 | else: 305 | yield response 306 | finally: 307 | con.close() 308 | -------------------------------------------------------------------------------- /modules/game/AnalysedGame.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from math import log10, floor 4 | import numpy as np 5 | import json 6 | 7 | from modules.game.Game import Game, GameID, Emt 8 | from modules.game.Colour import Colour 9 | from modules.game.Player import PlayerID 10 | from modules.game.AnalysedMove import AnalysedMove, AnalysedMoveBSONHandler, EngineEval, Analysis, Rank, TrueRank 11 | from modules.game.AnalysedPosition import AnalysedPosition 12 | 13 | from pymongo.collection import Collection 14 | 15 | AnalysedGameID = NewType('AnalysedGameID', str) # / 16 | 17 | AnalysedGameTensor = NewType('AnalysedGameTensor', np.ndarray) 18 | 19 | class AnalysedGame(NamedTuple('AnalysedGame', [ 20 | ('id', AnalysedGameID), 21 | ('playerId', PlayerID), 22 | ('gameId', GameID), 23 | ('analysedMoves', List[AnalysedMove]) 24 | ])): 25 | """ 26 | An analysed game is a game that has been deeply analysed from a single 27 | player's perspective. 
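Each analysed move stores the engine evaluation of the position reached and the
evaluations of the top engine candidate moves (principal variations), along with
the elapsed move time.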
28 | """ 29 | @staticmethod 30 | def new(gameId: GameID, colour: Colour, playerId: PlayerID, analysedMoves: List[AnalysedMove]): 31 | return AnalysedGame( 32 | id=AnalysedGame.makeId(gameId, colour), 33 | playerId=playerId, 34 | gameId=gameId, 35 | analysedMoves=analysedMoves) 36 | 37 | @staticmethod 38 | def makeId(gameId: GameID, colour: Colour) -> AnalysedGameID: 39 | return gameId + '/' + ('white' if colour else 'black') 40 | 41 | def tensor(self, length: int = 60) -> AnalysedGameTensor: 42 | emtAvg = self.emtAverage() 43 | wclAvg = self.wclAverage() 44 | ts = [ma.tensor(emtAvg, wclAvg) for ma in self.analysedMoves] 45 | ts = ts[:length] 46 | ts = ts + (length-len(ts))*[AnalysedMove.nullTensor()] 47 | return ts 48 | 49 | def toJson(self): 50 | return AnalysedGameBSONHandler.writes(self) 51 | 52 | def emtAverage(self) -> Number: 53 | return np.average([m.emt for m in self.analysedMoves]) 54 | 55 | def wclAverage(self) -> Number: 56 | return np.average([m.winningChancesLoss() for m in self.analysedMoves]) 57 | 58 | def gameLength(self) -> int: 59 | return len(self.analysedMoves) 60 | 61 | def emts(self) -> List[Emt]: 62 | return [m.emt for m in self.analysedMoves] 63 | 64 | def emtSeconds(self) -> List[Number]: 65 | return [emt/100 for emt in self.emts()] 66 | 67 | def winningChances(self) -> List[Number]: 68 | return [m.advantage() for m in self.analysedMoves] 69 | 70 | def winningChancesPercent(self) -> List[Number]: 71 | return [100*m.advantage() for m in self.analysedMoves] 72 | 73 | def winningChancesLossPercent(self, usePV: bool = True) -> List[Number]: 74 | return [100*m.winningChancesLoss(usePV=usePV) for m in self.analysedMoves] 75 | 76 | def winningChancesLossByPV(self): 77 | """ for generating graphs """ 78 | pvs = [( 79 | 'PV'+str(i+1), 80 | 'rgba(20, 20, 20, ' + str(0.6 - i*0.1) + ')', 81 | []) for i in range(5)] # one entry per PV 82 | for analysedMove in self.analysedMoves: 83 | losses = analysedMove.PVsWinningChancesLoss() 84 | for i in range(5): 85 | try: 86 | pvs[i][2].append(max(0, 100*losses[i])) 87 | except IndexError: 88 | pvs[i][2].append('null') 89 | return pvs 90 | 91 | def ranks(self) -> List[TrueRank]: 92 | """ for generating graphs """ 93 | return [move.trueRank() for move in self.analysedMoves] 94 | 95 | def ambiguities(self) -> List[int]: 96 | """ for generating graphs """ 97 | return [move.ambiguity() for move in self.analysedMoves] 98 | 99 | def length(self) -> int: 100 | return len(self.analysedMoves) 101 | 102 | def ranksJSON(self) -> str: 103 | return json.dumps(self.ranks()) 104 | 105 | def binnedSeconds(self, bins: int = 10) -> Dict: 106 | # JSON format for graphing 107 | emts = self.emts() 108 | minSec = min(emts) 109 | maxSec = max(emts) 110 | step = int((maxSec-minSec)/bins) 111 | data = [[] for i in range(bins)] 112 | labels = [[] for i in range(bins)] 113 | for i, stepStart in enumerate(range(minSec, maxSec, step)): 114 | data[min(bins-1, i)] = len([a for a in emts if a >= stepStart and a <= stepStart+step]) 115 | labels[min(bins-1, i)] = str(round_sig(stepStart/100)) + '-' + str(round_sig((stepStart+step)/100)) + 's' 116 | return {'data': json.dumps(data), 'labels': json.dumps(labels)} 117 | 118 | def binnedLosses(self, bins: int = 10) -> Dict: 119 | # JSON format for graphing 120 | losses = self.winningChancesLossPercent() 121 | data = [[] for i in range(bins+1)] 122 | for i in range(0, bins, 1): 123 | data[min(bins-1,i)] = len([a for a in losses if i == int(a)]) 124 | data[bins] = sum([int(a >= 10) for a in losses]) 125 | labels = [('-' + 
str(a) + '%') for a in range(bins)] 126 | labels.append('Other') 127 | return {'data': json.dumps(data), 'labels': json.dumps(labels)} 128 | 129 | def binnedPVs(self, bins: int = 6) -> Dict: 130 | # JSON format for graphing 131 | pvs = self.ranks() 132 | data = [[] for i in range(bins)] 133 | for i, p in enumerate([1, 2, 3, 4, 5, None]): 134 | data[i] = len([1 for pv in pvs if pv == p]) 135 | labels = ['PV 1', 'PV 2', 'PV 3', 'PV 4', 'PV 5', 'Other'] 136 | return {'data': json.dumps(data), 'labels': json.dumps(labels)} 137 | 138 | def moveRankByTime(self) -> List[Dict]: 139 | return [{'x': time, 'y': rank} for rank, time in zip(self.ranks(), self.emtSeconds())] 140 | 141 | def moveRankByTimeJSON(self) -> str: 142 | # json format for graphing 143 | return json.dumps(self.moveRankByTime()) 144 | 145 | def lossByTime(self) -> List[Dict]: 146 | return [{'x': time, 'y': loss} for loss, time in zip(self.winningChancesLossPercent(), self.emtSeconds())] 147 | 148 | def lossByTimeJSON(self) -> str: 149 | # json format for graphing 150 | return json.dumps(self.lossByTime()) 151 | 152 | def lossByRank(self) -> List[Dict]: 153 | return [{'x': rank, 'y': loss} for loss, rank in zip(self.winningChancesLossPercent(), self.ranks())] 154 | 155 | def lossByRankJSON(self) -> str: 156 | # json format for graphing 157 | return json.dumps(self.lossByRank()) 158 | 159 | def round_sig(x, sig=2): 160 | if x == 0: 161 | return 0 162 | return round(x, sig-int(floor(log10(abs(x))))-1) 163 | 164 | class GameAnalysedGame(NamedTuple('GameAnalysedGame', [ 165 | ('analysedGame', AnalysedGame), 166 | ('game', Game) 167 | ])): 168 | """ 169 | Merger of Game and Analysed Game for a merged tensor 170 | """ 171 | def length(self): 172 | return self.analysedGame.length() 173 | 174 | def tensor(self): 175 | try: 176 | gt = self.game.boardTensorsByPlayerId(self.analysedGame.playerId, safe = False) 177 | at = self.analysedGame.tensor() 178 | return [ 179 | [_1 + _2 for _1, _2 in zip(gt[0], at)], 180 | gt[1] 181 | ] 182 | except (TypeError, AttributeError): 183 | return None 184 | 185 | class AnalysedGameBSONHandler: 186 | @staticmethod 187 | def reads(bson: Dict) -> AnalysedGame: 188 | return AnalysedGame( 189 | id = bson['_id'], 190 | playerId = bson['userId'], 191 | gameId = bson['gameId'], 192 | analysedMoves = [AnalysedMoveBSONHandler.reads(am) for am in bson['analysis']]) 193 | 194 | @staticmethod 195 | def writes(analysedGame: AnalysedGame) -> Dict: 196 | return { 197 | '_id': analysedGame.id, 198 | 'userId': analysedGame.playerId, 199 | 'gameId': analysedGame.gameId, 200 | 'analysis': [AnalysedMoveBSONHandler.writes(am) for am in analysedGame.analysedMoves] 201 | } 202 | 203 | class AnalysedGameDB(NamedTuple('AnalysedGameDB', [ 204 | ('analysedGameColl', Collection) 205 | ])): 206 | def write(self, analysedGame: AnalysedGame): 207 | return self.analysedGameColl.update_one( 208 | {'_id': analysedGame.id}, 209 | {'$set': AnalysedGameBSONHandler.writes(analysedGame)}, 210 | upsert=True) 211 | 212 | def writeMany(self, analysedGames: List[AnalysedGame]): 213 | return [self.write(ga) for ga in analysedGames] 214 | 215 | def byPlayerId(self, playerId: PlayerID) -> List[AnalysedGame]: 216 | return [AnalysedGameBSONHandler.reads(ga) for ga in self.analysedGameColl.find({'userId': playerId})] 217 | 218 | def byPlayerIds(self, playerIds: List[PlayerID]) -> List[AnalysedGame]: 219 | return [self.byPlayerId(playerId) for playerId in playerIds] 220 | 221 | def byId(self, _id: AnalysedGameID) -> Opt[AnalysedGame]: 222 | bson = 
self.analysedGameColl.find_one({"_id": _id}) 223 | return None if bson is None else AnalysedGameBSONHandler.reads(bson) 224 | 225 | def byIds(self, ids: List[AnalysedGameID]) -> List[AnalysedGame]: 226 | return [AnalysedGameBSONHandler.reads(ga) for ga in self.analysedGameColl.find({"_id": {"$in": ids}})] 227 | 228 | def allBatch(self, batch: int, batchSize: int = 500): 229 | """ 230 | Gets all analysed games in a paged format 231 | batch = page number 232 | batchSize = size of page 233 | """ 234 | return [AnalysedGameBSONHandler.reads(ga) for ga in self.analysedGameColl.find(skip=batch*batchSize, limit=batchSize)] 235 | 236 | def byGameIdAndUserId(self, gameId: GameID, playerId: PlayerID) -> Opt[AnalysedGame]: 237 | bson = self.analysedGameColl.find_one({'gameId': gameId, 'userId': playerId}) 238 | return None if bson is None else AnalysedGameBSONHandler.reads(bson) -------------------------------------------------------------------------------- /modules/game/AnalysedMove.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.EngineEval import EngineEval, EngineEvalBSONHandler 4 | 5 | from modules.game.Game import Emt 6 | from functools import lru_cache 7 | from math import exp 8 | import numpy as np 9 | 10 | # For moves that have been analysed by stockfish 11 | 12 | UCI = NewType('UCI', str) 13 | 14 | MoveNumber = NewType('MoveNumber', int) 15 | 16 | Analysis = NamedTuple('Analysis', [ 17 | ('uci', 'UCI'), 18 | ('engineEval', 'EngineEval') 19 | ]) 20 | 21 | Rank = NewType('Rank', int) 22 | TrueRank = NewType('TrueRank', Opt[Rank]) 23 | 24 | class AnalysedMove(NamedTuple('AnalysedMove', [ 25 | ('uci', UCI), 26 | ('move', MoveNumber), 27 | ('emt', Emt), 28 | ('engineEval', EngineEval), 29 | ('analyses', List[Analysis]) 30 | ])): 31 | def tensor(self, timeAvg: Number, wclAvg: Number) -> List[Number]: 32 | return [ 33 | self.rank() + 1, 34 | self.ambiguity() + 1, 35 | self.advantage(), 36 | self.emt / (timeAvg + 1e-8), # elapsed move time / average 37 | abs(self.emt - timeAvg) / (timeAvg + 1e-8), # variance from average 38 | self.difToNextBest(), 39 | self.difToNextWorst(), 40 | self.winningChancesLoss(), # loss of advantage 41 | self.winningChancesLoss() - wclAvg, # loss in comparison to average 42 | self.averageWinningChancesLoss() 43 | ] 44 | 45 | @staticmethod 46 | def nullTensor() -> List[int]: 47 | return 10*[0] 48 | 49 | def top(self) -> Opt[Analysis]: 50 | return next(iter(self.analyses or []), None) 51 | 52 | def difToNextBest(self) -> Number: 53 | tr = self.trueRank() 54 | if tr is not None and tr != 1: 55 | return winningChances(self.analyses[tr-2].engineEval) - self.advantage() 56 | elif tr == 1: 57 | return 0 58 | else: 59 | return winningChances(self.analyses[-1].engineEval) - self.advantage() 60 | 61 | def difToNextWorst(self) -> Number: 62 | tr = self.trueRank() 63 | if tr is not None and tr <= len(self.analyses)-1: 64 | return winningChances(self.analyses[tr].engineEval) - self.advantage() 65 | return 0 66 | 67 | def PVsWinningChancesLoss(self) -> Number: 68 | return [winningChances(self.top().engineEval) - winningChances(a.engineEval) for a in self.analyses] 69 | 70 | def averageWinningChancesLoss(self) -> Number: 71 | return np.average(self.PVsWinningChancesLoss()) 72 | 73 | def winningChancesLoss(self, usePV: bool = False) -> Number: 74 | adv = self.advantage() 75 | if usePV: 76 | r = self.trueRank() 77 | if r is not None: 78 | adv = winningChances(self.analyses[r-1].engineEval) 79 | 
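# The loss is clamped at zero: a move that improves winning chances never counts as a negative loss.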
80 | return max(0, winningChances(self.top().engineEval) - adv) 81 | 82 | def advantage(self) -> Number: 83 | return winningChances(self.engineEval) 84 | 85 | def ambiguity(self) -> int: # 1 = only one top move, 5 = all moves good 86 | return sum(int(similarChances(winningChances(self.top().engineEval), winningChances(analysis.engineEval))) for analysis in self.analyses) 87 | 88 | def trueRank(self) -> TrueRank: 89 | return next((x+1 for x, am in enumerate(self.analyses) if am.uci == self.uci), None) 90 | 91 | def rank(self) -> Rank: 92 | return min(15, next((x for x, am in enumerate(self.analyses) if am.uci == self.uci), self.projectedRank()) + 1) 93 | 94 | def projectedRank(self) -> Number: 95 | if len(self.analyses) == 1: 96 | return 10 97 | else: # rise over run prediction of move rank given the difference between the winning chances in the bottom two analysed moves 98 | try: 99 | return len(self.analyses) + int(len(self.analyses)*abs(winningChances(self.analyses[-1].engineEval) - winningChances(self.engineEval)) / abs(winningChances(self.analyses[0].engineEval) - winningChances(self.analyses[-2].engineEval))) 100 | except ZeroDivisionError: 101 | return 10 102 | 103 | @lru_cache(maxsize=64) 104 | def winningChances(engineEval: EngineEval) -> Number: 105 | if engineEval.mate is not None: 106 | return 1 if engineEval.mate > 0 else 0 107 | else: 108 | return 1 / (1 + exp(-0.004 * engineEval.cp)) 109 | 110 | def similarChances(c1: Number, c2: Number) -> bool: 111 | return abs(c1 - c2) < 0.05 112 | 113 | class AnalysisBSONHandler: 114 | @staticmethod 115 | def reads(bson: Dict) -> Analysis: 116 | return Analysis( 117 | uci = bson['uci'], 118 | engineEval = EngineEvalBSONHandler.reads(bson['score']) 119 | ) 120 | 121 | @staticmethod 122 | def writes(analysis: Analysis) -> Dict: 123 | return { 124 | 'uci': analysis.uci, 125 | 'score': EngineEvalBSONHandler.writes(analysis.engineEval) 126 | } 127 | 128 | 129 | class AnalysedMoveBSONHandler: 130 | @staticmethod 131 | def reads(bson: Dict) -> AnalysedMove: 132 | return AnalysedMove( 133 | uci = bson['uci'], 134 | move = bson['move'], 135 | emt = bson['emt'], 136 | engineEval = EngineEvalBSONHandler.reads(bson['score']), 137 | analyses = [AnalysisBSONHandler.reads(a) for a in bson['analyses']] 138 | ) 139 | 140 | @staticmethod 141 | def writes(analysedMove: AnalysedMove) -> Dict: 142 | return { 143 | 'uci': analysedMove.uci, 144 | 'move': analysedMove.move, 145 | 'emt': analysedMove.emt, 146 | 'score': EngineEvalBSONHandler.writes(analysedMove.engineEval), 147 | 'analyses': [AnalysisBSONHandler.writes(a) for a in analysedMove.analyses] 148 | } 149 | -------------------------------------------------------------------------------- /modules/game/AnalysedPosition.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.AnalysedMove import Analysis, AnalysisBSONHandler 4 | from chess import polyglot, Board 5 | from pymongo.collection import Collection 6 | import pymongo 7 | import logging 8 | 9 | AnalysedPositionID = NewType('AnalysedPositionID', str) 10 | 11 | class AnalysedPosition(NamedTuple('AnalysedPosition', [ 12 | ('id', AnalysedPositionID), 13 | ('analyses', List[Analysis]) 14 | ])): 15 | """ 16 | Like an analysed move, but only with SF analysis. Does not contain any other move data. 17 | This is used for accelerating stockfish analysis. 
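Positions are keyed by the Zobrist hash of the board, so an identical position
reached in any game can reuse the cached analysis.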
18 | """ 19 | @staticmethod 20 | def fromBoardAndAnalyses(board: Board, analyses: List[Analysis]): 21 | return AnalysedPosition( 22 | id=AnalysedPosition.idFromBoard(board), 23 | analyses=analyses) 24 | 25 | @staticmethod 26 | def idFromBoard(board: Board) -> AnalysedPositionID: 27 | return str(polyglot.zobrist_hash(board)) 28 | 29 | class AnalysedPositionBSONHandler: 30 | @staticmethod 31 | def reads(bson: Dict) -> AnalysedPosition: 32 | return AnalysedPosition( 33 | id=bson['_id'], 34 | analyses=[AnalysisBSONHandler.reads(b) for b in bson['analyses']]) 35 | 36 | def writes(analysedPosition: AnalysedPosition) -> Dict: 37 | return { 38 | '_id': analysedPosition.id, 39 | 'analyses': [AnalysisBSONHandler.writes(a) for a in analysedPosition.analyses] 40 | } 41 | 42 | class AnalysedPositionDB(NamedTuple('AnalysedPositionDB', [ 43 | ('analysedPositionColl', Collection) 44 | ])): 45 | def write(self, analysedPosition: AnalysedPosition): 46 | try: 47 | self.analysedPositionColl.update_one( 48 | {'_id': analysedPosition.id}, 49 | {'$set': AnalysedPositionBSONHandler.writes(analysedPosition)}, 50 | upsert=True) 51 | except pymongo.errors.DuplicateKeyError: 52 | logging.warning("DuplicateKeyError when attempting to write position: " + str(analysedPosition.id)) 53 | 54 | def writeMany(self, analysedPositions: List[AnalysedPosition]): 55 | [self.write(analysedPosition) for analysedPosition in analysedPositions] 56 | 57 | def byBoard(self, board: Board) -> Opt[AnalysedPosition]: 58 | analysedPositionBSON = self.analysedPositionColl.find_one({'_id': AnalysedPosition.idFromBoard(board)}) 59 | return None if analysedPositionBSON is None else AnalysedPositionBSONHandler.reads(analysedPositionBSON) -------------------------------------------------------------------------------- /modules/game/Api.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | import logging 3 | 4 | from modules.game.AnalysedGame import AnalysedGameBSONHandler 5 | 6 | from modules.game.Env import Env 7 | 8 | from modules.game.Player import Player 9 | from modules.game.Player import PlayerID 10 | from modules.game.Game import Game, GameID 11 | 12 | class Api(NamedTuple('Api', [ 13 | ('env', Env) 14 | ])): 15 | def writeAnalysedGames(self, analysedGamesBSON: List[Dict]) -> bool: 16 | try: 17 | analysedGames = [AnalysedGameBSONHandler.reads(g) for g in analysedGamesBSON] 18 | self.env.analysedGameDB.writeMany(analysedGames) 19 | return True 20 | except (KeyError, ValueError): 21 | logging.warning('Malformed analysedGamesBSON: ' + str(analysedGamesBSON)) 22 | return False 23 | 24 | def gamesForAnalysis(self, playerId: PlayerID, required: List[str] = []) -> List[Game]: 25 | """ 26 | Given a playerId and an amount of games. 
This function will return the games within `limit` 27 | that should be analysed 28 | """ 29 | games = self.env.gameDB.byPlayerId(playerId) 30 | analysedGames = self.env.analysedGameDB.byPlayerId(playerId) 31 | 32 | gameIds = {g.id for g in games} 33 | analysedGameIds = {g.gameId for g in analysedGames} 34 | 35 | notAnalysedButRequiredIds = set(required) - analysedGameIds 36 | 37 | correct_length = lambda g: len(g.pgn) >= 40 and len(g.pgn) <= 120 38 | games = [g for g in games if g.id in notAnalysedButRequiredIds and correct_length(g)] 39 | 40 | return games 41 | 42 | def gamesByIds(self, gameIds: List[GameID]): 43 | return self.env.gameDB.byIds(gameIds) 44 | 45 | def writeGames(self, games: List[Game]): 46 | """ 47 | Store games from lichess 48 | """ 49 | self.env.gameDB.writeMany(games) 50 | 51 | def writePlayer(self, player: Player): 52 | """ 53 | Upsert a new player to the db 54 | """ 55 | self.env.playerDB.write(player) 56 | -------------------------------------------------------------------------------- /modules/game/Colour.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | Colour = NewType('Color', bool) 4 | White = Colour(True) 5 | Black = Colour(False) -------------------------------------------------------------------------------- /modules/game/EngineEval.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.Colour import Colour 4 | 5 | from math import exp 6 | 7 | class EngineEval(NamedTuple('EngineEval', [ 8 | ('cp', Opt[Number]), 9 | ('mate', Opt[int]) 10 | ])): 11 | @staticmethod 12 | def fromDict(d: Dict): 13 | return EngineEval(d.get('cp', None), d.get('mate', None)) 14 | 15 | def asdict(self) -> Dict: 16 | return {'cp': self.cp} if self.cp is not None else {'mate': self.mate} 17 | 18 | def inverse(self): 19 | return EngineEval(-self.cp if self.cp is not None else None, 20 | -self.mate if self.mate is not None else None) 21 | 22 | def winningChances(self, colour: Colour) -> Number: 23 | if self.mate is not None: 24 | base = (1 if self.mate > 0 else 0) 25 | else: 26 | base = 1 / (1 + exp(-0.004 * self.cp)) 27 | return 100*(base if colour else (1-base)) 28 | 29 | class EngineEvalBSONHandler: 30 | @staticmethod 31 | def reads(bson: Dict) -> List[EngineEval]: 32 | return EngineEval.fromDict(bson) 33 | 34 | def writes(engineEval: EngineEval) -> Dict: 35 | return engineEval.asdict() -------------------------------------------------------------------------------- /modules/game/EngineTools.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from modules.game.Game import Game 6 | from modules.game.Colour import Colour 7 | from modules.game.AnalysedGame import AnalysedGame 8 | from modules.game.EngineEval import EngineEval 9 | from modules.game.AnalysedPosition import AnalysedPosition, AnalysedPositionDB 10 | from modules.game.AnalysedMove import AnalysedMove, Analysis 11 | 12 | from modules.fishnet.fishnet import stockfish_command 13 | 14 | from chess.pgn import read_game 15 | 16 | from chess import uci 17 | from chess.uci import Engine 18 | from chess.uci import InfoHandler 19 | 20 | try: 21 | from StringIO import StringIO 22 | except ImportError: 23 | from io import StringIO 24 | 25 | class EngineTools(NamedTuple('EngineTools', [ 26 | ('engine', Engine), 27 | ('infoHandler', InfoHandler) 28 | 
])): 29 | @staticmethod 30 | def new(conf: ConfigWrapper): 31 | engine = uci.popen_engine(stockfish_command(conf['stockfish update'])) 32 | engine.setoption({'Threads': conf['stockfish threads'], 'Hash': conf['stockfish memory']}) 33 | engine.uci() 34 | 35 | infoHandler = uci.InfoHandler() 36 | 37 | engine.info_handlers.append(infoHandler) 38 | 39 | return EngineTools( 40 | engine=engine, 41 | infoHandler=infoHandler) 42 | 43 | def analyseGame(self, game: Game, colour: Colour, nodes: int) -> Opt[AnalysedGame]: 44 | gameLen = len(game.pgn) 45 | if gameLen < 40 or gameLen > 120: 46 | logging.warning(f'game too long/short to analyse ({gameLen} plys)') 47 | return None 48 | elif game.emts is None: 49 | logging.warning(f'game has no emts') 50 | return None 51 | analysedMoves = [] 52 | 53 | try: 54 | playableGame = read_game(StringIO(" ".join(game.pgn))) 55 | except ValueError: 56 | return None 57 | 58 | node = playableGame 59 | mainline_moves = [x for x in node.main_line()] 60 | if len(game.emts) < len(mainline_moves): 61 | logging.warning(f"Not enough emts. len(emts): {len(game.emts)} vs len(node.main_line()): {len(mainline_moves)}") 62 | return None 63 | 64 | self.engine.ucinewgame() 65 | 66 | while not node.is_end(): 67 | logging.info(f'analysing position\n{node.board()}\n') 68 | nextNode = node.variation(0) 69 | if colour == node.board().turn: ## if it is the turn of the player of interest 70 | self.engine.setoption({'multipv': 5}) 71 | self.engine.position(node.board()) 72 | self.engine.go(nodes=nodes) 73 | 74 | analyses = list([ 75 | Analysis( 76 | pv[1][0].uci(), 77 | EngineEval(engineEval[1].cp, engineEval[1].mate)) for engineEval, pv in zip( 78 | self.infoHandler.info['score'].items(), 79 | self.infoHandler.info['pv'].items())]) 80 | 81 | self.engine.setoption({'multipv': 1}) 82 | self.engine.position(nextNode.board()) 83 | self.engine.go(nodes=nodes) 84 | 85 | engineEval = EngineEval( 86 | self.infoHandler.info['score'][1].cp, 87 | self.infoHandler.info['score'][1].mate).inverse() # flipped because analysing from other player side 88 | 89 | moveNumber = node.board().fullmove_number 90 | 91 | analysedMoves.append(AnalysedMove( 92 | uci = node.variation(0).move.uci(), 93 | move = moveNumber, 94 | emt = game.emts[EngineTools.ply(moveNumber, colour)], 95 | engineEval = engineEval, 96 | analyses = analyses)) 97 | 98 | node = nextNode 99 | 100 | playerId = game.white if colour else game.black 101 | return AnalysedGame.new(game.id, colour, playerId, analysedMoves) 102 | 103 | @staticmethod 104 | def ply(moveNumber, colour: Colour) -> int: 105 | return (2*(moveNumber-1)) + (0 if colour else 1) 106 | -------------------------------------------------------------------------------- /modules/game/Env.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from modules.game.Game import GameDB 6 | from modules.game.AnalysedGame import AnalysedGameDB 7 | from modules.game.Player import PlayerDB 8 | from modules.game.AnalysedPosition import AnalysedPositionDB 9 | 10 | from pymongo.database import Database 11 | 12 | class Env: 13 | def __init__(self, config: ConfigWrapper, db: Database): 14 | self.config = config 15 | self.db = db 16 | 17 | self.gameDB = GameDB(self.db[self.config["game coll game"]]) 18 | self.analysedGameDB = AnalysedGameDB(self.db[self.config["game coll analysed_game"]]) 19 | self.playerDB = PlayerDB(self.db[self.config["game coll player"]]) 20 | 
self.analysedPositionDB = AnalysedPositionDB(self.db[self.config["game coll analysed_position"]]) -------------------------------------------------------------------------------- /modules/game/Game.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.Colour import Colour 4 | from modules.game.Player import PlayerID 5 | from modules.game.EngineEval import EngineEval, EngineEvalBSONHandler 6 | 7 | from pymongo.collection import Collection 8 | 9 | from multiprocessing import Pool 10 | 11 | import math 12 | import chess 13 | from chess.pgn import read_game 14 | import numpy as np 15 | 16 | GameID = NewType('GameID', str) 17 | Emt = NewType('Emt', int) 18 | Analysis = NewType('Analysis', Opt[List[EngineEval]]) 19 | 20 | MoveTensor = NewType('MoveTensor', List[Number]) 21 | GameTensor = NewType('GameTensor', List[MoveTensor]) 22 | 23 | class Game(NamedTuple('Game', [ 24 | ('id', GameID), 25 | ('white', PlayerID), 26 | ('black', PlayerID), 27 | ('pgn', List[str]), 28 | ('emts', Opt[List[Emt]]), 29 | ('analysis', Analysis) 30 | ])): 31 | @staticmethod 32 | def fromDict(d: Dict): 33 | return Game( 34 | id=d['id'], 35 | white=d['white'], 36 | black=d['black'], 37 | pgn=d['pgn'].split(' '), 38 | emts=d['emts'], 39 | analysis=None if d.get('analysis') is None else [EngineEval.fromDict(a) for a in d['analysis']] 40 | ) 41 | 42 | @staticmethod 43 | def fromJson(json: Dict): 44 | return Game.fromDict(json) 45 | 46 | def toJson(self): 47 | return { 48 | '_id': self.id, 49 | 'white': self.white, 50 | 'black': self.black, 51 | 'pgn': self.pgn, 52 | 'emts': self.emts, 53 | 'analysis': [EngineEvalBSONHandler.writes(a) for a in self.analysis], 54 | 'analysed': len(self.analysis) > 0 55 | } 56 | 57 | 58 | def playable(self): 59 | try: 60 | from StringIO import StringIO 61 | except ImportError: 62 | from io import StringIO 63 | 64 | return read_game(StringIO(" ".join(self.pgn))) 65 | 66 | def boardTensors(self, colour): 67 | # replay the game for move tensors 68 | playable = self.playable() 69 | node = playable.variation(0) 70 | 71 | advancement = lambda rank: rank if colour else (7 - rank) 72 | 73 | while not node.is_end(): 74 | nextNode = node.variation(0) 75 | 76 | board = node.board() 77 | move = node.move 78 | 79 | if board.turn == colour: 80 | yield ( 81 | [ 82 | advancement(chess.square_rank(move.to_square)), 83 | board.pseudo_legal_moves.count(), 84 | int(board.is_capture(move)) 85 | ], 86 | board.piece_at(move.to_square).piece_type 87 | ) 88 | 89 | node = nextNode 90 | 91 | def boardTensorsByPlayerId(self, playerId: PlayerID, length: int = 60, safe: bool = True): 92 | if safe and self.white != playerId and self.black != playerId: 93 | logging.warning(f'{playerId} is not a player in game {self.id} - ({self.white}, {self.black})') 94 | return None 95 | 96 | colour = (self.white == playerId) 97 | tensors = list(self.boardTensors(colour)) 98 | remaining = max(0, length-len(tensors)) 99 | output = [ 100 | [remaining*[Game.nullBoardTensor()] + [t[0] for t in tensors]][0][:length], 101 | [remaining*[[0]] + [[t[1]] for t in tensors]][0][:length] 102 | ] 103 | 104 | return output 105 | 106 | def tensor(self, playerId: PlayerID, length: int = 60, noisey: bool = False, safe: bool = True) -> Opt[GameTensor]: 107 | if self.analysis == [] or (safe and self.white != playerId and self.black != playerId): 108 | if noisey: 109 | logging.debug(f'playerId: "{playerId}"') 110 | logging.debug(f'gameId: "{self.id}"') 111 | 
logging.debug(f'white: "{self.white}"') 112 | logging.debug(f'black: "{self.black}"') 113 | return None 114 | 115 | colour = (self.white == playerId) 116 | 117 | analysis = self.analysis[1:] if colour else self.analysis 118 | analysis = list(zip(analysis[0::2],analysis[1::2])) # grouping analyses pairwise 119 | 120 | emts = self.emtsByColour(colour, [-1 for _ in self.analysis] if self.emts is None else self.emts) 121 | avgEmt = np.average(emts) 122 | boardTensors = list(self.boardTensors(colour)) 123 | pieceTypes = [[b[1]] for b in boardTensors] 124 | tensors = [Game.moveTensor(a, e, b, avgEmt, colour) for a, e, b in zip(analysis, emts, [b[0] for b in boardTensors])] 125 | remaining = (max(0, length-len(tensors))) 126 | tensors = [ 127 | #np.array([remaining*[Game.nullMoveTensor()] + tensors][0][:length]), 128 | [remaining*[Game.nullMoveTensor()] + tensors][0][:length], 129 | #np.array([remaining*[[0]] + pieceTypes][0][:length]) 130 | [remaining*[[0]] + pieceTypes][0][:length] 131 | ] # pad to `length` tensors in length 132 | return tensors 133 | 134 | def emtsByColour(self, colour: Colour, emts: Opt[List[int]] = None) -> List[Emt]: 135 | emts = self.emts if emts is None else emts 136 | return emts[(0 if colour else 1)::2] 137 | 138 | @staticmethod 139 | def moveTensor(analysis: Analysis, emt: Emt, boardTensor: List[int], avgEmt: Number, colour: Colour) -> MoveTensor: 140 | return [ 141 | analysis[1].winningChances(colour), 142 | (analysis[0].winningChances(colour) - analysis[1].winningChances(colour)), 143 | emt, 144 | emt - avgEmt, 145 | 100*((emt - avgEmt)/(avgEmt + 1e-8)), 146 | ] + boardTensor 147 | 148 | @staticmethod 149 | def nullBoardTensor(): 150 | return [0, 0, 0] 151 | 152 | @staticmethod 153 | def nullMoveTensor() -> MoveTensor: 154 | return [0, 0, 0, 0, 0, 0, 0, 0] 155 | 156 | @staticmethod 157 | def ply(moveNumber: int, colour: Colour) -> int: 158 | return (2*(moveNumber-1)) + (0 if colour else 1) 159 | 160 | class GameBSONHandler: 161 | @staticmethod 162 | def reads(bson: Dict) -> Game: 163 | return Game( 164 | id = bson['_id'], 165 | white = bson.get('white'), 166 | black = bson.get('black'), 167 | pgn = bson['pgn'], 168 | emts = bson['emts'], 169 | analysis = [EngineEvalBSONHandler.reads(a) for a in bson.get('analysis', [])]) 170 | 171 | @staticmethod 172 | def writes(game: Game) -> Dict: 173 | return { 174 | 'white': game.white, 175 | 'black': game.black, 176 | 'pgn': game.pgn, 177 | 'emts': game.emts, 178 | 'analysis': [EngineEvalBSONHandler.writes(a) for a in game.analysis], 179 | 'analysed': len(game.analysis) > 0 180 | } 181 | 182 | class GameDB(NamedTuple('GameDB', [ 183 | ('gameColl', Collection) 184 | ])): 185 | def byId(self, _id: GameID) -> Opt[Game]: 186 | bson = self.gameColl.find_one({'_id': _id}) 187 | return None if bson is None else GameBSONHandler.reads(bson) 188 | 189 | def byIds(self, ids: List[GameID]) -> List[Game]: 190 | return [self.byId(gid) for gid in ids] 191 | #return [GameBSONHandler.reads(g) for g in self.gameColl.find({'_id': {'$in': [i for i in ids]}})] 192 | 193 | def byPlayerId(self, playerId: PlayerID) -> List[Game]: 194 | return [GameBSONHandler.reads(g) for g in self.gameColl.find({"$or": [{"white": playerId}, {"black": playerId}]})] 195 | 196 | def byPlayerIdAndAnalysed(self, playerId: PlayerID, analysed: bool = True) -> List[Game]: 197 | return [GameBSONHandler.reads(g) for g in self.gameColl.find({"analysed": analysed, "$or": [{"white": playerId}, {"black": playerId}]})] 198 | 199 | def write(self, game: Game): 200 | 
self.gameColl.update_one({'_id': game.id}, {'$set': GameBSONHandler.writes(game)}, upsert=True) 201 | 202 | def writeMany(self, games: List[Game]): 203 | [self.write(g) for g in games] 204 | -------------------------------------------------------------------------------- /modules/game/GameStore.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.Player import PlayerID 4 | from modules.game.Game import Game, GameID, GameTensor, Emt 5 | from modules.game.AnalysedGame import AnalysedGame, AnalysedGameTensor 6 | 7 | import numpy as np 8 | import math 9 | import json 10 | 11 | class GameStore(NamedTuple('GameStore', [ 12 | ('playerId', PlayerID), 13 | ('games', List[Game]), 14 | ('analysedGames', List[AnalysedGame]) 15 | ])): 16 | @staticmethod 17 | def new(playerId: PlayerID): 18 | return GameStore(playerId, [], []) 19 | 20 | def gamesWithoutAnalysis(self, excludeIds: List[GameID] = []) -> List[Game]: 21 | return [game for game in self.games if not self.gameIdHasAnalysis(game.id) if (game.id not in excludeIds)] 22 | 23 | def gameIdHasAnalysis(self, gid: GameID) -> bool: 24 | return any([ga for ga in self.analysedGames if ga.gameId == gid]) 25 | 26 | def hasGameId(self, gid: GameID) -> bool: 27 | return any([g for g in self.games if gid == g.id]) 28 | 29 | def gameById(self, gid: GameID) -> Opt[Game]: 30 | return next(iter([g for g in self.games if gid == g.id]), None) 31 | 32 | def addGames(self, games: List[Game]) -> None: 33 | [self.games.append(g) for g in games if (not self.hasGameId(g.id) and g.emts is not None and len(g.pgn) < 120 and len(g.pgn) > 40)] 34 | 35 | def addAnalysedGame(self, ga: AnalysedGame) -> None: 36 | if not self.gameIdHasAnalysis(ga.gameId) and ga is not None and len(ga.analysedMoves) < 60 and len(ga.analysedMoves) > 20: 37 | self.analysedGames.append(ga) 38 | 39 | def addAnalysedGames(self, analysedGames: List[AnalysedGame]) -> None: 40 | [self.addAnalysedGame(ga) for ga in analysedGames] 41 | 42 | def randomGamesWithoutAnalysis(self, size: int = 10, excludeIds: List[GameID] = []) -> List[Game]: 43 | gWithout = self.gamesWithoutAnalysis(excludeIds) 44 | if len(gWithout) > 0: 45 | return [gWithout[x] for x in np.random.choice(list(range(len(gWithout))), min(len(gWithout), size), replace=False)] 46 | return [] 47 | 48 | def gameTensors(self) -> List[GameTensor]: 49 | tensors = [(g.id, g.tensor(self.playerId)) for g in self.games] 50 | return [t for t in tensors if t[1] is not None] 51 | 52 | def gameTensorsWithoutAnalysis(self) -> List[GameTensor]: 53 | return [(gid, t) for gid, t in self.gameTensors(self.playerId) if not self.gameIdHasAnalysis(gid)] 54 | 55 | def analysedGameTensors(self) -> List[AnalysedGameTensor]: 56 | return [(analysedGame.tensor(), analysedGame.length()) for analysedGame in self.analysedGames if len(analysedGame.analysedMoves) < 60 and len(analysedGame.analysedMoves) > 20 and analysedGame.emtAverage() < 2000] 57 | 58 | def moveRankByTime(self): 59 | output = [] 60 | [output.extend(ga.moveRankByTime()) for ga in self.analysedGames] 61 | return output 62 | 63 | def moveRankByTimeJSON(self): 64 | return json.dumps(self.moveRankByTime()) 65 | 66 | def lossByTime(self): 67 | output = [] 68 | [output.extend(ga.lossByTime()) for ga in self.analysedGames] 69 | return output 70 | 71 | def lossByTimeJSON(self): 72 | return json.dumps(self.lossByTime()) 73 | 74 | def lossByRank(self): 75 | output = [] 76 | [output.extend(ga.lossByRank()) for ga in self.analysedGames] 
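# Collects the {'x': rank, 'y': loss} points from every analysed game into one flat list for graphing.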
77 | return output 78 | 79 | def lossByRankJSON(self): 80 | return json.dumps(self.lossByRank()) -------------------------------------------------------------------------------- /modules/game/Player.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from datetime import datetime, timedelta 4 | import pymongo 5 | from pymongo.collection import Collection 6 | 7 | from typing import NewType 8 | 9 | PlayerID = NewType('PlayerID', str) 10 | 11 | class Player(NamedTuple('Player', [ 12 | ('id', 'PlayerID'), 13 | ('titled', bool), 14 | ('engine', bool), 15 | ('gamesPlayed', int)])): 16 | @staticmethod 17 | def fromJson(userData: Dict): 18 | try: 19 | return Player( 20 | id=userData['id'], 21 | titled=userData['titled'], 22 | engine=userData['engine'], 23 | gamesPlayed=userData['games']) 24 | except (RuntimeTypeError, AttributeError): 25 | logging.debug("something's fucked") 26 | return None 27 | 28 | class PlayerBSONHandler: 29 | @staticmethod 30 | def reads(bson: Dict) -> Player: 31 | return Player( 32 | id = bson['_id'], 33 | titled = bson.get('titled', False), 34 | engine = bson['engine'], 35 | gamesPlayed = bson['gamesPlayed'] 36 | ) 37 | 38 | def writes(player: Player) -> Dict: 39 | return { 40 | '_id': player.id, 41 | 'titled': player.titled, 42 | 'engine': player.engine, 43 | 'gamesPlayed': player.gamesPlayed, 44 | 'date': datetime.now() 45 | } 46 | 47 | class PlayerDB(NamedTuple('PlayerDB', [ 48 | ('playerColl', 'Collection') 49 | ])): 50 | def byId(self, playerId: PlayerID) -> Opt[Player]: 51 | playerBSON = self.playerColl.find_one({'_id': playerId}) 52 | return None if playerBSON is None else PlayerBSONHandler.reads(playerBSON) 53 | 54 | def byPlayerId(self, playerId: PlayerID) -> Opt[Player]: 55 | return self.byId(playerId) 56 | 57 | def unmarkedByUserIds(self, playerIds: List[PlayerID]) -> List[Player]: 58 | return [(None if bson is None else PlayerBSONHandler.reads(bson)) 59 | for bson in [self.playerColl.find_one({'_id': playerId, 'engine': False}) for playerId in playerIds]] 60 | 61 | def engineSample(self, engine: bool, size: int) -> List[Player]: 62 | pipeline = [ 63 | {"$match": {"engine": engine}}, 64 | {"$sample": {"size": int(size)}} 65 | ] 66 | return [PlayerBSONHandler.reads(p) for p in self.playerColl.aggregate(pipeline)] 67 | 68 | def oldestNonEngine(self) -> Opt[Player]: 69 | playerBSON = self.playerColl.find_one_and_update( 70 | filter={'$or': [{'engine': False}, {'engine': None}], 'date': {'$lt': datetime.now() - timedelta(days=30)}}, 71 | update={'$set': {'date': datetime.now()}}, 72 | sort=[('date', pymongo.ASCENDING)]) 73 | return None if playerBSON is None else PlayerBSONHandler.reads(playerBSON) 74 | 75 | def byEngine(self, engine: bool = True) -> List[Player]: 76 | return [PlayerBSONHandler.reads(p) for p in self.playerColl.find({'engine': engine})] 77 | 78 | def all(self) -> List[Player]: 79 | return [PlayerBSONHandler.reads(p) for p in self.playerColl.find({})] 80 | 81 | def write(self, player: Player): 82 | self.playerColl.update_one({'_id': player.id}, {'$set': PlayerBSONHandler.writes(player)}, upsert=True) -------------------------------------------------------------------------------- /modules/http/__init__.py: -------------------------------------------------------------------------------- 1 | """Some http/socket related utilities. 
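Provides a requests HTTPAdapter and session factory that enable TCP keepalive
(SO_KEEPALIVE) on outgoing connections.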
2 | """ 3 | import socket 4 | import requests 5 | 6 | class HTTPAdapterWithSocketOptions(requests.adapters.HTTPAdapter): 7 | """A helper adapter to set socket options on the socket requests will use. 8 | """ 9 | def __init__(self, *args, **kwargs): 10 | self.socket_options = kwargs.pop("socket_options", None) 11 | super(HTTPAdapterWithSocketOptions, self).__init__(*args, **kwargs) 12 | 13 | def init_poolmanager(self, *args, **kwargs): 14 | if self.socket_options is not None: 15 | kwargs["socket_options"] = self.socket_options 16 | super(HTTPAdapterWithSocketOptions, self).init_poolmanager(*args, **kwargs) 17 | 18 | 19 | def get_keepalive_adapter(): 20 | return HTTPAdapterWithSocketOptions( 21 | socket_options=[(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)] 22 | ) 23 | 24 | def get_requests_session_with_keepalive(): 25 | adapter = get_keepalive_adapter() 26 | s = requests.session() 27 | s.mount("http://", adapter) 28 | s.mount("https://", adapter) 29 | return s 30 | -------------------------------------------------------------------------------- /modules/irwin/AnalysedGameModel.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | import numpy as np 6 | import logging 7 | import os 8 | 9 | from random import shuffle 10 | from math import ceil 11 | 12 | from modules.game.AnalysedGame import GameAnalysedGame 13 | 14 | from keras.models import load_model, Model 15 | from keras.layers import Dropout, Embedding, Reshape, Dense, LSTM, Input, concatenate, Conv1D, Flatten 16 | from keras.optimizers import Adam 17 | 18 | from keras.engine.training import Model 19 | 20 | from numpy import ndarray 21 | 22 | GamePrediction = NewType('GamePrediction', int) 23 | MovePrediction = NewType('MovePrediction', int) 24 | 25 | WeightedMovePrediction = NewType('WeightedMovePrediction', int) 26 | WeightedGamePrediction = NewType('WeightedGamePrediction', int) 27 | 28 | class AnalysedGamePrediction(NamedTuple('AnalysedGamePrediction', [ 29 | ('game', GamePrediction), 30 | ('lstmMoves', List[MovePrediction]), 31 | ('isolatedMoves', List[MovePrediction]) 32 | ])): 33 | @staticmethod 34 | def fromTensor(tensor: ndarray, length: int): 35 | return AnalysedGamePrediction( 36 | game = int(100*tensor[0][0]), 37 | lstmMoves = [int(100*i) for i in tensor[1][0][:length]], 38 | isolatedMoves = [int(100*i) for i in tensor[2][0][:length]]) 39 | 40 | def weightedMovePredictions(self) -> List[WeightedMovePrediction]: 41 | return [int(0.5*(l + i)) for l, i in zip(self.lstmMoves, self.isolatedMoves)] 42 | 43 | def weightedGamePrediction(self) -> WeightedGamePrediction: 44 | moveActivations = sorted(self.weightedMovePredictions(), reverse=True) 45 | moveActivationsLen = len(moveActivations) 46 | 47 | nanToZero = lambda x: 0 if np.isnan(x) else x 48 | 49 | highest = nanToZero(np.average([i for i in moveActivations if i > 80])) 50 | topX = np.average(moveActivations[:ceil(0.3*moveActivationsLen)]) 51 | topY = np.average(moveActivations[:ceil(0.9*moveActivationsLen)]) 52 | 53 | return int(np.average([highest, topX, topY])) 54 | 55 | class AnalysedGameModel: 56 | def __init__(self, config: ConfigWrapper, newmodel: bool = False): 57 | self.config = config 58 | self.model = self.createModel(newmodel) 59 | 60 | def createModel(self, newmodel: bool = False) -> Model: 61 | if os.path.isfile(self.config["irwin model analysed file"]) and not newmodel: 62 | logging.debug("model already exists, opening from file") 63 | m = 
load_model(self.config["irwin model analysed file"]) 64 | m._make_predict_function() 65 | return m 66 | logging.debug('model does not exist, building from scratch') 67 | inputGame = Input(shape=(60, 13), dtype='float32', name='game_input') 68 | pieceType = Input(shape=(60, 1), dtype='float32', name='piece_type') 69 | 70 | pieceEmbed = Embedding(input_dim=7, output_dim=8)(pieceType) 71 | rshape = Reshape((60,8))(pieceEmbed) 72 | 73 | concats = concatenate(inputs=[inputGame, rshape]) 74 | 75 | # Merge embeddings 76 | 77 | ### Conv Net Block of Siamese Network 78 | conv1 = Conv1D(filters=64, kernel_size=3, activation='relu')(concats) 79 | dense1 = Dense(32, activation='relu')(conv1) 80 | conv2 = Conv1D(filters=64, kernel_size=5, activation='relu')(dense1) 81 | dense2 = Dense(32, activation='sigmoid')(conv2) 82 | conv3 = Conv1D(filters=64, kernel_size=10, activation='relu')(dense2) 83 | dense3 = Dense(16, activation='relu')(conv3) 84 | dense4 = Dense(8, activation='sigmoid')(dense3) 85 | 86 | f = Flatten()(dense4) 87 | dense5 = Dense(64, activation='relu')(f) 88 | convNetOutput = Dense(16, activation='sigmoid')(dense5) 89 | 90 | 91 | ### LSTM Block of Siamese Network 92 | # merge move stats with move options 93 | c1 = Conv1D(filters=128, kernel_size=5, name='conv1')(concats) 94 | 95 | # analyse all the moves and come to a decision about the game 96 | l1 = LSTM(128, return_sequences=True)(c1) 97 | l2 = LSTM(128, return_sequences=True, activation='sigmoid')(l1) 98 | 99 | c2 = Conv1D(filters=64, kernel_size=10, name='conv2')(l2) 100 | 101 | l3 = LSTM(64, return_sequences=True)(c2) 102 | l4 = LSTM(32, return_sequences=True, activation='sigmoid', name='position_words')(l3) 103 | l5 = LSTM(32)(l4) 104 | l6 = Dense(16, activation='sigmoid', name='game_word')(l5) 105 | d4 = Dropout(0.3)(l6) 106 | 107 | s1 = Dense(16, activation='sigmoid')(l4) 108 | lstmMove = Dense(1, activation='sigmoid', name='lstm_move_output')(s1) 109 | 110 | # isolated consideration of move blocks 111 | 112 | mi1 = Dense(64, activation='relu')(c1) 113 | mi2 = Dense(16, activation='relu')(mi1) 114 | isolatedMove = Dense(1, activation='sigmoid', name='isolated_move')(mi2) 115 | 116 | 117 | mergeLSTMandConv = concatenate([d4, convNetOutput]) 118 | denseOut1 = Dense(16, activation='sigmoid')(mergeLSTMandConv) 119 | mainOutput = Dense(1, activation='sigmoid', name='main_output')(denseOut1) 120 | 121 | model = Model(inputs=[inputGame, pieceType], outputs=[mainOutput, lstmMove, isolatedMove]) 122 | 123 | model.compile(optimizer=Adam(lr=0.0001), 124 | loss='binary_crossentropy', 125 | loss_weights=[1., 0.3, 0.2], 126 | metrics=['accuracy']) 127 | return model 128 | 129 | def predict(self, gameAnalysedGames: List[GameAnalysedGame]) -> List[Opt[ndarray]]: 130 | list_to_array = lambda l: None if l is None else [np.array([l[0]]), np.array([l[1]])] 131 | arrs = ((list_to_array(ag.tensor()), ag.length()) for ag in gameAnalysedGames) 132 | return [None if t is None else AnalysedGamePrediction.fromTensor(self.model.predict(t), l) for t, l in arrs] 133 | 134 | def saveModel(self): 135 | self.model.save(self.config["irwin model analysed file"]) -------------------------------------------------------------------------------- /modules/irwin/AnalysisReport.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | from datetime import datetime 3 | from functools import reduce 4 | from math import ceil 5 | import operator 6 | import numpy as np 7 | import random 8 | import pymongo 9 
| import json 10 | 11 | class PlayerReport(namedtuple('PlayerReport', ['id', 'userId', 'owner', 'activation', 'date'])): 12 | @staticmethod 13 | def new(userId, owner, activation): 14 | reportId = str("%016x" % random.getrandbits(64)) 15 | return PlayerReport( 16 | id=reportId, 17 | userId=userId, 18 | owner=owner, 19 | activation=activation, 20 | date=datetime.now()) 21 | 22 | def reportDict(self, gameReports): 23 | return { 24 | 'userId': self.userId, 25 | 'owner': self.owner, 26 | 'activation': int(self.activation), 27 | 'games': [gameReport.reportDict() for gameReport in gameReports] 28 | } 29 | 30 | class GameReportStore(namedtuple('GameReportStore', ['gameReports'])): 31 | @staticmethod 32 | def new(gameReports): 33 | gameReports.sort(key=lambda obj: -obj.activation) 34 | return GameReportStore(gameReports) 35 | 36 | def topGames(self, p=0.15): 37 | """ Get the top p games from all gameReports """ 38 | self.gameReports.sort(key=lambda obj: -obj.activation) 39 | return [gameReport 40 | for i, gameReport in enumerate(self.gameReports) 41 | if (i <= p*len(self.gameReports) or gameReport.activation >= 90)] 42 | 43 | def longestGame(self): 44 | if len(self.gameReports) == 0: 45 | return 0 46 | return max([len(gameReport.moves) for gameReport in self.gameReports]) 47 | 48 | def losses(self, top=False): 49 | gameReports = self.topGames() if top else self.gameReports 50 | return [gameReport.losses() for gameReport in gameReports] 51 | 52 | def ranks(self, subNone=None, top=False): 53 | gameReports = self.topGames() if top else self.gameReports 54 | return [gameReport.ranks(subNone=subNone) for gameReport in gameReports] 55 | 56 | def averageLossByMove(self, top=False): 57 | """ Calculate the average loss by move. Used for graphing""" 58 | if self.longestGame() == 0: 59 | return [] # zero case 60 | return json.dumps(GameReportStore.zipAvgLOL(self.losses(top=top))) 61 | 62 | def averageRankByMove(self, top=False): 63 | """ Calculate the the average rank by move. 
Used for graphing """ 64 | if self.longestGame() == 0: 65 | return [] # zero case 66 | return json.dumps(GameReportStore.zipAvgLOL(self.ranks(subNone=6, top=top))) 67 | 68 | def stdBracketLossByMove(self, top=False): 69 | if self.longestGame() == 0: 70 | return [] # zero case 71 | return json.dumps(GameReportStore.stdBracket(self.losses(top=top))) 72 | 73 | def stdBracketRankByMove(self, top=False): 74 | if self.longestGame() == 0: 75 | return [] # zero case 76 | return json.dumps(GameReportStore.stdBracket(self.ranks(subNone=6, top=top), lowerLimit=1)) 77 | 78 | def binnedActivations(self, top=False): 79 | gameReports = self.topGames() if top else self.gameReports 80 | return json.dumps([sum([int(gameReport.activation in range(i,i+10)) for gameReport in gameReports]) for i in range(0, 100, 10)][::-1]) 81 | 82 | def binnedMoveActivations(self, top=False): 83 | gameReports = self.topGames() if top else self.gameReports 84 | moveActivations = reduce(operator.concat, [gameReport.activations() for gameReport in gameReports]) 85 | return json.dumps([sum([int(moveActivation in range(i,i+10)) for moveActivation in moveActivations]) for i in range(0, 100, 10)][::-1]) 86 | 87 | def activations(self, top=False): 88 | gameReports = self.topGames() if top else self.gameReports 89 | activations = [] 90 | [activations.extend(gr.activations()) for gr in gameReports] 91 | return activations 92 | 93 | @staticmethod 94 | def zipLOL(lol): 95 | """ 96 | lol: List[List[A]] 97 | assumes the input isn't : [] 98 | """ 99 | longest = max([len(l) for l in lol]) 100 | bins = [[] for i in range(longest)] 101 | for l in lol: 102 | try: 103 | [bins[i].append(l[i]) for i in range(longest) if l[i] is not None] 104 | except IndexError: 105 | continue 106 | return bins 107 | 108 | @staticmethod 109 | def zipAvgLOL(lol): 110 | """ 111 | lol: List[List[A]] 112 | assumes the input isn't : [] 113 | """ 114 | return [np.average(b) for b in GameReportStore.zipLOL(lol)] 115 | 116 | @staticmethod 117 | def zipStdLOL(lol): 118 | """ 119 | lol: List[List[A]] 120 | assumes the input isn't : [] 121 | """ 122 | return [np.std(b) for b in GameReportStore.zipLOL(lol)] 123 | 124 | @staticmethod 125 | def stdBracket(lol, lowerLimit=0): 126 | stds = GameReportStore.zipStdLOL(lol) 127 | avgs = GameReportStore.zipAvgLOL(lol) 128 | return { 129 | 'top': [avg + stds[i] for i, avg in enumerate(avgs)], 130 | 'bottom': [max(avg - stds[i], lowerLimit) for i, avg in enumerate(avgs)] 131 | } 132 | 133 | class GameReport(namedtuple('GameReport', ['id', 'reportId', 'gameId', 'activation', 'moves'])): 134 | @staticmethod 135 | def new(analysedGame, gameActivation, gamePredictions, reportId, userId): 136 | gameId = analysedGame.gameId 137 | return GameReport( 138 | id=gameId + '/' + reportId, 139 | reportId=reportId, 140 | gameId=gameId, 141 | activation=gameActivation, 142 | moves=[MoveReport.new(am, p) for am, p in zip(analysedGame.analysedMoves, movePredictions(gamePredictions[0]))]) 143 | 144 | def reportDict(self): 145 | return { 146 | 'gameId': self.gameId, 147 | 'activation': self.activation, 148 | 'moves': [move.reportDict() for move in self.moves] 149 | } 150 | 151 | def colorIndex(self): 152 | return int(self.activation/10) 153 | 154 | def activations(self): 155 | return [move.activation for move in self.moves] 156 | 157 | def ranks(self, subNone=None): 158 | return [(subNone if move.rank is None else move.rank) for move in self.moves] 159 | 160 | def ranksJSON(self): 161 | return json.dumps(self.ranks()) 162 | 163 | def losses(self): 164 | 
losses = [move.loss for move in self.moves] 165 | if losses[-1] > 50: 166 | losses[-1] = 0 167 | return losses 168 | 169 | def moveNumbers(self): 170 | return [i+1 for i in range(len(self.moves))] 171 | 172 | def binnedActivations(self): 173 | bins = [0 for i in range(10)] 174 | for move in self.moves: 175 | bins[int(move.activation/10)] += 1 176 | return bins[::-1] 177 | 178 | 179 | class MoveReport(namedtuple('MoveReport', ['activation', 'rank', 'ambiguity', 'advantage', 'loss'])): 180 | @staticmethod 181 | def new(analysedMove, movePrediction): 182 | return MoveReport( 183 | activation=moveActivation(movePrediction), 184 | rank=analysedMove.trueRank(), 185 | ambiguity=analysedMove.ambiguity(), 186 | advantage=int(100*analysedMove.advantage()), 187 | loss=int(100*analysedMove.winningChancesLoss())) 188 | 189 | def reportDict(self): 190 | return { 191 | 'a': self.activation, 192 | 'r': self.rank, 193 | 'm': self.ambiguity, 194 | 'o': self.advantage, 195 | 'l': self.loss 196 | } 197 | 198 | def movePredictions(gamePredictions): 199 | return list(zip(list(gamePredictions[1][0]), list(gamePredictions[2][0]))) 200 | 201 | def moveActivation(movePrediction): 202 | return int(50*(movePrediction[0][0]+movePrediction[1][0])) 203 | 204 | class PlayerReportBSONHandler: 205 | @staticmethod 206 | def reads(bson): 207 | return PlayerReport( 208 | id=bson['_id'], 209 | userId=bson['userId'], 210 | owner=bson['owner'], 211 | activation=bson['activation'], 212 | date=bson['date'] 213 | ) 214 | 215 | @staticmethod 216 | def writes(playerReport): 217 | return { 218 | '_id': playerReport.id, 219 | 'userId': playerReport.userId, 220 | 'owner': playerReport.owner, 221 | 'activation': playerReport.activation, 222 | 'date': playerReport.date 223 | } 224 | 225 | class GameReportBSONHandler: 226 | @staticmethod 227 | def reads(bson): 228 | return GameReport( 229 | id=bson['_id'], 230 | reportId=bson['reportId'], 231 | gameId=bson['gameId'], 232 | activation=bson['activation'], 233 | moves=[MoveReportBSONHandler.reads(mBson) for mBson in bson['moves']]) 234 | 235 | @staticmethod 236 | def writes(gameReport): 237 | return { 238 | '_id': gameReport.id, 239 | 'reportId': gameReport.reportId, 240 | 'gameId': gameReport.gameId, 241 | 'activation': gameReport.activation, 242 | 'moves': [MoveReportBSONHandler.writes(move) for move in gameReport.moves] 243 | } 244 | 245 | class MoveReportBSONHandler: 246 | @staticmethod 247 | def reads(bson): 248 | return MoveReport( 249 | activation=bson['a'], 250 | rank=bson['r'], 251 | ambiguity=bson['m'], 252 | advantage=bson['o'], 253 | loss=bson['l']) 254 | 255 | @staticmethod 256 | def writes(moveReport): 257 | return { 258 | 'a': moveReport.activation, 259 | 'r': moveReport.rank, 260 | 'm': moveReport.ambiguity, 261 | 'o': moveReport.advantage, 262 | 'l': moveReport.loss 263 | } 264 | 265 | class PlayerReportDB(namedtuple('PlayerReportDB', ['playerReportColl'])): 266 | def byPlayerId(self, userId): 267 | return [PlayerReportBSONHandler.reads(bson) 268 | for bson 269 | in self.playerReportColl.find( 270 | filter={'userId': userId}, 271 | sort=[('date', pymongo.DESCENDING)])] 272 | 273 | def newestByUserId(self, userId): 274 | bson = self.playerReportColl.find_one( 275 | filter={'userId': userId}, 276 | sort=[('date', pymongo.DESCENDING)]) 277 | return None if bson is None else PlayerReportBSONHandler.reads(bson) 278 | 279 | def byPlayerIds(self, userIds): 280 | return [self.newestByUserId(userId) for userId in userIds] 281 | 282 | def newest(self, amount=50): 283 | return 
[PlayerReportBSONHandler.reads(bson) 284 | for bson in self.playerReportColl.find(sort=[('date', pymongo.DESCENDING)], limit=amount)] 285 | 286 | def byId(self, reportId): 287 | bson = self.playerReportColl.find_one({'_id': reportId}) 288 | return None if bson is None else PlayerReportBSONHandler.reads(bson) 289 | 290 | def write(self, playerReport): 291 | self.playerReportColl.update_one( 292 | {'_id': playerReport.id}, 293 | {'$set': PlayerReportBSONHandler.writes(playerReport)}, 294 | upsert=True) 295 | 296 | def timeSinceUpdated(self, userId): 297 | report = self.newestByUserId(userId) 298 | if report is None: 299 | return None 300 | return datetime.now() - report.date 301 | 302 | class GameReportDB(namedtuple('GameReportDB', ['gameReportColl'])): 303 | def byId(self, id): 304 | bson = self.gameReportColl.find_one({'_id': id}) 305 | return None if bson is None else GameReportBSONHandler.reads(bson) 306 | 307 | def byReportId(self, reportId): 308 | return [GameReportBSONHandler.reads(bson) for bson in self.gameReportColl.find({'reportId': reportId})] 309 | 310 | def byGameId(self, gameId): 311 | return [GameReportBSONHandler.reads(bson) for bson in self.gameReportColl.find({'gameId': gameId})] 312 | 313 | def write(self, gameReport): 314 | self.gameReportColl.update_one( 315 | {'_id': gameReport.id}, 316 | {'$set': GameReportBSONHandler.writes(gameReport)}, 317 | upsert=True) 318 | 319 | def writeMany(self, gameReports): 320 | [self.write(gameReport) for gameReport in gameReports] -------------------------------------------------------------------------------- /modules/irwin/BasicGameModel.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | import numpy as np 6 | import logging 7 | import os 8 | 9 | from random import shuffle 10 | 11 | from collections import namedtuple 12 | 13 | from modules.game.Player import PlayerID 14 | from modules.game.Game import Game 15 | 16 | from keras.models import load_model, Model 17 | from keras.layers import Dropout, Embedding, Reshape, Flatten, Dense, LSTM, Input, concatenate, Conv1D 18 | from keras.optimizers import Adam 19 | from keras.callbacks import TensorBoard 20 | 21 | from functools import lru_cache 22 | 23 | class BasicGameModel: 24 | def __init__(self, config: ConfigWrapper, newmodel: bool = False): 25 | self.config = config 26 | self.model = self.createModel(newmodel) 27 | 28 | def createModel(self, newmodel: bool = False): 29 | if os.path.isfile(self.config["irwin model basic file"]) and not newmodel: 30 | logging.debug("model already exists, opening from file") 31 | m = load_model(self.config["irwin model basic file"]) 32 | m._make_predict_function() 33 | return m 34 | logging.debug('model does not exist, building from scratch') 35 | 36 | moveStatsInput = Input(shape=(60, 8), dtype='float32', name='move_input') 37 | pieceType = Input(shape=(60, 1), dtype='float32', name='piece_type') 38 | 39 | pieceEmbed = Embedding(input_dim=7, output_dim=8)(pieceType) 40 | rshape = Reshape((60,8))(pieceEmbed) 41 | 42 | concats = concatenate(inputs=[moveStatsInput, rshape]) 43 | 44 | ### Conv Net Block of Siamese Network 45 | conv1 = Conv1D(filters=64, kernel_size=3, activation='relu')(concats) 46 | dense1 = Dense(32, activation='relu')(conv1) 47 | conv2 = Conv1D(filters=64, kernel_size=5, activation='relu')(dense1) 48 | dense2 = Dense(32, activation='sigmoid')(conv2) 49 | conv3 = Conv1D(filters=64, kernel_size=10, 
activation='relu')(dense2) 50 | dense3 = Dense(16, activation='relu')(conv3) 51 | dense4 = Dense(8, activation='sigmoid')(dense3) 52 | 53 | f = Flatten()(dense4) 54 | dense5 = Dense(64, activation='relu')(f) 55 | convNetOutput = Dense(16, activation='sigmoid')(dense5) 56 | 57 | ### LSTM Block of Siamese Network 58 | mv1 = Dense(32, activation='relu')(concats) 59 | d1 = Dropout(0.3)(mv1) 60 | mv2 = Dense(16, activation='relu')(d1) 61 | 62 | c1 = Conv1D(filters=64, kernel_size=5, name='conv1')(mv2) 63 | 64 | # analyse all the moves and come to a decision about the game 65 | l1 = LSTM(64, return_sequences=True)(c1) 66 | l2 = LSTM(32, return_sequences=True, activation='relu')(l1) 67 | 68 | c2 = Conv1D(filters=64, kernel_size=10, name='conv2')(l2) 69 | 70 | l3 = LSTM(32, return_sequences=True)(c2) 71 | l4 = LSTM(16, return_sequences=True, activation='relu', recurrent_activation='hard_sigmoid')(l3) 72 | l5 = LSTM(16, activation='sigmoid')(l4) 73 | 74 | mergeLSTMandConv = concatenate([l5, convNetOutput]) 75 | denseOut1 = Dense(16, activation='sigmoid')(mergeLSTMandConv) 76 | mainOutput = Dense(1, activation='sigmoid', name='main_output')(denseOut1) 77 | 78 | model = Model(inputs=[moveStatsInput, pieceType], outputs=mainOutput) 79 | 80 | model.compile(optimizer=Adam(lr=0.0001), 81 | loss='binary_crossentropy', 82 | metrics=['accuracy']) 83 | return model 84 | 85 | def predict(self, playerId: PlayerID, games: List[Game]) -> Opt[int]: 86 | tensors = [game.tensor(playerId) for game in games] 87 | return [None if t is None else int(100*self.model.predict([np.array([t[0]]),np.array([t[1]])])[0][0]) for t in tensors] 88 | 89 | def saveModel(self): 90 | logging.debug("saving model") 91 | self.model.save(self.config["irwin model basic file"]) -------------------------------------------------------------------------------- /modules/irwin/Env.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from pymongo.database import Database 6 | 7 | from modules.game.Game import GameDB 8 | from modules.game.Player import PlayerDB 9 | from modules.game.AnalysedGame import AnalysedGameDB 10 | 11 | from modules.irwin.training.BasicGameActivation import BasicGameActivationDB 12 | from modules.irwin.training.AnalysedGameActivation import AnalysedGameActivationDB 13 | 14 | class Env: 15 | def __init__(self, config: ConfigWrapper, db: Database): 16 | self.config = config 17 | self.db = db 18 | 19 | self.gameDB = GameDB(db[self.config["game coll game"]]) 20 | self.playerDB = PlayerDB(db[self.config["game coll player"]]) 21 | self.analysedGameDB = AnalysedGameDB(db[self.config["game coll analysed_game"]]) 22 | self.analysedGameActivationDB = AnalysedGameActivationDB(db[self.config["irwin coll analysed_game_activation"]]) 23 | self.basicGameActivationDB = BasicGameActivationDB(db[self.config["irwin coll basic_game_activation"]]) -------------------------------------------------------------------------------- /modules/irwin/GameReport.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.irwin.AnalysedGameModel import AnalysedGamePrediction, WeightedGamePrediction 4 | 5 | from modules.irwin.MoveReport import MoveReport 6 | 7 | from modules.game.AnalysedGame import AnalysedGame, AnalysedGameID 8 | from modules.game.Player import PlayerID 9 | from modules.game.Game import GameID 10 | 11 | GameReportID = 
NewType('GameReportID', str) 12 | 13 | class GameReport(NamedTuple('GameReport', [ 14 | ('id', GameReportID), 15 | ('reportId', str), 16 | ('gameId', AnalysedGameID), 17 | ('activation', WeightedGamePrediction), 18 | ('moves', List[MoveReport]) 19 | ])): 20 | @staticmethod 21 | def new(analysedGame: AnalysedGame, analysedGamePrediction: AnalysedGamePrediction, playerReportId: str): 22 | gameId = analysedGame.gameId 23 | return GameReport( 24 | id=GameReport.makeId(gameId, playerReportId), 25 | reportId=playerReportId, 26 | gameId=gameId, 27 | activation=analysedGamePrediction.weightedGamePrediction(), 28 | moves=[MoveReport.new(am, p) for am, p in zip(analysedGame.analysedMoves, analysedGamePrediction.weightedMovePredictions())]) 29 | 30 | @staticmethod 31 | def makeId(gameId: GameID, reportId: str) -> GameReportID: 32 | return '{}/{}'.format(gameId, reportId) 33 | 34 | def reportDict(self): 35 | return { 36 | 'gameId': self.gameId, 37 | 'activation': self.activation, 38 | 'moves': [move.reportDict() for move in self.moves] 39 | } -------------------------------------------------------------------------------- /modules/irwin/Irwin.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.auth.Auth import AuthID 4 | 5 | from modules.game.Player import Player 6 | from modules.game.AnalysedGame import GameAnalysedGame 7 | 8 | from modules.irwin.PlayerReport import PlayerReport 9 | from modules.irwin.AnalysedGameModel import AnalysedGameModel 10 | from modules.irwin.BasicGameModel import BasicGameModel 11 | 12 | from modules.irwin.Env import Env 13 | 14 | from modules.irwin.training.Training import Training 15 | from modules.irwin.training.Evaluation import Evaluation 16 | 17 | class Irwin: 18 | """ 19 | Irwin(env: Env) 20 | 21 | The main thinking and evalutaion engine of the application. 
22 | """ 23 | def __init__(self, env: Env, newmodel: bool = False): 24 | logging.debug('creating irwin instance') 25 | self.env = env 26 | self.basicGameModel = BasicGameModel(env.config) 27 | self.analysedGameModel = AnalysedGameModel(env.config) 28 | self.training = Training(env, newmodel) 29 | self.evaluation = Evaluation(self, self.env.config) 30 | 31 | def createReport(self, player: Player, gameAnalysedGames: List[GameAnalysedGame], owner: AuthID = 'test'): 32 | predictions = self.analysedGameModel.predict(gameAnalysedGames) 33 | playerReport = PlayerReport.new(player, [(ag, p) for ag, p in zip(gameAnalysedGames, predictions) if p is not None], owner) 34 | 35 | return playerReport -------------------------------------------------------------------------------- /modules/irwin/MoveReport.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.AnalysedMove import AnalysedMove, TrueRank 4 | from modules.irwin.AnalysedGameModel import WeightedMovePrediction 5 | 6 | class MoveReport(NamedTuple('MoveReport', [ 7 | ('activation', WeightedMovePrediction), 8 | ('rank', TrueRank), 9 | ('ambiguity', int), 10 | ('advantage', int), 11 | ('loss', int) 12 | ])): 13 | @staticmethod 14 | def new(analysedMove: AnalysedMove, movePrediction: WeightedMovePrediction): 15 | return MoveReport( 16 | activation=movePrediction, 17 | rank=analysedMove.trueRank(), 18 | ambiguity=analysedMove.ambiguity(), 19 | advantage=int(100*analysedMove.advantage()), 20 | loss=int(100*analysedMove.winningChancesLoss())) 21 | 22 | def reportDict(self): 23 | return { 24 | 'a': self.activation, 25 | 'r': self.rank, 26 | 'm': self.ambiguity, 27 | 'o': self.advantage, 28 | 'l': self.loss 29 | } -------------------------------------------------------------------------------- /modules/irwin/PlayerReport.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | import random 4 | 5 | from datetime import datetime 6 | from math import ceil 7 | 8 | import numpy as np 9 | 10 | from modules.game.AnalysedGame import AnalysedGame 11 | from modules.game.Player import Player, PlayerID 12 | from modules.auth.Auth import AuthID 13 | from modules.irwin.AnalysedGameModel import AnalysedGamePrediction 14 | from modules.irwin.GameReport import GameReport 15 | 16 | PlayerReportID = NewType('PlayerReportID', str) 17 | 18 | class PlayerReport(NamedTuple('PlayerReport', [ 19 | ('id', PlayerReportID), 20 | ('userId', PlayerID), 21 | ('owner', AuthID), 22 | ('activation', int), 23 | ('gameReports', List[GameReport]), 24 | ('date', datetime) 25 | ])): 26 | @property 27 | def playerId(self): 28 | return self.userId 29 | 30 | @staticmethod 31 | def new(player: Player, gamesAndPredictions: Iterable[Tuple[AnalysedGame, AnalysedGamePrediction]], owner: AuthID = 'test'): 32 | reportId = PlayerReport.makeId() 33 | gamesAndPredictions = [(ag, agp) for ag, agp in gamesAndPredictions if agp is not None] 34 | gameReports = [GameReport.new(analysedGame, analysedGamePrediction, reportId) for analysedGame, analysedGamePrediction in gamesAndPredictions] 35 | return PlayerReport( 36 | id=reportId, 37 | userId=player.id, 38 | owner=owner, 39 | activation=PlayerReport.playerPrediction(player, [agp for _, agp in gamesAndPredictions]), 40 | gameReports=gameReports, 41 | date=datetime.now()) 42 | 43 | @staticmethod 44 | def makeId() -> PlayerReportID: 45 | return str("%016x" % random.getrandbits(64)) 46 | 47 | 
@staticmethod 48 | def playerPrediction(player: Player, analysedGamePredictions: List[AnalysedGamePrediction]) -> int: 49 | sortedGameActivations = sorted([gp.weightedGamePrediction() for gp in analysedGamePredictions], reverse=True) 50 | topGameActivations = sortedGameActivations[:ceil(0.15*len(sortedGameActivations))] 51 | topGameActivationsAvg = int(np.average(topGameActivations)) if len(topGameActivations) > 0 else 0 52 | 53 | aboveUpper = len([i for i in sortedGameActivations if i > 90]) 54 | aboveLower = len([i for i in sortedGameActivations if i > 80]) 55 | 56 | if aboveUpper > 2 and player.gamesPlayed < 500: 57 | result = topGameActivationsAvg 58 | elif aboveLower > 0: 59 | result = min(92, topGameActivationsAvg) 60 | else: 61 | result = min(62, topGameActivationsAvg) 62 | return result 63 | 64 | def reportDict(self) -> Dict: 65 | return { 66 | 'userId': self.userId, 67 | 'owner': self.owner, 68 | 'activation': int(self.activation), 69 | 'games': [gameReport.reportDict() for gameReport in self.gameReports] 70 | } -------------------------------------------------------------------------------- /modules/irwin/models/analysedGame.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/clarkerubber/irwin/bebf193d39e7ad1339d996f692e9a41a0ec02835/modules/irwin/models/analysedGame.h5 -------------------------------------------------------------------------------- /modules/irwin/models/basicGame.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/clarkerubber/irwin/bebf193d39e7ad1339d996f692e9a41a0ec02835/modules/irwin/models/basicGame.h5 -------------------------------------------------------------------------------- /modules/irwin/training/AnalysedGameActivation.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.Player import PlayerID 4 | from modules.game.AnalysedGame import AnalysedGame, AnalysedGameID 5 | 6 | from modules.irwin.AnalysedGameModel import AnalysedGamePrediction 7 | 8 | from pymongo.collection import Collection 9 | 10 | Prediction = NewType('Prediction', int) 11 | 12 | class AnalysedGameActivation(NamedTuple('AnalysedGameActivation', [ 13 | ('id', AnalysedGameID), 14 | ('playerId', PlayerID), 15 | ('engine', bool), 16 | ('length', int), 17 | ('prediction', Prediction)] 18 | )): 19 | """ 20 | Used as a pivot coll for training. 
21 | """ 22 | @staticmethod 23 | def fromAnalysedGameAndPrediction(analysedGame: AnalysedGame, prediction: AnalysedGamePrediction, engine: bool): 24 | return AnalysedGameActivation( 25 | id = analysedGame.id, 26 | playerId = analysedGame.playerId, 27 | engine = engine, 28 | length = len(analysedGame.analysedMoves), 29 | prediction = prediction.game) 30 | 31 | class AnalysedGameActivationBSONHandler: 32 | @staticmethod 33 | def reads(bson: Dict) -> AnalysedGameActivation: 34 | return AnalysedGameActivation( 35 | id = bson['_id'], 36 | playerId = bson['playerId'], 37 | engine = bson['engine'], 38 | length = bson['length'], 39 | prediction = bson['prediction']) 40 | 41 | @staticmethod 42 | def writes(analysedGameActivation: AnalysedGameActivation) -> Dict: 43 | return { 44 | '_id': analysedGameActivation.id, 45 | 'playerId': analysedGameActivation.playerId, 46 | 'engine': analysedGameActivation.engine, 47 | 'length': analysedGameActivation.length, 48 | 'prediction': analysedGameActivation.prediction 49 | } 50 | 51 | class AnalysedGameActivationDB(NamedTuple('AnalysedGameActivationDB', [ 52 | ('confidentAnalysedGamePivotColl', Collection) 53 | ])): 54 | def byPlayerId(self, playerId: PlayerID) -> List[AnalysedGameActivation]: 55 | return [AnalysedGameActivationBSONHandler.reads(bson) for bson in self.confidentAnalysedGamePivotColl.find({'userId': playerId})] 56 | 57 | def byEngineAndPrediction(self, engine: bool, prediction: Prediction, limit = None) -> List[AnalysedGameActivation]: 58 | gtlt = '$gte' if engine else '$lte' 59 | pipeline = [{'$match': {'engine': engine, 'prediction': {gtlt: prediction}}}] 60 | 61 | if limit is not None: 62 | pipeline.append({'$sample': {'size': limit}}) 63 | 64 | return [AnalysedGameActivationBSONHandler.reads(bson) for bson in self.confidentAnalysedGamePivotColl.aggregate(pipeline)] 65 | 66 | def write(self, analysedGameActivation: AnalysedGameActivation): 67 | self.confidentAnalysedGamePivotColl.update_one({'_id': analysedGameActivation.id}, {'$set': AnalysedGameActivationBSONHandler.writes(analysedGameActivation)}, upsert=True) 68 | 69 | def writeMany(self, analysedGameActivations: List[AnalysedGameActivation]): 70 | [self.write(analysedGameActivation) for analysedGameActivation in analysedGameActivations] -------------------------------------------------------------------------------- /modules/irwin/training/AnalysedModelTraining.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from multiprocessing import Pool 4 | 5 | from modules.game.AnalysedGame import AnalysedGameTensor, GameAnalysedGame 6 | 7 | from modules.irwin.AnalysedGameModel import AnalysedGameModel 8 | 9 | from modules.irwin.Env import Env 10 | from modules.irwin.training.AnalysedGameActivation import AnalysedGameActivation 11 | 12 | import numpy as np 13 | 14 | from random import shuffle 15 | 16 | Batch = NamedTuple('Batch', [ 17 | ('data', np.ndarray), 18 | ('labels', List[np.ndarray]) 19 | ]) 20 | 21 | class AnalysedModelTraining(NamedTuple('AnalysedModelTraining', [ 22 | ('env', Env), 23 | ('analysedGameModel', AnalysedGameModel) 24 | ])): 25 | def train(self, epochs: int, filtered: bool = True) -> None: 26 | logging.debug("getting dataset") 27 | batch = self.getTrainingDataset(filtered) 28 | 29 | logging.debug("training") 30 | logging.debug("Batch Info: Games: {}".format(len(batch.data[0]))) 31 | 32 | logging.debug("Game Len: {}".format(len(batch.data[0][0]))) 33 | 34 | self.analysedGameModel.model.fit( 35 | 
batch.data, batch.labels, 36 | epochs=epochs, batch_size=32, validation_split=0.2) 37 | 38 | self.analysedGameModel.saveModel() 39 | logging.debug("complete") 40 | 41 | def getPlayerTensors(self, playerId: str): 42 | analysedGames = self.env.analysedGameDB.byPlayerId(playerId) 43 | games = self.env.gameDB.byIds([ag.gameId for ag in analysedGames]) 44 | 45 | return list(filter(None, [GameAnalysedGame(ag, g).tensor() for ag, g in zip(analysedGames, games) if ag.gameLength() <= 60])) 46 | 47 | def getTensorsByEngine(self, engine: bool, limit: int): 48 | players = self.env.playerDB.byEngine(engine) 49 | shuffle(players) 50 | 51 | tensors = [] 52 | 53 | for player in players: 54 | logging.info(f'getting tensors for {player.id}') 55 | 56 | tensors.extend(self.getPlayerTensors(player.id)) 57 | l = len(tensors) 58 | 59 | logging.info(f'loaded {l} / {limit} tensors') 60 | 61 | if l >= limit: 62 | logging.info('reached limit') 63 | break 64 | return tensors 65 | 66 | def getTensorByCPE(self, cpe): 67 | analysedGame = self.env.analysedGameDB.byId(cpe.id) 68 | if analysedGame is not None and analysedGame.gameLength() <= 60: 69 | game = self.env.gameDB.byId(analysedGame.gameId) 70 | return GameAnalysedGame(analysedGame, game).tensor() 71 | return None 72 | 73 | def getFilteredEngineTensors(self, limit: int): 74 | logging.info(f'getting {limit} filtered tensors') 75 | 76 | cheatPivotEntries = self.env.analysedGameActivationDB.byEngineAndPrediction( 77 | engine = True, 78 | prediction = 80, 79 | limit = limit) 80 | 81 | return list(filter(None, [self.getTensorByCPE(cpe) for cpe in cheatPivotEntries])) 82 | 83 | def getTrainingDataset(self, filtered: bool): 84 | limit = self.env.config["irwin model analysed training sample_size"] 85 | 86 | legitTensors = self.getTensorsByEngine( 87 | engine = False, 88 | limit = limit) 89 | 90 | if filtered: 91 | cheatTensors = self.getFilteredEngineTensors(limit = limit) 92 | else: 93 | cheatTensors = self.getTensorsByEngine( 94 | engine = True, 95 | limit = limit) 96 | 97 | logging.debug('cgts: ' + str(len(cheatTensors))) 98 | logging.debug('lgts: ' + str(len(legitTensors))) 99 | 100 | logging.debug("batching tensors") 101 | return self.createBatchAndLabels(cheatTensors, legitTensors) 102 | 103 | @staticmethod 104 | def createBatchAndLabels(cheatTensors: List[AnalysedGameTensor], legitTensors: List[AnalysedGameTensor]) -> Batch: 105 | """ 106 | group the dataset into batches by the length of the dataset, because numpy needs it that way 107 | """ 108 | mlen = min(len(cheatTensors), len(legitTensors)) 109 | 110 | cheats = cheatTensors[:mlen] 111 | legits = legitTensors[:mlen] 112 | 113 | logging.debug("batch size " + str(len(cheats + legits))) 114 | 115 | labels = [1.0]*len(cheats) + [0.0]*len(legits) 116 | 117 | blz = list(zip(cheats+legits, labels)) 118 | shuffle(blz) 119 | 120 | r = Batch( 121 | data = [ 122 | np.array([t[0] for t, l in blz]), 123 | np.array([t[1] for t, l in blz]) 124 | ], 125 | labels=[ 126 | np.array([l for t, l in blz]), 127 | np.array([ [[l]]*(60-13) for t, l in blz]), 128 | np.array([ [[l]]*(60-4) for t, l in blz]) 129 | ]) 130 | logging.debug(r.labels[0]) 131 | logging.debug(r.labels[1]) 132 | logging.debug(r.labels[2]) 133 | return r 134 | 135 | def buildTable(self): 136 | """Build table of activations for analysed games. 
used for training""" 137 | logging.warning("Building Analysed Activation Table") 138 | logging.debug("getting players") 139 | cheats = self.env.playerDB.byEngine(True) 140 | 141 | lenPlayers = str(len(cheats)) 142 | 143 | logging.info("gettings games and predicting") 144 | 145 | for i, p in enumerate(cheats): 146 | logging.info("predicting: " + p.id + " - " + str(i) + '/' + lenPlayers) 147 | analysedGames = self.env.analysedGameDB.byPlayerId(p.id) 148 | games = self.env.gameDB.byIds([ag.gameId for ag in analysedGames]) 149 | 150 | predictions = self.analysedGameModel.predict([GameAnalysedGame(ag, g) for ag, g in zip(analysedGames, games)]) 151 | 152 | analysedGameActivations = [AnalysedGameActivation.fromAnalysedGameAndPrediction( 153 | analysedGame = analysedGame, 154 | prediction = prediction, 155 | engine=p.engine) for analysedGame, prediction in zip(analysedGames, predictions) if prediction is not None] 156 | self.env.analysedGameActivationDB.writeMany(analysedGameActivations) -------------------------------------------------------------------------------- /modules/irwin/training/BasicGameActivation.py: -------------------------------------------------------------------------------- 1 | """Type used for pivot coll for basic game model training""" 2 | from default_imports import * 3 | 4 | from modules.game.Game import GameID, PlayerID 5 | 6 | from pymongo.collection import Collection 7 | 8 | BasicGameActivationID = NewType('BasicGameActivationID', str) 9 | Prediction = NewType('Prediction', int) 10 | 11 | class BasicGameActivation(NamedTuple('BasicGameActivation', [ 12 | ('id', BasicGameActivationID), 13 | ('gameId', GameID), 14 | ('playerId', PlayerID), 15 | ('engine', bool), 16 | ('prediction', int) 17 | ])): 18 | @staticmethod 19 | def fromPrediction(gameId: GameID, playerId: PlayerID, prediction: Prediction, engine: bool): 20 | return BasicGameActivation( 21 | id = gameId + '/' + playerId, 22 | gameId = gameId, 23 | playerId = playerId, 24 | engine = engine, 25 | prediction = prediction 26 | ) 27 | 28 | @staticmethod 29 | def makeId(gameId: GameID, playerId: PlayerID) -> BasicGameActivationID: 30 | return gameId + '/' + playerId 31 | 32 | class BasicGameActivationBSONHandler: 33 | @staticmethod 34 | def reads(bson: Dict) -> BasicGameActivation: 35 | return BasicGameActivation( 36 | id = bson['_id'], 37 | gameId = bson['gameId'], 38 | playerId = bson['userId'], 39 | engine = bson['engine'], 40 | prediction = bson['prediction']) 41 | 42 | @staticmethod 43 | def writes(gba: BasicGameActivation) -> Dict: 44 | return { 45 | '_id': gba.id, 46 | 'gameId': gba.gameId, 47 | 'userId': gba.playerId, 48 | 'engine': gba.engine, 49 | 'prediction': gba.prediction 50 | } 51 | 52 | class BasicGameActivationDB(NamedTuple('BasicGameActivationDB', [ 53 | ('basicGameActivationColl', Collection) 54 | ])): 55 | def byPlayerId(self, playerId: PlayerID) -> List[BasicGameActivation]: 56 | return [BasicGameActivationBSONHandler.reads(bson) for bson in self.basicGameActivationColl.find({'userId': playerId})] 57 | 58 | def byEngineAndPrediction(self, engine: bool, prediction: Prediction, limit: Opt[int] = None) -> List[BasicGameActivation]: 59 | gtlt = '$gte' if engine else '$lte' 60 | pipeline = [{'$match': {'engine': engine, 'prediction': {gtlt: prediction}}}] 61 | 62 | if limit is not None: 63 | pipeline.append({'$sample': {'size': limit}}) 64 | 65 | return [BasicGameActivationBSONHandler.reads(bson) for bson in self.basicGameActivationColl.aggregate(pipeline)] 66 | 67 | def write(self, gba: 
BasicGameActivation): 68 | self.basicGameActivationColl.update_one({'_id': gba.id}, {'$set': BasicGameActivationBSONHandler.writes(gba)}, upsert=True) 69 | 70 | def writeMany(self, gbas: List[BasicGameActivation]): 71 | [self.write(gba) for gba in gbas] -------------------------------------------------------------------------------- /modules/irwin/training/BasicModelTraining.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.game.Game import GameTensor 4 | 5 | from modules.irwin.BasicGameModel import BasicGameModel 6 | 7 | from modules.irwin.Env import Env 8 | from modules.irwin.training.BasicGameActivation import BasicGameActivation 9 | 10 | import numpy as np 11 | 12 | from random import shuffle 13 | 14 | Batch = NamedTuple('Batch', [ 15 | ('data', np.ndarray), 16 | ('labels', np.ndarray) 17 | ]) 18 | 19 | class BasicModelTraining(NamedTuple('BasicModelTraining', [ 20 | ('env', Env), 21 | ('basicGameModel', BasicGameModel) 22 | ])): 23 | def train(self, epochs: int, filtered: bool = False, newmodel: bool = False): 24 | logging.debug("getting dataset") 25 | batch = self.getTrainingDataset(filtered) 26 | 27 | logging.debug("training") 28 | logging.debug("Batch Info: Games: {}".format(len(batch.data[0]))) 29 | 30 | self.basicGameModel.model.fit( 31 | batch.data, batch.labels, 32 | epochs=epochs, batch_size=32, validation_split=0.2) 33 | 34 | self.basicGameModel.saveModel() 35 | logging.debug("complete") 36 | 37 | def getPlayerTensors(self, playerId: str): 38 | games = self.env.gameDB.byPlayerIdAndAnalysed(playerId) 39 | return list(filter(None, [g.tensor(playerId) for g in games])) 40 | 41 | def getTensorsByEngine(self, engine: bool, limit: int): 42 | players = self.env.playerDB.byEngine(engine) 43 | shuffle(players) 44 | 45 | tensors = [] 46 | 47 | for player in players: 48 | logging.info(f'getting tensors for {player.id}') 49 | 50 | tensors.extend(self.getPlayerTensors(player.id)) 51 | l = len(tensors) 52 | 53 | logging.info(f'loaded {l} / {limit} tensors') 54 | 55 | if l >= limit: 56 | logging.info('reached limit') 57 | break 58 | 59 | return tensors 60 | 61 | def getTensorByCPE(self, cpe): 62 | game = self.env.gameDB.byId(cpe.gameId) 63 | return game.tensor(cpe.playerId) 64 | 65 | 66 | def getFilteredEngineTensors(self, limit: int): 67 | logging.info(f'getting {limit} filtered tensors') 68 | 69 | cheatPivotEntries = self.env.basicGameActivationDB.byEngineAndPrediction( 70 | engine = True, 71 | prediction = 70, 72 | limit = limit) 73 | 74 | return list(filter(None, [self.getTensorByCPE(cpe) for cpe in cheatPivotEntries])) 75 | 76 | def getTrainingDataset(self, filtered: bool = False): 77 | logging.debug("Getting players from DB") 78 | 79 | limit = self.env.config['irwin model basic training sample_size'] 80 | 81 | legitTensors = self.getTensorsByEngine( 82 | engine = False, 83 | limit = limit) 84 | 85 | if filtered: 86 | cheatTensors = self.getFilteredEngineTensors(limit = limit) 87 | else: 88 | cheatTensors = self.getTensorsByEngine( 89 | engine = True, 90 | limit = limit) 91 | 92 | logging.debug("batching tensors") 93 | return self.createBatchAndLabels(cheatTensors, legitTensors) 94 | 95 | @staticmethod 96 | def createBatchAndLabels(cheatTensors: List[GameTensor], legitTensors: List[GameTensor]) -> Batch: 97 | """ 98 | group the dataset into batches by the length of the dataset, because numpy needs it that way 99 | """ 100 | logging.debug(len(cheatTensors)) 101 | logging.debug(len(legitTensors)) 
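        # Balance the classes before batching: both lists are truncated below to
        # the length of the shorter one, so cheat and legit tensors contribute
        # equally to the training batch.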
102 | mlen = min(len(cheatTensors), len(legitTensors)) 103 | logging.debug(mlen) 104 | 105 | cheats = cheatTensors[:mlen] 106 | legits = legitTensors[:mlen] 107 | 108 | logging.debug("batch size " + str(len(cheats + legits))) 109 | 110 | labels = [1]*len(cheats) + [0]*len(legits) 111 | 112 | blz = list(zip(cheats+legits, labels)) 113 | shuffle(blz) 114 | 115 | b = Batch( 116 | data = [ 117 | np.array([t[0] for t, l in blz]), 118 | np.array([t[1] for t, l in blz]) 119 | ], 120 | labels = np.array([l for t, l in blz]) 121 | ) 122 | 123 | return b 124 | 125 | def buildTable(self): 126 | """ 127 | Build table of activations for basic games (analysed by lichess). used for training 128 | """ 129 | logging.debug("Building Basic Activation Table") 130 | logging.info("getting players") 131 | cheats = self.env.playerDB.byEngine(True) 132 | 133 | lenPlayers = str(len(cheats)) 134 | 135 | logging.info("getting games and predicting") 136 | for i, p in enumerate(cheats): 137 | logging.info("predicting: " + p.id + " - " + str(i) + "/" + lenPlayers) 138 | 139 | games = self.env.gameDB.byPlayerIdAndAnalysed(p.id) 140 | gamesAndTensors = zip(games, self.basicGameModel.predict(p.id, games)) 141 | 142 | self.env.basicGameActivationDB.writeMany([BasicGameActivation.fromPrediction( 143 | gameId=g.id, 144 | playerId=p.id, 145 | prediction=pr, 146 | engine=p.engine) for g, pr in gamesAndTensors if pr is not None]) -------------------------------------------------------------------------------- /modules/irwin/training/Evaluation.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from modules.game.Player import Player 6 | from modules.game.GameStore import GameStore 7 | from modules.game.AnalysedGame import GameAnalysedGame 8 | 9 | from modules.irwin.PlayerReport import PlayerReport 10 | 11 | class Evaluation(NamedTuple('Evaluation', [ 12 | ('irwin', 'Irwin'), 13 | ('config', ConfigWrapper) 14 | ])): 15 | def getPlayerOutcomes(self, engine: bool, batchSize: int) -> Opt[int]: # returns a generator for activations, player by player. 
16 | for player in self.irwin.env.playerDB.engineSample(engine, batchSize): 17 | analysedGames = self.irwin.env.analysedGameDB.byPlayerId(player.id) 18 | games = self.irwin.env.gameDB.byIds([ag.gameId for ag in analysedGames]) 19 | predictions = self.irwin.analysedGameModel.predict([GameAnalysedGame(ag, g) for ag, g in zip(analysedGames, games) if ag.gameLength() <= 60]) 20 | playerReport = PlayerReport.new(player, zip(analysedGames, predictions)) 21 | if len(playerReport.gameReports) > 0: 22 | yield Evaluation.outcome( 23 | playerReport.activation, 24 | 92, 64, engine) 25 | else: 26 | yield None 27 | 28 | def evaluate(self): 29 | outcomes = [] 30 | [[((outcomes.append(o) if o is not None else ...), Evaluation.performance(outcomes)) for o in self.getPlayerOutcomes(engine, self.config['irwin testing eval_size'])] for engine in (True, False)] 31 | 32 | @staticmethod 33 | def performance(outcomes): 34 | tp = len([a for a in outcomes if a == 1]) 35 | fn = len([a for a in outcomes if a == 2]) 36 | tn = len([a for a in outcomes if a == 3]) 37 | fp = len([a for a in outcomes if a == 4]) 38 | tr = len([a for a in outcomes if a == 5]) 39 | fr = len([a for a in outcomes if a == 6]) 40 | 41 | cheatsLen = max(1, tp + fn + tr) 42 | legitsLen = max(1, fp + tn + fr) 43 | 44 | logging.warning("True positive: " + str(tp) + " (" + str(int(100*tp/cheatsLen)) + "%)") 45 | logging.warning("False negative: " + str(fn) + " (" + str(int(100*fn/cheatsLen)) + "%)") 46 | logging.warning("True negative: " + str(tn) + " (" + str(int(100*tn/legitsLen)) + "%)") 47 | logging.warning("False positive: " + str(fp) + " (" + str(int(100*fp/legitsLen)) + "%)") 48 | logging.warning("True Report: " + str(tr) + " (" + str(int(100*tr/cheatsLen)) + "%)") 49 | logging.warning("False Report: " + str(fr) + " (" + str(int(100*fr/legitsLen)) + "%)") 50 | logging.warning("Cheats coverage: " + str(int(100*(tp+tr)/cheatsLen)) + "%") 51 | logging.warning("Legits coverage: " + str(int(100*(tn)/legitsLen)) + "%") 52 | 53 | @staticmethod 54 | def outcome(a: int, tm: int, tr: int, e: bool) -> int: # activation, threshold mark, threshold report, expected value 55 | logging.debug(a) 56 | true_positive = 1 57 | false_negative = 2 58 | true_negative = 3 59 | false_positive = 4 60 | true_report = 5 61 | false_report = 6 62 | 63 | if a > tm and e: 64 | return true_positive 65 | if a > tm and not e: 66 | return false_positive 67 | if a > tr and e: 68 | return true_report 69 | if a > tr and not e: 70 | return false_report 71 | if a <= tr and e: 72 | return false_negative 73 | return true_negative -------------------------------------------------------------------------------- /modules/irwin/training/Training.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from modules.irwin.Env import Env 6 | 7 | from modules.irwin.training.AnalysedModelTraining import AnalysedModelTraining 8 | from modules.irwin.training.BasicModelTraining import BasicModelTraining 9 | from modules.irwin.training.Evaluation import Evaluation 10 | 11 | from modules.irwin.AnalysedGameModel import AnalysedGameModel 12 | from modules.irwin.BasicGameModel import BasicGameModel 13 | 14 | 15 | class Training: 16 | def __init__(self, env: Env, newmodel: bool = False): 17 | self.analysedModelTraining = AnalysedModelTraining( 18 | env=env, 19 | analysedGameModel=AnalysedGameModel(env.config, newmodel)) 20 | 21 | self.basicModelTraining = BasicModelTraining( 22 | 
env=env, 23 | basicGameModel=BasicGameModel(env.config, newmodel)) 24 | 25 | self.evaluation = Evaluation(env, env.config) -------------------------------------------------------------------------------- /modules/lichess/Api.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import logging 3 | import time 4 | import json 5 | from collections import namedtuple 6 | 7 | class Api(namedtuple('Api', ['url', 'token'])): 8 | def postReport(self, report): 9 | reportDict = report.reportDict() 10 | logging.debug(f'Sending player report: {reportDict}') 11 | for _ in range(5): 12 | try: 13 | response = requests.post( 14 | self.url + 'irwin/report', 15 | headers = { 16 | 'User-Agent': 'Irwin', 17 | 'Authorization': f'Bearer {self.token}' 18 | }, 19 | json = reportDict 20 | ) 21 | if response.status_code == 200: 22 | logging.debug(f'Lichess responded with: {response.text}') 23 | return True 24 | else: 25 | logging.warning(str(response.status_code) + ': Failed to post player report') 26 | logging.warning(json.dumps(reportDict)) 27 | if response.status_code == 413: 28 | return False 29 | logging.debug('Trying again in 60 sec') 30 | time.sleep(60) 31 | except requests.exceptions.ChunkedEncodingError: 32 | logging.warning("ChunkedEncodingError: Failed to post report.") 33 | logging.debug("Not attempting to post again") 34 | return 35 | except requests.ConnectionError: 36 | logging.warning("CONNECTION ERROR: Failed to post report.") 37 | logging.debug("Trying again in 30 sec") 38 | time.sleep(30) 39 | except requests.exceptions.SSLError: 40 | logging.warning("SSL ERROR: Failed to post report.") 41 | logging.debug("Trying again in 30 sec") 42 | time.sleep(30) 43 | except ValueError: 44 | logging.warning("VALUE ERROR: Failed to post report.") 45 | logging.debug("Trying again in 30 sec") 46 | time.sleep(30) 47 | 48 | def getPlayerData(self, userId): 49 | for _ in range(5): 50 | try: 51 | response = requests.get( 52 | self.url+'irwin/'+userId+'/assessment', 53 | headers = { 54 | 'User-Agent': 'Irwin', 55 | 'Authorization': f'Bearer {self.token}' 56 | } 57 | ) 58 | try: 59 | return response.json() 60 | except json.decoder.JSONDecodeError: 61 | logging.warning('Error: JSONDecodeError in getPlayerData for user: ' + str(userId)) 62 | logging.warning('Status Code ' + str(response.status_code)) 63 | logging.warning('Text: ' + response.text[:200]) 64 | return None 65 | except requests.ConnectionError: 66 | logging.warning('CONNECTION ERROR: Failed to pull assessment data') 67 | logging.debug('Trying again in 30 sec') 68 | time.sleep(30) 69 | except requests.exceptions.SSLError: 70 | logging.warning('SSL ERROR: Failed to pull assessment data') 71 | logging.debug('Trying again in 30 sec') 72 | time.sleep(30) 73 | return False 74 | -------------------------------------------------------------------------------- /modules/lichess/Request.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.queue.Origin import Origin 4 | from modules.game.Player import Player 5 | from modules.game.Game import Game 6 | 7 | class Request(NamedTuple('Request', [ 8 | ('origin', Origin), 9 | ('player', Player), 10 | ('games', List[Game]) 11 | ])): 12 | @staticmethod 13 | def fromJson(json): # Opt[Request] 14 | try: 15 | return Request( 16 | origin = json['origin'], 17 | player = Player.fromJson(json['user']), 18 | games = [Game.fromJson(game) for game in json['games']] 19 | ) 20 | except KeyError: 21 | 
logging.debug('key error mofo') 22 | return None 23 | -------------------------------------------------------------------------------- /modules/queue/EngineQueue.py: -------------------------------------------------------------------------------- 1 | """Queue item for basic analysis by irwin""" 2 | from default_imports import * 3 | 4 | from modules.auth.Auth import AuthID 5 | from modules.game.Game import Game, PlayerID, GameID 6 | from modules.queue.Origin import Origin, OriginReport, OriginModerator, OriginRandom, maxOrigin 7 | 8 | from datetime import datetime, timedelta 9 | 10 | import pymongo 11 | from pymongo.collection import Collection 12 | 13 | import numpy as np 14 | from math import ceil 15 | 16 | EngineQueueID = NewType('EngineQueueID', str) 17 | Precedence = NewType('Precedence', int) 18 | 19 | class EngineQueue(NamedTuple('EngineQueue', [ 20 | ('id', EngineQueueID), # same as player ID 21 | ('origin', Origin), 22 | ('requiredGameIds', List[GameID]), # games that must be analysed 23 | ('precedence', Precedence), 24 | ('completed', bool), 25 | ('owner', AuthID), 26 | ('date', datetime) 27 | ])): 28 | @staticmethod 29 | def new(playerId: PlayerID, origin: Origin, gamesAndPredictions: List[Tuple[Game, int]]): 30 | if len(gamesAndPredictions) > 0: 31 | gamesAndPredictions = sorted(gamesAndPredictions, key=lambda gap: gap[1], reverse=True) 32 | required = [gap[0].id for gap in gamesAndPredictions][:10] 33 | activations = [gap[1]**2 for gap in gamesAndPredictions] 34 | top30avg = ceil(np.average(activations[:ceil(0.3*len(activations))])) 35 | else: 36 | required = [] 37 | top30avg = 0 38 | 39 | # set the precedence to the top30avg 40 | precedence = top30avg 41 | 42 | # then modify it depending on where it came from 43 | if origin == OriginReport: 44 | precedence += 5000 45 | elif origin == OriginModerator: 46 | precedence = 100000 47 | 48 | return EngineQueue( 49 | id=playerId, 50 | origin=origin, 51 | requiredGameIds=required, 52 | precedence=precedence, 53 | owner=None, 54 | completed=False, 55 | date=datetime.now()) 56 | 57 | def complete(self): 58 | return EngineQueue( 59 | id=self.id, 60 | origin=self.origin, 61 | requiredGameIds=self.requiredGameIds, 62 | precedence=self.precedence, 63 | completed=True, 64 | owner=self.owner, 65 | date=self.date) 66 | 67 | @staticmethod 68 | def merge(engineQueueA, engineQueueB): 69 | if engineQueueA.completed: 70 | return engineQueueB 71 | elif engineQueueB.completed: 72 | return engineQueueA 73 | return EngineQueue( 74 | id=engineQueueA.id, 75 | origin=maxOrigin(engineQueueA.origin, engineQueueB.origin), 76 | requiredGameIds=list(set(engineQueueA.requiredGameIds) | set(engineQueueB.requiredGameIds)), 77 | precedence=max(engineQueueA.precedence, engineQueueB.precedence), 78 | completed=min(engineQueueA.completed, engineQueueB.completed), 79 | owner=engineQueueA.owner if engineQueueA.owner is not None else (engineQueueB.owner if engineQueueB.owner is not None else None), 80 | date=min(engineQueueA.date, engineQueueB.date)) # retain the oldest datetime so the sorting doesn't mess up 81 | 82 | class EngineQueueBSONHandler: 83 | @staticmethod 84 | def reads(bson: Dict) -> EngineQueue: 85 | return EngineQueue( 86 | id=bson['_id'], 87 | origin=bson['origin'], 88 | precedence=bson['precedence'], 89 | requiredGameIds=list(set(bson.get('requiredGameIds', []))), 90 | completed=bson.get('complete', False), 91 | owner=bson.get('owner'), 92 | date=bson.get('date')) 93 | 94 | @staticmethod 95 | def writes(engineQueue: EngineQueue) -> Dict: 96 | return { 97 
| '_id': engineQueue.id, 98 | 'origin': engineQueue.origin, 99 | 'precedence': engineQueue.precedence, 100 | 'requiredGameIds': list(set(engineQueue.requiredGameIds)), 101 | 'completed': engineQueue.completed, 102 | 'owner': engineQueue.owner, 103 | 'date': datetime.now() 104 | } 105 | 106 | class EngineQueueDB(NamedTuple('EngineQueueDB', [ 107 | ('engineQueueColl', Collection) 108 | ])): 109 | def write(self, engineQueue: EngineQueue): 110 | self.engineQueueColl.update_one( 111 | {'_id': engineQueue.id}, 112 | {'$set': EngineQueueBSONHandler.writes(engineQueue)}, upsert=True) 113 | 114 | def inProgress(self) -> List[EngineQueue]: 115 | return [EngineQueueBSONHandler.reads(bson) for bson in self.engineQueueColl.find({'owner': {'$ne': None}, 'completed': False})] 116 | 117 | def byId(self, _id: EngineQueueID) -> Opt[EngineQueue]: 118 | bson = self.engineQueueColl.find_one({'_id': _id}) 119 | return None if bson is None else EngineQueueBSONHandler.reads(bson) 120 | 121 | def byPlayerId(self, playerId: str) -> Opt[EngineQueue]: 122 | return self.byId(playerId) 123 | 124 | def complete(self, engineQueue: EngineQueue): 125 | """remove a complete job from the queue""" 126 | self.write(engineQueue.complete()) 127 | 128 | def updateComplete(self, _id: EngineQueueID, complete: bool): 129 | self.engineQueueColl.update_one( 130 | {'_id': _id}, 131 | {'$set': {'completed': complete, 'owner': None}}) 132 | 133 | def removePlayerId(self, playerId: PlayerID): 134 | """remove all jobs related to playerId""" 135 | self.engineQueueColl.remove({'_id': playerId}) 136 | 137 | def exists(self, playerId: PlayerID) -> bool: 138 | """playerId has a engineQueue object against their name""" 139 | return self.engineQueueColl.find_one({'_id': playerId}) is not None 140 | 141 | def owned(self, playerId: PlayerID) -> bool: 142 | """Does any deep player queue for playerId have an owner""" 143 | bson = self.engineQueueColl.find_one({'_id': playerId, 'owner': None}) 144 | hasOwner = False 145 | if bson is not None: 146 | hasOwner = bson['owner'] is not None 147 | return hasOwner 148 | 149 | def oldest(self) -> Opt[EngineQueue]: 150 | bson = self.engineQueueColl.find_one( 151 | filter={'date': {'$lt': datetime.now() - timedelta(days=2)}}, 152 | sort=[('date', pymongo.ASCENDING)]) 153 | return None if bson is None else EngineQueueBSONHandler.reads(bson) 154 | 155 | def nextUnprocessed(self, name: AuthID) -> Opt[EngineQueue]: 156 | """find the next job to process against owner's name""" 157 | incompleteBSON = self.engineQueueColl.find_one({'owner': name, 'completed': {'$ne': True}}) 158 | if incompleteBSON is not None: # owner has unfinished business 159 | logging.debug(f'{name} is returning to complete {incompleteBSON}') 160 | return EngineQueueBSONHandler.reads(incompleteBSON) 161 | 162 | engineQueueBSON = self.engineQueueColl.find_one_and_update( 163 | filter={'owner': None, 'completed': False}, 164 | update={'$set': {'owner': name}}, 165 | sort=[("precedence", pymongo.DESCENDING), 166 | ("date", pymongo.ASCENDING)]) 167 | return None if engineQueueBSON is None else EngineQueueBSONHandler.reads(engineQueueBSON) 168 | 169 | def top(self, amount: int = 20) -> List[EngineQueue]: 170 | """Return the top `amount` of players, ranked by precedence""" 171 | bsons = self.engineQueueColl.find( 172 | filter={'complete': False}, 173 | sort=[("precedence", pymongo.DESCENDING), 174 | ("date", pymongo.ASCENDING)]).limit(amount) 175 | return [EngineQueueBSONHandler.reads(b) for b in bsons] 176 | 
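
The precedence arithmetic in EngineQueue.new above is easiest to follow with concrete numbers. The following standalone sketch mirrors the same steps for illustration only: the game IDs and activation values are invented, and plain IDs stand in for the Game objects the real method receives.

from math import ceil
import numpy as np

# Invented (gameId, basic-model prediction) pairs, for illustration only.
games_and_predictions = [('g1', 90), ('g2', 70), ('g3', 40), ('g4', 20)]

# Most suspicious games first; up to ten of them become the required games.
ordered = sorted(games_and_predictions, key=lambda gap: gap[1], reverse=True)
required = [game_id for game_id, _ in ordered][:10]

# Square the activations and average the top 30% of them.
activations = [prediction ** 2 for _, prediction in ordered]
top30avg = ceil(np.average(activations[:ceil(0.3 * len(activations))]))  # 6500 for this data

precedence = top30avg
precedence += 5000  # an OriginReport request adds this boost; OriginModerator jumps straight to 100000
print(required, precedence)  # ['g1', 'g2', 'g3', 'g4'] 11500
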
-------------------------------------------------------------------------------- /modules/queue/Env.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from conf.ConfigWrapper import ConfigWrapper 4 | 5 | from pymongo.collection import Collection 6 | 7 | from modules.queue.EngineQueue import EngineQueueDB 8 | from modules.queue.IrwinQueue import IrwinQueueDB 9 | 10 | class Env: 11 | def __init__(self, config: ConfigWrapper, db: Collection): 12 | self.db = db 13 | 14 | self.engineQueueDB = EngineQueueDB(db[config['queue coll engine']]) 15 | self.irwinQueueDB = IrwinQueueDB(db[config['queue coll irwin']]) -------------------------------------------------------------------------------- /modules/queue/IrwinQueue.py: -------------------------------------------------------------------------------- 1 | """Queue item for deep analysis by irwin""" 2 | from default_imports import * 3 | 4 | from modules.queue.Origin import Origin 5 | from modules.game.Game import PlayerID 6 | 7 | from datetime import datetime 8 | import pymongo 9 | from pymongo.collection import Collection 10 | 11 | IrwinQueue = NamedTuple('IrwinQueue', [ 12 | ('id', PlayerID), 13 | ('origin', Origin) 14 | ]) 15 | 16 | class IrwinQueueBSONHandler: 17 | @staticmethod 18 | def reads(bson: Dict) -> IrwinQueue: 19 | return IrwinQueue( 20 | id=bson['_id'], 21 | origin=bson['origin']) 22 | 23 | @staticmethod 24 | def writes(irwinQueue: IrwinQueue) -> Dict: 25 | return { 26 | '_id': irwinQueue.id, 27 | 'origin': irwinQueue.origin, 28 | 'date': datetime.now() 29 | } 30 | 31 | class IrwinQueueDB(NamedTuple('IrwinQueueDB', [ 32 | ('irwinQueueColl', Collection) 33 | ])): 34 | def write(self, irwinQueue: IrwinQueue): 35 | self.irwinQueueColl.update_one( 36 | {'_id': irwinQueue.id}, 37 | {'$set': IrwinQueueBSONHandler.writes(irwinQueue)}, 38 | upsert=True) 39 | 40 | def removePlayerId(self, playerId: PlayerID): 41 | self.irwinQueueColl.remove({'_id': playerId}) 42 | 43 | def nextUnprocessed(self) -> Opt[IrwinQueue]: 44 | irwinQueueBSON = self.irwinQueueColl.find_one_and_delete( 45 | filter={}, 46 | sort=[("date", pymongo.ASCENDING)]) 47 | return None if irwinQueueBSON is None else IrwinQueueBSONHandler.reads(irwinQueueBSON) -------------------------------------------------------------------------------- /modules/queue/Origin.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | Origin = NewType('Origin', str) 4 | OriginReport = Origin('report') 5 | OriginModerator = Origin('moderator') 6 | OriginRandom = Origin('random') 7 | 8 | def maxOrigin(a, b): 9 | if a == OriginModerator or b == OriginModerator: 10 | return OriginModerator 11 | 12 | if a == OriginReport or b == OriginReport: 13 | return OriginReport 14 | 15 | return OriginRandom -------------------------------------------------------------------------------- /modules/queue/Queue.py: -------------------------------------------------------------------------------- 1 | from default_imports import * 2 | 3 | from modules.queue.Env import Env 4 | from modules.queue.EngineQueue import EngineQueue, EngineQueueID 5 | from modules.game.Player import PlayerID 6 | 7 | from modules.auth.Auth import Authable 8 | 9 | class Queue(NamedTuple('Queue', [('env', Env)])): 10 | def nextEngineAnalysis(self, id: EngineQueueID) -> Opt[EngineQueue]: 11 | return self.env.engineQueueDB.nextUnprocessed(id) 12 | 13 | def completeEngineAnalysis(self, _id: EngineQueueID): 14 | 
return self.env.engineQueueDB.updateComplete(_id, complete=True) 15 | 16 | def nextIrwinAnalysis(self): 17 | return None 18 | #return self.env.irwinAnalysisQueueDB. 19 | 20 | def queueNerualAnalysis(self, playerId: PlayerID): 21 | ... 22 | 23 | def queueEngineAnalysis(self, engineQueue: EngineQueue): 24 | return self.env.engineQueueDB.write(engineQueue) 25 | 26 | def engineQueueById(self, playerId: PlayerID): 27 | return self.env.engineQueueDB.byPlayerId(playerId) -------------------------------------------------------------------------------- /tools.py: -------------------------------------------------------------------------------- 1 | """Main interface for Irwin""" 2 | from default_imports import * 3 | 4 | from conf.ConfigWrapper import ConfigWrapper 5 | 6 | import argparse 7 | import sys 8 | import logging 9 | import json 10 | 11 | from utils.updatePlayerDatabase import updatePlayerDatabase 12 | from utils.buildAnalysedPositionTable import buildAnalysedPositionTable 13 | from utils.buildAverageReport import buildAverageReport 14 | 15 | from Env import Env 16 | 17 | config = ConfigWrapper.new('conf/server_config.json') 18 | 19 | parser = argparse.ArgumentParser(description=__doc__) 20 | ## Training 21 | parser.add_argument("--trainbasic", dest="trainbasic", nargs="?", 22 | default=False, const=True, help="train basic game model") 23 | parser.add_argument("--trainanalysed", dest="trainanalysed", nargs="?", 24 | default=False, const=True, help="train analysed game model") 25 | parser.add_argument("--filtered", dest="filtered", nargs="?", 26 | default=False, const=True , help="use filtered dataset for training") 27 | parser.add_argument("--newmodel", dest="newmodel", nargs="?", 28 | default=False, const=True, help="throw out current model. build new") 29 | 30 | ## Database building 31 | parser.add_argument("--buildbasictable", dest="buildbasictable", nargs="?", 32 | default=False, const=True, 33 | help="build table of basic game activations") 34 | parser.add_argument("--buildanalysedtable", dest="buildanalysedtable", nargs="?", 35 | default=False, const=True, 36 | help="build table of analysed game activations") 37 | parser.add_argument("--buildpositiontable", dest="buildpositiontable", nargs="?", 38 | default=False, const=True, 39 | help="build table of analysed positions") 40 | parser.add_argument("--updatedatabase", dest="updatedatabase", nargs="?", 41 | default=False, const=True, 42 | help="collect game analyses for players. 
Build database collection") 43 | parser.add_argument("--buildaveragereport", dest="buildaveragereport", nargs="?", 44 | default=False, const=True, 45 | help="build an average report for all players in the database") 46 | 47 | ## Evaluation and testing 48 | parser.add_argument("--eval", dest="eval", nargs="?", 49 | default=False, const=True, 50 | help="evaluate the performance of neural networks") 51 | parser.add_argument("--test", dest="test", nargs="?", 52 | default=False, const=True, help="test on a single player") 53 | parser.add_argument("--discover", dest="discover", nargs="?", 54 | default=False, const=True, 55 | help="search for cheaters in the database that haven't been marked") 56 | 57 | parser.add_argument("--quiet", dest="loglevel", 58 | default=logging.DEBUG, action="store_const", const=logging.INFO, 59 | help="reduce the number of logged messages") 60 | args = parser.parse_args() 61 | 62 | logging.basicConfig(format="%(message)s", level=args.loglevel, stream=sys.stdout) 63 | logging.getLogger("requests.packages.urllib3").setLevel(logging.WARNING) 64 | logging.getLogger("chess.uci").setLevel(logging.WARNING) 65 | logging.getLogger("modules.fishnet.fishnet").setLevel(logging.INFO) 66 | 67 | logging.debug(args.newmodel) 68 | env = Env(config, newmodel=args.newmodel) 69 | 70 | if args.updatedatabase: 71 | updatePlayerDatabase() 72 | 73 | # train on a single batch 74 | if args.trainbasic: 75 | env.irwin.training.basicModelTraining.train( 76 | config['irwin model basic training epochs'], 77 | args.filtered) 78 | 79 | if args.buildbasictable: 80 | env.irwin.training.basicModelTraining.buildTable() 81 | 82 | if args.buildanalysedtable: 83 | env.irwin.training.analysedModelTraining.buildTable() 84 | 85 | if args.buildpositiontable: 86 | buildAnalysedPositionTable(env) 87 | 88 | if args.trainanalysed: 89 | env.irwin.training.analysedModelTraining.train( 90 | config['irwin model analysed training epochs'], 91 | args.filtered) 92 | 93 | # test on a single user in the DB 94 | if args.test: 95 | for userId in ['ralph27_velasco']: 96 | player = env.playerDB.byPlayerId(userId) 97 | gameStore = GameStore.new() 98 | gameStore.addGames(env.gameDB.byPlayerIdAndAnalysed(userId)) 99 | gameStore.addAnalysedGames(env.analysedGameDB.byPlayerId(userId)) 100 | env.api.postReport(env.irwin.report(player, gameStore)) 101 | logging.debug("posted") 102 | 103 | # how good is the network? 
104 | if args.eval: 105 | env.irwin.evaluation.evaluate() 106 | 107 | if args.discover: 108 | env.irwin.discover() 109 | 110 | if args.buildaveragereport: 111 | buildAverageReport(env) -------------------------------------------------------------------------------- /utils/buildAnalysedPositionTable.py: -------------------------------------------------------------------------------- 1 | from chess.pgn import read_game 2 | from modules.game.AnalysedPosition import AnalysedPosition 3 | import logging 4 | 5 | def buildAnalysedPositionTable(env): 6 | logging.info("buildAnalysedPositionColl") 7 | logging.info("Getting AnalysedGames") 8 | batch = 908 9 | while True: 10 | logging.info("Processing Batch: " + str(batch)) 11 | analysedGames = env.analysedGameDB.allBatch(batch) 12 | batch += 1 13 | if len(analysedGames) == 0: 14 | logging.info("reached end of analysedGameDB") 15 | return 16 | analysedGamesLength = str(len(analysedGames)) 17 | for i, analysedGame in enumerate(analysedGames): 18 | game = env.gameDB.byId(analysedGame.gameId) 19 | white = analysedGame.userId == game.white # is the player black or white 20 | try: 21 | from StringIO import StringIO 22 | except ImportError: 23 | from io import StringIO 24 | 25 | try: 26 | playableGame = read_game(StringIO(" ".join(game.pgn))) 27 | except ValueError: 28 | continue 29 | 30 | node = playableGame 31 | 32 | index = 0 33 | analysedPositions = [] 34 | logging.info("walking through game - " + game.id + " - " + str(i) + "/" + analysedGamesLength) 35 | while not node.is_end(): 36 | nextNode = node.variation(0) 37 | if white == node.board().turn: # if it is the turn of the player of interest 38 | analysedPositions.append(AnalysedPosition.fromBoardAndAnalyses( 39 | node.board(), 40 | analysedGame.analysedMoves[index].analyses)) 41 | index += 1 42 | node = nextNode 43 | env.analysedPositionDB.writeMany(analysedPositions) 44 | -------------------------------------------------------------------------------- /utils/buildAverageReport.py: -------------------------------------------------------------------------------- 1 | """ build and average player report and game report """ 2 | import logging 3 | from random import shuffle 4 | from modules.irwin.AnalysisReport import GameReportStore 5 | 6 | def gameReportStoreByPlayers(env, players): 7 | logging.debug('getting player reports against players') 8 | playerReports = [env.playerReportDB.newestByUserId(player.id) for player in players] 9 | gameReports = [] 10 | logging.debug('getting game reports against player reports') 11 | [gameReports.extend(env.gameReportDB.byReportId(report.id)) for report in playerReports if report is not None] 12 | return GameReportStore(gameReports) 13 | 14 | def getAverages(gameReportStore): 15 | return { 16 | 'averageLossByMove': gameReportStore.averageLossByMove(), 17 | 'averageRankByMove': gameReportStore.averageRankByMove() 18 | } 19 | 20 | 21 | def buildAverageReport(env): 22 | logging.debug('getting legit players') 23 | legitPlayers = env.playerDB.byEngine(False) 24 | titledPlayers = [player for player in legitPlayers if player.titled] 25 | 26 | logging.debug('---calculating legit averages---') 27 | legitReportStore = gameReportStoreByPlayers(env, legitPlayers) 28 | legitAvgs = getAverages(legitReportStore) 29 | del legitReportStore 30 | del legitPlayers 31 | 32 | logging.debug('---calculating titled averages---') 33 | titledReportStore = gameReportStoreByPlayers(env, titledPlayers) 34 | titledAvgs = getAverages(titledReportStore) 35 | del titledReportStore 36 | del 
titledPlayers 37 | 38 | logging.debug('---calculating engine averages---') 39 | engineReportStore = gameReportStoreByPlayers(env, env.playerDB.byEngine(True)) 40 | engineAvgs = getAverages(engineReportStore) 41 | del engineReportStore 42 | 43 | averages = { 44 | 'legit': legitAvgs, 45 | 'titled': titledAvgs, 46 | 'engine': engineAvgs 47 | } 48 | 49 | logging.debug(averages) 50 | -------------------------------------------------------------------------------- /utils/mongodb/addDateToPlayer.py: -------------------------------------------------------------------------------- 1 | from pymongo import MongoClient 2 | from datetime import datetime, timedelta 3 | 4 | client = MongoClient() 5 | db = client.irwin 6 | playerColl = db.player 7 | 8 | for pBSON in playerColl.find({}): 9 | if pBSON.get('date') is None: 10 | playerColl.update_one({'_id': pBSON['_id']}, {'$set': {'date': datetime.now() - timedelta(days=50)}}) -------------------------------------------------------------------------------- /utils/mongodb/addGameAnalysedBool.py: -------------------------------------------------------------------------------- 1 | from pymongo import MongoClient 2 | 3 | client = MongoClient() 4 | db = client.irwin 5 | gameColl = db.game 6 | 7 | for gBSON in gameColl.find({}): 8 | analysed = len(gBSON.get('analysis', [])) > 0 9 | gameColl.update_one({'_id': gBSON['_id']}, {'$set': {'analysed': analysed}}) 10 | -------------------------------------------------------------------------------- /utils/mongodb/add_null_blurs.js: -------------------------------------------------------------------------------- 1 | db.game.find().forEach(function(o) { 2 | var len = o.pgn.length; 3 | var whiteLen = Math.ceil(len/2); 4 | var blackLen = Math.floor(len/2); 5 | var whiteBlurs = { 6 | nb: 0, 7 | bits: Array(whiteLen + 1).join('0') 8 | }; 9 | var blackBlurs = { 10 | nb: 0, 11 | bits: Array(blackLen + 1).join('0') 12 | } 13 | db.game.update({ 14 | _id: o._id 15 | }, { 16 | $set: { 17 | whiteBlurs: whiteBlurs, 18 | blackBlurs: blackBlurs 19 | } 20 | }); 21 | }); -------------------------------------------------------------------------------- /utils/mongodb/add_user_to_game.js: -------------------------------------------------------------------------------- 1 | db.playerAssessment.find().forEach(function(o) { 2 | var query = {}; 3 | if (o.white) { 4 | query = { 5 | white: o.userId 6 | }; 7 | } else { 8 | query = { 9 | black: o.userId 10 | }; 11 | } 12 | db.game.update({ 13 | _id: o.gameId 14 | },{ 15 | $set: query 16 | }); 17 | }); -------------------------------------------------------------------------------- /utils/mongodb/convert_playerAnalysis_to_player.js: -------------------------------------------------------------------------------- 1 | db.playerAnalysis.find({}).forEach(function(o) { 2 | db.player.insert({ 3 | _id: o._id, 4 | titled: o.titled, 5 | engine: o.engine, 6 | gamesPlayed: o.gamesPlayed, 7 | date: o.date 8 | }); 9 | }); -------------------------------------------------------------------------------- /utils/mongodb/createGameAnlysisPivotTable.js: -------------------------------------------------------------------------------- 1 | db.player.find({}).forEach(function(p) { 2 | db.analysedGame.find({userId: p._id}).forEach(function(g) { 3 | db.analysedGamePlayerPivot.update({ 4 | _id: g._id 5 | }, { 6 | _id: g._id, 7 | userId: p._id, 8 | engine: p.engine, 9 | length: g.analysis.length 10 | }, { 11 | upsert: true 12 | } 13 | ); 14 | }); 15 | }); 
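The mongo-shell migrations above are meant to be pasted into `mongo`; the same pivot build can also be run from Python with pymongo's bulk API. The sketch below mirrors createGameAnlysisPivotTable.js and is not part of the repository: it assumes the same local, unauthenticated `irwin` database the shell scripts use, and the batching threshold and the `False`/empty-list fallbacks are assumptions rather than behaviour taken from the code above.

# pivot_analysed_games.py -- hypothetical pymongo version of createGameAnlysisPivotTable.js (sketch only)
from pymongo import MongoClient, ReplaceOne

client = MongoClient()          # assumed: local, unauthenticated mongod, as in the scripts above
db = client.irwin

ops = []
for p in db.player.find({}):
    for g in db.analysedGame.find({'userId': p['_id']}):
        # one pivot document per analysed game: who played it, whether that player is a
        # marked engine, and how many analysed moves the game contains
        ops.append(ReplaceOne(
            {'_id': g['_id']},
            {'_id': g['_id'],
             'userId': p['_id'],
             'engine': p.get('engine', False),
             'length': len(g.get('analysis', []))},
            upsert=True))
        if len(ops) >= 1000:    # flush in batches so the pending-op list stays small
            db.analysedGamePlayerPivot.bulk_write(ops)
            ops = []
if ops:
    db.analysedGamePlayerPivot.bulk_write(ops)

Batching through bulk_write avoids one round trip per game, which helps once the analysedGame collection gets large.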
-------------------------------------------------------------------------------- /utils/mongodb/partition_gameAnalysis.js: -------------------------------------------------------------------------------- 1 | db.analysedGame.find({}).forEach(function(o) { 2 | db.analysedGame.update({ 3 | _id: o._id 4 | }, { 5 | $set: { 6 | analysis: o.analysedMoves 7 | }, 8 | $unset: { 9 | assessedMoves: true, 10 | assessedChunks: true, 11 | activation: true, 12 | pvActivation: true, 13 | moveChunkActivation: true, 14 | analysedMoves: true 15 | } 16 | }); 17 | }); -------------------------------------------------------------------------------- /utils/mongodb/split_game_pgn.js: -------------------------------------------------------------------------------- 1 | db.game.find({}).forEach(function(o) { 2 | if (!Array.isArray(o.pgn)) { 3 | db.game.update({ 4 | _id: o._id 5 | }, { 6 | $set: { 7 | pgn: o.pgn.split(" ") 8 | } 9 | }); 10 | } 11 | }); -------------------------------------------------------------------------------- /utils/updatePlayerDatabase.py: -------------------------------------------------------------------------------- 1 | """Update data on players in the database""" 2 | import logging 3 | 4 | from modules.game.Player import Player 5 | from modules.game.Game import Game 6 | from modules.game.GameStore import GameStore 7 | 8 | def updatePlayerDatabase(env): 9 | players= env.playerDB.all() 10 | length = len(players) 11 | for i, p in enumerate(players): 12 | logging.info('Getting player data for '+p.id + ' - '+str(i)+'/'+str(length)) 13 | playerData = env.api.getPlayerData(p.id) 14 | if playerData is not None: 15 | env.playerDB.write(Player.fromPlayerData(playerData)) 16 | env.gameDB.writeMany(Game.fromPlayerData(playerData)) -------------------------------------------------------------------------------- /webapp/DefaultResponse.py: -------------------------------------------------------------------------------- 1 | from flask import Response, json 2 | 3 | Success = Response( 4 | response=json.dumps({ 5 | 'success': True, 6 | 'message': 'action completed successfully' 7 | }), 8 | status=200, 9 | mimetype='application/json') 10 | 11 | BadRequest = Response( 12 | response=json.dumps({ 13 | 'success': False, 14 | 'message': 'bad request' 15 | }), 16 | status=400, 17 | mimetype='application/json') 18 | 19 | NotAuthorised = Response( 20 | response=json.dumps({ 21 | 'success': False, 22 | 'message': 'you are not authorised to perform that action' 23 | }), 24 | status=401, 25 | mimetype='application/json') 26 | 27 | NotAvailable = Response( 28 | response=json.dumps({ 29 | 'success': False, 30 | 'message': 'resource not available' 31 | }), 32 | status=418, 33 | mimetype='application/json') -------------------------------------------------------------------------------- /webapp/Env.py: -------------------------------------------------------------------------------- 1 | from modules.db.DBManager import DBManager 2 | 3 | from modules.auth.Auth import Auth 4 | from modules.auth.Env import Env as AuthEnv 5 | 6 | from modules.game.Env import Env as GameEnv 7 | from modules.game.Api import Api as GameApi 8 | 9 | from modules.queue.Env import Env as QueueEnv 10 | from modules.queue.Queue import Queue 11 | 12 | from modules.irwin.Env import Env as IrwinEnv 13 | from modules.irwin.Irwin import Irwin 14 | 15 | from modules.lichess.Api import Api as LichessApi 16 | 17 | import logging 18 | 19 | class Env: 20 | def __init__(self, config): 21 | self.config = config 22 | 23 | ## Database 24 | self.dbManager = 
DBManager(self.config) 25 | self.db = self.dbManager.db() 26 | 27 | ## Envs 28 | self.authEnv = AuthEnv(self.config, self.db) 29 | self.gameEnv = GameEnv(self.config, self.db) 30 | self.queueEnv = QueueEnv(self.config, self.db) 31 | self.irwinEnv = IrwinEnv(self.config, self.db) 32 | 33 | ## Modules 34 | self.auth = Auth(self.authEnv) 35 | self.gameApi = GameApi(self.gameEnv) 36 | self.queue = Queue(self.queueEnv) 37 | self.irwin = Irwin(self.irwinEnv) 38 | self.lichessApi = LichessApi(self.config['api url'], self.config['api token']) 39 | -------------------------------------------------------------------------------- /webapp/controllers/api/blueprint.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from flask import Blueprint, Response, request, jsonify, json 3 | from webapp.DefaultResponse import Success, BadRequest, NotAvailable 4 | 5 | from modules.game.AnalysedGame import GameAnalysedGame 6 | from modules.irwin.PlayerReport import PlayerReport 7 | from modules.auth.Priv import RequestJob, CompleteJob, PostJob 8 | from modules.queue.Origin import OriginReport, OriginModerator, OriginRandom 9 | from modules.client.Job import Job 10 | import traceback 11 | 12 | def buildApiBlueprint(env): 13 | apiBlueprint = Blueprint('Api', __name__, url_prefix='/api') 14 | 15 | @apiBlueprint.route('/request_job', methods=['GET']) 16 | @env.auth.authoriseRoute(RequestJob) 17 | def apiRequestJob(authable): 18 | engineQueue = env.queue.nextEngineAnalysis(authable.id) 19 | logging.debug(f'EngineQueue for req {engineQueue}') 20 | if engineQueue is not None: 21 | requiredGames = env.gameApi.gamesForAnalysis(engineQueue.id, engineQueue.requiredGameIds) 22 | requiredGameIds = [g.id for g in requiredGames] 23 | 24 | logging.warning(f'Requesting {authable.name} analyses {requiredGameIds} for {engineQueue.id}') 25 | 26 | job = Job( 27 | playerId = engineQueue.id, 28 | games = requiredGames, 29 | analysedPositions = []) 30 | 31 | logging.info(f'Job: {job}') 32 | 33 | return Response( 34 | response = json.dumps(job.toJson()), 35 | status = 200, 36 | mimetype = 'application/json') 37 | return NotAvailable 38 | 39 | @apiBlueprint.route('/complete_job', methods=['POST']) 40 | @env.auth.authoriseRoute(CompleteJob) 41 | def apiCompleteJob(authable): 42 | req = request.get_json(silent=True) 43 | try: 44 | job = Job.fromJson(req['job']) 45 | insertRes = env.gameApi.writeAnalysedGames(req['analysedGames']) 46 | if insertRes: 47 | env.queue.completeEngineAnalysis(job.playerId) 48 | 49 | player = env.irwin.env.playerDB.byId(job.playerId) 50 | analysedGames = env.irwin.env.analysedGameDB.byPlayerId(job.playerId) 51 | games = env.irwin.env.gameDB.byIds([ag.gameId for ag in analysedGames]) 52 | predictions = env.irwin.analysedGameModel.predict([GameAnalysedGame(ag, g) for ag, g in zip(analysedGames, games) if ag.gameLength() <= 60]) 53 | 54 | playerReport = PlayerReport.new(player, zip(analysedGames, predictions), owner = authable.name) 55 | logging.warning(f'Sending player report for {playerReport.playerId}, activation {playerReport.activation}%') 56 | env.lichessApi.postReport(playerReport) 57 | 58 | return Success 59 | except KeyError as e: 60 | tb = traceback.format_exc() 61 | logging.warning(f'Error completing job: {tb}') 62 | 63 | return BadRequest 64 | 65 | return apiBlueprint 66 | --------------------------------------------------------------------------------
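For reference, the two routes in blueprint.py are consumed by the external analysis client (client.py and modules/client in this repository). The sketch below is not that client; it only illustrates the request/response shapes visible above. The base URL, the Authorization header, and analyse_with_stockfish are assumptions or placeholders -- the token scheme actually expected lives in modules/auth.

# worker_sketch.py -- illustrative only; see client.py for the real implementation
import time
import requests

API_URL = 'http://localhost:5000/api'                 # assumed host/port
HEADERS = {'Authorization': 'hypothetical-token'}     # assumed auth header; see modules/auth

def analyse_with_stockfish(job):
    # Placeholder: the real client runs Stockfish over job['games'] and builds the
    # analysedGames payload. Returning an empty list keeps this sketch self-contained.
    return []

def run_once():
    # GET /api/request_job returns a Job (playerId, games, analysedPositions), or
    # a non-200 response when the queue has nothing to hand out
    resp = requests.get(f'{API_URL}/request_job', headers=HEADERS)
    if resp.status_code != 200:
        return False
    job = resp.json()

    analysed_games = analyse_with_stockfish(job)

    # POST /api/complete_job with the original job and the produced analyses; on success
    # the server stores them, closes the queue entry and posts a player report to Lichess
    resp = requests.post(f'{API_URL}/complete_job', headers=HEADERS,
                         json={'job': job, 'analysedGames': analysed_games})
    return resp.status_code == 200

if __name__ == '__main__':
    while True:
        if not run_once():
            time.sleep(30)   # back off while the queue is empty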