├── tests ├── __init__.py ├── test_transformer.py ├── test_policy.py ├── test_policy_train_atari.py ├── test_memory.py └── test_policy_train_cartpole.py ├── smt ├── __init__.py ├── policy.py ├── transformer.py └── memory.py ├── Pipfile ├── main.py ├── pyproject.toml ├── README.md ├── .gitignore ├── poetry.lock └── Pipfile.lock /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /smt/__init__.py: -------------------------------------------------------------------------------- 1 | from .transformer import create_transformer 2 | from .policy import SceneMemoryPolicy 3 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | pytest = "*" 8 | 9 | [packages] 10 | stable-baselines = "~=2.6.0" 11 | gym = {extras = ["atari"],version = "*"} 12 | tensorflow = "~=1.12.0" 13 | numpy = "*" 14 | 15 | [requires] 16 | python_version = "3.6" 17 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import gym 2 | 3 | from stable_baselines.common.policies import MlpPolicy 4 | from stable_baselines.common.vec_env import DummyVecEnv 5 | from stable_baselines import PPO2 6 | 7 | env = gym.make("CartPole-v1") 8 | env = DummyVecEnv([lambda: env]) 9 | 10 | model = PPO2(MlpPolicy, env, verbose=1) 11 | model.learn(total_timesteps=10000) 12 | 13 | obs = env.reset() 14 | for i in range(1000): 15 | action, _states = model.predict(obs) 16 | obs, rewards, dones, info = env.step(action) 17 | env.render() 18 | 19 | env.close() 20 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "smt" 3 | version = "0.2.0" 4 | description = "The scene memory transformer implemented in Tensorflow" 5 | authors = ["Erik Gärtner "] 6 | license = "MIT" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.6" 10 | tensorflow-gpu = "~1.12.2" 11 | stable-baselines = "^2.5.1" 12 | 13 | [tool.poetry.dev-dependencies] 14 | pytest = "^4.6" 15 | gym = {version = "^0.12.5",extras = ["atari"]} 16 | 17 | [build-system] 18 | requires = ["poetry>=0.12"] 19 | build-backend = "poetry.masonry.api" 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Scene Memory Transformer 2 | *My reimplementation of the [SMT module](https://arxiv.org/abs/1903.03878) for RL* 3 | 4 | ## Sources 5 | - [Scene Memory Transformer for Embodied Agents in Long-Horizon Tasks](https://arxiv.org/abs/1903.03878) 6 | - [Self-Attention: A Better Building Block for Sentiment Analysis Neural 7 | Network Classifiers](https://aclweb.org/anthology/W18-6219) 8 | - [Attention Is All You Need](https://arxiv.org/abs/1706.03762) 9 | - [Attention? 
Attention!](https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html) *(blog)* 10 | - [Lilian Weng's implementation](https://github.com/lilianweng/transformer-tensorflow) 11 | - [The Annotated Transformer](http://nlp.seas.harvard.edu/2018/04/01/attention.html) *(blog)* 12 | - [The Illustrated Transformer](http://jalammar.github.io/illustrated-transformer/) *(blog)* 13 | -------------------------------------------------------------------------------- /tests/test_transformer.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | from smt.transformer import * 5 | 6 | 7 | def test_intitialize_transformer(): 8 | batch_size = 50 9 | embed_size = 10 10 | memory_size = 20 11 | dim_model = embed_size 12 | dim_ff = 128 13 | nbr_actions = 4 14 | 15 | sess = tf.Session() 16 | with sess: 17 | observations = tf.constant( 18 | np.random.rand(batch_size, memory_size, embed_size), 19 | dtype=tf.float32, 20 | ) 21 | current_obs = tf.constant( 22 | np.random.rand(batch_size, 1, embed_size), dtype=tf.float32 23 | ) 24 | input_mask = tf.constant( 25 | np.ones((batch_size, memory_size, memory_size)), dtype=tf.float32 26 | ) 27 | target_mask = tf.constant(np.ones((batch_size, 1, 1)), dtype=tf.float32) 28 | 29 | print(f"Embeddings (scene memory output): {observations.shape}") 30 | print(f"Current observations (state): {current_obs.shape}") 31 | 32 | enc = encoder( 33 | memory=observations, 34 | nbr_encoders=3, 35 | nbr_heads=2, 36 | dim_model=dim_model, 37 | dim_ff=dim_ff, 38 | input_mask=input_mask, 39 | ) 40 | print(f"Encoded Memory (encoder output): {enc.shape}") 41 | 42 | dec = decoder( 43 | target=current_obs, 44 | context=enc, 45 | nbr_decoders=3, 46 | nbr_heads=2, 47 | dim_model=dim_model, 48 | dim_ff=dim_ff, 49 | input_mask=input_mask, 50 | target_mask=target_mask, 51 | ) 52 | print(f"Decoded Memory (policy input): {dec.shape}") 53 | 54 | logits = tf.layers.dense(dec, nbr_actions) 55 | print(f"Logits: {logits.shape}") 56 | 57 | # Write graph to tensorboard log 58 | sess.run( 59 | [ 60 | tf.global_variables_initializer(), 61 | tf.local_variables_initializer(), 62 | ] 63 | ) 64 | sess.run(logits) 65 | writer = tf.summary.FileWriter("logs", sess.graph) 66 | writer.close() 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.gitignore.io/api/python,osx 3 | # Edit at https://www.gitignore.io/?templates=python,osx 4 | 5 | ### OSX ### 6 | # General 7 | .DS_Store 8 | .AppleDouble 9 | .LSOverride 10 | 11 | # Icon must end with two \r 12 | Icon 13 | 14 | # Thumbnails 15 | ._* 16 | 17 | # Files that might appear in the root of a volume 18 | .DocumentRevisions-V100 19 | .fseventsd 20 | .Spotlight-V100 21 | .TemporaryItems 22 | .Trashes 23 | .VolumeIcon.icns 24 | .com.apple.timemachine.donotpresent 25 | 26 | # Directories potentially created on remote AFP share 27 | .AppleDB 28 | .AppleDesktop 29 | Network Trash Folder 30 | Temporary Items 31 | .apdisk 32 | 33 | ### Python ### 34 | # Byte-compiled / optimized / DLL files 35 | __pycache__/ 36 | *.py[cod] 37 | *$py.class 38 | 39 | # C extensions 40 | *.so 41 | 42 | # Distribution / packaging 43 | .Python 44 | build/ 45 | develop-eggs/ 46 | dist/ 47 | downloads/ 48 | eggs/ 49 | .eggs/ 50 | lib/ 51 | lib64/ 52 | parts/ 53 | sdist/ 54 | var/ 55 | wheels/ 56 | pip-wheel-metadata/ 57 | share/python-wheels/ 58 | 
*.egg-info/ 59 | .installed.cfg 60 | *.egg 61 | MANIFEST 62 | 63 | # PyInstaller 64 | # Usually these files are written by a python script from a template 65 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 66 | *.manifest 67 | *.spec 68 | 69 | # Installer logs 70 | pip-log.txt 71 | pip-delete-this-directory.txt 72 | 73 | # Unit test / coverage reports 74 | htmlcov/ 75 | .tox/ 76 | .nox/ 77 | .coverage 78 | .coverage.* 79 | .cache 80 | nosetests.xml 81 | coverage.xml 82 | *.cover 83 | .hypothesis/ 84 | .pytest_cache/ 85 | 86 | # Translations 87 | *.mo 88 | *.pot 89 | 90 | # Django stuff: 91 | *.log 92 | local_settings.py 93 | db.sqlite3 94 | 95 | # Flask stuff: 96 | instance/ 97 | .webassets-cache 98 | 99 | # Scrapy stuff: 100 | .scrapy 101 | 102 | # Sphinx documentation 103 | docs/_build/ 104 | 105 | # PyBuilder 106 | target/ 107 | 108 | # Jupyter Notebook 109 | .ipynb_checkpoints 110 | 111 | # IPython 112 | profile_default/ 113 | ipython_config.py 114 | 115 | # pyenv 116 | .python-version 117 | 118 | # pipenv 119 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 120 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 121 | # having no cross-platform support, pipenv may install dependencies that don’t work, or not 122 | # install all needed dependencies. 123 | #Pipfile.lock 124 | 125 | # celery beat schedule file 126 | celerybeat-schedule 127 | 128 | # SageMath parsed files 129 | *.sage.py 130 | 131 | # Environments 132 | .env 133 | .venv 134 | env/ 135 | venv/ 136 | ENV/ 137 | env.bak/ 138 | venv.bak/ 139 | 140 | # Spyder project settings 141 | .spyderproject 142 | .spyproject 143 | 144 | # Rope project settings 145 | .ropeproject 146 | 147 | # mkdocs documentation 148 | /site 149 | 150 | # mypy 151 | .mypy_cache/ 152 | .dmypy.json 153 | dmypy.json 154 | 155 | # Pyre type checker 156 | .pyre/ 157 | 158 | # End of https://www.gitignore.io/api/python,osx 159 | logs/ 160 | -------------------------------------------------------------------------------- /tests/test_policy.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from gym.envs.classic_control import CartPoleEnv 4 | from gym.wrappers.time_limit import TimeLimit 5 | from gym import spaces 6 | import gym 7 | import numpy as np 8 | import tensorflow as tf 9 | import pytest 10 | 11 | from stable_baselines import A2C, ACER, ACKTR, PPO2, bench 12 | from stable_baselines.common.policies import ( 13 | MlpLstmPolicy, 14 | LstmPolicy, 15 | nature_cnn, 16 | ) 17 | from smt.policy import SceneMemoryPolicy 18 | from stable_baselines.common.vec_env import SubprocVecEnv 19 | from stable_baselines.common.vec_env.vec_normalize import VecNormalize 20 | from stable_baselines.ppo2.ppo2 import safe_mean 21 | from stable_baselines.a2c.utils import linear 22 | 23 | 24 | def post_processor(inp, **kwargs): 25 | """Layers applied after the SMT, but before the softmax""" 26 | out = tf.nn.tanh(linear(inp, "post1", 64, init_scale=np.sqrt(2))) 27 | return out 28 | 29 | 30 | class CustomSceneMemoryPolicyCartPole(SceneMemoryPolicy): 31 | def __init__( 32 | self, 33 | sess, 34 | ob_space, 35 | ac_space, 36 | n_env, 37 | n_steps, 38 | n_batch, 39 | memory_size=128, 40 | embedding_size=4, 41 | transformer_ff_dim=32, 42 | transformer_nbr_heads=1, 43 | transformer_nbr_encoders=3, 44 | transformer_nbr_decoders=3, 45 | reuse=False, 46 | **_kwargs 47 | ): 48 | super().__init__( 49 | sess, 50 | 
ob_space, 51 | ac_space, 52 | n_env, 53 | n_steps, 54 | n_batch, 55 | memory_size=memory_size, 56 | embedding_size=embedding_size, 57 | transformer_ff_dim=transformer_ff_dim, 58 | transformer_nbr_heads=transformer_nbr_heads, 59 | transformer_nbr_encoders=transformer_nbr_encoders, 60 | transformer_nbr_decoders=transformer_nbr_encoders, 61 | reuse=reuse, 62 | post_processor=post_processor, 63 | **_kwargs 64 | ) 65 | 66 | 67 | class CartPoleNoVelEnv(CartPoleEnv): 68 | """Variant of CartPoleEnv with velocity information removed. This task requires memory to solve.""" 69 | 70 | def __init__(self): 71 | super(CartPoleNoVelEnv, self).__init__() 72 | high = np.array( 73 | [self.x_threshold * 2, self.theta_threshold_radians * 2] 74 | ) 75 | self.observation_space = spaces.Box(-high, high, dtype=np.float32) 76 | 77 | @staticmethod 78 | def _pos_obs(full_obs): 79 | xpos, _xvel, thetapos, _thetavel = full_obs 80 | return xpos, thetapos 81 | 82 | def reset(self): 83 | full_obs = super().reset() 84 | return CartPoleNoVelEnv._pos_obs(full_obs) 85 | 86 | def step(self, action): 87 | full_obs, rew, done, info = super().step(action) 88 | return CartPoleNoVelEnv._pos_obs(full_obs), rew, done, info 89 | 90 | 91 | N_TRIALS = 100 92 | MODELS = [A2C, PPO2] 93 | LSTM_POLICIES = [CustomSceneMemoryPolicyCartPole] 94 | 95 | 96 | @pytest.mark.parametrize("model_class", MODELS) 97 | @pytest.mark.parametrize("policy", LSTM_POLICIES) 98 | def test_scene_memory_policy(request, model_class, policy): 99 | model_fname = "./test_model_{}.pkl".format(request.node.name) 100 | 101 | try: 102 | # create and train 103 | if model_class == PPO2: 104 | model = model_class(policy, "CartPole-v1", nminibatches=1) 105 | else: 106 | model = model_class(policy, "CartPole-v1") 107 | model.learn(total_timesteps=100, seed=0) 108 | 109 | env = model.get_env() 110 | # predict and measure the acc reward 111 | obs = env.reset() 112 | for _ in range(N_TRIALS): 113 | action, _ = model.predict(obs) 114 | obs, _, _, _ = env.step(action) 115 | # saving 116 | model.save(model_fname) 117 | del model, env 118 | # loading 119 | _ = model_class.load(model_fname, policy=policy) 120 | 121 | finally: 122 | if os.path.exists(model_fname): 123 | os.remove(model_fname) 124 | -------------------------------------------------------------------------------- /tests/test_policy_train_atari.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from gym.envs.classic_control import CartPoleEnv 4 | from gym.wrappers.time_limit import TimeLimit 5 | from gym import spaces 6 | import gym 7 | import numpy as np 8 | import tensorflow as tf 9 | import pytest 10 | 11 | from stable_baselines import A2C, ACER, ACKTR, PPO2, bench 12 | from stable_baselines.common.policies import ( 13 | MlpLstmPolicy, 14 | LstmPolicy, 15 | CnnLstmPolicy, 16 | nature_cnn, 17 | ) 18 | from smt.policy import SceneMemoryPolicy 19 | from stable_baselines.common.vec_env import SubprocVecEnv 20 | from stable_baselines.common.vec_env.vec_normalize import VecNormalize 21 | from stable_baselines.ppo2.ppo2 import safe_mean 22 | from stable_baselines.a2c.utils import linear 23 | 24 | 25 | def post_processor(inp, **kwargs): 26 | """Layers applied after the SMT, but before the softmax""" 27 | out = tf.nn.tanh(linear(inp, "post1", 64, init_scale=np.sqrt(2))) 28 | return out 29 | 30 | 31 | NUM_ENVS = 1 32 | NUM_EPISODES_FOR_SCORE = 10 33 | 34 | 35 | class CustomSceneMemoryPolicyAtari(SceneMemoryPolicy): 36 | def __init__( 37 | self, 38 | sess, 39 | ob_space, 40 
| ac_space, 41 | n_env, 42 | n_steps, 43 | n_batch, 44 | memory_size=128, 45 | embedding_size=512, 46 | transformer_ff_dim=512, 47 | transformer_nbr_heads=8, 48 | transformer_nbr_encoders=6, 49 | transformer_nbr_decoders=6, 50 | reuse=False, 51 | **_kwargs 52 | ): 53 | super().__init__( 54 | sess, 55 | ob_space, 56 | ac_space, 57 | n_env, 58 | n_steps, 59 | n_batch, 60 | memory_size=memory_size, 61 | embedding_size=embedding_size, 62 | transformer_ff_dim=transformer_ff_dim, 63 | transformer_nbr_heads=transformer_nbr_heads, 64 | transformer_nbr_encoders=transformer_nbr_encoders, 65 | transformer_nbr_decoders=transformer_nbr_encoders, 66 | reuse=reuse, 67 | post_processor=post_processor, 68 | extractor=nature_cnn, 69 | **_kwargs 70 | ) 71 | 72 | 73 | def test_smt_train_atari(): 74 | """Test that LSTM models are able to achieve >=150 (out of 500) reward on CartPoleNoVelEnv. 75 | This environment requires memory to perform well in.""" 76 | 77 | def make_env(i): 78 | env = env = gym.make("Breakout-v0") 79 | env = bench.Monitor(env, None, allow_early_resets=True) 80 | env.seed(i) 81 | return env 82 | 83 | env = SubprocVecEnv([lambda: make_env(i) for i in range(NUM_ENVS)]) 84 | # env = VecNormalize(env) 85 | model = PPO2( 86 | CustomSceneMemoryPolicyAtari, 87 | env, 88 | n_steps=128, 89 | nminibatches=NUM_ENVS, 90 | lam=0.95, 91 | gamma=0.99, 92 | noptepochs=5, 93 | ent_coef=0.0, 94 | learning_rate=3e-4, 95 | cliprange=0.2, 96 | verbose=1, 97 | tensorboard_log="./logs/", 98 | ) 99 | 100 | eprewmeans = [] 101 | 102 | def reward_callback(local, _): 103 | nonlocal eprewmeans 104 | eprewmeans.append( 105 | safe_mean([ep_info["r"] for ep_info in local["ep_info_buf"]]) 106 | ) 107 | 108 | model.learn(total_timesteps=1000000, seed=0, callback=reward_callback) 109 | 110 | # Maximum episode reward is 500. 111 | # In CartPole-v1, a non-recurrent policy can easily get >= 450. 112 | # In CartPoleNoVelEnv, a non-recurrent policy doesn't get more than ~50. 113 | # LSTM policies can reach above 400, but it varies a lot between runs; consistently get >=150. 114 | # See PR #244 for more detailed benchmarks. 115 | 116 | average_reward = ( 117 | sum(eprewmeans[-NUM_EPISODES_FOR_SCORE:]) / NUM_EPISODES_FOR_SCORE 118 | ) 119 | assert ( 120 | average_reward >= 150 121 | ), "Mean reward below 150; per-episode rewards {}".format(average_reward) 122 | 123 | 124 | def test_lstm_train_atari(): 125 | """Test that LSTM models are able to achieve >=150 (out of 500) reward on CartPoleNoVelEnv. 126 | This environment requires memory to perform well in.""" 127 | 128 | def make_env(i): 129 | env = env = gym.make("Breakout-v0") 130 | env = bench.Monitor(env, None, allow_early_resets=True) 131 | env.seed(i) 132 | return env 133 | 134 | env = SubprocVecEnv([lambda: make_env(i) for i in range(NUM_ENVS)]) 135 | # env = VecNormalize(env) 136 | model = PPO2( 137 | CnnLstmPolicy, 138 | env, 139 | n_steps=128, 140 | nminibatches=NUM_ENVS, 141 | lam=0.95, 142 | gamma=0.99, 143 | noptepochs=10, 144 | ent_coef=0.0, 145 | learning_rate=3e-4, 146 | cliprange=0.2, 147 | verbose=1, 148 | tensorboard_log="./logs/", 149 | ) 150 | 151 | eprewmeans = [] 152 | 153 | def reward_callback(local, _): 154 | nonlocal eprewmeans 155 | eprewmeans.append( 156 | safe_mean([ep_info["r"] for ep_info in local["ep_info_buf"]]) 157 | ) 158 | 159 | model.learn(total_timesteps=1000000, seed=0, callback=reward_callback) 160 | 161 | # Maximum episode reward is 500. 162 | # In CartPole-v1, a non-recurrent policy can easily get >= 450. 
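    # (The reference figures above and below are from the CartPoleNoVelEnv experiments; this test
    # actually trains on Breakout-v0, where episode rewards are on a different scale, so the >=150
    # bar here is only a rough sanity threshold.)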
163 | # In CartPoleNoVelEnv, a non-recurrent policy doesn't get more than ~50. 164 | # LSTM policies can reach above 400, but it varies a lot between runs; consistently get >=150. 165 | # See PR #244 for more detailed benchmarks. 166 | 167 | average_reward = ( 168 | sum(eprewmeans[-NUM_EPISODES_FOR_SCORE:]) / NUM_EPISODES_FOR_SCORE 169 | ) 170 | assert ( 171 | average_reward >= 150 172 | ), "Mean reward below 150; per-episode rewards {}".format(average_reward) 173 | -------------------------------------------------------------------------------- /tests/test_memory.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | import pytest 6 | 7 | from smt.memory import ( 8 | Memory, 9 | update_memory, 10 | batch_update_memory, 11 | sequence_update_memory, 12 | ) 13 | 14 | 15 | class Test_Memory(object): 16 | def setup(self): 17 | self.memory_size = 10 18 | self.embedding_size = 5 19 | self.memory = Memory( 20 | memory_size=self.memory_size, embedding_size=self.embedding_size 21 | ) 22 | 23 | def test_add_embedding(self): 24 | 25 | # Test adding a single embedding 26 | self.memory.add_embedding(np.array([1, 2, 3, 4, 5])) 27 | expected = np.zeros((self.memory_size, self.embedding_size)) 28 | expected[0, :] = np.array([1, 2, 3, 4, 5]) 29 | np.testing.assert_equal(expected, self.memory.get_state()) 30 | 31 | expected = np.zeros(self.memory_size) 32 | expected[0] = 1 33 | np.testing.assert_equal(expected, self.memory.get_mask()) 34 | 35 | assert len(self.memory) == 1 36 | 37 | def test_set_state(self): 38 | state = np.random.rand(self.memory_size, self.embedding_size) 39 | mask = np.ones(self.memory_size) 40 | 41 | self.memory.set_state(state, mask) 42 | np.testing.assert_equal(state, self.memory.get_state()) 43 | np.testing.assert_equal(mask, self.memory.get_mask()) 44 | 45 | 46 | def test_update_memory(): 47 | batch_size = 10 48 | memory_size = 20 49 | embed_size = 5 50 | 51 | sess = tf.Session() 52 | with sess: 53 | observations = tf.constant( 54 | np.tile(np.arange(1, batch_size + 1), (embed_size, 1)).T, 55 | dtype=tf.float32, 56 | ) 57 | memory = tf.constant( 58 | np.zeros((memory_size, embed_size), dtype=np.float32) 59 | ) 60 | mask = tf.constant(np.zeros(memory_size, dtype=np.float32)) 61 | done_ph = tf.constant(np.array([0, 0, 0, 1, 0, 0, 0, 0, 1, 0])) 62 | 63 | # Test the function 64 | new_mem = memory 65 | new_mask = mask 66 | for idx in range(batch_size): 67 | 68 | new_mem, new_mask = update_memory( 69 | tf.squeeze(observations[idx, :]), 70 | new_mem, 71 | new_mask, 72 | tf.squeeze(done_ph[idx]), 73 | ) 74 | 75 | # Test the sequence version 76 | batch_memory, batch_mask, new_state = sequence_update_memory( 77 | observations, memory, mask, done_ph 78 | ) 79 | 80 | mem_np, mask_np = sess.run([batch_memory, batch_mask]) 81 | 82 | corr_mem = np.zeros((3, memory_size, embed_size)) 83 | corr_mem[0, 0, :] = np.ones(embed_size) 84 | corr_mem[1, 1, :] = np.ones(embed_size) 85 | corr_mem[2, 2, :] = np.ones(embed_size) 86 | corr_mem[1, 0, :] = np.ones(embed_size) * 2 87 | corr_mem[2, 1, :] = np.ones(embed_size) * 2 88 | corr_mem[2, 0, :] = np.ones(embed_size) * 3 89 | np.testing.assert_equal(mem_np[0:3, :, :], corr_mem) 90 | 91 | corr_mask = np.zeros((3, memory_size)) 92 | corr_mask[0, 0] = 1 93 | corr_mask[1, 0:2] = 1 94 | corr_mask[2, 0:3] = 1 95 | np.testing.assert_equal(mask_np[0:3, :], corr_mask) 96 | 97 | 98 | tries = 5 99 | batch_sizes = np.random.randint(1, 5, tries) 100 | memory_sizes = 
np.random.randint(1, 20, tries) 101 | sequence_lengths = np.random.randint(1, 20, tries) 102 | test_params = [ 103 | (int(batch_sizes[x]), int(memory_sizes[x]), int(sequence_lengths[x])) 104 | for x in range(tries) 105 | ] 106 | 107 | 108 | @pytest.mark.parametrize("batch_size,memory_size,sequence_length", test_params) 109 | def test_compare_implementations(batch_size, memory_size, sequence_length): 110 | """Compares the results of the TF and NP implementation of the SMT memory""" 111 | with tf.Session() as sess: 112 | 113 | embed_size = 10 114 | 115 | # We have one np memory for each batch 116 | memories = [ 117 | Memory(memory_size=memory_size, embedding_size=embed_size) 118 | for _ in range(batch_size) 119 | ] 120 | 121 | # We create a start memory and mask for all batches 122 | memory_tf = tf.zeros( 123 | (batch_size, memory_size, embed_size), dtype=tf.float32 124 | ) 125 | mask_tf = tf.zeros((batch_size, memory_size), dtype=tf.float32) 126 | done_np = np.random.choice( 127 | [0, 1], (batch_size, sequence_length), True, p=[0.9, 0.1] 128 | ) 129 | done_tf = tf.constant(done_np, dtype=tf.float32) 130 | 131 | total_obs = [] 132 | for seq_idx in range(sequence_length): 133 | 134 | batch_obs = [] 135 | 136 | # Batches are parallel memories for parallel environments 137 | for batch_idx in range(batch_size): 138 | # Generate random observation 139 | obs_np = np.array(np.random.rand(embed_size), dtype=np.float32) 140 | batch_obs.append(tf.constant(obs_np, dtype=tf.float32)) 141 | 142 | if done_np[batch_idx, seq_idx] == 1: 143 | # if done, reset memory 144 | memories[batch_idx].reset() 145 | 146 | # Add to memory 147 | memories[batch_idx].add_embedding(obs_np) 148 | 149 | # Gather observations into batches of sequences 150 | batch_obs_tf = tf.stack(batch_obs) # (batch, embed) 151 | total_obs.append(batch_obs_tf) 152 | 153 | input_obs = tf.stack(total_obs, axis=1) # (batch, seq, embed) 154 | batch_memory, batch_mask, batch_new_state = batch_update_memory( 155 | input_obs, memory_tf, mask_tf, done_tf 156 | ) 157 | 158 | # Verify outputs 159 | tf_res_memory, tf_res_mask, tf_res_new_state = sess.run( 160 | [batch_memory, batch_mask, batch_new_state] 161 | ) 162 | 163 | # Compare results 164 | for batch_idx, memory in enumerate(memories): 165 | np.testing.assert_array_equal( 166 | tf_res_memory[batch_idx, -1, :, :], 167 | memory.get_state(), 168 | "Incorrect memory after batch update", 169 | ) 170 | np.testing.assert_array_equal( 171 | tf_res_mask[batch_idx, -1, :], 172 | memory.get_mask(), 173 | "Incorrect mask after batch update", 174 | ) 175 | np.testing.assert_array_equal( 176 | np.squeeze(tf_res_new_state[batch_idx, :, :], axis=0), 177 | memory.get_statemask(), 178 | "Incorrect new state after batch update", 179 | ) 180 | -------------------------------------------------------------------------------- /tests/test_policy_train_cartpole.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from gym.envs.classic_control import CartPoleEnv 4 | from gym.wrappers.time_limit import TimeLimit 5 | from gym import spaces 6 | import gym 7 | import numpy as np 8 | import tensorflow as tf 9 | import pytest 10 | 11 | from stable_baselines import A2C, ACER, ACKTR, PPO2, bench 12 | from stable_baselines.common.policies import ( 13 | MlpLstmPolicy, 14 | LstmPolicy, 15 | nature_cnn, 16 | ) 17 | from smt.policy import SceneMemoryPolicy 18 | from stable_baselines.common.vec_env import SubprocVecEnv 19 | from stable_baselines.common.vec_env.vec_normalize import 
VecNormalize 20 | from stable_baselines.ppo2.ppo2 import safe_mean 21 | from stable_baselines.a2c.utils import linear 22 | 23 | 24 | def post_processor(inp, **kwargs): 25 | """Layers applied after the SMT, but before the softmax""" 26 | out = tf.nn.tanh(linear(inp, "post1", 64, init_scale=np.sqrt(2))) 27 | return out 28 | 29 | 30 | def extractor(inp, **kwargs): 31 | """Layers applied after the SMT, but before the softmax""" 32 | out = tf.nn.tanh(linear(inp, "pre", 2, init_scale=np.sqrt(2))) 33 | return out 34 | 35 | 36 | class CustomSceneMemoryPolicyCartPole(SceneMemoryPolicy): 37 | def __init__( 38 | self, 39 | sess, 40 | ob_space, 41 | ac_space, 42 | n_env, 43 | n_steps, 44 | n_batch, 45 | memory_size=1, 46 | embedding_size=2, 47 | transformer_ff_dim=32, 48 | transformer_nbr_heads=1, 49 | transformer_nbr_encoders=3, 50 | transformer_nbr_decoders=3, 51 | reuse=False, 52 | **_kwargs 53 | ): 54 | super().__init__( 55 | sess, 56 | ob_space, 57 | ac_space, 58 | n_env, 59 | n_steps, 60 | n_batch, 61 | memory_size=memory_size, 62 | embedding_size=embedding_size, 63 | transformer_ff_dim=transformer_ff_dim, 64 | transformer_nbr_heads=transformer_nbr_heads, 65 | transformer_nbr_encoders=transformer_nbr_encoders, 66 | transformer_nbr_decoders=transformer_nbr_encoders, 67 | reuse=reuse, 68 | post_processor=post_processor, 69 | extractor=extractor, 70 | **_kwargs 71 | ) 72 | 73 | 74 | class CartPoleNoVelEnv(CartPoleEnv): 75 | """Variant of CartPoleEnv with velocity information removed. This task requires memory to solve.""" 76 | 77 | def __init__(self): 78 | super(CartPoleNoVelEnv, self).__init__() 79 | high = np.array( 80 | [self.x_threshold * 2, self.theta_threshold_radians * 2] 81 | ) 82 | self.observation_space = spaces.Box(-high, high, dtype=np.float32) 83 | 84 | @staticmethod 85 | def _pos_obs(full_obs): 86 | xpos, _xvel, thetapos, _thetavel = full_obs 87 | return xpos, thetapos 88 | 89 | def reset(self): 90 | full_obs = super().reset() 91 | return CartPoleNoVelEnv._pos_obs(full_obs) 92 | 93 | def step(self, action): 94 | full_obs, rew, done, info = super().step(action) 95 | return CartPoleNoVelEnv._pos_obs(full_obs), rew, done, info 96 | 97 | 98 | NUM_ENVS = 16 99 | NUM_EPISODES_FOR_SCORE = 10 100 | 101 | 102 | def test_smt_train_cartpole(): 103 | """Test that LSTM models are able to achieve >=150 (out of 500) reward on CartPoleNoVelEnv. 104 | This environment requires memory to perform well in.""" 105 | 106 | def make_env(i): 107 | env = CartPoleNoVelEnv() 108 | env = bench.Monitor(env, None, allow_early_resets=True) 109 | env.seed(i) 110 | return env 111 | 112 | env = SubprocVecEnv([lambda: make_env(i) for i in range(NUM_ENVS)]) 113 | env = VecNormalize(env) 114 | model = PPO2( 115 | CustomSceneMemoryPolicyCartPole, 116 | env, 117 | n_steps=128, 118 | nminibatches=NUM_ENVS, 119 | lam=0.95, 120 | gamma=0.99, 121 | noptepochs=10, 122 | ent_coef=0.0, 123 | learning_rate=1e-4, 124 | cliprange=0.2, 125 | verbose=1, 126 | tensorboard_log="./logs/", 127 | full_tensorboard_log=True, 128 | ) 129 | 130 | eprewmeans = [] 131 | 132 | def reward_callback(local, _): 133 | nonlocal eprewmeans 134 | eprewmeans.append( 135 | safe_mean([ep_info["r"] for ep_info in local["ep_info_buf"]]) 136 | ) 137 | 138 | model.learn(total_timesteps=100000, seed=0, callback=reward_callback) 139 | 140 | # Maximum episode reward is 500. 141 | # In CartPole-v1, a non-recurrent policy can easily get >= 450. 142 | # In CartPoleNoVelEnv, a non-recurrent policy doesn't get more than ~50. 
143 | # LSTM policies can reach above 400, but it varies a lot between runs; consistently get >=150. 144 | # See PR #244 for more detailed benchmarks. 145 | 146 | average_reward = ( 147 | sum(eprewmeans[-NUM_EPISODES_FOR_SCORE:]) / NUM_EPISODES_FOR_SCORE 148 | ) 149 | assert ( 150 | average_reward >= 150 151 | ), "Mean reward below 150; per-episode rewards {}".format(average_reward) 152 | 153 | 154 | @pytest.mark.skip( 155 | reason="this tests the official LSTM implementation as a reference" 156 | ) 157 | def test_lstm_train_cartpole(): 158 | """Test that LSTM models are able to achieve >=150 (out of 500) reward on CartPoleNoVelEnv. 159 | This environment requires memory to perform well in.""" 160 | 161 | def make_env(i): 162 | env = CartPoleNoVelEnv() 163 | env = bench.Monitor(env, None, allow_early_resets=True) 164 | env.seed(i) 165 | return env 166 | 167 | env = SubprocVecEnv([lambda: make_env(i) for i in range(NUM_ENVS)]) 168 | env = VecNormalize(env) 169 | model = PPO2( 170 | MlpLstmPolicy, 171 | env, 172 | n_steps=128, 173 | nminibatches=NUM_ENVS, 174 | lam=0.95, 175 | gamma=0.99, 176 | noptepochs=10, 177 | ent_coef=0.0, 178 | learning_rate=3e-4, 179 | cliprange=0.2, 180 | verbose=1, 181 | tensorboard_log="./logs/", 182 | full_tensorboard_log=True, 183 | ) 184 | 185 | eprewmeans = [] 186 | 187 | def reward_callback(local, _): 188 | nonlocal eprewmeans 189 | eprewmeans.append( 190 | safe_mean([ep_info["r"] for ep_info in local["ep_info_buf"]]) 191 | ) 192 | 193 | model.learn(total_timesteps=100000, seed=0, callback=reward_callback) 194 | 195 | # Maximum episode reward is 500. 196 | # In CartPole-v1, a non-recurrent policy can easily get >= 450. 197 | # In CartPoleNoVelEnv, a non-recurrent policy doesn't get more than ~50. 198 | # LSTM policies can reach above 400, but it varies a lot between runs; consistently get >=150. 199 | # See PR #244 for more detailed benchmarks. 200 | 201 | average_reward = ( 202 | sum(eprewmeans[-NUM_EPISODES_FOR_SCORE:]) / NUM_EPISODES_FOR_SCORE 203 | ) 204 | assert ( 205 | average_reward >= 150 206 | ), "Mean reward below 150; per-episode rewards {}".format(average_reward) 207 | -------------------------------------------------------------------------------- /smt/policy.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | from itertools import zip_longest 3 | from abc import ABC, abstractmethod 4 | 5 | import numpy as np 6 | import tensorflow as tf 7 | from gym.spaces import Discrete 8 | 9 | from stable_baselines.a2c.utils import ( 10 | conv, 11 | linear, 12 | conv_to_fc, 13 | batch_to_seq, 14 | seq_to_batch, 15 | lstm, 16 | ) 17 | from stable_baselines.common.distributions import ( 18 | make_proba_dist_type, 19 | CategoricalProbabilityDistribution, 20 | MultiCategoricalProbabilityDistribution, 21 | DiagGaussianProbabilityDistribution, 22 | BernoulliProbabilityDistribution, 23 | ) 24 | from stable_baselines.common.input import observation_input 25 | from stable_baselines.common.policies import ( 26 | RecurrentActorCriticPolicy, 27 | nature_cnn, 28 | ) 29 | 30 | from .memory import batch_update_memory 31 | from .transformer import create_transformer 32 | 33 | 34 | class SceneMemoryPolicy(RecurrentActorCriticPolicy): 35 | """ 36 | SceneMemoryPolicy implements a policy that uses a Scene Memory Transformer 37 | to attend previous states as a memory. 38 | 39 | :param type sess: Description of parameter `sess`. 40 | :param type ob_space: Description of parameter `ob_space`. 
41 | :param type ac_space: Description of parameter `ac_space`. 42 | :param type n_env: Description of parameter `n_env`. 43 | :param type n_steps: Description of parameter `n_steps`. 44 | :param type n_batch: Description of parameter `n_batch`. 45 | :param type memory_size: Description of parameter `memory_size`. 46 | :param type embedding_size: Description of parameter `embedding_size`. 47 | :param type transformer_ff_dim: Description of parameter `transformer_ff_dim`. 48 | :param type transformer_nbr_heads: Description of parameter `transformer_nbr_heads`. 49 | :param type transformer_nbr_encoders: Description of parameter `transformer_nbr_encoders`. 50 | :param type transformer_nbr_decoders: Description of parameter `transformer_nbr_decoders`. 51 | :param type extractor: Description of parameter `extractor`. 52 | :param type post_processor: Description of parameter `post_processor`. 53 | :param type reuse: Description of parameter `reuse`. 54 | :param type scale_features: Description of parameter `scale_features`. 55 | :param type **kwargs: Description of parameter `**kwargs`. 56 | 57 | """ 58 | 59 | recurrent = True 60 | 61 | def __init__( 62 | self, 63 | sess, 64 | ob_space, 65 | ac_space, 66 | n_env, 67 | n_steps, 68 | n_batch, 69 | memory_size=128, 70 | embedding_size=64, 71 | transformer_ff_dim=128, 72 | transformer_nbr_heads=8, 73 | transformer_nbr_encoders=6, 74 | transformer_nbr_decoders=6, 75 | extractor=None, 76 | post_processor=None, 77 | reuse=False, 78 | scale_features=False, 79 | **kwargs, 80 | ): 81 | super(SceneMemoryPolicy, self).__init__( 82 | sess, 83 | ob_space, 84 | ac_space, 85 | n_env, 86 | n_steps, 87 | n_batch, 88 | state_shape=(memory_size, embedding_size + 1), 89 | reuse=reuse, 90 | scale=scale_features, 91 | ) 92 | 93 | with tf.variable_scope("model", reuse=reuse): 94 | ext = self.processed_obs 95 | 96 | if extractor is not None: 97 | with tf.variable_scope("extractor"): 98 | ext = extractor(self.processed_obs, **kwargs) 99 | 100 | extracted_features = tf.layers.flatten(ext) 101 | 102 | assert extracted_features.shape[-1] == tf.Dimension( 103 | embedding_size 104 | ), f"embedding_size not correct: {extracted_features.shape[-1]} vs {embedding_size}" 105 | 106 | # Transform from (batch x seq, ... ) into (batch, seq, ...) shape 107 | sequence_input = tf.reshape( 108 | extracted_features, 109 | (self.n_env, n_steps, embedding_size), 110 | name="sequence_input", 111 | ) 112 | sequence_state = tf.reshape( 113 | self.states_ph, 114 | (self.n_env, 1, memory_size, embedding_size + 1), 115 | name="sequence_state", 116 | ) 117 | sequence_done = tf.reshape( 118 | self.dones_ph, (self.n_env, n_steps), name="sequence_done" 119 | ) 120 | sequence_memory = tf.squeeze( 121 | sequence_state[:, :, :, :-1], axis=[1], name="sequence_memory" 122 | ) 123 | sequence_mask = tf.squeeze( 124 | sequence_state[:, :, :, -1:], axis=[1, 3], name="sequence_mask" 125 | ) 126 | 127 | # Update the memory states for all observations taking batches and 128 | # sequences into account. 129 | batch_memory, batch_mask, batch_new_state = batch_update_memory( 130 | observations=sequence_input, 131 | start_memory=sequence_memory, 132 | start_mask=sequence_mask, 133 | dones_ph=sequence_done, 134 | ) 135 | 136 | # Transform back into (batch, ...) 
format 137 | memory = tf.reshape( 138 | batch_memory, (n_batch, memory_size, embedding_size) 139 | ) 140 | mask = tf.reshape(batch_mask, (n_batch, memory_size)) 141 | new_state = tf.reshape( 142 | batch_new_state, (n_env, memory_size, embedding_size + 1) 143 | ) 144 | self.snew = new_state 145 | 146 | # Mask should be of dims: (batch, memory_size, memory_size) 147 | tiled_mask = tf.tile( 148 | tf.reshape(mask, (n_batch, 1, memory_size)), (1, memory_size, 1) 149 | ) 150 | 151 | # We need to tile the observation in the (transformer's) sequence 152 | # dimension. We do this since we use the current observation as the 153 | # context when attending each memory cell in the sequence. 154 | obs = tf.reshape(extracted_features, (n_batch, 1, embedding_size)) 155 | 156 | # Create the transformer. 157 | # Note that here the batch and seq has been turned into a single 158 | # dimension. This is due to that fact that we use sequence dimension 159 | # in the transformer to represent the memory dimension. 160 | trans_out = create_transformer( 161 | observation=obs, 162 | memory=memory, 163 | dim_model=embedding_size, 164 | dim_ff=transformer_ff_dim, 165 | nbr_heads=transformer_nbr_heads, 166 | nbr_encoders=transformer_nbr_encoders, 167 | nbr_decoders=transformer_nbr_decoders, 168 | input_mask=None, 169 | target_mask=None, 170 | ) 171 | flat_out = tf.layers.flatten(trans_out) 172 | 173 | if post_processor is not None: 174 | with tf.variable_scope("post_processor"): 175 | flat_out = post_processor(flat_out, **kwargs) 176 | 177 | value_fn = linear(flat_out, "vf", 1) 178 | 179 | self._proba_distribution, self._policy, self.q_value = self.pdtype.proba_distribution_from_latent( 180 | flat_out, flat_out 181 | ) 182 | 183 | self._value_fn = value_fn 184 | self._setup_init() 185 | 186 | def step(self, obs, state=None, mask=None, deterministic=False): 187 | if deterministic: 188 | return self.sess.run( 189 | [ 190 | self.deterministic_action, 191 | self.value_flat, 192 | self.snew, 193 | self.neglogp, 194 | ], 195 | {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask}, 196 | ) 197 | else: 198 | return self.sess.run( 199 | [self.action, self.value_flat, self.snew, self.neglogp], 200 | {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask}, 201 | ) 202 | 203 | def proba_step(self, obs, state=None, mask=None): 204 | return self.sess.run( 205 | self.policy_proba, 206 | {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask}, 207 | ) 208 | 209 | def value(self, obs, state=None, mask=None): 210 | return self.sess.run( 211 | self.value_flat, 212 | {self.obs_ph: obs, self.states_ph: state, self.dones_ph: mask}, 213 | ) 214 | -------------------------------------------------------------------------------- /smt/transformer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions for constructing a transformer model. 
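The building blocks are scaled dot-product attention (softmax(Q·Kᵀ/√d)·V), multi-head attention,
pointwise feed-forward layers, and the encoder/decoder stacks; `create_transformer` wires these
together so that a single current observation attends over the scene memory.

A minimal shape sketch (the sizes below are illustrative, not defaults; the embedding size must
equal `dim_model` and be divisible by `nbr_heads`):

    obs = tf.placeholder(tf.float32, (16, 1, 64))    # current observation, a single "token"
    mem = tf.placeholder(tf.float32, (16, 128, 64))  # scene memory: (batch, slots, embedding)
    out = create_transformer(
        observation=obs, memory=mem,
        dim_model=64, dim_ff=128, nbr_heads=8,
        nbr_encoders=6, nbr_decoders=6,
    )  # -> (16, 1, 64)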
3 | """ 4 | 5 | import tensorflow as tf 6 | import tensorflow.contrib as tc 7 | 8 | 9 | def scaled_dot_product_attention( 10 | Q: tf.Tensor, 11 | K: tf.Tensor, 12 | V: tf.Tensor, 13 | dim_model: int, 14 | mask: tf.Tensor = None, 15 | scope: str = "sdp_attention", 16 | ) -> tf.Tensor: 17 | assert Q.shape[-1] == K.shape[-1] == V.shape[-1] 18 | 19 | with tf.variable_scope(scope): 20 | # Create K^T 21 | out = tf.matmul(Q, tf.transpose(K, [0, 2, 1]), name="Q_mult_K") 22 | 23 | # Scale by dimension 24 | factor = Q.shape.as_list()[-1] 25 | out = tf.divide( 26 | out, tf.sqrt(tf.cast(factor, tf.float32)), name="Q_mult_K_scaled" 27 | ) 28 | 29 | if mask is not None: 30 | # Set to -Inf for 0 in mask 31 | assert ( 32 | out.shape == mask.shape 33 | ), f"Incorrect mask dimensions: {out.shape} vs {mask.shape}" 34 | out = tf.add( 35 | tf.multiply(out, mask), 36 | (1.0 - mask) * (-1e10), 37 | name="Q_mult_K_scaled_masked", 38 | ) 39 | 40 | out = tf.nn.softmax(out) 41 | out = tf.matmul(out, V, name="attn_block_out") 42 | 43 | return out 44 | 45 | 46 | def multihead_attention( 47 | query: tf.Tensor, 48 | memory: tf.Tensor, 49 | nbr_heads: int, 50 | dim_model: int, 51 | mask: int = None, 52 | scope: str = "multihead_attention", 53 | ) -> tf.Tensor: 54 | 55 | if memory is None: 56 | memory = query 57 | 58 | with tf.variable_scope(scope): 59 | # Linear projections 60 | # dimensions: [batch, q_size / k_size, model_dim] 61 | Q = tf.layers.dense(query, dim_model, activation=tf.nn.relu, name="Q") 62 | K = tf.layers.dense(memory, dim_model, activation=tf.nn.relu, name="K") 63 | V = tf.layers.dense(memory, dim_model, activation=tf.nn.relu, name="V") 64 | 65 | Q_split = tf.concat( 66 | tf.split(Q, nbr_heads, axis=2), axis=0, name="Q_multihead" 67 | ) 68 | K_split = tf.concat( 69 | tf.split(K, nbr_heads, axis=2), axis=0, name="K_multihead" 70 | ) 71 | V_split = tf.concat( 72 | tf.split(V, nbr_heads, axis=2), axis=0, name="V_multihead" 73 | ) 74 | 75 | if mask is not None: 76 | mask_split = tf.tile(mask, [nbr_heads, 1, 1]) 77 | else: 78 | mask_split = mask 79 | 80 | # Apply scaled dot product attention 81 | out = scaled_dot_product_attention( 82 | Q=Q_split, 83 | K=K_split, 84 | V=V_split, 85 | mask=mask_split, 86 | dim_model=dim_model, 87 | ) 88 | 89 | # Merge the multi-head back to the original shape 90 | out = tf.concat( 91 | tf.split(out, nbr_heads, axis=0), axis=2, name="multihead_out" 92 | ) 93 | 94 | return out 95 | 96 | 97 | def pointwise_feedforward( 98 | x: tf.Tensor, 99 | dim_ff: int, 100 | dim_model: int, 101 | scope: str = "pointwise_feedforward", 102 | ) -> tf.Tensor: 103 | 104 | out = x 105 | with tf.variable_scope(scope): 106 | out = tf.layers.conv1d( 107 | out, filters=dim_ff, kernel_size=1, activation=tf.nn.relu 108 | ) 109 | out = tf.layers.conv1d(out, filters=dim_model, kernel_size=1) 110 | 111 | return out 112 | 113 | 114 | def encoder_layer( 115 | x: tf.Tensor, 116 | nbr_heads: int, 117 | dim_model: int, 118 | dim_ff: int, 119 | scope: str, 120 | mask: tf.Tensor = None, 121 | ) -> tf.Tensor: 122 | 123 | out = x 124 | with tf.variable_scope(scope): 125 | out = tc.layers.layer_norm( 126 | out 127 | + multihead_attention( 128 | query=out, 129 | memory=None, 130 | nbr_heads=nbr_heads, 131 | dim_model=dim_model, 132 | mask=mask, 133 | ), 134 | center=True, 135 | scale=True, 136 | ) 137 | out = tc.layers.layer_norm( 138 | out 139 | + pointwise_feedforward(x=out, dim_ff=dim_ff, dim_model=dim_model), 140 | center=True, 141 | scale=True, 142 | ) 143 | 144 | return out 145 | 146 | 147 | def encoder( 148 | 
memory: tf.Tensor, 149 | nbr_encoders: int, 150 | nbr_heads: int, 151 | dim_model: int, 152 | dim_ff: int, 153 | input_mask: tf.Tensor = None, 154 | scope: str = "encoder", 155 | ) -> tf.Tensor: 156 | 157 | out = memory 158 | with tf.variable_scope(scope): 159 | for i in range(nbr_encoders): 160 | out = encoder_layer( 161 | x=out, 162 | nbr_heads=nbr_heads, 163 | dim_model=dim_model, 164 | dim_ff=dim_ff, 165 | mask=input_mask, 166 | scope=f"enc_{i}", 167 | ) 168 | return out 169 | 170 | 171 | def decoder_layer( 172 | target: tf.Tensor, 173 | context: tf.Tensor, 174 | nbr_heads: int, 175 | dim_model: int, 176 | dim_ff: int, 177 | scope: str, 178 | input_mask: tf.Tensor = None, 179 | target_mask: tf.Tensor = None, 180 | ) -> tf.Tensor: 181 | 182 | out = target 183 | with tf.variable_scope(scope): 184 | out = tc.layers.layer_norm( 185 | out 186 | + multihead_attention( 187 | query=out, 188 | memory=None, 189 | nbr_heads=nbr_heads, 190 | dim_model=dim_model, 191 | mask=target_mask, 192 | scope="self_attn", 193 | ), 194 | center=True, 195 | scale=True, 196 | ) 197 | out = tc.layers.layer_norm( 198 | out 199 | + multihead_attention( 200 | query=out, 201 | memory=context, 202 | nbr_heads=nbr_heads, 203 | dim_model=dim_model, 204 | mask=input_mask, 205 | scope="decorder_attn", 206 | ), 207 | center=True, 208 | scale=True, 209 | ) 210 | out = tc.layers.layer_norm( 211 | out 212 | + pointwise_feedforward(x=out, dim_ff=dim_ff, dim_model=dim_model), 213 | center=True, 214 | scale=True, 215 | ) 216 | 217 | return out 218 | 219 | 220 | def decoder( 221 | target: tf.Tensor, 222 | context: tf.Tensor, 223 | nbr_decoders: int, 224 | nbr_heads: int, 225 | dim_model: int, 226 | dim_ff: int, 227 | input_mask: tf.Tensor = None, 228 | target_mask: tf.Tensor = None, 229 | scope: str = "decoder", 230 | ) -> tf.Tensor: 231 | 232 | out = target 233 | with tf.variable_scope(scope): 234 | 235 | if input_mask is not None: 236 | input_mask = input_mask[:, 0:1, :] 237 | 238 | for i in range(nbr_decoders): 239 | out = decoder_layer( 240 | target=out, 241 | context=context, 242 | nbr_heads=nbr_heads, 243 | dim_model=dim_model, 244 | dim_ff=dim_ff, 245 | input_mask=input_mask, 246 | target_mask=target_mask, 247 | scope=f"dec_{i}", 248 | ) 249 | return out 250 | 251 | 252 | def create_transformer( 253 | observation: tf.Tensor, 254 | memory: tf.Tensor, 255 | dim_model: int, 256 | dim_ff: int, 257 | nbr_heads: int, 258 | nbr_encoders: int, 259 | nbr_decoders: int, 260 | input_mask: tf.Tensor = None, 261 | target_mask: tf.Tensor = None, 262 | ): 263 | """ 264 | Creates a transformer optimized for Scene Memory. It expects the current 265 | observation to be a single element and not a sequence (as is normal) for 266 | transformers. 267 | """ 268 | assert ( 269 | observation.shape.ndims == memory.shape.ndims == 3 270 | ), "Incorrect tensor ranks." 
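    # The observation is a single query "token" (sequence length 1) that attends over the memory,
    # so only the batch dimensions of the two tensors need to agree.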
271 | assert observation.shape[0] == memory.shape[0], "Mismatching batch sizes" 272 | assert ( 273 | dim_model % nbr_heads == 0 274 | ), "dim_model must be divisible by nbr_heads" 275 | enc = encoder( 276 | memory=memory, 277 | nbr_encoders=nbr_encoders, 278 | nbr_heads=nbr_heads, 279 | dim_model=dim_model, 280 | dim_ff=dim_ff, 281 | input_mask=input_mask, 282 | ) 283 | dec = decoder( 284 | target=observation, 285 | context=enc, 286 | nbr_decoders=nbr_decoders, 287 | nbr_heads=nbr_heads, 288 | dim_model=dim_model, 289 | dim_ff=dim_ff, 290 | input_mask=input_mask, 291 | target_mask=target_mask, 292 | ) 293 | return dec 294 | -------------------------------------------------------------------------------- /smt/memory.py: -------------------------------------------------------------------------------- 1 | """ 2 | A neural memory module inspired by the Scene Memory Transformer paper. 3 | """ 4 | 5 | import typing 6 | from collections import deque 7 | 8 | import numpy as np 9 | import tensorflow as tf 10 | 11 | 12 | class Memory(object): 13 | """ 14 | The numpy reference implementation of a circular buffer / memory. 15 | Used to test the Tensorflow implementation. 16 | """ 17 | 18 | def __init__(self, memory_size: int, embedding_size: int) -> None: 19 | self.memory_size = memory_size 20 | self.embedding_size = embedding_size 21 | self.embeddings: typing.Deque[Any] = deque(maxlen=memory_size) 22 | self.input_mask = np.zeros(self.memory_size) 23 | 24 | self.reset() 25 | 26 | def reset(self) -> None: 27 | self.embeddings.clear() 28 | self.input_mask = np.zeros(self.memory_size) 29 | 30 | def set_state(self, state: np.ndarray, input_mask: np.ndarray): 31 | if state is None: 32 | self.reset() 33 | return 34 | 35 | assert input_mask.shape == (self.memory_size,) 36 | assert state.shape == (self.memory_size, self.embedding_size) 37 | 38 | size = np.count_nonzero(input_mask) 39 | self.embeddings = deque(state[:size, :], maxlen=self.memory_size) 40 | self.input_mask = np.array(input_mask) 41 | 42 | def get_state(self): 43 | size = len(self.embeddings) 44 | state = np.zeros((self.memory_size, self.embedding_size)) 45 | if size > 0: 46 | state[:size, :] = np.array(self.embeddings) 47 | return state 48 | 49 | def get_statemask(self): 50 | """Returns a combined state and mask in one""" 51 | size = len(self.embeddings) 52 | state = np.zeros((self.memory_size, self.embedding_size + 1)) 53 | state[:size, :-1] = np.array(self.embeddings) 54 | state[:, -1] = self.input_mask 55 | return state 56 | 57 | def set_statemask(self, statemask): 58 | self.set_state(statemask[:, :-1].squeeze(), statemask[:, -1].flatten()) 59 | 60 | def get_mask(self): 61 | return np.array(self.input_mask) 62 | 63 | def add_embedding(self, emb: np.ndarray): 64 | """Adds an embedding to the memory and update the input mask. 65 | 66 | :param np.ndarray emb: The new embedding. 67 | """ 68 | emb = emb.squeeze() 69 | assert emb.shape == ( 70 | self.embedding_size, 71 | ), f"{emb.shape} vs {(self.embedding_size,)}" 72 | self.embeddings.appendleft(emb) 73 | size = len(self.embeddings) 74 | self.input_mask[size - 1] = 1 75 | 76 | def __len__(self): 77 | return len(self.embeddings) 78 | 79 | 80 | """ 81 | The Tensorflow implementation of the memory. 
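`update_memory` advances the buffer by one observation, clearing it first when the reset/done
flag is set; `sequence_update_memory` unrolls that update over a single environment's sequence;
`batch_update_memory` maps the sequence update over a batch of parallel environments. As in the
numpy `Memory` above, the newest embedding occupies slot 0 and the mask marks which slots hold
valid data.

A minimal single-step sketch (shapes are illustrative):

    memory, mask = empty_state(memory_size=10, embed_size=5)
    obs = tf.ones((5,))          # one new embedding
    done = tf.constant(0.0)      # 1.0 would reset the memory before the update
    new_memory, new_mask = update_memory(obs, memory, mask, done)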
82 | """ 83 | 84 | 85 | def update_memory( 86 | observation: tf.Tensor, memory: tf.Tensor, mask: tf.Tensor, reset: tf.Tensor 87 | ): 88 | """ 89 | Update the memory and mask based on latest observation 90 | """ 91 | assert ( 92 | observation.shape[0] == memory.shape[1] 93 | ), f"Embedding sizes don't match, {observation.shape[0]} vs {memory.shape[1]}" 94 | assert mask.shape[0] == memory.shape[0], f"Memory sizes don't match" 95 | assert len(reset.shape.as_list()) == 0, f"Reset must be scalar" 96 | 97 | reset = tf.cast(reset, dtype=tf.float32) 98 | 99 | # Reset memory if requested 100 | new_memory = memory * (1 - reset) 101 | 102 | # Shift memory forward and add new observation 103 | new_memory = tf.concat( 104 | [tf.expand_dims(observation, 0), new_memory[:-1, :]], axis=0 105 | ) 106 | 107 | # Update mask 108 | new_mask = mask * (1 - reset) 109 | new_mask = tf.concat([tf.ones((1)), new_mask[:-1]], axis=0) 110 | return new_memory, new_mask 111 | 112 | 113 | def sequence_update_memory( 114 | observations: tf.Tensor, 115 | start_memory: tf.Tensor, 116 | start_mask: tf.Tensor, 117 | dones_ph: tf.Tensor, 118 | ): 119 | """Takes a number of observations in a sequence and creates appropriate 120 | memory and mask for each observation. 121 | 122 | :param tf.Tensor observations: Shape: (sequence_size, embedding_size) 123 | :param tf.Tensor start_memory: Shape: (memory_size, embedding_size) 124 | :param tf.Tensor start_mask: Shape: (memory_size) 125 | :param tf.Tensor dones_ph: Shape: (sequence_size) 126 | :return: Returns a tuple of the new memory, mask and the new state. 127 | :rtype: (tf.Tensor, tf.Tensor, tf.Tensor) 128 | 129 | """ 130 | assert ( 131 | observations.shape[0] == dones_ph.shape[0] 132 | ), f"Done and observations do not match." 133 | 134 | nbr_obs = observations.shape.as_list()[0] 135 | 136 | masks = [] 137 | memories = [] 138 | obs = tf.split(observations, nbr_obs, axis=0) 139 | dones = tf.split(dones_ph, nbr_obs, axis=0) 140 | new_mem = start_memory 141 | new_mask = start_mask 142 | for seq_idx in range(nbr_obs): 143 | new_mem, new_mask = update_memory( 144 | tf.squeeze(obs[seq_idx]), 145 | new_mem, 146 | new_mask, 147 | tf.squeeze(dones[seq_idx]), 148 | ) 149 | masks.append(new_mask) 150 | memories.append(new_mem) 151 | 152 | new_state = tf.expand_dims( 153 | tf.concat( 154 | [new_mem, tf.expand_dims(new_mask, axis=1)], 155 | axis=1, 156 | name="new_state", 157 | ), 158 | axis=0, 159 | ) 160 | sequence_memory = tf.stack(memories, axis=0, name="sequence_memory") 161 | sequence_mask = tf.stack(masks, axis=0, name="sequence_mask") 162 | assert sequence_memory.shape == tf.TensorShape( 163 | [dones_ph.shape[0], start_memory.shape[0], start_memory.shape[1]] 164 | ), f"Incorrect memory output shape: {sequence_memory.shape}" 165 | assert sequence_mask.shape == tf.TensorShape( 166 | [dones_ph.shape[0], start_mask.shape[0]] 167 | ), f"Incorrect mask output shape: {sequence_mask.shape}" 168 | assert new_state.shape == tf.TensorShape( 169 | [ 170 | tf.Dimension(1), 171 | start_memory.shape[0], 172 | start_memory.shape[1] + tf.Dimension(1), 173 | ] 174 | ), f"Incorrect new_state output shape: {new_state.shape}" 175 | return sequence_memory, sequence_mask, new_state 176 | 177 | 178 | def batch_update_memory( 179 | observations: tf.Tensor, 180 | start_memory: tf.Tensor, 181 | start_mask: tf.Tensor, 182 | dones_ph: tf.Tensor, 183 | ): 184 | """Takes a batch of sequences and updates their memories. 
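    Each batch element is unrolled independently with `sequence_update_memory` inside its own
    `memory/batch_{i}` variable scope, and the per-step memories and masks are stacked along a
    new leading batch axis.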
185 | 186 | :param tf.Tensor observations: Shape: (batch, sequence_size, embedding_size) 187 | :param tf.Tensor start_memory: Shape: (batch, memory_size, embedding_size) 188 | :param tf.Tensor start_mask: Shape: (batch, memory_size) 189 | :param tf.Tensor dones_ph: Shape: (batch, sequence_size) 190 | :return: Returns a tuple of the new memory, mask and the new state. 191 | :rtype: (tf.Tensor, tf.Tensor, tf.Tensor) 192 | 193 | """ 194 | assert ( 195 | observations.shape.ndims == 3 196 | and start_memory.shape.ndims == 3 197 | and start_mask.shape.ndims == 2 198 | and dones_ph.shape.ndims == 2 199 | ), "Incorrect ranks of input data" 200 | assert ( 201 | observations.shape[0] 202 | == dones_ph.shape[0] 203 | == start_mask.shape[0] 204 | == start_memory.shape[0] 205 | ), "Batch size should agree for all inputs." 206 | assert ( 207 | observations.shape[-1] == start_memory.shape[-1] 208 | ), "Embedding sizes should agreee" 209 | assert ( 210 | start_memory.shape[1] == start_mask.shape[1] 211 | ), "Memory sizes should agree" 212 | assert ( 213 | dones_ph.shape[1] == observations.shape[1] 214 | ), "Sequence sizes should agree" 215 | 216 | batch_size = observations.shape.as_list()[0] 217 | 218 | masks = [] 219 | memories = [] 220 | new_states = [] 221 | 222 | with tf.variable_scope("memory"): 223 | for batch_idx in range(batch_size): 224 | with tf.variable_scope(f"batch_{batch_idx}"): 225 | new_mem, new_mask, new_state = sequence_update_memory( 226 | observations[batch_idx, :, :], 227 | start_memory[batch_idx, :, :], 228 | start_mask[batch_idx, :], 229 | dones_ph[batch_idx, :], 230 | ) 231 | masks.append(new_mask) 232 | memories.append(new_mem) 233 | new_states.append(new_state) 234 | 235 | batch_memory = tf.stack(memories, axis=0, name="batch_memory") 236 | batch_mask = tf.stack(masks, axis=0, name="batch_mask") 237 | batch_new_state = tf.stack(new_states, axis=0, name="batch_new_state") 238 | return batch_memory, batch_mask, batch_new_state 239 | 240 | 241 | def empty_state(memory_size, embed_size): 242 | """Returns an empty state for the memory""" 243 | return ( 244 | tf.zeros((memory_size, embed_size), dtype=np.float32), 245 | tf.zeros((memory_size), dtype=np.float32), 246 | ) 247 | -------------------------------------------------------------------------------- /poetry.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | category = "main" 3 | description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." 4 | name = "absl-py" 5 | optional = false 6 | python-versions = "*" 7 | version = "0.7.1" 8 | 9 | [package.dependencies] 10 | six = "*" 11 | 12 | [[package]] 13 | category = "main" 14 | description = "Read/rewrite/write Python ASTs" 15 | name = "astor" 16 | optional = false 17 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 18 | version = "0.8.0" 19 | 20 | [[package]] 21 | category = "main" 22 | description = "Python bindings to Atari games" 23 | marker = "extra == \"atari\"" 24 | name = "atari-py" 25 | optional = false 26 | python-versions = "*" 27 | version = "0.1.15" 28 | 29 | [package.dependencies] 30 | numpy = "*" 31 | six = "*" 32 | 33 | [[package]] 34 | category = "dev" 35 | description = "Atomic file writes." 
36 | name = "atomicwrites" 37 | optional = false 38 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 39 | version = "1.3.0" 40 | 41 | [[package]] 42 | category = "dev" 43 | description = "Classes Without Boilerplate" 44 | name = "attrs" 45 | optional = false 46 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 47 | version = "19.1.0" 48 | 49 | [[package]] 50 | category = "main" 51 | description = "Extended pickling support for Python objects" 52 | name = "cloudpickle" 53 | optional = false 54 | python-versions = "*" 55 | version = "1.2.1" 56 | 57 | [[package]] 58 | category = "dev" 59 | description = "Cross-platform colored terminal text." 60 | marker = "sys_platform == \"win32\"" 61 | name = "colorama" 62 | optional = false 63 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 64 | version = "0.4.1" 65 | 66 | [[package]] 67 | category = "main" 68 | description = "Composable style cycles" 69 | name = "cycler" 70 | optional = false 71 | python-versions = "*" 72 | version = "0.10.0" 73 | 74 | [package.dependencies] 75 | six = "*" 76 | 77 | [[package]] 78 | category = "main" 79 | description = "Clean single-source support for Python 3 and 2" 80 | name = "future" 81 | optional = false 82 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 83 | version = "0.17.1" 84 | 85 | [[package]] 86 | category = "main" 87 | description = "Python AST that abstracts the underlying Python version" 88 | name = "gast" 89 | optional = false 90 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 91 | version = "0.2.2" 92 | 93 | [[package]] 94 | category = "main" 95 | description = "HTTP/2-based RPC framework" 96 | name = "grpcio" 97 | optional = false 98 | python-versions = "*" 99 | version = "1.21.1" 100 | 101 | [package.dependencies] 102 | six = ">=1.5.2" 103 | 104 | [[package]] 105 | category = "main" 106 | description = "The OpenAI Gym: A toolkit for developing and comparing your reinforcement learning agents." 107 | name = "gym" 108 | optional = false 109 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 110 | version = "0.12.5" 111 | 112 | [package.dependencies] 113 | Pillow = "*" 114 | atari_py = ">=0.1.0,<0.2.0" 115 | numpy = ">=1.10.4" 116 | opencv-python = "*" 117 | pyglet = ">=1.2.0" 118 | scipy = "*" 119 | six = "*" 120 | 121 | [[package]] 122 | category = "main" 123 | description = "Read and write HDF5 files from Python" 124 | name = "h5py" 125 | optional = false 126 | python-versions = "*" 127 | version = "2.9.0" 128 | 129 | [package.dependencies] 130 | numpy = ">=1.7" 131 | six = "*" 132 | 133 | [[package]] 134 | category = "dev" 135 | description = "Read metadata from Python packages" 136 | name = "importlib-metadata" 137 | optional = false 138 | python-versions = ">=2.7,!=3.0,!=3.1,!=3.2,!=3.3" 139 | version = "0.18" 140 | 141 | [package.dependencies] 142 | zipp = ">=0.5" 143 | 144 | [[package]] 145 | category = "main" 146 | description = "Lightweight pipelining: using Python functions as pipeline jobs." 
147 | name = "joblib" 148 | optional = false 149 | python-versions = "*" 150 | version = "0.13.2" 151 | 152 | [[package]] 153 | category = "main" 154 | description = "Reference implementations of popular deep learning models" 155 | name = "keras-applications" 156 | optional = false 157 | python-versions = "*" 158 | version = "1.0.8" 159 | 160 | [package.dependencies] 161 | h5py = "*" 162 | numpy = ">=1.9.1" 163 | 164 | [[package]] 165 | category = "main" 166 | description = "Easy data preprocessing and data augmentation for deep learning models" 167 | name = "keras-preprocessing" 168 | optional = false 169 | python-versions = "*" 170 | version = "1.1.0" 171 | 172 | [package.dependencies] 173 | numpy = ">=1.9.1" 174 | six = ">=1.9.0" 175 | 176 | [[package]] 177 | category = "main" 178 | description = "A fast implementation of the Cassowary constraint solver" 179 | name = "kiwisolver" 180 | optional = false 181 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 182 | version = "1.1.0" 183 | 184 | [package.dependencies] 185 | setuptools = "*" 186 | 187 | [[package]] 188 | category = "main" 189 | description = "Python implementation of Markdown." 190 | name = "markdown" 191 | optional = false 192 | python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" 193 | version = "3.1.1" 194 | 195 | [package.dependencies] 196 | setuptools = ">=36" 197 | 198 | [[package]] 199 | category = "main" 200 | description = "Python plotting package" 201 | name = "matplotlib" 202 | optional = false 203 | python-versions = ">=3.6" 204 | version = "3.1.0" 205 | 206 | [package.dependencies] 207 | cycler = ">=0.10" 208 | kiwisolver = ">=1.0.1" 209 | numpy = ">=1.11" 210 | pyparsing = ">=2.0.1,<2.0.4 || >2.0.4,<2.1.2 || >2.1.2,<2.1.6 || >2.1.6" 211 | python-dateutil = ">=2.1" 212 | 213 | [[package]] 214 | category = "dev" 215 | description = "More routines for operating on iterables, beyond itertools" 216 | marker = "python_version > \"2.7\"" 217 | name = "more-itertools" 218 | optional = false 219 | python-versions = ">=3.4" 220 | version = "7.0.0" 221 | 222 | [[package]] 223 | category = "main" 224 | description = "Python bindings for MPI" 225 | name = "mpi4py" 226 | optional = false 227 | python-versions = "*" 228 | version = "3.0.2" 229 | 230 | [[package]] 231 | category = "main" 232 | description = "NumPy is the fundamental package for array computing with Python." 233 | name = "numpy" 234 | optional = false 235 | python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" 236 | version = "1.16.4" 237 | 238 | [[package]] 239 | category = "main" 240 | description = "Wrapper package for OpenCV python bindings." 
241 | name = "opencv-python" 242 | optional = false 243 | python-versions = "*" 244 | version = "4.1.0.25" 245 | 246 | [package.dependencies] 247 | numpy = ">=1.11.1" 248 | 249 | [[package]] 250 | category = "dev" 251 | description = "Core utilities for Python packages" 252 | name = "packaging" 253 | optional = false 254 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 255 | version = "19.0" 256 | 257 | [package.dependencies] 258 | pyparsing = ">=2.0.2" 259 | six = "*" 260 | 261 | [[package]] 262 | category = "main" 263 | description = "Powerful data structures for data analysis, time series, and statistics" 264 | name = "pandas" 265 | optional = false 266 | python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" 267 | version = "0.24.2" 268 | 269 | [package.dependencies] 270 | numpy = ">=1.12.0" 271 | python-dateutil = ">=2.5.0" 272 | pytz = ">=2011k" 273 | 274 | [[package]] 275 | category = "main" 276 | description = "Python Imaging Library (Fork)" 277 | marker = "extra == \"atari\"" 278 | name = "pillow" 279 | optional = false 280 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" 281 | version = "6.0.0" 282 | 283 | [[package]] 284 | category = "dev" 285 | description = "plugin and hook calling mechanisms for python" 286 | name = "pluggy" 287 | optional = false 288 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 289 | version = "0.12.0" 290 | 291 | [package.dependencies] 292 | importlib-metadata = ">=0.12" 293 | 294 | [[package]] 295 | category = "main" 296 | description = "Protocol Buffers" 297 | name = "protobuf" 298 | optional = false 299 | python-versions = "*" 300 | version = "3.8.0" 301 | 302 | [package.dependencies] 303 | setuptools = "*" 304 | six = ">=1.9" 305 | 306 | [[package]] 307 | category = "dev" 308 | description = "library with cross-python path, ini-parsing, io, code, log facilities" 309 | name = "py" 310 | optional = false 311 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 312 | version = "1.8.0" 313 | 314 | [[package]] 315 | category = "main" 316 | description = "Cross-platform windowing and multimedia library" 317 | name = "pyglet" 318 | optional = false 319 | python-versions = "*" 320 | version = "1.3.2" 321 | 322 | [package.dependencies] 323 | future = "*" 324 | 325 | [[package]] 326 | category = "main" 327 | description = "Python parsing module" 328 | name = "pyparsing" 329 | optional = false 330 | python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" 331 | version = "2.4.0" 332 | 333 | [[package]] 334 | category = "dev" 335 | description = "pytest: simple powerful testing with Python" 336 | name = "pytest" 337 | optional = false 338 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 339 | version = "4.6.3" 340 | 341 | [package.dependencies] 342 | atomicwrites = ">=1.0" 343 | attrs = ">=17.4.0" 344 | colorama = "*" 345 | importlib-metadata = ">=0.12" 346 | packaging = "*" 347 | pluggy = ">=0.12,<1.0" 348 | py = ">=1.5.0" 349 | six = ">=1.10.0" 350 | wcwidth = "*" 351 | 352 | [package.dependencies.more-itertools] 353 | python = ">=2.8" 354 | version = ">=4.0.0" 355 | 356 | [[package]] 357 | category = "main" 358 | description = "Extensions to the standard Python datetime module" 359 | name = "python-dateutil" 360 | optional = false 361 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" 362 | version = "2.8.0" 363 | 364 | [package.dependencies] 365 | six = ">=1.5" 366 | 367 | [[package]] 368 | category = "main" 369 | description = "World timezone definitions, modern and 
historical" 370 | name = "pytz" 371 | optional = false 372 | python-versions = "*" 373 | version = "2019.1" 374 | 375 | [[package]] 376 | category = "main" 377 | description = "SciPy: Scientific Library for Python" 378 | name = "scipy" 379 | optional = false 380 | python-versions = ">=3.5" 381 | version = "1.3.0" 382 | 383 | [package.dependencies] 384 | numpy = ">=1.13.3" 385 | 386 | [[package]] 387 | category = "main" 388 | description = "Python 2 and 3 compatibility utilities" 389 | name = "six" 390 | optional = false 391 | python-versions = ">=2.6, !=3.0.*, !=3.1.*" 392 | version = "1.12.0" 393 | 394 | [[package]] 395 | category = "main" 396 | description = "A fork of OpenAI Baselines, implementations of reinforcement learning algorithms." 397 | name = "stable-baselines" 398 | optional = false 399 | python-versions = "*" 400 | version = "2.6.0" 401 | 402 | [package.dependencies] 403 | cloudpickle = ">=0.5.5" 404 | gym = ">=0.10.9" 405 | joblib = "*" 406 | matplotlib = "*" 407 | mpi4py = "*" 408 | numpy = "*" 409 | opencv-python = "*" 410 | pandas = "*" 411 | scipy = "*" 412 | 413 | [[package]] 414 | category = "main" 415 | description = "TensorBoard lets you watch Tensors Flow" 416 | name = "tensorboard" 417 | optional = false 418 | python-versions = ">= 2.7, != 3.0.*, != 3.1.*" 419 | version = "1.12.2" 420 | 421 | [package.dependencies] 422 | grpcio = ">=1.6.3" 423 | markdown = ">=2.6.8" 424 | numpy = ">=1.12.0" 425 | protobuf = ">=3.4.0" 426 | six = ">=1.10.0" 427 | werkzeug = ">=0.11.10" 428 | 429 | [package.dependencies.wheel] 430 | python = ">=3" 431 | version = ">=0.26" 432 | 433 | [[package]] 434 | category = "main" 435 | description = "TensorFlow is an open source machine learning framework for everyone." 436 | name = "tensorflow" 437 | optional = false 438 | python-versions = "*" 439 | version = "1.12.2" 440 | 441 | [package.dependencies] 442 | absl-py = ">=0.1.6" 443 | astor = ">=0.6.0" 444 | gast = ">=0.2.0" 445 | grpcio = ">=1.8.6" 446 | keras-applications = ">=1.0.6" 447 | keras-preprocessing = ">=1.0.5" 448 | numpy = ">=1.13.3" 449 | protobuf = ">=3.6.1" 450 | six = ">=1.10.0" 451 | tensorboard = ">=1.12.0,<1.13.0" 452 | termcolor = ">=1.1.0" 453 | wheel = ">=0.26" 454 | 455 | [[package]] 456 | category = "main" 457 | description = "ANSII Color formatting for output in terminal." 458 | name = "termcolor" 459 | optional = false 460 | python-versions = "*" 461 | version = "1.1.0" 462 | 463 | [[package]] 464 | category = "dev" 465 | description = "Measures number of Terminal column cells of wide-character codes" 466 | name = "wcwidth" 467 | optional = false 468 | python-versions = "*" 469 | version = "0.1.7" 470 | 471 | [[package]] 472 | category = "main" 473 | description = "The comprehensive WSGI web application library." 474 | name = "werkzeug" 475 | optional = false 476 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 477 | version = "0.15.4" 478 | 479 | [[package]] 480 | category = "main" 481 | description = "A built-package format for Python." 
482 | name = "wheel" 483 | optional = false 484 | python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" 485 | version = "0.33.4" 486 | 487 | [[package]] 488 | category = "dev" 489 | description = "Backport of pathlib-compatible object wrapper for zip files" 490 | name = "zipp" 491 | optional = false 492 | python-versions = ">=2.7" 493 | version = "0.5.1" 494 | 495 | [metadata] 496 | content-hash = "0a703a80db038055d43d1d674d7b8648fcf2349e66068746af3aec7eac8f2958" 497 | python-versions = "^3.6" 498 | 499 | [metadata.hashes] 500 | absl-py = ["b943d1c567743ed0455878fcd60bc28ac9fae38d129d1ccfad58079da00b8951"] 501 | astor = ["0e41295809baf43ae8303350e031aff81ae52189b6f881f36d623fa8b2f1960e", "37a6eed8b371f1228db08234ed7f6cfdc7817a3ed3824797e20cbb11dc2a7862"] 502 | atari-py = ["591542da26733e4a2ec7ae26f7d1785b4a085823484f51f2e48bd42c6eb52e69", "903cddba8c3262673a86ead75ddab86819fdc232991159ee18e7444937f3391c", "92676e1aa77049d7a665b1d48d9549516ea644977cb840f45468809536c875e4", "d13d6f4d136abc816c8c9d035df88554bc641db4cf2878cdccb295b68434aa07", "f00e77e0e22fcec44e1a02fb59cd49cbe5f1061d87458abeb7517f4ffecf99cd", "f0f41e5b340bce1fa76e5b591cbd50c6b0cadfc0a94d9099a39370635e2c88a1"] 503 | atomicwrites = ["03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4", "75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6"] 504 | attrs = ["69c0dbf2ed392de1cb5ec704444b08a5ef81680a61cb899dc08127123af36a79", "f0b870f674851ecbfbbbd364d6b5cbdff9dcedbc7f3f5e18a6891057f21fe399"] 505 | cloudpickle = ["603244e0f552b72a267d47a7d9b347b27a3430f58a0536037a290e7e0e212ecf", "b8ba7e322f2394b9bbbdc1c976e6442c2c02acc784cb9e553cee9186166a6890"] 506 | colorama = ["05eed71e2e327246ad6b38c540c4a3117230b19679b875190486ddd2d721422d", "f8ac84de7840f5b9c4e3347b3c1eaa50f7e49c2b07596221daec5edaabbd7c48"] 507 | cycler = ["1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", "cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8"] 508 | future = ["67045236dcfd6816dc439556d009594abf643e5eb48992e36beac09c2ca659b8"] 509 | gast = ["fe939df4583692f0512161ec1c880e0a10e71e6a232da045ab8edd3756fbadf0"] 510 | grpcio = ["0232add03144dd3cf9b660e2718244cb8e175370dca4d3855cb4e489a7811b53", "0f20e6dcb1b8662cdca033bb97c0a8116a5343e3ebc7f71c5fe7f89039978350", "10b07a623d33d4966f45c85d410bc6a79c5ac6341f06c3beda6c22be12cbfe07", "10c0476d5a52d21f402fc073745dc43b87cc8e080a1f49bbff4e1059019310fb", "289dae0b35c59d191c524e976dd0a6f8c995d2062e72621eb866ad0f4472a635", "2be726f16142d358a0df1e81d583d6820ee561a7856a79cca2fbe49989308be7", "4338d2a81f5b4ca022e085040b3cfce19419a5ce44aa7e6810ac1df05365bed7", "4c535b46f20e66bee3097583231977e721acdfcb1671d1490c99b7be8902ce18", "557154aef70a0e979700cc9528bc8b606b668084a29a0d57dbc4b06b078a2f1c", "5bfdd7e6647498f979dc46583723c852d97b25afe995d55aa1c76a5f9816bc1f", "87d8943ae7aa6ca5bbad732867d7f17d2550e4966a0c15b52088e8b579422e47", "89d8719d8de4d137678f7caa979e1b0a6fd4026f8096ceef8c2d164bbabefaf2", "9c3f4af989ce860710ac1864dc2e867dd87e6cee51a2368df1b253596868e52f", "9da52c3c728883aee429bb7c315049f50b2139f680cd86bb1165418e4f93a982", "9e9736659987beab42d18525ed10d21f80a1ba8389eac03425fbfd5684e6bbf0", "9ebcbb1a054cab362d29d3be571d43d6b9b23302d9fc4b43e5327000da1680a9", "a93e08636623e24c939851e2e0c0140b14f524b2980c9cdc4ea52b70a871c7e0", "ac322d86d1a079e0a118d544443ee16f320af0062c191b4754c0c6ec2fc79310", "b1fb101459868f52df6b61e7bb13375e50badf17a160e39fe1d51ae19e53f461", "b39aac96cceac624a23d540473835086a3ffa77c91030189988c073488434493", 
"b65507bc273c6dbf539175a786a344cc0ac78d50e5584f72c6599733f8a3301f", "be5bb6e47417e537c884a2e2ff2e1a8b2c064a998fcfdfcc67528d4e63e7ebaf", "c92de6a28a909c4f460dc1bbbcb50d676cf0b1f40224b222761f73fdd851b522", "c9f5962eb7fa7607b20eb0e4f59ed35829bd600fc0eacb626a6db83229a3e445", "d00bdf9c546ed6e649f785c55b05288e8b2dbb6bf2eb74b6c579fa0d591d35bd", "da804b1dd8293bd9d61b1e6ea989c887ba042a808a4fbdd80001cfa059aafed2", "ead6c5aa3e807345913649c3be395aaca2bbb2d225f18b8f31f37eab225508f6", "eb4d81550ce6f826af4ec6e8d98be347fe96291d718bf115c3f254621ae8d98d", "ef6a18ec8fd32ec81748fe720544ea2fb2d2dc50fd6d06739d5e2eb8f0626a1c", "fad42835656e0b6d3b7ffc900598e776722e30f43b7234a48f2576ca30f31a47", "fb98dbfee0d963b49ae5754554028cf62e6bd695f22de16d242ba9d2f0b7339b", "fb9cd9bb8d26dc17c2dd715a46bca3a879ec8283879b164e85863110dc6e3b2a"] 511 | gym = ["027422f59b662748eae3420b804e35bbf953f62d40cd96d2de9f842c08de822e"] 512 | h5py = ["05750b91640273c69989c657eaac34b091abdd75efc8c4824c82aaf898a2da0a", "082a27208aa3a2286e7272e998e7e225b2a7d4b7821bd840aebf96d50977abbb", "08e2e8297195f9e813e894b6c63f79372582787795bba2014a2db6a2de95f713", "0dd2adeb2e9de5081eb8dcec88874e7fd35dae9a21557be3a55a3c7d491842a4", "0f94de7a10562b991967a66bbe6dda9808e18088676834c0a4dcec3fdd3bcc6f", "106e42e2e01e486a3d32eeb9ba0e3a7f65c12fa8998d63625fa41fb8bdc44cdb", "1606c66015f04719c41a9863c156fc0e6b992150de21c067444bcb82e7d75579", "1854c4beff9961e477e133143c5e5e355dac0b3ebf19c52cf7cc1b1ef757703c", "1e9fb6f1746500ea91a00193ce2361803c70c6b13f10aae9a33ad7b5bd28e800", "2cca17e80ddb151894333377675db90cd0279fa454776e0a4f74308376afd050", "30e365e8408759db3778c361f1e4e0fe8e98a875185ae46c795a85e9bafb9cdf", "3206bac900e16eda81687d787086f4ffd4f3854980d798e191a9868a6510c3ae", "3c23d72058647cee19b30452acc7895621e2de0a0bd5b8a1e34204b9ea9ed43c", "407b5f911a83daa285bbf1ef78a9909ee5957f257d3524b8606be37e8643c5f0", "4162953714a9212d373ac953c10e3329f1e830d3c7473f2a2e4f25dd6241eef0", "5fc7aba72a51b2c80605eba1c50dbf84224dcd206279d30a75c154e5652e1fe4", "713ac19307e11de4d9833af0c4bd6778bde0a3d967cafd2f0f347223711c1e31", "71b946d80ef3c3f12db157d7778b1fe74a517ca85e94809358b15580983c2ce2", "8cc4aed71e20d87e0a6f02094d718a95252f11f8ed143bc112d22167f08d4040", "9d41ca62daf36d6b6515ab8765e4c8c4388ee18e2a665701fef2b41563821002", "a744e13b000f234cd5a5b2a1f95816b819027c57f385da54ad2b7da1adace2f3", "b087ee01396c4b34e9dc41e3a6a0442158206d383c19c7d0396d52067b17c1cb", "b0f03af381d33306ce67d18275b61acb4ca111ced645381387a02c8a5ee1b796", "b9e4b8dfd587365bdd719ae178fa1b6c1231f81280b1375eef8626dfd8761bf3", "c5dd4ec75985b99166c045909e10f0534704d102848b1d9f0992720e908928e7", "d2b82f23cd862a9d05108fe99967e9edfa95c136f532a71cb3d28dc252771f50", "e58a25764472af07b7e1c4b10b0179c8ea726446c7141076286e41891bf3a563", "f3b49107fbfc77333fc2b1ef4d5de2abcd57e7ea3a1482455229494cf2da56ce"] 513 | importlib-metadata = ["6dfd58dfe281e8d240937776065dd3624ad5469c835248219bd16cf2e12dbeb7", "cb6ee23b46173539939964df59d3d72c3e0c1b5d54b84f1d8a7e912fe43612db"] 514 | joblib = ["21e0c34a69ad7fde4f2b1f3402290e9ec46f545f15f1541c582edfe05d87b63a", "315d6b19643ec4afd4c41c671f9f2d65ea9d787da093487a81ead7b0bac94524"] 515 | keras-applications = ["5579f9a12bcde9748f4a12233925a59b93b73ae6947409ff34aa2ba258189fe5", "df4323692b8c1174af821bf906f1e442e63fa7589bf0f1230a0b6bdc5a810c95"] 516 | keras-preprocessing = ["44aee5f2c4d80c3b29f208359fcb336df80f293a0bb6b1c738da43ca206656fb", "5a8debe01d840de93d49e05ccf1c9b81ae30e210d34dacbcc47aeb3049b528e5"] 517 | kiwisolver = ["05b5b061e09f60f56244adc885c4a7867da25ca387376b02c1efc29cc16bcd0f", 
"26f4fbd6f5e1dabff70a9ba0d2c4bd30761086454aa30dddc5b52764ee4852b7", "3b2378ad387f49cbb328205bda569b9f87288d6bc1bf4cd683c34523a2341efe", "400599c0fe58d21522cae0e8b22318e09d9729451b17ee61ba8e1e7c0346565c", "47b8cb81a7d18dbaf4fed6a61c3cecdb5adec7b4ac292bddb0d016d57e8507d5", "53eaed412477c836e1b9522c19858a8557d6e595077830146182225613b11a75", "58e626e1f7dfbb620d08d457325a4cdac65d1809680009f46bf41eaf74ad0187", "5a52e1b006bfa5be04fe4debbcdd2688432a9af4b207a3f429c74ad625022641", "5c7ca4e449ac9f99b3b9d4693debb1d6d237d1542dd6a56b3305fe8a9620f883", "682e54f0ce8f45981878756d7203fd01e188cc6c8b2c5e2cf03675390b4534d5", "79bfb2f0bd7cbf9ea256612c9523367e5ec51d7cd616ae20ca2c90f575d839a2", "7f4dd50874177d2bb060d74769210f3bce1af87a8c7cf5b37d032ebf94f0aca3", "8944a16020c07b682df861207b7e0efcd2f46c7488619cb55f65882279119389", "8aa7009437640beb2768bfd06da049bad0df85f47ff18426261acecd1cf00897", "939f36f21a8c571686eb491acfffa9c7f1ac345087281b412d63ea39ca14ec4a", "9733b7f64bd9f807832d673355f79703f81f0b3e52bfce420fc00d8cb28c6a6c", "a02f6c3e229d0b7220bd74600e9351e18bc0c361b05f29adae0d10599ae0e326", "a0c0a9f06872330d0dd31b45607197caab3c22777600e88031bfe66799e70bb0", "acc4df99308111585121db217681f1ce0eecb48d3a828a2f9bbf9773f4937e9e", "b64916959e4ae0ac78af7c3e8cef4becee0c0e9694ad477b4c6b3a536de6a544", "d3fcf0819dc3fea58be1fd1ca390851bdb719a549850e708ed858503ff25d995", "d52e3b1868a4e8fd18b5cb15055c76820df514e26aa84cc02f593d99fef6707f", "db1a5d3cc4ae943d674718d6c47d2d82488ddd94b93b9e12d24aabdbfe48caee", "e3a21a720791712ed721c7b95d433e036134de6f18c77dbe96119eaf7aa08004", "e8bf074363ce2babeb4764d94f8e65efd22e6a7c74860a4f05a6947afc020ff2", "f16814a4a96dc04bf1da7d53ee8d5b1d6decfc1a92a63349bb15d37b6a263dd9", "f2b22153870ca5cf2ab9c940d7bc38e8e9089fa0f7e5856ea195e1cf4ff43d5a", "f790f8b3dff3d53453de6a7b7ddd173d2e020fb160baff578d578065b108a05f"] 518 | markdown = ["2e50876bcdd74517e7b71f3e7a76102050edec255b3983403f1a63e7c8a41e7a", "56a46ac655704b91e5b7e6326ce43d5ef72411376588afa1dd90e881b83c7e8c"] 519 | matplotlib = ["08d9bc2e2acef42965256acd5015dc2c899cbd53e01bf4214c5510c7ea0efd2d", "1e0213f87cc0076f7b0c4c251d7e23601e2419cd98691df79edb95517ba06f0c", "1f31053f660df5f0310118d7f5bd1e8025170e9773f0bebe8fec486d0926adf6", "399bf6352633aeeb45ca55c6c943fa2738022fb17ae498c32a142ced0b41528d", "409a5894efb810d630d2512449c7a4394de9a4d15fc6394e26a409b17d9cc18c", "5c5ef5cf1bc8f483123102e2615644937af7d4c01d100acc72bf74a044a78717", "d0052be5cdfa27018bb08194b8812c47cb985d60eb682e1809c76e9600839516", "e7d6620d145ca9f6c3e88248e5734b6fda430e75e70755b887e48f8e9bc1de2a", "f3d8b6bccc577e4e5ecbd58fdd63cacb8e58f0ed1e97616a7f7a7baaf4b8d036"] 520 | more-itertools = ["2112d2ca570bb7c3e53ea1a35cd5df42bb0fd10c45f0fb97178679c3c03d64c7", "c3e4748ba1aad8dba30a4886b0b1a2004f9a863837b8654e7059eebf727afa5a"] 521 | mpi4py = ["014076ffa558bc8d1d82c820c94848ae5f9fe1aab3c9e0a18d80e0c339a4bbe4", "020dbf8c8d2b95b6098c6a66352907afed1c449d811fd085247d5ee244890bb1", "06514c4205e1de84d04c780ab6aa8751121203dd246a45b120817c4444bed341", "0bcd7acb12c7e830267f9d3df13da0576ccf1603fb1c9f940e600ceefbe69200", "1c83daae9a99908109200b29c9cfd93e7c0dc9cad50bef15f0ea85642c288746", "39807cca8195b0c1e43dc9a3e1d80ef4b7cdc66a9f19a184ce7c28d8b42b7f4a", "45b5674d0d630c31bbb94abd9563202ecd83e72a2c54ee719b9813d3a5938767", "4f2f6f5cdece7a95b53bfc884ff9201e270ca386f8c53b54ff2bec799e5b8e0c", "5c1b377022a43e515812f6064d7b1ec01fd61027592aa16e5ad5e14f27f8db3a", "baa8a41f5bddbf581f521fc68db1a297fe24a0256c36bf7dd22fcb3e2cc93ea1", "c105ac976e1605a6883db06a37b0dfac497b210de6d8569dc6d23af33597f145", 
"e452b96ff879700dcbcef19d145190d56621419e4fbc73e43998b2e692dc6eeb", "f8d629d1e3e3b7b89cb99d0e3bc5505e76cc42089829807950d5c56606ed48e0"] 522 | numpy = ["0778076e764e146d3078b17c24c4d89e0ecd4ac5401beff8e1c87879043a0633", "141c7102f20abe6cf0d54c4ced8d565b86df4d3077ba2343b61a6db996cefec7", "14270a1ee8917d11e7753fb54fc7ffd1934f4d529235beec0b275e2ccf00333b", "27e11c7a8ec9d5838bc59f809bfa86efc8a4fd02e58960fa9c49d998e14332d5", "2a04dda79606f3d2f760384c38ccd3d5b9bb79d4c8126b67aff5eb09a253763e", "3c26010c1b51e1224a3ca6b8df807de6e95128b0908c7e34f190e7775455b0ca", "52c40f1a4262c896420c6ea1c6fda62cf67070e3947e3307f5562bd783a90336", "6e4f8d9e8aa79321657079b9ac03f3cf3fd067bf31c1cca4f56d49543f4356a5", "7242be12a58fec245ee9734e625964b97cf7e3f2f7d016603f9e56660ce479c7", "7dc253b542bfd4b4eb88d9dbae4ca079e7bf2e2afd819ee18891a43db66c60c7", "94f5bd885f67bbb25c82d80184abbf7ce4f6c3c3a41fbaa4182f034bba803e69", "a89e188daa119ffa0d03ce5123dee3f8ffd5115c896c2a9d4f0dbb3d8b95bfa3", "ad3399da9b0ca36e2f24de72f67ab2854a62e623274607e37e0ce5f5d5fa9166", "b0348be89275fd1d4c44ffa39530c41a21062f52299b1e3ee7d1c61f060044b8", "b5554368e4ede1856121b0dfa35ce71768102e4aa55e526cb8de7f374ff78722", "cbddc56b2502d3f87fda4f98d948eb5b11f36ff3902e17cb6cc44727f2200525", "d79f18f41751725c56eceab2a886f021d70fd70a6188fd386e29a045945ffc10", "dc2ca26a19ab32dc475dbad9dfe723d3a64c835f4c23f625c2b6566ca32b9f29", "dd9bcd4f294eb0633bb33d1a74febdd2b9018b8b8ed325f861fffcd2c7660bb8", "e8baab1bc7c9152715844f1faca6744f2416929de10d7639ed49555a85549f52", "ec31fe12668af687b99acf1567399632a7c47b0e17cfb9ae47c098644ef36797", "f12b4f7e2d8f9da3141564e6737d79016fe5336cc92de6814eba579744f65b0a", "f58ac38d5ca045a377b3b377c84df8175ab992c970a53332fa8ac2373df44ff7"] 523 | opencv-python = ["1703a296a96d3d46615e5053f224867977accb4240bcaa0fcabcb0768bf5ac13", "1777ce7535ee7a1995cae168a107a1320e9df13648b930e72a1a2c2eccd64cda", "1e5520482fb18fbd64d079e7f17ac0018f195fd75f6360a53bb82d7903106b50", "25522dcf2529614750a71112a6659759080b4bdc2323f19d47f4d895960fd796", "2af5f2842ad44c65ae2647377e0ff198719e1a1cfc9c6a19bc0c525c035d4bd8", "31ec48d7eca13fc25c287dea7cecab453976e372cad8f50d55c054a247efda21", "47cf48ff5dbd554e9f58cc9e98cf0b5de3f6a971172612bffa06bc5fb79ce872", "494f98366bb5d6c2ac7e50e6617139f353704fd97a6d12ec9d392e72817d5cb0", "4a9845870739e640e3350a8d98d511c92c087fe3d66090e83be7bf94e0ac64f7", "4ac29cc0847d948a6636899014e84e165c30cc8779d6218394d44363462a01ce", "5857ace03b7854221abf8072462d306c2c2ce4e366190b21d90ee8ee8aaf5bb4", "5b4a23d99d5a2874767034466f5a8fd37b9f93ac14955a01b1a208983c76b9ad", "734d87a5021c037064beb62133e135e66c7128e401a63b8b842b809ae2093749", "78005c1c5d15ef4e32e0f485557bd15b5b6d87f49c19db7fe3e9246a61ebe7e4", "81ae2283225c5c52fc3d72debd4241c30ccff2bb922578bf7867f9851cce3acb", "88dbf900f297fdae0f62b899d6a784d8868ec2135854c5f8a9abbad00a6f0c5b", "8c98ea7b8d327a31cd6028782a06147d0e0329ae8e829e881fb5d02f7ed8aec9", "937d4686fef6967921145290f5b50c01c00c5b5d3542a6519e8a85cd88448723", "a057958c0e362b3c4f03b9af1cbdb6d5af035fd22ecd7fd794eba8fdeb049eb8", "c41eab31fa2c641226c6187caa391a688d064c99f078d604574f1912296b771f", "cf4f7e62d1f80d1fa85a1693a3500def5cde54b2b75212b3609e552e4c25acfb", "d90d60143e18334330c149f293071c9f2f3c79c896f33dc4ec65099e58baaaa7", "db3106b7ca86999a7bd1f2fcc93e49314e5e6e451356774e421a69428df5020b", "dbaf264db56f4771dfac6624f438bc4dc670aa94f61a6138848fcab7e9e77380", "e65206c4cf651dc9cf0829962fae8bec986767c9f123d6a1ad17f9356bf7257e", "eac94ddc78c58e891cff7180274317dad2938a4ddfc6ced1c04846c7f50e77e9", 
"f2e828711f044a965509c862b3a59b3181e9c56c145a950cb53d43fec54e66d2"] 524 | packaging = ["0c98a5d0be38ed775798ece1b9727178c4469d9c3b4ada66e8e6b7849f8732af", "9e1cbf8c12b1f1ce0bb5344b8d7ecf66a6f8a6e91bcb0c84593ed6d3ab5c4ab3"] 525 | pandas = ["071e42b89b57baa17031af8c6b6bbd2e9a5c68c595bc6bf9adabd7a9ed125d3b", "17450e25ae69e2e6b303817bdf26b2cd57f69595d8550a77c308be0cd0fd58fa", "17916d818592c9ec891cbef2e90f98cc85e0f1e89ed0924c9b5220dc3209c846", "2538f099ab0e9f9c9d09bbcd94b47fd889bad06dc7ae96b1ed583f1dc1a7a822", "366f30710172cb45a6b4f43b66c220653b1ea50303fbbd94e50571637ffb9167", "42e5ad741a0d09232efbc7fc648226ed93306551772fc8aecc6dce9f0e676794", "4e718e7f395ba5bfe8b6f6aaf2ff1c65a09bb77a36af6394621434e7cc813204", "4f919f409c433577a501e023943e582c57355d50a724c589e78bc1d551a535a2", "4fe0d7e6438212e839fc5010c78b822664f1a824c0d263fd858f44131d9166e2", "5149a6db3e74f23dc3f5a216c2c9ae2e12920aa2d4a5b77e44e5b804a5f93248", "627594338d6dd995cfc0bacd8e654cd9e1252d2a7c959449228df6740d737eb8", "83c702615052f2a0a7fb1dd289726e29ec87a27272d775cb77affe749cca28f8", "8c872f7fdf3018b7891e1e3e86c55b190e6c5cee70cab771e8f246c855001296", "90f116086063934afd51e61a802a943826d2aac572b2f7d55caaac51c13db5b5", "a3352bacac12e1fc646213b998bce586f965c9d431773d9e91db27c7c48a1f7d", "bcdd06007cca02d51350f96debe51331dec429ac8f93930a43eb8fb5639e3eb5", "c1bd07ebc15285535f61ddd8c0c75d0d6293e80e1ee6d9a8d73f3f36954342d0", "c9a4b7c55115eb278c19aa14b34fcf5920c8fe7797a09b7b053ddd6195ea89b3", "cc8fc0c7a8d5951dc738f1c1447f71c43734244453616f32b8aa0ef6013a5dfb", "d7b460bc316064540ce0c41c1438c416a40746fd8a4fb2999668bf18f3c4acf1"] 526 | pillow = ["0683e80d81e840d401b687ebc00a02bbb23d0793c34d0852a5af64cfa1589540", "09c4e81c3277199898e8dc2d85d94febad87c41251ecbd447ba7d64d94765bd8", "0ee74a23022af9baf997e3016b4e090e4ff08688d37a6f49010338ab46cfe101", "10860baedfe5da7c43cd17835b091494dcc59dda5ad176a011713fe398ea6ac2", "15c056bfa284c30a7f265a41ac4cbbc93bdbfc0dfe0613b9cb8a8581b51a9e55", "1a4e06ba4f74494ea0c58c24de2bb752818e9d504474ec95b0aa94f6b0a7e479", "1c3c707c76be43c9e99cb7e3d5f1bee1c8e5be8b8a2a5eeee665efbf8ddde91a", "1fd0b290203e3b0882d9605d807b03c0f47e3440f97824586c173eca0aadd99d", "24114e4a6e1870c5a24b1da8f60d0ba77a0b4027907860188ea82bd3508c80eb", "258d886a49b6b058cd7abb0ab4b2b85ce78669a857398e83e8b8e28b317b5abb", "2734c55f7d054b0ad889c971136cbb0a5b35a921e27beaa44fdc2436af529c6e", "2ac36ec56727a95bd5a04dfca6abce1db8042c31ee73b65796a42f31fd52d009", "2bc1002b573d107c0b172a5da0f34b4900b2ddc6c3296b82d601e966d5ac1959", "33c79b6dd6bc7f65079ab9ca5bebffb5f5d1141c689c9c6a7855776d1b09b7e8", "367385fc797b2c31564c427430c7a8630db1a00bd040555dfc1d5c52e39fcd72", "3c1884ff078fb8bf5f63d7d86921838b82ed4a7d0c027add773c2f38b3168754", "44e5240e8f4f8861d748f2a58b3f04daadab5e22bfec896bf5434745f788f33f", "46aa988e15f3ea72dddd81afe3839437b755fffddb5e173886f11460be909dce", "492e1e4df823b57f9334f591c78a1e0e65a361e92594534e0568eeeeea56bbba", "50fb9e25d25cfcb50b2e6842c4e104e4f0b424be4624e1724532bf005c67589a", "5ceadd60dbd1e56ab7faffbfee1df5ecb83c3f0420e47f652cd5306d70eb0296", "74d90d499c9c736d52dd6d9b7221af5665b9c04f1767e35f5dd8694324bd4601", "7eeac51fc37e6b19631a4b8e38b8261a074efcf7cc27fc16a6bee4697af7aaa5", "809c0a2ce9032cbcd7b5313f71af4bdc5c8c771cb86eb7559afd954cab82ebb5", "85d1ef2cdafd5507c4221d201aaf62fc9276f8b0f71bd3933363e62a33abc734", "8c3889c7681af77ecfa4431cd42a2885d093ecb811e81fbe5e203abc07e0995b", "9218d81b9fca98d2c47d35d688a0cea0c42fd473159dfd5612dcb0483c63e40b", "9319215530e236822169cbe92426cdc18d16b88c943fdf365a6309a89876e335", 
"96ec275c83bf839972d6a7dd7d685fdfb6a3233c3c382ecff839d04e7d53955d", "9aa4f3827992288edd37c9df345783a69ef58bd20cc02e64b36e44bcd157bbf1", "9d80f44137a70b6f84c750d11019a3419f409c944526a95219bea0ac31f4dd91", "b7ebd36128a2fe93991293f997e44be9286503c7530ace6a55b938b20be288d8", "c30857e1fbf7d4a4b79d7d376eefaf293ea4307b8293d00a62e6f517f51bfe9b", "c4c78e2c71c257c136cdd43869fd3d5e34fc2162dc22e4a5406b0ebe86958239", "c5472ea3945e8f9eb0659f37fc1f592fd06f4f725f0f03774a8999ad8c130334", "c6a842537f887be1fe115d8abb5daa9bc8cc124e455ff995830cc785624a97af", "cf0a2e040fdf5a6d95f4c286c6ef1df6b36c218b528c8a9158ec2452a804b9b8", "cfd28aad6fc61f7a5d4ee556a997dc6e5555d9381d1390c00ecaf984d57e4232", "d0fd1ec2e7c3e0aeaae999efe83f5d0f42c1160a1f8be5120d40857d20baa452", "dca5660e25932771460d4688ccbb515677caaf8595f3f3240ec16c117deff89a", "de7aedc85918c2f887886442e50f52c1b93545606317956d65f342bd81cb4fc3", "e6c0bbf8e277b74196e3140c35f9a1ae3eafd818f7f2d3a15819c49135d6c062"] 527 | pluggy = ["0825a152ac059776623854c1543d65a4ad408eb3d33ee114dff91e57ec6ae6fc", "b9817417e95936bf75d85d3f8767f7df6cdde751fc40aed3bb3074cbcb77757c"] 528 | protobuf = ["03f43eac9d5b651f976e91cf46a25b75e5779d98f0f4114b0abfed83376d75f8", "0c94b21e6de01362f91a86b372555d22a60b59708599ca9d5032ae9fdf8e3538", "2d2a9f30f61f4063fadd7fb68a2510a6939b43c0d6ceeec5c4704f22225da28e", "34a0b05fca061e4abb77dd180209f68d8637115ff319f51e28a6a9382d69853a", "358710fd0db25372edcf1150fa691f48376a134a6c69ce29f38f185eea7699e6", "3761ab21883f1d3add8643413b326a0026776879b13ecf904e1e05fe18532c03", "41e47198b94c27ba05a08b4a95160656105745c462af574e4bcb0807164065c0", "8c61cc8a76e9d381c665aecc5105fa0f1878cf7db8b5cd17202603bcb386d0fc", "a6eebc4db759e58fdac02efcd3028b811effac881d8a5bad1996e4e8ee6acb47", "a9c12f7c98093da0a46ba76ec40ace725daa1ac4038c41e4b1466afb5c45bb01", "cb95068492ba0859b8c9e61fa8ba206a83c64e5d0916fb4543700b2e2b214115", "cd98476ce7bb4dcd6a7b101f5eecdc073dafea19f311e36eb8fba1a349346277", "ce64cfbea18c535176bdaa10ba740c0fc4c6d998a3f511c17bedb0ae4b3b167c", "dcbb59eac73fd454e8f2c5fba9e3d3320fd4707ed6a9d3ea3717924a6f0903ea", "dd67f34458ae716029e2a71ede998e9092493b62a519236ca52e3c5202096c87", "e3c96056eb5b7284a20e256cb0bf783c8f36ad82a4ae5434a7b7cd02384144a7", "f612d584d7a27e2f39e7b17878430a959c1bc09a74ba09db096b468558e5e126", "f6de8a7d6122297b81566e5bd4df37fd5d62bec14f8f90ebff8ede1c9726cd0a", "fa529d9261682b24c2aaa683667253175c9acebe0a31105394b221090da75832"] 529 | py = ["64f65755aee5b381cea27766a3a147c3f15b9b6b9ac88676de66ba2ae36793fa", "dc639b046a6e2cff5bbe40194ad65936d6ba360b52b3c3fe1d08a82dd50b5e53"] 530 | pyglet = ["8b07aea16f34ac861cffd06a0c17723ca944d172e577b57b21859b7990709a66", "b00570e7cdf6971af8953b6ece50d83d13272afa5d1f1197c58c0f478dd17743"] 531 | pyparsing = ["1873c03321fc118f4e9746baf201ff990ceb915f433f23b395f5580d1840cb2a", "9b6323ef4ab914af344ba97510e966d64ba91055d6b9afa6b30799340e89cc03"] 532 | pytest = ["4a784f1d4f2ef198fe9b7aef793e9fa1a3b2f84e822d9b3a64a181293a572d45", "926855726d8ae8371803f7b2e6ec0a69953d9c6311fa7c3b6c1b929ff92d27da"] 533 | python-dateutil = ["7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb", "c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e"] 534 | pytz = ["303879e36b721603cc54604edcac9d20401bdbe31e1e4fdee5b9f98d5d31dfda", "d747dd3d23d77ef44c6a3526e274af6efeb0a6f1afd5a69ba4d5be4098c8e141"] 535 | scipy = ["03b1e0775edbe6a4c64effb05fff2ce1429b76d29d754aa5ee2d848b60033351", "09d008237baabf52a5d4f5a6fcf9b3c03408f3f61a69c404472a16861a73917e", "10325f0ffac2400b1ec09537b7e403419dcd25d9fee602a44e8a32119af9079e", 
"1db9f964ed9c52dc5bd6127f0dd90ac89791daa690a5665cc01eae185912e1ba", "409846be9d6bdcbd78b9e5afe2f64b2da5a923dd7c1cd0615ce589489533fdbb", "4907040f62b91c2e170359c3d36c000af783f0fa1516a83d6c1517cde0af5340", "6c0543f2fdd38dee631fb023c0f31c284a532d205590b393d72009c14847f5b1", "826b9f5fbb7f908a13aa1efd4b7321e36992f5868d5d8311c7b40cf9b11ca0e7", "a7695a378c2ce402405ea37b12c7a338a8755e081869bd6b95858893ceb617ae", "a84c31e8409b420c3ca57fd30c7589378d6fdc8d155d866a7f8e6e80dec6fd06", "adadeeae5500de0da2b9e8dd478520d0a9945b577b2198f2462555e68f58e7ef", "b283a76a83fe463c9587a2c88003f800e08c3929dfbeba833b78260f9c209785", "c19a7389ab3cd712058a8c3c9ffd8d27a57f3d84b9c91a931f542682bb3d269d", "c3bb4bd2aca82fb498247deeac12265921fe231502a6bc6edea3ee7fe6c40a7a", "c5ea60ece0c0c1c849025bfc541b60a6751b491b6f11dd9ef37ab5b8c9041921", "db61a640ca20f237317d27bc658c1fc54c7581ff7f6502d112922dc285bdabee"] 536 | six = ["3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73"] 537 | stable-baselines = ["8ec47f426b3f7717303c0160d2548eb57e65ebaa3d306ed25c02fd926cc71d7c", "feb484e5552c325182fb7e6cf1313acc40a88edceeb2f1585127a57a27f59de3"] 538 | tensorboard = ["6f194519f41762bfdf5eb410ccf33226d1c252caf5ad8893288648bfbcf4d135", "81170f66bf8f95c2e9f6b3fefe0ddc5472655a9e3793e73b5b5d4ec0ba395e76"] 539 | tensorflow = ["17f406ce35801e438229b102b9e82251c2a5c6fb1384702e1b56cf667b01f921", "1e22f267221a3ec7cbd2d0638887ce162152e2d55e2d9e6f457609f5ff93c71c", "253928f4dfe72a3571a811cc0277f2d2663183f50c2da926dea6e7ea6ea45db7", "50b33261a05fe7b7f1f29e79f4281a99e19fb847912ff8df09dafad099eb9736", "74bc3259d22b59f10fdc9731a43a375d72fa6180bbfac85241ae3a8abd3900ab", "a1311b990bc9e794edcab92009e8dd24b5907f5ba1041c387c702bb04393389f", "a6ac81d6ea529255bd403a9557cd6dde57a165dd1395ce0573108f36a3aa0503", "b2eab8f81a62a78bd0011e1ba7dca3846962fd14374f8b215cd36e0389290d85", "ba36994431dbee4e46f2ac19fcf3d8b8c08de88ecbcf39a73c3cfaf33470f460", "c0a1c44322955532468b164029a242bace4e8d6f5a36c4ceeffe1bdf6b0ef3c4", "d0d660c434a9224534ec8bfee1f9d0c03f35ab2fe75e2ba874b6450a6368e68d", "e0d95f32d9eca232e91460b625c6d01004d292cfe8b10b9d14c7479049916d19"] 540 | termcolor = ["1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"] 541 | wcwidth = ["3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e", "f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c"] 542 | werkzeug = ["865856ebb55c4dcd0630cdd8f3331a1847a819dda7e8c750d3db6f2aa6c0209c", "a0b915f0815982fb2a09161cb8f31708052d0951c3ba433ccc5e1aa276507ca6"] 543 | wheel = ["5e79117472686ac0c4aef5bad5172ea73a1c2d1646b808c35926bd26bdfb0c08", "62fcfa03d45b5b722539ccbc07b190e4bfff4bb9e3a4d470dd9f6a0981002565"] 544 | zipp = ["8c1019c6aad13642199fbe458275ad6a84907634cc9f0989877ccc4a2840139d", "ca943a7e809cc12257001ccfb99e3563da9af99d52f261725e96dfe0f9275bc3"] 545 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "19bcd55434df4176fec7ffa56c5752db7c195720d87f73ce11e4dda6677133d3" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.6" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "absl-py": { 20 | "hashes": [ 21 | "sha256:b943d1c567743ed0455878fcd60bc28ac9fae38d129d1ccfad58079da00b8951" 22 | ], 23 | "version": 
"==0.7.1" 24 | }, 25 | "astor": { 26 | "hashes": [ 27 | "sha256:0e41295809baf43ae8303350e031aff81ae52189b6f881f36d623fa8b2f1960e", 28 | "sha256:37a6eed8b371f1228db08234ed7f6cfdc7817a3ed3824797e20cbb11dc2a7862" 29 | ], 30 | "version": "==0.8.0" 31 | }, 32 | "atari-py": { 33 | "hashes": [ 34 | "sha256:591542da26733e4a2ec7ae26f7d1785b4a085823484f51f2e48bd42c6eb52e69", 35 | "sha256:903cddba8c3262673a86ead75ddab86819fdc232991159ee18e7444937f3391c", 36 | "sha256:92676e1aa77049d7a665b1d48d9549516ea644977cb840f45468809536c875e4", 37 | "sha256:d13d6f4d136abc816c8c9d035df88554bc641db4cf2878cdccb295b68434aa07", 38 | "sha256:f00e77e0e22fcec44e1a02fb59cd49cbe5f1061d87458abeb7517f4ffecf99cd", 39 | "sha256:f0f41e5b340bce1fa76e5b591cbd50c6b0cadfc0a94d9099a39370635e2c88a1" 40 | ], 41 | "version": "==0.1.15" 42 | }, 43 | "cloudpickle": { 44 | "hashes": [ 45 | "sha256:603244e0f552b72a267d47a7d9b347b27a3430f58a0536037a290e7e0e212ecf", 46 | "sha256:b8ba7e322f2394b9bbbdc1c976e6442c2c02acc784cb9e553cee9186166a6890" 47 | ], 48 | "version": "==1.2.1" 49 | }, 50 | "cycler": { 51 | "hashes": [ 52 | "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", 53 | "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8" 54 | ], 55 | "version": "==0.10.0" 56 | }, 57 | "future": { 58 | "hashes": [ 59 | "sha256:67045236dcfd6816dc439556d009594abf643e5eb48992e36beac09c2ca659b8" 60 | ], 61 | "version": "==0.17.1" 62 | }, 63 | "gast": { 64 | "hashes": [ 65 | "sha256:fe939df4583692f0512161ec1c880e0a10e71e6a232da045ab8edd3756fbadf0" 66 | ], 67 | "version": "==0.2.2" 68 | }, 69 | "grpcio": { 70 | "hashes": [ 71 | "sha256:0232add03144dd3cf9b660e2718244cb8e175370dca4d3855cb4e489a7811b53", 72 | "sha256:0f20e6dcb1b8662cdca033bb97c0a8116a5343e3ebc7f71c5fe7f89039978350", 73 | "sha256:10b07a623d33d4966f45c85d410bc6a79c5ac6341f06c3beda6c22be12cbfe07", 74 | "sha256:10c0476d5a52d21f402fc073745dc43b87cc8e080a1f49bbff4e1059019310fb", 75 | "sha256:289dae0b35c59d191c524e976dd0a6f8c995d2062e72621eb866ad0f4472a635", 76 | "sha256:2be726f16142d358a0df1e81d583d6820ee561a7856a79cca2fbe49989308be7", 77 | "sha256:4338d2a81f5b4ca022e085040b3cfce19419a5ce44aa7e6810ac1df05365bed7", 78 | "sha256:4c535b46f20e66bee3097583231977e721acdfcb1671d1490c99b7be8902ce18", 79 | "sha256:557154aef70a0e979700cc9528bc8b606b668084a29a0d57dbc4b06b078a2f1c", 80 | "sha256:5bfdd7e6647498f979dc46583723c852d97b25afe995d55aa1c76a5f9816bc1f", 81 | "sha256:87d8943ae7aa6ca5bbad732867d7f17d2550e4966a0c15b52088e8b579422e47", 82 | "sha256:89d8719d8de4d137678f7caa979e1b0a6fd4026f8096ceef8c2d164bbabefaf2", 83 | "sha256:9c3f4af989ce860710ac1864dc2e867dd87e6cee51a2368df1b253596868e52f", 84 | "sha256:9da52c3c728883aee429bb7c315049f50b2139f680cd86bb1165418e4f93a982", 85 | "sha256:9e9736659987beab42d18525ed10d21f80a1ba8389eac03425fbfd5684e6bbf0", 86 | "sha256:9ebcbb1a054cab362d29d3be571d43d6b9b23302d9fc4b43e5327000da1680a9", 87 | "sha256:a93e08636623e24c939851e2e0c0140b14f524b2980c9cdc4ea52b70a871c7e0", 88 | "sha256:ac322d86d1a079e0a118d544443ee16f320af0062c191b4754c0c6ec2fc79310", 89 | "sha256:b1fb101459868f52df6b61e7bb13375e50badf17a160e39fe1d51ae19e53f461", 90 | "sha256:b39aac96cceac624a23d540473835086a3ffa77c91030189988c073488434493", 91 | "sha256:b65507bc273c6dbf539175a786a344cc0ac78d50e5584f72c6599733f8a3301f", 92 | "sha256:be5bb6e47417e537c884a2e2ff2e1a8b2c064a998fcfdfcc67528d4e63e7ebaf", 93 | "sha256:c92de6a28a909c4f460dc1bbbcb50d676cf0b1f40224b222761f73fdd851b522", 94 | "sha256:c9f5962eb7fa7607b20eb0e4f59ed35829bd600fc0eacb626a6db83229a3e445", 95 | 
"sha256:d00bdf9c546ed6e649f785c55b05288e8b2dbb6bf2eb74b6c579fa0d591d35bd", 96 | "sha256:da804b1dd8293bd9d61b1e6ea989c887ba042a808a4fbdd80001cfa059aafed2", 97 | "sha256:ead6c5aa3e807345913649c3be395aaca2bbb2d225f18b8f31f37eab225508f6", 98 | "sha256:eb4d81550ce6f826af4ec6e8d98be347fe96291d718bf115c3f254621ae8d98d", 99 | "sha256:ef6a18ec8fd32ec81748fe720544ea2fb2d2dc50fd6d06739d5e2eb8f0626a1c", 100 | "sha256:fad42835656e0b6d3b7ffc900598e776722e30f43b7234a48f2576ca30f31a47", 101 | "sha256:fb98dbfee0d963b49ae5754554028cf62e6bd695f22de16d242ba9d2f0b7339b", 102 | "sha256:fb9cd9bb8d26dc17c2dd715a46bca3a879ec8283879b164e85863110dc6e3b2a" 103 | ], 104 | "version": "==1.21.1" 105 | }, 106 | "gym": { 107 | "extras": [ 108 | "atari", 109 | "classic_control" 110 | ], 111 | "hashes": [ 112 | "sha256:027422f59b662748eae3420b804e35bbf953f62d40cd96d2de9f842c08de822e" 113 | ], 114 | "index": "pypi", 115 | "version": "==0.12.5" 116 | }, 117 | "h5py": { 118 | "hashes": [ 119 | "sha256:05750b91640273c69989c657eaac34b091abdd75efc8c4824c82aaf898a2da0a", 120 | "sha256:082a27208aa3a2286e7272e998e7e225b2a7d4b7821bd840aebf96d50977abbb", 121 | "sha256:08e2e8297195f9e813e894b6c63f79372582787795bba2014a2db6a2de95f713", 122 | "sha256:0dd2adeb2e9de5081eb8dcec88874e7fd35dae9a21557be3a55a3c7d491842a4", 123 | "sha256:0f94de7a10562b991967a66bbe6dda9808e18088676834c0a4dcec3fdd3bcc6f", 124 | "sha256:106e42e2e01e486a3d32eeb9ba0e3a7f65c12fa8998d63625fa41fb8bdc44cdb", 125 | "sha256:1606c66015f04719c41a9863c156fc0e6b992150de21c067444bcb82e7d75579", 126 | "sha256:1854c4beff9961e477e133143c5e5e355dac0b3ebf19c52cf7cc1b1ef757703c", 127 | "sha256:1e9fb6f1746500ea91a00193ce2361803c70c6b13f10aae9a33ad7b5bd28e800", 128 | "sha256:2cca17e80ddb151894333377675db90cd0279fa454776e0a4f74308376afd050", 129 | "sha256:30e365e8408759db3778c361f1e4e0fe8e98a875185ae46c795a85e9bafb9cdf", 130 | "sha256:3206bac900e16eda81687d787086f4ffd4f3854980d798e191a9868a6510c3ae", 131 | "sha256:3c23d72058647cee19b30452acc7895621e2de0a0bd5b8a1e34204b9ea9ed43c", 132 | "sha256:407b5f911a83daa285bbf1ef78a9909ee5957f257d3524b8606be37e8643c5f0", 133 | "sha256:4162953714a9212d373ac953c10e3329f1e830d3c7473f2a2e4f25dd6241eef0", 134 | "sha256:5fc7aba72a51b2c80605eba1c50dbf84224dcd206279d30a75c154e5652e1fe4", 135 | "sha256:713ac19307e11de4d9833af0c4bd6778bde0a3d967cafd2f0f347223711c1e31", 136 | "sha256:71b946d80ef3c3f12db157d7778b1fe74a517ca85e94809358b15580983c2ce2", 137 | "sha256:8cc4aed71e20d87e0a6f02094d718a95252f11f8ed143bc112d22167f08d4040", 138 | "sha256:9d41ca62daf36d6b6515ab8765e4c8c4388ee18e2a665701fef2b41563821002", 139 | "sha256:a744e13b000f234cd5a5b2a1f95816b819027c57f385da54ad2b7da1adace2f3", 140 | "sha256:b087ee01396c4b34e9dc41e3a6a0442158206d383c19c7d0396d52067b17c1cb", 141 | "sha256:b0f03af381d33306ce67d18275b61acb4ca111ced645381387a02c8a5ee1b796", 142 | "sha256:b9e4b8dfd587365bdd719ae178fa1b6c1231f81280b1375eef8626dfd8761bf3", 143 | "sha256:c5dd4ec75985b99166c045909e10f0534704d102848b1d9f0992720e908928e7", 144 | "sha256:d2b82f23cd862a9d05108fe99967e9edfa95c136f532a71cb3d28dc252771f50", 145 | "sha256:e58a25764472af07b7e1c4b10b0179c8ea726446c7141076286e41891bf3a563", 146 | "sha256:f3b49107fbfc77333fc2b1ef4d5de2abcd57e7ea3a1482455229494cf2da56ce" 147 | ], 148 | "version": "==2.9.0" 149 | }, 150 | "joblib": { 151 | "hashes": [ 152 | "sha256:21e0c34a69ad7fde4f2b1f3402290e9ec46f545f15f1541c582edfe05d87b63a", 153 | "sha256:315d6b19643ec4afd4c41c671f9f2d65ea9d787da093487a81ead7b0bac94524" 154 | ], 155 | "version": "==0.13.2" 156 | }, 157 | "keras-applications": { 158 
| "hashes": [ 159 | "sha256:5579f9a12bcde9748f4a12233925a59b93b73ae6947409ff34aa2ba258189fe5", 160 | "sha256:df4323692b8c1174af821bf906f1e442e63fa7589bf0f1230a0b6bdc5a810c95" 161 | ], 162 | "version": "==1.0.8" 163 | }, 164 | "keras-preprocessing": { 165 | "hashes": [ 166 | "sha256:44aee5f2c4d80c3b29f208359fcb336df80f293a0bb6b1c738da43ca206656fb", 167 | "sha256:5a8debe01d840de93d49e05ccf1c9b81ae30e210d34dacbcc47aeb3049b528e5" 168 | ], 169 | "version": "==1.1.0" 170 | }, 171 | "kiwisolver": { 172 | "hashes": [ 173 | "sha256:05b5b061e09f60f56244adc885c4a7867da25ca387376b02c1efc29cc16bcd0f", 174 | "sha256:26f4fbd6f5e1dabff70a9ba0d2c4bd30761086454aa30dddc5b52764ee4852b7", 175 | "sha256:3b2378ad387f49cbb328205bda569b9f87288d6bc1bf4cd683c34523a2341efe", 176 | "sha256:400599c0fe58d21522cae0e8b22318e09d9729451b17ee61ba8e1e7c0346565c", 177 | "sha256:47b8cb81a7d18dbaf4fed6a61c3cecdb5adec7b4ac292bddb0d016d57e8507d5", 178 | "sha256:53eaed412477c836e1b9522c19858a8557d6e595077830146182225613b11a75", 179 | "sha256:58e626e1f7dfbb620d08d457325a4cdac65d1809680009f46bf41eaf74ad0187", 180 | "sha256:5a52e1b006bfa5be04fe4debbcdd2688432a9af4b207a3f429c74ad625022641", 181 | "sha256:5c7ca4e449ac9f99b3b9d4693debb1d6d237d1542dd6a56b3305fe8a9620f883", 182 | "sha256:682e54f0ce8f45981878756d7203fd01e188cc6c8b2c5e2cf03675390b4534d5", 183 | "sha256:79bfb2f0bd7cbf9ea256612c9523367e5ec51d7cd616ae20ca2c90f575d839a2", 184 | "sha256:7f4dd50874177d2bb060d74769210f3bce1af87a8c7cf5b37d032ebf94f0aca3", 185 | "sha256:8944a16020c07b682df861207b7e0efcd2f46c7488619cb55f65882279119389", 186 | "sha256:8aa7009437640beb2768bfd06da049bad0df85f47ff18426261acecd1cf00897", 187 | "sha256:939f36f21a8c571686eb491acfffa9c7f1ac345087281b412d63ea39ca14ec4a", 188 | "sha256:9733b7f64bd9f807832d673355f79703f81f0b3e52bfce420fc00d8cb28c6a6c", 189 | "sha256:a02f6c3e229d0b7220bd74600e9351e18bc0c361b05f29adae0d10599ae0e326", 190 | "sha256:a0c0a9f06872330d0dd31b45607197caab3c22777600e88031bfe66799e70bb0", 191 | "sha256:acc4df99308111585121db217681f1ce0eecb48d3a828a2f9bbf9773f4937e9e", 192 | "sha256:b64916959e4ae0ac78af7c3e8cef4becee0c0e9694ad477b4c6b3a536de6a544", 193 | "sha256:d3fcf0819dc3fea58be1fd1ca390851bdb719a549850e708ed858503ff25d995", 194 | "sha256:d52e3b1868a4e8fd18b5cb15055c76820df514e26aa84cc02f593d99fef6707f", 195 | "sha256:db1a5d3cc4ae943d674718d6c47d2d82488ddd94b93b9e12d24aabdbfe48caee", 196 | "sha256:e3a21a720791712ed721c7b95d433e036134de6f18c77dbe96119eaf7aa08004", 197 | "sha256:e8bf074363ce2babeb4764d94f8e65efd22e6a7c74860a4f05a6947afc020ff2", 198 | "sha256:f16814a4a96dc04bf1da7d53ee8d5b1d6decfc1a92a63349bb15d37b6a263dd9", 199 | "sha256:f2b22153870ca5cf2ab9c940d7bc38e8e9089fa0f7e5856ea195e1cf4ff43d5a", 200 | "sha256:f790f8b3dff3d53453de6a7b7ddd173d2e020fb160baff578d578065b108a05f" 201 | ], 202 | "version": "==1.1.0" 203 | }, 204 | "markdown": { 205 | "hashes": [ 206 | "sha256:2e50876bcdd74517e7b71f3e7a76102050edec255b3983403f1a63e7c8a41e7a", 207 | "sha256:56a46ac655704b91e5b7e6326ce43d5ef72411376588afa1dd90e881b83c7e8c" 208 | ], 209 | "version": "==3.1.1" 210 | }, 211 | "matplotlib": { 212 | "hashes": [ 213 | "sha256:08d9bc2e2acef42965256acd5015dc2c899cbd53e01bf4214c5510c7ea0efd2d", 214 | "sha256:1e0213f87cc0076f7b0c4c251d7e23601e2419cd98691df79edb95517ba06f0c", 215 | "sha256:1f31053f660df5f0310118d7f5bd1e8025170e9773f0bebe8fec486d0926adf6", 216 | "sha256:399bf6352633aeeb45ca55c6c943fa2738022fb17ae498c32a142ced0b41528d", 217 | "sha256:409a5894efb810d630d2512449c7a4394de9a4d15fc6394e26a409b17d9cc18c", 218 | 
"sha256:5c5ef5cf1bc8f483123102e2615644937af7d4c01d100acc72bf74a044a78717", 219 | "sha256:d0052be5cdfa27018bb08194b8812c47cb985d60eb682e1809c76e9600839516", 220 | "sha256:e7d6620d145ca9f6c3e88248e5734b6fda430e75e70755b887e48f8e9bc1de2a", 221 | "sha256:f3d8b6bccc577e4e5ecbd58fdd63cacb8e58f0ed1e97616a7f7a7baaf4b8d036" 222 | ], 223 | "version": "==3.1.0" 224 | }, 225 | "mpi4py": { 226 | "hashes": [ 227 | "sha256:014076ffa558bc8d1d82c820c94848ae5f9fe1aab3c9e0a18d80e0c339a4bbe4", 228 | "sha256:020dbf8c8d2b95b6098c6a66352907afed1c449d811fd085247d5ee244890bb1", 229 | "sha256:06514c4205e1de84d04c780ab6aa8751121203dd246a45b120817c4444bed341", 230 | "sha256:0bcd7acb12c7e830267f9d3df13da0576ccf1603fb1c9f940e600ceefbe69200", 231 | "sha256:1c83daae9a99908109200b29c9cfd93e7c0dc9cad50bef15f0ea85642c288746", 232 | "sha256:39807cca8195b0c1e43dc9a3e1d80ef4b7cdc66a9f19a184ce7c28d8b42b7f4a", 233 | "sha256:45b5674d0d630c31bbb94abd9563202ecd83e72a2c54ee719b9813d3a5938767", 234 | "sha256:4f2f6f5cdece7a95b53bfc884ff9201e270ca386f8c53b54ff2bec799e5b8e0c", 235 | "sha256:5c1b377022a43e515812f6064d7b1ec01fd61027592aa16e5ad5e14f27f8db3a", 236 | "sha256:baa8a41f5bddbf581f521fc68db1a297fe24a0256c36bf7dd22fcb3e2cc93ea1", 237 | "sha256:c105ac976e1605a6883db06a37b0dfac497b210de6d8569dc6d23af33597f145", 238 | "sha256:e452b96ff879700dcbcef19d145190d56621419e4fbc73e43998b2e692dc6eeb", 239 | "sha256:f8d629d1e3e3b7b89cb99d0e3bc5505e76cc42089829807950d5c56606ed48e0" 240 | ], 241 | "version": "==3.0.2" 242 | }, 243 | "numpy": { 244 | "hashes": [ 245 | "sha256:0778076e764e146d3078b17c24c4d89e0ecd4ac5401beff8e1c87879043a0633", 246 | "sha256:141c7102f20abe6cf0d54c4ced8d565b86df4d3077ba2343b61a6db996cefec7", 247 | "sha256:14270a1ee8917d11e7753fb54fc7ffd1934f4d529235beec0b275e2ccf00333b", 248 | "sha256:27e11c7a8ec9d5838bc59f809bfa86efc8a4fd02e58960fa9c49d998e14332d5", 249 | "sha256:2a04dda79606f3d2f760384c38ccd3d5b9bb79d4c8126b67aff5eb09a253763e", 250 | "sha256:3c26010c1b51e1224a3ca6b8df807de6e95128b0908c7e34f190e7775455b0ca", 251 | "sha256:52c40f1a4262c896420c6ea1c6fda62cf67070e3947e3307f5562bd783a90336", 252 | "sha256:6e4f8d9e8aa79321657079b9ac03f3cf3fd067bf31c1cca4f56d49543f4356a5", 253 | "sha256:7242be12a58fec245ee9734e625964b97cf7e3f2f7d016603f9e56660ce479c7", 254 | "sha256:7dc253b542bfd4b4eb88d9dbae4ca079e7bf2e2afd819ee18891a43db66c60c7", 255 | "sha256:94f5bd885f67bbb25c82d80184abbf7ce4f6c3c3a41fbaa4182f034bba803e69", 256 | "sha256:a89e188daa119ffa0d03ce5123dee3f8ffd5115c896c2a9d4f0dbb3d8b95bfa3", 257 | "sha256:ad3399da9b0ca36e2f24de72f67ab2854a62e623274607e37e0ce5f5d5fa9166", 258 | "sha256:b0348be89275fd1d4c44ffa39530c41a21062f52299b1e3ee7d1c61f060044b8", 259 | "sha256:b5554368e4ede1856121b0dfa35ce71768102e4aa55e526cb8de7f374ff78722", 260 | "sha256:cbddc56b2502d3f87fda4f98d948eb5b11f36ff3902e17cb6cc44727f2200525", 261 | "sha256:d79f18f41751725c56eceab2a886f021d70fd70a6188fd386e29a045945ffc10", 262 | "sha256:dc2ca26a19ab32dc475dbad9dfe723d3a64c835f4c23f625c2b6566ca32b9f29", 263 | "sha256:dd9bcd4f294eb0633bb33d1a74febdd2b9018b8b8ed325f861fffcd2c7660bb8", 264 | "sha256:e8baab1bc7c9152715844f1faca6744f2416929de10d7639ed49555a85549f52", 265 | "sha256:ec31fe12668af687b99acf1567399632a7c47b0e17cfb9ae47c098644ef36797", 266 | "sha256:f12b4f7e2d8f9da3141564e6737d79016fe5336cc92de6814eba579744f65b0a", 267 | "sha256:f58ac38d5ca045a377b3b377c84df8175ab992c970a53332fa8ac2373df44ff7" 268 | ], 269 | "index": "pypi", 270 | "version": "==1.16.4" 271 | }, 272 | "opencv-python": { 273 | "hashes": [ 274 | 
"sha256:1703a296a96d3d46615e5053f224867977accb4240bcaa0fcabcb0768bf5ac13", 275 | "sha256:1777ce7535ee7a1995cae168a107a1320e9df13648b930e72a1a2c2eccd64cda", 276 | "sha256:1e5520482fb18fbd64d079e7f17ac0018f195fd75f6360a53bb82d7903106b50", 277 | "sha256:25522dcf2529614750a71112a6659759080b4bdc2323f19d47f4d895960fd796", 278 | "sha256:2af5f2842ad44c65ae2647377e0ff198719e1a1cfc9c6a19bc0c525c035d4bd8", 279 | "sha256:31ec48d7eca13fc25c287dea7cecab453976e372cad8f50d55c054a247efda21", 280 | "sha256:47cf48ff5dbd554e9f58cc9e98cf0b5de3f6a971172612bffa06bc5fb79ce872", 281 | "sha256:494f98366bb5d6c2ac7e50e6617139f353704fd97a6d12ec9d392e72817d5cb0", 282 | "sha256:4a9845870739e640e3350a8d98d511c92c087fe3d66090e83be7bf94e0ac64f7", 283 | "sha256:4ac29cc0847d948a6636899014e84e165c30cc8779d6218394d44363462a01ce", 284 | "sha256:5857ace03b7854221abf8072462d306c2c2ce4e366190b21d90ee8ee8aaf5bb4", 285 | "sha256:5b4a23d99d5a2874767034466f5a8fd37b9f93ac14955a01b1a208983c76b9ad", 286 | "sha256:734d87a5021c037064beb62133e135e66c7128e401a63b8b842b809ae2093749", 287 | "sha256:78005c1c5d15ef4e32e0f485557bd15b5b6d87f49c19db7fe3e9246a61ebe7e4", 288 | "sha256:81ae2283225c5c52fc3d72debd4241c30ccff2bb922578bf7867f9851cce3acb", 289 | "sha256:88dbf900f297fdae0f62b899d6a784d8868ec2135854c5f8a9abbad00a6f0c5b", 290 | "sha256:8c98ea7b8d327a31cd6028782a06147d0e0329ae8e829e881fb5d02f7ed8aec9", 291 | "sha256:937d4686fef6967921145290f5b50c01c00c5b5d3542a6519e8a85cd88448723", 292 | "sha256:a057958c0e362b3c4f03b9af1cbdb6d5af035fd22ecd7fd794eba8fdeb049eb8", 293 | "sha256:c41eab31fa2c641226c6187caa391a688d064c99f078d604574f1912296b771f", 294 | "sha256:cf4f7e62d1f80d1fa85a1693a3500def5cde54b2b75212b3609e552e4c25acfb", 295 | "sha256:d90d60143e18334330c149f293071c9f2f3c79c896f33dc4ec65099e58baaaa7", 296 | "sha256:db3106b7ca86999a7bd1f2fcc93e49314e5e6e451356774e421a69428df5020b", 297 | "sha256:dbaf264db56f4771dfac6624f438bc4dc670aa94f61a6138848fcab7e9e77380", 298 | "sha256:e65206c4cf651dc9cf0829962fae8bec986767c9f123d6a1ad17f9356bf7257e", 299 | "sha256:eac94ddc78c58e891cff7180274317dad2938a4ddfc6ced1c04846c7f50e77e9", 300 | "sha256:f2e828711f044a965509c862b3a59b3181e9c56c145a950cb53d43fec54e66d2" 301 | ], 302 | "version": "==4.1.0.25" 303 | }, 304 | "pandas": { 305 | "hashes": [ 306 | "sha256:071e42b89b57baa17031af8c6b6bbd2e9a5c68c595bc6bf9adabd7a9ed125d3b", 307 | "sha256:17450e25ae69e2e6b303817bdf26b2cd57f69595d8550a77c308be0cd0fd58fa", 308 | "sha256:17916d818592c9ec891cbef2e90f98cc85e0f1e89ed0924c9b5220dc3209c846", 309 | "sha256:2538f099ab0e9f9c9d09bbcd94b47fd889bad06dc7ae96b1ed583f1dc1a7a822", 310 | "sha256:366f30710172cb45a6b4f43b66c220653b1ea50303fbbd94e50571637ffb9167", 311 | "sha256:42e5ad741a0d09232efbc7fc648226ed93306551772fc8aecc6dce9f0e676794", 312 | "sha256:4e718e7f395ba5bfe8b6f6aaf2ff1c65a09bb77a36af6394621434e7cc813204", 313 | "sha256:4f919f409c433577a501e023943e582c57355d50a724c589e78bc1d551a535a2", 314 | "sha256:4fe0d7e6438212e839fc5010c78b822664f1a824c0d263fd858f44131d9166e2", 315 | "sha256:5149a6db3e74f23dc3f5a216c2c9ae2e12920aa2d4a5b77e44e5b804a5f93248", 316 | "sha256:627594338d6dd995cfc0bacd8e654cd9e1252d2a7c959449228df6740d737eb8", 317 | "sha256:83c702615052f2a0a7fb1dd289726e29ec87a27272d775cb77affe749cca28f8", 318 | "sha256:8c872f7fdf3018b7891e1e3e86c55b190e6c5cee70cab771e8f246c855001296", 319 | "sha256:90f116086063934afd51e61a802a943826d2aac572b2f7d55caaac51c13db5b5", 320 | "sha256:a3352bacac12e1fc646213b998bce586f965c9d431773d9e91db27c7c48a1f7d", 321 | 
"sha256:bcdd06007cca02d51350f96debe51331dec429ac8f93930a43eb8fb5639e3eb5", 322 | "sha256:c1bd07ebc15285535f61ddd8c0c75d0d6293e80e1ee6d9a8d73f3f36954342d0", 323 | "sha256:c9a4b7c55115eb278c19aa14b34fcf5920c8fe7797a09b7b053ddd6195ea89b3", 324 | "sha256:cc8fc0c7a8d5951dc738f1c1447f71c43734244453616f32b8aa0ef6013a5dfb", 325 | "sha256:d7b460bc316064540ce0c41c1438c416a40746fd8a4fb2999668bf18f3c4acf1" 326 | ], 327 | "version": "==0.24.2" 328 | }, 329 | "pillow": { 330 | "hashes": [ 331 | "sha256:15c056bfa284c30a7f265a41ac4cbbc93bdbfc0dfe0613b9cb8a8581b51a9e55", 332 | "sha256:1a4e06ba4f74494ea0c58c24de2bb752818e9d504474ec95b0aa94f6b0a7e479", 333 | "sha256:1c3c707c76be43c9e99cb7e3d5f1bee1c8e5be8b8a2a5eeee665efbf8ddde91a", 334 | "sha256:1fd0b290203e3b0882d9605d807b03c0f47e3440f97824586c173eca0aadd99d", 335 | "sha256:24114e4a6e1870c5a24b1da8f60d0ba77a0b4027907860188ea82bd3508c80eb", 336 | "sha256:258d886a49b6b058cd7abb0ab4b2b85ce78669a857398e83e8b8e28b317b5abb", 337 | "sha256:33c79b6dd6bc7f65079ab9ca5bebffb5f5d1141c689c9c6a7855776d1b09b7e8", 338 | "sha256:367385fc797b2c31564c427430c7a8630db1a00bd040555dfc1d5c52e39fcd72", 339 | "sha256:3c1884ff078fb8bf5f63d7d86921838b82ed4a7d0c027add773c2f38b3168754", 340 | "sha256:44e5240e8f4f8861d748f2a58b3f04daadab5e22bfec896bf5434745f788f33f", 341 | "sha256:46aa988e15f3ea72dddd81afe3839437b755fffddb5e173886f11460be909dce", 342 | "sha256:74d90d499c9c736d52dd6d9b7221af5665b9c04f1767e35f5dd8694324bd4601", 343 | "sha256:809c0a2ce9032cbcd7b5313f71af4bdc5c8c771cb86eb7559afd954cab82ebb5", 344 | "sha256:85d1ef2cdafd5507c4221d201aaf62fc9276f8b0f71bd3933363e62a33abc734", 345 | "sha256:8c3889c7681af77ecfa4431cd42a2885d093ecb811e81fbe5e203abc07e0995b", 346 | "sha256:9218d81b9fca98d2c47d35d688a0cea0c42fd473159dfd5612dcb0483c63e40b", 347 | "sha256:9aa4f3827992288edd37c9df345783a69ef58bd20cc02e64b36e44bcd157bbf1", 348 | "sha256:9d80f44137a70b6f84c750d11019a3419f409c944526a95219bea0ac31f4dd91", 349 | "sha256:b7ebd36128a2fe93991293f997e44be9286503c7530ace6a55b938b20be288d8", 350 | "sha256:c4c78e2c71c257c136cdd43869fd3d5e34fc2162dc22e4a5406b0ebe86958239", 351 | "sha256:c6a842537f887be1fe115d8abb5daa9bc8cc124e455ff995830cc785624a97af", 352 | "sha256:cf0a2e040fdf5a6d95f4c286c6ef1df6b36c218b528c8a9158ec2452a804b9b8", 353 | "sha256:cfd28aad6fc61f7a5d4ee556a997dc6e5555d9381d1390c00ecaf984d57e4232", 354 | "sha256:dca5660e25932771460d4688ccbb515677caaf8595f3f3240ec16c117deff89a", 355 | "sha256:de7aedc85918c2f887886442e50f52c1b93545606317956d65f342bd81cb4fc3", 356 | "sha256:e6c0bbf8e277b74196e3140c35f9a1ae3eafd818f7f2d3a15819c49135d6c062" 357 | ], 358 | "version": "==6.0.0" 359 | }, 360 | "protobuf": { 361 | "hashes": [ 362 | "sha256:03f43eac9d5b651f976e91cf46a25b75e5779d98f0f4114b0abfed83376d75f8", 363 | "sha256:0c94b21e6de01362f91a86b372555d22a60b59708599ca9d5032ae9fdf8e3538", 364 | "sha256:2d2a9f30f61f4063fadd7fb68a2510a6939b43c0d6ceeec5c4704f22225da28e", 365 | "sha256:34a0b05fca061e4abb77dd180209f68d8637115ff319f51e28a6a9382d69853a", 366 | "sha256:358710fd0db25372edcf1150fa691f48376a134a6c69ce29f38f185eea7699e6", 367 | "sha256:41e47198b94c27ba05a08b4a95160656105745c462af574e4bcb0807164065c0", 368 | "sha256:8c61cc8a76e9d381c665aecc5105fa0f1878cf7db8b5cd17202603bcb386d0fc", 369 | "sha256:a6eebc4db759e58fdac02efcd3028b811effac881d8a5bad1996e4e8ee6acb47", 370 | "sha256:a9c12f7c98093da0a46ba76ec40ace725daa1ac4038c41e4b1466afb5c45bb01", 371 | "sha256:cb95068492ba0859b8c9e61fa8ba206a83c64e5d0916fb4543700b2e2b214115", 372 | 
"sha256:cd98476ce7bb4dcd6a7b101f5eecdc073dafea19f311e36eb8fba1a349346277", 373 | "sha256:ce64cfbea18c535176bdaa10ba740c0fc4c6d998a3f511c17bedb0ae4b3b167c", 374 | "sha256:dcbb59eac73fd454e8f2c5fba9e3d3320fd4707ed6a9d3ea3717924a6f0903ea", 375 | "sha256:dd67f34458ae716029e2a71ede998e9092493b62a519236ca52e3c5202096c87", 376 | "sha256:e3c96056eb5b7284a20e256cb0bf783c8f36ad82a4ae5434a7b7cd02384144a7", 377 | "sha256:f612d584d7a27e2f39e7b17878430a959c1bc09a74ba09db096b468558e5e126", 378 | "sha256:f6de8a7d6122297b81566e5bd4df37fd5d62bec14f8f90ebff8ede1c9726cd0a", 379 | "sha256:fa529d9261682b24c2aaa683667253175c9acebe0a31105394b221090da75832" 380 | ], 381 | "version": "==3.8.0" 382 | }, 383 | "pyglet": { 384 | "hashes": [ 385 | "sha256:8b07aea16f34ac861cffd06a0c17723ca944d172e577b57b21859b7990709a66", 386 | "sha256:b00570e7cdf6971af8953b6ece50d83d13272afa5d1f1197c58c0f478dd17743" 387 | ], 388 | "version": "==1.3.2" 389 | }, 390 | "pyparsing": { 391 | "hashes": [ 392 | "sha256:1873c03321fc118f4e9746baf201ff990ceb915f433f23b395f5580d1840cb2a", 393 | "sha256:9b6323ef4ab914af344ba97510e966d64ba91055d6b9afa6b30799340e89cc03" 394 | ], 395 | "version": "==2.4.0" 396 | }, 397 | "python-dateutil": { 398 | "hashes": [ 399 | "sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb", 400 | "sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e" 401 | ], 402 | "version": "==2.8.0" 403 | }, 404 | "pytz": { 405 | "hashes": [ 406 | "sha256:303879e36b721603cc54604edcac9d20401bdbe31e1e4fdee5b9f98d5d31dfda", 407 | "sha256:d747dd3d23d77ef44c6a3526e274af6efeb0a6f1afd5a69ba4d5be4098c8e141" 408 | ], 409 | "version": "==2019.1" 410 | }, 411 | "scipy": { 412 | "hashes": [ 413 | "sha256:03b1e0775edbe6a4c64effb05fff2ce1429b76d29d754aa5ee2d848b60033351", 414 | "sha256:09d008237baabf52a5d4f5a6fcf9b3c03408f3f61a69c404472a16861a73917e", 415 | "sha256:10325f0ffac2400b1ec09537b7e403419dcd25d9fee602a44e8a32119af9079e", 416 | "sha256:1db9f964ed9c52dc5bd6127f0dd90ac89791daa690a5665cc01eae185912e1ba", 417 | "sha256:409846be9d6bdcbd78b9e5afe2f64b2da5a923dd7c1cd0615ce589489533fdbb", 418 | "sha256:4907040f62b91c2e170359c3d36c000af783f0fa1516a83d6c1517cde0af5340", 419 | "sha256:6c0543f2fdd38dee631fb023c0f31c284a532d205590b393d72009c14847f5b1", 420 | "sha256:826b9f5fbb7f908a13aa1efd4b7321e36992f5868d5d8311c7b40cf9b11ca0e7", 421 | "sha256:a7695a378c2ce402405ea37b12c7a338a8755e081869bd6b95858893ceb617ae", 422 | "sha256:a84c31e8409b420c3ca57fd30c7589378d6fdc8d155d866a7f8e6e80dec6fd06", 423 | "sha256:adadeeae5500de0da2b9e8dd478520d0a9945b577b2198f2462555e68f58e7ef", 424 | "sha256:b283a76a83fe463c9587a2c88003f800e08c3929dfbeba833b78260f9c209785", 425 | "sha256:c19a7389ab3cd712058a8c3c9ffd8d27a57f3d84b9c91a931f542682bb3d269d", 426 | "sha256:c3bb4bd2aca82fb498247deeac12265921fe231502a6bc6edea3ee7fe6c40a7a", 427 | "sha256:c5ea60ece0c0c1c849025bfc541b60a6751b491b6f11dd9ef37ab5b8c9041921", 428 | "sha256:db61a640ca20f237317d27bc658c1fc54c7581ff7f6502d112922dc285bdabee" 429 | ], 430 | "version": "==1.3.0" 431 | }, 432 | "six": { 433 | "hashes": [ 434 | "sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", 435 | "sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73" 436 | ], 437 | "version": "==1.12.0" 438 | }, 439 | "stable-baselines": { 440 | "hashes": [ 441 | "sha256:8ec47f426b3f7717303c0160d2548eb57e65ebaa3d306ed25c02fd926cc71d7c", 442 | "sha256:feb484e5552c325182fb7e6cf1313acc40a88edceeb2f1585127a57a27f59de3" 443 | ], 444 | "index": "pypi", 445 | "version": "==2.6.0" 446 | }, 
447 | "tensorboard": { 448 | "hashes": [ 449 | "sha256:6f194519f41762bfdf5eb410ccf33226d1c252caf5ad8893288648bfbcf4d135", 450 | "sha256:81170f66bf8f95c2e9f6b3fefe0ddc5472655a9e3793e73b5b5d4ec0ba395e76" 451 | ], 452 | "version": "==1.12.2" 453 | }, 454 | "tensorflow": { 455 | "hashes": [ 456 | "sha256:17f406ce35801e438229b102b9e82251c2a5c6fb1384702e1b56cf667b01f921", 457 | "sha256:1e22f267221a3ec7cbd2d0638887ce162152e2d55e2d9e6f457609f5ff93c71c", 458 | "sha256:253928f4dfe72a3571a811cc0277f2d2663183f50c2da926dea6e7ea6ea45db7", 459 | "sha256:50b33261a05fe7b7f1f29e79f4281a99e19fb847912ff8df09dafad099eb9736", 460 | "sha256:74bc3259d22b59f10fdc9731a43a375d72fa6180bbfac85241ae3a8abd3900ab", 461 | "sha256:a1311b990bc9e794edcab92009e8dd24b5907f5ba1041c387c702bb04393389f", 462 | "sha256:a6ac81d6ea529255bd403a9557cd6dde57a165dd1395ce0573108f36a3aa0503", 463 | "sha256:b2eab8f81a62a78bd0011e1ba7dca3846962fd14374f8b215cd36e0389290d85", 464 | "sha256:ba36994431dbee4e46f2ac19fcf3d8b8c08de88ecbcf39a73c3cfaf33470f460", 465 | "sha256:c0a1c44322955532468b164029a242bace4e8d6f5a36c4ceeffe1bdf6b0ef3c4", 466 | "sha256:d0d660c434a9224534ec8bfee1f9d0c03f35ab2fe75e2ba874b6450a6368e68d", 467 | "sha256:e0d95f32d9eca232e91460b625c6d01004d292cfe8b10b9d14c7479049916d19" 468 | ], 469 | "index": "pypi", 470 | "version": "==1.12.2" 471 | }, 472 | "termcolor": { 473 | "hashes": [ 474 | "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b" 475 | ], 476 | "version": "==1.1.0" 477 | }, 478 | "werkzeug": { 479 | "hashes": [ 480 | "sha256:865856ebb55c4dcd0630cdd8f3331a1847a819dda7e8c750d3db6f2aa6c0209c", 481 | "sha256:a0b915f0815982fb2a09161cb8f31708052d0951c3ba433ccc5e1aa276507ca6" 482 | ], 483 | "version": "==0.15.4" 484 | }, 485 | "wheel": { 486 | "hashes": [ 487 | "sha256:5e79117472686ac0c4aef5bad5172ea73a1c2d1646b808c35926bd26bdfb0c08", 488 | "sha256:62fcfa03d45b5b722539ccbc07b190e4bfff4bb9e3a4d470dd9f6a0981002565" 489 | ], 490 | "markers": "python_version >= '3'", 491 | "version": "==0.33.4" 492 | } 493 | }, 494 | "develop": { 495 | "atomicwrites": { 496 | "hashes": [ 497 | "sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4", 498 | "sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6" 499 | ], 500 | "version": "==1.3.0" 501 | }, 502 | "attrs": { 503 | "hashes": [ 504 | "sha256:69c0dbf2ed392de1cb5ec704444b08a5ef81680a61cb899dc08127123af36a79", 505 | "sha256:f0b870f674851ecbfbbbd364d6b5cbdff9dcedbc7f3f5e18a6891057f21fe399" 506 | ], 507 | "version": "==19.1.0" 508 | }, 509 | "importlib-metadata": { 510 | "hashes": [ 511 | "sha256:6dfd58dfe281e8d240937776065dd3624ad5469c835248219bd16cf2e12dbeb7", 512 | "sha256:cb6ee23b46173539939964df59d3d72c3e0c1b5d54b84f1d8a7e912fe43612db" 513 | ], 514 | "version": "==0.18" 515 | }, 516 | "more-itertools": { 517 | "hashes": [ 518 | "sha256:2112d2ca570bb7c3e53ea1a35cd5df42bb0fd10c45f0fb97178679c3c03d64c7", 519 | "sha256:c3e4748ba1aad8dba30a4886b0b1a2004f9a863837b8654e7059eebf727afa5a" 520 | ], 521 | "markers": "python_version > '2.7'", 522 | "version": "==7.0.0" 523 | }, 524 | "packaging": { 525 | "hashes": [ 526 | "sha256:0c98a5d0be38ed775798ece1b9727178c4469d9c3b4ada66e8e6b7849f8732af", 527 | "sha256:9e1cbf8c12b1f1ce0bb5344b8d7ecf66a6f8a6e91bcb0c84593ed6d3ab5c4ab3" 528 | ], 529 | "version": "==19.0" 530 | }, 531 | "pluggy": { 532 | "hashes": [ 533 | "sha256:0825a152ac059776623854c1543d65a4ad408eb3d33ee114dff91e57ec6ae6fc", 534 | "sha256:b9817417e95936bf75d85d3f8767f7df6cdde751fc40aed3bb3074cbcb77757c" 535 | ], 536 | "version": 
"==0.12.0" 537 | }, 538 | "py": { 539 | "hashes": [ 540 | "sha256:64f65755aee5b381cea27766a3a147c3f15b9b6b9ac88676de66ba2ae36793fa", 541 | "sha256:dc639b046a6e2cff5bbe40194ad65936d6ba360b52b3c3fe1d08a82dd50b5e53" 542 | ], 543 | "version": "==1.8.0" 544 | }, 545 | "pyparsing": { 546 | "hashes": [ 547 | "sha256:1873c03321fc118f4e9746baf201ff990ceb915f433f23b395f5580d1840cb2a", 548 | "sha256:9b6323ef4ab914af344ba97510e966d64ba91055d6b9afa6b30799340e89cc03" 549 | ], 550 | "version": "==2.4.0" 551 | }, 552 | "pytest": { 553 | "hashes": [ 554 | "sha256:4a784f1d4f2ef198fe9b7aef793e9fa1a3b2f84e822d9b3a64a181293a572d45", 555 | "sha256:926855726d8ae8371803f7b2e6ec0a69953d9c6311fa7c3b6c1b929ff92d27da" 556 | ], 557 | "index": "pypi", 558 | "version": "==4.6.3" 559 | }, 560 | "six": { 561 | "hashes": [ 562 | "sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c", 563 | "sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73" 564 | ], 565 | "version": "==1.12.0" 566 | }, 567 | "wcwidth": { 568 | "hashes": [ 569 | "sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e", 570 | "sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c" 571 | ], 572 | "version": "==0.1.7" 573 | }, 574 | "zipp": { 575 | "hashes": [ 576 | "sha256:8c1019c6aad13642199fbe458275ad6a84907634cc9f0989877ccc4a2840139d", 577 | "sha256:ca943a7e809cc12257001ccfb99e3563da9af99d52f261725e96dfe0f9275bc3" 578 | ], 579 | "version": "==0.5.1" 580 | } 581 | } 582 | } 583 | --------------------------------------------------------------------------------