├── B.Tech. Minor Project - Cache Management.zip ├── B_Tech__Minor_Project___Cache_Management.pdf ├── DQN.py ├── Plagiarism Checking Result for Btech Project - RLCaR.html ├── Qlearning ├── Qlearning.py ├── README.md ├── __pycache__ ├── environment.cpython-37.pyc └── os_sim.cpython-37.pyc ├── e2.py ├── environment.py ├── figs ├── .ignore ├── IJEATRLFramework-DQN(1).png └── comparison.png ├── lfu.py ├── lru.py ├── notebooks ├── .ipynb_checkpoints │ ├── DQN-checkpoint.ipynb │ ├── DQN2-checkpoint.ipynb │ ├── LRU & LFU-checkpoint.ipynb │ ├── Plot #2-checkpoint.ipynb │ ├── Test neural net-checkpoint.ipynb │ └── Test_Env.py-checkpoint.ipynb ├── DQN.ipynb ├── DQN2.ipynb ├── DQN3.ipynb ├── DQN_workingbackup.ipynb ├── LRU & LFU.ipynb ├── Plot #2.ipynb ├── Test neural net.ipynb ├── Test_Env.py.ipynb ├── __pycache__ │ ├── e2.cpython-37.pyc │ ├── environment.cpython-37.pyc │ └── os_sim.cpython-37.pyc ├── e2.py ├── e3.py ├── environment.py └── os_sim.py ├── os_sim.py └── results ├── lfu.pkl ├── lru.pkl └── rlcar.pkl /B.Tech. Minor Project - Cache Management.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/B.Tech. Minor Project - Cache Management.zip -------------------------------------------------------------------------------- /B_Tech__Minor_Project___Cache_Management.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/B_Tech__Minor_Project___Cache_Management.pdf -------------------------------------------------------------------------------- /DQN.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.functional as F 5 | import time 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | import pandas as pd 9 | from collections import namedtuple 10 | import math 11 | import random 12 | 13 | 14 | class DQN(object): 15 | def __init__(self): 16 | self.target_net = Net().to(torch.device('cuda')) 17 | self.policy_new = Net().to(torch.device('cuda')) 18 | self.learn_step_counter = 0 #For target net updation 19 | self.observe_counter = 0 #For storage 20 | # self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2)) 21 | self.memory = ReplayMemory(MEMORY_CAPACITY) 22 | self.optimizer = torch.optim.Adam(self.policy_new.parameters(), lr=LR) 23 | self.loss_func = nn.SmoothL1Loss() 24 | self.PATH = 'model/' 25 | self.epsilon = EPS_START 26 | self.step = 0 27 | 28 | def choose_action(self, x, test=False): 29 | self.step += 1 30 | print(f"Epsilon: {self.epsilon}") 31 | if random.random() <= self.epsilon: 32 | action = np.random.randint(0, N_ACTIONS) 33 | print(f"[RANDOM {action}]") 34 | else: 35 | with torch.no_grad(): 36 | x = torch.unsqueeze(torch.FloatTensor(x).to(torch.device('cuda')), 0).permute(0, 3, 1, 2) 37 | action = int(self.policy_new.forward(x).max(1)[1].view(1, 1)) 38 | print(f"[NET {action}]") 39 | if self.epsilon > EPS_END and self.step > E_OBSERVE_STEPS: 40 | old_e = self.epsilon 41 | interval = EPS_START - EPS_END 42 | self.epsilon -= interval / float(E_EXPLORE_STEPS) 43 | return action 44 | 45 | def store_transition(self, s, a, r, s_): 46 | self.memory.push(s, a, r, s_) 47 | 48 | def learn(self): 49 | if self.observe_counter < E_OBSERVE_STEPS: 50 | self.observe_counter += 1 51 | return 52 | if self.learn_step_counter % TARGET_REPLACE_ITER == 0: 53 | print("Traget updated!") 54 | self.target_net.load_state_dict(self.policy_new.state_dict()) 55 | self.learn_step_counter += 1 56 | 57 | batch = Transition(*zip(*self.memory.sample(BATCH_SIZE))) 58 | #https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html 59 | # ^For explanation of above 60 | 61 | b_s = torch.Tensor(batch.state).permute(0, 3, 1, 2).cuda() 62 | b_s_ = torch.Tensor(batch.next_state).permute(0, 3, 1, 2).cuda() 63 | b_a = torch.LongTensor(batch.action)[..., None].cuda() 64 | b_r = torch.FloatTensor(batch.reward)[..., None].cuda() 65 | q_eval = self.policy_new(b_s).gather(1, b_a) 66 | q_next = self.target_net(b_s_).detach() 67 | q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) 68 | loss = self.loss_func(q_eval, q_target) 69 | self.optimizer.zero_grad() 70 | loss.backward() 71 | # for param in self.policy_new.parameters(): 72 | # param.grad.data.clamp_(-1, 1) #Gradient clipping 73 | self.optimizer.step() 74 | 75 | def save_model(self, name): 76 | eval_name = name + '_policy_new.m' 77 | train_name = name + '_train_net.m' 78 | torch.save(self.policy_new.state_dict(), self.PATH + eval_name) 79 | torch.save(self.target_net.state_dict(), self.PATH + train_name) 80 | 81 | def load_model(self, name): 82 | eval_name = name + '_policy_new.m' 83 | train_name = name + '_train_net.m' 84 | self.policy_new.load_state_dict(torch.load(self.PATH + eval_name)) 85 | self.target_net.load_state_dict(torch.load(self.PATH + train_name)) 86 | -------------------------------------------------------------------------------- /Qlearning: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | import time 4 | 5 | """ 6 | Qlearning is an off policy learning python implementation. 7 | This is a python implementation of the qlearning algorithm in the Sutton and 8 | Barto's book on RL. It's called SARSA because - (state, action, reward, state, 9 | action). The only difference between SARSA and Qlearning is that SARSA takes the 10 | next action based on the current policy while qlearning takes the action with 11 | maximum utility of next state. 12 | Using the simplest gym environment for brevity: https://gym.openai.com/envs/FrozenLake-v0/ 13 | """ 14 | 15 | def init_q(s, a, type="ones"): 16 | """ 17 | @param s the number of states 18 | @param a the number of actions 19 | @param type random, ones or zeros for the initialization 20 | """ 21 | if type == "ones": 22 | return np.ones((s, a)) 23 | elif type == "random": 24 | return np.random.random((s, a)) 25 | elif type == "zeros": 26 | return np.zeros((s, a)) 27 | 28 | 29 | def epsilon_greedy(Q, epsilon, n_actions, s, train=False): 30 | """ 31 | @param Q Q values state x action -> value 32 | @param epsilon for exploration 33 | @param s number of states 34 | @param train if true then no random actions selected 35 | """ 36 | if train or np.random.rand() >= epsilon: 37 | action = np.argmax(Q[s, :]) 38 | else: 39 | action = np.random.randint(0, n_actions) 40 | return action 41 | 42 | def qlearning(alpha, gamma, epsilon, episodes, max_steps, n_tests, render = False, test=False): 43 | """ 44 | @param alpha learning rate 45 | @param gamma decay factor 46 | @param epsilon for exploration 47 | @param max_steps for max step in each episode 48 | @param n_tests number of test episodes 49 | """ 50 | env = gym.make('Taxi-v2') 51 | n_states, n_actions = env.observation_space.n, env.action_space.n 52 | Q = init_q(n_states, n_actions, type="ones") 53 | timestep_reward = [] 54 | for episode in range(episodes): 55 | print(f"Episode: {episode}") 56 | s = env.reset() 57 | a = epsilon_greedy(Q, epsilon, n_actions, s) 58 | t = 0 59 | total_reward = 0 60 | done = False 61 | while t < max_steps: 62 | if render: 63 | env.render() 64 | t += 1 65 | s_, reward, done, info = env.step(a) 66 | total_reward += reward 67 | a_ = np.argmax(Q[s_, :]) 68 | if done: 69 | Q[s, a] += alpha * ( reward - Q[s, a] ) 70 | else: 71 | Q[s, a] += alpha * ( reward + (gamma * Q[s_, a_]) - Q[s, a] ) 72 | s, a = s_, a_ 73 | if done: 74 | if render: 75 | print(f"This episode took {t} timesteps and reward: {total_reward}") 76 | timestep_reward.append(total_reward) 77 | break 78 | if render: 79 | print(f"Here are the Q values:\n{Q}\nTesting now:") 80 | if test: 81 | test_agent(Q, env, n_tests, n_actions) 82 | return timestep_reward 83 | 84 | def test_agent(Q, env, n_tests, n_actions, delay=1): 85 | for test in range(n_tests): 86 | print(f"Test #{test}") 87 | s = env.reset() 88 | done = False 89 | epsilon = 0 90 | while True: 91 | time.sleep(delay) 92 | env.render() 93 | a = epsilon_greedy(Q, epsilon, n_actions, s, train=True) 94 | print(f"Chose action {a} for state {s}") 95 | s, reward, done, info = env.step(a) 96 | if done: 97 | if reward > 0: 98 | print("Reached goal!") 99 | else: 100 | print("Shit! dead x_x") 101 | time.sleep(3) 102 | break 103 | 104 | 105 | if __name__ =="__main__": 106 | alpha = 0.4 107 | gamma = 0.999 108 | epsilon = 0.1 109 | episodes = 10000 110 | max_steps = 2500 111 | n_tests = 2 112 | timestep_reward = qlearning(alpha, gamma, epsilon, episodes, max_steps, n_tests, test = True) 113 | print(timestep_reward) 114 | -------------------------------------------------------------------------------- /Qlearning.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | import time 4 | 5 | def init_q(s, a, type="ones"): 6 | """ 7 | @param s the number of states 8 | @param a the number of actions 9 | @param type random, ones or zeros for the initialization 10 | """ 11 | if type == "ones": 12 | return np.ones((s, a)) 13 | elif type == "random": 14 | return np.random.random((s, a)) 15 | elif type == "zeros": 16 | return np.zeros((s, a)) 17 | 18 | 19 | def epsilon_greedy(Q, epsilon, n_actions, s, train=False): 20 | """ 21 | @param Q Q values state x action -> value 22 | @param epsilon for exploration 23 | @param s number of states 24 | @param train if true then no random actions selected 25 | """ 26 | if train or np.random.rand() >= epsilon: 27 | action = np.argmax(Q[s, :]) 28 | else: 29 | action = np.random.randint(0, n_actions) 30 | return action 31 | 32 | def qlearning(alpha, gamma, epsilon, episodes, max_steps, n_tests, render = False, test=False): 33 | """ 34 | @param alpha learning rate 35 | @param gamma decay factor 36 | @param epsilon for exploration 37 | @param max_steps for max step in each episode 38 | @param n_tests number of test episodes 39 | """ 40 | env = CacheEnv() # Init using default args 41 | n_states, n_actions = env.observation_space.n, env.action_space.n 42 | Q = init_q(n_states, n_actions, type="ones") 43 | timestep_reward = [] 44 | for episode in range(episodes): 45 | print(f"Episode: {episode}") 46 | s = env.reset() 47 | a = epsilon_greedy(Q, epsilon, n_actions, s) 48 | t = 0 49 | total_reward = 0 50 | done = False 51 | while t < max_steps: 52 | if render: 53 | env.render() 54 | t += 1 55 | s_, reward, done, info = env.step(a) 56 | total_reward += reward 57 | a_ = np.argmax(Q[s_, :]) 58 | if done: 59 | Q[s, a] += alpha * ( reward - Q[s, a] ) 60 | else: 61 | Q[s, a] += alpha * ( reward + (gamma * Q[s_, a_]) - Q[s, a] ) 62 | s, a = s_, a_ 63 | if done: 64 | if render: 65 | print(f"This episode took {t} timesteps and reward: {total_reward}") 66 | timestep_reward.append(total_reward) 67 | break 68 | if render: 69 | print(f"Here are the Q values:\n{Q}\nTesting now:") 70 | if test: 71 | test_agent(Q, env, n_tests, n_actions) 72 | return timestep_reward 73 | 74 | def test_agent(Q, env, n_tests, n_actions, delay=1): 75 | for test in range(n_tests): 76 | print(f"Test #{test}") 77 | s = env.reset() 78 | done = False 79 | epsilon = 0 80 | while True: 81 | time.sleep(delay) 82 | env.render() 83 | a = epsilon_greedy(Q, epsilon, n_actions, s, train=True) 84 | print(f"Chose action {a} for state {s}") 85 | s, reward, done, info = env.step(a) 86 | if done: 87 | if reward > 0: 88 | print("Reached goal!") 89 | else: 90 | print("Shit! dead x_x") 91 | time.sleep(3) 92 | break 93 | 94 | 95 | if __name__ =="__main__": 96 | alpha = 0.4 97 | gamma = 0.9 98 | epsilon = 0.1 99 | episodes = 1000 100 | max_steps = 5 101 | n_tests = 2 102 | timestep_reward = qlearning(alpha, gamma, epsilon, episodes, max_steps, n_tests, test = True) 103 | print(timestep_reward) 104 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RLCaR: Deep Reinforcement Learning Framework for Optimal and Adaptive Cache Replacement 2 | Adaptive Cache replacement strategies have shown superior performance in comparison to classical strategies like LRU and LFU. Some of these strategies like Adaptive Replacement Cache (ARC), Clock with Adaptive Replacement (CAR) are quite effective for day to day applications but they do not encode access history or truly learn from cache misses. We propose a reinforcement learning framework, RLCaR which seeks to tackle these limitations. We use TD 0 model-free algorithms like Q-Learning, Expected SARSA and SARSA to train our agent to efficiently replace pages in cache in order to maximize the cache hit ratio. We also developed a memory cache simulator in order to test our approach and compare it with LRU and LFU policies. 3 | ![](./figs/IJEATRLFramework-DQN(1).png "Model Architecture") 4 | 5 | ### Comparison with LRU and LFU: 6 | 7 | ![](./figs/comparison.png "Average Cache Hits") 8 | -------------------------------------------------------------------------------- /__pycache__/environment.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/__pycache__/environment.cpython-37.pyc -------------------------------------------------------------------------------- /__pycache__/os_sim.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/__pycache__/os_sim.cpython-37.pyc -------------------------------------------------------------------------------- /e2.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | import pandas as pd 4 | import pickle 5 | from collections import defaultdict, OrderedDict 6 | from os_sim import OS 7 | 8 | LIMIT = 3 9 | N_PAGES = 5 10 | EPS_LEN = 3 11 | POS_REW = 1 12 | NEG_REW = -1 13 | HEAVY_NEG_R = -10 14 | 15 | class CacheEnv(gym.Env): 16 | metadata = {'render.modes': ['human']} 17 | def __init__(self, limit=LIMIT, n_pages=N_PAGES, eps_len=EPS_LEN, human=False, verbose=False): 18 | super(CacheEnv, self).__init__() 19 | self.limit = limit 20 | self.n_pages = n_pages 21 | self.eps_len = eps_len 22 | self.os = OS(limit, n_pages) 23 | self.pages, self.NT = self.os.init_pages() 24 | self.timestep = 0 #counter; if this reaches eps_len, return done=True 25 | self.done = False 26 | self.new_page_id = -1 27 | self.action_space_n = limit 28 | self.human = human 29 | self.verbose = verbose 30 | 31 | def step(self, action, test=False): 32 | """ 33 | OS just asked for a page not in memory (stored in self.new_page_id). 34 | Replace page at `action` index to make space for the page. 35 | Then keep asking OS for more pages until a miss occurs. 36 | For ever hit meanwhile, increase positive reward by 1. 37 | """ 38 | self.timestep += 1 39 | if self.timestep >= self.eps_len: 40 | self.done = True #Episode reached its end 41 | 42 | 43 | if self.verbose: 44 | self.print_cache() 45 | 46 | self.allocate_cache(self.new_page_id, action) #we took action to make space for this page 47 | 48 | if self.verbose: 49 | print(f"Allocated {self.new_page_id} at index {action}") 50 | self.print_cache() 51 | 52 | reward = 0 53 | nhits = 0 54 | hit = True 55 | while(True): #until page miss occurs 56 | new_page_id = self.os.get_id() #This is page requested by the OS 57 | self.new_page_id = new_page_id #Store for debugging 58 | if self.verbose: 59 | print(f"== Page: {new_page_id} requested!") 60 | if self.is_allocated(new_page_id): 61 | if self.verbose: 62 | print(f"Page: {new_page_id} is allocated. Hit!") 63 | r = self.access_page(new_page_id) 64 | reward += r 65 | nhits += 1 66 | else: 67 | if self.verbose: 68 | print(f"Page: {new_page_id} Not allocated!! MISS!") 69 | break 70 | 71 | observation = f"There were {nhits} hits." 72 | return self.nn_state(), reward, self.done, observation 73 | 74 | def reset(self): 75 | self.timestep = 0 76 | self.pages, self.NT = self.os.init_pages() #self.NT keeps record of number of times a page was accessed. This info might be lost after page is removed so its saved here. 77 | self.done = False 78 | self.new_page_id = self.page_not_in_memory() #this will cause a miss and make agent choose an action for step. 79 | # return self.pages 80 | return self.nn_state() 81 | 82 | def page_not_in_memory(self): 83 | current_pages = set(self.pages.keys()) 84 | all_pages = set([i for i in range(self.n_pages)]) 85 | left_pages = list(all_pages-current_pages) 86 | return np.random.choice(left_pages) 87 | 88 | 89 | def render(self, mode='human'): 90 | pass 91 | 92 | def close(self): 93 | pass 94 | 95 | def is_allocated(self, id): 96 | """ 97 | returns true if 'id' is allocated a cache currently 98 | """ 99 | if id in self.pages.keys(): 100 | return True 101 | return False 102 | 103 | def toggle_human(self): 104 | self.human = not self.human 105 | 106 | @staticmethod 107 | def normalize(arr): 108 | return arr/arr.sum() 109 | 110 | def nn_state(self): 111 | """returns state in numpy format for neural net inpu""" 112 | if self.human: 113 | return self.pages 114 | state = [] 115 | for k in self.pages: 116 | vals = self.pages[k] 117 | state.append(vals[0]) #Flatten 118 | state.append(vals[1]) #Flatten 119 | return self.normalize(np.array(state)) 120 | 121 | def print_cache(self): 122 | print(self.pages) 123 | 124 | 125 | def access_page(self, id): 126 | """change counters of a page requested that is currently in cache""" 127 | hit = True #HIT! 128 | page = self.pages[id] 129 | self.pages[id][0] = 1 #Last accessed 1 timestep ago 130 | self.pages[id][1] += 1 #Increase local fu counter 131 | self.NT[id] += 1 #Increase global fu counter 132 | reward = POS_REW #pos reward for hit 133 | 134 | #For all the pages except id, increament their lu counter 135 | for page_id in self.pages.keys(): 136 | if page_id == id: 137 | continue 138 | else: 139 | self.pages[page_id][0] += 1 140 | 141 | return reward 142 | 143 | def allocate_cache(self, id, action=None): 144 | """ 145 | remove page at 'action' 146 | add page 'id' 147 | """ 148 | id = int(id) 149 | self.NT[id] += 1 #increase global fu counter 150 | 151 | #For all the pages except id, increament their lu counter 152 | for page_id in self.pages.keys(): 153 | if page_id == id: 154 | continue 155 | else: 156 | self.pages[page_id][0] += 1 157 | 158 | 159 | action = int(action) 160 | old_key = list(self.pages.keys())[action] #the page at index 'action' which will be replaced 161 | new_key = id #new page whose id is id 162 | #update the dictionary to contain new page's counter (update value) 163 | new_value = [1, self.NT[id]] 164 | #update the dictionary to contain new page's key (replace key) 165 | self.pages = dict(OrderedDict([(new_key, new_value) if k == old_key else (k, v) for k, v in self.pages.items()])) 166 | 167 | 168 | if __name__ == "__main__": 169 | env = CacheEnv() 170 | env.reset() 171 | -------------------------------------------------------------------------------- /environment.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | import pandas as pd 4 | import pickle 5 | from collections import defaultdict 6 | from os_sim import OS 7 | 8 | LIMIT = 3 9 | N_PAGES = 5 10 | EPS_LEN = 3 11 | POS_REW = 1 12 | NEG_REW = -1 13 | HEAVY_NEG_R = -10 14 | 15 | class CacheEnv(gym.Env): 16 | metadata = {'render.modes': ['human']} 17 | def __init__(self, limit=LIMIT, n_pages=N_PAGES, eps_len=EPS_LEN): 18 | super(CacheEnv, self).__init__() 19 | self.limit = limit 20 | self.n_pages = n_pages 21 | self.eps_len = eps_len 22 | self.os = OS(limit, n_pages) 23 | self.pages, self.NT = self.os.init_pages() 24 | self.total_hits = 0 25 | self.timestep = 0 #counter; if this reaches eps_len, return done=True 26 | self.done = False 27 | self.new_page_id = -1 28 | self.action_space_n = limit 29 | 30 | def step(self, action, test=False): 31 | """ 32 | First OS will send a page id (randomly from distribution P) 33 | based on the action choose to evict a page 34 | allocate this page id cache inplace of the 'action' id 35 | """ 36 | self.timestep += 1 37 | if self.timestep >= self.eps_len: 38 | self.done = True #Episode reached its end 39 | new_page_id = self.os.get_id() #This is page requested by the OS 40 | self.new_page_id = new_page_id #Store for debugging 41 | reward, hit = self.allocate_cache(action, new_page_id) 42 | if hit: 43 | observation = f"This was a hit, OS asked for: {new_page_id}" 44 | self.total_hits += 1 45 | else: 46 | observation = f"This was not a hit, OS asked for: {new_page_id}" 47 | # return self.pages, reward, self.done, observation 48 | return self.nn_state(), reward, self.done, observation 49 | 50 | def reset(self): 51 | self.timestep = 0 52 | self.pages, self.NT = self.os.init_pages() 53 | self.total_hits = 0 #Intuitive 54 | self.done = False 55 | # return self.pages 56 | return self.nn_state() 57 | 58 | def render(self, mode='human'): 59 | pass 60 | 61 | def close(self): 62 | pass 63 | 64 | def if_allocated(self, id): 65 | """ 66 | returns true if 'id' is allocated a cache currently 67 | """ 68 | if id in self.pages.keys(): 69 | return True 70 | return False 71 | 72 | def nn_state(self): 73 | """returns state in numpy format for neural net inpu""" 74 | state = [] 75 | for k in self.pages: 76 | vals = self.pages[k] 77 | state.append(vals[0]) #Flatten 78 | state.append(vals[1]) #Flatten 79 | return np.array(state) 80 | 81 | def allocate_cache(self, action, id): 82 | """ 83 | remove page at 'action' 84 | add page 'id' 85 | """ 86 | action = int(action) 87 | id = int(id) 88 | hit = False #Page hit or not? 89 | self.NT[id] += 1 90 | # For all the pages except id, increament their lu counter 91 | for page_id in self.pages.keys(): 92 | if page_id == id: 93 | continue 94 | else: 95 | self.pages[page_id][0] += 1 96 | # new_page = self.pages[page_id] 97 | # new_page = [new_page[0]+1, new_page[1]] 98 | # self.pages[page_id] = new_page 99 | 100 | # if action not in self.pages.keys(): 101 | # #Agent asked to remove a page that wasn't even allocated 102 | # return HEAVY_NEG_R, hit 103 | 104 | if self.if_allocated(id): 105 | hit = True #HIT! 106 | page = self.pages[id] 107 | page[0] = 0 108 | page[1] += 1 109 | self.pages[id] = page 110 | reward = POS_REW #pos reward for hit 111 | else: 112 | key = list(self.pages.keys())[action] 113 | self.pages.pop(key) #Remove page 'action' 114 | self.pages[id] = [0, self.NT[id]] #Add page 'id' 115 | reward = NEG_REW #neg reward for no hit 116 | 117 | return reward, hit 118 | 119 | if __name__ == "__main__": 120 | env = CacheEnv() 121 | env.reset() 122 | -------------------------------------------------------------------------------- /figs/.ignore: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /figs/IJEATRLFramework-DQN(1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/figs/IJEATRLFramework-DQN(1).png -------------------------------------------------------------------------------- /figs/comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/figs/comparison.png -------------------------------------------------------------------------------- /lfu.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | from environment import CacheEnv 3 | from collections import defaultdict 4 | 5 | 6 | def lfu_policy(s): 7 | least_frequent = 100000000 8 | action = -1 9 | for key in s.keys(): 10 | cur = s[key][1] 11 | if cur < least_frequent: 12 | action = key 13 | least_frequent = cur 14 | return action 15 | 16 | if __name__ == "__main__": 17 | l = 20 18 | trials = 50 19 | results = defaultdict(list) 20 | debug = False 21 | 22 | for l in range(1, 10): 23 | for trial in range(trials): 24 | env = CacheEnv(eps_len=l) 25 | s = env.reset() 26 | done = env.done 27 | if debug: 28 | print("Start: ", env.pages) 29 | while not done: 30 | a = lfu_policy(s) 31 | s, r, done, observation = env.step(a) 32 | if debug: 33 | print(f">> Request: {env.new_page_id}") 34 | print(f"Replace: {a}") 35 | print(observation) 36 | print(env.pages, f"reward: {r}\n") 37 | print(f"Total hits: {env.total_hits}") 38 | percentage = 100 * env.total_hits / l 39 | results[l].append(percentage) 40 | 41 | with open("results/lfu.pkl", "wb") as handle: 42 | pickle.dump(results, handle) 43 | -------------------------------------------------------------------------------- /lru.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | from environment import CacheEnv 3 | from collections import defaultdict 4 | 5 | 6 | def lru_policy(s): 7 | least_recent = -1 8 | action = -1 9 | for key in s.keys(): 10 | cur = s[key][0] 11 | if cur > least_recent: 12 | action = key 13 | least_recent = cur 14 | return action 15 | 16 | if __name__ == "__main__": 17 | l = 20 18 | trials = 50 19 | results = defaultdict(list) 20 | debug = False 21 | 22 | for l in range(1, 10): 23 | for trial in range(trials): 24 | env = CacheEnv(eps_len=l) 25 | s = env.reset() 26 | done = env.done 27 | if debug: 28 | print("Start: ", env.pages) 29 | while not done: 30 | a = lru_policy(s) 31 | s, r, done, observation = env.step(a) 32 | if debug: 33 | print(f">> Request: {env.new_page_id}") 34 | print(f"Replace: {a}") 35 | print(observation) 36 | print(env.pages, f"reward: {r}\n") 37 | print(f"Total hits: {env.total_hits}") 38 | percentage = 100 * env.total_hits / l 39 | results[l].append(percentage) 40 | 41 | with open("results/lru.pkl", "wb") as handle: 42 | pickle.dump(results, handle) 43 | -------------------------------------------------------------------------------- /notebooks/.ipynb_checkpoints/DQN-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Imports" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 51, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import torch\n", 17 | "import torch.nn as nn\n", 18 | "import torch.nn.functional as F\n", 19 | "import numpy as np\n", 20 | "import gym\n", 21 | "import time\n", 22 | "# from environment import CacheEnv\n", 23 | "from e2 import CacheEnv\n", 24 | "# from environment import scheduler" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 52, 30 | "metadata": {}, 31 | "outputs": [ 32 | { 33 | "name": "stdout", 34 | "output_type": "stream", 35 | "text": [ 36 | "\n", 37 | "Cache limit: 10\n", 38 | "Total Pages: 20\n" 39 | ] 40 | } 41 | ], 42 | "source": [ 43 | "# env vars\n", 44 | "EPS_LEN = 100\n", 45 | "# EPS_LEN = 500\n", 46 | "N_PAGES = 20\n", 47 | "CACHE_LIMIT = 10 \n", 48 | "\n", 49 | "env = CacheEnv(\n", 50 | " eps_len=EPS_LEN, \n", 51 | " n_pages=N_PAGES, \n", 52 | " limit=CACHE_LIMIT\n", 53 | " )\n", 54 | "\n", 55 | "# dqn vars\n", 56 | "# N_EPS = 60000\n", 57 | "N_EPS = 5000\n", 58 | "BATCH_SIZE = 32\n", 59 | "LR_adam = 3e-3 # learning rate for Adam (use adam, sgd is slow even though more \"stable\")\n", 60 | "LR_sgd = 5e-1 # learning rate for SGD (pretty high, determined using other notebook)\n", 61 | "EPSILON = 0.9 # greedy policy\n", 62 | "GAMMA = 0.9 # reward discount\n", 63 | "TARGET_REPLACE_ITER = 2000 # target update frequency\n", 64 | "MEMORY_CAPACITY = 20000\n", 65 | "\n", 66 | "s = env.reset()\n", 67 | "N_ACTIONS = env.action_space_n\n", 68 | "STATE_SHAPE = (CACHE_LIMIT, 2)\n", 69 | "N_STATES = STATE_SHAPE[0]*STATE_SHAPE[1]" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 53, 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | "10\n", 82 | "20\n" 83 | ] 84 | } 85 | ], 86 | "source": [ 87 | "print(N_ACTIONS)\n", 88 | "print(N_STATES)" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 54, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "class Net(nn.Module):\n", 98 | " def __init__(self, ):\n", 99 | " super(Net, self).__init__()\n", 100 | " input_size = N_STATES\n", 101 | " h_dim = 50\n", 102 | "# h_dim = 30\n", 103 | " self.fc1 = nn.Linear(input_size, h_dim)\n", 104 | " self.fc2 = nn.Linear(h_dim, h_dim//4)\n", 105 | " self.fc3 = nn.Linear(h_dim//4, h_dim)\n", 106 | " self.out = nn.Linear(h_dim, N_ACTIONS)\n", 107 | "\n", 108 | " def forward(self, x):\n", 109 | "# bs = x.shape[0]\n", 110 | " x = F.relu(self.fc1(x))\n", 111 | " x = F.relu(self.fc2(x))\n", 112 | " x = F.relu(self.fc3(x))\n", 113 | " x = self.out(x)\n", 114 | " return x" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 55, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "class DQN(object):\n", 124 | " def __init__(self):\n", 125 | " self.eval_net, self.target_net = Net().cuda(), Net().cuda()\n", 126 | " self.learn_step_counter = 0 # for target updating\n", 127 | " self.memory_counter = 0 # for storing memory\n", 128 | " self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2)) # initialize memory\n", 129 | " self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR_adam)\n", 130 | "# self.optimizer = torch.optim.SGD(self.eval_net.parameters(), lr=LR_sgd)\n", 131 | " self.loss_func = nn.MSELoss()\n", 132 | " self.PATH = 'model/'\n", 133 | "\n", 134 | " def choose_action(self, x):\n", 135 | "# x = torch.unsqueeze(torch.FloatTensor(x), 0).cuda()\n", 136 | " x = torch.FloatTensor(x).cuda()\n", 137 | " # input only one sample\n", 138 | " if np.random.uniform() < EPSILON: # greedy\n", 139 | " actions_value = self.eval_net(x).detach().cpu()\n", 140 | " action = torch.argmax(actions_value).data.numpy()\n", 141 | " else: # random\n", 142 | " action = np.random.randint(0, N_ACTIONS)\n", 143 | " return action\n", 144 | "\n", 145 | " def store_transition(self, s, a, r, s_):\n", 146 | " transition = np.hstack((s, [a, r], s_))\n", 147 | " # replace the old memory with new memory\n", 148 | " index = self.memory_counter % MEMORY_CAPACITY\n", 149 | " self.memory[index, :] = transition\n", 150 | " self.memory_counter += 1\n", 151 | "\n", 152 | " def learn(self):\n", 153 | " # target parameter update\n", 154 | " if self.learn_step_counter % TARGET_REPLACE_ITER == 0:\n", 155 | " self.target_net.load_state_dict(self.eval_net.state_dict())\n", 156 | " self.learn_step_counter += 1\n", 157 | "\n", 158 | " # sample batch transitions\n", 159 | " sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)\n", 160 | " b_memory = self.memory[sample_index, :]\n", 161 | " b_s = torch.FloatTensor(b_memory[:, :N_STATES]).cuda()\n", 162 | " b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int)).cuda()\n", 163 | " b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2]).cuda()\n", 164 | " b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:]).cuda()\n", 165 | "\n", 166 | " # q_eval w.r.t the action in experience\n", 167 | " q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1) \n", 168 | "# q_eval = self.eval_net(b_s) # shape (batch, 1) \n", 169 | " \n", 170 | " q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\n", 171 | "# q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) # shape (batch, 1)\n", 172 | " q_target = b_r + GAMMA * q_next.argmax(dim=1).view(BATCH_SIZE, 1) # shape (batch, 1)\n", 173 | " \n", 174 | " self.q_target = q_target\n", 175 | " self.q_eval = q_eval\n", 176 | " loss = self.loss_func(q_eval, q_target)\n", 177 | "\n", 178 | " self.optimizer.zero_grad()\n", 179 | " loss.backward()\n", 180 | " self.optimizer.step()\n", 181 | " \n", 182 | " def save_model(self, eval_name = 'eval_net', train_name = 'train_net'):\n", 183 | " torch.save(self.eval_net.state_dict(), self.PATH + str(N_STATES) + eval_name)\n", 184 | " torch.save(self.target_net.state_dict(), self.PATH + str(N_STATES)+ train_name)\n", 185 | " \n", 186 | " def load_model(self, eval_name = 'eval_net.m', train_name = 'train_net.m'):\n", 187 | " self.eval_net.load_state_dict(torch.load(self.PATH + str(N_STATES) + eval_name))\n", 188 | " self.target_net.load_state_dict(torch.load(self.PATH + str(N_STATES) + train_name))" 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "execution_count": null, 194 | "metadata": { 195 | "scrolled": true 196 | }, 197 | "outputs": [ 198 | { 199 | "name": "stdout", 200 | "output_type": "stream", 201 | "text": [ 202 | "Start\n", 203 | "\n", 204 | "Collecting experience...\n" 205 | ] 206 | } 207 | ], 208 | "source": [ 209 | "start = time.time()\n", 210 | "dqn = DQN()\n", 211 | "# dqn.load_model()\n", 212 | "print(\"Start\")\n", 213 | "print('\\nCollecting experience...')\n", 214 | "history = []\n", 215 | "for i_episode in range(N_EPS):\n", 216 | " s = env.reset()\n", 217 | " ep_r = 0\n", 218 | " n = 0\n", 219 | " while True:\n", 220 | " a = dqn.choose_action(s)\n", 221 | " s_, r, done, info = env.step(a)\n", 222 | " dqn.store_transition(s, a, r, s_)\n", 223 | " ep_r += r\n", 224 | " n += 1\n", 225 | " \n", 226 | " if dqn.memory_counter > MEMORY_CAPACITY:\n", 227 | " dqn.learn()\n", 228 | " \n", 229 | " if done:\n", 230 | " history.append(ep_r)\n", 231 | " if (dqn.memory_counter > MEMORY_CAPACITY) and i_episode%100==0:\n", 232 | " print('Ep: ', i_episode, '| Ep_r: ', ep_r, f'Ran for: {n} timesteps')\n", 233 | " break\n", 234 | "\n", 235 | " s = s_\n", 236 | "end = time.time()" 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": null, 242 | "metadata": {}, 243 | "outputs": [], 244 | "source": [ 245 | "4700" 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": 14, 251 | "metadata": {}, 252 | "outputs": [ 253 | { 254 | "data": { 255 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlkAAAEyCAYAAADJI8VDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzsnXe4HLXVh3/a29x7b1xsg427jQ3Gplcb0wklCQQIJQSSECDhcwgEQieEEEINBAi9hVCCwbiCbWxwt3Gv1733cvvq+2N3djUz0kjTdmfv6uXh8d4pkmZGI5055+gcQimFRqPRaDQajSZYYtlugEaj0Wg0Gk1dRAtZGo1Go9FoNCGghSyNRqPRaDSaENBClkaj0Wg0Gk0IaCFLo9FoNBqNJgS0kKXRaDQajUYTAlrI0mg0Go1GowkBLWRpNBqNRqPRhIAWsjQajUaj0WhCoDDbDQCAVq1a0dLS0mw3Q6PRaDQajUbKnDlzdlJKW8uOi4SQVVpaitmzZ2e7GRqNRqPRaDRSCCHrVI7T5kKNRqPRaDSaENBClkaj0Wg0Gk0IaCFLo9FoNBqNJgS0kKXRaDQajUYTAlrI0mg0Go1GowkBLWRpNBqNRqPRhIAWsjQajUaj0WhCQAtZGo1Go9FoNCGghSyNRqPRaDSaENBClkajyRqLNu3DzoOV2W6GRqPRhIIWsjQaTdY475lpOOepKdluhkaj0YSCFrI0Gk1W2XWoKttN0NRBtu6rwH/mbMx2MzR5TiQSRGs0Go1GEyRXvfI9Vm0/iLN7t0WTekXZbo4mT9GaLI1Go9Hg7e/X4avFW7PdjMDYvr8CABCP0yy3RJPPaE2WRqPRaPDHjxcBAMoeG5XllgQDIQQAQLWMpckiWpOl0Wg0mjpHUsbSaLKKFrI0Go3GA4MfGo+HxyzJdjM0Gk2E0UKWRqPReGDnwSq8PHVttpuhEWAosrS1UJNNtJCl0Wg0mjpH2idLi1ma7KGFLI1Go9FoNJoQ0EKWRqPRaOosWo+lySZayNJoNBpNnUMvLtREAS1kaTQajabOYYRw0C5ZmmyihSyNRqPR1EGSju/aYKjJIlrI0mg0Go1GowkBLWRpNJo6Q1VNHJf/cwbmrt+T7aZoooJWZGmyiBayNBpNnWHNzoOYuXY3/vDRD9luiibLpHyystsMTZ6jhSyNRqPR1DmM1YVx7fmuySJayNJoNHWOfHN23nGgEqWjx+Ddmeuz3ZTI4UbG2ldeHV5DNHmJFrI0Gk2dgeRpdKR1uw4BAD6cvSHLLYkObs2FS7fsR/8/j8NHczaG1iZN/qGFLI1Go9HUWVRzFy7buh8AMGXljjCbo8kztJCl0eQw+w5XY8bqXdluhkaToryqFt+syL6gYmg1tUuWJptoIUujyWF+/vos/Pjl71BeVZvtpkSKfJtYSYSspPd8sgjXvDoTK7YdyGo73EZ8z1dTsyZctJCl0eQwS7ckTBy1+SZVaLhEoRes2XkQAHCgoibLLUmguggi3xZLaDKDFrI0mjqA/gY3EyXNTmYQX/CkZdvw6fxNGWxLNDDuiNvvj7zrOhGjNk7x6BdLsf1ARbabEghSIYsQUo8QMpMQsoAQspgQ8ufk9iMJId8TQlYRQt4nhBQnt5ck/16V3F8a7iVoNPmLMYHkn1DhjFbspfn5v2fjtvfmZ7sZWcNtV9BdJ7t8u2on/jllDe7+b90IKKyiyaoEcDqltD+AAQBGEEKGAngcwFOU0u4A9gC4Pnn89QD2JLc/lTxOo9GEgGHiyEV/EtVVX27QwmaUyK64Qojh+K7FJj9c/cr3GLtoS8bqM4LHVtXWjecmFbJogoPJP4uS/1MApwP4T3L76wAuSv6+MPk3kvvPIEQPfRpNGOTy/JHLbdfkDnFtLvTF1JU7cfNbc7PdjJxFySeLEFJACJkPYDuA8QBWA9hLKTU8GzcC6Jj83RHABgBI7t8HoCWnzJsIIbMJIbN37Mj+cl+NN2rdjmAajSY0oiW4Zldc2bS3PPkrUjdFo0hd0UAqCVmU0lpK6QAAnQAcB6Cn34oppS9RSgdTSge3bt3ab3GaLLB6x0F0u/sLjFmYOVVyvkApxZ5DVfLjMtCWsAiz7bl8X7wQTVtBNJ5CHZmr6wT7DlejpjbueExdM3y5Wl1IKd0LYDKAEwA0I4QUJnd1AmAsX9kEoDMAJPc3BaCjJdZBFm3aBwAYu3hrlltS93h9ehkGPjgea3YclB+M3Fx+HopPVuAlanIRtm/l3psRHjsOVGLf4ezkZ6yqiaP/A+Nw76eLslJ/tlBZXdiaENIs+bs+gLMALEVC2PpR8rBrAHya/P1Z8m8k90+idUXvp9FkiMnLEyb0dbsOOx+Yw29WDjddo0Q0RN64nn5SDHl4AgY+OE75+CCn7qqkBuuz+ZtRUxvHnHW7ucdFo9cEh4omqz2AyYSQhQBmARhPKf0cwP8BuIMQsgoJn6tXkse/AqBlcvsdAEYH32yNRsOi5xENEDXBNXutYd8H1XcjX94hVTfaqpo4xi4K3kpBATw5fgUufWEGFm7cKz6ujjyPQtkBlNKFAAZytq9Bwj/Lur0CwGWBtE6jyXckn3W5aCY0yOVBNGrK+br29W+lqiaOD2ZvwE+O64JYzN3VUgps3HMYizbtx4g+7UJqYd3jqQkr8MLXqwMrjw0OuyyZqWLnwUr7cXWsM+uI7xrf1LF3IlpI5nJjro/WlK9GmAJi2EJQxGSsNJFqWHAjw3OTV+GeTxbhv/PUItdT02+K856ZhpvfmuN4Tl2b3P2ycU+56e8vftiCOev2eC4vlUsypz8N3aOFLI0mS8xZtwf/mrrGVxm5PFhFSh5wSdSaXtdWZFnZezix0vZghXunbUqBvQrO3rncHzPBLW/PxaUvTA+0TF4QZWNbXRHFtJCl0WSJS1+YjofGLHU+SHHujJr5SqPJJqbVhW6DkdZxgVUV0V1YsGEvbn9/PuIuYySqPoeUxquODGlayNJ4pq68BJFGai7UD4GFnR+nr96Jc56agsqa2sDr0fddheDvkZcS3WpE9LN15oY3ZuPjeZu4/lROGHeV0vyaO7SQpdFEELcf07k4ZoU90N77ySIs33YA62VhMFwwd/0elI4egy37KgIrUyPHrXbJ5JOViy9HHYQrvOaB0lALWRpNBFFedh5uM0IlbJ+LMMw+r08vAwDMWBPN+Moqd3TjnsMoHT0GpaPH4INZG4THzS7bjdLRY7Bs636PrYnGDGq9J/E4RVWNOOp4FMyFpaPH4Mlxy5WOraqJ51SydSr8w3Kcx0vauOcwditky8gUWsjSaOoA+mvdjMrt2Hu4yrUpsSbph1IQgYmYxU1rFmzYl/r94jfiJfpf/JCIkTRt5U6PrQrBXOgh5pVVAPnl23Nw9D1fBtiqYDF8nZ6ZtEp6bGVNLY6+50s8NnZZ2M3yDbX94JMK9eCx/1zw7Lf423g1ATUTaCFLo4kgqnN4LgtX4bRdXdwY8MB4XPfaLFelG3nXCguiJWS5gZ28yquD91eLGlb/7K8Wb/Nd5o4Dla59klQxIqOrjAHlVYnn995MsUYyLNy+vtz3nXeNPl8tSil31WK20EKWxjcR+6ivE7gWQHJQ2Aqnye5Knb7andmvpjZRfmGsbgydh6vCFLK8DQzb91dgxbYD3H2qT9esBeGf5WdBxJCHJ2DwQxOUjt17uCqV51WF6qSQVVQg72PGOBHGGBx4kamYfmpPUWUM3LD7MMp2HrJVE6U5qW6MFBqNJufIxVVcKXNhREdOt7e0PFQhy9vzPf7RiTj7qSmmbX4mTZHp8GBFjfdCXXDJC9Nx3jPTlI+vTgryRQ6R7feVV2Pi0m2pOxwhmUIIK1w59Qw3WqiT/jIZp/71a3M9NFr3I6JDhUajcUMuBu4Lp8X24TXIemriCS1DQcQ0WawQMm/9HsxQ1NAZpqkoEYTsbRKsLPsM2eWARcgKS+Zfs+OQ/CCG6pRJWtzHfvXOXFz/+mxsTa5yjYKzvoxMLS6klEbqfkRrpNDkFLk4secKERojcpOQvmarU+bCzDygsYu24ttV6o7nFBQXPz8dP375O/ExIb+289YbSX+j0YnZoJmUAiWFBQDCNpV6x1j56GQuXJcMS2L41IVxp2WCits62ThZbo53S9RmJS1kaTQRxO1EaBy/fOsBPD52WU6Y4nKgiTZqXDglB8HNb83BT//1fWYqY6CU4tVv1/otJZC2+EXknWX9SIzKh42hXSx2WFxhtDWefIn8tv2Fr1djVtluV+f4EYKcxqfJy7f7riAqzxLQQpZGU6f4ycvf4YWvVyvlass60ZiDXVGb1IrkooDohtUuTVyZwsvHg/UUQ7iybY/IM1UxF8aSUgSrpauujeOG12dxnewXbdqHG16fLSzv8bHLcNmLM1y1M+7yhhnPjn2GPG3ZS1P85XONGlrI0vgmQh8NdQavEd9rjYEs2OaEQjjm5nCv3DAXup1gwib4JeuZvb5fvTMXfxu/Qrjf7fWZfbJYcyG1hXSw15VdqmuSJmknTVby3/S1ECzfegATlm7H7/+z0Hb87e/Px4Sl/kNXsLjWtgdau3M9OoSDRqMJhVQgv4gJAZkmrKs3HN9d5sbNGCqPXaXpYXWfW96eg8v/adeYfL5wC/4xcWUgdTw8ZglO+suk9AarE7wRSkBwjZl4tBt2J6Luz1m3x7bPMBcWOSyu4JkLnT7MgjSfpQU8t5osd/V4/QhLOL57OjUUCrPdAI1G4x9DqDLMCBGVAUyEMZHzyvRTT2VNLQoISZlujLKipsnKFYwo8mHy8lSzLxkrEFPKmAuz+JZMSy5m+HD2Bhx7RHPTvmqFgLeGmS0lZDH7wv7AcuvAnj7PrmV3koVE5dfUxhGnQHFhWgitjVMUxNJjX4RkLK3J0nhHzzPRwzr4AsDmveWR1GyFaSwkcP56V70fPe4Zix+9OAM1tXFs31+R1iBEVZUVEHXp6qzCVFpQ5h8f1gSt2ufiqVhs4pYYuwwfQUKCN5EFVdrOg5WJwK8Kl3+gQu5Let4z02xpkdjAslQ7vms0mqBJCRfJwcUYz+eu34Nhj03Ch3M2ZqVdToQp+MlKdiMjzd+wF/d9thjHPTIRhyoTg3ltBIXWfOJgZY0t0rcIq39WWhOT2WcYZOgCQ6BKRXzPgu5Gps2llGLRpn0Y/NAE3PjGHGa7+Jy+94+T1rtsayIbgBEjDEj7SgKJZ6zjZGk0OcS6XYewVnFAzzYxi5C1MpmeZLbL5dm5inoCYXcT7LglCafhw1WJAJZRU2S5mVNUrj3qMuQV/5xhi/QtwhS2gTKr3IJvlnI7HI9TOJBwNFmZRtbOt75bl4p0P2XFDve5DiX7hz46MfW71hILLToilhayNAEQpa8GEVNW7DB9+bjhlCe+xmmKA3q2sH7RGl+ZUZ4ss9k0r0IS4SydjxJRft5+sA4xizfvVz7XqnExHl2m75WqYC8KMcHC88lydHz3InYITlF1fLc+ozDvt7EgBUiOKxGakrSQpckLfvbqTJz3zNRsNyN0Uposy3Yvg+yM1btcByhUZd2uQ/h0/ubAy1V1ZlZxXK/mpJzxurJKk0WEj0rtGa7afhBf/LCFu+/VaWtxsFItB2KQcrnxnqdXF3qXKryaTeUmefMR45kQEkqrYF20q9a0uiFaIRz06kJN3rDzYFVoZX+3Zhe27a/AhQM6BlqucsZ6mAdbQ9PiZ1w3UrOUPTbKtu9/CzajRcNiDO/eylPZI5+eGmhak/FLtqEgBnRoVh9Achl38hvS6yoyp/ZFTZH1/drMmoM/mrMRXVo2wJDSFhmt14s8YI6Tld5ufYaios/82zcA+O/BA58vwdIt+/HEZf1dtcPxOEMr7SAnGKuIeaknuTkCHcry2pdlQpC13Hs/WWQ7JigjSI3NJyuYcoNAa7I0ntEf82mufOk73Pbe/Ky2Yc66Pdi0txxA+tmoDNgvfL0a013kxwOAX787z3O6l1enrQ08b9yNb8zGz/8929QnrYsAWFQ0UeVMG623T+X8ycu345Vpa6XHBcGDny8JrKyP58kXSdz54QLXEcL94GfOFAcm9VEow75ytewKgTq+Gz5ZinGyrLBR4UV9WaYNkl2P0zsyTWG8cfN4arRPlkaj8YKy2psCl74wnfnTPESt23UYP3phOvZzlkg/PnYZfpLB/HgPBCgQeEVlwuOZCw1UfLKue21WoMKPCkHIDbe/v8DUf56bvCqAUoPBi1ZS9KismhivE7OqcBPkogyrxpptA3uPxi7agt++N892/iXPp8cKr6Zv2VmZ/AivtfhkaU2WRqNRwqupyzqxzFizC7PX7cGkpduF5wx/bBK+FPie5AqqA7vKxMLTiqXPd9EoB978bh1G/SPavoJ7IpAHM6hJ06zVsuwLpgpx3cqmf2fu+GA+FmzYC4BZXZj8z8rNb83FJxzfx6raOF6fXpaoT1Ch7J7L3iGVdywo3ymzJotGyidLC1kaTR3AOpylVhe6mDo27S3HHzl+E7mI3ClXXobTJBFUnKx7P1nkaqWcjKBiP+WKK0CvP43FX79a7ngMe0/YyxI+X5fzs+qEbhLwnO5vaqUwn//O3ZT6zZoL3XLfZ4sB+NBkSc2FnopVLp/F7JOlNVkajUYRr19kmQq0WDp6jHllT5ZRFipdClmp4I/G6S7vbzxOlVeh8SgdPSYV1VolKrYTmRKgDlbWSs2qh6tqXPcf9t4frqrFsxJzJls6dVJlSbaXjh6D79fssm1nJ/Sb3piN0tFj3BRrq+O6f89SODJBylzI1sN1fBePI15f3yA0WU64OduIxwVonyxNHSRKHbqu4XZFkkFYK4Z4sDFqnKiqiWPLvnLX5bshHS/Mvo1FZQLg3UPjPLeCwdMTV6LPfV9h72HvK1wPVdZiyood6Hv/OExf7W6hgowNuw8HWh4AXPPqTDwzyVkA6vWnr/Db99MLRlQEyPdmbXDVDvZRs7HyRE9wt8Mzmrx8h2NdRtBaHiahPaBBM63JIp7LFDu+O0NpIp3N9v38+INBfugdrqrBjgOV6idESJWlhSyNJoL4HSIMISBKZp/R/12IEx6dlJG6TNoLznSq5pNlP0aW9w4AtuwrxzbLxPO/BQm/mF2HvAtZcUpTkftnrd3juRwrYxZuwUl/mRxYeSyTlomFDgPj3gDAoAfHp35X1diFd0oT4TrckX5YZz01xVQWj68dBCnR3G34SDm2IoR3kafJ4uGYiFntG8l+HgVufXsujntkonC/DFVZ6OLnpmPIwxMU2qR2PzKJFrI0mgjidjy2ChJpn6zwWbH1oNJx7ifHYOAN9ir3hRWk0uEgqOlfHic8OgnHCyYeP1TVxDE1ufS9VkF7uHTLfmzcY9dQWfvKre/MtR2zbGswfmI92jV2dTybg+7+/y1O/TbMXUH2Z6+LSqx8uWgrLnzuW0xeJl5UkqwwcFIaVdPqQnd4X11IMcFhIY2KttfZNy29c3kyPZhqeRFSZGkhS+OdCClJcpow/KcyqcE6/9lp8oMihpq5kKPJSv6bDT+0J8etwLz1CY3JvvJqfDp/k+PxI5+eihMfN2uovlq8FbsUgvLe/v4C7w1l6Nq6kedzjWtlYXMPqiI63MsjjDlM3hs4Aq25PpcVKkgKtYyM5VWuEJoLJQWyp63afhDfWfzVVO7vzoOV+GrxVpRX1eK/c83x2by8YcY5UVpdqCO+azQRxO0QYffJotwdQX29u+GrxVvRMRmJPWy4WivFbVa4k4+CuTAs1uxMawxfn7EOr89Yh+5tGqF3h6ZK5x+srMEv3pxj2kZIuAK5H+dnQ5gau2gLliRXYMYpdX3vhf7tHtrmNHnLUtuEcZvTcbLSdfOuK4yI7+yzNaLiz7nnTLRsVGK0RFqG4Y93+eBO+GA2Pwium+eUMhdGR8bSmiyNJtsYY8j+imo8PGYJ1xfFLUFqWh79cin2+PAl+sWbc0yrfzKBKMo3b79KGVuSDtPGxBJ07kKViYS3Uq/cReR8Xp8Iey4K4jbd/NbcVIRwL4KR6BQvTXNOdSNrR/BiVsrxHd4FiyBzF97xQVoD6mYI+uKHrfbyk+fPKlP3P0xrsqKDFrI0/olSj85BjIHhb+NW4OWpa/HRXHlaE1EZBqIBzosa/Z/frDH5x3gmA9ofnkA1ieMr49dcqBLx3Q1GcXsPV+H29+dzwz3wYnP96dPFyv5TMiHAD9+s2IFnJq7k7nt6wkpMWeG8Kk8VSv37KzI7LGXLS3a6hQUSKcd1l1FoTyoYqeTZZip34V4mxZCbDxGn8CZG6BK1NiX+1ZosjUZjoyqZxiUILVTKQdvteYLtldXutGub9pbj3Kenult2DeD16WX448c/uDpHhGFC+fsE++Tv1vE9dV5yW1DBSNN1Jcp7dtIqfDxvE979fr3tGF6WnyVb9uNnr8y0tY8Hz5wlM3Gpcs2rM/Hk+BW27ZRSPDVhBX726kzOWe6JB+iT5cl07nC/pD5MIeRNTK8uZMyFbssIMBgpewsybVK/7b15qXscVL8OAqmQRQjpTAiZTAhZQghZTAi5Lbn9fkLIJkLI/OT/5zLn/IEQsooQspwQck6YF6DR5DpBmBGsZRgDnNeiF2/eZ/rb7YT02rS1WLJlPz6Zl3bOPqAQjPO+zxbjbY6AoYpyWh2FGYCvyTLMha6axQQxda7LqVhRm2syOJt9On+T0lJ6g7+OswtegPd8iEH6FFoXaLIT86/fnYe7OcK+09Qtndh5Qolzgc7lwRrx3UEAFOyjlOLExyfZtimZryXXE5R5VFX7/un8zZEKWWOgosmqAXAnpbQXgKEAbiWE9Erue4pSOiD5/xcAkNx3JYDeAEYAeJ4QUhBC2zVZJlNRxfOFIG+nX23YmzPWmf72bFJwMSle/Yr7JNX97v/KpPlSqe2xL5cJ40JVVNeidPQYvPP9em7/Tguv3m6ISGtgXafw8BdLbceIgr6yiayd7jc/7pe767jn40U27eQD/3OfBPsJSSocEV5uu9Dx3VZ2esv/FmzGOy6FfZm5kNcOt9djfV5+3/M4tb/bR/7hCxz5hy+kvlBcB3tT2f7a5kegjpAiSy5kUUq3UErnJn8fALAUQEeHUy4E8B6ltJJSuhbAKgDHBdFYjcYLURcGra1THSBGf7QwXYbAvySMXHZuJx9Vpq50F8X8xW9WY39FDVfz5TRAv/jNauE+Q4B4bvIqrmBpLEpwO4EYj1Q0KaqtduRvZ/O2OcGd5JXOTFBdG8ehKrs28tVv17oohalbctGixZ1+BRPV+nnsr6gWnucmobLKO77zQGVK4Dewai3TPlkE+8qTi1NcXJZNA86Uv3bnIedzJWUrJoIQl8/xr5JlTEidEyFHYVc+WYSQUgADARifnL8ihCwkhLxKCGme3NYRAJv3YCOchTKNJlQiLmMpt2/XwUrsYxxLndKLBG9BShf4CEfL4hdrhHQV3vpunW2b08S5duchVxOrk0mR5x/lhFGSSDhTSehdLaiU1XA5TS5eQ1kY/OqduYH2K5W6bWmYAnyZvZT02rdlwvcuJtNkuaxw097EtX84J12fVaA2zIVrdhzEpS/MENctuFrrVhVfQ+MyZR8aYYSLGfDAeMf9aZ+swKv2jLKQRQhpBOAjAL+llO4H8AKAbgAGANgC4Ek3FRNCbiKEzCaEzN6xI5iVJ3WdjXsOh577zQtR+mrIbXjmnPTvYx+agCEPT0BFdS0WbnRO4yE0S3kc+NjJtaJafbWPKl4ipLuZtKav2onT/vo1/jNHfeWmk0Dh1RQi+rpPCVkOxYq0YKx2w/H5+pzzvlpsj9i/fKtaJG4vUFBbGiYK931Y6Pju8Rl+vZwf5TwmWb7p9fazj73a0oGMD4HKGtZk7KZsH+ZHrk9W+h741WR5wQi1EqUZSUnIIoQUISFgvU0p/S8AUEq3UUprKaVxAC8jbRLcBKAzc3qn5DYTlNKXKKWDKaWDW7du7eca8oYTH5+csdxvuci89XtMCWANIq7Isk0aTkJrVU0cf/p0ES549lvHMmtTmhGXbVGYkLJ1P79evt2UI47rY8T8Zu/iqh2JQJ7PunC4dpqEvYZwEGuy5OeKHNxVZYUwNAvn/H2K/CABXloTpzQwZZbXckTnyUJkeO0zbD+0abIUBRnxe23+283HA9fx3WNZPLycfsaTiaCoOaXJIgnR9BUASymlf2O2t2cOuxjAouTvzwBcSQgpIYQcCeAoAMGs39VoHLj4+ek4+YnJ2W5GoPAGi4Ub99m2WQekaSt3YqVivi8V2OJVBs+gBrkDFdX4KKl9uva1WbjwubRw6WbOMr6w1+1yTn2SPt65fNassr+iWnygw3ksKloVv7ngomY2n7te5litto1l6RZ7zDBVU5kqovOs5sKJS7dhvaS/vTdrg/TZs8+9xiJVcVfAcrfxy7Zud9PHePfVtLpQWpKkHqNMD+dGybqioskaDuBqAKdbwjX8hRDyAyFkIYDTANwOAJTSxQA+ALAEwFgAt1JKg7cv1EEqa2rxzMSVroKvaczwoqVnw/F918FKvDRltVLd1pVlvH0sKjFgXpm2Fmc9NcV2vnXwUb037GHOEznFC1+vNvmO+eGPHy/CnR8uMGmwDPgTDL8c1UCc7PlOwiS7664P0wsQDnMcw+es25266/E4xTcrdtiCc6o8Bv8rycJ/D9y4M1z2otiHCBA76vMuw3CIHvn0VPs5gsv+bs0uTBaY/hzbJSjQ2seuf302zv77N9x2sL9lseTM5kK+ea+owFucLFtieRcmPn6cLMZcmEWpPkqaLGnuQkrpNPCFyS8cznkYwMM+2pWXvD69DE+OX4HCghh+eWq3bDdH44PffbgAk5fvwHFHtsSAzs2UzjHGpH9NXeMopPAEBq9f62skK4gMVAfMb1ftwuNjl6X+fuSLZQ5HyzEc4nmr2pxkDkrNA+3zk8UrCgFg7KKtmLxsO9o2SeRdS2iyHMyFzL7tB9Im6r9+ZY8LdekLM3Bkq4apNl+TDMxZ9tgobnkigliuHzbX/3t2qOVTyu/rj36xDI//qJ/wHB5vzFiHN2asw0e/HIYfNu5FwxK1VL6i8nj5AyuYIL7GM/5k3iZMWpb2b5M5m5vNhWYpyFj0wCuCXRgiqsGmyXKhpZ5VttvxOL/MtgSHAAAgAElEQVT9LXXdERKYvKATREeIQ5UJDVZ5CI7FYRAx64OQbLTTSC9R68H7kxV8eF9kslVMbmCjhfvh1+/Ow18v62dzzPVL6lK5Wj6RrsOOsVJLxM1vmRMnU+qsXWKFornrE1q2v361HJ8tsLmfmhDFulKJvyVaXahKJjS6QWkwAWDV9oO2baKPCSenc5kf3qUvTAcAPCEQ0lRh38sqzrMyWm4kRU5tlzwWtq9VW3yyrH+z3PPJIuE+XtmAO0H+mUnO9zWboXNyKuK7RqNxT3p8kb/sbv1oePPJ6I+8paJRNU3LNC3/W7AZ01ftCvyj05i4eGP/Lk7Sai/j+obdfL8Zp2vmyTvPTl6FnQed4/hUCpJ/89Ig/eG/C03HqGi7nI7wo1m46z8L5AdlAoHw27pRsfAUWbwnpmhfx7F9v6JK3W1BVq/JJ8sipBvuEWwZfLcDtbr9mvhmlu3GN0lTeFbNhVmr2Y4WsjR1nmy86ymnTcvb/uZ363Dsg/xYL6qrv3hfaTPW7OKXKTVFKFWpdFxRQSywL8iLn084uBvFiTRAQfDGjDLbNpnju9evdHZ1GBvhnlfXuzPN8Zj89mM/k94Hs90nLQ+DOOW/JfWLM2eUUXn2FZyPF7HJTv0dta4u5Pmgrhd8NMjKHtatpZImi20D73X/3YfZF8gjpMjSQpbGP1Hq0FHDemvu/WSRTQPjVoBQdeLmIRPkxNGxE/4gTsvQiwqIr7axzEua4AxH2mtfm6V0Hts61X7JM7ls2F2OpzgJjw28+kfd/9ni1G82wr3xXJzmW785CrOpWQDgeD9VoRRcaSWIfqdahFBYYn6XV3GELEq578+CDftQOnqMLV8or1yrydhvUvl9h9Pm3QbFBUrlsDlIefesuCCG6tq474+CtEuW+4cbpSlJ+2Rp6jxhxAeSV5qoU0Wz0/f+cSYnaBlBaIsOVFSjqCDmwkRCcdwjE9GiodgsU1QYc/QR8YLbS92VNNe5GeBFE8sSTjgAA6/+USLfsJRPVoh9NRMylpNW5umJK/2XL9gexIee8rsgOJAVYrmaLAp0vdu+Xmzs4q0AgMnL+CsdzeZCc+W7OSZzHgUCKXTldnOYFxVBnKc9YykujOGoP36p1C4n9ldUo7yq1tOz1T5ZGkfC7B7VtXGs2WF3KNUES8pcKNpvGcyoiyCLQXy1971/HM5/Zpry8ZQmBnSeM7JBUSwWuFbT7WDJOrCr3k8vGqKghUmvgSqtOAZQzYCUVSGZgP0iWl0YRFykjXvUwk+orNRjVxUalAliZhnPXtTXzY7v5nLnc0Kb8Cgs4E/1ByrSWilK/a9gBYIz7W/ZV4Hhj3sLvh0hGSu/hax4nOJ7gS9LXeXhMUtx+pPfBJOeJ0eWF2bFJ8tQdQtedtUggF7jZDmdb7By+0G7sCc4lrdaikeQYxulVFlItXKoqgaLN4s1USzWZfEqiBzYvUIpMLtst3KyZy9kIoSDqmbFKxT8j5EgJtV/KGraxH0vvZ0n0N74Bj+8RTyl9RbVl/7ttX8UCb7M2EC6FO4Fcd5YtPNAcH1ApT+15GjXIyRj5beQ9dLUNbjipe8wdWX+5E78LilU7jkU3FJrjZ1UolLB625f1aNetp8BZMbqXdh5MB38ULVelcE9TmmgavraOOVOPJv2lkvbvfewev/2osliY2MFwcJNe/GjF2c4Jv32S7Z9soIgocmyEwXzENuNXCUip85jxaa95ansDV61RIUF/LLNmizqKvF5p+b1udsz/Si41xaB/mCQ10LWym0J08cWTr67bJL7Q6FGpsmyTniiVVO8bW7iZFlL/WD2RlzI5D20TgYHKmq4IQ1UzAivTy8L1KeoupZyr/XExycFYtYw8CJk8VIbOdfhPHvJon6r4nQl2YxbFBQJszrPXJjJNsi3s11K9rrGJWMFAJz1VCJHpKomy3qPCmP8qf4g48RO4c5cKLoPhUGtfkkiK413bdERsfJcyEpNCBR4esJK5Rxk789aH2r2eTfMWL0LXyUdJ7NFlDo0D95gEI9TPDNxpWl1jVv+t2CzMP8ar07TgGbZLwp+yZtQBOOlMqzzNW+cPO+ZaSlnXAMVLch/523Cf+c6B+N0Q3U8zu1blAarlfFiLnTLht3O5vlMyD+ZMBeGjWBxYUY/TMXZFRhzIXOzZeMjTWmyVOpWQ1VYYt8jt+9VXGDOLxL4f4UFz6k/QoqsPF9dmOxP45Zsw4Sl27B5b7kwNQPL/yUDP7pZERYWP375OwDRaEsu8fWK7Xhy/Aqs2XkIT10xwFMZv353HgBg+UMjbPtSju/My/4XJt2MLWeYi8EtyIjvPHhRu1Xbd7DCnv7GKzW1fHMhAEwSrMTywpeL/H+k+HVcz4SQEKT2L1uItUiZuza3miwZU1YkQnmovNaq12lNjyN8f6n5p5s+knAPsG8XmSa9IjMF8zRnUUoQnd9CVhJjxcbhLKezeXnqmqzWr8JDny/B4NIWGNGnXbabogzvy7MyufqHl9DXLZ/O32yvMzmovTxlDY4tbYF1Ow/h43mbmP3m40WDoF//E+mYrDimZmN+dgqTcMvbczPYEjleNWuEGFrM8G9wnRCyBI7vYfmb7TpoN+OqhHAwabUkTTMWlagIBqqXabVOi5492+YpK3agZ7vGahUgcV2JNpvLDlrAkb0bWpMVYbI55LwxowyTl23Ha9cdl9p2OBnALhP9w2sn/Ne0tfjXtLUoe2xUduJPBURK0xTA3XaavD6ZvxmfcIQwK3HKFwZV0+qICEjGcgzdEBbVtfFIODSr4FV+MaaowIQEh2LqsuN7WPLja9+W2dsguMlPT0ivTvRyq5U0WYplWXOIfr+Wn8zZet9emqL+oS8SfrbuD9bHWfZseeEpojRq5LdPVrKTGJOWn6/J6at3onT0GGUH1j99uhiTl+fPqsZswj7W0tFjTD5sgQQx5PpSOZ9jnfBUloUbBB0mIUiCFLxrasUhHKKGXwEmE/KP3wTTUUB0m+KCaOp+4X3QiKphE7uHJdCqllutGGLETzszpRiVjSlcc2GEBo68E7JqauMpR9e034z/J/LqtDIAwDyBI3RUqAMfs775x8SV0tV/Trz4zWqUjh6T+purgZKmrzH/LTPlsPUF6ZPlN1VLmCQ0WdluhRrezYWJC8zEU6iqie6zVqWiupY7iFFq194Egde5wctr9dCYpdJjgg6wy9PUqRKnNDMqI6kmS/tkRYpj/jQWrRqVYMYfzghV4Ni+vwKtG5cov6R1YXl1VLHeWXYVjZeX8fnJq+R1Sh6ndXdcsGzKb+BFWTuCDqoZZDDKTXvLIzVYOuFVWDWuTmuy1BCtXo3HKbbtCyYMBgv3g0bhWYWlyVI2F2bgWYehOeQhq6WYt5oxQsNG3miyNuw+jAMV1aiupba4WME8j3RXWLPjII57ZKIrR/byLDvdRwlKKaat3Bl4wEdTHcaPsMyF0nPUzIV+Hd8zzayy4DS51742C5OXB7eKMExqPUbiNh5lUGZWp1LqgpAlopZSnPzE5MDL5ZkLlZ5VSPKH6sd4mJkDDPZX1EjzGAaB7JKLC7VPViQ46S+TTUEYgWDNhSwbkjmwpq7cqXxOplf+BHnJQc/5n87fjKte+R7HPTwxkPJ4qWPcxKaxst8SpoD35GSDIU+TpdoDAo71F2mC1rSFhXdNVtJcqDVZviivCucjdTMn/ZjKszoUwKplHqr9pC4967W7Djnu52myovQhmjdCFmB2TASYiTaQ5+GvkAi7xmSc5dvMgV637a/ANBcCqwxWAHL7MnJV5DwfEWkbLOUKIlnzVxd6j/iuCQfPH0nJR5kJ00tVBrQb2eJwSELWR3Ps5kmVu/i7DxcE3paynYcwZ52apjjoBOZWMinD3PvJIsf9vOCn0RGx8kzIspJexm/+219prPAW4Fr7LPOfORuxPeCluSKsmpoLn/0WV73yvefynG6t25dRNVGy7HlaBSpxnCz7dldCVsT7VV3Ba045g0w8JtUVZ1Hk7F5tHfeHpTnive8qJruK6uDv9al//RpvfrdO6diwNVlBp87xA9dcGJ3m5beQZYxsQT8Qq/BmsHzrAeFLEmWNw66Dlfjdhwtw7WuzTNvDmsCtzs5Bx10BGMd3l8++kjN4srfBSL7sVpO182AVN56WX8d3TWbw6gMTtOO7kwCQyyYk2e1RNRe2bVLiuy1z1+/1XUbY+BX6ZYSddcINWsiKMIZgE8wKJnkZ5/x9ilD1mW1z4f6Kaoz+aCEOVdq/CI0UDdsDSmLL8uq0tZi60hwvLOiPJKc8gdaq5qzbjWcnrYSIylr7YM6W/7sPF2DMwi1Yu9PZj8D6uP/vPwsdj2eJkr+BJoFnnyzt+K7EnWcf7bifNRd2al5feFyurFb1y7yQBUFelPVswTcXRqd9eS1kGQQzZ/kNRhjMILtgw15c99pM4YAqGsz/+c1qvDdrA/49vcy2L+2cG6wk+OZ36/DA50tw9Ssz8dKU1XguGRohbCGCvQfWui59YQb+Om6F8FyeJoulvKoWt74jT/livZcHOcItwO9VRQHnBssnSls2CKVcrz5Zxrv19wliwd4NTqEDctknq2e7Jo77WU2WU4LiCMkGoaISc8sPBRH60CvhaLKiRLRbFzJR8lcJqil3fDAfk5fvwDrJigxb/Q4N4L1P5zw1BZv3JlbeeP1qYLV6j3yxDE98tRyA+CvJs5CnGH9KpU5lnyxZuZJ6DH7z7jxss5hLRbGCrHRsJv6iz1fCeuUfGrPE03lBz1VOVqJc1mTJmFmWThvj5C+ktcDBEIuQtBp1c2HeBSNl8RP1O2jYL9AotIcHO0Et33ZAurTWym3vzUPZzkP49FcnOh4nuvw4BbwocazaOzb/mexeW2Ufrk+WB+HPZsJ0OHbckm2uyweAVo1LdJBbC2HdDjfhWsLESZOVy47vbuDlsjOI5bVaITgi5fju8LyjQLRbFzLB+mSZCk6U66bYoBxfgynGRNo516LVcTlofzp/MxZs3IcvftjieJzoKymoKMoU6hHfaxU0WV5apbq6EACKYgTNGhR5qSRS2lqNnaAdiJ3MlkFpYaOOkzk9Sr46uUyUNFlcn6wIaSryWshKEUgMh/RDTQtvfHjahWw7vqtUv+dwdSB1OZm7dh+qSuQn4+BVyLI7vlNlQdg6afEESy/NspsLxccWxIinOrLdp6JI1FbxBj0VOL0jYaV6iRrO5sIMNqQOEyWfrKJCXu7C6KDNhQg+rY5qveazgxkAVa/FzRedymS9ctsBdG7RAPWKCpTL5THowfHCfWHMEbK7YK2TK2R5qNea2snpHhcWEG8myciJFNkncnJGwLOB0yrHfBG6Hc2FERIOwqReUSyUWF0GUVpdyHumUXrMea3JSvvlZPaJ8Ma6wOLkBFOMpUznUg9X1+Ksp6bgt+/ND6H2NJ41WZa/95XXKEdOtpoLg9IGXPScOcWTEV+LR2Es5um5hhwqRxMAQY88TjkUvfTdSXee4qc5WcHRXBihyTdMwhSwgGgJWTyiZBbObyHLgyZLrFFwoxnimQuDFo9EK/Q8FCU5p6omYd4bu3gr5q2XCy/fr9nloRHOX+JuND07D1bi/dkbAMi/bK3mQr6AHK6KoKiAeJKe80Rx4YrIabICxvpRwOLl2pvW9+ALmGUKHbzbozP15jZRErJ4fohREqbzWsgyYB/Iht2H8d+5Gz2Ukn7QssGMay7M8OAfZMYftu0XPz9dWt4BQUwoGU6CqNP9cxKCZPeBzSn32rdrsb/c7pfmNRClKgs37vN0z6h2fLfx06Fdst0EE0Fr0Z0iz3vJj5iL5jUnTVYuXk8UiZCMxR3fI9S8fBey7A7qFz8/HXd8sACUUvywcZ8tDY5s0npjRvp40QAaRQdUp04pa6+XEAOb9tqz2/M4UJEWaqav2olP5qnFiFJFKmQx1/7n/y3BHz/+wXbMm6ZnHljTUjz/9WpP50Wwm2Wdk49qjdvOOCrbzUixjyO0+8Epncq8De6jgOeiUOKkZcnBy4kkUdJk8b4d9OrCiJCOk5WMaA6a8o2JU+D8Z6fZ0uCwz/OlKfbJb9qqnR5XgkV3Rgyjab94c7bScU8y0ddvfmsufvs+3+9L1ERKqa/ox1bzy/4Ku0ZJVWDMNFS7vtsg3iyvOYOTskrVD9FEdOYqZZwjvufgBUWQgggFHOPNT1HKihGdO5UFUo7vnH2s0HPRc99iyz77RPrIF8tcl2stO3VOyCP/2EVb8CuFdC88wmjaAY6wwkM1to/IJLi/vAYfO2q/nF/GXHYej9Pw+lX3No3CKThkCLyt1MxXIqSwUMZJyHKj4bhicOcgmlMniVL8T54fotMK00wTnZZkE857xzrTzd+wF699WwZAzclZFqSTV0TYmqyb35qLzxfyg4C+8PVqR5OUF18OGU6+I14QlSb74BJNIo98sRS/eHO2oyNx1Fm1/aDjqkU/RClOjlty+JFmnFzU/DiaC12Uc8GADjjuyBb+G1QHidL7z5uTiyL0dZDXQpaTwGTd5eaRVdbwg2kacDVZLsp3ROZ0z9n2+Fi+Ri5MMmUeldUiGitemrIGXy3e5lrAjNLSYQCYvtrbSk4ZERpjXZEwF2opy8rfrxhg2/b7c3rk5HOuVxRMWp0cvPSMkY2I7zeceCR3e7MGxbZtUfIZy28hK/kvb2IUCQEqw3OlTJPF28ZsDGKiDnJwDEMeqg5akyUoTiYkye51lH3lVAhrqInSIOYGQvInKKcb+nRsiuHdW5q29e7QJCc1WU64Glvr1qUHitfchX7GjZ8OPYK7/SfH2VcM55S5kBDSmRAymRCyhBCymBByW3J7C0LIeELIyuS/zZPbCSHkH4SQVYSQhYSQQWFfhFecEkSLzEQqc25lMi2MaHzih3DIjNBhsOdQFfZXqK1s8ipobNtfIUyRU6vo7KT6Soq0E34nVKdccG7aUdfI5bk3x+XmUIgR+32JEZLTz5mHmzk+RqKml1bjyFYNQ6/Dq/BdUuhd+OHVeOYxbVHIcXKPUgJrlSuuAXAnpbQXgKEAbiWE9AIwGsBESulRACYm/waAkQCOSv5/E4AXAm91wPAeB/Xh8Fwh9ckyj2bzN+zN+Nf1FS99h0EP2FPY8IQ9r007/pGJuO61Wdx9qrGl/A7yMgHRTQiHnCSksSZXNRwEpM4Jwo9f2tdx/10jekjL4OXHTAgZufecHV9ZF/2WQDz29Wjb2E2TMkom5AuvGqliP0IWp0pKKdc/LEqadukVU0q3UErnJn8fALAUQEcAFwJ4PXnY6wAuSv6+EMAbNMF3AJoRQtoH3vIASJkLk8+DfTltmizjGIUBujKV0kAedX36qp246Llv8a+pa+QNdoHKWKIq6PjRss0QRHd3qyGSITQXStouExZy3bS0ZsehUMr1GoemPpPb8qGL+gTVHGUIR2OT65QUOucL7dOhqbSMGLELnzGSm6sLnXBzPU59nGfpiEr8tdWSd/4vl/bzXYdnIcuHGY83Vscp5fqHRekj0NUVE0JKAQwE8D2AtpRSY7naVgBtk787AtjAnLYxuc1a1k2EkNmEkNk7duxw2Wz/fDB7A9eUZTwb3uS8YfdhfLNc3lY3ju/rdh8GAKzYflBabiapqY3j/VnrURunrgUNFQEq6NWFIvyGYHBtLoz4BN67Q5NAyvE6+XZp0SD1u3G97OSnr2shHPp3bua4X2XCicWI/T0nmZusguoLU+86zfEz2M31xIhDGB7OuPDbM4/C2b3aco6OFp2a1/ddhlchq8RhUYIXRM+aZ0LMFspXTAhpBOAjAL+llO5n99HEqOVq5KKUvkQpHUwpHdy6dWs3pwbCXf9ZiJlrdyfbYt/PE7LOeuob3PTmHGnZsuScbMnGJJ6tLjFt5U7u9n9PL8P/ffQD3p25Hm4NhtUKsa2cIlN7wasmS0bQGrdsc8up3QMpx+vkax2cX7wq4bI5vHtLNGsQbJ68O886Gh/9cphpGwFw4QDbN19OI/PBUVlRV8CJ0krg3ifrvvN7uTshybl9vBs7WO1ISVHM8UPHzeU4XTtPk0UIQcOS7Hw4uCKAycZrCAc/mixelaLhOec0WYSQIiQErLcppf9Nbt5mmAGT/25Pbt8EgI3i1im5LbLwnpNVBiAgypnNDU2W6DmzE7/xO1tq+ate+Z67fceBRHylAxU1rrUzKkKWquzi1yck732yLDiZu888po1yOV77KytkJcwx4XX8ggK7kEBIYiVdPqGkySL8vh6l9CQiWK2F7FrdXQ4Rvi3C1ec5MF4E4WfnNYSDzLTtBK8vilaP55RPFklc2SsAllJK/8bs+gzANcnf1wD4lNn+s+Qqw6EA9jFmxchjPBo/k6sshAP75hqakkxJ3qqDgBFioZBnRpBQG6eBDTaqt0UkPMiaIQ/hoFZ/uh3Rxul6Cl0EETqipbcVTDyhJ/Xb4TwvX8DZXh0WFflE1VxosxZmsP1+FiOwEd4T1youy43Q6GWejvr7D9iv66GL+rhOQ+NZkxXw6kLRPJ1rqwuHA7gawOmEkPnJ/88F8BiAswghKwGcmfwbAL4AsAbAKgAvA7gl+GYHC+85Wc1EbvqULOJ7nCNkZWpAkzlFGhjmvKIC96uxKM28b5K1vn3l1bjg2WlYuf2A43myd9G1uTDio6yT8FukOAA2KC4ILDdYmN2+gBDOpJq5wbdjM/++L0GgMt/EiD3dUHSmKWfMQpbzsW4d33mHD+vWEs/82H1koksGhmumVp1DrO9EjBDUK3KnYYqS4zuPKGmypAZkSuk0iN+3MzjHUwC3+mxXRmGFCJJcfuRHSBDFhuLVZ3SSoNTyQc3xxsrDwgJnHwdRG0wm0ThF17u/CKhlany9fDsWbtxnSjDNI5/MhQ2KnQdSVcEp0Tf899cGRQWm/ur0DnjRdBCO43ImtTO7D1VxtydMc5lrh8rYUkDsmiwvJqFsTG3FTL8lxB6KgqW+C2FCdC33X9AbbRqXcPc51a16P2848Uj8a9papWO9YO0OMZIYG1TzyQLehRg/wo8bnyyn/JWZJjotiQDsC6I6uZaOHmMLv2CYC0XdyRQqIqn0ctP39pVXo3T0GPzFIR2O165stK0m2bCEudDdjBCn5hWJqkme/SD2nfBXbl1yfG9YUug4CRQpmguDMAVfPLAjzjimTaqfUurcZ71UyQummUkh4HBV+mPrppO7pn4XFcQwpLR5xtqhMrHFYv5SifnFT5cqtGiynMoa0Lk5HryoD2486UjufnZ1oMjMmvAkdH93VE1YPdqFE4OrZ7JcaytihCgJn8ceke6z2dAUtWpkF2x5Y1HLhsVoLRCCs4EWsgS4SQz89wkrTX9bNVl7LF+0fMd39U67blfC5OeU1NnvNGiEWPCiyUoIWQH5ZHG2cQOmMtv2lVen6pcJBLKvfLfXEeVAl0UxZ9NvUaF6H/SSNLxT8/qpgf43ZxwFQkhq5ZvsNnu5q4kl+NEwG1zP5F3LdKwuZXNhAHWx79Otp3ULoEQ5RS4d368eegQ3390x7ZtgWLeWpmNFZXjpVqqCSc923sKsOJU+qEuzVLR1m7kwRjCwi1zo/9kJR6TP8agS9qpJfu3aIdz7xxuGfsxJs5NNtJAFfnodN1/rVme+lCaLJKK5D3xwPD5bsNlWH5AWZthOGwWH2epk7/Xke2PxyfIzoXBXlHDKYzf1//M4PPT50uSxEiFLUn9d0mTJTCmqju+UetdOPnBhH7x/09BU2AFWCAq63/MG5WytlrPWKutVbhUFTr4uKhNiAUcF5PdWuTHNsVo/EV//7lR88/tTbdutju9OHxKyS2L7hzjRtDi0hdNztWqyRNq0sLQwor4fI8Cjl/TF+zcNVT6f191U+ovXPjXoCL4QyBufw9IEekULWQw8M56BU9+w5mNiVxcu3rwPADBjdToeFS+yfNBjv9/i0ubCmAdzoVm4CVq7oyL47EpqD2XHrt3pvBBAJRwFS9RduBzNhSEH8CMEqFdUgOO7MomITVU6+GR5uLEky+ZCUcUEdidzvxxjCTJ7y6lpLZL3yc/f3XJzifM37AUA9O8kDq9R2qohd1Ur+5Er0xL2dSifUmq6D/WL+S7LTvfT6blafbJEoURiMWD2PWfi+7ttLs+OWIUoa7iU9J92Ydr2XvLaxRTP+4AZ8+uT5G302aesUfWt9/u164bg/P4dfNURNFrIsmB0ga8WbzVtX7ZVvErNKmSxD974ijRpdphObiRKjlo8mlQIhwJn7QcPahGrglYGWYW+pVv2Y7UgYr6s7eOWbHPc7zYyfZRlLFmMKNXM9RTBxQNS7fXezIXhvlNOAoETmXjVj2mfFrpUzFQFhNg08kG3c+xvxZPw/vJEsvoOHlZkshoip2deGCM4tUciFpyo/7JnNxBo4jxaC22aLNGYX0AIWjUqQdsm9TzUkvadalY/Hdw3IUAm6rOOx6rvCXsc75wm9dUDsR5/ZAvpMd/94YyUts/QKrayaPms13JaD/VYf5lCC1ngv3B/G29elTZp2XbbMQbWwckoblbZHizZvN+0DTB3jJpUnCw37VU/1iuG8MdbdSTDqsl6f9YGh6PdYxWyRj49FRc/P517rG/ftBw1F57ekz/Y9GjXGMseHIHjOIOcajBSSimCyorETjTOGgL3ZceIc1wuVX5xSlfu9muGlSqXYf2Cl13Or073lwePvU6nSdTQDBTEiC1QpHHWiodG4pGLnZNQ87BeY9P6/Ij+o0f2xIHKxMo2nq+UleUPjTD9rRrCQUloYe5VfYfVuCIByem5Fiia4/06ld97Xi8se3CESXNWyyws4SUCVyEm6VMqigJCEs/v6SsHSo9t17Qe/jDyGCx7cIQwiGkurP7WQhbD2MVbPU2q1g5gPPjdh6rw5nfrAJi1V6xQZzgQB/1xG5RwsetQJV50cLDn1k0pKGNle/DzJb7aYn13DRPgx/M2poRYETJzIAD8Y+LKlHnUiltzYVR4kDVA6VEAACAASURBVJN82biP9YoKbNpXAOjb0TkHHosXx3feeEgEv4MgFiM24caLueIPI49BQ86E60ZTphp0FUhooe4462jlsnllmrUO4vNuP+tolD02CoQTK8mYNIsLY1wfHAOzWUp8nOh+sRqe5gqplazjLfuR69eBn20h7x0BxPGznLh0UCebJkv0XLxGUzfOiiXNf6YP+zhlrCrmOyTrx+2SwimR9KmWDeUCMpB4fqpxj2Mx5xhexjDUu0MTXD30COFx2UQLWQFQXBizaKrsr/kHszcy+9PbVybNXG4GbKdBJCgzjlHM/330A8ZaTKcq54b5hWGkPLr9/QU49x9TfZf3t/Er0Pf+cRjHuU63ju+yQLSZgicEsVMDP4WKevlBLQgwCR8BS1m8EA48VBy0Fz8wwrbNa3tlCxCCeIdNwqugoSP7tDP9LXb0dtZSrH7kXNu2q4baV3gJncWZy+XFcvv16c75NgstQp7K7eMdQ6m1P/IbTODw7AV1P3l5f1vSYpGo5jWaulO5NfG0v5ndXCgpjxjHmX28WMoeG6UU0NQ4L6hVv8Y4N+Y3J3E/LKOAFrLgX+tjVe/KX/L0AV8v3wEgeJ8sr+P0k+NX4PIXZ/gSFoIUsrbsK7ddSxgCXHl1LTf5t1tNlpG3MtvwfFvMq2ft57jpgW5CnPDqT9cZnoOSqrnQq3XGzTsr0tgN7+7sbOwVtmmiSfuFq441/W3VELH3xVrCv342mF8v89vaRUTtoKB47dohuOaEI7h+gbyu1r9zWutqz4fpDQpq13zy+iwR91unRT52TSH/OL/mwnS5Zt9fYzsrxJ/es03KT02EcbjMXKjUtuS/QaW90ebCHMHvR7l1MJcVx+sXE5Y6O2Cr8PCYJSjbddh3OTPLdmNRclWkF6zBSP3w1WL7ffEywXvFTRRkAFixje+An2kKYgTXOvgM+RmcAnV8N03kwQpcidyF8jL9mmeUjrVMSsbdu+0Md2ZBL/WpTohWU505vIa5jDOZoJ0sTr1C1A5KgdN6tsGfL+zDnXx5gsttZ6S1W7aPXIXPZtkRPz6uc6ptVrz2U6vGVFRKGDGoauNpAZK9pFevHeLoeyZql18ZiTXxPvGjfsrnWavVQlaekFiSnf5b9uD9CiCiCe7lqWvZo3zV4ecdCnIS5uHFH8grD41ZmrG6gsYpfAHvFqpqASgN0FzIluvQZ9t5WGnFE574mqzgJzXbsYI/eJOVdfy4YnDn1O/GJWoruMzmQqVTcPtZR+M3ZxyFo9s2sp3n5Q5Zn6dokmaP4gpZEkHHFt/NY9ekVE1AIxwNKVuGCJuQJVpdGJCWx7rYSjXwr62c5D0xLabw+mGSPI31d2NXwsrb4vx3FNFCVgBY3xV59OrwuwalwPnPTMN5z3jzWfKjdg9Sk8Ujk5qsXMZJeOAJwbInzjp/Z3rR5Ue3DMOzP5GvSGJRnQdUj7OayGTC2dNXDuBuJ0BqkCgsiAmPM2B9Tb647ST85gz7ykO7WZQRaJgLNAQoHg1LCnHHWUdzg9KqDgdOhxGmWPbesV2xwEPOOauvkwgjAK4aSd8hl8Og09BUUqQWIsOz+drh7tfE46l77lX7o7oS2LGM5L+q4WJk5MJUoIUs+Ne6EALsPFiZ+luqycqQb/QPm/Zh0Sbn1XciDla6M5OxhO34ngsvFo9LBnbMaH3WwZodJPmaLFl5wZkLeDg9147N6uO8fu6CDHJzF3IuUlWTdWavtvjk1uHo3iap6ZEczwbOdHKovnBAR1NeNpsvE3OzO7dogFF920vbagocyU6OCjop3mNwq+2j1DlUgHEPE/WlD1T21XHwORN1o+d+OsjUPituhhUvQoaKY3ii7HThXqK/p3yvmG3snON1bA7EJ4sb+sF+XJcWDfjnW/7OhYwcWsgKgG9X7cKCjWkfJpk5KyOarNBrcKg7wNyFPHLBDs8lBMHECatK3+yUzFtd6NxAtjxRzJ/2TRNmvdeuG6LYyvDgJQvmXaEbre2Azs1Q2rKB0nkmk50gTpax1cmn0yp3qMgh5jhZ8uNNbeNkoVC+RQ4Hsu248aSuGNSlWbK+9Ha+T5YdVrAyhNCLBnRIlscfH0RxuvhQW9sMeI7vsZRgIx6bSi3R6lUElbG3yaOo29rH6eW18XQwUjej54c3n5D6zbbXq+M69/2zbP3RsZ3w6a3DlcrLhblAC1kIXiCRmbNk/ULWfbcfqJQckV3inK/YIMmB94qP7LkHLITZBnHmTy8fgOy4KhpkjTqPaNHAVe46a5aAIFAVntzOF+n76txiUfWyZlmFBOt1sH+KTGBm0443M4+fhQgUPGGRNWECQ5NpXNjrNQKLytrZuF7aN80wF/Z05dtjf3aUMs7hDo/Wmq5p5t1nYO69Z0nr7NGuMV65Jm1yVrm7LRuV2Hy0RvUTaDJtbivpi6iJ03Q/d/GiHdO+CbO6MF1Bp+buI/MD/OdqFZR6tmuM5ooxt87p3U5+UJbJCyFLag4MeHSXTWB+hYRfcEINBF2HHyhonRCygjKJiSKGW5HFx7lrRA9X+cxOOqqV6W+TJku5lDTsIFsg8IPhmSocYYr58XH22Epu6dA07SDP19bZz3HraGwIAtWSsPcmIcXy02garz2iUs/t2y55TvqkL5OaDmsx7N/uNVn2tqmah5yOMsVZEiRZNp7FuX3a47rhpab2sLCR4a2+Rn6Gh5RGyuH5EJivs02TekqR6gGYMi2oCrxWk1hHSeohkSDjxSfLbCJM/27f1JuQxesh1usbXCpPuWNw97nHeGxH5sgLISvTyIS6TKg43/5+Xeh1iIjHQ/bJypAxlOcA7IUWigOwLNtAcUEMzRXLAoBh3Vph5cMjlY+XwZoLZZosa2BHFW4/M+3QbeRfc8ulx3bCmcckwgvwQpzxtDNu/UsMQUDmD2I1t/Gczo32mFrAKXbVwyPx7I8H2dor8vPxEsLBXkb6t5cyzjrGHOYhZrkfBuxQMSj53C89tqOjPxKbl894HkY5XoceyrTLafxyuhWyuhvXS7ebLcfNCjvZk+B96NTGKa4cklil2quDel1m7WPid6tGxYHmBbWOewM6q2eeCGolZphoIQvqk7bXLw97fbL2+OeNGdkTsijC9skKrWgTAclYacFD8XinfuZV65IoVy4kXTJI7JxvcqYWnO9n7GXb5yfqtbE8XLUPuq3KuHduUnARAE9dMSBZH7Eti2fb8LtzetjrLIilJjmV5po0EC6DdabaxtTkZS7r26kpyh4bheuGl2Jgl2amibm4IMZ9Lzo2q4+yx0bh9J5pAY03PrP+VSkNjYvnIeoaKn5LCS0c/4a4c573d39t5Tnsq41TnN27HcoeG+VKC2XWPiYoKoj5yHaQ/j2sW0v89PgurpzXg3apyAR5IWTJxlrV1X6qcoNUyMpZpyI1EqsLwys/U86OQWmy3A4MosMJIb4GY/ZUkZD05GX9heerOL6q+iuFiSFUqKYOcvtVbvgAifJdiupitVZOXfhcyepBlfaKBCSVK33gwj44pn0TlLZKr/DyE9LlvvN74+NbhpvuR3FhLNUW0fucugbO7liMcOJO2Y+7Z5QLcxK1mx657SLBrGEZqKixef6ng9CG0epVVDtnleD5ldUoTnJXDz0ilTQcEOel9Hr97Hnv3DgUD1/cNydWCPohL4QsGUGbn2R9RrY/B4V1E4nl2+GGcMiEoBqUJnpk3/YY0bsd7hph11CI4MZCgr/Jjj21SOhTJS7f5JMlEEBTpgpqjuoM+HOkdoNxabzBm9cCdiJRWRVpXLtMkyXK9UYIb9VjYv8vT+0mrZ/rJ+TgHM+2429XiIVog6FdW+LL204ypdnx6tclamNxYSzVSLFWKVmWoI63bzweVw7pnHp+Rn9TGRl4x1Ck+4d0cZLgfrgZlhoUF+LVawdLzzu3b3t88Iv0Kr+DlXwhy9om477069QUb98wVKlND17UB7czycljhFkJyz6vADRZBrmwQtAPeSFkyR5h0IK0vNPU7U4VdjBSGnL5BkEFzGtQVIAXrz5WWU1PCMEdZ9nTrQSpKhcJSSxdW1uWnDOniJbDsyagD5mJIZPEFH2mUscz9/U0SR43IK3Fc+WTBZFwZP773D7yGFi8ctjVdkZ9BqyQdXSbxtLyefC0Z09d0d8WoFW1jxYVpDVZorsoK2pQl+Z47NJ+qfyixcn31c8HmNHHncbwOKUOHyPunMpVPzzY+3+IiWF4w4lHpn4bJnbjUON+PHpJX88+jkH49pnKU3B8r2vkhZAlI2hBOuy0OlGHItyvk3jGNFnBSDVBlaNSyts3HI9/C7Qx7ABXpKCasPq4bNhdnvp9G0fTxraRUuCoto1NK6EylenA0fleQQskI2UulAlZlsrS1RCxYKHQFF5/evKy/vjDyJ6pv7u2TjvZs8Kx167Ik8kvHthJmMNQJmwUF6Z9smQDsOxdN5LZFxfaFwEItWSCetLmQqf2ODZHGTYUhKxI9rkdqkoLWfec1yv12/goNA41NHt+x5/USku2PQFqpet6Bg+1RFg5juwlDXrClpm/63ifSmqywl1d6EZQjRFvgm1AiizXqnU/38jDu7cS7mPHWhUHeqfBj00qW1RAUuEMrOO5V7c2PwKZ8UXPaz9/daG78lOO7y58sgijtTBruOzbDCbeeQo27LYnfOcd27JRCX5xSjc8+uUyAEA7JpSFyGzphqA+FI5p3wRLt+xHMeM8LXo3Vas0hCzDBG4tbtKdp2C95T625eTCTKwulPtkOfVM62kT7jgFm/aWc4+NEfXnwS5eOCTIxmF1ATCErDCcxYMss0k9N0Ficw+tyULmg5HWdRs0L6VGkLgNEeHVgT1bju+ZoEhBglRdEHLFkHQC436dEs68jZLmK69fvL07NOVun3rXaXjz+uOE51FQR3NhEI7vqj5Zoi9/ArUPu26tG+FUjvnSbXuD6H5BCVkp014hay6UfQQ7l1nJlMmjK+c+XjmkM1686lhT3KqEFtT47SBkOezr2T5hjn3ysv4Yd/vJ6N6mEU45ujX3WHYhi6w/sB8CopRnxnhlNRfWSOK5ecFrb+CN28ce0Rx/vqC3vwZFmLwQsqQeUgFLBPLVhYFWJ2Xf4eqM1rd5b7kpl2PQuA126jWWStAhHFTJhFCmpMlSVP+xAsRDF/XBp7cOT5kJvV6LKMhg5xYN0K+j86qsAgW/Ghan58OLX2RoDNzGyVLxyVK5X27vaRACknowUufjUqa9gljq3vZqzxeoj26bEFh6d3SO62Ss0utmmEgVHnssRjCiTztba1lTsxduP/NofPTLYbj02E6p9jvhzSeL7/ieTpSd+LcoKXRWcTSuM+8+A9/8/lSlunl41YiKXhmx2dlSbw4uC8sLc6GMTPtIZSqYpsFZT32T0fp+/e68UMun1N099JpnKzBNluvj+Wf4Fc7ZgfHUHq3x2YLNjscbGlnWHCijXlEB+jNL061RvlUY2rWFUCuhwvBurfDWd+vRu0MT2z3jtUAkcE6445RULkaWIcmI1PKgiYT7F1fYSv0rv0duJ5oghPbAVtr2aYd/TlmDFo2KcWartph05ykm/zGWU3u0wcQ7T0FXQeogg58PPxKn92yTKocdG9yMExTp1DOO5kKHIgsLYq6czNn7WtqyAcp22c3DieMYIatKZC40vzMlyb8NwZalDcdcKiPtP+Z9IBIpNFS7F1s3L7hvFMkLISvDWXWkuJks9x2uxsyy3ThLUdLnEfVch25xu3pRlAJGhsrEUlwYQ1VNnLss3yAq5kK2GZcM6oQBnZvh9CfFArgxILZvWt/m02Iq1+H63F76D/efbQodwKNecWLy6Nq6IdbsOGTbP7Jve8y79yw0b1iM5VsPWNpqb5Go/R2a1UODYvsQefLRrVPlO2HtP2w91rQtbjQDbgUeP2E/vJYhehfuGtETt5zaPeWHIxKwDLpJ9gMJrRRbzqi+HfDFD1vV2mn5O+2TpXS6f5jbOva3Jwu1o+wzl/lkGWUYHyo8IcsdSX9L1uTtsUsFdVsv6N8BT1zWL6DSwiUvzIUyMh0c1E11v3x7Dm58Yza2768Ir0E5RiLYqfpN9Bo9XMWkNqS0OdY8ci76dxJrNVyruCWHn92rLY5q4/4rznobWkiEBGOwFsXUSpXrok4RxuNsXK9IqsUqKSzA2kfP5Ya5MFBNMAuIBQin56ZSvigxs1O5XlcXho2qYCdrWkGMoGmDcB2dR/Vrj+uZ0AaqmFemOodwCArWPFmvqAANS/h6D/aZ33EWP96eoXk3/N6uHVYKIJFwOQjYZ9tdQfjlIQw8S8z/CtuQfHcalhRIP8aiQl4IWX4dK4PGzUu6Lqk+rvT9NVJ3iFMK6uJ2xDzaOlRiSd16Wndp+UHNiUaveelngzH+jlNs+/t14vu2iNohE/7SX8TeBzOZQODVx4IQYjr3d2eLBS5zfXZEMqTKcxvWraWLukiqXF7qGtU6s6EZZfv4+f07SI/P9toeL6886/jOUyjdf34vNK5XyF2Z6BXVZrLvkShgrTW0yJm92qLssVGeTIM82GfavGExyh4b5boM4UrS5J1Qde3Idv9yQ14IWTIy7SPlpjbj3fp+7e5Q2pKLULh7ZioxobjnSTQ4J3RtiWHdxCETDNxOiqLDnb6uR/Vrj89+dSJ332e/Gs6vR/D2XzigA/5yab/UgCjVZDlcYDa0LiwqjuUijaVK29+5cagwMKtVe6Xi+K5CEOY/txi3qG/HpnjmxwMzXr9X3MaFToeVMO9454bjce3wI/HD/ecIk3Kr8txPBuH0nm2S9SkuKFCYqYtSqwnD/SD343wu9MlKFpkLCZ/dkhdCluxF+3bVrsw0JIkbTZbR+X734YKQWpN7UErx0pQ1ysd7jdwue93ZAcHJxJUJR2UVk6hNayI47ukrB+LyIZ1T+c78DHxGswZ2aYaXfnas53Lk9aTNLm64dFAnPHpJX+4+v+O91Ycl7dyOlKnDWoVKX8nGPJSO5J9DKgQPCPtRgPd8VL/2ePXaRMDgVAgHyX1VEfgNLRBvNaEfgtQaycpSXWwUFT9XFfJCyIocko6m6r+Ry3Rqrp4J3kqcAs9/vVp6XPMGRbj5lG44nomF4wbZVyZrQnn6ygHi4zLwCJ0EIauTtYHs+gxNlkiAOy65ys6pmOYNEr5Lj1/aDz3bOS/FDwJp5GzL+/Tk5f3RqXkD/rE+R3LR6YQQvHDVIPzm9O7obvGtUzMXZkOTpSbE5txoZWmwMEF0SLKl6qNUGUNaNkq8a159UGUEsbpQnAw8geoHnTYXahyp68FIVfAzDqjevjaN62H0yJ5M/Bh3SJ14mf1OeQkzMSk6fekat8t6hGw8M9LqiHzOLk8GIW3p4AT+zE8G4r7ze3ly1A8FF49CVThWWThDYO4HnZo3wB1n90htc9NFsqnJUiXbGi8v7xyl1JYgWvaR21jgqK6KqgZW5f4/enE//PHcY0wBVqOGcO5LXp7MJyuXNFgGeRHCIWq4kbFUO1VFNT9AXVRRjSbOY/Y6Nf80KhQv1JANbKpfXW5r96K9dLKIimPTSBzfk+eJBr5LBnZEPE5x8aCO+Ou4FdxjWjUqwXXDj3SsJ0waFMv9Z5w0TiqIXmdWOE046stROSYbfm4pLYYLLXwUkA61lgOsmiwnYfG1a4fgKJ+xmrw4voto2qAIN57c1Vd7eBh3wPBFUwmyKixLcDuN7donSxMIrhzfFY/ree9YL03JGn60eROXblc6zlhl43Xgd+OT5ViO4LAXrxrk6ninW+ZoLhQULLsvhplVNMDHYgSXD+mMooIYevgYeMOkU/MGePuG41N/8wOB+hzYnT/OU7+dlqmrLmHPNrmig/dyGynSWR5UxqfTerYRmppVSfu6yY7zVU0gNGtQhHduPB7P/5Q/bvE4oat59a3oOo2xWqbJ6py830dFdLzhkRdCVtSsc5U16lqnbK/OCgvVlC08VAW0/eWJdEJe76BKzB+1cvjHWfPz/fWy/krl8RjYRR5l2u19eOWaIRh3+8lKE/8HN5+ACXec7LKGzMAmzeZdSlivmFhDxtnm4un4HRO8ZECoo8OQDeM5xBXNhb7rU/bJisYDGNatFRq7SOj8r2sGY9zt6XFBpFWvrXV2TTA48ahW+OTW4fj58FLlNmQbbS7MAk9PWKl+cDTercDxE4WeJ5+1alRiy5e4+1AVgPAmiKAHvubJII2iUkWJx6f8/jR0biH2CRM5vsva37CkUNk00LR+kTCUQRQwIvJzI76HVacgQrbzxC1vjd9uN/dPZ6UmNbdkOnBz2JhS8DBxsjJ1nSrBT4HMC7ndWjfE6mQ2hYsHdsQr09aiISf7gQzrGCL6tjZWMqt8AMjTWUULqSaLEPIqIWQ7IWQRs+1+QsgmQsj85P/nMvv+QAhZRQhZTgg5J6yGuyHbTphWynbZU4Fo1Fm17YBtWwknhIIR9dmLMKRySlj+A4YgYL2m8iq+BrRLywYS/yEj8KW1Hq8tjC4Sv9pw6hRsNwlWqvGQFA7zK9w3qVfkKiK+lzqjIou5bYehSTHOi8rckekVpV/edjKWPTgCAPDHc4/Boj+fg/oK/o0i3ro+YbIXWSFaNS4BAFx9QqnnOqKKirnw3wBGcLY/RSkdkPz/CwAghPQCcCWA3slznieE5Ebs+wzSw0Wagzo4D/rmEEfY4I1BfxzVK7HPQx1FsZh0gA56qbS1uOUPjTT9Xe5xcYNfwSMqE6YT0nQcxio+h31BYy1VpFF0Szb8c5Qd36MyYnlohmqC6CBR9cnKNMWFsZSjeyxG0MjnKsoGJYmyRLe1Sb0ilD02ylM6pKgjFbIopVMAqIYbvxDAe5TSSkrpWgCrABzno32BELVJQqXDfjRnI6av3pmVmDi5iJNWycs9bNXI/qV/THtznCev6XpE8Caoe0Ydk/otSgyrXL7lPkTFzyMTGFfK94fyh9DUYynYaa1ryvFdob5sjgmqmp2IDblCbmfyX7Lm5Kj5ZOU6xmXWNXOzCn4c339FCFmYNCcaXrcdAWxgjtmY3GaDEHITIWQ2IWT2jh07fDQj91AJX3Dnhwvwk5e/z9p3YUMfquFsYBUY7hrBT6KqysAuzW0ThTVMgl9NlvX0RvUKcVqP1vjn1fzI6CJzoQwVc1a2CWroFQkBTtfq9z4IQzhYClaKpxWlh8KgLGxEpPnXDTsSAzo3w2WDOzkeN6xbK3x/9xmpv3t3aIKhXVvgwQv7hN1EAEzfq+OyR1Q1dpnAqw7wBQAPInHPHgTwJICfuymAUvoSgJcAYPDgwaHee6PwM49pgwmKy//DROTAbMDGHMrWmBtFLceDF/bGvZ8u5u5jW9u1VUPccmr31N9erqVNkxJs3V9h2saWc16/9r4FOWs3iBGC164TK369viTpwIpmVCd0v7nanDACxfJ86tygKgTwjiOE4JendsPIPu18tcFel2B7BN8tVWRyopHn0inNVCZo17QePrmVn7PTivE46hcXoKSwAO/ddEKILTMTxDj7xI/6pfIWRoXXrh2C1TsOpv4WRtLPAzwJWZTSbcZvQsjLAD5P/rkJQGfm0E7JbREhGoObq9yFWWpzWPPAGT3bYOIy94Ju2yYlGNBZHKbA1F7isE+RLi0aYOHGfaZt7ID4xI/62xxBf3f20cKgnCrI2smaDt1gaFC8PtPHLu2LS54/gAcv6oNrXp3prRABx3Zpjt+c3h1XnXCE5zLO7SsXjhLvEU3dg1euGWwKI/J/I3p6rl9oLbRqslJt4bUveD69dTgWbd4nP1CBdEoVZ87r1wHLtx7ELad1C6TeTNCmcT38/pweOK9f+4zXHcRzv2xwZ/lBGea0nm1wWjIJNgCUFCWEQCPNVj7hScgihLSnlG5J/nkxAGPl4WcA3iGE/A1ABwBHAQh2VPaAMclEIaAbkE5XokK2PnrDWznn8TwQx3PZ9to0Nh7qu3roEfh84RbTNvaW8Npy6bGd/AlZDvt+PvxItGxU4rnsRPnebn6rRiWYctdprvqtKrEYwR1n+9MIKmG59DOOaZvpKtPCmMNjCNJnpX/nZugf0HJ31Z5TVBDD6JHeBdZscetp3eUHhQDJEzPa0W0b48GL+uDcgLXFuYBKCId3AcwA0IMQspEQcj2AvxBCfiCELARwGoDbAYBSuhjABwCWABgL4FZKaWTyvUTFBCYzF0aBqNwrFke/GlNMIvOBXi6lsCCGOev2mLZZk/k6tUEFqyYsLDNSs+TXY3e/KUAy3CW+vO0k9ydlJYSDqh+Y+L3Plck2Hx2XwySCw2xoXD30CN8firmIVJNFKf0xZ/MrDsc/DOBhP40KGmNYiEXEbF3rI29fpghtWbuPcp0EP6digxIY7zy7Bz6YvVFYnxvl34tXHYtWlgEnrPG2R7vGePuG43HsEXZz68e3DMPFz09XKifTvkTW1ZxOZHOyEi8u5DcqU+bCIJGZC7+87SQ0axDdYLRRJddX3U288xRPGQTyibyK+B4Vh1M3ZpeV2w/KDwqB0FKNeD2PqLfJdlhA1yJ1LnVRzwiO2txJGPQbFJFNLcOiko4nm3RoWg+b91U4HqMyP6nGeQoUawiHVJysaIxD7nCWstwIxJo0udkX0nRr7U87ng/khZBlDG5RMYEt50QsF+Enx58fwvo48fMIVO+FtY4wFg/wylTpXwUxgmZM+pmiAoLqWn+O6XWZCXeegspq/6rfK4d0wb+nl6VWMwaJOIQD/zinFvgVAhfcd3Yo727rpNb1ggEdgi9co/wJddJR/I8lTXTJCyHLQM9h6hAQDOrSDHPX7w28XK+oTkBWYceN8NKsQRF6CiLymxYwcspUqWbpAyNM5y7+8wiMfHpKKk+Yrcw8l7waFBdCtiBJ5Rb96bxeGD2yZ0aXuttWFzr134Aec1j5I5s2KMLSB0agXlFEfC4yTFifum4e+7IHR2jTXA6SH29MSpOV3WbkEoSEE+tGVWZ4+soBtm1OmiynCcypncG8IgAAGchJREFUygv6m7/M5//pbKU4ObwyVRRtxYUx00TP/p3n8lSoxGIkvHhfWXC2zwb1iwvyXugPC5UPyHpFBSiMWDwsjZy8emJRMRfmCmFYKr2HcPC+KlP03C8f3An3nd9LvQ2mEA72Mv06r0Ym71sOEyX3YWsXMfzqnN+BKF2BxiCsN1NPSXWfvDAXpgc33aNVIbALDTHiX/BSFSR4gpHqggGnEA6sCbS4MOaqT8jabm3efef3Ulq4kPIZ5HzyOAluvzm9O1o3zr8l0TyuGNwZXy/fHqkEs9b+0q11I5zesw1uP/NozrHu+MnxXXCiYDGDJvfwu7BFE13yQsgy0OZCdQghNhV2YUEMVTU+nZBVVwhyjnM0FzKDlFMw0nduHIqe944FADSuV+R9tSNnW9smJbjp5K4Y2LkZ5m/Yi2uHlSoJcamPAE6pRroZnqkrI0E8c4TmDYszmg6FRTVOVlFBDK9eOySQOh+5uG8g5WjUCM8nq25NSk9e1j/rKZWiRl4JWVqR5Q5R+p9rh5Xi39PLPJWp+gh4g4/qy2t7zswGVkP269O7uxMaTeZCXr0Ed5+bSH0zsm8wKTquGNIF2/ZXRiZNycMX98GgiId9yDRBhoUIK8TEf24+ARv3lIdTuEaT5NJjnRNy5yN5IXJGLYQDi2glW7YhxP71ZmgCLxjQAUd7jB7u1WRLCMEAhxQh7ORkrYLVYLK/GxQX+mpPJigujOF35/RAg+JofA/99PgjdEwkC+IQDi5M0SFHfB9c2gIXDewYUul1n7Df9hyNRapRIC+ELIMo+mTdcFLXbDfBxK1JjQkh9hff0C7FCPGs5lY9q00Tu6+R6vOzto3925qT0U2XCKv76AG2buKqb4XXDE0AhGYu1A++zpMXQlYqrY7u0FJOT2ZOJyBCp+sC4pys2QnZedefeCTevXEohpS2kJb10S+HpX6zLbUFI3VYFejmMsLuPnrAjQaT7jwF0/7vNN/l6MeZ+2TqndQfWnWXvBCyDPQkpk6MYy407p+f+yg7tSBGcEK3ltJy6hXFTLn4WIHQyfHd1h6FixnaNSHwheXQme3xtVPz+lluQbTo2roROjVvoHy86GPEi+ZcT7bRQj8PjV+i4egRMsYgqGriKimModLvKjpFoif3JVpECLE5vhtttZrcWI47sgVmrt0tLl0y8ajGmipk4h307dgUh6tq2EpMx8Yc2qui3Xzr+uOxr7waJYUhBbNMkq2+MP72U1CVC1nLLfRsn/BnPLuXPQ9kJhH1WFda0ugNBBqNJgC0JotDSR4vQU1pqwDELfOu4cjr5NBr5DgTlu+ncaa2JP5d+fBIfHzLMNM+ax2nHN3aoT3yFhUWxNBScl1+8BvE1C/1iwtCS8cSJt1aN8Lyh0Zk3aFb9PgIAX52whGZbYwmUMIWfrVwXffJC2ki7ZOl1qOjnrqgKIQktwaE+WGbO5I7nbQ/TloucwV8VOUN4xkVFcRsz+tXp3U3/d2nY1PT32f0bIORfRLaD6cucfMp3dC9TeayzOsB1z1haxe98MCFvVG/KJGC5oEL+6DssVHK5+qglPlFq0YlIAT4/Tk65l1dJS/MhW7J5FznZWKtrg1vIDbMebyI7waxGBGa/WQJTGWaI9Urs1ZjnPefm0/AYInT/CuKASFHj+yJ0SN7KrZIo0nwsxNK8bMTSl2dc/8FvXHvJ4tQ2rJhOI3SeCJsJXO9ogKsfVRdCNfkHtFW2QSE8aKoCDTXDS8NtS1Rh71F9hAOCZw0gjKfK6/aGqn2LvWM3VUQhdhpWnehGdatFSbeeWp4Saw1Gk1WyAshy0BlQr3v/N4ZNdtEYI43kV5BSGymC0OAKSBifZTM0iqzJoq+HFXTkbi9n5EI65G65ig0RqPRGERtfNbkHnkhZKVzwznz9ysGJH9l7s3yEtSzZcPiEFqSwGgPgT3hsUoIB5lPltxcaJeyTujaEl1bW32jzOW49bszYNv71W9PdnVu0OgBXaPRaOoWeSFkGcgmsQsHdFA6Lpu0alSCT24dHlr5rCBl9clKmQsdQyIk9nl15PTrA+FWM8WaF3sopDhq3Ti8VYYajUajqVvkh5Cl6K/DOn1HlRF92qJzC/VAiSK6tm6I924aKtwfI8Tuk8WYC2staq5BXRJ5BaWaLM7u164dgnN6t1VotXM5QPhZ7cf85kS8e6P4vmk0Go1GY5AfQlaSKApPbrVmQa12ad2oBEO7tsT3d5+Bfp2aco+xRXxP/hsjQLUliNa/f34cPv/1ialjhEIQZ3vrxiU4/kh5lHcnUgFnQ37IbRrXU4pI7wbt+K7RRBNjpXJbTi7VbDJ99On45venZrsZGgXyQsgyJjHlBMNRlMaSyEIkuKVtk3rcZeOE2HMXpu4LAWosYSSa1CtCn45NGW2gqJ327YSo3/M2EnNdlJ+diHRGAk0uckH/DtlugiYkbjvjKEy88xR0byN3JcgkHZrVxxE63EdOkBdCloHqBBy2yckPBbFgHpnsXvAc31NiAAWqJWlYYgQ4uq09kKfYzJcsmqOqM3zlAKRMnNZivDq+Rwkvue402efJy/tjwZ/OznYzNCFQECPoZlt0o9Goo4ORZplBXZrLD2IIIxi9IaD8/YoBpphibO7CUX3bY2bZ7tTxMiGLEGDsbSejllIc9ccv09sV2wIAqx85F5RSU0R3URwho6m5LGRpcpOighiaNsir71WNRqNIXowM6QlY7XjVefqMnm18me/KHhtlcmL/03m9pOcEpcliYf2ZUuEuSPq+Tb3rNDz300GMtkkcdT6d+5AgFiMoskiFvHtLwI8gXxAjtpQ5MiEqF2Us7ZOl0Wg0dZO8ELIMgjYDEhKs5uSaYaXSY4L2yWIhzIpCAmJzJv/b5QMwsEsztGpULNRkpeJsCZrJu1/sJpljPysE8st3Pj/K5HDTNRqNRsMhL8yFsonZiupkd9/5vXHuP6aiSmI6CxKnGFVBkF4kYPdzOvGoVjjxqFYA7I7vYM4DxMKSqPWXHtsJ363Zhd+ccZRSO9041ms0meCFnw7Cyu0Hs90MjUYTIfJLk+UgZT12SV+l4wx+fXp3dG7RAB/9clggbQPUxIOCDNnDEo7vYuHUGsKBPQ/gR25PlPX/7d1/sJTVfcfx95d7uffChcvl/uCH/Lr8RqBG+U1AChIRMBXbONSMVPBHqZam2kxqtE6tadppfkzaTqYZGTs6YkuipjaVydREyjRJO1OtP4pKGg1ocJRBsNFEJ4kIePrHc5Z97t79vfvss7vP5zWzs8+effZ5znM4y37vOec5J3v+R7W3cs+2xWVP9pk6XyO2ZI0ZMRwoPMeY1LdNvzax6D8SRCQZktGSda4LLLerFk8u6Zi71s0CYM746t3aW0z81FpooeQKhe/uC3cdZjpvzAiO/exXQ9ILtWRlU0rcmOu4hQa+P7xzBb/44EzxJ6qhe39nCd85dLwqk8yKiEj9SFhL1tC02eOC23PztSKMyHJHW6673KJW7hiwVbMGT6AZDpzCcUt4TrF0d+HQ433zppVZz5NqqSplMPfI4cXH+u2tQZWdnWV6iOD82T+3fEYvl8wrbVb5WpkwpoMdq6bHnQ0REamyRARZqR/8kW1DA6OHdq7goZ0rBnVjLZpW2rQK1VJMN2XmwPeFk7qKOvZHJncXPj+D7zRMLZXTkaXczusekfMYkLvFada4wcHRg9cvY2pv8S04vaPa+ccblvO1axYNStcUDiIiUm8SEWSldI9oG5LW65eXCfvyVRcMel2L3+25RXY7ljNuZ0JXB5++dE7O9xf7ubqm9Y4c1LX6t799Ed/+1Gq6OoYXf7I82Xv05pVcs3zqoLQ1c/qLP7a3enZfaXkSERGJQULGZKWbVab0jOD1t4eOJQrL7AqMOsZ66k/WM6q9uH+KzCCrmLFPK2f2DplvKuy6VQNcMm8cA32dPO0nHDUzRrS1sHBS9nUNC8k28H3xtJ6yjiUiItKIEtWShUGOm+JqInNcVMr4rg46ywyyipEKGifl6uIzY6AvWAermJsE8kmN9arWQtYiIiKNKhFBVvgH/8Myfv0LjZUyg+0rp+Xd57k/vZS9N64o+dyZVs/qG/S6mMu5avEkAP7zs+vYe+PyvPtmTkBaqlLnJKs2DckSEZF6kYjuwpTw3E/V9JO/upz3T59lz3+9lvfclTr6hcuHpOW7mh0fHeDuKxak81BEBHLu7sIyc3z6THCEtiIWWbz8gollnSObbAtLi4iIxKngL6GZ3W9mJ83sUCitx8z2m9lh/zzWp5uZfdXMjpjZC2a2KPeR4xHV5OyF4peoWlhyBRerZvUOCrCKP57fKDO/p86cBdJTLeTzN1svLO8kWYSnnhAREakHxXQXPgBszEi7HTjgnJsNHPCvATYBs/1jJ3BPdbJZHcHafKW3eIQHwn/pExcMufsQCs/EXu11EwvJNZXBuTFXBdb+K3cesA/OBFFsWxFBVhTxkEIsERGpFwW7C51zPzCzgYzkLcBav70H+B7wWZ/+oAsimSfNrNvMJjrnjlcrw+UIx1VnSwyy/viyuWxcOIH1X/k+AFuXTsm6X6nzM+3etjjv+0sHxvL00XdKOmY1LB3o4ffXzmRHEYtVZ5Nax7GoIKusM2RXSW/h7m2LG3I5HhERqW/lDnwfHwqc3gRSU2lPAl4P7feGTxvCzHaa2TNm9sxbb71VZjZKY8CHH5b2a7xr3Sxm9mefXXzQsQv9SGe8v7jAhKd3bD6/4Dmz+fyVC4HyJ+UcNsy4beM8xnV1lPX5U6eDIKu9tXBLWL107W1cOIENCybEnQ0REWkyFd9d6FutSm5HcM7d65xb4pxb0t9f+oSU5SoxxipaOGD47q1ruOvj8zPez9w///EyZ3bP1DcqmFg1swXnvDFBcFSoZSaq+OZcS1YRA9/rI8QSERGJRrlB1gkzmwjgn0/69GNAuD9tsk+LVXhizCjuLsw0d8Jolg7kn3gzV4Axb0Iw83uh+bAev2UN/7Jr1ZD09JireEKY8ycG+c+17E5YNbMY99QRIiIimcoNsvYB2/32duCxUPq1/i7DFcDP4x6PFWZWfJA1padwkFDoXINeZ7yfqzvvjG9qG16gJah/dDsXTukeMrN6vkWda+GPPjaHb39qNfPPK7ymYjUDwdTiz8VO6ioiIhK1gr9IZvYNgkHufWb2BvBnwBeAR8zsBuA1YKvf/V+BzcAR4JfAdRHkuWSDJyMt7jPfuWUN758+e+71R6Z08/zrPys7D5kBRa744qzPYLEzu6eubdzodk6+d4qZ/cHM7evPH5/nU9FpbRlW9lI8lfjzLQv4w/WztKahiIjUjWLuLvxkjrfWZ9nXAbsqzVRUzIof+N7Z3jqoVeSfblp5LgAq9lz538/VkhWMaSo0JisllaM91y9joLeTEW0tHPrcZXS2lTcFQ6Ma3jKMiWMqa30UERGppkT0rYRDo3LHZA1vGUaZU0cFecg4b64g7MzZYL98Czpn0zosWNAZyLvYdLaFm0VERKT6ErF2YYphkd1dWKpcY7I+d8UCJnWPoH9Ue1HH0XIyIiIi9SkZLVl1GIjk6gzcsKC8OZt0V52IiEh9SVZLVg0DkcxldFJh3rjRQQtVqd2We29cnjW93PCx1sv8iIiIJE0yWrLizkDII7+3kscPvcnoEu+CWzWrL/sb5y5OQZOIiEg9SVRLFsDksbW5Ay2z1Sy1gPRAXyc3r51ZtfOkYix1F4qIiNSXRARZ4Z65R2/+KA9ct7Rm5+4f3c5925dEPklmnDHWf9y2Lsazi4iI1KdEdBemmBnjuzoYX+bix+XoGdkW6cSgpQ7qj+IegCk9I6t/UBERkQaXiJasOEZl1ar7LhXAdY9sK+lzcXYvrprVG9/JRUREaiRZLVkZr399Tn/k54x68s87Ns1j55oZ9HSWFmTF6b7tS/nFqTNxZ0NERCRSiQiysnWRvfwXG2kdFl1DXjWnSPjdi6fnfK+1ZVhNuz+roWN4Cx2VTJ8vIiLSABIRZKWEu8jaWxvnR/7Oy+dX7Vj1NJ2FiIhIM0vEmKw4A4s6nGxeREREaiARQVZKLWc517xVIiIiyZao7sI4NHND1u5ti3jvfQ1gFxERySYRQVatuux2b1tE14hguZwkNGRtXDgx7iyIiIjUrUQEWSlRd+E1QtDR1RH8k9dqeSEREZGkSkSQFfVcVXnPXWcj3y+aOpbd2xaxdu64uLMiIiLS1BIRZKXUsguvGq1m3/vMWlqGVT/XjdDiJiIi0ugSEWTF2ZhUyakH+jqrlg8RERGprWRN4ZCE0egiIiJSFxIRZNXZsCgRERFJgER0F6bVdFRWDc9V35YN9DBznLo+RUQkWRIRZMV5d6HAIzetjDsLIiIiNZeI7sKUWMZkKb4TERFJpEQEWaPbh3PJvHH0jWqv2Tk1yF5ERCTZEtFdOLV3JPfvWBrLudWQJSIikkyJaMmKgxqyREREkk1BloiIiEgEFGRFrN7WLhQREZHaUJAVkZ7ONkDrBIqIiCRVIga+x6F7ZBsH77qUro7hcWdFREREYqAgK0LdI9vizoKIiIjERN2FIiIiIhFQkCUiIiISAQVZIiIiIhGoaEyWmR0F3gPOAmecc0vMrAd4GBgAjgJbnXPvVJZNERERkcZSjZasdc65C51zS/zr24EDzrnZwAH/WkRERCRRougu3ALs8dt7gCsjOIeIiIhIXas0yHLAE2b2rJnt9GnjnXPH/fabwPgKzyEiIiLScCqdJ2u1c+6YmY0D9pvZS+E3nXPOzLKuK+ODsp0AU6dOrTAbIiIiIvWlopYs59wx/3wS+BawDDhhZhMB/PPJHJ+91zm3xDm3pL+/v5JsiIiIiNSdsoMsM+s0s9GpbWADcAjYB2z3u20HHqs0kyIiIiKNxpzL2ptX+INmMwharyDodvy6c+4vzawXeASYCrxGMIXD2wWO9ZbfN2p9wP/V4Dz1TuWQprJIU1mkqSwCKoc0lUWaygKmOecKdsOVHWQ1IjN7JjTVRGKpHNJUFmkqizSVRUDlkKaySFNZFE8zvouIiIhEQEGWiIiISASSFmTdG3cG6oTKIU1lkaaySFNZBFQOaSqLNJVFkRI1JktERESkVpLWkiUiIiJSEwqyRERERCKQiCDLzDaa2ctmdsTMbo87P1Ezsylm9u9m9r9m9kMzu8Wn321mx8zsoH9sDn3mDl8+L5vZZfHlvvrM7KiZveiv+Rmf1mNm+83ssH8e69PNzL7qy+IFM1sUb+6rw8zmhv7dD5rZu2Z2a1LqhJndb2YnzexQKK3kOmBm2/3+h81se7Zz1bscZfFlM3vJX++3zKzbpw+Y2a9C9WN36DOL/ffqiC8vi+N6KpGjLEr+TjT6b0yOcng4VAZHzeygT2/qOlF1zrmmfgAtwCvADKANeB6YH3e+Ir7micAivz0a+DEwH7gb+EyW/ef7cmkHpvvyaon7OqpYHkeBvoy0LwG3++3bgS/67c3A44ABK4Cn4s5/BOXRQrB4+7Sk1AlgDbAIOFRuHQB6gFf981i/PTbua6tSWWwAWv32F0NlMRDeL+M4/+3Lx3x5bYr72qpUFiV9J5rhNyZbOWS8/xXgriTUiWo/ktCStQw44px71Tn3AfAQsCXmPEXKOXfcOfec334P+BEwKc9HtgAPOedOOed+AhwhKLdmtgXY47f3AFeG0h90gSeBbvNrcTaR9cArzrl8qyw0VZ1wzv0AyFx5otQ6cBmw3zn3tnPuHWA/sDH63FdXtrJwzj3hnDvjXz4JTM53DF8eXc65J13w6/og6fJrGDnqRS65vhMN/xuTrxx8a9RW4Bv5jtEsdaLakhBkTQJeD71+g/wBR1MxswHgIuApn/QHvkvg/lT3CM1fRg54wsyeNbOdPm28c+64334TGO+3m70sAK5m8H+YSawTUHodSEKZAFxP0AqRMt3M/sfMvm9mF/u0SQTXn9JsZVHKd6LZ68XFwAnn3OFQWhLrRFmSEGQllpmNAh4FbnXOvQvcA8wELgSOEzQBJ8Fq59wiYBOwy8zWhN/0f3UlYi4TM2sDrgC+6ZOSWicGSVIdyMfM7gTOAHt90nFgqnPuIuDTwNfNrCuu/NWIvhODfZLBf5QlsU6ULQlB1jFgSuj1ZJ/W1MxsOEGAtdc5988AzrkTzrmzzrkPgb8n3f3T1GXknDvmn08SLGq+DDiR6gb0zyf97k1dFgSB5nPOuROQ3DrhlVoHmrpMzGwH8HHgGh904rvGfuq3nyUYezSH4LrDXYpNUxZlfCeatl6YWSvwW8DDqbQk1olKJCHIehqYbWbT/V/xVwP7Ys5TpHwf+n3Aj5xzfx1KD48t+k0gdSfJPuBqM2s3s+nAbIIBjA3PzDrNbHRqm2CA7yGCa07dHbYdeMxv7wOu9XeYrQB+HupSagaD/ipNYp0IKbUOfBfYYGZjfRfSBp/W8MxsI3AbcIVz7peh9H4za/HbMwjqwau+PN41sxX+/5trSZdfQyvjO9HMvzEfA15yzp3rBkxinahI3CPva/EguFvoxwQR951x56cG17uaoOvjBeCgf2wG/gF40afvAyaGPnOnL5+XaaI7Qgju+HneP36Y+vcHeoEDwGHg34Aen27A13xZvAgsifsaqlgWncBPgTGhtETUCYLA8jhwmmCsyA3l1AGC8UpH/OO6uK+rimVxhGBcUer/i91+30/4781B4DngN0LHWUIQgLwC/B1+BZFGeuQoi5K/E43+G5OtHHz6A8BNGfs2dZ2o9kPL6oiIiIhEIAndhSIiIiI1pyBLREREJAIKskREREQioCBLREREJAIKskREREQioCBLREREJAIKskREREQi8P9QRdbX1ySW7gAAAABJRU5ErkJggg==\n", 256 | "text/plain": [ 257 | "
" 258 | ] 259 | }, 260 | "metadata": { 261 | "needs_background": "light" 262 | }, 263 | "output_type": "display_data" 264 | } 265 | ], 266 | "source": [ 267 | "import matplotlib.pyplot as plt\n", 268 | "plt.figure(figsize=(10, 5))\n", 269 | "plt.plot(history[:1900])\n", 270 | "plt.show()" 271 | ] 272 | }, 273 | { 274 | "cell_type": "markdown", 275 | "metadata": {}, 276 | "source": [ 277 | "# Debugging" 278 | ] 279 | }, 280 | { 281 | "cell_type": "code", 282 | "execution_count": 15, 283 | "metadata": {}, 284 | "outputs": [ 285 | { 286 | "name": "stdout", 287 | "output_type": "stream", 288 | "text": [ 289 | "torch.Size([32, 1])\n", 290 | "torch.Size([32, 1])\n" 291 | ] 292 | } 293 | ], 294 | "source": [ 295 | "# Debugggging\n", 296 | "print(dqn.q_eval.shape)\n", 297 | "print(dqn.q_target.shape)" 298 | ] 299 | }, 300 | { 301 | "cell_type": "code", 302 | "execution_count": 16, 303 | "metadata": {}, 304 | "outputs": [], 305 | "source": [ 306 | "def pretty_print(s):\n", 307 | " for i, x in enumerate(s):\n", 308 | " if i == len(s)-1:\n", 309 | " end = '\\n'\n", 310 | " elif i%2 == 0:\n", 311 | " end = '-'\n", 312 | " else:\n", 313 | " end = ', ' \n", 314 | " print(x, end=end)" 315 | ] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": 37, 320 | "metadata": {}, 321 | "outputs": [ 322 | { 323 | "name": "stdout", 324 | "output_type": "stream", 325 | "text": [ 326 | "0.0375-0.15, 0.075-0.125, 0.0125-0.1625, 0.15-0.1125, 0.0625-0.1125\n", 327 | "Chosen action: 4\n", 328 | "0.06944444444444445-0.16666666666666666, 0.013888888888888888-0.1527777777777778, 0.041666666666666664-0.18055555555555555, 0.19444444444444445-0.125, 0.027777777777777776-0.027777777777777776\n", 329 | "[3.9170291 4.4086313 4.556935 4.6864247 6.0160775]\n", 330 | "[4.346579 4.302234 4.7729483 4.663835 6.043704 ]\n", 331 | "4\n", 332 | "4\n" 333 | ] 334 | } 335 | ], 336 | "source": [ 337 | "# s = env.reset()\n", 338 | "a = dqn.choose_action(s)\n", 339 | "pretty_print(s)\n", 340 | "print(f\"Chosen action: {a}\")\n", 341 | "s, r, done, info = env.step(a)\n", 342 | "pretty_print(s)\n", 343 | "state = torch.Tensor(s).cuda()\n", 344 | "out1 = dqn.target_net(state).cpu().detach().numpy()\n", 345 | "out2 = dqn.eval_net(state).cpu().detach().numpy()\n", 346 | "print(out1)\n", 347 | "print(out2)\n", 348 | "\n", 349 | "print(out1.argmax())\n", 350 | "print(out2.argmax())" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "execution_count": null, 356 | "metadata": {}, 357 | "outputs": [], 358 | "source": [ 359 | "6-2, 10-1, 2-2, 10-1, 10-1, 10-1, 4-3, 1-2, 10-1, 3-1" 360 | ] 361 | }, 362 | { 363 | "cell_type": "code", 364 | "execution_count": null, 365 | "metadata": {}, 366 | "outputs": [], 367 | "source": [ 368 | "net = Net().cuda()\n", 369 | "out = net(state)\n", 370 | "print(out)" 371 | ] 372 | }, 373 | { 374 | "cell_type": "code", 375 | "execution_count": null, 376 | "metadata": {}, 377 | "outputs": [], 378 | "source": [ 379 | "out.max(dim=0)" 380 | ] 381 | }, 382 | { 383 | "cell_type": "code", 384 | "execution_count": null, 385 | "metadata": {}, 386 | "outputs": [], 387 | "source": [ 388 | "lst = [1, 2, 3, 4, 5, 6]\n", 389 | "lst[::2]" 390 | ] 391 | }, 392 | { 393 | "cell_type": "code", 394 | "execution_count": 38, 395 | "metadata": {}, 396 | "outputs": [ 397 | { 398 | "data": { 399 | "text/plain": [ 400 | "array([0.06944444, 0.16666667, 0.01388889, 0.15277778, 0.04166667,\n", 401 | " 0.18055556, 0.19444444, 0.125 , 0.02777778, 0.02777778])" 402 | ] 403 | }, 404 | "execution_count": 38, 405 | "metadata": {}, 406 | "output_type": "execute_result" 407 | } 408 | ], 409 | "source": [ 410 | "s" 411 | ] 412 | }, 413 | { 414 | "cell_type": "code", 415 | "execution_count": 43, 416 | "metadata": {}, 417 | "outputs": [ 418 | { 419 | "name": "stdout", 420 | "output_type": "stream", 421 | "text": [ 422 | "[0.06944444 0.01388889 0.04166667 0.19444444 0.02777778]\n", 423 | "[0.16666667 0.15277778 0.18055556 0.125 0.02777778]\n" 424 | ] 425 | }, 426 | { 427 | "data": { 428 | "text/plain": [ 429 | "4" 430 | ] 431 | }, 432 | "execution_count": 43, 433 | "metadata": {}, 434 | "output_type": "execute_result" 435 | } 436 | ], 437 | "source": [ 438 | "lru = s[::2]\n", 439 | "lfu = s[1::2]\n", 440 | "print(lru)\n", 441 | "print(lfu)\n", 442 | "lru.argmin()\n", 443 | "lfu.argmin()" 444 | ] 445 | }, 446 | { 447 | "cell_type": "markdown", 448 | "metadata": {}, 449 | "source": [ 450 | "# Compare with LRU and LFU" 451 | ] 452 | }, 453 | { 454 | "cell_type": "code", 455 | "execution_count": 49, 456 | "metadata": {}, 457 | "outputs": [ 458 | { 459 | "data": { 460 | "application/vnd.jupyter.widget-view+json": { 461 | "model_id": "5e5c03126a8b49839d646fe6322b07cb", 462 | "version_major": 2, 463 | "version_minor": 0 464 | }, 465 | "text/plain": [ 466 | "HBox(children=(IntProgress(value=0, max=1000), HTML(value='')))" 467 | ] 468 | }, 469 | "metadata": {}, 470 | "output_type": "display_data" 471 | }, 472 | { 473 | "name": "stdout", 474 | "output_type": "stream", 475 | "text": [ 476 | "\n" 477 | ] 478 | }, 479 | { 480 | "data": { 481 | "application/vnd.jupyter.widget-view+json": { 482 | "model_id": "614eb5e90b1a4424acd70eaf6b9179d5", 483 | "version_major": 2, 484 | "version_minor": 0 485 | }, 486 | "text/plain": [ 487 | "HBox(children=(IntProgress(value=0, max=1000), HTML(value='')))" 488 | ] 489 | }, 490 | "metadata": {}, 491 | "output_type": "display_data" 492 | }, 493 | { 494 | "name": "stdout", 495 | "output_type": "stream", 496 | "text": [ 497 | "\n" 498 | ] 499 | }, 500 | { 501 | "data": { 502 | "application/vnd.jupyter.widget-view+json": { 503 | "model_id": "fc0f492856b2434697f60b4b9356ba16", 504 | "version_major": 2, 505 | "version_minor": 0 506 | }, 507 | "text/plain": [ 508 | "HBox(children=(IntProgress(value=0, max=1000), HTML(value='')))" 509 | ] 510 | }, 511 | "metadata": {}, 512 | "output_type": "display_data" 513 | }, 514 | { 515 | "name": "stdout", 516 | "output_type": "stream", 517 | "text": [ 518 | "\n" 519 | ] 520 | } 521 | ], 522 | "source": [ 523 | "from tqdm import tqdm_notebook as tqdm\n", 524 | "\n", 525 | "def choose_action(s, agent):\n", 526 | " if agent == 'dqn':\n", 527 | " return dqn.choose_action(s) \n", 528 | " if agent == 'lru':\n", 529 | " return s[::2].argmin()\n", 530 | " else:\n", 531 | " return s[1::2].argmin()\n", 532 | " \n", 533 | " \n", 534 | "ntests = 1000\n", 535 | "performance = {}\n", 536 | "\n", 537 | "for agent in \"dqn lru lfu\".split():\n", 538 | " for i in tqdm(range(ntests)):\n", 539 | " s = env.reset()\n", 540 | " nhits = 0\n", 541 | " while True:\n", 542 | " a = choose_action(s, agent)\n", 543 | " s_, r, done, info = env.step(a)\n", 544 | " nhits += r\n", 545 | "\n", 546 | " if done:\n", 547 | " history.append(nhits)\n", 548 | " break\n", 549 | " s = s_\n", 550 | " performance[agent] = np.array(history).mean() \n" 551 | ] 552 | }, 553 | { 554 | "cell_type": "code", 555 | "execution_count": 50, 556 | "metadata": {}, 557 | "outputs": [ 558 | { 559 | "data": { 560 | "text/plain": [ 561 | "{'dqn': 204.95244648318044,\n", 562 | " 'lru': 192.42864721485412,\n", 563 | " 'lfu': 198.62423887587823}" 564 | ] 565 | }, 566 | "execution_count": 50, 567 | "metadata": {}, 568 | "output_type": "execute_result" 569 | } 570 | ], 571 | "source": [ 572 | "performance" 573 | ] 574 | }, 575 | { 576 | "cell_type": "code", 577 | "execution_count": null, 578 | "metadata": {}, 579 | "outputs": [], 580 | "source": [] 581 | } 582 | ], 583 | "metadata": { 584 | "kernelspec": { 585 | "display_name": "Python 3", 586 | "language": "python", 587 | "name": "python3" 588 | }, 589 | "language_info": { 590 | "codemirror_mode": { 591 | "name": "ipython", 592 | "version": 3 593 | }, 594 | "file_extension": ".py", 595 | "mimetype": "text/x-python", 596 | "name": "python", 597 | "nbconvert_exporter": "python", 598 | "pygments_lexer": "ipython3", 599 | "version": "3.7.3" 600 | } 601 | }, 602 | "nbformat": 4, 603 | "nbformat_minor": 2 604 | } 605 | -------------------------------------------------------------------------------- /notebooks/.ipynb_checkpoints/LRU & LFU-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 19, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from environment import CacheEnv\n", 10 | "from collections import defaultdict" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 20, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "def lru_policy(s):\n", 20 | " least_recent = -1\n", 21 | " action = -1\n", 22 | " for key in s.keys():\n", 23 | " cur = s[key][0]\n", 24 | " if cur > least_recent:\n", 25 | " action = key\n", 26 | " least_recent = cur\n", 27 | " return action" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "def lfu_policy(s):\n", 37 | " least_frequent = 100000000\n", 38 | " action = -1\n", 39 | " for key in s.keys():\n", 40 | " cur = s[key][1]\n", 41 | " if cur < least_frequent:\n", 42 | " action = key\n", 43 | " least_frequent = cur\n", 44 | " return action" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 21, 50 | "metadata": {}, 51 | "outputs": [ 52 | { 53 | "name": "stdout", 54 | "output_type": "stream", 55 | "text": [ 56 | "Total hits: 1\n", 57 | "Total hits: 0\n", 58 | "Total hits: 0\n", 59 | "Total hits: 0\n", 60 | "Total hits: 1\n", 61 | "Total hits: 1\n", 62 | "Total hits: 1\n", 63 | "Total hits: 1\n", 64 | "Total hits: 0\n", 65 | "Total hits: 1\n", 66 | "Total hits: 1\n", 67 | "Total hits: 1\n", 68 | "Total hits: 1\n", 69 | "Total hits: 1\n", 70 | "Total hits: 0\n", 71 | "Total hits: 1\n", 72 | "Total hits: 1\n", 73 | "Total hits: 1\n", 74 | "Total hits: 0\n", 75 | "Total hits: 1\n", 76 | "Total hits: 0\n", 77 | "Total hits: 1\n", 78 | "Total hits: 0\n", 79 | "Total hits: 1\n", 80 | "Total hits: 1\n", 81 | "Total hits: 1\n", 82 | "Total hits: 1\n", 83 | "Total hits: 0\n", 84 | "Total hits: 1\n", 85 | "Total hits: 1\n", 86 | "Total hits: 1\n", 87 | "Total hits: 1\n", 88 | "Total hits: 1\n", 89 | "Total hits: 1\n", 90 | "Total hits: 1\n", 91 | "Total hits: 1\n", 92 | "Total hits: 1\n", 93 | "Total hits: 1\n", 94 | "Total hits: 1\n", 95 | "Total hits: 1\n", 96 | "Total hits: 1\n", 97 | "Total hits: 1\n", 98 | "Total hits: 1\n", 99 | "Total hits: 1\n", 100 | "Total hits: 1\n", 101 | "Total hits: 0\n", 102 | "Total hits: 1\n", 103 | "Total hits: 1\n", 104 | "Total hits: 1\n", 105 | "Total hits: 1\n", 106 | "Total hits: 1\n", 107 | "Total hits: 0\n", 108 | "Total hits: 1\n", 109 | "Total hits: 1\n", 110 | "Total hits: 1\n", 111 | "Total hits: 0\n", 112 | "Total hits: 0\n", 113 | "Total hits: 1\n", 114 | "Total hits: 1\n", 115 | "Total hits: 0\n", 116 | "Total hits: 1\n", 117 | "Total hits: 1\n", 118 | "Total hits: 1\n", 119 | "Total hits: 1\n", 120 | "Total hits: 1\n", 121 | "Total hits: 1\n", 122 | "Total hits: 1\n", 123 | "Total hits: 1\n", 124 | "Total hits: 1\n", 125 | "Total hits: 1\n", 126 | "Total hits: 0\n", 127 | "Total hits: 0\n", 128 | "Total hits: 1\n", 129 | "Total hits: 1\n", 130 | "Total hits: 1\n", 131 | "Total hits: 1\n", 132 | "Total hits: 1\n", 133 | "Total hits: 1\n", 134 | "Total hits: 1\n", 135 | "Total hits: 0\n", 136 | "Total hits: 1\n", 137 | "Total hits: 1\n", 138 | "Total hits: 1\n", 139 | "Total hits: 1\n", 140 | "Total hits: 1\n", 141 | "Total hits: 0\n", 142 | "Total hits: 1\n", 143 | "Total hits: 1\n", 144 | "Total hits: 1\n", 145 | "Total hits: 1\n", 146 | "Total hits: 0\n", 147 | "Total hits: 1\n", 148 | "Total hits: 1\n", 149 | "Total hits: 1\n", 150 | "Total hits: 1\n", 151 | "Total hits: 1\n", 152 | "Total hits: 1\n", 153 | "Total hits: 0\n", 154 | "Total hits: 0\n", 155 | "Total hits: 1\n", 156 | "Total hits: 2\n", 157 | "Total hits: 1\n", 158 | "Total hits: 1\n", 159 | "Total hits: 2\n", 160 | "Total hits: 2\n", 161 | "Total hits: 2\n", 162 | "Total hits: 1\n", 163 | "Total hits: 2\n", 164 | "Total hits: 1\n", 165 | "Total hits: 2\n", 166 | "Total hits: 0\n", 167 | "Total hits: 2\n", 168 | "Total hits: 0\n", 169 | "Total hits: 2\n", 170 | "Total hits: 2\n", 171 | "Total hits: 1\n", 172 | "Total hits: 2\n", 173 | "Total hits: 1\n", 174 | "Total hits: 2\n", 175 | "Total hits: 2\n", 176 | "Total hits: 2\n", 177 | "Total hits: 2\n", 178 | "Total hits: 1\n", 179 | "Total hits: 2\n", 180 | "Total hits: 1\n", 181 | "Total hits: 1\n", 182 | "Total hits: 2\n", 183 | "Total hits: 1\n", 184 | "Total hits: 2\n", 185 | "Total hits: 2\n", 186 | "Total hits: 0\n", 187 | "Total hits: 2\n", 188 | "Total hits: 1\n", 189 | "Total hits: 2\n", 190 | "Total hits: 2\n", 191 | "Total hits: 1\n", 192 | "Total hits: 2\n", 193 | "Total hits: 1\n", 194 | "Total hits: 2\n", 195 | "Total hits: 2\n", 196 | "Total hits: 2\n", 197 | "Total hits: 1\n", 198 | "Total hits: 0\n", 199 | "Total hits: 1\n", 200 | "Total hits: 2\n", 201 | "Total hits: 1\n", 202 | "Total hits: 2\n", 203 | "Total hits: 2\n", 204 | "Total hits: 1\n", 205 | "Total hits: 2\n", 206 | "Total hits: 3\n", 207 | "Total hits: 3\n", 208 | "Total hits: 2\n", 209 | "Total hits: 2\n", 210 | "Total hits: 1\n", 211 | "Total hits: 3\n", 212 | "Total hits: 2\n", 213 | "Total hits: 2\n", 214 | "Total hits: 3\n", 215 | "Total hits: 3\n", 216 | "Total hits: 2\n", 217 | "Total hits: 3\n", 218 | "Total hits: 2\n", 219 | "Total hits: 2\n", 220 | "Total hits: 2\n", 221 | "Total hits: 3\n", 222 | "Total hits: 3\n", 223 | "Total hits: 3\n", 224 | "Total hits: 1\n", 225 | "Total hits: 2\n", 226 | "Total hits: 1\n", 227 | "Total hits: 3\n", 228 | "Total hits: 3\n", 229 | "Total hits: 3\n", 230 | "Total hits: 3\n", 231 | "Total hits: 3\n", 232 | "Total hits: 3\n", 233 | "Total hits: 1\n", 234 | "Total hits: 1\n", 235 | "Total hits: 3\n", 236 | "Total hits: 3\n", 237 | "Total hits: 3\n", 238 | "Total hits: 1\n", 239 | "Total hits: 0\n", 240 | "Total hits: 2\n", 241 | "Total hits: 2\n", 242 | "Total hits: 3\n", 243 | "Total hits: 1\n", 244 | "Total hits: 3\n", 245 | "Total hits: 2\n", 246 | "Total hits: 2\n", 247 | "Total hits: 1\n", 248 | "Total hits: 3\n", 249 | "Total hits: 2\n", 250 | "Total hits: 3\n", 251 | "Total hits: 2\n", 252 | "Total hits: 3\n", 253 | "Total hits: 2\n", 254 | "Total hits: 3\n", 255 | "Total hits: 3\n", 256 | "Total hits: 3\n", 257 | "Total hits: 0\n", 258 | "Total hits: 3\n", 259 | "Total hits: 2\n", 260 | "Total hits: 3\n", 261 | "Total hits: 2\n", 262 | "Total hits: 4\n", 263 | "Total hits: 3\n", 264 | "Total hits: 4\n", 265 | "Total hits: 3\n", 266 | "Total hits: 4\n", 267 | "Total hits: 4\n", 268 | "Total hits: 2\n", 269 | "Total hits: 4\n", 270 | "Total hits: 2\n", 271 | "Total hits: 2\n", 272 | "Total hits: 3\n", 273 | "Total hits: 3\n", 274 | "Total hits: 3\n", 275 | "Total hits: 1\n", 276 | "Total hits: 2\n", 277 | "Total hits: 2\n", 278 | "Total hits: 4\n", 279 | "Total hits: 4\n", 280 | "Total hits: 4\n", 281 | "Total hits: 4\n", 282 | "Total hits: 1\n", 283 | "Total hits: 4\n", 284 | "Total hits: 2\n", 285 | "Total hits: 3\n", 286 | "Total hits: 3\n", 287 | "Total hits: 2\n", 288 | "Total hits: 2\n", 289 | "Total hits: 3\n", 290 | "Total hits: 4\n", 291 | "Total hits: 1\n", 292 | "Total hits: 4\n", 293 | "Total hits: 4\n", 294 | "Total hits: 4\n", 295 | "Total hits: 3\n", 296 | "Total hits: 2\n", 297 | "Total hits: 4\n", 298 | "Total hits: 2\n", 299 | "Total hits: 3\n", 300 | "Total hits: 3\n", 301 | "Total hits: 3\n", 302 | "Total hits: 1\n", 303 | "Total hits: 3\n", 304 | "Total hits: 2\n", 305 | "Total hits: 2\n", 306 | "Total hits: 5\n", 307 | "Total hits: 4\n", 308 | "Total hits: 5\n", 309 | "Total hits: 5\n", 310 | "Total hits: 3\n", 311 | "Total hits: 5\n", 312 | "Total hits: 5\n", 313 | "Total hits: 4\n", 314 | "Total hits: 5\n", 315 | "Total hits: 3\n", 316 | "Total hits: 3\n", 317 | "Total hits: 2\n", 318 | "Total hits: 5\n", 319 | "Total hits: 3\n", 320 | "Total hits: 4\n", 321 | "Total hits: 2\n", 322 | "Total hits: 4\n", 323 | "Total hits: 5\n", 324 | "Total hits: 2\n", 325 | "Total hits: 4\n", 326 | "Total hits: 3\n", 327 | "Total hits: 4\n", 328 | "Total hits: 4\n", 329 | "Total hits: 5\n", 330 | "Total hits: 5\n", 331 | "Total hits: 5\n", 332 | "Total hits: 2\n", 333 | "Total hits: 5\n", 334 | "Total hits: 5\n", 335 | "Total hits: 5\n", 336 | "Total hits: 5\n", 337 | "Total hits: 5\n", 338 | "Total hits: 5\n", 339 | "Total hits: 4\n", 340 | "Total hits: 2\n", 341 | "Total hits: 5\n", 342 | "Total hits: 3\n", 343 | "Total hits: 5\n", 344 | "Total hits: 5\n", 345 | "Total hits: 3\n", 346 | "Total hits: 4\n", 347 | "Total hits: 4\n", 348 | "Total hits: 4\n", 349 | "Total hits: 5\n", 350 | "Total hits: 2\n", 351 | "Total hits: 5\n", 352 | "Total hits: 4\n", 353 | "Total hits: 3\n", 354 | "Total hits: 5\n", 355 | "Total hits: 3\n", 356 | "Total hits: 2\n", 357 | "Total hits: 5\n", 358 | "Total hits: 5\n", 359 | "Total hits: 4\n", 360 | "Total hits: 6\n", 361 | "Total hits: 6\n", 362 | "Total hits: 3\n", 363 | "Total hits: 5\n", 364 | "Total hits: 3\n", 365 | "Total hits: 5\n", 366 | "Total hits: 2\n", 367 | "Total hits: 4\n", 368 | "Total hits: 3\n", 369 | "Total hits: 6\n", 370 | "Total hits: 4\n", 371 | "Total hits: 4\n", 372 | "Total hits: 6\n", 373 | "Total hits: 6\n", 374 | "Total hits: 4\n", 375 | "Total hits: 4\n", 376 | "Total hits: 6\n", 377 | "Total hits: 6\n", 378 | "Total hits: 4\n", 379 | "Total hits: 3\n", 380 | "Total hits: 6\n", 381 | "Total hits: 5\n", 382 | "Total hits: 6\n", 383 | "Total hits: 4\n", 384 | "Total hits: 4\n", 385 | "Total hits: 4\n", 386 | "Total hits: 3\n", 387 | "Total hits: 4\n", 388 | "Total hits: 3\n", 389 | "Total hits: 6\n", 390 | "Total hits: 4\n", 391 | "Total hits: 6\n", 392 | "Total hits: 2\n", 393 | "Total hits: 3\n", 394 | "Total hits: 3\n", 395 | "Total hits: 4\n", 396 | "Total hits: 5\n", 397 | "Total hits: 3\n", 398 | "Total hits: 2\n", 399 | "Total hits: 6\n", 400 | "Total hits: 5\n", 401 | "Total hits: 2\n", 402 | "Total hits: 4\n", 403 | "Total hits: 3\n", 404 | "Total hits: 5\n", 405 | "Total hits: 5\n", 406 | "Total hits: 4\n", 407 | "Total hits: 7\n", 408 | "Total hits: 3\n", 409 | "Total hits: 7\n", 410 | "Total hits: 4\n", 411 | "Total hits: 7\n", 412 | "Total hits: 7\n", 413 | "Total hits: 4\n", 414 | "Total hits: 4\n", 415 | "Total hits: 7\n", 416 | "Total hits: 7\n", 417 | "Total hits: 2\n", 418 | "Total hits: 7\n", 419 | "Total hits: 7\n", 420 | "Total hits: 7\n", 421 | "Total hits: 3\n", 422 | "Total hits: 6\n", 423 | "Total hits: 3\n", 424 | "Total hits: 3\n", 425 | "Total hits: 3\n", 426 | "Total hits: 3\n", 427 | "Total hits: 4\n", 428 | "Total hits: 6\n", 429 | "Total hits: 6\n", 430 | "Total hits: 4\n", 431 | "Total hits: 7\n", 432 | "Total hits: 7\n", 433 | "Total hits: 5\n", 434 | "Total hits: 3\n", 435 | "Total hits: 5\n", 436 | "Total hits: 4\n", 437 | "Total hits: 4\n", 438 | "Total hits: 4\n", 439 | "Total hits: 7\n", 440 | "Total hits: 3\n", 441 | "Total hits: 6\n", 442 | "Total hits: 6\n", 443 | "Total hits: 3\n", 444 | "Total hits: 4\n", 445 | "Total hits: 7\n", 446 | "Total hits: 4\n", 447 | "Total hits: 6\n", 448 | "Total hits: 5\n", 449 | "Total hits: 4\n", 450 | "Total hits: 5\n", 451 | "Total hits: 7\n", 452 | "Total hits: 5\n", 453 | "Total hits: 7\n", 454 | "Total hits: 3\n", 455 | "Total hits: 4\n", 456 | "Total hits: 4\n", 457 | "Total hits: 8\n", 458 | "Total hits: 6\n", 459 | "Total hits: 7\n", 460 | "Total hits: 5\n", 461 | "Total hits: 5\n", 462 | "Total hits: 4\n", 463 | "Total hits: 7\n", 464 | "Total hits: 8\n", 465 | "Total hits: 7\n", 466 | "Total hits: 4\n", 467 | "Total hits: 7\n", 468 | "Total hits: 6\n", 469 | "Total hits: 7\n", 470 | "Total hits: 6\n", 471 | "Total hits: 8\n", 472 | "Total hits: 7\n", 473 | "Total hits: 8\n", 474 | "Total hits: 4\n", 475 | "Total hits: 6\n", 476 | "Total hits: 8\n", 477 | "Total hits: 7\n", 478 | "Total hits: 5\n", 479 | "Total hits: 5\n", 480 | "Total hits: 6\n", 481 | "Total hits: 7\n", 482 | "Total hits: 8\n", 483 | "Total hits: 8\n", 484 | "Total hits: 3\n", 485 | "Total hits: 6\n", 486 | "Total hits: 6\n", 487 | "Total hits: 4\n", 488 | "Total hits: 8\n", 489 | "Total hits: 5\n", 490 | "Total hits: 5\n", 491 | "Total hits: 8\n", 492 | "Total hits: 7\n", 493 | "Total hits: 4\n", 494 | "Total hits: 4\n", 495 | "Total hits: 6\n", 496 | "Total hits: 5\n", 497 | "Total hits: 4\n", 498 | "Total hits: 6\n", 499 | "Total hits: 7\n", 500 | "Total hits: 8\n", 501 | "Total hits: 4\n", 502 | "Total hits: 4\n", 503 | "Total hits: 8\n", 504 | "Total hits: 6\n", 505 | "Total hits: 5\n", 506 | "Total hits: 9\n", 507 | "Total hits: 9\n", 508 | "Total hits: 4\n", 509 | "Total hits: 4\n", 510 | "Total hits: 4\n", 511 | "Total hits: 5\n", 512 | "Total hits: 4\n", 513 | "Total hits: 5\n", 514 | "Total hits: 6\n", 515 | "Total hits: 8\n", 516 | "Total hits: 7\n", 517 | "Total hits: 7\n", 518 | "Total hits: 6\n", 519 | "Total hits: 6\n", 520 | "Total hits: 4\n", 521 | "Total hits: 4\n", 522 | "Total hits: 8\n", 523 | "Total hits: 5\n", 524 | "Total hits: 5\n", 525 | "Total hits: 7\n", 526 | "Total hits: 7\n", 527 | "Total hits: 5\n", 528 | "Total hits: 3\n", 529 | "Total hits: 7\n", 530 | "Total hits: 7\n", 531 | "Total hits: 7\n", 532 | "Total hits: 8\n", 533 | "Total hits: 7\n", 534 | "Total hits: 7\n", 535 | "Total hits: 3\n", 536 | "Total hits: 4\n", 537 | "Total hits: 5\n", 538 | "Total hits: 6\n", 539 | "Total hits: 8\n", 540 | "Total hits: 5\n", 541 | "Total hits: 9\n", 542 | "Total hits: 7\n", 543 | "Total hits: 6\n", 544 | "Total hits: 9\n", 545 | "Total hits: 5\n", 546 | "Total hits: 6\n", 547 | "Total hits: 9\n", 548 | "Total hits: 5\n", 549 | "Total hits: 6\n", 550 | "Total hits: 7\n", 551 | "Total hits: 5\n", 552 | "Total hits: 9\n", 553 | "Total hits: 7\n", 554 | "Total hits: 8\n", 555 | "Total hits: 9\n" 556 | ] 557 | } 558 | ], 559 | "source": [ 560 | "l = 20\n", 561 | "trials = 50\n", 562 | "results = defaultdict(list)\n", 563 | "\n", 564 | "for l in range(10):\n", 565 | " for trial in range(trials):\n", 566 | " env = CacheEnv(eps_len=l)\n", 567 | " s = env.reset()\n", 568 | " done = env.done\n", 569 | "# print(\"Start: \", env.pages)\n", 570 | " while not done:\n", 571 | " a = lru_policy(s)\n", 572 | " s, r, done, observation = env.step(a)\n", 573 | " # print(f\">> Request: {env.new_page_id}\")\n", 574 | "# print(f\"Replace: {a}\")\n", 575 | "# print(observation)\n", 576 | "# print(env.pages, f\"reward: {r}\\n\")\n", 577 | " print(f\"Total hits: {env.total_hits}\")\n", 578 | " results[l].append(env.total_hits)" 579 | ] 580 | }, 581 | { 582 | "cell_type": "code", 583 | "execution_count": 23, 584 | "metadata": { 585 | "scrolled": true 586 | }, 587 | "outputs": [ 588 | { 589 | "data": { 590 | "text/plain": [ 591 | "[5,\n", 592 | " 4,\n", 593 | " 5,\n", 594 | " 5,\n", 595 | " 3,\n", 596 | " 5,\n", 597 | " 5,\n", 598 | " 4,\n", 599 | " 5,\n", 600 | " 3,\n", 601 | " 3,\n", 602 | " 2,\n", 603 | " 5,\n", 604 | " 3,\n", 605 | " 4,\n", 606 | " 2,\n", 607 | " 4,\n", 608 | " 5,\n", 609 | " 2,\n", 610 | " 4,\n", 611 | " 3,\n", 612 | " 4,\n", 613 | " 4,\n", 614 | " 5,\n", 615 | " 5,\n", 616 | " 5,\n", 617 | " 2,\n", 618 | " 5,\n", 619 | " 5,\n", 620 | " 5,\n", 621 | " 5,\n", 622 | " 5,\n", 623 | " 5,\n", 624 | " 4,\n", 625 | " 2,\n", 626 | " 5,\n", 627 | " 3,\n", 628 | " 5,\n", 629 | " 5,\n", 630 | " 3,\n", 631 | " 4,\n", 632 | " 4,\n", 633 | " 4,\n", 634 | " 5,\n", 635 | " 2,\n", 636 | " 5,\n", 637 | " 4,\n", 638 | " 3,\n", 639 | " 5,\n", 640 | " 3]" 641 | ] 642 | }, 643 | "execution_count": 23, 644 | "metadata": {}, 645 | "output_type": "execute_result" 646 | } 647 | ], 648 | "source": [ 649 | "results[5]" 650 | ] 651 | }, 652 | { 653 | "cell_type": "code", 654 | "execution_count": null, 655 | "metadata": {}, 656 | "outputs": [], 657 | "source": [] 658 | } 659 | ], 660 | "metadata": { 661 | "kernelspec": { 662 | "display_name": "Python 3", 663 | "language": "python", 664 | "name": "python3" 665 | }, 666 | "language_info": { 667 | "codemirror_mode": { 668 | "name": "ipython", 669 | "version": 3 670 | }, 671 | "file_extension": ".py", 672 | "mimetype": "text/x-python", 673 | "name": "python", 674 | "nbconvert_exporter": "python", 675 | "pygments_lexer": "ipython3", 676 | "version": "3.7.3" 677 | } 678 | }, 679 | "nbformat": 4, 680 | "nbformat_minor": 2 681 | } 682 | -------------------------------------------------------------------------------- /notebooks/.ipynb_checkpoints/Test neural net-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Is the NN enough to identify LRU or LFU indices?\n", 8 | "\n", 9 | "In this notebook I check if the nn for my DQN even capable of doing its task?" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import torch\n", 19 | "import torch.nn as nn\n", 20 | "import torch.nn.functional as F\n", 21 | "import numpy as np\n", 22 | "import gym\n", 23 | "import time\n", 24 | "from e2 import CacheEnv\n", 25 | "from tqdm import tqdm_notebook as tqdm" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 2, 31 | "metadata": {}, 32 | "outputs": [ 33 | { 34 | "name": "stdout", 35 | "output_type": "stream", 36 | "text": [ 37 | "\n", 38 | "Cache limit: 10\n", 39 | "Total Pages: 20\n" 40 | ] 41 | } 42 | ], 43 | "source": [ 44 | "# env vars\n", 45 | "EPS_LEN = 100\n", 46 | "N_PAGES = 20\n", 47 | "CACHE_LIMIT = 10 \n", 48 | "env = CacheEnv(\n", 49 | " eps_len=EPS_LEN, \n", 50 | " n_pages=N_PAGES, \n", 51 | " limit=CACHE_LIMIT\n", 52 | " )\n", 53 | "\n", 54 | "# dqn vars\n", 55 | "# N_EPS = 60000\n", 56 | "N_EPS = 1000\n", 57 | "BATCH_SIZE = 32\n", 58 | "LR_adam = 3e-4 # learning rate for Adam\n", 59 | "LR_sgd = 1e-3 # learning rate for SGD\n", 60 | "EPSILON = 0.9 # greedy policy\n", 61 | "GAMMA = 0.9 # reward discount\n", 62 | "TARGET_REPLACE_ITER = 2000 # target update frequency\n", 63 | "MEMORY_CAPACITY = 20000\n", 64 | "\n", 65 | "s = env.reset()\n", 66 | "N_ACTIONS = env.action_space_n\n", 67 | "STATE_SHAPE = (CACHE_LIMIT, 2)\n", 68 | "N_STATES = STATE_SHAPE[0]*STATE_SHAPE[1]" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 3, 74 | "metadata": {}, 75 | "outputs": [ 76 | { 77 | "data": { 78 | "application/vnd.jupyter.widget-view+json": { 79 | "model_id": "c611fdce06c14ad59cd0fdd02766d829", 80 | "version_major": 2, 81 | "version_minor": 0 82 | }, 83 | "text/plain": [ 84 | "HBox(children=(IntProgress(value=0, max=1000), HTML(value='')))" 85 | ] 86 | }, 87 | "metadata": {}, 88 | "output_type": "display_data" 89 | }, 90 | { 91 | "name": "stdout", 92 | "output_type": "stream", 93 | "text": [ 94 | "\n", 95 | "101000\n" 96 | ] 97 | } 98 | ], 99 | "source": [ 100 | "# Collect data\n", 101 | "dataX = []\n", 102 | "for _ in tqdm(range(N_EPS)):\n", 103 | " s = env.reset()\n", 104 | " dataX.append(s)\n", 105 | "\n", 106 | " while True:\n", 107 | " a = np.random.randint(0, N_ACTIONS)\n", 108 | " s, _, done, _ = env.step(a)\n", 109 | " dataX.append(s)\n", 110 | " \n", 111 | " if done:\n", 112 | " break\n", 113 | "\n", 114 | "print(len(dataX)) " 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 4, 120 | "metadata": {}, 121 | "outputs": [ 122 | { 123 | "data": { 124 | "application/vnd.jupyter.widget-view+json": { 125 | "model_id": "a5b9baec29524318b81ee1fc4ca0710a", 126 | "version_major": 2, 127 | "version_minor": 0 128 | }, 129 | "text/plain": [ 130 | "HBox(children=(IntProgress(value=0, max=101000), HTML(value='')))" 131 | ] 132 | }, 133 | "metadata": {}, 134 | "output_type": "display_data" 135 | }, 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "def get_labels(dataX):\n", 146 | " dataYLU = []\n", 147 | " dataYRU = [] \n", 148 | " for x in tqdm(dataX):\n", 149 | " lus = np.argmin(x[::2])\n", 150 | " rus = np.argmin(x[1::2])\n", 151 | " dataYLU.append(lus)\n", 152 | " dataYRU.append(rus)\n", 153 | " return dataYLU, dataYRU \n", 154 | " \n", 155 | "dataYLU, dataYRU = get_labels(dataX)" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 5, 161 | "metadata": {}, 162 | "outputs": [ 163 | { 164 | "name": "stdout", 165 | "output_type": "stream", 166 | "text": [ 167 | "101000\n", 168 | "101000\n", 169 | "101000\n" 170 | ] 171 | } 172 | ], 173 | "source": [ 174 | "print(len(dataX))\n", 175 | "print(len(dataYLU))\n", 176 | "print(len(dataYRU))" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": 6, 182 | "metadata": {}, 183 | "outputs": [ 184 | { 185 | "name": "stdout", 186 | "output_type": "stream", 187 | "text": [ 188 | "101000\n", 189 | "101000\n", 190 | "80800\n", 191 | "20200\n" 192 | ] 193 | } 194 | ], 195 | "source": [ 196 | "from sklearn.model_selection import train_test_split\n", 197 | "X = dataX\n", 198 | "Y = dataYLU\n", 199 | "XTrain, XTest, yTrain, yTest = train_test_split(X, Y, test_size = 0.2)\n", 200 | "print(len(X))\n", 201 | "print(len(Y))\n", 202 | "print(len(XTrain))\n", 203 | "print(len(XTest))" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": 7, 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "from torch.utils.data import Dataset, DataLoader\n", 213 | "\n", 214 | "class CacheDataset(Dataset):\n", 215 | " def __init__(self, data, targets, transform=None):\n", 216 | " self.transform = transform\n", 217 | " self.data = torch.Tensor(data)\n", 218 | " self.targets = torch.LongTensor(targets)\n", 219 | "\n", 220 | " def __getitem__(self, index):\n", 221 | " x = self.data[index]\n", 222 | " y = self.targets[index]\n", 223 | " return x, y\n", 224 | "\n", 225 | " def __len__(self):\n", 226 | " return self.data.shape[0]" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": 8, 232 | "metadata": {}, 233 | "outputs": [], 234 | "source": [ 235 | "train_dataset = CacheDataset(XTrain, yTrain)\n", 236 | "test_dataset = CacheDataset(XTest, yTest)\n", 237 | "\n", 238 | "train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE)\n", 239 | "test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": 9, 245 | "metadata": {}, 246 | "outputs": [], 247 | "source": [ 248 | "class Net(nn.Module):\n", 249 | " def __init__(self, ):\n", 250 | " super(Net, self).__init__()\n", 251 | " input_size = N_STATES\n", 252 | " h_dim = 50\n", 253 | " self.fc1 = nn.Linear(input_size, h_dim)\n", 254 | " self.bn1 = nn.BatchNorm1d(h_dim)\n", 255 | " self.fc2 = nn.Linear(h_dim, h_dim//4)\n", 256 | " self.bn1 = nn.BatchNorm1d(h_dim//4)\n", 257 | " self.fc3 = nn.Linear(h_dim//4, h_dim)\n", 258 | " self.bn1 = nn.BatchNorm1d(h_dim)\n", 259 | " self.out = nn.Linear(h_dim//4, N_ACTIONS)\n", 260 | "\n", 261 | " def forward(self, x):\n", 262 | "# bs = x.shape[0]\n", 263 | " x = F.relu(self.fc1(x))\n", 264 | " x = self.bn(x)\n", 265 | " x = F.relu(self.fc2(x))\n", 266 | " x = F.relu(self.fc3(x))\n", 267 | " x = self.out(x)\n", 268 | "# return x\n", 269 | " return F.softmax(x, dim=0) " 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": 10, 275 | "metadata": { 276 | "scrolled": true 277 | }, 278 | "outputs": [ 279 | { 280 | "ename": "TypeError", 281 | "evalue": "__init__() missing 1 required positional argument: 'num_features'", 282 | "output_type": "error", 283 | "traceback": [ 284 | "\u001b[0;31m------------------------------------------------------\u001b[0m", 285 | "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", 286 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mmodel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mNet\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mLR_adam\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m3e-3\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0moptimizer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptim\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAdam\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlr\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mLR_adam\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;31m# optimizer = torch.optim.SGD(model.parameters(), lr=LR_sgd)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;31m# loss_func = nn.MSELoss()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 287 | "\u001b[0;32m\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0minput_size\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mN_STATES\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mh_dim\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m50\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mBatchNorm1d\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfc1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh_dim\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfc2\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mh_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh_dim\u001b[0m\u001b[0;34m//\u001b[0m\u001b[0;36m4\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 288 | "\u001b[0;31mTypeError\u001b[0m: __init__() missing 1 required positional argument: 'num_features'" 289 | ] 290 | } 291 | ], 292 | "source": [ 293 | "model = Net().cuda()\n", 294 | "LR_adam = 3e-3\n", 295 | "optimizer = torch.optim.Adam(model.parameters(), lr=LR_adam)\n", 296 | "# optimizer = torch.optim.SGD(model.parameters(), lr=LR_sgd)\n", 297 | "# loss_func = nn.MSELoss()\n", 298 | "criterion = nn.CrossEntropyLoss()\n", 299 | "\n", 300 | "epochs = 100\n", 301 | "for epoch in tqdm(range(epochs)):\n", 302 | " totalloss = []\n", 303 | " for i, (X, y) in enumerate(train_loader):\n", 304 | " X, y = X.cuda(), y.cuda()\n", 305 | " optimizer.zero_grad()\n", 306 | " out = model(X)\n", 307 | " loss = criterion(out, y)\n", 308 | " loss.backward()\n", 309 | " optimizer.step()\n", 310 | " with torch.no_grad():\n", 311 | " totalloss.append(loss.detach().cpu().numpy())\n", 312 | " totalloss = np.array(totalloss).mean()\n", 313 | " print(f\"Epoch: [{epoch}] | Loss: {totalloss}\") " 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 29, 319 | "metadata": { 320 | "scrolled": true 321 | }, 322 | "outputs": [ 323 | { 324 | "data": { 325 | "application/vnd.jupyter.widget-view+json": { 326 | "model_id": "4642ae24a11146c2bba02067a2ed0e60", 327 | "version_major": 2, 328 | "version_minor": 0 329 | }, 330 | "text/plain": [ 331 | "HBox(children=(IntProgress(value=0), HTML(value='')))" 332 | ] 333 | }, 334 | "metadata": {}, 335 | "output_type": "display_data" 336 | }, 337 | { 338 | "name": "stdout", 339 | "output_type": "stream", 340 | "text": [ 341 | "Epoch: [0] | Loss: 2.2546284198760986\n", 342 | "Epoch: [1] | Loss: 2.1946310997009277\n", 343 | "Epoch: [2] | Loss: 2.1658849716186523\n", 344 | "Epoch: [3] | Loss: 2.1551766395568848\n", 345 | "Epoch: [4] | Loss: 2.1485772132873535\n", 346 | "Epoch: [5] | Loss: 2.1315369606018066\n", 347 | "Epoch: [6] | Loss: 2.1016390323638916\n", 348 | "Epoch: [7] | Loss: 2.0723493099212646\n", 349 | "Epoch: [8] | Loss: 2.0560410022735596\n", 350 | "Epoch: [9] | Loss: 2.0498576164245605\n", 351 | "Epoch: [10] | Loss: 2.0476937294006348\n", 352 | "Epoch: [11] | Loss: 2.0468590259552\n", 353 | "Epoch: [12] | Loss: 2.0463180541992188\n", 354 | "Epoch: [13] | Loss: 2.0460495948791504\n", 355 | "Epoch: [14] | Loss: 2.045947790145874\n", 356 | "Epoch: [15] | Loss: 2.045693874359131\n", 357 | "Epoch: [16] | Loss: 2.0456440448760986\n", 358 | "Epoch: [17] | Loss: 2.045503616333008\n", 359 | "Epoch: [18] | Loss: 2.0453832149505615\n", 360 | "Epoch: [19] | Loss: 2.0452992916107178\n", 361 | "Epoch: [20] | Loss: 2.0451860427856445\n", 362 | "Epoch: [21] | Loss: 2.045102596282959\n", 363 | "Epoch: [22] | Loss: 2.044992208480835\n", 364 | "Epoch: [23] | Loss: 2.044844627380371\n", 365 | "Epoch: [24] | Loss: 2.044734239578247\n", 366 | "Epoch: [25] | Loss: 2.0446269512176514\n", 367 | "Epoch: [26] | Loss: 2.0444576740264893\n", 368 | "Epoch: [27] | Loss: 2.044301986694336\n", 369 | "Epoch: [28] | Loss: 2.0442354679107666\n", 370 | "Epoch: [29] | Loss: 2.0441060066223145\n", 371 | "Epoch: [30] | Loss: 2.0439844131469727\n", 372 | "Epoch: [31] | Loss: 2.0438575744628906\n", 373 | "Epoch: [32] | Loss: 2.0437779426574707\n", 374 | "Epoch: [33] | Loss: 2.043729066848755\n", 375 | "Epoch: [34] | Loss: 2.0436296463012695\n", 376 | "Epoch: [35] | Loss: 2.0435824394226074\n", 377 | "Epoch: [36] | Loss: 2.043461799621582\n", 378 | "Epoch: [37] | Loss: 2.043421506881714\n", 379 | "Epoch: [38] | Loss: 2.043349504470825\n", 380 | "Epoch: [39] | Loss: 2.043292999267578\n", 381 | "Epoch: [40] | Loss: 2.0432193279266357\n", 382 | "Epoch: [41] | Loss: 2.0432820320129395\n", 383 | "Epoch: [42] | Loss: 2.0431628227233887\n", 384 | "Epoch: [43] | Loss: 2.0430920124053955\n", 385 | "Epoch: [44] | Loss: 2.043161392211914\n", 386 | "Epoch: [45] | Loss: 2.0430634021759033\n", 387 | "Epoch: [46] | Loss: 2.043029546737671\n", 388 | "Epoch: [47] | Loss: 2.0429415702819824\n", 389 | "Epoch: [48] | Loss: 2.0429136753082275\n", 390 | "Epoch: [49] | Loss: 2.0428225994110107\n", 391 | "Epoch: [50] | Loss: 2.0427842140197754\n", 392 | "Epoch: [51] | Loss: 2.042729377746582\n", 393 | "Epoch: [52] | Loss: 2.042661428451538\n", 394 | "Epoch: [53] | Loss: 2.0426313877105713\n", 395 | "Epoch: [54] | Loss: 2.042609930038452\n", 396 | "Epoch: [55] | Loss: 2.042605400085449\n", 397 | "Epoch: [56] | Loss: 2.0423998832702637\n", 398 | "Epoch: [57] | Loss: 2.0423569679260254\n", 399 | "Epoch: [58] | Loss: 2.042381763458252\n", 400 | "Epoch: [59] | Loss: 2.0422234535217285\n", 401 | "Epoch: [60] | Loss: 2.042184352874756\n", 402 | "Epoch: [61] | Loss: 2.042114019393921\n", 403 | "Epoch: [62] | Loss: 2.0420751571655273\n", 404 | "Epoch: [63] | Loss: 2.0420069694519043\n", 405 | "Epoch: [64] | Loss: 2.0419962406158447\n", 406 | "Epoch: [65] | Loss: 2.0418896675109863\n", 407 | "Epoch: [66] | Loss: 2.0418474674224854\n" 408 | ] 409 | }, 410 | { 411 | "ename": "KeyboardInterrupt", 412 | "evalue": "", 413 | "output_type": "error", 414 | "traceback": [ 415 | "\u001b[0;31m------------------------------------------------------\u001b[0m", 416 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 417 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 418 | "\u001b[0;32m~/miniconda3/lib/python3.7/site-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 539\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_slow_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 540\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 541\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 542\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 543\u001b[0m \u001b[0mhook_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 419 | "\u001b[0;32m\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0;31m# bs = x.shape[0]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfc1\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfc2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0;31m# x = F.relu(self.fc3(x))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 420 | "\u001b[0;32m~/miniconda3/lib/python3.7/site-packages/torch/nn/functional.py\u001b[0m in \u001b[0;36mrelu\u001b[0;34m(input, inplace)\u001b[0m\n\u001b[1;32m 912\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 913\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 914\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 915\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 916\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", 421 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 422 | ] 423 | } 424 | ], 425 | "source": [ 426 | "model = Net().cuda()\n", 427 | "LR_adam = 3e-3\n", 428 | "optimizer = torch.optim.Adam(model.parameters(), lr=LR_adam)\n", 429 | "# LR_sgd = 1\n", 430 | "# optimizer = torch.optim.SGD(model.parameters(), lr=LR_sgd)\n", 431 | "# loss_func = nn.MSELoss()\n", 432 | "criterion = nn.CrossEntropyLoss()\n", 433 | "\n", 434 | "epochs = 100\n", 435 | "for epoch in tqdm(range(epochs)):\n", 436 | " totalloss = []\n", 437 | " for i, (X, y) in enumerate(train_loader):\n", 438 | " X, y = X.cuda(), y.cuda()\n", 439 | " optimizer.zero_grad()\n", 440 | " out = model(X)\n", 441 | " loss = criterion(out, y)\n", 442 | " loss.backward()\n", 443 | " optimizer.step()\n", 444 | " with torch.no_grad():\n", 445 | " totalloss.append(loss.detach().cpu().numpy())\n", 446 | " totalloss = np.array(totalloss).mean()\n", 447 | " print(f\"Epoch: [{epoch}] | Loss: {totalloss}\") " 448 | ] 449 | }, 450 | { 451 | "cell_type": "code", 452 | "execution_count": null, 453 | "metadata": {}, 454 | "outputs": [], 455 | "source": [] 456 | } 457 | ], 458 | "metadata": { 459 | "kernelspec": { 460 | "display_name": "Python 3", 461 | "language": "python", 462 | "name": "python3" 463 | }, 464 | "language_info": { 465 | "codemirror_mode": { 466 | "name": "ipython", 467 | "version": 3 468 | }, 469 | "file_extension": ".py", 470 | "mimetype": "text/x-python", 471 | "name": "python", 472 | "nbconvert_exporter": "python", 473 | "pygments_lexer": "ipython3", 474 | "version": "3.7.3" 475 | } 476 | }, 477 | "nbformat": 4, 478 | "nbformat_minor": 2 479 | } 480 | -------------------------------------------------------------------------------- /notebooks/.ipynb_checkpoints/Test_Env.py-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# from environment import CacheEnv\n", 10 | "from e2 import CacheEnv" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "env = CacheEnv(eps_len=100, n_pages=10, limit=5, verbose=True)\n", 20 | "env.toggle_human()\n", 21 | "s = env.reset()\n", 22 | "print(f\"Requested page: {env.new_page_id}\")\n", 23 | "s" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "print(env.step(1))\n", 33 | "print(f\"Requested page: {env.new_page_id}\")" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | "metadata": { 40 | "scrolled": false 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "for x in range(3):\n", 45 | " s, r, done, observation = env.step(1)\n", 46 | " print(s)\n", 47 | " print(observation)\n", 48 | " print(f\"Reward: {r}\")\n", 49 | " print(f\"Done: {done}\")\n", 50 | " if done:\n", 51 | " break" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 4, 57 | "metadata": {}, 58 | "outputs": [ 59 | { 60 | "name": "stdout", 61 | "output_type": "stream", 62 | "text": [ 63 | "\n", 64 | "Cache limit: 5\n", 65 | "Total Pages: 10\n" 66 | ] 67 | }, 68 | { 69 | "data": { 70 | "text/plain": [ 71 | "{4: [1, 1], 1: [1, 1], 6: [1, 1], 5: [1, 1], 0: [1, 1]}" 72 | ] 73 | }, 74 | "execution_count": 4, 75 | "metadata": {}, 76 | "output_type": "execute_result" 77 | } 78 | ], 79 | "source": [ 80 | "env = CacheEnv(eps_len=100, n_pages=10, limit=5, verbose=False)\n", 81 | "env.toggle_human()\n", 82 | "s = env.reset()\n", 83 | "s" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 5, 89 | "metadata": { 90 | "scrolled": false 91 | }, 92 | "outputs": [ 93 | { 94 | "name": "stdout", 95 | "output_type": "stream", 96 | "text": [ 97 | "({4: [2, 1], 2: [1, 1], 6: [2, 1], 5: [2, 1], 0: [2, 1]}, 0, False, 'There were 0 hits.')\n", 98 | "\n", 99 | "\n", 100 | "({4: [4, 1], 9: [2, 1], 6: [4, 1], 5: [1, 2], 0: [4, 1]}, 1, False, 'There were 1 hits.')\n", 101 | "\n", 102 | "\n", 103 | "({4: [5, 1], 2: [1, 2], 6: [5, 1], 5: [2, 2], 0: [5, 1]}, 0, False, 'There were 0 hits.')\n", 104 | "\n", 105 | "\n", 106 | "({4: [1, 2], 9: [2, 2], 6: [7, 1], 5: [4, 2], 0: [7, 1]}, 1, False, 'There were 1 hits.')\n", 107 | "\n", 108 | "\n", 109 | "({4: [2, 2], 1: [1, 2], 6: [8, 1], 5: [5, 2], 0: [8, 1]}, 0, False, 'There were 0 hits.')\n", 110 | "\n", 111 | "\n", 112 | "({4: [6, 2], 9: [1, 6], 6: [12, 1], 5: [9, 2], 0: [12, 1]}, 3, False, 'There were 3 hits.')\n", 113 | "\n", 114 | "\n", 115 | "({4: [7, 2], 2: [1, 3], 6: [13, 1], 5: [10, 2], 0: [13, 1]}, 0, False, 'There were 0 hits.')\n", 116 | "\n", 117 | "\n", 118 | "({4: [9, 2], 1: [2, 3], 6: [1, 2], 5: [12, 2], 0: [15, 1]}, 1, False, 'There were 1 hits.')\n", 119 | "\n", 120 | "\n", 121 | "({4: [10, 2], 7: [1, 1], 6: [2, 2], 5: [13, 2], 0: [16, 1]}, 0, False, 'There were 0 hits.')\n", 122 | "\n", 123 | "\n", 124 | "({4: [11, 2], 9: [1, 7], 6: [3, 2], 5: [14, 2], 0: [17, 1]}, 0, False, 'There were 0 hits.')\n", 125 | "\n", 126 | "\n" 127 | ] 128 | } 129 | ], 130 | "source": [ 131 | "for _ in range(10):\n", 132 | " print(env.step(1))\n", 133 | " print(\"\\n\")" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "metadata": {}, 140 | "outputs": [], 141 | "source": [] 142 | } 143 | ], 144 | "metadata": { 145 | "kernelspec": { 146 | "display_name": "Python 3", 147 | "language": "python", 148 | "name": "python3" 149 | }, 150 | "language_info": { 151 | "codemirror_mode": { 152 | "name": "ipython", 153 | "version": 3 154 | }, 155 | "file_extension": ".py", 156 | "mimetype": "text/x-python", 157 | "name": "python", 158 | "nbconvert_exporter": "python", 159 | "pygments_lexer": "ipython3", 160 | "version": "3.7.3" 161 | } 162 | }, 163 | "nbformat": 4, 164 | "nbformat_minor": 2 165 | } 166 | -------------------------------------------------------------------------------- /notebooks/DQN3.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Imports" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 101, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "name": "stdout", 17 | "output_type": "stream", 18 | "text": [ 19 | "Using device: cuda\n" 20 | ] 21 | }, 22 | { 23 | "data": { 24 | "text/plain": [ 25 | "" 26 | ] 27 | }, 28 | "execution_count": 101, 29 | "metadata": {}, 30 | "output_type": "execute_result" 31 | } 32 | ], 33 | "source": [ 34 | "import torch\n", 35 | "import torch.nn as nn\n", 36 | "import torch.nn.functional as F\n", 37 | "import numpy as np\n", 38 | "import gym\n", 39 | "import time\n", 40 | "from e2 import CacheEnv\n", 41 | "from tqdm import tqdm_notebook as tqdm\n", 42 | "\n", 43 | "import random\n", 44 | "import matplotlib.pyplot as plt\n", 45 | "import matplotlib\n", 46 | "from collections import namedtuple\n", 47 | "from itertools import count\n", 48 | "from PIL import Image\n", 49 | "\n", 50 | "import torch.optim as optim\n", 51 | "import math\n", 52 | "import torchvision.transforms as T\n", 53 | "\n", 54 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", 55 | "print(f\"Using device: {device}\")\n", 56 | "\n", 57 | "is_ipython = 'inline' in matplotlib.get_backend()\n", 58 | "if is_ipython:\n", 59 | " from IPython import display\n", 60 | "plt.ion()\n", 61 | "torch.manual_seed(7)" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 102, 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "Transition = namedtuple('Transition',\n", 71 | " ('state', 'action', 'next_state', 'reward'))\n", 72 | "\n", 73 | "\n", 74 | "class ReplayMemory(object):\n", 75 | "\n", 76 | " def __init__(self, capacity):\n", 77 | " self.capacity = capacity\n", 78 | " self.memory = []\n", 79 | " self.position = 0\n", 80 | "\n", 81 | " def push(self, *args):\n", 82 | " \"\"\"Saves a transition.\"\"\"\n", 83 | " if len(self.memory) < self.capacity:\n", 84 | " self.memory.append(None)\n", 85 | " self.memory[self.position] = Transition(*args)\n", 86 | " self.position = (self.position + 1) % self.capacity\n", 87 | "\n", 88 | " def sample(self, batch_size):\n", 89 | " return random.sample(self.memory, batch_size)\n", 90 | "\n", 91 | " def __len__(self):\n", 92 | " return len(self.memory)" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 103, 98 | "metadata": {}, 99 | "outputs": [ 100 | { 101 | "name": "stdout", 102 | "output_type": "stream", 103 | "text": [ 104 | "tensor([0.6413, 0.8875, 0.8863, 0.6594])\n" 105 | ] 106 | } 107 | ], 108 | "source": [ 109 | "x = torch.rand((1,))\n", 110 | "y = torch.rand(4,)\n", 111 | "print(x*y+x)" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 197, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "class Net(nn.Module):\n", 121 | " def __init__(self):\n", 122 | " super(Net, self).__init__()\n", 123 | " input_size = CACHE_LIMIT\n", 124 | " self.h_dim = 16\n", 125 | " self.lu_weight = torch.rand(1).to(device)\n", 126 | " self.fu_wight = torch.rand(1).to(device)\n", 127 | "# h_dim = 30\n", 128 | " self.fc_lu = nn.Linear(input_size, self.h_dim)\n", 129 | " self.fc_fu = nn.Linear(input_size,self.h_dim)\n", 130 | " self.out_lu = nn.Linear(self.h_dim, n_actions)\n", 131 | " self.out_fu = nn.Linear(self.h_dim, n_actions)\n", 132 | "\n", 133 | " def forward(self, x):\n", 134 | " \n", 135 | "# if len(x.shape) == 1:\n", 136 | "# x = x.view(-1, x.size(0))\n", 137 | " lu_vec = x[:5].view(-1,5)\n", 138 | " print(lu_vec.shape)\n", 139 | " fu_vec = x[-5:].view(-1,5)\n", 140 | " print(fu_vec.shape)\n", 141 | "\n", 142 | " temp_lu = F.relu(self.out_lu(self.fc_lu(lu_vec)))\n", 143 | " temp_fu = F.relu(self.out_fu(self.fc_fu(fu_vec)))\n", 144 | " x = F.relu(self.lu_weight*temp_lu + self.fu_wight*temp_fu)\n", 145 | " return x" 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": 198, 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "# x = torch.rand((10,))\n", 155 | "# obj = Net()\n", 156 | "# print(obj(x))" 157 | ] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": 199, 162 | "metadata": {}, 163 | "outputs": [ 164 | { 165 | "name": "stdout", 166 | "output_type": "stream", 167 | "text": [ 168 | "\n", 169 | "Cache limit: 5\n", 170 | "Total Pages: 10\n" 171 | ] 172 | } 173 | ], 174 | "source": [ 175 | "BATCH_SIZE = 128\n", 176 | "GAMMA = 0.999\n", 177 | "EPS_START = 0.9\n", 178 | "EPS_END = 0.05\n", 179 | "EPS_DECAY = 200\n", 180 | "# TARGET_UPDATE = 10\n", 181 | "TARGET_UPDATE = 200\n", 182 | "\n", 183 | "# EPS_LEN = 100\n", 184 | "EPS_LEN = 200\n", 185 | "N_PAGES = 10\n", 186 | "CACHE_LIMIT = 5 \n", 187 | "STATE_SHAPE = (CACHE_LIMIT, 2)\n", 188 | "# N_STATES = STATE_SHAPE[0]*STATE_SHAPE[1]\n", 189 | "\n", 190 | "env = CacheEnv(\n", 191 | " eps_len=EPS_LEN, \n", 192 | " n_pages=N_PAGES, \n", 193 | " limit=CACHE_LIMIT\n", 194 | " )\n", 195 | "\n", 196 | "\n", 197 | "# Get number of actions from gym action space\n", 198 | "n_actions = env.action_space_n\n", 199 | "\n", 200 | "policy_net = Net().to(device)\n", 201 | "target_net = Net().to(device)\n", 202 | "target_net.load_state_dict(policy_net.state_dict())\n", 203 | "target_net.eval()\n", 204 | "\n", 205 | "# optimizer = optim.RMSprop(policy_net.parameters())\n", 206 | "optimizer = optim.Adam(policy_net.parameters(), 3e-3)\n", 207 | "memory = ReplayMemory(10000)\n", 208 | "\n", 209 | "\n", 210 | "steps_done = 0\n", 211 | "\n", 212 | "\n", 213 | "def select_action(state):\n", 214 | "# state = torch.Tensor(state).to(device)\n", 215 | "# print(state.shape)\n", 216 | " global steps_done\n", 217 | " sample = random.random()\n", 218 | " eps_threshold = EPS_END + (EPS_START - EPS_END) * \\\n", 219 | " math.exp(-1. * steps_done / EPS_DECAY)\n", 220 | " steps_done += 1\n", 221 | " if sample > eps_threshold:\n", 222 | " with torch.no_grad():\n", 223 | " # t.max(1) will return largest column value of each row.\n", 224 | " # second column on max result is index of where max element was\n", 225 | " # found, so we pick action with the larger expected reward.\n", 226 | "# out = policy_net(state)\n", 227 | "# print(out)\n", 228 | "# print(out.shape)\n", 229 | " return np.argmax(policy_net(state).data.cpu().numpy())\n", 230 | " else:\n", 231 | " \n", 232 | " return np.random.choice(np.array(range(n_actions)))\n", 233 | "\n", 234 | "episode_durations = []\n", 235 | "\n", 236 | "\n", 237 | "def plot_durations():\n", 238 | " plt.figure(2)\n", 239 | " plt.clf()\n", 240 | " durations_t = torch.tensor(episode_durations, dtype=torch.float)\n", 241 | " plt.title('Training...')\n", 242 | " plt.xlabel('Episode')\n", 243 | " plt.ylabel('Duration')\n", 244 | " plt.plot(durations_t.numpy())\n", 245 | " # Take 100 episode averages and plot them too\n", 246 | " if len(durations_t) >= 100:\n", 247 | " means = durations_t.unfold(0, 100, 1).mean(1).view(-1)\n", 248 | " means = torch.cat((torch.zeros(99), means))\n", 249 | " plt.plot(means.numpy())\n", 250 | "\n", 251 | " plt.pause(0.001) # pause a bit so that plots are updated\n", 252 | " if is_ipython:\n", 253 | " display.clear_output(wait=True)\n", 254 | " display.display(plt.gcf())" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": 200, 260 | "metadata": {}, 261 | "outputs": [], 262 | "source": [ 263 | "batch = None\n", 264 | "def optimize_model():\n", 265 | " global batch\n", 266 | " if len(memory) < BATCH_SIZE:\n", 267 | " return\n", 268 | " transitions = memory.sample(BATCH_SIZE)\n", 269 | " # Transpose the batch (see https://stackoverflow.com/a/19343/3343043 for\n", 270 | " # detailed explanation). This converts batch-array of Transitions\n", 271 | " # to Transition of batch-arrays.\n", 272 | " batch = Transition(*zip(*transitions))\n", 273 | "\n", 274 | " # Compute a mask of non-final states and concatenate the batch elements\n", 275 | " # (a final state would've been the one after which simulation ended)\n", 276 | " non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,\n", 277 | " batch.next_state)), device=device, dtype=torch.bool)\n", 278 | " non_final_next_states = torch.cat([s for s in batch.next_state\n", 279 | " if s is not None]).view(-1, CACHE_LIMIT*2)\n", 280 | "# state_batch = torch.cat(batch.state)\n", 281 | " state_batch = torch.cat(batch.state).view(-1, CACHE_LIMIT*2)\n", 282 | "# print(type(batch.action),batch.action[0].dtype)\n", 283 | " \n", 284 | " action_batch = torch.tensor(np.array(batch.action)).to(device)\n", 285 | " reward_batch = torch.cat(batch.reward)\n", 286 | "\n", 287 | " # Compute Q(s_t, a) - the model computes Q(s_t), then we select the\n", 288 | " # columns of actions taken. These are the actions which would've been taken\n", 289 | " # for each batch state according to policy_net\n", 290 | " state_action_values = policy_net(state_batch).gather(1, action_batch)\n", 291 | "\n", 292 | " # Compute V(s_{t+1}) for all next states.\n", 293 | " # Expected values of actions for non_final_next_states are computed based\n", 294 | " # on the \"older\" target_net; selecting their best reward with max(1)[0].\n", 295 | " # This is merged based on the mask, such that we'll have either the expected\n", 296 | " # state value or 0 in case the state was final.\n", 297 | " next_state_values = torch.zeros(BATCH_SIZE, device=device)\n", 298 | " next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()\n", 299 | " # Compute the expected Q values\n", 300 | "# print(reward_batch.dtype)\n", 301 | " expected_state_action_values = (next_state_values * GAMMA) + reward_batch.float()\n", 302 | "\n", 303 | " # Compute Huber loss\n", 304 | " loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n", 305 | "\n", 306 | " # Optimize the model\n", 307 | " optimizer.zero_grad()\n", 308 | " loss.backward()\n", 309 | " for param in policy_net.parameters():\n", 310 | " param.grad.data.clamp_(-1, 1)\n", 311 | " optimizer.step()" 312 | ] 313 | }, 314 | { 315 | "cell_type": "code", 316 | "execution_count": 201, 317 | "metadata": { 318 | "scrolled": true 319 | }, 320 | "outputs": [ 321 | { 322 | "data": { 323 | "application/vnd.jupyter.widget-view+json": { 324 | "model_id": "62879918f291412f8e449b7a939b709a", 325 | "version_major": 2, 326 | "version_minor": 0 327 | }, 328 | "text/plain": [ 329 | "HBox(children=(IntProgress(value=0, max=5000), HTML(value='')))" 330 | ] 331 | }, 332 | "metadata": {}, 333 | "output_type": "display_data" 334 | }, 335 | { 336 | "name": "stdout", 337 | "output_type": "stream", 338 | "text": [ 339 | "torch.Size([1, 5])\n", 340 | "torch.Size([1, 5])\n", 341 | "torch.Size([1, 5])\n", 342 | "torch.Size([1, 5])\n", 343 | "torch.Size([1, 5])\n", 344 | "torch.Size([1, 5])\n", 345 | "torch.Size([1, 5])\n", 346 | "torch.Size([1, 5])\n", 347 | "torch.Size([1, 5])\n", 348 | "torch.Size([1, 5])\n", 349 | "torch.Size([1, 5])\n", 350 | "torch.Size([1, 5])\n", 351 | "torch.Size([1, 5])\n", 352 | "torch.Size([1, 5])\n", 353 | "torch.Size([1, 5])\n", 354 | "torch.Size([1, 5])\n", 355 | "torch.Size([1, 5])\n", 356 | "torch.Size([1, 5])\n", 357 | "torch.Size([1, 5])\n", 358 | "torch.Size([1, 5])\n", 359 | "torch.Size([1, 5])\n", 360 | "torch.Size([1, 5])\n", 361 | "torch.Size([1, 5])\n", 362 | "torch.Size([1, 5])\n", 363 | "torch.Size([1, 5])\n", 364 | "torch.Size([1, 5])\n", 365 | "torch.Size([1, 5])\n", 366 | "torch.Size([1, 5])\n", 367 | "torch.Size([1, 5])\n", 368 | "torch.Size([1, 5])\n", 369 | "torch.Size([1, 5])\n", 370 | "torch.Size([1, 5])\n", 371 | "torch.Size([1, 5])\n", 372 | "torch.Size([1, 5])\n", 373 | "torch.Size([1, 5])\n", 374 | "torch.Size([1, 5])\n", 375 | "torch.Size([1, 5])\n", 376 | "torch.Size([1, 5])\n", 377 | "torch.Size([1, 5])\n", 378 | "torch.Size([1, 5])\n", 379 | "torch.Size([1, 5])\n", 380 | "torch.Size([1, 5])\n", 381 | "torch.Size([1, 5])\n", 382 | "torch.Size([1, 5])\n", 383 | "torch.Size([1, 5])\n", 384 | "torch.Size([1, 5])\n", 385 | "torch.Size([1, 5])\n", 386 | "torch.Size([1, 5])\n", 387 | "torch.Size([1, 5])\n", 388 | "torch.Size([1, 5])\n", 389 | "torch.Size([1, 5])\n", 390 | "torch.Size([1, 5])\n", 391 | "torch.Size([1, 5])\n", 392 | "torch.Size([1, 5])\n", 393 | "torch.Size([1, 5])\n", 394 | "torch.Size([1, 5])\n", 395 | "torch.Size([1, 5])\n", 396 | "torch.Size([1, 5])\n", 397 | "torch.Size([1, 5])\n", 398 | "torch.Size([1, 5])\n", 399 | "torch.Size([1, 5])\n", 400 | "torch.Size([1, 5])\n", 401 | "torch.Size([1, 5])\n", 402 | "torch.Size([1, 5])\n", 403 | "torch.Size([1, 5])\n", 404 | "torch.Size([1, 5])\n", 405 | "torch.Size([1, 5])\n", 406 | "torch.Size([1, 5])\n", 407 | "torch.Size([1, 5])\n", 408 | "torch.Size([1, 5])\n", 409 | "torch.Size([1, 5])\n", 410 | "torch.Size([1, 5])\n", 411 | "torch.Size([1, 5])\n", 412 | "torch.Size([1, 5])\n", 413 | "torch.Size([1, 5])\n", 414 | "torch.Size([1, 5])\n", 415 | "torch.Size([1, 5])\n", 416 | "torch.Size([1, 5])\n", 417 | "torch.Size([10, 5])\n", 418 | "torch.Size([10, 5])\n", 419 | "\n" 420 | ] 421 | }, 422 | { 423 | "ename": "RuntimeError", 424 | "evalue": "invalid argument 4: Index tensor must have same dimensions as input tensor at /pytorch/aten/src/THC/generic/THCTensorScatterGather.cu:16", 425 | "output_type": "error", 426 | "traceback": [ 427 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 428 | "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", 429 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;31m# Perform one step of the optimization (on the target network)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 34\u001b[0;31m \u001b[0moptimize_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 35\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0mepisode_durations\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 430 | "\u001b[0;32m\u001b[0m in \u001b[0;36moptimize_model\u001b[0;34m()\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;31m# columns of actions taken. These are the actions which would've been taken\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;31m# for each batch state according to policy_net\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 28\u001b[0;31m \u001b[0mstate_action_values\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpolicy_net\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstate_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgather\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maction_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 29\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;31m# Compute V(s_{t+1}) for all next states.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 431 | "\u001b[0;31mRuntimeError\u001b[0m: invalid argument 4: Index tensor must have same dimensions as input tensor at /pytorch/aten/src/THC/generic/THCTensorScatterGather.cu:16" 432 | ] 433 | } 434 | ], 435 | "source": [ 436 | "num_episodes = 5000\n", 437 | "\n", 438 | "history = []\n", 439 | "for i_episode in tqdm(range(num_episodes)):\n", 440 | " # Initialize the environment and state\n", 441 | " state = env.reset()\n", 442 | " state = torch.Tensor(state).to(device)\n", 443 | " ep_r = 0\n", 444 | " actions = []\n", 445 | " for t in count():\n", 446 | " # Select and perform an action\n", 447 | " action = select_action(state)\n", 448 | " actions.append(action)\n", 449 | "# print(f\"Selection action: {action}\")\n", 450 | " state_, reward, done, _ = env.step(action.item())\n", 451 | " state_ = torch.Tensor(state_).to(device)\n", 452 | " reward = torch.tensor([reward], device=device)\n", 453 | " ep_r += reward\n", 454 | "\n", 455 | " # Observe new state\n", 456 | "\n", 457 | " if not done:\n", 458 | " next_state = state_ \n", 459 | " else:\n", 460 | " next_state = None\n", 461 | "\n", 462 | " # Store the transition in memory\n", 463 | " memory.push(state, action, next_state, reward)\n", 464 | "\n", 465 | " # Move to the next state\n", 466 | " state = next_state\n", 467 | "\n", 468 | " # Perform one step of the optimization (on the target network)\n", 469 | " optimize_model()\n", 470 | " if done:\n", 471 | " episode_durations.append(t + 1)\n", 472 | " epr = ep_r.cpu().numpy()[0]\n", 473 | " history.append(epr)\n", 474 | " (values,counts) = np.unique(np.array(actions), return_counts=True)\n", 475 | " ind=np.argmax(counts)\n", 476 | " counts.sort()\n", 477 | " if i_episode%50 == 0:\n", 478 | " print(f\"Ep: {i_episode} | Reward: {epr} | Most common action: {values[ind]} | Top freq: {counts[::-1][:10]}\")\n", 479 | " \n", 480 | "# plot_durations()\n", 481 | " break\n", 482 | " # Update the target network, copying all weights and biases in DQN\n", 483 | " if i_episode % TARGET_UPDATE == 0:\n", 484 | " target_net.load_state_dict(policy_net.state_dict())\n", 485 | "\n", 486 | "print('Complete')\n", 487 | "env.render()\n", 488 | "env.close()\n", 489 | "plt.ioff()\n", 490 | "plt.show()\n" 491 | ] 492 | }, 493 | { 494 | "cell_type": "code", 495 | "execution_count": 149, 496 | "metadata": {}, 497 | "outputs": [ 498 | { 499 | "data": { 500 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlMAAAEvCAYAAABhSUTPAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAQaklEQVR4nO3dX4idd53H8c/XxiqsVcHMgjSpKRjBrAiVodvFCxXdJe1FcuNKC+IfirnZKruKUFGq1CuVRRDqny5KVdBavdAgkS5oRRFbOsXdYlsCQ/3ToUKj1t4Urd397sXMyjidZJ72d2bmJPN6QeE8z/PjnC/9MZN3nnNmUt0dAACem+ft9gAAAOczMQUAMEBMAQAMEFMAAAPEFADAADEFADBg32698P79+/vQoUO79fIAAJPdd999v+3uhc2u7VpMHTp0KEtLS7v18gAAk1XVr852zdt8AAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMGDLmKqqL1XVY1X187Ncr6r6TFUtV9X9VfW62Y8JADCfptyZui3J0XNcvzrJ4bX/TiT53PhYAADnhy1jqrt/lOT351hyPMlXetXdSV5aVS+f1YAAAPNsFp+ZujTJI+uOV9bOPUNVnaiqpapaOnPmzAxeGgBgd80ipmqTc73Zwu6+tbsXu3txYWHTfysQAOC8MouYWklycN3xgSSPzuB5AQDm3ixi6mSSd6z9VN9VSZ7o7t/M4HkBAObevq0WVNXXk7wxyf6qWkny0STPT5Lu/nySU0muSbKc5Mkk796uYQEA5s2WMdXd121xvZP8y8wmAgA4j/gN6AAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMmBRTVXW0qk5X1XJV3bjJ9cuq6q6q+llV3V9V18x+VACA+bNlTFXVRUluSXJ1kiNJrquqIxuWfSTJHd19RZJrk3x21oMCAMyjKXemrkyy3N0Pd/dTSW5PcnzDmk7y4rXHL0ny6OxGBACYX/smrLk0ySPrjleS/P2GNR9L8p9V9d4kf5PkLTOZDgBgzk25M1WbnOsNx9clua27DyS5JslXq+oZz11VJ6pqqaqWzpw58+ynBQCYM1NiaiXJwXXHB/LMt/GuT3JHknT3T5O8MMn+jU/U3bd292J3Ly4sLDy3iQEA5siUmLo3yeGquryqLs7qB8xPbljz6yRvTpKqenVWY8qtJwDggrdlTHX300luSHJnkoey+lN7D1TVzVV1bG3ZB5K8p6r+O8nXk7yruze+FQgAcMGZ8gH0dPepJKc2nLtp3eMHk7x+tqMBAMw/vwEdAGCAmAIAGCCmAAAGiCkAgAFiCgBggJgCABggpgAABogpAIABYgoAYICYAgAYIKYAAAaIKQCAAWIKAGCAmAIAGCCmAAAGiCkAgAFiCgBggJgCABggpgAABogpAIABYgoAYICYAgAYIKYAAAaIKQCAAWIKAGCAmAIAGCCmAAAGiCkAgAFiCgBggJgCABggpgAABogpAIABYgoAYICYAgAYIKYAAAaIKQCAAWIKAGCAmAIAGCCmAAAGiCkAgAGTYqqqjlbV6aparqobz7LmbVX1YFU9UFVfm+2YAADzad9WC6rqoiS3JPnHJCtJ7q2qk9394Lo1h5N8KMnru/vxqvrb7RoYAGCeTLkzdWWS5e5+uLufSnJ7kuMb1rwnyS3d/XiSdPdjsx0TAGA+TYmpS5M8su54Ze3ceq9K8qqq+klV3V1VR2c1IADAPNvybb4ktcm53uR5Did5Y5IDSX5cVa/p7j/81RNVnUhyIkkuu+yyZz0sAMC8mXJnaiXJwXXHB5I8usma73T3n7v7F0lOZzWu/kp339rdi929uLCw8FxnBgCYG1Ni6t4kh6vq8qq6OMm1SU5uWPPtJG9Kkqran9W3/R6e5aAAAPNoy5jq7qeT3JDkziQPJbmjux+oqpur6tjasjuT/K6qHkxyV5IPdvfvtmtoAIB5Ud0bP/60MxYXF3tpaWlXXhsA4Nmoqvu6e3Gza34DOgDAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwIBJMVVVR6vqdFUtV9WN51j31qrqqlqc3YgAAPNry5iqqouS3JLk6iRHklxXVUc2WXdJkvcluWfWQwIAzKspd6auTLLc3Q9391NJbk9yfJN1H0/yySR/nOF8AABzbUpMXZrkkXXHK2vn/qKqrkhysLu/O8PZAADm3pSYqk3O9V8uVj0vyaeTfGDLJ6o6UVVLVbV05syZ6VMCAMypKTG1kuTguuMDSR5dd3xJktck+WFV/TLJVUlObvYh9O6+tbsXu3txYWHhuU8NADAnpsTUvUkOV9XlVXVxkmuTnPz/i939RHfv7+5D3X0oyd1JjnX30rZMDAAwR7aMqe5+OskNSe5M8lCSO7r7gaq6uaqObfeAAADzbN+URd19KsmpDeduOsvaN46PBQBwfvAb0AEABogpAIABYgoAYICYAgAYIKYAAAaIKQCAAWIKAGCAmAIAGCCmAAAGiCkAgAFiCgBggJgCABggpgAABogpAIABYgoAYICYAgAYIKYAAAaIKQCAAWIKAGCAmAIAGCCmAAAGiCkAgAFiCgBggJgCABggpgAABogpAIABYgoAYICYAgAYIKYAAAaIKQCAAWIKAGCAmAIAGCCmAAAGiCkAgAFiCgBggJgCABggpgAABogpAIABYgoAYICYAgAYMCmmqupoVZ2uquWqunGT6++vqger6v6q+n5VvWL2owIAzJ8tY6qqLkpyS5KrkxxJcl1VHdmw7GdJFrv7tUm+leSTsx4UAGAeTbkzdWWS5e5+uLufSnJ7kuPrF3T3Xd395Nrh3UkOzHZMAID5NCWmLk3yyLrjlbVzZ3N9ku+NDAUAcL7YN2FNbXKuN11Y9fYki0necJbrJ5KcSJLLLrts4ogAAPNryp2plSQH1x0fSPLoxkVV9ZYkH05yrLv/tNkTdfet3b3Y3YsLCwvPZV4AgLkyJabuTXK4qi6vqouTXJvk5PoFVXVFki9kNaQem/2YAADzacuY6u6nk9yQ5M4kDyW5o7sfqKqbq+rY2rJPJXlRkm9W1X9V1cmzPB0AwAVlymem0t2nkpzacO6mdY/fMuO5AADOC34DOgDAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwAAxBQAwQEwBAAwQUwAAA8QUAMAAMQUAMEBMAQAMEFMAAAPEFADAADEFADBATAEADBBTAAADxBQAwIBJMVVVR6vqdFUtV9WNm1x/QVV9Y+36PVV1aNaDAgDMoy1jqqouSnJLkquTHElyXVUd2bDs+iSPd/crk3w6ySdmPSgAwDyacmfqyiTL3f1wdz+V5PYkxzesOZ7ky2uPv5XkzVVVsxsTAGA+TYmpS5M8su54Ze3cpmu6++kkTyR52cYnqqoTVbVUVUtnzpx5bhMDAMyRKTG12R2mfg5r0t23dvdidy8uLCxMmQ8AYK5NiamVJAfXHR9I8ujZ1lTVviQvSfL7WQwIADDPpsTUvUkOV9XlVXVxkmuTnNyw5mSSd649fmuSH3T3M+5MAQBcaPZttaC7n66qG5LcmeSiJF/q7geq6uYkS919MskXk3y1qpazekfq2u0cGgBgXmwZU0nS3aeSnNpw7qZ1j/+Y5J9nOxoAwPzzG9ABAAaIKQCAAWIKAGCAmAIAGCCmAAAGiCkAgAFiCgBgQO3WLyqvqjNJfrXNL7M/yW+3+TV49uzL/LEn88m+zB97Mp92Yl9e0d2b/sPCuxZTO6Gqlrp7cbfn4K/Zl/ljT+aTfZk/9mQ+7fa+eJsPAGCAmAIAGHChx9Stuz0Am7Iv88eezCf7Mn/syXza1X25oD8zBQCw3S70O1MAANvqgoipqjpaVaerarmqbtzk+guq6htr1++pqkM7P+XeM2Ff3l9VD1bV/VX1/ap6xW7MuZdstSfr1r21qrqq/NTSNpuyJ1X1trWvlQeq6ms7PeNeNOH712VVdVdV/Wzte9g1uzHnXlJVX6qqx6rq52e5XlX1mbU9u7+qXrdTs533MVVVFyW5JcnVSY4kua6qjmxYdn2Sx7v7lUk+neQTOzvl3jNxX36WZLG7X5vkW0k+ubNT7i0T9yRVdUmS9yW5Z2cn3Hum7ElVHU7yoSSv7+6/S/KvOz7oHjPxa+UjSe7o7iuSXJvkszs75Z50W5Kj57h+dZLDa/+dSPK5HZgpyQUQU0muTLLc3Q9391NJbk9yfMOa40m+vPb4W0neXFW1gzPuRVvuS3ff1d1Prh3eneTADs+410z5WkmSj2c1bP+4k8PtUVP25D1Jbunux5Okux/b4Rn3oin70klevPb4JUke3cH59qTu/lGS359jyfEkX+lVdyd5aVW9fCdmuxBi6tIkj6w7Xlk7t+ma7n46yRNJXrYj0+1dU/ZlveuTfG9bJ2LLPamqK5Ic7O7v7uRge9iUr5NXJXlVVf2kqu6uqnP9zZzZmLIvH0vy9qpaSXIqyXt3ZjTO4dn+uTMz+3biRbbZZneYNv6I4pQ1zNbk/+dV9fYki0nesK0Tcc49qarnZfVt8Hft1EBM+jrZl9W3Ld6Y1bu3P66q13T3H7Z5tr1syr5cl+S27v73qvqHJF9d25f/3f7xOItd+7P+QrgztZLk4LrjA3nm7da/rKmqfVm9JXuuW4WMm7Ivqaq3JPlwkmPd/acdmm2v2mpPLknymiQ/rKpfJrkqyUkfQt9WU79/fae7/9zdv0hyOqtxxfaZsi/XJ7kjSbr7p0lemNV/H47dM+nPne1wIcTUvUkOV9XlVXVxVj8IeHLDmpNJ3rn2+K1JftB+wdZ223Jf1t5S+kJWQ8rnQLbfOfeku5/o7v3dfai7D2X1c2zHuntpd8bdE6Z8//p2kjclSVXtz+rbfg/v6JR7z5R9+XWSNydJVb06qzF1ZkenZKOTSd6x9lN9VyV5ort/sxMvfN6/zdfdT1fVDUnuTHJRki919wNVdXOSpe4+meSLWb0Fu5zVO1LX7t7Ee8PEfflUkhcl+ebazwP8uruP7drQF7iJe8IOmrgndyb5p6p6MMn/JPlgd/9u96a+8E3clw8k+Y+q+resvpX0Ln9J315V9fWsvt29f+2zah9N8vwk6e7PZ/Wza9ckWU7yZJJ379hs9h4A4Lm7EN7mAwDYNWIKAGCAmAIAGCCmAAAGiCkAgAFiCgBggJgCABggpgAABvwfgzba9r2wKrIAAAAASUVORK5CYII=\n", 501 | "text/plain": [ 502 | "
" 503 | ] 504 | }, 505 | "metadata": { 506 | "needs_background": "light" 507 | }, 508 | "output_type": "display_data" 509 | } 510 | ], 511 | "source": [ 512 | "import matplotlib.pyplot as plt\n", 513 | "plt.figure(figsize=(10, 5))\n", 514 | "plt.plot(history[:1000])\n", 515 | "# plt.plot(history)\n", 516 | "plt.show()" 517 | ] 518 | }, 519 | { 520 | "cell_type": "markdown", 521 | "metadata": {}, 522 | "source": [ 523 | "# Debug" 524 | ] 525 | }, 526 | { 527 | "cell_type": "code", 528 | "execution_count": null, 529 | "metadata": {}, 530 | "outputs": [], 531 | "source": [ 532 | "# def pretty_print(s):\n", 533 | "# for i, x in enumerate(s):\n", 534 | "# if i == len(s)-1:\n", 535 | "# end = '\\n'\n", 536 | "# elif i%2 == 0:\n", 537 | "# end = '-'\n", 538 | "# else:\n", 539 | "# end = ', ' \n", 540 | "# print(x, end=end)" 541 | ] 542 | }, 543 | { 544 | "cell_type": "code", 545 | "execution_count": null, 546 | "metadata": {}, 547 | "outputs": [], 548 | "source": [] 549 | }, 550 | { 551 | "cell_type": "markdown", 552 | "metadata": {}, 553 | "source": [ 554 | "# Compare with others" 555 | ] 556 | }, 557 | { 558 | "cell_type": "code", 559 | "execution_count": null, 560 | "metadata": {}, 561 | "outputs": [], 562 | "source": [ 563 | "\n", 564 | "def choose_action(s, agent):\n", 565 | " if agent == 'dqn':\n", 566 | " return select_action(torch.Tensor(s).to(device)) \n", 567 | " if agent == 'lru':\n", 568 | " return s[::2].argmin()\n", 569 | " if agent == 'random':\n", 570 | " return np.random.randint(n_actions)\n", 571 | " else:\n", 572 | " return s[1::2].argmin()\n", 573 | " \n", 574 | " \n", 575 | "ntests = 1000\n", 576 | "performance = {}\n", 577 | "\n", 578 | "for agent in \"dqn random lru lfu\".split():\n", 579 | " for i in tqdm(range(ntests)):\n", 580 | " s = env.reset()\n", 581 | " nhits = 0\n", 582 | " while True:\n", 583 | " a = choose_action(s, agent)\n", 584 | " s_, r, done, info = env.step(a)\n", 585 | " nhits += r\n", 586 | "\n", 587 | " if done:\n", 588 | " history.append(nhits)\n", 589 | " break\n", 590 | " s = s_\n", 591 | " performance[agent] = np.array(history).mean() \n" 592 | ] 593 | }, 594 | { 595 | "cell_type": "code", 596 | "execution_count": null, 597 | "metadata": {}, 598 | "outputs": [], 599 | "source": [ 600 | "performance" 601 | ] 602 | }, 603 | { 604 | "cell_type": "code", 605 | "execution_count": null, 606 | "metadata": {}, 607 | "outputs": [], 608 | "source": [] 609 | } 610 | ], 611 | "metadata": { 612 | "kernelspec": { 613 | "display_name": "Python 3", 614 | "language": "python", 615 | "name": "python3" 616 | }, 617 | "language_info": { 618 | "codemirror_mode": { 619 | "name": "ipython", 620 | "version": 3 621 | }, 622 | "file_extension": ".py", 623 | "mimetype": "text/x-python", 624 | "name": "python", 625 | "nbconvert_exporter": "python", 626 | "pygments_lexer": "ipython3", 627 | "version": "3.7.3" 628 | } 629 | }, 630 | "nbformat": 4, 631 | "nbformat_minor": 2 632 | } 633 | -------------------------------------------------------------------------------- /notebooks/LRU & LFU.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 19, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from environment import CacheEnv\n", 10 | "from collections import defaultdict" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 20, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "def lru_policy(s):\n", 20 | " least_recent = -1\n", 21 | " action = -1\n", 22 | " for key in s.keys():\n", 23 | " cur = s[key][0]\n", 24 | " if cur > least_recent:\n", 25 | " action = key\n", 26 | " least_recent = cur\n", 27 | " return action" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "def lfu_policy(s):\n", 37 | " least_frequent = 100000000\n", 38 | " action = -1\n", 39 | " for key in s.keys():\n", 40 | " cur = s[key][1]\n", 41 | " if cur < least_frequent:\n", 42 | " action = key\n", 43 | " least_frequent = cur\n", 44 | " return action" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 21, 50 | "metadata": {}, 51 | "outputs": [ 52 | { 53 | "name": "stdout", 54 | "output_type": "stream", 55 | "text": [ 56 | "Total hits: 1\n", 57 | "Total hits: 0\n", 58 | "Total hits: 0\n", 59 | "Total hits: 0\n", 60 | "Total hits: 1\n", 61 | "Total hits: 1\n", 62 | "Total hits: 1\n", 63 | "Total hits: 1\n", 64 | "Total hits: 0\n", 65 | "Total hits: 1\n", 66 | "Total hits: 1\n", 67 | "Total hits: 1\n", 68 | "Total hits: 1\n", 69 | "Total hits: 1\n", 70 | "Total hits: 0\n", 71 | "Total hits: 1\n", 72 | "Total hits: 1\n", 73 | "Total hits: 1\n", 74 | "Total hits: 0\n", 75 | "Total hits: 1\n", 76 | "Total hits: 0\n", 77 | "Total hits: 1\n", 78 | "Total hits: 0\n", 79 | "Total hits: 1\n", 80 | "Total hits: 1\n", 81 | "Total hits: 1\n", 82 | "Total hits: 1\n", 83 | "Total hits: 0\n", 84 | "Total hits: 1\n", 85 | "Total hits: 1\n", 86 | "Total hits: 1\n", 87 | "Total hits: 1\n", 88 | "Total hits: 1\n", 89 | "Total hits: 1\n", 90 | "Total hits: 1\n", 91 | "Total hits: 1\n", 92 | "Total hits: 1\n", 93 | "Total hits: 1\n", 94 | "Total hits: 1\n", 95 | "Total hits: 1\n", 96 | "Total hits: 1\n", 97 | "Total hits: 1\n", 98 | "Total hits: 1\n", 99 | "Total hits: 1\n", 100 | "Total hits: 1\n", 101 | "Total hits: 0\n", 102 | "Total hits: 1\n", 103 | "Total hits: 1\n", 104 | "Total hits: 1\n", 105 | "Total hits: 1\n", 106 | "Total hits: 1\n", 107 | "Total hits: 0\n", 108 | "Total hits: 1\n", 109 | "Total hits: 1\n", 110 | "Total hits: 1\n", 111 | "Total hits: 0\n", 112 | "Total hits: 0\n", 113 | "Total hits: 1\n", 114 | "Total hits: 1\n", 115 | "Total hits: 0\n", 116 | "Total hits: 1\n", 117 | "Total hits: 1\n", 118 | "Total hits: 1\n", 119 | "Total hits: 1\n", 120 | "Total hits: 1\n", 121 | "Total hits: 1\n", 122 | "Total hits: 1\n", 123 | "Total hits: 1\n", 124 | "Total hits: 1\n", 125 | "Total hits: 1\n", 126 | "Total hits: 0\n", 127 | "Total hits: 0\n", 128 | "Total hits: 1\n", 129 | "Total hits: 1\n", 130 | "Total hits: 1\n", 131 | "Total hits: 1\n", 132 | "Total hits: 1\n", 133 | "Total hits: 1\n", 134 | "Total hits: 1\n", 135 | "Total hits: 0\n", 136 | "Total hits: 1\n", 137 | "Total hits: 1\n", 138 | "Total hits: 1\n", 139 | "Total hits: 1\n", 140 | "Total hits: 1\n", 141 | "Total hits: 0\n", 142 | "Total hits: 1\n", 143 | "Total hits: 1\n", 144 | "Total hits: 1\n", 145 | "Total hits: 1\n", 146 | "Total hits: 0\n", 147 | "Total hits: 1\n", 148 | "Total hits: 1\n", 149 | "Total hits: 1\n", 150 | "Total hits: 1\n", 151 | "Total hits: 1\n", 152 | "Total hits: 1\n", 153 | "Total hits: 0\n", 154 | "Total hits: 0\n", 155 | "Total hits: 1\n", 156 | "Total hits: 2\n", 157 | "Total hits: 1\n", 158 | "Total hits: 1\n", 159 | "Total hits: 2\n", 160 | "Total hits: 2\n", 161 | "Total hits: 2\n", 162 | "Total hits: 1\n", 163 | "Total hits: 2\n", 164 | "Total hits: 1\n", 165 | "Total hits: 2\n", 166 | "Total hits: 0\n", 167 | "Total hits: 2\n", 168 | "Total hits: 0\n", 169 | "Total hits: 2\n", 170 | "Total hits: 2\n", 171 | "Total hits: 1\n", 172 | "Total hits: 2\n", 173 | "Total hits: 1\n", 174 | "Total hits: 2\n", 175 | "Total hits: 2\n", 176 | "Total hits: 2\n", 177 | "Total hits: 2\n", 178 | "Total hits: 1\n", 179 | "Total hits: 2\n", 180 | "Total hits: 1\n", 181 | "Total hits: 1\n", 182 | "Total hits: 2\n", 183 | "Total hits: 1\n", 184 | "Total hits: 2\n", 185 | "Total hits: 2\n", 186 | "Total hits: 0\n", 187 | "Total hits: 2\n", 188 | "Total hits: 1\n", 189 | "Total hits: 2\n", 190 | "Total hits: 2\n", 191 | "Total hits: 1\n", 192 | "Total hits: 2\n", 193 | "Total hits: 1\n", 194 | "Total hits: 2\n", 195 | "Total hits: 2\n", 196 | "Total hits: 2\n", 197 | "Total hits: 1\n", 198 | "Total hits: 0\n", 199 | "Total hits: 1\n", 200 | "Total hits: 2\n", 201 | "Total hits: 1\n", 202 | "Total hits: 2\n", 203 | "Total hits: 2\n", 204 | "Total hits: 1\n", 205 | "Total hits: 2\n", 206 | "Total hits: 3\n", 207 | "Total hits: 3\n", 208 | "Total hits: 2\n", 209 | "Total hits: 2\n", 210 | "Total hits: 1\n", 211 | "Total hits: 3\n", 212 | "Total hits: 2\n", 213 | "Total hits: 2\n", 214 | "Total hits: 3\n", 215 | "Total hits: 3\n", 216 | "Total hits: 2\n", 217 | "Total hits: 3\n", 218 | "Total hits: 2\n", 219 | "Total hits: 2\n", 220 | "Total hits: 2\n", 221 | "Total hits: 3\n", 222 | "Total hits: 3\n", 223 | "Total hits: 3\n", 224 | "Total hits: 1\n", 225 | "Total hits: 2\n", 226 | "Total hits: 1\n", 227 | "Total hits: 3\n", 228 | "Total hits: 3\n", 229 | "Total hits: 3\n", 230 | "Total hits: 3\n", 231 | "Total hits: 3\n", 232 | "Total hits: 3\n", 233 | "Total hits: 1\n", 234 | "Total hits: 1\n", 235 | "Total hits: 3\n", 236 | "Total hits: 3\n", 237 | "Total hits: 3\n", 238 | "Total hits: 1\n", 239 | "Total hits: 0\n", 240 | "Total hits: 2\n", 241 | "Total hits: 2\n", 242 | "Total hits: 3\n", 243 | "Total hits: 1\n", 244 | "Total hits: 3\n", 245 | "Total hits: 2\n", 246 | "Total hits: 2\n", 247 | "Total hits: 1\n", 248 | "Total hits: 3\n", 249 | "Total hits: 2\n", 250 | "Total hits: 3\n", 251 | "Total hits: 2\n", 252 | "Total hits: 3\n", 253 | "Total hits: 2\n", 254 | "Total hits: 3\n", 255 | "Total hits: 3\n", 256 | "Total hits: 3\n", 257 | "Total hits: 0\n", 258 | "Total hits: 3\n", 259 | "Total hits: 2\n", 260 | "Total hits: 3\n", 261 | "Total hits: 2\n", 262 | "Total hits: 4\n", 263 | "Total hits: 3\n", 264 | "Total hits: 4\n", 265 | "Total hits: 3\n", 266 | "Total hits: 4\n", 267 | "Total hits: 4\n", 268 | "Total hits: 2\n", 269 | "Total hits: 4\n", 270 | "Total hits: 2\n", 271 | "Total hits: 2\n", 272 | "Total hits: 3\n", 273 | "Total hits: 3\n", 274 | "Total hits: 3\n", 275 | "Total hits: 1\n", 276 | "Total hits: 2\n", 277 | "Total hits: 2\n", 278 | "Total hits: 4\n", 279 | "Total hits: 4\n", 280 | "Total hits: 4\n", 281 | "Total hits: 4\n", 282 | "Total hits: 1\n", 283 | "Total hits: 4\n", 284 | "Total hits: 2\n", 285 | "Total hits: 3\n", 286 | "Total hits: 3\n", 287 | "Total hits: 2\n", 288 | "Total hits: 2\n", 289 | "Total hits: 3\n", 290 | "Total hits: 4\n", 291 | "Total hits: 1\n", 292 | "Total hits: 4\n", 293 | "Total hits: 4\n", 294 | "Total hits: 4\n", 295 | "Total hits: 3\n", 296 | "Total hits: 2\n", 297 | "Total hits: 4\n", 298 | "Total hits: 2\n", 299 | "Total hits: 3\n", 300 | "Total hits: 3\n", 301 | "Total hits: 3\n", 302 | "Total hits: 1\n", 303 | "Total hits: 3\n", 304 | "Total hits: 2\n", 305 | "Total hits: 2\n", 306 | "Total hits: 5\n", 307 | "Total hits: 4\n", 308 | "Total hits: 5\n", 309 | "Total hits: 5\n", 310 | "Total hits: 3\n", 311 | "Total hits: 5\n", 312 | "Total hits: 5\n", 313 | "Total hits: 4\n", 314 | "Total hits: 5\n", 315 | "Total hits: 3\n", 316 | "Total hits: 3\n", 317 | "Total hits: 2\n", 318 | "Total hits: 5\n", 319 | "Total hits: 3\n", 320 | "Total hits: 4\n", 321 | "Total hits: 2\n", 322 | "Total hits: 4\n", 323 | "Total hits: 5\n", 324 | "Total hits: 2\n", 325 | "Total hits: 4\n", 326 | "Total hits: 3\n", 327 | "Total hits: 4\n", 328 | "Total hits: 4\n", 329 | "Total hits: 5\n", 330 | "Total hits: 5\n", 331 | "Total hits: 5\n", 332 | "Total hits: 2\n", 333 | "Total hits: 5\n", 334 | "Total hits: 5\n", 335 | "Total hits: 5\n", 336 | "Total hits: 5\n", 337 | "Total hits: 5\n", 338 | "Total hits: 5\n", 339 | "Total hits: 4\n", 340 | "Total hits: 2\n", 341 | "Total hits: 5\n", 342 | "Total hits: 3\n", 343 | "Total hits: 5\n", 344 | "Total hits: 5\n", 345 | "Total hits: 3\n", 346 | "Total hits: 4\n", 347 | "Total hits: 4\n", 348 | "Total hits: 4\n", 349 | "Total hits: 5\n", 350 | "Total hits: 2\n", 351 | "Total hits: 5\n", 352 | "Total hits: 4\n", 353 | "Total hits: 3\n", 354 | "Total hits: 5\n", 355 | "Total hits: 3\n", 356 | "Total hits: 2\n", 357 | "Total hits: 5\n", 358 | "Total hits: 5\n", 359 | "Total hits: 4\n", 360 | "Total hits: 6\n", 361 | "Total hits: 6\n", 362 | "Total hits: 3\n", 363 | "Total hits: 5\n", 364 | "Total hits: 3\n", 365 | "Total hits: 5\n", 366 | "Total hits: 2\n", 367 | "Total hits: 4\n", 368 | "Total hits: 3\n", 369 | "Total hits: 6\n", 370 | "Total hits: 4\n", 371 | "Total hits: 4\n", 372 | "Total hits: 6\n", 373 | "Total hits: 6\n", 374 | "Total hits: 4\n", 375 | "Total hits: 4\n", 376 | "Total hits: 6\n", 377 | "Total hits: 6\n", 378 | "Total hits: 4\n", 379 | "Total hits: 3\n", 380 | "Total hits: 6\n", 381 | "Total hits: 5\n", 382 | "Total hits: 6\n", 383 | "Total hits: 4\n", 384 | "Total hits: 4\n", 385 | "Total hits: 4\n", 386 | "Total hits: 3\n", 387 | "Total hits: 4\n", 388 | "Total hits: 3\n", 389 | "Total hits: 6\n", 390 | "Total hits: 4\n", 391 | "Total hits: 6\n", 392 | "Total hits: 2\n", 393 | "Total hits: 3\n", 394 | "Total hits: 3\n", 395 | "Total hits: 4\n", 396 | "Total hits: 5\n", 397 | "Total hits: 3\n", 398 | "Total hits: 2\n", 399 | "Total hits: 6\n", 400 | "Total hits: 5\n", 401 | "Total hits: 2\n", 402 | "Total hits: 4\n", 403 | "Total hits: 3\n", 404 | "Total hits: 5\n", 405 | "Total hits: 5\n", 406 | "Total hits: 4\n", 407 | "Total hits: 7\n", 408 | "Total hits: 3\n", 409 | "Total hits: 7\n", 410 | "Total hits: 4\n", 411 | "Total hits: 7\n", 412 | "Total hits: 7\n", 413 | "Total hits: 4\n", 414 | "Total hits: 4\n", 415 | "Total hits: 7\n", 416 | "Total hits: 7\n", 417 | "Total hits: 2\n", 418 | "Total hits: 7\n", 419 | "Total hits: 7\n", 420 | "Total hits: 7\n", 421 | "Total hits: 3\n", 422 | "Total hits: 6\n", 423 | "Total hits: 3\n", 424 | "Total hits: 3\n", 425 | "Total hits: 3\n", 426 | "Total hits: 3\n", 427 | "Total hits: 4\n", 428 | "Total hits: 6\n", 429 | "Total hits: 6\n", 430 | "Total hits: 4\n", 431 | "Total hits: 7\n", 432 | "Total hits: 7\n", 433 | "Total hits: 5\n", 434 | "Total hits: 3\n", 435 | "Total hits: 5\n", 436 | "Total hits: 4\n", 437 | "Total hits: 4\n", 438 | "Total hits: 4\n", 439 | "Total hits: 7\n", 440 | "Total hits: 3\n", 441 | "Total hits: 6\n", 442 | "Total hits: 6\n", 443 | "Total hits: 3\n", 444 | "Total hits: 4\n", 445 | "Total hits: 7\n", 446 | "Total hits: 4\n", 447 | "Total hits: 6\n", 448 | "Total hits: 5\n", 449 | "Total hits: 4\n", 450 | "Total hits: 5\n", 451 | "Total hits: 7\n", 452 | "Total hits: 5\n", 453 | "Total hits: 7\n", 454 | "Total hits: 3\n", 455 | "Total hits: 4\n", 456 | "Total hits: 4\n", 457 | "Total hits: 8\n", 458 | "Total hits: 6\n", 459 | "Total hits: 7\n", 460 | "Total hits: 5\n", 461 | "Total hits: 5\n", 462 | "Total hits: 4\n", 463 | "Total hits: 7\n", 464 | "Total hits: 8\n", 465 | "Total hits: 7\n", 466 | "Total hits: 4\n", 467 | "Total hits: 7\n", 468 | "Total hits: 6\n", 469 | "Total hits: 7\n", 470 | "Total hits: 6\n", 471 | "Total hits: 8\n", 472 | "Total hits: 7\n", 473 | "Total hits: 8\n", 474 | "Total hits: 4\n", 475 | "Total hits: 6\n", 476 | "Total hits: 8\n", 477 | "Total hits: 7\n", 478 | "Total hits: 5\n", 479 | "Total hits: 5\n", 480 | "Total hits: 6\n", 481 | "Total hits: 7\n", 482 | "Total hits: 8\n", 483 | "Total hits: 8\n", 484 | "Total hits: 3\n", 485 | "Total hits: 6\n", 486 | "Total hits: 6\n", 487 | "Total hits: 4\n", 488 | "Total hits: 8\n", 489 | "Total hits: 5\n", 490 | "Total hits: 5\n", 491 | "Total hits: 8\n", 492 | "Total hits: 7\n", 493 | "Total hits: 4\n", 494 | "Total hits: 4\n", 495 | "Total hits: 6\n", 496 | "Total hits: 5\n", 497 | "Total hits: 4\n", 498 | "Total hits: 6\n", 499 | "Total hits: 7\n", 500 | "Total hits: 8\n", 501 | "Total hits: 4\n", 502 | "Total hits: 4\n", 503 | "Total hits: 8\n", 504 | "Total hits: 6\n", 505 | "Total hits: 5\n", 506 | "Total hits: 9\n", 507 | "Total hits: 9\n", 508 | "Total hits: 4\n", 509 | "Total hits: 4\n", 510 | "Total hits: 4\n", 511 | "Total hits: 5\n", 512 | "Total hits: 4\n", 513 | "Total hits: 5\n", 514 | "Total hits: 6\n", 515 | "Total hits: 8\n", 516 | "Total hits: 7\n", 517 | "Total hits: 7\n", 518 | "Total hits: 6\n", 519 | "Total hits: 6\n", 520 | "Total hits: 4\n", 521 | "Total hits: 4\n", 522 | "Total hits: 8\n", 523 | "Total hits: 5\n", 524 | "Total hits: 5\n", 525 | "Total hits: 7\n", 526 | "Total hits: 7\n", 527 | "Total hits: 5\n", 528 | "Total hits: 3\n", 529 | "Total hits: 7\n", 530 | "Total hits: 7\n", 531 | "Total hits: 7\n", 532 | "Total hits: 8\n", 533 | "Total hits: 7\n", 534 | "Total hits: 7\n", 535 | "Total hits: 3\n", 536 | "Total hits: 4\n", 537 | "Total hits: 5\n", 538 | "Total hits: 6\n", 539 | "Total hits: 8\n", 540 | "Total hits: 5\n", 541 | "Total hits: 9\n", 542 | "Total hits: 7\n", 543 | "Total hits: 6\n", 544 | "Total hits: 9\n", 545 | "Total hits: 5\n", 546 | "Total hits: 6\n", 547 | "Total hits: 9\n", 548 | "Total hits: 5\n", 549 | "Total hits: 6\n", 550 | "Total hits: 7\n", 551 | "Total hits: 5\n", 552 | "Total hits: 9\n", 553 | "Total hits: 7\n", 554 | "Total hits: 8\n", 555 | "Total hits: 9\n" 556 | ] 557 | } 558 | ], 559 | "source": [ 560 | "l = 20\n", 561 | "trials = 50\n", 562 | "results = defaultdict(list)\n", 563 | "\n", 564 | "for l in range(10):\n", 565 | " for trial in range(trials):\n", 566 | " env = CacheEnv(eps_len=l)\n", 567 | " s = env.reset()\n", 568 | " done = env.done\n", 569 | "# print(\"Start: \", env.pages)\n", 570 | " while not done:\n", 571 | " a = lru_policy(s)\n", 572 | " s, r, done, observation = env.step(a)\n", 573 | " # print(f\">> Request: {env.new_page_id}\")\n", 574 | "# print(f\"Replace: {a}\")\n", 575 | "# print(observation)\n", 576 | "# print(env.pages, f\"reward: {r}\\n\")\n", 577 | " print(f\"Total hits: {env.total_hits}\")\n", 578 | " results[l].append(env.total_hits)" 579 | ] 580 | }, 581 | { 582 | "cell_type": "code", 583 | "execution_count": 23, 584 | "metadata": { 585 | "scrolled": true 586 | }, 587 | "outputs": [ 588 | { 589 | "data": { 590 | "text/plain": [ 591 | "[5,\n", 592 | " 4,\n", 593 | " 5,\n", 594 | " 5,\n", 595 | " 3,\n", 596 | " 5,\n", 597 | " 5,\n", 598 | " 4,\n", 599 | " 5,\n", 600 | " 3,\n", 601 | " 3,\n", 602 | " 2,\n", 603 | " 5,\n", 604 | " 3,\n", 605 | " 4,\n", 606 | " 2,\n", 607 | " 4,\n", 608 | " 5,\n", 609 | " 2,\n", 610 | " 4,\n", 611 | " 3,\n", 612 | " 4,\n", 613 | " 4,\n", 614 | " 5,\n", 615 | " 5,\n", 616 | " 5,\n", 617 | " 2,\n", 618 | " 5,\n", 619 | " 5,\n", 620 | " 5,\n", 621 | " 5,\n", 622 | " 5,\n", 623 | " 5,\n", 624 | " 4,\n", 625 | " 2,\n", 626 | " 5,\n", 627 | " 3,\n", 628 | " 5,\n", 629 | " 5,\n", 630 | " 3,\n", 631 | " 4,\n", 632 | " 4,\n", 633 | " 4,\n", 634 | " 5,\n", 635 | " 2,\n", 636 | " 5,\n", 637 | " 4,\n", 638 | " 3,\n", 639 | " 5,\n", 640 | " 3]" 641 | ] 642 | }, 643 | "execution_count": 23, 644 | "metadata": {}, 645 | "output_type": "execute_result" 646 | } 647 | ], 648 | "source": [ 649 | "results[5]" 650 | ] 651 | }, 652 | { 653 | "cell_type": "code", 654 | "execution_count": null, 655 | "metadata": {}, 656 | "outputs": [], 657 | "source": [] 658 | } 659 | ], 660 | "metadata": { 661 | "kernelspec": { 662 | "display_name": "Python 3", 663 | "language": "python", 664 | "name": "python3" 665 | }, 666 | "language_info": { 667 | "codemirror_mode": { 668 | "name": "ipython", 669 | "version": 3 670 | }, 671 | "file_extension": ".py", 672 | "mimetype": "text/x-python", 673 | "name": "python", 674 | "nbconvert_exporter": "python", 675 | "pygments_lexer": "ipython3", 676 | "version": "3.7.3" 677 | } 678 | }, 679 | "nbformat": 4, 680 | "nbformat_minor": 2 681 | } 682 | -------------------------------------------------------------------------------- /notebooks/Test neural net.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Is the NN enough to identify LRU or LFU indices?\n", 8 | "\n", 9 | "In this notebook I check if the nn for my DQN even capable of doing its task?" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import torch\n", 19 | "import torch.nn as nn\n", 20 | "import torch.nn.functional as F\n", 21 | "import numpy as np\n", 22 | "import gym\n", 23 | "import time\n", 24 | "from e2 import CacheEnv\n", 25 | "from tqdm import tqdm_notebook as tqdm" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 2, 31 | "metadata": {}, 32 | "outputs": [ 33 | { 34 | "name": "stdout", 35 | "output_type": "stream", 36 | "text": [ 37 | "\n", 38 | "Cache limit: 10\n", 39 | "Total Pages: 20\n" 40 | ] 41 | } 42 | ], 43 | "source": [ 44 | "# env vars\n", 45 | "EPS_LEN = 100\n", 46 | "N_PAGES = 20\n", 47 | "CACHE_LIMIT = 10 \n", 48 | "env = CacheEnv(\n", 49 | " eps_len=EPS_LEN, \n", 50 | " n_pages=N_PAGES, \n", 51 | " limit=CACHE_LIMIT\n", 52 | " )\n", 53 | "\n", 54 | "# dqn vars\n", 55 | "# N_EPS = 60000\n", 56 | "N_EPS = 1000\n", 57 | "BATCH_SIZE = 32\n", 58 | "LR_adam = 3e-4 # learning rate for Adam\n", 59 | "LR_sgd = 1e-3 # learning rate for SGD\n", 60 | "EPSILON = 0.9 # greedy policy\n", 61 | "GAMMA = 0.9 # reward discount\n", 62 | "TARGET_REPLACE_ITER = 2000 # target update frequency\n", 63 | "MEMORY_CAPACITY = 20000\n", 64 | "\n", 65 | "s = env.reset()\n", 66 | "N_ACTIONS = env.action_space_n\n", 67 | "STATE_SHAPE = (CACHE_LIMIT, 2)\n", 68 | "N_STATES = STATE_SHAPE[0]*STATE_SHAPE[1]" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 3, 74 | "metadata": {}, 75 | "outputs": [ 76 | { 77 | "data": { 78 | "application/vnd.jupyter.widget-view+json": { 79 | "model_id": "c611fdce06c14ad59cd0fdd02766d829", 80 | "version_major": 2, 81 | "version_minor": 0 82 | }, 83 | "text/plain": [ 84 | "HBox(children=(IntProgress(value=0, max=1000), HTML(value='')))" 85 | ] 86 | }, 87 | "metadata": {}, 88 | "output_type": "display_data" 89 | }, 90 | { 91 | "name": "stdout", 92 | "output_type": "stream", 93 | "text": [ 94 | "\n", 95 | "101000\n" 96 | ] 97 | } 98 | ], 99 | "source": [ 100 | "# Collect data\n", 101 | "dataX = []\n", 102 | "for _ in tqdm(range(N_EPS)):\n", 103 | " s = env.reset()\n", 104 | " dataX.append(s)\n", 105 | "\n", 106 | " while True:\n", 107 | " a = np.random.randint(0, N_ACTIONS)\n", 108 | " s, _, done, _ = env.step(a)\n", 109 | " dataX.append(s)\n", 110 | " \n", 111 | " if done:\n", 112 | " break\n", 113 | "\n", 114 | "print(len(dataX)) " 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 4, 120 | "metadata": {}, 121 | "outputs": [ 122 | { 123 | "data": { 124 | "application/vnd.jupyter.widget-view+json": { 125 | "model_id": "a5b9baec29524318b81ee1fc4ca0710a", 126 | "version_major": 2, 127 | "version_minor": 0 128 | }, 129 | "text/plain": [ 130 | "HBox(children=(IntProgress(value=0, max=101000), HTML(value='')))" 131 | ] 132 | }, 133 | "metadata": {}, 134 | "output_type": "display_data" 135 | }, 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "def get_labels(dataX):\n", 146 | " dataYLU = []\n", 147 | " dataYRU = [] \n", 148 | " for x in tqdm(dataX):\n", 149 | " lus = np.argmin(x[::2])\n", 150 | " rus = np.argmin(x[1::2])\n", 151 | " dataYLU.append(lus)\n", 152 | " dataYRU.append(rus)\n", 153 | " return dataYLU, dataYRU \n", 154 | " \n", 155 | "dataYLU, dataYRU = get_labels(dataX)" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 5, 161 | "metadata": {}, 162 | "outputs": [ 163 | { 164 | "name": "stdout", 165 | "output_type": "stream", 166 | "text": [ 167 | "101000\n", 168 | "101000\n", 169 | "101000\n" 170 | ] 171 | } 172 | ], 173 | "source": [ 174 | "print(len(dataX))\n", 175 | "print(len(dataYLU))\n", 176 | "print(len(dataYRU))" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": 6, 182 | "metadata": {}, 183 | "outputs": [ 184 | { 185 | "name": "stdout", 186 | "output_type": "stream", 187 | "text": [ 188 | "101000\n", 189 | "101000\n", 190 | "80800\n", 191 | "20200\n" 192 | ] 193 | } 194 | ], 195 | "source": [ 196 | "from sklearn.model_selection import train_test_split\n", 197 | "X = dataX\n", 198 | "Y = dataYLU\n", 199 | "XTrain, XTest, yTrain, yTest = train_test_split(X, Y, test_size = 0.2)\n", 200 | "print(len(X))\n", 201 | "print(len(Y))\n", 202 | "print(len(XTrain))\n", 203 | "print(len(XTest))" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": 7, 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "from torch.utils.data import Dataset, DataLoader\n", 213 | "\n", 214 | "class CacheDataset(Dataset):\n", 215 | " def __init__(self, data, targets, transform=None):\n", 216 | " self.transform = transform\n", 217 | " self.data = torch.Tensor(data)\n", 218 | " self.targets = torch.LongTensor(targets)\n", 219 | "\n", 220 | " def __getitem__(self, index):\n", 221 | " x = self.data[index]\n", 222 | " y = self.targets[index]\n", 223 | " return x, y\n", 224 | "\n", 225 | " def __len__(self):\n", 226 | " return self.data.shape[0]" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": 8, 232 | "metadata": {}, 233 | "outputs": [], 234 | "source": [ 235 | "train_dataset = CacheDataset(XTrain, yTrain)\n", 236 | "test_dataset = CacheDataset(XTest, yTest)\n", 237 | "\n", 238 | "train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE)\n", 239 | "test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": 16, 245 | "metadata": {}, 246 | "outputs": [], 247 | "source": [ 248 | "class Net(nn.Module):\n", 249 | " def __init__(self, ):\n", 250 | " super(Net, self).__init__()\n", 251 | " input_size = N_STATES\n", 252 | " h_dim = 50\n", 253 | " self.fc1 = nn.Linear(input_size, h_dim)\n", 254 | " self.bn1 = nn.BatchNorm1d(h_dim)\n", 255 | " self.fc2 = nn.Linear(h_dim, h_dim//4)\n", 256 | " self.bn2 = nn.BatchNorm1d(h_dim//4)\n", 257 | " self.fc3 = nn.Linear(h_dim//4, h_dim)\n", 258 | " self.bn3 = nn.BatchNorm1d(h_dim)\n", 259 | " self.out = nn.Linear(h_dim, N_ACTIONS)\n", 260 | "\n", 261 | " def forward(self, x):\n", 262 | "# bs = x.shape[0]\n", 263 | " x = F.relu(self.fc1(x))\n", 264 | " x = self.bn1(x)\n", 265 | " x = F.relu(self.fc2(x))\n", 266 | " x = self.bn2(x)\n", 267 | " x = F.relu(self.fc3(x))\n", 268 | " x = self.bn3(x)\n", 269 | " x = self.out(x)\n", 270 | "# return x\n", 271 | " return F.softmax(x, dim=0) " 272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": 17, 277 | "metadata": { 278 | "scrolled": true 279 | }, 280 | "outputs": [ 281 | { 282 | "data": { 283 | "application/vnd.jupyter.widget-view+json": { 284 | "model_id": "2eeaff486d9a4cdba33942ca5840c427", 285 | "version_major": 2, 286 | "version_minor": 0 287 | }, 288 | "text/plain": [ 289 | "HBox(children=(IntProgress(value=0), HTML(value='')))" 290 | ] 291 | }, 292 | "metadata": {}, 293 | "output_type": "display_data" 294 | }, 295 | { 296 | "name": "stdout", 297 | "output_type": "stream", 298 | "text": [ 299 | "Epoch: [0] | Loss: 2.187633514404297\n", 300 | "Epoch: [1] | Loss: 2.144232749938965\n", 301 | "Epoch: [2] | Loss: 2.1371021270751953\n", 302 | "Epoch: [3] | Loss: 2.1437060832977295\n", 303 | "Epoch: [4] | Loss: 2.121243715286255\n", 304 | "Epoch: [5] | Loss: 2.1124751567840576\n", 305 | "Epoch: [6] | Loss: 2.105055093765259\n", 306 | "Epoch: [7] | Loss: 2.0993552207946777\n", 307 | "Epoch: [8] | Loss: 2.0964062213897705\n", 308 | "Epoch: [9] | Loss: 2.0916545391082764\n", 309 | "Epoch: [10] | Loss: 2.0877633094787598\n", 310 | "Epoch: [11] | Loss: 2.085021734237671\n", 311 | "Epoch: [12] | Loss: 2.0823757648468018\n", 312 | "Epoch: [13] | Loss: 2.0809569358825684\n", 313 | "Epoch: [14] | Loss: 2.0783753395080566\n", 314 | "Epoch: [15] | Loss: 2.075209140777588\n", 315 | "Epoch: [16] | Loss: 2.0703678131103516\n", 316 | "Epoch: [17] | Loss: 2.0702061653137207\n", 317 | "Epoch: [18] | Loss: 2.066880702972412\n", 318 | "Epoch: [19] | Loss: 2.0660462379455566\n", 319 | "Epoch: [20] | Loss: 2.0639312267303467\n", 320 | "Epoch: [21] | Loss: 2.061947822570801\n", 321 | "Epoch: [22] | Loss: 2.060666561126709\n", 322 | "Epoch: [23] | Loss: 2.059048891067505\n", 323 | "Epoch: [24] | Loss: 2.057486057281494\n", 324 | "Epoch: [25] | Loss: 2.0560691356658936\n", 325 | "Epoch: [26] | Loss: 2.0552005767822266\n", 326 | "Epoch: [27] | Loss: 2.056312322616577\n", 327 | "Epoch: [28] | Loss: 2.0555484294891357\n", 328 | "Epoch: [29] | Loss: 2.0530333518981934\n", 329 | "Epoch: [30] | Loss: 2.063065767288208\n", 330 | "Epoch: [31] | Loss: 2.0705971717834473\n", 331 | "Epoch: [32] | Loss: 2.0673348903656006\n", 332 | "Epoch: [33] | Loss: 2.0656776428222656\n", 333 | "Epoch: [34] | Loss: 2.0652015209198\n", 334 | "Epoch: [35] | Loss: 2.0645012855529785\n", 335 | "Epoch: [36] | Loss: 2.0647106170654297\n", 336 | "Epoch: [37] | Loss: 2.063448667526245\n", 337 | "Epoch: [38] | Loss: 2.0621421337127686\n", 338 | "Epoch: [39] | Loss: 2.063877582550049\n", 339 | "Epoch: [40] | Loss: 2.0610716342926025\n", 340 | "Epoch: [41] | Loss: 2.0602352619171143\n", 341 | "Epoch: [42] | Loss: 2.0598061084747314\n", 342 | "Epoch: [43] | Loss: 2.0579872131347656\n", 343 | "Epoch: [44] | Loss: 2.0548243522644043\n", 344 | "Epoch: [45] | Loss: 2.0516254901885986\n", 345 | "Epoch: [46] | Loss: 2.0497026443481445\n", 346 | "Epoch: [47] | Loss: 2.048828125\n", 347 | "Epoch: [48] | Loss: 2.0497946739196777\n", 348 | "Epoch: [49] | Loss: 2.0482306480407715\n", 349 | "Epoch: [50] | Loss: 2.0477547645568848\n", 350 | "Epoch: [51] | Loss: 2.0470199584960938\n", 351 | "Epoch: [52] | Loss: 2.0468432903289795\n", 352 | "Epoch: [53] | Loss: 2.0469202995300293\n", 353 | "Epoch: [54] | Loss: 2.046201229095459\n", 354 | "Epoch: [55] | Loss: 2.0457727909088135\n", 355 | "Epoch: [56] | Loss: 2.0458312034606934\n", 356 | "Epoch: [57] | Loss: 2.045846939086914\n", 357 | "Epoch: [58] | Loss: 2.0454676151275635\n", 358 | "Epoch: [59] | Loss: 2.045536518096924\n", 359 | "Epoch: [60] | Loss: 2.0453977584838867\n", 360 | "Epoch: [61] | Loss: 2.045363187789917\n", 361 | "Epoch: [62] | Loss: 2.045435905456543\n", 362 | "Epoch: [63] | Loss: 2.045151710510254\n", 363 | "Epoch: [64] | Loss: 2.0454161167144775\n", 364 | "Epoch: [65] | Loss: 2.045161008834839\n", 365 | "Epoch: [66] | Loss: 2.045203924179077\n", 366 | "Epoch: [67] | Loss: 2.0451138019561768\n", 367 | "Epoch: [68] | Loss: 2.044893264770508\n", 368 | "Epoch: [69] | Loss: 2.0448856353759766\n", 369 | "Epoch: [70] | Loss: 2.0448765754699707\n", 370 | "Epoch: [71] | Loss: 2.0448691844940186\n", 371 | "Epoch: [72] | Loss: 2.044767379760742\n", 372 | "Epoch: [73] | Loss: 2.0447545051574707\n", 373 | "Epoch: [74] | Loss: 2.0446510314941406\n", 374 | "Epoch: [75] | Loss: 2.0445046424865723\n", 375 | "Epoch: [76] | Loss: 2.044635057449341\n", 376 | "Epoch: [77] | Loss: 2.044440746307373\n", 377 | "Epoch: [78] | Loss: 2.0449013710021973\n", 378 | "Epoch: [79] | Loss: 2.044398307800293\n", 379 | "Epoch: [80] | Loss: 2.0446112155914307\n", 380 | "Epoch: [81] | Loss: 2.046970844268799\n", 381 | "Epoch: [82] | Loss: 2.0472493171691895\n", 382 | "Epoch: [83] | Loss: 2.045949935913086\n", 383 | "Epoch: [84] | Loss: 2.045917510986328\n", 384 | "Epoch: [85] | Loss: 2.0451414585113525\n", 385 | "Epoch: [86] | Loss: 2.0451531410217285\n", 386 | "Epoch: [87] | Loss: 2.0451693534851074\n", 387 | "Epoch: [88] | Loss: 2.0447967052459717\n", 388 | "Epoch: [89] | Loss: 2.044837474822998\n", 389 | "Epoch: [90] | Loss: 2.044792413711548\n", 390 | "Epoch: [91] | Loss: 2.0447239875793457\n", 391 | "Epoch: [92] | Loss: 2.0448155403137207\n", 392 | "Epoch: [93] | Loss: 2.0446364879608154\n", 393 | "Epoch: [94] | Loss: 2.044811248779297\n", 394 | "Epoch: [95] | Loss: 2.044571876525879\n", 395 | "Epoch: [96] | Loss: 2.0447733402252197\n", 396 | "Epoch: [97] | Loss: 2.0447912216186523\n", 397 | "Epoch: [98] | Loss: 2.0448620319366455\n", 398 | "Epoch: [99] | Loss: 2.044692039489746\n", 399 | "\n" 400 | ] 401 | } 402 | ], 403 | "source": [ 404 | "model = Net().cuda()\n", 405 | "LR_adam = 3e-3\n", 406 | "optimizer = torch.optim.Adam(model.parameters(), lr=LR_adam)\n", 407 | "# optimizer = torch.optim.SGD(model.parameters(), lr=LR_sgd)\n", 408 | "# loss_func = nn.MSELoss()\n", 409 | "criterion = nn.CrossEntropyLoss()\n", 410 | "\n", 411 | "epochs = 100\n", 412 | "for epoch in tqdm(range(epochs)):\n", 413 | " totalloss = []\n", 414 | " for i, (X, y) in enumerate(train_loader):\n", 415 | " X, y = X.cuda(), y.cuda()\n", 416 | " optimizer.zero_grad()\n", 417 | " out = model(X)\n", 418 | " loss = criterion(out, y)\n", 419 | " loss.backward()\n", 420 | " optimizer.step()\n", 421 | " with torch.no_grad():\n", 422 | " totalloss.append(loss.detach().cpu().numpy())\n", 423 | " totalloss = np.array(totalloss).mean()\n", 424 | " print(f\"Epoch: [{epoch}] | Loss: {totalloss}\") " 425 | ] 426 | }, 427 | { 428 | "cell_type": "code", 429 | "execution_count": null, 430 | "metadata": { 431 | "scrolled": true 432 | }, 433 | "outputs": [], 434 | "source": [ 435 | "model = Net().cuda()\n", 436 | "LR_adam = 3e-3\n", 437 | "optimizer = torch.optim.Adam(model.parameters(), lr=LR_adam)\n", 438 | "# LR_sgd = 1\n", 439 | "# optimizer = torch.optim.SGD(model.parameters(), lr=LR_sgd)\n", 440 | "# loss_func = nn.MSELoss()\n", 441 | "criterion = nn.CrossEntropyLoss()\n", 442 | "\n", 443 | "epochs = 100\n", 444 | "for epoch in tqdm(range(epochs)):\n", 445 | " totalloss = []\n", 446 | " for i, (X, y) in enumerate(train_loader):\n", 447 | " X, y = X.cuda(), y.cuda()\n", 448 | " optimizer.zero_grad()\n", 449 | " out = model(X)\n", 450 | " loss = criterion(out, y)\n", 451 | " loss.backward()\n", 452 | " optimizer.step()\n", 453 | " with torch.no_grad():\n", 454 | " totalloss.append(loss.detach().cpu().numpy())\n", 455 | " totalloss = np.array(totalloss).mean()\n", 456 | " print(f\"Epoch: [{epoch}] | Loss: {totalloss}\") " 457 | ] 458 | }, 459 | { 460 | "cell_type": "code", 461 | "execution_count": null, 462 | "metadata": {}, 463 | "outputs": [], 464 | "source": [] 465 | } 466 | ], 467 | "metadata": { 468 | "kernelspec": { 469 | "display_name": "Python 3", 470 | "language": "python", 471 | "name": "python3" 472 | }, 473 | "language_info": { 474 | "codemirror_mode": { 475 | "name": "ipython", 476 | "version": 3 477 | }, 478 | "file_extension": ".py", 479 | "mimetype": "text/x-python", 480 | "name": "python", 481 | "nbconvert_exporter": "python", 482 | "pygments_lexer": "ipython3", 483 | "version": "3.7.3" 484 | } 485 | }, 486 | "nbformat": 4, 487 | "nbformat_minor": 2 488 | } 489 | -------------------------------------------------------------------------------- /notebooks/Test_Env.py.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# from environment import CacheEnv\n", 10 | "from e2 import CacheEnv" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "env = CacheEnv(eps_len=100, n_pages=10, limit=5, verbose=True)\n", 20 | "env.toggle_human()\n", 21 | "s = env.reset()\n", 22 | "print(f\"Requested page: {env.new_page_id}\")\n", 23 | "s" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "print(env.step(1))\n", 33 | "print(f\"Requested page: {env.new_page_id}\")" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | "metadata": { 40 | "scrolled": false 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "for x in range(3):\n", 45 | " s, r, done, observation = env.step(1)\n", 46 | " print(s)\n", 47 | " print(observation)\n", 48 | " print(f\"Reward: {r}\")\n", 49 | " print(f\"Done: {done}\")\n", 50 | " if done:\n", 51 | " break" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 4, 57 | "metadata": {}, 58 | "outputs": [ 59 | { 60 | "name": "stdout", 61 | "output_type": "stream", 62 | "text": [ 63 | "\n", 64 | "Cache limit: 5\n", 65 | "Total Pages: 10\n" 66 | ] 67 | }, 68 | { 69 | "data": { 70 | "text/plain": [ 71 | "{4: [1, 1], 1: [1, 1], 6: [1, 1], 5: [1, 1], 0: [1, 1]}" 72 | ] 73 | }, 74 | "execution_count": 4, 75 | "metadata": {}, 76 | "output_type": "execute_result" 77 | } 78 | ], 79 | "source": [ 80 | "env = CacheEnv(eps_len=100, n_pages=10, limit=5, verbose=False)\n", 81 | "env.toggle_human()\n", 82 | "s = env.reset()\n", 83 | "s" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 5, 89 | "metadata": { 90 | "scrolled": false 91 | }, 92 | "outputs": [ 93 | { 94 | "name": "stdout", 95 | "output_type": "stream", 96 | "text": [ 97 | "({4: [2, 1], 2: [1, 1], 6: [2, 1], 5: [2, 1], 0: [2, 1]}, 0, False, 'There were 0 hits.')\n", 98 | "\n", 99 | "\n", 100 | "({4: [4, 1], 9: [2, 1], 6: [4, 1], 5: [1, 2], 0: [4, 1]}, 1, False, 'There were 1 hits.')\n", 101 | "\n", 102 | "\n", 103 | "({4: [5, 1], 2: [1, 2], 6: [5, 1], 5: [2, 2], 0: [5, 1]}, 0, False, 'There were 0 hits.')\n", 104 | "\n", 105 | "\n", 106 | "({4: [1, 2], 9: [2, 2], 6: [7, 1], 5: [4, 2], 0: [7, 1]}, 1, False, 'There were 1 hits.')\n", 107 | "\n", 108 | "\n", 109 | "({4: [2, 2], 1: [1, 2], 6: [8, 1], 5: [5, 2], 0: [8, 1]}, 0, False, 'There were 0 hits.')\n", 110 | "\n", 111 | "\n", 112 | "({4: [6, 2], 9: [1, 6], 6: [12, 1], 5: [9, 2], 0: [12, 1]}, 3, False, 'There were 3 hits.')\n", 113 | "\n", 114 | "\n", 115 | "({4: [7, 2], 2: [1, 3], 6: [13, 1], 5: [10, 2], 0: [13, 1]}, 0, False, 'There were 0 hits.')\n", 116 | "\n", 117 | "\n", 118 | "({4: [9, 2], 1: [2, 3], 6: [1, 2], 5: [12, 2], 0: [15, 1]}, 1, False, 'There were 1 hits.')\n", 119 | "\n", 120 | "\n", 121 | "({4: [10, 2], 7: [1, 1], 6: [2, 2], 5: [13, 2], 0: [16, 1]}, 0, False, 'There were 0 hits.')\n", 122 | "\n", 123 | "\n", 124 | "({4: [11, 2], 9: [1, 7], 6: [3, 2], 5: [14, 2], 0: [17, 1]}, 0, False, 'There were 0 hits.')\n", 125 | "\n", 126 | "\n" 127 | ] 128 | } 129 | ], 130 | "source": [ 131 | "for _ in range(10):\n", 132 | " print(env.step(1))\n", 133 | " print(\"\\n\")" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "metadata": {}, 140 | "outputs": [], 141 | "source": [] 142 | } 143 | ], 144 | "metadata": { 145 | "kernelspec": { 146 | "display_name": "Python 3", 147 | "language": "python", 148 | "name": "python3" 149 | }, 150 | "language_info": { 151 | "codemirror_mode": { 152 | "name": "ipython", 153 | "version": 3 154 | }, 155 | "file_extension": ".py", 156 | "mimetype": "text/x-python", 157 | "name": "python", 158 | "nbconvert_exporter": "python", 159 | "pygments_lexer": "ipython3", 160 | "version": "3.7.3" 161 | } 162 | }, 163 | "nbformat": 4, 164 | "nbformat_minor": 2 165 | } 166 | -------------------------------------------------------------------------------- /notebooks/__pycache__/e2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/notebooks/__pycache__/e2.cpython-37.pyc -------------------------------------------------------------------------------- /notebooks/__pycache__/environment.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/notebooks/__pycache__/environment.cpython-37.pyc -------------------------------------------------------------------------------- /notebooks/__pycache__/os_sim.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/notebooks/__pycache__/os_sim.cpython-37.pyc -------------------------------------------------------------------------------- /notebooks/e2.py: -------------------------------------------------------------------------------- 1 | ../e2.py -------------------------------------------------------------------------------- /notebooks/e3.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | import pandas as pd 4 | import pickle 5 | from collections import defaultdict, OrderedDict 6 | from os_sim import OS 7 | 8 | LIMIT = 3 9 | N_PAGES = 5 10 | EPS_LEN = 3 11 | POS_REW = 1 12 | NEG_REW = -1 13 | HEAVY_NEG_R = -10 14 | 15 | class CacheEnv(gym.Env): 16 | metadata = {'render.modes': ['human']} 17 | def __init__(self, limit=LIMIT, n_pages=N_PAGES, eps_len=EPS_LEN, human=False, verbose=False): 18 | super(CacheEnv, self).__init__() 19 | self.limit = limit 20 | self.n_pages = n_pages 21 | self.eps_len = eps_len 22 | self.os = OS(limit, n_pages) 23 | self.pages, self.NT = self.os.init_pages() 24 | self.timestep = 0 #counter; if this reaches eps_len, return done=True 25 | self.done = False 26 | self.new_page_id = -1 27 | self.action_space_n = limit 28 | self.human = human 29 | self.verbose = verbose 30 | 31 | def step(self, action, test=False): 32 | """ 33 | OS just asked for a page not in memory (stored in self.new_page_id). 34 | Replace page at `action` index to make space for the page. 35 | Then keep asking OS for more pages until a miss occurs. 36 | For ever hit meanwhile, increase positive reward by 1. 37 | """ 38 | self.timestep += 1 39 | if self.timestep >= self.eps_len: 40 | self.done = True #Episode reached its end 41 | 42 | 43 | if self.verbose: 44 | self.print_cache() 45 | 46 | self.allocate_cache(self.new_page_id, action) #we took action to make space for this page 47 | 48 | if self.verbose: 49 | print(f"Allocated {self.new_page_id} at index {action}") 50 | self.print_cache() 51 | 52 | reward = 0 53 | nhits = 0 54 | hit = True 55 | while(True): #until page miss occurs 56 | new_page_id = self.os.get_id() #This is page requested by the OS 57 | self.new_page_id = new_page_id #Store for debugging 58 | if self.verbose: 59 | print(f"== Page: {new_page_id} requested!") 60 | if self.is_allocated(new_page_id): 61 | if self.verbose: 62 | print(f"Page: {new_page_id} is allocated. Hit!") 63 | r = self.access_page(new_page_id) 64 | reward += r 65 | nhits += 1 66 | else: 67 | if self.verbose: 68 | print(f"Page: {new_page_id} Not allocated!! MISS!") 69 | break 70 | 71 | observation = f"There were {nhits} hits." 72 | return self.nn_state(), reward, self.done, observation 73 | 74 | def reset(self): 75 | self.timestep = 0 76 | self.pages, self.NT = self.os.init_pages() #self.NT keeps record of number of times a page was accessed. This info might be lost after page is removed so its saved here. 77 | self.done = False 78 | self.new_page_id = self.page_not_in_memory() #this will cause a miss and make agent choose an action for step. 79 | # return self.pages 80 | return self.nn_state() 81 | 82 | def page_not_in_memory(self): 83 | current_pages = set(self.pages.keys()) 84 | all_pages = set([i for i in range(self.n_pages)]) 85 | left_pages = list(all_pages-current_pages) 86 | return np.random.choice(left_pages) 87 | 88 | 89 | def render(self, mode='human'): 90 | pass 91 | 92 | def close(self): 93 | pass 94 | 95 | def is_allocated(self, id): 96 | """ 97 | returns true if 'id' is allocated a cache currently 98 | """ 99 | if id in self.pages.keys(): 100 | return True 101 | return False 102 | 103 | def toggle_human(self): 104 | self.human = not self.human 105 | 106 | @staticmethod 107 | def normalize(arr): 108 | return arr/arr.sum() 109 | 110 | def nn_state(self): 111 | """returns state in numpy format for neural net inpu""" 112 | if self.human: 113 | return self.pages 114 | state_lu = [] 115 | state_fu = [] 116 | for k in self.pages: 117 | vals = self.pages[k] 118 | state_lu.append(vals[0]) #Flatten 119 | state_fu.append(vals[1]) #Flatten 120 | 121 | v_1 = self.normalize(np.array(state_lu)) 122 | v_2 = self.normalize(np.array(state_fu)) 123 | return np.concatenate((v_1,v_2),axis=0) 124 | 125 | def print_cache(self): 126 | print(self.pages) 127 | 128 | 129 | def access_page(self, id): 130 | """change counters of a page requested that is currently in cache""" 131 | hit = True #HIT! 132 | page = self.pages[id] 133 | self.pages[id][0] = 1 #Last accessed 1 timestep ago 134 | self.pages[id][1] += 1 #Increase local fu counter 135 | self.NT[id] += 1 #Increase global fu counter 136 | reward = POS_REW #pos reward for hit 137 | 138 | #For all the pages except id, increament their lu counter 139 | for page_id in self.pages.keys(): 140 | if page_id == id: 141 | continue 142 | else: 143 | self.pages[page_id][0] += 1 144 | 145 | return reward 146 | 147 | def allocate_cache(self, id, action=None): 148 | """ 149 | remove page at 'action' 150 | add page 'id' 151 | """ 152 | id = int(id) 153 | self.NT[id] += 1 #increase global fu counter 154 | 155 | #For all the pages except id, increament their lu counter 156 | for page_id in self.pages.keys(): 157 | if page_id == id: 158 | continue 159 | else: 160 | self.pages[page_id][0] += 1 161 | 162 | 163 | action = int(action) 164 | old_key = list(self.pages.keys())[action] #the page at index 'action' which will be replaced 165 | new_key = id #new page whose id is id 166 | #update the dictionary to contain new page's counter (update value) 167 | new_value = [1, self.NT[id]] 168 | #update the dictionary to contain new page's key (replace key) 169 | self.pages = dict(OrderedDict([(new_key, new_value) if k == old_key else (k, v) for k, v in self.pages.items()])) 170 | 171 | 172 | if __name__ == "__main__": 173 | env = CacheEnv() 174 | env.reset() 175 | -------------------------------------------------------------------------------- /notebooks/environment.py: -------------------------------------------------------------------------------- 1 | ../environment.py -------------------------------------------------------------------------------- /notebooks/os_sim.py: -------------------------------------------------------------------------------- 1 | ../os_sim.py -------------------------------------------------------------------------------- /os_sim.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | import pandas as pd 4 | import pickle 5 | import random 6 | from collections import defaultdict 7 | 8 | 9 | 10 | class OS(): 11 | """ 12 | Simulate a simple OS cache handler 13 | """ 14 | def __init__(self, limit, n_pages): 15 | print(self) 16 | print(f"Cache limit: {limit}") 17 | print(f"Total Pages: {n_pages}") 18 | super(OS, self).__init__() 19 | self.limit = limit 20 | self.n_pages = n_pages 21 | self.P = self.get_P(n_pages) 22 | if len(self.P) != self.n_pages: 23 | raise Exception("Size mismatch for P and n_pages") 24 | 25 | @staticmethod 26 | def get_P(n): 27 | x = np.random.rand(n) 28 | x /= x.sum() 29 | return x 30 | 31 | def init_pages(self): 32 | pages = {} 33 | NT = defaultdict(int) 34 | starting_pages = random.sample([i for i in range(self.n_pages)], self.limit) 35 | for i in range(self.limit): 36 | page_id = starting_pages[i] 37 | # page_id = i #For now let it be sequential 38 | lu = 1 #No. of timesteps ago this page was accessed ~LRU 39 | nt = 1 #No. of times this page was accessed ~LFU 40 | page = [lu, nt] 41 | NT[page_id] += 1 # Update NT dict 42 | pages[page_id] = page 43 | return pages, NT 44 | 45 | 46 | 47 | def get_id(self): 48 | return int(np.random.choice(np.arange(self.n_pages), p=self.P)) 49 | -------------------------------------------------------------------------------- /results/lfu.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/results/lfu.pkl -------------------------------------------------------------------------------- /results/lru.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/results/lru.pkl -------------------------------------------------------------------------------- /results/rlcar.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sumanyumuku98/RL-CAR/dda960315ff8b0c8caa95005fed4d79f4af8738d/results/rlcar.pkl --------------------------------------------------------------------------------