├── .gitignore ├── Code ├── common │ └── rl_utils.py ├── ppo.py ├── requriments.txt ├── sac.py └── sac_c.py ├── README.md ├── pic ├── DQN_Atari.png ├── PPO_Clip.png ├── PPO_KL.png ├── QAC.png ├── TRPO.png ├── TRPO_2.png ├── converge_vfa.png ├── evalimp.png ├── func_design.png ├── mc_epsion.png ├── qpi.png ├── rl.png ├── rl_free.png ├── sarsa.png ├── sarsa_vfa.png ├── td_mc_dp_diff.png ├── traning_stable.png └── vpi.png ├── rl_sisyphes.pdf ├── rl_sisyphes.tex └── setting_list.tex /.gitignore: -------------------------------------------------------------------------------- 1 | Code/common/__pycache__/* 2 | *.aux 3 | *.gz 4 | *.toc 5 | *.out 6 | *.log 7 | 8 | -------------------------------------------------------------------------------- /Code/common/rl_utils.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | import numpy as np 3 | import torch 4 | import collections 5 | import random 6 | 7 | from warnings import filterwarnings 8 | filterwarnings(action='ignore', category=DeprecationWarning, message='`np.bool8` is a deprecated alias') 9 | 10 | class ReplayBuffer: 11 | def __init__(self, capacity): 12 | self.buffer = collections.deque(maxlen=capacity) 13 | 14 | def add(self, state, action, reward, next_state, done): 15 | self.buffer.append((state, action, reward, next_state, done)) 16 | 17 | def sample(self, batch_size): 18 | transitions = random.sample(self.buffer, batch_size) 19 | state, action, reward, next_state, done = zip(*transitions) 20 | return np.array(state), action, reward, np.array(next_state), done 21 | 22 | def size(self): 23 | return len(self.buffer) 24 | 25 | def moving_average(a, window_size): 26 | cumulative_sum = np.cumsum(np.insert(a, 0, 0)) 27 | middle = (cumulative_sum[window_size:] - cumulative_sum[:-window_size]) / window_size 28 | r = np.arange(1, window_size-1, 2) 29 | begin = np.cumsum(a[:window_size-1])[::2] / r 30 | end = (np.cumsum(a[:-window_size:-1])[::2] / r)[::-1] 31 | return np.concatenate((begin, middle, end)) 32 | 33 | def train_on_policy_agent(env, agent, num_episodes): 34 | return_list = [] 35 | for i in range(10): 36 | with tqdm(total=int(num_episodes/10), desc='Iteration %d' % i) as pbar: 37 | for i_episode in range(int(num_episodes/10)): 38 | episode_return = 0 39 | transition_dict = {'states': [], 'actions': [], 'next_states': [], 'rewards': [], 'dones': []} 40 | state = env.reset() 41 | done = False 42 | while not done: 43 | if len(state) == 2: 44 | state = state[0] 45 | action = agent.take_action(state) 46 | next_state, reward, done, _ = env.step(action)[:4] 47 | transition_dict['states'].append(state) 48 | transition_dict['actions'].append(action) 49 | transition_dict['next_states'].append(next_state) 50 | transition_dict['rewards'].append(reward) 51 | transition_dict['dones'].append(done) 52 | state = next_state 53 | episode_return += reward 54 | return_list.append(episode_return) 55 | agent.update(transition_dict) 56 | if (i_episode+1) % 10 == 0: 57 | pbar.set_postfix({'episode': '%d' % (num_episodes/10 * i + i_episode+1), 'return': '%.3f' % np.mean(return_list[-10:])}) 58 | pbar.update(1) 59 | return return_list 60 | 61 | def train_off_policy_agent(env, agent, num_episodes, replay_buffer, minimal_size, batch_size): 62 | return_list = [] 63 | for i in range(10): 64 | with tqdm(total=int(num_episodes/10), desc='Iteration %d' % i) as pbar: 65 | for i_episode in range(int(num_episodes/10)): 66 | episode_return = 0 67 | state = env.reset() 68 | done = False 69 | while not done: 70 | if 
len(state) == 2: 71 | state = state[0] 72 | action = agent.take_action(state) 73 | next_state, reward, done, _ = env.step(action)[:4] 74 | replay_buffer.add(state, action, reward, next_state, done) 75 | state = next_state 76 | episode_return += reward 77 | if replay_buffer.size() > minimal_size: 78 | b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size) 79 | transition_dict = {'states': b_s, 'actions': b_a, 'next_states': b_ns, 'rewards': b_r, 'dones': b_d} 80 | agent.update(transition_dict) 81 | return_list.append(episode_return) 82 | if (i_episode+1) % 10 == 0: 83 | pbar.set_postfix({'episode': '%d' % (num_episodes/10 * i + i_episode+1), 'return': '%.3f' % np.mean(return_list[-10:])}) 84 | pbar.update(1) 85 | return return_list 86 | 87 | 88 | def compute_advantage(gamma, lmbda, td_delta): 89 | td_delta = td_delta.detach().numpy() 90 | advantage_list = [] 91 | advantage = 0.0 92 | for delta in td_delta[::-1]: 93 | advantage = gamma * lmbda * advantage + delta 94 | advantage_list.append(advantage) 95 | advantage_list.reverse() 96 | return torch.tensor(advantage_list, dtype=torch.float) 97 | -------------------------------------------------------------------------------- /Code/ppo.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import torch 3 | import torch.nn.functional as F 4 | import matplotlib.pyplot as plt 5 | import common.rl_utils as rl_utils 6 | 7 | 8 | class PolicyNet(torch.nn.Module): 9 | def __init__(self, state_dim, hidden_dim, action_dim): 10 | super(PolicyNet, self).__init__() 11 | self.fc1 = torch.nn.Linear(state_dim, hidden_dim) 12 | self.fc2 = torch.nn.Linear(hidden_dim, action_dim) 13 | 14 | def forward(self, x): 15 | x = F.relu(self.fc1(x)) 16 | return F.softmax(self.fc2(x), dim=1) 17 | 18 | 19 | class ValueNet(torch.nn.Module): 20 | def __init__(self, state_dim, hidden_dim): 21 | super(ValueNet, self).__init__() 22 | self.fc1 = torch.nn.Linear(state_dim, hidden_dim) 23 | self.fc2 = torch.nn.Linear(hidden_dim, 1) 24 | 25 | def forward(self, x): 26 | x = F.relu(self.fc1(x)) 27 | return self.fc2(x) 28 | 29 | 30 | class PPO: 31 | ''' PPO算法,采用截断方式 ''' 32 | def __init__(self, state_dim, hidden_dim, action_dim, actor_lr, critic_lr, 33 | lmbda, epochs, eps, gamma, device): 34 | self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device) 35 | self.critic = ValueNet(state_dim, hidden_dim).to(device) 36 | self.state_dim = state_dim 37 | self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), 38 | lr=actor_lr) 39 | self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), 40 | lr=critic_lr) 41 | self.gamma = gamma 42 | self.lmbda = lmbda 43 | self.epochs = epochs # 一条序列的数据用来训练轮数 44 | self.eps = eps # PPO中截断范围的参数 45 | self.device = device 46 | 47 | def take_action(self, state): 48 | state = torch.tensor([state], dtype=torch.float).to(self.device).view(1, self.state_dim) 49 | probs = self.actor(state) 50 | action_dist = torch.distributions.Categorical(probs) 51 | action = action_dist.sample() 52 | return action.item() 53 | 54 | def update(self, transition_dict): 55 | states = torch.tensor(transition_dict['states'], 56 | dtype=torch.float).to(self.device) 57 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 58 | self.device) 59 | rewards = torch.tensor(transition_dict['rewards'], 60 | dtype=torch.float).view(-1, 1).to(self.device) 61 | next_states = torch.tensor(transition_dict['next_states'], 62 | dtype=torch.float).to(self.device) 63 | dones = 
torch.tensor(transition_dict['dones'], 64 | dtype=torch.float).view(-1, 1).to(self.device) 65 | td_target = rewards + self.gamma * self.critic(next_states) * (1 - 66 | dones) 67 | td_delta = td_target - self.critic(states) 68 | advantage = rl_utils.compute_advantage(self.gamma, self.lmbda, 69 | td_delta.cpu()).to(self.device) 70 | old_log_probs = torch.log(self.actor(states).gather(1, 71 | actions)).detach() 72 | 73 | for _ in range(self.epochs): 74 | log_probs = torch.log(self.actor(states).gather(1, actions)) 75 | ratio = torch.exp(log_probs - old_log_probs) 76 | surr1 = ratio * advantage 77 | surr2 = torch.clamp(ratio, 1 - self.eps, 78 | 1 + self.eps) * advantage # 截断 79 | actor_loss = torch.mean(-torch.min(surr1, surr2)) # PPO损失函数 80 | critic_loss = torch.mean( 81 | F.mse_loss(self.critic(states), td_target.detach())) 82 | self.actor_optimizer.zero_grad() 83 | self.critic_optimizer.zero_grad() 84 | actor_loss.backward() 85 | critic_loss.backward() 86 | self.actor_optimizer.step() 87 | self.critic_optimizer.step() 88 | 89 | actor_lr = 1e-3 90 | critic_lr = 1e-2 91 | num_episodes = 500 92 | hidden_dim = 128 93 | gamma = 0.98 94 | lmbda = 0.95 95 | epochs = 10 96 | eps = 0.2 97 | device = torch.device("cuda") if torch.cuda.is_available() else torch.device( 98 | "cpu") 99 | 100 | env_name = 'CartPole-v1' 101 | env = gym.make("CartPole-v1", render_mode="rgb_array") 102 | env.reset() 103 | env.render() 104 | torch.manual_seed(0) 105 | state_dim = env.observation_space.shape[0] 106 | action_dim = env.action_space.n 107 | agent = PPO(state_dim, hidden_dim, action_dim, actor_lr, critic_lr, lmbda, 108 | epochs, eps, gamma, device) 109 | 110 | return_list = rl_utils.train_on_policy_agent(env, agent, num_episodes) 111 | 112 | 113 | episodes_list = list(range(len(return_list))) 114 | plt.plot(episodes_list, return_list) 115 | plt.xlabel('Episodes') 116 | plt.ylabel('Returns') 117 | plt.title('PPO on {}'.format(env_name)) 118 | plt.show() 119 | 120 | mv_return = rl_utils.moving_average(return_list, 9) 121 | plt.plot(episodes_list, mv_return) 122 | plt.xlabel('Episodes') 123 | plt.ylabel('Returns') 124 | plt.title('PPO on {}'.format(env_name)) 125 | plt.show() -------------------------------------------------------------------------------- /Code/requriments.txt: -------------------------------------------------------------------------------- 1 | # wsl2 ubuntu18.04 2 | gym==0.26.1 3 | torch==1.10.0+cu11 4 | torchvision==0.11.1+cu113 -------------------------------------------------------------------------------- /Code/sac.py: -------------------------------------------------------------------------------- 1 | import random 2 | import gym 3 | import numpy as np 4 | import torch 5 | import torch.nn.functional as F 6 | import matplotlib.pyplot as plt 7 | import common.rl_utils as rl_utils 8 | 9 | 10 | class PolicyNet(torch.nn.Module): 11 | def __init__(self, state_dim, hidden_dim, action_dim): 12 | super(PolicyNet, self).__init__() 13 | self.fc1 = torch.nn.Linear(state_dim, hidden_dim) 14 | self.fc2 = torch.nn.Linear(hidden_dim, action_dim) 15 | 16 | def forward(self, x): 17 | x = F.relu(self.fc1(x)) 18 | return F.softmax(self.fc2(x), dim=1) 19 | 20 | 21 | class QValueNet(torch.nn.Module): 22 | ''' 只有一层隐藏层的Q网络 ''' 23 | def __init__(self, state_dim, hidden_dim, action_dim): 24 | super(QValueNet, self).__init__() 25 | self.fc1 = torch.nn.Linear(state_dim, hidden_dim) 26 | self.fc2 = torch.nn.Linear(hidden_dim, action_dim) 27 | 28 | def forward(self, x): 29 | x = F.relu(self.fc1(x)) 30 | return self.fc2(x) 
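# Note (discrete-action SAC): the Q network above outputs one value per discrete
# action, so expectations under the policy can be computed exactly by weighting
# Q(s, .) and log pi(. | s) with the softmax probabilities from PolicyNet
# (see calc_target and update below) instead of sampling actions as in the
# continuous-action variant in sac_c.py.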
31 | 32 | class SAC: 33 | ''' 处理离散动作的SAC算法 ''' 34 | def __init__(self, state_dim, hidden_dim, action_dim, actor_lr, critic_lr, 35 | alpha_lr, target_entropy, tau, gamma, device): 36 | # 策略网络 37 | self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device) 38 | # 第一个Q网络 39 | self.critic_1 = QValueNet(state_dim, hidden_dim, action_dim).to(device) 40 | # 第二个Q网络 41 | self.critic_2 = QValueNet(state_dim, hidden_dim, action_dim).to(device) 42 | self.target_critic_1 = QValueNet(state_dim, hidden_dim, 43 | action_dim).to(device) # 第一个目标Q网络 44 | self.target_critic_2 = QValueNet(state_dim, hidden_dim, 45 | action_dim).to(device) # 第二个目标Q网络 46 | # 令目标Q网络的初始参数和Q网络一样 47 | self.target_critic_1.load_state_dict(self.critic_1.state_dict()) 48 | self.target_critic_2.load_state_dict(self.critic_2.state_dict()) 49 | self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), 50 | lr=actor_lr) 51 | self.critic_1_optimizer = torch.optim.Adam(self.critic_1.parameters(), 52 | lr=critic_lr) 53 | self.critic_2_optimizer = torch.optim.Adam(self.critic_2.parameters(), 54 | lr=critic_lr) 55 | # 使用alpha的log值,可以使训练结果比较稳定 56 | self.log_alpha = torch.tensor(np.log(0.01), dtype=torch.float) 57 | self.log_alpha.requires_grad = True # 可以对alpha求梯度 58 | self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha], 59 | lr=alpha_lr) 60 | self.target_entropy = target_entropy # 目标熵的大小 61 | self.gamma = gamma 62 | self.tau = tau 63 | self.device = device 64 | 65 | def take_action(self, state): 66 | state = torch.tensor(np.array([state], dtype=np.float32), dtype=torch.float).to(self.device) 67 | probs = self.actor(state) 68 | action_dist = torch.distributions.Categorical(probs) 69 | action = action_dist.sample() 70 | return action.item() 71 | 72 | # 计算目标Q值,直接用策略网络的输出概率进行期望计算 73 | def calc_target(self, rewards, next_states, dones): 74 | next_probs = self.actor(next_states) 75 | next_log_probs = torch.log(next_probs + 1e-8) 76 | entropy = -torch.sum(next_probs * next_log_probs, dim=1, keepdim=True) 77 | q1_value = self.target_critic_1(next_states) 78 | q2_value = self.target_critic_2(next_states) 79 | min_qvalue = torch.sum(next_probs * torch.min(q1_value, q2_value), 80 | dim=1, 81 | keepdim=True) 82 | next_value = min_qvalue + self.log_alpha.exp() * entropy 83 | td_target = rewards + self.gamma * next_value * (1 - dones) 84 | return td_target 85 | 86 | def soft_update(self, net, target_net): 87 | for param_target, param in zip(target_net.parameters(), 88 | net.parameters()): 89 | param_target.data.copy_(param_target.data * (1.0 - self.tau) + 90 | param.data * self.tau) 91 | 92 | def update(self, transition_dict): 93 | states = torch.tensor(transition_dict['states'], 94 | dtype=torch.float).to(self.device) 95 | actions = torch.tensor(transition_dict['actions']).view(-1, 1).to( 96 | self.device) # 动作不再是float类型 97 | rewards = torch.tensor(transition_dict['rewards'], 98 | dtype=torch.float).view(-1, 1).to(self.device) 99 | next_states = torch.tensor(transition_dict['next_states'], 100 | dtype=torch.float).to(self.device) 101 | dones = torch.tensor(transition_dict['dones'], 102 | dtype=torch.float).view(-1, 1).to(self.device) 103 | 104 | # 更新两个Q网络 105 | td_target = self.calc_target(rewards, next_states, dones) 106 | critic_1_q_values = self.critic_1(states).gather(1, actions) 107 | critic_1_loss = torch.mean( 108 | F.mse_loss(critic_1_q_values, td_target.detach())) 109 | critic_2_q_values = self.critic_2(states).gather(1, actions) 110 | critic_2_loss = torch.mean( 111 | F.mse_loss(critic_2_q_values, td_target.detach())) 112 
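# Note: both critics regress onto the same td_target, which is detached so the
# critic losses do not backpropagate into the target computation. Keeping two
# critics and using the minimum of their estimates (in calc_target above and in
# the actor loss below) is the clipped double-Q trick that curbs overestimation.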
| self.critic_1_optimizer.zero_grad() 113 | critic_1_loss.backward() 114 | self.critic_1_optimizer.step() 115 | self.critic_2_optimizer.zero_grad() 116 | critic_2_loss.backward() 117 | self.critic_2_optimizer.step() 118 | 119 | # 更新策略网络 120 | probs = self.actor(states) 121 | log_probs = torch.log(probs + 1e-8) 122 | # 直接根据概率计算熵 123 | entropy = -torch.sum(probs * log_probs, dim=1, keepdim=True) # 124 | q1_value = self.critic_1(states) 125 | q2_value = self.critic_2(states) 126 | min_qvalue = torch.sum(probs * torch.min(q1_value, q2_value), 127 | dim=1, 128 | keepdim=True) # 直接根据概率计算期望 129 | actor_loss = torch.mean(-self.log_alpha.exp() * entropy - min_qvalue) 130 | self.actor_optimizer.zero_grad() 131 | actor_loss.backward() 132 | self.actor_optimizer.step() 133 | 134 | # 更新alpha值 135 | alpha_loss = torch.mean( 136 | (entropy - target_entropy).detach() * self.log_alpha.exp()) 137 | self.log_alpha_optimizer.zero_grad() 138 | alpha_loss.backward() 139 | self.log_alpha_optimizer.step() 140 | 141 | self.soft_update(self.critic_1, self.target_critic_1) 142 | self.soft_update(self.critic_2, self.target_critic_2) 143 | 144 | 145 | actor_lr = 1e-3 146 | critic_lr = 1e-2 147 | alpha_lr = 1e-2 148 | num_episodes = 200 149 | hidden_dim = 128 150 | gamma = 0.98 151 | tau = 0.005 # 软更新参数 152 | buffer_size = 10000 153 | minimal_size = 500 154 | batch_size = 64 155 | target_entropy = -1 156 | device = torch.device("cuda") if torch.cuda.is_available() else torch.device( 157 | "cpu") 158 | 159 | env_name = 'CartPole-v1' 160 | env = gym.make("CartPole-v1", render_mode="rgb_array") 161 | env.reset() 162 | env.render() 163 | random.seed(0) 164 | np.random.seed(0) 165 | torch.manual_seed(0) 166 | replay_buffer = rl_utils.ReplayBuffer(buffer_size) 167 | state_dim = env.observation_space.shape[0] 168 | action_dim = env.action_space.n 169 | agent = SAC(state_dim, hidden_dim, action_dim, actor_lr, critic_lr, alpha_lr, 170 | target_entropy, tau, gamma, device) 171 | 172 | return_list = rl_utils.train_off_policy_agent(env, agent, num_episodes, 173 | replay_buffer, minimal_size, 174 | batch_size) 175 | 176 | episodes_list = list(range(len(return_list))) 177 | plt.plot(episodes_list, return_list) 178 | plt.xlabel('Episodes') 179 | plt.ylabel('Returns') 180 | plt.title('SAC on {}'.format(env_name)) 181 | plt.show() 182 | 183 | mv_return = rl_utils.moving_average(return_list, 9) 184 | plt.plot(episodes_list, mv_return) 185 | plt.xlabel('Episodes') 186 | plt.ylabel('Returns') 187 | plt.title('SAC on {}'.format(env_name)) 188 | # plt.show() 189 | plt.savefig('r.png') -------------------------------------------------------------------------------- /Code/sac_c.py: -------------------------------------------------------------------------------- 1 | import random 2 | import gym 3 | import numpy as np 4 | import torch 5 | import torch.nn.functional as F 6 | from torch.distributions import Normal 7 | import matplotlib.pyplot as plt 8 | import common.rl_utils as rl_utils 9 | 10 | class PolicyNetContinuous(torch.nn.Module): 11 | def __init__(self, state_dim, hidden_dim, action_dim, action_bound): 12 | super(PolicyNetContinuous, self).__init__() 13 | self.fc1 = torch.nn.Linear(state_dim, hidden_dim) 14 | self.fc_mu = torch.nn.Linear(hidden_dim, action_dim) 15 | self.fc_std = torch.nn.Linear(hidden_dim, action_dim) 16 | self.action_bound = action_bound 17 | 18 | def forward(self, x): 19 | x = F.relu(self.fc1(x)) 20 | mu = self.fc_mu(x) 21 | std = F.softplus(self.fc_std(x)) 22 | dist = Normal(mu, std) 23 | normal_sample = 
dist.rsample() # rsample() draws a reparameterized sample
24 | log_prob = dist.log_prob(normal_sample)
25 | action = torch.tanh(normal_sample)
26 | # log-density of the tanh-squashed Gaussian: action is already tanh(normal_sample), so the correction term is log(1 - action^2)
27 | log_prob = log_prob - torch.log(1 - action.pow(2) + 1e-7)
28 | action = action * self.action_bound
29 | return action, log_prob
30 |
31 |
32 | class QValueNetContinuous(torch.nn.Module):
33 | def __init__(self, state_dim, hidden_dim, action_dim):
34 | super(QValueNetContinuous, self).__init__()
35 | self.fc1 = torch.nn.Linear(state_dim + action_dim, hidden_dim)
36 | self.fc2 = torch.nn.Linear(hidden_dim, hidden_dim)
37 | self.fc_out = torch.nn.Linear(hidden_dim, 1)
38 |
39 | def forward(self, x, a):
40 | cat = torch.cat([x, a], dim=1)
41 | x = F.relu(self.fc1(cat))
42 | x = F.relu(self.fc2(x))
43 | return self.fc_out(x)
44 |
45 | class SACContinuous:
46 | ''' SAC for continuous actions '''
47 | def __init__(self, state_dim, hidden_dim, action_dim, action_bound,
48 | actor_lr, critic_lr, alpha_lr, target_entropy, tau, gamma,
49 | device):
50 | self.actor = PolicyNetContinuous(state_dim, hidden_dim, action_dim,
51 | action_bound).to(device) # policy network
52 | self.critic_1 = QValueNetContinuous(state_dim, hidden_dim,
53 | action_dim).to(device) # first Q network
54 | self.critic_2 = QValueNetContinuous(state_dim, hidden_dim,
55 | action_dim).to(device) # second Q network
56 | self.target_critic_1 = QValueNetContinuous(state_dim,
57 | hidden_dim, action_dim).to(
58 | device) # first target Q network
59 | self.target_critic_2 = QValueNetContinuous(state_dim,
60 | hidden_dim, action_dim).to(
61 | device) # second target Q network
62 | # initialize the target Q networks with the same parameters as the Q networks
63 | self.target_critic_1.load_state_dict(self.critic_1.state_dict())
64 | self.target_critic_2.load_state_dict(self.critic_2.state_dict())
65 | self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
66 | lr=actor_lr)
67 | self.critic_1_optimizer = torch.optim.Adam(self.critic_1.parameters(),
68 | lr=critic_lr)
69 | self.critic_2_optimizer = torch.optim.Adam(self.critic_2.parameters(),
70 | lr=critic_lr)
71 | # optimizing log(alpha) instead of alpha tends to make training more stable
72 | self.log_alpha = torch.tensor(np.log(0.01), dtype=torch.float)
73 | self.log_alpha.requires_grad = True # allow gradients w.r.t. alpha
74 | self.log_alpha_optimizer = torch.optim.Adam([self.log_alpha],
75 | lr=alpha_lr)
76 | self.target_entropy = target_entropy # target entropy
77 | self.gamma = gamma
78 | self.tau = tau
79 | self.device = device
80 |
81 | def take_action(self, state):
82 | state = torch.tensor(np.array([state], dtype=np.float32), dtype=torch.float).to(self.device)
83 |
84 | action = self.actor(state)[0]
85 | return [action.item()]
86 |
87 | def calc_target(self, rewards, next_states, dones): # compute the target Q value
88 | next_actions, log_prob = self.actor(next_states)
89 | entropy = -log_prob
90 | q1_value = self.target_critic_1(next_states, next_actions)
91 | q2_value = self.target_critic_2(next_states, next_actions)
92 | next_value = torch.min(q1_value,
93 | q2_value) + self.log_alpha.exp() * entropy
94 | td_target = rewards + self.gamma * next_value * (1 - dones)
95 | return td_target
96 |
97 | def soft_update(self, net, target_net):
98 | for param_target, param in zip(target_net.parameters(),
99 | net.parameters()):
100 | param_target.data.copy_(param_target.data * (1.0 - self.tau) +
101 | param.data * self.tau)
102 |
103 | def update(self, transition_dict):
104 | states = torch.tensor(transition_dict['states'],
105 | dtype=torch.float).to(self.device)
106 | actions = torch.tensor(transition_dict['actions'],
107 | dtype=torch.float).view(-1, 1).to(self.device)
108 | rewards = torch.tensor(transition_dict['rewards'], 109
| dtype=torch.float).view(-1, 1).to(self.device) 110 | next_states = torch.tensor(transition_dict['next_states'], 111 | dtype=torch.float).to(self.device) 112 | dones = torch.tensor(transition_dict['dones'], 113 | dtype=torch.float).view(-1, 1).to(self.device) 114 | # 和之前章节一样,对倒立摆环境的奖励进行重塑以便训练 115 | rewards = (rewards + 8.0) / 8.0 116 | 117 | # 更新两个Q网络 118 | td_target = self.calc_target(rewards, next_states, dones) 119 | critic_1_loss = torch.mean( 120 | F.mse_loss(self.critic_1(states, actions), td_target.detach())) 121 | critic_2_loss = torch.mean( 122 | F.mse_loss(self.critic_2(states, actions), td_target.detach())) 123 | self.critic_1_optimizer.zero_grad() 124 | critic_1_loss.backward() 125 | self.critic_1_optimizer.step() 126 | self.critic_2_optimizer.zero_grad() 127 | critic_2_loss.backward() 128 | self.critic_2_optimizer.step() 129 | 130 | # 更新策略网络 131 | new_actions, log_prob = self.actor(states) 132 | entropy = -log_prob 133 | q1_value = self.critic_1(states, new_actions) 134 | q2_value = self.critic_2(states, new_actions) 135 | actor_loss = torch.mean(-self.log_alpha.exp() * entropy - 136 | torch.min(q1_value, q2_value)) 137 | self.actor_optimizer.zero_grad() 138 | actor_loss.backward() 139 | self.actor_optimizer.step() 140 | 141 | # 更新alpha值 142 | alpha_loss = torch.mean( 143 | (entropy - self.target_entropy).detach() * self.log_alpha.exp()) 144 | self.log_alpha_optimizer.zero_grad() 145 | alpha_loss.backward() 146 | self.log_alpha_optimizer.step() 147 | 148 | self.soft_update(self.critic_1, self.target_critic_1) 149 | self.soft_update(self.critic_2, self.target_critic_2) 150 | 151 | env_name = 'Pendulum-v1' 152 | env = gym.make(env_name, render_mode="rgb_array") 153 | env.reset() 154 | env.render() 155 | state_dim = env.observation_space.shape[0] # 3 156 | action_dim = env.action_space.shape[0] # 1 157 | action_bound = env.action_space.high[0] # 动作最大值, 2.0 158 | # print(state_dim, action_dim, action_bound) 159 | random.seed(0) 160 | np.random.seed(0) 161 | torch.manual_seed(0) 162 | 163 | actor_lr = 3e-4 164 | critic_lr = 3e-3 165 | alpha_lr = 3e-4 166 | num_episodes = 100 167 | hidden_dim = 128 168 | gamma = 0.99 169 | tau = 0.005 # 软更新参数 170 | buffer_size = 100000 171 | minimal_size = 1000 172 | batch_size = 64 173 | target_entropy = -env.action_space.shape[0] 174 | device = torch.device("cuda") if torch.cuda.is_available() else torch.device( 175 | "cpu") 176 | 177 | replay_buffer = rl_utils.ReplayBuffer(buffer_size) 178 | agent = SACContinuous(state_dim, hidden_dim, action_dim, action_bound, 179 | actor_lr, critic_lr, alpha_lr, target_entropy, tau, 180 | gamma, device) 181 | 182 | return_list = rl_utils.train_off_policy_agent(env, agent, num_episodes, 183 | replay_buffer, minimal_size, 184 | batch_size) 185 | 186 | episodes_list = list(range(len(return_list))) 187 | plt.plot(episodes_list, return_list) 188 | plt.xlabel('Episodes') 189 | plt.ylabel('Returns') 190 | plt.title('SAC on {}'.format(env_name)) 191 | plt.show() 192 | 193 | mv_return = rl_utils.moving_average(return_list, 9) 194 | plt.plot(episodes_list, mv_return) 195 | plt.xlabel('Episodes') 196 | plt.ylabel('Returns') 197 | plt.title('SAC on {}'.format(env_name)) 198 | plt.show() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RL 2 | basic theory and code of RL. 
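To try the example agents (assuming the pinned packages from `Code/requriments.txt` are installed), run the scripts from inside `Code/` so that `common.rl_utils` resolves: `cd Code && python ppo.py` (PPO on CartPole-v1), `python sac.py` (discrete-action SAC on CartPole-v1), or `python sac_c.py` (continuous-action SAC on Pendulum-v1).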
3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /pic/DQN_Atari.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/DQN_Atari.png -------------------------------------------------------------------------------- /pic/PPO_Clip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/PPO_Clip.png -------------------------------------------------------------------------------- /pic/PPO_KL.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/PPO_KL.png -------------------------------------------------------------------------------- /pic/QAC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/QAC.png -------------------------------------------------------------------------------- /pic/TRPO.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/TRPO.png -------------------------------------------------------------------------------- /pic/TRPO_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/TRPO_2.png -------------------------------------------------------------------------------- /pic/converge_vfa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/converge_vfa.png -------------------------------------------------------------------------------- /pic/evalimp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/evalimp.png -------------------------------------------------------------------------------- /pic/func_design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/func_design.png -------------------------------------------------------------------------------- /pic/mc_epsion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/mc_epsion.png -------------------------------------------------------------------------------- /pic/qpi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/qpi.png -------------------------------------------------------------------------------- /pic/rl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/rl.png -------------------------------------------------------------------------------- /pic/rl_free.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/rl_free.png -------------------------------------------------------------------------------- /pic/sarsa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/sarsa.png -------------------------------------------------------------------------------- /pic/sarsa_vfa.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/sarsa_vfa.png -------------------------------------------------------------------------------- /pic/td_mc_dp_diff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/td_mc_dp_diff.png -------------------------------------------------------------------------------- /pic/traning_stable.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/traning_stable.png -------------------------------------------------------------------------------- /pic/vpi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/pic/vpi.png -------------------------------------------------------------------------------- /rl_sisyphes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/foocker/RL/38446276f5472f04213896b799a72f5d08d154b9/rl_sisyphes.pdf -------------------------------------------------------------------------------- /rl_sisyphes.tex: -------------------------------------------------------------------------------- 1 | \documentclass[UTF8]{ctexart} 2 | \usepackage[colorlinks=true]{hyperref} 3 | 4 | \usepackage{amsmath, bm,amsfonts} 5 | \usepackage{hyperref} 6 | \usepackage[normalem]{ulem} 7 | % \usepackage{enumitem} 8 | % \setlist{nosep} 9 | \usepackage{caption} 10 | \usepackage{graphicx} 11 | % \graphicspath{{./pic/}} 12 | \usepackage[usenames, dvipsnames]{xcolor} 13 | \usepackage{color} 14 | \usepackage{listings} 15 | \input{setting_list} 16 | \usepackage{hologo} 17 | \usepackage{subfigure} 18 | \usepackage{changepage} 19 | 20 | \ctexset{ 21 | section = { 22 | titleformat = \raggedright, 23 | name = {第,节}, 24 | number = \chinese{section} 25 | } 26 | } 27 | 28 | \title{简易强化学习} 29 | \author{Sisyphes} 30 | % \date{\today} 31 | \date{2020年6月6日} 32 | 33 | \begin{document} 34 | \maketitle 35 | \tableofcontents 36 | \newpage 37 | 38 | \section{一些废话} 39 | 40 | RL的学习应该会比较漫长。一个假想的研究员的学习情况,也许首先会看sutton的 complete draft, 41 | 同时实现一些经典算法,然后紧跟学术潮流,看一些来自openAI, DeepMind等的论文, 42 | 这中间也许会选一门课程,比如David Silver,伯克利RL或周博磊等,偶尔也许会翻阅一些博客。 43 | 我的初略体验(一周)是,这里面会存在一些不可避免的问题,比如流行算法的快速更新,新算法实验成本高, 44 | 公式细节上似乎不太严格等。为了较大限度的减少以上问题,扎实经典基础理论,消化新论 45 | 文核心思想(不必推导每个细节),加强代码实践,侧重建模能力练习,也许还不错。 46 | 47 | 本小册子来源于\href{https://github.com/zhoubolei/introRL}{zhoubolei RL},之所以有以上设想, 48 | 也源于周老师的讲义,比如在其中会存在(列表可见\ref{sec:seemswrong}),同样的符号,定义有微差 49 | (一些公式来自sutton,一些公式来自新论文,在约定上的细节差异),下标不严谨,期望下标省略,这些会增加一些理解负担,通过做实验来 50 | 确定会存在更多其他问题。另外,算法的快速迭代,去推导每一步会显得荒谬,而实际建模有近 51 | 
似需求,黑盒化,因此着重练习建模能力,在某一具体问题上深入挖掘,或许会不错。周老师的讲课非常棒,提纲挈领,简明扼要, 52 | 深入浅出。 53 | 54 | 所以,本小册子,以基本概念,基本定理,问题建模,代码实现,新论文的近似阅读为逻辑展开。当前版本0.1,将来若有 55 | 时间,会逐渐完善。 56 | 57 | \subsection{废话补充2023.3.11} 58 | 三年过去了。 59 | 60 | 列举一些基本认识,和新的(相对于2020年)比较清晰的入门教程。 61 | \begin{itemize} 62 | \item \href{https://hrl.boyuai.com/}{动手学强化学习}包含比较全面的介绍,以及代码。代码存在版本问题,我更改了一些细节, 63 | 适应了现在的版本gym=0.26.x。 64 | \end{itemize} 65 | 66 | 强化学习基本工具的认识: 67 | \begin{itemize} 68 | \item Issac Gym:GPU采样 69 | \item \href{https://www.zhihu.com/question/333671830/answer/2387452922}{Isaac Gym的经验介绍} 70 | \item gym 71 | \item \href{https://github.com/Unity-Technologies/ml-agents}{ml-agents} 72 | \item \href{https://www.zhihu.com/question/333671830/answer/2383509386}{各种基本库的经验介绍}。 73 | \end{itemize} 74 | 75 | 链接形式补充了一些遗留的基础,别人写的不错,我也懒得滥竽充数了。个人主要集中在一些基础代码,项目实战,RLHF, 76 | 角色动画,图像生成的精准控制相关的内容上。 77 | 78 | \newpage 79 | \section{基本认识} 80 | \subsection{什么是强化学习} 81 | 82 | \begin{figure}[htbp] 83 | \centering 84 | \includegraphics[width=9.7cm, height=3.7cm]{./pic/rl.png} 85 | \caption{RL Prime} 86 | \label{subrl1} 87 | \end{figure} 88 | a computational approach to learning whereby an agent tries 89 | to maximize the total amount of reward it receives while 90 | interacting with a complex and uncertain environment. --Sutton and Barto 91 | 92 | \begin{itemize} 93 | 94 | \item 基本要素: 95 | \begin{itemize} 96 | \item Agent(智能体) 97 | \item model(模型) 98 | \item value(价值函数) 99 | \item policy(策略函数) 100 | \end{itemize} 101 | 102 | \item 特点: 103 | \begin{itemize} 104 | \item 试错探索 105 | \item 延迟回报 106 | \item 时间问题(序列数据,无既定分布) 107 | \item Agent的动作会影响接下来的输入数据,并改变环境的状态 108 | \end{itemize} 109 | \item 例子: 110 | 111 | \begin{itemize} 112 | \item 策略游戏,跑跑卡丁车,纸片游戏,围棋等 113 | \item 机器人走路,穿衣,抓取东西,拧魔方 114 | \item 路径规划,机器控制 115 | \end{itemize} 116 | 117 | \end{itemize} 118 | 119 | 120 | \subsection{符号定义} 121 | \subsection{小技巧} 122 | \label{sub:smalltricks} 123 | \textbf{增量平均}:一个简单的变换将序列平均值的计算转化为前一个平均值和当前值与前平均值的“差”的和。 124 | \begin{equation} \label{Incremental:Mean} 125 | \begin{aligned} 126 | \mu_{t} &=\frac{1}{t} \sum_{j=1}^{t} x_{j} \\ 127 | &=\frac{1}{t}\left(x_{t}+\sum_{j=1}^{t-1} x_{j}\right) \\ 128 | &=\frac{1}{t}\left(x_{t}+(t-1) \mu_{t-1}\right) \\ 129 | &=\mu_{t-1}+\frac{1}{t}\left(x_{t}-\mu_{t-1}\right) 130 | \end{aligned} 131 | \end{equation} 132 | 133 | % \tag{label}, amsmath, \label{eqn:einstein} 134 | 135 | 136 | \textbf{重要采样}:分布$P$不好采样,用更好采样的$Q$替换它,只是需要同时乘以$P$相对于$Q$的权重因子。 137 | 138 | \[ 139 | E_{x \sim P}[f(x)]=\int f(x) P(x) d x \approx \frac{1}{n} \sum_{i} f\left(x_{i}\right) \label{Important:Sample} 140 | \] 141 | 142 | \[ 143 | \begin{aligned} 144 | \mathbb{E}_{x \sim P}[f(x)] &=\int P(x) f(x) d x \\ 145 | &=\int Q(x) \frac{P(x)}{Q(x)} f(x) d x \\ 146 | &=\mathbb{E}_{x \sim Q}\left[\frac{P(x)}{Q(x)} f(x)\right] \approx \frac{1}{n} \sum_{i} \frac{P\left(x_{i}\right)}{Q\left(x_{i}\right)} f\left(x_{i}\right) 147 | \end{aligned} 148 | \] 149 | 150 | \textbf{对数求导}:利用对数导数为其倒数的特点,可以将连乘函数的导数和化(注意其和重要采样的联合使用)。 151 | 152 | \[ 153 | \begin{aligned} 154 | \nabla_{\theta} \mathbb{E}_{x \sim p_{\theta}(x)}[f(x)] &=\nabla_{\theta} \int f(x) p_{\theta}(x) d x \\ 155 | &=\int f(x) \nabla_{\theta} p_{\theta}(x) d x \\ 156 | &=\int f(x) p_{\theta}(x) \nabla_{\theta} \log p_{\theta}(x) d x \\ 157 | &=\mathbb{E}_{x \sim p_{\theta}(x)}\left[f(x) \nabla_{\theta} \log p_{\theta}(x)\right] \\ 158 | & \approx \frac{1}{N} \sum_{i=1}^{N} f\left(x_{i}\right) \nabla_{\theta} \log p_{\theta}\left(x_{i}\right) 159 | \end{aligned} 160 | \] 161 | 162 | 163 | 
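A minimal numerical sketch of the two estimators above (importance sampling and the log-derivative trick), assuming a one-dimensional Gaussian for both $P$ and $Q$ and using only \texttt{numpy}; the helper names are illustrative:
\begin{lstlisting}[language=Python]
import numpy as np

rng = np.random.default_rng(0)
f = lambda x: x ** 2                    # integrand f(x)

def gauss_pdf(x, mu, sigma):            # density of N(mu, sigma^2)
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

# Importance sampling: estimate E_{x~P}[f(x)] with P = N(1, 1)
# by sampling from Q = N(0, 2) and re-weighting by P(x)/Q(x).
xs = rng.normal(0.0, 2.0, size=100000)  # x_i ~ Q
w = gauss_pdf(xs, 1.0, 1.0) / gauss_pdf(xs, 0.0, 2.0)
print(np.mean(w * f(xs)))               # ~ E_{x~P}[x^2] = 2

# Log-derivative (score-function) estimate of d/dmu E_{x~N(mu,1)}[f(x)]:
# average f(x_i) * d/dmu log p_mu(x_i) = f(x_i) * (x_i - mu).
mu = 1.0
xs = rng.normal(mu, 1.0, size=100000)
print(np.mean(f(xs) * (xs - mu)))       # ~ d/dmu (mu^2 + 1) = 2
\end{lstlisting}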
\textbf{重参数化}:利用分布的映射关系,将复杂函数的采样转化为从简单分布采样,然后映射到复杂分布上去,从而达到 164 | 解决了复杂分布采样难问题。 165 | 166 | \[ 167 | \begin{aligned} 168 | \varepsilon & \sim q(\varepsilon) \\ 169 | x &=g_{\theta}(\varepsilon) \\ 170 | \nabla_{\theta} \mathbb{E}_{x \sim p_{\theta}(x)}[f(x)] &=\nabla_{\theta} \mathbb{E}_{\varepsilon \sim q(\varepsilon)}\left[f\left(g_{\theta}(\varepsilon)\right)\right] \\ 171 | &=\mathbb{E}_{\varepsilon \sim q(\varepsilon)}\left[\nabla_{\theta} f\left(g_{\theta}(\varepsilon)\right)\right] \\ 172 | & \approx \frac{1}{N} \sum_{i=1}^{N}\left(\nabla_{\theta} f\left(g_{\theta}\left(\varepsilon_{i}\right)\right)\right) 173 | \end{aligned} 174 | \] 175 | 176 | 177 | \textbf{共轭梯度}:使梯度的方向保持共轭关系(垂直),极大加快优化速度, 178 | \href{https://en.wikipedia.org/wiki/Conjugate_gradient_method}{Conjugate gradient method} 179 | 180 | 181 | \subsection{基本定理} 182 | 183 | \section{Markov Models} 184 | \subsection{MRP} 185 | “\href{https://en.wikipedia.org/wiki/Markov_chain}{Markov chain}因俄国数学家安德烈·马尔可夫得名, 186 | 为状态空间中经过从一个状态到另一个状态的转换的随机过程。该过程要求具备“无记忆”的性质: 187 | 下一状态的概率分布只能由当前状态决定,在时间序列中它前面的事件均与之无关。” 188 | 189 | 数学符号语言为: 190 | 历史状态:$h_{t}=\left\{s_{1}, s_{2}, s_{3}, \ldots, s_{t}\right\}$,状态$s_t$为Markovian当且仅当 191 | \[ \begin{array}{c} 192 | p\left(s_{t+1} | s_{t}\right)=p\left(s_{t+1} | h_{t}\right) \\ 193 | p\left(s_{t+1} | s_{t}, a_{t}\right)=p\left(s_{t+1} | h_{t}, a_{t}\right) 194 | \end{array} 195 | \] 196 | 197 | % 容易写出状态转移矩阵 198 | % \[ 199 | % P=\left[\begin{array}{cccc} 200 | % P\left(s_{1} | s_{1}\right) & P\left(s_{2} | s_{1}\right) & \dots & P\left(s_{N} | s_{1}\right) \\ 201 | % P\left(s_{1} | s_{2}\right) & P\left(s_{2} | s_{2}\right) & \dots & P\left(s_{N} | s_{2}\right) \\ 202 | % \vdots & \vdots & \ddots & \vdots \\ 203 | % P\left(s_{1} | s_{N}\right) & P\left(s_{2} | s_{N}\right) & \dots & P\left(s_{N} | s_{N}\right) 204 | % \end{array}\right] 205 | % \] 206 | 207 | 若状态转移过程中有奖励(reward),则称之为MRP(Markov Reward Process)。 208 | 其基本元素有: 209 | \begin{adjustwidth}{1.3cm}{0cm} 210 | $S$: 有限状态集($s \in S$)\\ 211 | $P$:状态转移概率$P\left(S_{t+1}=s^{\prime} | s_{t}=s\right)$\\ 212 | $R$:奖励函数(reward)$R\left(s_{t}=s\right)=\mathbb{E}\left[r_{t} | s_{t}=s\right]$\\ 213 | $\gamma$:折扣因子$\gamma \in[0,1]$ 214 | \end{adjustwidth} 215 | 216 | 在实际的RL交互环境中,还需要定义三个变量: 217 | 218 | \begin{adjustwidth}{1.3cm}{0cm} 219 | Horizon: 不同 episode(一个探索周期)的时间步长的最大值\\ 220 | Return: 时间$t$到Horizon的折扣回报和$G_{t}=R_{t+1}+\gamma R_{t+2}+\gamma^{2} R_{t+3}+\gamma^{3} R_{t+4}+\ldots+\gamma^{T-t-1} R_{T} \label{gt}$\\ 221 | 状态价值函数:状态$s$在$t$时刻得到的回报的期望值$V_{t}(s)=\mathbb{E}\left[G_{t} | s_{t}=s\right]$ 222 | \end{adjustwidth} 223 | 224 | 根据定义,对价值函数做变换:$V(s)=\mathbb{E}\left[R_{t+1}+\gamma \mathbb{E}\left[R_{t+2}+\gamma R_{t+3}+ 225 | \gamma^{2} R_{t+4}+\ldots\right] | s_{t}=s\right]$ 226 | 容易得出Bellman equation 227 | 228 | \begin{equation} 229 | V(s)=\underbrace{R(s)}_{\text {Immediate reward }}+ 230 | \underbrace{\gamma \sum_{s^{\prime} \in S} P\left(s^{\prime} | s\right) 231 | V\left(s^{\prime}\right)}_{\text {Discounted sum of future reward }} 232 | \end{equation} 233 | 234 | 于是对于有限状态的MRP,有: 235 | $$\begin{array}{c} 236 | {\left[\begin{array}{c} 237 | V\left(s_{1}\right) \\ 238 | V\left(s_{2}\right) \\ 239 | \vdots \\ 240 | V\left(s_{N}\right) 241 | \end{array}\right]=\left[\begin{array}{c} 242 | R\left(s_{1}\right) \\ 243 | R\left(s_{2}\right) \\ 244 | \vdots \\ 245 | R\left(s_{N}\right) 246 | \end{array}\right]+\gamma\left[\begin{array}{cccc} 247 | P\left(s_{1} | s_{1}\right) & P\left(s_{2} | s_{1}\right) & \dots & P\left(s_{N} | s_{1}\right) \\ 248 | 
P\left(s_{1} | s_{2}\right) & P\left(s_{2} | s_{2}\right) & \dots & P\left(s_{N} | s_{2}\right) \\ 249 | \vdots & \vdots & \ddots & \vdots \\ 250 | P\left(s_{1} | s_{N}\right) & P\left(s_{2} | s_{N}\right) & \dots & P\left(s_{N} | s_{N}\right) 251 | \end{array}\right]\left[\begin{array}{c} 252 | V\left(s_{1}\right) \\ 253 | V\left(s_{2}\right) \\ 254 | \vdots \\ 255 | V\left(s_{N}\right) 256 | \end{array}\right]} 257 | \end{array}$$ 258 | 259 | 即$V=R+\gamma P V$,其代数解为$V=(I-\gamma P)^{-1} R$。不过实际应用中因$P$过大, 260 | 求逆运算复杂度$O\left(N^{3}\right)$过高,会选择迭代方式求解。包含动态规划,蒙特卡洛估计, 261 | Temporal-Difference 学习。 262 | 263 | \subsection{MDP} 264 | MDP:在MRP上增加一个动作项,可用$(S, A, P, R, \gamma)$表示。其中 265 | $A$有限的动作集, 266 | $P^{a}, P\left(s_{t+1}=s^{\prime} | s_{t}=s, a_{t}=a\right), 267 | R\left(s_{t}=s, a_{t}=a\right)=\mathbb{E}\left[r_{t} | s_{t}=s, a_{t}=a\right]$ 268 | 269 | MDP是对决策过程的建模,目标是在决策集(Policies)中寻找最优决策。其中决策函数是动作在状态空间 270 | 的概率分布,用$\pi(a | s)=P\left(a_{t}=a | s_{t}=s\right)$表示状态$s$执行动作$a$的概率,决策 271 | 函数是时间独立的,即对任意$t>0$,$A_{t} \sim \pi(a | s)$。 272 | 273 | 给定MDP($S, A, P, R, \gamma$) 以及决策$\pi$,状态序列$S_1, S_2, \ldots $是一个马尔科夫 274 | 过程$(S, P^{\pi})$, 状态、奖励序列$S_1, R_2, S_2, R_3, \ldots$是马尔科夫奖励过程 275 | $(S, P^{\pi}, R^{\pi}, \gamma)$ 276 | 其中 277 | $$\begin{aligned} 278 | P^{\pi}\left(s^{\prime} | s\right) &=\sum_{a \in A} \pi(a | s) P\left(s^{\prime} | s, a\right) \\ 279 | R^{\pi}(s) &=\sum_{a \in A} \pi(a | s) R(s, a) 280 | \end{aligned} 281 | $$ 282 | 283 | 284 | 策略$\pi$下开始状态为$s$的状态价值函数 285 | $v^{\pi}(s)=\mathbb{E}_{\pi}\left[G_{t} | s_{t}=s\right]$, 286 | 动作价值函数 287 | $q^{\pi}(s, a)=\mathbb{E}_{\pi}\left[G_{t} | s_{t}=s, A_{t}=a\right]$, 288 | 表示在状态$s$执行动作$a$后,按照策略$\pi$执行下去的回报期望。 289 | 根据以上定义,容易得两者的关系: 290 | 291 | \begin{equation} 292 | v^{\pi}(s)=\sum_{a \in A} \pi(a | s) q^{\pi}(s, a) 293 | \end{equation} 294 | 295 | \begin{equation}\label{qsav} 296 | q^{\pi}(s, a)=R_{s}^{a}+\gamma \sum_{s^{\prime} \in S} P\left(s^{\prime} | s, a\right) v^{\pi}\left(s^{\prime}\right) 297 | \end{equation} 298 | 299 | 300 | 其贝尔曼递归形式也容易写出: 301 | \begin{equation} 302 | v^{\pi}(s)=E_{\pi}\left[R_{t+1}+\gamma v^{\pi}\left(s_{t+1}\right) | s_{t}=s\right] 303 | \end{equation} 304 | 305 | \begin{equation} 306 | q^{\pi}(s, a)=E_{\pi}\left[R_{t+1}+\gamma q^{\pi}\left(s_{t+1}, A_{t+1}\right) | s_{t}=s, A_{t}=a\right] 307 | \end{equation} 308 | 309 | 分别把(4)带入(3),(3)带入(4)得到: 310 | \begin{equation} 311 | \begin{aligned} 312 | v^{\pi}(s) &=\sum_{a \in A} \pi(a | s)\left(R(s, a)+\gamma \sum_{s^{\prime} \in S} P\left(s^{\prime} | s, a\right) v^{\pi}\left(s^{\prime}\right)\right) \\ 313 | q^{\pi}(s, a) &=R(s, a)+\gamma \sum_{s^{\prime} \in S} P\left(s^{\prime} | s, a\right) \sum_{a^{\prime} \in A} \pi\left(a^{\prime} | s^{\prime}\right) 314 | q^{\pi}\left(s^{\prime}, a^{\prime}\right) 315 | \end{aligned} 316 | \end{equation} 317 | 318 | 319 | 公式(7)的图(Backup Diagram)表示: 320 | 321 | \begin{figure}[htbp] 322 | \centering 323 | \begin{minipage}[t]{0.48\textwidth} 324 | \centering 325 | \includegraphics[width=5.5cm, height=3.8cm]{./pic/vpi.png} 326 | \caption{V} 327 | \label{piVpi} 328 | \end{minipage} 329 | \begin{minipage}[t]{0.48\textwidth} 330 | \centering 331 | \includegraphics[width=6cm,height=3.7cm]{./pic/qpi.png} 332 | \caption{Q} 333 | \label{picQpi} 334 | \end{minipage} 335 | \end{figure} 336 | 337 | 简言之,一个状态下可采取不同动作,得到状态动作价值; 338 | 一个状态下执行不同的动作,得到奖励,并进入不同的状态; 339 | 一个状态并执行了某一动作,得到即刻回报,进入不同状态,并执行不同动作。 340 | 341 | 建立好模型,并得到了一些关系式,\textbf{问题:如何计算?} 342 | 343 | \subsection{评估和优化} 344 | 345 | 利用\ref{picQpi}, \ref{piVpi}的关系式递推计算,如下迭代: 
346 | $$v_{t+1}(s)=\sum_{a \in \mathcal{A}} \pi(a | s)\left(R(s, a)+\gamma \sum_{s^{\prime} 347 | \in \mathcal{S}} P\left(s^{\prime} | s, a\right) v_{t}\left(s^{\prime}\right)\right),\\ 348 | v_{1} \rightarrow v_{2} \rightarrow \ldots \rightarrow v^{\pi}$$ 349 | 即能得到给定策略$\pi$下的价值函数。 350 | 这里递推关系和一般递关系比如Fibonacci数列顺序上 351 | 是相反的,在实际优化中会一次更新所有值(有限表格,和关键词bootstrapping吻合),效率虽慢,但能得到最优解。 352 | 353 | 策略的最优价值函数:$v^{*}(s)=\max _{\pi} v^{\pi}(s)$ 354 | 355 | 最优策略:$\pi^{*}(s)=\arg \max _{\pi} v^{\pi}(s)$ 356 | 或 357 | $$\pi^{*}(a | s)=\left\{\begin{array}{ll} 358 | 1, & \text { if } a=\arg \max _{a \in A} q^{*}(s, a) \\ 359 | 0, & \text { otherwise } 360 | \end{array}\right.$$ 361 | 362 | 在求得最优状态价值函数或动作价值函数后,根据如上两条定义,很容易得到最优策略。 363 | 这个过程被称为值迭代。 364 | 365 | 这里还有另外一种方式得到最佳策略,一边policy evaluation($V$值更新,划分不是很准确), 366 | 一边利用更新的价值函数计算出动作价值函数然后更新策略(greedy)。 367 | 即: 368 | $$q^{\pi_{i}}(s, a)=R(s, a)+\gamma \sum_{s^{\prime} \in S} P\left(s^{\prime} | s, 369 | a\right) v^{\pi_{i}}\left(s^{\prime}\right)$$ 370 | $$\pi_{i+1}(s)=\underset{a}{\arg \max } q^{\pi_{i}}(s, a)$$ 371 | 372 | 用图可表示为: 373 | \begin{figure}[htbp] 374 | \centering 375 | \includegraphics[width=6.7cm, height=2.7cm]{./pic/evalimp.png} 376 | \caption{rl} 377 | \end{figure} 378 | 379 | 一个MDP模型存在唯一的最优价值函数(证明见Sutton),但最优策略函数不唯一。 380 | 381 | 382 | \begin{center} 383 | Table: Dynamic Programming Algorithms 384 | \end{center} 385 | $$\begin{array}{|c|c|c|} 386 | \hline \text { Problem } & \text { Bellman Equation } & \text { Algorithm } \\ 387 | \hline \text { Prediction } & \text { Bellman Expectation Equation } & \text { Iterative Policy Evaluation } \\ 388 | \hline \text { Control } & \text { Bellman Expectation Equation } & \text { Policy Iteration } \\ 389 | \hline \text { Control } & \text { Bellman Optimality Equation } & \text { Value Iteration } \\ 390 | \hline 391 | \end{array}$$ 392 | 393 | 一些存疑的地方:在RL中总是会存在各种分类,这对初学者不太友好。比如上面的总结,以及讲义中给出的 394 | 两种策略优化的方式,在第二种方式中为何不直接迭代$Q(s,a)$然后更新策略,以此迭代。另外policy evaluation 395 | 是对价值函数$V$的计算,和策略是隐含关系,为何不是值迭代?看上面的表格,就有一点错乱的感觉。中间2,3行是期望 396 | 方差,后列对应的却是评估和迭代,而末列3,4行为迭代,对应的中间却一个期望方程,一个最优方程。 397 | 398 | 简化:值迭代+最后求最值,值迭代+同时策略迭代。 399 | 400 | 理论补充说明(证明见Sutton):策略优化是一个保序的过程。 401 | 一轮优化得到的$\pi^{\prime}(s)=\underset{a}{\arg \max } q^{\pi}(s, a)$有 402 | $v_{\pi^{\prime}}(s) \geq v^{\pi}(s)$,且 403 | \begin{equation} 404 | \begin{aligned} v^{*}(s) &=\max _{a} q^{*}(s, a) \\ q^{*}(s, a) 405 | &=R(s, a)+\gamma \sum_{s^{\prime} \in S} P\left(s^{\prime} | s, 406 | a\right) v^{*}\left(s^{\prime}\right) \end{aligned} 407 | \end{equation} 408 | 409 | 于是得到: 410 | \begin{equation} 411 | \begin{aligned} v^{*}(s) &=\max _{a} R(s, a) + \gamma \sum_{s^{\prime} \in S }P\left(s^{\prime} | s, 412 | a\right) v^{*}\left(s^{\prime}\right)\\ 413 | q^{*}(s, a) &=R(s, a)+\gamma \sum_{s^{\prime} \in S} P\left(s^{\prime} | s, 414 | a\right) \max_{a^{\prime}} q^{*}(s^{\prime}, a^{\prime}) \\ 415 | \end{aligned} 416 | \end{equation} 417 | 取名为\textbf{最优Bellman方程}。 418 | 419 | 以上给出了\ref{subrl1}的MDP建模过程,并给出了在MDP已知的情况下,如何做策略评估,策略迭代,价值迭代。 420 | 但现实世界中MDP模型不存在,或很难给出的情况很常见。 421 | 这种情况,可用图\ref{subrl2}来表示。 422 | \begin{figure}[htbp] 423 | \centering 424 | \includegraphics[width=5cm, height=1.6cm]{./pic/rl_free.png} 425 | \caption{RL Free} 426 | \label{subrl2} 427 | \end{figure} 428 | 学术界称其为Model-free。 429 | 430 | \textbf{问题:如何做Model-free 的预测(值估计)和控制(策略优化)?} 431 | 432 | 433 | \section{Markov Models Free} 434 | 435 | Markov Models Free表示没有状态转移和奖励函数的RL模型,如图\ref{subrl2}所示。 436 | 此时采用让Agent与环境交互,搜集数据,以频率统计的方式来模拟MDP。于是定义trajectory/episode 437 | $\{S_1, A_1, 
R_1, S_2, \ldots, S_T, A_T, R_T\}$,表示Agent从状态$S_1$开始做一个完整的(直到结束,terminal) 438 | 状态、动作、及时获得的采集链。 439 | 440 | 那么在策略$\pi$下,如何从利用这些trajectories来评估价值函数? 441 | 442 | \subsection{评估(预测)} 443 | 对价值函数$V$的估计。 444 | \subsubsection{MC} 445 | $N(s)$表示状态$s$在trajectories出现过的次数(有一次tracjectory表示一次的理解误差), 446 | 根据$G_t$定义\eqref{gt},容易从trajectories算出状态$s$下的$G_t$,因$V_t$表示$G_t$的期望,在MC过程, 447 | 可用平均值替代,于是结合\eqref{Incremental:Mean},在一个episode/tracjectory中,容易得到 448 | $$\begin{array}{l} 449 | N\left(S_{t}\right) \leftarrow N\left(S_{t}\right)+1 \\ 450 | v\left(S_{t}\right) \leftarrow v\left(S_{t}\right)+\frac{1}{N\left(S_{t}\right)}\left(G_{t}-v\left(S_{t}\right)\right) 451 | \end{array} 452 | $$ 453 | 在不严格要求统计关系的模型中,也可以将其简化为: 454 | $v\left(S_{t}\right) \leftarrow v\left(S_{t}\right)+\alpha \left(G_{t}-v\left(S_{t}\right)\right)$ 455 | 456 | MC方式的特点: 457 | \begin{itemize} 458 | \item 必须等到一个episode结束才能更新,从完整的episode中学习 459 | \item 只能在episodic(terminating,得结束)的环境中学习 460 | \item 不需要模型具有Markov特性,在非Markov环境中效果更好 461 | \end{itemize} 462 | 463 | \subsubsection{TD} 464 | Temporal-Difference Learning,克服了MC的必须实验到状态终结的问题(方差大), 465 | 将$G_t$替换为 466 | $R_{t+1} + \gamma v(S_{t+1})$即为$TD(0)$,于是有 467 | \begin{equation}v\left(S_{t}\right) \leftarrow v\left(S_{t}\right)+\alpha\left(R_{t+1}+\gamma v\left(S_{t+1}\right)-v\left(S_{t}\right)\right) \label{TDV} 468 | \end{equation} 469 | 其中$R_{t+1} + \gamma v(S_{t+1})$叫TD target,$\sigma_t=R_{t+1} + \gamma v(S_{t+1})-v(S_t)$叫 470 | TD error。需要注意的是,$v(S_t)$利用了$v(S_{t+1})$,bootstrapping方式,属于自举。 471 | 472 | 容易想出$TD(\infty)$即为MC。他们的关系可用下图刻画: 473 | 474 | \begin{figure}[htbp] 475 | \centering 476 | \includegraphics[width=6.4cm, height=5.2cm]{./pic/td_mc_dp_diff.png} 477 | \caption{mcdptd} 478 | \end{figure} 479 | 480 | \subsection{优化(控制)} 481 | 策略优化:价值函数迭代+Arg Max或策略迭代(策略估计$Q(s, a)$+策略改进)。 482 | 483 | 回想上节的策略迭代,是知道$R(s, a), P(s^{\prime}|s, a)$的,但这里未知,如何 484 | 在采样的过程中进行策略改进?已有答案是$\epsilon-Greedy$探索法。 485 | 486 | 487 | $\epsilon-Greedy$ exploration是指以$1-\epsilon$的概率选择使当前动作价值函数 488 | 最大的动作,以$\epsilon$的概率选择其余动作,得到新策略: 489 | 490 | $$\pi(a | s)=\left\{\begin{array}{ll} 491 | \epsilon /|\mathcal{A}|+1-\epsilon & \text { if } a^{*}=\arg \max _{a \in \mathcal{A}} Q(s, a) \\ 492 | \epsilon /|\mathcal{A}| & \text { otherwise } 493 | \end{array}\right. 494 | $$ 495 | 496 | 497 | 以这种方式改进策略,有如下定理保证: 498 | \begin{theorem}[Policy improvement theorem] 499 | For any $\epsilon-greedy$ policy $\pi$, the $\epsilon-greedy$ policy 500 | $\pi^{\prime} $with respect $q_{\pi}$ is an improvement, 501 | $v_{\pi^{\prime}}(s) \geq v_{\pi}(s)$. 
502 | \end{theorem} 503 | 504 | 证明见Sutton。 505 | 506 | 于是容易写出$\epsilon-greedy$ 版本的 MC Exploration算法流程: 507 | \begin{figure}[htbp] 508 | \centering 509 | \includegraphics[width=8cm, height=4.1cm]{./pic/mc_epsion.png} 510 | \caption{mcepsion} 511 | \end{figure} 512 | 513 | 514 | \subsubsection{Sarsa} 515 | 在$\epsilon-greedy$ 策略改进中使用TD而不是MC即为Sarsa算法。 516 | 这和在价值更新中将MC改进为TD是同样的道理,且在一定程度上,能减少函数值的方差。 517 | 于是容易从\eqref{TDV}中写出$Q(s,a)$函数版本(值估计转化为策略优化)。 518 | $Q\left(S_{t}, A_{t}\right) \leftarrow Q\left(S_{t}, A_{t}\right)+\alpha\left[R_{t+1}+\gamma Q\left(S_{t+1}, 519 | A_{t+1}\right)-Q\left(S_{t}, A_{t}\right)\right]$ 520 | 521 | 综合以上,容易写出one-step 的Sarsa算法流程: 522 | 523 | \begin{figure}[htbp] 524 | \centering 525 | \includegraphics[width=6.1cm, height=2.6cm]{./pic/sarsa.png} 526 | \caption{sarsa} 527 | \end{figure} 528 | 529 | 脑补一下n-step版本。 530 | 531 | \subsubsection{Q-Learning} 532 | Sarsa明显是自举的(bootstrapping),其在下一步状态所做的动作仍由当前$Q(s,a)$函数选出, 533 | 在策略控制上其被分类为On-Policy control,而本小节的Q-Learning为Off-Policy control,其在 534 | 下一个状态的动作选择不是由当前$Q$选出。 535 | \begin{equation}\label{qlearning} 536 | Q\left(S_{t}, A_{t}\right) \leftarrow Q\left(S_{t}, A_{t}\right)+\alpha\left[R_{t+1}+\gamma \max _{a} Q\left(S_{t+1}, a\right)-Q\left(S_{t}, A_{t}\right)\right] 537 | \end{equation} 538 | 539 | \subsection{重要采样的应用} 540 | 策略$\pi$在优化过程中,其形式可能会变得复杂,不好采样,于是根据\eqref{Important:Sample}, 541 | 可选择辅助策略$\mu$来生成episodes:$S_{1}, A_{1}, R_{2}, \dots, S_{T} \sim \mu$, 542 | 计算其$G_t$。 543 | 544 | 若Off-Policy 为Monte Carlo,由\eqref{Important:Sample}可得 $G_{told}$和$G_{tnew}$的关系: 545 | 546 | $$G_{t}^{\pi / \mu}=\frac{\pi\left(A_{t} | S_{t}\right)}{\mu\left(A_{t} | S_{t}\right)} \frac{\pi\left(A_{t+1} | S_{t+1}\right)}{\mu\left(A_{t+1} | S_{t+1}\right)} \cdots 547 | \frac{\pi\left(A_{T} | S_{T}\right)}{\mu\left(A_{T} | S_{T}\right)} G_{t}$$ 548 | 549 | 于是在采样策略$\mu$下的其价值更新变为: 550 | $$V\left(S_{t}\right) \leftarrow V\left(S_{t}\right)+\alpha\left(G_{t}^{\pi / \mu}-V\left(S_{t}\right)\right)$$ 551 | 552 | 若Off-Policy为TD,容易得到其价值更新为: 553 | \begin{equation} 554 | V\left(S_{t}\right) \leftarrow V\left(S_{t}\right)+\alpha\left(\frac{\pi\left(A_{t} | S_{t}\right)}{\mu\left(A_{t} | S_{t}\right)}\left(R_{t+1}+ 555 | \lambda V\left(S_{t+1}\right)\right)-V\left(S_{t}\right)\right) 556 | \end{equation} 557 | 558 | 559 | \textbf{问题:离线策略Q-Learning为何不能使用重要采样?} 560 | 561 | 因为Q-Learning不需要在策略分布上做价值的期望估计。 562 | 完整答案请看 563 | \href{https://www.quora.com/Why-doesn-t-DQN-use-importance-sampling-Dont-we-always-use-this-method-to-correct-the-sampling-error-produced-by-the-off-policy}{这里}。 564 | 565 | \subsection{DP和TD的差异} 566 | 567 | $$\begin{array}{l|l} 568 | \text { Expected Update (DP) } & \text { Sample Update (TD) } \\ 569 | \hline \text { Iterative Policy Evaluation } & \text { TD Learning } \\ 570 | V(s) \leftarrow \mathbb{E}\left[R+\gamma V\left(S^{\prime}\right) | s\right] & V(S) \leftarrow^{\alpha} R+\gamma V\left(S^{\prime}\right) \\ 571 | \hline \text { Q-Policy Iteration } & \text { Sarsa } \\ 572 | Q(S, A) \leftarrow \mathbb{E}\left[R+\gamma Q\left(S^{\prime}, A^{\prime}\right) | s, a\right] & Q(S, A) \leftarrow^{\alpha} R+\gamma Q\left(S^{\prime}, A^{\prime}\right) \\ 573 | \hline \text { Q-Value Iteration } & \text { Q-Learning } \\ 574 | Q(S, A) \leftarrow \mathbb{E}\left[R+\gamma \max _{a^{\prime} \in \mathcal{A}} Q\left(S^{\prime}, A^{\prime}\right) | s, a\right] & Q(S, A) \leftarrow^{\alpha} R+\gamma \max _{a^{\prime} \in \mathcal{A}} Q\left(S^{\prime}, a^{\prime}\right) \\ 575 | \hline \text { where } x \leftarrow^{\alpha} y \text { is 
defined as } x \leftarrow x+\alpha(y-x) & \\ 576 | \end{array} 577 | $$ 578 | 579 | \section{函数参数化} 580 | 前面提出的一些值估计,策略改进的方法,但都是以统计为基础,在和环境交互的同时,搜集状态 581 | 序列,计算统计量,进行价值,状态动作函数的更新(表格式计算)。经典的例子Cliff walk:$4\times 16$个状态;Mountain car:1600个状态; 582 | Tic-Tac-Toe:1000个状态。但当面对西洋棋($10^{47}$),围棋($10^{170}$),器械臂、直升机(连续状态)等情况, 583 | 就显得肌无力。使用带参数的函数,优化参数是可行的。数学上可表达如下: 584 | 585 | \begin{equation} 586 | \begin{aligned} 587 | \hat{v}(s, \mathbf{w}) & \approx v^{\pi}(s) \\ 588 | \hat{q}(s, a, \mathbf{w}) & \approx q^{\pi}(s, a) \\ 589 | \hat{\pi}(a, s, \mathbf{w}) & \approx \pi(a | s) 590 | \end{aligned} 591 | \end{equation} 592 | 这样做还有另一个好处,有了关于状态的具体函数,可计算所有状态的价值。 593 | 594 | \textbf{问题:怎么具体设计函数?参数如何更新?策略如何优化?} 595 | 本小节回答前两问,第三问见节\ref{sec:PGD}(当采用可微函数时)。 596 | 597 | \begin{figure}[htbp] 598 | \centering 599 | \includegraphics[width=5.1cm, height=3.2cm]{./pic/func_design.png} 600 | \caption{func design} 601 | \label{func:design} 602 | \end{figure} 603 | 604 | 函数逼近的可选方案: 605 | \begin{itemize} 606 | \item 特征的线性组合 607 | \item 神经网络 608 | \item 决策树 609 | \item 最邻距离 610 | \end{itemize} 611 | 这里只关心可微方式,输入输出参考\ref{func:design}。 612 | 613 | \subsection{线性组合} 614 | 615 | 对于特征的线性组合,若用$\mathbf{x}(s)=\left(x_{1}(s), \ldots, x_{n}(s)\right)^{T}$ 616 | 表示状态特征向量,则价值函数可表示为:$\hat{v}(s, \mathbf{w})=\mathbf{x}(s)^{T} \mathbf{w}=\sum_{j=1}^{n} x_{j}(s) w_{j}$, 617 | 若目标函数采用平方差,则优化目标:$J(\mathbf{w})=\mathbb{E}_{\pi}\left[\left(v^{\pi}(s)-\mathbf{x}(s)^{T} \mathbf{w}\right)^{2}\right]$ 618 | 其梯度更新:$\Delta \mathbf{w}=\alpha\left(v^{\pi}(s)-\hat{v}(s, \mathbf{w})\right) \mathbf{x}(s)$ 619 | ,若把参数更新方式写成文字形式,有: 620 | 621 | $$\textbf{参数变化量} = \textbf{步长} \times \textbf{预测误差} \times \textbf{特征向量}$$ 622 | 623 | 数学抽象做完了,回到实际环境中,需要把理想的$v^{\pi}(s)$替换回实际中的值。结合上一节的MC,TD 624 | 更新方式,容易得到各自对应的更新版本。 625 | 626 | 对MC: 627 | $\Delta \mathbf{w}=\alpha\left(G_{t}-\hat{v}(s_t, \mathbf{w})\right) \nabla_{\mathbf{w}} \hat{v}\left(s_{t}, \mathbf{w}\right)$ 628 | 629 | 对TD(0): 630 | $\Delta \mathbf{w}=\alpha\left(R_{t+1} + \gamma \hat{v}(s_{t+1}, \mathbf{w})-\hat{v}(s_t, \mathbf{w})\right) \nabla_{\mathbf{w}} \hat{v}\left(s_{t}, \mathbf{w}\right)$ 631 | 632 | 在线性组合的方式下, 633 | 634 | MC: 635 | $\Delta \mathbf{w}=\alpha\left(G_{t}-\hat{v}(s_t, \mathbf{w})\right) \mathbf{x}(s_t)$ 636 | 637 | TD(0): 638 | $\Delta \mathbf{w}=\alpha\left(R_{t+1} + \gamma \hat{v}(s_{t+1}, \mathbf{w})-\hat{v}(s_t, \mathbf{w})\right) \mathbf{x}(s_t)$ 639 | 640 | 需要注意的是,上述梯度下降为semi-gradient,因为其目标值中它自己。 641 | 642 | 同理可得,控制算法的更新方式: 643 | 644 | MC: 645 | \begin{equation}\Delta \mathbf{w}=\alpha\left(G_{t}-\hat{q}\left(s_{t}, a_{t}, \mathbf{w}\right)\right) \nabla_{\mathbf{w}} \hat{q}\left(s_{t}, a_{t}, \mathbf{w}\right)\end{equation} 646 | 647 | Sarsa: 648 | \begin{equation}\Delta \mathbf{w}=\alpha\left(R_{t+1}+\gamma \hat{q}\left(s_{t+1}, a_{t+1}, \mathbf{w}\right)-\hat{q}\left(s_{t}, a_{t}, \mathbf{w}\right)\right) \nabla_{\mathbf{w}} \hat{q}\left(s_{t}, a_{t}, \mathbf{w}\right)\end{equation} 649 | 650 | Q-Learning: 651 | \begin{equation}\Delta \mathbf{w}=\alpha\left(R_{t+1}+\gamma \max _{a} \hat{q}\left(s_{t+1}, a, \mathbf{w}\right)-\hat{q}\left(s_{t}, a_{t}, \mathbf{w}\right)\right) \nabla_{\mathbf{w}} \hat{q}\left(s_{t}, a_{t}, \mathbf{w}\right)\end{equation} 652 | Sarsa的VFA(Value Function Approximation)控制算法流程: 653 | 654 | \begin{figure}[htbp] 655 | \centering 656 | \includegraphics[width=7.1cm, height=3.5cm]{./pic/sarsa_vfa.png} 657 | \caption{sarsa vfa} 658 | \label{sarsa:vfa} 659 | \end{figure} 660 | 661 | \textbf{问题:参数逼近的控制算法收敛性如何?}见下表: 662 | 663 | \begin{figure}[htbp] 664 
| \centering 665 | \includegraphics[width=8.1cm, height=2.8cm]{./pic/converge_vfa.png} 666 | \caption{converge vfa} 667 | \label{converge:vfa} 668 | \end{figure} 669 | 670 | 671 | \subsection{DQN} 672 | 特征线性组合的难点和以前CV手工设计特征类似,如何做出好的特征表示,如何对复杂问题进行有效的特征选取等问题。 673 | 类似于CNN替换SIFT,ORB等特征提取方式,2015年,DeepMind发表了一篇名为“Human-level control through deep reinforcement learning” 674 | 的文章,将DL引入了RL,给出了一种有效的价值函数的非线性表达方法,同时不需要手工设计特征,并在 675 | Breakout, Pong, Montezuma’s Revenge, Private Eye四款游戏上达到了人类专家水平。随之而来的是DQN的各种升级,可参考 676 | \href{https://deepmind.com/blog/article/Agent57-Outperforming-the-human-Atari-benchmark}{DQN综述}。 677 | 678 | DQN对Atari Games的建模\ref{DQN:Atari}, 679 | \begin{figure}[htbp] 680 | \centering 681 | \includegraphics[width=6.7cm, height=3.7cm]{./pic/DQN_Atari.png} 682 | \caption{DQN Atari} 683 | \label{DQN:Atari} 684 | \end{figure} 685 | 从中可以看到基本思想就是:输入像素图,输出状态动作函数值。该建模方式有一些固有的坑, 686 | 论文中对采样关联,目标值非平稳两个问题给出了解决方案,分别是经验池采样,固定目标函数。 687 | 688 | 经验池是指设定一个缓存区$\mathcal{D}$, 存放历史样本 689 | $\left(s, a, r, s^{\prime}\right)$,并从中采样。 690 | 固定目标函数是指,使用另外一组参数$\mathbf{w}^{-}$来计算目标值$r+\gamma \max _{a^{\prime}} \hat{Q}(s^{\prime}, a^{\prime}, \mathbf{w}^{-})$, 691 | 这里样本来自$\mathcal{D}$。 692 | 最终参数更新量为: 693 | \begin{equation} 694 | \Delta \mathbf{w}=\alpha\left(r+\gamma \max _{a^{\prime}} \hat{Q}\left(s^{\prime}, a^{\prime}, \mathbf{w}\right)- 695 | Q(s, a, \mathbf{w})\right) \nabla_{\mathbf{w}} \hat{Q}(s, a, \mathbf{w}) 696 | \end{equation} 697 | 698 | % DQN的问题远不止以上两点,综述论文可参考:\href{https://arxiv.org/pdf/1710.02298.pdf}{Rainbow} 699 | \section{策略参数化} 700 | \subsection{基础建模} 701 | \label{sec:PGD} 702 | 上一节提出了函数参数化,并给出了值函数的实现例子,这一节给出策略函数的实现方式。虽然arg max 值函数可得到策略, 703 | 但这种方式基本不能给出随机策略,实际情况,会有大量非完全确定的策略需求,比如在石头剪刀布游戏中,其最优策略(纳什均衡点)为随机策略, 704 | 在一些带有闭环的探索游戏中,也需要随机策略来避免无限循环。而若将策略函数设置为动作的概率分布函数,则可实现这一点,同时有也可省去 705 | 值函数。 706 | 707 | 设$\pi_{\theta}(s, a)$是以$\theta$为参数的策略函数,如何定义策略的质量? 
\section{Policy Parameterization}
\subsection{Basic Formulation}
\label{sec:PGD}
The previous section introduced function parameterization and gave value-function examples; this section parameterizes the policy itself. Taking the arg max of a value function does yield a policy, but it can hardly produce stochastic policies, and in practice stochastic policies are often exactly what is needed: in rock-paper-scissors the optimal policy (the Nash equilibrium) is stochastic, and in exploration problems that contain loops a stochastic policy is needed to avoid cycling forever. Modeling the policy directly as a probability distribution over actions achieves this, and can even remove the need for a value function.

Let $\pi_{\theta}(s, a)$ be a policy parameterized by $\theta$. How should we measure the quality of a policy?

In an episodic environment it can be defined as $J_{1}(\theta)=V^{\pi_{\theta}}(s_1)=\mathbb{E}_{\pi_{\theta}}[v_1]$,
the expected value of the start state. In a continuing environment there are two common choices: the average value, $J_{avV}(\theta)=\sum_{s}d^{\pi_{\theta}}(s)V^{\pi_{\theta}}(s)$,
and the average reward per time step, $J_{avR}(\theta)=\sum_{s}d^{\pi_{\theta}}(s)\sum_{a}\pi_{\theta}(s,a)R(s,a)$,
where $d^{\pi_{\theta}}$ is the stationary distribution of the Markov chain induced by $\pi_{\theta}$. The last form is the most convenient, and the corresponding optimization problem can be written as:

\begin{equation}\theta^{*}=\underset{\theta}{\arg \max } \mathbb{E}_{\tau \sim \pi_{\theta}}
\left[\sum_{t} r\left(s_{t}, a_{t}^{\tau}\right)\right]\end{equation}
where $\tau$ is a trajectory sampled under the policy $\pi_{\theta}$.

On optimizing $J(\theta)$: if $J(\theta)$ is differentiable, gradient descent, conjugate gradient, or quasi-Newton methods apply; if it is not differentiable, or its derivatives are hard to compute, derivative-free methods such as the cross-entropy method (CEM), hill climbing, or evolutionary algorithms can be used.


In the multi-step MDP setting, a state-action trajectory is generated according to
$$\tau = (s_0, a_0, r_1, s_1, a_1,
\ldots, s_{T-1}, a_{T-1}, r_{T}, s_T) \sim (\pi_{\theta},
P(s_{t+1}|s_{t}, a_t)).$$
Let $R_{\tau}=\sum_{t=0}^{T}R(s_t, a_t)$ denote the total reward of a trajectory. Then
\begin{equation}\label{mdpjthta} J(\theta)=\mathbb{E}_{\pi_{\theta}}\left[\sum_{t=0}^{T} R\left(s_{t},
a_{t}\right)\right]=\sum_{\tau} P(\tau ; \theta) R(\tau),\end{equation}
where $P(\tau ; \theta)=\mu\left(s_{0}\right) \prod_{t=0}^{T-1} \pi_{\theta}\left(a_{t} | s_{t}\right) p\left(s_{t+1} | s_{t}, a_{t}\right)$
is the probability of the trajectory under $\pi_{\theta}$. The optimization problem becomes:

\begin{equation}\theta^{*}=\underset{\theta}{\arg \max } J(\theta)=
\underset{\theta}{\arg \max } \sum_{\tau} P(\tau ; \theta) R(\tau)\end{equation}
Using the log-derivative trick of Section \ref{sub:smalltricks}, the gradient of \eqref{mdpjthta} follows easily:

\begin{equation}\nabla_{\theta} J(\theta)=\sum_{\tau} P(\tau ; \theta) R(\tau) \nabla_{\theta} \log P(\tau ; \theta)\end{equation}
Expanding $\nabla_{\theta} \log P(\tau ; \theta)$:
\begin{equation}\begin{aligned}
\nabla_{\theta} \log P(\tau ; \theta) &=\nabla_{\theta} \log \left[\mu\left(s_{0}\right) \prod_{t=0}^{T-1} \pi_{\theta}\left(a_{t} | s_{t}\right) p\left(s_{t+1} | s_{t}, a_{t}\right)\right] \\
&=\nabla_{\theta}\left[\log \mu\left(s_{0}\right)+\sum_{t=0}^{T-1} \log \pi_{\theta}\left(a_{t} | s_{t}\right)+\log p\left(s_{t+1} | s_{t}, a_{t}\right)\right] \\
&=\sum_{t=0}^{T-1} \nabla_{\theta} \log \pi_{\theta}\left(a_{t} | s_{t}\right)
\end{aligned}
\end{equation}
The dynamics terms $\mu$ and $p$ do not depend on $\theta$ and drop out, so the policy gradient for the multi-step MDP can be estimated from $m$ sampled trajectories as:
\begin{equation}
\nabla_{\theta} J(\theta) \approx \frac{1}{m} \sum_{i=1}^{m} R\left(\tau_{i}\right) \sum_{t=0}^{T-1} \nabla_{\theta}
\log \pi_{\theta}\left(a_{t}^{i} | s_{t}^{i}\right)
\end{equation}

The final gradient formula admits an intuitive reading: {\color{red}for the expectation (integral) of a weighted quantity, the gradient is the derivative of the log of the underlying probability times the corresponding weight (here the weight is attached to a whole trajectory)}.
With this intuition we can guess the gradients of the other common policy objectives.

When the weight is $G_{t}$ (the return collected from time $t$ onward within a trajectory):
\begin{equation} \label{JGt} \nabla_{\theta} J(\theta)=\mathbb{E}_{\pi_{\theta}}\left[\sum_{t=0}^{T-1} G_{t}
\cdot \nabla_{\theta} \log \pi_{\theta}\left(a_{t} | s_{t}\right)\right]\end{equation}

When the weight is $Q_{w}(s_t, a_t)$:
\begin{equation} \label{JQsa}\nabla_{\theta} J(\theta)=\mathbb{E}_{\pi_{\theta}}\left[\sum_{t=0}^{T-1} Q_{\color{red}{w}}(s_t, a_t)
\cdot \nabla_{\color{red}{\theta}} \log \pi_{\theta}\left(a_{t} | s_{t}\right)\right]\end{equation}

For verification see Chapter 5 of Bolei Zhou's lecture notes; the guesses above are indeed correct.
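Equation \eqref{JGt} is the REINFORCE estimator. The sketch below, under assumptions, turns it into a loss that automatic differentiation can handle: minimizing $-\sum_t G_t \log \pi_{\theta}(a_t|s_t)$ ascends the policy gradient. \texttt{log\_probs} is assumed to hold the per-step log-probabilities recorded while sampling one trajectory (for example from a \texttt{torch.distributions.Categorical} policy) and \texttt{rewards} the corresponding rewards; the function name is illustrative.

\begin{lstlisting}[style=mystyle, language=Python]
import torch

def reinforce_loss(log_probs, rewards, gamma=0.99):
    """Monte-Carlo policy-gradient (REINFORCE) surrogate loss for one trajectory.

    log_probs[t] is log pi_theta(a_t | s_t) (a scalar tensor with grad),
    rewards[t] is the reward received after taking a_t.
    """
    returns, g = [], 0.0
    for r in reversed(rewards):          # G_t = r_{t+1} + gamma * G_{t+1}
        g = r + gamma * g
        returns.append(g)
    returns.reverse()
    returns = torch.tensor(returns)
    # Minimizing -sum_t G_t * log pi(a_t|s_t) follows the gradient of J(theta).
    return -(torch.stack(log_probs) * returns).sum()

# Typical use after collecting one episode:
#   loss = reinforce_loss(log_probs, rewards)
#   optimizer.zero_grad(); loss.backward(); optimizer.step()
\end{lstlisting}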
Within a single trajectory, however, $G_{t}$ usually has high variance. How can we reduce the variance while keeping the expectation unchanged?
The answer is to subtract the expected return, a baseline.

Take the baseline $b(s_t) = \mathbb{E}[r_t + r_{t+1} + \ldots + r_{T-1}]$. Then
$\mathbb{E}_{\tau}[\nabla_{\theta}\log\pi_{\theta}(a_{t}|s_t)\,b(s_t)]=0$, and for a well-chosen baseline
\begin{equation}\begin{aligned}
E_{\tau}\left[\nabla_{\theta} \log \pi_{\theta}\left(a_{t} | s_{t}\right)\left(G_{t}-b\left(s_{t}\right)\right)\right] &=E_{\tau}\left[\nabla_{\theta} \log \pi_{\theta}\left(a_{t} | s_{t}\right) G_{t}\right] \\
\operatorname{Var}_{\tau}\left[\nabla_{\theta} \log \pi_{\theta}\left(a_{t} | s_{t}\right)\left(G_{t}-b\left(s_{t}\right)\right)\right] &<\operatorname{Var}_{\tau}\left[\nabla_{\theta} \log \pi_{\theta}\left(a_{t} | s_{t}\right) G_{t}\right]
\end{aligned}
\end{equation}

The gradient estimator therefore becomes:
\begin{equation} \label{JGtb} \nabla_{\theta} J(\theta)=\mathbb{E}_{\pi_{\theta}}\left[\sum_{t=0}^{T-1} {\color{red} (G_{t} - b_{w}(s_t))}
\cdot \nabla_{\theta} \log \pi_{\theta}\left(a_{t} | s_{t}\right)\right]\end{equation}

In the same spirit, \eqref{JQsa} can be given a baseline as well, but let us first follow the historical development of RL. In textbooks \eqref{JQsa} is called the
Actor-Critic policy gradient: $\pi_{\theta}(a|s)$ plays the role of the Actor and $Q_{w}(s, a)$ plays the role of the
Critic, each with its own parameters, much like the two networks of a GAN.

\subsubsection{Actor-Critic}
When the Critic is approximated with a linear value function, $Q_{\mathbf{w}}(s,a)=\phi(s, a)^{T}\mathbf{w}$,
the Critic is updated by linear $TD(0)$ and the Actor by the policy gradient. A simple version (QAC) is shown in Figure \ref{QAC}.

\begin{figure}[htbp]
\centering
\includegraphics[width=8cm, height=3.1cm]{./pic/QAC.png}
\caption{QAC}
\label{QAC}
\end{figure}

How do we apply the baseline trick to the Actor-Critic algorithm above? Recall the definitions of $Q$ and $V$ under a policy $\pi$:
$Q^{\pi, \gamma}(s, a)=\mathbb{E}_{\pi}\left[r_{1}+\gamma r_{2}+\ldots | s_{1}=s, a_{1}=a\right]$

$V^{\pi, \gamma}(s)=\mathbb{E}_{\pi}\left[r_{1}+\gamma r_{2}+\ldots | s_{1}=s\right]=\mathbb{E}_{a \sim \pi}[Q^{\pi, \gamma}(s,a)]$
Because the state value is the expectation of the action value under the policy, it is a baseline that leaves the expected gradient unchanged, so in the update we simply replace $Q$ by $Q - V$.
This yields an important quantity, the advantage function.
\begin{definition}[Advantage function]
$$A^{\pi, \gamma}(s, a) = Q^{\pi, \gamma}(s, a) - V^{\pi, \gamma}(s) $$
\end{definition}

The corresponding policy gradient is:

\begin{equation} \label{JAC} \begin{aligned}\nabla_{\theta} J(\theta)&=\mathbb{E}_{\pi_{\theta}}\left[\sum_{t=0}^{T-1} (Q_{{\color{red} w}}(s_t, a_t) - V(s_t))
\cdot \nabla_{\theta} \log \pi_{{\color{red} \theta}}\left(a_{t} | s_{t}\right)\right] \\
&= \mathbb{E}_{\pi_{\theta}}\left[\sum_{t=0}^{T-1} A_{{\color{red} w}}(s_t, a_t)
\cdot \nabla_{\theta} \log \pi_{{\color{red} \theta}}\left(a_{t} | s_{t}\right)\right] \end{aligned}
\end{equation}

These are the basic policy-gradient algorithms. What problems do policy gradients have? Answering that properly calls for more detailed derivation, analysis, and experiments.
On how policy gradients get around non-differentiable operations, see \href{http://karpathy.github.io/2016/05/31/rl/}{PG overcome the non-differentiable computation}.
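Before moving on, here is a minimal PyTorch-style sketch of one update of \eqref{JAC}. The \texttt{actor} and \texttt{critic} modules are hypothetical: \texttt{actor(s)} is assumed to return a \texttt{torch.distributions.Categorical} and \texttt{critic(s)} a scalar tensor for $V(s)$; the one-sample TD error stands in for the advantage, anticipating the TD-error result of the next subsection.

\begin{lstlisting}[style=mystyle, language=Python]
import torch
import torch.nn.functional as F

def actor_critic_step(actor, critic, actor_opt, critic_opt,
                      transition, gamma=0.99):
    """One advantage actor-critic update from a single (s, a, r, s', done) transition."""
    s, a, r, s2, done = transition
    v_s = critic(s)                                   # V(s), keeps its gradient
    with torch.no_grad():                             # targets are treated as constants
        v_target = torch.as_tensor(r, dtype=torch.float32)
        if not done:
            v_target = v_target + gamma * critic(s2)
        advantage = v_target - v_s                    # delta = r + gamma V(s') - V(s)

    # Critic: semi-gradient TD(0) toward the bootstrapped target.
    critic_loss = F.mse_loss(v_s, v_target)
    # Actor: policy gradient weighted by the advantage estimate.
    actor_loss = -advantage * actor(s).log_prob(torch.as_tensor(a))

    actor_opt.zero_grad()
    critic_opt.zero_grad()
    (actor_loss + critic_loss).backward()
    actor_opt.step()
    critic_opt.step()
    return advantage.item()
\end{lstlisting}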
\subsection{Improving the Policy Gradient}
For the strengths and weaknesses of the different algorithms, practical experiments matter as much as the theory.
\subsubsection{Policy Gradient→TRPO→ACKTR→PPO}

As summarized in \href{https://arxiv.org/abs/1506.02438}{High-Dimensional Continuous Control Using Generalized Advantage Estimation},
the policy gradient comes in several variants, differing in the weight attached to the score function:
\begin{equation}\begin{aligned}
\nabla_{\theta} J(\theta) &=\mathbb{E}_{\pi_{\theta}}\left[\nabla_{\theta} \log \pi_{\theta}(s, a) G_{t}\right]-\text { REINFORCE } \\
&=\mathbb{E}_{\pi_{\theta}}\left[\nabla_{\theta} \log \pi_{\theta}(s, a) Q^{\mathrm{w}}(s, a)\right]-\mathrm{Q} \text { Actor-Critic } \\
&=\mathbb{E}_{\pi_{\theta}}\left[\nabla_{\theta} \log \pi_{\theta}(s, a) A^{\mathrm{w}}(s, a)\right]-\text { Advantage Actor-Critic } \\
&=\mathbb{E}_{\pi_{\theta}}\left[\nabla_{\theta} \log \pi_{\theta}(s, a) \delta\right]-\text { TD Actor-Critic }
\end{aligned}
\end{equation}
The first three have already appeared. For the fourth, its relation to the advantage function may not be obvious at a glance; in fact we have the following result:
\begin{corollary}
Let the value function be $V^{\pi_{\theta}}(s)$ and the $TD$ error be $\delta^{\pi_{\theta}}=r(s, a)+\gamma V^{\pi_{\theta}}\left(s^{\prime}\right)-V^{\pi_{\theta}}(s)$. Then
$\mathbb{E}_{\pi_{\theta}}[\delta^{\pi_{\theta}}|s,a] = A^{\pi_{\theta}}(s,a)$.
\end{corollary}
The proof follows from \eqref{qsav}: $\mathbb{E}_{\pi_{\theta}}[\delta^{\pi_{\theta}}|s,a]=r(s,a)+\gamma\,\mathbb{E}_{\pi_{\theta}}[V^{\pi_{\theta}}(s^{\prime})|s,a]-V^{\pi_{\theta}}(s)=Q^{\pi_{\theta}}(s,a)-V^{\pi_{\theta}}(s)=A^{\pi_{\theta}}(s,a)$.

Problems with the policy gradient:
\begin{itemize}
\item Sample efficiency is low, because learning is on-policy.
\item An overly large update or a badly chosen step size can make training collapse:
\begin{itemize}
\item unlike supervised learning, the data used for the update are not independent; they are generated by the current policy
\item a step that is too large produces a bad policy, the bad policy collects even worse data, and the vicious circle compounds
\item it is then very hard to escape from the bad policy, which often ends in model collapse
\end{itemize}
\end{itemize}

\begin{figure}[htbp]
\centering
\includegraphics[width=5.6cm, height=3.4cm]{./pic/traning_stable.png}
\caption{Training instability}
\label{traning:unstatble}
\end{figure}

To address the training instability and the on-policy restriction, John Schulman et al. proposed TRPO, which uses a trust region and natural gradient descent to keep updates stable, and uses importance sampling so that samples collected by the previous policy can be reused.

The core idea is to limit how much the policy changes from one update to the next. Since a policy is a probability distribution, the KL divergence is the natural way to measure the change:
$$d^{*}=\arg \max J(\theta+d), \text { s.t. } K L\left[\pi_{\theta} \| \pi_{\theta+d}\right]=c$$
With importance sampling the surrogate objective becomes:
$$\begin{array}{l}
\qquad J_{\theta_{\text {old }}}(\theta)=\mathbb{E}_{t}\left[\frac{\pi_{\theta}\left(a_{t} | s_{t}\right)}{\pi_{\theta_{\text {old }}}\left(a_{t} | s_{t}\right)} R_{t}\right] \\
\text { subject to } K L\left(\pi_{\theta_{\text {old }}}\left(. | s_{t}\right)|| \pi_{\theta}\left(. | s_{t}\right)\right) \leq \delta
\end{array}
$$
After some calculation (a first-order expansion of the objective and a second-order expansion of the KL constraint), the update becomes:
$$\theta_{t+1}=\theta_{t}+\sqrt{\frac{2 \delta}{g^{T} H^{-1} g}} H^{-1} g$$
where
$$H=\nabla_{\theta}^{2} K L\left(\pi_{\theta_{t}} \| \pi_{\theta}\right)=E_{a, s \sim \pi_{\theta_{t}}}\left[\nabla_{\theta} \log \pi_{\theta}(a, s) \nabla_{\theta} \log \pi_{\theta}(a, s)^{T}\right]$$

Putting these together, the natural-gradient version of TRPO is summarized in Figure \ref{Nature:TRPO}.

\begin{figure}[htbp]
\centering
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=6.8cm, height=4.2cm]{./pic/TRPO.png}
\caption{TRPO with natural gradient descent}
\label{Nature:TRPO}
\end{minipage}
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=7cm, height=4cm]{./pic/TRPO_2.png}
\caption{TRPO with conjugate gradient}
\label{Nature:CTRPO}
\end{minipage}
\end{figure}

The variant that avoids forming the inverse of the Fisher Information Matrix explicitly, by solving $Hx=g$ with the conjugate-gradient method, is shown in Figure \ref{Nature:CTRPO}.

ACKTR improves the computation involving the inverse of the FIM further, using a block-wise (Kronecker-factored) approximation of the matrix.


PPO makes two changes. First, it merges TRPO's constraint into the objective as a penalty:
$$\operatorname{maximize}_{\theta} \mathbb{E}_{t}\left[\frac{\pi_{\theta}\left(a_{t} |
s_{t}\right)}{\pi_{\theta_{\text {old }}}\left(a_{t} | s_{t}\right)} A_{t}\right]-\beta
\mathbb{E}_{t}\left[K L\left[\pi_{\theta_{\text {old }}}\left(\cdot | s_{t}\right),
\pi_{\theta}\left(\cdot | s_{t}\right)\right]\right]$$
The benefit of the merge is that no second-order derivatives are needed; everything reduces to first-order gradients and is faster in practice.

The penalty coefficient $\beta$ is also adapted dynamically, depending on how far the measured KL divergence is from a target value; see Figure \ref{PPO:Penalty}.

Second, the PPO-Clip variant applies a blunt but effective restriction directly to the ratio of the new to the old policy, $r_{t}(\theta)=\frac{\pi_{\theta}(a_{t} | s_{t})}{\pi_{\theta_{\text {old }}}(a_{t} | s_{t})}$, combined with the advantage estimate:

$$L_{t}(\theta)=\min \left(r_{t}(\theta) \hat{A}_{t}, \operatorname{clip}\left(r_{t}(\theta), 1-\epsilon, 1+\epsilon\right) \hat{A}_{t}\right)$$
Intuitively, whenever the ratio between the new and old policies leaves $(1-\epsilon, 1+\epsilon)$ it is clipped as above, so the objective gives no incentive to push the new policy far away from the old one.

The two resulting algorithm flows are shown in Figure \ref{PPO:Penalty} and Figure \ref{PPO:Clip}.
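A minimal sketch of the clipped surrogate loss is given below, assuming the old log-probabilities, the current log-probabilities, and the advantage estimates for a batch have already been gathered into tensors; the function name and signature are illustrative rather than any library's API.

\begin{lstlisting}[style=mystyle, language=Python]
import torch

def ppo_clip_loss(log_probs_new, log_probs_old, advantages, eps=0.2):
    """Clipped PPO surrogate L_t(theta), averaged over a batch of transitions."""
    # r_t(theta) = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t); gradients flow
    # only through the current log-probabilities.
    ratio = torch.exp(log_probs_new - log_probs_old.detach())
    surr1 = ratio * advantages
    surr2 = torch.clamp(ratio, 1.0 - eps, 1.0 + eps) * advantages
    # Maximizing min(surr1, surr2) is the same as minimizing its negation.
    return -torch.min(surr1, surr2).mean()
\end{lstlisting}
In practice this loss is combined with a value-function loss and an entropy bonus and optimized for several epochs over each batch of collected data.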
\begin{figure}[htbp]
\centering
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=6.5cm, height=3.7cm]{./pic/PPO_KL.png}
\caption{PPO KL Penalty}
\label{PPO:Penalty}
\end{minipage}
\begin{minipage}[t]{0.48\textwidth}
\centering
\includegraphics[width=6.2cm, height=3.7cm]{./pic/PPO_Clip.png}
\caption{PPO Clip}
\label{PPO:Clip}
\end{minipage}
\end{figure}

\subsubsection{Q-learning→DDPG→TD3→SAC}
\begin{itemize}
\item \href{https://hrl.boyuai.com/chapter/2/sac%E7%AE%97%E6%B3%95}{SAC}
\end{itemize}

\section{Model Based}
See \href{https://hrl.boyuai.com/chapter/3/%E5%9F%BA%E4%BA%8E%E6%A8%A1%E5%9E%8B%E7%9A%84%E7%AD%96%E7%95%A5%E4%BC%98%E5%8C%96}{model-based policy optimization}.


\section{Imitation Learning}
See \href{https://hrl.boyuai.com/chapter/3/%E6%A8%A1%E4%BB%BF%E5%AD%A6%E4%B9%A0}{imitation learning}.

\section{Distributed Systems}
Possibly to be studied in more depth later.

\section{Multi-Agent RL}
\begin{itemize}
\item \href{https://hrl.boyuai.com/chapter/3/%E5%A4%9A%E6%99%BA%E8%83%BD%E4%BD%93%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E5%85%A5%E9%97%A8}{Part one}
\item \href{https://hrl.boyuai.com/chapter/3/%E5%A4%9A%E6%99%BA%E8%83%BD%E4%BD%93%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0%E8%BF%9B%E9%98%B6}{Part two}
\end{itemize}

\section{Code}
See the Code directory, or
\begin{itemize}
\item \href{https://github.com/thu-ml/tianshou}{tianshou}
\item \href{https://github.com/DLR-RM/stable-baselines3}{stable-baselines3}
\item \href{https://github.com/boyu-ai/Hands-on-RL}{Hands-on-RL} (an older codebase; the Code directory here updates it)
\item \href{https://www.raylib.com/}{raylib}
\end{itemize}

\section{RLHF}
\begin{itemize}
\item \href{https://github.com/HarderThenHarder/transformers_tasks/tree/main/RLHF}{Includes a ChatGPT reward-model training pipeline}
\end{itemize}

\end{document}
--------------------------------------------------------------------------------
/setting_list.tex:
--------------------------------------------------------------------------------
1 | % settings for listings.sty
2 | % \newtheorem{theorem}{\hspace{2em}定理}[Section]
3 | % \newtheorem{theorem}{定理}[section]
4 | 
5 | \newtheorem{theorem}{\indent 定理}[section]
6 | \newtheorem{lemma}[theorem]{\indent 引理}
7 | \newtheorem{proposition}[theorem]{\indent 命题}
8 | \newtheorem{corollary}[theorem]{\indent 推论}
9 | \newtheorem{definition}{\indent 定义}[section]
10 | \newtheorem{example}{\indent 例}[section]
11 | \newtheorem{remark}{\indent 注}[section]
12 | \newenvironment{solution}{\begin{proof}[\indent\textbf{解}]}{\end{proof}}
13 | \renewcommand{\proofname}{\indent \textbf{证明}}
14 | 
15 | \renewcommand{\lstlistingname}{代码清单}
16 | \lstdefinestyle{lfonts}{
17 |     basicstyle = \footnotesize\ttfamily,
18 |     stringstyle = \color{purple},
19 |     keywordstyle = \color{blue!60!black}\bfseries,
20 |     commentstyle = \color{olive}\scshape,
21 | }
22 | \lstdefinestyle{lnumbers}{
23 |     numbers = left,
24 |     numberstyle = \tiny,
25 |     numbersep = 1em,
26 |     firstnumber = 1,
27 |     stepnumber = 1,
28 | }
29 | \lstdefinestyle{llayout}{
30 |     breaklines = true,
31 |     tabsize = 2,
32 |     columns = flexible,
33 | }
34 | \lstdefinestyle{lgeometry}{
35 |     xleftmargin = 15pt,
36 |     xrightmargin = 0pt,
37 |     frame = tb,
38 |     framesep = \fboxsep,
39 |     framexleftmargin = 15pt,
40 | }
41 | \lstdefinestyle{lgeneral}{
42 |     style = lfonts,
43 |     style = lnumbers,
44 |     style = llayout,
45 |     style = lgeometry,
46 | }
47 | \def\beginlstdelim#1#2#3{%
48 |     \def\endlstdelim{#2\egroup}%
49 |     \ttfamily#1\bgroup\color{#3}\aftergroup\endlstdelim}
50 | \lstdefinestyle{ldelims}{
moredelim = **[is][\beginlstdelim{\$}{\$}{orange}]{\$}{\$}, 52 | moredelim = **[is][\beginlstdelim{\{}{\}}{ForestGreen}]{\{}{\}}, 53 | moredelim = **[is][\beginlstdelim{[}{]}{cyan}]{[}{]}, 54 | } 55 | % LaTeX lst style 56 | \lstdefinestyle{lltx}{ 57 | language = {[LaTeX]TeX}, 58 | style = lgeneral, 59 | style = ldelims, 60 | morekeywords = {% LaTeX original commands 61 | maketitle, 62 | rmfamily, sffamily, ttfamily, 63 | itshape, slshape, scshape, 64 | mdseries, bfseries, emph, 65 | textrm, textsf, texttt, 66 | textit, textsl, textsc, 67 | textmd, textbf, 68 | newcommand, renewcommand, providecommand, 69 | cs, meta, marg, oarg, parg 70 | } 71 | } 72 | \lstdefinestyle{iltx}{ 73 | style = lltx, 74 | basicstyle = \ttfamily 75 | } 76 | \lstdefinestyle{lbash}{ 77 | language = {bash}, 78 | style = lgeneral, 79 | } 80 | \lstdefinestyle{ibash}{ 81 | style = lbash, 82 | basicstyle = \ttfamily 83 | } 84 | 85 | % code style setting 86 | \definecolor{codegreen}{rgb}{0,0.6,0} 87 | \definecolor{codegray}{rgb}{0.5,0.5,0.5} 88 | \definecolor{codepurple}{rgb}{0.58,0,0.82} 89 | \definecolor{backcolour}{rgb}{0.95,0.95,0.92} 90 | 91 | \lstdefinestyle{mystyle}{ 92 | backgroundcolor=\color{backcolour}, 93 | commentstyle=\color{codegreen}, 94 | keywordstyle=\color{magenta}, 95 | numberstyle=\tiny\color{codegray}, 96 | stringstyle=\color{codepurple}, 97 | basicstyle=\footnotesize, 98 | breakatwhitespace=false, 99 | breaklines=true, 100 | captionpos=b, 101 | keepspaces=true, 102 | numbers=left, 103 | numbersep=5pt, 104 | showspaces=false, 105 | showstringspaces=false, 106 | showtabs=false, 107 | tabsize=2 108 | } 109 | \endinput 110 | --------------------------------------------------------------------------------