policy gradient实现mountain_car的代码
时间: 2023-11-16 10:07:03 浏览: 133
code - lecture 7 - policy gradient_policy_Reinforcement_policygr
下面是使用Policy Gradient算法实现Mountain Car问题的代码,使用的是PyTorch框架:
```python
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Policy(nn.Module):
    """Two-layer MLP policy network: observation -> action probabilities.

    Args:
        input_size: dimensionality of the environment observation.
        hidden_size: width of the single hidden layer.
        output_size: number of discrete actions.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Policy, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, output_size)
        # Per-episode buffers used by select_action() and finish_episode().
        # The original code never created these, so the first append at
        # runtime raised AttributeError.
        self.saved_log_probs = []
        self.rewards = []

    def forward(self, x):
        """Return a (batch, output_size) tensor of action probabilities."""
        x = F.relu(self.fc1(x))
        # softmax over the action dimension so each row sums to 1
        x = F.softmax(self.fc2(x), dim=1)
        return x
def select_action(state):
    """Sample an action for `state` from the global policy network.

    Records the sampled action's log-probability in
    `policy.saved_log_probs` for the later REINFORCE update, and returns
    the action as a plain int.
    """
    obs = torch.from_numpy(state).float().unsqueeze(0)
    action_probs = policy(obs)
    dist = torch.distributions.Categorical(action_probs)
    choice = dist.sample()
    policy.saved_log_probs.append(dist.log_prob(choice))
    return choice.item()
def finish_episode():
    """Run one REINFORCE update from the buffers filled during the episode.

    Computes normalized discounted returns, forms the policy-gradient
    loss sum(-log_prob * return), takes one optimizer step, then clears
    the episode buffers on the global `policy`.
    """
    # Discounted returns, computed backwards then flipped to episode order.
    discounted = []
    running = 0
    for reward in reversed(policy.rewards):
        running = reward + gamma * running
        discounted.append(running)
    discounted.reverse()

    returns = torch.tensor(discounted)
    # Normalize returns to zero mean / unit variance; eps avoids div-by-zero.
    returns = (returns - returns.mean()) / (returns.std() + eps)

    losses = [-log_p * ret for log_p, ret in zip(policy.saved_log_probs, returns)]

    optimizer.zero_grad()
    total_loss = torch.cat(losses).sum()
    total_loss.backward()
    optimizer.step()

    # Reset episode buffers for the next rollout.
    policy.rewards.clear()
    policy.saved_log_probs.clear()
# ---- Training loop: REINFORCE on MountainCar-v0 ----
env = gym.make('MountainCar-v0')
gamma = 0.99  # discount factor for returns
# Smallest float32 increment; guards the return-normalization division.
# (Requires `import numpy as np`, which the original snippet omitted.)
eps = np.finfo(np.float32).eps.item()
policy = Policy(env.observation_space.shape[0], 128, env.action_space.n)
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
running_reward = 10  # exponential moving average of episode reward

for i_episode in range(1000):
    # NOTE(review): classic gym API — reset() returns just the observation
    # and step() returns 4 values; confirm the installed gym version matches.
    state, ep_reward = env.reset(), 0
    for t in range(10000):
        action = select_action(state)
        state, reward, done, _ = env.step(action)
        policy.rewards.append(reward)
        ep_reward += reward
        if done:
            break
    # EMA with smoothing factor 0.05.
    running_reward = 0.05 * ep_reward + (1 - 0.05) * running_reward
    finish_episode()
    if i_episode % 50 == 0:
        print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(
            i_episode, ep_reward, running_reward))
    if running_reward > env.spec.reward_threshold:
        print("Solved! Running reward is now {} and "
              "the last episode runs to {} time steps!".format(running_reward, t))
        break
```
这段代码使用了PyTorch的神经网络模块来构建Policy Gradient算法的策略网络,并在Mountain Car环境中进行训练。具体实现过程见上文代码。
阅读全文