PyTorch code for DDPG on CartPole
Below is an example implementation of the DDPG algorithm on CartPole using PyTorch:
```python
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

# DDPG actor network: maps a state to an action scaled to [-max_action, max_action]
class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_dim, 400)
        self.fc2 = nn.Linear(400, 300)
        self.fc3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        x = self.max_action * torch.tanh(self.fc3(x))
        return x

# DDPG critic network: estimates Q(s, a) for a state-action pair
class Critic(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, 400)
        self.fc2 = nn.Linear(400, 300)
        self.fc3 = nn.Linear(300, 1)

    def forward(self, state, action):
        x = torch.cat([state, action], 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# DDPG agent: actor/critic networks, their target copies, and a replay buffer
class DDPG:
    def __init__(self, state_dim, action_dim, max_action):
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-4)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3)

        self.memory = ReplayBuffer(1000000, state_dim, action_dim)
        self.batch_size = 128
        self.gamma = 0.99  # discount factor
        self.tau = 0.001   # soft-update rate for the target networks

    def select_action(self, state):
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def train(self):
        if len(self.memory) < self.batch_size:
            return
        state, action, next_state, reward, done = self.memory.sample(self.batch_size)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        reward = torch.FloatTensor(reward).to(device)
        not_done = torch.FloatTensor(1 - done).to(device)

        # Critic loss: TD error against the bootstrapped target Q-value
        next_action = self.actor_target(next_state)
        target_Q = self.critic_target(next_state, next_action)
        target_Q = reward + (not_done * self.gamma * target_Q).detach()
        current_Q = self.critic(state, action)
        critic_loss = F.mse_loss(current_Q, target_Q)

        # Update the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Actor loss: maximize Q(s, actor(s)), i.e. minimize its negative
        actor_loss = -self.critic(state, self.actor(state)).mean()

        # Update the actor
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Soft-update the target networks (Polyak averaging)
        for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    def save(self, filename):
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.critic.state_dict(), filename + "_critic")

    def load(self, filename):
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.critic.load_state_dict(torch.load(filename + "_critic"))

# Experience replay buffer storing (s, a, s', r, done) transitions in a circular array
class ReplayBuffer:
    def __init__(self, max_size, state_dim, action_dim):
        self.max_size = max_size
        self.ptr = 0
        self.size = 0
        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        self.done = np.zeros((max_size, 1))

    def __len__(self):
        return self.size

    def add(self, state, action, next_state, reward, done):
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.done[self.ptr] = done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self, batch_size):
        idx = np.random.randint(0, self.size, size=batch_size)
        return (
            self.state[idx],
            self.action[idx],
            self.next_state[idx],
            self.reward[idx],
            self.done[idx]
        )

# Map the actor's continuous output in [-1, 1] to CartPole's discrete actions {0, 1}.
# CartPole-v1 uses a discrete action space, while DDPG produces continuous actions,
# so the policy output is thresholded before being passed to the environment.
def to_discrete(action):
    return 1 if action[0] > 0 else 0

# Run a few noise-free episodes and return the average episode reward
def eval_policy(policy, eval_episodes=5):
    eval_env = gym.make("CartPole-v1")
    avg_reward = 0.0
    for _ in range(eval_episodes):
        obs, done = eval_env.reset(), False
        while not done:
            action = policy.select_action(np.array(obs))
            obs, reward, done, _ = eval_env.step(to_discrete(action))
            avg_reward += reward
    return avg_reward / eval_episodes

# Train DDPG on the CartPole environment
# (this loop uses the classic gym API: reset() returns obs, step() returns 4 values)
if __name__ == "__main__":
    env = gym.make("CartPole-v1")
    state_dim = env.observation_space.shape[0]
    # CartPole's action space is Discrete(2), so the actor outputs one value in [-1, 1]
    action_dim = 1
    max_action = 1.0
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ddpg = DDPG(state_dim, action_dim, max_action)
    total_timesteps = 0
    timesteps_since_eval = 0
    episode_num = 0
    done = True
    while total_timesteps < 1000000:
        if done:
            if total_timesteps != 0:
                print("Total Timesteps: {} Episode Num: {} Reward: {}".format(total_timesteps, episode_num, episode_reward))
                # One gradient step per environment step collected in the finished episode
                for _ in range(episode_timesteps):
                    ddpg.train()
            obs = env.reset()
            done = False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
        if total_timesteps < 10000:
            # Warm-up phase: uniformly random continuous actions for exploration
            action = np.random.uniform(-max_action, max_action, size=(action_dim,))
        else:
            # Deterministic policy output plus Gaussian exploration noise
            action = ddpg.select_action(np.array(obs))
            action = action + np.random.normal(0, 0.1 * max_action, size=action_dim)
            action = np.clip(action, -max_action, max_action)
        new_obs, reward, done, _ = env.step(to_discrete(action))
        episode_reward += reward
        ddpg.memory.add(obs, action, new_obs, reward, done)
        obs = new_obs
        total_timesteps += 1
        episode_timesteps += 1
        timesteps_since_eval += 1
        if total_timesteps % 10000 == 0:
            ddpg.save("ddpg")
        if timesteps_since_eval >= 5000:
            timesteps_since_eval %= 5000
            eval_reward = eval_policy(ddpg)
            print("Timestep: {} Eval reward: {}".format(total_timesteps, eval_reward))
```
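Note that CartPole-v1 exposes a discrete action space (`Discrete(2)`), while DDPG is designed for continuous control; that is why the script above has the actor output a single continuous value in [-1, 1] and thresholds it with `to_discrete` before calling `env.step`. If you would rather apply DDPG to a natively continuous task, an environment such as Pendulum-v1 should work with only the environment setup changed; a minimal sketch, assuming the rest of the script is kept and actions are passed to `env.step` directly instead of through `to_discrete`:
```python
# Sketch: swapping in a natively continuous environment (Pendulum-v1)
env = gym.make("Pendulum-v1")
state_dim = env.observation_space.shape[0]    # 3 for Pendulum-v1
action_dim = env.action_space.shape[0]        # 1 for Pendulum-v1
max_action = float(env.action_space.high[0])  # 2.0 for Pendulum-v1
ddpg = DDPG(state_dim, action_dim, max_action)
```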
Here, `Actor` and `Critic` implement the actor and critic networks, `DDPG` implements the DDPG algorithm itself, and `ReplayBuffer` implements the experience replay buffer.
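As a quick usage example of the `DDPG` class's `save`/`load` interface, the sketch below (assuming the classes above live in the same module and that a checkpoint was saved under the prefix `"ddpg"` by the training loop) reloads the trained weights and runs one noise-free episode:
```python
# Sketch: reload a saved checkpoint and run one greedy (noise-free) episode
env = gym.make("CartPole-v1")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
agent = DDPG(state_dim=env.observation_space.shape[0], action_dim=1, max_action=1.0)
agent.load("ddpg")  # reads "ddpg_actor" and "ddpg_critic"

obs, done, total_reward = env.reset(), False, 0.0
while not done:
    action = agent.select_action(np.array(obs))
    obs, reward, done, _ = env.step(1 if action[0] > 0 else 0)  # threshold to a discrete action
    total_reward += reward
print("Greedy episode reward:", total_reward)
```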
When training on the CartPole environment, you can often get better results by tuning the hyperparameters, for example `batch_size`, `gamma`, and `tau`.
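Since `batch_size`, `gamma`, and `tau` are plain attributes of the `DDPG` instance, one simple way to experiment is to override them right after constructing the agent (the values below are illustrative, not tuned settings):
```python
ddpg = DDPG(state_dim, action_dim, max_action)
ddpg.batch_size = 256   # larger batches give smoother gradient estimates
ddpg.gamma = 0.98       # discount factor; smaller values emphasize short-term reward
ddpg.tau = 0.005        # faster soft updates of the target networks
```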