DDPG implementation in PyTorch
Below is an example implementation of the DDPG algorithm in PyTorch:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import gym

# Use the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Actor network: maps a state to a deterministic action within the action bounds
class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        self.layer1 = nn.Linear(state_dim, 400)
        self.layer2 = nn.Linear(400, 300)
        self.layer3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        # tanh bounds the output to [-1, 1]; scale it to the environment's action range
        x = self.max_action * torch.tanh(self.layer3(x))
        return x

# Critic network: estimates Q(s, a) for a state-action pair
class Critic(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.layer1 = nn.Linear(state_dim + action_dim, 400)
        self.layer2 = nn.Linear(400, 300)
        self.layer3 = nn.Linear(300, 1)

    def forward(self, x, u):
        xu = torch.cat([x, u], 1)
        x = F.relu(self.layer1(xu))
        x = F.relu(self.layer2(x))
        x = self.layer3(x)
        return x

# DDPG agent: actor-critic with target networks and soft updates
class DDPG(object):
    def __init__(self, state_dim, action_dim, max_action):
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-4)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3)

        self.max_action = max_action

    def select_action(self, state):
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def train(self, replay_buffer, iterations, batch_size=64, discount=0.99, tau=0.001):
        for it in range(iterations):
            # Sample a random mini-batch from the replay buffer
            batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(batch_size)
            state = torch.FloatTensor(batch_states).to(device)
            next_state = torch.FloatTensor(batch_next_states).to(device)
            action = torch.FloatTensor(batch_actions).to(device)
            reward = torch.FloatTensor(batch_rewards).to(device)
            done = torch.FloatTensor(batch_dones).to(device)

            # Current Q estimate and bootstrapped target Q
            Q = self.critic(state, action)
            next_action = self.actor_target(next_state)
            next_Q = self.critic_target(next_state, next_action.detach())
            target_Q = reward + (1 - done) * discount * next_Q

            # Critic loss (TD error) and update
            critic_loss = F.mse_loss(Q, target_Q.detach())
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()

            # Actor loss (maximize Q of the actor's own actions) and update
            actor_loss = -self.critic(state, self.actor(state)).mean()
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()

            # Soft (Polyak) update of the target networks
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

# Experience replay buffer
class ReplayBuffer(object):
    def __init__(self, max_size=1000000):
        self.storage = []
        self.max_size = max_size
        self.ptr = 0

    def add(self, state, next_state, action, reward, done):
        data = (state, next_state, action, reward, done)
        if len(self.storage) == self.max_size:
            # Buffer full: overwrite the oldest transitions in ring-buffer fashion
            self.storage[int(self.ptr)] = data
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(data)

    def sample(self, batch_size):
        ind = np.random.randint(0, len(self.storage), size=batch_size)
        batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
        for i in ind:
            state, next_state, action, reward, done = self.storage[i]
            batch_states.append(np.array(state, copy=False))
            batch_next_states.append(np.array(next_state, copy=False))
            batch_actions.append(np.array(action, copy=False))
            batch_rewards.append(np.array(reward, copy=False))
            batch_dones.append(np.array(done, copy=False))
        return (np.array(batch_states), np.array(batch_next_states), np.array(batch_actions),
                np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1))

# Hyperparameters
env_name = "MountainCarContinuous-v0"
seed = 0
start_timesteps = 1e4   # random-action warm-up steps before using the policy
max_timesteps = 5e5
expl_noise = 0.1        # std of Gaussian exploration noise, relative to max_action
batch_size = 64
discount = 0.99
tau = 0.001             # soft-update coefficient for the target networks

# Create the environment (classic Gym API: env.seed() and 4-value env.step() returns)
env = gym.make(env_name)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])

# Set random seeds for reproducibility
env.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)

# Create the DDPG agent and the replay buffer
ddpg = DDPG(state_dim, action_dim, max_action)
replay_buffer = ReplayBuffer()

# Initialize the episode state and score
state, done, score = env.reset(), False, 0

# Training loop
for t in range(int(max_timesteps)):
    # Act randomly during the warm-up phase, then use the noisy policy
    if t < start_timesteps:
        action = env.action_space.sample()
    else:
        action = (
            ddpg.select_action(np.array(state))
            + np.random.normal(0, max_action * expl_noise, size=action_dim)
        ).clip(-max_action, max_action)

    # Step the environment and store the transition
    next_state, reward, done, _ = env.step(action)
    replay_buffer.add(state, next_state, action, reward, done)
    state = next_state
    score += reward

    if done:
        state, done, score = env.reset(), False, 0

    # One gradient update per environment step once the warm-up phase is over
    if t >= start_timesteps:
        ddpg.train(replay_buffer, iterations=1, batch_size=batch_size, discount=discount, tau=tau)

    if t % 10000 == 0:
        print("Timestep: {} Score: {}".format(t, score))

# Close the environment
env.close()
```
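The training loop above uses simple Gaussian exploration noise. The original DDPG paper instead used an Ornstein-Uhlenbeck process to generate temporally correlated noise, which can help exploration in environments with inertia. Below is a minimal sketch of such a process; the class name `OUNoise` and the parameter values are illustrative assumptions, not part of the code above:
```python
import numpy as np

# Minimal Ornstein-Uhlenbeck noise sketch (illustrative; parameter values are common defaults, not from the code above)
class OUNoise:
    def __init__(self, action_dim, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(action_dim)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        # Reset the internal state to the mean at the start of each episode
        self.state = np.copy(self.mu)

    def sample(self):
        # dx = theta * (mu - x) + sigma * N(0, 1), Euler step with dt = 1
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.state))
        self.state = self.state + dx
        return self.state
```
In the training loop, `noise.sample()` would replace the `np.random.normal(...)` term, and `noise.reset()` would be called whenever the environment is reset.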
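After training, the deterministic policy can be evaluated without exploration noise. The following sketch assumes the script above has already been run (so `ddpg` and `env_name` exist) and uses the same classic Gym API; the choice of 10 evaluation episodes is arbitrary:
```python
# Evaluate the trained actor deterministically (no exploration noise)
eval_env = gym.make(env_name)  # re-create the environment, since the training script closes it
eval_episodes = 10             # arbitrary number of evaluation episodes
returns = []
for _ in range(eval_episodes):
    state, done, episode_return = eval_env.reset(), False, 0.0
    while not done:
        action = ddpg.select_action(np.array(state))
        state, reward, done, _ = eval_env.step(action)
        episode_return += reward
    returns.append(episode_return)
print("Average return over {} episodes: {:.2f}".format(eval_episodes, np.mean(returns)))
eval_env.close()
```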