A PyTorch example of DDPG

Time: 2023-11-12 08:59:36 | Views: 46
Below is an example implementation of the DDPG algorithm in PyTorch:

```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np

# Use the GPU if available (the original snippet referenced `device` without defining it)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Actor network: maps a state to a deterministic action in [-max_action, max_action]
class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        self.layer1 = nn.Linear(state_dim, 400)
        self.layer2 = nn.Linear(400, 300)
        self.layer3 = nn.Linear(300, action_dim)
        self.max_action = max_action

    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = self.max_action * torch.tanh(self.layer3(x))
        return x

# Critic network: estimates Q(s, a) from the concatenated state-action pair
class Critic(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.layer1 = nn.Linear(state_dim + action_dim, 400)
        self.layer2 = nn.Linear(400, 300)
        self.layer3 = nn.Linear(300, 1)

    def forward(self, x, u):
        xu = torch.cat([x, u], 1)
        x = F.relu(self.layer1(xu))
        x = F.relu(self.layer2(x))
        x = self.layer3(x)
        return x

# DDPG agent
class DDPG(object):
    def __init__(self, state_dim, action_dim, max_action):
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-4)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3)

        self.max_action = max_action

    def select_action(self, state):
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def train(self, replay_buffer, iterations, batch_size=100, discount=0.99, tau=0.005):
        for it in range(iterations):
            # Sample a random batch of transitions from the replay buffer
            batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = replay_buffer.sample(batch_size)
            state = torch.FloatTensor(batch_states).to(device)
            next_state = torch.FloatTensor(batch_next_states).to(device)
            action = torch.FloatTensor(batch_actions).to(device)
            reward = torch.FloatTensor(batch_rewards).to(device)
            done = torch.FloatTensor(batch_dones).to(device)

            # Critic update: regress Q(s, a) toward r + (1 - done) * gamma * Q'(s', mu'(s'))
            target_Q = self.critic_target(next_state, self.actor_target(next_state))
            target_Q = reward + ((1 - done) * discount * target_Q).detach()
            current_Q = self.critic(state, action)
            critic_loss = F.mse_loss(current_Q, target_Q)
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()

            # Actor update: maximize Q(s, mu(s)) by minimizing its negative
            actor_loss = -self.critic(state, self.actor(state)).mean()
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()

            # Soft-update the target networks: theta' <- tau * theta + (1 - tau) * theta'
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)

    def save(self, filename):
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.critic.state_dict(), filename + "_critic")

    def load(self, filename):
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.critic.load_state_dict(torch.load(filename + "_critic"))

# Replay buffer: fixed-size ring buffer of (state, next_state, action, reward, done) transitions
class ReplayBuffer(object):
    def __init__(self, max_size=1000000):
        self.storage = []
        self.max_size = max_size
        self.ptr = 0

    def add(self, state, next_state, action, reward, done):
        data = (state, next_state, action, reward, done)
        if len(self.storage) == self.max_size:
            self.storage[int(self.ptr)] = data
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(data)

    def sample(self, batch_size):
        ind = np.random.randint(0, len(self.storage), size=batch_size)
        batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
        for i in ind:
            state, next_state, action, reward, done = self.storage[i]
            batch_states.append(np.array(state, copy=False))
            batch_next_states.append(np.array(next_state, copy=False))
            batch_actions.append(np.array(action, copy=False))
            batch_rewards.append(np.array(reward, copy=False))
            batch_dones.append(np.array(done, copy=False))
        return (np.array(batch_states), np.array(batch_next_states), np.array(batch_actions),
                np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1))

# Toy environment used to exercise the training loop
class Environment(object):
    def __init__(self):
        self.state_dim = 3
        self.action_dim = 1
        self.max_action = 2.0

    def step(self, action):
        next_state = np.zeros(3)
        next_state[0] = 0.5
        next_state[1] = 0.5 * np.sin(3 * next_state[0]) + 0.5 * np.sin(2 * next_state[1])
        next_state[2] = 0.5 * np.sin(2 * next_state[0]) + 0.5 * np.sin(3 * next_state[1])
        # Reward is the negative Euclidean distance to the goal point [1, 0, 0]
        reward = -np.linalg.norm(next_state - np.array([1, 0, 0]))
        done = False
        return next_state, reward, done

    def reset(self):
        state = np.zeros(3)
        state[0] = np.random.uniform(low=-0.1, high=0.1)
        state[1] = np.random.uniform(low=-0.1, high=0.1)
        state[2] = np.random.uniform(low=-0.1, high=0.1)
        return state

# Train DDPG
env = Environment()
replay_buffer = ReplayBuffer()
ddpg = DDPG(env.state_dim, env.action_dim, env.max_action)

max_episode_steps = 200  # the toy environment never returns done=True, so cap the episode length
for i in range(100000):
    state = env.reset()
    for t in range(max_episode_steps):
        action = ddpg.select_action(state)
        next_state, reward, done = env.step(action)
        replay_buffer.add(state, next_state, action, reward, done)
        state = next_state
        if done:
            break
    if len(replay_buffer.storage) > 1000:
        ddpg.train(replay_buffer, 100)

# Test DDPG
state = env.reset()
for t in range(max_episode_steps):
    action = ddpg.select_action(state)
    next_state, reward, done = env.step(action)
    state = next_state
    print(state)
    if done:
        break

# Save the DDPG model
ddpg.save("ddpg")
```
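Note that `select_action` above returns the purely deterministic policy output; DDPG normally adds exploration noise to actions while collecting training data. A minimal sketch of the common Gaussian-noise variant is shown below; the `expl_noise` scale is an illustrative assumption and not part of the original example.

```python
import numpy as np

expl_noise = 0.1  # assumed noise scale, tuned per task

# During data collection, perturb the deterministic action and clip to the action bounds
action = ddpg.select_action(state)
action = (action + np.random.normal(0, expl_noise * env.max_action, size=env.action_dim)
          ).clip(-env.max_action, env.max_action)
```

The original DDPG paper used time-correlated Ornstein-Uhlenbeck noise; uncorrelated Gaussian noise as sketched here is a simpler alternative that is widely used in later implementations.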
