MADDPG Code for Mixed Environments and Discrete Action Spaces
Below is an example of MADDPG code for mixed multi-agent environments, where each agent can have its own state and action dimensions. The actor shown in the listing emits continuous actions; a Gumbel-Softmax actor variant for strictly discrete action spaces is sketched after the listing.
```python
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# Define the Actor and Critic networks
# Actor: maps one agent's local observation to that agent's action
class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, hidden_size):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, action_dim)

    def forward(self, state):
        x = torch.relu(self.fc1(state))
        x = torch.relu(self.fc2(x))
        x = torch.tanh(self.fc3(x))  # bound actions to [-1, 1]
        return x

# Centralized Critic: scores the joint state and joint action of all agents
class Critic(nn.Module):
    def __init__(self, state_dim, action_dim, hidden_size):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, 1)

    def forward(self, state, action):
        x = torch.cat([state, action], dim=1)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Define the MADDPG class
class MADDPG:
    def __init__(self, state_dim, action_dim, hidden_size,
                 discount_factor=0.99, tau=0.01, learning_rate=0.001):
        self.num_agents = len(state_dim)
        self.state_dim = state_dim    # list of per-agent state dimensions
        self.action_dim = action_dim  # list of per-agent action dimensions
        self.discount_factor = discount_factor
        self.tau = tau
        # Initialize the Actor and Critic networks: one decentralized actor per
        # agent, plus one centralized critic per agent over the joint state/action
        self.actors = [Actor(state_dim[i], action_dim[i], hidden_size) for i in range(self.num_agents)]
        self.target_actors = [Actor(state_dim[i], action_dim[i], hidden_size) for i in range(self.num_agents)]
        self.critics = [Critic(sum(state_dim), sum(action_dim), hidden_size) for _ in range(self.num_agents)]
        self.target_critics = [Critic(sum(state_dim), sum(action_dim), hidden_size) for _ in range(self.num_agents)]
        # Initialize the Actor and Critic optimizers
        self.actor_optimizers = [optim.Adam(actor.parameters(), lr=learning_rate) for actor in self.actors]
        self.critic_optimizers = [optim.Adam(critic.parameters(), lr=learning_rate) for critic in self.critics]
        # Copy the local network parameters into the target networks
        for i in range(self.num_agents):
            self.target_actors[i].load_state_dict(self.actors[i].state_dict())
            self.target_critics[i].load_state_dict(self.critics[i].state_dict())

    def get_actions(self, states, noise_scale=0.1):
        # states: list with one observation array per agent
        actions = []
        for i in range(self.num_agents):
            state = torch.FloatTensor(states[i]).unsqueeze(0)
            action = self.actors[i](state).squeeze(0).detach().numpy()
            # Gaussian exploration noise, clipped to the valid action range
            noise = noise_scale * np.random.randn(*action.shape)
            action = np.clip(action + noise, -1, 1)
            actions.append(action)
        return actions

    def update(self, states, actions, rewards, next_states, dones):
        # Each argument is a list with one batch per agent, e.g. states[i] has
        # shape (batch, state_dim[i]) and rewards[i] has shape (batch,)
        states = [torch.FloatTensor(s) for s in states]
        actions = [torch.FloatTensor(a) for a in actions]
        rewards = [torch.FloatTensor(r).unsqueeze(1) for r in rewards]
        next_states = [torch.FloatTensor(s) for s in next_states]
        dones = [torch.FloatTensor(d).unsqueeze(1) for d in dones]
        # Joint (concatenated) views for the centralized critics
        joint_states = torch.cat(states, dim=1)
        joint_actions = torch.cat(actions, dim=1)
        joint_next_states = torch.cat(next_states, dim=1)
        # Joint next action produced by the target actors
        target_actions = torch.cat(
            [self.target_actors[i](next_states[i]).detach() for i in range(self.num_agents)],
            dim=1)
        # Update the Critic networks
        for i in range(self.num_agents):
            target_q = self.target_critics[i](joint_next_states, target_actions).detach()
            target_q = rewards[i] + self.discount_factor * target_q * (1 - dones[i])
            q = self.critics[i](joint_states, joint_actions)
            critic_loss = nn.functional.mse_loss(q, target_q)
            self.critic_optimizers[i].zero_grad()
            critic_loss.backward()
            self.critic_optimizers[i].step()
        # Update the Actor networks
        for i in range(self.num_agents):
            # Re-evaluate agent i's action; hold the other agents' actions fixed
            joint_action_i = list(actions)
            joint_action_i[i] = self.actors[i](states[i])
            joint_action_i = torch.cat(joint_action_i, dim=1)
            actor_loss = -self.critics[i](joint_states, joint_action_i).mean()
            self.actor_optimizers[i].zero_grad()
            actor_loss.backward()
            self.actor_optimizers[i].step()
        # Soft-update the target network parameters
        for i in range(self.num_agents):
            for local_params, target_params in zip(self.actors[i].parameters(), self.target_actors[i].parameters()):
                target_params.data.copy_(self.tau * local_params.data + (1 - self.tau) * target_params.data)
            for local_params, target_params in zip(self.critics[i].parameters(), self.target_critics[i].parameters()):
                target_params.data.copy_(self.tau * local_params.data + (1 - self.tau) * target_params.data)
```
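Note that the `tanh` actor in the listing emits continuous actions in [-1, 1]. For agents with strictly discrete action spaces, a common approach is a Gumbel-Softmax output, which produces a differentiable (relaxed) one-hot action that the centralized critic can still backpropagate through. Here is a minimal sketch of such an actor; `GumbelSoftmaxActor` and its layer sizes are illustrative stand-ins, not part of the code above:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class GumbelSoftmaxActor(nn.Module):
    """Actor for a discrete action space: emits a (relaxed) one-hot vector."""
    def __init__(self, state_dim, num_actions, hidden_size):
        super(GumbelSoftmaxActor, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, num_actions)  # one logit per action

    def forward(self, state, temperature=1.0, hard=False):
        x = torch.relu(self.fc1(state))
        x = torch.relu(self.fc2(x))
        logits = self.fc3(x)
        # Differentiable sample from the implied categorical distribution;
        # hard=True returns a one-hot vector while keeping soft gradients
        return F.gumbel_softmax(logits, tau=temperature, hard=hard)
```
With this actor, exploration in `get_actions` would come from the sampling itself (e.g. `hard=True`) rather than from Gaussian noise and clipping, and `action_dim[i]` would be the number of discrete actions for agent i, since the critics consume the one-hot vectors directly.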
In the main listing, `state_dim` is a list of each agent's state-space dimension, `action_dim` is a list of each agent's action-space dimension, `hidden_size` is the hidden-layer width of the Actor and Critic networks, `discount_factor` is the discount factor, `tau` is the soft-update coefficient (each target parameter moves toward its local counterpart as `target ← tau * local + (1 - tau) * target`), and `learning_rate` is the learning rate shared by the Actor and Critic optimizers. `get_actions` returns one action per agent given their current observations, and `update` performs one gradient step on every Critic and Actor from a batch of joint transitions, then soft-updates the target networks.
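As a quick smoke test, the class can be driven with random data. The agent count, dimensions, and batch size below are made up for illustration; in a real training loop, `update` would consume minibatches sampled from a replay buffer:
```python
import numpy as np

# Hypothetical setup: two agents with different observation sizes
state_dim = [8, 10]   # per-agent state dimensions (illustrative)
action_dim = [2, 2]   # per-agent action dimensions (illustrative)
maddpg = MADDPG(state_dim, action_dim, hidden_size=64)

# Acting: one observation per agent
obs = [np.random.randn(d).astype(np.float32) for d in state_dim]
actions = maddpg.get_actions(obs)

# Learning: a random batch of 32 joint transitions
batch = 32
states = [np.random.randn(batch, d).astype(np.float32) for d in state_dim]
acts = [np.random.uniform(-1, 1, (batch, d)).astype(np.float32) for d in action_dim]
rewards = [np.random.randn(batch).astype(np.float32) for _ in state_dim]
next_states = [np.random.randn(batch, d).astype(np.float32) for d in state_dim]
dones = [np.zeros(batch, dtype=np.float32) for _ in state_dim]
maddpg.update(states, acts, rewards, next_states, dones)
```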