```python
import numpy as np

class PPOMemory:
    def __init__(self, batch_size):
        self.states = []
        self.probs = []
        self.vals = []
        self.actions = []
        self.rewards = []
        self.dones = []
        self.batch_size = batch_size

    def sample(self):
        batch_step = np.arange(0, len(self.states), self.batch_size)
        indices = np.arange(len(self.states), dtype=np.int64)
        np.random.shuffle(indices)
        batches = [indices[i:i + self.batch_size] for i in batch_step]
        return (np.array(self.states), np.array(self.actions), np.array(self.probs),
                np.array(self.vals), np.array(self.rewards), np.array(self.dones),
                batches)

    def push(self, state, action, probs, vals, reward, done):
        self.states.append(state)
        self.actions.append(action)
        self.probs.append(probs)
        self.vals.append(vals)
        self.rewards.append(reward)
        self.dones.append(done)

    def clear(self):
        self.states = []
        self.probs = []
        self.actions = []
        self.rewards = []
        self.dones = []
        self.vals = []
```
This code implements the experience buffer used by a PPO implementation to store and sample interaction data. (Strictly speaking, PPO is on-policy, so this is a rollout buffer that is cleared after every update rather than a long-lived experience replay.) The class has the following member functions:
1. Constructor `__init__`: stores the mini-batch size `batch_size` and creates empty lists for states, actions, log-probabilities, value estimates, rewards, and done flags.
2. Sampling function `sample`: shuffles the indices of all stored transitions, splits them into mini-batches of size `batch_size`, and returns the full arrays of states, actions, probabilities, values, rewards, and done flags together with the list of index batches used to slice them.
3. Storage function `push`: appends one transition (state, action, probs, vals, reward, done) to the buffer.
4. Clearing function `clear`: empties the buffer so it can be reused for the next rollout.
Together these let PPO learn effectively from each batch of collected experience over several update epochs, with the shuffling in `sample` decorrelating consecutive transitions so that strongly correlated data does not bias the updates.
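For illustration, here is a minimal usage sketch of the class (the observation size and transition values are placeholders):
```python
import numpy as np

memory = PPOMemory(batch_size=4)

# Collect a short rollout of dummy transitions
for step in range(10):
    state = np.random.rand(3)  # hypothetical 3-dimensional observation
    memory.push(state, action=0, probs=-1.2, vals=0.5, reward=1.0, done=False)

states, actions, probs, vals, rewards, dones, batches = memory.sample()
for batch in batches:             # each batch is an array of shuffled indices
    batch_states = states[batch]  # slice the full arrays with the index batch
    # ... compute the PPO losses on this mini-batch ...

memory.clear()  # empty the buffer before the next rollout
```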
Related questions
Multi-agent formation actor-critic PyTorch code
Below is a simple PyTorch implementation of a multi-agent formation actor-critic algorithm. It demonstrates how to train several agents with actor-critic in a simple environment containing one target point and multiple agents, where each agent's task is to get as close to the target as possible without colliding.
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random

# Define an agent class
class Agent:
    def __init__(self, state_size, action_size, agent_id, num_agents):
        self.state_size = state_size
        self.action_size = action_size
        self.agent_id = agent_id
        self.num_agents = num_agents
        # Actor and critic networks
        self.actor = Actor(state_size, action_size)
        self.critic = Critic(state_size)
        # Optimizers
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=0.001)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=0.001)
        # Experience replay buffer
        self.memory = ReplayBuffer(action_size)

    def act(self, state):
        state = torch.from_numpy(state).float().unsqueeze(0)
        action_probs = F.softmax(self.actor(state), dim=1)
        action_probs = action_probs.detach().numpy().squeeze()
        action = np.random.choice(self.action_size, p=action_probs)
        return action

    def learn(self, experiences, gamma):
        states, actions, rewards, next_states, dones = experiences
        # Critic loss: the critic outputs a single state value V(s),
        # so the regression target is the one-step TD return
        values = self.critic(states)
        next_values = self.critic(next_states)
        targets = rewards + gamma * next_values * (1 - dones)
        critic_loss = F.mse_loss(values, targets.detach())
        # Update the critic network
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Actor loss: log-probability of the taken action weighted by the TD advantage
        probs = F.softmax(self.actor(states), dim=1)
        log_probs = torch.log(probs.gather(1, actions))
        values = self.critic(states)
        advantages = (targets - values).detach()
        actor_loss = -(log_probs * advantages).mean()
        # Update the actor network
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

# Define the actor network
class Actor(nn.Module):
    def __init__(self, state_size, action_size):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_size, 32)
        self.fc2 = nn.Linear(32, 64)
        self.fc3 = nn.Linear(64, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Define the critic network
class Critic(nn.Module):
    def __init__(self, state_size):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_size, 32)
        self.fc2 = nn.Linear(32, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Define an experience replay buffer
class ReplayBuffer:
    def __init__(self, action_size, buffer_size=10000, batch_size=128):
        self.action_size = action_size
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.memory = []
        self.position = 0

    def __len__(self):
        return len(self.memory)

    def add(self, state, action, reward, next_state, done):
        experience = (state, action, reward, next_state, done)
        if len(self.memory) < self.buffer_size:
            self.memory.append(None)
        self.memory[self.position] = experience
        self.position = (self.position + 1) % self.buffer_size

    def sample(self):
        experiences = random.sample(self.memory, k=self.batch_size)
        states = torch.from_numpy(np.vstack([e[0] for e in experiences if e is not None])).float()
        actions = torch.from_numpy(np.vstack([e[1] for e in experiences if e is not None])).long()
        rewards = torch.from_numpy(np.vstack([e[2] for e in experiences if e is not None])).float()
        next_states = torch.from_numpy(np.vstack([e[3] for e in experiences if e is not None])).float()
        dones = torch.from_numpy(np.vstack([e[4] for e in experiences if e is not None]).astype(np.uint8)).float()
        return (states, actions, rewards, next_states, dones)

# Define the environment: one target point and several agents on a 2-D plane
class Env:
    def __init__(self, num_agents):
        self.num_agents = num_agents
        self.state_size = 4   # own (x, y) plus target (x, y)
        self.action_size = 4  # four discrete moves: +x, -x, +y, -y
        # Displacement corresponding to each discrete action
        self.moves = np.array([[0.1, 0.0], [-0.1, 0.0], [0.0, 0.1], [0.0, -0.1]])
        self.target_pos = np.array([0.0, 0.0])
        self.agent_pos = np.random.uniform(-1, 1, size=(self.num_agents, 2))

    def _get_obs(self):
        # Each agent observes its own position and the shared target position
        return np.hstack([self.agent_pos, np.tile(self.target_pos, (self.num_agents, 1))])

    def reset(self):
        self.target_pos = np.array([0.0, 0.0])
        self.agent_pos = np.random.uniform(-1, 1, size=(self.num_agents, 2))
        return self._get_obs()

    def step(self, actions):
        # Map discrete action indices to 2-D displacements
        self.agent_pos += self.moves[np.asarray(actions)]
        self.agent_pos = np.clip(self.agent_pos, -1, 1)
        obs = self._get_obs()
        rewards = np.zeros(self.num_agents)
        dones = np.zeros(self.num_agents)
        for i in range(self.num_agents):
            dist = np.linalg.norm(self.agent_pos[i] - self.target_pos)
            if dist < 0.1:  # agent has reached the target
                rewards[i] = 1
                dones[i] = 1
        return obs, rewards, dones

# Define a multi-agent formation trainer
class MultiAgentFormation:
    def __init__(self, num_agents):
        self.env = Env(num_agents)
        self.num_agents = num_agents
        self.state_size = self.env.state_size
        self.action_size = self.env.action_size
        self.gamma = 0.99
        self.agents = [Agent(self.env.state_size, self.env.action_size, i, self.num_agents)
                       for i in range(self.num_agents)]

    def train(self, num_episodes=1000, max_t=1000):
        for i_episode in range(num_episodes):
            obs = self.env.reset()
            for t in range(max_t):
                actions = [self.agents[i].act(obs[i]) for i in range(self.num_agents)]
                next_obs, rewards, dones = self.env.step(actions)
                for i in range(self.num_agents):
                    self.agents[i].memory.add(obs[i], actions[i], rewards[i], next_obs[i], dones[i])
                obs = next_obs
                if len(self.agents[0].memory) > self.agents[0].memory.batch_size:
                    for agent in self.agents:
                        experiences = agent.memory.sample()
                        agent.learn(experiences, self.gamma)
                if np.any(dones):
                    break

# Test
ma = MultiAgentFormation(num_agents=3)
ma.train()
```
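One caveat: `Agent.learn` samples past transitions from a replay buffer, which makes the policy-gradient update slightly off-policy. For a toy demo this is usually tolerable, but an on-policy rollout buffer (like the PPO memory above) or importance weighting is the more principled choice.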
Using an RBM to recognize anomaly patterns in a tailings dam, with code
Below is a Python (TensorFlow) implementation of an RBM model for tailings dam anomaly pattern recognition.
First, import the necessary libraries:
```python
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
```
Next, read the tailings dam dataset and split it into training and test sets:
```python
data = pd.read_csv('tailings_dataset.csv')
X = data.drop(['Class'], axis=1).values
y = data['Class'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```
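Since a Bernoulli RBM treats visible units as probabilities in [0, 1], it is worth scaling the features first (an added step, assuming the raw columns are continuous sensor readings):
```python
from sklearn.preprocessing import MinMaxScaler

# Scale each feature to [0, 1]; the RBM below interprets
# visible values as activation probabilities in that range
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
```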
Then, define the RBM class:
```python
class RBM:
    def __init__(self, n_visible, n_hidden, learning_rate=0.1, batch_size=10, n_epochs=100):
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.W = tf.Variable(tf.random.normal(shape=(n_visible, n_hidden), mean=0.0, stddev=0.01))
        # Biases are row vectors so they broadcast over the batch dimension
        self.v_bias = tf.Variable(tf.zeros(shape=(1, n_visible)))
        self.h_bias = tf.Variable(tf.zeros(shape=(1, n_hidden)))

    def sigmoid(self, x):
        return 1 / (1 + tf.exp(-x))

    def prob_h_given_v(self, v):
        return self.sigmoid(tf.matmul(v, self.W) + self.h_bias)

    def prob_v_given_h(self, h):
        return self.sigmoid(tf.matmul(h, tf.transpose(self.W)) + self.v_bias)

    def sample_prob(self, probs):
        # Sample binary states: 1 where a uniform draw falls below the probability
        return tf.nn.relu(tf.sign(probs - tf.random.uniform(shape=tf.shape(probs))))

    def gibbs_sampling(self, v):
        # One full Gibbs step: v -> h -> v'
        h_probs = self.prob_h_given_v(v)
        h_states = self.sample_prob(h_probs)
        v_probs = self.prob_v_given_h(h_states)
        v_states = self.sample_prob(v_probs)
        return h_states, v_probs, v_states

    def train(self, X):
        n_batches = int(np.ceil(len(X) / self.batch_size))
        X_tf = tf.convert_to_tensor(X, dtype=tf.float32)
        for epoch in range(self.n_epochs):
            for batch in range(n_batches):
                v0 = X_tf[batch * self.batch_size:(batch + 1) * self.batch_size]
                # Positive phase: hidden probabilities driven by the data
                h0_probs = self.prob_h_given_v(v0)
                # Negative phase: one Gibbs step starting from the data (CD-1)
                _, v1_probs, v1_states = self.gibbs_sampling(v0)
                h1_probs = self.prob_h_given_v(v1_states)
                # Contrastive-divergence gradients
                positive_grad = tf.matmul(tf.transpose(v0), h0_probs)
                negative_grad = tf.matmul(tf.transpose(v1_states), h1_probs)
                n = tf.cast(tf.shape(v0)[0], tf.float32)
                W_grad = (positive_grad - negative_grad) / n
                vb_grad = tf.reduce_mean(v0 - v1_states, axis=0, keepdims=True)
                hb_grad = tf.reduce_mean(h0_probs - h1_probs, axis=0, keepdims=True)
                self.W.assign_add(self.learning_rate * W_grad)
                self.v_bias.assign_add(self.learning_rate * vb_grad)
                self.h_bias.assign_add(self.learning_rate * hb_grad)
```
Finally, instantiate the RBM class and train it:
```python
rbm = RBM(n_visible=X_train.shape[1], n_hidden=10, learning_rate=0.01, batch_size=20, n_epochs=1000)
rbm.train(X_train)
```
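As a quick sanity check (a sketch using only the methods defined above), you can measure how well the trained RBM reconstructs the training data; this error should shrink as training progresses:
```python
X_train_tf = tf.convert_to_tensor(X_train, dtype=tf.float32)
h = rbm.prob_h_given_v(X_train_tf)
recon = rbm.prob_v_given_h(h)
print('Mean reconstruction error:',
      tf.reduce_mean(tf.square(X_train_tf - recon)).numpy())
```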
The trained model can then be used for anomaly detection. A standard way to score anomalies with an RBM is the reconstruction error: samples the model reconstructs poorly are flagged as anomalous (this assumes the `Class` column encodes anomalies as 1 and normal records as 0):
```python
X_test_tf = tf.convert_to_tensor(X_test, dtype=tf.float32)
h_probs = rbm.prob_h_given_v(X_test_tf)
v_probs = rbm.prob_v_given_h(h_probs)
# Reconstruction error per sample; poorly reconstructed samples are flagged as anomalous
errors = tf.reduce_mean(tf.square(X_test_tf - v_probs), axis=1).numpy()
threshold = np.percentile(errors, 90)  # illustrative threshold; tune on validation data
y_pred = (errors > threshold).astype(int)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)
```
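Note that the 90th-percentile threshold above is purely illustrative; in practice the cutoff should be tuned on a validation set, for example to match the expected anomaly rate in the dam's monitoring data.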