Python reinforcement learning code
Reinforcement learning code in Python is usually built on top of existing libraries: OpenAI Gym commonly provides the environments, and PyTorch is a popular choice for the neural networks. Below is an example that implements the DQN algorithm with PyTorch:
```python
import gym
import torch
import random
import numpy as np
from collections import deque
from torch import nn, optim
class DQN(nn.Module):
    """Three-layer fully connected network mapping a state to one Q-value per action."""
    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(state_dim, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, action_dim)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)


class ReplayBuffer:
    """Fixed-size experience replay buffer of (state, action, reward, next_state, done) tuples."""
    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
        return (np.array(state), np.array(action), np.array(reward, dtype=np.float32),
                np.array(next_state), np.array(done, dtype=np.uint8))

    def __len__(self):
        return len(self.buffer)


class Agent:
    """DQN agent with an epsilon-greedy policy, a policy network and a periodically synced target network."""
    def __init__(self, state_dim, action_dim, lr, gamma, epsilon, buffer_capacity, batch_size):
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.action_dim = action_dim
        self.gamma = gamma        # discount factor
        self.epsilon = epsilon    # exploration rate
        self.batch_size = batch_size
        self.buffer = ReplayBuffer(buffer_capacity)
        self.policy_net = DQN(state_dim, action_dim).to(self.device)
        self.target_net = DQN(state_dim, action_dim).to(self.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr)

    def act(self, state):
        # Epsilon-greedy action selection.
        if random.random() < self.epsilon:
            return random.randint(0, self.action_dim - 1)
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        with torch.no_grad():
            q_value = self.policy_net(state)
        return q_value.argmax(dim=1).item()

    def update(self):
        # Skip the update until the buffer holds at least one full batch.
        if len(self.buffer) < self.batch_size:
            return
        state, action, reward, next_state, done = self.buffer.sample(self.batch_size)
        state = torch.FloatTensor(state).to(self.device)
        action = torch.LongTensor(action).to(self.device)
        reward = torch.FloatTensor(reward).to(self.device)
        next_state = torch.FloatTensor(next_state).to(self.device)
        done = torch.FloatTensor(done).to(self.device)
        # Q(s, a) from the policy network for the actions actually taken.
        q_value = self.policy_net(state).gather(1, action.unsqueeze(1)).squeeze(1)
        # TD target: r + gamma * max_a' Q_target(s', a'), zeroed on terminal states.
        next_q_value = self.target_net(next_state).max(1)[0]
        expected_q_value = reward + self.gamma * next_q_value * (1 - done)
        loss = nn.MSELoss()(q_value, expected_q_value.detach())
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def update_target(self):
        # Copy the policy network weights into the target network.
        self.target_net.load_state_dict(self.policy_net.state_dict())

    def save(self, path):
        torch.save(self.policy_net.state_dict(), path)

    def load(self, path):
        self.policy_net.load_state_dict(torch.load(path))


# Note: this script assumes the classic Gym API (gym < 0.26), where env.reset()
# returns only the observation and env.step() returns (obs, reward, done, info).
env = gym.make('CartPole-v0')
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = Agent(state_dim, action_dim, lr=0.001, gamma=0.99, epsilon=0.1,
              buffer_capacity=10000, batch_size=64)

for episode in range(1000):
    state = env.reset()
    total_reward = 0
    while True:
        action = agent.act(state)
        next_state, reward, done, _ = env.step(action)
        agent.buffer.push(state, action, reward, next_state, done)
        total_reward += reward
        state = next_state
        agent.update()
        if done:
            break
    agent.update_target()
    print("Episode: {}, total reward: {}".format(episode, total_reward))

agent.save("dqn.pth")
```
This code implements an agent that solves the CartPole-v0 environment with the DQN algorithm. The DQN class defines a three-layer fully connected network, the ReplayBuffer class implements an experience replay buffer, and the Agent class ties them together: it holds the policy network and the target network and implements epsilon-greedy action selection, replay sampling, and network updates. The main script trains the agent on CartPole-v0 for 1000 episodes, syncing the target network after every episode, and saves the trained policy network to the file "dqn.pth".
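As a quick follow-up, here is a minimal sketch of how the saved weights could be reloaded and the learned policy evaluated greedily. It reuses the Agent class, state_dim, action_dim, and the "dqn.pth" file from the script above, and sets epsilon to 0 so the agent always picks the action with the highest predicted Q-value; the buffer_capacity and batch_size values are placeholders, since no training happens during evaluation:

```python
# Minimal evaluation sketch; reuses Agent, state_dim, action_dim and "dqn.pth" from above.
eval_env = gym.make('CartPole-v0')
eval_agent = Agent(state_dim, action_dim, lr=0.001, gamma=0.99, epsilon=0.0,
                   buffer_capacity=1, batch_size=1)  # epsilon=0 -> fully greedy policy
eval_agent.load("dqn.pth")        # restore the trained policy network weights
eval_agent.policy_net.eval()

state = eval_env.reset()
episode_reward = 0
while True:
    action = eval_agent.act(state)                   # greedy action from the Q-network
    state, reward, done, _ = eval_env.step(action)   # classic Gym 4-tuple API assumed
    episode_reward += reward
    if done:
        break
print("Evaluation reward:", episode_reward)
```

Since the replay buffer and target network are only needed during training, they sit unused here; a leaner variant would load the state dict directly into a bare DQN model instead of a full Agent.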