soft actor critic tensorflow1.x code
Below is an example implementation of the Soft Actor-Critic (SAC) algorithm using TensorFlow 1.x with eager execution enabled:
```python
import numpy as np
import gym
import tensorflow as tf

# The code below relies on eager execution (GradientTape, .numpy()), which has
# to be switched on explicitly under TensorFlow 1.x.
if not tf.executing_eagerly():
    tf.compat.v1.enable_eager_execution()

# Hyperparameters
gamma = 0.99               # discount factor
tau = 0.005                # Polyak averaging coefficient for the target value network
hidden_sizes = [256, 256]  # hidden layer sizes shared by all networks
batch_size = 100
buffer_size = 10000
learning_rate = 0.0003
alpha = 0.2                # initial entropy temperature
target_entropy = -1.0      # entropy target (roughly -act_dim)
# Create the replay buffer
class ReplayBuffer:
    def __init__(self, obs_dim, act_dim, size):
        self.obs_buf = np.zeros((size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros((size, act_dim), dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.next_obs_buf = np.zeros((size, obs_dim), dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr, self.size, self.max_size = 0, 0, size

    def store(self, obs, act, rew, next_obs, done):
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.next_obs_buf[self.ptr] = next_obs
        self.done_buf[self.ptr] = done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=batch_size):
        idxs = np.random.randint(0, self.size, size=batch_size)
        return dict(obs=self.obs_buf[idxs],
                    act=self.act_buf[idxs],
                    rew=self.rew_buf[idxs],
                    next_obs=self.next_obs_buf[idxs],
                    done=self.done_buf[idxs])

# Create the actor and critic networks
class MLP(tf.keras.Model):
    def __init__(self, sizes, activation=tf.nn.relu, output_activation=None):
        super(MLP, self).__init__()
        self.layers_ = []
        for size in sizes[:-1]:
            self.layers_.append(tf.keras.layers.Dense(units=size, activation=activation))
        self.layers_.append(tf.keras.layers.Dense(units=sizes[-1], activation=output_activation))

    def call(self, inputs):
        x = inputs
        for layer in self.layers_:
            x = layer(x)
        return x

class ActorCritic(tf.keras.Model):
    # Two Q networks, a state-value network and a tanh-squashed Gaussian policy
    def __init__(self, obs_dim, act_dim, hidden_sizes, activation=tf.nn.relu):
        super(ActorCritic, self).__init__()
        self.q1 = MLP(hidden_sizes + [1], activation)
        self.q2 = MLP(hidden_sizes + [1], activation)
        self.v = MLP(hidden_sizes + [1], activation)
        # The policy head outputs the mean and log-std of a Gaussian.
        self.pi_net = MLP(hidden_sizes + [2 * act_dim], activation)

    def q_values(self, obs, act):
        x = tf.concat([obs, act], axis=-1)
        return tf.squeeze(self.q1(x), axis=-1), tf.squeeze(self.q2(x), axis=-1)

    def value(self, obs):
        return tf.squeeze(self.v(obs), axis=-1)

    def pi(self, obs):
        # Sample an action from the squashed Gaussian and return its log-probability.
        mu, log_std = tf.split(self.pi_net(obs), 2, axis=-1)
        log_std = tf.clip_by_value(log_std, -20.0, 2.0)
        std = tf.exp(log_std)
        pre_tanh = mu + std * tf.random.normal(tf.shape(mu))  # reparameterised sample
        act = tf.tanh(pre_tanh)                               # squash into [-1, 1]
        logp = tf.reduce_sum(
            -0.5 * ((pre_tanh - mu) / std) ** 2 - log_std - 0.5 * np.log(2.0 * np.pi),
            axis=-1)
        logp -= tf.reduce_sum(tf.math.log(1.0 - act ** 2 + 1e-6), axis=-1)  # tanh correction
        return act, logp

    def act(self, obs):
        act, _ = self.pi(obs)
        return act.numpy()

# Create the SAC agent
class SAC:
    def __init__(self, obs_dim, act_dim, hidden_sizes, buffer_size, batch_size,
                 learning_rate, alpha, gamma, tau, target_entropy):
        self.q_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        self.v_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        self.pi_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        self.alpha_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
        self.replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=buffer_size)
        self.batch_size = batch_size
        self.gamma = gamma
        self.tau = tau
        self.target_entropy = target_entropy
        # The temperature is learned; optimise log(alpha) so alpha stays positive.
        self.log_alpha = tf.Variable(np.log(alpha), dtype=tf.float32)
        self.actor_critic = ActorCritic(obs_dim, act_dim, hidden_sizes)
        # Target value network, updated by Polyak averaging with coefficient tau
        self.target_v_net = MLP(hidden_sizes + [1])
        dummy_obs = tf.zeros((1, obs_dim), dtype=tf.float32)
        self.actor_critic.v(dummy_obs)   # build both value networks so their
        self.target_v_net(dummy_obs)     # weights exist and can be copied
        self.target_v_net.set_weights(self.actor_critic.v.get_weights())

    @property
    def alpha(self):
        return tf.exp(self.log_alpha)

    def update(self, data):
        obs = tf.convert_to_tensor(data['obs'])
        act = tf.convert_to_tensor(data['act'])
        rew = tf.convert_to_tensor(data['rew'])
        next_obs = tf.convert_to_tensor(data['next_obs'])
        done = tf.convert_to_tensor(data['done'])
        with tf.GradientTape(persistent=True) as tape:
            # Bellman backup for the Q networks, using the target value network
            q_backup = tf.stop_gradient(
                rew + self.gamma * (1 - done) * tf.squeeze(self.target_v_net(next_obs), axis=-1))
            # Fresh actions from the current policy for the V and policy losses
            pi_act, logp = self.actor_critic.pi(obs)
            q1_pi, q2_pi = self.actor_critic.q_values(obs, pi_act)
            min_q_pi = tf.minimum(q1_pi, q2_pi)
            v_backup = tf.stop_gradient(min_q_pi - self.alpha * logp)
            q1, q2 = self.actor_critic.q_values(obs, act)
            v = self.actor_critic.value(obs)
            q_loss = tf.reduce_mean(tf.square(q1 - q_backup)) + tf.reduce_mean(tf.square(q2 - q_backup))
            v_loss = tf.reduce_mean(tf.square(v - v_backup))
            pi_loss = tf.reduce_mean(self.alpha * logp - min_q_pi)
            alpha_loss = -tf.reduce_mean(self.log_alpha * tf.stop_gradient(logp + self.target_entropy))
        q_vars = self.actor_critic.q1.trainable_variables + self.actor_critic.q2.trainable_variables
        self.q_optimizer.apply_gradients(zip(tape.gradient(q_loss, q_vars), q_vars))
        v_vars = self.actor_critic.v.trainable_variables
        self.v_optimizer.apply_gradients(zip(tape.gradient(v_loss, v_vars), v_vars))
        pi_vars = self.actor_critic.pi_net.trainable_variables
        self.pi_optimizer.apply_gradients(zip(tape.gradient(pi_loss, pi_vars), pi_vars))
        self.alpha_optimizer.apply_gradients(
            zip(tape.gradient(alpha_loss, [self.log_alpha]), [self.log_alpha]))
        del tape
        # Polyak-average the target value network towards the online one
        self.target_v_net.set_weights([
            self.tau * w + (1 - self.tau) * w_targ
            for w, w_targ in zip(self.actor_critic.v.get_weights(), self.target_v_net.get_weights())])

    def train(self):
        data = self.replay_buffer.sample_batch(self.batch_size)
        self.update(data)

    def store(self, obs, act, rew, next_obs, done):
        self.replay_buffer.store(obs, act, rew, next_obs, done)

    def act(self, obs):
        return self.actor_critic.act(obs)

    def save(self, save_path):
        self.actor_critic.save_weights(save_path)

    def load(self, load_path):
        self.actor_critic.load_weights(load_path)

# Train the SAC agent on a gym environment
env = gym.make('Pendulum-v0')
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
act_limit = env.action_space.high[0]  # tanh policy outputs [-1, 1]; rescale to the env's range
sac = SAC(obs_dim, act_dim, hidden_sizes, buffer_size, batch_size,
          learning_rate, alpha, gamma, tau, target_entropy)
for i in range(1000):
    obs = env.reset()
    total_reward = 0
    done = False
    while not done:
        act = sac.act(obs.reshape(1, -1).astype(np.float32))
        next_obs, rew, done, _ = env.step(act_limit * act[0])
        sac.store(obs, act[0], rew, next_obs, done)
        # Only start updating once the buffer holds at least one full batch
        if sac.replay_buffer.size >= batch_size:
            sac.train()
        obs = next_obs
        total_reward += rew
    print('Epoch: {}, Total Reward: {:.2f}'.format(i, total_reward))
sac.save('sac_model')
```
Note that this is only a basic example implementation; many aspects can still be improved and optimized. One such extension, a deterministic evaluation run using the saved weights, is sketched below.
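The following sketch is not part of the original answer; it is a minimal illustration that assumes the classes and hyperparameters defined above plus the `sac_model` checkpoint written at the end of training. It rebuilds the `ActorCritic` networks, loads the saved weights, and acts with the tanh-squashed mean of the policy rather than a random sample.

```python
# Minimal evaluation sketch (assumes the definitions above and a saved 'sac_model' checkpoint)
eval_env = gym.make('Pendulum-v0')
obs_dim = eval_env.observation_space.shape[0]
act_dim = eval_env.action_space.shape[0]
act_limit = eval_env.action_space.high[0]

ac = ActorCritic(obs_dim, act_dim, hidden_sizes)
ac.act(np.zeros((1, obs_dim), dtype=np.float32))  # build the policy variables before loading
ac.load_weights('sac_model')

obs, done, total_reward = eval_env.reset(), False, 0.0
while not done:
    # Deterministic action: tanh of the Gaussian mean instead of a sample
    mu, _ = tf.split(ac.pi_net(obs.reshape(1, -1).astype(np.float32)), 2, axis=-1)
    act = np.tanh(mu.numpy())[0]
    obs, rew, done, _ = eval_env.step(act_limit * act)
    total_reward += rew
print('Evaluation return: {:.2f}'.format(total_reward))
```

Using the mean action removes the exploration noise, so the reported return reflects the learned policy more faithfully than a sampled rollout.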