LSTM Python Code Implementation
Here is a simple Python implementation of an LSTM model using TensorFlow/Keras, trained on MNIST by treating each image as a sequence of rows:
```python
import numpy as np
import tensorflow as tf

# Hyperparameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

# Define the LSTM model: an LSTM layer followed by a dense layer that
# outputs raw logits for the 10 MNIST digit classes.
class LSTM(tf.keras.Model):
    def __init__(self, num_units):
        super(LSTM, self).__init__()
        self.lstm = tf.keras.layers.LSTM(num_units)
        self.fc = tf.keras.layers.Dense(10)  # logits, no activation

    def call(self, x):
        x = self.lstm(x)
        x = self.fc(x)
        return x

# Load MNIST and treat each 28x28 image as a sequence of 28 time steps
# with 28 features per step.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28, 28).astype(np.float32) / 255.0
x_test = x_test.reshape(-1, 28, 28).astype(np.float32) / 255.0
y_train = y_train.astype(np.int64)
y_test = y_test.astype(np.int64)

# Build the model and optimizer
model = LSTM(128)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)

# Loss: sparse softmax cross-entropy computed on the logits
def loss_fn(logits, labels):
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))

# Accuracy: fraction of samples whose arg-max logit matches the label
def accuracy_fn(logits, labels):
    preds = tf.argmax(logits, axis=1, output_type=tf.int32)
    return tf.reduce_mean(tf.cast(tf.equal(preds, tf.cast(labels, tf.int32)), tf.float32))

# Training loop: sample a random mini-batch each step, apply one gradient
# update, and print loss/accuracy every display_step steps.
def train(model, optimizer, x_train, y_train):
    for step in range(training_steps):
        indices = np.random.choice(x_train.shape[0], batch_size, replace=False)
        x_batch, y_batch = x_train[indices], y_train[indices]
        with tf.GradientTape() as tape:
            logits = model(x_batch)
            loss = loss_fn(logits, y_batch)
        acc = accuracy_fn(logits, y_batch)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        if step % display_step == 0:
            print("Step:", step, "Loss:", loss.numpy(), "Accuracy:", acc.numpy())

# Start training
train(model, optimizer, x_train, y_train)
```
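The test split loaded above is never used. As a minimal follow-up sketch, assuming the script above has already run (so `model`, `x_test`, and `y_test` exist), you could evaluate the trained model on the held-out test set in batches; the `evaluate` function and its `eval_batch_size` parameter are illustrative names, not part of the original code:

```python
# Sketch: evaluate the trained model on the MNIST test set in batches.
def evaluate(model, x_test, y_test, eval_batch_size=256):
    correct = 0
    for start in range(0, x_test.shape[0], eval_batch_size):
        x_batch = x_test[start:start + eval_batch_size]
        y_batch = y_test[start:start + eval_batch_size]
        logits = model(x_batch)
        preds = tf.argmax(logits, axis=1, output_type=tf.int64)
        correct += int(tf.reduce_sum(tf.cast(tf.equal(preds, y_batch), tf.int32)))
    return correct / x_test.shape[0]

print("Test accuracy:", evaluate(model, x_test, y_test))
```

Batching the evaluation keeps memory usage modest compared with passing all 10,000 test images through the LSTM at once.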