```
dataset = []
for data in np.arange(0, 3, .01):
    data = math.sin(data * math.pi)
    dataset.append(data)
dataset = np.array(dataset)
dataset = dataset.astype('float32')
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
dataset = list(map(lambda x: x / scalar, dataset))

def create_dataset(dataset, look_back=3):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)
```
Based on this code, write RNN code that predicts the sine function and plots the true values against the predicted values.
Sure, here is RNN code for predicting the sine function, built on the code given above:
```
import numpy as np
import math
import tensorflow as tf
import matplotlib.pyplot as plt
# Generate the data: sample sin(pi * x) for x in [0, 3) with step 0.01
dataset = []
for data in np.arange(0, 3, .01):
    data = math.sin(data * math.pi)
    dataset.append(data)
dataset = np.array(dataset)
dataset = dataset.astype('float32')
# Scale the values by the data range (max - min)
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
dataset = list(map(lambda x: x / scalar, dataset))

# Build sliding-window samples: each input is `look_back` consecutive
# points and the target is the point that follows them
def create_dataset(dataset, look_back=3):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

look_back = 5
trainX, trainY = create_dataset(dataset, look_back)
# Reshape the inputs to (samples, timesteps, features) as the LSTM expects,
# then convert to tensors
trainX = np.reshape(trainX, (-1, look_back, 1))
trainX = tf.convert_to_tensor(trainX)
trainY = tf.convert_to_tensor(trainY)
# Define the model: an LSTM layer followed by a dense output layer
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(32, input_shape=(look_back, 1)),
    tf.keras.layers.Dense(1)
])
# Compile with the Adam optimizer and mean squared error loss
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.MeanSquaredError())
# Train the model
history = model.fit(trainX, trainY, epochs=100, batch_size=16, verbose=2)
# Predict one step ahead for every look_back-length window in the series
predicted = []
for i in range(len(dataset) - look_back):
    x = np.array(dataset[i:i + look_back])
    x = np.reshape(x, (1, look_back, 1))
    y = model.predict(x, verbose=0)
    predicted.append(y[0][0])
# Undo the scaling for both the predictions and the true values
predicted = np.array(predicted) * scalar
true_values = np.array(dataset[look_back:]) * scalar
# Plot the true curve against the predicted curve
plt.plot(true_values, label='True Data')
plt.plot(predicted, label='Predicted Data')
plt.legend()
plt.show()
```
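To put a number on how closely the predicted curve tracks the true one, a root-mean-square error can be computed from the same arrays used for plotting. A minimal sketch, reusing `true_values` and `predicted` from the code above (the name `rmse` is only illustrative):
```
# RMSE between the denormalized true and predicted values
rmse = np.sqrt(np.mean((true_values - predicted) ** 2))
print(f'RMSE: {rmse:.4f}')
```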
In this model, an LSTM layer (itself a recurrent layer) learns the temporal structure of the sequence, and a fully connected layer produces the one-step-ahead prediction. Training uses the Adam optimizer with a mean squared error loss. Finally, the model predicts over the whole series, and the predictions are plotted against the true values on the same figure for comparison. If a vanilla RNN cell is preferred over the LSTM, the layer can be swapped as sketched below.
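A minimal sketch of that substitution, reusing the imports and `look_back` defined above and keeping the same 32-unit size (an arbitrary choice, not from the original answer):
```
# Hypothetical variant: a vanilla recurrent layer instead of the LSTM
model = tf.keras.Sequential([
    tf.keras.layers.SimpleRNN(32, input_shape=(look_back, 1)),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.MeanSquaredError())
```
The rest of the training and prediction code stays the same; only the recurrent layer changes.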