```python
import numpy as np
import math
import tensorflow as tf
import matplotlib.pyplot as plt

# Generate data
dataset = []
for data in np.arange(0, 3, .01):
    data = math.sin(data * math.pi)
    dataset.append(data)
dataset = np.array(dataset)
dataset = dataset.astype('float32')
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
dataset = list(map(lambda x: x / scalar, dataset))

# Build the dataset
look_back = 5
trainX, trainY = [], []
for i in range(len(dataset) - look_back):
    trainX.append(dataset[i:i+look_back])
    trainY.append(dataset[i+look_back])
trainX = np.array(trainX)
trainY = np.array(trainY)

# Convert to tensors
trainX = tf.convert_to_tensor(trainX)
trainY = tf.convert_to_tensor(trainY)

# Define the model
model = tf.keras.Sequential([
    tf.keras.layers.SimpleRNN(32, input_shape=(look_back, 1)),
    tf.keras.layers.Dense(1)
])

# Compile the model
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.MeanSquaredError())

# Train the model
history = model.fit(trainX, trainY, epochs=100, batch_size=16, verbose=2)

# Predict
predicted = []
for i in range(len(dataset) - look_back):
    x = np.array(dataset[i:i+look_back])
    x = np.reshape(x, (1, look_back, 1))
    y = model.predict(x)
    predicted.append(y[0][0])

# Denormalize
predicted = np.array(predicted)
predicted = predicted * scalar

# Plot
plt.plot(dataset[look_back:], label='True Data')
plt.plot(predicted, label='Predicted Data')
plt.legend()
plt.show()
```

The plotted true values come out at only half the size of the predictions. What is wrong?

Posted: 2024-03-19 17:41:57 · Views: 14
From the code you posted, the problem is in the denormalization step before plotting. You compute `scalar = max_value - min_value`, which for `sin` over [0, 3π) is 1 - (-1) = 2, and you divide every element of `dataset` by it. When plotting, you multiply `predicted` by `scalar` to undo that scaling, but you plot the true series `dataset[look_back:]` still in its normalized form. The "true" curve is therefore drawn at exactly half the amplitude of the predictions. Rescale the true data by the same factor before plotting, or compare both series in normalized space, and the two curves will line up.
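A minimal sketch of the plotting fix, assuming the rest of the script above is unchanged (only the final plotting block is replaced):

```python
# Rescale the true data with the same factor used for the predictions,
# so both curves are drawn on the original sin() amplitude.
true_data = np.array(dataset[look_back:]) * scalar

plt.plot(true_data, label='True Data')
plt.plot(predicted, label='Predicted Data')  # predicted was already multiplied by scalar
plt.legend()
plt.show()
```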
Related questions

```python
import wfdb
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Read the ECG record
record = wfdb.rdrecord('D:/下载/ptb-xl-a-large-publicly-available-electrocardiography-dataset-1.0.3/records100/00000/00003_lr')
# Extract the ECG signal
ecg_signal = record.p_signal[:, 0]
# Sampling frequency
fs = record.fs
# Time axis
t = np.arange(ecg_signal.size) / fs
# Remove baseline wander
ecg_detrend = signal.detrend(ecg_signal)
# Design the filter
b, a = signal.butter(4, 0.2, 'low')
# Apply the filter
ecg_filt = signal.filtfilt(b, a, ecg_detrend)
```

Which filtering method does this code use?

This code applies zero-phase (forward-backward) filtering via the `filtfilt` function, using a 4th-order Butterworth low-pass filter. Note that the `0.2` passed to `signal.butter` is a cutoff normalized to the Nyquist frequency, so the actual cutoff is 0.2 × (fs / 2) = 0.1 × fs. Because `filtfilt` runs the filter forward and then backward over the signal, the phase delay that a single pass would introduce cancels out, leaving the signal's phase intact. Combined with the preceding `detrend` step, the pipeline removes baseline wander and attenuates high-frequency noise while preserving the lower-frequency ECG morphology.
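For reference, a small sketch of the relationship between the normalized cutoff and the cutoff in Hz. `scipy.signal.butter` also accepts an `fs` keyword so the cutoff can be stated directly in Hz; the 100 Hz value below is an assumption based on the `records100` path (PTB-XL "lr" records are sampled at 100 Hz):

```python
from scipy import signal

fs = 100.0  # assumed sampling rate of the record

# Cutoff given as a fraction of Nyquist (what the question's code does):
b1, a1 = signal.butter(4, 0.2, 'low')

# The same filter with the cutoff stated in Hz: 0.2 * (fs / 2) = 10 Hz here.
b2, a2 = signal.butter(4, 0.1 * fs, 'low', fs=fs)

# b1/a1 and b2/a2 describe the same filter (up to floating-point error).
```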

```python
import wfdb
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Read the ECG record
record = wfdb.rdrecord('D:/下载/ptb-xl-a-large-publicly-available-electrocardiography-dataset-1.0.3/records100/00000/00003_lr')
# Extract the ECG signal
ecg_signal = record.p_signal[:, 0]
# Sampling frequency
fs = record.fs
# Time axis
t = np.arange(ecg_signal.size) / fs
# Remove baseline wander
ecg_detrend = signal.detrend(ecg_signal)
# Design the filter
b, a = signal.butter(4, 0.2, 'low')
# Apply the filter
ecg_filt = signal.filtfilt(b, a, ecg_detrend)

# Plot the original, filtered, and detrended signals
plt.figure(figsize=(12, 6))
plt.subplot(3, 1, 1)
plt.plot(t, ecg_signal, 'b')
plt.title('Original Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.subplot(3, 1, 2)
plt.plot(t, ecg_filt, 'r')
plt.title('Filtered Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.subplot(3, 1, 3)
plt.plot(t, ecg_detrend, 'g')
plt.title('Detrended Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.tight_layout()
plt.show()
```

Please extend this code to also plot the signal after both baseline-wander removal and filtering.

Sure. You can add the following to your script to plot the signal after baseline removal and filtering:

```python
ecg_detrend_filt = signal.filtfilt(b, a, ecg_detrend)
plt.figure(figsize=(8, 4))
plt.plot(t, ecg_detrend_filt, 'k')
plt.title('Detrended and Filtered Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.show()
```

The complete code:

```python
import wfdb
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Read the ECG record
record = wfdb.rdrecord('D:/下载/ptb-xl-a-large-publicly-available-electrocardiography-dataset-1.0.3/records100/00000/00003_lr')
# Extract the ECG signal
ecg_signal = record.p_signal[:, 0]
# Sampling frequency
fs = record.fs
# Time axis
t = np.arange(ecg_signal.size) / fs
# Remove baseline wander
ecg_detrend = signal.detrend(ecg_signal)
# Design the filter
b, a = signal.butter(4, 0.2, 'low')
# Apply the filter
ecg_filt = signal.filtfilt(b, a, ecg_detrend)

# Plot the original, filtered, and detrended signals
plt.figure(figsize=(12, 6))
plt.subplot(3, 1, 1)
plt.plot(t, ecg_signal, 'b')
plt.title('Original Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.subplot(3, 1, 2)
plt.plot(t, ecg_filt, 'r')
plt.title('Filtered Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.subplot(3, 1, 3)
plt.plot(t, ecg_detrend, 'g')
plt.title('Detrended Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.tight_layout()
plt.show()

# Plot the detrended and filtered signal
ecg_detrend_filt = signal.filtfilt(b, a, ecg_detrend)
plt.figure(figsize=(8, 4))
plt.plot(t, ecg_detrend_filt, 'k')
plt.title('Detrended and Filtered Signal')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude (mV)')
plt.show()
```

When run, this opens two figures: the first contains three subplots (the original signal, the filtered signal, and the detrended signal), and the second shows the detrended-and-filtered signal. Note that `ecg_filt` was already computed as `filtfilt(b, a, ecg_detrend)`, so `ecg_detrend_filt` is numerically identical to it; the extra block simply gives that result its own figure.

Related recommendations

```python
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

# Let us define a plt function for simplicity
def plt_loss(x, training_metric, testing_metric, ax, colors=['b']):
    ax.plot(x, training_metric, 'b', label='Train')
    ax.plot(x, testing_metric, 'k', label='Test')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Accuracy')
    plt.legend()
    plt.grid()
    plt.show()

tf.keras.utils.set_random_seed(1)

# We import the MNIST dataset using keras.datasets
(train_data, train_labels), (test_data, test_labels) = keras.datasets.mnist.load_data()

# We first vectorize each image (28*28) into a vector (784)
train_data = train_data.reshape(train_data.shape[0], train_data.shape[1] * train_data.shape[2])  # 60000*784
test_data = test_data.reshape(test_data.shape[0], test_data.shape[1] * test_data.shape[2])  # 10000*784

# We next change each label number to a 10-dimensional vector, e.g., 1 -> [0,1,0,...,0]
train_labels = keras.utils.to_categorical(train_labels, 10)
test_labels = keras.utils.to_categorical(test_labels, 10)

# Start to build a MLP model
N_batch_size = 5000
N_epochs = 100
lr = 0.01

# We build a three layer model, 784 -> 64 -> 10
MLP_3 = keras.models.Sequential([
    keras.layers.Dense(128, input_shape=(784,), activation='relu'),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
MLP_3.compile(
    optimizer=keras.optimizers.Adam(lr),
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
History = MLP_3.fit(train_data, train_labels,
                    batch_size=N_batch_size, epochs=N_epochs,
                    validation_data=(test_data, test_labels), shuffle=False)
train_acc = History.history['accuracy']
test_acc = History.history
```

For this model, train with different amounts of training data (5000, 10000, 15000, …, 60000, an arithmetic sequence with common difference 5000) and plot training-set and test-set accuracy (y-axis) against training-data size (x-axis).
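A minimal sketch of how the requested experiment might be run, reusing the names from the listing above; `make_mlp` is a hypothetical helper (not in the original) that rebuilds a fresh, identically configured model for each run so the runs stay independent:

```python
sizes = np.arange(5000, 60001, 5000)
train_accs, test_accs = [], []

def make_mlp():
    # Hypothetical helper: a fresh copy of the model above per run.
    m = keras.models.Sequential([
        keras.layers.Dense(128, input_shape=(784,), activation='relu'),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])
    m.compile(optimizer=keras.optimizers.Adam(lr),
              loss='categorical_crossentropy', metrics=['accuracy'])
    return m

for n in sizes:
    m = make_mlp()
    h = m.fit(train_data[:n], train_labels[:n],
              batch_size=N_batch_size, epochs=N_epochs,
              validation_data=(test_data, test_labels),
              shuffle=False, verbose=0)
    train_accs.append(h.history['accuracy'][-1])      # final-epoch train accuracy
    test_accs.append(h.history['val_accuracy'][-1])   # final-epoch test accuracy

plt.plot(sizes, train_accs, 'b', label='Train')
plt.plot(sizes, test_accs, 'k', label='Test')
plt.xlabel('Training set size')
plt.ylabel('Accuracy')
plt.legend()
plt.grid()
plt.show()
```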

```python
import random
import numpy as np
import matplotlib.pyplot as plt

# Generate random points
def generate_points(num_points):
    points = []
    for i in range(num_points):
        x = random.uniform(-10, 10)
        y = random.uniform(-10, 10)
        points.append([x, y])
    return points

# Euclidean distance
def euclidean_distance(point1, point2):
    return np.sqrt(np.sum(np.square(np.array(point1) - np.array(point2))))

# K-means implementation
def kmeans(points, k, num_iterations=100):
    num_points = len(points)
    # Randomly choose k points as the initial centroids
    centroids = random.sample(points, k)
    # Initialize labels and distances
    labels = np.zeros(num_points)
    distances = np.zeros((num_points, k))
    for i in range(num_iterations):
        # Distance from every point to every centroid
        for j in range(num_points):
            for l in range(k):
                distances[j][l] = euclidean_distance(points[j], centroids[l])
        # Assign each point to its nearest centroid
        for j in range(num_points):
            labels[j] = np.argmin(distances[j])
        # Update the centroids
        for l in range(k):
            centroids[l] = np.mean([points[j] for j in range(num_points) if labels[j] == l], axis=0)
    return labels, centroids

# Generate points
points = generate_points(100)

# Run K-means for several values of k
k_values = [2, 3, 4]
for k in k_values:
    labels, centroids = kmeans(points, k)
    # Plot the clustering result
    colors = ['r', 'g', 'b', 'y', 'c', 'm']
    for i in range(k):
        plt.scatter([points[j][0] for j in range(len(points)) if labels[j] == i],
                    [points[j][1] for j in range(len(points)) if labels[j] == i],
                    color=colors[i])
    plt.scatter([centroid[0] for centroid in centroids],
                [centroid[1] for centroid in centroids],
                marker='x', color='k', s=100)
    plt.title('K-means clustering with k={}'.format(k))
    plt.show()
```

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris

# Load the dataset
iris = load_iris()
X = iris.data
y = iris.target

# K-means clustering
kmeans = KMeans(n_clusters=3, random_state=0).fit(X)

# Visualize the result
plt.scatter(X[:, 0], X[:, 1], c=kmeans.labels_)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('K-means clustering on iris dataset')
plt.show()
```

Analyze the results of this algorithm with SSE, the silhouette coefficient, the variance ratio criterion, and DBI.
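A minimal sketch of those four metrics for the iris result above, reusing `X` and the fitted `kmeans` object. SSE is read off the model's `inertia_` attribute, and the variance ratio criterion is the Calinski-Harabasz score in `sklearn.metrics`:

```python
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score

sse = kmeans.inertia_                                # SSE: within-cluster sum of squares
sil = silhouette_score(X, kmeans.labels_)            # silhouette coefficient, higher is better
vrc = calinski_harabasz_score(X, kmeans.labels_)     # variance ratio criterion, higher is better
dbi = davies_bouldin_score(X, kmeans.labels_)        # Davies-Bouldin index, lower is better

print(f'SSE={sse:.2f}, silhouette={sil:.3f}, VRC={vrc:.1f}, DBI={dbi:.3f}')
```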

```python
import numpy
import numpy as np
import matplotlib.pyplot as plt
import math
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

dataset = []
for data in np.arange(0, 3, .01):
    data = math.sin(data * math.pi)
    dataset.append(data)
dataset = np.array(dataset)
dataset = dataset.astype('float32')
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
print(scalar)
dataset = list(map(lambda x: x / scalar, dataset))

def create_dataset(dataset, look_back=3):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

data_X, data_Y = create_dataset(dataset)
train_X, train_Y = data_X[:int(0.8 * len(data_X))], data_Y[:int(0.8 * len(data_Y))]
test_X, test_Y = data_Y[int(0.8 * len(data_X)):], data_Y[int(0.8 * len(data_Y)):]
train_X = train_X.reshape(-1, 1, 3).astype('float32')
train_Y = train_Y.reshape(-1, 1, 3).astype('float32')
test_X = test_X.reshape(-1, 1, 3).astype('float32')
train_X = torch.from_numpy(train_X)
train_Y = torch.from_numpy(train_Y)
test_X = torch.from_numpy(test_X)

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1, num_layer=2):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layer = num_layer
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, h = self.rnn(x)
        out = self.linear(out[0])
        return out

net = RNN(3, 20)
criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
train_loss = []
test_loss = []
for e in range(1000):
    pred = net(train_X)
    loss = criterion(pred, train_Y)
    optimizer.zero_grad()
    # Backpropagation
    loss.backward()
    optimizer.step()
    if (e + 1) % 100 == 0:
        print('Epoch:{},loss:{:.10f}'.format(e + 1, loss.data.item()))
    train_loss.append(loss.item())

plt.plot(train_loss, label='train_loss')
plt.legend()
plt.show()
```

Please modify the code as needed and write the code that plots the predicted and true values.
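A minimal sketch of one possible repair, reusing `data_X`, `data_Y`, and the imports from the listing above. The assumed fixes are: `test_X` is taken from `data_X` rather than `data_Y`, targets are shaped `(N, 1)` instead of `(N, 1, 3)`, and the forward pass reads the last time step with `out[:, -1, :]` instead of `out[0]`:

```python
# Assumed fixes: correct split, (N, 1) targets, last-time-step output.
train_X, train_Y = data_X[:int(0.8 * len(data_X))], data_Y[:int(0.8 * len(data_Y))]
test_X, test_Y = data_X[int(0.8 * len(data_X)):], data_Y[int(0.8 * len(data_Y)):]

train_X = torch.from_numpy(train_X.reshape(-1, 1, 3).astype('float32'))
train_Y = torch.from_numpy(train_Y.reshape(-1, 1).astype('float32'))
test_X = torch.from_numpy(test_X.reshape(-1, 1, 3).astype('float32'))

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1):
        super().__init__()
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, h = self.rnn(x)
        return self.linear(out[:, -1, :])  # last time step of every sample

net = RNN(3, 20)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
for e in range(1000):
    loss = criterion(net(train_X), train_Y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Predicted vs. true values over the whole series
net.eval()
with torch.no_grad():
    all_X = torch.from_numpy(data_X.reshape(-1, 1, 3).astype('float32'))
    pred = net(all_X).squeeze(-1).numpy()
plt.plot(data_Y, label='true')
plt.plot(pred, label='predicted')
plt.legend()
plt.show()
```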

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM

# Read the data
dataset = pd.read_csv('wind_speed.csv', header=0, index_col=0)
dataset.index = pd.to_datetime(dataset.index)
dataset = dataset.resample('H').mean()

# Preprocess
scaler = MinMaxScaler(feature_range=(0, 1))
dataset_scaled = scaler.fit_transform(dataset)

# Train/test split
train_size = int(len(dataset_scaled) * 0.8)
test_size = len(dataset_scaled) - train_size
train, test = dataset_scaled[0:train_size, :], dataset_scaled[train_size:len(dataset_scaled), :]

# Build the supervised dataset
def create_dataset(dataset, look_back):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

look_back = 24
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# Reshape to 3-D
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))

# Build the LSTM model
model = Sequential()
model.add(LSTM(50, input_shape=(look_back, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)

# Predict
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

# Invert the scaling
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# Plot the predictions
plt.plot(trainY[0], label='Train Data')
plt.plot(trainPredict[:, 0], label='Predicted Train Data')
plt.plot(testY[0], label='Test Data')
plt.plot(testPredict[:, 0], label='Predicted Test Data')
plt.legend(loc='best')
plt.show()
```

```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score, f1_score, roc_curve, roc_auc_score

# 1. Read and split the data
data = pd.read_csv('data.csv')
X = data.drop('target', axis=1)
y = data['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 2. Train the model
model = LogisticRegression()
model.fit(X_train, y_train)

# 3. Predict
y_pred = model.predict(X_test)
y_prob = model.predict_proba(X_test)[:, 1]

# 4. Plot the binary confusion matrix
confusion_mat = confusion_matrix(y_test, y_pred)
plt.imshow(confusion_mat, cmap=plt.cm.Blues)
plt.title('Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(2)
plt.xticks(tick_marks, ['0', '1'])
plt.yticks(tick_marks, ['0', '1'])
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
for i in range(2):
    for j in range(2):
        plt.text(j, i, confusion_mat[i, j],
                 ha='center', va='center',
                 color='white' if confusion_mat[i, j] > confusion_mat.max() / 2 else 'black')
plt.show()

# 5. Precision, recall, and F1-score
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)

# 6. AUC and the ROC curve
auc = roc_auc_score(y_test, y_prob)
fpr, tpr, thresholds = roc_curve(y_test, y_prob)
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend(loc="lower right")
plt.show()

# 7. Print the results
print('Precision:', precision)
print('Recall:', recall)
print('F1-score:', f1)
print('AUC:', auc)
```

Add a comment to every line of this code.

Translate this program and then call it with values of your own:

```python
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model

def plot_decision_boundary(model, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)

def sigmoid(x):
    s = 1 / (1 + np.exp(-x))
    return s

def load_planar_dataset():
    np.random.seed(1)
    m = 400  # number of examples
    N = int(m / 2)  # number of points per class
    print(np.random.randn(N))
    D = 2  # dimensionality
    X = np.zeros((m, D))  # data matrix where each row is a single example
    Y = np.zeros((m, 1), dtype='uint8')  # labels vector (0 for red, 1 for blue)
    a = 4  # maximum ray of the flower
    for j in range(2):
        ix = range(N * j, N * (j + 1))
        t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2  # theta
        r = a * np.sin(4 * t) + np.random.randn(N) * 0.2  # radius
        X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        Y[ix] = j
    X = X.T
    Y = Y.T
    return X, Y

def load_extra_datasets():
    N = 200
    noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
    noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
    blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
    gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)
    no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
    return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
```
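A small usage sketch with assumed values: load the planar dataset, fit a scikit-learn logistic regression, and draw its decision boundary with the helper above. The choice of `LogisticRegressionCV` here is an assumption, any classifier with a `predict` method would do:

```python
X, Y = load_planar_dataset()          # X: (2, 400), Y: (1, 400)

clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T, Y.ravel())               # sklearn expects (n_samples, n_features)

# plot_decision_boundary expects a callable mapping grid points to labels
plot_decision_boundary(lambda x: clf.predict(x), X, Y.ravel())
plt.title('Logistic regression on the planar dataset')
plt.show()
```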

Rewrite the code after the colon as an nn.Module class:

```python
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM

data1 = pd.read_csv("终极1.csv", usecols=[17], encoding='gb18030')
df = data1.fillna(method='ffill')
data = df.values.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(data)
train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train, test = data[0:train_size, :], data[train_size:len(data), :]

def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

look_back = 30
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

model = Sequential()
model.add(LSTM(50, input_shape=(1, look_back), return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=6, batch_size=1, verbose=2)

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
```
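A minimal sketch of the requested `nn.Module` equivalent of the two stacked LSTMs; dimensions follow the Keras model above (input shaped `(batch, 1, look_back)`, 50 hidden units, one output), and the class and argument names are my own:

```python
import torch
from torch import nn

class LSTMModel(nn.Module):
    def __init__(self, look_back=30, hidden_size=50):
        super().__init__()
        # Two stacked layers, mirroring LSTM(50, return_sequences=True) + LSTM(50)
        self.lstm = nn.LSTM(input_size=look_back, hidden_size=hidden_size,
                            num_layers=2, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)  # mirrors Dense(1)

    def forward(self, x):
        # x: (batch, seq_len=1, look_back), as in the Keras reshape above
        out, _ = self.lstm(x)
        return self.fc(out[:, -1, :])  # prediction from the last time step

# Quick shape check with a dummy batch of 8 windows
model = LSTMModel()
dummy = torch.randn(8, 1, 30)
print(model(dummy).shape)  # torch.Size([8, 1])
```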

Optimize this code:

```python
import xarray as xr
import netCDF4 as nc
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import cartopy.mpl.ticker as cticker
import cartopy.crs as ccrs
import cartopy.feature as cfeature

ds = xr.open_dataset('C:/Users/cindy/Desktop/SP.nc', engine='netcdf4')  # read the raw data
ds_temp = xr.open_dataset('C:/Users/cindy/Desktop/SP.nc')

# Region extraction
south_asia = ds_temp.sel(latitude=slice(38, 28), longitude=slice(75, 103))
indian_ocean = ds_temp.sel(latitude=slice(5, -15), longitude=slice(60, 100))

# Interpolate to the pressure levels
south_asia_200hpa = south_asia.t.interp(level=200)
indian_ocean_200hpa = indian_ocean.t.interp(level=200)
south_asia_400hpa = south_asia.t.interp(level=400)
indian_ocean_400hpa = indian_ocean.t.interp(level=400)

# Area averages
TTP = south_asia_400hpa.mean(dim=('latitude', 'longitude'))  #.values
TTIO = indian_ocean_400hpa.mean(dim=('latitude', 'longitude'))
TTP_200hpa = south_asia_200hpa.mean(dim=('latitude', 'longitude'))
TTIO_200hpa = indian_ocean_200hpa.mean(dim=('latitude', 'longitude'))
tlup = (TTP - TTIO) - (TTP_200hpa - TTIO_200hpa) - (-5.367655815)

# Set up the figure and projection
fig = plt.figure(figsize=[10, 8])
ax = plt.axes(projection=ccrs.PlateCarree())

# Add map features
ax.set_extent([60, 140, -15, 60], crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidths=0.5)
ax.add_feature(cfeature.LAND.with_scale('50m'), facecolor='lightgray')
ax.add_feature(cfeature.OCEAN.with_scale('50m'), facecolor='white')

# Plot the anomaly field
im = ax.contourf(TTP_200hpa, TTP, tlup, cmap='coolwarm',
                 levels=np.arange(-4, 4.5, 0.5), extend='both')

# Add the colorbar
cbar = plt.colorbar(im, ax=ax, shrink=0.8)
cbar.set_label('Temperature anomaly (°C)')

# Add longitude/latitude tick labels
ax.set_xticks(np.arange(60, 105, 10), crs=ccrs.PlateCarree())
ax.set_yticks(np.arange(-10, 40, 10), crs=ccrs.PlateCarree())
lon_formatter = cticker.LongitudeFormatter()
lat_formatter = cticker.LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)

# Add a title and save the figure
plt.title('Temperature anomaly at 400hPa over South Asia and the Indian Ocean')
plt.savefig('temperature_anomaly.png', dpi=300)
plt.show()
```
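A sketch of the most obvious cleanup, assuming the dataset has `level`, `latitude`, and `longitude` coordinates as the original code implies: the file is opened twice, so one handle can serve both uses, and both pressure levels can be interpolated in a single call per region:

```python
import xarray as xr

ds = xr.open_dataset('C:/Users/cindy/Desktop/SP.nc', engine='netcdf4')  # open once

# Select both regions from the same handle
south_asia = ds.sel(latitude=slice(38, 28), longitude=slice(75, 103))
indian_ocean = ds.sel(latitude=slice(5, -15), longitude=slice(60, 100))

# Interpolate both pressure levels in one call per region
sa = south_asia.t.interp(level=[200, 400])
io = indian_ocean.t.interp(level=[200, 400])

# Area means; sel(level=...) then picks out each level
TTP = sa.sel(level=400).mean(dim=('latitude', 'longitude'))
TTIO = io.sel(level=400).mean(dim=('latitude', 'longitude'))
TTP_200hpa = sa.sel(level=200).mean(dim=('latitude', 'longitude'))
TTIO_200hpa = io.sel(level=200).mean(dim=('latitude', 'longitude'))
```

Separately, the call `ax.contourf(TTP_200hpa, TTP, tlup, ...)` is worth double-checking: after the area means these are one-dimensional series, so there is no 2-D field for `contourf` to draw, and the intended coordinates were presumably longitude and latitude of some gridded anomaly instead.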

```python
import numpy as np
import matplotlib.pyplot as plt
import math
import torch
from torch import nn
import pdb
from torch.autograd import Variable
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

dataset = []
for data in np.arange(0, 3, .01):
    data = math.sin(data * math.pi)
    dataset.append(data)
dataset = np.array(dataset)
dataset = dataset.astype('float32')
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
dataset = list(map(lambda x: x / scalar, dataset))

def create_dataset(dataset, look_back=3):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

data_X, data_Y = create_dataset(dataset)
# Split into training and test sets, ratio 0.8
train_X, train_Y = data_X[:int(0.8 * len(data_X))], data_Y[:int(0.8 * len(data_Y))]
test_X, test_Y = data_Y[int(0.8 * len(data_X)):], data_Y[int(0.8 * len(data_Y)):]
train_X = train_X.reshape(-1, 1, 3).astype('float32')
train_Y = train_Y.reshape(-1, 1, 3).astype('float32')
test_X = test_X.reshape(-1, 1, 3).astype('float32')

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1, num_layer=2):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layer = num_layer
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Complete the forward function
        out, h = self.rnn(x)
        out = self.linear(out[0])
        # print("output shape", out.shape)
        return out

net = RNN(3, 20)
criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
train_loss = []
test_loss = []
for e in range(1000):
    pred = net(train_X)
    loss = criterion(pred, train_Y)
    optimizer.zero_grad()
    # Backpropagation
    loss.backward()
    optimizer.step()
    if (e + 1) % 100 == 0:
        print('Epoch:{},loss:{:.10f}'.format(e + 1, loss.data.item()))
    train_loss.append(loss.item())

plt.plot(train_loss, label='train_loss')
plt.legend()
plt.show()
```

Plot the predicted and true values. (This is essentially the same code as the RNN question above; see the sketch given there.)

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from sklearn.metrics import r2_score, median_absolute_error, mean_absolute_error

# Read the data
data = pd.read_csv(r'C:/Users/Ljimmy/Desktop/yyqc/peijian/销量数据rnn.csv')
# Extract the feature columns
X = data.iloc[:, 2:].values
# Normalize
scaler = MinMaxScaler(feature_range=(0, 1))
X[:, 0] = scaler.fit_transform(X[:, 0].reshape(-1, 1)).flatten()
# X = scaler.fit_transform(X)
# scaler.fit(X)
# X = scaler.transform(X)

# Train/test split
train_size = int(len(X) * 0.8)
test_size = len(X) - train_size
train, test = X[0:train_size, :], X[train_size:len(X), :]

# Convert to a supervised-learning problem
def create_dataset(dataset, look_back=1):
    X, Y = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), :]
        X.append(a)
        Y.append(dataset[i + look_back, 0])
    return np.array(X), np.array(Y)

look_back = 12
X_train, Y_train = create_dataset(train, look_back)
# Y_train = train[:, 2:]  # columns 3 onward
X_test, Y_test = create_dataset(test, look_back)
# Y_test = test[:, 2:]  # columns 3 onward

# Convert to 3-D tensors
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

# Build the LSTM model
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
model.add(LSTM(units=50))
model.add(Dense(units=1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X_train, Y_train, epochs=5, batch_size=32)
# model.fit(X_train, Y_train.reshape(Y_train.shape[0], 1), epochs=10, batch_size=32)

# Predict next month's sales
last_month_sales = data.tail(12).iloc[:, 2:].values
# last_month_sales = data.tail(1)[:,2:].values
last_month_sales = scaler.transform(last_month_sales)
last_month_sales = np.reshape(last_month_sales, (1, look_back, 1))
next_month_sales = model.predict(last_month_sales)
next_month_sales = scaler.inverse_transform(next_month_sales)
print('Next month sales: %.0f' % next_month_sales[0][0])

# RMSE
rmse = np.sqrt(np.mean((next_month_sales - last_month_sales) ** 2))
print('Test RMSE: %.3f' % rmse)
```

Running this raises:

```
IndexError                                Traceback (most recent call last)
Cell In[1], line 36
     33 X_test, Y_test = create_dataset(test, look_back)
     34 # Y_test = test[:, 2:]
     35 # Convert to 3-D tensors
---> 36 X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
     37 X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
     38 # Build the LSTM model
IndexError: tuple index out of range
```

How should the code be fixed?
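One hedged reading of this traceback, sketched under the assumption that the split left too few rows: `create_dataset` returns `np.array([])` (shape `(0,)`) whenever `len(dataset) <= look_back + 1`, and an empty array has no `shape[1]`, which produces exactly this `IndexError`. A guard makes that failure explicit, and since `create_dataset` already returns windows of shape `(look_back, n_features)`, the trailing axis should only be added for single-feature input:

```python
look_back = 12
X_train, Y_train = create_dataset(train, look_back)
X_test, Y_test = create_dataset(test, look_back)

# Fail loudly if a split is too short for the chosen window
if X_train.size == 0 or X_test.size == 0:
    raise ValueError(f'Need more than {look_back + 1} rows per split; '
                     f'got train={len(train)}, test={len(test)}')

# create_dataset already returns (samples, look_back, n_features);
# only add a trailing axis when the input had a single feature.
if X_train.ndim == 2:
    X_train = X_train[..., np.newaxis]
    X_test = X_test[..., np.newaxis]
```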
