```
mse = F.mse_loss(out, y, reduction='mean')
```
This is a line of PyTorch code that computes the mean squared error (MSE) loss. Here `F` is PyTorch's functional module `torch.nn.functional`, and `mse_loss` is the function in that module that computes the MSE loss. Its arguments are the predicted output `out`, the ground-truth target `y`, and `reduction`, which specifies how the element-wise losses are reduced; setting it to `'mean'` averages the squared errors over all elements.
Concretely, the loss is computed as:
$ \mathrm{MSE} = \frac{1}{N} \sum_{i=1}^{N} (y_i - \hat{y}_i)^2 $
where $N$ is the number of samples, and $y_i$ and $\hat{y}_i$ are the true target and the predicted output of the $i$-th sample. The smaller the MSE, the closer the predictions are to the true targets and the better the model fits the data.
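As a quick minimal sketch (with small hand-made tensors, not taken from the original post), the built-in call can be checked against the formula directly:
```
import torch
import torch.nn.functional as F

# Illustrative tensors only
out = torch.tensor([2.5, 0.0, 2.0, 8.0])   # predictions (y_hat)
y = torch.tensor([3.0, -0.5, 2.0, 7.0])    # targets (y)

mse = F.mse_loss(out, y, reduction='mean')  # built-in MSE with averaging
manual = ((y - out) ** 2).mean()            # (1/N) * sum_i (y_i - y_hat_i)^2

print(mse.item(), manual.item())            # both print 0.375
```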
Related questions
```
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch import autograd

"""
Use a neural network to approximate the ODE f'(x) = f(x) with initial condition f(0) = 1.
"""

class Net(nn.Module):
    def __init__(self, NL, NN):
        # NL: number of hidden (linear, fully connected) layers
        # NN: number of neurons per layer
        super(Net, self).__init__()
        self.input_layer = nn.Linear(1, NN)
        # The original used NN here; downsampling is used instead ("equal sampling"
        # proved better in experiments; more cases remain to be tested)
        self.hidden_layer = nn.Linear(NN, int(NN / 2))
        self.output_layer = nn.Linear(int(NN / 2), 1)

    def forward(self, x):
        out = torch.tanh(self.input_layer(x))
        out = torch.tanh(self.hidden_layer(out))
        out_final = self.output_layer(out)
        return out_final

net = Net(4, 20)  # 4 layers, 20 neurons
mse_cost_function = torch.nn.MSELoss(reduction='mean')  # mean squared error
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)  # optimizer

def ode_01(x, net):
    y = net(x)
    y_x = autograd.grad(y, x, grad_outputs=torch.ones_like(net(x)), create_graph=True)[0]
    return y - y_x  # y - y' = 0

plt.ion()  # interactive plotting
iterations = 200000
for epoch in range(iterations):
    optimizer.zero_grad()  # reset gradients

    # Loss for the boundary condition
    x_0 = torch.zeros(2000, 1)
    y_0 = net(x_0)
    mse_i = mse_cost_function(y_0, torch.ones(2000, 1))  # f(0) - 1 = 0

    # Loss for the equation
    x_in = np.random.uniform(low=0.0, high=2.0, size=(2000, 1))
    pt_x_in = autograd.Variable(torch.from_numpy(x_in).float(), requires_grad=True)  # random x
    pt_y_colection = ode_01(pt_x_in, net)
    pt_all_zeros = autograd.Variable(torch.from_numpy(np.zeros((2000, 1))).float(), requires_grad=False)
    mse_f = mse_cost_function(pt_y_colection, pt_all_zeros)  # y - y' = 0

    loss = mse_i + mse_f
    loss.backward()   # backpropagation
    optimizer.step()  # update step: theta_new = theta_old - alpha * dJ/dtheta

    if epoch % 1000 == 0:
        y = torch.exp(pt_x_in)   # true values
        y_train0 = net(pt_x_in)  # predicted values
        print(epoch, "Training Loss:", loss.data)
        print(f'times {epoch} - loss: {loss.item()} - y_0: {y_0}')
        plt.cla()
        plt.scatter(pt_x_in.detach().numpy(), y.detach().numpy())
        plt.scatter(pt_x_in.detach().numpy(), y_train0.detach().numpy(), c='red')
        plt.pause(0.1)
```
This Python code imports the torch, torch.nn, numpy and matplotlib.pyplot libraries and defines a neural network class `Net`. The network is then trained to approximate the solution of the differential equation f'(x) = f(x) with initial condition f(0) = 1: one MSE term enforces f(0) = 1 at x = 0, and a second MSE term drives the residual f(x) - f'(x) towards zero at points sampled uniformly from [0, 2], so the learned function approximates e^x on that interval.
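For the key step in that code, the following minimal sketch (with a tiny stand-in network, not the original `Net`) shows how `autograd.grad` yields f'(x) so that the residual f(x) - f'(x) can be penalized with an MSE loss:
```
import torch
from torch import nn, autograd

# Tiny stand-in network, for illustration only
net = nn.Sequential(nn.Linear(1, 8), nn.Tanh(), nn.Linear(8, 1))

x = torch.rand(5, 1, requires_grad=True)   # sample points in the domain
y = net(x)                                  # f(x) as predicted by the network

# df/dx via autograd; grad_outputs=ones because y is a vector, not a scalar
dy_dx = autograd.grad(y, x, grad_outputs=torch.ones_like(y), create_graph=True)[0]

residual = y - dy_dx                        # driven to zero so that f'(x) = f(x)
loss = torch.nn.functional.mse_loss(residual, torch.zeros_like(residual))
print(loss.item())
```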
```
import numpy
import numpy as np
import matplotlib.pyplot as plt
import math
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
import os

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

dataset = []
for data in np.arange(0, 3, .01):
    data = math.sin(data * math.pi)
    dataset.append(data)
dataset = np.array(dataset)
dataset = dataset.astype('float32')
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
print(scalar)
dataset = list(map(lambda x: x / scalar, dataset))

def create_dataset(dataset, look_back=3):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

data_X, data_Y = create_dataset(dataset)
train_X, train_Y = data_X[:int(0.8 * len(data_X))], data_Y[:int(0.8 * len(data_Y))]
test_X, test_Y = data_Y[int(0.8 * len(data_X)):], data_Y[int(0.8 * len(data_Y)):]
train_X = train_X.reshape(-1, 1, 3).astype('float32')
train_Y = train_Y.reshape(-1, 1, 3).astype('float32')
test_X = test_X.reshape(-1, 1, 3).astype('float32')
train_X = torch.from_numpy(train_X)
train_Y = torch.from_numpy(train_Y)
test_X = torch.from_numpy(test_X)

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1, num_layer=2):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layer = num_layer
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, h = self.rnn(x)
        out = self.linear(out[0])
        return out

net = RNN(3, 20)
criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)
train_loss = []
test_loss = []
for e in range(1000):
    pred = net(train_X)
    loss = criterion(pred, train_Y)
    optimizer.zero_grad()
    # backpropagation
    loss.backward()
    optimizer.step()
    if (e + 1) % 100 == 0:
        print('Epoch:{},loss:{:.10f}'.format(e + 1, loss.data.item()))
        train_loss.append(loss.item())
plt.plot(train_loss, label='train_loss')
plt.legend()
plt.show()
```
Please modify the code as appropriate, and add code that compares the predicted values with the true values.
Here is the modified code, together with code that compares the predicted values against the true values. The main changes are: `test_X` is sliced from `data_X` (not `data_Y`), the targets `train_Y` are reshaped to `(N, 1)` so they match the network output, and the linear layer is applied to the RNN output at the last time step (`out[:, -1, :]`):
```
import numpy as np
import matplotlib.pyplot as plt
import math
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
import os

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Build a sine-wave dataset and scale it by its value range
dataset = []
for data in np.arange(0, 3, .01):
    data = math.sin(data * math.pi)
    dataset.append(data)
dataset = np.array(dataset)
dataset = dataset.astype('float32')
max_value = np.max(dataset)
min_value = np.min(dataset)
scalar = max_value - min_value
print(scalar)
dataset = list(map(lambda x: x / scalar, dataset))

def create_dataset(dataset, look_back=3):
    # Turn the series into (input window, next value) pairs
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        a = dataset[i:(i + look_back)]
        dataX.append(a)
        dataY.append(dataset[i + look_back])
    return np.array(dataX), np.array(dataY)

data_X, data_Y = create_dataset(dataset)
train_X, train_Y = data_X[:int(0.8 * len(data_X))], data_Y[:int(0.8 * len(data_Y))]
test_X, test_Y = data_X[int(0.8 * len(data_X)):], data_Y[int(0.8 * len(data_Y)):]  # test inputs come from data_X
train_X = train_X.reshape(-1, 1, 3).astype('float32')
train_Y = train_Y.reshape(-1, 1).astype('float32')  # (N, 1) to match the network output
test_X = test_X.reshape(-1, 1, 3).astype('float32')
train_X = torch.from_numpy(train_X)
train_Y = torch.from_numpy(train_Y)
test_X = torch.from_numpy(test_X)

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size=1, num_layer=2):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layer = num_layer
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, h = self.rnn(x)
        out = self.linear(out[:, -1, :])  # use the output of the last time step
        return out

net = RNN(3, 20)
criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(net.parameters(), lr=1e-2)

train_loss = []
test_loss = []
for e in range(1000):
    pred = net(train_X)
    loss = criterion(pred, train_Y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (e + 1) % 100 == 0:
        print('Epoch:{},loss:{:.10f}'.format(e + 1, loss.item()))
        train_loss.append(loss.item())

plt.plot(train_loss, label='train_loss')
plt.legend()
plt.show()

# Predicted values vs. true values
net.eval()
with torch.no_grad():
    pred_test = net(test_X)                 # predictions on the test set
pred_test = pred_test.numpy() * scalar      # rescale back to the original range
test_Y = test_Y.reshape(-1, 1) * scalar     # rescale the true values
plt.plot(pred_test, label='predict')
plt.plot(test_Y, label='true')
plt.legend()
plt.show()
```
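As a small optional check (a sketch that continues the script above and assumes the rescaled `pred_test` and `test_Y` arrays it produces), the numerical test error can also be printed:
```
# pred_test and test_Y are rescaled numpy arrays at this point
test_mse = np.mean((pred_test.reshape(-1) - test_Y.reshape(-1)) ** 2)
print('Test MSE:', test_mse)
```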