用python写一个用一层神经网络和relu函数来拟合x的平方的代码
时间: 2024-02-13 22:01:12 浏览: 172
基于Python神经网络实现在数据拟合中应用(源码+数据).rar
5星 · 资源好评率100%
以下是用一层神经网络和ReLU函数来拟合x的平方的Python代码示例:
```python
import numpy as np
import matplotlib.pyplot as plt
# Training data: 100 evenly spaced points of y = x^2 on [-1, 1],
# shaped (n, 1) so they feed directly into the linear layer.
n = 100
x = np.linspace(-1, 1, n)[:, np.newaxis]
y = x ** 2
# Fully-connected (affine) layer.
class LinearLayer:
    """Affine layer: output = input @ W + b.

    Caches the forward input so `backward` can compute the weight gradient.
    """

    def __init__(self, input_dim, output_dim):
        # Small random weights break symmetry; biases start at zero.
        self.W = np.random.randn(input_dim, output_dim) * 0.01
        self.b = np.zeros((1, output_dim))
        self.input = None   # cached forward input, required by backward()
        self.output = None
        self.dW = None      # gradient of loss w.r.t. W
        self.db = None      # gradient of loss w.r.t. b

    def forward(self, input):
        """Compute the affine transform and cache the input.

        Bug fix: the original never stored `self.input`, so backward()
        raised AttributeError on its first call.
        """
        self.input = input
        self.output = np.dot(input, self.W) + self.b
        return self.output

    def backward(self, dout):
        """Given upstream gradient `dout`, fill dW/db and return d(input)."""
        self.dW = np.dot(self.input.T, dout)
        self.db = np.sum(dout, axis=0, keepdims=True)
        dinput = np.dot(dout, self.W.T)
        return dinput
# ReLU activation.
class ReLU:
    """Element-wise ReLU: max(0, x), with matching backward mask."""

    def __init__(self):
        self.mask = None  # boolean mask of non-positive forward inputs

    def forward(self, input):
        """Zero out non-positive entries; remember which ones for backward."""
        self.mask = (input <= 0)
        output = input.copy()
        output[self.mask] = 0
        return output

    def backward(self, dout):
        """Pass gradient through only where the forward input was positive.

        Bug fix: the original zeroed entries of `dout` in place, mutating
        the caller's gradient array; copy first so the caller's data is
        left intact.
        """
        dinput = dout.copy()
        dinput[self.mask] = 0
        return dinput
# One-hidden-layer network: linear -> ReLU -> linear.
class NeuralNet:
    """Two-layer MLP built from LinearLayer and ReLU components."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        self.linear1 = LinearLayer(input_dim, hidden_dim)
        self.relu = ReLU()
        self.linear2 = LinearLayer(hidden_dim, output_dim)

    def forward(self, input):
        """Run the input through linear1, ReLU, then linear2."""
        hidden = self.relu.forward(self.linear1.forward(input))
        return self.linear2.forward(hidden)

    def backward(self, dout):
        """Propagate the upstream gradient back through the layers."""
        grad = self.linear2.backward(dout)
        grad = self.relu.backward(grad)
        return self.linear1.backward(grad)
# Mean-squared-error loss.
class Loss:
    """MSE loss; caches the gradient w.r.t. predictions for backward()."""

    def __init__(self):
        self.loss = None
        self.dout = None

    def mean_squared_error(self, y_pred, y_true):
        """Return mean((y_pred - y_true)^2) and cache its gradient."""
        diff = y_pred - y_true
        self.loss = np.mean(diff ** 2)
        # d(mean(diff^2)) / d(y_pred), averaged over the batch dimension.
        self.dout = 2.0 * diff / y_pred.shape[0]
        return self.loss

    def backward(self):
        """Return the gradient cached by the most recent loss evaluation."""
        return self.dout
# Hyper-parameters.
input_dim = 1
hidden_dim = 10
output_dim = 1
learning_rate = 0.1
num_epochs = 1000

# Model and loss objects.
net = NeuralNet(input_dim, hidden_dim, output_dim)
loss_fn = Loss()

# Plain gradient-descent training loop.
for epoch in range(num_epochs):
    # Forward pass and loss.
    y_pred = net.forward(x)
    loss = loss_fn.mean_squared_error(y_pred, y)
    # Backward pass fills each layer's dW/db.
    net.backward(loss_fn.backward())
    # Parameter update for both linear layers.
    for layer in (net.linear1, net.linear2):
        layer.W -= learning_rate * layer.dW
        layer.b -= learning_rate * layer.db
    # Report the loss every 100 epochs.
    if (epoch + 1) % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss))

# Visualize the fit: training points vs. the network's prediction.
y_pred = net.forward(x)
plt.scatter(x, y, marker='.')
plt.plot(x, y_pred, color='r')
plt.show()
```
在上述代码中,我们首先生成了100个训练数据,然后定义了一层神经网络类和ReLU类,以及神经网络模型类和损失函数类。接着,我们设置了模型的超参数,包括输入维度、隐藏层维度、输出维度、学习率和迭代次数等。然后,我们创建了神经网络模型和损失函数对象,并使用训练数据进行模型训练。最后,我们对模型进行了测试,并绘制了拟合曲线。
阅读全文