Python code for fitting the function y = x^2 + 2x - 3 with a five-layer fully connected neural network
### Answer 1:
Note: this answer fits a degree-4 polynomial with five learnable weights rather than a true five-layer fully connected network (Answers 2 and 3 build actual multi-layer models).

```python
import numpy as np
import torch

# Sample the target function y = x**2 + 2*x - 3 on [-3, 3)
x = np.arange(-3, 3, 0.1)
y = x**2 + 2*x - 3

# Convert to tensors (torch.autograd.Variable is deprecated; plain
# tensors with requires_grad=True are the modern equivalent)
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).float()

# Five learnable coefficients of a degree-4 polynomial
w1 = torch.tensor([1.0], requires_grad=True)
w2 = torch.tensor([1.0], requires_grad=True)
w3 = torch.tensor([1.0], requires_grad=True)
w4 = torch.tensor([1.0], requires_grad=True)
w5 = torch.tensor([1.0], requires_grad=True)

def forward(x):
    return w1*x**4 + w2*x**3 + w3*x**2 + w4*x + w5

# Loss function: mean squared error
def loss(y, y_pred):
    return torch.mean((y_pred - y)**2)

# Optimizer: the x**4 feature is large on [-3, 3], so a small learning
# rate is needed to keep plain SGD from diverging
optimizer = torch.optim.SGD([w1, w2, w3, w4, w5], lr=1e-4)

# Training
for epoch in range(2000):
    y_pred = forward(x)     # forward pass
    l = loss(y, y_pred)     # compute loss
    optimizer.zero_grad()   # zero the gradients
    l.backward()            # backward pass
    optimizer.step()        # update the parameters

# Final prediction
y_pred = forward(x)
```
print("import numpy as np")
print("x = np.arange(-3,3,0.1)")
print("y = x**2*2*x - 3")
print("import torch")
print("from torch.autograd import Variable")
print("w1 = Variable(torch.Tensor([1]), requires_grad=True)")
print("w2 = Variable(torch.Tensor([1]), requires_grad=True)")
print("w3 = Variable(torch.Tensor([1]), requires_grad=True)")
print("w4 = Variable(torch.Tensor([1]), requires_grad=True)")
print("w5 = Variable(torch.Tensor([1]), requires_grad=True)")
print("def forward(x):")
print(" y_pred = w1*x**4 + w2*x**3 + w3*x**2 + w4*x + w5")
print(" return y_pred")
print("# Loss function")
print("def loss(y, y_pred):")
print(" return torch.mean((y_pred - y)**2)")
print("# Optimizer")
print("optimizer = torch.optim.SGD([w1, w2, w3, w4, w5], lr=0.01)")
print("# Training")
print("for epoch in range(100):")
print(" # Forward pass")
print(" y_pred = forward(x)")
print(" # Compute loss")
print(" l = loss(y, y_pred)")
print(" # Backward pass")
print(" l.backward()")
print(" # Update the parameters")
print(" optimizer.step()")
print(" # Zero the gradients")
print(" optimizer.zero_grad()")
print("# Final prediction")
print("y_pred = forward(x)")
### Answer 2:
A from-scratch NumPy implementation. The hidden layers use a sigmoid activation; the output layer is kept linear so the network can reach the unbounded values of y = x^2 + 2x - 3.

```python
import numpy as np
import matplotlib.pyplot as plt

# Sigmoid activation function for the hidden layers
def activation(x):
    return 1 / (1 + np.exp(-x))

# Derivative of the sigmoid, used during backpropagation
def derivative_activation(x):
    return activation(x) * (1 - activation(x))
```
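The identity sigmoid'(x) = sigmoid(x)(1 - sigmoid(x)) can be spot-checked numerically; this quick central-difference test is an addition, not part of the original answer:

```python
# Added sanity check: compare the analytic derivative with a
# central finite difference at an arbitrary test point.
h, x0 = 1e-5, 0.3
numeric = (activation(x0 + h) - activation(x0 - h)) / (2 * h)
print(abs(numeric - derivative_activation(x0)) < 1e-8)  # expected: True
```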
```python
# Define the neural network class. Hidden layers use the sigmoid;
# the output layer is linear so predictions are not squashed to (0, 1).
class NeuralNetwork:
    def __init__(self, layers):
        self.layers = layers
        # Small random weights; zero biases shaped (1, n) so the
        # per-sample deltas broadcast correctly during the update
        self.weights = [np.random.randn(layers[i], layers[i+1]) * 0.1
                        for i in range(len(layers)-1)]
        self.biases = [np.zeros((1, layers[i+1])) for i in range(len(layers)-1)]

    def forward_propagation(self, x):
        self.a = [np.atleast_2d(x)]
        self.z = []
        for i in range(len(self.layers) - 1):
            self.z.append(np.dot(self.a[-1], self.weights[i]) + self.biases[i])
            if i < len(self.layers) - 2:
                self.a.append(activation(self.z[-1]))  # hidden layer
            else:
                self.a.append(self.z[-1])              # linear output
        return self.a[-1]

    def back_propagation(self, x, y, learning_rate):
        # The output layer is linear, so its delta carries no activation derivative
        delta = 2 * (self.a[-1] - y)
        nabla_w = [np.zeros_like(w) for w in self.weights]
        nabla_b = [np.zeros_like(b) for b in self.biases]
        nabla_w[-1] = np.dot(self.a[-2].T, delta)
        nabla_b[-1] = delta
        for i in range(len(self.layers) - 3, -1, -1):
            delta = np.dot(delta, self.weights[i+1].T) * derivative_activation(self.z[i])
            nabla_w[i] = np.dot(self.a[i].T, delta)
            nabla_b[i] = delta
        for i in range(len(self.layers) - 1):
            self.weights[i] -= learning_rate * nabla_w[i]
            self.biases[i] -= learning_rate * nabla_b[i]

    def train(self, x_train, y_train, epochs, learning_rate):
        for epoch in range(epochs):
            for x, y in zip(x_train, y_train):
                self.forward_propagation(x)  # stores activations for backprop
                self.back_propagation(x, y, learning_rate)

    def predict(self, x):
        return self.forward_propagation(x)
```
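A quick shape check (illustrative, not part of the original answer) confirms how a scalar input flows through the network:

```python
# Hypothetical usage example: a [1, 5, 5, 5, 5, 1] network maps a
# scalar input to a (1, 1) array (one sample, one output).
net = NeuralNetwork([1, 5, 5, 5, 5, 1])
print(net.forward_propagation(0.5).shape)  # (1, 1)
```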
```python
# Prepare the training data
x_train = np.linspace(-10, 10, 100)
y_train = np.square(x_train) + 2 * x_train - 3

# Create the network and train it
nn = NeuralNetwork([1, 5, 5, 5, 5, 1])
nn.train(x_train, y_train, epochs=10000, learning_rate=0.001)

# Prepare the test data
x_test = np.linspace(-10, 10, 100)
y_test = np.square(x_test) + 2 * x_test - 3

# Predict with the network
y_pred = np.zeros_like(x_test)
for i, x in enumerate(x_test):
    y_pred[i] = nn.predict(x).item()

# Plot the fitted curve
plt.plot(x_train, y_train, 'bo', label='Training data')
plt.plot(x_test, y_test, 'g-', label='True data')
plt.plot(x_test, y_pred, 'r-', label='Predicted data')
plt.legend()
plt.show()
```
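Because sigmoid units saturate for inputs far from zero, training directly on x in [-10, 10] with targets up to about 117 converges slowly. A common remedy, sketched here as an optional addition (not part of the original answer), is to rescale the inputs and standardize the targets:

```python
# Optional preprocessing sketch: scale x to [-1, 1] and standardize y,
# then undo the target scaling after prediction.
x_scaled = x_train / 10.0
y_mean, y_std = y_train.mean(), y_train.std()
y_scaled = (y_train - y_mean) / y_std

nn2 = NeuralNetwork([1, 5, 5, 5, 5, 1])
nn2.train(x_scaled, y_scaled, epochs=10000, learning_rate=0.01)

# Map predictions back to the original target scale
y_hat = np.array([nn2.predict(xs).item() for xs in x_scaled]) * y_std + y_mean
```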
### Answer 3:
A PyTorch implementation with five fully connected (Linear) layers.

```python
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# Define the data
x = np.linspace(-10, 10, 100)
y = x**2 + 2*x - 3

# Convert the data to tensors
x_tensor = torch.from_numpy(x).float()
y_tensor = torch.from_numpy(y).float()
```
```python
# Define the model: five fully connected layers with ReLU in between
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(1, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, 20)
        self.fc4 = nn.Linear(20, 10)
        self.fc5 = nn.Linear(10, 1)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = torch.relu(self.fc3(x))
        x = torch.relu(self.fc4(x))
        x = self.fc5(x)
        return x
```
```python
# Initialize the network
model = Net()

# Loss function and optimizer; Adam handles the large, unnormalized
# targets more stably than plain SGD at this learning rate
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Train the network
for epoch in range(1000):
    # forward
    output = model(x_tensor.unsqueeze(1))
    loss = criterion(output.squeeze(), y_tensor)
    # backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```
```python
# Plot the fitted curve
x_test = np.linspace(-10, 10, 100)
x_test_tensor = torch.from_numpy(x_test).float()
y_pred = model(x_test_tensor.unsqueeze(1)).detach().numpy().flatten()

plt.plot(x, y, label='ground truth')
plt.plot(x_test, y_pred, label='fitted curve')
plt.legend()
plt.show()
```
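To quantify the fit numerically (an addition; the names follow the code above), compare the prediction with the ground truth on the same grid:

```python
# Added check: mean squared error of the fitted curve
mse = np.mean((y_pred - y)**2)
print(f"test MSE: {mse:.4f}")
```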