Please write code for a three-layer neural network with three input units, six hidden units, and one output unit
# Code
import numpy as np
# Input layer size
input_layer_size = 3
# Hidden layer size
hidden_layer_size = 6
# Output layer size
output_layer_size = 1
# Weights
W1 = np.random.randn(input_layer_size, hidden_layer_size)
W2 = np.random.randn(hidden_layer_size, output_layer_size)
# Biases
b1 = np.random.randn(hidden_layer_size)
b2 = np.random.randn(output_layer_size)
# A single input vector
X = np.random.randn(input_layer_size)
# Forward pass
z1 = np.dot(X, W1) + b1
a1 = np.tanh(z1)
z2 = np.dot(a1, W2) + b2
a2 = np.tanh(z2)
# Output
output = a2
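This snippet pushes a single input vector through the network, but the same weights handle a whole batch at once, since np.dot applies the matrix product row by row. A minimal sketch (X_batch is a made-up example, not part of the original answer):
# A batch of 4 input vectors, shape (4, 3)
X_batch = np.random.randn(4, input_layer_size)
# The same forward pass, applied to every row of the batch
h = np.tanh(np.dot(X_batch, W1) + b1)   # hidden activations, shape (4, 6)
out = np.tanh(np.dot(h, W2) + b2)       # outputs, shape (4, 1)
print(out.shape)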
Related questions
Please write code for a three-layer BP (backpropagation) neural network with three input units, six hidden units, and one output unit
# Code
import numpy as np
# Number of input, hidden, and output units
input_units = 3
hidden_units = 6
output_units = 1
# Learning rate
learning_rate = 0.5
# Initialize the input-to-hidden and hidden-to-output weights
w_in = np.random.randn(input_units, hidden_units)
w_out = np.random.randn(hidden_units, output_units)
# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# One gradient-descent step: forward pass, loss, backpropagation, weight update
def grad_descent(x, y, w_in, w_out, learning_rate):
    # Forward pass (the hidden layer needs the sigmoid activation too,
    # otherwise the derivative used in backpropagation below is wrong)
    z_in = np.dot(x, w_in)
    a_in = sigmoid(z_in)
    z_out = np.dot(a_in, w_out)
    a_out = sigmoid(z_out)
    # Mean squared error loss
    loss = 0.5 * np.mean(np.square(y - a_out))
    # Backpropagation: the sigmoid derivative is a * (1 - a)
    delta_out = (y - a_out) * a_out * (1 - a_out)
    delta_in = np.dot(delta_out, w_out.T) * a_in * (1 - a_in)
    # Update the weights (the deltas already carry the negative gradient)
    w_out += learning_rate * np.dot(a_in.T, delta_out)
    w_in += learning_rate * np.dot(x.T, delta_in)
    return w_in, w_out, loss
# Example training data (the original snippet left x and y undefined)
x = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 1], [0, 0, 0]])
y = np.array([[0, 1, 1, 0]]).T
# Train the model
for i in range(100):
    w_in, w_out, loss = grad_descent(x, y, w_in, w_out, learning_rate)
    if (i + 1) % 10 == 0:
        print("Iteration {}, loss: {}".format(i + 1, loss))
Please write a three-layer BP neural network in Python with three input units, six hidden units, and one output unit
import numpy as np
# Input data: 4 samples with 3 features each
X = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 1], [0, 0, 0]])
# Target outputs
y = np.array([[0, 1, 1, 0]]).T
# Initialize the weights
w1 = np.random.random((3, 6))
w2 = np.random.random((6, 1))
# Learning rate
learning_rate = 0.1
# Training loop: forward pass, loss, backpropagation, weight update
for epoch in range(1000):
    # Hidden layer
    z1 = np.dot(X, w1)
    a1 = np.tanh(z1)
    # Output layer
    z2 = np.dot(a1, w2)
    a2 = np.tanh(z2)
    # Mean squared error loss
    loss = np.mean((a2 - y) ** 2)
    # Backpropagation: the tanh derivative is 1 - tanh(x)^2, i.e. 1 - a^2
    dz2 = (a2 - y) * (1 - a2 ** 2)
    dw2 = np.dot(a1.T, dz2)
    dz1 = np.dot(dz2, w2.T) * (1 - a1 ** 2)
    dw1 = np.dot(X.T, dz1)
    # Update the weights
    w1 -= learning_rate * dw1
    w2 -= learning_rate * dw2
    # Report progress occasionally (the original computed loss but never used it)
    if (epoch + 1) % 100 == 0:
        print("Epoch {}, loss: {}".format(epoch + 1, loss))
# Print the network's predictions after training
print(a2)
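Because tanh outputs lie in (-1, 1), the values in a2 only approximate the 0/1 targets; a quick check, clipping and rounding the predictions to compare against y:
# Clip first (tanh can dip slightly below 0), then round to 0/1
pred = np.round(np.clip(a2, 0, 1))
print("Predictions:", pred.ravel())
print("Targets:   ", y.ravel())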