```
import numpy as np
import copy

def sigmod(x):
    return 1 / (1 + np.exp(-x))

def train(ary_input, ary_w, ary_b, ary_target, learning_rate=0.1):
    n = len(ary_input)
    # hidden layer
    ary_hidden_net = np.zeros(n)
    ary_hidden_out = np.zeros(n)
    ary_w_new = copy.deepcopy(ary_w)
    for i in range(n):
        ary_hidden_net[i] = 0
        for j in range(n):
            ary_hidden_net[i] += ary_w[i] * ary_input[j]
        ary_hidden_net[i] += ary_b[0]
        ary_hidden_out[i] = sigmod(ary_hidden_net[i])
    # output layer
    output_net = 0
    for i in range(n):
        output_net += ary_hidden_out[i] * ary_w[n][i]
    output_net += ary_b[1]
    output_out = sigmod(output_net)
    # backpropagation
    output_delta = (ary_target - output_out) * output_out * (1 - output_out)
    hidden_delta = np.zeros(n)
    for i in range(n):
        hidden_delta = output_delta * ary_w[n][i] * ary_hidden_out[i] * (1 - ary_hidden_out[i])
        for j in range(n):
            ary_w_new[i][j] += learning_rate * hidden_delta * ary_input[j]
        ary_w_new[n][i] += learning_rate * output_delta * ary_hidden_out[i]
    ary_b[0] += learning_rate * hidden_delta
    ary_b[1] += learning_rate * output_delta
    return ary_w_new, ary_b

# example
ary_input = [0.1, 0.2, 0.3]
ary_w = [
    [0.1, 0.2, 0.3],
    [0.2, 0.3, 0.4],
    [0.3, 0.4, 0.5],
    [0.4, 0.5, 0.6]
]
ary_b = [0.1, 0.2]
ary_target = [0.5, 0.5]

for i in range(1000):
    ary_w, ary_b = train(ary_input, ary_w, ary_b, ary_target)
print("Updated weights:", ary_w)
```
This looks like a simple one-hidden-layer neural network, but the hidden-layer deltas are not updated correctly in the training function: inside the backpropagation loop, `hidden_delta` is overwritten with a single scalar on every iteration, so the per-neuron array allocated with `np.zeros(n)` is never filled and only the last neuron's delta survives the loop. Store one delta per hidden neuron instead:
```
hidden_delta[i] = output_delta * ary_w[n][i] * ary_hidden_out[i] * (1 - ary_hidden_out[i])
```
In addition, once `hidden_delta` is kept as an array, the weight update inside the same loop must index it as well, otherwise it multiplies by the whole array instead of the delta of neuron `i`:
```
ary_w_new[i][j] += learning_rate * hidden_delta[i] * ary_input[j]
```
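The hidden-bias update after the loop then also has to change. Since `ary_b[0]` is a single bias shared by every hidden unit in this layout, one consistent choice (an assumption, the original does not spell this out) is to accumulate the per-neuron deltas:
```
ary_b[0] += learning_rate * hidden_delta.sum()  # shared hidden bias: sum of per-neuron deltas
```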
Finally, the dimensions at the output layer do not match the target: the network produces a single scalar `output_out`, but `ary_target` is a two-element list, so `(ary_target - output_out)` becomes a two-element array instead of a scalar and every subsequent update silently turns array-valued. With one output unit the target should be a single number, for example:
```
ary_target = 0.5
output_delta = (ary_target - output_out) * output_out * (1 - output_out)
```
Putting these changes together, the modified code looks as follows.
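What follows is a minimal sketch under a few assumptions rather than a drop-in replacement: a single hidden layer of `n` units, one output unit trained on a scalar target (`0.5` instead of the original two-element list), a shared hidden bias, and a forward pass that indexes the weight row as `ary_w[i][j]` (in the original, `ary_w[i] * ary_input[j]` multiplies a whole row by a scalar, an additional fix not discussed above).
```
import numpy as np
import copy

def sigmod(x):
    # sigmoid activation (name kept from the original code)
    return 1 / (1 + np.exp(-x))

def train(ary_input, ary_w, ary_b, ary_target, learning_rate=0.1):
    n = len(ary_input)

    # forward pass: hidden layer
    ary_hidden_net = np.zeros(n)
    ary_hidden_out = np.zeros(n)
    ary_w_new = copy.deepcopy(ary_w)
    for i in range(n):
        for j in range(n):
            ary_hidden_net[i] += ary_w[i][j] * ary_input[j]  # index row i, column j
        ary_hidden_net[i] += ary_b[0]
        ary_hidden_out[i] = sigmod(ary_hidden_net[i])

    # forward pass: single output unit, weights stored in row n
    output_net = 0.0
    for i in range(n):
        output_net += ary_hidden_out[i] * ary_w[n][i]
    output_net += ary_b[1]
    output_out = sigmod(output_net)

    # backpropagation for squared error with sigmoid activations
    output_delta = (ary_target - output_out) * output_out * (1 - output_out)
    hidden_delta = np.zeros(n)
    for i in range(n):
        # one delta per hidden neuron
        hidden_delta[i] = output_delta * ary_w[n][i] * ary_hidden_out[i] * (1 - ary_hidden_out[i])
        for j in range(n):
            ary_w_new[i][j] += learning_rate * hidden_delta[i] * ary_input[j]
        ary_w_new[n][i] += learning_rate * output_delta * ary_hidden_out[i]
    # shared hidden bias: accumulate all per-neuron deltas
    ary_b[0] += learning_rate * hidden_delta.sum()
    ary_b[1] += learning_rate * output_delta
    return ary_w_new, ary_b

# example: scalar target, since the network has a single output unit
ary_input = [0.1, 0.2, 0.3]
ary_w = [
    [0.1, 0.2, 0.3],
    [0.2, 0.3, 0.4],
    [0.3, 0.4, 0.5],
    [0.4, 0.5, 0.6]
]
ary_b = [0.1, 0.2]
ary_target = 0.5

for _ in range(1000):
    ary_w, ary_b = train(ary_input, ary_w, ary_b, ary_target)
print("Updated weights:", ary_w)
```
The loop-based layout of the original is kept on purpose so each change maps back to the points above; a vectorized NumPy version would be shorter, but that is not what the question is about.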