帮我写一段神经网络代码
时间: 2023-09-03 16:26:27 浏览: 93
python神经网络学习分享-时梨.pdf
### 回答1:
# Neural-network code example: softmax regression on MNIST.
# NOTE(review): this is TensorFlow 1.x graph-mode code (tf.placeholder /
# tf.Session were removed in TF2; under TF2 it must run via tf.compat.v1
# with v2 behavior disabled).
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Hyperparameters — the original snippet used these names without ever
# defining them (training_epochs, batch_size, display_step, mnist), so it
# raised NameError as written.
learning_rate = 0.5
training_epochs = 25
batch_size = 100
display_step = 1

# Load MNIST with one-hot labels (fixes the undefined `mnist`).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Define inputs and targets.
x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 images
y = tf.placeholder(tf.float32, [None, 10])   # one-hot digit labels

# Define weights and bias.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Define the model: single-layer softmax classifier.
pred = tf.nn.softmax(tf.matmul(x, W) + b)

# Cross-entropy loss.
# NOTE(review): tf.log(pred) is -inf when pred hits exactly 0; production
# code should use a fused softmax-cross-entropy op or clip pred.
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))

# Plain gradient-descent optimizer.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Variable initializer.
init = tf.global_variables_initializer()

# Train.
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Running average of the per-batch loss over this epoch.
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print("Optimization Finished!")

    # Evaluate accuracy on the held-out test set.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
### 回答2:
神经网络是一种模拟人脑思维方式的计算机算法,用来处理复杂的数据和模式识别问题。下面是一个简单的神经网络示例代码:
import numpy as np
# Activation function
def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x); works elementwise on arrays."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
# Neural network class
class NeuralNetwork:
    """Minimal fully-connected network with one hidden layer.

    Architecture: input -> (W1, sigmoid) -> hidden -> (W2, sigmoid) -> output.
    No bias terms (matching the original design). Weights are drawn from a
    standard normal at construction. `forward` caches intermediate values on
    `self` for use by `backpropagation`.
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Initialize weights; no biases.
        self.W1 = np.random.randn(self.input_size, self.hidden_size)
        self.W2 = np.random.randn(self.hidden_size, self.output_size)

    @staticmethod
    def _sigmoid(x):
        # Private copy of the logistic sigmoid so the class is self-contained.
        return 1.0 / (1.0 + np.exp(-x))

    def forward(self, X):
        """Forward pass; returns predictions y_hat, caching intermediates.

        X: array of shape (n_samples, input_size).
        """
        self.z = np.dot(X, self.W1)          # hidden pre-activation
        self.z2 = self._sigmoid(self.z)      # hidden ACTIVATION (cached)
        self.z3 = np.dot(self.z2, self.W2)   # output pre-activation
        y_hat = self._sigmoid(self.z3)
        return y_hat

    def backpropagation(self, X, y, y_hat, learning_rate):
        """One gradient-descent step on the MSE loss.

        BUG FIX: the original computed the hidden-layer derivative as
        sigmoid(self.z2) * (1 - sigmoid(self.z2)), but self.z2 is ALREADY
        the sigmoid activation — applying sigmoid a second time gives a
        wrong gradient. The sigmoid derivative at activation a is a*(1-a),
        so the correct factors are y_hat*(1-y_hat) (since
        sigmoid(self.z3) == y_hat) and self.z2*(1-self.z2).
        """
        # Output-layer error term.
        delta3 = (y - y_hat) * y_hat * (1 - y_hat)
        dW2 = np.dot(self.z2.T, delta3)
        # Hidden-layer error term, backpropagated through W2.
        delta2 = np.dot(delta3, self.W2.T) * self.z2 * (1 - self.z2)
        dW1 = np.dot(X.T, delta2)
        # delta carries the sign of (y - y_hat), so += descends the MSE loss.
        self.W1 += learning_rate * dW1
        self.W2 += learning_rate * dW2
# Demo: train the network on XOR data and predict one sample.
if __name__ == "__main__":
    # Seed for reproducibility — random weight init otherwise makes every
    # run print a different prediction. Entry guard keeps `import` of this
    # module from running the training as a side effect.
    np.random.seed(42)

    # XOR truth table as training data.
    X = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])
    y = np.array([[1], [1], [0], [0]])

    input_size = 2
    hidden_size = 3
    output_size = 1
    learning_rate = 0.1
    epochs = 10000

    nn = NeuralNetwork(input_size, hidden_size, output_size)
    for epoch in range(epochs):
        y_hat = nn.forward(X)
        nn.backpropagation(X, y, y_hat, learning_rate)

    # Predict on a single sample.
    test = np.array([[1, 1]])
    prediction = nn.forward(test)
    print("Prediction:", prediction)
阅读全文