给我一个神经网络模型的python代码
时间: 2023-09-09 13:05:31 浏览: 81
### 回答1:
我可以提供一个神经网络模型的Python代码示例:

import numpy as np
# NOTE: this example uses the TensorFlow 1.x API (tf.placeholder,
# InteractiveSession); on TF 2.x it requires tf.compat.v1 with eager
# execution disabled.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset (the original snippet referenced `mnist`
# without ever defining it).
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Build the graph: a single-layer softmax classifier over 28x28 images.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Define the cross-entropy loss and a gradient-descent optimizer.
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Train with mini-batches of 100 examples.
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Evaluate accuracy on the test set.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
### 回答2:
以下是一个简单的神经网络模型的 Python 代码示例:
```python
import numpy as np
# 定义神经网络类
class NeuralNetwork:
    """A 2-layer fully-connected network with sigmoid activations.

    Trained by full-batch gradient descent; the backward pass uses the
    raw error ``y - output`` (an MSE-style gradient without the 1/N
    factor), so the weight updates use ``+=``.
    """

    def __init__(self, input_size, hidden_size, output_size):
        # Gaussian-initialized weights, zero-initialized biases.
        self.weights1 = np.random.randn(input_size, hidden_size)
        self.bias1 = np.zeros((1, hidden_size))
        self.weights2 = np.random.randn(hidden_size, output_size)
        self.bias2 = np.zeros((1, output_size))

    def forward(self, X):
        """Forward pass; caches activations (a1, a2) for backward()."""
        self.z1 = np.dot(X, self.weights1) + self.bias1
        self.a1 = self.sigmoid(self.z1)
        self.z2 = np.dot(self.a1, self.weights2) + self.bias2
        self.a2 = self.sigmoid(self.z2)
        return self.a2

    def sigmoid(self, x):
        """Logistic function: 1 / (1 + e^-x), maps reals to (0, 1)."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid, expressed via its OUTPUT value x."""
        return x * (1 - x)

    def backward(self, X, y, output, learning_rate):
        """Backpropagate the error and update weights/biases in place."""
        self.error = y - output
        self.delta2 = self.error * self.sigmoid_derivative(output)
        self.error_hidden = np.dot(self.delta2, self.weights2.T)
        self.delta1 = self.error_hidden * self.sigmoid_derivative(self.a1)
        # `+=` because the sign of the gradient is folded into `error`.
        self.weights2 += np.dot(self.a1.T, self.delta2) * learning_rate
        self.bias2 += np.sum(self.delta2, axis=0, keepdims=True) * learning_rate
        self.weights1 += np.dot(X.T, self.delta1) * learning_rate
        self.bias1 += np.sum(self.delta1, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        """Run `epochs` iterations of full-batch gradient descent."""
        for epoch in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output, learning_rate)
# Training data: the XOR truth table.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Build a 2-4-1 network and train it on XOR.
model = NeuralNetwork(2, 4, 1)
model.train(X, y, epochs=10000, learning_rate=1)

# Print the trained network's predictions for each input row.
output = model.forward(X)
print("预测结果:")
print(output)
```
这个代码示例是一个简单的两层神经网络模型,用于解决 XOR 问题。其中包含了前向传播、反向传播和训练过程。训练数据为 XOR 的输入和输出。代码通过计算预测输出并与真实输出比较来进行训练和测试,并打印出最终的预测结果。
### 回答3:
以下是一个简单的神经网络模型的Python代码示例:
```python
import numpy as np
def sigmoid(x):
    """Logistic activation: map any real (or NumPy array) x into (0, 1)."""
    return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
    """Sigmoid derivative, given x = sigmoid(z) (the activation VALUE, not z)."""
    return x * (1 - x)
class NeuralNetwork:
    """A minimal one-hidden-layer neural network WITHOUT bias terms.

    Relies on the module-level `sigmoid` / `sigmoid_derivative` helpers
    and is trained by full-batch backpropagation on the `y - output`
    error (sign folded into the `+=` weight updates).
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Gaussian-initialized weight matrices; note: no biases.
        self.weights1 = np.random.randn(self.input_size, self.hidden_size)
        self.weights2 = np.random.randn(self.hidden_size, self.output_size)

    def forward(self, X):
        """Forward pass; caches hidden activations for backward()."""
        self.hidden_layer = sigmoid(np.dot(X, self.weights1))
        self.output_layer = sigmoid(np.dot(self.hidden_layer, self.weights2))
        return self.output_layer

    def backward(self, X, y, output, learning_rate):
        """Backpropagate the error and update both weight matrices in place."""
        self.output_error = y - output
        self.output_delta = self.output_error * sigmoid_derivative(output)
        self.hidden_error = np.dot(self.output_delta, self.weights2.T)
        self.hidden_delta = self.hidden_error * sigmoid_derivative(self.hidden_layer)
        self.weights2 += np.dot(self.hidden_layer.T, self.output_delta) * learning_rate
        self.weights1 += np.dot(X.T, self.hidden_delta) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        """Run `epochs` iterations of full-batch gradient descent."""
        for i in range(epochs):
            output = self.forward(X)
            self.backward(X, y, output, learning_rate)

    def predict(self, X):
        """Return the network's output for X (alias for forward())."""
        return self.forward(X)
# Example usage: train the network on the XOR truth table.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

model = NeuralNetwork(input_size=2, hidden_size=4, output_size=1)
model.train(X, y, epochs=1000, learning_rate=0.1)

# Print the trained network's predictions for each input row.
print("预测结果:")
print(model.predict(X))
```
这个代码实现了一个包含一个隐藏层的简单神经网络。它使用Sigmoid作为激活函数,并通过反向传播算法来进行训练。在实例化神经网络时,需要指定输入层、隐藏层和输出层的大小。然后可以使用`train`方法进行训练,并使用`predict`方法进行预测。在示例中,使用4个训练样本来实现异或逻辑运算。