BP neural network clustering example, worked in Python
Below is a Python example in which KMeans produces cluster labels and a BP (backpropagation) neural network is then trained to predict those labels:
First, we import the required libraries, numpy and scikit-learn:
```python
import numpy as np
from sklearn.cluster import KMeans
```
Next, we define a simple BP neural network class with one hidden layer:
```python
class BPNN:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Randomly initialize the weights and biases of both layers
        self.weights1 = np.random.randn(self.input_size, self.hidden_size)
        self.bias1 = np.random.randn(self.hidden_size)
        self.weights2 = np.random.randn(self.hidden_size, self.output_size)
        self.bias2 = np.random.randn(self.output_size)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def forward(self, x):
        # Input -> hidden -> output, with sigmoid activations on both layers
        z1 = np.dot(x, self.weights1) + self.bias1
        a1 = self.sigmoid(z1)
        z2 = np.dot(a1, self.weights2) + self.bias2
        a2 = self.sigmoid(z2)
        return a2

    def loss(self, x, y):
        # Sum-of-squares error between predictions and targets
        y_hat = self.forward(x)
        return np.sum((y - y_hat) ** 2)

    def train(self, x, y, lr):
        # Forward pass
        z1 = np.dot(x, self.weights1) + self.bias1
        a1 = self.sigmoid(z1)
        z2 = np.dot(a1, self.weights2) + self.bias2
        a2 = self.sigmoid(z2)
        # Backward pass: error terms for the output and hidden layers
        delta2 = (a2 - y) * a2 * (1 - a2)
        delta1 = np.dot(delta2, self.weights2.T) * a1 * (1 - a1)
        # Gradient-descent updates
        self.weights2 -= lr * np.dot(a1.T, delta2)
        self.bias2 -= lr * np.sum(delta2, axis=0)
        self.weights1 -= lr * np.dot(x.T, delta1)
        self.bias1 -= lr * np.sum(delta1, axis=0)
```
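The class performs full-batch gradient descent on a sum-of-squares loss with sigmoid outputs, so each row of the output can be read as per-cluster scores. A quick, hypothetical sanity check of the output shape (not part of the original example) might look like this:

```python
# Hypothetical shape check: 4 samples with 2 features should give a (4, 5) output
net = BPNN(input_size=2, hidden_size=10, output_size=5)
dummy = np.random.randn(4, 2)
print(net.forward(dummy).shape)  # expected: (4, 5)
```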
Next, we generate some random data and standardize it (KMeans is applied in the following step):
```python
data = np.random.randn(100, 2)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
```
Then, we use the KMeans algorithm to group the data into 5 clusters:
```python
kmeans = KMeans(n_clusters=5)
kmeans.fit(data)
labels = kmeans.labels_
```
Next, we convert each data point's cluster label into a one-hot vector:
```python
one_hot_labels = np.zeros((len(labels), 5))
for i, label in enumerate(labels):
    one_hot_labels[i, label] = 1
```
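Assuming the labels are the integers 0 through 4 (as KMeans produces here), the same encoding can be written more compactly with NumPy indexing:

```python
# Equivalent one-hot encoding via an identity-matrix lookup
one_hot_labels = np.eye(5)[labels]
```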
Then, we split the dataset into a training set and a test set:
```python
train_data = data[:80]
train_labels = one_hot_labels[:80]
test_data = data[80:]
test_labels = one_hot_labels[80:]
```
Finally, we train a BP neural network and predict labels for the test data:
```python
bpnn = BPNN(input_size=2, hidden_size=10, output_size=5)
for i in range(1000):
    bpnn.train(train_data, train_labels, lr=0.1)
predictions = bpnn.forward(test_data)
predicted_labels = np.argmax(predictions, axis=1)
```
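To see how closely the network reproduces the KMeans assignments, the predicted labels can be compared with the held-out one-hot labels. This is a minimal sketch, not part of the original example:

```python
# Fraction of test points whose predicted cluster matches the KMeans label
accuracy = np.mean(predicted_labels == np.argmax(test_labels, axis=1))
print("Test accuracy vs. KMeans labels:", accuracy)
```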
The complete code is as follows:
```python
import numpy as np
from sklearn.cluster import KMeans

class BPNN:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.weights1 = np.random.randn(self.input_size, self.hidden_size)
        self.bias1 = np.random.randn(self.hidden_size)
        self.weights2 = np.random.randn(self.hidden_size, self.output_size)
        self.bias2 = np.random.randn(self.output_size)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def forward(self, x):
        z1 = np.dot(x, self.weights1) + self.bias1
        a1 = self.sigmoid(z1)
        z2 = np.dot(a1, self.weights2) + self.bias2
        a2 = self.sigmoid(z2)
        return a2

    def loss(self, x, y):
        y_hat = self.forward(x)
        return np.sum((y - y_hat) ** 2)

    def train(self, x, y, lr):
        z1 = np.dot(x, self.weights1) + self.bias1
        a1 = self.sigmoid(z1)
        z2 = np.dot(a1, self.weights2) + self.bias2
        a2 = self.sigmoid(z2)
        delta2 = (a2 - y) * a2 * (1 - a2)
        delta1 = np.dot(delta2, self.weights2.T) * a1 * (1 - a1)
        self.weights2 -= lr * np.dot(a1.T, delta2)
        self.bias2 -= lr * np.sum(delta2, axis=0)
        self.weights1 -= lr * np.dot(x.T, delta1)
        self.bias1 -= lr * np.sum(delta1, axis=0)

# Generate and standardize random data
data = np.random.randn(100, 2)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)

# Cluster the data into 5 clusters with KMeans
kmeans = KMeans(n_clusters=5)
kmeans.fit(data)
labels = kmeans.labels_

# One-hot encode the cluster labels
one_hot_labels = np.zeros((len(labels), 5))
for i, label in enumerate(labels):
    one_hot_labels[i, label] = 1

# Train/test split
train_data = data[:80]
train_labels = one_hot_labels[:80]
test_data = data[80:]
test_labels = one_hot_labels[80:]

# Train the network and predict on the test set
bpnn = BPNN(input_size=2, hidden_size=10, output_size=5)
for i in range(1000):
    bpnn.train(train_data, train_labels, lr=0.1)
predictions = bpnn.forward(test_data)
predicted_labels = np.argmax(predictions, axis=1)

print("True Labels: ", np.argmax(test_labels, axis=1))
print("Predicted Labels: ", predicted_labels)
```
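One caveat: both the data generation and the weight initialization are random, so results vary between runs. If reproducibility matters, a seed can be set before generating the data; this is an optional addition, not part of the original code:

```python
# Optional: fix the NumPy random seed for reproducible data and weights
np.random.seed(0)
```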