acc = a.ravel() == b.ravel()
acc.sum()
时间: 2024-04-25 20:22:47 浏览: 6
这段代码计算了两个数组a和b的元素逐个比较后相等的数量。首先,通过`ravel()`函数将两个数组a和b转化为一维数组,然后通过`==`运算符进行元素级别的比较,得到一个布尔数组。最后,使用`sum()`函数对布尔数组进行求和操作,即将True值(相等的元素)转化为1,False值(不相等的元素)转化为0。因此,`acc.sum()`将返回相等元素的数量。
相关问题
def Land_cover_pred_plot(array_folder, raster_file, reference_file, ML_algo, plot=False):
    """Train ``ML_algo`` on labelled land-cover samples and classify a whole tile.

    Parameters
    ----------
    array_folder : str
        Folder with the per-band arrays (forwarded to ``get_data_eval``).
    raster_file : str
        Raster tile to classify (forwarded to ``get_data_eval``).
    reference_file : str
        Reference/label file (forwarded to ``get_data_eval``).
    ML_algo : object
        Scikit-learn style estimator exposing ``fit`` / ``predict``.
    plot : bool, optional
        When True, also draw the predicted class map with a colorbar.

    Returns
    -------
    numpy.ndarray
        Predicted class map reshaped to the tile's 2-D shape.
    """
    # get_data_eval presumably returns the labelled training frame plus
    # (band_arrays, band_names) for the full tile — TODO confirm against caller.
    df_train, train_array = get_data_eval(array_folder, raster_file, reference_file)
    df_train = df_train.dropna()
    print(df_train)
    train_array = np.array(train_array, dtype=object)

    # Flatten every band into one column of a tile-wide feature frame;
    # NaNs are zeroed so the estimator can predict on every pixel.
    tile_df = pd.DataFrame()
    for i, band in enumerate(train_array[0]):
        tile_df[train_array[1][i]] = np.nan_to_num(band.ravel(), copy=False)

    X_train, X_test, y_train, y_test = train_test_split(
        df_train.drop('type', axis=1), df_train['type'], test_size=0.1)
    print(X_train)

    ML_algo.fit(X_train, y_train)
    test_pred = ML_algo.predict(X_test)
    confusion_mat = confusion_matrix(y_test, test_pred)
    classification_repo = classification_report(y_test, test_pred)
    test_acc = accuracy_score(y_test, test_pred)
    # Fixed typo in the printed label ("Matri" -> "Matrix").
    print("Confusion Matrix : \n", confusion_mat)
    print("Classification Report : \n", classification_repo)
    print("Accuracy on Test : ", test_acc)

    # Classify every pixel of the tile and restore the 2-D layout.
    pred_array = ML_algo.predict(tile_df)
    mask_array = np.reshape(pred_array, train_array[0][0].shape)

    # Per-class pixel counts; 'count' avoids shadowing the builtin 'sum'.
    class_sum = []
    for class_value in df_train['type'].unique():
        count = (mask_array == class_value).sum()
        class_sum.append([class_value, count])
    print(class_sum)
    print(mask_array)

    if plot:  # idiomatic truth test instead of 'plot == True'
        arr_f = np.array(mask_array, dtype=float)
        # Rotate/flip so the image displays in map orientation.
        arr_f = np.rot90(arr_f, axes=(-2, -1))
        arr_f = np.flip(arr_f, 0)
        plt.imshow(arr_f)
        plt.colorbar()
    return mask_array
该函数是一个用于地表覆盖预测和绘图的函数。它需要一个包含训练数据的文件夹路径,一个栅格文件和一个参考文件作为输入。它还需要一个机器学习算法和一个布尔值作为是否要绘制图表的标志。函数调用 get_data_eval 函数来获取训练数据,并使用 train_test_split 函数将其分成训练集和测试集。然后,使用机器学习算法来拟合训练数据,预测测试数据,并计算准确度、混淆矩阵和分类报告。最后,使用训练后的模型来预测栅格文件中的地表覆盖,并将结果绘制成图表(如果 plot 参数为 True)。函数返回预测结果的数组。
在SVM中,linear_svm.py、linear_classifier.py和svm.ipynb中相应的代码
linear_svm.py:
```python
import numpy as np
class LinearSVM:
    """Multiclass linear SVM trained with mini-batch SGD on the hinge loss."""

    def __init__(self, lr=0.01, reg=0.01, num_iters=1000, batch_size=32):
        self.lr = lr                  # SGD learning rate
        self.reg = reg                # L2 regularization strength
        self.num_iters = num_iters    # number of SGD steps
        self.batch_size = batch_size  # samples drawn per step
        self.W = None                 # (dim, num_classes) weights, lazily created
        self.b = None                 # (1, num_classes) biases, lazily created

    def train(self, X, y):
        """Run mini-batch SGD on (X, y); return the per-step loss history."""
        n_samples, n_features = X.shape
        n_classes = np.max(y) + 1
        if self.W is None:
            # Small random init keeps initial scores near zero.
            self.W = 0.001 * np.random.randn(n_features, n_classes)
            self.b = np.zeros((1, n_classes))
        history = []
        for _ in range(self.num_iters):
            idx = np.random.choice(n_samples, self.batch_size)
            loss, dW, db = self.loss(X[idx], y[idx])
            history.append(loss)
            self.W -= self.lr * dW
            self.b -= self.lr * db
        return history

    def predict(self, X):
        """Return the highest-scoring class index for each row of X."""
        return np.argmax(X.dot(self.W) + self.b, axis=1)

    def loss(self, X_batch, y_batch):
        """Hinge loss plus L2 penalty, with gradients, over one mini-batch."""
        n = X_batch.shape[0]
        rows = range(n)
        scores = X_batch.dot(self.W) + self.b
        correct = scores[rows, y_batch][:, np.newaxis]
        # Margin of 1 against every wrong class; the true class contributes 0.
        margins = np.maximum(0, scores - correct + 1)
        margins[rows, y_batch] = 0
        data_loss = np.sum(margins) / n
        reg_loss = 0.5 * self.reg * np.sum(self.W * self.W)
        # Gradient: +1 for each violated margin, -(violation count) at the
        # true class, averaged over the batch.
        dscores = (margins > 0).astype(scores.dtype)
        dscores[rows, y_batch] -= np.count_nonzero(margins > 0, axis=1)
        dscores /= n
        grad_W = X_batch.T.dot(dscores) + self.reg * self.W
        grad_b = dscores.sum(axis=0, keepdims=True)
        return data_loss + reg_loss, grad_W, grad_b
```
linear_classifier.py:
```python
import numpy as np
class LinearClassifier:
    """Linear classifier with a multiclass hinge loss, fit by mini-batch SGD."""

    def __init__(self, lr=0.01, reg=0.01, num_iters=1000, batch_size=32):
        # Hyper-parameters for SGD and regularization.
        self.lr = lr
        self.reg = reg
        self.num_iters = num_iters
        self.batch_size = batch_size
        # Parameters are allocated lazily on the first call to train().
        self.W = None
        self.b = None

    def train(self, X, y):
        """Optimize W and b on (X, y); return the list of batch losses."""
        num_train, dim = X.shape
        num_classes = np.max(y) + 1
        if self.W is None:
            self.W = 0.001 * np.random.randn(dim, num_classes)
            self.b = np.zeros((1, num_classes))
        loss_history = []
        step = 0
        while step < self.num_iters:
            batch = np.random.choice(num_train, self.batch_size)
            loss, grad_W, grad_b = self.loss(X[batch], y[batch])
            loss_history.append(loss)
            self.W = self.W - self.lr * grad_W
            self.b = self.b - self.lr * grad_b
            step += 1
        return loss_history

    def predict(self, X):
        """Predict the arg-max class label for each sample in X."""
        scores = X @ self.W + self.b
        return scores.argmax(axis=1)

    def loss(self, X_batch, y_batch):
        """Return (hinge loss + L2 penalty, dW, db) for one mini-batch."""
        batch_size = X_batch.shape[0]
        idx = np.arange(batch_size)
        scores = X_batch @ self.W + self.b
        correct = scores[idx, y_batch]
        # clip(..., 0, None) is max(0, .): hinge with a margin of 1.
        margins = np.clip(scores - correct[:, None] + 1, 0, None)
        margins[idx, y_batch] = 0
        hinge = margins.sum() / batch_size
        penalty = 0.5 * self.reg * np.sum(self.W ** 2)
        # Each violated margin pushes its class up and the true class down.
        positive = margins > 0
        dscores = positive.astype(float)
        dscores[idx, y_batch] -= positive.sum(axis=1)
        dscores /= batch_size
        grad_W = X_batch.T @ dscores + self.reg * self.W
        grad_b = dscores.sum(axis=0, keepdims=True)
        return hinge + penalty, grad_W, grad_b
```
svm.ipynb:
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_moons
from sklearn.model_selection import train_test_split
from linear_classifier import LinearClassifier
def plot_data(X, y, title):
    """Scatter-plot the first two features of X, colored by the labels y."""
    xs, ys = X[:, 0], X[:, 1]
    plt.scatter(xs, ys, c=y, cmap=plt.cm.RdBu)
    plt.title(title)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()
def plot_decision_boundary(clf, X, y, title):
    """Overlay the classifier's decision contour on a scatter of (X, y)."""
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdBu)
    axes = plt.gca()
    x_lo, x_hi = axes.get_xlim()
    y_lo, y_hi = axes.get_ylim()
    # Evaluate the classifier on a 100x100 grid spanning the current axes.
    XX, YY = np.meshgrid(np.linspace(x_lo, x_hi, 100),
                         np.linspace(y_lo, y_hi, 100))
    grid = np.column_stack([XX.ravel(), YY.ravel()])
    Z = clf.predict(grid).reshape(XX.shape)
    plt.contour(XX, YY, Z, levels=[0], colors='k', linestyles='-')
    plt.title(title)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()
def main():
    """Fit the linear classifier on a toy 2-class blob set and report accuracy."""
    X, y = make_blobs(n_samples=200, centers=2, random_state=42)
    plot_data(X, y, 'Linearly Separable Data')
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)
    model = LinearClassifier()
    model.train(X_train, y_train)
    train_acc = (model.predict(X_train) == y_train).mean()
    test_acc = (model.predict(X_test) == y_test).mean()
    print(f'Train accuracy: {train_acc:.3f}, Test accuracy: {test_acc:.3f}')
    plot_decision_boundary(model, X, y, 'Linear SVM')
if __name__ == '__main__':
    main()
```
以上的代码实现了一个简单的线性 SVM,可以用于二分类问题。在 `svm.ipynb` 文件中,我们使用 `make_blobs` 生成了一个线性可分的数据集,然后将其拆分为训练集和测试集。接着,我们使用 `LinearClassifier` 对训练集进行训练,并在测试集上评估模型性能。最后,我们绘制了模型的决策边界。