def des_distance(deep_des1, deep_des2):
    """Return the per-row RMSE between two equally-shaped descriptor arrays.

    Parameters
    ----------
    deep_des1, deep_des2 : np.ndarray, shape (n, d)
        Descriptor matrices; row i of one is compared with row i of the other.

    Returns
    -------
    np.ndarray, shape (n,)
        Root-mean-squared error of each row pair.
    """
    error = deep_des1 - deep_des2
    # BUG FIX: the original divided the Euclidean norm by the number of rows
    # (shape[0]) *outside* the sqrt, which is neither a norm nor an RMSE.
    # True RMSE averages the squared error over the feature dimension
    # (shape[1]) inside the sqrt — the same formula deep_match()/match() use.
    return np.sqrt(np.sum(np.square(error), axis=1) / float(deep_des1.shape[1]))


def deep_match(kp1_location, kp2_location, deep_des1, deep_des2, ratio):
    """Match keypoints by deep descriptors using Lowe's ratio test.

    For every descriptor in deep_des1 the RMSE to all descriptors in
    deep_des2 is computed; the match is kept only if the best RMSE is
    smaller than `ratio` times the second-best RMSE.

    Parameters
    ----------
    kp1_location, kp2_location : sequence of (x, y) pairs
        Keypoint coordinates for image 1 and image 2.
    deep_des1, deep_des2 : np.ndarray, shape (n1, d) and (n2, d)
        Descriptor matrices.
    ratio : float
        Lowe's ratio-test threshold (e.g. 0.7-0.8).

    Returns
    -------
    (list, list)
        Matched coordinate tuples for image 1 and image 2, respectively.
    """
    deep_kp1 = []
    deep_kp2 = []
    # Robustness: the ratio test needs at least two candidates; the original
    # raised IndexError on small_index[1] when deep_des2 had fewer rows.
    if deep_des2.shape[0] < 2:
        return deep_kp1, deep_kp2
    for i in range(deep_des1.shape[0]):
        # Broadcasting replaces the original np.tile copy.
        error = deep_des1[i] - deep_des2
        RMSE = np.sqrt(np.sum(np.square(error), axis=1) / float(error.shape[1]))
        small_index = np.argsort(RMSE, axis=0)
        if RMSE[small_index[0]] < RMSE[small_index[1]] * ratio:
            deep_kp1.append((kp1_location[i][0], kp1_location[i][1]))
            deep_kp2.append((kp2_location[small_index[0]][0],
                             kp2_location[small_index[0]][1]))
    return deep_kp1, deep_kp2


# match sift keypoints
def match(kp1_location, kp2_location, deep_des1, deep_des2, ratio):
    """Match SIFT keypoints with the same ratio test as deep_match().

    Accepts array-likes for the descriptors.  The original wrapped them in
    the deprecated np.matrix and then needed 2-D indexing (RMSE[idx, 0]);
    np.asarray gives identical matches with plain 1-D indexing.

    Parameters and return value are the same as deep_match().
    """
    deep_kp1 = []
    deep_kp2 = []
    des1 = np.asarray(deep_des1)
    des2 = np.asarray(deep_des2)
    if des2.shape[0] < 2:
        return deep_kp1, deep_kp2
    for i in range(des1.shape[0]):
        error = des1[i] - des2
        RMSE = np.sqrt(np.sum(np.square(error), axis=1) / float(error.shape[1]))
        small_index = np.argsort(RMSE, axis=0)
        if RMSE[small_index[0]] < RMSE[small_index[1]] * ratio:
            deep_kp1.append((kp1_location[i][0], kp1_location[i][1]))
            deep_kp2.append((kp2_location[small_index[0]][0],
                             kp2_location[small_index[0]][1]))
    return deep_kp1, deep_kp2


def delete_duplications(kp1, kp2, des1, des2):
    """Drop later duplicate keypoint rows (and their descriptors) from each set.

    The first occurrence of each keypoint is kept; any later row equal to an
    earlier one is removed, together with the descriptor at the same index.

    Parameters
    ----------
    kp1, kp2 : np.ndarray, shape (n, 2)
        Keypoint coordinate arrays.
    des1, des2 : np.ndarray, shape (n, d)
        Descriptor arrays aligned row-for-row with kp1/kp2.

    Returns
    -------
    (kp1_, kp2_, des1_, des2_)
        De-duplicated copies of the four inputs.
    """
    def _duplicate_rows(arr):
        # Indices of rows that repeat an earlier row.  j always starts at
        # i + 1, so the original extra `i != j` test was redundant.
        dup = []
        for i in range(arr.shape[0]):
            for j in range(i + 1, arr.shape[0]):
                if (arr[i] == arr[j]).all():
                    dup.append(j)
        return list(set(dup))

    dup1 = _duplicate_rows(kp1)
    kp1_ = np.delete(kp1, dup1, 0)
    des1_ = np.delete(des1, dup1, 0)

    dup2 = _duplicate_rows(kp2)
    kp2_ = np.delete(kp2, dup2, 0)
    des2_ = np.delete(des2, dup2, 0)
    return kp1_, kp2_, des1_, des2_
时间: 2023-06-03 10:01:32 浏览: 63
这段代码包含若干函数：`des_distance` 对两个形状相同的描述子矩阵逐行计算误差的均方根（RMSE）；`deep_match` 则用深度特征描述子进行关键点匹配。在 `deep_match` 中，会把第一组中的每个描述子与第二组的所有描述子逐一比对，找到 RMSE 最小的候选；如果最小 RMSE 小于次小 RMSE 的 ratio 倍（即 Lowe 比率测试通过），则认为匹配成功，并将对应的坐标添加到结果列表中。
相关问题
model.compile(loss=my_rmse, optimizer=optimizer)怎么添加R2
您可以尝试使用Keras的自定义评估指标来添加R2。以下是示例代码:
``` python
import keras.backend as K
def my_rmse(y_true, y_pred):
    """Root-mean-squared error between targets and predictions (Keras backend)."""
    squared_diff = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_diff))
def r_square(y_true, y_pred):
    """Coefficient of determination (R²) as a Keras metric."""
    residual_ss = K.sum(K.square(y_true - y_pred))
    total_ss = K.sum(K.square(y_true - K.mean(y_true)))
    # K.epsilon() avoids division by zero when the targets are constant.
    return 1 - residual_ss / (total_ss + K.epsilon())
model.compile(loss=my_rmse, optimizer=optimizer, metrics=[r_square])
```
在上面的代码中,我们定义了两个自定义评估指标函数,一个是用于RMSE的`my_rmse`,另一个是用于R2的`r_square`。在`model.compile`中,我们将这两个评估指标都传递给了`metrics`参数。这样,在训练过程中,Keras将计算并记录这两个指标的值,并在每个时期结束时输出它们的平均值。
loss='mean_squared_error'如何改为RMSE
在Keras中，若只是想在训练过程中监控`Root Mean Squared Error (RMSE)`，最简单的做法是保留`mean_squared_error`作为损失函数，并把内置的`RootMeanSquaredError`指标加入`metrics`；若确实要把损失函数本身换成RMSE，则需要自定义一个返回MSE平方根的损失函数。
具体代码如下:
```python
from keras.metrics import RootMeanSquaredError
model.compile(loss='mean_squared_error', optimizer='adam', metrics=[RootMeanSquaredError()])
```
在模型编译时，损失函数仍为`mean_squared_error`，同时将指标（metrics）设为`RootMeanSquaredError()`——这样训练中会额外计算并输出RMSE；注意这并没有改变损失函数本身，优化目标仍是MSE（由于平方根是单调函数，两者的最优解一致）。
相关推荐
![rar](https://img-home.csdnimg.cn/images/20210720083606.png)
![rar](https://img-home.csdnimg.cn/images/20210720083606.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)