Optimize this code:

```python
import numpy as np


class SFA:  # slow feature analysis class
    def __init__(self):
        self._Z = []
        self._B = []
        self._eigenVector = []

    def getB(self, data):
        self._B = np.matrix(data.T.dot(data)) / (data.shape[0] - 1)

    def getZ(self, data):
        derivativeData = self.makeDiff(data)
        self._Z = np.matrix(derivativeData.T.dot(derivativeData)) / (derivativeData.shape[0] - 1)

    def makeDiff(self, data):
        diffData = np.mat(np.zeros((data.shape[0], data.shape[1])))
        for i in range(data.shape[1] - 1):
            diffData[:, i] = data[:, i] - data[:, i + 1]
        diffData[:, -1] = data[:, -1] - data[:, 0]
        return np.mat(diffData)

    def fit_transform(self, data, threshold=1e-7, conponents=-1):
        if conponents == -1:
            conponents = data.shape[0]
        self.getB(data)
        U, s, V = np.linalg.svd(self._B)
        count = len(s)
        for i in range(len(s)):
            if s[i] ** (0.5) < threshold:
                count = i
                break
        s = s[0:count]
        s = s ** 0.5
        S = (np.mat(np.diag(s))).I
        U = U[:, 0:count]
        whiten = S * U.T
        Z = (whiten * data.T).T
        self.getZ(Z)
        PT, O, P = np.linalg.svd(self._Z)
        self._eigenVector = P * whiten
        self._eigenVector = self._eigenVector[-1 * conponents:, :]
        return data.dot(self._eigenVector.T)

    def transfer(self, data):
        return data.dot(self._eigenVector.T)
```

Posted: 2024-02-05 18:02:38 · Views: 25
This code can be tightened up in several ways:

1. Use `np.matmul` (or the `@` operator) on plain ndarrays instead of `dot` combined with `np.matrix`; `np.matrix` is discouraged in current NumPy, and the `*` / `.I` matrix operators make the code harder to follow.
2. Replace the column-by-column loop in `makeDiff` with vectorized slicing.
3. Replace the threshold-search loop in `fit_transform` with a vectorized comparison, keeping all components when no singular value falls below the threshold.
4. Use `np.sqrt` and `np.linalg.inv` instead of `** 0.5` and the matrix `.I` attribute.

The optimized code:

```python
import numpy as np


class SFA:
    """Slow Feature Analysis."""

    def __init__(self):
        self._Z = []
        self._B = []
        self._eigenVector = []

    def getB(self, data):
        # covariance of the (zero-mean) input signal
        self._B = np.matmul(data.T, data) / (data.shape[0] - 1)

    def getZ(self, data):
        # covariance of the differenced whitened signal
        derivativeData = self.makeDiff(data)
        self._Z = np.matmul(derivativeData.T, derivativeData) / (derivativeData.shape[0] - 1)

    def makeDiff(self, data):
        # vectorized circular difference, replacing the original column loop
        diffData = np.zeros_like(data, dtype=float)
        diffData[:, :-1] = data[:, :-1] - data[:, 1:]
        diffData[:, -1] = data[:, -1] - data[:, 0]
        return diffData

    def fit_transform(self, data, threshold=1e-7, conponents=-1):
        if conponents == -1:
            conponents = data.shape[0]
        self.getB(data)
        U, s, V = np.linalg.svd(self._B)
        # singular values come out in descending order: keep those whose square
        # roots are above the threshold, or all of them if none falls below it
        below = np.sqrt(s) < threshold
        count = int(np.argmax(below)) if below.any() else len(s)
        s = np.sqrt(s[:count])
        S = np.linalg.inv(np.diag(s))
        whiten = np.matmul(S, U[:, :count].T)
        Z = np.matmul(whiten, data.T).T
        self.getZ(Z)
        PT, O, P = np.linalg.svd(self._Z)
        # the slowest features correspond to the smallest eigenvalues (last rows of P)
        self._eigenVector = np.matmul(P[-conponents:, :], whiten)
        return np.matmul(data, self._eigenVector.T)

    def transfer(self, data):
        return np.matmul(data, self._eigenVector.T)
```

These changes remove the explicit Python loops and the `np.matrix` machinery, which makes the code both faster on larger inputs and easier to read; the transformed features are now returned as plain ndarrays rather than matrices.
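For completeness, here is a minimal usage sketch on synthetic data. The signal construction, its shape, and the `conponents` value are invented for illustration; also note that `getB` and `getZ` treat the input as zero-mean, so the columns are centered first.

```python
import numpy as np

# Hypothetical input: 500 time steps of a 10-dimensional signal,
# one slowly varying component mixed with fast noisy ones.
rng = np.random.default_rng(0)
t = np.linspace(0, 2 * np.pi, 500)
slow = np.sin(t)
fast = rng.normal(size=(500, 9))
signal = np.column_stack([slow, fast])
signal -= signal.mean(axis=0)        # center each column (zero-mean assumption)

sfa = SFA()
features = sfa.fit_transform(signal, conponents=2)   # two slowest features
print(features.shape)                                # (500, 2)

# transfer() reuses the eigenvectors learned by fit_transform on new data
print(sfa.transfer(signal[:100]).shape)              # (100, 2)
```

Before swapping the optimized class into an existing pipeline, it is worth comparing its output with the original class on the same input to confirm the results still match.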

