import os
from typing import List, Optional

import cupy as cp
import torch
from torch import Tensor, nn

# CU_SOURCE_CODE_RAW_STRING (the CUDA kernel source), _CURPATH (the include
# directory passed to the compiler) and tensor_to_cparray (a torch -> cupy
# array view helper) are defined elsewhere in the original module.

class srmNeuronFunc(object):
    # Compile the two templated CUDA kernels once and cache the handles.
    funclists = ['srm_forward<float>', 'srm_backward<float>']
    cu_module = cp.RawModule(code=CU_SOURCE_CODE_RAW_STRING,
                             options=('-std=c++11', '-I ' + _CURPATH),
                             name_expressions=funclists)
    neuron_FP = cu_module.get_function(funclists[0])
    neuron_BP = cu_module.get_function(funclists[1])

    @staticmethod
    def forward(inputs: Tensor, taum: float, taus: float, e_taug: float,
                v_th: float) -> List[Tensor]:
        spikes = torch.zeros_like(inputs)
        delta_ut = torch.zeros_like(inputs)
        delta_u = torch.zeros_like(inputs)
        # B batches, T time steps, dim neurons per step.
        B, T, dim = *inputs.shape[:2], inputs[0][0].numel()
        with cp.cuda.Device(inputs.get_device()):
            # One thread per (batch, neuron) pair, 1024 threads per block.
            srmNeuronFunc.neuron_FP(((B * dim + 1023) // 1024,), (1024,), (
                tensor_to_cparray(inputs.contiguous()),
                tensor_to_cparray(spikes.contiguous()),
                tensor_to_cparray(delta_ut.contiguous()),
                tensor_to_cparray(delta_u.contiguous()),
                cp.float32(taum), cp.float32(taus),
                cp.float32(e_taug), cp.float32(v_th),
                cp.int32(B), cp.int32(T), cp.int32(dim)
            ))
        return spikes, delta_ut, delta_u

    @staticmethod
    def backward(grad_out: Tensor, delta_ut: Tensor, delta_u: Tensor,
                 spikes: Tensor, epsw: Tensor, epst: Tensor) -> List[Tensor]:
        grad_w = torch.zeros_like(grad_out)
        grad_t = torch.zeros_like(grad_out)
        B, T, dim = *grad_out.shape[:2], grad_out[0][0].numel()
        with cp.cuda.Device(grad_out.get_device()):
            srmNeuronFunc.neuron_BP(((B * dim + 1023) // 1024,), (1024,), (
                tensor_to_cparray(grad_out.contiguous()),
                tensor_to_cparray(delta_ut.contiguous()),
                tensor_to_cparray(delta_u.contiguous()),
                tensor_to_cparray(spikes.contiguous()),
                tensor_to_cparray(epsw),
                tensor_to_cparray(epst),
                tensor_to_cparray(grad_w.contiguous()),
                tensor_to_cparray(grad_t.contiguous()),
                cp.int32(B), cp.int32(T), cp.int32(dim)
            ))
        return grad_w, grad_t
This is the neuron function implemented with CuPy. The class compiles the two templated CUDA kernels srm_forward&lt;float&gt; and srm_backward&lt;float&gt; once via cp.RawModule and exposes them as a forward and a backward pass. The forward pass takes the input tensor, computes the output spikes, and returns the spike tensor together with the intermediate delta_ut and delta_u tensors needed for the backward pass. The backward pass takes the output gradient together with delta_ut, delta_u, spikes, epsw, and epst, and returns the weight gradient grad_w and the temporal gradient grad_t. Implementing this with CuPy accelerates the computation because the custom kernels exploit the GPU's parallelism: each launch assigns one thread per (batch, neuron) pair, with the loop over the T time steps running inside the kernel.
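For reference, a minimal usage sketch is shown below. The module name srm_layers is an assumption (the original file name is not given), and a CUDA device is required because the kernels are compiled GPU code:

import torch

# Hypothetical usage sketch: assumes the code above is importable as
# `srm_layers`; CPU tensors will not work with the compiled kernels.
from srm_layers import srmNeuronFunc

B, T, N = 8, 16, 128                     # batch size, time steps, neurons
x = torch.randn(B, T, N, device='cuda')  # synaptic input per time step

spikes, delta_ut, delta_u = srmNeuronFunc.forward(
    x, taum=5.0, taus=3.0, e_taug=1.0 - 1.0 / 2.5, v_th=1.0
)
print(spikes.shape)  # (8, 16, 128): the spike train, plus saved intermediates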
class srmLinear(nn.Linear):
    def __init__(self, in_features: int, out_features: int, bias: bool = False,
                 v_th: float = 1.0, taum: float = 5., taus: float = 3.,
                 taug: float = 2.5, weight_norm: bool = True,
                 eps: float = 1e-5) -> None:
        super().__init__(in_features, out_features, bias)
        nn.init.orthogonal_(self.weight)
        self.taum = taum
        self.taus = taus
        self.taug = taug
        self.v_th = v_th
        self.epsw = None
        self.epst = None
        # Per-step decay factors 1 - 1/tau for membrane, synapse and gradient.
        self.e_taum = 1. - 1. / taum
        self.e_taus = 1. - 1. / taus
        self.e_taug = 1. - 1. / taug
        self.linear_func = srmLinearFunc.apply
        if weight_norm:
            self.bn_weight = nn.Parameter(torch.ones(out_features))
            self.bn_bias = nn.Parameter(torch.zeros(out_features))
        else:
            self.bn_weight = None
            self.bn_bias = None
        self.register_buffer('eps', torch.tensor([eps]))

    def forward(self, inputs: Tensor) -> Tensor:
        self.batch_reset(inputs)
        return self.linear_func(
            inputs, self.weight, self.bn_weight, self.bn_bias, self.eps,
            self.v_th, self.taum, self.taus, self.e_taug, self.epsw, self.epst
        )

    def batch_reset(self, inputs: Tensor) -> None:
        # Rebuild the response kernels only when the number of time steps changes.
        if self.epsw is None or self.epsw.shape[0] != inputs.shape[1]:
            coefficient = self.taum / (self.taum - self.taus)
            self.epst = torch.FloatTensor(
                [-self.e_taug ** (1 + i) for i in range(inputs.shape[1])]
            ).to(inputs)
            self.epsw = torch.FloatTensor(
                [coefficient * (self.e_taum ** (1 + i) - self.e_taus ** (1 + i))
                 for i in range(inputs.shape[1])]
            ).to(inputs)
This is the linear layer of a PyTorch spiking neural network built on the Spike Response Model (SRM), in which a neuron emits an output spike only when its membrane potential crosses the threshold v_th. The constructor stores the membrane, synaptic, and gradient time constants (taum, taus, taug) together with their per-step decay factors e_tau* = 1 - 1/tau*, and batch_reset precomputes the response kernels epsw (a difference of exponentials scaled by taum/(taum - taus)) and epst for the current number of time steps. When weight_norm is enabled, the layer additionally carries learnable bn_weight and bn_bias parameters that apply a BatchNorm-style normalization to the weight matrix rather than to the activations.
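A short end-to-end sketch of how the layer might be driven (again with the assumed module name srm_layers; the input statistics are illustrative only):

import torch

from srm_layers import srmLinear

layer = srmLinear(in_features=784, out_features=256,
                  v_th=1.0, taum=5.0, taus=3.0, taug=2.5,
                  weight_norm=True).cuda()

# Poisson-like binary input spikes: (batch=8, time=16, in_features=784).
x = (torch.rand(8, 16, 784, device='cuda') < 0.1).float()

out = layer(x)        # (8, 16, 256) output spike train
out.sum().backward()  # gradients flow through srmLinearFunc.backward

Because batch_reset only rebuilds epsw and epst when the number of time steps changes, consecutive batches with the same T reuse the cached kernels.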
class srmLinearFunc(torch.autograd.Function):
    @staticmethod
    def forward(ctx, inputs: Tensor, weight: Tensor, bn_weight: Tensor,
                bn_bias: Tensor, eps: Tensor, v_th: Tensor,
                taum: float, taus: float, e_taug: float,
                epsw: Tensor, epst: Tensor) -> Tensor:
        if bn_weight is not None:
            x, normx, varx = BN1dForward(weight.t(), bn_weight, bn_bias, eps)
        else:
            x = weight.t()
            # Placeholders so that save_for_backward always receives tensors.
            normx = varx = bn_weight = bn_bias = eps
        x = inputs.matmul(x)
        spikes, delta_ut, delta_u = srmNeuronFunc.forward(x, taum, taus, e_taug, v_th)
        ctx.save_for_backward(
            inputs, weight, bn_weight, bn_bias, normx, varx, eps,
            spikes, delta_ut, delta_u, epsw, epst,
        )
        return spikes

    @staticmethod
    def backward(ctx, grad_out: Tensor) -> List[Optional[Tensor]]:
        (inputs, weight, bn_weight, bn_bias, normx, varx, eps,
         spikes, delta_ut, delta_u, epsw, epst) = ctx.saved_tensors
        grad_w, grad_t = srmNeuronFunc.backward(grad_out, delta_ut, delta_u,
                                                spikes, epsw, epst)
        # grad_w: (B, T, d_out); weight: (d_out, d_in); inputs: (B, T, d_in).
        grad_w = grad_w.transpose(1, 2).matmul(inputs).sum(dim=0)
        # bn_weight equals the eps placeholder only when weight norm was disabled.
        if eps.shape != bn_weight.shape or eps != bn_weight:
            grad_w, grad_bnw, grad_bnb = BN1dBackward(grad_w.t(), normx, varx,
                                                      eps, bn_weight)
            grad_w = grad_w.t()
            x = (normx * bn_weight + bn_bias).t()
        else:
            grad_bnw = None
            grad_bnb = None
            x = weight
        grad_t = torch.matmul(grad_t, x)
        return (grad_t * 0.85, grad_w, grad_bnw, grad_bnb,
                None, None, None, None, None, None, None)
This is the custom torch.autograd.Function behind srmLinear. In the forward pass it optionally normalizes the transposed weight matrix with BN1dForward, computes the synaptic input inputs.matmul(x), converts it into spikes with the SRM neuron kernel, and saves all intermediates for the backward pass. In the backward pass it first calls the CUDA backward kernel to obtain grad_w and grad_t, contracts grad_w with the inputs to form the weight gradient, backpropagates through the weight normalization when it was applied (the eps.shape != bn_weight.shape test detects whether bn_weight is a real parameter or the placeholder), and finally propagates grad_t through the (normalized) weight matrix back to the inputs, scaled by the constant factor 0.85.
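BN1dForward and BN1dBackward are not shown in these snippets. A plausible sketch of BN1dForward, consistent with how it is called here (it must return the normalized-and-scaled weight plus the normx and varx intermediates that backward saves), is given below; the exact choice of statistics and whether varx already includes eps are assumptions:

import torch
from torch import Tensor
from typing import Tuple

def BN1dForward(x: Tensor, bn_weight: Tensor, bn_bias: Tensor,
                eps: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
    # Assumed implementation: BatchNorm-style normalization of the
    # transposed weight matrix x, shape (in_features, out_features),
    # with per-output-feature statistics so that bn_weight/bn_bias of
    # shape (out_features,) broadcast over the last dimension.
    mean = x.mean(dim=0, keepdim=True)
    var = x.var(dim=0, unbiased=False, keepdim=True)
    varx = var + eps                       # saved for the backward pass
    normx = (x - mean) / torch.sqrt(varx)  # saved for the backward pass
    return normx * bn_weight + bn_bias, normx, varx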