```
model1 <- n# weights: 31
Error in nnet.default(x, y, w, ...) : NA/NaN/Inf in foreign function call (arg 2)
In addition: Warning message:
In nnet.default(x, y, w, ...) : NAs introduced by coercion
```
This code throws an error. The message indicates that NA/NaN/Inf values reached the foreign function call inside `nnet.default()`; the second argument (y) likely contains such values. The accompanying warning indicates that a type coercion during the call introduced NAs.
Check the dataset for missing values, infinities, and non-numeric entries, and handle them before fitting. You can use `is.na()` and `summary()` to inspect the data for missing values and anomalies. If such values exist, drop the affected rows with `na.omit()` or `complete.cases()`. Also convert character columns to numeric explicitly, which avoids the silent coercion that triggers the warning, as in the sketch below.
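A minimal R sketch of these checks, assuming a data frame `df` with a target column `y` and a character column `x1` that should be numeric (the names, the formula, and the `size` value are hypothetical placeholders):
```r
library(nnet)

summary(df)                  # inspect ranges, column types, and NA counts
colSums(is.na(df))           # count missing values per column

df$x1 <- as.numeric(as.character(df$x1))  # convert explicitly; silent coercion
                                          # is what raises "NAs introduced by coercion"
df <- df[complete.cases(df), ]            # drop rows containing NA/NaN

num <- sapply(df, is.numeric)
df <- df[apply(is.finite(as.matrix(df[, num])), 1, all), ]  # drop rows with Inf

model1 <- nnet(y ~ ., data = df, size = 5)  # adjust formula and size to your data
```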
Related questions
```python
import numpy as np

def sigmoid(x):
    # the sigmoid function
    return 1 / (1 + np.exp(-x))

class LogisticReg(object):
    def __init__(self, indim=1):
        # initialize the parameters with all zeros
        # w: shape of [d+1, 1]
        self.w = np.zeros((indim + 1, 1))

    def set_param(self, weights, bias):
        # helper function to set the parameters
        # NOTE: you need to implement this to pass the autograder.
        # weights: vector of shape [d, ]
        # bias: scalar
        pass

    def get_param(self):
        # helper function to return the parameters
        # NOTE: you need to implement this to pass the autograder.
        # returns:
        #   weights: vector of shape [d, ]
        #   bias: scalar
        pass

    def compute_loss(self, X, t):
        # compute the loss
        # X: feature matrix of shape [N, d]
        # t: input label of shape [N, ]
        # NOTE: return the average of the log-likelihood, NOT the sum.
        # extend the input matrix
        X_ext = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1)
        # compute the log-likelihood

    def compute_grad(self, X, t):
        # X: feature matrix of shape [N, d]
        # grad: shape of [d, 1]
        # NOTE: return the average gradient, NOT the sum.
        pass

    def update(self, grad, lr=0.001):
        # update the weights
        # by the gradient descent rule
        pass

    def fit(self, X, t, lr=0.001, max_iters=1000, eps=1e-7):
        # implement the .fit() using the gradient descent method.
        # args:
        #   X: input feature matrix of shape [N, d]
        #   t: input label of shape [N, ]
        #   lr: learning rate
        #   max_iters: maximum number of iterations
        #   eps: tolerance of the loss difference
        # TO NOTE:
        #   extend the input features before fitting to it.
        #   return the weight matrix of shape [indim+1, 1]
        pass

    def predict_prob(self, X):
        # implement the .predict_prob() using the parameters learned by .fit()
        # X: input feature matrix of shape [N, d]
        # NOTE: make sure you extend the feature matrix first,
        # the same way as what you did in .fit() method.
        # returns the prediction (likelihood) of shape [N, ]
        pass

    def predict(self, X, threshold=0.5):
        # implement the .predict() using the .predict_prob() method
        # X: input feature matrix of shape [N, d]
        # returns the prediction of shape [N, ], where each element is -1 or 1.
        # if the probability p > threshold, we determine t = 1, otherwise t = -1
        pass
```
Here is a complete implementation of the code you provided, including the `set_param()` and `get_param()` methods:
```python
import numpy as np

def sigmoid(x):
    # the sigmoid function
    return 1 / (1 + np.exp(-x))

class LogisticReg(object):
    def __init__(self, indim=1):
        # initialize the parameters with all zeros
        # w: shape of [d+1, 1]
        self.w = np.zeros((indim + 1, 1))

    def set_param(self, weights, bias):
        # helper function to set the parameters
        # weights: vector of shape [d, ]
        # bias: scalar
        self.w[:-1] = weights.reshape(-1, 1)
        self.w[-1] = bias

    def get_param(self):
        # helper function to return the parameters
        # returns:
        #   weights: vector of shape [d, ]
        #   bias: scalar
        weights = self.w[:-1].reshape(-1)
        bias = self.w[-1, 0]  # index explicitly so the bias comes back as a scalar
        return weights, bias

    def compute_loss(self, X, t):
        # compute the average negative log-likelihood
        # X: feature matrix of shape [N, d]
        # t: input label of shape [N, ], with values in {0, 1}
        # extend the input matrix with a constant-1 column for the bias
        X_ext = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1)
        y = sigmoid(X_ext @ self.w).reshape(-1)  # flatten to [N, ] so it broadcasts with t
        y = np.clip(y, 1e-15, 1 - 1e-15)         # guard against log(0)
        loss = -np.mean(t * np.log(y) + (1 - t) * np.log(1 - y))
        return loss

    def compute_grad(self, X, t):
        # X: feature matrix of shape [N, d]
        # returns the average gradient, shape [d+1, 1]
        X_ext = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1)
        y = sigmoid(X_ext @ self.w).reshape(-1)
        # gradient of the average negative log-likelihood: X^T (y - t) / N
        grad = X_ext.T @ (y - t) / X.shape[0]
        return grad.reshape(-1, 1)

    def update(self, grad, lr=0.001):
        # update the weights by the gradient descent rule
        self.w -= lr * grad.reshape(-1, 1)

    def fit(self, X, t, lr=0.001, max_iters=1000, eps=1e-7):
        # gradient descent: iterate until the loss change falls below eps
        # or max_iters is reached; returns the weight matrix of shape [indim+1, 1]
        prev_loss = np.inf
        for i in range(max_iters):
            loss = self.compute_loss(X, t)
            if np.abs(loss - prev_loss) < eps:
                break
            grad = self.compute_grad(X, t)
            self.update(grad, lr)
            prev_loss = loss
        return self.w

    def predict_prob(self, X):
        # X: input feature matrix of shape [N, d]
        # returns the predicted probability of t = 1, shape [N, ]
        # the feature matrix is extended the same way as in .fit()
        X_ext = np.concatenate((X, np.ones((X.shape[0], 1))), axis=1)
        return sigmoid(X_ext @ self.w).reshape(-1)

    def predict(self, X, threshold=0.5):
        # X: input feature matrix of shape [N, d]
        # returns predictions of shape [N, ], each element -1 or 1:
        # t = 1 if the probability p > threshold, otherwise t = -1
        prob = self.predict_prob(X)
        return np.where(prob > threshold, 1, -1)
```
This class implements the core of logistic regression: setting and reading parameters, computing the loss and gradient, updating the weights, fitting data, and predicting probabilities and class labels. You can use it to fit a binary classification dataset and make predictions.
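As a quick sanity check, here is a minimal usage sketch on synthetic data (the toy arrays, seed, and hyperparameters are made up for illustration):
```python
import numpy as np

# toy 1-D data: class 0 clusters around -2, class 1 around +2
# (the loss above assumes labels in {0, 1}; .predict() reports classes as -1/1,
# an asymmetry inherited from the skeleton's spec)
rng = np.random.default_rng(0)
X = np.concatenate([rng.normal(-2, 1, (50, 1)), rng.normal(2, 1, (50, 1))])
t = np.concatenate([np.zeros(50), np.ones(50)])

model = LogisticReg(indim=1)
model.fit(X, t, lr=0.1, max_iters=5000)
weights, bias = model.get_param()
print(weights, bias)         # learned parameters
print(model.predict(X[:5]))  # array of -1/1 labels
```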
```python
# In[] contractive loss function
# def contractive_loss(y_pred, y_true, lam=1e-4):
#     mse = K.mean(K.square(y_true - y_pred), axis=1)
#     W = K.variable(value=model.get_layer('encoded').get_weights()[0])  # N x N_hidden
#     W = K.transpose(W)  # N_hidden x N
#     h = model.get_layer('encoded').output
#     dh = h * (1 - h)  # N_batch x N_hidden
#     contractive = lam * K.sum(dh**2 * K.sum(W**2, axis=1), axis=1)
#     return mse + contractive
```
Modify this code into a form that works with the latest version of TensorFlow.
The code can be rewritten for current TensorFlow versions as follows:
```python
import tensorflow as tf

def contractive_loss(y_pred, y_true, lam=1e-4):
    mse = tf.reduce_mean(tf.square(y_true - y_pred), axis=1)
    # reference the layer's kernel variable directly (no K.variable copy),
    # so gradients of the penalty flow back into the weights
    W = tf.transpose(model.get_layer('encoded').kernel)  # N_hidden x N
    h = model.get_layer('encoded').output                # N_batch x N_hidden
    dh = h * (1 - h)                                     # derivative of a sigmoid activation
    contractive = lam * tf.reduce_sum(tf.square(dh) * tf.reduce_sum(tf.square(W), axis=1), axis=1)
    return mse + contractive
```
Note that the TensorFlow function names and usage differ slightly from the Keras backend API: `K.mean` becomes `tf.reduce_mean`, `K.square` becomes `tf.square`, and instead of wrapping a `get_weights()` copy in `K.variable` you can reference the layer's `kernel` variable directly. One caveat: reading `model.get_layer('encoded').output` inside a loss function only works when the model was built with the functional API; in eager TF2 it is usually cleaner to attach the penalty with `model.add_loss()` inside the model definition.
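For reference, a minimal sketch of the `add_loss()` variant, assuming a toy autoencoder with a sigmoid-activated bottleneck named `'encoded'` (the input size, bottleneck width, and optimizer are made up for illustration):
```python
import tensorflow as tf

inp = tf.keras.Input(shape=(784,))
encoded = tf.keras.layers.Dense(32, activation='sigmoid', name='encoded')(inp)
decoded = tf.keras.layers.Dense(784, activation='sigmoid')(encoded)
model = tf.keras.Model(inp, decoded)

# contractive penalty built into the graph, so no layer introspection
# is needed inside the loss function at training time
lam = 1e-4
W = model.get_layer('encoded').kernel     # shape [784, 32]
dh = encoded * (1 - encoded)              # sigmoid derivative, [N_batch, 32]
penalty = lam * tf.reduce_sum(tf.square(dh) * tf.reduce_sum(tf.square(W), axis=0), axis=1)
model.add_loss(tf.reduce_mean(penalty))

model.compile(optimizer='adam', loss='mse')
# model.fit(x_train, x_train, ...)        # train as an autoencoder
```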