Refer to the following two code segments.

First segment:

```
# Lab 5: Cross-Validation and the Bootstrap
# The Validation Set Approach
install.packages("ISLR")
library(ISLR)
set.seed(1)
train=sample(392,196)
lm.fit=lm(mpg~horsepower,data=Auto,subset=train)
attach(Auto)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
lm.fit2=lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
lm.fit3=lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3,Auto))[-train]^2)
set.seed(2)
train=sample(392,196)
lm.fit=lm(mpg~horsepower,subset=train)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
lm.fit2=lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
lm.fit3=lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3,Auto))[-train]^2)

# Leave-One-Out Cross-Validation
glm.fit=glm(mpg~horsepower,data=Auto)
coef(glm.fit)
lm.fit=lm(mpg~horsepower,data=Auto)
coef(lm.fit)
library(boot)
glm.fit=glm(mpg~horsepower,data=Auto)
cv.err=cv.glm(Auto,glm.fit)
cv.err$delta
cv.error=rep(0,5)
for (i in 1:5){
  glm.fit=glm(mpg~poly(horsepower,i),data=Auto)
  cv.error[i]=cv.glm(Auto,glm.fit)$delta[1]
}
cv.error
```

Second segment:

```
library(caret)
library(klaR)
data(iris)
split=0.80
trainIndex <- createDataPartition(iris$Species,p=split,list=FALSE)
data_train <- iris[trainIndex,]
data_test <- iris[-trainIndex,]
model <- NaiveBayes(Species~.,data=data_train)
x_test <- data_test[,1:4]
y_test <- data_test[,5]
predictions <- predict(model,x_test)
confusionMatrix(predictions$class,y_test)
```

Write R code that completes the following tasks: ① build a 50×30 random data set (50 observations on 30 variables); ② generate three linear models on the data from ① with different coefficient vectors; ③ (linear regression) compute the CV value for each of the three groups; ④ (ridge regression) draw two plots for the three groups, both with lambda on the x-axis: one with CV error on the y-axis and one with Prediction error on the y-axis, shown side by side in the Plots pane.
Below is an R implementation of the tasks:
```
# ① Build a 50x30 random data matrix (50 observations, 30 variables)
library(glmnet)   # needed for the ridge fits below
set.seed(123)
X <- matrix(rnorm(50*30), ncol=30)

# ② Generate three linear models with different coefficient vectors,
#    and simulate a response from each one
beta1 <- rnorm(30, mean=1, sd=0.5)
beta2 <- rnorm(30, mean=2, sd=0.5)
beta3 <- rnorm(30, mean=3, sd=0.5)
y1 <- as.vector(X %*% beta1 + rnorm(50))
y2 <- as.vector(X %*% beta2 + rnorm(50))
y3 <- as.vector(X %*% beta3 + rnorm(50))

# k-fold cross-validation of ridge regression over a grid of lambda values
cv_linear <- function(X, y, k=10, lambda=seq(0, 1, length.out=100)) {
  n <- nrow(X)
  mse <- rep(0, length(lambda))
  folds <- sample(rep(1:k, length.out=n))
  for (i in 1:k) {
    X_train <- X[folds!=i, ]
    y_train <- y[folds!=i]
    X_test  <- X[folds==i, ]
    y_test  <- y[folds==i]
    for (j in seq_along(lambda)) {
      fit <- glmnet(X_train, y_train, alpha=0, lambda=lambda[j])
      y_pred <- predict(fit, newx=X_test)
      mse[j] <- mse[j] + mean((y_test - y_pred)^2)
    }
  }
  mse / k
}

# ③ Compute the CV values for the three groups
lambda <- seq(0, 1, length.out=100)
mse1 <- cv_linear(X, y1, lambda=lambda)
mse2 <- cv_linear(X, y2, lambda=lambda)
mse3 <- cv_linear(X, y3, lambda=lambda)

# ④ (ridge regression) two side-by-side plots, both with lambda on the
#    x-axis: CV error on the left, prediction error on the right
par(mfrow=c(1,2))

# CV error vs lambda
plot(lambda, mse1, type="l", xlab="lambda", ylab="CV error",
     ylim=range(c(mse1, mse2, mse3)))
lines(lambda, mse2, col="red")
lines(lambda, mse3, col="blue")
legend("topright", legend=c("beta1","beta2","beta3"),
       col=c("black","red","blue"), lty=1, cex=0.8)

# Prediction error (MSE on the full data) vs lambda
pred_error <- function(X, y, lambda) {
  sapply(lambda, function(l) {
    fit <- glmnet(X, y, alpha=0, lambda=l)
    mean((y - predict(fit, newx=X))^2)
  })
}
pe1 <- pred_error(X, y1, lambda)
pe2 <- pred_error(X, y2, lambda)
pe3 <- pred_error(X, y3, lambda)
plot(lambda, pe1, type="l", xlab="lambda", ylab="Prediction error",
     ylim=range(c(pe1, pe2, pe3)))
lines(lambda, pe2, col="red")
lines(lambda, pe3, col="blue")
```
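
To read off the CV-optimal lambda for each group, take the grid point that minimizes each curve, e.g.:

```
# lambda value minimizing the CV error for each group
lambda[which.min(mse1)]
lambda[which.min(mse2)]
lambda[which.min(mse3)]
```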
Note that since no specific data set was given, the data above are randomly generated; set.seed(123) keeps a run reproducible, but with a different seed the exact curves will shift slightly.
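
As a cross-check on the hand-rolled cv_linear function, the glmnet package provides its own k-fold helper, cv.glmnet. A minimal sketch reusing X and y1 from above (cv.glmnet picks its own lambda grid by default and plots against log(lambda)):

```
# built-in 10-fold CV for the ridge path on group 1
cvfit1 <- cv.glmnet(X, y1, alpha=0, nfolds=10)
plot(cvfit1)         # CV error vs log(lambda), with standard-error bars
cvfit1$lambda.min    # lambda attaining the smallest CV error
```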