参考以下两段代码代码:第一段:# Lab5: Cross-Validation and the Bootstrap # The Validation Set Approach install.packages("ISLR") library(ISLR) set.seed(1) train=sample(392,196) lm.fit=lm(mpg~horsepower,data=Auto,subset=train) attach(Auto) mean((mpg-predict(lm.fit,Auto))[-train]^2) lm.fit2=lm(mpg~poly(horsepower,2),data=Auto,subset=train) mean((mpg-predict(lm.fit2,Auto))[-train]^2) lm.fit3=lm(mpg~poly(horsepower,3),data=Auto,subset=train) mean((mpg-predict(lm.fit3,Auto))[-train]^2) set.seed(2) train=sample(392,196) lm.fit=lm(mpg~horsepower,subset=train) mean((mpg-predict(lm.fit,Auto))[-train]^2) lm.fit2=lm(mpg~poly(horsepower,2),data=Auto,subset=train) mean((mpg-predict(lm.fit2,Auto))[-train]^2) lm.fit3=lm(mpg~poly(horsepower,3),data=Auto,subset=train) mean((mpg-predict(lm.fit3,Auto))[-train]^2) # Leave-One-Out Cross-Validation glm.fit=glm(mpg~horsepower,data=Auto) coef(glm.fit) lm.fit=lm(mpg~horsepower,data=Auto) coef(lm.fit) library(boot) glm.fit=glm(mpg~horsepower,data=Auto) cv.err=cv.glm(Auto,glm.fit) cv.err$delta cv.error=rep(0,5) for (i in 1:5){ glm.fit=glm(mpg~poly(horsepower,i),data=Auto) cv.error[i]=cv.glm(Auto,glm.fit)$delta[1] } cv.error第二段:library(caret) library(klaR) data(iris) splt=0.80 trainIndex <- createDataPartition(iris$Species,p=split,list=FALSE) data_train <- iris[ trainIndex,] data_test <- iris[-trainIndex,] model <- NaiveBayes(Species~.,data=data_train) x_test <- data_test[,1:4] y_test <- data_test[,5] predictions <- predict(model,x_test) confusionMatrix(predictions$class,y_test)。写出R代码完成以下任务:①建立50×30的随机数据和30个变量;②生成三组不同系数的①线性模型;③(线性回归中)分别计算这三组的CV值;④(岭回归中)分别画出这三组的两张图,两张图均以lambd为横坐标,一张图以CV error为纵坐标,一张图以Prediction error为纵坐标,两张图同分开在Plots位置,而且三组一组画完,再画另一组
时间: 2024-02-23 07:56:41 浏览: 157
jakarta.validation-api-2.0.2-API文档-中文版.zip
5星 · 资源好评率100%
以下是R代码实现:
# ① Generate a 50 x 30 random design matrix (50 observations, 30 predictors)
set.seed(1)
n <- 50
p <- 30
X <- matrix(rnorm(n * p), nrow = n, ncol = p)
y <- rnorm(n)  # noise-only response, kept for backward compatibility

# ② Three different coefficient vectors, defining three linear models
beta1 <- rnorm(p)                                 # dense random coefficients
beta2 <- c(rep(1, 10), rep(0, 20))                # sparse: only first 10 active
beta3 <- seq(from = 0.1, to = 3, length.out = p)  # increasing coefficients

# Responses for the three models: y_k = X %*% beta_k + Gaussian noise.
# Without this step the betas would never be used and the "three models"
# would not actually exist (the original code's defect).
y1 <- as.vector(X %*% beta1 + rnorm(n))
y2 <- as.vector(X %*% beta2 + rnorm(n))
y3 <- as.vector(X %*% beta3 + rnorm(n))
# ③ Cross-validation error of each of the three linear models (linear regression)
#
# Fixes vs. the original:
#  - cv.glm() has signature cv.glm(data, glmfit, ...); it does NOT accept a
#    raw matrix X and vector y as separate arguments.
#  - glm() has no `lambda` argument; that belongs to glmnet.
#  - cv.glmnet() was called without loading glmnet, and its result has nothing
#    to do with the glm fits the loop was building.
library(boot)

# Build the three responses from the three coefficient vectors
# (y_k = X %*% beta_k + noise), so each CV value belongs to one model.
set.seed(2)
y1 <- as.vector(X %*% beta1 + rnorm(nrow(X)))
y2 <- as.vector(X %*% beta2 + rnorm(nrow(X)))
y3 <- as.vector(X %*% beta3 + rnorm(nrow(X)))

# LOOCV estimate of prediction error for one response vector.
# cv.glm() needs a data frame plus a glm object fitted on that data frame;
# delta[1] is the raw (unadjusted) cross-validation estimate.
cv_error_for <- function(resp) {
  dat <- data.frame(y = resp, X)
  fit <- glm(y ~ ., data = dat)
  cv.glm(dat, fit)$delta[1]
}

cv.error1 <- cv_error_for(y1)
cv.error2 <- cv_error_for(y2)
cv.error3 <- cv_error_for(y3)
c(model1 = cv.error1, model2 = cv.error2, model3 = cv.error3)
# ④ Ridge regression: for each of the three models draw two separate plots,
#    both with lambda on the x-axis — one showing CV error, one showing
#    prediction error — finishing one model before starting the next.
#
# Fixes vs. the original:
#  - plot(glmnet_fit, xvar = "lambda") draws coefficient paths, not errors.
#  - alpha = 1 is the lasso; ridge regression requires alpha = 0 throughout.
#  - par(mfrow = c(2, 3)) packs all plots into one device, but the task asks
#    for separate plots drawn model by model.
library(glmnet)

lambdas <- seq(0.1, 1, length.out = 100)

set.seed(3)
for (k in 1:3) {
  beta_k <- switch(k, beta1, beta2, beta3)
  y_k <- as.vector(X %*% beta_k + rnorm(nrow(X)))

  # Cross-validated ridge fit (alpha = 0 -> ridge penalty)
  cv_fit <- cv.glmnet(X, y_k, alpha = 0, lambda = lambdas)

  # Plot 1: CV error (cvm, mean cross-validated error) vs lambda
  plot(cv_fit$lambda, cv_fit$cvm, type = "l",
       xlab = "lambda", ylab = "CV error",
       main = paste0("CV Error (Model ", k, ")"))

  # Plot 2: prediction error (in-sample MSE) vs lambda.
  # predict() returns an n x length(lambda) matrix, one column per lambda
  # in fit$lambda (glmnet stores lambdas in decreasing order).
  fit <- glmnet(X, y_k, alpha = 0, lambda = lambdas)
  preds <- predict(fit, newx = X)
  pred_err <- colMeans((preds - y_k)^2)
  plot(fit$lambda, pred_err, type = "l",
       xlab = "lambda", ylab = "Prediction error",
       main = paste0("Prediction Error (Model ", k, ")"))
}
阅读全文