Refer to the following two code segments.

Segment 1:

# Lab 5: Cross-Validation and the Bootstrap
# The Validation Set Approach
install.packages("ISLR")
library(ISLR)
set.seed(1)
train=sample(392,196)
lm.fit=lm(mpg~horsepower,data=Auto,subset=train)
attach(Auto)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
lm.fit2=lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
lm.fit3=lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3,Auto))[-train]^2)
set.seed(2)
train=sample(392,196)
lm.fit=lm(mpg~horsepower,subset=train)
mean((mpg-predict(lm.fit,Auto))[-train]^2)
lm.fit2=lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2,Auto))[-train]^2)
lm.fit3=lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3,Auto))[-train]^2)

# Leave-One-Out Cross-Validation
glm.fit=glm(mpg~horsepower,data=Auto)
coef(glm.fit)
lm.fit=lm(mpg~horsepower,data=Auto)
coef(lm.fit)
library(boot)
glm.fit=glm(mpg~horsepower,data=Auto)
cv.err=cv.glm(Auto,glm.fit)
cv.err$delta
cv.error=rep(0,5)
for (i in 1:5){
  glm.fit=glm(mpg~poly(horsepower,i),data=Auto)
  cv.error[i]=cv.glm(Auto,glm.fit)$delta[1]
}
cv.error

Segment 2:

library(caret)
library(klaR)
data(iris)
split <- 0.80
trainIndex <- createDataPartition(iris$Species,p=split,list=FALSE)
data_train <- iris[ trainIndex,]
data_test <- iris[-trainIndex,]
model <- NaiveBayes(Species~.,data=data_train)
x_test <- data_test[,1:4]
y_test <- data_test[,5]
predictions <- predict(model,x_test)
confusionMatrix(predictions$class,y_test)

Write R code that completes the following tasks:
(1) build a 50 x 30 matrix of random data, giving 30 variables;
(2) generate three linear models over the data from (1), each with a different set of coefficients;
(3) compute the cross-validation (CV) error of each of the three models under linear regression;
(4) under ridge regression, draw two plots for each group, both with lambda on the x-axis: one with CV error on the y-axis and one with prediction error on the y-axis. The plots should appear separately in the Plots pane, and each group should be drawn completely before moving on to the next.
Here is the code implementation:
# Task (1): generate a 50 x 30 matrix of random data (50 observations, 30 variables)
set.seed(123)
data <- matrix(rnorm(1500), nrow=50, ncol=30)
# Task (2): three coefficient vectors, and one linear model per vector.
# Each response is generated as data %*% coef plus Gaussian noise; fitting
# lm on data[,2:30] %*% coef1 would fail, since a length-30 coefficient
# vector cannot multiply a 29-column matrix.
set.seed(456)
coef1 <- rnorm(30, mean=2, sd=1)
coef2 <- rnorm(30, mean=-2, sd=1)
coef3 <- rnorm(30, mean=0, sd=1)
y1 <- as.numeric(data %*% coef1 + rnorm(50))
y2 <- as.numeric(data %*% coef2 + rnorm(50))
y3 <- as.numeric(data %*% coef3 + rnorm(50))
# Fit with glm (default gaussian family) so cv.glm can be used in task (3)
df1 <- data.frame(y=y1, x=data); model1 <- glm(y~., data=df1)
df2 <- data.frame(y=y2, x=data); model2 <- glm(y~., data=df2)
df3 <- data.frame(y=y3, x=data); model3 <- glm(y~., data=df3)
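Because the responses are generated from known coefficients, a quick sanity check (not part of the original tasks) is to compare a few fitted slopes against the generating ones; with 50 observations and 30 predictors the recovery is noisy but visibly close:

# first few fitted slopes (intercept dropped) vs. the true coefficients
round(cbind(fitted=coef(model1)[-1], true=coef1)[1:5,], 2)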
# Task (3): 10-fold CV error for each linear model.
# cv.glm needs a data frame and a glm fit, hence df1..df3 and glm above.
library(boot)
cv.error1 <- cv.glm(df1, model1, K=10)$delta[1]
cv.error2 <- cv.glm(df2, model2, K=10)$delta[1]
cv.error3 <- cv.glm(df3, model3, K=10)$delta[1]
c(cv.error1, cv.error2, cv.error3)
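If leave-one-out CV is wanted instead, as in the Lab 5 reference code, cv.glm defaults to K equal to the number of observations when K is omitted. A minimal sketch, reusing model1 from above:

# LOOCV: each of the 50 observations is held out once
loocv.error1 <- cv.glm(df1, model1)$delta[1]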
# Task (4): ridge regression on each of the three responses.
# In glmnet, alpha=0 is ridge (alpha=1 would be the lasso, alpha=0.5 the
# elastic net), so all three groups use alpha=0.
library(glmnet)
set.seed(789)
cv.ridge1 <- cv.glmnet(data, y1, alpha=0)
cv.ridge2 <- cv.glmnet(data, y2, alpha=0)
cv.ridge3 <- cv.glmnet(data, y3, alpha=0)
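glmnet also ships a built-in plot method for cv.glmnet objects, which draws the CV curve with error bars against log(lambda); it can serve as a quick preview before the custom plots below:

plot(cv.ridge1)  # built-in CV-error curve for group 1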
# CV error across the lambda grid is already stored in each fit's cvm
# component; prediction error here is the training-set MSE of the fitted
# values at each lambda on the same grid. (The original loops read cvm and
# lambda.min off the glmnet.fit component, which does not carry them.)
pred1 <- predict(cv.ridge1$glmnet.fit, newx=data)  # 50 x nlambda matrix
pred2 <- predict(cv.ridge2$glmnet.fit, newx=data)
pred3 <- predict(cv.ridge3$glmnet.fit, newx=data)
pred.error1 <- colMeans((y1 - pred1)^2)
pred.error2 <- colMeans((y2 - pred2)^2)
pred.error3 <- colMeans((y3 - pred3)^2)
# Plot CV error and prediction error against lambda, one plot at a time
# (no par(mfrow), so each appears separately in the Plots pane), finishing
# each group before starting the next; log="x" log-scales the lambda axis.
# Group 1
plot(cv.ridge1$lambda, cv.ridge1$cvm, type="l", log="x", xlab="Lambda", ylab="CV error")
plot(cv.ridge1$lambda, pred.error1, type="l", log="x", xlab="Lambda", ylab="Prediction error")
# Group 2
plot(cv.ridge2$lambda, cv.ridge2$cvm, type="l", log="x", xlab="Lambda", ylab="CV error")
plot(cv.ridge2$lambda, pred.error2, type="l", log="x", xlab="Lambda", ylab="Prediction error")
# Group 3
plot(cv.ridge3$lambda, cv.ridge3$cvm, type="l", log="x", xlab="Lambda", ylab="CV error")
plot(cv.ridge3$lambda, pred.error3, type="l", log="x", xlab="Lambda", ylab="Prediction error")
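As a follow-up check, the lambda that minimizes the CV curve is stored on each cv.glmnet object as lambda.min, so the penalties selected for the three groups can be compared directly:

# CV-optimal lambda for each group
c(cv.ridge1$lambda.min, cv.ridge2$lambda.min, cv.ridge3$lambda.min)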