```
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# Time index: 23 days x 128 readings per day
X1 = [i for i in range(1, 24) for j in range(128)]

# Read columns 3-8 of the spreadsheet and split each column at row 2927,
# the boundary between the two apartment units
path = 'C:/Users/86147/OneDrive/文档/777.xlsx'
first_unit = []   # rows 0..2927 of each column
second_unit = []  # rows 2928..end of each column
for col in range(3, 9):
    df = pd.read_excel(path, header=0, usecols=(col,))
    values = df.values.ravel().tolist()
    first_unit.append(values[:2928])
    second_unit.append(values[2928:])

# Unpack the six second-unit columns under their original names
x21, x31, x41, x51, x61, x71 = second_unit

np.random.seed(42)
q = np.array(X1[:2922])
w = np.array(x21[:2922])
e = np.array(x31[:2922])
r = np.array(x41[:2922])
t = np.array(x51[:2922])
p = np.array(x61[:2922])
u = np.array(x71[:2922])
eps = np.random.normal(0, 0.05, 152)  # noise term (generated but never added to y)

X = np.c_[q, w, e, r, t, p, u]
beta = [0.1, 0.15, 0.2, 0.5, 0.33, 0.45, 0.6]
y = np.dot(X, beta)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

alpha = 0.1  # ridge regression penalty parameter
ridge = Ridge(alpha=alpha)
ridge.fit(X_train, y_train)

y_pred = ridge.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print('MSE:', mse)

coef = ridge.coef_            # ridge regression coefficients
intercept = ridge.intercept_  # ridge regression intercept
print('Coefficients:', coef)
print('Intercept:', intercept)
```
Sure. If you want to modify the code while keeping the time series, here are a few points to consider:
1. Since the time-series index X1 is already generated in the code, there is no need to regenerate it.
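For reference, the index built in the question's code is 23 day labels repeated 128 times each, i.e. 2944 values in total, of which the first 2922 are used. A quick check, just to illustrate the structure:
```
X1 = [i for i in range(1, 24) for j in range(128)]  # 23 days x 128 readings per day
print(len(X1), X1[:3], X1[-3:])                     # 2944 [1, 1, 1] [23, 23, 23]
```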
2. If the dataset is large enough, consider splitting it randomly into a training set and a test set, which gives a more reliable estimate of model performance. For example:
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```
Here, X is the feature matrix and y is the target vector.
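With the X and y built in the question's code (2922 rows, 7 features), a 20% test fraction should give roughly 2337 training rows and 585 test rows. A quick sanity check (sketch):
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape)  # expected: (2337, 7) (585, 7)
```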
3. When training the ridge regression model, consider selecting the optimal penalty parameter alpha. Cross-validation can be used to pick it, for example:
```
from sklearn.model_selection import GridSearchCV
param_grid = [{'alpha': np.logspace(-3, 3, 13)}]
ridge = Ridge()
grid_search = GridSearchCV(ridge, param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(X_train, y_train)
alpha = grid_search.best_params_['alpha']
```
Here, sklearn's GridSearchCV class selects the optimal alpha value via cross-validation, which generally yields a better model.
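An equivalent, more compact option is sklearn's RidgeCV, which performs the cross-validated search over alpha internally. A sketch, assuming the same alpha grid:
```
from sklearn.linear_model import RidgeCV

# Searches the alpha grid with 5-fold cross-validation, then refits on the full training data
ridge_cv = RidgeCV(alphas=np.logspace(-3, 3, 13), cv=5)
ridge_cv.fit(X_train, y_train)
print('Best alpha:', ridge_cv.alpha_)
```
The fitted ridge_cv object can then be used directly for prediction and exposes coef_ and intercept_ like a plain Ridge.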
4. Finally, refit the model with the selected alpha and output its coefficients and intercept, along with evaluation metrics such as the MSE. For example:
```
ridge = Ridge(alpha=alpha)  # refit with the alpha selected by the grid search
ridge.fit(X_train, y_train)
coef = ridge.coef_
intercept = ridge.intercept_
print('Coefficients:', coef)
print('Intercept:', intercept)
y_pred = ridge.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print('MSE:', mse)
```
This gives a clearer picture of the model's performance and parameters.
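Putting the pieces together, here is a self-contained version of the whole workflow on synthetic data, so it runs without the Excel file; the feature count, coefficients, and noise level are illustrative assumptions:
```
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV, train_test_split

rng = np.random.default_rng(42)
n_samples, n_features = 2922, 7
X = rng.normal(size=(n_samples, n_features))            # stand-in for the spreadsheet features
beta = np.array([0.1, 0.15, 0.2, 0.5, 0.33, 0.45, 0.6])
y = X @ beta + rng.normal(0, 0.05, n_samples)           # linear target with small noise

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Cross-validated search for the ridge penalty
param_grid = [{'alpha': np.logspace(-3, 3, 13)}]
grid_search = GridSearchCV(Ridge(), param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(X_train, y_train)

# Evaluate the refit best estimator on the held-out test set
best_ridge = grid_search.best_estimator_
y_pred = best_ridge.predict(X_test)
print('Best alpha:', grid_search.best_params_['alpha'])
print('MSE:', mean_squared_error(y_test, y_pred))
print('Coefficients:', best_ridge.coef_)
print('Intercept:', best_ridge.intercept_)
```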