```python
df = pd.read_excel(r'分区.xlsx', dtype=object, usecols=['区域', '语言', '国家'])
```
This raises: `ValueError: '语言' is not in list`
This error means the Excel file you are reading does not contain a column named `语言`. Check whether the column names in the file are spelled exactly that way.
The `usecols` parameter is the other thing to check: it lists the columns to read, and if any of them is missing from the file, this same error is raised. If you do not need to restrict the columns, simply omit the parameter, for example:
```python
df = pd.read_excel(r'分区.xlsx')
```
This reads every column in the Excel file. If you do want to restrict the columns, list only the ones that actually exist:
```python
df = pd.read_excel(r'分区.xlsx', usecols=['区域', '国家'])
```
This reads only the `区域` and `国家` columns. If the file does not contain these columns either, the same error is raised, so again verify the column names in the file.
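If you are unsure what the header row actually contains, a quick check is to read only the headers and print them (a minimal sketch, assuming the same file; stray spaces or full-width characters in a header are a common cause of this mismatch):
```python
import pandas as pd

# Read just the header row and list the column names exactly as pandas sees them
cols = pd.read_excel(r'分区.xlsx', nrows=0).columns.tolist()
print(cols)  # compare against ['区域', '语言', '国家']
```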
Related questions
```python
TextData = pd.read_excel('train.xlsx', header=None, usecols=[1])
TextData.columns = ['label']
TextData['texts'] = pd.read_excel('train.xlsx', header=None, usecols=[0])
# Read the test data
TextDataTest = pd.read_excel('test.xlsx', header=None, usecols=[1])
TextDataTest.columns = ['label']
TextDataTest['texts'] = pd.read_excel('test.xlsx', header=None, usecols=[0])
# Set up a counter, segment each sentence with jieba.cut, and build the vocabulary with Vocab
counter = Counter()
for (label, texts) in TextData.values:
    counter.update(jieba.cut(texts))
min_freq = 1  # minimum token frequency
vocab = Vocab(counter, min_freq=min_freq)
print(vocab['我'])
```
This raises `Vocab.__init__() got an unexpected keyword argument 'min_freq'`. How do I fix it?
This error usually means your installed `torchtext` is a recent release in which the vocabulary API changed. In newer versions (roughly 0.10 and later), the old `Vocab(counter, min_freq=...)` constructor is no longer what `torchtext.vocab.Vocab` refers to (the old class lived on for a while as `torchtext.legacy.vocab.Vocab` before being removed), and the new `Vocab` class does not accept a `min_freq` keyword, hence the error. There is no `min_freqs` parameter either; instead, build the vocabulary with the `torchtext.vocab.vocab()` factory function or `build_vocab_from_iterator`, both of which take a `min_freq` argument.
Here is the same logic rewritten against the newer API:
```python
from collections import Counter, OrderedDict

import jieba
import pandas as pd
from torchtext.vocab import vocab as build_vocab  # factory function, torchtext >= 0.10

TextData = pd.read_excel('train.xlsx', header=None, usecols=[1])
TextData.columns = ['label']
TextData['texts'] = pd.read_excel('train.xlsx', header=None, usecols=[0])

# Segment every sentence with jieba.cut and count the tokens
counter = Counter()
for label, texts in TextData.values:
    counter.update(jieba.cut(texts))

min_freq = 1  # minimum frequency for a token to be kept in the vocabulary
ordered = OrderedDict(counter.most_common())      # most frequent tokens first
vocab = build_vocab(ordered, min_freq=min_freq)
vocab.insert_token('<unk>', 0)                    # reserve index 0 for unknown tokens
vocab.set_default_index(vocab['<unk>'])           # map out-of-vocabulary tokens to <unk>

print(vocab['我'])
```
Try this snippet to resolve the problem.
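Equivalently, if you prefer to build the vocabulary straight from the tokenized sentences, `build_vocab_from_iterator` also accepts `min_freq` in recent torchtext releases; a minimal sketch under that assumption, reusing the `TextData` frame from above:
```python
import jieba
from torchtext.vocab import build_vocab_from_iterator

def tokenized(sentences):
    # Yield one token list per sentence; the builder consumes this iterator
    for text in sentences:
        yield list(jieba.cut(text))

vocab = build_vocab_from_iterator(tokenized(TextData['texts']),
                                  min_freq=1, specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])  # out-of-vocabulary tokens map to <unk>
print(vocab['我'])
```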
```python
import numpy as np
import pylab as pl
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

X2 = []
X3 = []
X4 = []
X5 = []
X6 = []
X7 = []
df = pd.read_excel('C:/Users/86147/OneDrive/文档/777.xlsx', header=0, usecols=(3,))
X2 = df.values.tolist()
x2 = []
for i in X2:
    if X2.index(i) <= 2927:  # split index between the two building units
        x2.append(i)
df = pd.read_excel('C:/Users/86147/OneDrive/文档/777.xlsx', header=0, usecols=(4,))
X3 = df.values.tolist()
x3 = []
for i in X3:
    if X3.index(i) <= 2927:
        x3.append(i)
df = pd.read_excel('C:/Users/86147/OneDrive/文档/777.xlsx', header=0, usecols=(5,))
X4 = df.values.tolist()
x4 = []
for i in X4:
    if X4.index(i) <= 2927:
        x4.append(i)
df = pd.read_excel('C:/Users/86147/OneDrive/文档/777.xlsx', header=0, usecols=(6,))
X5 = df.values.tolist()
x5 = []
for i in X5:
    if X5.index(i) <= 2927:
        x5.append(i)
df = pd.read_excel('C:/Users/86147/OneDrive/文档/777.xlsx', header=0, usecols=(7,))
X6 = df.values.tolist()
x6 = []
for i in X6:
    if X6.index(i) <= 2927:
        x6.append(i)
df = pd.read_excel('C:/Users/86147/OneDrive/文档/777.xlsx', header=0, usecols=(8,))
X7 = df.values.tolist()
x7 = []
for i in X7:
    if X7.index(i) <= 2927:
        x7.append(i)
np.random.seed(42)
q = np.array(X2[:2922])
w = np.array(x3[:2922])
e = np.array(x4[:2922])
r = np.array(x5[:2922])
t = np.array(x6[:2922])
p = np.array(x7[:2922])
eps = np.random.normal(0, 0.05, 152)
X = np.c_[q, w, e, r, t, p]
beta = [0.1, 0.15, 0.2, 0.5, 0.33, 0.45]
y = np.dot(X, beta)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
alpha = 0.1  # ridge regression penalty parameter
ridge = Ridge(alpha=alpha)
ridge.fit(X_train, y_train)
y_pred = ridge.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print('MSE:', mse)
coef = ridge.coef_  # ridge regression coefficients
intercept = ridge.intercept_  # ridge regression intercept
print('Coefficients:', coef)
print('Intercept:', intercept)
```
Modify this code so that a time series x1 also participates in the model.
```python
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# Read the time-series column (1) together with the six original feature columns (3-8)
df = pd.read_excel('C:/Users/86147/OneDrive/文档/777.xlsx', header=0, usecols=(1, 3, 4, 5, 6, 7, 8))
X1 = df.iloc[:, 0].values.reshape(-1, 1)  # time series x1
X2 = df.iloc[:, 1].values.reshape(-1, 1)
X3 = df.iloc[:, 2].values.reshape(-1, 1)
X4 = df.iloc[:, 3].values.reshape(-1, 1)
X5 = df.iloc[:, 4].values.reshape(-1, 1)
X6 = df.iloc[:, 5].values.reshape(-1, 1)
X7 = df.iloc[:, 6].values.reshape(-1, 1)

np.random.seed(42)
# Keep the first 2922 rows of each variable, as in the original code
q = np.array(X1[:2922])
w = np.array(X2[:2922])
e = np.array(X3[:2922])
r = np.array(X4[:2922])
t = np.array(X5[:2922])
p = np.array(X6[:2922])
o = np.array(X7[:2922])
eps = np.random.normal(0, 0.05, 152)

# Stack the seven predictors column-wise; x1 is now the first column
X = np.concatenate((q, w, e, r, t, p, o), axis=1)
beta = [0.1, 0.15, 0.2, 0.5, 0.33, 0.45, 0.25]
y = np.dot(X, beta)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
alpha = 0.1  # ridge regression penalty parameter
ridge = Ridge(alpha=alpha)
ridge.fit(X_train, y_train)
y_pred = ridge.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print('MSE:', mse)
coef = ridge.coef_          # ridge regression coefficients
intercept = ridge.intercept_  # ridge regression intercept
print('Coefficients:', coef)
print('Intercept:', intercept)
```
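One caveat: `np.dot(X, beta)` only works if every column, including the new time-series column, is numeric. If column 1 of the sheet holds dates rather than numbers, it would need to be converted first; a minimal sketch assuming that column parses as pandas datetimes:
```python
import pandas as pd

# Hypothetical conversion: turn the date column into "days since the first observation"
# so it can be used as a numeric feature for the ridge model
time_col = pd.to_datetime(df.iloc[:, 0])
X1 = (time_col - time_col.min()).dt.days.values.reshape(-1, 1)
```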