```python
import numpy as np
# Define parameters
n_lags = 31  # number of lags
n_vars = 6  # number of variables
alpha = 0.05  # confidence level
# Prepare the data
data = np.array([820.95715,819.17877,801.60077,30,26164.9,11351.8],
                [265.5425,259.05476,257.48619,11.4,12525,4296.5],
                [696.9681,685.54114,663.32014,47.5,23790.484,8344.8],
                [4556.1091,440.58995,433.21995,24.6,12931.388,5575.4],
                [360.08693,353.75386,351.59186,26.9,11944.322,4523],
                [938.55919,922.25468,894.26468,35.3,27177.893,8287.4],
                [490.47837,477.35237,385.17474,24.5,14172.1,6650.4],
                [553.15463,452.35042,425.92277,32.9,16490.17,7795],
                [740.35759,721.68259,721.68259,15.5,26117.755,7511.7],
                [1581.99576,1579.50357,1571.23257,65.4,59386.7,15347.2],
                [1360.91636,1360.20825,1358.11425,66.4,57160.533,8080],
                [564.06146,560.91611,559.08711,35.2,22361.86,6165.4],
                [732.17283,727.25063,725.93863,29.7,22177.389,4393.2],
                [424.12777,424.10579,411.19979,21.6,14691.359,4695.6],
                [1439.38133,1437.85585,1436.67585,77.3,50123.672,15479],
                [961.92496,935.21589,931.28189,45.7,28073.9,11273.3],
                [881.92808,868.65804,832.44504,46.1,27409.15,11224.4],
                [713.32299,710.75882,707.42682,35.8,24887.111,5164.2],
                [2657.28891,2599.20299,2515.67859,92,94207.179,19066.4],
                [420.95033,418.22931,416.80631,25.6,13309.9,7020],
                [193.92636,193.84936,193.83836,10.9,6133,6139.5],
                [499.81565,493.73678,485.2468,20.9,13555.897,3412],
                [951.93942,939.58126,930.049,45.6,27245.608,7752.5],
                [309.88498,297.05055,295.69055,22.6,11929.038,3903.2],
                [411.87141,406.63838,389.29638,27.8,12197.085,3834.1],
                [45.53226,39.24379,55.34631667,7.5,1872.333333,564.3],
                [532.67282,524.78031,520.89851,24,18041.642,3902],
                [269.00374,266.96222,211.14422,20.3,7163.069,3515.4],
                [91.95276,88.77094,85.74583,7.7,1962.8,645.8],
                [120.60234,116.39872,113.85872,9.8,4227.003,1706.2],
                [362.98862,350.36495,318.70232,23.7,11615.383,5752.1])
# Estimate the VAR coefficients
X = np.zeros((data.shape[0] - n_lags, n_lags * n_vars))
y = np.zeros((data.shape[0] - n_lags, n_vars))
for i in range(n_lags, data.shape[0]):
    X[i-n_lags, :] = data[i-n_lags:i, :].reshape(1, -1)
    y[i-n_lags, :] = data[i, :]
coefficients = np.linalg.inv(X.T @ X) @ X.T @ y
# Compute the residuals
residuals = y - X @ coefficients
# Compute the lagged residual covariance matrix for the PVAR model
T = residuals[n_lags:, :] @ residuals[:-n_lags, :].T / (data.shape[0] - n_lags)
# Compute the PVAR coefficients
u, s, vh = np.linalg.svd(T)
S_inv = np.diag(np.sqrt(s[:n_vars])) @ np.linalg.inv(vh[:n_vars, :])
A = S_inv @ u[:, :n_vars].T @ residuals[n_lags:, :].T
# Compute the confidence interval
t_value = np.abs(np.tinv(alpha/2, data.shape[0]-n_lags-n_vars))
se = np.sqrt((1/(data.shape[0]-n_lags-n_vars)) * (np.sum(residuals[n_lags:, :]**2) / (data.shape[0]-n_lags-n_vars-1)))
conf_int = t_value * se
print("PVAR model coefficients:\n", A)
print("Confidence interval: [{:.4f}, {:.4f}]".format(A.mean() - conf_int, A.mean() + conf_int))
```
What errors does this code have?
This code has several errors:
1. When the data is defined, `np.array` needs a nested list (double brackets), i.e. a single list of rows, so that the result is a two-dimensional array.
2. `np.tinv` does not exist in NumPy; the t critical value has to come from SciPy, e.g. `scipy.stats.t.ppf` (a quick standalone check of that call is shown right after this list).
3. When computing the confidence interval, the sample standard deviation should be used rather than the population standard deviation, so the denominator should be `(data.shape[0]-n_lags-n_vars-1)` instead of `(data.shape[0]-n_lags-n_vars)`.
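For reference, here is a minimal standalone check of the SciPy call that replaces `np.tinv`; the degrees-of-freedom value used below is purely illustrative and is not derived from the data above:
```python
from scipy import stats

alpha = 0.05
dof = 17  # illustrative degrees of freedom, not taken from the original data
t_value = stats.t.ppf(1 - alpha / 2, dof)  # two-sided critical value
print(t_value)  # about 2.110
```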
Here is the corrected code:
```python
import numpy as np
from scipy import stats  # for the t critical value (np.tinv does not exist in NumPy)
# Define parameters
n_lags = 31  # number of lags
n_vars = 6  # number of variables
alpha = 0.05  # confidence level
# Prepare the data (nested brackets so that np.array builds a 2-D array)
data = np.array([[820.95715,819.17877,801.60077,30,26164.9,11351.8],
[265.5425,259.05476,257.48619,11.4,12525,4296.5],
[696.9681,685.54114,663.32014,47.5,23790.484,8344.8],
[4556.1091,440.58995,433.21995,24.6,12931.388,5575.4],
[360.08693,353.75386,351.59186,26.9,11944.322,4523],
[938.55919,922.25468,894.26468,35.3,27177.893,8287.4],
[490.47837,477.35237,385.17474,24.5,14172.1,6650.4],
[553.15463,452.35042,425.92277,32.9,16490.17,7795],
[740.35759,721.68259,721.68259,15.5,26117.755,7511.7],
[1581.99576,1579.50357,1571.23257,65.4,59386.7,15347.2],
[1360.91636,1360.20825,1358.11425,66.4,57160.533,8080],
[564.06146,560.91611,559.08711,35.2,22361.86,6165.4],
[732.17283,727.25063,725.93863,29.7,22177.389,4393.2],
[424.12777,424.10579,411.19979,21.6,14691.359,4695.6],
[1439.38133,1437.85585,1436.67585,77.3,50123.672,15479],
[961.92496,935.21589,931.28189,45.7,28073.9,11273.3],
[881.92808,868.65804,832.44504,46.1,27409.15,11224.4],
[713.32299,710.75882,707.42682,35.8,24887.111,5164.2],
[2657.28891,2599.20299,2515.67859,92,94207.179,19066.4],
[420.95033,418.22931,416.80631,25.6,13309.9,7020],
[193.92636,193.84936,193.83836,10.9,6133,6139.5],
[499.81565,493.73678,485.2468,20.9,13555.897,3412],
[951.93942,939.58126,930.049,45.6,27245.608,7752.5],
[309.88498,297.05055,295.69055,22.6,11929.038,3903.2],
[411.87141,406.63838,389.29638,27.8,12197.085,3834.1],
[45.53226,39.24379,55.34631667,7.5,1872.333333,564.3],
[532.67282,524.78031,520.89851,24,18041.642,3902],
[269.00374,266.96222,211.14422,20.3,7163.069,3515.4],
[91.95276,88.77094,85.74583,7.7,1962.8,645.8],
[120.60234,116.39872,113.85872,9.8,4227.003,1706.2],
[362.98862,350.36495,318.70232,23.7,11615.383,5752.1]])
# Estimate the VAR coefficients by ordinary least squares
X = np.zeros((data.shape[0] - n_lags, n_lags * n_vars))
y = np.zeros((data.shape[0] - n_lags, n_vars))
for i in range(n_lags, data.shape[0]):
    X[i-n_lags, :] = data[i-n_lags:i, :].reshape(1, -1)
    y[i-n_lags, :] = data[i, :]
coefficients = np.linalg.inv(X.T @ X) @ X.T @ y
# Compute the residuals
residuals = y - X @ coefficients
# Compute the lagged residual covariance matrix for the PVAR model
T = residuals[n_lags:, :] @ residuals[:-n_lags, :].T / (data.shape[0] - n_lags)
# Compute the PVAR coefficients from an SVD of that matrix
u, s, vh = np.linalg.svd(T)
S_inv = np.diag(np.sqrt(s[:n_vars])) @ np.linalg.inv(vh[:n_vars, :])
A = S_inv @ u[:, :n_vars].T @ residuals[n_lags:, :].T
# Compute the confidence interval (scipy.stats supplies the t critical value)
t_value = stats.t.ppf(1 - alpha/2, data.shape[0]-n_lags-n_vars-1)
se = np.sqrt((1/(data.shape[0]-n_lags-n_vars-1)) * (np.sum(residuals[n_lags:, :]**2) / (data.shape[0]-n_lags-n_vars-1)))
conf_int = t_value * se
print("PVAR模型的系数:\n", A)
print("置信区间:[{:.4f}, {:.4f}]".format(A.mean() - conf_int, A.mean() + conf_int))
```
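One remaining caveat that the list above does not cover: `n_lags = 31` equals the number of rows in `data`, so `data.shape[0] - n_lags` is zero, `X` and `y` end up empty, and `np.linalg.inv(X.T @ X)` cannot be computed. The sketch below only illustrates how the estimation and the critical-value lookup run once the lag order leaves usable rows; the random placeholder data, the reduced `n_lags = 2`, and the switch to `np.linalg.lstsq` are assumptions made for this example, not part of the original question.
```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
data = rng.normal(size=(31, 6))      # placeholder data with the same 31x6 shape as the original

n_lags, n_vars, alpha = 2, 6, 0.05   # a lag order small enough to leave usable rows
n_obs = data.shape[0] - n_lags       # 29 usable observations

# Stack the n_lags previous rows as regressors for each target row
X = np.zeros((n_obs, n_lags * n_vars))
y = np.zeros((n_obs, n_vars))
for i in range(n_lags, data.shape[0]):
    X[i - n_lags, :] = data[i - n_lags:i, :].reshape(-1)
    y[i - n_lags, :] = data[i, :]

# Least squares via lstsq avoids forming (X'X)^-1 explicitly
coefficients, *_ = np.linalg.lstsq(X, y, rcond=None)
residuals = y - X @ coefficients

# Two-sided t critical value from SciPy (NumPy has no tinv)
dof = n_obs - n_lags * n_vars        # 17
t_value = stats.t.ppf(1 - alpha / 2, dof)
print("coefficient matrix shape:", coefficients.shape)  # (12, 6)
print("t critical value:", round(t_value, 3))
```
The sketch deliberately stops before the PVAR-specific SVD step and the standard-error formula, since their exact form in the original code is questionable; it only demonstrates that the regression and the critical-value lookup work once the lag order fits the sample size.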