How do I remove the space in the output of the following code?

```python
import numpy as np
n = int(input('输入每个维度的元素个数:'))
a = np.arange(1, n**2 + 1).reshape(n, n)
res = np.trace(a)
print('矩阵的迹为', res)
```
You can use the sep parameter of the print function to set the separator; setting it to an empty string removes the space from the output. The modified code is as follows:
```python
import numpy as np
n = int(input('输入每个维度的元素个数:'))
a = np.arange(1,n**2+1).reshape(n,n)
res = np.trace(a)
print('矩阵的迹为', res, sep='')
```
With this change, the output no longer contains a space.
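Alternatively, an f-string gives the same result and full control over the formatting; for example, with n = 3 the trace is 1 + 5 + 9 = 15:

```python
print(f'矩阵的迹为{res}')   # prints 矩阵的迹为15, with no space before the number
```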
Related questions
Rewrite the following code as an nn.Module class:

```python
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM

data1 = pd.read_csv("终极1.csv", usecols=[17], encoding='gb18030')
df = data1.fillna(method='ffill')
data = df.values.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(data)
train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train, test = data[0:train_size, :], data[train_size:len(data), :]

def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

look_back = 30
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))

model = Sequential()
model.add(LSTM(50, input_shape=(1, look_back), return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=6, batch_size=1, verbose=2)

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
```
Below is the code rewritten as an nn.Module class:
```python
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler


class LSTMModel(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers, look_back):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.look_back = look_back
        self.lstm1 = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.lstm2 = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Reshape to (batch, look_back, features) before sizing the hidden state
        x = x.view(-1, self.look_back, 1)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.lstm1(x, (h0, c0))
        out, _ = self.lstm2(out, (h0, c0))
        out = self.fc(out[:, -1, :])   # use only the last time step
        return out


data1 = pd.read_csv("终极1.csv", usecols=[17], encoding='gb18030')
df = data1.fillna(method='ffill')
data = df.values.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(data)

train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train, test = data[0:train_size, :], data[train_size:len(data), :]


def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)


look_back = 30
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LSTMModel(input_size=1, hidden_size=50, output_size=1, num_layers=2, look_back=look_back).to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

num_epochs = 6
for epoch in range(num_epochs):
    for i, (inputs, labels) in enumerate(zip(trainX, trainY)):
        inputs = torch.from_numpy(inputs).float().to(device)
        # labels is a NumPy scalar; wrap it so its shape matches the (1, 1) model output
        labels = torch.tensor([[labels]], dtype=torch.float32).to(device)
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{len(trainX)}], Loss: {loss.item():.4f}')

model.eval()
with torch.no_grad():
    trainPredict = scaler.inverse_transform(model(torch.from_numpy(trainX).float().to(device)).cpu().numpy())
    testPredict = scaler.inverse_transform(model(torch.from_numpy(testX).float().to(device)).cpu().numpy())
trainY = scaler.inverse_transform([trainY])
testY = scaler.inverse_transform([testY])
```
The class takes five parameters:
- input_size: the LSTM input dimension; for this example, input_size=1
- hidden_size: the LSTM hidden-layer dimension
- output_size: the LSTM output dimension; for this example, output_size=1
- num_layers: the number of LSTM layers
- look_back: the sequence length of each sample; for this example, look_back=30
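As a quick sanity check (a minimal sketch that only assumes the LSTMModel class defined above), you can feed a dummy batch through the model and confirm it returns one prediction per window:

```python
import torch

# Hypothetical smoke test: 4 random windows of length look_back=30 with 1 feature each
m = LSTMModel(input_size=1, hidden_size=50, output_size=1, num_layers=2, look_back=30)
dummy = torch.randn(4, 30, 1)   # (batch, look_back, features)
print(m(dummy).shape)           # expected: torch.Size([4, 1])
```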
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM

# Read the data
dataset = pd.read_csv('wind_speed.csv', header=0, index_col=0)
dataset.index = pd.to_datetime(dataset.index)
dataset = dataset.resample('H').mean()

# Preprocess the data
scaler = MinMaxScaler(feature_range=(0, 1))
dataset_scaled = scaler.fit_transform(dataset)

# Create the training and test sets
train_size = int(len(dataset_scaled) * 0.8)
test_size = len(dataset_scaled) - train_size
train, test = dataset_scaled[0:train_size, :], dataset_scaled[train_size:len(dataset_scaled), :]

# Build the windowed dataset
def create_dataset(dataset, look_back):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

look_back = 24
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# Adjust the data dimensions
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))

# Build and train the LSTM model
model = Sequential()
model.add(LSTM(50, input_shape=(look_back, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)

# Predict
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

# Invert the scaling
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# Plot the predictions
plt.plot(trainY[0], label='Train Data')
plt.plot(trainPredict[:, 0], label='Predicted Train Data')
plt.plot(testY[0], label='Test Data')
plt.plot(testPredict[:, 0], label='Predicted Test Data')
plt.legend(loc='best')
plt.show()
```
This code uses an LSTM model to forecast wind-speed data. Specifically, it reads the wind-speed data with pandas, preprocesses and normalizes it, splits it into training and test sets, and defines a create_dataset function to build the windowed dataset. It then builds an LSTM model with Keras and trains it on the training set. Finally, the trained model is used to predict on both the training and test sets, and the predictions are plotted.
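The create_dataset step is the core of the data preparation: it slides a window of length look_back over the series and pairs each window with the value that follows it. A small illustration with toy values (not the original wind-speed data):

```python
import numpy as np

def create_dataset(dataset, look_back):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:(i + look_back), 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

toy = np.array([[0.1], [0.2], [0.3], [0.4], [0.5], [0.6]])
X, y = create_dataset(toy, look_back=3)
print(X)   # [[0.1 0.2 0.3]
           #  [0.2 0.3 0.4]]
print(y)   # [0.4 0.5]
# Note: because of the -1 in the range, the final value (0.6) is never used as a target.
```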