What do t1 = torch.empty(2), t2 = torch.zeros(2, 2), t3 = torch.ones(2, 2, 2), and t4 = torch.full((2, 2, 2, 2), 3.) do?
All four statements create tensors in PyTorch. Specifically:
1. `t1 = torch.empty(2)`: creates an uninitialized tensor of shape (2,); its values are whatever happened to be in memory, not meaningful numbers.
2. `t2 = torch.zeros(2, 2)`: creates a tensor of shape (2, 2) with every element set to 0.
3. `t3 = torch.ones(2, 2, 2)`: creates a tensor of shape (2, 2, 2) with every element set to 1.
4. `t4 = torch.full((2, 2, 2, 2), 3.)`: creates a tensor of shape (2, 2, 2, 2) with every element set to 3.0.
Such tensors are commonly used as the inputs, outputs, and intermediate buffers of neural network models.
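A minimal demonstration of the four calls (the values printed for `torch.empty` will vary from run to run, since the memory is uninitialized):
```python
import torch

t1 = torch.empty(2)                # shape (2,), uninitialized memory
t2 = torch.zeros(2, 2)             # shape (2, 2), all zeros
t3 = torch.ones(2, 2, 2)           # shape (2, 2, 2), all ones
t4 = torch.full((2, 2, 2, 2), 3.)  # shape (2, 2, 2, 2), all 3.0

for t in (t1, t2, t3, t4):
    print(t.shape, t.dtype)  # all default to torch.float32
```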
Related questions
Given the following artificial dataset:
```python
import torch
import matplotlib.pyplot as plt

n_data = torch.ones(50, 2)
x1 = torch.normal(2 * n_data, 1)
y1 = torch.zeros(50)
x2 = torch.normal(-2 * n_data, 1)
y2 = torch.ones(50)
x = torch.cat((x1, x2), 0).type(torch.FloatTensor)
y = torch.cat((y1, y2), 0).type(torch.FloatTensor)
```
Please implement logistic regression from scratch in Python, using only Tensor- and NumPy-related libraries.
To implement logistic regression from scratch using only Tensor- and NumPy-related libraries, you can proceed as follows:
1. Import the required libraries:
```python
import numpy as np
import torch
import matplotlib.pyplot as plt  # needed for the visualization in step 7
```
2. Prepare the data:
```python
n_data = torch.ones(50, 2)
x1 = torch.normal(2 * n_data, 1)   # 50 points around mean (2, 2)
y1 = torch.zeros(50)               # class 0 labels
x2 = torch.normal(-2 * n_data, 1)  # 50 points around mean (-2, -2)
y2 = torch.ones(50)                # class 1 labels
x = torch.cat((x1, x2), 0).type(torch.FloatTensor)  # features, shape (100, 2)
y = torch.cat((y1, y2), 0).type(torch.FloatTensor)  # labels, shape (100,)
```
3. Define the model parameters:
```python
w = torch.randn(2, 1, requires_grad=True)  # weights
b = torch.zeros(1, requires_grad=True)     # bias
```
4. Define the model and the loss function:
```python
def logistic_regression(x):
    return torch.sigmoid(torch.mm(x, w) + b)

def cross_entropy(y_pred, y):
    y = y.view(-1, 1)                      # (N, 1) so broadcasting matches y_pred's shape
    y_pred = y_pred.clamp(1e-7, 1 - 1e-7)  # avoid log(0)
    return -torch.mean(y * torch.log(y_pred) + (1 - y) * torch.log(1 - y_pred))
```
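As a quick sanity check (an aside, not part of the original answer), this hand-written loss should agree with PyTorch's built-in binary cross-entropy up to the clamping tolerance:
```python
import torch.nn.functional as F

y_pred = logistic_regression(x)  # shape (100, 1)
manual = cross_entropy(y_pred, y)
builtin = F.binary_cross_entropy(y_pred, y.view(-1, 1))
assert torch.allclose(manual, builtin, atol=1e-5)
```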
5. Train the model with gradient descent:
```python
lr = 0.01          # learning rate
num_epochs = 1000  # number of iterations

for epoch in range(num_epochs):
    # forward pass
    y_pred = logistic_regression(x)
    loss = cross_entropy(y_pred, y)
    # backward pass and parameter update
    loss.backward()  # compute gradients
    with torch.no_grad():
        w -= lr * w.grad  # update weights
        b -= lr * b.grad  # update bias
        w.grad.zero_()    # reset gradients
        b.grad.zero_()
    if (epoch + 1) % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
```
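If the from-scratch constraint were relaxed, the manual update above could also be written with torch.optim.SGD; a minimal sketch, not part of the original answer:
```python
optimizer = torch.optim.SGD([w, b], lr=lr)
for epoch in range(num_epochs):
    loss = cross_entropy(logistic_regression(x), y)
    optimizer.zero_grad()  # replaces the manual grad.zero_() calls
    loss.backward()
    optimizer.step()       # replaces the manual w -= lr * w.grad updates
```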
6. Use the model to make predictions:
```python
with torch.no_grad():
    y_pred = logistic_regression(x)
    predicted = (y_pred >= 0.5).float()  # threshold probabilities at 0.5
    print('Predicted:', predicted)
```
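For a quick sanity check, you can also compute the training accuracy (a small addition, assuming the tensors defined above):
```python
accuracy = (predicted.view(-1) == y).float().mean()
print('Accuracy: {:.2%}'.format(accuracy.item()))
```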
7. Visualize the results:
```python
# color each point by its predicted class
plt.scatter(x.numpy()[:, 0], x.numpy()[:, 1],
            c=predicted.numpy().flatten(), s=100, lw=0, cmap='RdYlGn')
plt.show()
```
This completes the from-scratch logistic regression. Note that PyTorch's autograd is used to compute the gradients, and matplotlib is used for the visualization.
Running the following code:
```python
import torch
import os
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(32 * 9 * 9, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        x = self.pool(nn.functional.relu(self.conv1(x)))
        x = self.pool(nn.functional.relu(self.conv2(x)))
        x = x.view(-1, 32 * 9 * 9)
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

folder_path = 'random_matrices2'
# create an empty tensor
x = torch.empty((40, 1, 42, 42))
# iterate over the files in the folder, converting each matrix to a tensor
for j in range(40):
    file_name = 'matrix_{}.npy'.format(j)
    file_path = os.path.join(folder_path, file_name)
    matrix = np.load(file_path)
    x[j] = torch.from_numpy(matrix).unsqueeze(0)
y = torch.cat((torch.zeros(20, dtype=torch.long), torch.ones(20, dtype=torch.long)), dim=0)

for epoch in range(10):
    running_loss = 0.0
    for i in range(40):
        inputs = x[i]
        labels = y[i]
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs.unsqueeze(0), labels.unsqueeze(0))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('[%d] loss: %.3f' % (epoch + 1, running_loss / 40))
print('Finished Training')
```
raises `RuntimeError: Expected target size [1, 2], got [1]`. How do I fix it?
The error is caused by `outputs.unsqueeze(0)`, not by the label dtype. After `x.view(-1, 32 * 9 * 9)`, the network output already carries a batch dimension and has shape (1, 2); unsqueezing it again produces a (1, 1, 2) tensor, which `nn.CrossEntropyLoss` interprets as (N=1, C=1, d=2) and therefore expects a target of size [1, 2]. The fix is to add the batch dimension to the input instead and pass the output to the loss directly. The corrected code:
```python
import torch
import os
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1)
        self.fc1 = nn.Linear(32 * 9 * 9, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 2)

    def forward(self, x):
        x = self.pool(nn.functional.relu(self.conv1(x)))
        x = self.pool(nn.functional.relu(self.conv2(x)))
        x = x.view(-1, 32 * 9 * 9)  # flatten: a 42x42 input is 32x9x9 after two conv+pool stages
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
folder_path = 'random_matrices2'
# preallocate a tensor to hold the 40 matrices
x = torch.empty((40, 1, 42, 42))
# iterate over the files in the folder, converting each matrix to a tensor
for j in range(40):
    file_name = 'matrix_{}.npy'.format(j)
    file_path = os.path.join(folder_path, file_name)
    matrix = np.load(file_path)
    x[j] = torch.from_numpy(matrix).unsqueeze(0)  # add the channel dimension
y = torch.cat((torch.zeros(20, dtype=torch.long), torch.ones(20, dtype=torch.long)), dim=0)
for epoch in range(10):
    running_loss = 0.0
    for i in range(40):
        inputs = x[i].unsqueeze(0)  # add the batch dimension: (1, 1, 42, 42)
        labels = y[i].unsqueeze(0)  # shape (1,)
        optimizer.zero_grad()
        outputs = net(inputs)       # shape (1, 2)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('[%d] loss: %.3f' % (epoch + 1, running_loss / 40))
print('Finished Training')
```
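To illustrate the shape rule behind the error (an illustrative aside, not from the original answer): `nn.CrossEntropyLoss` expects an input of shape (N, C) with a target of shape (N,); an extra leading dimension turns the input into the (N, C, d) form, which requires an (N, d) target:
```python
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
logits = torch.randn(1, 2)        # (N=1, C=2)
target = torch.tensor([1])        # (N=1,)
print(criterion(logits, target))  # works

try:
    criterion(logits.unsqueeze(0), target)  # (1, 1, 2): interpreted as (N=1, C=1, d=2)
except RuntimeError as e:
    print(e)  # Expected target size [1, 2], got [1]
```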