```python
data_norm = np.concatenate((data_norm, np.array([self.displaySamples]).reshape(-1, 1) / self.engine.P['MaxFeedbackVal']), -1)
```
This line appends a new column to the `data_norm` array. Specifically, it uses `np.concatenate` to join `data_norm` with a new column built from `self.displaySamples`: the values are wrapped in an array, reshaped into a column vector with `reshape(-1, 1)`, and divided by `self.engine.P['MaxFeedbackVal']` to normalize them.
The final argument `-1` is the axis: concatenation happens along the last axis, so the normalized column becomes the last column. The result is a 2-D array containing the original data plus the newly added normalized column.
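For illustration, here is a minimal sketch of the same operation, using made-up stand-ins for `self.displaySamples` and `self.engine.P['MaxFeedbackVal']`:
```python
import numpy as np

data_norm = np.random.rand(5, 3)                    # existing normalized features, shape (5, 3)
display_samples = [20.0, 40.0, 60.0, 80.0, 100.0]   # stand-in for self.displaySamples
max_feedback_val = 100.0                            # stand-in for self.engine.P['MaxFeedbackVal']

# Build the normalized column vector and append it along the last axis
new_col = np.array([display_samples]).reshape(-1, 1) / max_feedback_val  # shape (5, 1)
data_norm = np.concatenate((data_norm, new_col), -1)                     # shape (5, 4)
print(data_norm.shape)  # (5, 4)
```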
Related questions
The following function extracts features and saves them:
```python
# Extract features and save them
def get_feat(image_list, name_list, label_list, savePath):
    i = 0
    for image in image_list:
        try:
            # For grayscale images, change 3 to -1
            image = np.reshape(image, (image_height, image_width, 3))
        except:
            print('An exception occurred: the image size does not meet the requirement:', name_list[i])
            continue
        gray = rgb2gray(image) / 255.0
        # Adjust these parameters to match your image size
        fd = hog(gray, orientations=12, block_norm='L1', pixels_per_cell=[8, 8],
                 cells_per_block=[4, 4], visualize=False, transform_sqrt=True)
        fd = np.concatenate((fd, [label_list[i]]))
        fd_name = name_list[i] + '.feat'
        fd_path = os.path.join(savePath, fd_name)
        joblib.dump(fd, fd_path)
        i += 1
    print("Test features are extracted and saved.")
```
How can color features be added to this code?
Here is an example of how to add color features:
```python
import cv2
import os
import numpy as np
from skimage.feature import hog
from skimage.color import rgb2gray
import joblib  # sklearn.externals.joblib is deprecated; use the standalone joblib package

def get_feat(image_list, name_list, label_list, savePath):
    # image_height and image_width are assumed to be defined at module level, as in the original function
    i = 0
    for image in image_list:
        try:
            # For grayscale images, change 3 to -1
            image = np.reshape(image, (image_height, image_width, 3))
        except Exception:
            print('An exception occurred: the image size does not meet the requirement:', name_list[i])
            continue
        gray = rgb2gray(image) / 255.0
        # Color features: HSV color histogram
        hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        color_hist = cv2.calcHist([hsv], [0, 1, 2], None, [8, 8, 8], [0, 180, 0, 256, 0, 256])
        color_hist = cv2.normalize(color_hist, color_hist).flatten()
        # HOG features
        hog_feat = hog(gray, orientations=12, block_norm='L1', pixels_per_cell=[8, 8],
                       cells_per_block=[4, 4], visualize=False, transform_sqrt=True)
        # Combine the feature vectors and append the label
        feat = np.concatenate((hog_feat, color_hist, [label_list[i]]))
        feat_name = name_list[i] + '.feat'
        feat_path = os.path.join(savePath, feat_name)
        joblib.dump(feat, feat_path)
        i += 1
    print("Features are extracted and saved.")
```
In this example, OpenCV's cvtColor first converts the image from RGB to HSV, and calcHist then computes an HSV color histogram. The histogram is normalized with normalize and flattened into a 1-D array. Finally, the HOG features and the color features are concatenated into the final feature vector.
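As a standalone illustration, the color-histogram part can be exercised on a synthetic image (the sizes below are made up, and an 8-bit RGB image is assumed):
```python
import cv2
import numpy as np

image_height, image_width = 64, 64
image = np.random.randint(0, 256, (image_height, image_width, 3), dtype=np.uint8)

# Convert to HSV and compute an 8x8x8-bin 3-D histogram (H range is 0-180 in OpenCV)
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
color_hist = cv2.calcHist([hsv], [0, 1, 2], None, [8, 8, 8], [0, 180, 0, 256, 0, 256])
color_hist = cv2.normalize(color_hist, color_hist).flatten()
print(color_hist.shape)  # (512,) = 8 * 8 * 8 bins
```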
Transformer and Self-Attention PyTorch code
Here is example code implementing Transformer and Self-Attention in PyTorch:
## Self-Attention
```python
import torch
import torch.nn as nn

class SelfAttention(nn.Module):
    def __init__(self, embed_size, heads):
        super(SelfAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads
        assert (self.head_dim * heads == embed_size), "Embed size needs to be divisible by heads"

        # Per-head linear projections
        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.fc_out = nn.Linear(heads * self.head_dim, embed_size)

    def forward(self, values, keys, queries, mask):
        # Get number of training examples
        N = queries.shape[0]
        value_len, key_len, query_len = values.shape[1], keys.shape[1], queries.shape[1]

        # Split embedding into self.heads pieces
        values = values.reshape(N, value_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        queries = queries.reshape(N, query_len, self.heads, self.head_dim)

        # Apply the per-head linear projections
        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        # Transpose to get dimensions batch_size * self.heads * seq_len * self.head_dim
        values = values.permute(0, 2, 1, 3)
        keys = keys.permute(0, 2, 1, 3)
        queries = queries.permute(0, 2, 1, 3)

        # Calculate energy (dot-product attention scores)
        energy = torch.matmul(queries, keys.permute(0, 1, 3, 2))
        if mask is not None:
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        # Apply softmax to get attention weights
        attention = torch.softmax(energy / (self.embed_size ** (1 / 2)), dim=-1)

        # Weight the values by the attention scores
        out = torch.matmul(attention, values)

        # Concatenate heads and linearly transform the output
        out = out.permute(0, 2, 1, 3).reshape(N, query_len, self.heads * self.head_dim)
        out = self.fc_out(out)
        return out
```
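A quick sanity check of the module above, with made-up sizes, might look like this:
```python
import torch

embed_size, heads = 256, 8
attn = SelfAttention(embed_size, heads)

x = torch.rand(2, 10, embed_size)   # (batch, seq_len, embed_size)
out = attn(x, x, x, mask=None)      # self-attention: values, keys, and queries are all x
print(out.shape)                    # torch.Size([2, 10, 256])
```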
## Transformer
```python
import torch
import torch.nn as nn

class TransformerBlock(nn.Module):
    def __init__(self, embed_size, heads, dropout, forward_expansion):
        super(TransformerBlock, self).__init__()
        # batch_first=True so inputs are (batch, seq_len, embed_size)
        self.attention = nn.MultiheadAttention(embed_dim=embed_size, num_heads=heads, batch_first=True)
        self.norm1 = nn.LayerNorm(embed_size)
        self.norm2 = nn.LayerNorm(embed_size)
        self.feed_forward = nn.Sequential(
            nn.Linear(embed_size, forward_expansion * embed_size),
            nn.ReLU(),
            nn.Linear(forward_expansion * embed_size, embed_size)
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, value, key, query, mask):
        attention_output, _ = self.attention(query, key, value, attn_mask=mask)
        # Residual connection + layer norm, then position-wise feed-forward
        x = self.dropout(self.norm1(attention_output + query))
        forward_output = self.feed_forward(x)
        out = self.dropout(self.norm2(forward_output + x))
        return out


class Encoder(nn.Module):
    def __init__(self, src_vocab_size, embed_size, num_layers, heads, device,
                 forward_expansion, dropout, max_length):
        super(Encoder, self).__init__()
        self.embed_size = embed_size
        self.device = device
        self.word_embedding = nn.Embedding(src_vocab_size, embed_size)
        self.position_embedding = nn.Embedding(max_length, embed_size)
        self.layers = nn.ModuleList([
            TransformerBlock(embed_size, heads, dropout, forward_expansion)
            for _ in range(num_layers)
        ])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        N, seq_length = x.shape
        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)
        out = self.dropout(self.word_embedding(x) + self.position_embedding(positions))
        for layer in self.layers:
            out = layer(out, out, out, mask)
        return out


class DecoderBlock(nn.Module):
    def __init__(self, embed_size, heads, forward_expansion, dropout, device):
        super(DecoderBlock, self).__init__()
        self.norm = nn.LayerNorm(embed_size)
        self.attention = nn.MultiheadAttention(embed_size, heads, batch_first=True)
        self.transformer_block = TransformerBlock(embed_size, heads, dropout, forward_expansion)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, value, key, src_mask, trg_mask):
        # Masked self-attention over the target sequence
        attention_output, _ = self.attention(x, x, x, attn_mask=trg_mask)
        query = self.dropout(self.norm(attention_output + x))
        # Cross-attention against the encoder output happens inside the TransformerBlock
        out = self.transformer_block(value, key, query, src_mask)
        return out


class Decoder(nn.Module):
    def __init__(self, trg_vocab_size, embed_size, num_layers, heads,
                 forward_expansion, dropout, device, max_length):
        super(Decoder, self).__init__()
        self.embed_size = embed_size
        self.device = device
        self.word_embedding = nn.Embedding(trg_vocab_size, embed_size)
        self.position_embedding = nn.Embedding(max_length, embed_size)
        self.layers = nn.ModuleList([
            DecoderBlock(embed_size, heads, forward_expansion, dropout, device)
            for _ in range(num_layers)
        ])
        self.fc_out = nn.Linear(embed_size, trg_vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_out, src_mask, trg_mask):
        N, seq_length = x.shape
        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)
        x = self.dropout(self.word_embedding(x) + self.position_embedding(positions))
        for layer in self.layers:
            x = layer(x, enc_out, enc_out, src_mask, trg_mask)
        out = self.fc_out(x)
        return out
```
This code can serve as a starting point for Transformer and Self-Attention models, but it is only an example: the hyperparameters and architecture need to be adjusted for your own data and task.
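As a rough illustration, a minimal forward pass with toy hyperparameters could look like the sketch below; all values are made up, and no padding or causal masks are used (a real decoder would pass a causal trg_mask):
```python
import torch

device = torch.device("cpu")
src_vocab_size, trg_vocab_size = 100, 100
embed_size, num_layers, heads = 64, 2, 4
forward_expansion, dropout, max_length = 4, 0.1, 50

encoder = Encoder(src_vocab_size, embed_size, num_layers, heads, device,
                  forward_expansion, dropout, max_length)
decoder = Decoder(trg_vocab_size, embed_size, num_layers, heads,
                  forward_expansion, dropout, device, max_length)

src = torch.randint(0, src_vocab_size, (2, 12))   # (batch, src_len)
trg = torch.randint(0, trg_vocab_size, (2, 9))    # (batch, trg_len)

enc_out = encoder(src, mask=None)                 # no padding mask in this sketch
out = decoder(trg, enc_out, src_mask=None, trg_mask=None)
print(out.shape)  # torch.Size([2, 9, 100]) -> per-token logits over the target vocabulary
```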