_mm512_loadu_si512
时间: 2023-08-07 20:04:12 浏览: 66
_mm512_loadu_si512是一个用于从内存中加载未对齐数据的内部函数(intrinsic)。它可以把一个512位的数据块加载到一个__m512i寄存器中。与要求数据按64字节对齐的_mm512_load_si512相比,它在数据未对齐时可能稍慢一些,因为硬件需要处理跨缓存行的未对齐访问。\[2\]
#### 引用[.reference_title]
- *1* *2* *3* [AVX512笔记](https://blog.csdn.net/longxiawei/article/details/129713310)[target="_blank" data-report-click={"spm":"1018.2226.3001.9630","extra":{"utm_source":"vip_chatgpt_common_search_pc_result","utm_medium":"distribute.pc_search_result.none-task-cask-2~all~insert_cask~default-1-null.142^v91^control_2,239^v3^insert_chatgpt"}} ] [.reference_item]
[ .reference_list ]
相关问题
使用getRotationMatrix2D创建变换矩阵通过SIMD指令加速warpAffine带参数WARP_INVERSE_MAP效果例程C++
下面是一个使用SIMD指令加速warpAffine函数的例程,该例程使用getRotationMatrix2D函数创建变换矩阵,并使用WARP_INVERSE_MAP参数来反向映射。这个例程使用OpenCV库实现,需要确保您已经安装了OpenCV库并正确配置了编译环境。
```c++
#include <opencv2/opencv.hpp>
#include <opencv2/core/hal/intrin.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat src = imread("test.jpg");
if (src.empty())
{
cout << "Could not open or find the image!\n" << endl;
return -1;
}
double angle = 45.0;
double scale = 1.0;
Point2f center(src.cols / 2.0, src.rows / 2.0);
Mat rot_mat = getRotationMatrix2D(center, angle, scale);
const int N = 4;
int rows = src.rows, cols = src.cols;
int x[N] = { 0, cols, 0, cols };
int y[N] = { 0, 0, rows, rows };
float fx[N], fy[N];
__m128i zero = _mm_set1_epi32(0);
__m128i one = _mm_set1_epi32(1);
__m128i cols_epi32 = _mm_set1_epi32(cols);
__m128i rows_epi32 = _mm_set1_epi32(rows);
__m128 fx_0, fx_1, fx_2, fx_3;
__m128 fy_0, fy_1, fy_2, fy_3;
__m128i x_epi32 = _mm_loadu_si128((__m128i*)x);
__m128i y_epi32 = _mm_loadu_si128((__m128i*)y);
__m128i x_0 = _mm_unpacklo_epi32(x_epi32, zero);
__m128i x_1 = _mm_unpackhi_epi32(x_epi32, zero);
__m128i y_0 = _mm_unpacklo_epi32(y_epi32, zero);
__m128i y_1 = _mm_unpackhi_epi32(y_epi32, zero);
__m128 x_0f = _mm_cvtepi32_ps(x_0);
__m128 x_1f = _mm_cvtepi32_ps(x_1);
__m128 y_0f = _mm_cvtepi32_ps(y_0);
__m128 y_1f = _mm_cvtepi32_ps(y_1);
__m128 center_x = _mm_set1_ps(center.x);
__m128 center_y = _mm_set1_ps(center.y);
__m128 scale_f = _mm_set1_ps(scale);
__m128i add_mask = _mm_set1_epi32(0xffffffff);
__m128i sub_mask = _mm_set1_epi32(0x80000000);
__m128 m00 = _mm_set1_ps(rot_mat.at<double>(0, 0));
__m128 m01 = _mm_set1_ps(rot_mat.at<double>(0, 1));
__m128 m02 = _mm_set1_ps(rot_mat.at<double>(0, 2));
__m128 m10 = _mm_set1_ps(rot_mat.at<double>(1, 0));
__m128 m11 = _mm_set1_ps(rot_mat.at<double>(1, 1));
__m128 m12 = _mm_set1_ps(rot_mat.at<double>(1, 2));
fx_0 = _mm_mul_ps(_mm_sub_ps(x_0f, center_x), m00);
fy_0 = _mm_mul_ps(_mm_sub_ps(x_0f, center_x), m10);
fx_0 = _mm_add_ps(fx_0, _mm_mul_ps(_mm_sub_ps(y_0f, center_y), m01));
fy_0 = _mm_add_ps(fy_0, _mm_mul_ps(_mm_sub_ps(y_0f, center_y), m11));
fx_0 = _mm_add_ps(fx_0, center_x);
fy_0 = _mm_add_ps(fy_0, center_y);
fx_0 = _mm_mul_ps(fx_0, scale_f);
fy_0 = _mm_mul_ps(fy_0, scale_f);
fx_0 = _mm_add_ps(fx_0, center_x);
fy_0 = _mm_add_ps(fy_0, center_y);
fx_0 = _mm_round_ps(fx_0, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
fy_0 = _mm_round_ps(fy_0, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
__m128i fx_0i = _mm_cvtps_epi32(fx_0);
__m128i fy_0i = _mm_cvtps_epi32(fy_0);
fx_1 = _mm_mul_ps(_mm_sub_ps(x_1f, center_x), m00);
fy_1 = _mm_mul_ps(_mm_sub_ps(x_1f, center_x), m10);
fx_1 = _mm_add_ps(fx_1, _mm_mul_ps(_mm_sub_ps(y_0f, center_y), m01));
fy_1 = _mm_add_ps(fy_1, _mm_mul_ps(_mm_sub_ps(y_0f, center_y), m11));
fx_1 = _mm_add_ps(fx_1, center_x);
fy_1 = _mm_add_ps(fy_1, center_y);
fx_1 = _mm_mul_ps(fx_1, scale_f);
fy_1 = _mm_mul_ps(fy_1, scale_f);
fx_1 = _mm_add_ps(fx_1, center_x);
fy_1 = _mm_add_ps(fy_1, center_y);
fx_1 = _mm_round_ps(fx_1, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
fy_1 = _mm_round_ps(fy_1, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
__m128i fx_1i = _mm_cvtps_epi32(fx_1);
__m128i fy_1i = _mm_cvtps_epi32(fy_1);
fx_2 = _mm_mul_ps(_mm_sub_ps(x_0f, center_x), m00);
fy_2 = _mm_mul_ps(_mm_sub_ps(x_0f, center_x), m10);
fx_2 = _mm_add_ps(fx_2, _mm_mul_ps(_mm_sub_ps(y_1f, center_y), m01));
fy_2 = _mm_add_ps(fy_2, _mm_mul_ps(_mm_sub_ps(y_1f, center_y), m11));
fx_2 = _mm_add_ps(fx_2, center_x);
fy_2 = _mm_add_ps(fy_2, center_y);
fx_2 = _mm_mul_ps(fx_2, scale_f);
fy_2 = _mm_mul_ps(fy_2, scale_f);
fx_2 = _mm_add_ps(fx_2, center_x);
fy_2 = _mm_add_ps(fy_2, center_y);
fx_2 = _mm_round_ps(fx_2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
fy_2 = _mm_round_ps(fy_2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
__m128i fx_2i = _mm_cvtps_epi32(fx_2);
__m128i fy_2i = _mm_cvtps_epi32(fy_2);
fx_3 = _mm_mul_ps(_mm_sub_ps(x_1f, center_x), m00);
fy_3 = _mm_mul_ps(_mm_sub_ps(x_1f, center_x), m10);
fx_3 = _mm_add_ps(fx_3, _mm_mul_ps(_mm_sub_ps(y_1f, center_y), m01));
fy_3 = _mm_add_ps(fy_3, _mm_mul_ps(_mm_sub_ps(y_1f, center_y), m11));
fx_3 = _mm_add_ps(fx_3, center_x);
fy_3 = _mm_add_ps(fy_3, center_y);
fx_3 = _mm_mul_ps(fx_3, scale_f);
fy_3 = _mm_mul_ps(fy_3, scale_f);
fx_3 = _mm_add_ps(fx_3, center_x);
fy_3 = _mm_add_ps(fy_3, center_y);
fx_3 = _mm_round_ps(fx_3, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
fy_3 = _mm_round_ps(fy_3, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
__m128i fx_3i = _mm_cvtps_epi32(fx_3);
__m128i fy_3i = _mm_cvtps_epi32(fy_3);
int fx[N], fy[N];
_mm_storeu_si128((__m128i*)fx, fx_0i);
_mm_storeu_si128((__m128i*)(fx + 2), fx_1i);
_mm_storeu_si128((__m128i*)fy, fy_0i);
_mm_storeu_si128((__m128i*)(fy + 2), fy_1i);
fx[1] = fx[2];
fy[1] = fy[2];
_mm_storeu_si128((__m128i*)(fx + 4), fx_2i);
_mm_storeu_si128((__m128i*)(fy + 4), fy_2i);
fx[3] = fx[4];
fy[3] = fy[4];
_mm_storeu_si128((__m128i*)(fx + 6), fx_3i);
_mm_storeu_si128((__m128i*)(fy + 6), fy_3i);
Mat dst(rows, cols, src.type());
for (int r = 0; r < rows; r++)
{
for (int c = 0; c < cols; c += 8)
{
__m256i c_epi32 = _mm256_set_epi32(c + 7, c + 6, c + 5, c + 4, c + 3, c + 2, c + 1, c);
__m256i r_epi32 = _mm256_set1_epi32(r);
__m256i index = _mm256_add_epi32(_mm256_mullo_epi32(r_epi32, cols_epi32), c_epi32);
__m256i index_0 = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(index));
__m256i index_1 = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(index, 1));
__m256i index_0_4 = _mm256_mullo_epi32(_mm256_srli_si256(index_0, 2), _mm256_set1_epi32(4));
__m256i index_1_4 = _mm256_mullo_epi32(_mm256_srli_si256(index_1, 2), _mm256_set1_epi32(4));
__m256i index_0_0 = _mm256_add_epi32(index_0, _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
__m256i index_1_0 = _mm256_add_epi32(index_1, _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0));
__m256i index_0_1 = _mm256_add_epi32(index_0, _mm256_set_epi32(8, 8, 8, 8, 4, 4, 4, 4));
__m256i index_1_1 = _mm256_add_epi32(index_1, _mm256_set_epi32(8, 8, 8, 8, 4, 4, 4, 4));
__m256i index_0_2 = _mm256_add_epi32(index_0, _mm256_set_epi32(16, 16, 16, 16, 12, 12, 12, 12));
__m256i index_1_2 = _mm256_add_epi32(index_1, _mm256_set_epi32(16, 16, 16, 16, 12, 12, 12, 12));
__m256i index_0_3 = _mm256_add_epi32(index_0, _mm256_set_epi32(24, 24, 24, 24, 20, 20, 20, 20));
__m256i index_1_3 = _mm256_add_epi32(index_1, _mm256_set_epi32(24, 24, 24, 24, 20, 20, 20, 20));
__m256i fx_epi32 = _mm256_set_epi32(fx[index_1_3.m256i_i32[7]], fx[index_1_3.m256i_i32[6]],
fx[index_1_3.m256i_i32[5]], fx[index_1_3.m256i_i32[4]], fx[index_1_1.m256i_i32[3]],
fx[index_1_1.m256i_i32[2]], fx[index_1_1.m256i_i32[1]], fx[index_1_1.m256i_i32[0]]);
__m256i fy_epi32 = _mm256_set_epi32(fy[index_1_3.m256i_i32[7]], fy[index_1_3.m256i_i32[6]],
fy[index_1_3.m256i_i32[5]], fy[index_1_3.m256i_i32[4]], fy[index_1_1.m256i_i32[3]],
fy[index_1_1.m256i_i32[2]], fy[index_1_1.m256i_i32[1]], fy[index_1_1.m256i_i32[0]]);
__m256i diff_x_0 = _mm256_sub_epi32(_mm256_castsi128_si256(_mm256_extracti128_si256(index_0, 0)), index_0_0);
__m256i diff_x_1 = _mm256_sub_epi32(_mm256_castsi128_si256(_mm256_extracti128_si256(index_0, 1)), index_0_1);
__m256i diff_x_2 = _mm256_sub_epi32(_mm256_castsi128_si256(_mm256_extracti128_si256(index_1, 0)), index_1_0);
__m256i diff_x_3 = _mm256_sub_epi32(_mm256_castsi128_si256(_mm256_extracti128_si256(index_1, 1)), index_1_1);
__m256i diff_y_0 = _mm256_mullo_epi32(diff_x_0, _mm256_set1_epi32(cols));
__m256i diff_y_1 = _mm256_mullo_epi32(diff_x_1, _mm256_set1_epi32(cols));
__m256i diff_y_2 = _mm256_mullo_epi32(diff_x_2, _mm256_set1_epi32(cols));
__m256i diff_y_3 = _mm256_mullo_epi32(diff_x_3, _mm256_set1_epi32(cols));
__m256i index_0_0_0 = _mm256_add_epi32(index_0_0, diff_y_0);
__m256i index_0_0_1 = _mm256_add_epi32(index_0_0, diff_y_1);
__m256i index_1_0_0 = _mm256_add_epi32(index_1_0, diff_y_2);
__m256i index_1_0_1 = _mm256_add_epi32(index_1_0, diff_y_3);
__m256i index_0_1_0 = _mm256_add_epi32(index_0_1, diff_y_0);
__m256i index_0_1_1 = _mm256_add_epi32(index_0_1, diff_y_1);
__m256i index_1_1_0 = _mm256_add_epi32(index_1_1, diff_y_2);
__m256i index_1_1_1 = _mm256_add_epi32(index_1_1, diff_y_3);
__m256i index_0_2_0 = _mm256_add_epi32(index_0_2, diff_y_0);
__m256i index_0_2_1 = _mm256_add_epi32(index_0_2, diff_y_1);
__m256i index_1_2_0 = _mm256_add_epi32(index_1_2, diff_y_2);
__m256i index_1_2_1 = _mm256_add_epi32(index_1_2, diff_y_3);
__m256i index_0_3_0 = _mm256_add_epi32(index_0_3, diff_y_0);
__m256i index_0_3_1 = _mm256_add_epi32(index_0_3, diff_y_1);
__m256i
如何使用AVX2指令集加速cv::bitwise_and函数
AVX2指令集是一组针对Intel处理器的指令集,可以在处理器上并行执行多个操作,可以用来加速cv::bitwise_and函数。
以下是使用AVX2指令集加速cv::bitwise_and函数的示范代码:
```c++
#include <immintrin.h> // 包含AVX2指令集的头文件
// Bitwise AND of two single-channel 8-bit images, accelerated with AVX2.
// The result is written to `dst`, which is (re)allocated to match `src1`.
//
// Fixes vs. the original:
//  - The vector loop stepped 8 bytes per iteration while each 256-bit
//    load/store moves 32 bytes, causing overlapping stores and out-of-bounds
//    reads/writes near the end of each row. It now processes 32 pixels per
//    iteration with a matching bound.
//  - Each matrix's own row pointer (via Mat::ptr) is used instead of applying
//    dst.step to all three buffers; strides may differ per Mat.
//  - Removed the unused v_mask vector; added a size-match assertion.
void bitwise_and_avx2(const cv::Mat& src1, const cv::Mat& src2, cv::Mat& dst)
{
    CV_Assert(src1.type() == CV_8UC1 && src2.type() == CV_8UC1); // 8-bit, single channel only
    CV_Assert(src1.size() == src2.size());                       // element-wise op needs equal sizes
    dst.create(src1.size(), src1.type());                        // allocate output like src1
    const int rows = src1.rows;
    const int cols = src1.cols;
    for (int i = 0; i < rows; i++)
    {
        // Per-row pointers honor each Mat's own stride (rows may be padded).
        const uchar* row1 = src1.ptr<uchar>(i);
        const uchar* row2 = src2.ptr<uchar>(i);
        uchar* row_dst = dst.ptr<uchar>(i);
        int j = 0;
        // One 256-bit register holds 32 bytes, i.e. 32 pixels of an 8-bit image.
        for (; j <= cols - 32; j += 32)
        {
            __m256i v_src1 = _mm256_loadu_si256((const __m256i*)(row1 + j)); // 32 pixels from src1
            __m256i v_src2 = _mm256_loadu_si256((const __m256i*)(row2 + j)); // 32 pixels from src2
            __m256i v_dst = _mm256_and_si256(v_src1, v_src2);                // bitwise AND of all 32
            _mm256_storeu_si256((__m256i*)(row_dst + j), v_dst);             // write 32 result pixels
        }
        // Scalar tail for the remaining (cols % 32) pixels.
        for (; j < cols; j++)
        {
            row_dst[j] = row1[j] & row2[j];
        }
    }
}
```
上述代码使用AVX2指令集一次对32个像素点(一个256位寄存器可容纳32个8位像素)进行按位与运算,从而提高了计算速度。但是,由于AVX2指令集只能在支持AVX2的CPU上运行,因此需要在运行代码前检查CPU是否支持AVX2指令集。
相关推荐
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)