Using MPI, write a Cannon matrix-multiplication program for two n-by-n matrices a and b, storing the result in matrix c. a, b, and c are all stored with a block-checkerboard partitioning, each sub-block of size ⌈n/√p⌉ × ⌈n/√p⌉, with missing entries padded with zeros. Every element of a and b is initialized to a random double in [0, 1]: rand()/double(RAND_MAX). To verify correctness, also implement a simple parallel block multiplication (row or column broadcasts can be implemented with MPI_Comm_split and MPI_Bcast) and compare its result with the result of the Cannon multiplication. The measured execution time excludes the initialization of a, b, c and the correctness check, and is obtained with MPI_Wtime. Write the complete program code below and add the necessary comments.
```C++
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
// Side length of each sub-block in the checkerboard partition: ceil(n / sqrt(p))
#define BLOCK_SIZE(n, p) ((int)ceil((double)(n) / sqrt((double)(p))))
// Linear index of element (i, j) in a row-major n x n matrix
#define POS(i, j, n) ((i) * (n) + (j))
// Fill an n x n matrix with random double values in [0, 1]
// (the caller is responsible for seeding rand(); see main)
void rand_matrix(double* matrix, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            matrix[POS(i, j, n)] = (double)rand() / RAND_MAX;
        }
    }
}
// Print an n x n matrix
void print_matrix(double* matrix, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            printf("%.4f ", matrix[POS(i, j, n)]);
        }
        printf("\n");
    }
    printf("\n");
}
// Copy the block_size x block_size block whose top-left element is (row0, col0)
// out of the full n x n matrix m; entries outside the matrix are padded with 0
void copy_block_in(const double* m, double* block, int n, int block_size, int row0, int col0)
{
    for (int i = 0; i < block_size; i++)
    {
        for (int j = 0; j < block_size; j++)
        {
            int gi = row0 + i;
            int gj = col0 + j;
            block[POS(i, j, block_size)] = (gi < n && gj < n) ? m[POS(gi, gj, n)] : 0.0;
        }
    }
}
// Cannon's algorithm, c = a * b.  Every rank holds full copies of a and b;
// the ranks form a sqrt(p) x sqrt(p) grid, each rank computes one block of c
// with the usual skew-and-shift scheme, and the blocks are assembled into the
// full matrix c on every rank with an all-reduce.
void matrix_multiply(double* a, double* b, double* c, int n, int block_size)
{
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int q = (int)(sqrt((double)size) + 0.5);          // the process grid is q x q
    if (q * q != size)
    {
        if (rank == 0)
            fprintf(stderr, "Cannon's algorithm needs a square number of processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    // Create a periodic 2D Cartesian topology and query this rank's grid coordinates
    MPI_Comm cart_comm;
    int dims[2] = { q, q };
    int periods[2] = { 1, 1 };
    int coords[2];
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm);
    MPI_Comm_rank(cart_comm, &rank);
    MPI_Cart_coords(cart_comm, rank, 2, coords);
    int row = coords[0];
    int col = coords[1];
    // Local blocks of a and b, and the accumulated block of c (starts at zero)
    double* block_a = (double*)malloc(sizeof(double) * block_size * block_size);
    double* block_b = (double*)malloc(sizeof(double) * block_size * block_size);
    double* block_c = (double*)calloc((size_t)block_size * block_size, sizeof(double));
    // Initial alignment (skew): rank (i, j) starts with block A(i, i+j) and B(i+j, j)
    copy_block_in(a, block_a, n, block_size, row * block_size, ((row + col) % q) * block_size);
    copy_block_in(b, block_b, n, block_size, ((row + col) % q) * block_size, col * block_size);
    // Neighbours for the cyclic shifts: A moves one block left, B moves one block up
    int left, right, up, down;
    MPI_Cart_shift(cart_comm, 1, 1, &left, &right);   // dimension 1 = columns
    MPI_Cart_shift(cart_comm, 0, 1, &up, &down);      // dimension 0 = rows
    for (int step = 0; step < q; step++)
    {
        // Multiply the current pair of blocks and accumulate into block_c
        for (int i = 0; i < block_size; i++)
        {
            for (int l = 0; l < block_size; l++)
            {
                double aval = block_a[POS(i, l, block_size)];
                for (int j = 0; j < block_size; j++)
                {
                    block_c[POS(i, j, block_size)] += aval * block_b[POS(l, j, block_size)];
                }
            }
        }
        if (step == q - 1)
            break;                                    // the last step needs no further shift
        // Shift the A block to the left neighbour and the B block to the upper neighbour
        MPI_Sendrecv_replace(block_a, block_size * block_size, MPI_DOUBLE,
                             left, 0, right, 0, cart_comm, MPI_STATUS_IGNORE);
        MPI_Sendrecv_replace(block_b, block_size * block_size, MPI_DOUBLE,
                             up, 1, down, 1, cart_comm, MPI_STATUS_IGNORE);
    }
    // Each rank writes its block into an otherwise zero matrix c; summing the
    // contributions of all ranks then yields the complete result everywhere
    for (int i = 0; i < n * n; i++)
        c[i] = 0.0;
    for (int i = 0; i < block_size; i++)
    {
        for (int j = 0; j < block_size; j++)
        {
            int gi = row * block_size + i;
            int gj = col * block_size + j;
            if (gi < n && gj < n)
                c[POS(gi, gj, n)] = block_c[POS(i, j, block_size)];
        }
    }
    MPI_Allreduce(MPI_IN_PLACE, c, n * n, MPI_DOUBLE, MPI_SUM, cart_comm);
    // Release local resources
    free(block_a);
    free(block_b);
    free(block_c);
    MPI_Comm_free(&cart_comm);
}
// Simple parallel multiplication used to check the Cannon result, c = a * b.
// The rows of c are divided evenly among the processes; since a and b are
// fully replicated here, no extra broadcast is needed, and the partial results
// are summed into the complete matrix on every rank.  (With genuinely
// block-distributed storage one would instead build row/column communicators
// with MPI_Comm_split and broadcast blocks with MPI_Bcast; a sketch of that
// variant follows the program.)
void simple_matrix_multiply(double* a, double* b, double* c, int n)
{
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    // Range of rows handled by this rank: [row_begin, row_end)
    int rows_per_proc = (n + size - 1) / size;
    int row_begin = rank * rows_per_proc;
    int row_end = (row_begin + rows_per_proc < n) ? row_begin + rows_per_proc : n;
    // Start from an all-zero matrix so the all-reduce below simply sums the pieces
    for (int i = 0; i < n * n; i++)
        c[i] = 0.0;
    for (int i = row_begin; i < row_end; i++)
    {
        for (int k = 0; k < n; k++)
        {
            double aik = a[POS(i, k, n)];
            for (int j = 0; j < n; j++)
                c[POS(i, j, n)] += aik * b[POS(k, j, n)];
        }
    }
    // Assemble the full result matrix on every process
    MPI_Allreduce(MPI_IN_PLACE, c, n * n, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
}
int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    int n = (argc > 1) ? atoi(argv[1]) : 4;               // matrix order (default 4)
    double* a = (double*)malloc(sizeof(double) * n * n);
    double* b = (double*)malloc(sizeof(double) * n * n);
    double* c1 = (double*)malloc(sizeof(double) * n * n); // result of Cannon's algorithm
    double* c2 = (double*)malloc(sizeof(double) * n * n); // result of the simple parallel multiply
    // Rank 0 generates the random matrices and broadcasts them, so that every
    // process works on identical copies of a and b
    if (rank == 0)
    {
        srand((unsigned)time(NULL));
        rand_matrix(a, n);
        rand_matrix(b, n);
    }
    MPI_Bcast(a, n * n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(b, n * n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // Print the input matrices (rank 0 only, to keep the output readable)
    if (rank == 0)
    {
        printf("Matrix A:\n");
        print_matrix(a, n);
        printf("Matrix B:\n");
        print_matrix(b, n);
    }
    // Time Cannon's algorithm; initialization and the correctness check are excluded
    MPI_Barrier(MPI_COMM_WORLD);
    double start1 = MPI_Wtime();
    matrix_multiply(a, b, c1, n, BLOCK_SIZE(n, size));
    double end1 = MPI_Wtime();
    // Time the simple parallel block multiplication
    MPI_Barrier(MPI_COMM_WORLD);
    double start2 = MPI_Wtime();
    simple_matrix_multiply(a, b, c2, n);
    double end2 = MPI_Wtime();
    if (rank == 0)
    {
        printf("Cannon matrix multiplication result:\n");
        print_matrix(c1, n);
        printf("Simple parallel block multiplication result:\n");
        print_matrix(c2, n);
        // Compare the two results element by element (floating-point tolerance 1e-6)
        int is_correct = 1;
        for (int i = 0; i < n * n; i++)
        {
            if (fabs(c1[i] - c2[i]) > 1e-6)
            {
                is_correct = 0;
                break;
            }
        }
        if (is_correct)
            printf("The results of the two methods are the same.\n");
        else
            printf("The results of the two methods are different.\n");
        // Report the measured times
        printf("Cannon matrix multiplication time: %f seconds\n", end1 - start1);
        printf("Simple parallel block multiplication time: %f seconds\n", end2 - start2);
    }
    // Release memory and shut down MPI
    free(a);
    free(b);
    free(c1);
    free(c2);
    MPI_Finalize();
    return 0;
}
```
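To try the program (a rough usage sketch; the file name `cannon.c` is arbitrary), it can be compiled with the MPI compiler wrapper, e.g. `mpicc cannon.c -o cannon -lm`, and launched with a square number of processes, e.g. `mpirun -np 4 ./cannon 8` for two 8×8 matrices on a 2×2 process grid (the matrix order defaults to 4 when no argument is given).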
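The problem statement also allows the verification multiply to use row/column broadcasts built with MPI_Comm_split and MPI_Bcast. The program above sidesteps this because a and b are fully replicated, but the fragment below is a minimal sketch of that variant, under the assumption that ranks are mapped row-major onto a q × q grid and each rank owns exactly one `block_size × block_size` block of A and of B; the function name `simple_multiply_bcast` and the scratch buffers `tmp_a`/`tmp_b` are illustrative, not part of the program above.

```C++
// Sketch only: block_a / block_b are this rank's local blocks of A and B,
// tmp_a / tmp_b are scratch buffers of block_size*block_size doubles, and
// block_c receives this rank's block of C.
void simple_multiply_bcast(const double* block_a, const double* block_b,
                           double* block_c, double* tmp_a, double* tmp_b,
                           int block_size, int q)
{
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    int my_row = world_rank / q;                       // position in the q x q grid
    int my_col = world_rank % q;

    // Ranks in the same grid row share row_comm; same grid column share col_comm
    MPI_Comm row_comm, col_comm;
    MPI_Comm_split(MPI_COMM_WORLD, my_row, my_col, &row_comm); // color = row, key = column
    MPI_Comm_split(MPI_COMM_WORLD, my_col, my_row, &col_comm); // color = column, key = row

    int bb = block_size * block_size;
    for (int i = 0; i < bb; i++)
        block_c[i] = 0.0;

    for (int k = 0; k < q; k++)
    {
        // The owner of A(my_row, k) has rank k inside row_comm (the split key is
        // the column index), so it is the broadcast root; B(k, my_col) likewise
        // lives at rank k of col_comm.
        if (my_col == k)
            for (int i = 0; i < bb; i++) tmp_a[i] = block_a[i];
        MPI_Bcast(tmp_a, bb, MPI_DOUBLE, k, row_comm);
        if (my_row == k)
            for (int i = 0; i < bb; i++) tmp_b[i] = block_b[i];
        MPI_Bcast(tmp_b, bb, MPI_DOUBLE, k, col_comm);
        // block_c += tmp_a * tmp_b (same triple loop as in the Cannon kernel above)
        for (int i = 0; i < block_size; i++)
            for (int l = 0; l < block_size; l++)
                for (int j = 0; j < block_size; j++)
                    block_c[i * block_size + j] += tmp_a[i * block_size + l] * tmp_b[l * block_size + j];
    }
    MPI_Comm_free(&row_comm);
    MPI_Comm_free(&col_comm);
}
```

Because the split key orders ranks inside each new communicator, a process sits at rank k of its row communicator exactly when it occupies grid column k, which is what makes `k` the correct broadcast root for the block A(my_row, k); the column communicator works symmetrically for B.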