Explain MPI.COMM_WORLD.rand in detail
Posted: 2023-04-01 16:00:40
Strictly speaking, neither the MPI standard nor the `MPI.COMM_WORLD` communicator object (e.g. in mpi4py) provides a `rand` function: `MPI_COMM_WORLD` is simply the communicator that contains all processes. Random numbers in an MPI program come from the host language's generator, for example C's `rand()`, scaled to a floating-point value between 0 and 1 with `rand() / (double)RAND_MAX`. In parallel code each rank is usually seeded differently so that the processes draw independent sequences. Note that these are pseudo-random numbers with limited statistical quality; they are not suitable for cryptography or other uses that demand strong randomness.
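As a minimal sketch of this pattern in C (assuming the standard `rand()`/`srand()` from `<stdlib.h>`; the per-rank seed offset is only an illustration, not a fixed convention):

```c
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Seed each rank differently so the processes do not all draw the same sequence. */
    srand((unsigned)time(NULL) + (unsigned)rank);

    /* A pseudo-random double in [0, 1], as used in the code later in this thread. */
    double x = rand() / (double)RAND_MAX;
    printf("rank %d drew %f\n", rank, x);

    MPI_Finalize();
    return 0;
}
```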
Related questions
Improve the following code:

```
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define N 4000
#define TAG 0

void merge(int arr[], int l, int m, int r) {
    int i, j, k;
    int n1 = m - l + 1;
    int n2 = r - m;
    int L[4000], R[4000];
    for (i = 0; i < n1; i++)
        L[i] = arr[l + i];
    for (j = 0; j < n2; j++)
        R[j] = arr[m + 1 + j];
    i = 0; j = 0; k = l;
    while (i < n1 && j < n2) {
        if (L[i] <= R[j]) { arr[k] = L[i]; i++; }
        else { arr[k] = R[j]; j++; }
        k++;
    }
    while (i < n1) { arr[k] = L[i]; i++; k++; }
    while (j < n2) { arr[k] = R[j]; j++; k++; }
}

void mergeSort(int arr[], int l, int r) {
    if (l < r) {
        int m = l + (r - l) / 2;
        mergeSort(arr, l, m);
        mergeSort(arr, m + 1, r);
        merge(arr, l, m, r);
    }
}

int main(int argc, char** argv) {
    int rank, size;
    int i, j, k;
    int A[N], B[N];
    int block_size, start, end;
    double start_time, end_time;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    block_size = N / size;
    start = rank * block_size;
    end = start + block_size - 1;
    if (rank == size - 1) { end = N - 1; }
    if (rank == 0) {
        printf("Generating random array...\n");
        for (i = 0; i < N; i++) { A[i] = rand() % 100000; }
        printf("Sorting array...\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);
    start_time = MPI_Wtime();
    MPI_Scatter(A, block_size, MPI_INT, &B[start], block_size, MPI_INT, 0, MPI_COMM_WORLD);
    mergeSort(B, start, end);
    for (i = 0; i < size; i++) {
        if (rank == i) {
            MPI_Send(&B[start], block_size, MPI_INT, (rank + 1) % size, TAG, MPI_COMM_WORLD);
        } else if (rank == (i + 1) % size) {
            MPI_Recv(&B[start], block_size, MPI_INT, i, TAG, MPI_COMM_WORLD, &status);
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);
    end_time = MPI_Wtime();
    if (rank == 0) {
        printf("Writing result to file...\n");
        FILE* fp;
        errno_t err;
        err = fopen_s(&fp, "sorted_array.txt", "w");
        for (i = 0; i < N; i++) { fprintf(fp, "%d\n", B[i]); }
        fclose(fp);
        printf("Done!\n");
        printf("Time used: %.6f seconds\n", end_time - start_time);
    }
    MPI_Finalize();
    return 0;
}
```
Here is the improved code:
```
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define N 4000

/* Merge the two sorted sub-ranges arr[l..m] and arr[m+1..r]. */
void merge(int arr[], int l, int m, int r) {
    int i, j, k;
    int n1 = m - l + 1;
    int n2 = r - m;
    int *L = (int*)malloc(n1 * sizeof(int));
    int *R = (int*)malloc(n2 * sizeof(int));
    for (i = 0; i < n1; i++)
        L[i] = arr[l + i];
    for (j = 0; j < n2; j++)
        R[j] = arr[m + 1 + j];
    i = 0; j = 0; k = l;
    while (i < n1 && j < n2) {
        if (L[i] <= R[j])
            arr[k++] = L[i++];
        else
            arr[k++] = R[j++];
    }
    while (i < n1)
        arr[k++] = L[i++];
    while (j < n2)
        arr[k++] = R[j++];
    free(L);
    free(R);
}

void mergeSort(int arr[], int l, int r) {
    if (l < r) {
        int m = l + (r - l) / 2;
        mergeSort(arr, l, m);
        mergeSort(arr, m + 1, r);
        merge(arr, l, m, r);
    }
}

int main(int argc, char** argv) {
    int rank, size, i;
    int *A = NULL, *B = NULL;
    int block_size;
    double start_time, end_time;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* For simplicity this version assumes N is divisible by the number of processes. */
    block_size = N / size;

    if (rank == 0) {
        printf("Generating random array...\n");
        A = (int*)malloc(N * sizeof(int));
        for (i = 0; i < N; i++)
            A[i] = rand() % 100000;
        printf("Sorting array...\n");
    }
    B = (int*)malloc(block_size * sizeof(int));

    MPI_Barrier(MPI_COMM_WORLD);
    start_time = MPI_Wtime();

    /* Each process receives one block and sorts it locally. */
    MPI_Scatter(A, block_size, MPI_INT, B, block_size, MPI_INT, 0, MPI_COMM_WORLD);
    mergeSort(B, 0, block_size - 1);

    /* Collect the sorted blocks on rank 0 and merge them into one globally sorted array. */
    MPI_Gather(B, block_size, MPI_INT, A, block_size, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        for (i = 1; i < size; i++)
            merge(A, 0, i * block_size - 1, (i + 1) * block_size - 1);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    end_time = MPI_Wtime();

    if (rank == 0) {
        printf("Writing result to file...\n");
        FILE* fp = fopen("sorted_array.txt", "w");
        for (i = 0; i < N; i++)
            fprintf(fp, "%d\n", A[i]);
        fclose(fp);
        free(A);
        printf("Done!\n");
        printf("Time used: %.6f seconds\n", end_time - start_time);
    }
    free(B);
    MPI_Finalize();
    return 0;
}
```
The main improvements:
1. `merge` allocates its scratch buffers `L` and `R` dynamically and frees them afterwards, instead of using fixed 4000-element stack arrays, avoiding stack overflow and wasted memory.
2. `main` allocates `A` (only on rank 0) and `B` on the heap, so a large `N` no longer risks overflowing the stack.
3. After `MPI_Scatter`, each process sorts its block with local indices `0 .. block_size - 1`, so `B` only needs to hold a single block rather than the whole array.
4. The ring of `MPI_Send`/`MPI_Recv` calls, which never actually combined the partial results (and led to an out-of-bounds read when rank 0 wrote `N` elements from a `block_size`-sized buffer), is replaced by `MPI_Gather` followed by a final merge of the sorted blocks on rank 0, so the output file really contains a globally sorted array.
5. The non-portable `fopen_s`/`errno_t` pair is replaced by standard `fopen`, and all heap memory is released before `MPI_Finalize`.
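If `N` is not evenly divisible by the number of processes, the fixed-count `MPI_Scatter`/`MPI_Gather` above no longer suffices. One possible approach, shown here only as a sketch building on the variables of the program above (the `counts`/`displs` names are illustrative), is to switch to the variable-count collectives `MPI_Scatterv`/`MPI_Gatherv`:

```c
/* Sketch: distributing N integers over `size` ranks when N % size != 0.
   counts[r] and displs[r] are computed identically on every rank. */
int *counts = (int*)malloc(size * sizeof(int));
int *displs = (int*)malloc(size * sizeof(int));
for (int r = 0; r < size; r++) {
    counts[r] = N / size + (r < N % size ? 1 : 0);  /* first N % size ranks get one extra element */
    displs[r] = (r == 0) ? 0 : displs[r - 1] + counts[r - 1];
}
int *local = (int*)malloc(counts[rank] * sizeof(int));
MPI_Scatterv(A, counts, displs, MPI_INT, local, counts[rank], MPI_INT, 0, MPI_COMM_WORLD);
/* ... sort `local`, then collect the pieces with the matching call ... */
MPI_Gatherv(local, counts[rank], MPI_INT, A, counts, displs, MPI_INT, 0, MPI_COMM_WORLD);
```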
Using MPI, write a Cannon matrix-multiplication program for two n-order square matrices a and b, storing the result in the square matrix c. a, b, and c are all stored with a block-checkerboard partitioning, where each sub-block has size ⌈n/√p⌉ × ⌈n/√p⌉ and missing entries are padded with 0. Every element of a and b is initialized to a random double between 0 and 1: rand()/double(RAND_MAX). To verify correctness, also implement a simple parallel block multiplication (row or column broadcasts can be implemented with MPI_Comm_split and MPI_Bcast) and compare its result with that of the Cannon algorithm. The execution time, measured with MPI_Wtime, should exclude the initialization of a, b, c and the correctness check. Write the complete program code below, with the necessary comments.
```c
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

/* lc += la * lb for bs x bs row-major blocks */
static void block_multiply(const double *la, const double *lb, double *lc, int bs)
{
    for (int i = 0; i < bs; i++)
        for (int k = 0; k < bs; k++) {
            double aik = la[i * bs + k];
            for (int j = 0; j < bs; j++)
                lc[i * bs + j] += aik * lb[k * bs + j];
        }
}

/* Copy block (bi, bj) of the padded m x m matrix mat into the contiguous buffer blk */
static void pack_block(const double *mat, int m, int bs, int bi, int bj, double *blk)
{
    for (int i = 0; i < bs; i++)
        memcpy(&blk[i * bs], &mat[(size_t)(bi * bs + i) * m + bj * bs], bs * sizeof(double));
}

/* Copy the contiguous buffer blk back into block (bi, bj) of the padded m x m matrix mat */
static void unpack_block(double *mat, int m, int bs, int bi, int bj, const double *blk)
{
    for (int i = 0; i < bs; i++)
        memcpy(&mat[(size_t)(bi * bs + i) * m + bj * bs], &blk[i * bs], bs * sizeof(double));
}

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    int world_rank, p;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &p);

    int q = (int)(sqrt((double)p) + 0.5);          /* the process grid is q x q */
    if (q * q != p) {
        if (world_rank == 0)
            fprintf(stderr, "The number of processes must be a perfect square.\n");
        MPI_Finalize();
        return 1;
    }

    int n = (argc > 1) ? atoi(argv[1]) : 4;        /* matrix order */
    int bs = (n + q - 1) / q;                      /* block size = ceil(n / sqrt(p)) */
    int m = bs * q;                                /* padded order (extra entries are 0) */

    /* q x q periodic Cartesian topology; all collectives below use this communicator */
    MPI_Comm cart;
    int dims[2] = { q, q }, periods[2] = { 1, 1 }, coords[2], rank;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart);
    MPI_Comm_rank(cart, &rank);
    MPI_Cart_coords(cart, rank, 2, coords);
    int my_row = coords[0], my_col = coords[1];

    /* Rank 0 builds the padded matrices A and B with random values in [0, 1] */
    double *A = NULL, *B = NULL, *C = NULL, *sendA = NULL, *sendB = NULL, *gatherC = NULL;
    if (rank == 0) {
        A = calloc((size_t)m * m, sizeof(double));
        B = calloc((size_t)m * m, sizeof(double));
        C = calloc((size_t)m * m, sizeof(double));
        srand((unsigned)time(NULL));
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++) {
                A[(size_t)i * m + j] = rand() / (double)RAND_MAX;
                B[(size_t)i * m + j] = rand() / (double)RAND_MAX;
            }
        /* Pack the blocks in cart-rank order so MPI_Scatter delivers block (r, c)
           to the process with Cartesian coordinates (r, c). */
        sendA = malloc((size_t)p * bs * bs * sizeof(double));
        sendB = malloc((size_t)p * bs * bs * sizeof(double));
        gatherC = malloc((size_t)p * bs * bs * sizeof(double));
        for (int r = 0; r < p; r++) {
            int rc[2];
            MPI_Cart_coords(cart, r, 2, rc);
            pack_block(A, m, bs, rc[0], rc[1], &sendA[(size_t)r * bs * bs]);
            pack_block(B, m, bs, rc[0], rc[1], &sendB[(size_t)r * bs * bs]);
        }
    }

    /* Local blocks: la0/lb0 keep the original blocks, la/lb are Cannon's working copies */
    size_t bsz = (size_t)bs * bs;
    double *la0 = malloc(bsz * sizeof(double)), *lb0 = malloc(bsz * sizeof(double));
    double *la  = malloc(bsz * sizeof(double)), *lb  = malloc(bsz * sizeof(double));
    double *ta  = malloc(bsz * sizeof(double)), *tb  = malloc(bsz * sizeof(double));
    double *lc  = calloc(bsz, sizeof(double)),  *lc2 = calloc(bsz, sizeof(double));

    MPI_Scatter(sendA, bs * bs, MPI_DOUBLE, la0, bs * bs, MPI_DOUBLE, 0, cart);
    MPI_Scatter(sendB, bs * bs, MPI_DOUBLE, lb0, bs * bs, MPI_DOUBLE, 0, cart);
    memcpy(la, la0, bsz * sizeof(double));
    memcpy(lb, lb0, bsz * sizeof(double));

    /* ---------------- Cannon's algorithm ---------------- */
    MPI_Barrier(cart);
    double t0 = MPI_Wtime();
    int src, dst;
    /* Initial skew: row i of A is shifted left by i, column j of B is shifted up by j */
    MPI_Cart_shift(cart, 1, -my_row, &src, &dst);
    MPI_Sendrecv_replace(la, bs * bs, MPI_DOUBLE, dst, 0, src, 0, cart, MPI_STATUS_IGNORE);
    MPI_Cart_shift(cart, 0, -my_col, &src, &dst);
    MPI_Sendrecv_replace(lb, bs * bs, MPI_DOUBLE, dst, 0, src, 0, cart, MPI_STATUS_IGNORE);
    for (int step = 0; step < q; step++) {
        block_multiply(la, lb, lc, bs);
        /* Shift A one block to the left and B one block upwards */
        MPI_Cart_shift(cart, 1, -1, &src, &dst);
        MPI_Sendrecv_replace(la, bs * bs, MPI_DOUBLE, dst, 0, src, 0, cart, MPI_STATUS_IGNORE);
        MPI_Cart_shift(cart, 0, -1, &src, &dst);
        MPI_Sendrecv_replace(lb, bs * bs, MPI_DOUBLE, dst, 0, src, 0, cart, MPI_STATUS_IGNORE);
    }
    double t_cannon = MPI_Wtime() - t0;

    /* ------ Simple parallel block multiplication (row/column broadcasts) for checking ------ */
    MPI_Comm row_comm, col_comm;
    MPI_Comm_split(cart, my_row, my_col, &row_comm);   /* rank inside row_comm == my_col */
    MPI_Comm_split(cart, my_col, my_row, &col_comm);   /* rank inside col_comm == my_row */

    MPI_Barrier(cart);
    double t1 = MPI_Wtime();
    for (int k = 0; k < q; k++) {
        /* The owner of A(my_row, k) broadcasts it along its row,
           the owner of B(k, my_col) broadcasts it along its column. */
        if (my_col == k) memcpy(ta, la0, bsz * sizeof(double));
        MPI_Bcast(ta, bs * bs, MPI_DOUBLE, k, row_comm);
        if (my_row == k) memcpy(tb, lb0, bsz * sizeof(double));
        MPI_Bcast(tb, bs * bs, MPI_DOUBLE, k, col_comm);
        block_multiply(ta, tb, lc2, bs);
    }
    double t_simple = MPI_Wtime() - t1;

    /* Check block by block that both algorithms produced the same result */
    int local_ok = 1, all_ok = 0;
    for (size_t i = 0; i < bsz; i++)
        if (fabs(lc[i] - lc2[i]) > 1e-9) { local_ok = 0; break; }
    MPI_Reduce(&local_ok, &all_ok, 1, MPI_INT, MPI_LAND, 0, cart);

    /* Gather the Cannon result blocks into the padded matrix C on rank 0 */
    MPI_Gather(lc, bs * bs, MPI_DOUBLE, gatherC, bs * bs, MPI_DOUBLE, 0, cart);
    if (rank == 0) {
        for (int r = 0; r < p; r++) {
            int rc[2];
            MPI_Cart_coords(cart, r, 2, rc);
            unpack_block(C, m, bs, rc[0], rc[1], &gatherC[(size_t)r * bs * bs]);
        }
        printf("n = %d, processes = %d, block size = %d\n", n, p, bs);
        printf("Results %s\n", all_ok ? "match." : "DO NOT match!");
        printf("Cannon matrix multiplication time: %f seconds\n", t_cannon);
        printf("Simple parallel block multiplication time: %f seconds\n", t_simple);
        free(A); free(B); free(C); free(sendA); free(sendB); free(gatherC);
    }

    free(la0); free(lb0); free(la); free(lb); free(ta); free(tb); free(lc); free(lc2);
    MPI_Comm_free(&row_comm); MPI_Comm_free(&col_comm); MPI_Comm_free(&cart);
    MPI_Finalize();
    return 0;
}
```