File size: 3,072 Bytes
1bac010
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
// matrix_mul_mpi.c
// MPI parallel matrix multiplication: A is scattered by row blocks (Scatterv),
// B is broadcast to all ranks, a Barrier synchronizes before/after the kernel,
// and only the multiplication phase is timed.

#include <mpi.h>

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);
    int size, rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Matrix dimension; optionally overridden by the first command-line argument.
    int N = 8000;
    if (argc >= 2) {
        N = atoi(argv[1]);
    }
    // Validate N: reject non-positive values, and values whose element count
    // N*N would overflow the `int` counts required by MPI_Scatterv/MPI_Bcast
    // (for N > 46340, N*N > INT_MAX — previously undefined behavior).
    if (N <= 0 || (long long)N * N > (long long)INT_MAX) {
        if (rank == 0) {
            fprintf(stderr, "Invalid matrix size: %d\n", N);
        }
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    // Row distribution: the first `rem` ranks get one extra row.
    int base = N / size;
    int rem  = N % size;
    int local_rows = base + (rank < rem ? 1 : 0);

    // Scatterv parameters (element counts and displacements), root only.
    int *sendcounts = NULL, *displs = NULL;
    if (rank == 0) {
        sendcounts = malloc(size * sizeof(int));
        displs     = malloc(size * sizeof(int));
        if (!sendcounts || !displs) {
            fprintf(stderr, "进程 %d: 内存分配失败\n", rank);
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        int offset = 0;
        for (int i = 0; i < size; i++) {
            int rows = base + (i < rem ? 1 : 0);
            sendcounts[i] = rows * N;
            displs[i]     = offset * N;
            offset += rows;
        }
    }

    // Local buffers: this rank's rows of A and C, plus the full B.
    double *A_local = malloc((size_t)local_rows * N * sizeof(double));
    double *B       = malloc((size_t)N * N * sizeof(double));
    double *C_local = malloc((size_t)local_rows * N * sizeof(double));
    // malloc(0) may legitimately return NULL when a rank owns no rows
    // (size > N), so only treat NULL as a failure for non-empty requests.
    if (!B || (local_rows > 0 && (!A_local || !C_local))) {
        fprintf(stderr, "进程 %d: 内存分配失败\n", rank);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    // Root generates the full A and B with a fixed seed (reproducible runs).
    double *A = NULL;
    if (rank == 0) {
        A = malloc((size_t)N * N * sizeof(double));
        if (!A) {
            fprintf(stderr, "进程 %d: 内存分配失败\n", rank);
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }
        srand(0);
        for (long long i = 0; i < (long long)N * N; i++) {
            A[i] = rand() / (double)RAND_MAX;
            B[i] = rand() / (double)RAND_MAX;
        }
    }

    // Distribute row blocks of A; sendcounts/displs are only read at the root.
    MPI_Scatterv(A, sendcounts, displs, MPI_DOUBLE,
                 A_local, local_rows * N, MPI_DOUBLE,
                 0, MPI_COMM_WORLD);
    // Every rank needs all of B.
    MPI_Bcast(B, N * N, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    // Synchronize so the timer covers only the multiplication kernel,
    // not the data distribution above.
    MPI_Barrier(MPI_COMM_WORLD);
    double t0 = MPI_Wtime();

    // Naive triple loop over this rank's rows: C_local = A_local * B.
    for (int i = 0; i < local_rows; i++) {
        for (int j = 0; j < N; j++) {
            double sum = 0.0;
            for (int k = 0; k < N; k++) {
                sum += A_local[(long long)i * N + k] * B[(long long)k * N + j];
            }
            C_local[(long long)i * N + j] = sum;
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
    double t1 = MPI_Wtime();
    double local_elapsed = t1 - t0;
    double max_elapsed;

    // Parallel time = slowest rank's kernel time.
    MPI_Reduce(&local_elapsed, &max_elapsed, 1, MPI_DOUBLE,
               MPI_MAX, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("Parallel multiplication time: %.6f seconds with %d processes\n",
               max_elapsed, size);
    }

    // Release resources; A/sendcounts/displs exist only at the root
    // (free(NULL) is a no-op, so the unconditional frees are safe too).
    free(A_local);
    free(B);
    free(C_local);
    if (rank == 0) {
        free(A);
        free(sendcounts);
        free(displs);
    }
    MPI_Finalize();
    return 0;
}