#include <mpi.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <vector>

#include "../../code/include/matrix.h"

#if 0

// P: #procs

unsigned long chunk_size = (a.size() * b.size()) / num_procs; // == NM/P
unsigned long last_chunk_size = chunk_size + ((a.size() * b.size()) % num_procs);
// snd_chunk_length and rcv_chunk_length == last_chunk_size for one round only,
// otherwise chunk_size

/*
 * Reduce-scatter phase: P - 1 rounds, each costing
 * o + L + (snd_chunk_length * 8 - 1) * G + g
 * and performing rcv_chunk_length ADDs
 *
 * Allgather phase: P - 1 rounds, each costing
 * o + L + (snd_chunk_length * 8 - 1) * G + g
 */

/*
 * ADDs: (P - 1) * rcv_chunk_length
 *     ~ (P - 1) * (N * M / P) + P
 *     = O(NM + P)
 *
 * MULTs: NM
 *
 * LogGP = 2 * (P - 1) * (o + L + ((NM/P) * 8 - 1) * G + g) + 2 * P * G
 *       = O(P * (o + L + (NM/P) * G + g) + P * G)
 */

#endif
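
// Not part of the original file: a minimal sketch that evaluates the LogGP
// estimate from the comment block above for concrete parameter values. The
// function name and the idea of plugging in measured o, L, g, G values are
// assumptions made for illustration only; the analysis itself is unchanged.
static inline double loggp_allreduce_ring_estimate(double o, double L,
                                                   double g, double G,
                                                   unsigned long P,
                                                   unsigned long N,
                                                   unsigned long M) {
  // Each of the P chunks holds roughly NM/P doubles, i.e. (NM/P) * 8 bytes.
  const double chunk_bytes = (static_cast<double>(N) * M / P) * 8.0;
  // 2 * (P - 1) rounds in total (reduce-scatter followed by allgather), each
  // costing o + L + (bytes - 1) * G + g, plus the 2 * P * G term from above.
  return 2.0 * (P - 1) * (o + L + (chunk_bytes - 1.0) * G + g) +
         2.0 * P * G;
}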

extern MPI_Comm comm;
extern int rank;
extern int num_procs;

void allreduce_ring(const std::vector<vector>& a_in,
                    const std::vector<vector>& b_in, matrix& result) {
  const auto& a = a_in[rank];
  const auto& b = b_in[rank];

  // #### NM mults
  result.set_outer_product(a, b);
  auto current = result.get_ptr();

  unsigned long chunk_size = (a.size() * b.size()) / num_procs;
  unsigned long last_chunk_size =
      chunk_size + ((a.size() * b.size()) % num_procs);
  auto destination = (rank + 1) % num_procs;
  // Adding num_procs avoids a negative result of % for rank 0
  auto source = (rank + num_procs - 1) % num_procs;

  // Reduce-scatter: pass partial results around the ring; after P - 1 rounds
  // each rank holds the fully reduced chunk with index (rank + 1) % num_procs
  // #### P - 1 rounds
  for (int i = 0; i < num_procs - 1; ++i) {
    // Determine offset and length of the chunk to send
    auto snd_chunk_index = (rank + num_procs - i) % num_procs;
    auto snd_chunk_offset = snd_chunk_index * chunk_size;
    int snd_chunk_length = snd_chunk_index == num_procs - 1
                               ? (int)last_chunk_size
                               : (int)chunk_size;

    // Determine offset and length of the chunk to receive
    auto rcv_chunk_index = (snd_chunk_index + num_procs - 1) % num_procs;
    auto rcv_chunk_offset = rcv_chunk_index * chunk_size;
    int rcv_chunk_length = rcv_chunk_index == num_procs - 1
                               ? (int)last_chunk_size
                               : (int)chunk_size;

    // Send current chunk to the next node
    MPI_Request sendRequest = MPI_REQUEST_NULL;
    /*
     * #### Isend snd_chunk_length * 8 bytes
     */
    MPI_Isend(current + snd_chunk_offset, snd_chunk_length, MPI_DOUBLE,
              destination, 0, comm, &sendRequest);

    // Receive the corresponding chunk from the previous node
    auto recv_chunk = new double[rcv_chunk_length];
    MPI_Recv(recv_chunk, rcv_chunk_length, MPI_DOUBLE, source, 0, comm,
             MPI_STATUS_IGNORE);

    // The neighbour has received our chunk by now, so complete the
    // non-blocking send before its buffer region is touched again
    MPI_Wait(&sendRequest, MPI_STATUS_IGNORE);

    // Accumulate the received chunk into the local partial result
    /*
     * #### rcv_chunk_length ADDs
     */
    for (int j = 0; j < rcv_chunk_length; ++j) {
      current[rcv_chunk_offset + j] += recv_chunk[j];
    }

    delete[] recv_chunk;
  }

  // At this point the current node holds the fully reduced chunk with index
  // (rank + 1) % num_procs. Allgather: circulate these result chunks so that
  // every rank ends up with the complete matrix
  // #### P - 1 rounds
  for (int i = 0; i < num_procs - 1; ++i) {
    // Determine offset and length of the chunk to send
    auto snd_chunk_index = (rank + num_procs - i + 1) % num_procs;
    auto snd_chunk_offset = snd_chunk_index * chunk_size;
    int snd_chunk_length = snd_chunk_index == num_procs - 1
                               ? (int)last_chunk_size
                               : (int)chunk_size;

    // Determine offset and length of the chunk to receive
    auto rcv_chunk_index = (snd_chunk_index + num_procs - 1) % num_procs;
    auto rcv_chunk_offset = rcv_chunk_index * chunk_size;
    int rcv_chunk_length = rcv_chunk_index == num_procs - 1
                               ? (int)last_chunk_size
                               : (int)chunk_size;

    // Send current chunk to the next node
    MPI_Request sendRequest = MPI_REQUEST_NULL;
    /*
     * #### Isend snd_chunk_length * 8 bytes
     */
    MPI_Isend(current + snd_chunk_offset, snd_chunk_length, MPI_DOUBLE,
              destination, 0, comm, &sendRequest);

    // Receive the corresponding result chunk from the previous node directly
    // into the result buffer
    MPI_Recv(current + rcv_chunk_offset, rcv_chunk_length, MPI_DOUBLE,
             source, 0, comm, MPI_STATUS_IGNORE);

    // Complete the non-blocking send before the next round reuses the buffer
    MPI_Wait(&sendRequest, MPI_STATUS_IGNORE);
  }
}
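
// Not part of the original implementation: an illustrative, self-contained
// helper that reproduces the chunk-index schedule used by allreduce_ring, so
// the ring pattern can be inspected or unit-tested in isolation. The struct
// and function names are assumptions made for this sketch only.
struct chunk_schedule {
  int snd_chunk_index;
  int rcv_chunk_index;
};

// phase 0 = reduce-scatter (first loop), phase 1 = allgather (second loop);
// round runs from 0 to num_procs - 2, matching the loops above.
static inline chunk_schedule ring_schedule(int rank, int round, int phase,
                                           int num_procs) {
  // In the reduce-scatter phase rank r sends chunk (r - i) mod P in round i;
  // in the allgather phase it sends chunk (r + 1 - i) mod P. The chunk that
  // is received always trails the sent chunk by one position on the ring.
  int base = phase == 0 ? rank : rank + 1;
  int snd = (base + num_procs - round) % num_procs;
  int rcv = (snd + num_procs - 1) % num_procs;
  return {snd, rcv};
}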