DOLFIN-X C++ interface
MPI.h
// Copyright (C) 2007-2014 Magnus Vikstrøm and Garth N. Wells
//
// This file is part of DOLFINX (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include <algorithm>
#include <array>
#include <cassert>
#include <complex>
#include <cstdint>
#include <dolfinx/graph/AdjacencyList.h>
#include <iostream>
#include <numeric>
#include <set>
#include <stdexcept>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#define MPICH_IGNORE_CXX_SEEK 1
#include <mpi.h>

namespace dolfinx
{
/// This class provides utility functions for easy communication with
/// MPI and handles cases when DOLFINX is not configured with MPI.
class MPI
{
public:
  /// Wrapper that duplicates an MPI communicator and manages the
  /// lifetime of the duplicate
  class Comm
  {
  public:
    /// Duplicate communicator and wrap duplicate
    explicit Comm(MPI_Comm comm, bool duplicate = true);

    /// Copy constructor
    Comm(const Comm& comm);

    /// Move constructor
    Comm(Comm&& comm) noexcept;

    // Disable copy assignment operator
    Comm& operator=(const Comm& comm) = delete;

    /// Move assignment operator
    Comm& operator=(Comm&& comm) noexcept;

    /// Destructor (frees wrapped communicator)
    ~Comm();

    /// Return the underlying MPI_Comm object
    MPI_Comm comm() const;

  private:
    // MPI communicator
    MPI_Comm _comm;
  };

  /// Return process rank for the communicator
  static int rank(MPI_Comm comm);

  /// Return size of the group (number of processes) associated with
  /// the communicator
  static int size(MPI_Comm comm);

  /// Send in_values[p0] to process p0 and receive values from process
  /// p1 in out_values[p1]
  template <typename T>
  static graph::AdjacencyList<T>
  all_to_all(MPI_Comm comm, const graph::AdjacencyList<T>& send_data);

  /// Compute communication-graph edges: given the set of ranks (edges)
  /// that this rank will communicate with, determine the ranks that
  /// have an edge to this rank
  static std::vector<int> compute_graph_edges(MPI_Comm comm,
                                              const std::set<int>& edges);

  /// Neighborhood all-to-all. Send data to neighbors using offsets
  /// into a contiguous data array.
  template <typename T>
  static graph::AdjacencyList<T>
  neighbor_all_to_all(MPI_Comm neighbor_comm,
                      const std::vector<int>& send_offsets,
                      const std::vector<T>& send_data);

  /// Return the (sources, destinations) neighbor ranks of a
  /// distributed-graph communicator
  static std::tuple<std::vector<int>, std::vector<int>>
  neighbors(MPI_Comm neighbor_comm);

  /// Find global offset (index); wrapper for MPI_(Ex)Scan with MPI_SUM
  /// as the reduction op
  static std::size_t global_offset(MPI_Comm comm, std::size_t range,
                                   bool exclusive);

  /// Return local range for given process, splitting [0, N - 1] into
  /// 'size' portions of almost equal size
  static std::array<std::int64_t, 2> local_range(int process, std::int64_t N,
                                                 int size);

  /// Return which process owns index (inverse of local_range)
  static int index_owner(int size, std::size_t index, std::size_t N);

  // Helper to make the static_assert in the primary mpi_type() template
  // dependent on T
  template <typename T>
  struct dependent_false : std::false_type
  {
  };

  /// MPI datatype for type T (the primary template is a compile-time
  /// error; only the specialisations below are valid)
  template <typename T>
  static MPI_Datatype mpi_type()
  {
    static_assert(dependent_false<T>::value, "Unknown MPI type");
    throw std::runtime_error("MPI data type unknown");
    return MPI_CHAR;
  }
};

// Turn off doxygen for these template specialisations
/// @cond
// Specialisations for MPI_Datatypes
template <>
inline MPI_Datatype MPI::mpi_type<float>()
{
  return MPI_FLOAT;
}
template <>
inline MPI_Datatype MPI::mpi_type<double>()
{
  return MPI_DOUBLE;
}
template <>
inline MPI_Datatype MPI::mpi_type<std::complex<double>>()
{
  return MPI_DOUBLE_COMPLEX;
}
template <>
inline MPI_Datatype MPI::mpi_type<short int>()
{
  return MPI_SHORT;
}
template <>
inline MPI_Datatype MPI::mpi_type<int>()
{
  return MPI_INT;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned int>()
{
  return MPI_UNSIGNED;
}
template <>
inline MPI_Datatype MPI::mpi_type<long int>()
{
  return MPI_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned long>()
{
  return MPI_UNSIGNED_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<long long>()
{
  return MPI_LONG_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned long long>()
{
  return MPI_UNSIGNED_LONG_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<bool>()
{
  return MPI_C_BOOL;
}
/// @endcond
//---------------------------------------------------------------------------
template <typename T>
graph::AdjacencyList<T>
dolfinx::MPI::all_to_all(MPI_Comm comm,
                         const graph::AdjacencyList<T>& send_data)
{
  const std::vector<std::int32_t>& send_offsets = send_data.offsets();
  const std::vector<T>& values_in = send_data.array();

  const int comm_size = MPI::size(comm);
  assert(send_data.num_nodes() == comm_size);

  // Data size per destination rank
  std::vector<int> send_size(comm_size);
  std::adjacent_difference(std::next(send_offsets.begin()),
                           send_offsets.end(), send_size.begin());

  // Get received data sizes from each rank
  std::vector<int> recv_size(comm_size);
  MPI_Alltoall(send_size.data(), 1, mpi_type<int>(), recv_size.data(), 1,
               mpi_type<int>(), comm);

  // Compute receive offsets
  std::vector<std::int32_t> recv_offset(comm_size + 1);
  recv_offset[0] = 0;
  std::partial_sum(recv_size.begin(), recv_size.end(),
                   std::next(recv_offset.begin()));

  // Send/receive data
  std::vector<T> recv_values(recv_offset[comm_size]);
  MPI_Alltoallv(values_in.data(), send_size.data(), send_offsets.data(),
                mpi_type<T>(), recv_values.data(), recv_size.data(),
                recv_offset.data(), mpi_type<T>(), comm);

  return graph::AdjacencyList<T>(std::move(recv_values),
                                 std::move(recv_offset));
}
//-----------------------------------------------------------------------------
template <typename T>
graph::AdjacencyList<T>
dolfinx::MPI::neighbor_all_to_all(MPI_Comm neighbor_comm,
                                  const std::vector<int>& send_offsets,
                                  const std::vector<T>& send_data)
{
  // Get neighborhood sizes (number of source and destination neighbors)
  int indegree(-1), outdegree(-2), weighted(-1);
  MPI_Dist_graph_neighbors_count(neighbor_comm, &indegree, &outdegree,
                                 &weighted);

  assert((int)send_data.size() == send_offsets.back());
  assert(send_offsets[0] == 0);

  // Compute send sizes and exchange them to get receive sizes
  std::vector<int> send_sizes(outdegree, 0);
  std::vector<int> recv_sizes(indegree);
  std::adjacent_difference(std::next(send_offsets.begin()), send_offsets.end(),
                           send_sizes.begin());
  MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI::mpi_type<int>(),
                        recv_sizes.data(), 1, MPI::mpi_type<int>(),
                        neighbor_comm);

  // Work out receive offsets
  std::vector<int> recv_offsets(recv_sizes.size() + 1);
  recv_offsets[0] = 0;
  std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
                   std::next(recv_offsets.begin()));

  // Send/receive data
  std::vector<T> recv_data(recv_offsets.back());
  MPI_Neighbor_alltoallv(
      send_data.data(), send_sizes.data(), send_offsets.data(),
      MPI::mpi_type<T>(), recv_data.data(), recv_sizes.data(),
      recv_offsets.data(), MPI::mpi_type<T>(), neighbor_comm);

  return graph::AdjacencyList<T>(std::move(recv_data), std::move(recv_offsets));
}
//---------------------------------------------------------------------------

} // namespace dolfinx
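
The following is a minimal usage sketch, not part of MPI.h: it assumes the header is installed as <dolfinx/common/MPI.h> and uses an arbitrary problem size N = 1000. It wraps MPI_COMM_WORLD in a dolfinx::MPI::Comm so the duplicated communicator is released automatically, queries the rank and size, and computes the index range owned by this rank with local_range and its inverse index_owner.

#include <array>
#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <iostream>

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  {
    // Duplicate MPI_COMM_WORLD; the duplicate is freed by ~Comm()
    dolfinx::MPI::Comm comm(MPI_COMM_WORLD);

    const int rank = dolfinx::MPI::rank(comm.comm());
    const int size = dolfinx::MPI::size(comm.comm());

    // Split the index set [0, N) into near-equal contiguous ranges
    const std::int64_t N = 1000;
    const std::array<std::int64_t, 2> range
        = dolfinx::MPI::local_range(rank, N, size);
    std::cout << "Rank " << rank << "/" << size << " owns [" << range[0]
              << ", " << range[1] << ")" << std::endl;

    // index_owner is the inverse map: which rank owns global index 42?
    std::cout << "Index 42 is owned by rank "
              << dolfinx::MPI::index_owner(size, 42, N) << std::endl;
  }
  MPI_Finalize();
  return 0;
}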
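
A second sketch, again illustrative rather than canonical, shows all_to_all: each rank packs one integer per destination rank into a graph::AdjacencyList (using the (data, offsets) constructor that all_to_all itself uses above) and receives one value from every other rank. The function name example_all_to_all and the payload are assumptions made for the example.

#include <cstdint>
#include <dolfinx/common/MPI.h>
#include <dolfinx/graph/AdjacencyList.h>
#include <numeric>
#include <vector>

void example_all_to_all(MPI_Comm comm)
{
  const int size = dolfinx::MPI::size(comm);
  const int rank = dolfinx::MPI::rank(comm);

  // Node p of the AdjacencyList holds the data destined for rank p.
  // Here each rank sends its own rank number to every rank, so the
  // offsets are simply 0, 1, ..., size.
  std::vector<int> send_values(size, rank);
  std::vector<std::int32_t> offsets(size + 1);
  std::iota(offsets.begin(), offsets.end(), 0);
  dolfinx::graph::AdjacencyList<int> send_data(std::move(send_values),
                                               std::move(offsets));

  // recv_data.links(p) holds the values received from rank p
  dolfinx::graph::AdjacencyList<int> recv_data
      = dolfinx::MPI::all_to_all(comm, send_data);
}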
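
The next sketch combines compute_graph_edges with a standard MPI distributed-graph communicator and neighbor_all_to_all. The ring-shaped edge pattern, the function name example_ring_exchange, and the assumption that compute_graph_edges returns the ranks that hold an edge to the caller are illustrative assumptions, not taken from MPI.h.

#include <dolfinx/common/MPI.h>
#include <set>
#include <vector>

void example_ring_exchange(MPI_Comm comm)
{
  const int rank = dolfinx::MPI::rank(comm);
  const int size = dolfinx::MPI::size(comm);

  // Each rank sends to its right-hand neighbour in a ring
  const std::vector<int> dests = {(rank + 1) % size};

  // Assumed semantics: ranks that have an edge to this rank
  const std::vector<int> sources = dolfinx::MPI::compute_graph_edges(
      comm, std::set<int>(dests.begin(), dests.end()));

  // Build a neighbourhood communicator with these edges
  MPI_Comm neighbor_comm;
  MPI_Dist_graph_create_adjacent(
      comm, static_cast<int>(sources.size()), sources.data(), MPI_UNWEIGHTED,
      static_cast<int>(dests.size()), dests.data(), MPI_UNWEIGHTED,
      MPI_INFO_NULL, false, &neighbor_comm);

  // One double for the single destination neighbour: offsets[0] == 0 and
  // offsets.back() equals the total data size, as the assertions in
  // neighbor_all_to_all require
  const std::vector<double> send_data = {static_cast<double>(rank)};
  const std::vector<int> send_offsets = {0, 1};

  // recv.links(i) holds the data received from the i-th source neighbour
  auto recv = dolfinx::MPI::neighbor_all_to_all(neighbor_comm, send_offsets,
                                                send_data);

  MPI_Comm_free(&neighbor_comm);
}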
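
Finally, a short sketch of how the mpi_type<T>() mapping is typically used: it lets templated communication code pick the matching MPI_Datatype without hard-coding constants such as MPI_DOUBLE. The helper sum_over_ranks is hypothetical and only for illustration.

#include <dolfinx/common/MPI.h>

// Hypothetical helper: reduce a single value of a supported type T over
// all ranks of comm
template <typename T>
T sum_over_ranks(MPI_Comm comm, T local_value)
{
  T global_value{};
  MPI_Allreduce(&local_value, &global_value, 1, dolfinx::MPI::mpi_type<T>(),
                MPI_SUM, comm);
  return global_value;
}

// sum_over_ranks<double>(comm, 1.0) returns the number of ranks as a double.
// Instantiating sum_over_ranks with a type that has no mpi_type
// specialisation fails at compile time via the dependent_false static_assert
// in the primary template.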