preCICE v3.1.2
MPIDirectCommunication.cpp
#ifndef PRECICE_NO_MPI

#include <cstddef>
#include <memory>

#include "MPIDirectCommunication.hpp"
#include "logging/LogMacros.hpp"
#include "utils/Parallel.hpp"
#include "utils/assertion.hpp"
#include "utils/span_tools.hpp"

namespace precice::com {
MPIDirectCommunication::MPIDirectCommunication()
    : _commState(utils::Parallel::current())
{
}

MPIDirectCommunication::~MPIDirectCommunication()
{
  closeConnection();
}

size_t MPIDirectCommunication::getRemoteCommunicatorSize()
{
  // Returns the number of processes in the remote communicator.
  int remoteSize = 0;
  MPI_Comm_remote_size(communicator(), &remoteSize);
  return remoteSize;
}

void MPIDirectCommunication::acceptConnection(std::string const &acceptorName,
                                              std::string const &requesterName,
                                              std::string const &tag,
                                              int                acceptorRank,
                                              int                rankOffset)
{
  PRECICE_TRACE(acceptorName, requesterName);
  // MPI Direct Comm only supports IntraComm connections
  PRECICE_ASSERT(rankOffset == 1, "MPIDirectCommunication only supports IntraComm Communications!");
  setRankOffset(rankOffset);

  _isConnected = true;

  PRECICE_ASSERT(acceptorRank == 0, "The Acceptor/Primary rank has to be rank 0!");
  PRECICE_ASSERT(_commState->rank() == acceptorRank, "The given acceptor rank does not match the communicator rank!");
}

void MPIDirectCommunication::closeConnection()
{
  if (not isConnected())
    return;

  _isConnected = false;
}

void MPIDirectCommunication::requestConnection(std::string const &acceptorName,
                                               std::string const &requesterName,
                                               std::string const &tag,
                                               int                requesterRank,
                                               int                requesterCommunicatorSize)
{
  PRECICE_TRACE(acceptorName, requesterName);

  setRankOffset(0); // rankOffset makes no sense here
  _isConnected = true;

  PRECICE_ASSERT(requesterRank == _commState->rank() - 1);
  PRECICE_ASSERT(requesterCommunicatorSize + 1 == _commState->size());
}
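
// Note: the assertions above imply the intended call pattern on an intra-communicator
// of size N: rank 0 calls acceptConnection() with acceptorRank == 0 and rankOffset == 1,
// while every other rank r (1 <= r <= N-1) calls requestConnection() with
// requesterRank == r - 1 and requesterCommunicatorSize == N - 1.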

void MPIDirectCommunication::reduceSum(precice::span<double const> itemsToSend, precice::span<double> itemsToReceive)
{
  PRECICE_TRACE(itemsToSend.size());
  PRECICE_ASSERT(itemsToSend.size() == itemsToReceive.size());
  Rank rank = _commState->rank();
  MPI_Reduce(const_cast<double *>(itemsToSend.data()), itemsToReceive.data(), itemsToSend.size(), MPI_DOUBLE, MPI_SUM, rank, _commState->comm);
}

// Performs a reduce summation on the rank given by primaryRank.
void MPIDirectCommunication::reduceSum(precice::span<double const> itemsToSend, precice::span<double> itemsToReceive, Rank primaryRank)
{
  PRECICE_TRACE(itemsToSend.size());
  PRECICE_ASSERT(itemsToSend.size() == itemsToReceive.size());
  MPI_Reduce(const_cast<double *>(itemsToSend.data()), itemsToReceive.data(), itemsToSend.size(), MPI_DOUBLE, MPI_SUM, primaryRank, _commState->comm);
}

void MPIDirectCommunication::reduceSum(int itemToSend, int &itemsToReceive)
{
  Rank rank = _commState->rank();
  MPI_Reduce(&itemToSend, &itemsToReceive, 1, MPI_INT, MPI_SUM, rank, _commState->comm);
}

void MPIDirectCommunication::reduceSum(int itemToSend, int &itemsToReceive, Rank primaryRank)
{
  MPI_Reduce(&itemToSend, &itemsToReceive, 1, MPI_INT, MPI_SUM, primaryRank, _commState->comm);
}

void MPIDirectCommunication::allreduceSum(precice::span<double const> itemsToSend, precice::span<double> itemsToReceive)
{
  PRECICE_TRACE(itemsToSend.size());
  PRECICE_ASSERT(itemsToSend.size() == itemsToReceive.size());
  MPI_Allreduce(const_cast<double *>(itemsToSend.data()), itemsToReceive.data(), itemsToSend.size(), MPI_DOUBLE, MPI_SUM, _commState->comm);
}

void MPIDirectCommunication::allreduceSum(precice::span<double const> itemsToSend, precice::span<double> itemsToReceive, Rank primaryRank)
{
  PRECICE_TRACE(itemsToSend.size());
  PRECICE_ASSERT(itemsToSend.size() == itemsToReceive.size());
  MPI_Allreduce(const_cast<double *>(itemsToSend.data()), itemsToReceive.data(), itemsToReceive.size(), MPI_DOUBLE, MPI_SUM, _commState->comm);
}

void MPIDirectCommunication::allreduceSum(double itemToSend, double &itemToReceive)
{
  MPI_Allreduce(&itemToSend, &itemToReceive, 1, MPI_DOUBLE, MPI_SUM, _commState->comm);
}

void MPIDirectCommunication::allreduceSum(double itemToSend, double &itemToReceive, Rank primaryRank)
{
  MPI_Allreduce(&itemToSend, &itemToReceive, 1, MPI_DOUBLE, MPI_SUM, _commState->comm);
}

void MPIDirectCommunication::allreduceSum(int itemToSend, int &itemToReceive)
{
  MPI_Allreduce(&itemToSend, &itemToReceive, 1, MPI_INT, MPI_SUM, _commState->comm);
}

void MPIDirectCommunication::allreduceSum(int itemToSend, int &itemToReceive, Rank primaryRank)
{
  MPI_Allreduce(&itemToSend, &itemToReceive, 1, MPI_INT, MPI_SUM, _commState->comm);
}
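
// Note: MPI_Allreduce has no root rank, so the primaryRank parameter of the
// allreduceSum overloads above is intentionally not passed to MPI; every rank
// receives the summed result.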

void MPIDirectCommunication::broadcast(precice::span<const int> itemsToSend)
{
  PRECICE_TRACE(itemsToSend.size());
  MPI_Bcast(const_cast<int *>(itemsToSend.data()), itemsToSend.size(), MPI_INT, 0, _commState->comm);
}

void MPIDirectCommunication::broadcast(precice::span<int> itemsToReceive, int rankBroadcaster)
{
  PRECICE_TRACE(itemsToReceive.size());
  MPI_Bcast(itemsToReceive.data(), itemsToReceive.size(), MPI_INT, rankBroadcaster, _commState->comm);
}
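
// Note: MPI_Bcast is called symmetrically on all ranks; the root argument selects
// whose buffer is read. The send overloads broadcast from rank 0, while the receive
// overloads take the broadcasting rank explicitly.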

void MPIDirectCommunication::broadcast(int itemToSend)
{
  broadcast(precice::refToSpan<const int>(itemToSend));
}

void MPIDirectCommunication::broadcast(int &itemToReceive, Rank rankBroadcaster)
{
  broadcast(precice::refToSpan<int>(itemToReceive), rankBroadcaster);
}

void MPIDirectCommunication::broadcast(precice::span<const double> itemsToSend)
{
  PRECICE_TRACE(itemsToSend.size());
  MPI_Bcast(const_cast<double *>(itemsToSend.data()), itemsToSend.size(), MPI_DOUBLE, 0, _commState->comm);
}

void MPIDirectCommunication::broadcast(precice::span<double> itemsToReceive, int rankBroadcaster)
{
  PRECICE_TRACE(itemsToReceive.size());
  MPI_Bcast(itemsToReceive.data(), itemsToReceive.size(), MPI_DOUBLE, rankBroadcaster, _commState->comm);
}

void MPIDirectCommunication::broadcast(double itemToSend)
{
  broadcast(precice::refToSpan<const double>(itemToSend));
}

void MPIDirectCommunication::broadcast(double &itemToReceive, Rank rankBroadcaster)
{
  broadcast(precice::refToSpan<double>(itemToReceive), rankBroadcaster);
}

void MPIDirectCommunication::broadcast(bool itemToSend)
{
  int item = itemToSend;
  broadcast(item);
}

void MPIDirectCommunication::broadcast(bool &itemToReceive, Rank rankBroadcaster)
{
  int item;
  broadcast(item, rankBroadcaster);
  itemToReceive = item;
}
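
// Note: bool values are transported as int so that the int broadcast overloads
// (and thus MPI_INT) can be reused.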

MPI_Comm &MPIDirectCommunication::communicator(Rank rank)
{
  // Returns the communicator.
  return _commState->comm;
}

Rank MPIDirectCommunication::rank(Rank rank)
{
  // Correct _rankOffset if we are on the primary rank
  return rank;
}

int MPIDirectCommunication::adjustRank(Rank rank) const
{
  // Turn the rank adjustment into a noop for direct communication.
  return rank;
}

} // namespace precice::com

#endif // not PRECICE_NO_MPI
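
The functions above are thin wrappers around the MPI collectives MPI_Bcast, MPI_Reduce, and MPI_Allreduce on the communicator held by _commState. The following standalone sketch (plain MPI only, not part of preCICE; file name and build commands are assumptions about a typical MPI toolchain) illustrates the semantics these wrappers delegate to: the root argument of MPI_Bcast and MPI_Reduce selects the broadcasting or receiving rank, while MPI_Allreduce delivers the sum to every rank.

// demo.cpp -- minimal, self-contained illustration of the wrapped MPI collectives.
// Build and run (assumption): mpicxx demo.cpp -o demo && mpirun -n 4 ./demo
#include <cstdio>
#include <mpi.h>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank = 0, size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // broadcast(): rank 0's value is copied into every rank's buffer.
  int flag = (rank == 0) ? 42 : -1;
  MPI_Bcast(&flag, 1, MPI_INT, 0, MPI_COMM_WORLD);

  // reduceSum(): the sum of all contributions ends up on the root rank only.
  double contribution = 1.0 + rank;
  double sumOnRoot    = 0.0;
  MPI_Reduce(&contribution, &sumOnRoot, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

  // allreduceSum(): every rank receives the sum; no root rank is involved.
  double sumEverywhere = 0.0;
  MPI_Allreduce(&contribution, &sumEverywhere, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  std::printf("rank %d of %d: flag=%d sumOnRoot=%f sumEverywhere=%f\n",
              rank, size, flag, sumOnRoot, sumEverywhere);

  MPI_Finalize();
  return 0;
}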