  static double data_0[] = {10.0, 20.0, 40.0, 80.0};
  static double data_1[] = {30.0, 50.0, 60.0, 90.0};
  static double data_2[] = {70.0, 100.0};

  static double *data[] = {data_0, data_1, data_2};
  static int size[] = {sizeof(data_0) / sizeof(*data_0),
                       sizeof(data_1) / sizeof(*data_1),
                       sizeof(data_2) / sizeof(*data_2)};

  return std::move(vector<double>(data[rank], data[rank] + size[rank]));
  static double data_0[] = {10.0 + 2, 20.0 + 1, 40.0 + 2, 80.0 + 5};
  static double data_1[] = {30.0 + 2, 50.0 + 1, 60.0 + 3, 90.0 + 5};
  static double data_2[] = {70.0 + 3, 100.0 + 5};

  static double *data[] = {data_0, data_1, data_2};
  static int size[] = {sizeof(data_0) / sizeof(*data_0),
                       sizeof(data_1) / sizeof(*data_1),
                       sizeof(data_2) / sizeof(*data_2)};

  return std::move(vector<double>(data[rank], data[rank] + size[rank]));
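Comparing the two fragments, getExpectedData() returns the same per-rank arrays as getData() with a small constant added to each element (rank 0, for example, expects {12, 21, 42, 85} instead of {10, 20, 40, 80}). A minimal, self-contained sketch of that relationship; the name offsets is illustrative and not part of the original file, and where the offsets come from in the actual exchange is not shown in this excerpt:

#include <cassert>
#include <vector>

int main() {
  // Rank 0 values from getData() and the per-element increments visible
  // in getExpectedData(); both copied from the listing above.
  std::vector<double> data     = {10.0, 20.0, 40.0, 80.0};
  std::vector<double> offsets  = {2.0, 1.0, 2.0, 5.0}; // illustrative name
  std::vector<double> expected = {10.0 + 2, 20.0 + 1, 40.0 + 2, 80.0 + 5};

  for (std::size_t i = 0; i < data.size(); ++i)
    assert(data[i] + offsets[i] == expected[i]);
  return 0;
}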
  if (data.size() != expectedData.size())

  for (int i = 0; i < data.size(); ++i) {
    valid &= (data[i] == expectedData[i]);
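The validate() fragment above only shows the size check and the element-wise comparison. A minimal, self-contained reconstruction of the same pattern follows; the two-argument signature is a simplification for this sketch, since the file's own validate(vector<double> const &data) presumably obtains the expected values itself, and the surrounding declarations are assumptions rather than the original function body:

#include <iostream>
#include <vector>
using std::vector;

// Assumed shape of the helper: compare received data against the
// expected values and accumulate element-wise mismatches.
bool validate(vector<double> const &data, vector<double> const &expectedData) {
  if (data.size() != expectedData.size())
    return false;

  bool valid = true;
  for (std::size_t i = 0; i < data.size(); ++i) {
    valid &= (data[i] == expectedData[i]);
  }
  return valid;
}

int main() {
  std::cout << validate({1.0, 2.0}, {1.0, 2.0}) << '\n'; // prints 1
}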
int main(int argc, char **argv)

  std::cout << "Running communication dummy\n";
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
  std::cout << "Please run with 3 mpi processes\n";
  utils::Parallel::initializeMPI(NULL, NULL);

  utils::Parallel::initializeMPI(NULL, NULL);

  mesh->setGlobalNumberOfVertices(10);

  mesh->getVertexDistribution()[0].push_back(0);
  mesh->getVertexDistribution()[0].push_back(1);
  mesh->getVertexDistribution()[0].push_back(3);
  mesh->getVertexDistribution()[0].push_back(7);

  mesh->getVertexDistribution()[1].push_back(2);
  mesh->getVertexDistribution()[1].push_back(4);
  mesh->getVertexDistribution()[1].push_back(5);
  mesh->getVertexDistribution()[1].push_back(8);

  mesh->getVertexDistribution()[2].push_back(6);
  mesh->getVertexDistribution()[2].push_back(9);
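The vertex distribution assigns the 10 global vertices to the three ranks: rank 0 owns {0, 1, 3, 7}, rank 1 owns {2, 4, 5, 8}, and rank 2 owns {6, 9}, which matches the sizes of the per-rank data arrays above (4, 4 and 2 values, presumably one value per owned vertex). A standalone sketch of that bookkeeping using plain standard-library containers; it only mirrors the structure of the listing and does not use the preCICE mesh API:

#include <cassert>
#include <map>
#include <vector>

int main() {
  // Global vertex indices per rank, copied from the listing above.
  std::map<int, std::vector<int>> vertexDistribution = {
      {0, {0, 1, 3, 7}},
      {1, {2, 4, 5, 8}},
      {2, {6, 9}}};

  // The per-rank data sizes (4, 4, 2) from getData() must match the
  // number of vertices each rank owns.
  std::vector<std::size_t> dataSizes = {4, 4, 2};
  for (int rank = 0; rank < 3; ++rank)
    assert(vertexDistribution[rank].size() == dataSizes[rank]);
  return 0;
}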
  for (auto cf : cfs) {

    c.send(data.data(), data.size());

    c.receive(data.data(), data.size());
    cout << "----------\n";

  std::cout << "Stop communication dummy\n";
int MPI_Comm_rank(MPI_Comm comm, int *rank)
static MPI_Comm MPI_COMM_WORLD
int MPI_Comm_size(MPI_Comm comm, int *size)

Provides connection methods for processes located in one communicator.

Point-to-point communication implementation of DistributedCommunication.

void receive(precice::span<double> itemsToReceive, int valueDimension=1) override
  Receives a subset of local double values corresponding to local indices deduced from the current and ...
void requestConnection(std::string const &acceptorName, std::string const &requesterName) override
  Requests connection from participant, which has to call acceptConnection().
void send(precice::span<double const> itemsToSend, int valueDimension=1) override
  Sends a subset of local double values corresponding to local indices deduced from the current and rem...

Container and creator for meshes.

static int getSize()
  Number of ranks. This includes ranks from both participants, e.g. minimal size is 2.
static Rank getRank()
  Current rank.
static bool isPrimary()
  True if this process is running the primary rank.
static bool isSecondary()
  True if this process is running a secondary rank.
static com::PtrCommunication &getCommunication()
  Intra-participant communication.
static void splitCommunicator(std::optional<int> group=std::nullopt)
  Splits and creates a local MPI communicator according to groupName.

bool validate(vector<double> const &data)
vector<double> getData()
int main(int argc, char **argv)
vector<double> getExpectedData()

std::shared_ptr<CommunicationFactory> PtrCommunicationFactory
std::shared_ptr<Communication> PtrCommunication

Main namespace of the precice library.