vector<double> getData()
{
  static double data_0[] = {rand(), rand()};
  static double data_1[] = {rand(), rand(), rand()};
  static double data_2[] = {rand(), rand()};
  static double *data_3;
  static double data_4[] = {rand(), rand(), rand()};

  static double *data[] = {data_0, data_1, data_2, data_3, data_4};
  static int    size[]  = {sizeof(data_0) / sizeof(*data_0),
                           sizeof(data_1) / sizeof(*data_1),
                           sizeof(data_2) / sizeof(*data_2),
                           0, // data_3 points to no values; rank 3 gets an empty vector
                           sizeof(data_4) / sizeof(*data_4)};

  return vector<double>(data[rank], data[rank] + size[rank]);
}
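// The size[] entries rely on the element-count idiom: for a statically sized
// array arr, sizeof(arr) / sizeof(*arr) gives its number of elements, and
// vector<double>(first, last) copies the half-open pointer range [first, last).
// A minimal illustrative sketch of the same idiom (the names below are not
// part of this file):
//
//   static double sample[] = {1.0, 2.0, 3.0};
//   std::vector<double> values(sample, sample + sizeof(sample) / sizeof(*sample));
//   // values now holds {1.0, 2.0, 3.0}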
vector<double> getExpectedData()
{
  static double data_0[] = {20.0, 50.0};
  static double data_1[] = {10.0, 30.0, 40.0};
  static double data_2[] = {60.0, 70.0};
  static double *data_3;
  static double data_4[] = {80.0, 90.0, 100.0};

  static double *data[] = {data_0, data_1, data_2, data_3, data_4};
  static int    size[]  = {sizeof(data_0) / sizeof(*data_0),
                           sizeof(data_1) / sizeof(*data_1),
                           sizeof(data_2) / sizeof(*data_2),
                           0, // data_3 points to no values; rank 3 expects an empty vector
                           sizeof(data_4) / sizeof(*data_4)};

  return vector<double>(data[rank], data[rank] + size[rank]);
}
bool validate(vector<double> const &data)
{
  // ...
  if (data.size() != expectedData.size())
    // ...
  for (int i = 0; i < data.size(); ++i) {
    valid &= (data[i] == expectedData[i]);
    // ...
}

void process(vector<double> &data)
{
  for (int i = 0; i < data.size(); ++i) {
    // ...
}
int main(int argc, char **argv)
{
  std::cout << "Running communication dummy\n";
  // ...
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
  // ...
  std::cout << "Please run with 5 mpi processes\n";
  // ...
  utils::Parallel::initializeMPI(NULL, NULL);
  // ...
  utils::Parallel::initializeMPI(NULL, NULL);
  // ...
  mesh->setGlobalNumberOfVertices(10);

  mesh->getVertexDistribution()[0].push_back(1);
  mesh->getVertexDistribution()[0].push_back(4);

  mesh->getVertexDistribution()[1].push_back(0);
  mesh->getVertexDistribution()[1].push_back(2);
  mesh->getVertexDistribution()[1].push_back(3);

  mesh->getVertexDistribution()[2].push_back(5);
  mesh->getVertexDistribution()[2].push_back(6);

  mesh->getVertexDistribution()[4].push_back(7);
  mesh->getVertexDistribution()[4].push_back(8);
  mesh->getVertexDistribution()[4].push_back(9);
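// The distribution above assigns the 10 global vertices as rank 0 -> {1, 4},
// rank 1 -> {0, 2, 3}, rank 2 -> {5, 6}, rank 3 -> {} and rank 4 -> {7, 8, 9},
// which lines up with getExpectedData(): every rank expects 10 * (index + 1)
// for each global vertex index it owns, and rank 3 expects an empty vector.
// A small illustrative sketch of that rule (the map and variable names are
// not part of this file):
//
//   std::map<int, std::vector<int>> ownership = {
//       {0, {1, 4}}, {1, {0, 2, 3}}, {2, {5, 6}}, {3, {}}, {4, {7, 8, 9}}};
//   std::vector<double> expected;
//   for (int v : ownership[rank])
//     expected.push_back(10.0 * (v + 1)); // e.g. rank 0: {20.0, 50.0}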
  // ...
  for (auto cf : cfs) {
    // ...
    << "Connected!" << '\n';
    // ...
    c.receive(data.data(), data.size());
    // ...
    << "Success!" << '\n';
    // ...
    << "Failure!" << '\n';
    // ...
    c.send(data.data(), data.size());
    // ...
    cout << "----------" << '\n';
    // ...
  }

  std::cout << "Stop communication dummy\n";
  // ...
}