Commit 0303c56

back-end C-style MPI bindings
1 parent 4fbd7a9 commit 0303c56

9 files changed, +186 -161 lines changed


algorithms/generators.cpp

Lines changed: 27 additions & 28 deletions
@@ -1,4 +1,3 @@
-// generators.cpp
 // Implementations of methods to initialize MMSP grids
 #ifndef _GENERATORS_CPP_
 #define _GENERATORS_CPP_
@@ -81,9 +80,9 @@ MMSP::vector<int> getPosition(const DistanceVoxel& dv) {
 
 template<int dim, typename T>
 void exact_voronoi(MMSP::grid<dim, MMSP::sparse<T> >& grid, const std::vector<std::vector<MMSP::vector<int> > >& seeds) {
-int id=0;
+int id = 0;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
 #endif
 // Exact Voronoi tessellation from seeds, based on Euclidean distance function. Runtime is O(Nseeds*L*W*H).
 // seeds must contain every seed from every rank.
@@ -193,7 +192,7 @@ void approximate_voronoi(MMSP::grid<dim, MMSP::sparse<T> >& grid, const std::vec
 #endif
 int id = 0;
 #ifdef MPI_VERSION
-id = MPI::COMM_WORLD.Get_rank();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
 #endif
 // Perform the tessellation, using fast-marching fanciness
 if (dim == 2) {
@@ -400,7 +399,9 @@ void approximate_voronoi(MMSP::grid<dim, MMSP::sparse<T> >& grid, const std::vec
 }
 #ifdef DEBUG
 #ifdef MPI_VERSION
-if (MPI::COMM_WORLD.Get_rank()==0)
+int id = 0;
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
+if (id==0)
 #endif
 std::cout<<"Completed approximate tessellation ("<<time(NULL)-tstart<<" sec)."<<std::endl;
 #endif
@@ -409,23 +410,24 @@ void approximate_voronoi(MMSP::grid<dim, MMSP::sparse<T> >& grid, const std::vec
 void seedswap(std::vector<std::vector<MMSP::vector<int> > >& seeds)
 {
 #ifdef MPI_VERSION
-int id=MPI::COMM_WORLD.Get_rank();
-int np=MPI::COMM_WORLD.Get_size();
+int id=0, np=0;
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 // Exchange seeds between all processors
 int send_size=3*seeds[id].size(); // number of integers
 int* send_buffer = new int[send_size]; // number of integers
 send_size = seeds_to_buffer(seeds[id], send_buffer);
 int* seed_sizes = new int[np];
-MPI::COMM_WORLD.Barrier();
-MPI::COMM_WORLD.Allgather(&send_size, 1, MPI_INT, seed_sizes, 1, MPI_INT);
+MPI_Barrier(MPI_COMM_WORLD);
+MPI_Allgather(&send_size, 1, MPI_INT, seed_sizes, 1, MPI_INT, MPI_COMM_WORLD);
 int total_size=0;
 for (int i=0; i<np; ++i) total_size+=seed_sizes[i];
 int* offsets = new int[np];
 offsets[0]=0;
 for (int i=1; i<np; ++i) offsets[i]=seed_sizes[i-1]+offsets[i-1];
 int* seed_block = new int[total_size];
-MPI::COMM_WORLD.Barrier();
-MPI::COMM_WORLD.Allgatherv(send_buffer, send_size, MPI_INT, seed_block, seed_sizes, offsets, MPI_INT);
+MPI_Barrier(MPI_COMM_WORLD);
+MPI_Allgatherv(send_buffer, send_size, MPI_INT, seed_block, seed_sizes, offsets, MPI_INT, MPI_COMM_WORLD);
 delete [] send_buffer; send_buffer=NULL;
 
 for (int i=0; i<np; ++i) {
@@ -437,8 +439,8 @@ void seedswap(std::vector<std::vector<MMSP::vector<int> > >& seeds)
 delete [] seed_block; seed_block=NULL;
 int vote=1;
 int total_procs=0;
-MPI::COMM_WORLD.Barrier();
-MPI::COMM_WORLD.Allreduce(&vote, &total_procs, 1, MPI_INT, MPI_SUM);
+MPI_Barrier(MPI_COMM_WORLD);
+MPI_Allreduce(&vote, &total_procs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 #ifdef DEBUG
 if (id==0) std::cout<<"Synchronized "<<np*seeds[id].size()<<" seeds on "<<total_procs<<" ranks."<<std::endl;
 #endif
@@ -448,11 +450,10 @@ void seedswap(std::vector<std::vector<MMSP::vector<int> > >& seeds)
 template<int dim>
 void seeds_from_poisson_process(const int x0[dim], const int x1[dim], const int g0[dim], const int g1[dim], const int& nseeds, std::vector<std::vector<MMSP::vector<int> > >& seeds)
 {
-int id=0;
-int np=1;
+int id=0, np=1;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
-np=MPI::COMM_WORLD.Get_size();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 #endif
 std::vector<MMSP::vector<int> > local_seeds; // blank for now
 seeds.clear();
@@ -482,7 +483,7 @@ void seeds_to_file(const int g0[dim], const int g1[dim], const std::vector<std::
 {
 int id=0;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
 #endif
 if (id==0) {
 std::ofstream output(filename);
@@ -496,18 +497,17 @@ void seeds_to_file(const int g0[dim], const int g1[dim], const std::vector<std::
 output.close();
 }
 #ifdef MPI_VERSION
-MPI::COMM_WORLD.Barrier();
+MPI_Barrier(MPI_COMM_WORLD);
 #endif
 }
 
 template<int dim>
 void seeds_from_file(const int x0[dim], const int x1[dim], const int g0[dim], const int g1[dim], const char* seedfilename, std::vector<std::vector<MMSP::vector<int> > >& seeds)
 {
-int id=0;
-int np=1;
+int id=0, np=1;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
-np=MPI::COMM_WORLD.Get_size();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 #endif
 std::ifstream input(seedfilename);
 if (!input) {
@@ -547,11 +547,10 @@ void seeds_from_file(const int x0[dim], const int x1[dim], const int g0[dim], co
 template<int dim>
 void honeycomb_seeds(const int x0[dim], const int x1[dim], const int g0[dim], const int g1[dim], const int a, std::vector<std::vector<MMSP::vector<int> > >& seeds)
 {
-int id=0;
-int np=1;
+int id=0, np=1;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
-np=MPI::COMM_WORLD.Get_size();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 #endif
 std::vector<MMSP::vector<int> > local_seeds; // blank for now
 seeds.clear();
@@ -597,7 +596,7 @@ void tessellate(MMSP::grid<dim, MMSP::sparse<T> >& grid, const int& nseeds) {
 approximate_voronoi<dim,T>(grid, seeds);
 #else
 exact_voronoi<dim,T>(grid, seeds);
-MPI::COMM_WORLD.Barrier();
+MPI_Barrier(MPI_COMM_WORLD);
 #endif
 } // tessellate
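
The change repeated throughout generators.cpp is uniform: the deprecated C++ MPI bindings (MPI::COMM_WORLD.Get_rank(), Get_size(), Barrier()) give way to the C API, which takes the communicator as an explicit argument and returns results through output pointers. Below is a minimal, self-contained sketch of that pattern; the program and its USE_MPI flag are illustrative only, since MMSP itself pulls in <mpi.h> through its own headers, and MPI_VERSION is defined by <mpi.h>.

    // demo.cpp -- illustrative sketch, not part of the MMSP sources.
    // Build serial:  g++ demo.cpp
    // Build MPI:     mpic++ -DUSE_MPI demo.cpp
    #ifdef USE_MPI
    #include <mpi.h>                          // defines MPI_VERSION
    #endif
    #include <iostream>

    int main(int argc, char* argv[]) {
        int id = 0, np = 1;                   // serial defaults
    #ifdef MPI_VERSION                        // true only when <mpi.h> was included
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &id);   // replaces MPI::COMM_WORLD.Get_rank()
        MPI_Comm_size(MPI_COMM_WORLD, &np);   // replaces MPI::COMM_WORLD.Get_size()
    #endif
        if (id == 0)
            std::cout << "Running on " << np << " rank(s)." << std::endl;
    #ifdef MPI_VERSION
        MPI_Barrier(MPI_COMM_WORLD);          // replaces MPI::COMM_WORLD.Barrier()
        MPI_Finalize();
    #endif
        return 0;
    }

The same source compiles for both serial and parallel builds, which is exactly the property the guarded blocks in the generators rely on.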

algorithms/tessellate.hpp

Lines changed: 24 additions & 20 deletions
@@ -114,7 +114,8 @@ namespace MMSP {
 #ifdef MPI_VERSION
 template<int dim, typename T>
 void exact_voronoi(MMSP::grid<dim, sparse<T> >& grid, const std::vector<std::vector<MMSP::vector<int> > >& seeds) {
-int id=MPI::COMM_WORLD.Get_rank();
+int id=0;
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
 // Exact Voronoi tessellation from seeds, based on Euclidean distance function. Runtime is O(Nseeds*L*W*H).
 // seeds must contain every seed from every rank.
 #ifdef DEBUG
@@ -223,8 +224,9 @@ void approximate_voronoi(MMSP::grid<dim, sparse<T> >& grid, const std::vector<st
 #endif
 int id = 0;
 #ifdef MPI_VERSION
-id = MPI::COMM_WORLD.Get_rank();
-int np = MPI::COMM_WORLD.Get_size();
+int np=1;
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 #endif
 // Perform the tessellation, using fast-marching fanciness
 if (dim == 2) {
@@ -431,7 +433,10 @@ void approximate_voronoi(MMSP::grid<dim, sparse<T> >& grid, const std::vector<st
 }
 #ifdef DEBUG
 #ifdef MPI_VERSION
-if (MPI::COMM_WORLD.Get_rank()==0)
+int rank=0;
+MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+if (rank==0)
 #endif
 std::cout<<"Completed approximate tessellation ("<<time(NULL)-tstart<<" sec)."<<std::endl;
 #endif
@@ -440,11 +445,10 @@ void approximate_voronoi(MMSP::grid<dim, sparse<T> >& grid, const std::vector<st
 
 template<int dim, typename T>
 void tessellate(const std::vector<MMSP::vector<int> >& local_seeds, std::vector<std::vector<MMSP::vector<int> > >& seeds, MMSP::grid<dim, sparse<T> >& grid) {
-int id=0;
-int np=1;
+int id=0, np=1;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
-np=MPI::COMM_WORLD.Get_size();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 #endif
 
 #ifndef MPI_VERSION
@@ -455,16 +459,16 @@ void tessellate(const std::vector<MMSP::vector<int> >& local_seeds, std::vector<
 int* send_buffer = new int[send_size]; // number of integers
 send_size = seeds_to_buffer(local_seeds, send_buffer);
 int* seed_sizes = new int[np];
-MPI::COMM_WORLD.Barrier();
-MPI::COMM_WORLD.Allgather(&send_size, 1, MPI_INT, seed_sizes, 1, MPI_INT);
+MPI_Barrier(MPI_COMM_WORLD);
+MPI_Allgather(&send_size, 1, MPI_INT, seed_sizes, 1, MPI_INT, MPI_COMM_WORLD);
 int total_size=0;
 for (int i=0; i<np; ++i) total_size+=seed_sizes[i];
 int* offsets = new int[np];
 offsets[0]=0;
 for (int i=1; i<np; ++i) offsets[i]=seed_sizes[i-1]+offsets[i-1];
 int* seed_block = new int[total_size];
-MPI::COMM_WORLD.Barrier();
-MPI::COMM_WORLD.Allgatherv(send_buffer, send_size, MPI_INT, seed_block, seed_sizes, offsets, MPI_INT);
+MPI_Barrier(MPI_COMM_WORLD);
+MPI_Allgatherv(send_buffer, send_size, MPI_INT, seed_block, seed_sizes, offsets, MPI_INT, MPI_COMM_WORLD);
 delete [] send_buffer; send_buffer=NULL;
 
 for (int i=0; i<np; ++i) {
@@ -476,8 +480,8 @@ void tessellate(const std::vector<MMSP::vector<int> >& local_seeds, std::vector<
 delete [] seed_block; seed_block=NULL;
 int vote=1;
 int total_procs=0;
-MPI::COMM_WORLD.Barrier();
-MPI::COMM_WORLD.Allreduce(&vote, &total_procs, 1, MPI_INT, MPI_SUM);
+MPI_Barrier(MPI_COMM_WORLD);
+MPI_Allreduce(&vote, &total_procs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 if (id==0) std::cout<<"Synchronized "<<total_size/3<<" seeds on "<<total_procs<<" ranks."<<std::endl;
 #endif
 
@@ -486,9 +490,9 @@ void tessellate(const std::vector<MMSP::vector<int> >& local_seeds, std::vector<
 approximate_voronoi<dim,T>(grid, seeds);
 #else
 exact_voronoi<dim,T>(grid, seeds);
-MPI::COMM_WORLD.Barrier();
+MPI_Barrier(MPI_COMM_WORLD);
 total_procs=0;
-MPI::COMM_WORLD.Allreduce(&vote, &total_procs, 1, MPI_INT, MPI_SUM);
+MPI_Allreduce(&vote, &total_procs, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 #endif
 } // tessellate
 
@@ -497,7 +501,7 @@ template<int dim, typename T>
 void seeds_from_prng(const MMSP::grid<dim, sparse<T> >& grid, const int& nseeds, std::vector<MMSP::vector<int> >& local_seeds) {
 int id=0;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
 #endif
 
 unsigned long int pseudorand_seed = time(NULL);
@@ -524,7 +528,7 @@ template<int dim, typename T>
 void seeds_from_file(const MMSP::grid<dim, sparse<T> >& grid, const std::string& seed_filename, std::vector<MMSP::vector<int> >& local_seeds) {
 int id=0;
 #ifdef MPI_VERSION
-id=MPI::COMM_WORLD.Get_rank();
+MPI_Comm_rank(MPI_COMM_WORLD, &id);
 #endif
 
 if (id == 0) std::cout << "Reading seeds from " << seed_filename << std::endl;
@@ -565,7 +569,7 @@ template<int dim, typename T>
 void tessellate(MMSP::grid<dim, sparse<T> >& grid, const int& nseeds) {
 int np=1;
 #ifdef MPI_VERSION
-np=MPI::COMM_WORLD.Get_size();
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 #endif
 std::vector<MMSP::vector<int> > local_seeds; // blank for now
 std::vector<std::vector<MMSP::vector<int> > > seeds;
@@ -583,7 +587,7 @@ template<int dim, typename T>
 void tessellate(MMSP::grid<dim, sparse<T> >& grid, const std::string& seed_filename) {
 int np=1;
 #ifdef MPI_VERSION
-np=MPI::COMM_WORLD.Get_size();
+MPI_Comm_size(MPI_COMM_WORLD, &np);
 #endif
 std::vector<MMSP::vector<int> > local_seeds; // blank for now
 std::vector<std::vector<MMSP::vector<int> > > seeds;
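
The seed exchange in tessellate() above (and in seedswap() in generators.cpp) is a standard variable-length all-gather: each rank announces its buffer length, displacements are built by an exclusive prefix sum, and MPI_Allgatherv concatenates every rank's buffer on every rank. A standalone sketch of that pattern with the C bindings follows; the function name allgather_variable is made up for illustration and is not an MMSP routine.

    // allgather_variable -- illustrative sketch of the exchange pattern above.
    // Assumes every rank contributes at least one value, as the seed lists here do.
    #include <mpi.h>
    #include <vector>

    void allgather_variable(std::vector<int>& mine, std::vector<int>& all) {
        int np = 1;
        MPI_Comm_size(MPI_COMM_WORLD, &np);

        // 1. Every rank announces how many ints it will contribute.
        int send_size = mine.size();
        std::vector<int> sizes(np, 0);
        MPI_Allgather(&send_size, 1, MPI_INT, &sizes[0], 1, MPI_INT, MPI_COMM_WORLD);

        // 2. Displacements are the exclusive prefix sum of the sizes.
        std::vector<int> offsets(np, 0);
        for (int i = 1; i < np; ++i)
            offsets[i] = offsets[i-1] + sizes[i-1];

        // 3. Concatenate all buffers into one block, visible on every rank.
        all.resize(offsets[np-1] + sizes[np-1]);
        MPI_Allgatherv(&mine[0], send_size, MPI_INT,
                       &all[0], &sizes[0], &offsets[0], MPI_INT, MPI_COMM_WORLD);
    }

The extra trailing MPI_Comm argument is the only signature change relative to the removed MPI::COMM_WORLD.Allgather and Allgatherv calls.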

doc/MMSP.chapter3.tex

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ \subsection{The {\tt Hello MMSP!} program}
 MMSP::Finalize();
 \end{verbatim}
 \end{shadebox}
-What do these lines do? For single processor programs, they do absolutely nothing -- they could actually be removed without any consequences. However, for programs that use the message passing interface (MPI), they act as wrappers for the similarly named {\tt MPI::Init} and {\tt MPI::Finalize} commands. It's useful to include them here because they'll allow us to write programs that may be compiled for both single or multiple processor environments.
+What do these lines do? For single processor programs, they do absolutely nothing -- they could actually be removed without any consequences. However, for programs that use the message passing interface (MPI), they act as wrappers for the similarly named {\tt MPI\_Init} and {\tt MPI\_Finalize} commands. It's useful to include them here because they'll allow us to write programs that may be compiled for both single or multiple processor environments.
 
 Programmers familiar with {\tt c++} will notice that there's obviously some \MMSP\ namespace being used here. For those less familiar, namespaces are a somewhat recent addition to {\tt c++} that are used as a means of avoiding naming conflicts. We can avoid using namespace resolution so frequently if we use an appropriate {\tt using} statement, i.e.
 \begin{shadebox}
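
The manual's point, that MMSP::Init and MMSP::Finalize are thin wrappers which do nothing in serial builds, can be pictured roughly as below. This is a sketch of the idea only, assuming MPI_VERSION is supplied by <mpi.h> in parallel builds (MMSP's own headers include it); it is not the verbatim MMSP implementation, whose exact signatures may differ.

    // Sketch only: a wrapper in the spirit of MMSP::Init/Finalize.
    // In parallel builds <mpi.h> must already be included, so that
    // MPI_VERSION and the MPI_* calls are visible here.
    namespace MMSP {

    void Init(int& argc, char**& argv) {
    #ifdef MPI_VERSION
        MPI_Init(&argc, &argv);   // parallel build: start MPI
    #endif
        // serial build: nothing to do
    }

    void Finalize() {
    #ifdef MPI_VERSION
        MPI_Finalize();           // parallel build: shut MPI down
    #endif
    }

    } // namespace MMSP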
