planc
Parallel Low-rank Approximation with Non-negativity Constraints
distntfmpicomm.hpp
/* Copyright 2016 Ramakrishnan Kannan */

#ifndef DISTNTF_DISTNTFMPICOMM_HPP_
#define DISTNTF_DISTNTFMPICOMM_HPP_

#define MPI_CART_DIMS m_proc_grids.n_rows

#include <mpi.h>
#include <vector>
namespace planc {

class NTFMPICommunicator {
 private:
  unsigned int m_global_rank;
  unsigned int m_num_procs;
  UVEC m_proc_grids;
  MPI_Comm m_cart_comm;
  // one fiber communicator per tensor mode
  MPI_Comm *m_fiber_comm;
  // one slice communicator per tensor mode
  MPI_Comm *m_slice_comm;
  UVEC m_fiber_ranks;
  UVEC m_slice_ranks;
  UVEC m_slice_sizes;
  std::vector<int> m_coords;

 public:
  void printConfig() {
    if (rank() == 0) {
      INFO << "successfully setup MPI communicators" << std::endl;
      INFO << "size=" << size() << std::endl;
      INFO << "processor grid size::" << m_proc_grids;
      int slice_size;
      MPI_Comm current_slice_comm;
      for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
        current_slice_comm = this->m_slice_comm[i];
        MPI_Comm_size(current_slice_comm, &slice_size);
        INFO << "Numprocs in slice " << i << "::" << slice_size << std::endl;
      }
      int fiber_size;
      MPI_Comm current_fiber_comm;
      for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
        current_fiber_comm = this->m_fiber_comm[i];
        MPI_Comm_size(current_fiber_comm, &fiber_size);
        INFO << "Numprocs in fiber " << i << "::" << fiber_size << std::endl;
      }
    }
    UVEC cooprint(MPI_CART_DIMS);
    for (unsigned int ii = 0; ii < MPI_CART_DIMS; ii++) {
      cooprint[ii] = m_coords[ii];
    }

    MPI_Barrier(MPI_COMM_WORLD);
    for (int i = 0; i < size(); i++) {
      if (i == rank()) {
        INFO << "slice ranks of rank::" << m_global_rank
             << "::" << m_slice_ranks << std::endl;
        INFO << "fiber ranks of rank::" << m_global_rank
             << "::" << m_fiber_ranks << std::endl;
        INFO << "coordinates::" << m_global_rank << "::" << cooprint
             << std::endl;
      }
      MPI_Barrier(MPI_COMM_WORLD);
    }
  }

  /**
   * Constructor for setting up the nD grid communicators.
   */
  NTFMPICommunicator(int argc, char *argv[], const UVEC &i_dims)
      : m_proc_grids(i_dims) {
    // Get the number of MPI processes
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, reinterpret_cast<int *>(&m_global_rank));
    MPI_Comm_size(MPI_COMM_WORLD, reinterpret_cast<int *>(&m_num_procs));
    if (m_num_procs != arma::prod(m_proc_grids)) {
      ERR << "number of MPI processes and process grid size do not match";
      MPI_Barrier(MPI_COMM_WORLD);
      MPI_Abort(MPI_COMM_WORLD, 1);
      exit(-1);
    }
    // Create a virtual topology MPI communicator
    std::vector<int> periods(MPI_CART_DIMS);
    std::vector<int> m_proc_grids_vec =
        arma::conv_to<std::vector<int>>::from(m_proc_grids);
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) periods[i] = 1;
    int reorder = 0;
    MPI_Cart_create(MPI_COMM_WORLD, MPI_CART_DIMS, &m_proc_grids_vec[0],
                    &periods[0], reorder, &m_cart_comm);

    // Allocate memory for subcommunicators
    m_fiber_comm = new MPI_Comm[MPI_CART_DIMS];
    m_slice_comm = new MPI_Comm[MPI_CART_DIMS];

    // Get the subcommunicators. MPI_Cart_sub keeps dimension i in the
    // subgrid when remainDims[i] is 1 and drops it when remainDims[i] is 0.
    std::vector<int> remainDims(MPI_CART_DIMS);
    for (unsigned int i = 0; i < remainDims.size(); i++) remainDims[i] = 1;
    // Initialize the slice ranks: the mode-i slice communicator spans every
    // dimension except i.
    m_slice_ranks = arma::zeros<UVEC>(MPI_CART_DIMS);
    int current_slice_rank;
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      remainDims[i] = 0;
      MPI_Cart_sub(m_cart_comm, &remainDims[0], &(m_slice_comm[i]));
      remainDims[i] = 1;
      MPI_Comm_rank(m_slice_comm[i], &current_slice_rank);
      m_slice_ranks[i] = current_slice_rank;
    }
    for (unsigned int i = 0; i < remainDims.size(); i++) remainDims[i] = 0;
    // Initialize the fiber ranks: the mode-i fiber communicator spans only
    // dimension i.
    m_fiber_ranks = arma::zeros<UVEC>(MPI_CART_DIMS);
    int current_fiber_rank;
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      remainDims[i] = 1;
      MPI_Cart_sub(m_cart_comm, &remainDims[0], &(m_fiber_comm[i]));
      remainDims[i] = 0;
      MPI_Comm_rank(m_fiber_comm[i], &current_fiber_rank);
      m_fiber_ranks[i] = current_fiber_rank;
    }
    // Get the coordinates
    m_coords.resize(MPI_CART_DIMS, 0);
    MPI_Cart_coords(m_cart_comm, m_global_rank, MPI_CART_DIMS, &m_coords[0]);
    // Get the slice sizes (number of processes in each mode's slice)
    m_slice_sizes = arma::zeros<UVEC>(MPI_CART_DIMS);
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      m_slice_sizes[i] = m_num_procs / m_proc_grids[i];
    }
  }
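
  // Example: with 12 MPI ranks and i_dims = {3, 2, 2}, the cartesian grid is
  // 3 x 2 x 2. The mode-i slice communicator then contains 12 / i_dims[i]
  // ranks (4, 6 and 6 for modes 0, 1 and 2), while the mode-i fiber
  // communicator contains i_dims[i] ranks (3, 2 and 2).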

  ~NTFMPICommunicator() {
    MPI_Barrier(MPI_COMM_WORLD);
    int finalized;
    MPI_Finalized(&finalized);

    if (!finalized) {
      for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
        MPI_Comm_free(&m_fiber_comm[i]);
        MPI_Comm_free(&m_slice_comm[i]);
      }
    }
    delete[] m_fiber_comm;
    delete[] m_slice_comm;
  }

  const MPI_Comm &cart_comm() const { return m_cart_comm; }
  void coordinates(int *o_c) const {
    MPI_Cart_coords(m_cart_comm, m_global_rank, MPI_CART_DIMS, o_c);
  }
  std::vector<int> coordinates() const { return this->m_coords; }
  /// Returns the fiber communicator for mode i.
  const MPI_Comm &fiber(const int i) const { return m_fiber_comm[i]; }
  /// Returns the slice communicator for mode i.
  const MPI_Comm &slice(const int i) const { return m_slice_comm[i]; }
  /// Returns the rank of the current MPI process given its cartesian
  /// coordinates.
  int rank(const int *i_coords) const {
    int my_rank;
    MPI_Cart_rank(m_cart_comm, i_coords, &my_rank);
    return my_rank;
  }
  /// Returns the total number of MPI processes.
  int size() const { return m_num_procs; }
  int size(const int i_d) const {
    int n_procs;
    MPI_Comm_size(m_slice_comm[i_d], &n_procs);
    return n_procs;
  }
  /// Returns the fiber rank on a particular fiber grid.
  int fiber_rank(int i) const { return m_fiber_ranks[i]; }
  /// Returns the slice rank on a particular slice grid.
  int slice_rank(int i) const { return m_slice_ranks[i]; }
  /// Returns the process grid for which the communicators are set up.
  UVEC proc_grids() const { return this->m_proc_grids; }
  /// Returns the global rank.
  int rank() const { return m_global_rank; }
  int num_slices(int mode) const { return m_proc_grids[mode]; }
  int slice_num(int mode) const { return m_coords[mode]; }
  int slice_size(int mode) const { return m_slice_sizes[mode]; }

  /**
   * Returns true only for those processes whose cartesian coordinates are
   * zero in every mode other than the given mode, i.e. the processes lying
   * on the given mode's axis of the grid.
   */
  bool isparticipating(unsigned int mode) {
    bool rc = true;
    size_t num_modes = this->m_proc_grids.n_rows;
    for (unsigned int i = 0; i < num_modes; i++) {
      if (i != mode && this->m_coords[i] != 0) {
        rc = false;
        break;
      }
    }
    return rc;
  }
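
  // For instance, in a 2 x 3 x 2 grid, isparticipating(1) holds only for the
  // three processes with coordinates (0, 0, 0), (0, 1, 0) and (0, 2, 0).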
};

}  // namespace planc

#endif  // DISTNTF_DISTNTFMPICOMM_HPP_
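
A minimal usage sketch is shown below for orientation. It is not part of the header: the driver file name and main() are illustrative, the include paths are assumptions, UVEC, INFO and ERR are taken to come from PLANC's utils.h, and the program must be launched with as many MPI ranks as the product of the grid dimensions (here 8 for a 2 x 2 x 2 grid).

// example_ntf_comm.cpp -- illustrative driver, not part of PLANC
#include <mpi.h>
#include "utils.h"            // assumed location of the UVEC/INFO/ERR macros
#include "distntfmpicomm.hpp" // assumed include path for this header

int main(int argc, char *argv[]) {
  {
    UVEC dims(3);
    dims(0) = 2; dims(1) = 2; dims(2) = 2;  // 2 x 2 x 2 grid; run with -np 8
    planc::NTFMPICommunicator mpicomm(argc, argv, dims);  // calls MPI_Init
    mpicomm.printConfig();
    if (mpicomm.isparticipating(0)) {
      // only ranks on the mode-0 axis of the grid reach this branch
    }
  }  // destructor frees the per-mode fiber/slice communicators here
  MPI_Finalize();  // the communicator does not finalize MPI itself
  return 0;
}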