#ifndef DISTNTF_DISTNTFMPICOMM_HPP_
#define DISTNTF_DISTNTFMPICOMM_HPP_

#include <mpi.h>

#include <vector>

// The INFO/ERR logging macros, the UVEC typedef, and the Armadillo headers
// come from the project's common utilities (includes elided in this excerpt).

// Number of dimensions of the cartesian processor grid.
#define MPI_CART_DIMS m_proc_grids.n_rows

class NTFMPICommunicator {
 private:
  unsigned int m_global_rank;  // rank in MPI_COMM_WORLD
  unsigned int m_num_procs;    // total number of MPI processes
  UVEC m_proc_grids;           // processor grid dimensions, one entry per mode
  MPI_Comm m_cart_comm;        // nD cartesian communicator over all processes
  MPI_Comm *m_fiber_comm;      // one fiber communicator per mode
  MPI_Comm *m_slice_comm;      // one slice communicator per mode
  std::vector<int> m_coords;   // cartesian coordinates of this process
  // Per-mode rank/size bookkeeping. Only the indexing of these members is
  // visible in this excerpt; their type is assumed to match proc_grids().
  UVEC m_slice_ranks;
  UVEC m_fiber_ranks;
  UVEC m_slice_sizes;
  // Post-setup diagnostics (the enclosing member function's signature is
  // elided in this excerpt): report the grid and the size of every
  // slice/fiber communicator.
    INFO << "successfully setup MPI communicators" << std::endl;
    INFO << "size=" << size() << std::endl;
    INFO << "processor grid size::" << m_proc_grids;
    int slice_size;
    MPI_Comm current_slice_comm;
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      current_slice_comm = this->m_slice_comm[i];
      MPI_Comm_size(current_slice_comm, &slice_size);
      INFO << "Numprocs in slice " << i << "::" << slice_size << std::endl;
    }
    int fiber_size;
    MPI_Comm current_fiber_comm;
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      current_fiber_comm = this->m_fiber_comm[i];
      MPI_Comm_size(current_fiber_comm, &fiber_size);
      INFO << "Numprocs in fiber " << i << "::" << fiber_size << std::endl;
    }
    // Copy the coordinates into a printable buffer (its declaration is
    // elided in this excerpt) and report each rank's view of the grid.
    for (unsigned int ii = 0; ii < MPI_CART_DIMS; ii++) {
      cooprint[ii] = m_coords[ii];
    }
    MPI_Barrier(MPI_COMM_WORLD);
    for (int i = 0; i < size(); i++) {
      INFO << "slice ranks of rank::" << m_global_rank
           << "::" << m_slice_ranks << std::endl;
      INFO << "fiber ranks of rank::" << m_global_rank
           << "::" << m_fiber_ranks << std::endl;
      INFO << "coordinates::" << m_global_rank << "::" << cooprint
           << std::endl;
      MPI_Barrier(MPI_COMM_WORLD);
    }
 public:
  // Constructor for setting up the nD grid communicators.
  NTFMPICommunicator(int argc, char *argv[], const UVEC &i_dims)
      : m_proc_grids(i_dims) {
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, reinterpret_cast<int *>(&m_global_rank));
    MPI_Comm_size(MPI_COMM_WORLD, reinterpret_cast<int *>(&m_num_procs));
    if (m_num_procs != arma::prod(m_proc_grids)) {
      ERR << "number of MPI processes and process grid size do not match";
      MPI_Barrier(MPI_COMM_WORLD);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
    // Build the periodic nD cartesian grid over all MPI processes
    // (the declarations of periods and reorder are elided in this excerpt).
    std::vector<int> m_proc_grids_vec =
        arma::conv_to<std::vector<int>>::from(m_proc_grids);
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) periods[i] = 1;
    MPI_Cart_create(MPI_COMM_WORLD, MPI_CART_DIMS, &m_proc_grids_vec[0],
                    &periods[0], reorder, &m_cart_comm);
    // The allocation of m_slice_comm, m_fiber_comm, and remainDims is elided
    // in this excerpt. A slice communicator for mode i keeps every grid
    // dimension except i; a fiber communicator keeps only dimension i.
    for (unsigned int i = 0; i < remainDims.size(); i++) remainDims[i] = 1;
    int current_slice_rank;
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      remainDims[i] = 0;  // drop dimension i for this mode's slice
      MPI_Cart_sub(m_cart_comm, &remainDims[0], &(m_slice_comm[i]));
      remainDims[i] = 1;  // restore before the next mode
      MPI_Comm_rank(m_slice_comm[i], &current_slice_rank);
      m_slice_ranks[i] = current_slice_rank;
    }
    for (unsigned int i = 0; i < remainDims.size(); i++) remainDims[i] = 0;
    int current_fiber_rank;
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      remainDims[i] = 1;  // keep only dimension i for this mode's fiber
      MPI_Cart_sub(m_cart_comm, &remainDims[0], &(m_fiber_comm[i]));
      remainDims[i] = 0;  // reset before the next mode
      MPI_Comm_rank(m_fiber_comm[i], &current_fiber_rank);
      m_fiber_ranks[i] = current_fiber_rank;
    }
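    // Worked example (added for clarity, not part of the original header):
    // on a 2 x 3 x 2 grid of 12 processes, the mode-1 slice communicator is
    // built with remainDims = {1, 0, 1}, giving 3 slices of 2*2 = 4 processes
    // each (matching m_slice_sizes[1] = 12 / 3 = 4 below), while the mode-1
    // fiber communicator uses remainDims = {0, 1, 0}, giving groups of 3.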
    // Cache this process's grid coordinates and the per-mode slice sizes.
    MPI_Cart_coords(m_cart_comm, m_global_rank, MPI_CART_DIMS, &m_coords[0]);
    for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
      m_slice_sizes[i] = m_num_procs / m_proc_grids[i];
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
  // Free the per-mode communicators (only if MPI has not already been
  // finalized) and release the arrays.
  ~NTFMPICommunicator() {
    int finalized;
    MPI_Finalized(&finalized);
    if (!finalized) {
      for (unsigned int i = 0; i < MPI_CART_DIMS; i++) {
        MPI_Comm_free(&m_fiber_comm[i]);
        MPI_Comm_free(&m_slice_comm[i]);
      }
    }
    delete[] m_fiber_comm;
    delete[] m_slice_comm;
  }
  // Returns the nD cartesian communicator.
  const MPI_Comm &cart_comm() const { return m_cart_comm; }

  // Writes this process's cartesian coordinates into o_c.
  void coordinates(int *o_c) const {
    MPI_Cart_coords(m_cart_comm, m_global_rank, MPI_CART_DIMS, o_c);
  }

  // Returns the fiber communicator for mode i.
  const MPI_Comm &fiber(const int i) const { return m_fiber_comm[i]; }

  // Returns the slice communicator for mode i.
  const MPI_Comm &slice(const int i) const { return m_slice_comm[i]; }

  // Returns the rank of the process at the given cartesian coordinates.
  int rank(const int *i_coords) const {
    int my_rank;
    MPI_Cart_rank(m_cart_comm, i_coords, &my_rank);
    return my_rank;
  }

  // Returns the total number of MPI processes.
  int size() const { return m_num_procs; }

  // Returns the number of processes in the slice communicator of mode i_d.
  int size(const int i_d) const {
    int n_procs;
    MPI_Comm_size(m_slice_comm[i_d], &n_procs);
    return n_procs;
  }

  // Returns the global rank.
  int rank() const { return m_global_rank; }

  // Returns the number of slices along a mode (the grid size for that mode).
  int num_slices(int mode) const { return m_proc_grids[mode]; }

  // Returns which slice of the given mode this process belongs to.
  int slice_num(int mode) const { return m_coords[mode]; }

  // Returns the number of processes in each slice of the given mode.
  int slice_size(int mode) const { return m_slice_sizes[mode]; }

  // Other accessors declared in the full header (bodies elided in this
  // excerpt): UVEC proc_grids() const, std::vector<int> coordinates() const,
  // int fiber_rank(int i) const, and int slice_rank(int i) const.
  // Returns true only for processes whose coordinates are zero in every
  // mode other than the given one.
  bool isparticipating(unsigned int mode) {
    size_t num_modes = this->m_proc_grids.n_rows;
    for (unsigned int i = 0; i < num_modes; i++) {
      if (i != mode && this->m_coords[i] != 0) {
        return false;
      }
    }
    return true;
  }
};  // class NTFMPICommunicator
#endif  // DISTNTF_DISTNTFMPICOMM_HPP_
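// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original header).
// It assumes UVEC is the project's Armadillo unsigned-vector typedef and that
// the program is launched with as many ranks as the grid has cells, e.g.
// mpirun -np 8 for the 2 x 2 x 2 grid below.
// ---------------------------------------------------------------------------
#include <iostream>

int main(int argc, char *argv[]) {
  UVEC grid(3);
  grid(0) = 2;
  grid(1) = 2;
  grid(2) = 2;  // 2 x 2 x 2 processor grid, so 8 MPI ranks are expected
  NTFMPICommunicator comm(argc, argv, grid);
  for (int m = 0; m < 3; m++) {
    // With this grid each mode has 2 slices of 4 ranks and fibers of 2 ranks.
    std::cout << "rank " << comm.rank() << " mode " << m
              << " slice " << comm.slice_num(m)
              << " slice size " << comm.slice_size(m) << std::endl;
  }
  MPI_Finalize();
  return 0;
}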