Skip to content
Snippets Groups Projects
Commit 6fba1f29 authored by Thomas Jahns's avatar Thomas Jahns :cartwheel:
Browse files

Integrate refactored support function with only caller.

parent 9983926d
No related branches found
No related tags found
No related merge requests found
......@@ -80,51 +80,6 @@ void xt_exchanger_a_exchange(Xt_exchanger exchanger,
exchanger->vtable->a_exchange(exchanger, src_data, dst_data, request);
}
/* Map a rank onto a sort key such that ranks less than or equal to
 * the local rank (comm_rank) come after all higher ranks, by shifting
 * them up by the communicator size.  Used to stagger communication
 * partners and reduce network congestion. */
static inline int
adjusted_rank(int r, int comm_rank, int comm_size)
{
  if (r <= comm_rank)
    return r + comm_size;
  return r;
}
/* Instantiate a type-specific quicksort (xt_quicksort_redist_msg) for
 * struct Xt_redist_msg via the xt_quicksort_base.h template.  The
 * comparison macros order messages by adjusted_rank, i.e. by distance
 * of the partner rank "after" the local rank in a circular sense, so
 * that communication partners are staggered across processes. */
#define XT_SORTFUNC_DECL static
#define SORT_TYPE struct Xt_redist_msg
#define SORT_TYPE_SUFFIX redist_msg
/* Strict and non-strict less-than on the congestion-avoiding key;
 * comm_rank/comm_size are threaded through as extra sort arguments. */
#define SORT_TYPE_CMP_LT(u,v,i,j) \
(adjusted_rank((u).rank, comm_rank, comm_size) \
< adjusted_rank((v).rank, comm_rank, comm_size))
#define SORT_TYPE_CMP_LE(u,v,i,j) \
(adjusted_rank((u).rank, comm_rank, comm_size) \
<= adjusted_rank((v).rank, comm_rank, comm_size))
/* Equality needs no adjustment: the shift is injective on ranks. */
#define SORT_TYPE_CMP_EQ(u,v,i,j) (((u).rank) == ((v).rank))
#define XT_SORT_EXTRA_ARGS_DECL , int comm_rank, int comm_size
#define XT_SORT_EXTRA_ARGS_PASS , comm_rank, comm_size
#define XT_SORT_VECSWAP_EXTRA_ARGS_DECL
#define XT_SORT_VECSWAP_EXTRA_ARGS_PASS
#include "xt_quicksort_base.h"
/* Reorder the n messages in msgs to reduce network congestion.
 * Messages are sorted by partner rank, where ranks at or below the
 * local rank are shifted up by the communicator size first, so each
 * process starts its exchanges with a different partner.
 *
 * example: process 5 communicates with processes 9, 5, 2, 6, 1
 *   1. shift by comm_size(10): 9, 15, 12, 6, 11
 *   2. sort the keys:          6, 9, 11, 12, 15
 *      -> resulting rank order: 6, 9, 1, 2, 5
 */
void xt_exchanger_internal_optimize(size_t n, struct Xt_redist_msg *msgs,
                                    MPI_Comm comm) {
  int rank, size, is_intercomm;
  xt_mpi_call(MPI_Comm_rank(comm, &rank), comm);
  xt_mpi_call(MPI_Comm_test_inter(comm, &is_intercomm), comm);
  /* for an inter-communicator the partners live in the remote group */
  if (is_intercomm)
    xt_mpi_call(MPI_Comm_remote_size(comm, &size), comm);
  else
    xt_mpi_call(MPI_Comm_size(comm, &size), comm);
  xt_quicksort_redist_msg(msgs, n, rank, size);
}
int
xt_exchanger_get_msg_ranks(Xt_exchanger exchanger,
enum xt_msg_direction direction,
......
......@@ -116,19 +116,6 @@ xt_exchanger_copy(Xt_exchanger orig, MPI_Comm new_comm, int new_tag_offset);
PPM_DSO_INTERNAL void
xt_exchanger_delete(Xt_exchanger);
/**
* Support routine, that reorders a number of provided messages, such that
* network congestion is potentially reduced.
* @param[in] n number of messages
* @param[in,out] msgs messages to be reordered
* @param[in] comm communicator associated to the messages
* @remark The first data element in the messages needs to be an integer
* containing the rank.
*/
PPM_DSO_INTERNAL void
xt_exchanger_internal_optimize(size_t n, struct Xt_redist_msg *msgs,
MPI_Comm comm);
/**
 * gets a copy of the MPI_Datatype used for a specified message
* @param[in] exchanger exchanger object
......
......@@ -117,6 +117,29 @@ xt_exchanger_simple_base_alloc(size_t nmsg)
return exchanger;
}
/* Compute a congestion-avoiding sort key for rank r: ranks up to and
 * including the local rank (comm_rank) are shifted past the end of
 * the rank range by adding comm_size, higher ranks are kept as-is. */
static inline int
adjusted_rank(int r, int comm_rank, int comm_size)
{
  int shift = (r > comm_rank) ? 0 : comm_size;
  return r + shift;
}
/* Instantiate a static quicksort for struct Xt_redist_msg
 * (xt_quicksort_redist_msg) from the xt_quicksort_base.h template.
 * Ordering is by adjusted_rank, which staggers communication partners
 * relative to the local rank to reduce network congestion. */
#define XT_SORTFUNC_DECL static
#define SORT_TYPE struct Xt_redist_msg
#define SORT_TYPE_SUFFIX redist_msg
/* comparisons use the shifted key; comm_rank and comm_size arrive via
 * the extra-argument hooks declared below */
#define SORT_TYPE_CMP_LT(u,v,i,j) \
(adjusted_rank((u).rank, comm_rank, comm_size) \
< adjusted_rank((v).rank, comm_rank, comm_size))
#define SORT_TYPE_CMP_LE(u,v,i,j) \
(adjusted_rank((u).rank, comm_rank, comm_size) \
<= adjusted_rank((v).rank, comm_rank, comm_size))
/* raw rank equality suffices: the shift preserves distinctness */
#define SORT_TYPE_CMP_EQ(u,v,i,j) (((u).rank) == ((v).rank))
#define XT_SORT_EXTRA_ARGS_DECL , int comm_rank, int comm_size
#define XT_SORT_EXTRA_ARGS_PASS , comm_rank, comm_size
#define XT_SORT_VECSWAP_EXTRA_ARGS_DECL
#define XT_SORT_VECSWAP_EXTRA_ARGS_PASS
#include "xt_quicksort_base.h"
Xt_exchanger
xt_exchanger_simple_base_new(int nsend, int nrecv,
const struct Xt_redist_msg *send_msgs,
......@@ -154,11 +177,30 @@ xt_exchanger_simple_base_new(int nsend, int nrecv,
exchanger->s_func = s_func;
exchanger->a_func = a_func;
xt_exchanger_internal_optimize((size_t)nsend, exchanger->msgs,
comm);
xt_exchanger_internal_optimize((size_t)nrecv, exchanger->msgs + nsend,
comm);
{
int comm_size, comm_rank, is_inter;
xt_mpi_call(MPI_Comm_rank(comm, &comm_rank), comm);
xt_mpi_call(MPI_Comm_test_inter(comm, &is_inter), comm);
int (*get_comm_size)(MPI_Comm, int *)
= is_inter ? MPI_Comm_remote_size : MPI_Comm_size;
xt_mpi_call(get_comm_size(comm, &comm_size), comm);
/* In order to avoid congestion of messages, the order of send and
* receive messages is changed. This is done by sorting the
* messages according to the rank of the respective message
* partner. Before the sorting to ranks that are smaller or equal
* to the local rank the size of the communicator is added.
*
* example: process 5 is supposed to communicate with
* processes: 9, 5, 2, 6, 1
* 1. add comm_size(10): 9, 15, 12, 6, 11
* 2. sort: 6, 9, 11, 12, 15
 * 3. subtract comm_size again -> final order: 6, 9, 1, 2, 5
*/
xt_quicksort_redist_msg(exchanger->msgs, (size_t)nsend,
comm_rank, comm_size);
xt_quicksort_redist_msg(exchanger->msgs + (size_t)nsend, (size_t)nrecv,
comm_rank, comm_size);
}
return (Xt_exchanger)exchanger;
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment