root/ompi/mca/coll/libnbc/nbc_ialltoall.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. NBC_Alltoall_args_compare
  2. nbc_alltoall_init
  3. ompi_coll_libnbc_ialltoall
  4. nbc_alltoall_inter_init
  5. ompi_coll_libnbc_ialltoall_inter
  6. a2a_sched_pairwise
  7. a2a_sched_linear
  8. a2a_sched_diss
  9. a2a_sched_inplace
  10. ompi_coll_libnbc_alltoall_init
  11. ompi_coll_libnbc_alltoall_inter_init

   1 /* -*- Mode: C; c-basic-offset:2 ; indent-tabs-mode:nil -*- */
   2 /*
   3  * Copyright (c) 2006      The Trustees of Indiana University and Indiana
   4  *                         University Research and Technology
   5  *                         Corporation.  All rights reserved.
   6  * Copyright (c) 2006      The Technical University of Chemnitz. All
   7  *                         rights reserved.
   8  * Copyright (c) 2013-2015 Los Alamos National Security, LLC. All rights
   9  *                         reserved.
  10  * Copyright (c) 2014      NVIDIA Corporation.  All rights reserved.
  11  * Copyright (c) 2014-2018 Research Organization for Information Science
  12  *                         and Technology (RIST).  All rights reserved.
  13  * Copyright (c) 2017      IBM Corporation.  All rights reserved.
  14  * Copyright (c) 2018      FUJITSU LIMITED.  All rights reserved.
  15  * $COPYRIGHT$
  16  *
  17  * Additional copyrights may follow
  18  *
  19  * Author(s): Torsten Hoefler <htor@cs.indiana.edu>
  20  *
  21  */
  22 #include "nbc_internal.h"
  23 
  24 static inline int a2a_sched_linear(int rank, int p, MPI_Aint sndext, MPI_Aint rcvext, NBC_Schedule *schedule,
  25                                    const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf,
  26                                    int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
  27 static inline int a2a_sched_pairwise(int rank, int p, MPI_Aint sndext, MPI_Aint rcvext, NBC_Schedule *schedule,
  28                                      const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf,
  29                                      int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
  30 static inline int a2a_sched_diss(int rank, int p, MPI_Aint sndext, MPI_Aint rcvext, NBC_Schedule* schedule,
  31                                  const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf,
  32                                  int recvcount, MPI_Datatype recvtype, MPI_Comm comm, void* tmpbuf);
  33 static inline int a2a_sched_inplace(int rank, int p, NBC_Schedule* schedule, void* buf, int count,
  34                                    MPI_Datatype type, MPI_Aint ext, ptrdiff_t gap, MPI_Comm comm);
  35 
  36 #ifdef NBC_CACHE_SCHEDULE
  37 /* tree comparison function for schedule cache */
  38 int NBC_Alltoall_args_compare(NBC_Alltoall_args *a, NBC_Alltoall_args *b, void *param) {
  39   if ((a->sendbuf == b->sendbuf) &&
  40       (a->sendcount == b->sendcount) &&
  41       (a->sendtype == b->sendtype) &&
  42       (a->recvbuf == b->recvbuf) &&
  43       (a->recvcount == b->recvcount) &&
  44       (a->recvtype == b->recvtype)) {
  45     return 0;
  46   }
  47 
  48   if( a->sendbuf < b->sendbuf ) {
  49     return -1;
  50   }
  51 
  52   return 1;
  53 }
  54 #endif
  55 
  56 /* simple linear MPI_Ialltoall the (simple) algorithm just sends to all nodes */
  57 static int nbc_alltoall_init(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
  58                              MPI_Datatype recvtype, struct ompi_communicator_t *comm, ompi_request_t ** request,
  59                              struct mca_coll_base_module_2_3_0_t *module, bool persistent)
  60 {
  61   int rank, p, res;
  62   MPI_Aint datasize;
  63   size_t a2asize, sndsize;
  64   NBC_Schedule *schedule;
  65   MPI_Aint rcvext, sndext;
  66 #ifdef NBC_CACHE_SCHEDULE
  67   NBC_Alltoall_args *args, *found, search;
  68 #endif
  69   char *rbuf, *sbuf, inplace;
  70   enum {NBC_A2A_LINEAR, NBC_A2A_PAIRWISE, NBC_A2A_DISS, NBC_A2A_INPLACE} alg;
  71   void *tmpbuf = NULL;
  72   ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
  73   ptrdiff_t span, gap;
  74 
  75   NBC_IN_PLACE(sendbuf, recvbuf, inplace);
  76 
  77   rank = ompi_comm_rank (comm);
  78   p = ompi_comm_size (comm);
  79 
  80   res = ompi_datatype_type_extent(sendtype, &sndext);
  81   if (MPI_SUCCESS != res) {
  82     NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
  83     return res;
  84   }
  85 
  86   res = ompi_datatype_type_extent(recvtype, &rcvext);
  87   if (MPI_SUCCESS != res) {
  88     NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
  89     return res;
  90   }
  91 
  92   res = ompi_datatype_type_size(sendtype, &sndsize);
  93   if (MPI_SUCCESS != res) {
  94     NBC_Error("MPI Error in ompi_datatype_type_size() (%i)", res);
  95     return res;
  96   }
  97 
  98   /* algorithm selection */
  99   a2asize = sndsize * sendcount * p;
 100   /* this number is optimized for TCP on odin.cs.indiana.edu */
 101   if (inplace) {
 102     alg = NBC_A2A_INPLACE;
 103   } else if((p <= 8) && ((a2asize < 1<<17) || (sndsize*sendcount < 1<<12))) {
 104     /* just send as fast as we can if we have less than 8 peers, if the
 105      * total communicated size is smaller than 1<<17 *and* if we don't
 106      * have eager messages (msgsize < 1<<13) */
 107     alg = NBC_A2A_LINEAR;
 108   } else if(a2asize < (1<<12)*(unsigned int)p) {
 109     /*alg = NBC_A2A_DISS;*/
 110     alg = NBC_A2A_LINEAR;
 111   } else
 112     alg = NBC_A2A_LINEAR; /*NBC_A2A_PAIRWISE;*/
 113 
 114   /* allocate temp buffer if we need one */
 115   if (alg == NBC_A2A_INPLACE) {
 116     span = opal_datatype_span(&recvtype->super, recvcount, &gap);
 117     tmpbuf = malloc(span);
 118     if (OPAL_UNLIKELY(NULL == tmpbuf)) {
 119       return OMPI_ERR_OUT_OF_RESOURCE;
 120     }
 121   } else if (alg == NBC_A2A_DISS) {
 122     /* persistent operation is not supported currently for this algorithm */
 123     assert(! persistent);
 124 
 125     if(NBC_Type_intrinsic(sendtype)) {
 126       datasize = sndext * sendcount;
 127     } else {
 128       res = ompi_datatype_pack_external_size("external32", sendcount, sendtype, &datasize);
 129       if (MPI_SUCCESS != res) {
 130         NBC_Error("MPI Error in ompi_datatype_pack_external_size() (%i)", res);
 131         return res;
 132       }
 133     }
 134 
 135     /* allocate temporary buffers */
 136     if ((p & 1) == 0) {
 137       tmpbuf = malloc (datasize * p * 2);
 138     } else {
 139       /* we cannot divide p by two, so alloc more to be safe ... */
 140       tmpbuf = malloc (datasize * (p / 2 + 1) * 2 * 2);
 141     }
 142 
 143     if (OPAL_UNLIKELY(NULL == tmpbuf)) {
 144       return OMPI_ERR_OUT_OF_RESOURCE;
 145     }
 146 
 147     /* phase 1 - rotate n data blocks upwards into the tmpbuffer */
 148 #if OPAL_CUDA_SUPPORT
 149     if (NBC_Type_intrinsic(sendtype) && !(opal_cuda_check_bufs((char *)sendbuf, (char *)recvbuf))) {
 150 #else
 151     if (NBC_Type_intrinsic(sendtype)) {
 152 #endif /* OPAL_CUDA_SUPPORT */
 153       /* contiguous - just copy (1st copy) */
 154       memcpy (tmpbuf, (char *) sendbuf + datasize * rank, datasize * (p - rank));
 155       if (rank != 0) {
 156         memcpy ((char *) tmpbuf + datasize * (p - rank), sendbuf, datasize * rank);
 157       }
 158     } else {
 159       MPI_Aint pos=0;
 160 
 161       /* non-contiguous - pack */
 162       res = ompi_datatype_pack_external ("external32", (char *) sendbuf + (intptr_t)rank * (intptr_t)sendcount * sndext, (intptr_t)(p - rank) * (intptr_t)sendcount, sendtype, tmpbuf,
 163                       (intptr_t)(p - rank) * datasize, &pos);
 164       if (OPAL_UNLIKELY(MPI_SUCCESS != res)) {
 165         NBC_Error("MPI Error in ompi_datatype_pack_external() (%i)", res);
 166         free(tmpbuf);
 167         return res;
 168       }
 169 
 170       if (rank != 0) {
 171         pos = 0;
 172         res = ompi_datatype_pack_external("external32", sendbuf, (intptr_t)rank * (intptr_t)sendcount, sendtype, (char *) tmpbuf + datasize * (intptr_t)(p - rank),
 173                        rank * datasize, &pos);
 174         if (OPAL_UNLIKELY(MPI_SUCCESS != res)) {
 175           NBC_Error("MPI Error in ompi_datatype_pack_external() (%i)", res);
 176           free(tmpbuf);
 177           return res;
 178         }
 179       }
 180     }
 181   }
 182 
 183 #ifdef NBC_CACHE_SCHEDULE
 184   /* search schedule in communicator specific tree */
 185   search.sendbuf = sendbuf;
 186   search.sendcount = sendcount;
 187   search.sendtype = sendtype;
 188   search.recvbuf = recvbuf;
 189   search.recvcount = recvcount;
 190   search.recvtype = recvtype;
 191   found = (NBC_Alltoall_args *) hb_tree_search ((hb_tree *) libnbc_module->NBC_Dict[NBC_ALLTOALL], &search);
 192   if (NULL == found) {
 193 #endif
 194     /* not found - generate new schedule */
 195     schedule = OBJ_NEW(NBC_Schedule);
 196     if (OPAL_UNLIKELY(NULL == schedule)) {
 197       free(tmpbuf);
 198       return OMPI_ERR_OUT_OF_RESOURCE;
 199     }
 200 
 201     if (!inplace) {
 202       /* copy my data to receive buffer */
 203       rbuf = (char *) recvbuf + (MPI_Aint)rank * (MPI_Aint)recvcount * rcvext;
 204       sbuf = (char *) sendbuf + (MPI_Aint)rank * (MPI_Aint)sendcount * sndext;
 205       res = NBC_Sched_copy (sbuf, false, sendcount, sendtype,
 206                             rbuf, false, recvcount, recvtype, schedule, false);
 207       if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 208         OBJ_RELEASE(schedule);
 209         free(tmpbuf);
 210         return res;
 211       }
 212     }
 213 
 214     switch(alg) {
 215       case NBC_A2A_INPLACE:
 216         res = a2a_sched_inplace(rank, p, schedule, recvbuf, recvcount, recvtype, rcvext, gap, comm);
 217         break;
 218       case NBC_A2A_LINEAR:
 219         res = a2a_sched_linear(rank, p, sndext, rcvext, schedule, sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
 220         break;
 221       case NBC_A2A_DISS:
 222         res = a2a_sched_diss(rank, p, sndext, rcvext, schedule, sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, tmpbuf);
 223         break;
 224       case NBC_A2A_PAIRWISE:
 225         res = a2a_sched_pairwise(rank, p, sndext, rcvext, schedule, sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
 226         break;
 227     }
 228 
 229     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 230       OBJ_RELEASE(schedule);
 231       free(tmpbuf);
 232       return res;
 233     }
 234 
 235     res = NBC_Sched_commit(schedule);
 236     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 237       OBJ_RELEASE(schedule);
 238       free(tmpbuf);
 239       return res;
 240     }
 241 
 242 #ifdef NBC_CACHE_SCHEDULE
 243     /* save schedule to tree */
 244     args = (NBC_Alltoall_args *) malloc (sizeof (args));
 245     if (NULL != args) {
 246       args->sendbuf = sendbuf;
 247       args->sendcount = sendcount;
 248       args->sendtype = sendtype;
 249       args->recvbuf = recvbuf;
 250       args->recvcount = recvcount;
 251       args->recvtype = recvtype;
 252       args->schedule = schedule;
 253       res = hb_tree_insert ((hb_tree *) libnbc_module->NBC_Dict[NBC_ALLTOALL], args, args, 0);
 254       if (0 == res) {
 255         OBJ_RETAIN(schedule);
 256 
 257         /* increase number of elements for A2A */
 258         if (++libnbc_module->NBC_Dict_size[NBC_ALLTOALL] > NBC_SCHED_DICT_UPPER) {
 259           NBC_SchedCache_dictwipe ((hb_tree *) libnbc_module->NBC_Dict[NBC_ALLTOALL],
 260                                    &libnbc_module->NBC_Dict_size[NBC_ALLTOALL]);
 261         }
 262       } else {
 263         NBC_Error("error in dict_insert() (%i)", res);
 264         free (args);
 265       }
 266     }
 267   } else {
 268     /* found schedule */
 269     schedule = found->schedule;
 270     OBJ_RETAIN(schedule);
 271   }
 272 #endif
 273 
 274   res = NBC_Schedule_request(schedule, comm, libnbc_module, persistent, request, tmpbuf);
 275   if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 276     OBJ_RELEASE(schedule);
 277     free(tmpbuf);
 278     return res;
 279   }
 280 
 281   return OMPI_SUCCESS;
 282 }
 283 
 284 int ompi_coll_libnbc_ialltoall(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 285                                MPI_Datatype recvtype, struct ompi_communicator_t *comm, ompi_request_t ** request,
 286                                struct mca_coll_base_module_2_3_0_t *module) {
 287     int res = nbc_alltoall_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
 288                                 comm, request, module, false);
 289     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 290         return res;
 291     }
 292   
 293     res = NBC_Start(*(ompi_coll_libnbc_request_t **)request);
 294     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 295         NBC_Return_handle (*(ompi_coll_libnbc_request_t **)request);
 296         *request = &ompi_request_null.request;
 297         return res;
 298     }
 299 
 300     return OMPI_SUCCESS;
 301 }
 302 
 303 static int nbc_alltoall_inter_init (const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 304                                     MPI_Datatype recvtype, struct ompi_communicator_t *comm, ompi_request_t ** request,
 305                                     struct mca_coll_base_module_2_3_0_t *module, bool persistent)
 306 {
 307   int res, rsize;
 308   MPI_Aint sndext, rcvext;
 309   NBC_Schedule *schedule;
 310   char *rbuf, *sbuf;
 311   ompi_coll_libnbc_module_t *libnbc_module = (ompi_coll_libnbc_module_t*) module;
 312 
 313   rsize = ompi_comm_remote_size (comm);
 314 
 315   res = ompi_datatype_type_extent (sendtype, &sndext);
 316   if (MPI_SUCCESS != res) {
 317     NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 318     return res;
 319   }
 320 
 321   res = ompi_datatype_type_extent (recvtype, &rcvext);
 322   if (MPI_SUCCESS != res) {
 323     NBC_Error("MPI Error in ompi_datatype_type_extent() (%i)", res);
 324     return res;
 325   }
 326 
 327   schedule = OBJ_NEW(NBC_Schedule);
 328   if (OPAL_UNLIKELY(NULL == schedule)) {
 329     return OMPI_ERR_OUT_OF_RESOURCE;
 330   }
 331 
 332   for (int i = 0; i < rsize; i++) {
 333     /* post all sends */
 334     sbuf = (char *) sendbuf + i * sendcount * sndext;
 335     res = NBC_Sched_send (sbuf, false, sendcount, sendtype, i, schedule, false);
 336     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 337       break;
 338     }
 339 
 340     /* post all receives */
 341     rbuf = (char *) recvbuf + i * recvcount * rcvext;
 342     res = NBC_Sched_recv (rbuf, false, recvcount, recvtype, i, schedule, false);
 343     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 344       break;
 345     }
 346   }
 347 
 348   if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 349     OBJ_RELEASE(schedule);
 350     return res;
 351   }
 352 
 353   res = NBC_Sched_commit (schedule);
 354   if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 355     OBJ_RELEASE(schedule);
 356     return res;
 357   }
 358 
 359   res = NBC_Schedule_request(schedule, comm, libnbc_module, persistent, request, NULL);
 360   if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 361     OBJ_RELEASE(schedule);
 362     return res;
 363   }
 364 
 365   return OMPI_SUCCESS;
 366 }
 367 
 368 int ompi_coll_libnbc_ialltoall_inter (const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 369                                       MPI_Datatype recvtype, struct ompi_communicator_t *comm, ompi_request_t ** request,
 370                                       struct mca_coll_base_module_2_3_0_t *module) {
 371     int res = nbc_alltoall_inter_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
 372                                       comm, request, module, false);
 373     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 374         return res;
 375     }
 376   
 377     res = NBC_Start(*(ompi_coll_libnbc_request_t **)request);
 378     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 379         NBC_Return_handle (*(ompi_coll_libnbc_request_t **)request);
 380         *request = &ompi_request_null.request;
 381         return res;
 382     }
 383 
 384     return OMPI_SUCCESS;
 385 }
 386 
 387 static inline int a2a_sched_pairwise(int rank, int p, MPI_Aint sndext, MPI_Aint rcvext, NBC_Schedule* schedule,
 388                                      const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 389                                      MPI_Datatype recvtype, MPI_Comm comm) {
 390   int res;
 391 
 392   if (p < 2) {
 393     return OMPI_SUCCESS;
 394   }
 395 
 396   for (int r = 1 ; r < p ; ++r) {
 397     int sndpeer = (rank + r) % p;
 398     int rcvpeer = (rank - r + p) % p;
 399 
 400     char *rbuf = (char *) recvbuf + rcvpeer * recvcount * rcvext;
 401     res = NBC_Sched_recv (rbuf, false, recvcount, recvtype, rcvpeer, schedule, false);
 402     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 403       return res;
 404     }
 405 
 406     char *sbuf = (char *) sendbuf + sndpeer * sendcount * sndext;
 407     res = NBC_Sched_send (sbuf, false, sendcount, sendtype, sndpeer, schedule, true);
 408     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 409       return res;
 410     }
 411   }
 412 
 413   return OMPI_SUCCESS;
 414 }
 415 
 416 static inline int a2a_sched_linear(int rank, int p, MPI_Aint sndext, MPI_Aint rcvext, NBC_Schedule* schedule,
 417                                    const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 418                                    MPI_Datatype recvtype, MPI_Comm comm) {
 419   int res;
 420 
 421   for (int r = 0 ; r < p ; ++r) {
 422     /* easy algorithm */
 423     if (r == rank) {
 424       continue;
 425     }
 426 
 427     char *rbuf = (char *) recvbuf + (intptr_t)r * (intptr_t)recvcount * rcvext;
 428     res = NBC_Sched_recv (rbuf, false, recvcount, recvtype, r, schedule, false);
 429     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 430       return res;
 431     }
 432 
 433     char *sbuf = (char *) sendbuf + (intptr_t)r * (intptr_t)sendcount * sndext;
 434     res = NBC_Sched_send (sbuf, false, sendcount, sendtype, r, schedule, false);
 435     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 436       return res;
 437     }
 438   }
 439 
 440   return OMPI_SUCCESS;
 441 }
 442 
 443 static inline int a2a_sched_diss(int rank, int p, MPI_Aint sndext, MPI_Aint rcvext, NBC_Schedule* schedule,
 444                                  const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 445                                  MPI_Datatype recvtype, MPI_Comm comm, void* tmpbuf) {
 446   int res, speer, rpeer, virtp;
 447   MPI_Aint datasize, offset;
 448   char *rbuf, *rtmpbuf, *stmpbuf;
 449 
 450   if (p < 2) {
 451     return OMPI_SUCCESS;
 452   }
 453 
 454   if(NBC_Type_intrinsic(sendtype)) {
 455     datasize = sndext*sendcount;
 456   } else {
 457     res = ompi_datatype_pack_external_size("external32", sendcount, sendtype, &datasize);
 458     if (MPI_SUCCESS != res) {
 459       NBC_Error("MPI Error in ompi_datatype_pack_external_size() (%i)", res);
 460       return res;
 461     }
 462   }
 463 
 464   /* allocate temporary buffers */
 465   if ((p & 1) == 0) {
 466     rtmpbuf = (char *)tmpbuf + datasize * p;
 467     stmpbuf = (char *)tmpbuf + datasize * (p + p / 2);
 468   } else {
 469     /* we cannot divide p by two, so alloc more to be safe ... */
 470     virtp = (p / 2 + 1) * 2;
 471     rtmpbuf = (char *)tmpbuf + datasize * p;
 472     stmpbuf = (char *)tmpbuf + datasize * (p + virtp / 2);
 473   }
 474 
 475   /* phase 2 - communicate */
 476   for (int r = 1; r < p; r <<= 1) {
 477     offset = 0;
 478     for (int i = 1 ; i < p; ++i) {
 479       /* test if bit r is set in rank number i */
 480       if (i & r) {
 481         /* copy data to sendbuffer (2nd copy) - could be avoided using iovecs */
 482         /*printf("[%i] round %i: copying element %i to buffer %lu\n", rank, r, i, (unsigned long)(stmpbuf+offset));*/
 483         res = NBC_Sched_copy((void *)(intptr_t)(i * datasize), true, datasize, MPI_BYTE, stmpbuf + offset -
 484                              (intptr_t)tmpbuf, true, datasize, MPI_BYTE, schedule, false);
 485         if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 486           return res;
 487         }
 488         offset += datasize;
 489       }
 490     }
 491 
 492     speer = (rank + r) % p;
 493     /* add p because modulo does not work with negative values */
 494     rpeer = ((rank - r) + p) % p;
 495 
 496     res = NBC_Sched_recv (rtmpbuf - (intptr_t)tmpbuf, true, offset, MPI_BYTE, rpeer, schedule, false);
 497     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 498       return res;
 499     }
 500 
 501     res = NBC_Sched_send (stmpbuf - (intptr_t)tmpbuf, true, offset, MPI_BYTE, speer, schedule, true);
 502     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 503       return res;
 504     }
 505 
 506     /* unpack from buffer */
 507     offset = 0;
 508     for (int i = 1; i < p; ++i) {
 509       /* test if bit r is set in rank number i */
 510       if (i & r) {
 511         /* copy data to tmpbuffer (3rd copy) - could be avoided using iovecs */
 512         res = NBC_Sched_copy (rtmpbuf + offset - (intptr_t)tmpbuf, true, datasize, MPI_BYTE,
 513                               (void *)(intptr_t)(i * datasize), true, datasize, MPI_BYTE, schedule,
 514                               false);
 515         if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 516           return res;
 517         }
 518 
 519         offset += datasize;
 520       }
 521     }
 522   }
 523 
 524   /* phase 3 - reorder - data is now in wrong order in tmpbuf - reorder it into recvbuf */
 525   for (int i = 0 ; i < p; ++i) {
 526     rbuf = (char *) recvbuf + ((rank - i + p) % p) * recvcount * rcvext;
 527     res = NBC_Sched_unpack ((void *)(intptr_t) (i * datasize), true, recvcount, recvtype, rbuf, false, schedule,
 528                             false);
 529     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 530       return res;
 531     }
 532   }
 533 
 534   return OMPI_SUCCESS;
 535 }
 536 
/*
 * In-place alltoall: pairwise block swaps within recvbuf (`buf`), using
 * the request's temporary buffer as staging space.  Offsets passed with
 * the `true` flag are relative to tmpbuf; `(void *)(-gap)` addresses the
 * start of the datatype span inside tmpbuf (gap is the lower-bound
 * offset from opal_datatype_span).  The `true` barrier flags enforce the
 * copy -> exchange -> restore ordering of each swap; do not reorder.
 */
static inline int a2a_sched_inplace(int rank, int p, NBC_Schedule* schedule, void* buf, int count,
                                   MPI_Datatype type, MPI_Aint ext, ptrdiff_t gap, MPI_Comm comm) {
  int res;

  /* for each distance i, swap blocks with ranks rank+i and rank-i (mod p) */
  for (int i = 1 ; i < (p+1)/2 ; i++) {
    int speer = (rank + i) % p;
    int rpeer = (rank + p - i) % p;
    char *sbuf = (char *) buf + (intptr_t)speer * (intptr_t)count * ext;
    char *rbuf = (char *) buf + (intptr_t)rpeer * (intptr_t)count * ext;

    /* stage rpeer's block in tmpbuf before it is overwritten below */
    res = NBC_Sched_copy (rbuf, false, count, type,
                          (void *)(-gap), true, count, type,
                          schedule, true);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
    /* send speer's block out, receive rpeer's incoming block into its slot */
    res = NBC_Sched_send (sbuf, false , count, type, speer, schedule, false);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
    res = NBC_Sched_recv (rbuf, false , count, type, rpeer, schedule, true);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }

    /* forward the staged copy to rpeer, receive speer's reply into sbuf */
    res = NBC_Sched_send ((void *)(-gap), true, count, type, rpeer, schedule, false);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
    res = NBC_Sched_recv (sbuf, false, count, type, speer, schedule, true);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
  }
  /* even p: the rank at distance p/2 is its own pair partner — a single
   * staged exchange suffices */
  if (0 == (p%2)) {
    int peer = (rank + p/2) % p;

    char *tbuf = (char *) buf + (intptr_t)peer * (intptr_t)count * ext;
    res = NBC_Sched_copy (tbuf, false, count, type,
                          (void *)(-gap), true, count, type,
                          schedule, true);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
    res = NBC_Sched_send ((void *)(-gap), true , count, type, peer, schedule, false);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
    res = NBC_Sched_recv (tbuf, false , count, type, peer, schedule, true);
    if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
      return res;
    }
  }

  return OMPI_SUCCESS;
}
 593 
 594 int ompi_coll_libnbc_alltoall_init (const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 595                                     MPI_Datatype recvtype, struct ompi_communicator_t *comm, MPI_Info info, ompi_request_t ** request,
 596                                     struct mca_coll_base_module_2_3_0_t *module) {
 597     int res = nbc_alltoall_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
 598                                 comm, request, module, true);
 599     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 600         return res;
 601     }
 602 
 603     return OMPI_SUCCESS;
 604 }
 605 
 606 int ompi_coll_libnbc_alltoall_inter_init (const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
 607                                           MPI_Datatype recvtype, struct ompi_communicator_t *comm, MPI_Info info, ompi_request_t ** request,
 608                                           struct mca_coll_base_module_2_3_0_t *module) {
 609     int res = nbc_alltoall_inter_init(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
 610                                       comm, request, module, true);
 611     if (OPAL_UNLIKELY(OMPI_SUCCESS != res)) {
 612         return res;
 613     }
 614 
 615     return OMPI_SUCCESS;
 616 }

/* [<][>][^][v][top][bottom][index][help] */