root/ompi/mca/fcoll/base/fcoll_base_coll_array.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. ompi_fcoll_base_coll_allgatherv_array
  2. ompi_fcoll_base_coll_gatherv_array
  3. ompi_fcoll_base_coll_scatterv_array
  4. ompi_fcoll_base_coll_allgather_array
  5. ompi_fcoll_base_coll_gather_array
  6. ompi_fcoll_base_coll_bcast_array

   1 /* -*- Mode: C; c-basic-offset:4 ; -*- */
   2 /*
   3  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
   4  *                         University Research and Technology
   5  *                         Corporation.  All rights reserved.
   6  * Copyright (c) 2004-2007 The University of Tennessee and The University
   7  *                         of Tennessee Research Foundation.  All rights
   8  *                         reserved.
   9  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
  10  *                         University of Stuttgart.  All rights reserved.
  11  * Copyright (c) 2004-2005 The Regents of the University of California.
  12  *                         All rights reserved.
  13  * Copyright (c) 2008-2016 University of Houston. All rights reserved.
  14  * Copyright (c) 2017      Research Organization for Information Science
  15  *                         and Technology (RIST). All rights reserved.
  16  * Copyright (c) 2017      IBM Corporation. All rights reserved.
  17  * $COPYRIGHT$
  18  *
  19  * Additional copyrights may follow
  20  *
  21  * $HEADER$
  22  */
  23 
  24 #include "ompi_config.h"
  25 
  26 #include "ompi/runtime/params.h"
  27 #include "ompi/communicator/communicator.h"
  28 #include "ompi/mca/pml/pml.h"
  29 #include "opal/datatype/opal_datatype.h"
  30 #include "ompi/datatype/ompi_datatype.h"
  31 #include "ompi/request/request.h"
  32 
  33 #include <math.h>
  34 #include "ompi/mca/fcoll/base/fcoll_base_coll_array.h"
  35 #include "ompi/mca/common/ompio/common_ompio.h"
  36 
  37 
  38 int ompi_fcoll_base_coll_allgatherv_array (void *sbuf,
  39                                       int scount,
  40                                       ompi_datatype_t *sdtype,
  41                                       void *rbuf,
  42                                       int *rcounts,
  43                                       int *disps,
  44                                       ompi_datatype_t *rdtype,
  45                                       int root_index,
  46                                       int *procs_in_group,
  47                                       int procs_per_group,
  48                                       ompi_communicator_t *comm)
  49 {
  50     int err = OMPI_SUCCESS;
  51     ptrdiff_t extent, lb;
  52     int i, rank, j;
  53     char *send_buf = NULL;
  54     struct ompi_datatype_t *newtype, *send_type;
  55 
  56     rank = ompi_comm_rank (comm);
  57     for (j = 0; j < procs_per_group; j++) {
  58         if (procs_in_group[j] == rank) {
  59             break;
  60         }
  61     }
  62 
  63     if (MPI_IN_PLACE == sbuf) {
  64         err = opal_datatype_get_extent (&rdtype->super, &lb, &extent);
  65         if (OMPI_SUCCESS != err) {
  66             return OMPI_ERROR;
  67         }
  68         send_type = rdtype;
  69         send_buf = (char*)rbuf;
  70 
  71         for (i = 0; i < j; i++) {
  72             send_buf += (rcounts[i] * extent);
  73         }
  74     }
  75     else {
  76         send_buf = (char*)sbuf;
  77         send_type = sdtype;
  78     }
  79 
  80     err = ompi_fcoll_base_coll_gatherv_array (send_buf,
  81                                          rcounts[j],
  82                                          send_type,
  83                                          rbuf,
  84                                          rcounts,
  85                                          disps,
  86                                          rdtype,
  87                                          root_index,
  88                                          procs_in_group,
  89                                          procs_per_group,
  90                                          comm);
  91     if (OMPI_SUCCESS != err) {
  92         return err;
  93     }
  94 
  95     err = ompi_datatype_create_indexed (procs_per_group,
  96                                         rcounts,
  97                                         disps,
  98                                         rdtype,
  99                                         &newtype);
 100     if (MPI_SUCCESS != err) {
 101         return err;
 102     }
 103     err = ompi_datatype_commit (&newtype);
 104     if(MPI_SUCCESS != err) {
 105         return err;
 106     }
 107     
 108     ompi_fcoll_base_coll_bcast_array (rbuf,
 109                                  1,
 110                                  newtype,
 111                                  root_index,
 112                                  procs_in_group,
 113                                  procs_per_group,
 114                                  comm);
 115     
 116     ompi_datatype_destroy (&newtype);
 117 
 118     return OMPI_SUCCESS;
 119 }
 120 
 121 int ompi_fcoll_base_coll_gatherv_array (void *sbuf,
 122                                    int scount,
 123                                    ompi_datatype_t *sdtype,
 124                                    void *rbuf,
 125                                    int *rcounts,
 126                                    int *disps,
 127                                    ompi_datatype_t *rdtype,
 128                                    int root_index,
 129                                    int *procs_in_group,
 130                                    int procs_per_group,
 131                                    struct ompi_communicator_t *comm)
 132 {
 133     int i, rank;
 134     int err = OMPI_SUCCESS;
 135     char *ptmp;
 136     ptrdiff_t extent, lb;
 137     ompi_request_t **reqs=NULL;
 138 
 139     rank = ompi_comm_rank (comm);
 140 
 141     if (procs_in_group[root_index] != rank)  {
 142         if (scount > 0) {
 143             return MCA_PML_CALL(send(sbuf,
 144                                      scount,
 145                                      sdtype,
 146                                      procs_in_group[root_index],
 147                                      FCOLL_TAG_GATHERV,
 148                                      MCA_PML_BASE_SEND_STANDARD,
 149                                      comm));
 150         }
 151         return err;
 152     }
 153 
 154     /* writer processes, loop receiving data from proceses
 155        belonging to each corresponding root */
 156 
 157     err = opal_datatype_get_extent (&rdtype->super, &lb, &extent);
 158     if (OMPI_SUCCESS != err) {
 159         return OMPI_ERROR;
 160     }
 161     reqs = (ompi_request_t **) malloc ( procs_per_group *sizeof(ompi_request_t *));
 162     if ( NULL == reqs ) {
 163         return OMPI_ERR_OUT_OF_RESOURCE;
 164     }
 165     for (i=0; i<procs_per_group; i++) {
 166         ptmp = ((char *) rbuf) + (extent * disps[i]);
 167 
 168         if (procs_in_group[i] == rank) {
 169             if (MPI_IN_PLACE != sbuf &&
 170                 (0 < scount) &&
 171                 (0 < rcounts[i])) {
 172                 err = ompi_datatype_sndrcv (sbuf,
 173                                             scount,
 174                                             sdtype,
 175                                             ptmp,
 176                                             rcounts[i],
 177                                             rdtype);
 178             }
 179             reqs[i] = MPI_REQUEST_NULL;
 180         }
 181         else {
 182             /* Only receive if there is something to receive */
 183             if (rcounts[i] > 0) {
 184                 err = MCA_PML_CALL(irecv(ptmp,
 185                                          rcounts[i],
 186                                          rdtype,
 187                                          procs_in_group[i],
 188                                          FCOLL_TAG_GATHERV,
 189                                          comm,
 190                                          &reqs[i]));
 191             }
 192             else {
 193                 reqs[i] = MPI_REQUEST_NULL;
 194             }
 195         }
 196 
 197         if (OMPI_SUCCESS != err) {
 198             free ( reqs );
 199             return err;
 200         }
 201     }
 202     /* All done */
 203     err = ompi_request_wait_all ( procs_per_group, reqs, MPI_STATUSES_IGNORE );
 204     if ( NULL != reqs ) {
 205         free ( reqs );
 206     }
 207     return err;
 208 }
 209 
 210 int ompi_fcoll_base_coll_scatterv_array (void *sbuf,
 211                                     int *scounts,
 212                                     int *disps,
 213                                     ompi_datatype_t *sdtype,
 214                                     void *rbuf,
 215                                     int rcount,
 216                                     ompi_datatype_t *rdtype,
 217                                     int root_index,
 218                                     int *procs_in_group,
 219                                     int procs_per_group,
 220                                     struct ompi_communicator_t *comm)
 221 {
 222     int i, rank;
 223     int err = OMPI_SUCCESS;
 224     char *ptmp;
 225     ptrdiff_t extent, lb;
 226     ompi_request_t ** reqs=NULL;
 227 
 228     rank = ompi_comm_rank (comm);
 229 
 230     if (procs_in_group[root_index] != rank) {
 231         if (rcount > 0) {
 232             err = MCA_PML_CALL(recv(rbuf,
 233                                     rcount,
 234                                     rdtype,
 235                                     procs_in_group[root_index],
 236                                     FCOLL_TAG_SCATTERV,
 237                                     comm,
 238                                     MPI_STATUS_IGNORE));
 239         }
 240         return err;
 241     }
 242 
 243     /* writer processes, loop sending data to proceses
 244        belonging to each corresponding root */
 245 
 246     err = opal_datatype_get_extent (&sdtype->super, &lb, &extent);
 247     if (OMPI_SUCCESS != err) {
 248         return OMPI_ERROR;
 249     }
 250     reqs = ( ompi_request_t **) malloc ( procs_per_group * sizeof ( ompi_request_t *));
 251     if (NULL == reqs ) {
 252         return OMPI_ERR_OUT_OF_RESOURCE;
 253     }
 254 
 255     for (i=0 ; i<procs_per_group ; ++i) {
 256         ptmp = ((char *) sbuf) + (extent * disps[i]);
 257 
 258         if (procs_in_group[i] == rank) {
 259             if (MPI_IN_PLACE != sbuf &&
 260                 (0 < scounts[i]) &&
 261                 (0 < rcount)) {
 262                 err = ompi_datatype_sndrcv (ptmp,
 263                                             scounts[i],
 264                                             sdtype,
 265                                             rbuf,
 266                                             rcount,
 267                                             rdtype);
 268             }
 269             reqs[i] = MPI_REQUEST_NULL;
 270         }
 271         else {
 272             /* Only receive if there is something to receive */
 273             if (scounts[i] > 0) {
 274                 err = MCA_PML_CALL(isend(ptmp,
 275                                          scounts[i],
 276                                          sdtype,
 277                                          procs_in_group[i],
 278                                          FCOLL_TAG_SCATTERV,
 279                                          MCA_PML_BASE_SEND_STANDARD,
 280                                          comm,
 281                                          &reqs[i]));
 282             }
 283             else {
 284                 reqs[i] = MPI_REQUEST_NULL;
 285             }
 286         }
 287         if (OMPI_SUCCESS != err) {
 288             free ( reqs );
 289             return err;
 290         }
 291     }
 292     /* All done */
 293     err = ompi_request_wait_all ( procs_per_group, reqs, MPI_STATUSES_IGNORE );
 294     if ( NULL != reqs ) {
 295         free ( reqs );
 296     }
 297     return err;
 298 }
 299 
 300 int ompi_fcoll_base_coll_allgather_array (void *sbuf,
 301                                      int scount,
 302                                      ompi_datatype_t *sdtype,
 303                                      void *rbuf,
 304                                      int rcount,
 305                                      ompi_datatype_t *rdtype,
 306                                      int root_index,
 307                                      int *procs_in_group,
 308                                      int procs_per_group,
 309                                      ompi_communicator_t *comm)
 310 {
 311     int err = OMPI_SUCCESS;
 312     int rank;
 313     ptrdiff_t extent, lb;
 314 
 315     rank = ompi_comm_rank (comm);
 316 
 317     if (((void *) 1) == sbuf && 0 != rank) {
 318         err = opal_datatype_get_extent (&rdtype->super, &lb, &extent);
 319         if (OMPI_SUCCESS != err) {
 320             return OMPI_ERROR;
 321         }
 322         sbuf = ((char*) rbuf) + (rank * extent * rcount);
 323         sdtype = rdtype;
 324         scount = rcount;
 325     }
 326 
 327     /* Gather and broadcast. */
 328     err = ompi_fcoll_base_coll_gather_array (sbuf,
 329                                         scount,
 330                                         sdtype,
 331                                         rbuf,
 332                                         rcount,
 333                                         rdtype,
 334                                         root_index,
 335                                         procs_in_group,
 336                                         procs_per_group,
 337                                         comm);
 338     
 339     if (OMPI_SUCCESS == err) {
 340         err = ompi_fcoll_base_coll_bcast_array (rbuf,
 341                                            rcount * procs_per_group,
 342                                            rdtype,
 343                                            root_index,
 344                                            procs_in_group,
 345                                            procs_per_group,
 346                                            comm);
 347     }
 348     /* All done */
 349 
 350     return err;
 351 }
 352 
 353 int ompi_fcoll_base_coll_gather_array (void *sbuf,
 354                                   int scount,
 355                                   ompi_datatype_t *sdtype,
 356                                   void *rbuf,
 357                                   int rcount,
 358                                   ompi_datatype_t *rdtype,
 359                                   int root_index,
 360                                   int *procs_in_group,
 361                                   int procs_per_group,
 362                                   struct ompi_communicator_t *comm)
 363 {
 364     int i;
 365     int rank;
 366     char *ptmp;
 367     ptrdiff_t incr;
 368     ptrdiff_t extent, lb;
 369     int err = OMPI_SUCCESS;
 370     ompi_request_t ** reqs=NULL;
 371 
 372     rank = ompi_comm_rank (comm);
 373 
 374     /* Everyone but the writers sends data and returns. */
 375     if (procs_in_group[root_index] != rank) {
 376         err = MCA_PML_CALL(send(sbuf,
 377                                 scount,
 378                                 sdtype,
 379                                 procs_in_group[root_index],
 380                                 FCOLL_TAG_GATHER,
 381                                 MCA_PML_BASE_SEND_STANDARD,
 382                                 comm));
 383         return err;
 384     }
 385 
 386     /* writers, loop receiving the data. */
 387     opal_datatype_get_extent (&rdtype->super, &lb, &extent);
 388     incr = extent * rcount;
 389 
 390     reqs = ( ompi_request_t **) malloc ( procs_per_group * sizeof ( ompi_request_t *));
 391     if (NULL == reqs ) {
 392         return OMPI_ERR_OUT_OF_RESOURCE;
 393     }
 394 
 395     for (i = 0, ptmp = (char *) rbuf;
 396          i < procs_per_group;
 397          ++i, ptmp += incr) {
 398         if (procs_in_group[i] == rank) {
 399             if (MPI_IN_PLACE != sbuf) {
 400                 err = ompi_datatype_sndrcv (sbuf,
 401                                             scount,
 402                                             sdtype ,
 403                                             ptmp,
 404                                             rcount,
 405                                             rdtype);
 406             }
 407             else {
 408                 err = OMPI_SUCCESS;
 409             }
 410             reqs[i] = MPI_REQUEST_NULL;
 411         }
 412         else {
 413             err = MCA_PML_CALL(irecv(ptmp,
 414                                      rcount,
 415                                      rdtype,
 416                                      procs_in_group[i],
 417                                      FCOLL_TAG_GATHER,
 418                                      comm,
 419                                      &reqs[i]));
 420             /*
 421             for (k=0 ; k<4 ; k++)
 422                 printf ("RECV %p  %d \n",
 423                         ((struct iovec *)ptmp)[k].iov_base,
 424                         ((struct iovec *)ptmp)[k].iov_len);
 425             */
 426         }
 427 
 428         if (OMPI_SUCCESS != err) {
 429             free ( reqs );
 430             return err;
 431         }
 432     }
 433 
 434     /* All done */
 435     err = ompi_request_wait_all ( procs_per_group, reqs, MPI_STATUSES_IGNORE );
 436     if ( NULL != reqs ) {
 437         free ( reqs );
 438     }
 439 
 440     return err;
 441 }
 442 
 443 int ompi_fcoll_base_coll_bcast_array (void *buff,
 444                                  int count,
 445                                  ompi_datatype_t *datatype,
 446                                  int root_index,
 447                                  int *procs_in_group,
 448                                  int procs_per_group,
 449                                  ompi_communicator_t *comm)
 450 {
 451     int i, rank;
 452     int err = OMPI_SUCCESS;
 453     ompi_request_t ** reqs=NULL;
 454 
 455     rank = ompi_comm_rank (comm);
 456 
 457     /* Non-writers receive the data. */
 458     if (procs_in_group[root_index] != rank) {
 459         err = MCA_PML_CALL(recv(buff,
 460                                 count,
 461                                 datatype,
 462                                 procs_in_group[root_index],
 463                                 FCOLL_TAG_BCAST,
 464                                 comm,
 465                                 MPI_STATUS_IGNORE));
 466         return err;
 467     }
 468 
 469     /* Writers sends data to all others. */
 470     reqs = ( ompi_request_t **) malloc ( procs_per_group * sizeof ( ompi_request_t *));
 471     if (NULL == reqs ) {
 472         return OMPI_ERR_OUT_OF_RESOURCE;
 473     }
 474 
 475     for (i=0 ; i<procs_per_group ; i++) {
 476         if (procs_in_group[i] == rank) {
 477             reqs[i] = MPI_REQUEST_NULL;
 478             continue;
 479         }
 480 
 481         err = MCA_PML_CALL(isend(buff,
 482                                  count,
 483                                  datatype,
 484                                  procs_in_group[i],
 485                                  FCOLL_TAG_BCAST,
 486                                  MCA_PML_BASE_SEND_STANDARD,
 487                                  comm,
 488                                  &reqs[i]));
 489         if (OMPI_SUCCESS != err) {
 490             free ( reqs );
 491             return err;
 492         }
 493     }
 494     err = ompi_request_wait_all ( procs_per_group, reqs, MPI_STATUSES_IGNORE );
 495     if ( NULL != reqs ) {
 496         free ( reqs );
 497     }
 498 
 499     return err;
 500 }

/* [<][>][^][v][top][bottom][index][help] */