root/ompi/mca/coll/basic/coll_basic_alltoallw.c


DEFINITIONS

This source file includes the following definitions:
  1. mca_coll_basic_alltoallw_intra_inplace
  2. mca_coll_basic_alltoallw_intra
  3. mca_coll_basic_alltoallw_inter

/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2016 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2012      Oak Ridge National Labs.  All rights reserved.
 * Copyright (c) 2013      Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2013      FUJITSU LIMITED.  All rights reserved.
 * Copyright (c) 2014-2016 Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2014      Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2017      IBM Corporation. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"
#include "coll_basic.h"

#include "mpi.h"
#include "ompi/constants.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/mca/coll/coll.h"
#include "ompi/mca/coll/base/coll_tags.h"
#include "ompi/mca/pml/pml.h"


static int
mca_coll_basic_alltoallw_intra_inplace(const void *rbuf, const int *rcounts, const int *rdisps,
                                       struct ompi_datatype_t * const *rdtypes,
                                       struct ompi_communicator_t *comm,
                                       mca_coll_base_module_t *module)
{
    int i, j, size, rank, err = MPI_SUCCESS;
    ompi_request_t *req;
    char *tmp_buffer, *save_buffer = NULL;
    ptrdiff_t ext, gap = 0, max_size;

    /* Initialize. */

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    /* If only one process, we're done. */
    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Find the largest receive amount */
    for (i = 0, max_size = 0 ; i < size ; ++i) {
        ext = opal_datatype_span(&rdtypes[i]->super, rcounts[i], &gap);

        max_size = ext > max_size ? ext : max_size;
    }

    /* Allocate a temporary buffer */
    tmp_buffer = save_buffer = calloc (max_size, 1);
    if (NULL == tmp_buffer) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    tmp_buffer -= gap;

    /* in-place alltoallw slow algorithm (but works) */
    for (i = 0 ; i < size ; ++i) {
        size_t msg_size_i;
        ompi_datatype_type_size(rdtypes[i], &msg_size_i);
        msg_size_i *= rcounts[i];
        for (j = i+1 ; j < size ; ++j) {
            size_t msg_size_j;
            ompi_datatype_type_size(rdtypes[j], &msg_size_j);
            msg_size_j *= rcounts[j];

            /* Initiate all send/recv to/from others. */
            if (i == rank && msg_size_j != 0) {
                /* Copy the data into the temporary buffer */
                err = ompi_datatype_copy_content_same_ddt (rdtypes[j], rcounts[j],
                                                           tmp_buffer, (char *) rbuf + rdisps[j]);
                if (MPI_SUCCESS != err) { goto error_hndl; }

                /* Exchange data with the peer */
                err = MCA_PML_CALL(irecv ((char *) rbuf + rdisps[j], rcounts[j], rdtypes[j],
                                          j, MCA_COLL_BASE_TAG_ALLTOALLW, comm, &req));
                if (MPI_SUCCESS != err) { goto error_hndl; }

                err = MCA_PML_CALL(send ((void *) tmp_buffer,  rcounts[j], rdtypes[j],
                                          j, MCA_COLL_BASE_TAG_ALLTOALLW, MCA_PML_BASE_SEND_STANDARD,
                                          comm));
                if (MPI_SUCCESS != err) { goto error_hndl; }
            } else if (j == rank && msg_size_i != 0) {
                /* Copy the data into the temporary buffer */
                err = ompi_datatype_copy_content_same_ddt (rdtypes[i], rcounts[i],
                                                           tmp_buffer, (char *) rbuf + rdisps[i]);
                if (MPI_SUCCESS != err) { goto error_hndl; }

                /* Exchange data with the peer */
                err = MCA_PML_CALL(irecv ((char *) rbuf + rdisps[i], rcounts[i], rdtypes[i],
                                          i, MCA_COLL_BASE_TAG_ALLTOALLW, comm, &req));
                if (MPI_SUCCESS != err) { goto error_hndl; }

                err = MCA_PML_CALL(send ((void *) tmp_buffer,  rcounts[i], rdtypes[i],
                                          i, MCA_COLL_BASE_TAG_ALLTOALLW, MCA_PML_BASE_SEND_STANDARD,
                                          comm));
                if (MPI_SUCCESS != err) { goto error_hndl; }
            } else {
                continue;
            }

            /* Wait for the requests to complete */
            err = ompi_request_wait (&req, MPI_STATUS_IGNORE);
            if (MPI_SUCCESS != err) { goto error_hndl; }
        }
    }

 error_hndl:
    /* Free the temporary buffer */
    free (save_buffer);

    /* All done */

    return err;
}
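
/*
 * Usage sketch (caller-side, illustrative assumptions only): how an
 * application reaches the in-place branch handled above.  With MPI_IN_PLACE
 * as the send buffer, each rank sends and receives through the same buffer,
 * and only the receive counts, displacements, and datatypes are consulted.
 * Displacements for MPI_Alltoallw are expressed in bytes.  The layout of one
 * MPI_INT block per peer and the function name are assumptions for the
 * example, not part of this component.
 */
#include <stdlib.h>   /* malloc/free used by the example sketches */

static void example_alltoallw_inplace(MPI_Comm comm)
{
    int i, size;
    MPI_Comm_size(comm, &size);

    int *buf = malloc(size * sizeof(int));
    int *counts = malloc(size * sizeof(int));
    int *disps = malloc(size * sizeof(int));
    MPI_Datatype *types = malloc(size * sizeof(MPI_Datatype));

    for (i = 0; i < size; ++i) {
        buf[i] = i;                          /* block destined for / received from rank i */
        counts[i] = 1;
        disps[i] = (int) (i * sizeof(int));  /* byte displacement of block i */
        types[i] = MPI_INT;
    }

    /* Send arguments are ignored when MPI_IN_PLACE is supplied. */
    MPI_Alltoallw(MPI_IN_PLACE, NULL, NULL, NULL,
                  buf, counts, disps, types, comm);

    free(buf); free(counts); free(disps); free(types);
}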


/*
 *      alltoallw_intra
 *
 *      Function:       - MPI_Alltoallw
 *      Accepts:        - same as MPI_Alltoallw()
 *      Returns:        - MPI_SUCCESS or an MPI error code
 */
int
mca_coll_basic_alltoallw_intra(const void *sbuf, const int *scounts, const int *sdisps,
                               struct ompi_datatype_t * const *sdtypes,
                               void *rbuf, const int *rcounts, const int *rdisps,
                               struct ompi_datatype_t * const *rdtypes,
                               struct ompi_communicator_t *comm,
                               mca_coll_base_module_t *module)
{
    int i, size, rank, err, nreqs;
    char *psnd, *prcv;
    ompi_request_t **preq, **reqs;

    /* Initialize. */
    if (MPI_IN_PLACE == sbuf) {
        return mca_coll_basic_alltoallw_intra_inplace (rbuf, rcounts, rdisps,
                                                       rdtypes, comm, module);
    }

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    /* simple optimization: handle the local (rank-to-rank) exchange as a
     * direct copy instead of going through the PML */

    psnd = ((char *) sbuf) + sdisps[rank];
    prcv = ((char *) rbuf) + rdisps[rank];

    err = ompi_datatype_sndrcv(psnd, scounts[rank], sdtypes[rank],
                               prcv, rcounts[rank], rdtypes[rank]);
    if (MPI_SUCCESS != err) {
        return err;
    }

    /* If only one process, we're done. */

    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Initiate all send/recv to/from others. */

    nreqs = 0;
    reqs = preq = ompi_coll_base_comm_get_reqs(module->base_data, 2 * size);
    if( NULL == reqs ) { return OMPI_ERR_OUT_OF_RESOURCE; }

    /* Post all receives first -- a simple optimization */

    for (i = 0; i < size; ++i) {
        size_t msg_size;
        ompi_datatype_type_size(rdtypes[i], &msg_size);
        msg_size *= rcounts[i];

        if (i == rank || 0 == msg_size)
            continue;

        prcv = ((char *) rbuf) + rdisps[i];
        err = MCA_PML_CALL(irecv_init(prcv, rcounts[i], rdtypes[i],
                                      i, MCA_COLL_BASE_TAG_ALLTOALLW, comm,
                                      preq++));
        ++nreqs;
        if (MPI_SUCCESS != err) {
            ompi_coll_base_free_reqs(reqs, nreqs);
            return err;
        }
    }

    /* Now post all sends */

    for (i = 0; i < size; ++i) {
        size_t msg_size;
        ompi_datatype_type_size(sdtypes[i], &msg_size);
        msg_size *= scounts[i];

        if (i == rank || 0 == msg_size)
            continue;

        psnd = ((char *) sbuf) + sdisps[i];
        err = MCA_PML_CALL(isend_init(psnd, scounts[i], sdtypes[i],
                                      i, MCA_COLL_BASE_TAG_ALLTOALLW,
                                      MCA_PML_BASE_SEND_STANDARD, comm,
                                      preq++));
        ++nreqs;
        if (MPI_SUCCESS != err) {
            ompi_coll_base_free_reqs(reqs, nreqs);
            return err;
        }
    }

    /* Start your engines.  This will never return an error. */

    MCA_PML_CALL(start(nreqs, reqs));

    /* Wait for them all.  If there's an error, note that we don't care
     * what the error was -- just that there *was* an error.  The PML
     * will finish all requests, even if one or more of them fail.
     * i.e., by the end of this call, all the requests are free-able.
     * So free them anyway -- even if there was an error, and return the
     * error after we free everything. */

    err = ompi_request_wait_all(nreqs, reqs, MPI_STATUSES_IGNORE);
    /* Free the requests in all cases as they are persistent */
    ompi_coll_base_free_reqs(reqs, nreqs);

    /* All done */
    return err;
}
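
/*
 * Sketch of the same post-receives/post-sends/start/wait-all pattern, written
 * with the public MPI persistent-request API instead of the internal PML
 * calls used above (irecv_init/isend_init/start).  The fixed layout of one
 * MPI_INT per peer and the helper name are assumptions for illustration only.
 */
static void example_persistent_exchange(const int *sendbuf, int *recvbuf,
                                        MPI_Comm comm)
{
    int i, rank, size, nreqs = 0;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    MPI_Request *reqs = malloc(2 * size * sizeof(MPI_Request));

    /* Handle the local block directly, as the routine above does. */
    recvbuf[rank] = sendbuf[rank];

    /* Post all receives first. */
    for (i = 0; i < size; ++i) {
        if (i == rank) continue;
        MPI_Recv_init(recvbuf + i, 1, MPI_INT, i, 0, comm, &reqs[nreqs++]);
    }

    /* Now post all sends. */
    for (i = 0; i < size; ++i) {
        if (i == rank) continue;
        MPI_Send_init(sendbuf + i, 1, MPI_INT, i, 0, comm, &reqs[nreqs++]);
    }

    MPI_Startall(nreqs, reqs);                      /* start every persistent request */
    MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE);  /* wait for all exchanges */

    /* Persistent requests stay allocated after completion and must be freed. */
    for (i = 0; i < nreqs; ++i) {
        MPI_Request_free(&reqs[i]);
    }
    free(reqs);
}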


/*
 *      alltoallw_inter
 *
 *      Function:       - MPI_Alltoallw
 *      Accepts:        - same as MPI_Alltoallw()
 *      Returns:        - MPI_SUCCESS or an MPI error code
 */
int
mca_coll_basic_alltoallw_inter(const void *sbuf, const int *scounts, const int *sdisps,
                               struct ompi_datatype_t * const *sdtypes,
                               void *rbuf, const int *rcounts, const int *rdisps,
                               struct ompi_datatype_t * const *rdtypes,
                               struct ompi_communicator_t *comm,
                               mca_coll_base_module_t *module)
{
    int i, size, err, nreqs;
    char *psnd, *prcv;
    ompi_request_t **preq, **reqs;

    /* Initialize. */
    size = ompi_comm_remote_size(comm);

    /* Initiate all send/recv to/from others. */
    nreqs = 0;
    reqs = preq = ompi_coll_base_comm_get_reqs(module->base_data, 2 * size);
    if( NULL == reqs ) { return OMPI_ERR_OUT_OF_RESOURCE; }

    /* Post all receives first -- a simple optimization */
    for (i = 0; i < size; ++i) {
        size_t msg_size;
        ompi_datatype_type_size(rdtypes[i], &msg_size);
        msg_size *= rcounts[i];

        if (0 == msg_size)
            continue;

        prcv = ((char *) rbuf) + rdisps[i];
        err = MCA_PML_CALL(irecv_init(prcv, rcounts[i], rdtypes[i],
                                      i, MCA_COLL_BASE_TAG_ALLTOALLW,
                                      comm, preq++));
        ++nreqs;
        if (OMPI_SUCCESS != err) {
            ompi_coll_base_free_reqs(reqs, nreqs);
            return err;
        }
    }

    /* Now post all sends */
    for (i = 0; i < size; ++i) {
        size_t msg_size;
        ompi_datatype_type_size(sdtypes[i], &msg_size);
        msg_size *= scounts[i];

        if (0 == msg_size)
            continue;

        psnd = ((char *) sbuf) + sdisps[i];
        err = MCA_PML_CALL(isend_init(psnd, scounts[i], sdtypes[i],
                                      i, MCA_COLL_BASE_TAG_ALLTOALLW,
                                      MCA_PML_BASE_SEND_STANDARD, comm,
                                      preq++));
        ++nreqs;
        if (OMPI_SUCCESS != err) {
            ompi_coll_base_free_reqs(reqs, nreqs);
            return err;
        }
    }

    /* Start your engines.  This will never return an error. */
    MCA_PML_CALL(start(nreqs, reqs));

    /* Wait for them all.  If there's an error, note that we don't care
     * what the error was -- just that there *was* an error.  The PML
     * will finish all requests, even if one or more of them fail.
     * i.e., by the end of this call, all the requests are free-able.
     * So free them anyway -- even if there was an error, and return the
     * error after we free everything. */
    err = ompi_request_wait_all(nreqs, reqs, MPI_STATUSES_IGNORE);

    /* Free the requests in all cases as they are persistent */
    ompi_coll_base_free_reqs(reqs, nreqs);

    /* All done */
    return err;
}
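
/*
 * Usage sketch for the intercommunicator case (caller-side, illustrative
 * assumptions only): on an intercommunicator, every rank of the local group
 * exchanges one block with every rank of the remote group, so the count,
 * displacement, and datatype arrays are sized by the remote group size.
 * The one-MPI_INT-per-peer layout and the function name are assumptions.
 */
static void example_alltoallw_inter(MPI_Comm intercomm)
{
    int i, rsize;
    MPI_Comm_remote_size(intercomm, &rsize);

    int *sbuf = malloc(rsize * sizeof(int));
    int *rbuf = malloc(rsize * sizeof(int));
    int *counts = malloc(rsize * sizeof(int));
    int *disps = malloc(rsize * sizeof(int));
    MPI_Datatype *types = malloc(rsize * sizeof(MPI_Datatype));

    for (i = 0; i < rsize; ++i) {
        sbuf[i] = i;                         /* block for remote rank i */
        counts[i] = 1;
        disps[i] = (int) (i * sizeof(int));  /* byte displacements */
        types[i] = MPI_INT;
    }

    /* The same layout is used for sends and receives in this sketch. */
    MPI_Alltoallw(sbuf, counts, disps, types,
                  rbuf, counts, disps, types, intercomm);

    free(sbuf); free(rbuf); free(counts); free(disps); free(types);
}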
