root/ompi/mpi/c/sendrecv_replace.c

DEFINITIONS

This source file includes the following definitions:
  1. MPI_Sendrecv_replace

/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2018 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2010-2012 Oracle and/or its affiliates.  All rights reserved.
 * Copyright (c) 2015      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2017      IBM Corporation.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
#include "ompi_config.h"

#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/datatype/ompi_datatype.h"
#include "opal/datatype/opal_convertor.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/proc/proc.h"
#include "ompi/memchecker.h"
#include "ompi/runtime/ompi_spc.h"

#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_Sendrecv_replace = PMPI_Sendrecv_replace
#endif
#define MPI_Sendrecv_replace PMPI_Sendrecv_replace
#endif

static const char FUNC_NAME[] = "MPI_Sendrecv_replace";


int MPI_Sendrecv_replace(void * buf, int count, MPI_Datatype datatype,
                         int dest, int sendtag, int source, int recvtag,
                         MPI_Comm comm, MPI_Status *status)
{
    int rc = MPI_SUCCESS;

    SPC_RECORD(OMPI_SPC_SENDRECV_REPLACE, 1);

    MEMCHECKER(
               memchecker_datatype(datatype);
               memchecker_call(&opal_memchecker_base_isdefined, buf, count, datatype);
               memchecker_comm(comm);
               );

    if ( MPI_PARAM_CHECK ) {
        rc = MPI_SUCCESS;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        OMPI_CHECK_DATATYPE_FOR_RECV(rc, datatype, count);

        if (ompi_comm_invalid(comm)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM, FUNC_NAME);
        } else if (dest != MPI_PROC_NULL && ompi_comm_peer_invalid(comm, dest)) {
            rc = MPI_ERR_RANK;
        } else if (sendtag < 0 || sendtag > mca_pml.pml_max_tag) {
            rc = MPI_ERR_TAG;
        } else if (source != MPI_PROC_NULL && source != MPI_ANY_SOURCE && ompi_comm_peer_invalid(comm, source)) {
            rc = MPI_ERR_RANK;
        } else if (((recvtag < 0) && (recvtag != MPI_ANY_TAG)) || (recvtag > mca_pml.pml_max_tag)) {
            rc = MPI_ERR_TAG;
        }
        OMPI_ERRHANDLER_CHECK(rc, comm, rc, FUNC_NAME);
    }

    OPAL_CR_ENTER_LIBRARY();

    /* simple case: a null peer or an empty payload degenerates to a plain sendrecv */
    if ( source == MPI_PROC_NULL || dest == MPI_PROC_NULL || count == 0 ) {
        rc = PMPI_Sendrecv(buf, count, datatype, dest, sendtag, buf, count, datatype, source, recvtag, comm, status);

        OPAL_CR_EXIT_LIBRARY();
        return rc;
    }

    /**
     * The optimal solution would be to receive the data into a temporary buffer and, once
     * the send completes, unpack it back into the original buffer. However, if the sender
     * is unknown, this approach can only be implemented by receiving with the recv datatype
     * (potentially non-contiguous), and thus the allocated memory will be larger than the
     * size of the datatype. A simpler, but potentially less efficient, approach is to work
     * on the data we have control of, aka the sent data, and pack it into a contiguous
     * buffer before posting the receive. Once the send completes, we free it.
     */
    opal_convertor_t convertor;
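    /* small payloads are packed into this 2048-byte stack buffer; anything larger
     * falls back to a PMPI_Alloc_mem allocation below */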
    unsigned char packed_data[2048];
    struct iovec iov = { .iov_base = packed_data, .iov_len = sizeof(packed_data) };
    size_t packed_size, max_data;
    uint32_t iov_count;
    ompi_status_public_t recv_status;
    ompi_proc_t* proc = ompi_comm_peer_lookup(comm, dest);
    if(proc == NULL) {
        rc = MPI_ERR_RANK;
        OPAL_CR_EXIT_LIBRARY();
        OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
    }

    /* initialize a convertor to pack the send buffer */
    OBJ_CONSTRUCT(&convertor, opal_convertor_t);
    opal_convertor_copy_and_prepare_for_send( proc->super.proc_convertor, &(datatype->super),
                                              count, buf, 0, &convertor );

    /* set up a temporary buffer large enough to hold the packed send data */
    opal_convertor_get_packed_size( &convertor, &packed_size );
    if( packed_size > sizeof(packed_data) ) {
        rc = PMPI_Alloc_mem(packed_size, MPI_INFO_NULL, &iov.iov_base);
        if(OMPI_SUCCESS != rc) {
            rc = OMPI_ERR_OUT_OF_RESOURCE;
            goto cleanup_and_return;
        }
        iov.iov_len = packed_size;
    }
    max_data = packed_size;
    iov_count = 1;
    opal_convertor_pack(&convertor, &iov, &iov_count, &max_data);
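
    /* after the pack, the outgoing data is a contiguous copy in iov.iov_base,
     * so the receive below can overwrite the user buffer without corrupting
     * the send payload */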
    /* send the packed copy of the data while receiving directly into the user buffer */
    rc = PMPI_Sendrecv( iov.iov_base, packed_size, MPI_PACKED, dest, sendtag, buf, count,
                        datatype, source, recvtag, comm, &recv_status );

 cleanup_and_return:
    /* hand the receive status back to the user; per MPI semantics its contents
     * are only meaningful if the exchange actually ran */
    if(MPI_SUCCESS == rc && status != MPI_STATUS_IGNORE) {
        *status = recv_status;
    }

    /* release resources: free only if a heap buffer was actually allocated,
     * since iov.iov_base still points at the stack buffer when PMPI_Alloc_mem
     * failed */
    if(NULL != iov.iov_base && iov.iov_base != packed_data) {
        PMPI_Free_mem(iov.iov_base);
    }
    OBJ_DESTRUCT(&convertor);

    OPAL_CR_EXIT_LIBRARY();
    OMPI_ERRHANDLER_RETURN(rc, comm, rc, FUNC_NAME);
}
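
For reference, a minimal caller sketch of the in-place semantics this file implements (not part of the Open MPI source; the ring-shift pattern, neighbor arithmetic, and tag value are illustrative only): each rank sends its current value and receives its replacement into the same buffer.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size, value;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    value = rank;                          /* payload starts as our own rank */
    int right = (rank + 1) % size;         /* destination of the send */
    int left  = (rank + size - 1) % size;  /* source of the receive */

    /* send 'value' to the right neighbor and replace it, in the same buffer,
     * with the value received from the left neighbor; tag 0 is arbitrary */
    MPI_Sendrecv_replace(&value, 1, MPI_INT, right, 0, left, 0,
                         MPI_COMM_WORLD, &status);

    printf("rank %d now holds %d\n", rank, value);
    MPI_Finalize();
    return 0;
}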
