root/ompi/mpi/c/reduce_scatter.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. MPI_Reduce_scatter

   1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
   2 /*
   3  * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
   4  *                         University Research and Technology
   5  *                         Corporation.  All rights reserved.
   6  * Copyright (c) 2004-2017 The University of Tennessee and The University
   7  *                         of Tennessee Research Foundation.  All rights
   8  *                         reserved.
   9  * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
  10  *                         University of Stuttgart.  All rights reserved.
  11  * Copyright (c) 2004-2005 The Regents of the University of California.
  12  *                         All rights reserved.
  13  * Copyright (c) 2006-2012 Cisco Systems, Inc.  All rights reserved.
  14  * Copyright (c) 2012-2013 Los Alamos National Security, LLC.  All rights
  15  *                         reserved.
  16  * Copyright (c) 2015-2018 Research Organization for Information Science
  17  *                         and Technology (RIST). All rights reserved.
  18  * $COPYRIGHT$
  19  *
  20  * Additional copyrights may follow
  21  *
  22  * $HEADER$
  23  */
  24 #include "ompi_config.h"
  25 #include <stdio.h>
  26 
  27 #include "ompi/mpi/c/bindings.h"
  28 #include "ompi/runtime/params.h"
  29 #include "ompi/communicator/communicator.h"
  30 #include "ompi/errhandler/errhandler.h"
  31 #include "ompi/datatype/ompi_datatype.h"
  32 #include "ompi/op/op.h"
  33 #include "ompi/memchecker.h"
  34 #include "ompi/runtime/ompi_spc.h"
  35 
  36 #if OMPI_BUILD_MPI_PROFILING
  37 #if OPAL_HAVE_WEAK_SYMBOLS
  38 #pragma weak MPI_Reduce_scatter = PMPI_Reduce_scatter
  39 #endif
  40 #define MPI_Reduce_scatter PMPI_Reduce_scatter
  41 #endif
  42 
  43 static const char FUNC_NAME[] = "MPI_Reduce_scatter";
  44 
  45 
  46 int MPI_Reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[],
  47                        MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  48 {
  49     int i, err, size, count;
  50 
  51     SPC_RECORD(OMPI_SPC_REDUCE_SCATTER, 1);
  52 
  53     MEMCHECKER(
  54         int rank;
  55 
  56         size = ompi_comm_size(comm);
  57         rank = ompi_comm_rank(comm);
  58         for (count = i = 0; i < size; ++i) {
  59             if (0 == recvcounts[i]) {
  60                 count += recvcounts[i];
  61             }
  62         }
  63 
  64         memchecker_comm(comm);
  65         memchecker_datatype(datatype);
  66 
  67         /* check receive buffer of current proccess, whether it's addressable. */
  68         memchecker_call(&opal_memchecker_base_isaddressable, recvbuf,
  69                         recvcounts[rank], datatype);
  70 
  71         /* check whether the actual send buffer is defined. */
  72         if(MPI_IN_PLACE == sendbuf) {
  73             memchecker_call(&opal_memchecker_base_isdefined, recvbuf, count, datatype);
  74         } else {
  75             memchecker_call(&opal_memchecker_base_isdefined, sendbuf, count, datatype);
  76 
  77         }
  78     );
  79 
  80     if (MPI_PARAM_CHECK) {
  81         char *msg;
  82         err = MPI_SUCCESS;
  83         OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
  84         if (ompi_comm_invalid(comm)) {
  85             return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COMM,
  86                                           FUNC_NAME);
  87         }
  88 
  89         /* Unrooted operation; same checks for all ranks on both
  90            intracommunicators and intercommunicators */
  91 
  92         else if (MPI_OP_NULL == op || NULL == op) {
  93           err = MPI_ERR_OP;
  94         } else if (!ompi_op_is_valid(op, datatype, &msg, FUNC_NAME)) {
  95             int ret = OMPI_ERRHANDLER_INVOKE(comm, MPI_ERR_OP, msg);
  96             free(msg);
  97             return ret;
  98         } else if (NULL == recvcounts) {
  99           err = MPI_ERR_COUNT;
 100         } else if (MPI_IN_PLACE == recvbuf) {
 101           err = MPI_ERR_ARG;
 102         }
 103         OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
 104 
 105         /* Based on the standard each group has to provide the same total
 106            number of elements, so the size of the recvcounts array depends
 107            on the number of participants in the local group.  */
 108         size = ompi_comm_size(comm);
 109         for (i = 0; i < size; ++i) {
 110           OMPI_CHECK_DATATYPE_FOR_SEND(err, datatype, recvcounts[i]);
 111           OMPI_ERRHANDLER_CHECK(err, comm, err, FUNC_NAME);
 112         }
 113     }
 114 
 115     /* MPI-1, p114, says that each process must supply at least one
 116        element.  But at least the Pallas benchmarks call MPI_REDUCE
 117        with a count of 0.  So be sure to handle it.  Grrr... */
 118 
 119     size = ompi_comm_size(comm);
 120     for (count = i = 0; i < size; ++i) {
 121         if (0 == recvcounts[i]) {
 122             ++count;
 123         }
 124     }
 125     if (size == count) {
 126         return MPI_SUCCESS;
 127     }
 128 
 129     OPAL_CR_ENTER_LIBRARY();
 130 
 131     /* Invoke the coll component to perform the back-end operation */
 132 
 133     OBJ_RETAIN(op);
 134     err = comm->c_coll->coll_reduce_scatter(sendbuf, recvbuf, recvcounts,
 135                                            datatype, op, comm,
 136                                            comm->c_coll->coll_reduce_scatter_module);
 137     OBJ_RELEASE(op);
 138     OMPI_ERRHANDLER_RETURN(err, comm, err, FUNC_NAME);
 139 }

/* [<][>][^][v][top][bottom][index][help] */