root/oshmem/mca/scoll/mpi/scoll_mpi_ops.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. mca_scoll_mpi_barrier
  2. mca_scoll_mpi_broadcast
  3. mca_scoll_mpi_collect
  4. mca_scoll_mpi_reduce

   1 /**
   2   Copyright (c) 2011 Mellanox Technologies. All rights reserved.
   3   Copyright (c) 2017      IBM Corporation.  All rights reserved.
   4   $COPYRIGHT$
   5 
   6   Additional copyrights may follow
   7 
   8   $HEADER$
   9  */
  10 
  11 #include "ompi_config.h"
  12 #include "ompi/constants.h"
  13 #include "scoll_mpi.h"
  14 #include "scoll_mpi_dtypes.h"
  15 
  16 #define INCOMPATIBLE_SHMEM_OMPI_COLL_APIS 1
  17 
  18 int mca_scoll_mpi_barrier(struct oshmem_group_t *group, long *pSync, int alg)
  19 {
  20     mca_scoll_mpi_module_t *mpi_module;
  21     int rc;
  22     MPI_COLL_VERBOSE(20,"RUNNING MPI BARRIER");
  23     mpi_module = (mca_scoll_mpi_module_t *) group->g_scoll.scoll_barrier_module;
  24 
  25     rc = mpi_module->comm->c_coll->coll_barrier(mpi_module->comm, mpi_module->comm->c_coll->coll_barrier_module);
  26     if (OMPI_SUCCESS != rc){
  27         MPI_COLL_VERBOSE(20,"RUNNING FALLBACK BARRIER");
  28         PREVIOUS_SCOLL_FN(mpi_module, barrier, group,
  29                 pSync,
  30                 SCOLL_DEFAULT_ALG);
  31     }
  32     return rc;
  33 }
  34 
  35 int mca_scoll_mpi_broadcast(struct oshmem_group_t *group,
  36                             int PE_root,
  37                             void *target,
  38                             const void *source,
  39                             size_t nlong,
  40                             long *pSync,
  41                             bool nlong_type,
  42                             int alg)
  43 {
  44     mca_scoll_mpi_module_t *mpi_module;
  45     ompi_datatype_t* dtype;
  46     int rc;
  47     void* buf;
  48     int root;
  49     MPI_COLL_VERBOSE(20,"RUNNING MPI BCAST");
  50     mpi_module = (mca_scoll_mpi_module_t *) group->g_scoll.scoll_broadcast_module;
  51     if (group->my_pe == PE_root) {
  52         buf = (void *) source;
  53     } else {
  54         buf = target;
  55     }
  56     dtype = &ompi_mpi_char.dt;
  57     root = oshmem_proc_group_find_id(group, PE_root);
  58     /* Open SHMEM specification has the following constrains (page 85):
  59      * "If using C/C++, nelems must be of type integer. If you are using Fortran, it must be a
  60      *  default integer value". And also fortran signature says "INTEGER".
  61      *  Since ompi coll components doesn't support size_t at the moment,
  62      *  and considering this contradiction, we cast size_t to int here
  63      *  in case if the value is less than INT_MAX and fallback to previous module otherwise. */
  64     if (OPAL_UNLIKELY(!nlong_type || (INT_MAX < nlong))) {
  65 #ifdef INCOMPATIBLE_SHMEM_OMPI_COLL_APIS
  66         MPI_COLL_VERBOSE(20,"RUNNING FALLBACK BCAST");
  67         PREVIOUS_SCOLL_FN(mpi_module, broadcast, group,
  68                 PE_root,
  69                 target,
  70                 source,
  71                 nlong,
  72                 pSync,
  73                 nlong_type,
  74                 SCOLL_DEFAULT_ALG);
  75         return rc;
  76 #else
  77         MPI_COLL_ERROR(20, "variable broadcast length, or exceeds INT_MAX: %zu", nlong);
  78         return OSHMEM_ERR_NOT_SUPPORTED;
  79 #endif
  80     }
  81 
  82     /* Do nothing on zero-length request */
  83     if (OPAL_UNLIKELY(!nlong)) {
  84         return OSHMEM_SUCCESS;
  85     }
  86 
  87     rc = mpi_module->comm->c_coll->coll_bcast(buf, nlong, dtype, root, mpi_module->comm, mpi_module->comm->c_coll->coll_bcast_module);
  88     if (OMPI_SUCCESS != rc){
  89         MPI_COLL_VERBOSE(20,"RUNNING FALLBACK BCAST");
  90         PREVIOUS_SCOLL_FN(mpi_module, broadcast, group,
  91                 PE_root,
  92                 target,
  93                 source,
  94                 nlong,
  95                 pSync,
  96                 nlong_type,
  97                 SCOLL_DEFAULT_ALG);
  98     }
  99     return rc;
 100 }
 101 
/*
 * Concatenate each PE's source buffer into target on every PE of the group,
 * via the OMPI allgather/allgatherv collectives.
 *
 * nlong_type == true means every PE contributes the same number of bytes
 * (fcollect semantics), so a plain allgather suffices.  Otherwise each PE
 * may contribute a different length (collect semantics): the per-PE lengths
 * are exchanged first, then an allgatherv moves the payload.
 *
 * NOTE: rc is assigned inside the PREVIOUS_SCOLL_FN macro on the fallback
 * paths (every call site here returns rc right after invoking it).
 */
int mca_scoll_mpi_collect(struct oshmem_group_t *group,
                          void *target,
                          const void *source,
                          size_t nlong,
                          long *pSync,
                          bool nlong_type,
                          int alg)
{
    /* Payload is moved as raw bytes. */
    ompi_datatype_t* stype = &ompi_mpi_char.dt;
    ompi_datatype_t* rtype = &ompi_mpi_char.dt;
    mca_scoll_mpi_module_t *mpi_module;
    int rc;
    int len;
    int i;
    void *sbuf, *rbuf;
    int *disps, *recvcounts;
    MPI_COLL_VERBOSE(20,"RUNNING MPI ALLGATHER");
    mpi_module = (mca_scoll_mpi_module_t *) group->g_scoll.scoll_collect_module;

    if (nlong_type == true) {
        /* Do nothing on zero-length request */
        if (OPAL_UNLIKELY(!nlong)) {
            return OSHMEM_SUCCESS;
        }

        sbuf = (void *) source;
        rbuf = target;
        /* Open SHMEM specification has the following constrains (page 85):
         * "If using C/C++, nelems must be of type integer. If you are using Fortran, it must be a
         *  default integer value". And also fortran signature says "INTEGER".
         *  Since ompi coll components doesn't support size_t at the moment,
         *  and considering this contradiction, we cast size_t to int here
         *  in case if the value is less than INT_MAX and fallback to previous module otherwise. */
#ifdef INCOMPATIBLE_SHMEM_OMPI_COLL_APIS
        if (INT_MAX < nlong) {
            MPI_COLL_VERBOSE(20,"RUNNING FALLBACK COLLECT");
            PREVIOUS_SCOLL_FN(mpi_module, collect, group,
                    target,
                    source,
                    nlong,
                    pSync,
                    nlong_type,
                    SCOLL_DEFAULT_ALG);
            return rc;
        }
        rc = mpi_module->comm->c_coll->coll_allgather(sbuf, (int)nlong, stype, rbuf, (int)nlong, rtype, mpi_module->comm, mpi_module->comm->c_coll->coll_allgather_module);
#else
        rc = mpi_module->comm->c_coll->coll_allgather(sbuf, nlong, stype, rbuf, nlong, rtype, mpi_module->comm, mpi_module->comm->c_coll->coll_allgather_module);
#endif
        if (OMPI_SUCCESS != rc){
            MPI_COLL_VERBOSE(20,"RUNNING FALLBACK FCOLLECT");
            PREVIOUS_SCOLL_FN(mpi_module, collect, group,
                    target,
                    source,
                    nlong,
                    pSync,
                    nlong_type,
                    SCOLL_DEFAULT_ALG);
        }
    } else {
        /* Variable-length collect: the length must still fit the int-based
         * OMPI coll API, otherwise fall back to the previous scoll module. */
        if (INT_MAX < nlong) {
            MPI_COLL_VERBOSE(20,"RUNNING FALLBACK COLLECT");
            PREVIOUS_SCOLL_FN(mpi_module, collect, group,
                              target,
                              source,
                              nlong,
                              pSync,
                              nlong_type,
                              SCOLL_DEFAULT_ALG);
            return rc;
        }

        len   = nlong;
        disps = malloc(group->proc_count * sizeof(*disps));
        if (disps == NULL) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto complete;
        }

        recvcounts = malloc(group->proc_count * sizeof(*recvcounts));
        if (recvcounts == NULL) {
            rc = OSHMEM_ERR_OUT_OF_RESOURCE;
            goto failed_mem;
        }

        /* Exchange the per-PE byte counts: each PE sends its own len as
         * sizeof(len) raw chars, so recvcounts[i] receives PE i's length
         * (assumes a homogeneous int representation across PEs). */
        rc = mpi_module->comm->c_coll->coll_allgather(&len, sizeof(len), stype, recvcounts,
                                                      sizeof(len), rtype, mpi_module->comm,
                                                      mpi_module->comm->c_coll->coll_allgather_module);
        if (rc != OSHMEM_SUCCESS) {
            goto failed_allgather;
        }

        /* Prefix-sum the counts into receive displacements. */
        disps[0] = 0;
        for (i = 1; i < group->proc_count; i++) {
            disps[i] = disps[i - 1] + recvcounts[i - 1];
        }

        rc = mpi_module->comm->c_coll->coll_allgatherv(source, nlong, stype, target, recvcounts,
                                                       disps, rtype, mpi_module->comm,
                                                       mpi_module->comm->c_coll->coll_allgatherv_module);
failed_allgather:
        free(recvcounts);
failed_mem:
        free(disps);
    }
complete:
    return rc;
}
 210 
 211 
 212 int mca_scoll_mpi_reduce(struct oshmem_group_t *group,
 213         struct oshmem_op_t *op,
 214         void *target,
 215         const void *source,
 216         size_t nlong,
 217         long *pSync,
 218         void *pWrk,
 219         int alg)
 220 {
 221     mca_scoll_mpi_module_t *mpi_module;
 222     struct ompi_datatype_t* dtype;
 223     struct ompi_op_t *h_op;
 224     int rc;
 225     size_t count;
 226     MPI_COLL_VERBOSE(20,"RUNNING MPI REDUCE");
 227     void *sbuf, *rbuf;
 228     mpi_module = (mca_scoll_mpi_module_t *) group->g_scoll.scoll_reduce_module;
 229     sbuf = (void *) source;
 230     rbuf = target;
 231     dtype = shmem_dtype_to_ompi_dtype(op);
 232     h_op = shmem_op_to_ompi_op(op->op);
 233     count = nlong/op->dt_size;
 234 
 235     /* Do nothing on zero-length request */
 236     if (OPAL_UNLIKELY(!nlong)) {
 237         return OSHMEM_SUCCESS;
 238     }
 239 
 240     /* Open SHMEM specification has the following constrains (page 85):
 241      * "If using C/C++, nelems must be of type integer. If you are using Fortran, it must be a
 242      *  default integer value". And also fortran signature says "INTEGER".
 243      *  Since ompi coll components doesn't support size_t at the moment,
 244      *  and considering this contradiction, we cast size_t to int here
 245      *  in case if the value is less than INT_MAX and fallback to previous module otherwise. */
 246 #ifdef INCOMPATIBLE_SHMEM_OMPI_COLL_APIS
 247     if (INT_MAX < count) {
 248         MPI_COLL_VERBOSE(20,"RUNNING FALLBACK REDUCE");
 249         PREVIOUS_SCOLL_FN(mpi_module, reduce, group,
 250                 op,
 251                 target,
 252                 source,
 253                 nlong,
 254                 pSync,
 255                 pWrk,
 256                 SCOLL_DEFAULT_ALG);
 257         return rc;
 258     }
 259     rc = mpi_module->comm->c_coll->coll_allreduce(sbuf, rbuf, (int)count, dtype, h_op, mpi_module->comm, mpi_module->comm->c_coll->coll_allreduce_module);
 260 #else
 261     rc = mpi_module->comm->c_coll->coll_allreduce(sbuf, rbuf, count, dtype, h_op, mpi_module->comm, mpi_module->comm->c_coll->coll_allreduce_module);
 262 #endif
 263     if (OMPI_SUCCESS != rc){
 264         MPI_COLL_VERBOSE(20,"RUNNING FALLBACK REDUCE");
 265         PREVIOUS_SCOLL_FN(mpi_module, reduce, group,
 266                 op,
 267                 target,
 268                 source,
 269                 nlong,
 270                 pSync,
 271                 pWrk,
 272                 SCOLL_DEFAULT_ALG);
 273     }
 274     return rc;
 275 }

/* [<][>][^][v][top][bottom][index][help] */