root/ompi/mpi/c/intercomm_create.c


DEFINITIONS

This source file includes the following definitions:
  1. MPI_Intercomm_create

/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2017 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2008 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2006-2009 University of Houston.  All rights reserved.
 * Copyright (c) 2012-2013 Inria.  All rights reserved.
 * Copyright (c) 2014-2015 Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2016      Los Alamos National Security, LLC.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "ompi/mpi/c/bindings.h"
#include "ompi/runtime/params.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/communicator/communicator.h"
#include "ompi/request/request.h"
#include "ompi/memchecker.h"

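/* With profiling enabled, this routine is built as PMPI_Intercomm_create;
   where the toolchain supports weak symbols, MPI_Intercomm_create becomes
   a weak alias for it, so PMPI-based tools can interpose on the call. */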
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_Intercomm_create = PMPI_Intercomm_create
#endif
#define MPI_Intercomm_create PMPI_Intercomm_create
#endif

static const char FUNC_NAME[] = "MPI_Intercomm_create";

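/*
 * MPI_Intercomm_create builds an inter-communicator from two disjoint
 * intra-communicators that are connected through a bridge communicator
 * containing both leaders.  In outline:
 *   1. the two local leaders exchange their group sizes over bridge_comm;
 *   2. each side broadcasts the remote size and the remote process list
 *      within its own local_comm;
 *   3. every process builds the remote group, a common context id is
 *      selected, and the communicator is activated collectively.
 */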
int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
                         MPI_Comm bridge_comm, int remote_leader,
                         int tag, MPI_Comm *newintercomm)
{
    int local_size=0, local_rank=0;
    int lleader=0, rleader=0;
    ompi_communicator_t *newcomp=NULL;
    struct ompi_proc_t **rprocs=NULL;
    int rc=0, rsize=0;
    ompi_proc_t **proc_list=NULL;
    int j;
    ompi_group_t *new_group_pointer=NULL;

    MEMCHECKER(
        memchecker_comm(local_comm);
        memchecker_comm(bridge_comm);
    );

    if ( MPI_PARAM_CHECK ) {
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);

        if ( ompi_comm_invalid ( local_comm ) ||
             ( local_comm->c_flags & OMPI_COMM_INTER ) )
            return OMPI_ERRHANDLER_INVOKE ( MPI_COMM_WORLD, MPI_ERR_COMM,
                                            FUNC_NAME);

        if ( NULL == newintercomm )
            return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                            FUNC_NAME);

        /* if ( tag < 0 || tag > MPI_TAG_UB )
             return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                             FUNC_NAME);
        */
    }

    OPAL_CR_ENTER_LIBRARY();

    local_size = ompi_comm_size ( local_comm );
    local_rank = ompi_comm_rank ( local_comm );
    lleader = local_leader;
    rleader = remote_leader;

    if ( MPI_PARAM_CHECK ) {
        if ( (0 > local_leader) || (local_leader >= local_size) ) {
            OPAL_CR_EXIT_LIBRARY();
            return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                            FUNC_NAME);
        }

        /* remember that the remote_leader and bridge_comm arguments
           just have to be valid at the local_leader */
        if ( local_rank == local_leader ) {
            if ( ompi_comm_invalid ( bridge_comm ) ||
                 (bridge_comm->c_flags & OMPI_COMM_INTER) ) {
                OPAL_CR_EXIT_LIBRARY();
                return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_COMM,
                                                FUNC_NAME);
            }
            if ( (remote_leader < 0) || (remote_leader >= ompi_comm_size(bridge_comm))) {
                OPAL_CR_EXIT_LIBRARY();
                return OMPI_ERRHANDLER_INVOKE ( local_comm, MPI_ERR_ARG,
                                                FUNC_NAME);
            }
        } /* if ( local_rank == local_leader ) */
    }

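    /* The two local leaders exchange their group sizes over the bridge
       communicator.  Posting the nonblocking receive before the blocking
       send keeps this symmetric handshake free of deadlock. */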
    if ( local_rank == local_leader ) {
        MPI_Request req;

        /* the local leaders exchange the sizes of their groups */
        rc = MCA_PML_CALL(irecv(&rsize, 1, MPI_INT, rleader, tag, bridge_comm,
                                &req));
        if ( rc != MPI_SUCCESS ) {
            goto err_exit;
        }
        rc = MCA_PML_CALL(send (&local_size, 1, MPI_INT, rleader, tag,
                                MCA_PML_BASE_SEND_STANDARD, bridge_comm));
        if ( rc != MPI_SUCCESS ) {
            goto err_exit;
        }
        rc = ompi_request_wait( &req, MPI_STATUS_IGNORE);
        if ( rc != MPI_SUCCESS ) {
            goto err_exit;
        }
    }

    /* broadcast the remote group size to all processes in local_comm;
       on non-leader ranks rsize is undefined until this point.  The list
       of remote processes is exchanged and broadcast inside
       ompi_comm_get_rprocs() below. */
    rc = local_comm->c_coll->coll_bcast ( &rsize, 1, MPI_INT, lleader,
                                         local_comm,
                                         local_comm->c_coll->coll_bcast_module);
    if ( rc != MPI_SUCCESS ) {
        goto err_exit;
    }

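    /* ompi_comm_get_rprocs() has the leaders exchange identification for
       every process in their groups over bridge_comm and broadcast it
       within local_comm, returning an rsize-long array of ompi_proc_t
       pointers describing the remote group on every rank. */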
    rc = ompi_comm_get_rprocs( local_comm, bridge_comm, lleader,
                               remote_leader, tag, rsize, &rprocs );
    if ( OMPI_SUCCESS != rc ) {
        goto err_exit;
    }

    if ( MPI_PARAM_CHECK ) {
        if(OMPI_GROUP_IS_DENSE(local_comm->c_local_group)) {
            rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
                                              local_comm->c_local_group->grp_proc_pointers,
                                              rsize,
                                              rprocs);
        }
        else {
            proc_list = (ompi_proc_t **) calloc (local_comm->c_local_group->grp_proc_count,
                                                 sizeof (ompi_proc_t *));
            if ( NULL == proc_list ) {
                rc = MPI_ERR_NO_MEM;
                goto err_exit;
            }
            for(j=0 ; j<local_comm->c_local_group->grp_proc_count ; j++) {
                proc_list[j] = ompi_group_peer_lookup(local_comm->c_local_group,j);
            }
            rc = ompi_comm_overlapping_groups(local_comm->c_local_group->grp_proc_count,
                                              proc_list,
                                              rsize,
                                              rprocs);
        }
        if ( OMPI_SUCCESS != rc ) {
            goto err_exit;
        }
    }
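
    /* build the remote group: allocate a dense group of rsize entries and
       retain each remote proc so the group holds its own reference */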
    new_group_pointer = ompi_group_allocate(rsize);
    if( NULL == new_group_pointer ) {
        rc = MPI_ERR_GROUP;
        goto err_exit;
    }

    /* put group elements in the list */
    for (j = 0; j < rsize; j++) {
        new_group_pointer->grp_proc_pointers[j] = rprocs[j];
        OBJ_RETAIN(rprocs[j]);
    }

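    /* assemble the new inter-communicator: local_comm supplies the local
       group and error handler, new_group_pointer becomes the remote group.
       No context id has been selected yet, so the communicator is not
       usable until ompi_comm_nextcid()/ompi_comm_activate() complete. */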
    rc = ompi_comm_set ( &newcomp,                                     /* new comm */
                         local_comm,                                   /* old comm */
                         local_comm->c_local_group->grp_proc_count,    /* local_size */
                         NULL,                                         /* local_procs*/
                         rsize,                                        /* remote_size */
                         NULL,                                         /* remote_procs */
                         NULL,                                         /* attrs */
                         local_comm->error_handler,                    /* error handler*/
                         false,                                        /* don't copy the topo */
                         local_comm->c_local_group,                    /* local group */
                         new_group_pointer                             /* remote group */
                         );

    if ( MPI_SUCCESS != rc ) {
        /* drop the references taken for the group above, so the error
           path does not leak the remote group or its procs */
        ompi_group_decrement_proc_count(new_group_pointer);
        OBJ_RELEASE(new_group_pointer);
        new_group_pointer = NULL;
        goto err_exit;
    }

    /* the new communicator now holds its own reference to the remote
       group, so this one can be dropped */
    OBJ_RELEASE(new_group_pointer);
    new_group_pointer = MPI_GROUP_NULL;

    /* Determine context id. It is identical to f_2_c_handle */
    rc = ompi_comm_nextcid (newcomp, local_comm, bridge_comm, &lleader,
                            &rleader, false, OMPI_COMM_CID_INTRA_BRIDGE);
    if ( MPI_SUCCESS != rc ) {
        goto err_exit;
    }

    /* activate comm and init coll-module */
    rc = ompi_comm_activate (&newcomp, local_comm, bridge_comm, &lleader, &rleader,
                             false, OMPI_COMM_CID_INTRA_BRIDGE);
    if ( MPI_SUCCESS != rc ) {
        goto err_exit;
    }

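    /* note: the success path deliberately falls through into err_exit,
       which only reports an error when rc indicates one; the temporary
       rprocs and proc_list arrays are freed on both paths */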
 err_exit:
    OPAL_CR_EXIT_LIBRARY();

    if ( NULL != rprocs ) {
        free ( rprocs );
    }
    if ( NULL != proc_list ) {
        free ( proc_list );
    }
    if ( OMPI_SUCCESS != rc ) {
        *newintercomm = MPI_COMM_NULL;
        return OMPI_ERRHANDLER_INVOKE(local_comm, rc,
                                      FUNC_NAME);
    }

    *newintercomm = newcomp;
    return MPI_SUCCESS;
}
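
For reference, a minimal usage sketch (not part of this file; it assumes an
even number of at least two processes, and the tag value 42 plus all
variable names are illustrative only): the world communicator is split into
two halves, rank 0 of each half acts as local leader, and MPI_COMM_WORLD
serves as the bridge communicator.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Comm local_comm, intercomm;
    int rank, size, color, remote_leader, remote_size;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* split the world into a lower and an upper half */
    color = (rank < size / 2) ? 0 : 1;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &local_comm);

    /* rank 0 of each half is the local leader; the remote leader is
       named by its rank in the bridge communicator (MPI_COMM_WORLD) */
    remote_leader = (color == 0) ? size / 2 : 0;
    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, remote_leader,
                         42, &intercomm);

    MPI_Comm_remote_size(intercomm, &remote_size);
    printf("rank %d: remote group has %d processes\n", rank, remote_size);

    MPI_Comm_free(&intercomm);
    MPI_Comm_free(&local_comm);
    MPI_Finalize();
    return 0;
}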
