root/ompi/runtime/ompi_mpi_init.c


DEFINITIONS

This source file includes the following definitions.
  1. _process_name_print_for_opal
  2. _process_name_compare
  3. _convert_string_to_process_name
  4. _convert_process_name_to_string
  5. ompi_mpi_thread_level
  6. ompi_register_mca_variables
  7. fence_release
  8. ompi_mpi_init

   1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
   2 /*
   3  * Copyright (c) 2004-2010 The Trustees of Indiana University and Indiana
   4  *                         University Research and Technology
   5  *                         Corporation.  All rights reserved.
   6  * Copyright (c) 2004-2014 The University of Tennessee and The University
   7  *                         of Tennessee Research Foundation.  All rights
   8  *                         reserved.
   9  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
  10  *                         University of Stuttgart.  All rights reserved.
  11  * Copyright (c) 2004-2005 The Regents of the University of California.
  12  *                         All rights reserved.
  13  * Copyright (c) 2006-2018 Cisco Systems, Inc.  All rights reserved
  14  * Copyright (c) 2006-2015 Los Alamos National Security, LLC.  All rights
  15  *                         reserved.
  16  * Copyright (c) 2006-2009 University of Houston. All rights reserved.
  17  * Copyright (c) 2008-2009 Sun Microsystems, Inc.  All rights reserved.
  18  * Copyright (c) 2011      Sandia National Laboratories. All rights reserved.
  19  * Copyright (c) 2012-2013 Inria.  All rights reserved.
  20  * Copyright (c) 2014-2018 Intel, Inc.  All rights reserved.
  21  * Copyright (c) 2014-2016 Research Organization for Information Science
  22  *                         and Technology (RIST). All rights reserved.
  23  * Copyright (c) 2016-2018 Mellanox Technologies Ltd. All rights reserved.
  24  *
  25  * Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
  26  * Copyright (c) 2018      FUJITSU LIMITED.  All rights reserved.
  27  * $COPYRIGHT$
  28  *
  29  * Additional copyrights may follow
  30  *
  31  * $HEADER$
  32  */
  33 
  34 #include "ompi_config.h"
  35 
  36 #ifdef HAVE_SYS_TIME_H
  37 #include <sys/time.h>
  38 #endif  /* HAVE_SYS_TIME_H */
  39 #include <pthread.h>
  40 #ifdef HAVE_UNISTD_H
  41 #include <unistd.h>
  42 #endif
  43 
  44 #include "mpi.h"
  45 #include "opal/class/opal_list.h"
  46 #include "opal/mca/base/base.h"
  47 #include "opal/mca/hwloc/base/base.h"
  48 #include "opal/runtime/opal_progress.h"
  49 #include "opal/threads/threads.h"
  50 #include "opal/util/arch.h"
  51 #include "opal/util/argv.h"
  52 #include "opal/util/output.h"
  53 #include "opal/util/error.h"
  54 #include "opal/util/stacktrace.h"
  55 #include "opal/util/show_help.h"
  56 #include "opal/runtime/opal.h"
  57 #include "opal/mca/event/event.h"
  58 #include "opal/mca/allocator/base/base.h"
  59 #include "opal/mca/rcache/base/base.h"
  60 #include "opal/mca/rcache/rcache.h"
  61 #include "opal/mca/mpool/base/base.h"
  62 #include "opal/mca/btl/base/base.h"
  63 #include "opal/mca/pmix/base/base.h"
  64 #include "opal/util/timings.h"
  65 #include "opal/util/opal_environ.h"
  66 
  67 #include "ompi/constants.h"
  68 #include "ompi/mpi/fortran/base/constants.h"
  69 #include "ompi/runtime/mpiruntime.h"
  70 #include "ompi/runtime/params.h"
  71 #include "ompi/communicator/communicator.h"
  72 #include "ompi/info/info.h"
  73 #include "ompi/errhandler/errcode.h"
  74 #include "ompi/errhandler/errhandler.h"
  75 #include "ompi/interlib/interlib.h"
  76 #include "ompi/request/request.h"
  77 #include "ompi/message/message.h"
  78 #include "ompi/op/op.h"
  79 #include "ompi/mca/op/op.h"
  80 #include "ompi/mca/op/base/base.h"
  81 #include "ompi/file/file.h"
  82 #include "ompi/attribute/attribute.h"
  83 #include "ompi/mca/pml/pml.h"
  84 #include "ompi/mca/bml/bml.h"
  85 #include "ompi/mca/pml/base/base.h"
  86 #include "ompi/mca/bml/base/base.h"
  87 #include "ompi/mca/osc/base/base.h"
  88 #include "ompi/mca/coll/base/base.h"
  89 #include "ompi/mca/io/io.h"
  90 #include "ompi/mca/io/base/base.h"
  91 #include "ompi/mca/rte/rte.h"
  92 #include "ompi/mca/rte/base/base.h"
  93 #include "ompi/debuggers/debuggers.h"
  94 #include "ompi/proc/proc.h"
  95 #include "ompi/mca/pml/base/pml_base_bsend.h"
  96 #include "ompi/dpm/dpm.h"
  97 #include "ompi/mpiext/mpiext.h"
  98 #include "ompi/mca/hook/base/base.h"
  99 #include "ompi/util/timings.h"
 100 
 101 #if OPAL_ENABLE_FT_CR == 1
 102 #include "ompi/mca/crcp/crcp.h"
 103 #include "ompi/mca/crcp/base/base.h"
 104 #endif
 105 #include "ompi/runtime/ompi_cr.h"
 106 
 107 /* newer versions of gcc have poisoned this deprecated feature */
 108 #ifdef HAVE___MALLOC_INITIALIZE_HOOK
 109 #include "opal/mca/memory/base/base.h"
 110 /* So this sucks, but with OPAL in its own library that is brought in
 111    implicitly from libmpi, there are times when the malloc initialize
 112    hook in the memory component doesn't work.  So we have to do it
 113    from here, since any MPI code is going to call MPI_Init... */
 114 OPAL_DECLSPEC void (*__malloc_initialize_hook) (void) =
 115     opal_memory_base_malloc_init_hook;
 116 #endif
 117 
 118 /* This is required for the boundaries of the hash tables used to store
 119  * the F90 types returned by the MPI_Type_create_f90_XXX functions.
 120  */
 121 #include <float.h>
 122 
 123 #if OPAL_CC_USE_PRAGMA_IDENT
 124 #pragma ident OMPI_IDENT_STRING
 125 #elif OPAL_CC_USE_IDENT
 126 #ident OMPI_IDENT_STRING
 127 #endif
 128 const char ompi_version_string[] = OMPI_IDENT_STRING;
 129 
 130 /*
 131  * Global variables and symbols for the MPI layer
 132  */
 133 
 134 opal_atomic_int32_t ompi_mpi_state = OMPI_MPI_STATE_NOT_INITIALIZED;
 135 volatile bool ompi_rte_initialized = false;
 136 
 137 bool ompi_mpi_thread_multiple = false;
 138 int ompi_mpi_thread_requested = MPI_THREAD_SINGLE;
 139 int ompi_mpi_thread_provided = MPI_THREAD_SINGLE;
 140 
 141 opal_thread_t *ompi_mpi_main_thread = NULL;
 142 
 143 /*
 144  * These variables are for the MPI F08 bindings (F08 must bind Fortran
 145  * variables to symbols; it cannot bind Fortran variables to the
 146  * address of a C variable).
 147  */
 148 
 149 ompi_predefined_datatype_t *ompi_mpi_character_addr = &ompi_mpi_character;
 150 ompi_predefined_datatype_t *ompi_mpi_logical_addr   = &ompi_mpi_logical;
 151 ompi_predefined_datatype_t *ompi_mpi_logical1_addr  = &ompi_mpi_logical1;
 152 ompi_predefined_datatype_t *ompi_mpi_logical2_addr  = &ompi_mpi_logical2;
 153 ompi_predefined_datatype_t *ompi_mpi_logical4_addr  = &ompi_mpi_logical4;
 154 ompi_predefined_datatype_t *ompi_mpi_logical8_addr  = &ompi_mpi_logical8;
 155 ompi_predefined_datatype_t *ompi_mpi_integer_addr   = &ompi_mpi_integer;
 156 ompi_predefined_datatype_t *ompi_mpi_integer1_addr  = &ompi_mpi_integer1;
 157 ompi_predefined_datatype_t *ompi_mpi_integer2_addr  = &ompi_mpi_integer2;
 158 ompi_predefined_datatype_t *ompi_mpi_integer4_addr  = &ompi_mpi_integer4;
 159 ompi_predefined_datatype_t *ompi_mpi_integer8_addr  = &ompi_mpi_integer8;
 160 ompi_predefined_datatype_t *ompi_mpi_integer16_addr = &ompi_mpi_integer16;
 161 ompi_predefined_datatype_t *ompi_mpi_real_addr      = &ompi_mpi_real;
 162 ompi_predefined_datatype_t *ompi_mpi_real2_addr     = &ompi_mpi_real2;
 163 ompi_predefined_datatype_t *ompi_mpi_real4_addr     = &ompi_mpi_real4;
 164 ompi_predefined_datatype_t *ompi_mpi_real8_addr     = &ompi_mpi_real8;
 165 ompi_predefined_datatype_t *ompi_mpi_real16_addr    = &ompi_mpi_real16;
 166 ompi_predefined_datatype_t *ompi_mpi_dblprec_addr   = &ompi_mpi_dblprec;
 167 ompi_predefined_datatype_t *ompi_mpi_cplex_addr     = &ompi_mpi_cplex;
 168 ompi_predefined_datatype_t *ompi_mpi_complex4_addr  = &ompi_mpi_complex4;
 169 ompi_predefined_datatype_t *ompi_mpi_complex8_addr  = &ompi_mpi_complex8;
 170 ompi_predefined_datatype_t *ompi_mpi_complex16_addr = &ompi_mpi_complex16;
 171 ompi_predefined_datatype_t *ompi_mpi_complex32_addr = &ompi_mpi_complex32;
 172 ompi_predefined_datatype_t *ompi_mpi_dblcplex_addr  = &ompi_mpi_dblcplex;
 173 ompi_predefined_datatype_t *ompi_mpi_2real_addr     = &ompi_mpi_2real;
 174 ompi_predefined_datatype_t *ompi_mpi_2dblprec_addr  = &ompi_mpi_2dblprec;
 175 ompi_predefined_datatype_t *ompi_mpi_2integer_addr  = &ompi_mpi_2integer;
 176 
 177 struct ompi_status_public_t *ompi_mpi_status_ignore_addr =
 178     (ompi_status_public_t *) 0;
 179 struct ompi_status_public_t *ompi_mpi_statuses_ignore_addr =
 180     (ompi_status_public_t *) 0;
 181 
 182 /*
 183  * These variables are here, rather than under ompi/mpi/c/foo.c
 184  * because it is not sufficient to have a .c file that only contains
 185  * variables -- you must have a function that is invoked from
 186  * elsewhere in the code to guarantee that all linkers will pull in
 187  * the .o file from the library.  Hence, although these are MPI
 188  * constants, we might as well just define them here (i.e., in a file
 189  * that already has a function that is guaranteed to be linked in,
 190  * rather than make a new .c file with the constants and a
 191  * corresponding dummy function that is invoked from this function).
 192  *
 193  * Additionally, there can be/are strange linking paths such that
 194  * ompi_info needs symbols such as ompi_fortran_status_ignore,
 195  * which, if they weren't here with a collection of other global
 196  * symbols that are initialized (which seems to force this .o file to
 197  * be pulled into the resolution process, because ompi_info certainly
 198  * does not call ompi_mpi_init()), would not be able to be found by
 199  * the OSX linker.
 200  *
 201  * NOTE: See the big comment in ompi/mpi/fortran/base/constants.h
 202  * about why we have four symbols for each of the common blocks (e.g.,
 203  * the Fortran equivalent(s) of MPI_STATUS_IGNORE).  Here, we can only
 204  * have *one* value (not four).  So the only thing we can do is make
 205  * it equal to the fortran compiler convention that was selected at
 206  * configure time.  Note that this is also true for the value of
 207  * .TRUE. from the Fortran compiler, so even though Open MPI supports
 208  * all four Fortran symbol conventions, it can only support one
 209  * convention for the two C constants (MPI_FORTRAN_STATUS[ES]_IGNORE)
 210  * and only support one compiler for the value of .TRUE.  Ugh!!
 211  *
 212  * Note that the casts here are ok -- we're *only* comparing pointer
 213  * values (i.e., they'll never be de-referenced).  The global symbols
 214  * are actually of type (ompi_fortran_common_t) (for alignment
 215  * issues), but MPI says that MPI_F_STATUS[ES]_IGNORE must be of type
 216  * (MPI_Fint*).  Hence, we have to cast to make compilers not
 217  * complain.
 218  */
 219 #if OMPI_BUILD_FORTRAN_BINDINGS
 220 #  if OMPI_FORTRAN_CAPS
 221 MPI_Fint *MPI_F_STATUS_IGNORE = (MPI_Fint*) &MPI_FORTRAN_STATUS_IGNORE;
 222 MPI_Fint *MPI_F_STATUSES_IGNORE = (MPI_Fint*) &MPI_FORTRAN_STATUSES_IGNORE;
 223 #  elif OMPI_FORTRAN_PLAIN
 224 MPI_Fint *MPI_F_STATUS_IGNORE = (MPI_Fint*) &mpi_fortran_status_ignore;
 225 MPI_Fint *MPI_F_STATUSES_IGNORE = (MPI_Fint*) &mpi_fortran_statuses_ignore;
 226 #  elif OMPI_FORTRAN_SINGLE_UNDERSCORE
 227 MPI_Fint *MPI_F_STATUS_IGNORE = (MPI_Fint*) &mpi_fortran_status_ignore_;
 228 MPI_Fint *MPI_F_STATUSES_IGNORE = (MPI_Fint*) &mpi_fortran_statuses_ignore_;
 229 #  elif OMPI_FORTRAN_DOUBLE_UNDERSCORE
 230 MPI_Fint *MPI_F_STATUS_IGNORE = (MPI_Fint*) &mpi_fortran_status_ignore__;
 231 MPI_Fint *MPI_F_STATUSES_IGNORE = (MPI_Fint*) &mpi_fortran_statuses_ignore__;
 232 #  else
 233 #    error Unrecognized Fortran name mangling scheme
 234 #  endif
 235 #else
 236 MPI_Fint *MPI_F_STATUS_IGNORE = NULL;
 237 MPI_Fint *MPI_F_STATUSES_IGNORE = NULL;
 238 #endif  /* OMPI_BUILD_FORTRAN_BINDINGS */
 239 
 240 
 241 /* Constants for the Fortran layer.  These values are referred to via
 242    common blocks in the Fortran equivalents.  See
 243    ompi/mpi/fortran/base/constants.h for a more detailed explanation.
 244 
 245    The values are *NOT* initialized.  We do not use the values of
 246    these constants; only their addresses (because they're always
 247    passed by reference by Fortran).
 248 
 249    Initializing these upon instantiation can reveal size and/or
 250    alignment differences between Fortran and C (!) which can cause
 251    warnings or errors upon linking (e.g., making static libraries with
 252    the intel 9.0 compilers on 64 bit platforms shows alignment
 253    differences between libmpi.a and the user's application, resulting
 254    in a linker warning).  FWIW, if you initialize these variables in
 255    functions (i.e., not at the instantiation in the global scope), the
 256    linker somehow "figures it all out" (w.r.t. different alignments
 257    between fortran common blocks and the corresponding C variables) and
 258    no linker warnings occur.
 259 
 260    Note that the rationale for the types of each of these variables is
 261    discussed in ompi/include/mpif-common.h.  Do not change the types
 262    without also modifying ompi/mpi/fortran/base/constants.h and
 263    ompi/include/mpif-common.h.
 264  */
 265 
 266 #include "mpif-c-constants.h"
 267 
 268 /*
 269  * Hash tables for MPI_Type_create_f90* functions
 270  */
 271 opal_hash_table_t ompi_mpi_f90_integer_hashtable = {{0}};
 272 opal_hash_table_t ompi_mpi_f90_real_hashtable = {{0}};
 273 opal_hash_table_t ompi_mpi_f90_complex_hashtable = {{0}};
 274 
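/* The following block is an illustrative, disabled sketch (not part of the
 * build): it shows the caching pattern the hash tables above are meant to
 * support.  The single-uint32 key and the helper name are assumptions made
 * for illustration only; the real key construction lives in the
 * MPI_Type_create_f90_* implementations. */
#if 0
static int f90_real_cache_lookup_sketch(uint32_t key, ompi_datatype_t **type)
{
    void *value = NULL;
    /* cache hit: hand back the previously created datatype */
    if (OPAL_SUCCESS == opal_hash_table_get_value_uint32(
            &ompi_mpi_f90_real_hashtable, key, &value)) {
        *type = (ompi_datatype_t*) value;
        return OMPI_SUCCESS;
    }
    /* cache miss: the caller would create the datatype and then store it
     * with opal_hash_table_set_value_uint32() */
    return OMPI_ERR_NOT_FOUND;
}
#endif
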
 275 /*
 276  * Per MPI-2:9.5.3, MPI_REGISTER_DATAREP is a memory leak.  There is
 277  * no way to *de*register datareps once they've been registered.  So
 278  * we have to track all registrations here so that they can be
 279  * de-registered during MPI_FINALIZE so that memory-tracking debuggers
 280  * don't show Open MPI as leaking memory.
 281  */
 282 opal_list_t ompi_registered_datareps = {{0}};
 283 
 284 bool ompi_enable_timing = false;
 285 extern bool ompi_mpi_yield_when_idle;
 286 extern int ompi_mpi_event_tick_rate;
 287 
 288 /**
 289  * Static functions used to configure the interactions between OPAL and
 290  * the runtime.
 291  */
 292 static char*
 293 _process_name_print_for_opal(const opal_process_name_t procname)
 294 {
 295     ompi_process_name_t* rte_name = (ompi_process_name_t*)&procname;
 296     return OMPI_NAME_PRINT(rte_name);
 297 }
 298 
 299 static int
 300 _process_name_compare(const opal_process_name_t p1, const opal_process_name_t p2)
 301 {
 302     ompi_process_name_t* o1 = (ompi_process_name_t*)&p1;
 303     ompi_process_name_t* o2 = (ompi_process_name_t*)&p2;
 304     return ompi_rte_compare_name_fields(OMPI_RTE_CMP_ALL, o1, o2);
 305 }
 306 
 307 static int _convert_string_to_process_name(opal_process_name_t *name,
 308                                            const char* name_string)
 309 {
 310     return ompi_rte_convert_string_to_process_name(name, name_string);
 311 }
 312 
 313 static int _convert_process_name_to_string(char** name_string,
 314                                            const opal_process_name_t *name)
 315 {
 316     return ompi_rte_convert_process_name_to_string(name_string, name);
 317 }
 318 
 319 void ompi_mpi_thread_level(int requested, int *provided)
 320 {
 321     /**
 322      * These values are monotonic; MPI_THREAD_SINGLE < MPI_THREAD_FUNNELED
 323      *                             < MPI_THREAD_SERIALIZED < MPI_THREAD_MULTIPLE.
 324      * If possible, the call will return provided = requested. Failing this,
 325      * the call will return the least supported level such that
 326      * provided > requested. Finally, if the user requirement cannot be
 327      * satisfied, then the call will return in provided the highest
 328      * supported level.
 329      */
 330     ompi_mpi_thread_requested = requested;
 331 
 332     ompi_mpi_thread_provided = *provided = requested;
 333 
 334     if (!ompi_mpi_main_thread) {
 335         ompi_mpi_main_thread = opal_thread_get_self();
 336     }
 337 
 338     ompi_mpi_thread_multiple = (ompi_mpi_thread_provided ==
 339                                 MPI_THREAD_MULTIPLE);
 340 }
 341 
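/* Illustrative, disabled sketch of the clamping behavior described in the
 * comment above.  OMPI_ASSUMED_MAX_THREAD_LEVEL is a hypothetical
 * placeholder, not a real Open MPI symbol; the point is only that the
 * monotonic ordering of the MPI_THREAD_* constants makes a plain integer
 * comparison sufficient. */
#if 0
static int clamp_thread_level_sketch(int requested)
{
    int max_supported = OMPI_ASSUMED_MAX_THREAD_LEVEL;  /* hypothetical */
    /* MPI_THREAD_SINGLE < FUNNELED < SERIALIZED < MULTIPLE, so return the
     * requested level if we can support it, else the highest we can offer */
    return (requested <= max_supported) ? requested : max_supported;
}
#endif
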
 342 static int ompi_register_mca_variables(void)
 343 {
 344     int ret;
 345 
 346     /* Register MPI variables */
 347     if (OMPI_SUCCESS != (ret = ompi_mpi_register_params())) {
 348         return ret;
 349     }
 350 
 351     /* check to see if we want timing information */
 352     /* TODO: enable OMPI init and OMPI finalize timings if
 353      * this variable was set to 1!
 354      */
 355     ompi_enable_timing = false;
 356     (void) mca_base_var_register("ompi", "ompi", NULL, "timing",
 357                                  "Request that critical timing loops be measured",
 358                                  MCA_BASE_VAR_TYPE_BOOL, NULL, 0, 0,
 359                                  OPAL_INFO_LVL_9,
 360                                  MCA_BASE_VAR_SCOPE_READONLY,
 361                                  &ompi_enable_timing);
 362 
 363     return OMPI_SUCCESS;
 364 }
 365 
 366 static void fence_release(int status, void *cbdata)
 367 {
 368     volatile bool *active = (volatile bool*)cbdata;
 369     OPAL_ACQUIRE_OBJECT(active);
 370     *active = false;
 371     OPAL_POST_OBJECT(active);
 372 }
 373 
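/* Illustrative, disabled sketch of how fence_release() is paired with a
 * non-blocking fence elsewhere in this file: post the flag, start the
 * fence with fence_release as the completion callback, then progress-wait
 * until the callback clears the flag.  This mirrors the real calls made
 * inside ompi_mpi_init() below. */
#if 0
static int fence_and_wait_sketch(void)
{
    volatile bool active = true;
    OPAL_POST_OBJECT(&active);
    if (OMPI_SUCCESS != opal_pmix.fence_nb(NULL, false,
                                           fence_release, (void*)&active)) {
        return OMPI_ERROR;
    }
    /* cannot simply block: opal_progress() must keep running */
    OMPI_LAZY_WAIT_FOR_COMPLETION(active);
    return OMPI_SUCCESS;
}
#endif
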
 374 int ompi_mpi_init(int argc, char **argv, int requested, int *provided,
 375                   bool reinit_ok)
 376 {
 377     int ret;
 378     ompi_proc_t** procs;
 379     size_t nprocs;
 380     char *error = NULL;
 381     ompi_errhandler_errtrk_t errtrk;
 382     opal_list_t info;
 383     opal_value_t *kv;
 384     volatile bool active;
 385     bool background_fence = false;
 386 
 387     OMPI_TIMING_INIT(64);
 388 
 389     ompi_hook_base_mpi_init_top(argc, argv, requested, provided);
 390 
 391     /* Ensure that we were not already initialized or finalized. */
 392     int32_t expected = OMPI_MPI_STATE_NOT_INITIALIZED;
 393     int32_t desired  = OMPI_MPI_STATE_INIT_STARTED;
 394     opal_atomic_wmb();
 395     if (!opal_atomic_compare_exchange_strong_32(&ompi_mpi_state, &expected,
 396                                                 desired)) {
 397         // If we failed to atomically transition ompi_mpi_state from
 398         // NOT_INITIALIZED to INIT_STARTED, then someone else already
 399         // did that, and we should return.
 400         if (expected >= OMPI_MPI_STATE_FINALIZE_STARTED) {
 401             opal_show_help("help-mpi-runtime.txt",
 402                            "mpi_init: already finalized", true);
 403             return MPI_ERR_OTHER;
 404         } else if (expected >= OMPI_MPI_STATE_INIT_STARTED) {
 405             // In some cases (e.g., oshmem_shmem_init()), we may call
 406             // ompi_mpi_init() multiple times.  In such cases, just
 407             // silently return successfully once the initializing
 408             // thread has completed.
 409             if (reinit_ok) {
 410                 while (ompi_mpi_state < OMPI_MPI_STATE_INIT_COMPLETED) {
 411                     usleep(1);
 412                 }
 413                 return MPI_SUCCESS;
 414             }
 415 
 416             opal_show_help("help-mpi-runtime.txt",
 417                            "mpi_init: invoked multiple times", true);
 418             return MPI_ERR_OTHER;
 419         }
 420     }
 421 
 422     /* Figure out the final MPI thread levels.  If we were not
 423        compiled for support for MPI threads, then don't allow
 424        MPI_THREAD_MULTIPLE.  Set this stuff up here early in the
 425        process so that other components can make decisions based on
 426        this value. */
 427 
 428     ompi_mpi_thread_level(requested, provided);
 429 
 430     /* Setup enough to check get/set MCA params */
 431     if (OPAL_SUCCESS != (ret = opal_init_util(&argc, &argv))) {
 432         error = "ompi_mpi_init: opal_init_util failed";
 433         goto error;
 434     }
 435     OMPI_TIMING_IMPORT_OPAL("opal_init_util");
 436 
 437     /* If thread support was enabled, then set up OPAL to allow for them. This must be done
 438      * early to prevent a race condition that can occur with orte_init(). */
 439     if (*provided != MPI_THREAD_SINGLE) {
 440         opal_set_using_threads(true);
 441     }
 442 
 443     /* Convince OPAL to use our naming scheme */
 444     opal_process_name_print = _process_name_print_for_opal;
 445     opal_compare_proc = _process_name_compare;
 446     opal_convert_string_to_process_name = _convert_string_to_process_name;
 447     opal_convert_process_name_to_string = _convert_process_name_to_string;
 448     opal_proc_for_name = ompi_proc_for_name;
 449 
 450     /* Register MCA variables */
 451     if (OPAL_SUCCESS != (ret = ompi_register_mca_variables())) {
 452         error = "ompi_mpi_init: ompi_register_mca_variables failed";
 453         goto error;
 454     }
 455 
 456     if (OPAL_SUCCESS != (ret = opal_arch_set_fortran_logical_size(sizeof(ompi_fortran_logical_t)))) {
 457         error = "ompi_mpi_init: opal_arch_set_fortran_logical_size failed";
 458         goto error;
 459     }
 460 
 461     /* _After_ opal_init_util() but _before_ orte_init(), we need to
 462        set an MCA param that tells libevent that it's ok to use any
 463        mechanism in libevent that is available on this platform (e.g.,
 464        epoll and friends).  Per opal/event/event.s, we default to
 465        select/poll -- but we know that MPI processes won't be using
 466        pty's with the event engine, so it's ok to relax this
 467        constraint and let any fd-monitoring mechanism be used. */
 468 
 469     ret = mca_base_var_find("opal", "event", "*", "event_include");
 470     if (ret >= 0) {
 471         char *allvalue = "all";
 472         /* We have to explicitly "set" the MCA param value here
 473            because libevent initialization will re-register the MCA
 474            param and therefore override the default. Setting the value
 475            here puts the desired value ("all") in different storage
 476            that is not overwritten if/when the MCA param is
 477            re-registered. This is unless the user has specified a different
 478            value for this MCA parameter. Make sure we check to see if the
 479            default is specified before forcing "all" in case that is not what
 480            the user desires. Note that we do *NOT* set this value as an
 481            environment variable, just so that it won't be inherited by
 482            any spawned processes and potentially cause unintended
 483            side-effects with launching RTE tools... */
 484         mca_base_var_set_value(ret, allvalue, 4, MCA_BASE_VAR_SOURCE_DEFAULT, NULL);
 485     }
 486 
 487     /* open the ompi hook framework */
 488     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_hook_base_framework, 0))) {
 489         error = "ompi_hook_base_open() failed";
 490         goto error;
 491     }
 492 
 493     ompi_hook_base_mpi_init_top_post_opal(argc, argv, requested, provided);
 494 
 495 
 496     OMPI_TIMING_NEXT("initialization");
 497 
 498     /* if we were not externally started, then we need to set up
 499      * some envars so the MPI_INFO_ENV can get the cmd name
 500      * and argv (but only if the user supplied a non-NULL argv!), and
 501      * the requested thread level
 502      */
 503     if (NULL == getenv("OMPI_COMMAND") && NULL != argv && NULL != argv[0]) {
 504         opal_setenv("OMPI_COMMAND", argv[0], true, &environ);
 505     }
 506     if (NULL == getenv("OMPI_ARGV") && 1 < argc) {
 507         char *tmp;
 508         tmp = opal_argv_join(&argv[1], ' ');
 509         opal_setenv("OMPI_ARGV", tmp, true, &environ);
 510         free(tmp);
 511     }
 512 
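    /* Illustrative, disabled sketch of the consumer side: when MPI_INFO_ENV
     * is populated (in ompi_mpiinfo_init()), the "command" and "argv" keys
     * can be filled from the environment variables exported above.  The
     * exact calls below are assumptions for illustration only. */
#if 0
    {
        char *cmd  = getenv("OMPI_COMMAND");
        char *args = getenv("OMPI_ARGV");
        if (NULL != cmd) {
            ompi_info_set(&ompi_mpi_info_env.info, "command", cmd);
        }
        if (NULL != args) {
            ompi_info_set(&ompi_mpi_info_env.info, "argv", args);
        }
    }
#endif
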
 513     /* open the rte framework */
 514     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_rte_base_framework, 0))) {
 515         error = "ompi_rte_base_open() failed";
 516         goto error;
 517     }
 518     /* no select is required as this is a static framework */
 519 
 520     /* Setup RTE */
 521     if (OMPI_SUCCESS != (ret = ompi_rte_init(NULL, NULL))) {
 522         error = "ompi_mpi_init: ompi_rte_init failed";
 523         goto error;
 524     }
 525     OMPI_TIMING_NEXT("rte_init");
 526     OMPI_TIMING_IMPORT_OPAL("orte_ess_base_app_setup");
 527     OMPI_TIMING_IMPORT_OPAL("rte_init");
 528 
 529     ompi_rte_initialized = true;
 530 
 531     /* Register the default errhandler callback  */
 532     errtrk.status = OPAL_ERROR;
 533     errtrk.active = true;
 534     /* we want to go first */
 535     OBJ_CONSTRUCT(&info, opal_list_t);
 536     kv = OBJ_NEW(opal_value_t);
 537     kv->key = strdup(OPAL_PMIX_EVENT_HDLR_PREPEND);
 538     opal_list_append(&info, &kv->super);
 539     /* give it a name so we can distinguish it */
 540     kv = OBJ_NEW(opal_value_t);
 541     kv->key = strdup(OPAL_PMIX_EVENT_HDLR_NAME);
 542     kv->type = OPAL_STRING;
 543     kv->data.string = strdup("MPI-Default");
 544     opal_list_append(&info, &kv->super);
 545     opal_pmix.register_evhandler(NULL, &info, ompi_errhandler_callback,
 546                                  ompi_errhandler_registration_callback,
 547                                  (void*)&errtrk);
 548     OMPI_LAZY_WAIT_FOR_COMPLETION(errtrk.active);
 549 
 550     OPAL_LIST_DESTRUCT(&info);
 551     if (OPAL_SUCCESS != errtrk.status) {
 552         error = "Error handler registration";
 553         ret = errtrk.status;
 554         goto error;
 555     }
 556 
 557     /* declare our presence for interlib coordination, and
 558      * register for callbacks when other libs declare */
 559     if (OMPI_SUCCESS != (ret = ompi_interlib_declare(*provided, OMPI_IDENT_STRING))) {
 560         error = "ompi_interlib_declare";
 561         goto error;
 562     }
 563 
 564     /* initialize datatypes. This step should be done early as it will
 565      * create the local convertor and local arch used in the proc
 566      * init.
 567      */
 568     if (OMPI_SUCCESS != (ret = ompi_datatype_init())) {
 569         error = "ompi_datatype_init() failed";
 570         goto error;
 571     }
 572 
 573     /* Initialize OMPI procs */
 574     if (OMPI_SUCCESS != (ret = ompi_proc_init())) {
 575         error = "ompi_proc_init() failed";
 576         goto error;
 577     }
 578 
 579     /* Initialize the op framework. This has to be done *after*
 580        ddt_init, but before mca_coll_base_open, since some collective
 581        modules (e.g., the hierarchical coll component) may need ops in
 582        their query function. */
 583     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_op_base_framework, 0))) {
 584         error = "ompi_op_base_open() failed";
 585         goto error;
 586     }
 587     if (OMPI_SUCCESS !=
 588         (ret = ompi_op_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
 589                                            ompi_mpi_thread_multiple))) {
 590         error = "ompi_op_base_find_available() failed";
 591         goto error;
 592     }
 593     if (OMPI_SUCCESS != (ret = ompi_op_init())) {
 594         error = "ompi_op_init() failed";
 595         goto error;
 596     }
 597 
 598     /* Open up MPI-related MCA components */
 599 
 600     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&opal_allocator_base_framework, 0))) {
 601         error = "mca_allocator_base_open() failed";
 602         goto error;
 603     }
 604     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&opal_rcache_base_framework, 0))) {
 605         error = "mca_rcache_base_open() failed";
 606         goto error;
 607     }
 608     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&opal_mpool_base_framework, 0))) {
 609         error = "mca_mpool_base_open() failed";
 610         goto error;
 611     }
 612     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_bml_base_framework, 0))) {
 613         error = "mca_bml_base_open() failed";
 614         goto error;
 615     }
 616     if (OMPI_SUCCESS != (ret = mca_bml_base_init (1, ompi_mpi_thread_multiple))) {
 617         error = "mca_bml_base_init() failed";
 618         goto error;
 619     }
 620     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_pml_base_framework, 0))) {
 621         error = "mca_pml_base_open() failed";
 622         goto error;
 623     }
 624     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_coll_base_framework, 0))) {
 625         error = "mca_coll_base_open() failed";
 626         goto error;
 627     }
 628 
 629     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_osc_base_framework, 0))) {
 630         error = "ompi_osc_base_open() failed";
 631         goto error;
 632     }
 633 
 634 #if OPAL_ENABLE_FT_CR == 1
 635     if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_crcp_base_framework, 0))) {
 636         error = "ompi_crcp_base_open() failed";
 637         goto error;
 638     }
 639 #endif
 640 
 641     /* To reduce startup cost in the common case for MPI apps (where they
 642        don't use MPI-2 IO or MPI-1 topology functions), the io and
 643        topo frameworks are initialized lazily, at the first use of
 644        relevant functions (e.g., MPI_FILE_*, MPI_CART_*, MPI_GRAPH_*),
 645        so they are not opened here. */
 646 
 647     /* Select which MPI components to use */
 648 
 649     if (OMPI_SUCCESS !=
 650         (ret = mca_pml_base_select(OPAL_ENABLE_PROGRESS_THREADS,
 651                                    ompi_mpi_thread_multiple))) {
 652         error = "mca_pml_base_select() failed";
 653         goto error;
 654     }
 655 
 656     OMPI_TIMING_IMPORT_OPAL("orte_init");
 657     OMPI_TIMING_NEXT("rte_init-commit");
 658 
 659     /* exchange connection info - this function may also act as a barrier
 660      * if data exchange is required. The modex occurs solely across procs
 661      * in our job. If a barrier is required, the "modex" function will
 662      * perform it internally */
 663     opal_pmix.commit();
 664     OMPI_TIMING_NEXT("commit");
 665 #if (OPAL_ENABLE_TIMING)
 666     if (OMPI_TIMING_ENABLED && !opal_pmix_base_async_modex &&
 667             opal_pmix_collect_all_data) {
 668         if (OMPI_SUCCESS != (ret = opal_pmix.fence(NULL, 0))) {
 669             error = "timing: pmix-barrier-1 failed";
 670             goto error;
 671         }
 672         OMPI_TIMING_NEXT("pmix-barrier-1");
 673         if (OMPI_SUCCESS != (ret = opal_pmix.fence(NULL, 0))) {
 674             error = "timing: pmix-barrier-2 failed";
 675             goto error;
 676         }
 677         OMPI_TIMING_NEXT("pmix-barrier-2");
 678     }
 679 #endif
 680 
 681     /* If we have a non-blocking fence:
 682      * if we are doing an async modex, but we are collecting all
 683      * data, then execute the non-blocking modex in the background.
 684      * All calls to modex_recv will be cached until the background
 685      * modex completes. If collect_all_data is false, then we skip
 686      * the fence completely and retrieve data on-demand from the
 687      * source node.
 688      *
 689      * If we do not have a non-blocking fence, then we must always
 690      * execute the blocking fence as the system does not support
 691      * later data retrieval. */
 692     if (NULL != opal_pmix.fence_nb) {
 693         if (opal_pmix_base_async_modex && opal_pmix_collect_all_data) {
 694             /* execute the fence_nb in the background to collect
 695              * the data */
 696             background_fence = true;
 697             active = true;
 698             OPAL_POST_OBJECT(&active);
 699             if( OMPI_SUCCESS != (ret = opal_pmix.fence_nb(NULL, true,
 700                                                     fence_release,
 701                                                     (void*)&active))) {
 702                 error = "opal_pmix.fence_nb() failed";
 703                 goto error;
 704             }
 705 
 706         } else if (!opal_pmix_base_async_modex) {
 707             /* we want to do the modex */
 708             active = true;
 709             OPAL_POST_OBJECT(&active);
 710             if( OMPI_SUCCESS != (ret = opal_pmix.fence_nb(NULL,
 711                 opal_pmix_collect_all_data, fence_release, (void*)&active))) {
 712                 error = "opal_pmix.fence_nb() failed";
 713                 goto error;
 714             }
 715             /* cannot just wait on thread as we need to call opal_progress */
 716             OMPI_LAZY_WAIT_FOR_COMPLETION(active);
 717         }
 718         /* otherwise, we don't want to do the modex, so fall thru */
 719     } else if (!opal_pmix_base_async_modex || opal_pmix_collect_all_data) {
 720         if( OMPI_SUCCESS != (ret = opal_pmix.fence(NULL,
 721                                                 opal_pmix_collect_all_data))) {
 722             error = "opal_pmix.fence() failed";
 723             goto error;
 724         }
 725     }
 726 
 727     OMPI_TIMING_NEXT("modex");
 728 
 729     /* select buffered send allocator component to be used */
 730     if( OMPI_SUCCESS !=
 731         (ret = mca_pml_base_bsend_init(ompi_mpi_thread_multiple))) {
 732         error = "mca_pml_base_bsend_init() failed";
 733         goto error;
 734     }
 735 
 736     if (OMPI_SUCCESS !=
 737         (ret = mca_coll_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
 738                                             ompi_mpi_thread_multiple))) {
 739         error = "mca_coll_base_find_available() failed";
 740         goto error;
 741     }
 742 
 743     if (OMPI_SUCCESS !=
 744         (ret = ompi_osc_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
 745                                             ompi_mpi_thread_multiple))) {
 746         error = "ompi_osc_base_find_available() failed";
 747         goto error;
 748     }
 749 
 750 #if OPAL_ENABLE_FT_CR == 1
 751     if (OMPI_SUCCESS != (ret = ompi_crcp_base_select() ) ) {
 752         error = "ompi_crcp_base_select() failed";
 753         goto error;
 754     }
 755 #endif
 756 
 757     /* io and topo components are not selected here -- see comment
 758        above about the io and topo frameworks being loaded lazily */
 759 
 760     /* Initialize each MPI handle subsystem */
 761     /* initialize requests */
 762     if (OMPI_SUCCESS != (ret = ompi_request_init())) {
 763         error = "ompi_request_init() failed";
 764         goto error;
 765     }
 766 
 767     if (OMPI_SUCCESS != (ret = ompi_message_init())) {
 768         error = "ompi_message_init() failed";
 769         goto error;
 770     }
 771 
 772     /* initialize info */
 773     if (OMPI_SUCCESS != (ret = ompi_mpiinfo_init())) {
 774         error = "ompi_mpiinfo_init() failed";
 775         goto error;
 776     }
 777 
 778     /* initialize error handlers */
 779     if (OMPI_SUCCESS != (ret = ompi_errhandler_init())) {
 780         error = "ompi_errhandler_init() failed";
 781         goto error;
 782     }
 783 
 784     /* initialize error codes */
 785     if (OMPI_SUCCESS != (ret = ompi_mpi_errcode_init())) {
 786         error = "ompi_mpi_errcode_init() failed";
 787         goto error;
 788     }
 789 
 790     /* initialize internal error codes */
 791     if (OMPI_SUCCESS != (ret = ompi_errcode_intern_init())) {
 792         error = "ompi_errcode_intern_init() failed";
 793         goto error;
 794     }
 795 
 796     /* initialize groups  */
 797     if (OMPI_SUCCESS != (ret = ompi_group_init())) {
 798         error = "ompi_group_init() failed";
 799         goto error;
 800     }
 801 
 802     /* initialize communicators */
 803     if (OMPI_SUCCESS != (ret = ompi_comm_init())) {
 804         error = "ompi_comm_init() failed";
 805         goto error;
 806     }
 807 
 808     /* initialize file handles */
 809     if (OMPI_SUCCESS != (ret = ompi_file_init())) {
 810         error = "ompi_file_init() failed";
 811         goto error;
 812     }
 813 
 814     /* initialize windows */
 815     if (OMPI_SUCCESS != (ret = ompi_win_init())) {
 816         error = "ompi_win_init() failed";
 817         goto error;
 818     }
 819 
 820     /* initialize attribute meta-data structure for comm/win/dtype */
 821     if (OMPI_SUCCESS != (ret = ompi_attr_init())) {
 822         error = "ompi_attr_init() failed";
 823         goto error;
 824     }
 825 
 826     /* identify the architectures of remote procs and setup
 827      * their datatype convertors, if required
 828      */
 829     if (OMPI_SUCCESS != (ret = ompi_proc_complete_init())) {
 830         error = "ompi_proc_complete_init failed";
 831         goto error;
 832     }
 833 
 834     /* start PML/BTL's */
 835     ret = MCA_PML_CALL(enable(true));
 836     if( OMPI_SUCCESS != ret ) {
 837         error = "PML control failed";
 838         goto error;
 839     }
 840 
 841     /* some btls/mtls require that we call add_procs with all procs in the job.
 842      * Since the btls/mtls have no visibility here, it is up to the pml to
 843      * convey this requirement */
 844     if (mca_pml_base_requires_world ()) {
 845         if (NULL == (procs = ompi_proc_world (&nprocs))) {
 846             error = "ompi_proc_world () failed";
 847             goto error;
 848         }
 849     } else {
 850         /* add all allocated ompi_proc_t's to PML (below the add_procs limit this
 851          * behaves identically to ompi_proc_world ()) */
 852         if (NULL == (procs = ompi_proc_get_allocated (&nprocs))) {
 853             error = "ompi_proc_get_allocated () failed";
 854             goto error;
 855         }
 856     }
 857     ret = MCA_PML_CALL(add_procs(procs, nprocs));
 858     free(procs);
 859     /* If we got "unreachable", then print a specific error message.
 860        Otherwise, if we got some other failure, fall through to print
 861        a generic message. */
 862     if (OMPI_ERR_UNREACH == ret) {
 863         opal_show_help("help-mpi-runtime.txt",
 864                        "mpi_init:startup:pml-add-procs-fail", true);
 865         error = NULL;
 866         goto error;
 867     } else if (OMPI_SUCCESS != ret) {
 868         error = "PML add procs failed";
 869         goto error;
 870     }
 871 
 872     MCA_PML_CALL(add_comm(&ompi_mpi_comm_world.comm));
 873     MCA_PML_CALL(add_comm(&ompi_mpi_comm_self.comm));
 874 
 875     /*
 876      * Dump all MCA parameters if requested
 877      */
 878     if (ompi_mpi_show_mca_params) {
 879         ompi_show_all_mca_params(ompi_mpi_comm_world.comm.c_my_rank,
 880                                  nprocs,
 881                                  ompi_process_info.nodename);
 882     }
 883 
 884     /* Do we need to wait for a debugger? */
 885     ompi_rte_wait_for_debugger();
 886 
 887     /* Next timing measurement */
 888     OMPI_TIMING_NEXT("modex-barrier");
 889 
 890     /* if we executed the above fence in the background, then
 891      * we have to wait here for it to complete. However, there
 892      * is no reason to do two barriers! */
 893     if (background_fence) {
 894         OMPI_LAZY_WAIT_FOR_COMPLETION(active);
 895     } else if (!ompi_async_mpi_init) {
 896         /* wait for everyone to reach this point - this is a hard
 897          * barrier requirement at this time, though we hope to relax
 898          * it at a later point */
 899         if (NULL != opal_pmix.fence_nb) {
 900             active = true;
 901             OPAL_POST_OBJECT(&active);
 902             if (OMPI_SUCCESS != (ret = opal_pmix.fence_nb(NULL, false,
 903                                fence_release, (void*)&active))) {
 904                 error = "opal_pmix.fence_nb() failed";
 905                 goto error;
 906             }
 907             OMPI_LAZY_WAIT_FOR_COMPLETION(active);
 908         } else {
 909             if (OMPI_SUCCESS != (ret = opal_pmix.fence(NULL, false))) {
 910                 error = "opal_pmix.fence() failed";
 911                 goto error;
 912             }
 913         }
 914     }
 915 
 916     /* check for timing request - get stop time and report elapsed
 917        time if so, then start the clock again */
 918     OMPI_TIMING_NEXT("barrier");
 919 
 920 #if OPAL_ENABLE_PROGRESS_THREADS == 0
 921     /* Start setting up the event engine for MPI operations.  Don't
 922        block in the event library, so that communications don't take
 923        forever between procs in the dynamic code.  This will increase
 924        CPU utilization for the remainder of MPI_INIT when we are
 925        blocking on RTE-level events, but may greatly reduce non-TCP
 926        latency. */
 927     opal_progress_set_event_flag(OPAL_EVLOOP_NONBLOCK);
 928 #endif
 929 
 930     /* wire up the mpi interface, if requested.  Do this after the
 931        non-block switch for non-TCP performance.  Do before the
 932        polling change as anyone with a complex wire-up is going to be
 933        using the oob. */
 934     if (OMPI_SUCCESS != (ret = ompi_init_preconnect_mpi())) {
 935         error = "ompi_init_preconnect_mpi() failed";
 936         goto error;
 937     }
 938 
 939     /* Setup the dynamic process management (DPM) subsystem */
 940     if (OMPI_SUCCESS != (ret = ompi_dpm_init())) {
 941         error = "ompi_dpm_init() failed";
 942         goto error;
 943     }
 944 
 945     /* Determine the overall thread level support of all processes
 946        in MPI_COMM_WORLD. This has to be done before calling
 947        coll_base_comm_select, since some of the collective components,
 948        e.g. hierarch, might create subcommunicators. The thread level
 949        requested by all processes is required in order to know
 950        which cid allocation algorithm can be used. */
 951     if (OMPI_SUCCESS != ( ret = ompi_comm_cid_init ())) {
 952         error = "ompi_mpi_init: ompi_comm_cid_init failed";
 953         goto error;
 954     }
 955 
 956     /* Init coll for the comms. This has to be done after dpm_base_select
 957        (otherwise dpm.mark_dyncomm is not set in the communicator creation
 958        function), but before dpm.dyncom_init, since that function
 959        might require collectives for the CID allocation. */
 960     if (OMPI_SUCCESS !=
 961         (ret = mca_coll_base_comm_select(MPI_COMM_WORLD))) {
 962         error = "mca_coll_base_comm_select(MPI_COMM_WORLD) failed";
 963         goto error;
 964     }
 965 
 966     if (OMPI_SUCCESS !=
 967         (ret = mca_coll_base_comm_select(MPI_COMM_SELF))) {
 968         error = "mca_coll_base_comm_select(MPI_COMM_SELF) failed";
 969         goto error;
 970     }
 971 
 972     /* Check whether we have been spawned or not.  We do this
 973        at the very end, since we need collectives, datatypes, ptls,
 974        etc. up and running here. */
 975     if (OMPI_SUCCESS != (ret = ompi_dpm_dyn_init())) {
 976         error = "ompi_dpm_dyn_init() failed";
 977         goto error;
 978     }
 979 
 980     /*
 981      * Start up the Checkpoint/Restart mechanism.
 982      * Note: Always do this so tools don't hang when
 983      * in a non-checkpointable build
 984      */
 985     if (OMPI_SUCCESS != (ret = ompi_cr_init())) {
 986         error = "ompi_cr_init";
 987         goto error;
 988     }
 989 
 990     /* Undo OPAL calling opal_progress_event_users_increment() during
 991        opal_init, to get better latency when not using TCP.  Do
 992        this *after* dyn_init, as dyn init uses lots of RTE
 993        communication and we don't want to hinder the performance of
 994        that code. */
 995     opal_progress_event_users_decrement();
 996 
 997     /* see if yield_when_idle was specified - if so, use it */
 998     opal_progress_set_yield_when_idle(ompi_mpi_yield_when_idle);
 999 
1000     /* negative value means use default - just don't do anything */
1001     if (ompi_mpi_event_tick_rate >= 0) {
1002         opal_progress_set_event_poll_rate(ompi_mpi_event_tick_rate);
1003     }
1004 
1005     /* At this point, we are fully configured and in MPI mode.  Any
1006        communication calls here will work exactly like they would in
1007        the user's code.  Setup the connections between procs and warm
1008        them up with simple sends, if requested */
1009 
1010     if (OMPI_SUCCESS != (ret = ompi_mpiext_init())) {
1011         error = "ompi_mpiext_init";
1012         goto error;
1013     }
1014 
1015     /* Fall through */
1016  error:
1017     if (ret != OMPI_SUCCESS) {
1018         /* Only print a message if one was not already printed */
1019         if (NULL != error && OMPI_ERR_SILENT != ret) {
1020             const char *err_msg = opal_strerror(ret);
1021             opal_show_help("help-mpi-runtime.txt",
1022                            "mpi_init:startup:internal-failure", true,
1023                            "MPI_INIT", "MPI_INIT", error, err_msg, ret);
1024         }
1025         ompi_hook_base_mpi_init_error(argc, argv, requested, provided);
1026         OMPI_TIMING_FINALIZE;
1027         return ret;
1028     }
1029 
1030     /* Initialize the registered datarep list to be empty */
1031     OBJ_CONSTRUCT(&ompi_registered_datareps, opal_list_t);
1032 
1033     /* Initialize the hash tables used to store the F90 types returned by the
1034      *  MPI_Type_create_f90_XXX functions.
1035      */
1036     OBJ_CONSTRUCT( &ompi_mpi_f90_integer_hashtable, opal_hash_table_t);
1037     opal_hash_table_init(&ompi_mpi_f90_integer_hashtable, 16 /* why not? */);
1038 
1039     OBJ_CONSTRUCT( &ompi_mpi_f90_real_hashtable, opal_hash_table_t);
1040     opal_hash_table_init(&ompi_mpi_f90_real_hashtable, FLT_MAX_10_EXP);
1041 
1042     OBJ_CONSTRUCT( &ompi_mpi_f90_complex_hashtable, opal_hash_table_t);
1043     opal_hash_table_init(&ompi_mpi_f90_complex_hashtable, FLT_MAX_10_EXP);
1044 
1045     /* All done.  Wasn't that simple? */
1046     opal_atomic_wmb();
1047     opal_atomic_swap_32(&ompi_mpi_state, OMPI_MPI_STATE_INIT_COMPLETED);
1048 
1049     /* Finish last measurement, output results
1050      * and clear timing structure */
1051     OMPI_TIMING_NEXT("barrier-finish");
1052     OMPI_TIMING_OUT;
1053     OMPI_TIMING_FINALIZE;
1054 
1055     ompi_hook_base_mpi_init_bottom(argc, argv, requested, provided);
1056 
1057     return MPI_SUCCESS;
1058 }
