root/ompi/runtime/ompi_mpi_abort.c


DEFINITIONS

This source file includes the following definitions:
  1. try_kill_peers
  2. ompi_mpi_abort

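As background for the definitions above: the MPI_Abort() C binding forwards to ompi_mpi_abort(), which in turn uses the local helper try_kill_peers(). A minimal sketch of application code that would reach this file (the error code 17 is an arbitrary illustrative value):

    #include <mpi.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        /* On an unrecoverable application error, this call ends up
           in ompi_mpi_abort(comm, 17) below. */
        MPI_Abort(MPI_COMM_WORLD, 17);
        return 0;   /* not reached */
    }
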
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2014 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2018 Cisco Systems, Inc.  All rights reserved
 * Copyright (c) 2010-2011 Oak Ridge National Labs.  All rights reserved.
 * Copyright (c) 2014      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2015      Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2015      Mellanox Technologies, Inc.
 *                         All rights reserved.
 * Copyright (c) 2017      FUJITSU LIMITED.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <errno.h>

#include "opal/mca/backtrace/backtrace.h"
#include "opal/util/error.h"
#include "opal/runtime/opal_params.h"

#include "ompi/communicator/communicator.h"
#include "ompi/runtime/mpiruntime.h"
#include "ompi/runtime/params.h"
#include "ompi/debuggers/debuggers.h"
#include "ompi/errhandler/errcode.h"

static bool have_been_invoked = false;

/*
 * Local helper function to build an array of all the procs in a
 * communicator, excluding this process.
 *
 * Killing just the indicated peers must be implemented for
 * MPI_Abort() to work according to the standard language for
 * a 'high-quality' implementation.
 *
 * It would be nifty if we could differentiate between the
 * abort scenarios (but we don't, currently):
 *      - MPI_Abort()
 *      - MPI_ERRORS_ARE_FATAL
 *      - Victim of MPI_Abort()
 */
static void try_kill_peers(ompi_communicator_t *comm,
                           int errcode)
{
    int nprocs;
    ompi_process_name_t *procs;

    nprocs = ompi_comm_size(comm);
    /* ompi_comm_remote_size() returns 0 if not an intercomm, so
       this is safe */
    nprocs += ompi_comm_remote_size(comm);

    procs = (ompi_process_name_t*) calloc(nprocs, sizeof(ompi_process_name_t));
    if (NULL == procs) {
        /* quickly clean up the RTE and get out */
        ompi_rte_abort(errno, "Abort: unable to alloc memory to kill procs");
    }

    /* put all the local group procs in the abort list */
    int rank, i, count;
    rank = ompi_comm_rank(comm);
    for (count = i = 0; i < ompi_comm_size(comm); ++i) {
        if (rank == i) {
            /* Don't include this process in the array */
            --nprocs;
        } else {
            assert(count <= nprocs);
            procs[count++] =
                *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_local_group, i, true)->super.proc_name);
        }
    }

    /* if requested, kill off remote group procs too */
    for (i = 0; i < ompi_comm_remote_size(comm); ++i) {
        assert(count <= nprocs);
        procs[count++] =
            *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_remote_group, i, true)->super.proc_name);
    }

    if (nprocs > 0) {
        ompi_rte_abort_peers(procs, nprocs, errcode);
    }

    /* We could fall through here if ompi_rte_abort_peers() fails, or
       if (nprocs == 0).  Either way, tidy up and let the caller
       handle it. */
    free(procs);
}
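/*
 * Worked illustration of the sizing logic above (exposition only, not
 * part of the original code): for an intracommunicator of size 4 seen
 * from rank 2, ompi_comm_remote_size() is 0, so nprocs starts at 4,
 * is decremented once when we skip ourselves, and the abort list holds
 * the 3 peers {0, 1, 3}.  For an intercommunicator with local size 2
 * and remote size 3, nprocs starts at 5 and the final list holds 1
 * local peer plus all 3 remote procs, i.e. 4 names are passed to
 * ompi_rte_abort_peers().
 */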

int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode)
{
    char *host, hostname[OPAL_MAXHOSTNAMELEN];
    pid_t pid = 0;

    /* Protection for recursive invocation */
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;

    /* If MPI is initialized, we know we have a runtime nodename, so
       use that.  Otherwise, call gethostname. */
    if (ompi_rte_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();

    /* Should we print a stack trace?  Not aggregated because the
       traces might be different on each process. */
    if (opal_abort_print_stack) {
        char **messages;
        int len, i;

        if (OPAL_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%05d] [%d] func:%s\n", host, (int) pid,
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
            /* opal_backtrace_print() will print a message itself if it
               is unable to print the backtrace, so we don't need an
               additional "else" clause here. */
            opal_backtrace_print(stderr, NULL, 1);
        }
    }
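    /* For illustration (hypothetical host, pid, and frame strings),
       the loop above emits one frame per line in the form:

           [node01:01234] [0] func:opal_backtrace_buffer+0x2f

       i.e. hostname, pid, frame index, then the symbol string from
       the backtrace component. */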

    /* Wait for a while before aborting */
    opal_delay_abort();

    /* If the RTE isn't set up yet (or any more), then don't even try
       killing everyone.  Sorry, Charlie... */
    int32_t state = ompi_mpi_state;
    if (!ompi_rte_initialized) {
        fprintf(stderr, "[%s:%05d] Local abort %s completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid,
                state >= OMPI_MPI_STATE_FINALIZE_STARTED ?
                "after MPI_FINALIZE started" : "before MPI_INIT completed");
        /* Exit with a nonzero status even if the caller passed errcode 0 */
        _exit(errcode == 0 ? 1 : errcode);
    }

    /* If OMPI is initialized and we have a non-NULL communicator,
       then try to kill just that set of processes */
    if (state >= OMPI_MPI_STATE_INIT_COMPLETED &&
        state < OMPI_MPI_STATE_FINALIZE_PAST_COMM_SELF_DESTRUCT &&
        NULL != comm) {
        try_kill_peers(comm, errcode);
    }

    /* We can fall through to here in a few cases:

       1. The attempt to kill just a subset of peers via
          try_kill_peers() failed (e.g., as of July 2014, ORTE
          returns NOT_IMPLEMENTED from orte_rte_abort_peers()).
       2. MPI wasn't initialized, was already finalized, or we got a
          NULL communicator.

       In all of these cases, the only sensible thing left to do is to
       kill the entire job.  Wah wah. */
    ompi_rte_abort(errcode, NULL);

    /* Does not return */
}
