This source file includes the following definitions:
- try_kill_peers
- ompi_mpi_abort

#include "ompi_config.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#include <assert.h>
#include <errno.h>

#include "opal/mca/backtrace/backtrace.h"
#include "opal/util/error.h"
#include "opal/runtime/opal_params.h"

#include "ompi/communicator/communicator.h"
#include "ompi/runtime/mpiruntime.h"
#include "ompi/runtime/params.h"
#include "ompi/debuggers/debuggers.h"
#include "ompi/errhandler/errcode.h"

static bool have_been_invoked = false;
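
/*
 * Local helper: build the list of all peers reachable through comm
 * (the local group plus, for an intercommunicator, the remote
 * group), excluding this process, and ask the runtime to abort just
 * those peers.
 */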
static void try_kill_peers(ompi_communicator_t *comm,
                           int errcode)
{
    int nprocs;
    ompi_process_name_t *procs;

    nprocs = ompi_comm_size(comm);
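    /* ompi_comm_remote_size() returns 0 for an intracommunicator,
       so it is safe to add unconditionally */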
    nprocs += ompi_comm_remote_size(comm);

    procs = (ompi_process_name_t*) calloc(nprocs, sizeof(ompi_process_name_t));
    if (NULL == procs) {
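        /* nothing useful can be done on allocation failure except to
           abort the entire job via the runtime */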
        ompi_rte_abort(errno, "Abort: unable to alloc memory to kill procs");
    }
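
    /* put every proc in the local group, except this process, into
       the abort list */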
    int rank, i, count;
    rank = ompi_comm_rank(comm);
    for (count = i = 0; i < ompi_comm_size(comm); ++i) {
        if (rank == i) {
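            /* do not include this process in the array */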
            --nprocs;
        } else {
            assert(count <= nprocs);
            procs[count++] =
                *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_local_group, i, true)->super.proc_name);
        }
    }
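
    /* for an intercommunicator, also add every proc in the remote
       group (ompi_comm_remote_size() is 0 otherwise) */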
    for (i = 0; i < ompi_comm_remote_size(comm); ++i) {
        assert(count <= nprocs);
        procs[count++] =
            *OMPI_CAST_RTE_NAME(&ompi_group_get_proc_ptr(comm->c_remote_group, i, true)->super.proc_name);
    }

    if (nprocs > 0) {
        ompi_rte_abort_peers(procs, nprocs, errcode);
    }
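
    /* control can reach this point if ompi_rte_abort_peers() fails
       or if there were no peers to kill; free the list and let the
       caller abort the whole job */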
    free(procs);
}

int
ompi_mpi_abort(struct ompi_communicator_t* comm,
               int errcode)
{
    char *host, hostname[OPAL_MAXHOSTNAMELEN];
    pid_t pid = 0;
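
    /* protect against recursive invocation */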
    if (have_been_invoked) {
        return OMPI_SUCCESS;
    }
    have_been_invoked = true;
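
    /* if the RTE is initialized, use the node name it resolved;
       otherwise, ask the OS directly */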
    if (ompi_rte_initialized) {
        host = ompi_process_info.nodename;
    } else {
        gethostname(hostname, sizeof(hostname));
        host = hostname;
    }
    pid = getpid();
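
    /* if requested, print a stack trace for this process; traces are
       printed per process rather than aggregated, since they may
       differ across processes */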
    if (opal_abort_print_stack) {
        char **messages;
        int len, i;

        if (OPAL_SUCCESS == opal_backtrace_buffer(&messages, &len)) {
            for (i = 0; i < len; ++i) {
                fprintf(stderr, "[%s:%05d] [%d] func:%s\n", host, (int) pid,
                        i, messages[i]);
                fflush(stderr);
            }
            free(messages);
        } else {
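            /* buffered backtraces are not supported, so print the
               backtrace directly to stderr */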
            opal_backtrace_print(stderr, NULL, 1);
        }
    }
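
    /* if requested, delay before aborting (e.g., to give a debugger
       a chance to attach) */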
    opal_delay_abort();
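
    /* if the RTE was never set up (or is already gone), we cannot
       aggregate error messages or kill other processes, so just exit
       this process */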
    int32_t state = ompi_mpi_state;
    if (!ompi_rte_initialized) {
        fprintf(stderr, "[%s:%05d] Local abort %s completed successfully, but am not able to aggregate error messages, and not able to guarantee that all other processes were killed!\n",
                host, (int) pid,
                state >= OMPI_MPI_STATE_FINALIZE_STARTED ?
                "after MPI_FINALIZE started" : "before MPI_INIT completed");
        _exit(errcode == 0 ? 1 : errcode);
    }
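
    /* if MPI is fully initialized, not yet past the point of tearing
       down communicators, and we were handed a communicator, first
       try to kill just the peers in that communicator */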
    if (state >= OMPI_MPI_STATE_INIT_COMPLETED &&
        state < OMPI_MPI_STATE_FINALIZE_PAST_COMM_SELF_DESTRUCT &&
        NULL != comm) {
        try_kill_peers(comm, errcode);
    }
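
    /* We can reach this point if try_kill_peers() returned, or if it
       was skipped because MPI was not initialized, was already
       finalizing, or we were given a NULL communicator.  Either way,
       the only thing left to do is abort the entire job. */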
    ompi_rte_abort(errcode, NULL);
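
    /* ompi_rte_abort() should not return */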
}