This source file includes the following definitions:
- main
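/*
 * Pairwise MPI point-to-point bandwidth test.
 *
 * For every ordered pair of ranks (i, j), rank i sends a buffer of
 * n_bytes bytes to rank j n_loops times (plus one untimed warmup
 * iteration), the receiver verifies the data, and the transfer rate in
 * MB/s (10^6 bytes per second) is reported.  Rank 0 prints aggregate
 * statistics at the end.
 *
 * Optional arguments: argv[1] = message size in bytes,
 *                     argv[2] = number of timed loops per pair.
 *
 * Build/run note (assumption, not part of the original source): the
 * program is normally compiled with an MPI C wrapper such as mpicc and
 * launched with the site's MPI launcher (mpirun, mpiexec, srun, ...);
 * the exact commands depend on the local MPI installation.
 */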
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"
#include <unistd.h>
#include <string.h>
#include <time.h>

#define MAX_RR_NAME 7

int main(int argc, char *argv[])
{
  MPI_Status status;
  int mpierr;
  int rank;
  int nproc;
  int tag0=41;                     /* tag for the host-name exchange        */
  int tag1=42;                     /* tag for data and timing messages      */
  int tag2=43;                     /* declared but not used                 */
  int warmup=1;                    /* untimed iterations per pair           */

  char process_name[MPI_MAX_PROCESSOR_NAME + 1];
  char partner_name[MPI_MAX_PROCESSOR_NAME + 1];

  char rr_blank[] = {" "};
  char rr_empty[] = {"???????"};

  int n_bytes=128*1024*1024;       /* message size, overridden by argv[1]   */
  int n_loops=2;                   /* timed iterations, overridden by argv[2] */
  unsigned char* send_buff;
  unsigned char* recv_buff;

  int i,j,k,m,count,mismatch;

  double et1,et2,mbs;              /* start/end times and rate in MB/s      */
  double avg_mbs=0, sum_avg_mbs=0;
  int xfers=0, sum_xfers=0;
  double max_mbs=-1.0,min_mbs=999999.9;
  double r_max_mbs,r_min_mbs;

  time_t curtime;
  struct tm *loctime;

  /* Optional command-line overrides: argv[1] = bytes, argv[2] = timed loops. */
  if ( argc > 2 )
  {
    n_loops = atoi(argv[2]);
    n_loops = n_loops < 1 ? 10 : n_loops;
  }
  if ( argc > 1 )
  {
    n_bytes = atoi(argv[1]);
    n_bytes = n_bytes < 1 ? 32768 : n_bytes;
  }

  send_buff = (unsigned char *) valloc(n_bytes);
  recv_buff = (unsigned char *) valloc(n_bytes);
  if ( send_buff == NULL || recv_buff == NULL )
  {
    fprintf(stderr, "Failed to allocate %d byte buffers\n", n_bytes);
    return 1;
  }

  /* Fill the send buffer with a repeating 0..127 pattern for later verification. */
  for ( i=0; i<n_bytes; i++ )
  {
    send_buff[i] = i%128;
  }

  mpierr = MPI_Init(&argc, &argv);
  if (mpierr != MPI_SUCCESS)
  {
    fprintf(stderr, "MPI Error %d (MPI_Init)\n",mpierr);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  /* Return errors instead of aborting so every MPI call can be checked. */
  MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

  mpierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (mpierr != MPI_SUCCESS || rank < 0)
  {
    fprintf(stderr, "MPI Error %d (MPI_Comm_rank)\n",mpierr);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  /* Rank 0 stamps the run with the current date and time. */
  if ( rank == 0 )
  {
    curtime = time (NULL);
    loctime = localtime (&curtime);
    printf("\n %s\n",asctime (loctime));
  }

  mpierr = MPI_Comm_size(MPI_COMM_WORLD, &nproc);
  if (mpierr != MPI_SUCCESS || nproc < 1 || nproc <= rank)
  {
    fprintf(stderr, "MPI Error %d (MPI_Comm_size) [%d]\n",mpierr, rank);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  /* Get this rank's host name and shorten it to at most MAX_RR_NAME
     characters for the report; on failure, use a placeholder name. */
  mpierr = MPI_Get_processor_name(process_name, &count);
  if (mpierr != MPI_SUCCESS)
  {
    fprintf(stderr,"MPI Error %d (MPI_Get_processor_name) [%d]\n", mpierr, rank);
    sprintf(process_name, "%s", rr_empty);
  }
  else
  {
    if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
    process_name[MAX_RR_NAME] = '\0';
  }

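  /*
   * Pairwise bandwidth measurement.  For every ordered pair (i, j) with
   * i != j, the two ranks first exchange host names, then rank i sends
   * the n_bytes buffer to rank j n_loops+warmup times.  Rank i records
   * the start time et1 just before the first timed send and forwards it
   * to rank j, which records the end time et2 after the last receive,
   * computes the rate in MB/s, and checks the received data against the
   * expected 0..127 pattern (the receive buffer is pre-filled with 0x80
   * so untouched bytes show up as mismatches).  A barrier at the top of
   * each i-iteration keeps the pairs loosely synchronized.
   */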
  for ( i=0; i<nproc; i++ )
  {
    mpierr = MPI_Barrier(MPI_COMM_WORLD);
    if (mpierr != MPI_SUCCESS)
    {
      fprintf(stderr, "MPI Error %d (MPI_Barrier) [%d]\n", mpierr, rank);
      fflush(stderr);
      MPI_Abort(MPI_COMM_WORLD, -1);
    }
    for ( j=0; j<nproc; j++ )
    {
      if ( i != j )
      {
        if (rank == j)
        {
          mpierr = MPI_Sendrecv(process_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, i, tag0,
                                partner_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, i, tag0, MPI_COMM_WORLD, &status);
          if (mpierr != MPI_SUCCESS)
          {
            fprintf(stderr,"MPI Error %d (MPI_Sendrecv) %s [%d,%d]\n",mpierr,process_name,rank,i);
            fflush(stderr);
            MPI_Abort(MPI_COMM_WORLD, -1);
          }
          for ( k=0; k<n_bytes; k++ )
          {
            recv_buff[k] = 0x80;
          }
        }
        if ( rank == i )
        {
          mpierr = MPI_Sendrecv(process_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, j, tag0,
                                partner_name, MPI_MAX_PROCESSOR_NAME + 1, MPI_CHAR, j, tag0, MPI_COMM_WORLD, &status);
          if (mpierr != MPI_SUCCESS)
          {
            fprintf(stderr,"MPI Error %d (MPI_Sendrecv) %s [%d,%d]\n",mpierr,process_name,i,j);
            fflush(stderr);
            MPI_Abort(MPI_COMM_WORLD, -1);
          }
        }
        for ( k=0; k<n_loops+warmup; k++ )
        {
          if ( rank == i )
          {
            if (k == warmup) et1 = MPI_Wtime();   /* start timing after the warmup pass */
            mpierr = MPI_Send(send_buff, n_bytes, MPI_BYTE, j, tag1, MPI_COMM_WORLD);
            if (mpierr != MPI_SUCCESS)
            {
              fprintf(stderr,"MPI Error %d (MPI_Send) %s [%4d] --> %s [%4d]\n",mpierr,process_name,i,partner_name,j);
              fflush(stderr);
              MPI_Abort(MPI_COMM_WORLD, -1);
            }
          }
          if ( rank == j )
          {
            mpierr = MPI_Recv(recv_buff, n_bytes, MPI_BYTE, i, tag1, MPI_COMM_WORLD, &status);
            if (mpierr != MPI_SUCCESS)
            {
              fprintf(stderr,"MPI Error %d (MPI_Recv) %s [%4d] <-- %s [%4d]\n",mpierr,process_name,j,partner_name,i);
              fflush(stderr);
              MPI_Abort(MPI_COMM_WORLD, -1);
            }
            if (k == n_loops+warmup-1) et2 = MPI_Wtime();   /* stop timing after the last receive */
          }
        }
        /* The sender forwards its start time so the receiver can compute the rate. */
        if ( rank == i )
        {
          mpierr = MPI_Send(&et1, 1, MPI_DOUBLE, j, tag1, MPI_COMM_WORLD);
          if (mpierr != MPI_SUCCESS)
          {
            fprintf(stderr,"MPI Error %d (MPI_Send) %s [%4d] --> %s [%4d]\n",mpierr,process_name,i,partner_name,j);
            fflush(stderr);
            MPI_Abort(MPI_COMM_WORLD, -1);
          }
        }
        if ( rank == j )
        {
          mpierr = MPI_Recv(&et1, 1, MPI_DOUBLE, i, tag1, MPI_COMM_WORLD, &status);
          if (mpierr != MPI_SUCCESS)
          {
            fprintf(stderr,"MPI Error %d (MPI_Recv) %s [%4d] <-- %s [%4d]\n",mpierr,process_name,j,partner_name,i);
            fflush(stderr);
            MPI_Abort(MPI_COMM_WORLD, -1);
          }
          mbs = ((double)n_loops*n_bytes)/(1000000.0*(et2-et1));   /* MB/s = 10^6 bytes per second */
          if (mbs < 50.0)
          {
            printf(" %s [%4d] =====>> %s [%4d] %9.1f mbs SLOW!\n",partner_name,i,process_name,j,mbs);
          }
          else
          {
            printf(" %s [%4d] =====>> %s [%4d] %9.1f mbs\n",partner_name,i,process_name,j,mbs);
          }

          min_mbs = (mbs < min_mbs) ? mbs:min_mbs;
          max_mbs = (mbs > max_mbs) ? mbs:max_mbs;

          avg_mbs += mbs;
          xfers++;

          /* Verify the received data against the expected 0..127 pattern. */
          mismatch = 0;
          for ( k=0; k<n_bytes; k++ )
          {
            if ( recv_buff[k] != k%128 ) mismatch++;
          }
          if ( mismatch ) printf(" WARNING! %d data mismatches\n",mismatch);
          fflush(stdout);
        }
      }
    }
  }

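  /*
   * Reduce the per-rank statistics to rank 0: total number of transfers,
   * sum of the per-transfer rates (averaged below), and the global
   * minimum and maximum rates.
   */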
  mpierr = MPI_Reduce(&xfers, &sum_xfers, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
  if (mpierr != MPI_SUCCESS)
  {
    fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  mpierr = MPI_Reduce(&avg_mbs, &sum_avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  if (mpierr != MPI_SUCCESS)
  {
    fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  mpierr = MPI_Reduce(&min_mbs, &r_min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  if (mpierr != MPI_SUCCESS)
  {
    fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  mpierr = MPI_Reduce(&max_mbs, &r_max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
  if (mpierr != MPI_SUCCESS)
  {
    fprintf(stderr,"MPI Error %d (MPI_Reduce) %s [%d]\n",mpierr,process_name,rank);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  mpierr = MPI_Finalize();
  if (mpierr != MPI_SUCCESS)
  {
    fprintf(stderr,"MPI Error %d (MPI_Finalize) %s [%d]\n",mpierr,process_name,rank);
    fflush(stderr);
    MPI_Abort(MPI_COMM_WORLD, -1);
  }

  fflush(stdout);

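  /* Rank 0 reports the average, minimum, and maximum transfer rates
     over all measured pairs. */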
  if ( rank == 0 )
  {
    mbs = sum_avg_mbs/sum_xfers;
    printf("\n average transfer rate for %d transfers: %9.1f mbs\n",sum_xfers, mbs);
    printf(" minimum transfer rate for %d transfers: %9.1f mbs\n",sum_xfers, r_min_mbs);
    printf(" maximum transfer rate for %d transfers: %9.1f mbs\n",sum_xfers, r_max_mbs);
    fflush(stdout);
  }

  free(send_buff);
  free(recv_buff);

  return 0;
}