test/monitoring/check_monitoring.c

/*
 * Copyright (c) 2016-2017 Inria.  All rights reserved.
 * Copyright (c) 2017      The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

/*
  Check that the monitoring component of Open MPI works correctly.

  To be run as:

  mpirun -np 4 --mca pml_monitoring_enable 2 ./check_monitoring
*/
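
/* The test exercises the monitoring performance variables through the MPI_T
   tools interface: for each monitored family it looks up the pvar by name,
   binds a handle on MPI_COMM_WORLD inside a session, and reads per-peer (or
   aggregate) message counts and sizes after each communication phase. */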

#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#define PVAR_GENERATE_VARIABLES(pvar_prefix, pvar_name, pvar_class)     \
    /* Variables */                                                     \
    static MPI_T_pvar_handle pvar_prefix ## _handle;                    \
    static const char pvar_prefix ## _pvar_name[] = pvar_name;          \
    static int pvar_prefix ## _pvar_idx;                                \
    /* Functions */                                                     \
    static inline int pvar_prefix ## _start(MPI_T_pvar_session session) \
    {                                                                   \
        int MPIT_result;                                                \
        MPIT_result = MPI_T_pvar_start(session, pvar_prefix ## _handle); \
        if( MPI_SUCCESS != MPIT_result ) {                              \
            fprintf(stderr, "Failed to start handle on \"%s\" pvar, check that you have " \
                    "enabled the monitoring component.\n", pvar_prefix ## _pvar_name); \
            MPI_Abort(MPI_COMM_WORLD, MPIT_result);                     \
        }                                                               \
        return MPIT_result;                                             \
    }                                                                   \
    static inline int pvar_prefix ## _init(MPI_T_pvar_session session)  \
    {                                                                   \
        int MPIT_result;                                                \
        MPI_Comm comm = MPI_COMM_WORLD;                                 \
        /* Get index */                                                 \
        MPIT_result = MPI_T_pvar_get_index(pvar_prefix ## _pvar_name,   \
                                           pvar_class,                  \
                                           &(pvar_prefix ## _pvar_idx)); \
        if( MPI_SUCCESS != MPIT_result ) {                              \
            fprintf(stderr, "Cannot find monitoring MPI_Tool \"%s\" pvar, check that you have " \
                    "enabled the monitoring component.\n", pvar_prefix ## _pvar_name); \
            MPI_Abort(MPI_COMM_WORLD, MPIT_result);                     \
            return MPIT_result;                                         \
        }                                                               \
        /* Allocate handle */                                           \
        /* Allocating a new PVAR in a session will reset the counters */ \
        int count;                                                      \
        MPIT_result = MPI_T_pvar_handle_alloc(session, pvar_prefix ## _pvar_idx, \
                                              &comm, &(pvar_prefix ## _handle), \
                                              &count);                  \
        if( MPI_SUCCESS != MPIT_result ) {                              \
            fprintf(stderr, "Failed to allocate handle on \"%s\" pvar, check that you have " \
                    "enabled the monitoring component.\n", pvar_prefix ## _pvar_name); \
            MPI_Abort(MPI_COMM_WORLD, MPIT_result);                     \
            return MPIT_result;                                         \
        }                                                               \
        /* Start PVAR */                                                \
        return pvar_prefix ## _start(session);                          \
    }                                                                   \
    static inline int pvar_prefix ## _stop(MPI_T_pvar_session session)  \
    {                                                                   \
        int MPIT_result;                                                \
        MPIT_result = MPI_T_pvar_stop(session, pvar_prefix ## _handle); \
        if( MPI_SUCCESS != MPIT_result ) {                              \
            fprintf(stderr, "Failed to stop handle on \"%s\" pvar, check that you have " \
                    "enabled the monitoring component.\n", pvar_prefix ## _pvar_name); \
            MPI_Abort(MPI_COMM_WORLD, MPIT_result);                     \
        }                                                               \
        return MPIT_result;                                             \
    }                                                                   \
    static inline int pvar_prefix ## _finalize(MPI_T_pvar_session session) \
    {                                                                   \
        int MPIT_result;                                                \
        /* Stop PVAR */                                                 \
        MPIT_result = pvar_prefix ## _stop(session);                    \
        /* Free handle */                                               \
        MPIT_result = MPI_T_pvar_handle_free(session, &(pvar_prefix ## _handle)); \
        if( MPI_SUCCESS != MPIT_result ) {                              \
            fprintf(stderr, "Failed to free handle on \"%s\" pvar, check that you have " \
                    "enabled the monitoring component.\n", pvar_prefix ## _pvar_name); \
            MPI_Abort(MPI_COMM_WORLD, MPIT_result);                     \
            return MPIT_result;                                         \
        }                                                               \
        return MPIT_result;                                             \
    }                                                                   \
    static inline int pvar_prefix ## _read(MPI_T_pvar_session session, void*values) \
    {                                                                   \
        int MPIT_result;                                                \
        /* Stop pvar */                                                 \
        MPIT_result = pvar_prefix ## _stop(session);                    \
        /* Read values */                                               \
        MPIT_result = MPI_T_pvar_read(session, pvar_prefix ## _handle, values); \
        if( MPI_SUCCESS != MPIT_result ) {                              \
            fprintf(stderr, "Failed to read handle on \"%s\" pvar, check that you have " \
                    "enabled the monitoring component.\n", pvar_prefix ## _pvar_name); \
            MPI_Abort(MPI_COMM_WORLD, MPIT_result);                     \
        }                                                               \
        /* Start and return */                                          \
        return pvar_prefix ## _start(session);                          \
    }
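
/* For example, PVAR_GENERATE_VARIABLES(pml_count, "pml_monitoring_messages_count",
   MPI_T_PVAR_CLASS_SIZE) generates the static variables pml_count_handle,
   pml_count_pvar_name and pml_count_pvar_idx, together with the helpers
   pml_count_init(), pml_count_start(), pml_count_stop(), pml_count_read()
   and pml_count_finalize(), which wrap the corresponding MPI_T_pvar_* calls
   and abort the job on failure. */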

#define GENERATE_CS(prefix, pvar_name_prefix, pvar_class_c, pvar_class_s) \
    PVAR_GENERATE_VARIABLES(prefix ## _count, pvar_name_prefix "_count", pvar_class_c) \
    PVAR_GENERATE_VARIABLES(prefix ## _size, pvar_name_prefix "_size", pvar_class_s) \
    static inline int pvar_ ## prefix ## _init(MPI_T_pvar_session session) \
    {                                                                   \
        prefix ## _count_init(session);                                 \
        return prefix ## _size_init(session);                           \
    }                                                                   \
    static inline int pvar_ ## prefix ## _finalize(MPI_T_pvar_session session) \
    {                                                                   \
        prefix ## _count_finalize(session);                             \
        return prefix ## _size_finalize(session);                       \
    }                                                                   \
    static inline void pvar_ ## prefix ## _read(MPI_T_pvar_session session, \
                                                size_t*cvalues, size_t*svalues) \
    {                                                                   \
        /* Read count values */                                         \
        prefix ## _count_read(session, cvalues);                        \
        /* Read size values */                                          \
        prefix ## _size_read(session, svalues);                         \
    }
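
/* For each family, GENERATE_CS() therefore exposes pvar_<prefix>_init(),
   pvar_<prefix>_finalize() and pvar_<prefix>_read(), treating the _count and
   _size pvars as a pair: a single read() fills one buffer of message counts
   and one buffer of message sizes. */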

GENERATE_CS(pml, "pml_monitoring_messages", MPI_T_PVAR_CLASS_SIZE, MPI_T_PVAR_CLASS_SIZE)
GENERATE_CS(osc_s, "osc_monitoring_messages_sent", MPI_T_PVAR_CLASS_SIZE, MPI_T_PVAR_CLASS_SIZE)
GENERATE_CS(osc_r, "osc_monitoring_messages_recv", MPI_T_PVAR_CLASS_SIZE, MPI_T_PVAR_CLASS_SIZE)
GENERATE_CS(coll, "coll_monitoring_messages", MPI_T_PVAR_CLASS_SIZE, MPI_T_PVAR_CLASS_SIZE)
GENERATE_CS(o2a, "coll_monitoring_o2a", MPI_T_PVAR_CLASS_COUNTER, MPI_T_PVAR_CLASS_AGGREGATE)
GENERATE_CS(a2o, "coll_monitoring_a2o", MPI_T_PVAR_CLASS_COUNTER, MPI_T_PVAR_CLASS_AGGREGATE)
GENERATE_CS(a2a, "coll_monitoring_a2a", MPI_T_PVAR_CLASS_COUNTER, MPI_T_PVAR_CLASS_AGGREGATE)

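/* The seven families generated above cover point-to-point traffic (pml),
   one-sided traffic in each direction (osc_s / osc_r), per-peer collective
   traffic (coll), and aggregate one-to-all, all-to-one and all-to-all
   collective traffic (o2a / a2o / a2a). */

/* Snapshot of the PML counters. Collective operations may also be routed
   through the PML, so the later point-to-point check compares against these
   saved values (first filled at the end of pvar_coll_check) to measure only
   the traffic of the phase under test. */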
static size_t *old_cvalues, *old_svalues;

static inline void pvar_all_init(MPI_T_pvar_session*session, int world_size)
{
    int MPIT_result, provided;
    MPIT_result = MPI_T_init_thread(MPI_THREAD_SINGLE, &provided);
    if (MPIT_result != MPI_SUCCESS) {
        fprintf(stderr, "Failed to initialize MPI_Tools sub-system.\n");
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }
    MPIT_result = MPI_T_pvar_session_create(session);
    if (MPIT_result != MPI_SUCCESS) {
        fprintf(stderr, "Failed to create a session for PVARs.\n");
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }
    old_cvalues = malloc(2 * world_size * sizeof(size_t));
    old_svalues = old_cvalues + world_size;
    pvar_pml_init(*session);
    pvar_osc_s_init(*session);
    pvar_osc_r_init(*session);
    pvar_coll_init(*session);
    pvar_o2a_init(*session);
    pvar_a2o_init(*session);
    pvar_a2a_init(*session);
}

static inline void pvar_all_finalize(MPI_T_pvar_session*session)
{
    int MPIT_result;
    pvar_pml_finalize(*session);
    pvar_osc_s_finalize(*session);
    pvar_osc_r_finalize(*session);
    pvar_coll_finalize(*session);
    pvar_o2a_finalize(*session);
    pvar_a2o_finalize(*session);
    pvar_a2a_finalize(*session);
    free(old_cvalues);
    MPIT_result = MPI_T_pvar_session_free(session);
    if (MPIT_result != MPI_SUCCESS) {
        fprintf(stderr, "Failed to free the PVAR session.\n");
        MPI_Abort(MPI_COMM_WORLD, MPIT_result);
    }
    (void)MPI_T_finalize();
}

static inline int pvar_pml_check(MPI_T_pvar_session session, int world_size, int world_rank)
{
    int i, ret = MPI_SUCCESS;
    size_t *cvalues, *svalues;
    cvalues = malloc(2 * world_size * sizeof(size_t));
    svalues = cvalues + world_size;
    /* Get values */
    pvar_pml_read(session, cvalues, svalues);
    for( i = 0; i < world_size && MPI_SUCCESS == ret; ++i ) {
        /* Check count values */
        if( i == world_rank && (cvalues[i] - old_cvalues[i]) != (size_t) 0 ) {
            fprintf(stderr, "Error in %s: count_values[%d]=%zu, and should be equal to %zu.\n",
                    __func__, i, cvalues[i] - old_cvalues[i], (size_t) 0);
            ret = -1;
        } else if ( i != world_rank && (cvalues[i] - old_cvalues[i]) < (size_t) world_size ) {
            fprintf(stderr, "Error in %s: count_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, cvalues[i] - old_cvalues[i], (size_t) world_size);
            ret = -1;
        }
        /* Check size values */
        if( i == world_rank && (svalues[i] - old_svalues[i]) != (size_t) 0 ) {
            fprintf(stderr, "Error in %s: size_values[%d]=%zu, and should be equal to %zu.\n",
                    __func__, i, svalues[i] - old_svalues[i], (size_t) 0);
            ret = -1;
        } else if ( i != world_rank && (svalues[i] - old_svalues[i]) < (size_t) (world_size * 13 * sizeof(char)) ) {
            fprintf(stderr, "Error in %s: size_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, svalues[i] - old_svalues[i], (size_t) (world_size * 13 * sizeof(char)));
            ret = -1;
        }
    }
    if( MPI_SUCCESS == ret ) {
        fprintf(stdout, "Check PML...[ OK ]\n");
    } else {
        fprintf(stdout, "Check PML...[FAIL]\n");
    }
    /* Keep old PML values */
    memcpy(old_cvalues, cvalues, 2 * world_size * sizeof(size_t));
    /* Free arrays */
    free(cvalues);
    return ret;
}

static inline int pvar_osc_check(MPI_T_pvar_session session, int world_size, int world_rank)
{
    int i, ret = MPI_SUCCESS;
    size_t *cvalues, *svalues;
    cvalues = malloc(2 * world_size * sizeof(size_t));
    svalues = cvalues + world_size;
    /* Zero the buffers before reading OSC values */
    memset(cvalues, 0, 2 * world_size * sizeof(size_t));
    /* Check OSC sent values */
    pvar_osc_s_read(session, cvalues, svalues);
    for( i = 0; i < world_size && MPI_SUCCESS == ret; ++i ) {
        /* Check count values */
        if( cvalues[i] < (size_t) world_size ) {
            fprintf(stderr, "Error in %s: count_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, cvalues[i], (size_t) world_size);
            ret = -1;
        }
        /* Check size values */
        if( svalues[i] < (size_t) (world_size * 13 * sizeof(char)) ) {
            fprintf(stderr, "Error in %s: size_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, svalues[i], (size_t) (world_size * 13 * sizeof(char)));
            ret = -1;
        }
    }
    /* Check OSC received values */
    pvar_osc_r_read(session, cvalues, svalues);
    for( i = 0; i < world_size && MPI_SUCCESS == ret; ++i ) {
        /* Check count values */
        if( cvalues[i] < (size_t) world_size ) {
            fprintf(stderr, "Error in %s: count_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, cvalues[i], (size_t) world_size);
            ret = -1;
        }
        /* Check size values */
        if( svalues[i] < (size_t) (world_size * 13 * sizeof(char)) ) {
            fprintf(stderr, "Error in %s: size_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, svalues[i], (size_t) (world_size * 13 * sizeof(char)));
            ret = -1;
        }
    }
    if( MPI_SUCCESS == ret ) {
        fprintf(stdout, "Check OSC...[ OK ]\n");
    } else {
        fprintf(stdout, "Check OSC...[FAIL]\n");
    }
    /* Keep old values */
    memcpy(old_cvalues, cvalues, 2 * world_size * sizeof(size_t));
    /* Free arrays */
    free(cvalues);
    return ret;
}

static inline int pvar_coll_check(MPI_T_pvar_session session, int world_size, int world_rank)
{
    int i, ret = MPI_SUCCESS;
    size_t count, size;
    size_t *cvalues, *svalues;
    cvalues = malloc(2 * world_size * sizeof(size_t));
    svalues = cvalues + world_size;
    /* Get COLL values */
    pvar_coll_read(session, cvalues, svalues);
    for( i = 0; i < world_size && MPI_SUCCESS == ret; ++i ) {
        /* Check count values */
        if( i == world_rank && cvalues[i] != (size_t) 0 ) {
            fprintf(stderr, "Error in %s: count_values[%d]=%zu, and should be equal to %zu.\n",
                    __func__, i, cvalues[i], (size_t) 0);
            ret = -1;
        } else if ( i != world_rank && cvalues[i] < (size_t) (world_size + 1) * 4 ) {
            fprintf(stderr, "Error in %s: count_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, cvalues[i], (size_t) (world_size + 1) * 4);
            ret = -1;
        }
        /* Check size values */
        if( i == world_rank && svalues[i] != (size_t) 0 ) {
            fprintf(stderr, "Error in %s: size_values[%d]=%zu, and should be equal to %zu.\n",
                    __func__, i, svalues[i], (size_t) 0);
            ret = -1;
        } else if ( i != world_rank && svalues[i] < (size_t) (world_size * (2 * 13 * sizeof(char) + sizeof(int)) + 13 * 3 * sizeof(char) + sizeof(int)) ) {
            fprintf(stderr, "Error in %s: size_values[%d]=%zu, and should be >= %zu.\n",
                    __func__, i, svalues[i], (size_t) (world_size * (2 * 13 * sizeof(char) + sizeof(int)) + 13 * 3 * sizeof(char) + sizeof(int)));
            ret = -1;
        }
    }
    /* Check One-to-all COLL values */
    pvar_o2a_read(session, &count, &size);
    if( count < (size_t) 2 ) {
        fprintf(stderr, "Error in %s: count_o2a=%zu, and should be >= %zu.\n",
                __func__, count, (size_t) 2);
        ret = -1;
    }
    if( size < (size_t) ((world_size - 1) * 13 * 2 * sizeof(char)) ) {
        fprintf(stderr, "Error in %s: size_o2a=%zu, and should be >= %zu.\n",
                __func__, size, (size_t) ((world_size - 1) * 13 * 2 * sizeof(char)));
        ret = -1;
    }
    /* Check All-to-one COLL values */
    pvar_a2o_read(session, &count, &size);
    if( count < (size_t) 2 ) {
        fprintf(stderr, "Error in %s: count_a2o=%zu, and should be >= %zu.\n",
                __func__, count, (size_t) 2);
        ret = -1;
    }
    if( size < (size_t) ((world_size - 1) * (13 * sizeof(char) + sizeof(int))) ) {
        fprintf(stderr, "Error in %s: size_a2o=%zu, and should be >= %zu.\n",
                __func__, size,
                (size_t) ((world_size - 1) * (13 * sizeof(char) + sizeof(int))));
        ret = -1;
    }
    /* Check All-to-all COLL values */
    pvar_a2a_read(session, &count, &size);
    if( count < (size_t) (world_size * 4) ) {
        fprintf(stderr, "Error in %s: count_a2a=%zu, and should be >= %zu.\n",
                __func__, count, (size_t) (world_size * 4));
        ret = -1;
    }
    if( size < (size_t) (world_size * (world_size - 1) * (2 * 13 * sizeof(char) + sizeof(int))) ) {
        fprintf(stderr, "Error in %s: size_a2a=%zu, and should be >= %zu.\n",
                __func__, size,
                (size_t) (world_size * (world_size - 1) * (2 * 13 * sizeof(char) + sizeof(int))));
        ret = -1;
    }
    if( MPI_SUCCESS == ret ) {
        fprintf(stdout, "Check COLL...[ OK ]\n");
    } else {
        fprintf(stdout, "Check COLL...[FAIL]\n");
    }
    /* Keep old PML values */
    pvar_pml_read(session, old_cvalues, old_svalues);
    /* Free arrays */
    free(cvalues);
    return ret;
}

int main(int argc, char* argv[])
{
    int size, i, n, to, from, world_rank;
    MPI_T_pvar_session session;
    MPI_Status status;
    char s1[20], s2[20];
    strncpy(s1, "hello world!", 13);

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    pvar_all_init(&session, size);

    /* First phase: use collective operations to exchange data size times
       with every rank in MPI_COMM_WORLD. This phase comes first to make it
       easier to predict how many messages of each kind are exchanged.
    */
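    /* Each iteration below issues 4 all-to-all style operations (Allgather,
       Allreduce, Alltoall, Barrier), 2 one-to-all operations (Scatter,
       Bcast) and 2 all-to-one operations (Gather, Reduce), carrying 13-char
       strings and/or a single int; the (deliberately loose) lower bounds in
       pvar_coll_check follow from these counts. */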
    char*coll_buff = malloc(2 * size * 13 * sizeof(char));
    char*coll_recv_buff = coll_buff + size * 13;
    int sum_ranks;
    for( n = 0; n < size; ++n ) {
        /* Allgather */
        memset(coll_buff, 0, size * 13 * sizeof(char));
        MPI_Allgather(s1, 13, MPI_CHAR, coll_buff, 13, MPI_CHAR, MPI_COMM_WORLD);
        for( i = 0; i < size; ++i ) {
            if( strncmp(s1, &coll_buff[i * 13], 13) ) {
                fprintf(stderr, "Error in Allgather check: received \"%s\" instead of "
                        "\"hello world!\" from %d.\n", &coll_buff[i * 13], i);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
        }
        /* Scatter */
        MPI_Scatter(coll_buff, 13, MPI_CHAR, s2, 13, MPI_CHAR, n, MPI_COMM_WORLD);
        if( strncmp(s1, s2, 13) ) {
            fprintf(stderr, "Error in Scatter check: received \"%s\" instead of "
                    "\"hello world!\" from %d.\n", s2, n);
            MPI_Abort(MPI_COMM_WORLD, -1);
        }
        /* Allreduce */
        MPI_Allreduce(&world_rank, &sum_ranks, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
        if( sum_ranks != ((size - 1) * size / 2) ) {
            fprintf(stderr, "Error in Allreduce check: sum_ranks=%d instead of %d.\n",
                    sum_ranks, (size - 1) * size / 2);
            MPI_Abort(MPI_COMM_WORLD, -1);
        }
        /* Alltoall */
        memset(coll_recv_buff, 0, size * 13 * sizeof(char));
        MPI_Alltoall(coll_buff, 13, MPI_CHAR, coll_recv_buff, 13, MPI_CHAR, MPI_COMM_WORLD);
        for( i = 0; i < size; ++i ) {
            if( strncmp(s1, &coll_recv_buff[i * 13], 13) ) {
                fprintf(stderr, "Error in Alltoall check: received \"%s\" instead of "
                        "\"hello world!\" from %d.\n", &coll_recv_buff[i * 13], i);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
        }
        /* Bcast */
        if( n == world_rank ) {
            MPI_Bcast(s1, 13, MPI_CHAR, n, MPI_COMM_WORLD);
        } else {
            MPI_Bcast(s2, 13, MPI_CHAR, n, MPI_COMM_WORLD);
            if( strncmp(s1, s2, 13) ) {
                fprintf(stderr, "Error in Bcast check: received \"%s\" instead of "
                        "\"hello world!\" from %d.\n", s2, n);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
        }
        /* Barrier */
        MPI_Barrier(MPI_COMM_WORLD);
        /* Gather */
        memset(coll_buff, 0, size * 13 * sizeof(char));
        MPI_Gather(s1, 13, MPI_CHAR, coll_buff, 13, MPI_CHAR, n, MPI_COMM_WORLD);
        if( n == world_rank ) {
            for( i = 0; i < size; ++i ) {
                if( strncmp(s1, &coll_buff[i * 13], 13) ) {
                    fprintf(stderr, "Error in Gather check: received \"%s\" instead of "
                            "\"hello world!\" from %d.\n", &coll_buff[i * 13], i);
                    MPI_Abort(MPI_COMM_WORLD, -1);
                }
            }
        }
        /* Reduce */
        MPI_Reduce(&world_rank, &sum_ranks, 1, MPI_INT, MPI_SUM, n, MPI_COMM_WORLD);
        if( n == world_rank ) {
            if( sum_ranks != ((size - 1) * size / 2) ) {
                fprintf(stderr, "Error in Reduce check: sum_ranks=%d instead of %d.\n",
                        sum_ranks, (size - 1) * size / 2);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
        }
    }
    free(coll_buff);
    if( -1 == pvar_coll_check(session, size, world_rank) ) MPI_Abort(MPI_COMM_WORLD, -1);

    /* Second phase: exchange data size times with every other rank in
       MPI_COMM_WORLD using Send/Recv. */
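    /* Each rank sends one 13-char message to every other rank per iteration,
       so after size iterations the per-peer PML counters should have grown
       by at least size messages and size*13 bytes, and not at all for the
       local rank, which is what pvar_pml_check verifies. */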
    for( n = 0; n < size; ++n ) {
        for( i = 0; i < size - 1; ++i ) {
            to = (world_rank+1+i)%size;
            from = (world_rank+size-1-i)%size;
            if(world_rank < to){
                MPI_Send(s1, 13, MPI_CHAR, to, world_rank, MPI_COMM_WORLD);
                MPI_Recv(s2, 13, MPI_CHAR, from, from, MPI_COMM_WORLD, &status);
            } else {
                MPI_Recv(s2, 13, MPI_CHAR, from, from, MPI_COMM_WORLD, &status);
                MPI_Send(s1, 13, MPI_CHAR, to, world_rank, MPI_COMM_WORLD);
            }
            if( strncmp(s2, "hello world!", 13) ) {
                fprintf(stderr, "Error in PML check: s2=\"%s\" instead of \"hello world!\".\n",
                        s2);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
        }
    }
    if( -1 == pvar_pml_check(session, size, world_rank) ) MPI_Abort(MPI_COMM_WORLD, -1);

    /* Third phase: exchange data size times with every rank, including self,
       in MPI_COMM_WORLD using RMA operations. */
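    /* Every iteration issues one Put and one Get towards each rank
       (including self), so the per-peer OSC counters checked by
       pvar_osc_check should reach at least size messages and size*13 bytes
       in both the sent and received directions. */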
    char win_buff[20];
    MPI_Win win;
    MPI_Win_create(win_buff, 20, sizeof(char), MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    for( n = 0; n < size; ++n ) {
        for( i = 0; i < size; ++i ) {
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, i, 0, win);
            MPI_Put(s1, 13, MPI_CHAR, i, 0, 13, MPI_CHAR, win);
            MPI_Win_unlock(i, win);
        }
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, world_rank, 0, win);
        if( strncmp(win_buff, "hello world!", 13) ) {
            fprintf(stderr, "Error in OSC check: win_buff=\"%s\" instead of \"hello world!\".\n",
                    win_buff);
            MPI_Abort(MPI_COMM_WORLD, -1);
        }
        MPI_Win_unlock(world_rank, win);
        for( i = 0; i < size; ++i ) {
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, i, 0, win);
            MPI_Get(s2, 13, MPI_CHAR, i, 0, 13, MPI_CHAR, win);
            MPI_Win_unlock(i, win);
            if( strncmp(s2, "hello world!", 13) ) {
                fprintf(stderr, "Error in OSC check: s2=\"%s\" instead of \"hello world!\".\n",
                        s2);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
        }
    }
    MPI_Win_free(&win);
    if( -1 == pvar_osc_check(session, size, world_rank) ) MPI_Abort(MPI_COMM_WORLD, -1);

    pvar_all_finalize(&session);

    MPI_Finalize();

    return EXIT_SUCCESS;
}
