root/orte/test/mpi/parallel_r8.c


DEFINITIONS

This source file includes the following definitions:
  1. main


/* parallel MPI read from a single file */
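
/*
 * How the test is run (a sketch; the launch command and file name below are
 * examples, not taken from the source): start exactly np = npx*npy*npz = 8
 * ranks and pass the input file as the first argument, e.g.
 * "mpirun -np 8 ./parallel_r8 <datafile>".  The file must hold at least
 * X*Y*Z = 256^3 4-byte integers (67,108,864 bytes); each rank reads its own
 * nx*ny*nz = 128^3 block (8 MiB) into a stack buffer with one collective
 * MPI-IO read per LOOP iteration.
 */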

#include "mpi.h"
#include <stdio.h>
#include <string.h>

#define D      3             /* dimensions */

#define X   256             /* global x grid size */
#define Y   256             /* global y grid size */
#define Z   256             /* global z grid size */

#define nx   128             /* local x grid size */
#define ny   128             /* local y grid size */
#define nz   128             /* local z grid size */

#define ng (nx*ny*nz)        /* local grid (cube) size */

#define npx    2             /* number of PE's in x direction */
#define npy    2             /* number of PE's in y direction */
#define npz    2             /* number of PE's in z direction */

#define np (npx*npy*npz)     /* total PE count */

#define LOOP 1

#define MAX_RR_NAME 7

int
main(int argc, char* argv[])
{
  int  i, rank, npes, bug=0;
  int buf[ng];
  MPI_File     thefile;
  MPI_Status   status;
  MPI_Datatype filetype;
  MPI_Comm     new_comm;
  MPI_Offset   offset=0;
  MPI_Info     info=MPI_INFO_NULL;
  int gsize[D],distrib[D],dargs[D],psize[D];
  int dims[D],periods[D],reorder;
  double t1,t2,mbs;
  double to1,to2,tc1,tc2;
  double et,eto,etc;
  double max_mbs,min_mbs,avg_mbs;
  double max_et,min_et,avg_et;
  double max_eto,min_eto,avg_eto;
  double max_etc,min_etc,avg_etc;
  char process_name[MPI_MAX_PROCESSOR_NAME + 1];
  char rr_blank[] = {"       "};
  char rr_empty[] = {"???????"};
  int  count;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &npes);
  if ( rank == 0 )
    {
     if ( argc < 2 )
       {
        printf(" ERROR: no filename given\n");
        bug++;
       }
     if ( npes == np )
       {
        if ( argc > 1 ) printf(" file name: %s\n",argv[1]);  /* argv[1] may be absent */
        printf(" total number of PE's: %3d\n",np);
        printf(" number of PE's in x direction: %3d\n",npx);
        printf(" number of PE's in y direction: %3d\n",npy);
        printf(" number of PE's in z direction: %3d\n",npz);
        printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
        printf("  local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
       }
     else
       {
        printf(" ERROR: total number of PE's must be %d\n",np);
        printf("        actual number of PE's was %d\n",npes);
        bug++;
       }
     if ( bug )
       {
        MPI_Abort(MPI_COMM_WORLD,-1);
       }
    }
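
  /* Pad or truncate the processor name to a fixed MAX_RR_NAME characters so
     the per-rank report lines (see the commented-out printf further below)
     stay aligned; fall back to the "???????" placeholder if the name cannot
     be obtained. */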
  if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
    {
     sprintf(process_name, "%s", rr_empty);
    }
  else
    {
     if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
     process_name[MAX_RR_NAME] = '\0';
    }

  MPI_Info_create(&info);

/* allow multiple writers to write to the file concurrently */

/*MPI_Info_set(info,"panfs_concurrent_write","1");*/

/* use data aggregation */

/*MPI_Info_set(info,"romio_cb_write","enable"); */
/*MPI_Info_set(info,"romio_cb_write","disable");*/
/*MPI_Info_set(info,"romio_cb_read","enable"); */
/*MPI_Info_set(info,"romio_cb_read","disable");*/

/* use one aggregator/writer per node */

/*MPI_Info_set(info,"cb_config_list","*:1");*/

/* aggregators/writers per allocation: use this or the above (both work) */

/*i = ((npes-1)/8) + 1;
  sprintf(awpa,"%d",i);
  MPI_Info_set (info,"cb_nodes",awpa);*/

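/* The commented-out hints above are ROMIO collective-buffering and Panasas
   (PanFS) hints; all of them are left disabled, so the open below effectively
   receives an empty info object.  Whether any of them helps is file-system
   and MPI-IO implementation dependent. */
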
  for ( i=0; i<D; i++ )
    {
     periods[i] = 1;  /* true */
    }

  reorder = 1;        /* true */

  dims[0] = npx;
  dims[1] = npy;
  dims[2] = npz;

  MPI_Cart_create(MPI_COMM_WORLD, D, dims, periods, reorder, &new_comm);

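  /* new_comm is a 2x2x2 periodic Cartesian communicator, used here only as
     the communicator passed to MPI_File_open.  An observation (not from the
     original comments): with reorder set, ranks in new_comm may be
     renumbered relative to MPI_COMM_WORLD, while the darray filetype below
     is keyed to the MPI_COMM_WORLD rank. */
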
  for ( i=0; i<D; i++ )
    {
     distrib[i] = MPI_DISTRIBUTE_BLOCK;
     dargs[i]   = MPI_DISTRIBUTE_DFLT_DARG;
/*   psize[i]   = 0; */
    }

  gsize[0] = X;
  gsize[1] = Y;
  gsize[2] = Z;

  psize[0] = npx;
  psize[1] = npy;
  psize[2] = npz;

/*
  MPI_Dims_create(npes, D, psize);

  printf("psize %d %d %d\n",psize[0],psize[1],psize[2]);
*/

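  /* Build the distributed-array filetype: the X x Y x Z integer array is
     block-distributed over the npx x npy x npz process grid in Fortran
     (column-major) order, so each rank's filetype selects its own
     nx x ny x nz block of the global grid. */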
  MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_FORTRAN, MPI_INT, &filetype);

  MPI_Type_commit(&filetype);

  to1 = MPI_Wtime();
  MPI_File_open(new_comm, argv[1], MPI_MODE_RDONLY, info, &thefile);
  to2 = MPI_Wtime();

  MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", MPI_INFO_NULL);

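  /* Time LOOP collective reads.  With the darray view installed, every
     MPI_File_read_all call delivers this rank's ng = 128^3 integers into
     buf; the call is collective, so all ranks must participate. */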
  t1 = MPI_Wtime();
  for ( i=0; i<LOOP; i++ )
    {
     MPI_File_read_all(thefile, buf, ng, MPI_INT, &status);
    }
  t2 = MPI_Wtime();

/*MPI_File_sync(thefile); */

  tc1 = MPI_Wtime();
  MPI_File_close(&thefile);
  tc2 = MPI_Wtime();

  et  = (t2  - t1)/LOOP;
  eto = (to2 - to1)/LOOP;
  etc = (tc2 - tc1)/LOOP;

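  /* Per-rank bandwidth estimate: each pass reads X*Y*Z * sizeof(int) =
     256^3 * 4 = 67,108,864 bytes in aggregate, so mbs is the aggregate rate
     (decimal MB/s) implied by this rank's read time.  The reductions below
     collect min/avg/max across ranks on rank 0.  (Note: eto and etc are also
     divided by LOOP although the file is opened and closed only once; this
     is harmless with LOOP = 1.) */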
  mbs = (((double)(LOOP*X*Y*Z)*sizeof(int)))/(1000000.0*(t2-t1));

/*printf(" %s[%3d]    ET  %5.2f  %6.2f  %6.2f     %5.1f mbs       Data %9d %9d \n", process_name, rank, t1, t2, t2-t1, mbs, buf[0], buf[ng-1]);*/

  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  MPI_Reduce(&et, &avg_et, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&et, &min_et, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&et, &max_et, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  MPI_Reduce(&eto, &avg_eto, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&eto, &min_eto, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&eto, &max_eto, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  MPI_Reduce(&etc, &avg_etc, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&etc, &min_etc, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&etc, &max_etc, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  fflush(stdout);

  if ( rank == 0 )
    {
     mbs = avg_mbs/npes;
     printf("\n     average read rate: %9.1f mbs\n", mbs);
     printf("     minimum read rate: %9.1f mbs\n", min_mbs);
     printf("     maximum read rate: %9.1f mbs\n\n", max_mbs);
     avg_eto = avg_eto/npes;
     avg_et  = avg_et/npes;
     avg_etc = avg_etc/npes;
     printf("     open time:  %9.3f min %9.3f avg %9.3f max\n",min_eto,avg_eto,max_eto);
     printf("     read time:  %9.3f min %9.3f avg %9.3f max\n",min_et,avg_et,max_et);
     printf("     close time: %9.3f min %9.3f avg %9.3f max\n\n",min_etc,avg_etc,max_etc);
     fflush(stdout);
    }

  /* release the objects created above */
  MPI_Type_free(&filetype);
  MPI_Info_free(&info);
  MPI_Comm_free(&new_comm);

  MPI_Finalize();

  return 0;
}
