root/orte/test/mpi/parallel_w64.c


DEFINITIONS

This source file includes the following definitions:
  1. main

/* parallel MPI write to a single file */

#include "mpi.h"
#include <stdio.h>
#include <string.h>

#define D      3             /* dimensions */

#define X   1024             /* global x grid size */
#define Y   1024             /* global y grid size */
#define Z   1024             /* global z grid size */

#define nx   256             /* local x grid size */
#define ny   256             /* local y grid size */
#define nz   256             /* local z grid size */

#define ng (nx*ny*nz)        /* local grid (cube) size */

#define npx    4             /* number of PE's in x direction */
#define npy    4             /* number of PE's in y direction */
#define npz    4             /* number of PE's in z direction */

#define np (npx*npy*npz)  /* total PE count */

#define LOOP 1

#define MAX_RR_NAME 7

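/*
 * The decomposition above requires exactly np = npx*npy*npz = 64 ranks,
 * each owning a 256^3 block of the 1024^3 global grid, so the file written
 * is X*Y*Z 4-byte integers (4 GiB).  A typical launch would look something
 * like the following (the output path is illustrative only):
 *
 *     mpirun -np 64 ./parallel_w64 /scratch/testfile
 */
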
int
main(int argc, char* argv[])
{
  int  i, rank, npes, bug=0;
  static int buf[ng];          /* 64 MB local block; static so it is not placed on the stack */
  MPI_File     thefile;
  MPI_Status   status;
  MPI_Datatype filetype;
  MPI_Comm     new_comm;
  MPI_Offset   offset=0;
  MPI_Info     info=MPI_INFO_NULL;
  int gsize[D],distrib[D],dargs[D],psize[D];
  int dims[D],periods[D],reorder;
  double t1,t2,mbs;
  double to1,to2,tc1,tc2;
  double et,eto,etc;
  double max_mbs,min_mbs,avg_mbs;
  double max_et,min_et,avg_et;
  double max_eto,min_eto,avg_eto;
  double max_etc,min_etc,avg_etc;
  char process_name[MPI_MAX_PROCESSOR_NAME + 1];
  char rr_blank[] = {"       "};
  char rr_empty[] = {"???????"};
  int  count;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &npes);
  if ( rank == 0 )
    {
     if ( argc < 2 )
       {
        printf(" ERROR: no filename given\n");
        bug++;
       }
     if ( npes == np )
       {
        if ( argc > 1 ) printf(" file name: %s\n",argv[1]);  /* guard: argv[1] may be absent */
        printf(" total number of PE's: %3d\n",np);
        printf(" number of PE's in x direction: %4d\n",npx);
        printf(" number of PE's in y direction: %4d\n",npy);
        printf(" number of PE's in z direction: %4d\n",npz);
        printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
        printf("  local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
       }
     else
       {
        printf(" ERROR: total number of PE's must be %d\n",np);
        printf("        actual number of PE's was %d\n",npes);
        bug++;
       }
     if ( bug )
       {
        MPI_Abort(MPI_COMM_WORLD,-1);
       }
    }
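  /* record a short, blank-padded processor name (MAX_RR_NAME characters)
     for the optional per-rank report further below */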
  if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
    {
     sprintf(process_name, "%s", rr_empty);
    }
  else
    {
     if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
     process_name[MAX_RR_NAME] = '\0';
    }

  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Info_create(&info);

/* allow multiple writers to write to the file concurrently */

/*MPI_Info_set(info,"panfs_concurrent_write","1");*/

/* use data aggregation */

/*MPI_Info_set(info,"romio_cb_write","enable"); */
/*MPI_Info_set(info,"romio_cb_write","disable");*/
/*MPI_Info_set(info,"romio_cb_read","enable"); */
/*MPI_Info_set(info,"romio_cb_read","disable");*/

/* use one aggregator/writer per node */

/*MPI_Info_set(info,"cb_config_list","*:1");*/

/* aggregators/writers per allocation: use this or the above (both work) */

/*i = ((npes-1)/8) + 1;
  sprintf(awpa,"%d",i);
  MPI_Info_set (info,"cb_nodes",awpa);*/
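/* note: enabling the cb_nodes snippet above also requires a small string
   buffer for the hint value, e.g. char awpa[16]; (not declared in this file) */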
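  /* fill the local 256^3 block with values tagged by rank so each rank's
     contribution is distinguishable in the output file */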
  for ( i=0; i<ng; i++ ) buf[i] = rank*10000 + (i+1)%1024;

  for ( i=0; i<D; i++ )
    {
     periods[i] = 1;  /* true */
    }

  reorder = 1;        /* true */

  dims[0] = npx;
  dims[1] = npy;
  dims[2] = npz;

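  /* create a periodic npx x npy x npz Cartesian communicator; the file is
     opened collectively on this communicator below */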
  MPI_Cart_create(MPI_COMM_WORLD, D, dims, periods, reorder, &new_comm);

  for ( i=0; i<D; i++ )
    {
     distrib[i] = MPI_DISTRIBUTE_BLOCK;
     dargs[i]   = MPI_DISTRIBUTE_DFLT_DARG;
/*   psize[i]   = 0; */
    }

  gsize[0] = X;
  gsize[1] = Y;
  gsize[2] = Z;

  psize[0] = npx;
  psize[1] = npy;
  psize[2] = npz;

/*
  MPI_Dims_create(npes, D, psize);

  printf("psize %d %d %d\n",psize[0],psize[1],psize[2]);
*/

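  /* build the per-rank filetype: a block-distributed view of the global
     X x Y x Z integer array in Fortran order, so that the np local cubes
     tile the file as one contiguous global array */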
  MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_FORTRAN, MPI_INT, &filetype);
/*MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_C, MPI_INT, &filetype);              do not use C (row-major) order here; it would change the block-to-file mapping this test expects */

  MPI_Type_commit(&filetype);

  to1 = MPI_Wtime();
  MPI_File_open(new_comm, argv[1], MPI_MODE_WRONLY | MPI_MODE_CREATE, info, &thefile);
  to2 = MPI_Wtime();

  MPI_File_set_size(thefile, offset);

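  /* the view starts at offset 0 and uses the darray filetype, so each
     rank's writes land only in its own block of the shared file */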
  MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", MPI_INFO_NULL);

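  /* collective write: every rank writes its ng local integers through the
     darray view; timed over LOOP iterations */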
  t1 = MPI_Wtime();
  for ( i=0; i<LOOP; i++)
    {
     MPI_File_write_all(thefile, buf, ng, MPI_INT, &status);
    }
  t2 = MPI_Wtime();

  tc1 = MPI_Wtime();
  MPI_File_close(&thefile);
  tc2 = MPI_Wtime();

  et  = (t2  - t1)/LOOP;
  eto = (to2 - to1)/LOOP;
  etc = (tc2 - tc1)/LOOP;

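  /* write rate in decimal megabytes per second: total bytes written by the
     whole job divided by this rank's write time */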
  mbs = (((double)(LOOP*X*Y*Z)*sizeof(int)))/(1000000.0*(t2-t1));

/*printf(" %s[%3d]    ET  %8.2f  %8.2f  %8.2f         %8.1f mbs\n", process_name, rank, t1, t2, t2-t1, mbs);*/

  MPI_Barrier(MPI_COMM_WORLD);

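  /* gather min / sum (averaged later on rank 0) / max of the per-rank
     rates and times */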
  MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  MPI_Reduce(&et, &avg_et, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&et, &min_et, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&et, &max_et, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  MPI_Reduce(&eto, &avg_eto, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&eto, &min_eto, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&eto, &max_eto, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  MPI_Reduce(&etc, &avg_etc, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  MPI_Reduce(&etc, &min_etc, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&etc, &max_etc, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

  fflush(stdout);

  if ( rank == 0 )
    {
     mbs = avg_mbs/npes;
     printf("\n     average write rate: %9.1f mbs\n", mbs);
     printf("     minimum write rate: %9.1f mbs\n", min_mbs);
     printf("     maximum write rate: %9.1f mbs\n\n", max_mbs);
     avg_eto = avg_eto/npes;
     avg_et  = avg_et/npes;
     avg_etc = avg_etc/npes;
     printf("     open time:  %9.3f min %9.3f avg %9.3f max\n",min_eto,avg_eto,max_eto);
     printf("     write time: %9.3f min %9.3f avg %9.3f max\n",min_et,avg_et,max_et);
     printf("     close time: %9.3f min %9.3f avg %9.3f max\n\n",min_etc,avg_etc,max_etc);
     fflush(stdout);
    }

  MPI_Type_free(&filetype);   /* release the committed darray type */
  MPI_Info_free(&info);       /* release the info object created above */

  MPI_Finalize();

  return 0;
}
