root/orte/test/mpi/parallel_w8.c

/* parallel MPI write to a single file */

#include "mpi.h"
#include <stdio.h>
#include <string.h>

#define D      3             /* dimensions */

#define X   256             /* global x grid size */
#define Y   256             /* global y grid size */
#define Z   256             /* global z grid size */

#define nx   128             /* local x grid size */
#define ny   128             /* local y grid size */
#define nz   128             /* local z grid size */

#define ng (nx*ny*nz)        /* local grid (cube) size */

#define npx    2             /* number of PE's in x direction */
#define npy    2             /* number of PE's in y direction */
#define npz    2             /* number of PE's in z direction */

#define np (npx*npy*npz)  /* total PE count */

#define LOOP 1

#define MAX_RR_NAME 7

int
main(int argc, char* argv[])
{
    int  i, rank, npes, bug=0;
    int buf[ng];             /* local write buffer: ng 4-byte ints (about 8 MiB) on the stack */
    MPI_File     thefile;
    MPI_Status   status;
    MPI_Datatype filetype;
    MPI_Comm     new_comm;
    MPI_Offset   offset=0;
    MPI_Info     info=MPI_INFO_NULL;
    int gsize[D],distrib[D],dargs[D],psize[D];
    int dims[D],periods[D],reorder;
    double t1,t2,mbs;
    double to1,to2,tc1,tc2;
    double et,eto,etc;
    double max_mbs,min_mbs,avg_mbs;
    double max_et,min_et,avg_et;
    double max_eto,min_eto,avg_eto;
    double max_etc,min_etc,avg_etc;
    char process_name[MPI_MAX_PROCESSOR_NAME + 1];
    char rr_blank[] = {"       "};
    char rr_empty[] = {"???????"};
    int  count;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &npes);
    if ( rank == 0 )
    {
        if ( argc < 2 )
        {
            printf(" ERROR: no filename given\n");
            bug++;
        }
        if ( npes == np )
        {
            if ( argc > 1 ) printf(" file name: %s\n",argv[1]);
            printf(" total number of PE's: %3d\n",np);
            printf(" number of PE's in x direction: %4d\n",npx);
            printf(" number of PE's in y direction: %4d\n",npy);
            printf(" number of PE's in z direction: %4d\n",npz);
            printf(" global grid size: %dx%dx%d 4 byte integers (total %lu)\n",X,Y,Z,(unsigned long)X*Y*Z);
            printf("  local grid size: %dx%dx%d 4 byte integers (total %d)\n",nx,ny,nz,ng);
        }
        else
        {
            printf(" ERROR: total number of PE's must be %d\n",np);
            printf("        actual number of PE's was %d\n",npes);
            bug++;
        }
        if ( bug )
        {
            MPI_Abort(MPI_COMM_WORLD,-1);
        }
    }
    if ( MPI_Get_processor_name(process_name, &count) != MPI_SUCCESS)
    {
        sprintf(process_name, "%s", rr_empty);
    }
    else
    {
        if (count < MAX_RR_NAME) strncat(&process_name[count],rr_blank,MAX_RR_NAME-count);
        process_name[MAX_RR_NAME] = '\0';
    }

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Info_create(&info);

    /* allow multiple writers to write to the file concurrently */

    /*MPI_Info_set(info,"panfs_concurrent_write","1");*/

    /* use data aggregation */

    /*MPI_Info_set(info,"romio_cb_write","enable"); */
    /*MPI_Info_set(info,"romio_cb_write","disable");*/
    /*MPI_Info_set(info,"romio_cb_read","enable"); */
    /*MPI_Info_set(info,"romio_cb_read","disable");*/

    /* use one aggregator/writer per node */

    /*MPI_Info_set(info,"cb_config_list","*:1");*/

    /* aggregators/writers per allocation: use this or the above (both work) */

    /*i = ((npes-1)/8) + 1;
     sprintf(awpa,"%d",i);
     MPI_Info_set (info,"cb_nodes",awpa);*/


    for ( i=0; i<ng; i++ ) buf[i] = rank*10000 + (i+1)%1024;

    for ( i=0; i<D; i++ )
    {
        periods[i] = 1;  /* true */
    }

    reorder = 1;        /* true */

    dims[0] = npx;
    dims[1] = npy;
    dims[2] = npz;

    MPI_Cart_create(MPI_COMM_WORLD, D, dims, periods, reorder, &new_comm);

    for ( i=0; i<D; i++ )
    {
        distrib[i] = MPI_DISTRIBUTE_BLOCK;
        dargs[i]   = MPI_DISTRIBUTE_DFLT_DARG;
        /*   psize[i]   = 0; */
    }

    gsize[0] = X;
    gsize[1] = Y;
    gsize[2] = Z;

    psize[0] = npx;
    psize[1] = npy;
    psize[2] = npz;

    /*
     MPI_Dims_create(npes, D, psize);

     printf("psize %d %d %d\n",psize[0],psize[1],psize[2]);
     */

    MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_FORTRAN, MPI_INT, &filetype);
    /*MPI_Type_create_darray(npes, rank, D, gsize, distrib, dargs, psize, MPI_ORDER_C, MPI_INT, &filetype);              don't do this */

    MPI_Type_commit(&filetype);

    to1 = MPI_Wtime();
    MPI_File_open(new_comm, argv[1], MPI_MODE_WRONLY | MPI_MODE_CREATE, info, &thefile);
    to2 = MPI_Wtime();

    MPI_File_set_size(thefile, offset);

    MPI_File_set_view(thefile, offset, MPI_INT, filetype, "native", MPI_INFO_NULL);

    t1 = MPI_Wtime();
    for ( i=0; i<LOOP; i++)
    {
        MPI_File_write_all(thefile, buf, ng, MPI_INT, &status);
    }
    t2 = MPI_Wtime();

    tc1 = MPI_Wtime();
    MPI_File_close(&thefile);
    tc2 = MPI_Wtime();

    et  = (t2  - t1)/LOOP;
    eto = (to2 - to1)/LOOP;
    etc = (tc2 - tc1)/LOOP;

    /* per-rank estimate of the aggregate rate: whole global volume (X*Y*Z ints) over this rank's write time */
    mbs = (((double)(LOOP*X*Y*Z)*sizeof(int)))/(1000000.0*(t2-t1));

    /*printf(" %s[%3d]    ET  %8.2f  %8.2f  %8.2f         %8.1f mbs\n", process_name, rank, t1, t2, t2-t1, mbs);*/

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Reduce(&mbs, &avg_mbs, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Reduce(&mbs, &min_mbs, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
    MPI_Reduce(&mbs, &max_mbs, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    MPI_Reduce(&et, &avg_et, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Reduce(&et, &min_et, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
    MPI_Reduce(&et, &max_et, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    MPI_Reduce(&eto, &avg_eto, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Reduce(&eto, &min_eto, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
    MPI_Reduce(&eto, &max_eto, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    MPI_Reduce(&etc, &avg_etc, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
    MPI_Reduce(&etc, &min_etc, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
    MPI_Reduce(&etc, &max_etc, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);

    fflush(stdout);

    if ( rank == 0 )
    {
        mbs = avg_mbs/npes;
        printf("\n     average write rate: %9.1f mbs\n", mbs);
        printf("     minimum write rate: %9.1f mbs\n", min_mbs);
        printf("     maximum write rate: %9.1f mbs\n\n", max_mbs);
        avg_eto = avg_eto/npes;
        avg_et  = avg_et/npes;
        avg_etc = avg_etc/npes;
        printf("     open time:  %9.3f min %9.3f avg %9.3f max\n",min_eto,avg_eto,max_eto);
        printf("     write time: %9.3f min %9.3f avg %9.3f max\n",min_et,avg_et,max_et);
        printf("     close time: %9.3f min %9.3f avg %9.3f max\n\n",min_etc,avg_etc,max_etc);
        fflush(stdout);
    }

    MPI_Finalize();

    return 0;
}
