root/ompi/mca/io/romio321/romio/adio/common/ad_darray.c


DEFINITIONS

This source file includes the following definitions:
  1. ADIO_Type_create_darray
  2. MPIOI_Type_block
  3. MPIOI_Type_cyclic
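
ADIO_Type_create_darray mirrors MPI_Type_create_darray: it builds a datatype
describing the portion of an ndims-dimensional global array owned by one
process, typically used as the filetype of an MPI-IO file view. Below is a
minimal sketch of a hypothetical caller, assuming a 100x100 global array of
MPI_INT block-distributed over a 2x2 process grid; the grid shape, sizes, and
variable names are illustrative only, `size` and `rank` come from the
communicator, and error handling is omitted:

    int gsizes[2]   = {100, 100};                /* 100x100 global array     */
    int distribs[2] = {MPI_DISTRIBUTE_BLOCK,     /* block in both dimensions */
                       MPI_DISTRIBUTE_BLOCK};
    int dargs[2]    = {MPI_DISTRIBUTE_DFLT_DARG, /* default block sizes      */
                       MPI_DISTRIBUTE_DFLT_DARG};
    int psizes[2]   = {2, 2};                    /* 2x2 process grid         */
    MPI_Datatype filetype;

    ADIO_Type_create_darray(size, rank, 2, gsizes, distribs, dargs,
                            psizes, MPI_ORDER_C, MPI_INT, &filetype);
    MPI_Type_commit(&filetype);
    /* ... pass filetype to MPI_File_set_view ... */
    MPI_Type_free(&filetype);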

/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 *
 *   Copyright (C) 1997 University of Chicago.
 *   See COPYRIGHT notice in top-level directory.
 */

#include "adio.h"
#include "adio_extern.h"

static int MPIOI_Type_block(int *array_of_gsizes, int dim, int ndims, int nprocs,
                     int rank, int darg, int order, MPI_Aint orig_extent,
                     MPI_Datatype type_old, MPI_Datatype *type_new,
                     MPI_Aint *st_offset);
static int MPIOI_Type_cyclic(int *array_of_gsizes, int dim, int ndims, int nprocs,
                      int rank, int darg, int order, MPI_Aint orig_extent,
                      MPI_Datatype type_old, MPI_Datatype *type_new,
                      MPI_Aint *st_offset);


int ADIO_Type_create_darray(int size, int rank, int ndims,
                            int *array_of_gsizes, int *array_of_distribs,
                            int *array_of_dargs, int *array_of_psizes,
                            int order, MPI_Datatype oldtype,
                            MPI_Datatype *newtype)
{
    MPI_Datatype type_old, type_new=MPI_DATATYPE_NULL, inttype;
    int procs, tmp_rank, i, tmp_size, blklen, *coords;
    MPI_Aint *st_offsets, orig_extent, disp, ub, lb;

    MPI_Type_get_extent(oldtype, &lb, &orig_extent);

    /* calculate position in Cartesian grid as MPI would (row-major
       ordering) */
    coords = (int *) ADIOI_Malloc(ndims*sizeof(int));
    procs = size;
    tmp_rank = rank;
    for (i=0; i<ndims; i++) {
        procs = procs/array_of_psizes[i];
        coords[i] = tmp_rank/procs;
        tmp_rank = tmp_rank % procs;
    }
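    /* e.g., ndims=3, array_of_psizes={2,3,2}, rank=7 yields
       coords={1,0,1}, the same coordinates MPI_Cart_coords would
       return on a row-major 2x3x2 grid */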

    st_offsets = (MPI_Aint *) ADIOI_Malloc(ndims*sizeof(MPI_Aint));
    type_old = oldtype;

    if (order == MPI_ORDER_FORTRAN) {
        /* dimension 0 changes fastest */
        for (i=0; i<ndims; i++) {
            switch(array_of_distribs[i]) {
            case MPI_DISTRIBUTE_BLOCK:
                MPIOI_Type_block(array_of_gsizes, i, ndims,
                                 array_of_psizes[i],
                                 coords[i], array_of_dargs[i],
                                 order, orig_extent,
                                 type_old, &type_new,
                                 st_offsets+i);
                break;
            case MPI_DISTRIBUTE_CYCLIC:
                MPIOI_Type_cyclic(array_of_gsizes, i, ndims,
                                  array_of_psizes[i], coords[i],
                                  array_of_dargs[i], order,
                                  orig_extent, type_old,
                                  &type_new, st_offsets+i);
                break;
            case MPI_DISTRIBUTE_NONE:
                /* treat it as a block distribution on 1 process */
                MPIOI_Type_block(array_of_gsizes, i, ndims, 1, 0,
                                 MPI_DISTRIBUTE_DFLT_DARG, order,
                                 orig_extent,
                                 type_old, &type_new,
                                 st_offsets+i);
                break;
            }
            if (i) MPI_Type_free(&type_old);
            type_old = type_new;
        }

        /* add displacement and UB */
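        /* disp is the column-major linear index of this process's first
           element in the global array: st_offsets[0]
             + array_of_gsizes[0]*st_offsets[1]
             + array_of_gsizes[0]*array_of_gsizes[1]*st_offsets[2] + ... */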
        disp = st_offsets[0];
        tmp_size = 1;
        for (i=1; i<ndims; i++) {
            tmp_size *= array_of_gsizes[i-1];
            disp += (MPI_Aint)tmp_size*st_offsets[i];
        }
        /* rest done below for both Fortran and C order */
    }

    else /* order == MPI_ORDER_C */ {
        /* dimension ndims-1 changes fastest */
        for (i=ndims-1; i>=0; i--) {
            switch(array_of_distribs[i]) {
            case MPI_DISTRIBUTE_BLOCK:
                MPIOI_Type_block(array_of_gsizes, i, ndims, array_of_psizes[i],
                                 coords[i], array_of_dargs[i], order,
                                 orig_extent, type_old, &type_new,
                                 st_offsets+i);
                break;
            case MPI_DISTRIBUTE_CYCLIC:
                MPIOI_Type_cyclic(array_of_gsizes, i, ndims,
                                  array_of_psizes[i], coords[i],
                                  array_of_dargs[i], order,
                                  orig_extent, type_old, &type_new,
                                  st_offsets+i);
                break;
            case MPI_DISTRIBUTE_NONE:
                /* treat it as a block distribution on 1 process */
                MPIOI_Type_block(array_of_gsizes, i, ndims, array_of_psizes[i],
                                 coords[i], MPI_DISTRIBUTE_DFLT_DARG, order,
                                 orig_extent, type_old, &type_new,
                                 st_offsets+i);
                break;
            }
            if (i != ndims-1) MPI_Type_free(&type_old);
            type_old = type_new;
        }

        /* add displacement and UB */
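        /* disp is the row-major linear index of this process's first
           element in the global array: st_offsets[ndims-1]
             + array_of_gsizes[ndims-1]*st_offsets[ndims-2]
             + array_of_gsizes[ndims-1]*array_of_gsizes[ndims-2]*st_offsets[ndims-3]
             + ... */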
        disp = st_offsets[ndims-1];
        tmp_size = 1;
        for (i=ndims-2; i>=0; i--) {
            tmp_size *= array_of_gsizes[i+1];
            disp += (MPI_Aint)tmp_size*st_offsets[i];
        }
    }

    disp *= orig_extent;

    ub = orig_extent;
    for (i=0; i<ndims; i++) ub *= (MPI_Aint)array_of_gsizes[i];
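    /* embed type_new at byte offset disp via a one-element struct, then
       resize so the result has lower bound 0 and extent ub, the size in
       bytes of the entire global array; this lets the type tile the file
       correctly when used as an MPI-IO filetype */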

    blklen = 1;

    MPI_Type_create_struct(1, &blklen, &disp, &type_new, &inttype);
    MPI_Type_create_resized(inttype, 0, ub, newtype);
    MPI_Type_free(&inttype);

    MPI_Type_free(&type_new);
    ADIOI_Free(st_offsets);
    ADIOI_Free(coords);
    return MPI_SUCCESS;
}


/* Returns MPI_SUCCESS on success, an MPI error code on failure.  Code above
 * needs to call MPIO_Err_return_xxx.
 */
static int MPIOI_Type_block(int *array_of_gsizes, int dim, int ndims, int nprocs,
                     int rank, int darg, int order, MPI_Aint orig_extent,
                     MPI_Datatype type_old, MPI_Datatype *type_new,
                     MPI_Aint *st_offset)
{
    /* nprocs = no. of processes in dimension dim of grid
       rank = coordinate of this process in dimension dim */
    int blksize, global_size, mysize, i, j;
    MPI_Aint stride;

    global_size = array_of_gsizes[dim];

    if (darg == MPI_DISTRIBUTE_DFLT_DARG)
        blksize = (global_size + nprocs - 1)/nprocs;
    else {
        blksize = darg;

        /* --BEGIN ERROR HANDLING-- */
        if (blksize <= 0) {
            return MPI_ERR_ARG;
        }

        if (blksize * nprocs < global_size) {
            return MPI_ERR_ARG;
        }
        /* --END ERROR HANDLING-- */
    }

    j = global_size - blksize*rank;
    mysize = ADIOI_MIN(blksize, j);
    if (mysize < 0) mysize = 0;
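    /* e.g., global_size=10, nprocs=4 with the default darg: blksize =
       ceil(10/4) = 3, so ranks 0..3 hold 3, 3, 3, and 1 elements; a rank
       whose block would start past the end of the array gets mysize = 0 */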

    stride = orig_extent;
    if (order == MPI_ORDER_FORTRAN) {
        if (dim == 0)
            MPI_Type_contiguous(mysize, type_old, type_new);
        else {
            for (i=0; i<dim; i++) stride *= (MPI_Aint)array_of_gsizes[i];
            MPI_Type_create_hvector(mysize, 1, stride, type_old, type_new);
        }
    }
    else {
        if (dim == ndims-1)
            MPI_Type_contiguous(mysize, type_old, type_new);
        else {
            for (i=ndims-1; i>dim; i--) stride *= (MPI_Aint)array_of_gsizes[i];
            MPI_Type_create_hvector(mysize, 1, stride, type_old, type_new);
        }
    }

    *st_offset = (MPI_Aint)blksize * (MPI_Aint)rank;
    /* in terms of no. of elements of type oldtype in this dimension */
    if (mysize == 0) *st_offset = 0;

    return MPI_SUCCESS;
}


/* Returns MPI_SUCCESS on success, an MPI error code on failure.  Code above
 * needs to call MPIO_Err_return_xxx.
 */
static int MPIOI_Type_cyclic(int *array_of_gsizes, int dim, int ndims, int nprocs,
                      int rank, int darg, int order, MPI_Aint orig_extent,
                      MPI_Datatype type_old, MPI_Datatype *type_new,
                      MPI_Aint *st_offset)
{
    /* nprocs = no. of processes in dimension dim of grid
       rank = coordinate of this process in dimension dim */
    int blksize, i, blklens[3], st_index, end_index, local_size, rem, count;
    MPI_Aint stride, disps[3];
    MPI_Datatype type_tmp, type_tmp1, types[3];

    if (darg == MPI_DISTRIBUTE_DFLT_DARG) blksize = 1;
    else blksize = darg;

    /* --BEGIN ERROR HANDLING-- */
    if (blksize <= 0) {
        return MPI_ERR_ARG;
    }
    /* --END ERROR HANDLING-- */

    st_index = rank*blksize;
    end_index = array_of_gsizes[dim] - 1;

    if (end_index < st_index) local_size = 0;
    else {
        local_size = ((end_index - st_index + 1)/(nprocs*blksize))*blksize;
        rem = (end_index - st_index + 1) % (nprocs*blksize);
        local_size += ADIOI_MIN(rem, blksize);
    }
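    /* e.g., array_of_gsizes[dim]=10, nprocs=3, blksize=2, rank=1: this
       process owns elements {2,3} and {8,9}, so local_size =
       (8/6)*2 + min(8%6, 2) = 2 + 2 = 4 */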

    count = local_size/blksize;
    rem = local_size % blksize;

    stride = (MPI_Aint)nprocs*(MPI_Aint)blksize*orig_extent;
    if (order == MPI_ORDER_FORTRAN)
        for (i=0; i<dim; i++) stride *= (MPI_Aint)array_of_gsizes[i];
    else for (i=ndims-1; i>dim; i--) stride *= (MPI_Aint)array_of_gsizes[i];
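    /* stride is the distance in bytes between the starts of two
       successive blocks owned by this process: nprocs*blksize elements
       along this dimension, scaled by the extent of oldtype and by the
       global sizes of all faster-varying dimensions */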

    MPI_Type_create_hvector(count, blksize, stride, type_old, type_new);

    if (rem) {
        /* if the last block is of size less than blksize, include
           it separately using MPI_Type_create_struct */

        types[0] = *type_new;
        types[1] = type_old;
        disps[0] = 0;
        disps[1] = (MPI_Aint)count*stride;
        blklens[0] = 1;
        blklens[1] = rem;

        MPI_Type_create_struct(2, blklens, disps, types, &type_tmp);

        MPI_Type_free(type_new);
        *type_new = type_tmp;
    }

    /* In the fastest-varying dimension (the first one the caller
       processes), the displacement must be built into the type itself. */
    if ( ((order == MPI_ORDER_FORTRAN) && (dim == 0)) ||
         ((order == MPI_ORDER_C) && (dim == ndims-1)) ) {
        types[0] = *type_new;
        disps[0] = (MPI_Aint)rank * (MPI_Aint)blksize * orig_extent;
        blklens[0] = 1;
        MPI_Type_create_struct(1, blklens, disps, types, &type_tmp1);
        MPI_Type_create_resized(type_tmp1, 0,
                                orig_extent * (MPI_Aint)array_of_gsizes[dim],
                                &type_tmp);
        MPI_Type_free(&type_tmp1);
        MPI_Type_free(type_new);
        *type_new = type_tmp;

        *st_offset = 0;  /* set it to 0 because it is taken care of in
                            the struct above */
    }
    else {
        *st_offset = (MPI_Aint)rank * (MPI_Aint)blksize;
        /* st_offset is in terms of no. of elements of type oldtype in
         * this dimension */
    }

    if (local_size == 0) *st_offset = 0;

    return MPI_SUCCESS;
}
