Occurrences of "to" (file:line:text):

ompi/include/mpif-externals.h:22:! These "external" statements are specific to the MPI mpif.h
ompi/include/mpif-externals.h:23:! interface (and are toxic to the MPI module interfaces),.
ompi/include/mpif-externals.h:34:! a function pointer (to MPI_REGISTER_DATAREP) and therefore must be
ompi/include/mpif-sentinels.h:23:! All of these types were chosen with care to match the types of
ompi/include/mpif-sentinels.h:44:! MPI_ERRCODES_IGNORE has similar rationale to MPI_ARGV_NULL. The
ompi/include/mpif-sentinels.h:45:! F77 functions are all smart enough to check that the errcodes
ompi/include/mpif-sentinels.h:51:! MPI_STATUS_IGNORE has similar rationale to MPI_ERRCODES_IGNORE.
ompi/mca/coll/base/coll_base_barrier.c:256:int rank, size, distance, to, from, err, line = 0;
ompi/mca/coll/base/coll_base_barrier.c:268:to = (rank + distance) % size;
ompi/mca/coll/base/coll_base_barrier.c:271:err = ompi_coll_base_sendrecv_zero(to, MCA_COLL_BASE_TAG_BARRIER,
ompi/mca/topo/treematch/treematch/IntConstantInitializedVector.c:10:if(v->from[i] < v->top && v->to[v->from[i]] == i)
ompi/mca/topo/treematch/treematch/IntConstantInitializedVector.c:22:v->to = malloc(sizeof(int)*size);
ompi/mca/topo/treematch/treematch/IntConstantInitializedVector.c:29:free(v->to);
ompi/mca/topo/treematch/treematch/IntConstantInitializedVector.c:43:v->to[v->top] = i;
ompi/mca/topo/treematch/treematch/IntConstantInitializedVector.h:6:int init_value, size, top, *to, *from, *vec;
ompi/mpi/fortran/base/conversion-fn-null-int-interface.h:12:! them to the "mpi" module namespace, and result in linker errors if MPI
ompi/mpi/fortran/base/conversion-fn-null-int-interface.h:13:! F90 applications try to use them. because the implementations of
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:25:! invoke a back-end ompi_*_f() subroutine, which is BIND(C)-bound to a
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:29:! parameters, and Fortran disallows passing LOGICAL parameters to
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:32:! subroutines-with-LOGICAL-params, we have to be creative on how to
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:37:! files (e.g., finalized_f08.F90) use the "mpi" module to get a
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:44:! the right types (e.g., convert MPI handles from comm to
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:54:! will pass a pointer to sendtypes(0)%MPI_VAL (i.e., the first integer
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:56:! to be exactly equivalent to a single INTEGER, an array of mpi_f08
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:57:! handles is exactly equivalent to an array of INTEGERS. So passing
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:58:! an address to the first MPI_VAL is exactly the same as passing an
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:73:! parameters, then we have to do even more tricks than we described
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:80:! memory map) to INTEGER, DIMENSION(MPI_STATUS_SIZE). So we just have
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:81:! to fool the compiler into accepting it (it's the same C<-->Fortran
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:85:! an "interface" block for the PMPI_* subroutine that we want to call.
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:91:! For the C programmers reading this, this is very much analogous to
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:2182:! TODO - FIXME to use arrays of strings and pass strlen
ompi/mpi/fortran/use-mpi-f08/bindings/mpi-f-interfaces-bind.h:3300:! New routines to MPI-3
ompi/mpi/fortran/use-mpi-tkr/mpi-f90-cptr-interfaces.h:13:! preprocessor macro to protect the problematic declarations.
ompi/mpi/fortran/use-mpi-tkr/mpi-f90-cptr-interfaces.h:16:! mpi.F90, which allows us to use the preprocessor "if" directive,
ompi/mpi/fortran/use-mpi-tkr/pmpi-f90-cptr-interfaces.h:15:! preprocessor macro to protect the problematic declarations.
ompi/mpi/fortran/use-mpi-tkr/pmpi-f90-cptr-interfaces.h:18:! mpi.F90, which allows us to use the preprocessor "if" directive,
ompi/mpiext/example/use-mpi-f08/mpiext_example_usempif08.h:18:! included before this, so anything declared there does not need to be
ompi/mpiext/example/use-mpi/mpiext_example_usempi.h:18:! included before this, so anything declared there does not need to be
ompi/mpiext/pcollreq/use-mpi-f08/mpiext_pcollreq_usempif08.h:21:! included before this, so anything declared there does not need to be
ompi/mpiext/pcollreq/use-mpi/mpiext_pcollreq_usempi.h:21:! included before this, so anything declared there does not need to be
ompi/mpiext/shortfloat/use-mpi/mpiext_shortfloat_usempi.h:11:! This file is needed to put variables defined in mpiext_shortfloat_mpifh.h
opal/class/opal_tree.c:537:int opal_tree_dup(opal_tree_t *from, opal_tree_t *to)
opal/class/opal_tree.c:542:opal_tree_init(to,
opal/class/opal_tree.c:551:ret = opal_tree_deserialize(buffer, opal_tree_get_root(to));
opal/class/opal_tree.h:465:OPAL_DECLSPEC int opal_tree_dup(opal_tree_t *from, opal_tree_t *to);
opal/datatype/opal_convertor_internal.h:27:void* to, size_t to_length, ptrdiff_t to_extent,
opal/datatype/opal_copy_functions.c:45:char* to, size_t to_len, ptrdiff_t to_extent, \
opal/datatype/opal_copy_functions.c:59:#TYPE, count, from, from_len, to, to_len ); \
opal/datatype/opal_copy_functions.c:62:#TYPE, count, from, from_len, to, to_len ); \
opal/datatype/opal_copy_functions.c:67:MEMCPY( to, from, count * local_TYPE_size ); \
opal/datatype/opal_copy_functions.c:71:MEMCPY( to, from, local_TYPE_size ); \
opal/datatype/opal_copy_functions.c:72:to += to_extent; \
opal/datatype/opal_copy_functions.c:97:char* to, size_t to_len, ptrdiff_t to_extent, \
opal/datatype/opal_copy_functions.c:110:#TYPENAME, count, from, from_len, to, to_len ); \
opal/datatype/opal_copy_functions.c:113:#TYPENAME, count, from, from_len, to, to_len ); \
opal/datatype/opal_copy_functions.c:117:MEMCPY( to, from, count * local_TYPE_size ); \
opal/datatype/opal_copy_functions.c:120:MEMCPY( to, from, local_TYPE_size ); \
opal/datatype/opal_copy_functions.c:121:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:52:uint8_t *to = (uint8_t*) to_p;
opal/datatype/opal_copy_functions_heterogeneous.c:57:to[back_i] = from[i];
opal/datatype/opal_copy_functions_heterogeneous.c:61:to += size;
opal/datatype/opal_copy_functions_heterogeneous.c:65:to[back_i] = from[i];
opal/datatype/opal_copy_functions_heterogeneous.c:94:long double*to = (long double *) to_p;
opal/datatype/opal_copy_functions_heterogeneous.c:98:for (i=0; i<count; i++, to++) {
opal/datatype/opal_copy_functions_heterogeneous.c:100:struct bit128 * b = (struct bit128 *)to;
opal/datatype/opal_copy_functions_heterogeneous.c:106:MEMCPY( to, &ld, sizeof(long double));
opal/datatype/opal_copy_functions_heterogeneous.c:111:for (i=0; i<count; i++, to++) {
opal/datatype/opal_copy_functions_heterogeneous.c:113:struct bit80 * b = (struct bit80 *)to;
opal/datatype/opal_copy_functions_heterogeneous.c:120:MEMCPY( to, &ld, sizeof(long double));
opal/datatype/opal_copy_functions_heterogeneous.c:145:char* to, size_t to_length, ptrdiff_t to_extent, \
opal/datatype/opal_copy_functions_heterogeneous.c:152:to, to_length, to_extent); \
opal/datatype/opal_copy_functions_heterogeneous.c:157:opal_dt_swap_bytes(to, from, sizeof(TYPE), count); \
opal/datatype/opal_copy_functions_heterogeneous.c:159:opal_dt_swap_long_double(to, from, sizeof(TYPE), count, pConvertor->remoteArch);\
opal/datatype/opal_copy_functions_heterogeneous.c:163:opal_dt_swap_bytes(to, from, sizeof(TYPE), 1); \
opal/datatype/opal_copy_functions_heterogeneous.c:165:opal_dt_swap_long_double(to, from, sizeof(TYPE), 1, pConvertor->remoteArch);\
opal/datatype/opal_copy_functions_heterogeneous.c:167:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:173:MEMCPY( to, from, count * sizeof(TYPE) ); \
opal/datatype/opal_copy_functions_heterogeneous.c:177:MEMCPY( to, from, sizeof(TYPE) ); \
opal/datatype/opal_copy_functions_heterogeneous.c:178:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:193:char* to, size_t to_length, ptrdiff_t to_extent, \
opal/datatype/opal_copy_functions_heterogeneous.c:200:to, to_length, to_extent); \
opal/datatype/opal_copy_functions_heterogeneous.c:205:opal_dt_swap_bytes(to, from, sizeof(TYPE), 2 * count); \
opal/datatype/opal_copy_functions_heterogeneous.c:207:opal_dt_swap_long_double(to, from, sizeof(TYPE), 2*count, pConvertor->remoteArch);\
opal/datatype/opal_copy_functions_heterogeneous.c:211:opal_dt_swap_bytes(to, from, sizeof(TYPE), 2); \
opal/datatype/opal_copy_functions_heterogeneous.c:213:opal_dt_swap_long_double(to, from, sizeof(TYPE), 2, pConvertor->remoteArch);\
opal/datatype/opal_copy_functions_heterogeneous.c:215:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:221:MEMCPY( to, from, count * sizeof(TYPE) ); \
opal/datatype/opal_copy_functions_heterogeneous.c:225:MEMCPY( to, from, sizeof(TYPE) ); \
opal/datatype/opal_copy_functions_heterogeneous.c:226:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:238:char* to, size_t to_length, ptrdiff_t to_extent, \
opal/datatype/opal_copy_functions_heterogeneous.c:246:to, to_length, to_extent); \
opal/datatype/opal_copy_functions_heterogeneous.c:254:to_1 = (TYPE1*) to; from_1 = (TYPE1*) from; \
opal/datatype/opal_copy_functions_heterogeneous.c:258:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:264:MEMCPY( to, from, count * (sizeof(TYPE1) + sizeof(TYPE2)) ); \
opal/datatype/opal_copy_functions_heterogeneous.c:268:MEMCPY( to, from, sizeof(TYPE1) + sizeof(TYPE2) ); \
opal/datatype/opal_copy_functions_heterogeneous.c:269:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:281:char* to, size_t to_len, ptrdiff_t to_extent)
opal/datatype/opal_copy_functions_heterogeneous.c:291:"char", *count, from, from_len, to, to_len );
opal/datatype/opal_copy_functions_heterogeneous.c:294:"char", *count, from, from_len, to, to_len );
opal/datatype/opal_copy_functions_heterogeneous.c:300:bool *to_real = (bool*) to; \
opal/datatype/opal_copy_functions_heterogeneous.c:302:to += to_extent; \
opal/datatype/opal_copy_functions_heterogeneous.c:308:char* to, size_t to_length, ptrdiff_t to_extent,
opal/datatype/opal_copy_functions_heterogeneous.c:329:to, to_length, to_extent);
opal/datatype/opal_copy_functions_heterogeneous.c:346:MEMCPY( to, from, count * sizeof(bool) );
opal/mca/btl/usnic/test/usnic_btl_run_tests.c:39:char *to;
opal/mca/btl/usnic/test/usnic_btl_run_tests.c:73:to = path;
opal/mca/btl/usnic/test/usnic_btl_run_tests.c:74:to = stpcpy(to, dirname(libmpi_path));
opal/mca/btl/usnic/test/usnic_btl_run_tests.c:75:to = stpcpy(to, "/openmpi/");
opal/mca/btl/usnic/test/usnic_btl_run_tests.c:76:to = stpcpy(to, MCA_BTL_USNIC_SO);
opal/mca/crs/base/base.h:111:opal_crs_base_ckpt_options_t *to);
opal/mca/crs/base/crs_base_fns.c:282:opal_crs_base_ckpt_options_t *to)
opal/mca/crs/base/crs_base_fns.c:290:if( NULL == to ) {
opal/mca/crs/base/crs_base_fns.c:296:to->term = from->term;
opal/mca/crs/base/crs_base_fns.c:297:to->stop = from->stop;
opal/mca/crs/base/crs_base_fns.c:299:to->inc_prep_only = from->inc_prep_only;
opal/mca/crs/base/crs_base_fns.c:300:to->inc_recover_only = from->inc_recover_only;
opal/mca/crs/base/crs_base_fns.c:303:to->attach_debugger = from->attach_debugger;
opal/mca/crs/base/crs_base_fns.c:304:to->detach_debugger = from->detach_debugger;
opal/mca/shmem/base/base.h:41:opal_shmem_ds_t *to);
opal/mca/shmem/base/shmem_base_wrappers.c:44:opal_shmem_ds_t *to)
opal/mca/shmem/base/shmem_base_wrappers.c:50:return opal_shmem_base_module->ds_copy(from, to);
opal/mca/shmem/mmap/shmem_mmap_module.c:77:opal_shmem_ds_t *to);
opal/mca/shmem/mmap/shmem_mmap_module.c:211:opal_shmem_ds_t *to)
opal/mca/shmem/mmap/shmem_mmap_module.c:213:memcpy(to, from, sizeof(opal_shmem_ds_t));
opal/mca/shmem/mmap/shmem_mmap_module.c:225:from->flags, to->seg_id, (unsigned long)to->seg_size, to->seg_name,
opal/mca/shmem/mmap/shmem_mmap_module.c:226:to->flags)
opal/mca/shmem/posix/shmem_posix_module.c:75:opal_shmem_ds_t *to);
opal/mca/shmem/posix/shmem_posix_module.c:148:opal_shmem_ds_t *to)
opal/mca/shmem/posix/shmem_posix_module.c:150:memcpy(to, from, sizeof(opal_shmem_ds_t));
opal/mca/shmem/posix/shmem_posix_module.c:162:from->flags, to->seg_id, (unsigned long)to->seg_size, to->seg_name,
opal/mca/shmem/posix/shmem_posix_module.c:163:to->flags)
opal/mca/shmem/shmem.h:100:opal_shmem_ds_t *to);
opal/mca/shmem/sysv/shmem_sysv_module.c:77:opal_shmem_ds_t *to);
opal/mca/shmem/sysv/shmem_sysv_module.c:150:opal_shmem_ds_t *to)
opal/mca/shmem/sysv/shmem_sysv_module.c:152:memcpy(to, from, sizeof(opal_shmem_ds_t));
opal/mca/shmem/sysv/shmem_sysv_module.c:164:from->flags, to->seg_id, (unsigned long)to->seg_size, to->seg_name,
opal/mca/shmem/sysv/shmem_sysv_module.c:165:to->flags)
opal/util/bipartite_graph.c:327:int to,
opal/util/bipartite_graph.c:338:if (to < 0 || to >= NUM_VERTICES(g)) {
opal/util/bipartite_graph.c:351:if (e->target == to) {
opal/util/bipartite_graph.c:364:e->target = to;
opal/util/bipartite_graph.c:373:v_to = V_ID_TO_PTR(g, to);
opal/util/bipartite_graph.h:111:int to,
test/monitoring/check_monitoring.c:365:int size, i, n, to, from, world_rank;
test/monitoring/check_monitoring.c:462:to = (world_rank+1+i)%size;
test/monitoring/check_monitoring.c:464:if(world_rank < to){
test/monitoring/check_monitoring.c:465:MPI_Send(s1, 13, MPI_CHAR, to, world_rank, MPI_COMM_WORLD);
test/monitoring/check_monitoring.c:469:MPI_Send(s1, 13, MPI_CHAR, to, world_rank, MPI_COMM_WORLD);
test/monitoring/example_reduce_count.c:20:int rank, size, n, to, from, tagno, MPIT_result, provided, count;
test/monitoring/example_reduce_count.c:31:to = (rank + 1) % size;
test/monitoring/example_reduce_count.c:73:MPI_Isend(&n,1,MPI_INT,to,tagno,MPI_COMM_WORLD,&request);
test/monitoring/example_reduce_count.c:79:MPI_Isend(&n, 1, MPI_INT, to, tagno, MPI_COMM_WORLD, &request);
test/monitoring/monitoring_test.c:78:int rank, size, n, to, from, tagno, MPIT_result, provided, count, world_rank;
test/monitoring/monitoring_test.c:100:to = (rank + 1) % size;
test/monitoring/monitoring_test.c:141:MPI_Send(&n,1,MPI_INT,to,tagno,MPI_COMM_WORLD);
test/monitoring/monitoring_test.c:146:MPI_Send(&n, 1, MPI_INT, to, tagno, MPI_COMM_WORLD);
test/monitoring/monitoring_test.c:202:to = (rank + 1) % size;
test/monitoring/monitoring_test.c:207:MPI_Send(&n, 1, MPI_INT, to, tagno, newcomm);
test/monitoring/monitoring_test.c:212:MPI_Send(&n, 1, MPI_INT, to, tagno, newcomm);
test/monitoring/monitoring_test.c:276:to = (rank + 1) % size;
test/monitoring/monitoring_test.c:287:MPI_Put(rs_buff, 10240, MPI_INT, to, 0, 10240, MPI_INT, win);
test/monitoring/monitoring_test.c:315:MPI_Put(rs_buff, 10240, MPI_INT, to, 0, 10240, MPI_INT, win);
test/monitoring/monitoring_test.c:325:MPI_Win_lock(MPI_LOCK_EXCLUSIVE, to, 0, win);
test/monitoring/monitoring_test.c:326:MPI_Put(rs_buff, 10240, MPI_INT, to, 0, 10240, MPI_INT, win);
test/monitoring/monitoring_test.c:327:MPI_Win_unlock(to, win);
test/monitoring/test_overhead.c:37:static int to = -1;
test/monitoring/test_overhead.c:92:MPI_Recv(NULL, 0, MPI_BYTE, to, 100, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
test/monitoring/test_overhead.c:94:MPI_Recv(NULL, 0, MPI_BYTE, to, 100, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
test/monitoring/test_overhead.c:100:MPI_Send(sbuf, size, MPI_BYTE, to, tagno, MPI_COMM_WORLD);
test/monitoring/test_overhead.c:123:MPI_Send(sbuf, size, MPI_BYTE, to, tagno, MPI_COMM_WORLD);
test/monitoring/test_overhead.c:124:MPI_Recv(rbuf, size, MPI_BYTE, to, tagno, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
test/monitoring/test_overhead.c:127:MPI_Recv(rbuf, size, MPI_BYTE, to, tagno, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
test/monitoring/test_overhead.c:128:MPI_Send(sbuf, size, MPI_BYTE, to, tagno, MPI_COMM_WORLD);
test/monitoring/test_overhead.c:161:MPI_Win_lock(MPI_LOCK_EXCLUSIVE, to, 0, win);
test/monitoring/test_overhead.c:165:MPI_Put(sbuf, size, MPI_BYTE, to, 0, size, MPI_BYTE, win);
test/monitoring/test_overhead.c:166:MPI_Win_unlock(to, win);
test/monitoring/test_overhead.c:175:MPI_Win_lock(MPI_LOCK_SHARED, to, 0, win);
test/monitoring/test_overhead.c:179:MPI_Get(rbuf, size, MPI_BYTE, to, 0, size, MPI_BYTE, win);
test/monitoring/test_overhead.c:180:MPI_Win_unlock(to, win);
test/monitoring/test_overhead.c:218:to = (rank_world + 1) % size_world;
test/monitoring/test_pvar_access.c:66:int rank, size, n, to, from, tagno, MPIT_result, provided, count;
test/monitoring/test_pvar_access.c:82:to = (rank + 1) % size;
test/monitoring/test_pvar_access.c:150:MPI_Isend(&n,1,MPI_INT,to,tagno,MPI_COMM_WORLD,&request);
test/monitoring/test_pvar_access.c:156:MPI_Isend(&n, 1, MPI_INT, to, tagno, MPI_COMM_WORLD, &request);
test/monitoring/test_pvar_access.c:240:to = (rank + 1) % size;
test/monitoring/test_pvar_access.c:245:MPI_Send(&n, 1, MPI_INT, to, tagno, newcomm);
test/monitoring/test_pvar_access.c:250:MPI_Send(&n, 1, MPI_INT, to, tagno, newcomm);
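
A number of the matches above come from the OPAL datatype copy macros in opal/datatype/opal_copy_functions.c and opal_copy_functions_heterogeneous.c, where a destination pointer named "to" is either filled with one bulk MEMCPY (contiguous case) or advanced by "to_extent" after each element (strided case). The following is only an illustrative sketch of that pattern in plain C; copy_with_extent, elem_size, and the contiguous fast-path test are hypothetical names for this example, not the actual OPAL macros or their arguments.

#include <stddef.h>
#include <string.h>

/* Sketch only: copy `count` elements of `elem_size` bytes from `from`
 * to `to`, where each buffer may be strided by its own extent.  When
 * both extents equal the element size the data is contiguous and a
 * single memcpy suffices; otherwise each element is copied separately
 * and the pointers are advanced by their extents, mirroring the
 * "MEMCPY( to, from, ... ); to += to_extent;" pairs in the listing. */
static void copy_with_extent(char *to, ptrdiff_t to_extent,
                             const char *from, ptrdiff_t from_extent,
                             size_t elem_size, size_t count)
{
    if ((size_t) to_extent == elem_size && (size_t) from_extent == elem_size) {
        memcpy(to, from, count * elem_size);   /* contiguous fast path */
    } else {
        for (size_t i = 0; i < count; i++) {
            memcpy(to, from, elem_size);       /* one element at a time */
            to   += to_extent;                 /* step destination by its extent */
            from += from_extent;               /* step source by its extent */
        }
    }
}

The two branches correspond to the two shapes visible in the matched macro lines: a count-sized MEMCPY when the layout is contiguous, and a per-element MEMCPY followed by "to += to_extent" when it is not.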