np                 23 examples/connectivity_c.c     int         np;        /* number of processes in job */
np                 32 examples/connectivity_c.c     MPI_Comm_size(MPI_COMM_WORLD, &np);
np                 44 examples/connectivity_c.c     for (i=0; i<np; i++) {
np                 47 examples/connectivity_c.c             for(j=i+1; j<np; j++) {
np                 63 examples/connectivity_c.c         printf("Connectivity test on %d processes PASSED.\n", np);
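
The connectivity_c.c hits trace np from MPI_Comm_size into an all-pairs loop: every rank i exchanges one message with every rank j > i, so a single hang or error pinpoints the broken pair. A minimal sketch of that pattern, assuming a simple blocking send/recv exchange (tag and buffer choices here are illustrative, not copied from the example):

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        int rank, np, i, j, peer = -1;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &np);

        /* every pair (i, j) with i < j exchanges one message */
        for (i = 0; i < np; i++) {
            for (j = i + 1; j < np; j++) {
                if (rank == i) {
                    MPI_Send(&rank, 1, MPI_INT, j, 0, MPI_COMM_WORLD);
                    MPI_Recv(&peer, 1, MPI_INT, j, 0, MPI_COMM_WORLD,
                             MPI_STATUS_IGNORE);
                } else if (rank == j) {
                    MPI_Recv(&peer, 1, MPI_INT, i, 0, MPI_COMM_WORLD,
                             MPI_STATUS_IGNORE);
                    MPI_Send(&rank, 1, MPI_INT, i, 0, MPI_COMM_WORLD);
                }
            }
        }

        if (rank == 0) {
            printf("Connectivity test on %d processes PASSED.\n", np);
        }
        MPI_Finalize();
        return 0;
    }
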
np                 30 examples/dtrace/mpicommleak.c     int		np;	                      /* number of processes in job */
np                 36 examples/dtrace/mpicommleak.c     MPI_Comm_size(MPI_COMM_WORLD, &np);
np                237 ompi/debuggers/ompi_msgq_dll.c     int i, np, is_dense;
np                242 ompi/debuggers/ompi_msgq_dll.c     np = ompi_fetch_int( proc,
np                245 ompi/debuggers/ompi_msgq_dll.c     if( np < 0 ) {
np                246 ompi/debuggers/ompi_msgq_dll.c         DEBUG(VERBOSE_COMM, ("Get a size for the communicator = %d\n", np));
np                268 ompi/debuggers/ompi_msgq_dll.c     tr = (int *)mqs_malloc (np*sizeof(int));
np                269 ompi/debuggers/ompi_msgq_dll.c     trbuffer = (char *)mqs_malloc (np*sizeof(mqs_taddr_t));
np                273 ompi/debuggers/ompi_msgq_dll.c                           (void*)group, np) );
np                279 ompi/debuggers/ompi_msgq_dll.c     if( (0 != np) &&
np                280 ompi/debuggers/ompi_msgq_dll.c         (mqs_ok != mqs_fetch_data(proc, tablep, np * p_info->sizes.pointer_size,
np                301 ompi/debuggers/ompi_msgq_dll.c          extra->world_proc_array = mqs_malloc( np * sizeof(mqs_taddr_t) );
np                302 ompi/debuggers/ompi_msgq_dll.c          for( i = 0; i < np; i++ ) {
np                308 ompi/debuggers/ompi_msgq_dll.c          extra->world_proc_array_entries = np;
np                312 ompi/debuggers/ompi_msgq_dll.c          for( i = 0; i < np; i++ ) {
np                331 ompi/debuggers/ompi_msgq_dll.c     group->entries = np;
np                347 ompi/mca/coll/portals4/coll_portals4.h void get_pipeline(ptl_rank_t rank, ptl_rank_t np, ptl_rank_t root,
np                352 ompi/mca/coll/portals4/coll_portals4.h             ((rank == PTL_FIRST_RANK) ? (np - 1) : (rank - 1));
np                353 ompi/mca/coll/portals4/coll_portals4.h     *next = (rank == (np - 1)) ?
np                365 ompi/mca/coll/portals4/coll_portals4.h         ptl_rank_t rank, ptl_rank_t np, ptl_rank_t root,
np                390 ompi/mca/coll/portals4/coll_portals4.h     if ((np <= 0)    ||
np                392 ompi/mca/coll/portals4/coll_portals4.h         (rank >= np) ||
np                394 ompi/mca/coll/portals4/coll_portals4.h         (root >= np)) {
np                398 ompi/mca/coll/portals4/coll_portals4.h     my = (np + rank - root) % np;
np                403 ompi/mca/coll/portals4/coll_portals4.h     last = np - 1;
np                421 ompi/mca/coll/portals4/coll_portals4.h     *father = (up == PTL_INVALID_RANK) ? PTL_INVALID_RANK : ((up + root) % np);
np                426 ompi/mca/coll/portals4/coll_portals4.h                 first + cnt * dist) % np;
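
In coll_portals4.h, np bounds the argument checks (rank and root must fall in [0, np)) and feeds the rotation my = (np + rank - root) % np that renumbers processes so the root sits at virtual position 0. A rough sketch of a chain-neighbour computation built on that rotation, assuming -1 as an "invalid rank" marker in place of the Portals constant and an illustrative function name:

    /* Lay np ranks out as a chain rooted at "root": the virtual position is
     * the distance from the root around the ring, and the chain neighbours
     * are translated back to real ranks. */
    static void chain_neighbours(int rank, int np, int root,
                                 int *prev, int *next)
    {
        int my = (np + rank - root) % np;   /* root maps to position 0 */

        *prev = (my == 0)      ? -1 : (root + my - 1) % np;
        *next = (my == np - 1) ? -1 : (root + my + 1) % np;
    }
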
np                 48 ompi/mca/io/romio321/romio/test/hindexed.c     int          i, j, err, rank, np, num_io;
np                 66 ompi/mca/io/romio321/romio/test/hindexed.c     MPI_Comm_size(MPI_COMM_WORLD, &np);
np                 68 ompi/mca/io/romio321/romio/test/hindexed.c     if (np != 4) {
np                 35 ompi/mca/io/romio321/romio/test/types_with_zeros.c     int i, rank, np, buflen, num, err, nr_errors=0;
np                 44 ompi/mca/io/romio321/romio/test/types_with_zeros.c     MPI_Comm_size(MPI_COMM_WORLD, &np);
np                126 ompi/mca/io/romio321/romio/test/types_with_zeros.c     int nr_errors, rank, np;
np                130 ompi/mca/io/romio321/romio/test/types_with_zeros.c     MPI_Comm_size(MPI_COMM_WORLD, &np);
np                132 ompi/mca/io/romio321/romio/test/types_with_zeros.c     if (np != 2) {
np                193 opal/mca/pmix/pmix4x/pmix/src/common/pmix_iof.h #define PMIX_IOF_READ_EVENT(rv, p, np, d, nd, fid, cbfunc, actv)        \
np                201 opal/mca/pmix/pmix4x/pmix/src/common/pmix_iof.h         (rev)->ntargets = (np);                                         \
np                203 opal/mca/pmix/pmix4x/pmix/src/common/pmix_iof.h         memcpy((rev)->targets, (p), (np) * sizeof(pmix_proc_t));        \
np                 75 opal/mca/pmix/pmix4x/pmix/src/mca/pnet/base/base.h     size_t np;
np                688 opal/mca/pmix/pmix4x/pmix/src/mca/pnet/base/pnet_base_fns.c         lp->np = pmix_argv_count(ranks);
np                689 opal/mca/pmix/pmix4x/pmix/src/mca/pnet/base/pnet_base_fns.c         lp->ranks = (pmix_rank_t*)malloc(lp->np * sizeof(pmix_rank_t));
np                690 opal/mca/pmix/pmix4x/pmix/src/mca/pnet/base/pnet_base_fns.c         for (m=0; m < lp->np; m++) {
np                110 opal/mca/pmix/pmix4x/pmix/src/mca/pnet/base/pnet_base_frame.c     p->np = 0;
np                367 opal/mca/pmix/pmix4x/pmix/src/mca/pnet/opa/pnet_opa.c                     for (n=0; n < lp->np; n++) {
np                256 opal/mca/pmix/pmix4x/pmix/src/mca/pnet/test/pnet_test.c                 pmix_output(0, "\tNODE %s %d RANKS", nd->name, (int)lp->np);
np                531 opal/mca/pmix/pmix4x/pmix/src/mca/preg/native/preg_native.c     size_t ninfo, np=0, n, j;
np                576 opal/mca/pmix/pmix4x/pmix/src/mca/preg/native/preg_native.c             np = pmix_argv_count(ptr);
np                577 opal/mca/pmix/pmix4x/pmix/src/mca/preg/native/preg_native.c             PMIX_PROC_CREATE(p, np);
np                583 opal/mca/pmix/pmix4x/pmix/src/mca/preg/native/preg_native.c             for (j=0; j < np; j++) {
np                603 opal/mca/pmix/pmix4x/pmix/src/mca/preg/native/preg_native.c     *nprocs = np;
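
preg_native.c computes np by counting the entries of a split process-map string (pmix_argv_count on the split result), then allocates that many proc structures and fills one per entry. A plain-C sketch of the count-then-fill idea, using strtok instead of the pmix argv helpers (the map string and rank parsing here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *map = "0,1,2,3";          /* illustrative rank list */
        char *copy = strdup(map), *tok;
        size_t np = 0, j = 0;

        /* first pass: count the comma-separated entries */
        for (tok = strtok(copy, ","); NULL != tok; tok = strtok(NULL, ","))
            np++;

        /* second pass: allocate np slots and parse each entry */
        unsigned int *ranks = malloc(np * sizeof(unsigned int));
        strcpy(copy, map);
        for (tok = strtok(copy, ","); NULL != tok; tok = strtok(NULL, ","))
            ranks[j++] = (unsigned int)strtoul(tok, NULL, 10);

        printf("np = %zu, last rank = %u\n", np, ranks[np - 1]);
        free(ranks);
        free(copy);
        return 0;
    }
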
np                 54 opal/mca/pmix/pmix4x/pmix/test/test_common.c                 params->np = strdup(argv[i]);
np                105 opal/mca/pmix/pmix4x/pmix/test/test_common.h     char *np;
np                155 opal/mca/pmix/pmix4x/pmix/test/test_common.h     params.np = NULL;                 \
np                173 opal/mca/pmix/pmix4x/pmix/test/test_common.h     if (NULL != params.np) {          \
np                174 opal/mca/pmix/pmix4x/pmix/test/test_common.h         free(params.np);              \
np                 25 opal/mca/pmix/pmix4x/pmix/test/utils.c     if (NULL == params->np) {
np                 28 opal/mca/pmix/pmix4x/pmix/test/utils.c         pmix_argv_append_nosize(argv, params->np);
np                290 orte/mca/iof/base/iof_base_setup.c     int np, numdigs, fdout, i;
np                298 orte/mca/iof/base/iof_base_setup.c         np = jobdat->num_procs / 10;
np                301 orte/mca/iof/base/iof_base_setup.c         while (np > 0) {
np                303 orte/mca/iof/base/iof_base_setup.c             np = np / 10;
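
In iof_base_setup.c, np is just a scratch copy of the job size: dividing it by 10 until it reaches zero counts the decimal digits in the process count, which the surrounding code uses when building fixed-width, per-rank output file names. The idiom, wrapped in an illustrative helper:

    /* number of decimal digits needed to print the process count */
    static int count_digits(unsigned int num_procs)
    {
        unsigned int np = num_procs / 10;
        int numdigs = 1;

        while (np > 0) {
            numdigs++;
            np = np / 10;
        }
        return numdigs;
    }
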
np                312 orte/mca/rmaps/base/rmaps_base_ranking.c     orte_vpid_t vpid, np;
np                406 orte/mca/rmaps/base/rmaps_base_ranking.c                 np = 0;
np                407 orte/mca/rmaps/base/rmaps_base_ranking.c                 for (j=0; np < node->num_procs && j < node->procs->size && cnt < app->num_procs; j++) {
np                411 orte/mca/rmaps/base/rmaps_base_ranking.c                     np++;
np                 69 orte/test/mpi/no-disconnect.c     int me, np, jump, buf = -1;
np                 72 orte/test/mpi/no-disconnect.c     MPI_Comm_size(comm,&np);
np                 75 orte/test/mpi/no-disconnect.c     for ( jump = 1; jump < np; jump <<= 1 ) {
np                 79 orte/test/mpi/no-disconnect.c         } else if ( me + jump < np ) {
np                 90 orte/test/mpi/no-disconnect.c         if ( me + jump < np ) {
np                143 orte/test/mpi/no-disconnect.c         int me, np;
np                145 orte/test/mpi/no-disconnect.c         MPI_Comm_size(MPI_COMM_WORLD,&np);
np                148 orte/test/mpi/no-disconnect.c         if ( np > 4 ) {
np                152 orte/test/mpi/no-disconnect.c         if ( np > 2 ) {
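
The no-disconnect.c hits show np bounding a doubling-stride loop: jump grows as 1, 2, 4, ... and a rank only talks to me + jump when that partner actually exists, the usual guard for tree-style traffic over a non-power-of-two np. A hedged sketch of one such fan-in toward rank 0 (the function name and the exact pairing rule are illustrative; the test may pair ranks differently):

    #include <mpi.h>

    /* Binomial fan-in: a token flows toward rank 0 in ceil(log2(np)) rounds.
     * With stride "jump", a rank at an odd multiple of jump sends down to
     * the partner jump below; multiples of 2*jump receive from jump above,
     * when that partner exists. */
    static void fan_in(MPI_Comm comm)
    {
        int me, np, jump, buf = -1;

        MPI_Comm_rank(comm, &me);
        MPI_Comm_size(comm, &np);

        for (jump = 1; jump < np; jump <<= 1) {
            if (me % (2 * jump) == jump) {
                MPI_Send(&buf, 1, MPI_INT, me - jump, 0, comm);
            } else if (me % (2 * jump) == 0 && me + jump < np) {
                MPI_Recv(&buf, 1, MPI_INT, me + jump, 0, comm,
                         MPI_STATUS_IGNORE);
            }
        }
    }
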
np                 65 orte/test/mpi/parallel_r64.c      if ( npes == np )
np                 68 orte/test/mpi/parallel_r64.c         printf(" total number of PE's: %3d\n",np);
np                 77 orte/test/mpi/parallel_r64.c         printf(" ERROR: total number of PE's must be %d\n",np);
np                 65 orte/test/mpi/parallel_r8.c      if ( npes == np )
np                 68 orte/test/mpi/parallel_r8.c         printf(" total number of PE's: %3d\n",np);
np                 77 orte/test/mpi/parallel_r8.c         printf(" ERROR: total number of PE's must be %d\n",np);
np                 65 orte/test/mpi/parallel_w64.c      if ( npes == np )
np                 68 orte/test/mpi/parallel_w64.c         printf(" total number of PE's: %3d\n",np);
np                 77 orte/test/mpi/parallel_w64.c         printf(" ERROR: total number of PE's must be %d\n",np);
np                 64 orte/test/mpi/parallel_w8.c         if ( npes == np )
np                 67 orte/test/mpi/parallel_w8.c             printf(" total number of PE's: %3d\n",np);
np                 76 orte/test/mpi/parallel_w8.c             printf(" ERROR: total number of PE's must be %d\n",np);
np                128 oshmem/mca/spml/ikrit/spml_ikrit.h     int np;
np                239 oshmem/mca/spml/ikrit/spml_ikrit_component.c                                        "[integer] Minimal allowed job's NP to activate ikrit", &mca_spml_ikrit.np);
np                247 oshmem/mca/spml/ikrit/spml_ikrit_component.c     if (oshmem_num_procs() < mca_spml_ikrit.np) {
np                250 oshmem/mca/spml/ikrit/spml_ikrit_component.c                      oshmem_num_procs(), mca_spml_ikrit.np);