root/ompi/patterns/comm/bcast.c


DEFINITIONS

This source file includes the following definitions:
  1. ompi_comm_bcast_pml

/*
 * Copyright (c) 2009-2012 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2009-2012 Oak Ridge National Laboratory.  All rights reserved.
 * Copyright (c) 2012      Los Alamos National Security, LLC.
 *                         All rights reserved.
 * Copyright (c) 2014      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2017      IBM Corporation. All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */
/** @file */

#include "ompi_config.h"

#include "ompi/constants.h"
#include "ompi/op/op.h"
#include "ompi/datatype/ompi_datatype.h"
#include "ompi/communicator/communicator.h"
#include "opal/include/opal/sys/atomic.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/patterns/net/netpatterns.h"
#include "coll_ops.h"

/**
 * Bcast - subgroup in communicator
 *  This is a very simple algorithm - binary tree, transmitting the full
 *  message at each step.
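 *
 *  The tree is built by ompi_netpatterns_setup_narray_tree() with radix 2:
 *  the (shifted) root has no parent, every other node has exactly one
 *  parent, and each node has at most two children.  For example, with
 *  n_peers = 7 this typically gives 0 -> {1,2}, 1 -> {3,4}, 2 -> {5,6}
 *  (illustrative; the exact layout is determined by the netpatterns code).
 *  The negated tag used below cannot collide with application-level
 *  point-to-point traffic, since user tags must be non-negative.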
 */
OMPI_DECLSPEC int ompi_comm_bcast_pml(void *buffer, int root, int count,
        ompi_datatype_t *dtype, int my_rank_in_group,
        int n_peers, int *ranks_in_comm, ompi_communicator_t *comm)
{
    /* local variables */
    int rc = OMPI_SUCCESS, msg_cnt, i;
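    /* radix-2 tree: at most two children per node, so at most two
     * non-blocking sends can be outstanding at once */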
    ompi_request_t *requests[2];
    int node_rank, peer_rank;
    netpatterns_tree_node_t node_data;

    /*
     * shift rank to root==0 tree
     */
    node_rank = (my_rank_in_group - root + n_peers) % n_peers;
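    /* e.g. root = 3, n_peers = 8: group ranks 3,4,...,2 map to virtual
     * ranks 0,1,...,7, so the broadcast root sits at the top of the tree */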

    /*
     * compute my communication pattern - binary tree
     */
    rc = ompi_netpatterns_setup_narray_tree(2, node_rank, n_peers,
            &node_data);
    if (OMPI_SUCCESS != rc) {
        goto Error;
    }

    /* 1 process special case */
    if (1 == n_peers) {
        return OMPI_SUCCESS;
    }

    /* if I have parents - wait on the data to arrive */
    if (node_data.n_parents) {
        /* I will have only 1 parent */
        peer_rank = node_data.parent_rank;
        /* translate back to actual rank */
        peer_rank = (peer_rank + root) % n_peers;
        rc = MCA_PML_CALL(recv(buffer, count, dtype, peer_rank,
                    -OMPI_COMMON_TAG_BCAST, comm, MPI_STATUS_IGNORE));
        if (0 > rc) {
            goto Error;
        }
    }

    /* send the data to my children */
    msg_cnt = 0;
    for (i = 0; i < node_data.n_children; i++) {
        peer_rank = node_data.children_ranks[i];
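        /* child rank is in the shifted (root==0) numbering; translate it
         * back to the real rank within the group */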
        peer_rank = (peer_rank + root) % n_peers;
        rc = MCA_PML_CALL(isend(buffer,
                    count, dtype, peer_rank,
                    -OMPI_COMMON_TAG_BCAST, MCA_PML_BASE_SEND_STANDARD,
                    comm, &(requests[msg_cnt])));
        if (0 > rc) {
            goto Error;
        }
        msg_cnt++;
    }
    /* wait for send completion */
    if (msg_cnt) {
        /* wait on the non-blocking sends; the receive above was blocking
         * and has already completed */
        ompi_request_wait_all(msg_cnt, requests, MPI_STATUSES_IGNORE);
    }
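
    /* the narray-tree setup allocates the children list; release it here */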
    if (node_data.children_ranks) {
        free(node_data.children_ranks);
    }

    /* return */
    return OMPI_SUCCESS;

Error:
    return rc;
}
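
/*
 * Illustrative usage sketch (editorial addition, not built as part of this
 * file): broadcast one int from "root" across a communicator.  The wrapper
 * and the "ranks" array are hypothetical; ompi_comm_rank()/ompi_comm_size()
 * are the standard communicator accessors, and &ompi_mpi_int.dt is assumed
 * to be the predefined MPI_INT datatype object.
 */
#if 0
static int example_bcast_int(ompi_communicator_t *comm, int *ranks, int root)
{
    int value = 0;
    int my_rank = ompi_comm_rank(comm);
    int size = ompi_comm_size(comm);

    /* only the root fills in the payload before the broadcast */
    if (my_rank == root) {
        value = 42;
    }

    /* binary-tree broadcast of one MPI_INT over the PML */
    return ompi_comm_bcast_pml(&value, root, 1, &ompi_mpi_int.dt,
                               my_rank, size, ranks, comm);
}
#endif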
