root/opal/mca/btl/smcuda/btl_smcuda.c


DEFINITIONS

This source file includes the following definitions:
  1. mpool_calloc
  2. setup_mpool_base_resources
  3. sm_segment_attach
  4. smcuda_btl_first_time_init
  5. create_sm_endpoint
  6. mca_btl_smcuda_add_procs
  7. mca_btl_smcuda_del_procs
  8. mca_btl_smcuda_finalize
  9. mca_btl_smcuda_register_error_cb
  10. mca_btl_smcuda_alloc
  11. mca_btl_smcuda_free
  12. mca_btl_smcuda_prepare_src
  13. mca_btl_smcuda_sendi
  14. mca_btl_smcuda_send
  15. mca_btl_smcuda_register_mem
  16. mca_btl_smcuda_deregister_mem
  17. mca_btl_smcuda_get_cuda
  18. mca_btl_smcuda_send_cuda_ipc_request
  19. mca_btl_smcuda_dump
  20. mca_btl_smcuda_ft_event
  21. mca_btl_smcuda_ft_event

   1 /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
   2 /*
   3  * Copyright (c) 2004-2011 The Trustees of Indiana University and Indiana
   4  *                         University Research and Technology
   5  *                         Corporation.  All rights reserved.
   6  * Copyright (c) 2004-2014 The University of Tennessee and The University
   7  *                         of Tennessee Research Foundation.  All rights
   8  *                         reserved.
   9  * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart,
  10  *                         University of Stuttgart.  All rights reserved.
  11  * Copyright (c) 2004-2005 The Regents of the University of California.
  12  *                         All rights reserved.
  13  * Copyright (c) 2006-2007 Voltaire. All rights reserved.
  14  * Copyright (c) 2009-2012 Cisco Systems, Inc.  All rights reserved.
  15  * Copyright (c) 2010-2017 Los Alamos National Security, LLC. All rights
  16  *                         reserved.
  17  * Copyright (c) 2012-2015 NVIDIA Corporation.  All rights reserved.
  18  * Copyright (c) 2012      Oracle and/or its affiliates.  All rights reserved.
  19  * Copyright (c) 2014-2017 Research Organization for Information Science
  20  *                         and Technology (RIST). All rights reserved.
  21  * Copyright (c) 2015-2016 Intel, Inc.  All rights reserved.
  22  * $COPYRIGHT$
  23  *
  24  * Additional copyrights may follow
  25  *
  26  * $HEADER$
  27  */
  28 
  29 #include "opal_config.h"
  30 
  31 #include <sys/types.h>
  32 #include <sys/stat.h>
  33 #ifdef HAVE_FCNTL_H
  34 #include <fcntl.h>
  35 #endif  /* HAVE_FCNTL_H */
  36 #include <errno.h>
  37 #ifdef HAVE_SYS_MMAN_H
  38 #include <sys/mman.h>
  39 #endif  /* HAVE_SYS_MMAN_H */
  40 
  41 #ifdef OPAL_BTL_SM_CMA_NEED_SYSCALL_DEFS
  42 #include "opal/sys/cma.h"
  43 #endif /* OPAL_BTL_SM_CMA_NEED_SYSCALL_DEFS */
  44 
  45 #include "opal/sys/atomic.h"
  46 #include "opal/class/opal_bitmap.h"
  47 #include "opal/util/output.h"
  48 #include "opal/util/show_help.h"
  49 #include "opal/util/printf.h"
  50 #include "opal/mca/hwloc/base/base.h"
  51 #include "opal/mca/pmix/base/base.h"
  52 #include "opal/mca/shmem/base/base.h"
  53 #include "opal/mca/shmem/shmem.h"
  54 #include "opal/datatype/opal_convertor.h"
  55 #include "opal/mca/btl/btl.h"
  56 
  57 #include "opal/mca/common/sm/common_sm_mpool.h"
  58 
  59 #if OPAL_CUDA_SUPPORT
  60 #include "opal/mca/common/cuda/common_cuda.h"
  61 #endif /* OPAL_CUDA_SUPPORT */
  62 #include "opal/mca/mpool/base/base.h"
  63 #include "opal/mca/rcache/base/base.h"
  64 
  65 #if OPAL_ENABLE_FT_CR    == 1
  66 #include "opal/mca/crs/base/base.h"
  67 #include "opal/util/basename.h"
  68 #include "orte/mca/sstore/sstore.h"
  69 #include "opal/runtime/opal_cr.h"
  70 #endif
  71 
  72 #include "btl_smcuda.h"
  73 #include "btl_smcuda_endpoint.h"
  74 #include "btl_smcuda_frag.h"
  75 #include "btl_smcuda_fifo.h"
  76 
  77 #if OPAL_CUDA_SUPPORT
  78 static struct mca_btl_base_registration_handle_t *mca_btl_smcuda_register_mem (
  79     struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t *endpoint, void *base,
  80     size_t size, uint32_t flags);
  81 
  82 static int mca_btl_smcuda_deregister_mem (struct mca_btl_base_module_t* btl,
  83                                           struct mca_btl_base_registration_handle_t *handle);
  84 #endif
  85 
  86 mca_btl_smcuda_t mca_btl_smcuda = {
  87     .super = {
  88         .btl_component = &mca_btl_smcuda_component.super,
  89         .btl_add_procs = mca_btl_smcuda_add_procs,
  90         .btl_del_procs = mca_btl_smcuda_del_procs,
  91         .btl_finalize = mca_btl_smcuda_finalize,
  92         .btl_alloc = mca_btl_smcuda_alloc,
  93         .btl_free = mca_btl_smcuda_free,
  94         .btl_prepare_src = mca_btl_smcuda_prepare_src,
  95 #if OPAL_CUDA_SUPPORT
  96         .btl_register_mem = mca_btl_smcuda_register_mem,
  97         .btl_deregister_mem = mca_btl_smcuda_deregister_mem,
  98 #endif /* OPAL_CUDA_SUPPORT */
  99         .btl_send = mca_btl_smcuda_send,
 100         .btl_sendi = mca_btl_smcuda_sendi,
 101         .btl_dump = mca_btl_smcuda_dump,
 102         .btl_register_error = mca_btl_smcuda_register_error_cb,
 103         .btl_ft_event = mca_btl_smcuda_ft_event
 104     }
 105 };
 106 
 107 #if OPAL_CUDA_SUPPORT
 108 static void mca_btl_smcuda_send_cuda_ipc_request(struct mca_btl_base_module_t* btl,
 109                                                  struct mca_btl_base_endpoint_t* endpoint);
 110 #endif /* OPAL_CUDA_SUPPORT */
 111 /*
 112  * calculate offset of an address from the beginning of a shared memory segment
 113  */
 114 #define ADDR2OFFSET(ADDR, BASE) ((char*)(ADDR) - (char*)(BASE))
 115 
 116 /*
 117  * calculate an absolute address in a local address space given an offset and
 118  * a base address of a shared memory segment
 119  */
 120 #define OFFSET2ADDR(OFFSET, BASE) ((ptrdiff_t)(OFFSET) + (char*)(BASE))
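     /*
      * For example, a pointer published by one process can be translated
      * into another process' mapping of the same segment:
      *
      *     ptrdiff_t off  = ADDR2OFFSET(addr, peer_base);
      *     void     *here = OFFSET2ADDR(off, local_base);
      */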
 121 
 122 static void *mpool_calloc(size_t nmemb, size_t size)
 123 {
 124     void *buf;
 125     size_t bsize = nmemb * size;
 126     mca_mpool_base_module_t *mpool = mca_btl_smcuda_component.sm_mpool;
 127 
 128     buf = mpool->mpool_alloc(mpool, bsize, opal_cache_line_size, 0);
 129 
 130     if (NULL == buf)
 131         return NULL;
 132 
 133     memset(buf, 0, bsize);
 134     return buf;
 135 }
 136 
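     /*
      * Read back the mpool rendezvous file created during component setup:
      * an opal_shmem_ds_t descriptor followed by the segment size (size_t).
      * The results are returned in out_res.
      */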
 137 static int
 138 setup_mpool_base_resources(mca_btl_smcuda_component_t *comp_ptr,
 139                            mca_common_sm_mpool_resources_t *out_res)
 140 {
 141     int rc = OPAL_SUCCESS;
 142     int fd = -1;
 143     ssize_t bread = 0;
 144 
 145     if (-1 == (fd = open(comp_ptr->sm_mpool_rndv_file_name, O_RDONLY))) {
 146         int err = errno;
 147         opal_show_help("help-mpi-btl-smcuda.txt", "sys call fail", true,
 148                        "open(2)", strerror(err), err);
 149         rc = OPAL_ERR_IN_ERRNO;
 150         goto out;
 151     }
 152     if ((ssize_t)sizeof(opal_shmem_ds_t) != (bread =
 153         read(fd, &out_res->bs_meta_buf, sizeof(opal_shmem_ds_t)))) {
 154         opal_output(0, "setup_mpool_base_resources: "
 155                     "Read inconsistency -- read: %lu, but expected: %lu!\n",
 156                     (unsigned long)bread,
 157                     (unsigned long)sizeof(opal_shmem_ds_t));
 158         rc = OPAL_ERROR;
 159         goto out;
 160     }
 161     if ((ssize_t)sizeof(out_res->size) != (bread =
 162         read(fd, &out_res->size, sizeof(size_t)))) {
 163         opal_output(0, "setup_mpool_base_resources: "
 164                     "Read inconsistency -- read: %lu, but expected: %lu!\n",
 165                     (unsigned long)bread,
  166                     (unsigned long)sizeof(out_res->size));
 167         rc = OPAL_ERROR;
 168         goto out;
 169     }
 170 
 171 out:
 172     if (-1 != fd) {
 173         (void)close(fd);
 174     }
 175     return rc;
 176 }
 177 
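     /*
      * Attach to the shared-memory control segment using the
      * opal_shmem_ds_t descriptor stored in the rendezvous file.  On
      * success, comp_ptr->sm_seg points at the attached module.
      */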
 178 static int
 179 sm_segment_attach(mca_btl_smcuda_component_t *comp_ptr)
 180 {
 181     int rc = OPAL_SUCCESS;
 182     int fd = -1;
 183     ssize_t bread = 0;
 184     opal_shmem_ds_t *tmp_shmem_ds = calloc(1, sizeof(*tmp_shmem_ds));
 185 
 186     if (NULL == tmp_shmem_ds) {
 187         return OPAL_ERR_OUT_OF_RESOURCE;
 188     }
 189     if (-1 == (fd = open(comp_ptr->sm_rndv_file_name, O_RDONLY))) {
 190         int err = errno;
 191         opal_show_help("help-mpi-btl-smcuda.txt", "sys call fail", true,
 192                        "open(2)", strerror(err), err);
 193         rc = OPAL_ERR_IN_ERRNO;
 194         goto out;
 195     }
 196     if ((ssize_t)sizeof(opal_shmem_ds_t) != (bread =
 197         read(fd, tmp_shmem_ds, sizeof(opal_shmem_ds_t)))) {
 198         opal_output(0, "sm_segment_attach: "
 199                     "Read inconsistency -- read: %lu, but expected: %lu!\n",
 200                     (unsigned long)bread,
 201                     (unsigned long)sizeof(opal_shmem_ds_t));
 202         rc = OPAL_ERROR;
 203         goto out;
 204     }
 205     if (NULL == (comp_ptr->sm_seg =
 206                  mca_common_sm_module_attach(tmp_shmem_ds,
 207                                              sizeof(mca_common_sm_seg_header_t),
 208                                              opal_cache_line_size))) {
 209         /* don't have to detach here, because module_attach cleans up after
 210          * itself on failure. */
 211         opal_output(0, "sm_segment_attach: "
 212                     "mca_common_sm_module_attach failure!\n");
 213         rc = OPAL_ERROR;
 214     }
 215 
 216 out:
 217     if (-1 != fd) {
 218         (void)close(fd);
 219     }
 220     if (tmp_shmem_ds) {
 221         free(tmp_shmem_ds);
 222     }
 223     return rc;
 224 }
 225 
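     /*
      * One-time initialization, driven by the first call to add_procs:
      * discover NUMA placement, create/attach the shared-memory mpool,
      * register the segment with CUDA (when built with CUDA support),
      * carve out the FIFO and base-address arrays, and set up the
      * fragment free lists.
      */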
 226 static int
 227 smcuda_btl_first_time_init(mca_btl_smcuda_t *smcuda_btl,
 228                        int32_t my_smp_rank,
 229                        int n)
 230 {
 231     size_t length, length_payload;
 232     sm_fifo_t *my_fifos;
 233     int my_mem_node, num_mem_nodes, i, rc;
 234     mca_common_sm_mpool_resources_t *res = NULL;
 235     mca_btl_smcuda_component_t* m = &mca_btl_smcuda_component;
 236     char *loc, *mynuma;
 237     opal_process_name_t wildcard_rank;
 238 
 239     /* Assume we don't have hwloc support and fill in dummy info */
 240     mca_btl_smcuda_component.mem_node = my_mem_node = 0;
 241     mca_btl_smcuda_component.num_mem_nodes = num_mem_nodes = 1;
 242 
 243     /* see if we were given a topology signature */
 244     wildcard_rank.jobid = OPAL_PROC_MY_NAME.jobid;
 245     wildcard_rank.vpid = OPAL_VPID_WILDCARD;
 246     OPAL_MODEX_RECV_VALUE_OPTIONAL(rc, OPAL_PMIX_TOPOLOGY_SIGNATURE,
 247                                    &wildcard_rank, &loc, OPAL_STRING);
 248     if (OPAL_SUCCESS == rc) {
 249         /* the number of NUMA nodes is right at the front */
 250         mca_btl_smcuda_component.num_mem_nodes = num_mem_nodes = strtoul(loc, NULL, 10);
 251         free(loc);
 252     } else {
 253         /* If we have hwloc support, then get accurate information */
 254         if (OPAL_SUCCESS == opal_hwloc_base_get_topology()) {
 255             i = opal_hwloc_base_get_nbobjs_by_type(opal_hwloc_topology,
 256                                                    HWLOC_OBJ_NODE, 0,
 257                                                    OPAL_HWLOC_AVAILABLE);
 258 
 259             /* JMS This tells me how many numa nodes are *available*,
 260                but it's not how many are being used *by this job*.
 261                Note that this is the value we've previously used (from
 262                the previous carto-based implementation), but it really
 263                should be improved to be how many NUMA nodes are being
 264                used *in this job*. */
 265             mca_btl_smcuda_component.num_mem_nodes = num_mem_nodes = i;
 266         }
 267     }
 268     /* see if we were given our location */
 269     OPAL_MODEX_RECV_VALUE_OPTIONAL(rc, OPAL_PMIX_LOCALITY_STRING,
 270                                    &OPAL_PROC_MY_NAME, &loc, OPAL_STRING);
 271     if (OPAL_SUCCESS == rc) {
 272         if (NULL == loc) {
 273             mca_btl_smcuda_component.mem_node = my_mem_node = -1;
 274         } else {
 275             /* get our NUMA location */
 276             mynuma = opal_hwloc_base_get_location(loc, HWLOC_OBJ_NODE, 0);
 277             if (NULL == mynuma ||
 278                 NULL != strchr(mynuma, ',') ||
 279                 NULL != strchr(mynuma, '-')) {
 280                 /* we either have no idea what NUMA we are on, or we
 281                  * are on multiple NUMA nodes */
 282                 mca_btl_smcuda_component.mem_node = my_mem_node = -1;
 283             } else {
 284                 /* we are bound to a single NUMA node */
 285                 my_mem_node = strtoul(mynuma, NULL, 10);
 286                 mca_btl_smcuda_component.mem_node = my_mem_node;
 287             }
 288             if (NULL != mynuma) {
 289                 free(mynuma);
 290             }
 291             free(loc);
 292         }
 293     } else {
 294         /* If we have hwloc support, then get accurate information */
 295         if (OPAL_SUCCESS == opal_hwloc_base_get_topology() &&
 296             num_mem_nodes > 0 && NULL != opal_process_info.cpuset) {
 297             int numa=0, w;
 298             unsigned n_bound=0;
 299             hwloc_obj_t obj;
 300 
  301             /* count the number of NUMA nodes to which we are bound
                   * (recompute the NUMA count here: the branch above that
                   * sets 'i' may not have executed) */
                  i = opal_hwloc_base_get_nbobjs_by_type(opal_hwloc_topology,
                                                         HWLOC_OBJ_NODE, 0,
                                                         OPAL_HWLOC_AVAILABLE);
  302             for (w=0; w < i; w++) {
 303                 if (NULL == (obj = opal_hwloc_base_get_obj_by_type(opal_hwloc_topology,
 304                                                                    HWLOC_OBJ_NODE, 0, w,
 305                                                                    OPAL_HWLOC_AVAILABLE))) {
 306                     continue;
 307                 }
 308                 /* see if we intersect with that NUMA node's cpus */
 309                 if (hwloc_bitmap_intersects(obj->cpuset, opal_hwloc_my_cpuset)) {
 310                     n_bound++;
 311                     numa = w;
 312                 }
 313             }
 314             /* if we are located on more than one NUMA, or we didn't find
 315              * a NUMA we are on, then not much we can do
 316              */
 317             if (1 == n_bound) {
 318                 mca_btl_smcuda_component.mem_node = my_mem_node = numa;
 319             } else {
 320                 mca_btl_smcuda_component.mem_node = my_mem_node = -1;
 321             }
 322         }
 323     }
 324 
 325     if (NULL == (res = calloc(1, sizeof(*res)))) {
 326         return OPAL_ERR_OUT_OF_RESOURCE;
 327     }
 328 
 329     /* lookup shared memory pool */
 330     mca_btl_smcuda_component.sm_mpools =
 331         (mca_mpool_base_module_t **)calloc(num_mem_nodes,
 332                                            sizeof(mca_mpool_base_module_t *));
 333 
 334     /* Disable memory binding, because each MPI process will claim pages in the
 335      * mpool for their local NUMA node */
 336     res->mem_node = -1;
 337     res->allocator = mca_btl_smcuda_component.allocator;
 338 
 339     if (OPAL_SUCCESS != (rc = setup_mpool_base_resources(m, res))) {
 340         free(res);
 341         return rc;
 342     }
  343     /* now that res is fully populated, create the mpool */
 344     mca_btl_smcuda_component.sm_mpools[0] = common_sm_mpool_create (res);
 345     /* Sanity check to ensure that we found it */
 346     if (NULL == mca_btl_smcuda_component.sm_mpools[0]) {
 347         free(res);
 348         return OPAL_ERR_OUT_OF_RESOURCE;
 349     }
 350 
 351     mca_btl_smcuda_component.sm_mpool = mca_btl_smcuda_component.sm_mpools[0];
 352 
 353     mca_btl_smcuda_component.sm_mpool_base =
 354         mca_btl_smcuda_component.sm_mpools[0]->mpool_base(mca_btl_smcuda_component.sm_mpools[0]);
 355 
 356     /* create a list of peers */
 357     mca_btl_smcuda_component.sm_peers = (struct mca_btl_base_endpoint_t**)
 358         calloc(n, sizeof(struct mca_btl_base_endpoint_t*));
 359     if (NULL == mca_btl_smcuda_component.sm_peers) {
 360         free(res);
 361         return OPAL_ERR_OUT_OF_RESOURCE;
 362     }
 363 
 364     /* remember that node rank zero is already attached */
 365     if (0 != my_smp_rank) {
 366         if (OPAL_SUCCESS != (rc = sm_segment_attach(m))) {
 367             free(res);
 368             return rc;
 369         }
 370     }
 371 #if OPAL_CUDA_SUPPORT
 372     /* Register the entire shared memory region with the CUDA library which will
  373      * force it to be pinned.  This approach was chosen as there is no way for this
 374      * local process to know which parts of the memory are being utilized by a
 375      * remote process. */
 376     opal_output_verbose(10, opal_btl_base_framework.framework_output,
 377                         "btl:smcuda: CUDA cuMemHostRegister address=%p, size=%d",
 378                         mca_btl_smcuda_component.sm_mpool_base, (int)res->size);
 379     mca_common_cuda_register(mca_btl_smcuda_component.sm_mpool_base, res->size, "smcuda");
 380 
 381     /* Create a local memory pool that sends handles to the remote
 382      * side.  Note that the res argument is not really used, but
 383      * needed to satisfy function signature. */
 384     mca_rcache_base_resources_t rcache_res;
 385     smcuda_btl->rcache = mca_rcache_base_module_create("gpusm", smcuda_btl, &rcache_res);
  386     if (NULL == smcuda_btl->rcache) {
              free(res);  /* avoid leaking the mpool resources on this path */
  387         return OPAL_ERR_OUT_OF_RESOURCE;
  388     }
 389 #endif /* OPAL_CUDA_SUPPORT */
 390 
 391     /* it is now safe to free the mpool resources */
 392     free(res);
 393 
 394     /* check to make sure number of local procs is within the
 395      * specified limits */
 396     if(mca_btl_smcuda_component.sm_max_procs > 0 &&
 397        mca_btl_smcuda_component.num_smp_procs + n >
 398        mca_btl_smcuda_component.sm_max_procs) {
 399         return OPAL_ERROR;
 400     }
 401 
 402     mca_btl_smcuda_component.shm_fifo = (volatile sm_fifo_t **)mca_btl_smcuda_component.sm_seg->module_data_addr;
 403     mca_btl_smcuda_component.shm_bases = (char**)(mca_btl_smcuda_component.shm_fifo + n);
 404     mca_btl_smcuda_component.shm_mem_nodes = (uint16_t*)(mca_btl_smcuda_component.shm_bases + n);
 405 
 406     /* set the base of the shared memory segment */
 407     mca_btl_smcuda_component.shm_bases[mca_btl_smcuda_component.my_smp_rank] =
 408         (char*)mca_btl_smcuda_component.sm_mpool_base;
 409     mca_btl_smcuda_component.shm_mem_nodes[mca_btl_smcuda_component.my_smp_rank] =
 410         (uint16_t)my_mem_node;
 411 
 412     /* initialize the array of fifo's "owned" by this process */
 413     if(NULL == (my_fifos = (sm_fifo_t*)mpool_calloc(FIFO_MAP_NUM(n), sizeof(sm_fifo_t))))
 414         return OPAL_ERR_OUT_OF_RESOURCE;
 415 
 416     mca_btl_smcuda_component.shm_fifo[mca_btl_smcuda_component.my_smp_rank] = my_fifos;
 417 
 418     /* cache the pointer to the 2d fifo array.  These addresses
 419      * are valid in the current process space */
 420     mca_btl_smcuda_component.fifo = (sm_fifo_t**)malloc(sizeof(sm_fifo_t*) * n);
 421 
 422     if(NULL == mca_btl_smcuda_component.fifo)
 423         return OPAL_ERR_OUT_OF_RESOURCE;
 424 
 425     mca_btl_smcuda_component.fifo[mca_btl_smcuda_component.my_smp_rank] = my_fifos;
 426 
 427     mca_btl_smcuda_component.mem_nodes = (uint16_t *) malloc(sizeof(uint16_t) * n);
 428     if(NULL == mca_btl_smcuda_component.mem_nodes)
 429         return OPAL_ERR_OUT_OF_RESOURCE;
 430 
 431     /* initialize fragment descriptor free lists */
 432 
 433     /* allocation will be for the fragment descriptor and payload buffer */
 434     length = sizeof(mca_btl_smcuda_frag1_t);
 435     length_payload =
 436         sizeof(mca_btl_smcuda_hdr_t) + mca_btl_smcuda_component.eager_limit;
 437     i = opal_free_list_init (&mca_btl_smcuda_component.sm_frags_eager, length,
 438                              opal_cache_line_size, OBJ_CLASS(mca_btl_smcuda_frag1_t),
 439                              length_payload, opal_cache_line_size,
 440                              mca_btl_smcuda_component.sm_free_list_num,
 441                              mca_btl_smcuda_component.sm_free_list_max,
 442                              mca_btl_smcuda_component.sm_free_list_inc,
 443                              mca_btl_smcuda_component.sm_mpool, 0, NULL, NULL, NULL);
 444     if ( OPAL_SUCCESS != i )
 445         return i;
 446 
 447     length = sizeof(mca_btl_smcuda_frag2_t);
 448     length_payload =
 449         sizeof(mca_btl_smcuda_hdr_t) + mca_btl_smcuda_component.max_frag_size;
 450     i = opal_free_list_init (&mca_btl_smcuda_component.sm_frags_max, length,
 451                              opal_cache_line_size, OBJ_CLASS(mca_btl_smcuda_frag2_t),
 452                              length_payload, opal_cache_line_size,
 453                              mca_btl_smcuda_component.sm_free_list_num,
 454                              mca_btl_smcuda_component.sm_free_list_max,
 455                              mca_btl_smcuda_component.sm_free_list_inc,
 456                              mca_btl_smcuda_component.sm_mpool, 0, NULL, NULL, NULL);
 457     if ( OPAL_SUCCESS != i )
 458         return i;
 459 
 460     i = opal_free_list_init (&mca_btl_smcuda_component.sm_frags_user,
 461                              sizeof(mca_btl_smcuda_user_t),
 462                              opal_cache_line_size, OBJ_CLASS(mca_btl_smcuda_user_t),
 463                              sizeof(mca_btl_smcuda_hdr_t), opal_cache_line_size,
 464                              mca_btl_smcuda_component.sm_free_list_num,
 465                              mca_btl_smcuda_component.sm_free_list_max,
 466                              mca_btl_smcuda_component.sm_free_list_inc,
 467                              mca_btl_smcuda_component.sm_mpool, 0, NULL, NULL, NULL);
 468     if ( OPAL_SUCCESS != i )
 469             return i;
 470 
 471     mca_btl_smcuda_component.num_outstanding_frags = 0;
 472 
 473     mca_btl_smcuda_component.num_pending_sends = 0;
 474     i = opal_free_list_init (&mca_btl_smcuda_component.pending_send_fl,
 475                              sizeof(btl_smcuda_pending_send_item_t), 8,
 476                              OBJ_CLASS(opal_free_list_item_t),
 477                              0, 0, 16, -1, 32, NULL, 0, NULL, NULL, NULL);
 478     if ( OPAL_SUCCESS != i )
 479         return i;
 480 
 481     /* set flag indicating btl has been inited */
 482     smcuda_btl->btl_inited = true;
 483 
 484     return OPAL_SUCCESS;
 485 }
 486 
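     /*
      * Allocate and initialize an endpoint for one local peer: assign its
      * SMP rank, set up the pending-send list and lock, and (with CUDA
      * support) create the rgpusm rcache used for remote registrations.
      */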
 487 static struct mca_btl_base_endpoint_t *
 488 create_sm_endpoint(int local_proc, struct opal_proc_t *proc)
 489 {
 490     struct mca_btl_base_endpoint_t *ep;
 491 
 492 #if OPAL_ENABLE_PROGRESS_THREADS == 1
 493     char path[PATH_MAX];
 494 #endif
 495 
 496     ep = (struct mca_btl_base_endpoint_t*)
 497         malloc(sizeof(struct mca_btl_base_endpoint_t));
 498     if(NULL == ep)
 499         return NULL;
 500     ep->peer_smp_rank = local_proc + mca_btl_smcuda_component.num_smp_procs;
 501 
 502     OBJ_CONSTRUCT(&ep->pending_sends, opal_list_t);
 503     OBJ_CONSTRUCT(&ep->endpoint_lock, opal_mutex_t);
 504 #if OPAL_ENABLE_PROGRESS_THREADS == 1
 505     sprintf(path, "%s"OPAL_PATH_SEP"sm_fifo.%lu",
 506             opal_process_info.job_session_dir,
 507             (unsigned long)proc->proc_name);
 508     ep->fifo_fd = open(path, O_WRONLY);
 509     if(ep->fifo_fd < 0) {
 510         opal_output(0, "mca_btl_smcuda_add_procs: open(%s) failed with errno=%d\n",
 511                     path, errno);
 512         free(ep);
 513         return NULL;
 514     }
 515 #endif
 516 #if OPAL_CUDA_SUPPORT
 517     /* Create a remote memory pool on the endpoint. The rgpusm component
 518      * does not take any resources. They are filled in internally. */
 519     ep->rcache = mca_rcache_base_module_create ("rgpusm", NULL, NULL);
 520 #endif /* OPAL_CUDA_SUPPORT */
 521     return ep;
 522 }
 523 
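     /*
      * add_procs proceeds in phases: identify which procs share this node,
      * create endpoints for them, run the one-time initialization, publish
      * this process' FIFOs, synchronize on seg_inited, and finally rebase
      * each peer's FIFO pointer into the local address space.
      */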
 524 int mca_btl_smcuda_add_procs(
 525     struct mca_btl_base_module_t* btl,
 526     size_t nprocs,
 527     struct opal_proc_t **procs,
 528     struct mca_btl_base_endpoint_t **peers,
 529     opal_bitmap_t* reachability)
 530 {
 531     int return_code = OPAL_SUCCESS;
 532     int32_t n_local_procs = 0, proc, j, my_smp_rank = -1;
 533     const opal_proc_t* my_proc; /* pointer to caller's proc structure */
 534     mca_btl_smcuda_t *smcuda_btl;
 535     bool have_connected_peer = false;
 536     char **bases;
 537     /* for easy access to the mpool_sm_module */
 538     mca_common_sm_mpool_module_t *sm_mpool_modp = NULL;
 539 
  540     /* initialization */
 541 
 542     smcuda_btl = (mca_btl_smcuda_t *)btl;
 543 
 544     /* get pointer to my proc structure */
 545     if(NULL == (my_proc = opal_proc_local_get()))
 546         return OPAL_ERR_OUT_OF_RESOURCE;
 547 
  548     /* Get unique host identifier for each process in the list,
  549      * and identify procs that are on this host.  Add procs on this
  550      * host to the shared memory reachability list.  Also, get the number
  551      * of local procs in the procs list. */
 552     for (proc = 0; proc < (int32_t)nprocs; proc++) {
 553         /* check to see if this proc can be reached via shmem (i.e.,
 554            if they're on my local host and in my job) */
 555         if (procs[proc]->proc_name.jobid != my_proc->proc_name.jobid ||
 556             !OPAL_PROC_ON_LOCAL_NODE(procs[proc]->proc_flags)) {
 557             peers[proc] = NULL;
 558             continue;
 559         }
 560         /* check to see if this is me */
 561         if(my_proc == procs[proc]) {
 562             my_smp_rank = mca_btl_smcuda_component.my_smp_rank = n_local_procs++;
 563             continue;
 564         }
 565 
 566         /* we have someone to talk to */
 567         have_connected_peer = true;
 568 
 569         if(!(peers[proc] = create_sm_endpoint(n_local_procs, procs[proc]))) {
 570             return_code = OPAL_ERROR;
 571             goto CLEANUP;
 572         }
 573 #if OPAL_CUDA_SUPPORT
 574         peers[proc]->proc_opal = procs[proc];
 575         peers[proc]->ipcstate = IPC_INIT;
 576         peers[proc]->ipctries = 0;
 577 #endif /* OPAL_CUDA_SUPPORT */
 578         n_local_procs++;
 579 
 580         /* add this proc to shared memory accessibility list */
 581         return_code = opal_bitmap_set_bit(reachability, proc);
 582         if(OPAL_SUCCESS != return_code)
 583             goto CLEANUP;
 584     }
 585 
  586     /* jump out if there's no one we can talk to */
 587     if (!have_connected_peer)
 588         goto CLEANUP;
 589 
 590     /* make sure that my_smp_rank has been defined */
 591     if (-1 == my_smp_rank) {
 592         return_code = OPAL_ERROR;
 593         goto CLEANUP;
 594     }
 595 
 596     if (!smcuda_btl->btl_inited) {
 597         return_code =
 598             smcuda_btl_first_time_init(smcuda_btl, my_smp_rank,
 599                                    mca_btl_smcuda_component.sm_max_procs);
 600         if (return_code != OPAL_SUCCESS) {
 601             goto CLEANUP;
 602         }
 603     }
 604 
 605     /* set local proc's smp rank in the peers structure for
 606      * rapid access and calculate reachability */
 607     for(proc = 0; proc < (int32_t)nprocs; proc++) {
 608         if(NULL == peers[proc])
 609             continue;
 610         mca_btl_smcuda_component.sm_peers[peers[proc]->peer_smp_rank] = peers[proc];
 611         peers[proc]->my_smp_rank = my_smp_rank;
 612     }
 613 
 614     bases = mca_btl_smcuda_component.shm_bases;
 615     sm_mpool_modp = (mca_common_sm_mpool_module_t *)mca_btl_smcuda_component.sm_mpool;
 616 
 617     /* initialize own FIFOs */
 618     /*
 619      * The receiver initializes all its FIFOs.  All components will
 620      * be allocated near the receiver.  Nothing will be local to
 621      * "the sender" since there will be many senders.
 622      */
 623     for(j = mca_btl_smcuda_component.num_smp_procs;
 624         j < mca_btl_smcuda_component.num_smp_procs + FIFO_MAP_NUM(n_local_procs); j++) {
 625 
 626         return_code = sm_fifo_init( mca_btl_smcuda_component.fifo_size,
 627                                     mca_btl_smcuda_component.sm_mpool,
 628                                    &mca_btl_smcuda_component.fifo[my_smp_rank][j],
 629                                     mca_btl_smcuda_component.fifo_lazy_free);
 630         if(return_code != OPAL_SUCCESS)
 631             goto CLEANUP;
 632     }
 633 
 634     opal_atomic_wmb();
 635 
 636     /* Sync with other local procs. Force the FIFO initialization to always
  637      * happen before the readers access it.
 638      */
 639     (void)opal_atomic_add_fetch_32(&mca_btl_smcuda_component.sm_seg->module_seg->seg_inited, 1);
 640     while( n_local_procs >
 641            mca_btl_smcuda_component.sm_seg->module_seg->seg_inited) {
 642         opal_progress();
 643         opal_atomic_rmb();
 644     }
 645 
 646     /* it is now safe to unlink the shared memory segment. only one process
 647      * needs to do this, so just let smp rank zero take care of it. */
 648     if (0 == my_smp_rank) {
 649         if (OPAL_SUCCESS !=
 650             mca_common_sm_module_unlink(mca_btl_smcuda_component.sm_seg)) {
 651             /* it is "okay" if this fails at this point. we have gone this far,
 652              * so just warn about the failure and continue. this is probably
 653              * only triggered by a programming error. */
 654             opal_output(0, "WARNING: common_sm_module_unlink failed.\n");
 655         }
 656         /* SKG - another abstraction violation here, but I don't want to add
 657          * extra code in the sm mpool for further synchronization. */
 658 
 659         /* at this point, all processes have attached to the mpool segment. so
 660          * it is safe to unlink it here. */
 661         if (OPAL_SUCCESS !=
 662             mca_common_sm_module_unlink(sm_mpool_modp->sm_common_module)) {
 663             opal_output(0, "WARNING: common_sm_module_unlink failed.\n");
 664         }
 665         if (-1 == unlink(mca_btl_smcuda_component.sm_mpool_rndv_file_name)) {
 666             opal_output(0, "WARNING: %s unlink failed.\n",
 667                         mca_btl_smcuda_component.sm_mpool_rndv_file_name);
 668         }
 669         if (-1 == unlink(mca_btl_smcuda_component.sm_rndv_file_name)) {
 670             opal_output(0, "WARNING: %s unlink failed.\n",
 671                         mca_btl_smcuda_component.sm_rndv_file_name);
 672         }
 673     }
 674 
 675     /* free up some space used by the name buffers */
 676     free(mca_btl_smcuda_component.sm_mpool_ctl_file_name);
 677     free(mca_btl_smcuda_component.sm_mpool_rndv_file_name);
 678     free(mca_btl_smcuda_component.sm_ctl_file_name);
 679     free(mca_btl_smcuda_component.sm_rndv_file_name);
 680 
 681     /* coordinate with other processes */
 682     for(j = mca_btl_smcuda_component.num_smp_procs;
 683         j < mca_btl_smcuda_component.num_smp_procs + n_local_procs; j++) {
 684         ptrdiff_t diff;
 685 
 686         /* spin until this element is allocated */
 687         /* doesn't really wait for that process... FIFO might be allocated, but not initialized */
 688         opal_atomic_rmb();
 689         while(NULL == mca_btl_smcuda_component.shm_fifo[j]) {
 690             opal_progress();
 691             opal_atomic_rmb();
 692         }
 693 
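          /* shm_fifo[j] holds the pointer value peer j published, which is
           * only valid in peer j's address space; shifting it by the
           * difference between our two segment bases yields the same FIFO
           * in our own mapping. */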
 694         /* Calculate the difference as (my_base - their_base) */
 695         diff = ADDR2OFFSET(bases[my_smp_rank], bases[j]);
 696 
 697         /* store local address of remote fifos */
 698         mca_btl_smcuda_component.fifo[j] =
 699             (sm_fifo_t*)OFFSET2ADDR(diff, mca_btl_smcuda_component.shm_fifo[j]);
 700 
 701         /* cache local copy of peer memory node number */
 702         mca_btl_smcuda_component.mem_nodes[j] = mca_btl_smcuda_component.shm_mem_nodes[j];
 703     }
 704 
 705     /* update the local smp process count */
 706     mca_btl_smcuda_component.num_smp_procs += n_local_procs;
 707 
  708     /* make sure we have enough eager fragments for each process */
 709     return_code = opal_free_list_resize_mt (&mca_btl_smcuda_component.sm_frags_eager,
 710                                             mca_btl_smcuda_component.num_smp_procs * 2);
 711     if (OPAL_SUCCESS != return_code)
 712         goto CLEANUP;
 713 
 714 CLEANUP:
 715     return return_code;
 716 }
 717 
 718 int mca_btl_smcuda_del_procs(
 719     struct mca_btl_base_module_t* btl,
 720     size_t nprocs,
 721     struct opal_proc_t **procs,
 722     struct mca_btl_base_endpoint_t **peers)
 723 {
 724     for (size_t i = 0 ; i < nprocs ; ++i) {
 725         if (peers[i]->rcache) {
 726             mca_rcache_base_module_destroy (peers[i]->rcache);
 727             peers[i]->rcache = NULL;
 728         }
 729     }
 730 
 731     return OPAL_SUCCESS;
 732 }
 733 
 734 
 735 /**
 736  * MCA->BTL Clean up any resources held by BTL module
 737  * before the module is unloaded.
 738  *
 739  * @param btl (IN)   BTL module.
 740  *
 741  * Prior to unloading a BTL module, the MCA framework will call
 742  * the BTL finalize method of the module. Any resources held by
 743  * the BTL should be released and if required the memory corresponding
 744  * to the BTL module freed.
 745  *
 746  */
 747 
 748 int mca_btl_smcuda_finalize(struct mca_btl_base_module_t* btl)
 749 {
 750     return OPAL_SUCCESS;
 751 }
 752 
 753 
 754 /*
  755  * Register callback function for error handling.
 756  */
 757 int mca_btl_smcuda_register_error_cb(
 758         struct mca_btl_base_module_t* btl,
 759         mca_btl_base_module_error_cb_fn_t cbfunc)
 760 {
 761     mca_btl_smcuda_t *smcuda_btl = (mca_btl_smcuda_t *)btl;
 762     smcuda_btl->error_cb = cbfunc;
 763     return OPAL_SUCCESS;
 764 }
 765 
 766 /**
 767  * Allocate a segment.
 768  *
 769  * @param btl (IN)      BTL module
 770  * @param size (IN)     Request segment size.
 771  */
 772 extern mca_btl_base_descriptor_t* mca_btl_smcuda_alloc(
 773     struct mca_btl_base_module_t* btl,
 774     struct mca_btl_base_endpoint_t* endpoint,
 775     uint8_t order,
 776     size_t size,
 777     uint32_t flags)
 778 {
 779     mca_btl_smcuda_frag_t* frag = NULL;
 780     if(size <= mca_btl_smcuda_component.eager_limit) {
 781         MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag);
 782     } else if (size <= mca_btl_smcuda_component.max_frag_size) {
 783         MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag);
 784     }
 785 
 786     if (OPAL_LIKELY(frag != NULL)) {
 787         frag->segment.seg_len = size;
 788         frag->base.des_flags = flags;
 789     }
 790     return (mca_btl_base_descriptor_t*)frag;
 791 }
 792 
 793 /**
 794  * Return a segment allocated by this BTL.
 795  *
 796  * @param btl (IN)      BTL module
 797  * @param segment (IN)  Allocated segment.
 798  */
 799 extern int mca_btl_smcuda_free(
 800     struct mca_btl_base_module_t* btl,
 801     mca_btl_base_descriptor_t* des)
 802 {
 803     mca_btl_smcuda_frag_t* frag = (mca_btl_smcuda_frag_t*)des;
 804     MCA_BTL_SMCUDA_FRAG_RETURN(frag);
 805 
 806     return OPAL_SUCCESS;
 807 }
 808 
 809 
 810 /**
 811  * Pack data
 812  *
 813  * @param btl (IN)      BTL module
 814  */
 815 struct mca_btl_base_descriptor_t* mca_btl_smcuda_prepare_src(
 816     struct mca_btl_base_module_t* btl,
 817     struct mca_btl_base_endpoint_t* endpoint,
 818     struct opal_convertor_t* convertor,
 819     uint8_t order,
 820     size_t reserve,
 821     size_t* size,
 822     uint32_t flags)
 823 {
 824     mca_btl_smcuda_frag_t* frag;
 825     struct iovec iov;
 826     uint32_t iov_count = 1;
 827     size_t max_data = *size;
 828     int rc;
 829 
 830     if ( reserve + max_data <= mca_btl_smcuda_component.eager_limit ) {
 831         MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag);
 832     } else {
 833         MCA_BTL_SMCUDA_FRAG_ALLOC_MAX(frag);
 834     }
 835     if( OPAL_UNLIKELY(NULL == frag) ) {
 836         return NULL;
 837     }
 838 
 839     if( OPAL_UNLIKELY(reserve + max_data > frag->size) ) {
 840         max_data = frag->size - reserve;
 841     }
 842     iov.iov_len = max_data;
 843     iov.iov_base =
 844         (IOVBASE_TYPE*)(((unsigned char*)(frag->segment.seg_addr.pval)) + reserve);
 845 
 846     rc = opal_convertor_pack(convertor, &iov, &iov_count, &max_data );
 847     if( OPAL_UNLIKELY(rc < 0) ) {
 848         MCA_BTL_SMCUDA_FRAG_RETURN(frag);
 849         return NULL;
 850     }
 851 
 852     frag->segment.seg_len = reserve + max_data;
 853     frag->base.des_segments = &frag->segment;
 854     frag->base.des_segment_count = 1;
 855     frag->base.order = MCA_BTL_NO_ORDER;
 856     frag->base.des_flags = flags;
 857     *size = max_data;
 858     return &frag->base;
 859 }
 860 
 861 #if 0
 862 #define MCA_BTL_SMCUDA_TOUCH_DATA_TILL_CACHELINE_BOUNDARY(sm_frag)          \
 863     do {                                                                \
 864         char* _memory = (char*)(sm_frag)->segment.seg_addr.pval +       \
 865             (sm_frag)->segment.seg_len;                                 \
 866         int* _intmem;                                                   \
 867         size_t align = (intptr_t)_memory & 0xFUL;                       \
 868         switch( align & 0x3 ) {                                         \
 869         case 3: *_memory = 0; _memory++;                                \
 870         case 2: *_memory = 0; _memory++;                                \
 871         case 1: *_memory = 0; _memory++;                                \
 872         }                                                               \
 873         align >>= 2;                                                    \
 874         _intmem = (int*)_memory;                                        \
 875         switch( align ) {                                               \
 876         case 3: *_intmem = 0; _intmem++;                                \
 877         case 2: *_intmem = 0; _intmem++;                                \
 878         case 1: *_intmem = 0; _intmem++;                                \
 879         }                                                               \
 880     } while(0)
 881 #else
 882 #define MCA_BTL_SMCUDA_TOUCH_DATA_TILL_CACHELINE_BOUNDARY(sm_frag)
 883 #endif
 884 
 885 #if 0
 886         if( OPAL_LIKELY(align > 0) ) {                                  \
 887             align = 0xFUL - align;                                      \
 888             memset( _memory, 0, align );                                \
 889         }                                                               \
 890 
 891 #endif
 892 
 893 /**
  894  * Initiate an inline send to the peer.  On failure, hand back a freshly allocated descriptor for the caller to use.
 895  *
 896  * @param btl (IN)      BTL module
 897  * @param peer (IN)     BTL peer addressing
 898  */
 899 int mca_btl_smcuda_sendi( struct mca_btl_base_module_t* btl,
 900                       struct mca_btl_base_endpoint_t* endpoint,
 901                       struct opal_convertor_t* convertor,
 902                       void* header,
 903                       size_t header_size,
 904                       size_t payload_size,
 905                       uint8_t order,
 906                       uint32_t flags,
 907                       mca_btl_base_tag_t tag,
 908                       mca_btl_base_descriptor_t** descriptor )
 909 {
 910     size_t length = (header_size + payload_size);
 911     mca_btl_smcuda_frag_t* frag;
 912     int rc;
 913 
 914     if ( mca_btl_smcuda_component.num_outstanding_frags * 2 > (int) mca_btl_smcuda_component.fifo_size ) {
 915         mca_btl_smcuda_component_progress();
 916     }
 917 #if OPAL_CUDA_SUPPORT
 918     /* Initiate setting up CUDA IPC support. */
 919     if (mca_common_cuda_enabled && (IPC_INIT == endpoint->ipcstate) && mca_btl_smcuda_component.use_cuda_ipc) {
 920         mca_btl_smcuda_send_cuda_ipc_request(btl, endpoint);
 921     }
 922     /* We do not want to use this path when we have CUDA IPC support */
 923     if ((convertor->flags & CONVERTOR_CUDA) && (IPC_ACKED == endpoint->ipcstate)) {
 924         if (NULL != descriptor) {
 925             *descriptor = mca_btl_smcuda_alloc(btl, endpoint, order, payload_size+header_size, flags);
 926         }
 927         return OPAL_ERR_RESOURCE_BUSY;
 928     }
 929 #endif /* OPAL_CUDA_SUPPORT */
 930 
 931     /* this check should be unnecessary... turn into an assertion? */
 932     if( length < mca_btl_smcuda_component.eager_limit ) {
 933 
 934         /* allocate a fragment, giving up if we can't get one */
 935         /* note that frag==NULL is equivalent to rc returning an error code */
 936         MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag);
 937         if( OPAL_UNLIKELY(NULL == frag) ) {
 938             *descriptor = NULL;
 939             return OPAL_ERR_OUT_OF_RESOURCE;
 940         }
 941 
 942         /* fill in fragment fields */
 943         frag->segment.seg_len = length;
 944         frag->hdr->len        = length;
 945         assert( 0 == (flags & MCA_BTL_DES_SEND_ALWAYS_CALLBACK) );
 946         frag->base.des_flags = flags | MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;   /* why do any flags matter here other than OWNERSHIP? */
 947         frag->hdr->tag = tag;
 948         frag->endpoint = endpoint;
 949 
 950         /* write the match header (with MPI comm/tag/etc. info) */
 951         memcpy( frag->segment.seg_addr.pval, header, header_size );
 952 
 953         /* write the message data if there is any */
 954         /*
 955           We can add MEMCHECKER calls before and after the packing.
 956         */
 957         if( payload_size ) {
 958             size_t max_data;
 959             struct iovec iov;
 960             uint32_t iov_count;
 961             /* pack the data into the supplied buffer */
 962             iov.iov_base = (IOVBASE_TYPE*)((unsigned char*)frag->segment.seg_addr.pval + header_size);
 963             iov.iov_len  = max_data = payload_size;
 964             iov_count    = 1;
 965 
 966             (void)opal_convertor_pack( convertor, &iov, &iov_count, &max_data);
 967 
 968             assert(max_data == payload_size);
 969         }
 970 
 971         MCA_BTL_SMCUDA_TOUCH_DATA_TILL_CACHELINE_BOUNDARY(frag);
 972 
 973         /* write the fragment pointer to the FIFO */
 974         /*
 975          * Note that we don't care what the FIFO-write return code is.  Even if
 976          * the return code indicates failure, the write has still "completed" from
 977          * our point of view:  it has been posted to a "pending send" queue.
 978          */
 979         OPAL_THREAD_ADD_FETCH32(&mca_btl_smcuda_component.num_outstanding_frags, +1);
 980         MCA_BTL_SMCUDA_FIFO_WRITE(endpoint, endpoint->my_smp_rank,
 981                               endpoint->peer_smp_rank, (void *) VIRTUAL2RELATIVE(frag->hdr), false, true, rc);
 982         (void)rc; /* this is safe to ignore as the message is requeued till success */
 983         return OPAL_SUCCESS;
 984     }
 985 
 986     /* presumably, this code path will never get executed */
 987     *descriptor = mca_btl_smcuda_alloc( btl, endpoint, order,
 988                                     payload_size + header_size, flags);
 989     return OPAL_ERR_RESOURCE_BUSY;
 990 }
 991 
 992 /**
 993  * Initiate a send to the peer.
 994  *
 995  * @param btl (IN)      BTL module
 996  * @param peer (IN)     BTL peer addressing
 997  */
 998 int mca_btl_smcuda_send( struct mca_btl_base_module_t* btl,
 999                      struct mca_btl_base_endpoint_t* endpoint,
1000                      struct mca_btl_base_descriptor_t* descriptor,
1001                      mca_btl_base_tag_t tag )
1002 {
1003     mca_btl_smcuda_frag_t* frag = (mca_btl_smcuda_frag_t*)descriptor;
1004     int rc;
1005 
1006     if ( mca_btl_smcuda_component.num_outstanding_frags * 2 > (int) mca_btl_smcuda_component.fifo_size ) {
1007         mca_btl_smcuda_component_progress();
1008     }
1009 #if OPAL_CUDA_SUPPORT
1010     /* Initiate setting up CUDA IPC support */
1011     if (mca_common_cuda_enabled && (IPC_INIT == endpoint->ipcstate) && mca_btl_smcuda_component.use_cuda_ipc) {
1012         mca_btl_smcuda_send_cuda_ipc_request(btl, endpoint);
1013     }
1014 #endif /* OPAL_CUDA_SUPPORT */
1015 
1016     /* available header space */
1017     frag->hdr->len = frag->segment.seg_len;
1018     /* type of message, pt-2-pt, one-sided, etc */
1019     frag->hdr->tag = tag;
1020 
1021     MCA_BTL_SMCUDA_TOUCH_DATA_TILL_CACHELINE_BOUNDARY(frag);
1022 
1023     frag->endpoint = endpoint;
1024 
1025     /*
1026      * post the descriptor in the queue - post with the relative
1027      * address
1028      */
1029     OPAL_THREAD_ADD_FETCH32(&mca_btl_smcuda_component.num_outstanding_frags, +1);
1030     MCA_BTL_SMCUDA_FIFO_WRITE(endpoint, endpoint->my_smp_rank,
1031                           endpoint->peer_smp_rank, (void *) VIRTUAL2RELATIVE(frag->hdr), false, true, rc);
1032     if( OPAL_LIKELY(0 == rc) ) {
1033         return 1;  /* the data is completely gone */
1034     }
1035     frag->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
 1036     /* not yet gone, but pending. Let the upper level know that
 1037      * the callback will be triggered when the data has been sent.
1038      */
1039     return 0;
1040 }
1041 
1042 #if OPAL_CUDA_SUPPORT
1043 static struct mca_btl_base_registration_handle_t *mca_btl_smcuda_register_mem (
1044     struct mca_btl_base_module_t* btl, struct mca_btl_base_endpoint_t *endpoint, void *base,
1045     size_t size, uint32_t flags)
1046 {
1047     mca_btl_smcuda_t *smcuda_module = (mca_btl_smcuda_t *) btl;
1048     mca_rcache_common_cuda_reg_t *reg;
1049     int access_flags = flags & MCA_BTL_REG_FLAG_ACCESS_ANY;
1050     int rcache_flags = 0;
1051 
1052     if (MCA_BTL_REG_FLAG_CUDA_GPU_MEM & flags) {
1053         rcache_flags |= MCA_RCACHE_FLAGS_CUDA_GPU_MEM;
1054     }
1055 
1056     smcuda_module->rcache->rcache_register (smcuda_module->rcache, base, size, rcache_flags,
1057                                             access_flags, (mca_rcache_base_registration_t **) &reg);
1058     if (OPAL_UNLIKELY(NULL == reg)) {
1059         return NULL;
1060     }
1061 
1062     return (mca_btl_base_registration_handle_t *) &reg->data;
1063 }
1064 
1065 static int mca_btl_smcuda_deregister_mem (struct mca_btl_base_module_t* btl,
1066                                           struct mca_btl_base_registration_handle_t *handle)
1067 {
1068     mca_btl_smcuda_t *smcuda_module = (mca_btl_smcuda_t *) btl;
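          /* register_mem handed out a pointer to the 'data' member, so back
           * up by offsetof() to recover the enclosing registration object
           * (a container-of idiom) */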
1069     mca_rcache_common_cuda_reg_t *reg = (mca_rcache_common_cuda_reg_t *)
1070         ((intptr_t) handle - offsetof (mca_rcache_common_cuda_reg_t, data));
1071 
1072     smcuda_module->rcache->rcache_deregister (smcuda_module->rcache, &reg->base);
1073 
1074     return OPAL_SUCCESS;
1075 }
1076 
1077 int mca_btl_smcuda_get_cuda (struct mca_btl_base_module_t *btl,
1078     struct mca_btl_base_endpoint_t *ep, void *local_address,
1079     uint64_t remote_address, struct mca_btl_base_registration_handle_t *local_handle,
1080     struct mca_btl_base_registration_handle_t *remote_handle, size_t size, int flags,
1081     int order, mca_btl_base_rdma_completion_fn_t cbfunc, void *cbcontext, void *cbdata)
1082 {
1083     mca_rcache_common_cuda_reg_t rget_reg;
1084     mca_rcache_common_cuda_reg_t *reg_ptr = &rget_reg;
1085     int rc, done;
1086     void *remote_memory_address;
1087     size_t offset;
1088     mca_btl_smcuda_frag_t *frag;
1089 
1090     /* NTH: copied from old prepare_dst function */
1091     MCA_BTL_SMCUDA_FRAG_ALLOC_USER(frag);
1092     if(OPAL_UNLIKELY(NULL == frag)) {
1093         return OPAL_ERR_OUT_OF_RESOURCE;
1094     }
1095 
1096     /* shove all the info needed for completion callbacks into the fragment */
1097     frag->segment.seg_len = size;
1098     frag->segment.seg_addr.pval = local_address;
1099     frag->base.des_segments = &frag->segment;
1100     frag->base.des_segment_count = 1;
1101     frag->base.des_flags = flags;
1102     frag->base.des_cbfunc = (mca_btl_base_completion_fn_t) cbfunc;
1103     frag->base.des_cbdata = cbdata;
1104     frag->base.des_context = cbcontext;
1105     frag->local_handle = local_handle;
1106 
1107     /* Set to 0 for debugging since it is a list item but I am not
 1108      * initializing it properly and it is annoying to see all the
1109      * garbage in the debugger.  */
1110 
1111     memset(&rget_reg, 0, sizeof(rget_reg));
1112     memcpy(&rget_reg.data.memHandle, remote_handle->reg_data.memHandle,
1113            sizeof(remote_handle->reg_data.memHandle));
1114 #if !OPAL_CUDA_SYNC_MEMOPS
1115     /* Only need the remote event handle when syncing with remote events */
1116     memcpy(&rget_reg.data.evtHandle, remote_handle->reg_data.evtHandle,
1117            sizeof(remote_handle->reg_data.evtHandle));
1118 #endif
1119 
1120     /* Open the memory handle to the remote memory.  If it is cached, then
1121      * we just retrieve it from cache and avoid a call to open the handle.  That
1122      * is taken care of in the memory pool.  Note that we are searching for the
1123      * memory based on the base address and size of the memory handle, not the
1124      * remote memory which may lie somewhere in the middle. This is taken care of
1125      * a few lines down. Note that we hand in the peer rank just for debugging
1126      * support. */
1127     rc = ep->rcache->rcache_register (ep->rcache, remote_handle->reg_data.memh_seg_addr.pval,
1128                                       remote_handle->reg_data.memh_seg_len, ep->peer_smp_rank,
1129                                       MCA_RCACHE_ACCESS_LOCAL_WRITE,
1130                                       (mca_rcache_base_registration_t **)&reg_ptr);
1131 
1132     if (OPAL_SUCCESS != rc) {
1133         opal_output(0, "Failed to register remote memory, rc=%d", rc);
1134         return rc;
1135     }
1136     frag->registration = (mca_rcache_base_registration_t *)reg_ptr;
1137     frag->endpoint = ep;
1138 
1139     /* The registration has given us back the memory block that this
1140      * address lives in.  However, the base address of the block may
1141      * not equal the address that was used to retrieve the block.
1142      * Therefore, compute the offset and add it to the address of the
1143      * memory handle. */
1144     offset = (size_t) ((intptr_t) remote_address - (intptr_t) reg_ptr->base.base);
1145     remote_memory_address = (unsigned char *)reg_ptr->base.alloc_base + offset;
1146     if (0 != offset) {
1147         opal_output(-1, "OFFSET=%d", (int)offset);
1148     }
1149 
1150     /* The remote side posted an IPC event to make sure we do not start our
1151      * copy until IPC event completes.  This is to ensure that the data being sent
1152      * is available in the sender's GPU buffer.  Therefore, do a stream synchronize
1153      * on the IPC event that we received.  Note that we pull it from
1154      * rget_reg, not reg_ptr, as we do not cache the event. */
1155     mca_common_wait_stream_synchronize(&rget_reg);
1156 
1157     rc = mca_common_cuda_memcpy(local_address, remote_memory_address, size,
1158                                 "mca_btl_smcuda_get", (mca_btl_base_descriptor_t *)frag,
1159                                 &done);
1160     if (OPAL_SUCCESS != rc) {
1161         /* Out of resources can be handled by upper layers. */
1162         if (OPAL_ERR_OUT_OF_RESOURCE != rc) {
1163             opal_output(0, "Failed to cuMemcpy GPU memory, rc=%d", rc);
1164         }
1165         return rc;
1166     }
1167 
1168     if (OPAL_UNLIKELY(1 == done)) {
1169         cbfunc (btl, ep, local_address, local_handle, cbcontext, cbdata, OPAL_SUCCESS);
1170         mca_btl_smcuda_free(btl, (mca_btl_base_descriptor_t *)frag);
1171     }
1172 
1173     return OPAL_SUCCESS;
1174 
1175 }
1176 
1177 /**
 1178  * Send a CUDA IPC request message to the peer.  This indicates that this rank
 1179  * is interested in establishing CUDA IPC support between its GPU and the
 1180  * remote rank's GPU.  This is called when we do a send of some
 1181  * type.
1182  *
1183  * @param btl (IN)      BTL module
1184  * @param peer (IN)     BTL peer addressing
1185  */
1186 #define MAXTRIES 5
1187 static void mca_btl_smcuda_send_cuda_ipc_request(struct mca_btl_base_module_t* btl,
1188                                                  struct mca_btl_base_endpoint_t* endpoint)
1189 {
1190     mca_btl_smcuda_frag_t* frag;
1191     int rc, mydevnum, res;
1192     ctrlhdr_t ctrlhdr;
1193 
1194     /* We need to grab the lock when changing the state from IPC_INIT as multiple
1195      * threads could be doing sends. */
1196     OPAL_THREAD_LOCK(&endpoint->endpoint_lock);
1197     if (endpoint->ipcstate != IPC_INIT) {
1198         OPAL_THREAD_UNLOCK(&endpoint->endpoint_lock);
1199         return;
1200     } else {
1201         endpoint->ipctries++;
1202         if (endpoint->ipctries > MAXTRIES) {
1203             endpoint->ipcstate = IPC_BAD;
1204             OPAL_THREAD_UNLOCK(&endpoint->endpoint_lock);
1205             return;
1206         }
1207         /* All is good.  Set up state and continue. */
1208         endpoint->ipcstate = IPC_SENT;
1209         OPAL_THREAD_UNLOCK(&endpoint->endpoint_lock);
1210     }
1211 
1212     if ( mca_btl_smcuda_component.num_outstanding_frags * 2 > (int) mca_btl_smcuda_component.fifo_size ) {
1213         mca_btl_smcuda_component_progress();
1214     }
1215 
1216     if (0 != (res = mca_common_cuda_get_device(&mydevnum))) {
1217         opal_output(0, "Cannot determine device.  IPC cannot be set.");
1218         endpoint->ipcstate = IPC_BAD;
1219         return;
1220     }
1221 
1222     /* allocate a fragment, giving up if we can't get one */
1223     MCA_BTL_SMCUDA_FRAG_ALLOC_EAGER(frag);
1224     if( OPAL_UNLIKELY(NULL == frag) ) {
1225         endpoint->ipcstate = IPC_BAD;
1226         return;
1227     }
1228 
1229     /* Fill in fragment fields. */
1230     frag->hdr->tag = MCA_BTL_TAG_SMCUDA;
1231     frag->base.des_flags = MCA_BTL_DES_FLAGS_BTL_OWNERSHIP;
1232     frag->endpoint = endpoint;
1233     ctrlhdr.ctag = IPC_REQ;
1234     ctrlhdr.cudev = mydevnum;
1235     memcpy(frag->segment.seg_addr.pval, &ctrlhdr, sizeof(struct ctrlhdr_st));
1236 
1237     MCA_BTL_SMCUDA_TOUCH_DATA_TILL_CACHELINE_BOUNDARY(frag);
1238     /* write the fragment pointer to the FIFO */
1239     /*
1240      * Note that we don't care what the FIFO-write return code is.  Even if
1241      * the return code indicates failure, the write has still "completed" from
1242      * our point of view:  it has been posted to a "pending send" queue.
1243      */
1244     OPAL_THREAD_ADD_FETCH32(&mca_btl_smcuda_component.num_outstanding_frags, +1);
1245     opal_output_verbose(10, mca_btl_smcuda_component.cuda_ipc_output,
1246                         "Sending CUDA IPC REQ (try=%d): myrank=%d, mydev=%d, peerrank=%d",
1247                         endpoint->ipctries,
1248                         mca_btl_smcuda_component.my_smp_rank,
1249                         mydevnum, endpoint->peer_smp_rank);
1250 
1251     MCA_BTL_SMCUDA_FIFO_WRITE(endpoint, endpoint->my_smp_rank,
1252                               endpoint->peer_smp_rank, (void *) VIRTUAL2RELATIVE(frag->hdr), false, true, rc);
1253     return;
1254 
1255 }
1256 
1257 #endif /* OPAL_CUDA_SUPPORT */
1258 
1259 /**
 1260  * Dump the endpoint's pending sends for debugging.
1261  */
1262 void mca_btl_smcuda_dump(struct mca_btl_base_module_t* btl,
1263                      struct mca_btl_base_endpoint_t* endpoint,
1264                      int verbose)
1265 {
1266     mca_btl_smcuda_frag_t* frag;
1267 
 1268     mca_btl_base_err("BTL SM %p endpoint %p\n", (void*) btl, (void*) endpoint);
 1269     if( NULL != endpoint ) {
 1270         mca_btl_base_err(" [smp_rank %d] [peer_rank %d]\n",
 1271                          endpoint->my_smp_rank, endpoint->peer_smp_rank);
1272         OPAL_LIST_FOREACH(frag, &endpoint->pending_sends, mca_btl_smcuda_frag_t) {
1273             mca_btl_base_err(" |  frag %p size %lu (hdr frag %p len %lu rank %d tag %d)\n",
1274                              (void*) frag, frag->size, (void*) frag->hdr->frag,
1275                              frag->hdr->len, frag->hdr->my_smp_rank,
1276                              frag->hdr->tag);
1277         }
1278     }
1279 }
1280 
1281 #if OPAL_ENABLE_FT_CR    == 0
1282 int mca_btl_smcuda_ft_event(int state) {
1283     return OPAL_SUCCESS;
1284 }
1285 #else
1286 int mca_btl_smcuda_ft_event(int state) {
1287     /* Notify mpool */
1288     if( NULL != mca_btl_smcuda_component.sm_mpool &&
1289         NULL != mca_btl_smcuda_component.sm_mpool->mpool_ft_event) {
1290         mca_btl_smcuda_component.sm_mpool->mpool_ft_event(state);
1291     }
1292 
1293     if(OPAL_CRS_CHECKPOINT == state) {
1294         if( NULL != mca_btl_smcuda_component.sm_seg ) {
1295             /* On restart we need the old file names to exist (not necessarily
1296              * contain content) so the CRS component does not fail when searching
1297              * for these old file handles. The restart procedure will make sure
1298              * these files get cleaned up appropriately.
1299              */
1300             /* Disabled to get FT code compiled again
1301              * TODO: FIXIT soon
1302             orte_sstore.set_attr(orte_sstore_handle_current,
1303                                  SSTORE_METADATA_LOCAL_TOUCH,
1304                                  mca_btl_smcuda_component.sm_seg->shmem_ds.seg_name);
1305              */
1306         }
1307     }
1308     else if(OPAL_CRS_CONTINUE == state) {
1309         if (opal_cr_continue_like_restart) {
1310             if( NULL != mca_btl_smcuda_component.sm_seg ) {
1311                 /* Add shared memory file */
1312                 opal_crs_base_cleanup_append(mca_btl_smcuda_component.sm_seg->shmem_ds.seg_name, false);
1313             }
1314 
1315             /* Clear this so we force the module to re-init the sm files */
1316             mca_btl_smcuda_component.sm_mpool = NULL;
1317         }
1318     }
1319     else if(OPAL_CRS_RESTART == state ||
1320             OPAL_CRS_RESTART_PRE == state) {
1321         if( NULL != mca_btl_smcuda_component.sm_seg ) {
1322             /* Add shared memory file */
1323             opal_crs_base_cleanup_append(mca_btl_smcuda_component.sm_seg->shmem_ds.seg_name, false);
1324         }
1325 
1326         /* Clear this so we force the module to re-init the sm files */
1327         mca_btl_smcuda_component.sm_mpool = NULL;
1328     }
1329     else if(OPAL_CRS_TERM == state ) {
1330         ;
1331     }
1332     else {
1333         ;
1334     }
1335 
1336     return OPAL_SUCCESS;
1337 }
1338 #endif /* OPAL_ENABLE_FT_CR */
