root/ompi/mca/pml/cm/pml_cm_sendreq.h

/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2013 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2006 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2015-2017 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2015      Research Organization for Information Science
 *                         and Technology (RIST). All rights reserved.
 * Copyright (c) 2017      Intel, Inc. All rights reserved
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#ifndef PML_CM_SENDREQ_H
#define PML_CM_SENDREQ_H

#include "pml_cm_request.h"
#include "ompi/mca/pml/base/pml_base_sendreq.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"
#include "ompi/mca/pml/pml.h"
#include "ompi/mca/mtl/mtl.h"
#include "opal/prefetch.h"

struct mca_pml_cm_send_request_t {
    mca_pml_cm_request_t req_base;
    mca_pml_base_send_mode_t req_send_mode;
};
typedef struct mca_pml_cm_send_request_t mca_pml_cm_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_cm_send_request_t);


struct mca_pml_cm_thin_send_request_t {
    mca_pml_cm_send_request_t req_send;
    mca_mtl_request_t req_mtl;            /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_thin_send_request_t mca_pml_cm_thin_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_cm_thin_send_request_t);


struct mca_pml_cm_hvy_send_request_t {
    mca_pml_cm_send_request_t req_send;
    const void *req_addr;                 /**< pointer to application buffer */
    size_t req_count;                     /**< count of user datatype elements */
    int32_t req_peer;                     /**< peer process - rank w/in this communicator */
    int32_t req_tag;                      /**< user defined tag */
    void *req_buff;                       /**< pointer to send buffer - may not be application buffer */
    bool req_blocking;
    mca_mtl_request_t req_mtl;            /**< the mtl specific memory. This field should be the last in the struct */
};
typedef struct mca_pml_cm_hvy_send_request_t mca_pml_cm_hvy_send_request_t;
OBJ_CLASS_DECLARATION(mca_pml_cm_hvy_send_request_t);
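
/*
 * The two flavors differ in how much send state they cache.  A thin request
 * carries only the common send state plus the MTL scratch area and is used
 * when every parameter can be handed straight to the MTL.  A heavy request
 * additionally records the user buffer, count, peer and tag (and an optional
 * bounce buffer in req_buff) so that buffered and persistent sends can be
 * staged or restarted later.  In both structs req_mtl is kept last,
 * presumably so that the MTL's per-request storage can be laid out directly
 * behind it.
 */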


#if OPAL_ENABLE_HETEROGENEOUS_SUPPORT
#define MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst,          \
                                           ompi_proc)                   \
do {                                                                    \
    ompi_proc = ompi_comm_peer_lookup( comm, dst );                     \
                                                                        \
    if(OPAL_UNLIKELY(NULL == ompi_proc)) {                              \
        sendreq = NULL;                                                 \
    } else {                                                            \
        sendreq = (mca_pml_cm_thin_send_request_t*)                     \
          opal_free_list_wait (&mca_pml_base_send_requests);            \
        sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_THIN; \
        sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;          \
        sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
    }                                                                   \
} while(0)
#else
#define MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst,          \
                                           ompi_proc)                   \
do {                                                                    \
    sendreq = (mca_pml_cm_thin_send_request_t*)                         \
        opal_free_list_wait (&mca_pml_base_send_requests);              \
    sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_THIN; \
    sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;              \
    sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
} while(0)
#endif
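
/*
 * Rough usage sketch (variable names are illustrative, not taken from this
 * header): the isend path is expected to allocate a request roughly like
 *
 *   mca_pml_cm_thin_send_request_t *sendreq;
 *   ompi_proc_t *ompi_proc;
 *
 *   MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
 *   if (OPAL_UNLIKELY(NULL == sendreq)) {
 *       return OMPI_ERR_OUT_OF_RESOURCE;
 *   }
 *
 * The heterogeneous variant looks up the peer proc first so its architecture
 * is available when the convertor is prepared; the homogeneous variant skips
 * the lookup.  Both pull the request from the shared
 * mca_pml_base_send_requests free list and wire up the MTL completion
 * callback.
 */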


#if (OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
#define MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst,           \
                                          ompi_proc)                    \
{                                                                       \
    ompi_proc = ompi_comm_peer_lookup( comm, dst );                     \
    if(OPAL_UNLIKELY(NULL == ompi_proc)) {                              \
        sendreq = NULL;                                                 \
    } else {                                                            \
        sendreq = (mca_pml_cm_hvy_send_request_t*)                      \
          opal_free_list_wait (&mca_pml_base_send_requests);            \
        sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_HEAVY; \
        sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;          \
        sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
    }                                                                   \
}
#else
#define MCA_PML_CM_HVY_SEND_REQUEST_ALLOC(sendreq, comm, dst,           \
                                          ompi_proc)                    \
{                                                                       \
    sendreq = (mca_pml_cm_hvy_send_request_t*)                          \
        opal_free_list_wait (&mca_pml_base_send_requests);              \
    sendreq->req_send.req_base.req_pml_type = MCA_PML_CM_REQUEST_SEND_HEAVY; \
    sendreq->req_mtl.ompi_req = (ompi_request_t*) sendreq;              \
    sendreq->req_mtl.completion_callback = mca_pml_cm_send_request_completion; \
}
#endif

#if (OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
#define MCA_PML_CM_HVY_SEND_REQUEST_INIT_COMMON(req_send,               \
                                            ompi_proc,                  \
                                            comm,                       \
                                            tag,                        \
                                            datatype,                   \
                                            sendmode,                   \
                                            buf,                        \
                                            count,                      \
                                            flags )                     \
{                                                                       \
    OBJ_RETAIN(comm);                                                   \
    OMPI_DATATYPE_RETAIN(datatype);                                     \
    (req_send)->req_base.req_comm = comm;                               \
    (req_send)->req_base.req_datatype = datatype;                       \
    MCA_PML_CM_SWITCH_CUDA_CONVERTOR_OFF(flags, datatype, count);       \
    opal_convertor_copy_and_prepare_for_send(                           \
                                             ompi_proc->super.proc_convertor, \
                                             &(datatype->super),        \
                                             count,                     \
                                             buf,                       \
                                             flags,                     \
                                             &(req_send)->req_base.req_convertor ); \
    (req_send)->req_base.req_ompi.req_mpi_object.comm = comm;           \
    (req_send)->req_base.req_ompi.req_status.MPI_SOURCE =               \
        comm->c_my_rank;                                                \
    (req_send)->req_base.req_ompi.req_status.MPI_TAG = tag;             \
    (req_send)->req_base.req_ompi.req_status._ucount = count;           \
    (req_send)->req_send_mode = sendmode;                               \
    (req_send)->req_base.req_free_called = false;                       \
}
#else
#define MCA_PML_CM_HVY_SEND_REQUEST_INIT_COMMON(req_send,               \
                                            ompi_proc,                  \
                                            comm,                       \
                                            tag,                        \
                                            datatype,                   \
                                            sendmode,                   \
                                            buf,                        \
                                            count,                      \
                                            flags )                     \
{                                                                       \
    OBJ_RETAIN(comm);                                                   \
    OMPI_DATATYPE_RETAIN(datatype);                                     \
    (req_send)->req_base.req_comm = comm;                               \
    (req_send)->req_base.req_datatype = datatype;                       \
    MCA_PML_CM_SWITCH_CUDA_CONVERTOR_OFF(flags, datatype, count);       \
    opal_convertor_copy_and_prepare_for_send(                           \
        ompi_mpi_local_convertor,                                       \
        &(datatype->super),                                             \
        count,                                                          \
        buf,                                                            \
        flags,                                                          \
        &(req_send)->req_base.req_convertor );                          \
    (req_send)->req_base.req_ompi.req_mpi_object.comm = comm;           \
    (req_send)->req_base.req_ompi.req_status.MPI_SOURCE =               \
        comm->c_my_rank;                                                \
    (req_send)->req_base.req_ompi.req_status.MPI_TAG = tag;             \
    (req_send)->req_base.req_ompi.req_status._ucount = count;           \
    (req_send)->req_send_mode = sendmode;                               \
    (req_send)->req_base.req_free_called = false;                       \
}
#endif
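
/*
 * Both variants of MCA_PML_CM_HVY_SEND_REQUEST_INIT_COMMON take a reference
 * on the communicator and the datatype, record them in the request and
 * prepare the pack convertor.  The only difference is the master convertor:
 * with heterogeneous support the peer's proc_convertor is used so data can be
 * converted for the remote architecture, otherwise the prebuilt
 * ompi_mpi_local_convertor is sufficient.  The references taken here are
 * dropped again by the *_SEND_REQUEST_RETURN macros below.
 */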

#if (OPAL_ENABLE_HETEROGENEOUS_SUPPORT)
#define MCA_PML_CM_SEND_REQUEST_INIT_COMMON(req_send,                   \
                                            ompi_proc,                  \
                                            comm,                       \
                                            tag,                        \
                                            datatype,                   \
                                            sendmode,                   \
                                            buf,                        \
                                            count,                      \
                                            flags )                     \
{                                                                       \
    OBJ_RETAIN(comm);                                                   \
    OMPI_DATATYPE_RETAIN(datatype);                                     \
    (req_send)->req_base.req_comm = comm;                               \
    (req_send)->req_base.req_datatype = datatype;                       \
    MCA_PML_CM_SWITCH_CUDA_CONVERTOR_OFF(flags, datatype, count);       \
    opal_convertor_copy_and_prepare_for_send(                           \
                                             ompi_proc->super.proc_convertor, \
                                             &(datatype->super),        \
                                             count,                     \
                                             buf,                       \
                                             flags,                     \
                                             &(req_send)->req_base.req_convertor ); \
    (req_send)->req_base.req_ompi.req_mpi_object.comm = comm;           \
    (req_send)->req_base.req_ompi.req_status.MPI_SOURCE =               \
        comm->c_my_rank;                                                \
    (req_send)->req_base.req_ompi.req_status.MPI_TAG = tag;             \
    (req_send)->req_base.req_ompi.req_status._ucount = count;           \
    (req_send)->req_send_mode = sendmode;                               \
    (req_send)->req_base.req_free_called = false;                       \
}

#else
#define MCA_PML_CM_SEND_REQUEST_INIT_COMMON(req_send,                   \
                                            ompi_proc,                  \
                                            comm,                       \
                                            tag,                        \
                                            datatype,                   \
                                            sendmode,                   \
                                            buf,                        \
                                            count,                      \
                                            flags )                     \
{                                                                       \
    OBJ_RETAIN(comm);                                                   \
    OMPI_DATATYPE_RETAIN(datatype);                                     \
    (req_send)->req_base.req_comm = comm;                               \
    (req_send)->req_base.req_datatype = datatype;                       \
    if (opal_datatype_is_contiguous_memory_layout(&datatype->super, count)) { \
        (req_send)->req_base.req_convertor.remoteArch =                 \
            ompi_mpi_local_convertor->remoteArch;                       \
        (req_send)->req_base.req_convertor.flags      =                 \
            ompi_mpi_local_convertor->flags;                            \
        (req_send)->req_base.req_convertor.master     =                 \
            ompi_mpi_local_convertor->master;                           \
        (req_send)->req_base.req_convertor.local_size =                 \
            count * datatype->super.size;                               \
        (req_send)->req_base.req_convertor.pBaseBuf   =                 \
            (unsigned char*)buf + datatype->super.true_lb;              \
        (req_send)->req_base.req_convertor.count      = count;          \
        (req_send)->req_base.req_convertor.pDesc      = &datatype->super; \
    } else {                                                            \
        MCA_PML_CM_SWITCH_CUDA_CONVERTOR_OFF(flags, datatype, count);   \
        opal_convertor_copy_and_prepare_for_send(                       \
            ompi_mpi_local_convertor,                                   \
            &(datatype->super),                                         \
            count,                                                      \
            buf,                                                        \
            flags,                                                      \
            &(req_send)->req_base.req_convertor );                      \
    }                                                                   \
    (req_send)->req_base.req_ompi.req_mpi_object.comm = comm;           \
    (req_send)->req_base.req_ompi.req_status.MPI_SOURCE =               \
        comm->c_my_rank;                                                \
    (req_send)->req_base.req_ompi.req_status.MPI_TAG = tag;             \
    (req_send)->req_base.req_ompi.req_status._ucount = count;           \
    (req_send)->req_send_mode = sendmode;                               \
    (req_send)->req_base.req_free_called = false;                       \
}
#endif
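
/*
 * The homogeneous variant above adds a fast path for contiguous datatypes:
 * instead of calling opal_convertor_copy_and_prepare_for_send() it copies the
 * relevant fields from ompi_mpi_local_convertor by hand and points the
 * convertor directly at buf + true_lb, skipping the full convertor
 * preparation for the common contiguous case.  Non-contiguous datatypes (and
 * every datatype in the heterogeneous build) go through the regular
 * prepare-for-send path.
 */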

#define MCA_PML_CM_HVY_SEND_REQUEST_INIT( sendreq,                      \
                                          ompi_proc,                    \
                                          comm,                         \
                                          tag,                          \
                                          dst,                          \
                                          datatype,                     \
                                          sendmode,                     \
                                          persistent,                   \
                                          blocking,                     \
                                          buf,                          \
                                          count,                        \
                                          flags )                       \
    do {                                                                \
        OMPI_REQUEST_INIT(&(sendreq->req_send.req_base.req_ompi),       \
                          persistent);                                  \
        sendreq->req_tag = tag;                                         \
        sendreq->req_peer = dst;                                        \
        sendreq->req_addr = buf;                                        \
        sendreq->req_count = count;                                     \
        MCA_PML_CM_HVY_SEND_REQUEST_INIT_COMMON( (&sendreq->req_send),  \
                                             ompi_proc,                 \
                                             comm,                      \
                                             tag,                       \
                                             datatype,                  \
                                             sendmode,                  \
                                             buf,                       \
                                             count,                     \
                                             flags )                    \
        opal_convertor_get_packed_size(                                 \
                                       &sendreq->req_send.req_base.req_convertor, \
                                       &sendreq->req_count );           \
                                                                        \
        sendreq->req_blocking = blocking;                               \
        sendreq->req_send.req_base.req_pml_complete =                   \
            (persistent ? true:false);                                  \
    } while(0)

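/*
 * Note that req_count is first set to the user's element count and then
 * immediately overwritten by opal_convertor_get_packed_size() with the packed
 * size in bytes; that byte count is what
 * MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC later uses to size the bounce
 * buffer.  Also note that req_pml_complete starts out true for persistent
 * requests, so an initialized but not yet started persistent request already
 * counts as complete at the PML level.
 */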

#define MCA_PML_CM_THIN_SEND_REQUEST_INIT( sendreq,                     \
                                           ompi_proc,                   \
                                           comm,                        \
                                           tag,                         \
                                           dst,                         \
                                           datatype,                    \
                                           sendmode,                    \
                                           buf,                         \
                                           count,                       \
                                           flags )                      \
    do {                                                                \
        OMPI_REQUEST_INIT(&(sendreq->req_send.req_base.req_ompi),       \
                          false);                                       \
        MCA_PML_CM_SEND_REQUEST_INIT_COMMON( (&sendreq->req_send),      \
                                             ompi_proc,                 \
                                             comm,                      \
                                             tag,                       \
                                             datatype,                  \
                                             sendmode,                  \
                                             buf,                       \
                                             count,                     \
                                             flags);                    \
        sendreq->req_send.req_base.req_pml_complete = false;            \
    } while(0)


#define MCA_PML_CM_SEND_REQUEST_START_SETUP(req_send)                   \
    do {                                                                \
        (req_send)->req_base.req_pml_complete = false;                  \
        (req_send)->req_base.req_ompi.req_complete = REQUEST_PENDING;   \
        (req_send)->req_base.req_ompi.req_state =                       \
            OMPI_REQUEST_ACTIVE;                                        \
        (req_send)->req_base.req_ompi.req_status._cancelled = 0;        \
    } while (0)

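/*
 * START_SETUP (re)arms the OMPI request before it is handed to the MTL: the
 * request is marked pending, active and not cancelled, and req_pml_complete
 * is cleared so the *_PML_COMPLETE macros below can assert that a request is
 * not completed twice.
 */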

#define MCA_PML_CM_THIN_SEND_REQUEST_START(sendreq,                     \
                                           comm,                        \
                                           tag,                         \
                                           dst,                         \
                                           sendmode,                    \
                                           blocking,                    \
                                           ret)                         \
do {                                                                    \
    MCA_PML_CM_SEND_REQUEST_START_SETUP(&(sendreq)->req_send);          \
    ret = OMPI_MTL_CALL(isend(ompi_mtl,                                 \
                              comm,                                     \
                              dst,                                      \
                              tag,                                      \
                              &sendreq->req_send.req_base.req_convertor, \
                              sendmode,                                 \
                              blocking,                                 \
                              &sendreq->req_mtl));                      \
 } while (0)

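/*
 * Rough sketch of the full thin-send sequence as the isend path is expected
 * to drive it (variable names and the zero flags value are illustrative, not
 * taken from this header):
 *
 *   mca_pml_cm_thin_send_request_t *sendreq;
 *   ompi_proc_t *ompi_proc;
 *   int rc;
 *
 *   MCA_PML_CM_THIN_SEND_REQUEST_ALLOC(sendreq, comm, dst, ompi_proc);
 *   MCA_PML_CM_THIN_SEND_REQUEST_INIT(sendreq, ompi_proc, comm, tag, dst,
 *                                     datatype, sendmode, buf, count, 0);
 *   MCA_PML_CM_THIN_SEND_REQUEST_START(sendreq, comm, tag, dst, sendmode,
 *                                      false, rc);
 *   if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) {
 *       ... error handling elided ...
 *   }
 */
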
#define MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC(sendreq, ret)           \
do {                                                                    \
    struct iovec iov;                                                   \
    unsigned int iov_count;                                             \
    size_t max_data;                                                    \
                                                                        \
    if(sendreq->req_count > 0) {                                        \
        sendreq->req_buff =                                             \
            mca_pml_base_bsend_request_alloc_buf(sendreq->req_count);   \
        if (NULL == sendreq->req_buff) {                                \
            ret = MPI_ERR_BUFFER;                                       \
        } else {                                                        \
            iov.iov_base = (IOVBASE_TYPE*)sendreq->req_buff;            \
            max_data = iov.iov_len = sendreq->req_count;                \
            iov_count = 1;                                              \
            opal_convertor_pack( &sendreq->req_send.req_base.req_convertor, \
                                 &iov,                                  \
                                 &iov_count,                            \
                                 &max_data );                           \
            opal_convertor_prepare_for_send( &sendreq->req_send.req_base.req_convertor, \
                                             &(ompi_mpi_packed.dt.super),  \
                                             max_data, sendreq->req_buff ); \
        }                                                               \
    }                                                                   \
 } while(0);

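/*
 * For buffered sends the macro above stages the message: it allocates a
 * bounce buffer of req_count bytes (the packed size computed at init time)
 * via mca_pml_base_bsend_request_alloc_buf(), packs the user data into it and
 * then re-prepares the convertor over the packed buffer described as
 * MPI_PACKED bytes.  The MTL therefore reads from the copy, and the user
 * buffer may be reused as soon as the start call returns.  ret is only
 * assigned on failure, so the caller is expected to preset it to
 * OMPI_SUCCESS.
 */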

#define MCA_PML_CM_HVY_SEND_REQUEST_START(sendreq, ret)                              \
do {                                                                                 \
    ret = OMPI_SUCCESS;                                                              \
    MCA_PML_CM_SEND_REQUEST_START_SETUP(&(sendreq)->req_send);                       \
    if (sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) {             \
        MCA_PML_CM_HVY_SEND_REQUEST_BSEND_ALLOC(sendreq, ret);                       \
    }                                                                                \
    if (OMPI_SUCCESS == ret) {                                                       \
        ret = OMPI_MTL_CALL(isend(ompi_mtl,                                          \
                                  sendreq->req_send.req_base.req_comm,               \
                                  sendreq->req_peer,                                 \
                                  sendreq->req_tag,                                  \
                                  &sendreq->req_send.req_base.req_convertor,         \
                                  sendreq->req_send.req_send_mode,                   \
                                  sendreq->req_blocking,                             \
                                  &sendreq->req_mtl));                               \
        if(OMPI_SUCCESS == ret &&                                                    \
           sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED) {          \
            sendreq->req_send.req_base.req_ompi.req_status.MPI_ERROR = 0;            \
            if(!REQUEST_COMPLETE(&sendreq->req_send.req_base.req_ompi)) {            \
                /* request may have already been marked complete by the MTL */       \
                ompi_request_complete(&(sendreq)->req_send.req_base.req_ompi, true); \
            }                                                                        \
        }                                                                            \
    }                                                                                \
 } while (0)

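/*
 * The heavy start sequence is: arm the request, stage the data for buffered
 * sends, then post the MTL isend using the parameters cached in the request.
 * For buffered sends the MPI-level request is completed as soon as the isend
 * has been posted, since the data already lives in the bounce buffer; the
 * REQUEST_COMPLETE() check guards against the MTL having completed the
 * request on its own in the meantime.
 */
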
/*
 * The PML has completed a send request. Note that this request
 * may have been orphaned by the user or have already completed
 * at the MPI level.
 * This macro will never be called directly from the upper level, as it should
 * only be an internal call to the PML.
 */
#define MCA_PML_CM_HVY_SEND_REQUEST_PML_COMPLETE(sendreq)                          \
do {                                                                               \
    assert( false == sendreq->req_send.req_base.req_pml_complete );                \
                                                                                   \
    if (sendreq->req_send.req_send_mode == MCA_PML_BASE_SEND_BUFFERED &&           \
        sendreq->req_count > 0 ) {                                                 \
        mca_pml_base_bsend_request_free(sendreq->req_buff);                        \
    }                                                                              \
                                                                                   \
    if( !REQUEST_COMPLETE(&sendreq->req_send.req_base.req_ompi)) {                 \
        /* the request may have already been marked complete by the MTL */         \
        ompi_request_complete(&(sendreq->req_send.req_base.req_ompi), true);       \
    }                                                                              \
    sendreq->req_send.req_base.req_pml_complete = true;                            \
                                                                                   \
    if( sendreq->req_send.req_base.req_free_called ) {                             \
        MCA_PML_CM_HVY_SEND_REQUEST_RETURN( sendreq );                             \
    } else {                                                                       \
        if(sendreq->req_send.req_base.req_ompi.req_persistent) {                   \
            /* rewind convertor */                                                 \
            size_t offset = 0;                                                     \
            opal_convertor_set_position(&sendreq->req_send.req_base.req_convertor, \
                                        &offset);                                  \
        }                                                                          \
    }                                                                              \
 } while (0)

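/*
 * On PML completion of a heavy request the bsend bounce buffer (if any) is
 * returned, the MPI-level request is completed unless the MTL already did so,
 * and then the request either goes back to the free list (the user has
 * already freed it) or, for a persistent request, has its convertor rewound
 * to offset 0 so it can be started again.
 */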

/*
 * Release resources associated with a request
 */
#define MCA_PML_CM_HVY_SEND_REQUEST_RETURN(sendreq)                     \
    {                                                                   \
        /* Drop the references taken when the request was initialized */ \
        OMPI_DATATYPE_RELEASE(sendreq->req_send.req_base.req_datatype); \
        OBJ_RELEASE(sendreq->req_send.req_base.req_comm);               \
        OMPI_REQUEST_FINI(&sendreq->req_send.req_base.req_ompi);        \
        opal_convertor_cleanup( &(sendreq->req_send.req_base.req_convertor) ); \
        opal_free_list_return ( &mca_pml_base_send_requests,            \
                               (opal_free_list_item_t*)sendreq);        \
    }

/*
 * The PML has completed a send request. Note that this request
 * may have been orphaned by the user or have already completed
 * at the MPI level.
 * This macro will never be called directly from the upper level, as it should
 * only be an internal call to the PML.
 */
#define MCA_PML_CM_THIN_SEND_REQUEST_PML_COMPLETE(sendreq)                   \
do {                                                                         \
    assert( false == sendreq->req_send.req_base.req_pml_complete );          \
                                                                             \
    if( !REQUEST_COMPLETE(&sendreq->req_send.req_base.req_ompi)) {           \
        /* Should only be called for long messages (maybe synchronous) */    \
        ompi_request_complete(&(sendreq->req_send.req_base.req_ompi), true); \
    }                                                                        \
    sendreq->req_send.req_base.req_pml_complete = true;                      \
                                                                             \
    if( sendreq->req_send.req_base.req_free_called ) {                       \
        MCA_PML_CM_THIN_SEND_REQUEST_RETURN( sendreq );                      \
    }                                                                        \
 } while (0)


/*
 * Release resources associated with a request
 */
#define MCA_PML_CM_THIN_SEND_REQUEST_RETURN(sendreq)                    \
    {                                                                   \
        /* Drop the references taken when the request was initialized */ \
        OMPI_DATATYPE_RELEASE(sendreq->req_send.req_base.req_datatype); \
        OBJ_RELEASE(sendreq->req_send.req_base.req_comm);               \
        OMPI_REQUEST_FINI(&sendreq->req_send.req_base.req_ompi);        \
        opal_convertor_cleanup( &(sendreq->req_send.req_base.req_convertor) ); \
        opal_free_list_return ( &mca_pml_base_send_requests,            \
                               (opal_free_list_item_t*)sendreq);        \
    }

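/*
 * Both *_SEND_REQUEST_RETURN macros undo what the INIT_COMMON macros did:
 * they drop the datatype and communicator references, finalize the OMPI
 * request, clean up the convertor and hand the request back to the
 * mca_pml_base_send_requests free list.
 */
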
extern void
mca_pml_cm_send_request_completion(struct mca_mtl_request_t *mtl_request);

#endif
