root/orte/util/nidmap.c


DEFINITIONS

This source file includes the following definitions.
  1. orte_util_nidmap_create
  2. orte_util_decode_nidmap
  3. orte_util_pass_node_info
  4. orte_util_parse_node_info
  5. orte_util_generate_ppn
  6. orte_util_decode_ppn
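
A minimal usage sketch (the helper name is illustrative, and the relay of the
buffer from the HNP to the daemons is elided): the HNP encodes the node pool
with orte_util_nidmap_create, and each daemon rebuilds it with
orte_util_decode_nidmap, assuming the ORTE runtime is already initialized and
orte_node_pool is populated on the packing side.

    static int nidmap_roundtrip(void)
    {
        int rc;
        opal_buffer_t *buf = OBJ_NEW(opal_buffer_t);

        /* HNP side: encode the daemon/node map into the buffer */
        if (ORTE_SUCCESS != (rc = orte_util_nidmap_create(orte_node_pool, buf))) {
            OBJ_RELEASE(buf);
            return rc;
        }

        /* ...buffer is relayed to the daemons... */

        /* daemon side: rebuild orte_node_pool and the daemon job */
        rc = orte_util_decode_nidmap(buf);
        OBJ_RELEASE(buf);
        return rc;
    }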

   1 /*
   2  * Copyright (c) 2016-2019 Intel, Inc.  All rights reserved.
   3  * Copyright (c) 2018-2019 Research Organization for Information Science
   4  *                         and Technology (RIST).  All rights reserved.
   5  * $COPYRIGHT$
   6  *
   7  * Additional copyrights may follow
   8  *
   9  * $HEADER$
  10  *
  11  */
  12 
  13 #include "orte_config.h"
  14 #include "orte/types.h"
  15 #include "opal/types.h"
  16 
  17 #ifdef HAVE_UNISTD_H
  18 #include <unistd.h>
  19 #endif
  20 #include <ctype.h>
  21 
  22 #include "opal/dss/dss_types.h"
  23 #include "opal/mca/compress/compress.h"
  24 #include "opal/util/argv.h"
  25 
  26 #include "orte/mca/errmgr/errmgr.h"
  27 #include "orte/mca/rmaps/base/base.h"
  28 #include "orte/mca/routed/routed.h"
  29 #include "orte/runtime/orte_globals.h"
  30 
  31 #include "orte/util/nidmap.h"
  32 
  33 int orte_util_nidmap_create(opal_pointer_array_t *pool,
  34                             opal_buffer_t *buffer)
  35 {
  36     char *raw = NULL;
  37     uint8_t *vpids=NULL, u8;
  38     uint16_t u16;
  39     uint32_t u32;
  40     int n, ndaemons, rc, nbytes;
  41     bool compressed;
  42     char **names = NULL, **ranks = NULL;
  43     orte_node_t *nptr;
  44     opal_byte_object_t bo, *boptr;
  45     size_t sz;
  46 
  47     /* pack a flag indicating if the HNP was included in the allocation */
  48     if (orte_hnp_is_allocated) {
  49         u8 = 1;
  50     } else {
  51         u8 = 0;
  52     }
  53     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &u8, 1, OPAL_UINT8))) {
  54         ORTE_ERROR_LOG(rc);
  55         return rc;
  56     }
  57 
  58     /* pack a flag indicating if we are in a managed allocation */
  59     if (orte_managed_allocation) {
  60         u8 = 1;
  61     } else {
  62         u8 = 0;
  63     }
  64     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &u8, 1, OPAL_UINT8))) {
  65         ORTE_ERROR_LOG(rc);
  66         return rc;
  67     }
  68 
  69     /* daemon vpids start from 0 and increase linearly by one
  70      * up to the number of nodes in the system. The vpid is
  71      * a 32-bit value. We don't know how many of the nodes
  72      * in the system have daemons - we may not be using them
  73      * all just yet. However, even the largest systems won't
  74      * have more than a million nodes for quite some time,
  75      * so for now we'll just allocate enough space to hold
  76      * them all. Someone can optimize this further later */
  77     if (256 >= pool->size) {
  78         nbytes = 1;
  79     } else if (65536 >= pool->size) {
  80         nbytes = 2;
  81     } else {
  82         nbytes = 4;
  83     }
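          /* Example: a pool with 300 entries selects nbytes == 2, so the vpid
           * of the daemon on the n-th non-NULL node is stored as a uint16_t at
           * vpids[2*n], with UINT16_MAX marking a node that has no daemon. */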
  84     vpids = (uint8_t*)malloc(nbytes * pool->size);
  85 
  86     ndaemons = 0;
  87     for (n=0; n < pool->size; n++) {
  88         if (NULL == (nptr = (orte_node_t*)opal_pointer_array_get_item(pool, n))) {
  89             continue;
  90         }
  91         /* add the hostname to the argv */
  92         opal_argv_append_nosize(&names, nptr->name);
  93         /* store the vpid */
  94         if (1 == nbytes) {
  95             if (NULL == nptr->daemon) {
  96                 vpids[ndaemons] = UINT8_MAX;
  97             } else {
  98                 vpids[ndaemons] = nptr->daemon->name.vpid;
  99             }
 100         } else if (2 == nbytes) {
 101             if (NULL == nptr->daemon) {
 102                 u16 = UINT16_MAX;
 103             } else {
 104                 u16 = nptr->daemon->name.vpid;
 105             }
 106             memcpy(&vpids[nbytes*ndaemons], &u16, 2);
 107         } else {
 108             if (NULL == nptr->daemon) {
 109                 u32 = UINT32_MAX;
 110             } else {
 111                 u32 = nptr->daemon->name.vpid;
 112             }
 113             memcpy(&vpids[nbytes*ndaemons], &u32, 4);
 114         }
 115         ++ndaemons;
 116     }
 117 
 118     /* construct the string of node names for compression */
 119     raw = opal_argv_join(names, ',');
 120     if (opal_compress.compress_block((uint8_t*)raw, strlen(raw)+1,
 121                                      (uint8_t**)&bo.bytes, &sz)) {
 122         /* mark that this was compressed */
 123         compressed = true;
 124         bo.size = sz;
 125     } else {
 126         /* mark that this was not compressed */
 127         compressed = false;
 128         bo.bytes = (uint8_t*)raw;
 129         bo.size = strlen(raw)+1;
 130     }
 131     /* indicate compression */
 132     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &compressed, 1, OPAL_BOOL))) {
 133         if (compressed) {
 134             free(bo.bytes);
 135         }
 136         goto cleanup;
 137     }
 138     /* if compressed, provide the uncompressed size */
 139     if (compressed) {
 140         sz = strlen(raw)+1;
 141         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &sz, 1, OPAL_SIZE))) {
 142             free(bo.bytes);
 143             goto cleanup;
 144         }
 145     }
 146     /* add the object */
 147     boptr = &bo;
 148     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &boptr, 1, OPAL_BYTE_OBJECT))) {
 149         if (compressed) {
 150             free(bo.bytes);
 151         }
 152         goto cleanup;
 153     }
 154     if (compressed) {
 155         free(bo.bytes);
 156     }
 157 
 158     /* compress the vpids */
 159     if (opal_compress.compress_block(vpids, nbytes*ndaemons,
 160                                      (uint8_t**)&bo.bytes, &sz)) {
 161         /* mark that this was compressed */
 162         compressed = true;
 163         bo.size = sz;
 164     } else {
 165         /* mark that this was not compressed */
 166         compressed = false;
 167         bo.bytes = vpids;
 168         bo.size = nbytes*ndaemons;
 169     }
 170     /* indicate compression */
 171     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &compressed, 1, OPAL_BOOL))) {
 172         if (compressed) {
 173             free(bo.bytes);
 174         }
 175         goto cleanup;
 176     }
 177     /* provide the #bytes/vpid */
 178     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &nbytes, 1, OPAL_INT))) {
 179         if (compressed) {
 180             free(bo.bytes);
 181         }
 182         goto cleanup;
 183     }
 184     /* if compressed, provide the uncompressed size */
 185     if (compressed) {
 186         sz = nbytes*ndaemons;
 187         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &sz, 1, OPAL_SIZE))) {
 188             free(bo.bytes);
 189             goto cleanup;
 190         }
 191     }
 192     /* add the object */
 193     boptr = &bo;
 194     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &boptr, 1, OPAL_BYTE_OBJECT))) {
 195         if (compressed) {
 196             free(bo.bytes);
 197         }
 198         goto cleanup;
 199     }
 200     if (compressed) {
 201         free(bo.bytes);
 202     }
 203 
 204   cleanup:
 205     if (NULL != names) {
 206         opal_argv_free(names);
 207     }
 208     if (NULL != raw) {
 209         free(raw);
 210     }
 211     if (NULL != ranks) {
 212         opal_argv_free(ranks);
 213     }
 214     if (NULL != vpids) {
 215         free(vpids);
 216     }
 217 
 218     return rc;
 219 }
 220 
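      /* The buffer built by orte_util_nidmap_create carries, in order: the
       * hnp-allocated flag, the managed-allocation flag, then the node-name
       * blob (compression flag, uncompressed size only if compressed, byte
       * object) and the daemon-vpid blob (compression flag, #bytes per vpid,
       * uncompressed size only if compressed, byte object). The decoder below
       * unpacks it in exactly that order. */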
 221 int orte_util_decode_nidmap(opal_buffer_t *buf)
 222 {
 223     uint8_t u8, *vp8 = NULL;
 224     uint16_t *vp16 = NULL;
 225     uint32_t *vp32 = NULL, vpid;
 226     int cnt, rc, nbytes, n;
 227     bool compressed;
 228     size_t sz;
 229     opal_byte_object_t *boptr;
 230     char *raw = NULL, **names = NULL;
 231     orte_node_t *nd;
 232     orte_job_t *daemons;
 233     orte_proc_t *proc;
 234     orte_topology_t *t;
 235 
 236     /* unpack the flag indicating if HNP is in allocation */
 237     cnt = 1;
 238     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &u8, &cnt, OPAL_UINT8))) {
 239         ORTE_ERROR_LOG(rc);
 240         goto cleanup;
 241     }
 242     if (1 == u8) {
 243         orte_hnp_is_allocated = true;
 244     } else {
 245         orte_hnp_is_allocated = false;
 246     }
 247 
 248     /* unpack the flag indicating if we are in managed allocation */
 249     cnt = 1;
 250     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &u8, &cnt, OPAL_UINT8))) {
 251         ORTE_ERROR_LOG(rc);
 252         goto cleanup;
 253     }
 254     if (1 == u8) {
 255         orte_managed_allocation = true;
 256     } else {
 257         orte_managed_allocation = false;
 258     }
 259 
 260     /* unpack compression flag for node names */
 261     cnt = 1;
 262     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &compressed, &cnt, OPAL_BOOL))) {
 263         ORTE_ERROR_LOG(rc);
 264         goto cleanup;
 265     }
 266 
 267     /* if compressed, get the uncompressed size */
 268     if (compressed) {
 269         cnt = 1;
 270         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &sz, &cnt, OPAL_SIZE))) {
 271             ORTE_ERROR_LOG(rc);
 272             goto cleanup;
 273         }
 274     }
 275 
 276     /* unpack the nodename object */
 277     cnt = 1;
 278     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &boptr, &cnt, OPAL_BYTE_OBJECT))) {
 279         ORTE_ERROR_LOG(rc);
 280         goto cleanup;
 281     }
 282 
 283     /* if compressed, decompress */
 284     if (compressed) {
 285         if (!opal_compress.decompress_block((uint8_t**)&raw, sz,
 286                                             boptr->bytes, boptr->size)) {
 287             ORTE_ERROR_LOG(ORTE_ERROR);
 288             if (NULL != boptr->bytes) {
 289                 free(boptr->bytes);
 290             }
 291             free(boptr);
 292             rc = ORTE_ERROR;
 293             goto cleanup;
 294         }
 295     } else {
 296         raw = (char*)boptr->bytes;
 297         boptr->bytes = NULL;
 298         boptr->size = 0;
 299     }
 300     if (NULL != boptr->bytes) {
 301         free(boptr->bytes);
 302     }
 303     free(boptr);
 304     names = opal_argv_split(raw, ',');
 305     free(raw);
 306 
 307 
 308     /* unpack compression flag for daemon vpids */
 309     cnt = 1;
 310     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &compressed, &cnt, OPAL_BOOL))) {
 311         ORTE_ERROR_LOG(rc);
 312         goto cleanup;
 313     }
 314 
 315     /* unpack the #bytes/vpid */
 316     cnt = 1;
 317     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &nbytes, &cnt, OPAL_INT))) {
 318         ORTE_ERROR_LOG(rc);
 319         goto cleanup;
 320     }
 321 
 322     /* if compressed, get the uncompressed size */
 323     if (compressed) {
 324         cnt = 1;
 325         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &sz, &cnt, OPAL_SIZE))) {
 326             ORTE_ERROR_LOG(rc);
 327             goto cleanup;
 328         }
 329     }
 330 
 331     /* unpack the vpid object */
 332     cnt = 1;
 333     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &boptr, &cnt, OPAL_BYTE_OBJECT))) {
 334         ORTE_ERROR_LOG(rc);
 335         goto cleanup;
 336     }
 337 
 338     /* if compressed, decompress */
 339     if (compressed) {
 340         if (!opal_compress.decompress_block((uint8_t**)&vp8, sz,
 341                                             boptr->bytes, boptr->size)) {
 342             ORTE_ERROR_LOG(ORTE_ERROR);
 343             if (NULL != boptr->bytes) {
 344                 free(boptr->bytes);
 345             }
 346             free(boptr);
 347             rc = ORTE_ERROR;
 348             goto cleanup;
 349         }
 350     } else {
 351         vp8 = (uint8_t*)boptr->bytes;
 352         sz = boptr->size;
 353         boptr->bytes = NULL;
 354         boptr->size = 0;
 355     }
 356     if (NULL != boptr->bytes) {
 357         free(boptr->bytes);
 358     }
 359     free(boptr);
 360     if (2 == nbytes) {
 361         vp16 = (uint16_t*)vp8;
 362         vp8 = NULL;
 363     } else if (4 == nbytes) {
 364         vp32 = (uint32_t*)vp8;
 365         vp8 = NULL;
 366     }
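          /* Example: with nbytes == 2 the decoded blob is treated as an array
           * of uint16_t daemon vpids, one entry per name in the node list,
           * with UINT16_MAX marking a node that has no daemon. */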
 367 
 368     /* if we are the HNP, we don't need any of this stuff */
 369     if (ORTE_PROC_IS_HNP) {
 370         goto cleanup;
 371     }
 372 
 373     /* get the daemon job object */
 374     daemons = orte_get_job_data_object(ORTE_PROC_MY_NAME->jobid);
 375 
 376     /* get our topology */
 377     for (n=0; n < orte_node_topologies->size; n++) {
 378         if (NULL != (t = (orte_topology_t*)opal_pointer_array_get_item(orte_node_topologies, n))) {
 379             break;
 380         }
 381     }
 382 
 383     /* create the node pool array - this will include
 384      * _all_ nodes known to the allocation */
 385     for (n=0; NULL != names[n]; n++) {
 386         /* add this name to the pool */
 387         nd = OBJ_NEW(orte_node_t);
 388         nd->name = strdup(names[n]);
 389         nd->index = n;
 390         opal_pointer_array_set_item(orte_node_pool, n, nd);
 391         /* set the topology - always default to homogeneous
 392          * as that is the most common scenario */
 393         nd->topology = t;
 394         /* see if it has a daemon on it */
 395         if (1 == nbytes && UINT8_MAX != vp8[n]) {
 396             vpid = vp8[n];
 397         } else if (2 == nbytes && UINT16_MAX != vp16[n]) {
 398             vpid = vp16[n];
 399         } else if (4 == nbytes && UINT32_MAX != vp32[n]) {
 400             vpid = vp32[n];
 401         } else {
 402             vpid = UINT32_MAX;
 403         }
 404         if (UINT32_MAX != vpid) {
 405             if (NULL == (proc = (orte_proc_t*)opal_pointer_array_get_item(daemons->procs, vpid))) {
 406                 proc = OBJ_NEW(orte_proc_t);
 407                 proc->name.jobid = ORTE_PROC_MY_NAME->jobid;
 408                 proc->name.vpid = vpid;
 409                 proc->state = ORTE_PROC_STATE_RUNNING;
 410                 ORTE_FLAG_SET(proc, ORTE_PROC_FLAG_ALIVE);
 411                 daemons->num_procs++;
 412                 opal_pointer_array_set_item(daemons->procs, proc->name.vpid, proc);
 413             }
 414             OBJ_RETAIN(nd);
 415             proc->node = nd;
 416             OBJ_RETAIN(proc);
 417             nd->daemon = proc;
 418         }
 419     }
 420 
 421     /* update num procs */
 422     if (orte_process_info.num_procs != daemons->num_procs) {
 423         orte_process_info.num_procs = daemons->num_procs;
 424     }
 425     /* need to update the routing plan */
 426     orte_routed.update_routing_plan();
 427 
 428     if (orte_process_info.max_procs < orte_process_info.num_procs) {
 429         orte_process_info.max_procs = orte_process_info.num_procs;
 430     }
 431 
 432   cleanup:
 433     if (NULL != vp8) {
 434         free(vp8);
 435     }
 436     if (NULL != vp16) {
 437         free(vp16);
 438     }
 439     if (NULL != vp32) {
 440         free(vp32);
 441     }
 442     if (NULL != names) {
 443         opal_argv_free(names);
 444     }
 445     return rc;
 446 }
 447 
 448 int orte_util_pass_node_info(opal_buffer_t *buffer)
 449 {
 450     uint16_t *slots=NULL, slot = UINT16_MAX;
 451     uint8_t *flags=NULL, flag = UINT8_MAX;
 452     int8_t i8, ntopos;
 453     int rc, n, nbitmap, nstart;
 454     bool compressed, unislots = true, uniflags = true, unitopos = true;
 455     orte_node_t *nptr;
 456     opal_byte_object_t bo, *boptr;
 457     size_t sz, nslots;
 458     opal_buffer_t bucket;
 459     orte_topology_t *t;
 460 
 461     /* make room for the number of slots on each node */
 462     nslots = sizeof(uint16_t) * orte_node_pool->size;
 463     slots = (uint16_t*)malloc(nslots);
 464     /* and for the flags for each node - only need one bit/node */
 465     nbitmap = (orte_node_pool->size / 8) + 1;
 466     flags = (uint8_t*)calloc(1, nbitmap);
 467 
 468     /* handle the topologies - as the most common case by far
 469      * is to have homogeneous topologies, we only send them
 470      * if something is different. We know that the HNP is
 471      * the first topology, and that any differing topology
 472      * on the compute nodes must follow. So send the topologies
 473      * if and only if:
 474      *
 475      * (a) the HNP is being used to house application procs and
 476      *     there is more than one topology in our array; or
 477      *
 478      * (b) the HNP is not being used, but there are more than
 479      *     two topologies in our array, thus indicating that
 480      *     there are multiple topologies on the compute nodes
 481      */
 482     if (!orte_hnp_is_allocated || (ORTE_GET_MAPPING_DIRECTIVE(orte_rmaps_base.mapping) & ORTE_MAPPING_NO_USE_LOCAL)) {
 483         nstart = 1;
 484     } else {
 485         nstart = 0;
 486     }
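          /* Example: with the HNP topology at index 0 and one differing
           * compute-node topology, an HNP that hosts application procs gives
           * nstart == 0 and ntopos == 2, so the bucket below is sent; a
           * homogeneous allocation yields ntopos <= 1 and only the count
           * is packed. */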
 487     OBJ_CONSTRUCT(&bucket, opal_buffer_t);
 488     ntopos = 0;
 489     for (n=nstart; n < orte_node_topologies->size; n++) {
 490         if (NULL == (t = (orte_topology_t*)opal_pointer_array_get_item(orte_node_topologies, n))) {
 491             continue;
 492         }
 493         /* pack the index */
 494         if (ORTE_SUCCESS != (rc = opal_dss.pack(&bucket, &t->index, 1, OPAL_INT))) {
 495             ORTE_ERROR_LOG(rc);
 496             OBJ_DESTRUCT(&bucket);
 497             goto cleanup;
 498         }
 499         /* pack this topology string */
 500         if (ORTE_SUCCESS != (rc = opal_dss.pack(&bucket, &t->sig, 1, OPAL_STRING))) {
 501             ORTE_ERROR_LOG(rc);
 502             OBJ_DESTRUCT(&bucket);
 503             goto cleanup;
 504         }
 505         /* pack the topology itself */
 506         if (ORTE_SUCCESS != (rc = opal_dss.pack(&bucket, &t->topo, 1, OPAL_HWLOC_TOPO))) {
 507             ORTE_ERROR_LOG(rc);
 508             OBJ_DESTRUCT(&bucket);
 509             goto cleanup;
 510         }
 511         ++ntopos;
 512     }
 513     /* pack the number of topologies in allocation */
 514     if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &ntopos, 1, OPAL_INT8))) {
 515         goto cleanup;
 516     }
 517     if (1 < ntopos) {
 518         /* need to send them along */
 519         if (opal_compress.compress_block((uint8_t*)bucket.base_ptr, bucket.bytes_used,
 520                                          &bo.bytes, &sz)) {
 521             /* the data was compressed - mark that we compressed it */
 522             compressed = true;
 523             if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &compressed, 1, OPAL_BOOL))) {
 524                 ORTE_ERROR_LOG(rc);
 525                 OBJ_DESTRUCT(&bucket);
 526                 goto cleanup;
 527             }
 528             /* pack the uncompressed length */
 529             if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &bucket.bytes_used, 1, OPAL_SIZE))) {
 530                 ORTE_ERROR_LOG(rc);
 531                 OBJ_DESTRUCT(&bucket);
 532                 goto cleanup;
 533             }
 534             bo.size = sz;
 535         } else {
 536             /* mark that it was not compressed */
 537             compressed = false;
 538             if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &compressed, 1, OPAL_BOOL))) {
 539                 ORTE_ERROR_LOG(rc);
 540                 OBJ_DESTRUCT(&bucket);
 541                 goto cleanup;
 542             }
 543             opal_dss.unload(&bucket, (void**)&bo.bytes, &bo.size);
 544         }
 545         unitopos = false;
 546         /* pack the info */
 547         boptr = &bo;
 548         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &boptr, 1, OPAL_BYTE_OBJECT))) {
 549             ORTE_ERROR_LOG(rc);
 550             OBJ_DESTRUCT(&bucket);
 551             goto cleanup;
 552         }
 553         OBJ_DESTRUCT(&bucket);
 554         free(bo.bytes);
 555     }
 556 
 557     /* construct the per-node info */
 558     OBJ_CONSTRUCT(&bucket, opal_buffer_t);
 559     for (n=0; n < orte_node_pool->size; n++) {
 560         if (NULL == (nptr = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, n))) {
 561             continue;
 562         }
 563         /* track the topology, if required */
 564         if (!unitopos) {
 565             i8 = nptr->topology->index;
 566             if (ORTE_SUCCESS != (rc = opal_dss.pack(&bucket, &i8, 1, OPAL_INT8))) {
 567                 ORTE_ERROR_LOG(rc);
 568                 OBJ_DESTRUCT(&bucket);
 569                 goto cleanup;
 570             }
 571         }
 572         /* store the number of slots */
 573         slots[n] = nptr->slots;
 574         if (UINT16_MAX == slot) {
 575             slot = nptr->slots;
 576         } else if (slot != nptr->slots) {
 577             unislots = false;
 578         }
 579         /* store the flag */
 580         if (ORTE_FLAG_TEST(nptr, ORTE_NODE_FLAG_SLOTS_GIVEN)) {
 581             flags[n/8] |= (1 << (7 - (n % 8)));
 582             if (UINT8_MAX == flag) {
 583                 flag = 1;
 584             } else if (1 != flag) {
 585                 uniflags = false;
 586             }
 587         } else {
 588             if (UINT8_MAX == flag) {
 589                 flag = 0;
 590             } else if (0 != flag) {
 591                 uniflags = false;
 592             }
 593         }
 594     }
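          /* The SLOTS_GIVEN bits are packed most-significant-bit first: node n
           * lands in bit (7 - n%8) of flags[n/8], so if only nodes 0 and 9 are
           * flagged, flags[0] == 0x80 and flags[1] == 0x40. */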
 595 
 596     /* deal with the topology assignments */
 597     if (!unitopos) {
 598         if (opal_compress.compress_block((uint8_t*)bucket.base_ptr, bucket.bytes_used,
 599                                          (uint8_t**)&bo.bytes, &sz)) {
 600             /* mark that this was compressed */
 601             compressed = true;
 602             bo.size = sz;
 603         } else {
 604             /* mark that this was not compressed */
 605             compressed = false;
 606             bo.bytes = bucket.base_ptr;
 607             bo.size = bucket.bytes_used;
 608         }
 609         /* indicate compression */
 610         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &compressed, 1, OPAL_BOOL))) {
 611             if (compressed) {
 612                 free(bo.bytes);
 613             }
 614             goto cleanup;
 615         }
 616         /* if compressed, provide the uncompressed size */
 617         if (compressed) {
  618             sz = bucket.bytes_used;
 619             if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &sz, 1, OPAL_SIZE))) {
 620                 free(bo.bytes);
 621                 goto cleanup;
 622             }
 623         }
 624         /* add the object */
 625         boptr = &bo;
 626         rc = opal_dss.pack(buffer, &boptr, 1, OPAL_BYTE_OBJECT);
 627         if (compressed) {
 628             free(bo.bytes);
 629         }
 630     }
 631     OBJ_DESTRUCT(&bucket);
 632 
 633     /* if we have uniform #slots, then just flag it - no
 634      * need to pass anything */
 635     if (unislots) {
 636         i8 = -1 * slot;
 637         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &i8, 1, OPAL_INT8))) {
 638             goto cleanup;
 639         }
 640     } else {
 641         if (opal_compress.compress_block((uint8_t*)slots, nslots,
 642                                          (uint8_t**)&bo.bytes, &sz)) {
 643             /* mark that this was compressed */
 644             i8 = 1;
 645             compressed = true;
 646             bo.size = sz;
 647         } else {
 648             /* mark that this was not compressed */
 649             i8 = 0;
 650             compressed = false;
  651             bo.bytes = (uint8_t*)slots;
  652             bo.size = nslots;
 653         }
 654         /* indicate compression */
 655         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &i8, 1, OPAL_INT8))) {
 656             if (compressed) {
 657                 free(bo.bytes);
 658             }
 659             goto cleanup;
 660         }
 661         /* if compressed, provide the uncompressed size */
 662         if (compressed) {
 663             sz = nslots;
 664             if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &sz, 1, OPAL_SIZE))) {
 665                 free(bo.bytes);
 666                 goto cleanup;
 667             }
 668         }
 669         /* add the object */
 670         boptr = &bo;
 671         rc = opal_dss.pack(buffer, &boptr, 1, OPAL_BYTE_OBJECT);
 672         if (compressed) {
 673             free(bo.bytes);
 674         }
 675     }
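          /* On the wire the slot info is thus a single int8: a negative value
           * means every node has (-i8) slots and nothing else follows, while
           * 1 (compressed) or 0 (raw) is followed by the uncompressed size
           * (compressed case only) and the byte object that
           * orte_util_parse_node_info unpacks. */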
 676 
 677     /* if we have uniform flags, then just flag it - no
 678      * need to pass anything */
 679     if (uniflags) {
 680         if (1 == flag) {
 681             i8 = -1;
 682         } else {
 683             i8 = -2;
 684         }
 685         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &i8, 1, OPAL_INT8))) {
 686             goto cleanup;
 687         }
 688     } else {
 689         if (opal_compress.compress_block(flags, nbitmap,
 690                                          (uint8_t**)&bo.bytes, &sz)) {
 691             /* mark that this was compressed */
 692             i8 = 2;
 693             compressed = true;
 694             bo.size = sz;
 695         } else {
 696             /* mark that this was not compressed */
 697             i8 = 3;
 698             compressed = false;
 699             bo.bytes = flags;
 700             bo.size = nbitmap;
 701         }
 702         /* indicate compression */
 703         if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &i8, 1, OPAL_INT8))) {
 704             if (compressed) {
 705                 free(bo.bytes);
 706             }
 707             goto cleanup;
 708         }
 709         /* if compressed, provide the uncompressed size */
 710         if (compressed) {
 711             sz = nbitmap;
 712             if (ORTE_SUCCESS != (rc = opal_dss.pack(buffer, &sz, 1, OPAL_SIZE))) {
 713                 free(bo.bytes);
 714                 goto cleanup;
 715             }
 716         }
 717         /* add the object */
 718         boptr = &bo;
 719         rc = opal_dss.pack(buffer, &boptr, 1, OPAL_BYTE_OBJECT);
 720         if (compressed) {
 721             free(bo.bytes);
 722         }
 723     }
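          /* The flag info follows the same pattern: -1 means every node has
           * ORTE_NODE_FLAG_SLOTS_GIVEN set and -2 means none do, while 2
           * (compressed) or 3 (raw) announces the per-node bitmap sent as a
           * byte object, preceded by the uncompressed size only when
           * compressed. */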
 724 
 725   cleanup:
 726     if (NULL != slots) {
 727         free(slots);
 728     }
 729     if (NULL != flags) {
 730         free(flags);
 731     }
 732     return rc;
 733 }
 734 
 735 int orte_util_parse_node_info(opal_buffer_t *buf)
 736 {
 737     int8_t i8;
 738     bool compressed;
 739     int rc = ORTE_SUCCESS, cnt, n, m, index;
 740     orte_node_t *nptr;
 741     size_t sz;
 742     opal_byte_object_t *boptr;
 743     uint16_t *slots = NULL;
 744     uint8_t *flags = NULL;
 745     uint8_t *topologies = NULL;
 746     uint8_t *bytes = NULL;
 747     orte_topology_t *t2;
 748     hwloc_topology_t topo;
 749     char *sig;
 750     opal_buffer_t bucket;
 751 
 752     /* check to see if we have uniform topologies */
 753     cnt = 1;
 754     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &i8, &cnt, OPAL_INT8))) {
 755         ORTE_ERROR_LOG(rc);
 756         goto cleanup;
 757     }
 758     /* we already defaulted to uniform topology, so only need to
 759      * process this if it is non-uniform */
 760     if (1 < i8) {
 761         /* unpack the compression flag */
 762         cnt = 1;
 763         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &compressed, &cnt, OPAL_BOOL))) {
 764             ORTE_ERROR_LOG(rc);
 765             goto cleanup;
 766         }
 767         if (compressed) {
 768             /* get the uncompressed size */
 769             cnt = 1;
 770             if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &sz, &cnt, OPAL_SIZE))) {
 771                 ORTE_ERROR_LOG(rc);
 772                 goto cleanup;
 773             }
 774         }
 775         /* unpack the topology object */
 776         cnt = 1;
 777         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &boptr, &cnt, OPAL_BYTE_OBJECT))) {
 778             ORTE_ERROR_LOG(rc);
 779             goto cleanup;
 780         }
 781 
 782         /* if compressed, decompress */
 783         if (compressed) {
 784             if (!opal_compress.decompress_block((uint8_t**)&bytes, sz,
 785                                                 boptr->bytes, boptr->size)) {
 786                 ORTE_ERROR_LOG(ORTE_ERROR);
 787                 if (NULL != boptr->bytes) {
 788                     free(boptr->bytes);
 789                 }
 790                 free(boptr);
 791                 rc = ORTE_ERROR;
 792                 goto cleanup;
 793             }
 794         } else {
 795             bytes = (uint8_t*)boptr->bytes;
 796             sz = boptr->size;
 797             boptr->bytes = NULL;
 798             boptr->size = 0;
 799         }
 800         if (NULL != boptr->bytes) {
 801             free(boptr->bytes);
  802         }
              free(boptr);
 803         /* setup to unpack */
 804         OBJ_CONSTRUCT(&bucket, opal_buffer_t);
 805         opal_dss.load(&bucket, bytes, sz);
 806 
 807         for (n=0; n < i8; n++) {
 808             /* unpack the index */
 809             cnt = 1;
 810             if (ORTE_SUCCESS != (rc = opal_dss.unpack(&bucket, &index, &cnt, OPAL_INT))) {
 811                 ORTE_ERROR_LOG(rc);
 812                 goto cleanup;
 813             }
 814             /* unpack the signature */
 815             cnt = 1;
 816             if (ORTE_SUCCESS != (rc = opal_dss.unpack(&bucket, &sig, &cnt, OPAL_STRING))) {
 817                 ORTE_ERROR_LOG(rc);
 818                 goto cleanup;
 819             }
 820             /* unpack the topology */
 821             cnt = 1;
 822             if (ORTE_SUCCESS != (rc = opal_dss.unpack(&bucket, &topo, &cnt, OPAL_HWLOC_TOPO))) {
 823                 ORTE_ERROR_LOG(rc);
 824                 goto cleanup;
 825             }
 826             /* record it */
 827             t2 = OBJ_NEW(orte_topology_t);
 828             t2->index = index;
 829             t2->sig = sig;
 830             t2->topo = topo;
 831             opal_pointer_array_set_item(orte_node_topologies, index, t2);
 832         }
 833         OBJ_DESTRUCT(&bucket);
 834 
 835         /* now get the array of assigned topologies */
 836         /* unpack the compression flag */
 837         cnt = 1;
 838         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &compressed, &cnt, OPAL_BOOL))) {
 839             ORTE_ERROR_LOG(rc);
 840             goto cleanup;
 841         }
 842         if (compressed) {
 843             /* get the uncompressed size */
 844             cnt = 1;
 845             if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &sz, &cnt, OPAL_SIZE))) {
 846                 ORTE_ERROR_LOG(rc);
 847                 goto cleanup;
 848             }
 849         }
 850         /* unpack the topologies object */
 851         cnt = 1;
 852         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &boptr, &cnt, OPAL_BYTE_OBJECT))) {
 853             ORTE_ERROR_LOG(rc);
 854             goto cleanup;
 855         }
 856         /* if compressed, decompress */
 857         if (compressed) {
 858             if (!opal_compress.decompress_block((uint8_t**)&bytes, sz,
 859                                                 boptr->bytes, boptr->size)) {
 860                 ORTE_ERROR_LOG(ORTE_ERROR);
 861                 if (NULL != boptr->bytes) {
 862                     free(boptr->bytes);
 863                 }
 864                 free(boptr);
 865                 rc = ORTE_ERROR;
 866                 goto cleanup;
 867             }
 868         } else {
 869             bytes = (uint8_t*)boptr->bytes;
 870             sz = boptr->size;
 871             boptr->bytes = NULL;
 872             boptr->size = 0;
 873         }
 874         if (NULL != boptr->bytes) {
 875             free(boptr->bytes);
 876         }
 877         free(boptr);
 878         OBJ_CONSTRUCT(&bucket, opal_buffer_t);
 879         opal_dss.load(&bucket, bytes, sz);
 880         /* cycle across the node pool and assign the values */
 881         for (n=0; n < orte_node_pool->size; n++) {
 882             if (NULL != (nptr = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, n))) {
 883                 /* unpack the next topology index */
 884                 cnt = 1;
 885                 if (OPAL_SUCCESS != (rc = opal_dss.unpack(&bucket, &i8, &cnt, OPAL_INT8))) {
 886                     ORTE_ERROR_LOG(rc);
 887                     goto cleanup;
 888                 }
  889                 nptr->topology = (orte_topology_t*)opal_pointer_array_get_item(orte_node_topologies, i8);
 890             }
  891         }
              OBJ_DESTRUCT(&bucket);
 892     }
 893 
 894     /* check to see if we have uniform slot assignments */
 895     cnt = 1;
 896     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &i8, &cnt, OPAL_INT8))) {
 897         ORTE_ERROR_LOG(rc);
 898         goto cleanup;
 899     }
 900 
 901     /* if so, then make every node the same */
 902     if (0 > i8) {
 903         i8 = -1 * i8;
 904         for (n=0; n < orte_node_pool->size; n++) {
 905             if (NULL != (nptr = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, n))) {
 906                 nptr->slots = i8;
 907             }
 908         }
 909     } else {
 910         /* if compressed, get the uncompressed size */
 911         if (1 == i8) {
 912             cnt = 1;
 913             if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &sz, &cnt, OPAL_SIZE))) {
 914                 ORTE_ERROR_LOG(rc);
 915                 goto cleanup;
 916             }
 917         }
 918         /* unpack the slots object */
 919         cnt = 1;
 920         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &boptr, &cnt, OPAL_BYTE_OBJECT))) {
 921             ORTE_ERROR_LOG(rc);
 922             goto cleanup;
 923         }
 924         /* if compressed, decompress */
 925         if (1 == i8) {
 926             if (!opal_compress.decompress_block((uint8_t**)&slots, sz,
 927                                                 boptr->bytes, boptr->size)) {
 928                 ORTE_ERROR_LOG(ORTE_ERROR);
 929                 if (NULL != boptr->bytes) {
 930                     free(boptr->bytes);
 931                 }
 932                 free(boptr);
 933                 rc = ORTE_ERROR;
 934                 goto cleanup;
 935             }
 936         } else {
 937             slots = (uint16_t*)boptr->bytes;
 938             boptr->bytes = NULL;
 939             boptr->size = 0;
 940         }
 941         if (NULL != boptr->bytes) {
 942             free(boptr->bytes);
 943         }
 944         free(boptr);
 945         /* cycle across the node pool and assign the values */
 946         for (n=0, m=0; n < orte_node_pool->size; n++) {
 947             if (NULL != (nptr = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, n))) {
 948                 nptr->slots = slots[m];
 949                 ++m;
 950             }
 951         }
 952     }
 953 
 954     /* check to see if we have uniform flag assignments */
 955     cnt = 1;
 956     if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &i8, &cnt, OPAL_INT8))) {
 957         ORTE_ERROR_LOG(rc);
 958         goto cleanup;
 959     }
 960 
 961     /* if so, then make every node the same */
 962     if (0 > i8) {
  963         i8 += 2;
 964         for (n=0; n < orte_node_pool->size; n++) {
 965             if (NULL != (nptr = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, n))) {
 966                 if (i8) {
 967                     ORTE_FLAG_SET(nptr, ORTE_NODE_FLAG_SLOTS_GIVEN);
 968                 } else {
 969                     ORTE_FLAG_UNSET(nptr, ORTE_NODE_FLAG_SLOTS_GIVEN);
 970                 }
 971             }
 972         }
 973     } else {
 974         /* if compressed, get the uncompressed size */
  975         if (2 == i8) {
 976             cnt = 1;
 977             if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &sz, &cnt, OPAL_SIZE))) {
 978                 ORTE_ERROR_LOG(rc);
 979                 goto cleanup;
 980             }
 981         }
  982         /* unpack the flags object */
 983         cnt = 1;
 984         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &boptr, &cnt, OPAL_BYTE_OBJECT))) {
 985             ORTE_ERROR_LOG(rc);
 986             goto cleanup;
 987         }
 988         /* if compressed, decompress */
  989         if (2 == i8) {
 990             if (!opal_compress.decompress_block((uint8_t**)&flags, sz,
 991                                                 boptr->bytes, boptr->size)) {
 992                 ORTE_ERROR_LOG(ORTE_ERROR);
 993                 if (NULL != boptr->bytes) {
 994                     free(boptr->bytes);
 995                 }
 996                 free(boptr);
 997                 rc = ORTE_ERROR;
 998                 goto cleanup;
 999             }
1000         } else {
1001             flags = (uint8_t*)boptr->bytes;
1002             boptr->bytes = NULL;
1003             boptr->size = 0;
1004         }
1005         if (NULL != boptr->bytes) {
1006             free(boptr->bytes);
1007         }
1008         free(boptr);
1009         /* cycle across the node pool and assign the values */
1010         for (n=0, m=0; n < orte_node_pool->size; n++) {
1011             if (NULL != (nptr = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, n))) {
 1012                 if (flags[n/8] & (1 << (7 - (n % 8)))) {
1013                     ORTE_FLAG_SET(nptr, ORTE_NODE_FLAG_SLOTS_GIVEN);
1014                 } else {
1015                     ORTE_FLAG_UNSET(nptr, ORTE_NODE_FLAG_SLOTS_GIVEN);
1016                 }
1017                 ++m;
1018             }
1019         }
1020     }
1021 
1022   cleanup:
1023     if (NULL != slots) {
1024         free(slots);
1025     }
1026     if (NULL != flags) {
1027         free(flags);
1028     }
1029     if (NULL != topologies) {
1030         free(topologies);
1031     }
1032     return rc;
1033 }
1034 
1035 
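      /* For each app_context in the job, orte_util_generate_ppn packs one
       * (possibly compressed) byte object holding a (node index, proc count)
       * pair for every node that hosts procs of the job; orte_util_decode_ppn
       * consumes one such object per app_context to rebuild the map remotely. */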
1036 int orte_util_generate_ppn(orte_job_t *jdata,
1037                            opal_buffer_t *buf)
1038 {
1039     uint16_t ppn;
1040     uint8_t *bytes;
1041     int32_t nbytes;
1042     int rc = ORTE_SUCCESS;
1043     orte_app_idx_t i;
1044     int j, k;
1045     opal_byte_object_t bo, *boptr;
1046     bool compressed;
1047     orte_node_t *nptr;
1048     orte_proc_t *proc;
1049     size_t sz;
1050     opal_buffer_t bucket;
1051 
1052     OBJ_CONSTRUCT(&bucket, opal_buffer_t);
1053 
1054     for (i=0; i < jdata->num_apps; i++) {
1055         /* for each app_context */
1056         for (j=0; j < jdata->map->nodes->size; j++) {
1057             if (NULL == (nptr = (orte_node_t*)opal_pointer_array_get_item(jdata->map->nodes, j))) {
1058                 continue;
1059             }
1060             if (NULL == nptr->daemon) {
1061                 continue;
1062             }
1063             ppn = 0;
1064             for (k=0; k < nptr->procs->size; k++) {
1065                 if (NULL != (proc = (orte_proc_t*)opal_pointer_array_get_item(nptr->procs, k))) {
1066                     if (proc->name.jobid == jdata->jobid) {
1067                         ++ppn;
1068                     }
1069                 }
1070             }
1071             if (0 < ppn) {
1072                 if (ORTE_SUCCESS != (rc = opal_dss.pack(&bucket, &nptr->index, 1, ORTE_STD_CNTR))) {
1073                     goto cleanup;
1074                 }
1075                 if (ORTE_SUCCESS != (rc = opal_dss.pack(&bucket, &ppn, 1, OPAL_UINT16))) {
1076                     goto cleanup;
1077                 }
1078             }
1079         }
1080         opal_dss.unload(&bucket, (void**)&bytes, &nbytes);
1081 
1082         if (opal_compress.compress_block(bytes, (size_t)nbytes,
1083                                          (uint8_t**)&bo.bytes, &sz)) {
1084             /* mark that this was compressed */
1085             compressed = true;
1086             bo.size = sz;
1087         } else {
1088             /* mark that this was not compressed */
1089             compressed = false;
1090             bo.bytes = bytes;
1091             bo.size = nbytes;
1092         }
1093         /* indicate compression */
1094         if (ORTE_SUCCESS != (rc = opal_dss.pack(buf, &compressed, 1, OPAL_BOOL))) {
1095             if (compressed) {
1096                 free(bo.bytes);
1097             }
1098             goto cleanup;
1099         }
1100         /* if compressed, provide the uncompressed size */
1101         if (compressed) {
1102             sz = nbytes;
1103             if (ORTE_SUCCESS != (rc = opal_dss.pack(buf, &sz, 1, OPAL_SIZE))) {
1104                 free(bo.bytes);
1105                 goto cleanup;
1106             }
1107         }
1108         /* add the object */
1109         boptr = &bo;
1110         rc = opal_dss.pack(buf, &boptr, 1, OPAL_BYTE_OBJECT);
1111         if (OPAL_SUCCESS != rc) {
1112             break;
1113         }
1114     }
1115 
1116   cleanup:
1117     OBJ_DESTRUCT(&bucket);
1118     return rc;
1119 }
1120 
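      /* For every (node index, ppn) pair unpacked below, the node is added to
       * the job map once (the MAPPED flag guards against duplicates) and ppn
       * orte_proc_t objects are created on it; their vpids remain undefined
       * until the overall ranking is computed. */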
1121 int orte_util_decode_ppn(orte_job_t *jdata,
1122                          opal_buffer_t *buf)
1123 {
1124     orte_std_cntr_t index;
1125     orte_app_idx_t n;
1126     int cnt, rc, m;
1127     opal_byte_object_t *boptr;
1128     bool compressed;
1129     uint8_t *bytes;
1130     size_t sz;
1131     uint16_t ppn, k;
1132     orte_node_t *node;
1133     orte_proc_t *proc;
1134     opal_buffer_t bucket;
1135 
1136     /* reset any flags */
1137     for (m=0; m < orte_node_pool->size; m++) {
1138         if (NULL != (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, m))) {
1139             ORTE_FLAG_UNSET(node, ORTE_NODE_FLAG_MAPPED);
1140         }
1141     }
1142 
1143     for (n=0; n < jdata->num_apps; n++) {
1144         /* unpack the compression flag */
1145         cnt = 1;
1146         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &compressed, &cnt, OPAL_BOOL))) {
1147             ORTE_ERROR_LOG(rc);
1148             return rc;
1149         }
1150         /* if compressed, unpack the raw size */
1151         if (compressed) {
1152             cnt = 1;
1153             if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &sz, &cnt, OPAL_SIZE))) {
1154                 ORTE_ERROR_LOG(rc);
1155                 return rc;
1156             }
1157         }
1158         /* unpack the byte object describing this app */
1159         cnt = 1;
1160         if (OPAL_SUCCESS != (rc = opal_dss.unpack(buf, &boptr, &cnt, OPAL_BYTE_OBJECT))) {
1161             ORTE_ERROR_LOG(rc);
1162             return rc;
1163         }
1164 
1165         if (ORTE_PROC_IS_HNP) {
1166             /* just discard it */
1167             free(boptr->bytes);
1168             free(boptr);
1169             continue;
1170         }
1171 
1172         /* decompress if required */
1173         if (compressed) {
1174             if (!opal_compress.decompress_block(&bytes, sz,
1175                                                 boptr->bytes, boptr->size)) {
1176                 ORTE_ERROR_LOG(ORTE_ERROR);
1177                 OBJ_RELEASE(boptr);
1178                 return ORTE_ERROR;
1179             }
1180         } else {
1181             bytes = boptr->bytes;
1182             sz = boptr->size;
1183             boptr->bytes = NULL;
1184             boptr->size = 0;
1185         }
1186         if (NULL != boptr->bytes) {
1187             free(boptr->bytes);
1188         }
1189         free(boptr);
1190 
1191         /* setup to unpack */
1192         OBJ_CONSTRUCT(&bucket, opal_buffer_t);
1193         opal_dss.load(&bucket, bytes, sz);
1194 
1195         /* unpack each node and its ppn */
1196         cnt = 1;
1197         while (OPAL_SUCCESS == (rc = opal_dss.unpack(&bucket, &index, &cnt, ORTE_STD_CNTR))) {
1198             /* get the corresponding node object */
1199             if (NULL == (node = (orte_node_t*)opal_pointer_array_get_item(orte_node_pool, index))) {
1200                 rc = ORTE_ERR_NOT_FOUND;
1201                 ORTE_ERROR_LOG(rc);
1202                 goto error;
1203             }
1204             /* add the node to the job map if not already assigned */
1205             if (!ORTE_FLAG_TEST(node, ORTE_NODE_FLAG_MAPPED)) {
1206                 OBJ_RETAIN(node);
1207                 opal_pointer_array_add(jdata->map->nodes, node);
1208                 ORTE_FLAG_SET(node, ORTE_NODE_FLAG_MAPPED);
1209             }
1210             /* get the ppn */
1211             cnt = 1;
1212             if (OPAL_SUCCESS != (rc = opal_dss.unpack(&bucket, &ppn, &cnt, OPAL_UINT16))) {
1213                 ORTE_ERROR_LOG(rc);
1214                 goto error;
1215             }
1216             /* create a proc object for each one */
1217             for (k=0; k < ppn; k++) {
1218                 proc = OBJ_NEW(orte_proc_t);
1219                 proc->name.jobid = jdata->jobid;
1220                 /* leave the vpid undefined as this will be determined
1221                  * later when we do the overall ranking */
1222                 proc->app_idx = n;
1223                 proc->parent = node->daemon->name.vpid;
1224                 OBJ_RETAIN(node);
1225                 proc->node = node;
1226                 /* flag the proc as ready for launch */
1227                 proc->state = ORTE_PROC_STATE_INIT;
1228                 opal_pointer_array_add(node->procs, proc);
1230                 /* we will add the proc to the jdata array when we
1231                  * compute its rank */
1232             }
1233             node->num_procs += ppn;
1234             cnt = 1;
1235         }
1236         OBJ_DESTRUCT(&bucket);
1237     }
1238     if (OPAL_ERR_UNPACK_READ_PAST_END_OF_BUFFER != rc) {
1239         ORTE_ERROR_LOG(rc);
1240     }
1241 
1242     /* reset any flags */
1243     for (m=0; m < jdata->map->nodes->size; m++) {
1244         node = (orte_node_t*)opal_pointer_array_get_item(jdata->map->nodes, m);
1245         if (NULL != node) {
1246             ORTE_FLAG_UNSET(node, ORTE_NODE_FLAG_MAPPED);
1247         }
1248     }
1249     return ORTE_SUCCESS;
1250 
1251   error:
1252     OBJ_DESTRUCT(&bucket);
1253     /* reset any flags */
1254     for (m=0; m < jdata->map->nodes->size; m++) {
1255         node = (orte_node_t*)opal_pointer_array_get_item(jdata->map->nodes, m);
1256         if (NULL != node) {
1257             ORTE_FLAG_UNSET(node, ORTE_NODE_FLAG_MAPPED);
1258         }
1259     }
1260     return rc;
1261 }
