root/opal/mca/hwloc/hwloc201/hwloc/include/hwloc/helper.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes following definitions.
  1. hwloc_get_first_largest_obj_inside_cpuset
  2. hwloc_get_next_obj_inside_cpuset_by_depth
  3. hwloc_get_next_obj_inside_cpuset_by_type
  4. hwloc_get_obj_inside_cpuset_by_depth
  5. hwloc_get_obj_inside_cpuset_by_type
  6. hwloc_get_nbobjs_inside_cpuset_by_depth
  7. hwloc_get_nbobjs_inside_cpuset_by_type
  8. hwloc_get_obj_index_inside_cpuset
  9. hwloc_get_child_covering_cpuset
  10. hwloc_get_obj_covering_cpuset
  11. hwloc_get_next_obj_covering_cpuset_by_depth
  12. hwloc_get_next_obj_covering_cpuset_by_type
  13. hwloc_get_ancestor_obj_by_depth
  14. hwloc_get_ancestor_obj_by_type
  15. hwloc_get_common_ancestor_obj
  16. hwloc_obj_is_in_subtree
  17. hwloc_get_next_child
  18. hwloc_get_cache_type_depth
  19. hwloc_get_cache_covering_cpuset
  20. hwloc_get_shared_cache_covering_obj
  21. hwloc_get_pu_obj_by_os_index
  22. hwloc_get_numanode_obj_by_os_index
  23. hwloc_get_obj_below_by_type
  24. hwloc_get_obj_below_array_by_type
  25. hwloc_distrib
  26. hwloc_topology_get_complete_cpuset
  27. hwloc_cpuset_from_nodeset
  28. hwloc_get_non_io_ancestor_obj
  29. hwloc_get_next_pcidev
  30. hwloc_get_pcidev_by_busid
  31. hwloc_get_pcidev_by_busidstring
  32. hwloc_get_next_osdev
  33. hwloc_get_next_bridge
  34. hwloc_bridge_covers_pcibus

   1 /*
   2  * Copyright © 2009 CNRS
   3  * Copyright © 2009-2018 Inria.  All rights reserved.
   4  * Copyright © 2009-2012 Université Bordeaux
   5  * Copyright © 2009-2010 Cisco Systems, Inc.  All rights reserved.
   6  * See COPYING in top-level directory.
   7  */
   8 
   9 /** \file
  10  * \brief High-level hwloc traversal helpers.
  11  */
  12 
  13 #ifndef HWLOC_HELPER_H
  14 #define HWLOC_HELPER_H
  15 
  16 #ifndef HWLOC_H
  17 #error Please include the main hwloc.h instead
  18 #endif
  19 
  20 #include <stdlib.h>
  21 #include <errno.h>
  22 
  23 
  24 #ifdef __cplusplus
  25 extern "C" {
  26 #endif
  27 
  28 
  29 /** \defgroup hwlocality_helper_find_inside Finding Objects inside a CPU set
  30  * @{
  31  */
  32 
  33 /** \brief Get the first largest object included in the given cpuset \p set.
  34  *
  35  * \return the first object that is included in \p set and whose parent is not.
  36  *
  37  * This is convenient for iterating over all largest objects within a CPU set
  38  * by doing a loop getting the first largest object and clearing its CPU set
  39  * from the remaining CPU set.
  40  */
   41 static __hwloc_inline hwloc_obj_t
   42 hwloc_get_first_largest_obj_inside_cpuset(hwloc_topology_t topology, hwloc_const_cpuset_t set)
   43 {
   44   hwloc_obj_t obj = hwloc_get_root_obj(topology);
   45   if (!hwloc_bitmap_intersects(obj->cpuset, set))
   46     return NULL; /* the root doesn't even intersect the set, nothing can be returned */
   47   while (!hwloc_bitmap_isincluded(obj->cpuset, set)) {
   48     /* while the object intersects without being included, look at its children */
   49     hwloc_obj_t child = obj->first_child;
   50     while (child) {
   51       if (hwloc_bitmap_intersects(child->cpuset, set))
   52         break;
   53       child = child->next_sibling;
   54     }
   55     if (!child)
   56       /* no child intersects, return their parent */
   57       return obj;
   58     /* found one intersecting child, look at its children */
   59     obj = child;
   60   }
   61   /* obj is included, return it */
   62   return obj;
   63 }
  64 
  65 /** \brief Get the set of largest objects covering exactly a given cpuset \p set
  66  *
  67  * \return the number of objects returned in \p objs.
  68  */
  69 HWLOC_DECLSPEC int hwloc_get_largest_objs_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  70                                                  hwloc_obj_t * __hwloc_restrict objs, int max);
  71 
  72 /** \brief Return the next object at depth \p depth included in CPU set \p set.
  73  *
  74  * If \p prev is \c NULL, return the first object at depth \p depth
   75  * included in \p set.  The next invocation should pass the previous
  76  * return value in \p prev so as to obtain the next object in \p set.
  77  *
  78  * \note Objects with empty CPU sets are ignored
  79  * (otherwise they would be considered included in any given set).
  80  *
  81  * \note This function cannot work if objects at the given depth do
  82  * not have CPU sets (I/O or Misc objects).
  83  */
   84 static __hwloc_inline hwloc_obj_t
   85 hwloc_get_next_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
   86                                            int depth, hwloc_obj_t prev)
   87 {
   88   hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
   89   if (!next)
   90     return NULL;
   91   while (next && (hwloc_bitmap_iszero(next->cpuset) || !hwloc_bitmap_isincluded(next->cpuset, set))) /* skip empty cpusets, they would be included in any set */
   92     next = next->next_cousin;
   93   return next;
   94 }
  95 
  96 /** \brief Return the next object of type \p type included in CPU set \p set.
  97  *
  98  * If there are multiple or no depth for given type, return \c NULL
  99  * and let the caller fallback to
 100  * hwloc_get_next_obj_inside_cpuset_by_depth().
 101  *
 102  * \note Objects with empty CPU sets are ignored
 103  * (otherwise they would be considered included in any given set).
 104  *
 105  * \note This function cannot work if objects of the given type do
 106  * not have CPU sets (I/O or Misc objects).
 107  */
  108 static __hwloc_inline hwloc_obj_t
  109 hwloc_get_next_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  110                                           hwloc_obj_type_t type, hwloc_obj_t prev)
  111 {
  112   int depth = hwloc_get_type_depth(topology, type);
  113   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
  114     return NULL; /* no single depth for this type, let the caller fallback to per-depth lookups */
  115   return hwloc_get_next_obj_inside_cpuset_by_depth(topology, set, depth, prev);
  116 }
 117 
 118 /** \brief Return the (logically) \p idx -th object at depth \p depth included in CPU set \p set.
 119  *
 120  * \note Objects with empty CPU sets are ignored
 121  * (otherwise they would be considered included in any given set).
 122  *
 123  * \note This function cannot work if objects at the given depth do
 124  * not have CPU sets (I/O or Misc objects).
 125  */
  126 static __hwloc_inline hwloc_obj_t
  127 hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  128                                       int depth, unsigned idx) __hwloc_attribute_pure;
  129 static __hwloc_inline hwloc_obj_t
  130 hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  131                                       int depth, unsigned idx)
  132 {
  133   hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
  134   unsigned count = 0; /* logical index counted only among objects included in the set */
  135   if (!obj)
  136     return NULL;
  137   while (obj) {
  138     if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) { /* ignore empty cpusets */
  139       if (count == idx)
  140         return obj;
  141       count++;
  142     }
  143     obj = obj->next_cousin;
  144   }
  145   return NULL;
  146 }
 147 
 148 /** \brief Return the \p idx -th object of type \p type included in CPU set \p set.
 149  *
 150  * If there are multiple or no depth for given type, return \c NULL
 151  * and let the caller fallback to
 152  * hwloc_get_obj_inside_cpuset_by_depth().
 153  *
 154  * \note Objects with empty CPU sets are ignored
 155  * (otherwise they would be considered included in any given set).
 156  *
 157  * \note This function cannot work if objects of the given type do
 158  * not have CPU sets (I/O or Misc objects).
 159  */
  160 static __hwloc_inline hwloc_obj_t
  161 hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  162                                      hwloc_obj_type_t type, unsigned idx) __hwloc_attribute_pure;
  163 static __hwloc_inline hwloc_obj_t
  164 hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  165                                      hwloc_obj_type_t type, unsigned idx)
  166 {
  167   int depth = hwloc_get_type_depth(topology, type);
  168   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
  169     return NULL; /* no single depth for this type, let the caller fallback to per-depth lookups */
  170   return hwloc_get_obj_inside_cpuset_by_depth(topology, set, depth, idx);
  171 }
 172 
 173 /** \brief Return the number of objects at depth \p depth included in CPU set \p set.
 174  *
 175  * \note Objects with empty CPU sets are ignored
 176  * (otherwise they would be considered included in any given set).
 177  *
 178  * \note This function cannot work if objects at the given depth do
 179  * not have CPU sets (I/O or Misc objects).
 180  */
  181 static __hwloc_inline unsigned
  182 hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  183                                          int depth) __hwloc_attribute_pure;
  184 static __hwloc_inline unsigned
  185 hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  186                                          int depth)
  187 {
  188   hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
  189   unsigned count = 0;
  190   if (!obj)
  191     return 0;
  192   while (obj) {
  193     if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) /* ignore empty cpusets */
  194       count++;
  195     obj = obj->next_cousin;
  196   }
  197   return count;
  198 }
 199 
 200 /** \brief Return the number of objects of type \p type included in CPU set \p set.
 201  *
 202  * If no object for that type exists inside CPU set \p set, 0 is
 203  * returned.  If there are several levels with objects of that type
 204  * inside CPU set \p set, -1 is returned.
 205  *
 206  * \note Objects with empty CPU sets are ignored
 207  * (otherwise they would be considered included in any given set).
 208  *
 209  * \note This function cannot work if objects of the given type do
 210  * not have CPU sets (I/O objects).
 211  */
  212 static __hwloc_inline int
  213 hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  214                                         hwloc_obj_type_t type) __hwloc_attribute_pure;
  215 static __hwloc_inline int
  216 hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
  217                                         hwloc_obj_type_t type)
  218 {
  219   int depth = hwloc_get_type_depth(topology, type);
  220   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
  221     return 0;
  222   if (depth == HWLOC_TYPE_DEPTH_MULTIPLE)
  223     return -1; /* FIXME: aggregate nbobjs from different levels? */
  224   return (int) hwloc_get_nbobjs_inside_cpuset_by_depth(topology, set, depth);
  225 }
 226 
 227 /** \brief Return the logical index among the objects included in CPU set \p set.
 228  *
 229  * Consult all objects in the same level as \p obj and inside CPU set \p set
 230  * in the logical order, and return the index of \p obj within them.
 231  * If \p set covers the entire topology, this is the logical index of \p obj.
 232  * Otherwise, this is similar to a logical index within the part of the topology
 233  * defined by CPU set \p set.
 234  *
 235  * \note Objects with empty CPU sets are ignored
 236  * (otherwise they would be considered included in any given set).
 237  *
 238  * \note This function cannot work if obj does not have CPU sets (I/O objects).
 239  */
  240 static __hwloc_inline int
  241 hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
  242                                    hwloc_obj_t obj) __hwloc_attribute_pure;
  243 static __hwloc_inline int
  244 hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
  245                                    hwloc_obj_t obj)
  246 {
  247   int idx = 0;
  248   if (!hwloc_bitmap_isincluded(obj->cpuset, set))
  249     return -1; /* obj itself is not inside the set */
  250   /* count how many objects are inside the cpuset on the way from us to the beginning of the level */
  251   while ((obj = obj->prev_cousin) != NULL)
  252     if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) /* ignore empty cpusets */
  253       idx++;
  254   return idx;
  255 }
 256 
 257 /** @} */
 258 
 259 
 260 
 261 /** \defgroup hwlocality_helper_find_covering Finding Objects covering at least CPU set
 262  * @{
 263  */
 264 
 265 /** \brief Get the child covering at least CPU set \p set.
 266  *
 267  * \return \c NULL if no child matches or if \p set is empty.
 268  *
 269  * \note This function cannot work if parent does not have a CPU set (I/O or Misc objects).
 270  */
  271 static __hwloc_inline hwloc_obj_t
  272 hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
  273                                 hwloc_obj_t parent) __hwloc_attribute_pure;
  274 static __hwloc_inline hwloc_obj_t
  275 hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
  276                                 hwloc_obj_t parent)
  277 {
  278   hwloc_obj_t child;
  279   if (hwloc_bitmap_iszero(set))
  280     return NULL; /* an empty set would be "covered" by any child, reject it explicitly */
  281   child = parent->first_child;
  282   while (child) {
  283     if (child->cpuset && hwloc_bitmap_isincluded(set, child->cpuset)) /* covering means set is included in the child cpuset */
  284       return child;
  285     child = child->next_sibling;
  286   }
  287   return NULL;
  288 }
 289 
 290 /** \brief Get the lowest object covering at least CPU set \p set
 291  *
 292  * \return \c NULL if no object matches or if \p set is empty.
 293  */
  294 static __hwloc_inline hwloc_obj_t
  295 hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure;
  296 static __hwloc_inline hwloc_obj_t
  297 hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
  298 {
  299   struct hwloc_obj *current = hwloc_get_root_obj(topology);
  300   if (hwloc_bitmap_iszero(set) || !hwloc_bitmap_isincluded(set, current->cpuset))
  301     return NULL; /* empty set, or set not even covered by the root */
  302   while (1) {
  303     hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, set, current);
  304     if (!child)
  305       return current; /* no child covers the whole set, current is the lowest covering object */
  306     current = child;
  307   }
  308 }
 309 
 310 /** \brief Iterate through same-depth objects covering at least CPU set \p set
 311  *
 312  * If object \p prev is \c NULL, return the first object at depth \p
 313  * depth covering at least part of CPU set \p set.  The next
  314  * invocation should pass the previous return value in \p prev so as
 315  * to obtain the next object covering at least another part of \p set.
 316  *
 317  * \note This function cannot work if objects at the given depth do
 318  * not have CPU sets (I/O or Misc objects).
 319  */
  320 static __hwloc_inline hwloc_obj_t
  321 hwloc_get_next_obj_covering_cpuset_by_depth(hwloc_topology_t topology, hwloc_const_cpuset_t set,
  322                                             int depth, hwloc_obj_t prev)
  323 {
  324   hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
  325   if (!next)
  326     return NULL;
  327   while (next && !hwloc_bitmap_intersects(set, next->cpuset)) /* skip objects whose cpuset doesn't intersect the set at all */
  328     next = next->next_cousin;
  329   return next;
  330 }
 331 
 332 /** \brief Iterate through same-type objects covering at least CPU set \p set
 333  *
 334  * If object \p prev is \c NULL, return the first object of type \p
  335  * type covering at least part of CPU set \p set.  The next invocation
 336  * should pass the previous return value in \p prev so as to obtain
 337  * the next object of type \p type covering at least another part of
 338  * \p set.
 339  *
 340  * If there are no or multiple depths for type \p type, \c NULL is returned.
 341  * The caller may fallback to hwloc_get_next_obj_covering_cpuset_by_depth()
 342  * for each depth.
 343  *
 344  * \note This function cannot work if objects of the given type do
 345  * not have CPU sets (I/O or Misc objects).
 346  */
  347 static __hwloc_inline hwloc_obj_t
  348 hwloc_get_next_obj_covering_cpuset_by_type(hwloc_topology_t topology, hwloc_const_cpuset_t set,
  349                                            hwloc_obj_type_t type, hwloc_obj_t prev)
  350 {
  351   int depth = hwloc_get_type_depth(topology, type);
  352   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
  353     return NULL; /* no single depth for this type, let the caller fallback to per-depth lookups */
  354   return hwloc_get_next_obj_covering_cpuset_by_depth(topology, set, depth, prev);
  355 }
 356 
 357 /** @} */
 358 
 359 
 360 
 361 /** \defgroup hwlocality_helper_ancestors Looking at Ancestor and Child Objects
 362  * @{
 363  *
 364  * Be sure to see the figure in \ref termsanddefs that shows a
 365  * complete topology tree, including depths, child/sibling/cousin
 366  * relationships, and an example of an asymmetric topology where one
 367  * package has fewer caches than its peers.
 368  */
 369 
 370 /** \brief Returns the ancestor object of \p obj at depth \p depth. */
  371 static __hwloc_inline hwloc_obj_t
  372 hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj) __hwloc_attribute_pure;
  373 static __hwloc_inline hwloc_obj_t
  374 hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj)
  375 {
  376   hwloc_obj_t ancestor = obj;
  377   if (obj->depth < depth)
  378     return NULL; /* requested depth is below obj, ancestors can only be above it */
  379   while (ancestor && ancestor->depth > depth)
  380     ancestor = ancestor->parent;
  381   return ancestor;
  382 }
 383 
 384 /** \brief Returns the ancestor object of \p obj with type \p type. */
  385 static __hwloc_inline hwloc_obj_t
  386 hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj) __hwloc_attribute_pure;
  387 static __hwloc_inline hwloc_obj_t
  388 hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj)
  389 {
  390   hwloc_obj_t ancestor = obj->parent; /* start from the parent so that obj itself is never returned */
  391   while (ancestor && ancestor->type != type)
  392     ancestor = ancestor->parent;
  393   return ancestor;
  394 }
 395 
 396 /** \brief Returns the common parent object to objects \p obj1 and \p obj2 */
  397 static __hwloc_inline hwloc_obj_t
  398 hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2) __hwloc_attribute_pure;
  399 static __hwloc_inline hwloc_obj_t
  400 hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2)
  401 {
  402   /* the loop isn't so easy since intermediate ancestors may have
  403    * different depth, causing us to alternate between using obj1->parent
  404    * and obj2->parent. Also, even if at some point we find ancestors
  405    * of the same depth, their ancestors may have different depth again.
  406    */
  407   while (obj1 != obj2) {
  408     while (obj1->depth > obj2->depth)
  409       obj1 = obj1->parent;
  410     while (obj2->depth > obj1->depth)
  411       obj2 = obj2->parent;
  412     if (obj1 != obj2 && obj1->depth == obj2->depth) { /* same depth but different objects: climb both */
  413       obj1 = obj1->parent;
  414       obj2 = obj2->parent;
  415     }
  416   }
  417   return obj1;
  418 }
 419 
 420 /** \brief Returns true if \p obj is inside the subtree beginning with ancestor object \p subtree_root.
 421  *
 422  * \note This function cannot work if \p obj and \p subtree_root objects do
 423  * not have CPU sets (I/O or Misc objects).
 424  */
  425 static __hwloc_inline int
  426 hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root) __hwloc_attribute_pure;
  427 static __hwloc_inline int
  428 hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root)
  429 {
  430   return obj->cpuset && subtree_root->cpuset && hwloc_bitmap_isincluded(obj->cpuset, subtree_root->cpuset); /* cpuset inclusion implies subtree membership; both must have cpusets */
  431 }
 432 
 433 /** \brief Return the next child.
 434  *
 435  * Return the next child among the normal children list,
 436  * then among the memory children list, then among the I/O
 437  * children list, then among the Misc children list.
 438  *
 439  * If \p prev is \c NULL, return the first child.
 440  *
 441  * Return \c NULL when there is no next child.
 442  */
  443 static __hwloc_inline hwloc_obj_t
  444 hwloc_get_next_child (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t parent, hwloc_obj_t prev)
  445 {
  446   hwloc_obj_t obj;
  447   int state = 0; /* children list currently being walked: 0=normal, 1=memory, 2=I/O, 3=misc */
  448   if (prev) {
  449     if (prev->type == HWLOC_OBJ_MISC)
  450       state = 3;
  451     else if (prev->type == HWLOC_OBJ_BRIDGE || prev->type == HWLOC_OBJ_PCI_DEVICE || prev->type == HWLOC_OBJ_OS_DEVICE)
  452       state = 2;
  453     else if (prev->type == HWLOC_OBJ_NUMANODE)
  454       state = 1;
  455     obj = prev->next_sibling;
  456   } else {
  457     obj = parent->first_child;
  458   }
  459   if (!obj && state == 0) { /* normal list exhausted, move on to memory children */
  460     obj = parent->memory_first_child;
  461     state = 1;
  462   }
  463   if (!obj && state == 1) { /* memory list exhausted, move on to I/O children */
  464     obj = parent->io_first_child;
  465     state = 2;
  466   }
  467   if (!obj && state == 2) { /* I/O list exhausted, move on to Misc children */
  468     obj = parent->misc_first_child;
  469     state = 3;
  470   }
  471   return obj;
  472 }
 473 
 474 /** @} */
 475 
 476 
 477 
 478 /** \defgroup hwlocality_helper_types Kinds of object Type
 479  * @{
 480  *
 481  * Each object type is
 482  * either Normal (i.e. hwloc_obj_type_is_normal() returns 1),
 483  * or Memory (i.e. hwloc_obj_type_is_memory() returns 1)
 484  * or I/O (i.e. hwloc_obj_type_is_io() returns 1)
 485  * or Misc (i.e. equal to ::HWLOC_OBJ_MISC).
 486  * It cannot be of more than one of these kinds.
 487  */
 488 
 489 /** \brief Check whether an object type is Normal.
 490  *
 491  * Normal objects are objects of the main CPU hierarchy
 492  * (Machine, Package, Core, PU, CPU caches, etc.),
 493  * but they are not NUMA nodes, I/O devices or Misc objects.
 494  *
 495  * They are attached to parent as Normal children,
 496  * not as Memory, I/O or Misc children.
 497  *
 498  * \return 1 if an object of type \p type is a Normal object, 0 otherwise.
 499  */
 500 HWLOC_DECLSPEC int
 501 hwloc_obj_type_is_normal(hwloc_obj_type_t type);
 502 
 503 /** \brief Check whether an object type is Memory.
 504  *
 505  * Memory objects are objects attached to their parents
 506  * in the Memory children list.
 507  * This current only includes NUMA nodes.
 508  *
 509  * \return 1 if an object of type \p type is a Memory object, 0 otherwise.
 510  */
 511 HWLOC_DECLSPEC int
 512 hwloc_obj_type_is_io(hwloc_obj_type_t type);
 513 
 514 /** \brief Check whether an object type is I/O.
 515  *
 516  * I/O objects are objects attached to their parents
 517  * in the I/O children list.
 518  * This current includes Bridges, PCI and OS devices.
 519  *
 520  * \return 1 if an object of type \p type is a I/O object, 0 otherwise.
 521  */
 522 HWLOC_DECLSPEC int
 523 hwloc_obj_type_is_memory(hwloc_obj_type_t type);
 524 
 525 /** \brief Check whether an object type is a Cache (Data, Unified or Instruction).
 526  *
 527  * \return 1 if an object of type \p type is a Cache, 0 otherwise.
 528  */
 529 HWLOC_DECLSPEC int
 530 hwloc_obj_type_is_cache(hwloc_obj_type_t type);
 531 
 532 /** \brief Check whether an object type is a Data or Unified Cache.
 533  *
 534  * \return 1 if an object of type \p type is a Data or Unified Cache, 0 otherwise.
 535  */
 536 HWLOC_DECLSPEC int
 537 hwloc_obj_type_is_dcache(hwloc_obj_type_t type);
 538 
  539 /** \brief Check whether an object type is an Instruction Cache.
 540  *
  541  * \return 1 if an object of type \p type is an Instruction Cache, 0 otherwise.
 542  */
 543 HWLOC_DECLSPEC int
 544 hwloc_obj_type_is_icache(hwloc_obj_type_t type);
 545 
 546 /** @} */
 547 
 548 
 549 
 550 /** \defgroup hwlocality_helper_find_cache Looking at Cache Objects
 551  * @{
 552  */
 553 
 554 /** \brief Find the depth of cache objects matching cache level and type.
 555  *
 556  * Return the depth of the topology level that contains cache objects
 557  * whose attributes match \p cachelevel and \p cachetype.
 558 
 559  * This function is identical to calling hwloc_get_type_depth() with the
 560  * corresponding type such as ::HWLOC_OBJ_L1ICACHE, except that it may
 561  * also return a Unified cache when looking for an instruction cache.
 562  *
 563  * If no cache level matches, ::HWLOC_TYPE_DEPTH_UNKNOWN is returned.
 564  *
 565  * If \p cachetype is ::HWLOC_OBJ_CACHE_UNIFIED, the depth of the
 566  * unique matching unified cache level is returned.
 567  *
 568  * If \p cachetype is ::HWLOC_OBJ_CACHE_DATA or ::HWLOC_OBJ_CACHE_INSTRUCTION,
 569  * either a matching cache, or a unified cache is returned.
 570  *
 571  * If \p cachetype is \c -1, it is ignored and multiple levels may
 572  * match. The function returns either the depth of a uniquely matching
 573  * level or ::HWLOC_TYPE_DEPTH_MULTIPLE.
 574  */
  575 static __hwloc_inline int
  576 hwloc_get_cache_type_depth (hwloc_topology_t topology,
  577                             unsigned cachelevel, hwloc_obj_cache_type_t cachetype)
  578 {
  579   int depth;
  580   int found = HWLOC_TYPE_DEPTH_UNKNOWN;
  581   for (depth=0; ; depth++) { /* scan every level from the root until running out of objects */
  582     hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0);
  583     if (!obj)
  584       break;
  585     if (!hwloc_obj_type_is_dcache(obj->type) || obj->attr->cache.depth != cachelevel)
  586       /* doesn't match, try next depth */
  587       continue;
  588     if (cachetype == (hwloc_obj_cache_type_t) -1) { /* wildcard: any cache type at this level counts */
  589       if (found != HWLOC_TYPE_DEPTH_UNKNOWN) {
  590         /* second match, return MULTIPLE */
  591         return HWLOC_TYPE_DEPTH_MULTIPLE;
  592       }
  593       /* first match, mark it as found */
  594       found = depth;
  595       continue;
  596     }
  597     if (obj->attr->cache.type == cachetype || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED)
  598       /* exact match (either unified is alone, or we match instruction or data), return immediately */
  599       return depth;
  600   }
  601   /* went to the bottom, return what we found */
  602   return found;
  603 }
 604 
 605 /** \brief Get the first data (or unified) cache covering a cpuset \p set
 606  *
 607  * \return \c NULL if no cache matches.
 608  */
  609 static __hwloc_inline hwloc_obj_t
  610 hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure;
  611 static __hwloc_inline hwloc_obj_t
  612 hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
  613 {
  614   hwloc_obj_t current = hwloc_get_obj_covering_cpuset(topology, set); /* lowest object covering the whole set */
  615   while (current) {
  616     if (hwloc_obj_type_is_dcache(current->type))
  617       return current;
  618     current = current->parent; /* walk up until a data/unified cache is found */
  619   }
  620   return NULL;
  621 }
 622 
 623 /** \brief Get the first data (or unified) cache shared between an object and somebody else.
 624  *
 625  * \return \c NULL if no cache matches or if an invalid object is given.
 626  */
  627 static __hwloc_inline hwloc_obj_t
  628 hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) __hwloc_attribute_pure;
  629 static __hwloc_inline hwloc_obj_t
  630 hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj)
  631 {
  632   hwloc_obj_t current = obj->parent;
  633   if (!obj->cpuset)
  634     return NULL; /* objects without cpusets (I/O, Misc) cannot have covering caches */
  635   while (current) {
  636     if (!hwloc_bitmap_isequal(current->cpuset, obj->cpuset) /* "shared" means a cpuset strictly larger than obj's own */
  637         && hwloc_obj_type_is_dcache(current->type))
  638       return current;
  639     current = current->parent;
  640   }
  641   return NULL;
  642 }
 643 
 644 /** @} */
 645 
 646 
 647 
 648 /** \defgroup hwlocality_helper_find_misc Finding objects, miscellaneous helpers
 649  * @{
 650  *
 651  * Be sure to see the figure in \ref termsanddefs that shows a
 652  * complete topology tree, including depths, child/sibling/cousin
 653  * relationships, and an example of an asymmetric topology where one
 654  * package has fewer caches than its peers.
 655  */
 656 
 657 /** \brief Returns the object of type ::HWLOC_OBJ_PU with \p os_index.
 658  *
 659  * This function is useful for converting a CPU set into the PU
 660  * objects it contains.
 661  * When retrieving the current binding (e.g. with hwloc_get_cpubind()),
 662  * one may iterate over the bits of the resulting CPU set with
 663  * hwloc_bitmap_foreach_begin(), and find the corresponding PUs
 664  * with this function.
 665  */
  666 static __hwloc_inline hwloc_obj_t
  667 hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure;
  668 static __hwloc_inline hwloc_obj_t
  669 hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index)
  670 {
  671   hwloc_obj_t obj = NULL;
  672   while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, obj)) != NULL) /* linear scan: os_index may differ from the logical index */
  673     if (obj->os_index == os_index)
  674       return obj;
  675   return NULL;
  676 }
 677 
 678 /** \brief Returns the object of type ::HWLOC_OBJ_NUMANODE with \p os_index.
 679  *
 680  * This function is useful for converting a nodeset into the NUMA node
 681  * objects it contains.
 682  * When retrieving the current binding (e.g. with hwloc_get_membind() with HWLOC_MEMBIND_BYNODESET),
 683  * one may iterate over the bits of the resulting nodeset with
 684  * hwloc_bitmap_foreach_begin(), and find the corresponding NUMA nodes
 685  * with this function.
 686  */
  687 static __hwloc_inline hwloc_obj_t
  688 hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure;
  689 static __hwloc_inline hwloc_obj_t
  690 hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index)
  691 {
  692   hwloc_obj_t obj = NULL;
  693   while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, obj)) != NULL) /* linear scan: os_index may differ from the logical index */
  694     if (obj->os_index == os_index)
  695       return obj;
  696   return NULL;
  697 }
 698 
 699 /** \brief Do a depth-first traversal of the topology to find and sort
 700  *
  701  * all objects that are at the same depth as \p src.
 702  * Report in \p objs up to \p max physically closest ones to \p src.
 703  *
 704  * \return the number of objects returned in \p objs.
 705  *
 706  * \return 0 if \p src is an I/O object.
 707  *
 708  * \note This function requires the \p src object to have a CPU set.
 709  */
 710 /* TODO: rather provide an iterator? Provide a way to know how much should be allocated? By returning the total number of objects instead? */
 711 HWLOC_DECLSPEC unsigned hwloc_get_closest_objs (hwloc_topology_t topology, hwloc_obj_t src, hwloc_obj_t * __hwloc_restrict objs, unsigned max);
 712 
 713 /** \brief Find an object below another object, both specified by types and indexes.
 714  *
 715  * Start from the top system object and find object of type \p type1
 716  * and logical index \p idx1.  Then look below this object and find another
 717  * object of type \p type2 and logical index \p idx2.  Indexes are specified
  718  * within the parent, not within the entire system.
 719  *
 720  * For instance, if type1 is PACKAGE, idx1 is 2, type2 is CORE and idx2
 721  * is 3, return the fourth core object below the third package.
 722  *
 723  * \note This function requires these objects to have a CPU set.
 724  */
  725 static __hwloc_inline hwloc_obj_t
  726 hwloc_get_obj_below_by_type (hwloc_topology_t topology,
  727                              hwloc_obj_type_t type1, unsigned idx1,
  728                              hwloc_obj_type_t type2, unsigned idx2) __hwloc_attribute_pure;
  729 static __hwloc_inline hwloc_obj_t
  730 hwloc_get_obj_below_by_type (hwloc_topology_t topology,
  731                              hwloc_obj_type_t type1, unsigned idx1,
  732                              hwloc_obj_type_t type2, unsigned idx2)
  733 {
  734   hwloc_obj_t obj;
  735   obj = hwloc_get_obj_by_type (topology, type1, idx1);
  736   if (!obj)
  737     return NULL;
  738   return hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, type2, idx2); /* idx2 is relative to the first object's cpuset */
  739 }
 740 
 741 /** \brief Find an object below a chain of objects specified by types and indexes.
 742  *
 743  * This is a generalized version of hwloc_get_obj_below_by_type().
 744  *
 745  * Arrays \p typev and \p idxv must contain \p nr types and indexes.
 746  *
 747  * Start from the top system object and walk the arrays \p typev and \p idxv.
 748  * For each type and logical index couple in the arrays, look under the previously found
 749  * object to find the index-th object of the given type.
 750  * Indexes are specified within the parent, not withing the entire system.
 751  *
 752  * For instance, if nr is 3, typev contains NODE, PACKAGE and CORE,
 753  * and idxv contains 0, 1 and 2, return the third core object below
 754  * the second package below the first NUMA node.
 755  *
 756  * \note This function requires all these objects and the root object
 757  * to have a CPU set.
 758  */
 759 static __hwloc_inline hwloc_obj_t
 760 hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv) __hwloc_attribute_pure;
 761 static __hwloc_inline hwloc_obj_t
 762 hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv)
 763 {
 764   hwloc_obj_t obj = hwloc_get_root_obj(topology);
 765   int i;
 766   for(i=0; i<nr; i++) {
 767     if (!obj)
 768       return NULL;
 769     obj = hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, typev[i], idxv[i]);
 770   }
 771   return obj;
 772 }
 773 
 774 /** @} */
 775 
 776 
 777 
 778 /** \defgroup hwlocality_helper_distribute Distributing items over a topology
 779  * @{
 780  */
 781 
/** \brief Flags to be given to hwloc_distrib().
 */
enum hwloc_distrib_flags_e {
  /** \brief Distribute in reverse order, starting from the last objects.
   * \hideinitializer
   */
  HWLOC_DISTRIB_FLAG_REVERSE = (1UL<<0)
};
 790 
/** \brief Distribute \p n items over the topology under \p roots
 *
 * Array \p set will be filled with \p n cpusets recursively distributed
 * linearly over the topology under objects \p roots, down to depth \p until
 * (which can be INT_MAX to distribute down to the finest level).
 *
 * \p n_roots is usually 1 and \p roots only contains the topology root object
 * so as to distribute over the entire topology.
 *
 * This is typically useful when an application wants to distribute \p n
 * threads over a machine, giving each of them as much private cache as
 * possible and keeping them locally in number order.
 *
 * The caller may typically want to also call hwloc_bitmap_singlify()
 * before binding a thread so that it does not move at all.
 *
 * \p flags should be 0 or an OR'ed set of ::hwloc_distrib_flags_e.
 *
 * \return 0 on success, -1 with errno set to \c EINVAL on invalid \p flags.
 *
 * \note The caller owns the cpusets stored into \p set and must free them
 * with hwloc_bitmap_free().
 *
 * \note This function requires the \p roots objects to have a CPU set.
 *
 * \note This function replaces the now deprecated hwloc_distribute()
 * and hwloc_distributev() functions.
 */
static __hwloc_inline int
hwloc_distrib(hwloc_topology_t topology,
              hwloc_obj_t *roots, unsigned n_roots,
              hwloc_cpuset_t *set,
              unsigned n,
              int until, unsigned long flags)
{
  unsigned i;
  unsigned tot_weight;
  unsigned given, givenweight;
  hwloc_cpuset_t *cpusetp = set;

  /* REVERSE is the only valid flag. */
  if (flags & ~HWLOC_DISTRIB_FLAG_REVERSE) {
    errno = EINVAL;
    return -1;
  }

  /* Total number of PUs under all roots; each root receives a share of
   * the n items proportional to its own PU count below. */
  tot_weight = 0;
  for (i = 0; i < n_roots; i++)
    tot_weight += (unsigned) hwloc_bitmap_weight(roots[i]->cpuset);

  for (i = 0, given = 0, givenweight = 0; i < n_roots; i++) {
    unsigned chunk, weight;
    /* Walk the roots backward when REVERSE is requested. */
    hwloc_obj_t root = roots[flags & HWLOC_DISTRIB_FLAG_REVERSE ? n_roots-1-i : i];
    hwloc_cpuset_t cpuset = root->cpuset;
    if (root->type == HWLOC_OBJ_NUMANODE)
      /* NUMANodes have same cpuset as their parent, but we need normal objects below */
      root = root->parent;
    weight = (unsigned) hwloc_bitmap_weight(cpuset);
    if (!weight)
      continue;
    /* Give to root a chunk proportional to its weight.
     * If previous chunks got rounded-up, we may get a bit less. */
    chunk = (( (givenweight+weight) * n  + tot_weight-1) / tot_weight)
          - ((  givenweight         * n  + tot_weight-1) / tot_weight);
    if (!root->arity || chunk <= 1 || root->depth >= until) {
      /* We can't split any more, put everything there.  */
      if (chunk) {
        /* Fill cpusets with ours */
        unsigned j;
        for (j=0; j < chunk; j++)
          cpusetp[j] = hwloc_bitmap_dup(cpuset);
      } else {
        /* We got no chunk, just merge our cpuset to a previous one
         * (the first chunk cannot be empty)
         * so that this root doesn't get ignored.
         */
        assert(given);
        hwloc_bitmap_or(cpusetp[-1], cpusetp[-1], cpuset);
      }
    } else {
      /* Still more to distribute, recurse into children */
      hwloc_distrib(topology, root->children, root->arity, cpusetp, chunk, until, flags);
    }
    /* Advance the output cursor past the chunk just written. */
    cpusetp += chunk;
    given += chunk;
    givenweight += weight;
  }

  return 0;
}
 875 
 876 /** @} */
 877 
 878 
 879 
 880 /** \defgroup hwlocality_helper_topology_sets CPU and node sets of entire topologies
 881  * @{
 882  */
 883 
 884 /** \brief Get complete CPU set
 885  *
 886  * \return the complete CPU set of logical processors of the system.
 887  *
 888  * \note The returned cpuset is not newly allocated and should thus not be
 889  * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy.
 890  *
 891  * \note This is equivalent to retrieving the root object complete CPU-set.
 892  */
 893 HWLOC_DECLSPEC hwloc_const_cpuset_t
 894 hwloc_topology_get_complete_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;
 895 
 896 /** \brief Get topology CPU set
 897  *
 898  * \return the CPU set of logical processors of the system for which hwloc
 899  * provides topology information. This is equivalent to the cpuset of the
 900  * system object.
 901  *
 902  * \note The returned cpuset is not newly allocated and should thus not be
 903  * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy.
 904  *
 * \note This is equivalent to retrieving the root object CPU-set.
 906  */
 907 HWLOC_DECLSPEC hwloc_const_cpuset_t
 908 hwloc_topology_get_topology_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;
 909 
 910 /** \brief Get allowed CPU set
 911  *
 912  * \return the CPU set of allowed logical processors of the system.
 913  *
 914  * \note If the topology flag ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was not set,
 915  * this is identical to hwloc_topology_get_topology_cpuset(), which means
 916  * all PUs are allowed.
 917  *
 918  * \note If ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was set, applying
 919  * hwloc_bitmap_intersects() on the result of this function and on an object
 920  * cpuset checks whether there are allowed PUs inside that object.
 921  * Applying hwloc_bitmap_and() returns the list of these allowed PUs.
 922  *
 923  * \note The returned cpuset is not newly allocated and should thus not be
 924  * changed or freed, hwloc_bitmap_dup() must be used to obtain a local copy.
 925  */
 926 HWLOC_DECLSPEC hwloc_const_cpuset_t
 927 hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;
 928 
 929 /** \brief Get complete node set
 930  *
 931  * \return the complete node set of memory of the system.
 932  *
 933  * \note The returned nodeset is not newly allocated and should thus not be
 934  * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy.
 935  *
 * \note This is equivalent to retrieving the root object complete nodeset.
 937  */
 938 HWLOC_DECLSPEC hwloc_const_nodeset_t
 939 hwloc_topology_get_complete_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;
 940 
 941 /** \brief Get topology node set
 942  *
 943  * \return the node set of memory of the system for which hwloc
 944  * provides topology information. This is equivalent to the nodeset of the
 945  * system object.
 946  *
 947  * \note The returned nodeset is not newly allocated and should thus not be
 948  * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy.
 949  *
 * \note This is equivalent to retrieving the root object nodeset.
 951  */
 952 HWLOC_DECLSPEC hwloc_const_nodeset_t
 953 hwloc_topology_get_topology_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;
 954 
 955 /** \brief Get allowed node set
 956  *
 957  * \return the node set of allowed memory of the system.
 958  *
 959  * \note If the topology flag ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was not set,
 960  * this is identical to hwloc_topology_get_topology_nodeset(), which means
 961  * all NUMA nodes are allowed.
 962  *
 963  * \note If ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was set, applying
 964  * hwloc_bitmap_intersects() on the result of this function and on an object
 965  * nodeset checks whether there are allowed NUMA nodes inside that object.
 966  * Applying hwloc_bitmap_and() returns the list of these allowed NUMA nodes.
 967  *
 968  * \note The returned nodeset is not newly allocated and should thus not be
 969  * changed or freed, hwloc_bitmap_dup() must be used to obtain a local copy.
 970  */
 971 HWLOC_DECLSPEC hwloc_const_nodeset_t
 972 hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;
 973 
 974 /** @} */
 975 
 976 
 977 
 978 /** \defgroup hwlocality_helper_nodeset_convert Converting between CPU sets and node sets
 979  *
 980  * @{
 981  */
 982 
 983 /** \brief Convert a CPU set into a NUMA node set and handle non-NUMA cases
 984  *
 985  * If some NUMA nodes have no CPUs at all, this function never sets their
 986  * indexes in the output node set, even if a full CPU set is given in input.
 987  *
 988  * If the topology contains no NUMA nodes, the machine is considered
 989  * as a single memory node, and the following behavior is used:
 990  * If \p cpuset is empty, \p nodeset will be emptied as well.
 991  * Otherwise \p nodeset will be entirely filled.
 992  */
 993 static __hwloc_inline int
 994 hwloc_cpuset_to_nodeset(hwloc_topology_t topology, hwloc_const_cpuset_t _cpuset, hwloc_nodeset_t nodeset)
 995 {
 996         int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
 997         hwloc_obj_t obj = NULL;
 998         assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN);
 999         hwloc_bitmap_zero(nodeset);
1000         while ((obj = hwloc_get_next_obj_covering_cpuset_by_depth(topology, _cpuset, depth, obj)) != NULL)
1001                 if (hwloc_bitmap_set(nodeset, obj->os_index) < 0)
1002                         return -1;
1003         return 0;
1004 }
1005 
1006 /** \brief Convert a NUMA node set into a CPU set and handle non-NUMA cases
1007  *
1008  * If the topology contains no NUMA nodes, the machine is considered
1009  * as a single memory node, and the following behavior is used:
1010  * If \p nodeset is empty, \p cpuset will be emptied as well.
1011  * Otherwise \p cpuset will be entirely filled.
1012  * This is useful for manipulating memory binding sets.
1013  */
1014 static __hwloc_inline int
1015 hwloc_cpuset_from_nodeset(hwloc_topology_t topology, hwloc_cpuset_t _cpuset, hwloc_const_nodeset_t nodeset)
1016 {
1017         int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
1018         hwloc_obj_t obj = NULL;
1019         assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN);
1020         hwloc_bitmap_zero(_cpuset);
1021         while ((obj = hwloc_get_next_obj_by_depth(topology, depth, obj)) != NULL) {
1022                 if (hwloc_bitmap_isset(nodeset, obj->os_index))
1023                         /* no need to check obj->cpuset because objects in levels always have a cpuset */
1024                         if (hwloc_bitmap_or(_cpuset, _cpuset, obj->cpuset) < 0)
1025                                 return -1;
1026         }
1027         return 0;
1028 }
1029 
1030 /** @} */
1031 
1032 
1033 
1034 /** \defgroup hwlocality_advanced_io Finding I/O objects
1035  * @{
1036  */
1037 
1038 /** \brief Get the first non-I/O ancestor object.
1039  *
1040  * Given the I/O object \p ioobj, find the smallest non-I/O ancestor
1041  * object. This object (normal or memory) may then be used for binding
1042  * because it has non-NULL CPU and node sets
1043  * and because its locality is the same as \p ioobj.
1044  *
1045  * \note The resulting object is usually a normal object but it could also
1046  * be a memory object (e.g. NUMA node) in future platforms if I/O objects
1047  * ever get attached to memory instead of CPUs.
1048  */
1049 static __hwloc_inline hwloc_obj_t
1050 hwloc_get_non_io_ancestor_obj(hwloc_topology_t topology __hwloc_attribute_unused,
1051                               hwloc_obj_t ioobj)
1052 {
1053   hwloc_obj_t obj = ioobj;
1054   while (obj && !obj->cpuset) {
1055     obj = obj->parent;
1056   }
1057   return obj;
1058 }
1059 
1060 /** \brief Get the next PCI device in the system.
1061  *
1062  * \return the first PCI device if \p prev is \c NULL.
1063  */
1064 static __hwloc_inline hwloc_obj_t
1065 hwloc_get_next_pcidev(hwloc_topology_t topology, hwloc_obj_t prev)
1066 {
1067   return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PCI_DEVICE, prev);
1068 }
1069 
1070 /** \brief Find the PCI device object matching the PCI bus id
1071  * given domain, bus device and function PCI bus id.
1072  */
1073 static __hwloc_inline hwloc_obj_t
1074 hwloc_get_pcidev_by_busid(hwloc_topology_t topology,
1075                           unsigned domain, unsigned bus, unsigned dev, unsigned func)
1076 {
1077   hwloc_obj_t obj = NULL;
1078   while ((obj = hwloc_get_next_pcidev(topology, obj)) != NULL) {
1079     if (obj->attr->pcidev.domain == domain
1080         && obj->attr->pcidev.bus == bus
1081         && obj->attr->pcidev.dev == dev
1082         && obj->attr->pcidev.func == func)
1083       return obj;
1084   }
1085   return NULL;
1086 }
1087 
1088 /** \brief Find the PCI device object matching the PCI bus id
1089  * given as a string xxxx:yy:zz.t or yy:zz.t.
1090  */
1091 static __hwloc_inline hwloc_obj_t
1092 hwloc_get_pcidev_by_busidstring(hwloc_topology_t topology, const char *busid)
1093 {
1094   unsigned domain = 0; /* default */
1095   unsigned bus, dev, func;
1096 
1097   if (sscanf(busid, "%x:%x.%x", &bus, &dev, &func) != 3
1098       && sscanf(busid, "%x:%x:%x.%x", &domain, &bus, &dev, &func) != 4) {
1099     errno = EINVAL;
1100     return NULL;
1101   }
1102 
1103   return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, func);
1104 }
1105 
1106 /** \brief Get the next OS device in the system.
1107  *
1108  * \return the first OS device if \p prev is \c NULL.
1109  */
1110 static __hwloc_inline hwloc_obj_t
1111 hwloc_get_next_osdev(hwloc_topology_t topology, hwloc_obj_t prev)
1112 {
1113   return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_OS_DEVICE, prev);
1114 }
1115 
1116 /** \brief Get the next bridge in the system.
1117  *
1118  * \return the first bridge if \p prev is \c NULL.
1119  */
1120 static __hwloc_inline hwloc_obj_t
1121 hwloc_get_next_bridge(hwloc_topology_t topology, hwloc_obj_t prev)
1122 {
1123   return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_BRIDGE, prev);
1124 }
1125 
1126 /* \brief Checks whether a given bridge covers a given PCI bus.
1127  */
1128 static __hwloc_inline int
1129 hwloc_bridge_covers_pcibus(hwloc_obj_t bridge,
1130                            unsigned domain, unsigned bus)
1131 {
1132   return bridge->type == HWLOC_OBJ_BRIDGE
1133     && bridge->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
1134     && bridge->attr->bridge.downstream.pci.domain == domain
1135     && bridge->attr->bridge.downstream.pci.secondary_bus <= bus
1136     && bridge->attr->bridge.downstream.pci.subordinate_bus >= bus;
1137 }
1138 
1139 /** @} */
1140 
1141 
1142 
1143 #ifdef __cplusplus
1144 } /* extern "C" */
1145 #endif
1146 
1147 
1148 #endif /* HWLOC_HELPER_H */

/* [<][>][^][v][top][bottom][index][help] */