root/opal/mca/hwloc/hwloc201/hwloc/hwloc/topology-solaris.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. hwloc_solaris_set_sth_cpubind
  2. hwloc_solaris_set_proc_cpubind
  3. hwloc_solaris_set_thisproc_cpubind
  4. hwloc_solaris_set_thisthread_cpubind
  5. hwloc_solaris_get_sth_cpubind
  6. hwloc_solaris_get_proc_cpubind
  7. hwloc_solaris_get_thisproc_cpubind
  8. hwloc_solaris_get_thisthread_cpubind
  9. hwloc_solaris_set_sth_membind
  10. hwloc_solaris_set_proc_membind
  11. hwloc_solaris_set_thisproc_membind
  12. hwloc_solaris_set_thisthread_membind
  13. hwloc_solaris_get_sth_membind
  14. hwloc_solaris_get_proc_membind
  15. hwloc_solaris_get_thisproc_membind
  16. hwloc_solaris_get_thisthread_membind
  17. hwloc_solaris_set_area_membind
  18. lgrp_list_allowed
  19. lgrp_build_numanodes
  20. hwloc_look_lgrp
  21. hwloc_look_kstat
  22. hwloc_look_solaris
  23. hwloc_solaris_get_allowed_hook
  24. hwloc_solaris_get_thisthread_last_cpu_location
  25. hwloc_set_solaris_hooks
  26. hwloc_solaris_component_instantiate

   1 /*
   2  * Copyright © 2009 CNRS
   3  * Copyright © 2009-2018 Inria.  All rights reserved.
   4  * Copyright © 2009-2011 Université Bordeaux
   5  * Copyright © 2011 Cisco Systems, Inc.  All rights reserved.
   6  * Copyright © 2011      Oracle and/or its affiliates.  All rights reserved.
   7  * See COPYING in top-level directory.
   8  */
   9 
  10 #include <private/autogen/config.h>
  11 #include <hwloc.h>
  12 #include <private/private.h>
  13 #include <private/debug.h>
  14 #include <private/solaris-chiptype.h>
  15 
  16 #include <stdio.h>
  17 #include <errno.h>
  18 #ifdef HAVE_DIRENT_H
  19 #include <dirent.h>
  20 #endif
  21 #ifdef HAVE_UNISTD_H
  22 #include <unistd.h>
  23 #endif
  24 #include <sys/types.h>
  25 #include <sys/processor.h>
  26 #include <sys/procset.h>
  27 #include <sys/systeminfo.h>
  28 #include <sys/types.h>
  29 #include <sys/mman.h>
  30 
  31 #ifdef HAVE_LIBLGRP
  32 #  include <sys/lgrp_user.h>
  33 #endif
  34 
/* Bind the target identified by (idtype, id) to the CPUs in hwloc_set.
 * Solaris processor_bind() only accepts a single CPU, so larger sets are
 * attempted via lgroup affinities when liblgrp is available, and rejected
 * with EXDEV otherwise.
 * Returns 0 on success, -1 with errno set on error.
 */
static int
hwloc_solaris_set_sth_cpubind(hwloc_topology_t topology, idtype_t idtype, id_t id, hwloc_const_bitmap_t hwloc_set, int flags)
{
  unsigned target_cpu;

  /* The resulting binding is always strict */

  if (hwloc_bitmap_isequal(hwloc_set, hwloc_topology_get_complete_cpuset(topology))) {
    /* Binding to the full machine: clear any single-CPU binding first. */
    if (processor_bind(idtype, id, PBIND_NONE, NULL) != 0)
      return -1;
#ifdef HAVE_LIBLGRP
    if (!(flags & HWLOC_CPUBIND_NOMEMBIND)) {
      /* Also clear all lgroup affinities, unless the caller asked us not
       * to touch memory binding. */
      int n, i;
      n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);
      for (i = 0; i < n; i++) {
        hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
        lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_NONE);
      }
    }
#endif /* HAVE_LIBLGRP */
    return 0;
  }

#ifdef HAVE_LIBLGRP
  if (!(flags & HWLOC_CPUBIND_NOMEMBIND)) {
    /* Check whether hwloc_set can be expressed as a union of whole NUMA
     * nodes; if so, lgroup affinities alone can implement the binding. */
    int n, i, ok;
    n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);
    hwloc_bitmap_t target = hwloc_bitmap_alloc();
    for (i = 0; i < n; i++) {
      hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
      if (hwloc_bitmap_isincluded(obj->cpuset, hwloc_set))
        hwloc_bitmap_or(target, target, obj->cpuset);
    }

    ok = hwloc_bitmap_isequal(target, hwloc_set);
    hwloc_bitmap_free(target);

    if (ok) {
      /* Ok, managed to achieve hwloc_set by just combining NUMA nodes */

      for (i = 0; i < n; i++) {
        hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);

        if (hwloc_bitmap_isincluded(obj->cpuset, hwloc_set)) {
          lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_STRONG);
        } else {
          /* Node outside the requested set: forbid it when strict,
           * otherwise only discourage it. */
          if (flags & HWLOC_CPUBIND_STRICT)
            lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_NONE);
          else
            lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_WEAK);
        }
      }

      return 0;
    }
  }
#endif /* HAVE_LIBLGRP */

  /* Fallback: processor_bind() can only bind to exactly one CPU. */
  if (hwloc_bitmap_weight(hwloc_set) != 1) {
    errno = EXDEV;
    return -1;
  }

  target_cpu = hwloc_bitmap_first(hwloc_set);

  if (processor_bind(idtype, id,
                     (processorid_t) (target_cpu), NULL) != 0)
    return -1;

  return 0;
}
 106 
 107 static int
 108 hwloc_solaris_set_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t hwloc_set, int flags)
 109 {
 110   return hwloc_solaris_set_sth_cpubind(topology, P_PID, pid, hwloc_set, flags);
 111 }
 112 
 113 static int
 114 hwloc_solaris_set_thisproc_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, int flags)
 115 {
 116   return hwloc_solaris_set_sth_cpubind(topology, P_PID, P_MYID, hwloc_set, flags);
 117 }
 118 
 119 static int
 120 hwloc_solaris_set_thisthread_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, int flags)
 121 {
 122   return hwloc_solaris_set_sth_cpubind(topology, P_LWPID, P_MYID, hwloc_set, flags);
 123 }
 124 
 125 #ifdef HAVE_LIBLGRP
 126 static int
 127 hwloc_solaris_get_sth_cpubind(hwloc_topology_t topology, idtype_t idtype, id_t id, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
 128 {
 129   processorid_t binding;
 130   int n;
 131   int i;
 132 
 133   /* first check if processor_bind() was used to bind to a single processor rather than to an lgroup */
 134   if ( processor_bind(idtype, id, PBIND_QUERY, &binding) == 0 && binding != PBIND_NONE ) {
 135     hwloc_bitmap_only(hwloc_set, binding);
 136     return 0;
 137   }
 138 
 139   /* if not, check lgroups */
 140   hwloc_bitmap_zero(hwloc_set);
 141   n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);
 142   for (i = 0; i < n; i++) {
 143     hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
 144     lgrp_affinity_t aff = lgrp_affinity_get(idtype, id, obj->os_index);
 145 
 146     if (aff == LGRP_AFF_STRONG)
 147       hwloc_bitmap_or(hwloc_set, hwloc_set, obj->cpuset);
 148   }
 149 
 150   if (hwloc_bitmap_iszero(hwloc_set))
 151     hwloc_bitmap_copy(hwloc_set, hwloc_topology_get_complete_cpuset(topology));
 152 
 153   return 0;
 154 }
 155 
 156 static int
 157 hwloc_solaris_get_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t hwloc_set, int flags)
 158 {
 159   return hwloc_solaris_get_sth_cpubind(topology, P_PID, pid, hwloc_set, flags);
 160 }
 161 
 162 static int
 163 hwloc_solaris_get_thisproc_cpubind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, int flags)
 164 {
 165   return hwloc_solaris_get_sth_cpubind(topology, P_PID, P_MYID, hwloc_set, flags);
 166 }
 167 
 168 static int
 169 hwloc_solaris_get_thisthread_cpubind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_set, int flags)
 170 {
 171   return hwloc_solaris_get_sth_cpubind(topology, P_LWPID, P_MYID, hwloc_set, flags);
 172 }
 173 #endif /* HAVE_LIBLGRP */
 174 
 175 /* TODO: given thread, probably not easy because of the historical n:m implementation */
 176 #ifdef HAVE_LIBLGRP
 177 static int
 178 hwloc_solaris_set_sth_membind(hwloc_topology_t topology, idtype_t idtype, id_t id, hwloc_const_nodeset_t _nodeset, hwloc_membind_policy_t policy, int flags)
 179 {
 180   int n, i;
 181   hwloc_const_nodeset_t nodeset;
 182 
 183   switch (policy) {
 184     case HWLOC_MEMBIND_DEFAULT:
 185     case HWLOC_MEMBIND_BIND:
 186       break;
 187     default:
 188       errno = ENOSYS;
 189       return -1;
 190   }
 191 
 192   if (flags & HWLOC_MEMBIND_NOCPUBIND) {
 193     errno = ENOSYS;
 194     return -1;
 195   }
 196 
 197   if (policy == HWLOC_MEMBIND_DEFAULT)
 198     nodeset = hwloc_topology_get_complete_nodeset(topology);
 199   else
 200     nodeset = _nodeset;
 201 
 202   n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);
 203 
 204   for (i = 0; i < n; i++) {
 205     hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
 206     if (hwloc_bitmap_isset(nodeset, obj->os_index)) {
 207       lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_STRONG);
 208     } else {
 209       if (flags & HWLOC_CPUBIND_STRICT)
 210         lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_NONE);
 211       else
 212         lgrp_affinity_set(idtype, id, obj->os_index, LGRP_AFF_WEAK);
 213     }
 214   }
 215 
 216   return 0;
 217 }
 218 
 219 static int
 220 hwloc_solaris_set_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
 221 {
 222   return hwloc_solaris_set_sth_membind(topology, P_PID, pid, nodeset, policy, flags);
 223 }
 224 
 225 static int
 226 hwloc_solaris_set_thisproc_membind(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
 227 {
 228   return hwloc_solaris_set_sth_membind(topology, P_PID, P_MYID, nodeset, policy, flags);
 229 }
 230 
 231 static int
 232 hwloc_solaris_set_thisthread_membind(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags)
 233 {
 234   return hwloc_solaris_set_sth_membind(topology, P_LWPID, P_MYID, nodeset, policy, flags);
 235 }
 236 
 237 static int
 238 hwloc_solaris_get_sth_membind(hwloc_topology_t topology, idtype_t idtype, id_t id, hwloc_nodeset_t nodeset, hwloc_membind_policy_t *policy, int flags __hwloc_attribute_unused)
 239 {
 240   int n;
 241   int i;
 242 
 243   hwloc_bitmap_zero(nodeset);
 244   n = hwloc_get_nbobjs_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE);
 245 
 246   for (i = 0; i < n; i++) {
 247     hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, i);
 248     lgrp_affinity_t aff = lgrp_affinity_get(idtype, id, obj->os_index);
 249 
 250     if (aff == LGRP_AFF_STRONG)
 251       hwloc_bitmap_set(nodeset, obj->os_index);
 252   }
 253 
 254   if (hwloc_bitmap_iszero(nodeset))
 255     hwloc_bitmap_copy(nodeset, hwloc_topology_get_complete_nodeset(topology));
 256 
 257   *policy = HWLOC_MEMBIND_BIND;
 258   return 0;
 259 }
 260 
 261 static int
 262 hwloc_solaris_get_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t *policy, int flags)
 263 {
 264   return hwloc_solaris_get_sth_membind(topology, P_PID, pid, nodeset, policy, flags);
 265 }
 266 
 267 static int
 268 hwloc_solaris_get_thisproc_membind(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t *policy, int flags)
 269 {
 270   return hwloc_solaris_get_sth_membind(topology, P_PID, P_MYID, nodeset, policy, flags);
 271 }
 272 
 273 static int
 274 hwloc_solaris_get_thisthread_membind(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t *policy, int flags)
 275 {
 276   return hwloc_solaris_get_sth_membind(topology, P_LWPID, P_MYID, nodeset, policy, flags);
 277 }
 278 #endif /* HAVE_LIBLGRP */
 279 
 280 
 281 #ifdef MADV_ACCESS_LWP
 282 static int
 283 hwloc_solaris_set_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags __hwloc_attribute_unused)
 284 {
 285   int advice;
 286   size_t remainder;
 287 
 288   /* Can not give a set of nodes just for an area.  */
 289   if (policy != HWLOC_MEMBIND_DEFAULT
 290       && !hwloc_bitmap_isequal(nodeset, hwloc_topology_get_complete_nodeset(topology))) {
 291     errno = EXDEV;
 292     return -1;
 293   }
 294 
 295   switch (policy) {
 296     case HWLOC_MEMBIND_DEFAULT:
 297     case HWLOC_MEMBIND_BIND:
 298       advice = MADV_ACCESS_DEFAULT;
 299       break;
 300     case HWLOC_MEMBIND_FIRSTTOUCH:
 301     case HWLOC_MEMBIND_NEXTTOUCH:
 302       advice = MADV_ACCESS_LWP;
 303       break;
 304     case HWLOC_MEMBIND_INTERLEAVE:
 305       advice = MADV_ACCESS_MANY;
 306       break;
 307     default:
 308       errno = ENOSYS;
 309       return -1;
 310   }
 311 
 312   remainder = (uintptr_t) addr & (sysconf(_SC_PAGESIZE)-1);
 313   addr = (char*) addr - remainder;
 314   len += remainder;
 315   return madvise((void*) addr, len, advice);
 316 }
 317 #endif
 318 
 319 #ifdef HAVE_LIBLGRP
 320 
 321 /* list the allowed PUs and NUMA Nodes using LGRP_VIEW_CALLER */
/* list the allowed PUs and NUMA Nodes using LGRP_VIEW_CALLER */
/* Rebuilds topology->allowed_cpuset and topology->allowed_nodeset from the
 * root lgroup of the caller's view.  On any lgrp error the function bails
 * out, leaving the corresponding allowed sets untouched (the cpuset may
 * already have been updated when listing nodes fails — this partial update
 * is the existing best-effort behavior).
 */
static void
lgrp_list_allowed(struct hwloc_topology *topology)
{
  lgrp_cookie_t cookie;
  lgrp_id_t root;
  int npids, nnids;
  int i, n;
  processorid_t *pids;
  lgrp_id_t *nids;

  cookie = lgrp_init(LGRP_VIEW_CALLER);
  if (cookie == LGRP_COOKIE_NONE) {
    hwloc_debug("lgrp_init LGRP_VIEW_CALLER failed: %s\n", strerror(errno));
    goto out;
  }
  root = lgrp_root(cookie);

  /* list allowed PUs */
  /* First call with a NULL buffer returns the required count. */
  npids = lgrp_cpus(cookie, root, NULL, 0, LGRP_CONTENT_HIERARCHY);
  if (npids < 0) {
    hwloc_debug("lgrp_cpus failed: %s\n", strerror(errno));
    goto out_with_cookie;
  }
  hwloc_debug("root lgrp contains %d allowed PUs\n", npids);
  assert(npids > 0);

  pids = malloc(npids * sizeof(*pids));
  if (!pids)
    goto out_with_cookie;

  n = lgrp_cpus(cookie, root, pids, npids, LGRP_CONTENT_HIERARCHY);
  assert(n == npids);

  /* Replace the allowed cpuset with exactly the listed PUs. */
  hwloc_bitmap_zero(topology->allowed_cpuset);

  for(i=0; i<npids; i++) {
    hwloc_debug("root lgrp contains allowed PU #%d = P#%d\n", i, pids[i]);
    hwloc_bitmap_set(topology->allowed_cpuset, pids[i]);
  }
  free(pids);

  /* list allowed NUMA nodes */
  nnids = lgrp_resources(cookie, root, NULL, 0, LGRP_RSRC_MEM);
  if (nnids < 0) {
    hwloc_debug("lgrp_resources failed: %s\n", strerror(errno));
    goto out_with_cookie;
  }
  hwloc_debug("root lgrp contains %d allowed NUMA nodes\n", nnids);
  assert(nnids > 0);

  nids = malloc(nnids * sizeof(*nids));
  if (!nids)
    goto out_with_cookie;

  n = lgrp_resources(cookie, root, nids, nnids, LGRP_RSRC_MEM);
  assert(n == nnids);

  /* Replace the allowed nodeset with exactly the listed lgroups. */
  hwloc_bitmap_zero(topology->allowed_nodeset);

  for(i=0; i<nnids; i++) {
    hwloc_debug("root lgrp contains allowed NUMA node #%d = P#%ld\n", i, nids[i]);
    hwloc_bitmap_set(topology->allowed_nodeset, nids[i]);
  }
  free(nids);

 out_with_cookie:
  lgrp_fini(cookie);
 out:
  return;
}
 392 
 393 /* build all NUMAs (even if disallowed) and get global cpuset+nodeset using LGRP_VIEW_OS */
 394 static void
 395 lgrp_build_numanodes(struct hwloc_topology *topology,
 396                      lgrp_cookie_t cookie, lgrp_id_t root,
 397                      hwloc_obj_t *nodes, unsigned *nr_nodes)
 398 {
 399   int npids, nnids;
 400   int i, j, n;
 401   processorid_t *pids;
 402   lgrp_id_t *nids;
 403 
 404   /* get the max number of PUs */
 405   npids = lgrp_cpus(cookie, root, NULL, 0, LGRP_CONTENT_HIERARCHY);
 406   if (npids < 0) {
 407     hwloc_debug("lgrp_cpus failed: %s\n", strerror(errno));
 408     goto out;
 409   }
 410   hwloc_debug("root lgrp contains %d PUs\n", npids);
 411   assert(npids > 0);
 412 
 413   /* allocate a single array that will be large enough for lgroup cpus below */
 414   pids = malloc(npids * sizeof(*pids));
 415   if (!pids)
 416     goto out;
 417 
 418   /* list NUMA nodes */
 419   nnids = lgrp_resources(cookie, root, NULL, 0, LGRP_RSRC_MEM);
 420   if (nnids < 0) {
 421     hwloc_debug("lgrp_resources failed: %s\n", strerror(errno));
 422     goto out_with_pids;
 423   }
 424   hwloc_debug("root lgrp contains %d NUMA nodes\n", nnids);
 425   assert(nnids > 0);
 426 
 427   nids = malloc(nnids * sizeof(*nids));
 428   if (!nids)
 429     goto out_with_pids;
 430 
 431   n = lgrp_resources(cookie, root, nids, nnids, LGRP_RSRC_MEM);
 432   assert(n == nnids);
 433 
 434   for(i=0; i<nnids; i++) {
 435     hwloc_obj_t obj;
 436     lgrp_mem_size_t mem_size;
 437     hwloc_debug("root lgrp contains NUMA node #%d = P#%ld\n", i, nids[i]);
 438     mem_size = lgrp_mem_size(cookie, nids[i], LGRP_MEM_SZ_INSTALLED, LGRP_CONTENT_DIRECT);
 439     /* or LGRP_MEM_SZ_FREE */
 440 
 441     obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_NUMANODE, (unsigned) nids[i]);
 442     obj->nodeset = hwloc_bitmap_alloc();
 443     hwloc_bitmap_set(obj->nodeset, nids[i]);
 444     obj->cpuset = hwloc_bitmap_alloc();
 445     nodes[(*nr_nodes)++] = obj;
 446 
 447     hwloc_debug("NUMA node %ld has %lldkB\n", nids[i], mem_size/1024);
 448     obj->attr->numanode.local_memory = mem_size;
 449     obj->attr->numanode.page_types_len = 2;
 450     obj->attr->numanode.page_types = malloc(2*sizeof(*obj->attr->numanode.page_types));
 451     memset(obj->attr->numanode.page_types, 0, 2*sizeof(*obj->attr->numanode.page_types));
 452     obj->attr->numanode.page_types[0].size = hwloc_getpagesize();
 453 #if HAVE_DECL__SC_LARGE_PAGESIZE
 454     obj->attr->numanode.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
 455 #endif
 456 
 457     n = lgrp_cpus(cookie, nids[i], pids, npids, LGRP_CONTENT_HIERARCHY);
 458     if (n < 0) {
 459       hwloc_debug("lgrp_cpus on NUMA node failed: %s\n", strerror(errno));
 460     } else {
 461       hwloc_debug("NUMA node %ld contains %d PUs\n", nids[i], n);
 462       for (j = 0; j < n ; j++) {
 463         hwloc_debug("node %ld's cpu %d is %d\n", nids[i], j, pids[j]);
 464         hwloc_bitmap_set(obj->cpuset, pids[j]);
 465       }
 466       hwloc_debug_1arg_bitmap("node %ld has cpuset %s\n",
 467                               nids[i], obj->cpuset);
 468     }
 469 
 470     hwloc_insert_object_by_cpuset(topology, obj);
 471   }
 472   topology->support.discovery->numa = 1;
 473   topology->support.discovery->numa_memory = 1;
 474 
 475  out_with_pids:
 476   free(pids);
 477  out:
 478   return;
 479 }
 480 
 481 static void
 482 hwloc_look_lgrp(struct hwloc_topology *topology)
 483 {
 484   lgrp_cookie_t cookie;
 485   unsigned curlgrp = 0;
 486   int nlgrps;
 487   lgrp_id_t root;
 488 
 489   lgrp_list_allowed(topology);
 490 
 491   cookie = lgrp_init(LGRP_VIEW_OS);
 492   if (cookie == LGRP_COOKIE_NONE)
 493     {
 494       hwloc_debug("lgrp_init failed: %s\n", strerror(errno));
 495       return;
 496     }
 497   nlgrps = lgrp_nlgrps(cookie);
 498   root = lgrp_root(cookie);
 499   if (nlgrps > 0) {
 500     hwloc_obj_t *glob_lgrps = calloc(nlgrps, sizeof(hwloc_obj_t));
 501 
 502     lgrp_build_numanodes(topology, cookie, root, glob_lgrps, &curlgrp);
 503 
 504 #if HAVE_DECL_LGRP_LATENCY_COOKIE
 505     if (nlgrps > 1) {
 506       uint64_t *distances = calloc(curlgrp*curlgrp, sizeof(uint64_t));
 507       unsigned i, j;
 508       if (distances) {
 509         for (i = 0; i < curlgrp; i++)
 510           for (j = 0; j < curlgrp; j++) {
 511             int latency = lgrp_latency_cookie(cookie, glob_lgrps[i]->os_index, glob_lgrps[j]->os_index, LGRP_LAT_CPU_TO_MEM);
 512             if (latency < 0) {
 513               /* FIXME: if errno = ESRCH because some NUMA nodes are unavailable, we could reduce the matrix instead of ignoring */
 514               free(distances);
 515               goto done;
 516             }
 517             distances[i*curlgrp+j] = (uint64_t) latency;
 518         }
 519         hwloc_internal_distances_add(topology, curlgrp, glob_lgrps, distances,
 520                                      HWLOC_DISTANCES_KIND_FROM_OS|HWLOC_DISTANCES_KIND_MEANS_LATENCY,
 521                                      HWLOC_DISTANCES_ADD_FLAG_GROUP);
 522         glob_lgrps = NULL; /* dont free it below */
 523       }
 524     }
 525 #endif /* HAVE_DECL_LGRP_LATENCY_COOKIE */
 526 done:
 527     free(glob_lgrps);
 528   }
 529   lgrp_fini(cookie);
 530 }
 531 #endif /* LIBLGRP */
 532 
 533 #ifdef HAVE_LIBKSTAT
 534 #include <kstat.h>
 535 static int
 536 hwloc_look_kstat(struct hwloc_topology *topology)
 537 {
 538   struct hwloc_solaris_chip_info_s chip_info;
 539   static char architecture[6] = "";
 540   int is_sparc = 0;
 541   int l1i_from_core = 0;
 542   int l1d_from_core = 0;
 543   int ret;
 544 
 545   kstat_ctl_t *kc = kstat_open();
 546   kstat_t *ksp;
 547   kstat_named_t *stat;
 548   unsigned look_cores = 1, look_chips = 1;
 549 
 550   unsigned Pproc_max = 0;
 551   unsigned Pproc_alloc = 256;
 552   struct hwloc_solaris_Pproc {
 553     unsigned Lpkg, Ppkg, Lcore, Lproc;
 554   } * Pproc = malloc(Pproc_alloc * sizeof(*Pproc));
 555 
 556   unsigned Lproc_num = 0;
 557   unsigned Lproc_alloc = 256;
 558   struct hwloc_solaris_Lproc {
 559     unsigned Pproc;
 560   } * Lproc = malloc(Lproc_alloc * sizeof(*Lproc));
 561 
 562   unsigned Lcore_num = 0;
 563   unsigned Lcore_alloc = 256;
 564   struct hwloc_solaris_Lcore {
 565     unsigned Pcore, Ppkg;
 566   } * Lcore = malloc(Lcore_alloc * sizeof(*Lcore));
 567 
 568   unsigned Lpkg_num = 0;
 569   unsigned Lpkg_alloc = 256;
 570   struct hwloc_solaris_Lpkg {
 571     unsigned Ppkg;
 572   } * Lpkg = malloc(Lpkg_alloc * sizeof(*Lpkg));
 573 
 574   unsigned pkgid, coreid, cpuid;
 575   unsigned i;
 576 
 577   for (i = 0; i < Pproc_alloc; i++) {
 578     Pproc[i].Lproc = -1;
 579     Pproc[i].Lpkg = -1;
 580     Pproc[i].Ppkg = -1;
 581     Pproc[i].Lcore = -1;
 582   }
 583 
 584   if (!kc) {
 585     hwloc_debug("kstat_open failed: %s\n", strerror(errno));
 586     free(Pproc);
 587     free(Lproc);
 588     free(Lcore);
 589     free(Lpkg);
 590     return 0;
 591   }
 592 
 593   ret = sysinfo(SI_ARCHITECTURE, architecture, sizeof architecture);
 594   if (ret == 6 && !strcmp(architecture, "sparc"))
 595     is_sparc = 1;
 596 
 597   hwloc_solaris_get_chip_info(&chip_info);
 598 
 599   /* mark unneeded caches as size -1 */
 600   if (!hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L1ICACHE))
 601     chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L1I] = -1;
 602   if (!hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L1CACHE))
 603     chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L1D] = -1;
 604   if (!hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L2ICACHE))
 605     chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L2I] = -1;
 606   if (!hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L2CACHE))
 607     chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L2D] = -1;
 608   if (!hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_L3CACHE))
 609     chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L3] = -1;
 610 
 611   /* mark empty caches as unneeded on !sparc since we have the x86 backend to better get them. */
 612   if (!is_sparc) {
 613     for(i=0; i<sizeof(chip_info.cache_size)/sizeof(*chip_info.cache_size); i++)
 614       if (!chip_info.cache_size[i])
 615         chip_info.cache_size[i] = -1;
 616   }
 617 
 618   /* on sparc, assume l1d and l1i have same sharing as the core.
 619    * on !sparc, we don't know the sharing of these caches, hence we ignore them.
 620    * on x86, the x86-backend will take care of these caches again.
 621    */
 622   if (is_sparc && chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L1D] >= 0) {
 623     hwloc_debug("Will generate L1d caches from cores and PICL cache index #%u\n", HWLOC_SOLARIS_CHIP_INFO_L1D);
 624     l1d_from_core = 1;
 625   }
 626   if (is_sparc && chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L1I] >= 0) {
 627     hwloc_debug("Will generate L1i caches from cores and PICL cache index #%u\n", HWLOC_SOLARIS_CHIP_INFO_L1I);
 628     l1i_from_core = 1;
 629   }
 630 
 631   for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
 632     if (!strncmp("cpu_info", ksp->ks_module, 8)) {
 633       cpuid = ksp->ks_instance;
 634 
 635       if (kstat_read(kc, ksp, NULL) == -1)
 636         {
 637           fprintf(stderr, "kstat_read failed for CPU%u: %s\n", cpuid, strerror(errno));
 638           continue;
 639         }
 640 
 641       hwloc_debug("cpu%u\n", cpuid);
 642       hwloc_bitmap_set(topology->levels[0][0]->complete_cpuset, cpuid);
 643 
 644       stat = (kstat_named_t *) kstat_data_lookup(ksp, "state");
 645       if (!stat)
 646           hwloc_debug("could not read state for CPU%u: %s\n", cpuid, strerror(errno));
 647       else if (stat->data_type != KSTAT_DATA_CHAR)
 648           hwloc_debug("unknown kstat type %d for cpu state\n", stat->data_type);
 649       else
 650         {
 651           hwloc_debug("cpu%u's state is %s\n", cpuid, stat->value.c);
 652           if (strcmp(stat->value.c, "on-line")) {
 653             /* Not online.
 654              * It was marked as existing in complete_cpuset above, ignore everything else.
 655              * We wouldn't get the all topology information about parents anyway.
 656              */
 657             continue;
 658           }
 659         }
 660 
 661       if (cpuid >= Pproc_alloc) {
 662         struct hwloc_solaris_Pproc *tmp = realloc(Pproc, 2*Pproc_alloc * sizeof(*Pproc));
 663         if (!tmp)
 664           goto err;
 665         Pproc = tmp;
 666         Pproc_alloc *= 2;
 667         for(i = Pproc_alloc/2; i < Pproc_alloc; i++) {
 668           Pproc[i].Lproc = -1;
 669           Pproc[i].Lpkg = -1;
 670           Pproc[i].Ppkg = -1;
 671           Pproc[i].Lcore = -1;
 672         }
 673       }
 674       Pproc[cpuid].Lproc = Lproc_num;
 675 
 676       if (Lproc_num >= Lproc_alloc) {
 677         struct hwloc_solaris_Lproc *tmp = realloc(Lproc, 2*Lproc_alloc * sizeof(*Lproc));
 678         if (!tmp)
 679           goto err;
 680         Lproc = tmp;
 681         Lproc_alloc *= 2;
 682       }
 683       Lproc[Lproc_num].Pproc = cpuid;
 684       Lproc_num++;
 685 
 686       if (cpuid >= Pproc_max)
 687         Pproc_max = cpuid + 1;
 688 
 689       if (look_chips) do {
 690         /* Get Chip ID */
 691         stat = (kstat_named_t *) kstat_data_lookup(ksp, "chip_id");
 692         if (!stat)
 693           {
 694             if (Lpkg_num)
 695               fprintf(stderr, "could not read package id for CPU%u: %s\n", cpuid, strerror(errno));
 696             else
 697               hwloc_debug("could not read package id for CPU%u: %s\n", cpuid, strerror(errno));
 698             look_chips = 0;
 699             continue;
 700           }
 701         switch (stat->data_type) {
 702           case KSTAT_DATA_INT32:
 703             pkgid = stat->value.i32;
 704             break;
 705           case KSTAT_DATA_UINT32:
 706             pkgid = stat->value.ui32;
 707             break;
 708 #ifdef _INT64_TYPE
 709           case KSTAT_DATA_UINT64:
 710             pkgid = stat->value.ui64;
 711             break;
 712           case KSTAT_DATA_INT64:
 713             pkgid = stat->value.i64;
 714             break;
 715 #endif
 716           default:
 717             fprintf(stderr, "chip_id type %u unknown\n", (unsigned) stat->data_type);
 718             look_chips = 0;
 719             continue;
 720         }
 721         Pproc[cpuid].Ppkg = pkgid;
 722         for (i = 0; i < Lpkg_num; i++)
 723           if (pkgid == Lpkg[i].Ppkg)
 724             break;
 725         Pproc[cpuid].Lpkg = i;
 726         hwloc_debug("%u on package %u (%u)\n", cpuid, i, pkgid);
 727         if (i == Lpkg_num) {
 728           if (Lpkg_num == Lpkg_alloc) {
 729             struct hwloc_solaris_Lpkg *tmp = realloc(Lpkg, 2*Lpkg_alloc * sizeof(*Lpkg));
 730             if (!tmp)
 731               goto err;
 732             Lpkg = tmp;
 733             Lpkg_alloc *= 2;
 734           }
 735           Lpkg[Lpkg_num++].Ppkg = pkgid;
 736         }
 737       } while(0);
 738 
 739       if (look_cores) do {
 740         /* Get Core ID */
 741         stat = (kstat_named_t *) kstat_data_lookup(ksp, "core_id");
 742         if (!stat)
 743           {
 744             if (Lcore_num)
 745               fprintf(stderr, "could not read core id for CPU%u: %s\n", cpuid, strerror(errno));
 746             else
 747               hwloc_debug("could not read core id for CPU%u: %s\n", cpuid, strerror(errno));
 748             look_cores = 0;
 749             continue;
 750           }
 751         switch (stat->data_type) {
 752           case KSTAT_DATA_INT32:
 753             coreid = stat->value.i32;
 754             break;
 755           case KSTAT_DATA_UINT32:
 756             coreid = stat->value.ui32;
 757             break;
 758 #ifdef _INT64_TYPE
 759           case KSTAT_DATA_UINT64:
 760             coreid = stat->value.ui64;
 761             break;
 762           case KSTAT_DATA_INT64:
 763             coreid = stat->value.i64;
 764             break;
 765 #endif
 766           default:
 767             fprintf(stderr, "core_id type %u unknown\n", (unsigned) stat->data_type);
 768             look_cores = 0;
 769             continue;
 770         }
 771         for (i = 0; i < Lcore_num; i++)
 772           if (coreid == Lcore[i].Pcore && Pproc[cpuid].Ppkg == Lcore[i].Ppkg)
 773             break;
 774         Pproc[cpuid].Lcore = i;
 775         hwloc_debug("%u on core %u (%u)\n", cpuid, i, coreid);
 776         if (i == Lcore_num) {
 777           if (Lcore_num == Lcore_alloc) {
 778             struct hwloc_solaris_Lcore *tmp = realloc(Lcore, 2*Lcore_alloc * sizeof(*Lcore));
 779             if (!tmp)
 780               goto err;
 781             Lcore = tmp;
 782             Lcore_alloc *= 2;
 783           }
 784           Lcore[Lcore_num].Ppkg = Pproc[cpuid].Ppkg;
 785           Lcore[Lcore_num++].Pcore = coreid;
 786         }
 787       } while(0);
 788 
 789       /* Note: there is also clog_id for the Thread ID (not unique) and
 790        * pkg_core_id for the core ID (not unique).  They are not useful to us
 791        * however. */
 792 
 793     } else if (!strcmp("pg_hw_perf", ksp->ks_module)) {
 794       if (kstat_read(kc, ksp, NULL) == -1) {
 795         fprintf(stderr, "kstat_read failed for module %s name %s instance %d: %s\n", ksp->ks_module, ksp->ks_name, ksp->ks_instance, strerror(errno));
 796         continue;
 797       }
 798       stat = (kstat_named_t *) kstat_data_lookup(ksp, "cpus");
 799       if (stat) {
 800         hwloc_debug("found kstat module %s name %s instance %d cpus type %d\n", ksp->ks_module, ksp->ks_name, ksp->ks_instance, stat->data_type);
 801         if (stat->data_type == KSTAT_DATA_STRING) {
 802           hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
 803           hwloc_bitmap_list_sscanf(cpuset, stat->value.str.addr.ptr);
 804 
 805           if (!strcmp(ksp->ks_name, "L3_Cache")) {
 806             if (chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L3] >= 0) {
 807               hwloc_obj_t l3 = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L3CACHE, HWLOC_UNKNOWN_INDEX);
 808               l3->cpuset = cpuset;
 809               l3->attr->cache.depth = 3;
 810               l3->attr->cache.size = chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L3];
 811               l3->attr->cache.linesize = chip_info.cache_linesize[HWLOC_SOLARIS_CHIP_INFO_L3];
 812               l3->attr->cache.associativity = chip_info.cache_associativity[HWLOC_SOLARIS_CHIP_INFO_L3];
 813               l3->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED;
 814               hwloc_insert_object_by_cpuset(topology, l3);
 815               cpuset = NULL; /* don't free below */
 816             }
 817           }
 818           else if (!strcmp(ksp->ks_name, "L2_Cache")) {
 819             if (!chip_info.l2_unified && chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L2I] >= 0) {
 820               hwloc_obj_t l2i = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L2ICACHE, HWLOC_UNKNOWN_INDEX);
 821               l2i->cpuset = hwloc_bitmap_dup(cpuset);
 822               l2i->attr->cache.depth = 2;
 823               l2i->attr->cache.size = chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L2I];
 824               l2i->attr->cache.linesize = chip_info.cache_linesize[HWLOC_SOLARIS_CHIP_INFO_L2I];
 825               l2i->attr->cache.associativity = chip_info.cache_associativity[HWLOC_SOLARIS_CHIP_INFO_L2I];
 826               l2i->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
 827               hwloc_insert_object_by_cpuset(topology, l2i);
 828             }
 829             if (chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L2D] >= 0) {
 830               hwloc_obj_t l2 = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L2CACHE, HWLOC_UNKNOWN_INDEX);
 831               l2->cpuset = cpuset;
 832               l2->attr->cache.depth = 2;
 833               l2->attr->cache.size = chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L2D];
 834               l2->attr->cache.linesize = chip_info.cache_linesize[HWLOC_SOLARIS_CHIP_INFO_L2D];
 835               l2->attr->cache.associativity = chip_info.cache_associativity[HWLOC_SOLARIS_CHIP_INFO_L2D];
 836               l2->attr->cache.type = chip_info.l2_unified ? HWLOC_OBJ_CACHE_UNIFIED : HWLOC_OBJ_CACHE_DATA;
 837               hwloc_insert_object_by_cpuset(topology, l2);
 838               cpuset = NULL; /* don't free below */
 839             }
 840           }
 841           else if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) {
 842             hwloc_obj_t group = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX);
 843             group->cpuset = cpuset;
 844             group->attr->group.kind = HWLOC_GROUP_KIND_SOLARIS_PG_HW_PERF;
 845             group->attr->group.subkind = hwloc_bitmap_weight(cpuset);
 846             if (ksp->ks_name[0])
 847               hwloc_obj_add_info(group, "SolarisProcessorGroup", ksp->ks_name);
 848             hwloc_insert_object_by_cpuset(topology, group);
 849             cpuset = NULL; /* don't free below */
 850           }
 851           hwloc_bitmap_free(cpuset);
 852         }
 853       }
 854     }
 855   }
 856 
 857   if (look_chips
 858       && hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_PACKAGE)) {
 859     struct hwloc_obj *obj;
 860     unsigned j,k;
 861     hwloc_debug("%u Packages\n", Lpkg_num);
 862     for (j = 0; j < Lpkg_num; j++) {
 863       obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PACKAGE, Lpkg[j].Ppkg);
 864       if (chip_info.type && chip_info.type[0])
 865         hwloc_obj_add_info(obj, "CPUType", chip_info.type);
 866       if (chip_info.model && chip_info.model[0])
 867         hwloc_obj_add_info(obj, "CPUModel", chip_info.model);
 868       obj->cpuset = hwloc_bitmap_alloc();
 869       for(k=0; k<Pproc_max; k++)
 870         if (Pproc[k].Lpkg == j)
 871           hwloc_bitmap_set(obj->cpuset, k);
 872       hwloc_debug_1arg_bitmap("Package %u has cpuset %s\n", j, obj->cpuset);
 873       hwloc_insert_object_by_cpuset(topology, obj);
 874     }
 875     hwloc_debug("%s", "\n");
 876   }
 877 
 878   if (look_cores || l1i_from_core || l1d_from_core) {
 879     unsigned j;
 880     hwloc_debug("%u Cores\n", Lcore_num);
 881     for (j = 0; j < Lcore_num; j++) {
 882       /* Build the core cpuset */
 883       unsigned k;
 884       hwloc_bitmap_t cpuset = hwloc_bitmap_alloc();
 885       for(k=0; k<Pproc_max; k++)
 886         if (Pproc[k].Lcore == j)
 887           hwloc_bitmap_set(cpuset, k);
 888       hwloc_debug_1arg_bitmap("Core %u has cpuset %s\n", j, cpuset);
 889 
 890       /* Sparcs have per-core L1's. If we got their sizes from PICL, create those objects.
 891        *
 892        * On x86, let the x86 backend handle things.
 893        * At least AMD Fam15h L1i isn't per core (shared by dual-core compute unit).
 894        */
 895       if (l1d_from_core) {
 896         struct hwloc_obj *l1 = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L1CACHE, HWLOC_UNKNOWN_INDEX);
 897         l1->cpuset = hwloc_bitmap_dup(cpuset);
 898         l1->attr->cache.depth = 1;
 899         l1->attr->cache.type = HWLOC_OBJ_CACHE_DATA;
 900         l1->attr->cache.size = chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L1D];
 901         l1->attr->cache.linesize = chip_info.cache_linesize[HWLOC_SOLARIS_CHIP_INFO_L1D];
 902         l1->attr->cache.associativity = chip_info.cache_associativity[HWLOC_SOLARIS_CHIP_INFO_L1D];
 903         hwloc_insert_object_by_cpuset(topology, l1);
 904       }
 905       if (l1i_from_core) {
 906         struct hwloc_obj *l1i = hwloc_alloc_setup_object(topology, HWLOC_OBJ_L1ICACHE, HWLOC_UNKNOWN_INDEX);
 907         l1i->cpuset = hwloc_bitmap_dup(cpuset);
 908         l1i->attr->cache.depth = 1;
 909         l1i->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION;
 910         l1i->attr->cache.size = chip_info.cache_size[HWLOC_SOLARIS_CHIP_INFO_L1I];
 911         l1i->attr->cache.linesize = chip_info.cache_linesize[HWLOC_SOLARIS_CHIP_INFO_L1I];
 912         l1i->attr->cache.associativity = chip_info.cache_associativity[HWLOC_SOLARIS_CHIP_INFO_L1I];
 913         hwloc_insert_object_by_cpuset(topology, l1i);
 914       }
 915       if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_CORE)) {
 916         struct hwloc_obj *obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_CORE, Lcore[j].Pcore);
 917         obj->cpuset = cpuset;
 918         hwloc_insert_object_by_cpuset(topology, obj);
 919       } else {
 920         hwloc_bitmap_free(cpuset);
 921       }
 922     }
 923     hwloc_debug("%s", "\n");
 924   }
 925 
 926   if (Lproc_num) {
 927     struct hwloc_obj *obj;
 928     unsigned j,k;
 929     hwloc_debug("%u PUs\n", Lproc_num);
 930     for (j = 0; j < Lproc_num; j++) {
 931       obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PU, Lproc[j].Pproc);
 932       obj->cpuset = hwloc_bitmap_alloc();
 933       for(k=0; k<Pproc_max; k++)
 934         if (Pproc[k].Lproc == j)
 935           hwloc_bitmap_set(obj->cpuset, k);
 936       hwloc_debug_1arg_bitmap("PU %u has cpuset %s\n", j, obj->cpuset);
 937       hwloc_insert_object_by_cpuset(topology, obj);
 938     }
 939     hwloc_debug("%s", "\n");
 940     topology->support.discovery->pu = 1;
 941   }
 942 
 943   kstat_close(kc);
 944 
 945   free(Pproc);
 946   free(Lproc);
 947   free(Lcore);
 948   free(Lpkg);
 949   return Lproc_num > 0;
 950 
 951  err:
 952   kstat_close(kc);
 953 
 954   free(Pproc);
 955   free(Lproc);
 956   free(Lcore);
 957   free(Lpkg);
 958   return 0;
 959 }
 960 #endif /* LIBKSTAT */
 961 
 962 static int
 963 hwloc_look_solaris(struct hwloc_backend *backend)
 964 {
 965   struct hwloc_topology *topology = backend->topology;
 966   int alreadypus = 0;
 967 
 968   if (topology->levels[0][0]->cpuset)
 969     /* somebody discovered things */
 970     return -1;
 971 
 972   hwloc_alloc_root_sets(topology->levels[0][0]);
 973 
 974 #ifdef HAVE_LIBLGRP
 975   hwloc_look_lgrp(topology);
 976 #endif /* HAVE_LIBLGRP */
 977 #ifdef HAVE_LIBKSTAT
 978   if (hwloc_look_kstat(topology) > 0)
 979     alreadypus = 1;
 980 #endif /* HAVE_LIBKSTAT */
 981 
 982   if (!alreadypus) {
 983     int nbprocs = hwloc_fallback_nbprocessors (topology);
 984     if (nbprocs >= 1)
 985       topology->support.discovery->pu = 1;
 986     else
 987       nbprocs = 1;
 988     hwloc_setup_pu_level(topology, nbprocs);
 989   }
 990 
 991   hwloc_obj_add_info(topology->levels[0][0], "Backend", "Solaris");
 992   hwloc_add_uname_info(topology, NULL);
 993   return 0;
 994 }
 995 
#ifdef HAVE_LIBLGRP
/* get_allowed_resources hook: delegates to lgrp_list_allowed() (defined
 * earlier in this file) to refresh the allowed sets from the lgroup
 * hierarchy.  Always reports success. */
static int hwloc_solaris_get_allowed_hook(hwloc_topology_t topology)
{
  lgrp_list_allowed(topology);
  return 0;
}
#endif
1003 
1004 static int
1005 hwloc_solaris_get_thisthread_last_cpu_location(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t hwloc_set, int flags __hwloc_attribute_unused)
1006 {
1007   int pu = getcpuid();
1008   hwloc_bitmap_only(hwloc_set, pu);
1009   return 0;
1010 }
1011 
/* Install the Solaris binding hooks into the topology's hook table.
 * Which hooks are available depends on build-time support:
 *  - CPU binding set/get via processor sets is always available;
 *  - lgrp-based cpubind queries and membind require liblgrp;
 *  - area membind requires the MADV_ACCESS_LWP madvise flavor. */
void
hwloc_set_solaris_hooks(struct hwloc_binding_hooks *hooks,
                        struct hwloc_topology_support *support __hwloc_attribute_unused)
{
  hooks->set_proc_cpubind = hwloc_solaris_set_proc_cpubind;
  hooks->set_thisproc_cpubind = hwloc_solaris_set_thisproc_cpubind;
  hooks->set_thisthread_cpubind = hwloc_solaris_set_thisthread_cpubind;
  hooks->get_thisthread_last_cpu_location = hwloc_solaris_get_thisthread_last_cpu_location;
#ifdef HAVE_LIBLGRP
  /* Querying current bindings and all memory binding go through liblgrp. */
  hooks->get_proc_cpubind = hwloc_solaris_get_proc_cpubind;
  hooks->get_thisproc_cpubind = hwloc_solaris_get_thisproc_cpubind;
  hooks->get_thisthread_cpubind = hwloc_solaris_get_thisthread_cpubind;
  hooks->set_proc_membind = hwloc_solaris_set_proc_membind;
  hooks->set_thisproc_membind = hwloc_solaris_set_thisproc_membind;
  hooks->set_thisthread_membind = hwloc_solaris_set_thisthread_membind;
  hooks->get_proc_membind = hwloc_solaris_get_proc_membind;
  hooks->get_thisproc_membind = hwloc_solaris_get_thisproc_membind;
  hooks->get_thisthread_membind = hwloc_solaris_get_thisthread_membind;
#endif /* HAVE_LIBLGRP */
#ifdef MADV_ACCESS_LWP
  /* Area binding is implemented with madvise(); advertise the policies
   * that hwloc_solaris_set_area_membind accepts. */
  hooks->set_area_membind = hwloc_solaris_set_area_membind;
  support->membind->firsttouch_membind = 1;
  support->membind->bind_membind = 1;
  support->membind->interleave_membind = 1;
  support->membind->nexttouch_membind = 1;
#endif
#ifdef HAVE_LIBLGRP
  hooks->get_allowed_resources = hwloc_solaris_get_allowed_hook;
#endif
}
1042 
1043 static struct hwloc_backend *
1044 hwloc_solaris_component_instantiate(struct hwloc_disc_component *component,
1045                                     const void *_data1 __hwloc_attribute_unused,
1046                                     const void *_data2 __hwloc_attribute_unused,
1047                                     const void *_data3 __hwloc_attribute_unused)
1048 {
1049   struct hwloc_backend *backend;
1050   backend = hwloc_backend_alloc(component);
1051   if (!backend)
1052     return NULL;
1053   backend->discover = hwloc_look_solaris;
1054   return backend;
1055 }
1056 
/* Descriptor for the "solaris" CPU discovery component.
 * Initializer order follows struct hwloc_disc_component (declared in
 * private/private.h, not visible in this file). */
static struct hwloc_disc_component hwloc_solaris_disc_component = {
  HWLOC_DISC_COMPONENT_TYPE_CPU, /* a CPU discovery component */
  "solaris", /* component name, as selectable via HWLOC_COMPONENTS */
  HWLOC_DISC_COMPONENT_TYPE_GLOBAL, /* presumably the component types this one is exclusive with -- confirm in private.h */
  hwloc_solaris_component_instantiate, /* backend constructor */
  50, /* NOTE(review): looks like a discovery priority relative to other components -- confirm */
  1,
  NULL
};
1066 
/* Exported component symbol found by the hwloc core (or plugin loader).
 * Initializer order follows struct hwloc_component. */
const struct hwloc_component hwloc_solaris_component = {
  HWLOC_COMPONENT_ABI, /* ABI version tag checked at load time */
  NULL, NULL, /* presumably optional init/finalize callbacks -- none needed here; confirm in private.h */
  HWLOC_COMPONENT_TYPE_DISC, /* wraps a discovery component */
  0, /* flags */
  &hwloc_solaris_disc_component /* the discovery descriptor above */
};

/* [<][>][^][v][top][bottom][index][help] */