This source file includes following definitions.
- hwloc_get_api_version
- hwloc_topology_abi_check
- hwloc_hide_errors
- hwloc_report_os_error
- hwloc_get_sysctlbyname
- hwloc_get_sysctl
- hwloc_fallback_nbprocessors
- hwloc_setup_pu_level
- hwloc_debug_print_object
- hwloc_debug_print_objects
- hwloc__free_infos
- hwloc__add_info
- hwloc__add_info_nodup
- hwloc__move_infos
- hwloc_obj_add_info
- hwloc__tma_dup_infos
- hwloc__free_object_contents
- hwloc_free_unlinked_object
- hwloc_replace_linked_object
- unlink_and_free_object_and_children
- hwloc_free_object_and_children
- hwloc_free_object_siblings_and_children
- insert_siblings_list
- prepend_siblings_list
- append_siblings_list
- unlink_and_free_single_object
- hwloc__duplicate_object
- hwloc__topology_dup
- hwloc_topology_dup
- hwloc_compare_types
- hwloc_type_cmp
- hwloc_obj_cmp_sets
- hwloc__object_cpusets_compare_first
- hwloc__report_error_format_obj
- merge_insert_equal
- hwloc__insert_try_merge_group
- hwloc___insert_object_by_cpuset
- hwloc__find_obj_covering_memory_cpuset
- hwloc__find_insert_memory_parent
- hwloc__attach_memory_object
- hwloc__insert_object_by_cpuset
- hwloc_insert_object_by_cpuset
- hwloc_insert_object_by_parent
- hwloc_alloc_setup_object
- hwloc_topology_alloc_group_object
- hwloc_topology_insert_group_object
- hwloc_topology_insert_misc_object
- hwloc_get_highest_obj_covering_complete_cpuset
- hwloc_find_insert_io_parent_by_complete_cpuset
- hwloc_memory_page_type_compare
- propagate_total_memory
- fixup_sets
- hwloc_obj_add_other_obj_sets
- hwloc_obj_add_children_sets
- propagate_nodeset
- remove_unused_sets
- hwloc__filter_bridges
- hwloc_filter_bridges
- hwloc__reorder_children
- remove_empty
- hwloc_reset_normal_type_depths
- hwloc_compare_levels_structure
- hwloc_filter_levels_keep_structure
- hwloc_propagate_symmetric_subtree
- hwloc_set_group_depth
- hwloc_connect_children
- find_same_type
- hwloc_level_take_objects
- hwloc_build_level_from_list
- hwloc_append_special_object
- hwloc_list_special_objects
- hwloc_connect_io_misc_levels
- hwloc_connect_levels
- hwloc_topology_reconnect
- hwloc_alloc_root_sets
- hwloc_discover
- hwloc_topology_setup_defaults
- hwloc__topology_init
- hwloc_topology_init
- hwloc_topology_set_pid
- hwloc_topology_set_synthetic
- hwloc_topology_set_xml
- hwloc_topology_set_xmlbuffer
- hwloc_topology_set_flags
- hwloc_topology_get_flags
- hwloc__topology_filter_init
- hwloc__topology_set_type_filter
- hwloc_topology_set_type_filter
- hwloc_topology_set_all_types_filter
- hwloc_topology_set_cache_types_filter
- hwloc_topology_set_icache_types_filter
- hwloc_topology_set_io_types_filter
- hwloc_topology_get_type_filter
- hwloc_topology_clear
- hwloc_topology_destroy
- hwloc_topology_load
- restrict_object_by_cpuset
- hwloc_topology_restrict
- hwloc_topology_is_thissystem
- hwloc_topology_get_depth
- hwloc_topology_get_support
- hwloc_topology_set_userdata
- hwloc_topology_get_userdata
- hwloc_topology_get_complete_cpuset
- hwloc_topology_get_topology_cpuset
- hwloc_topology_get_allowed_cpuset
- hwloc_topology_get_complete_nodeset
- hwloc_topology_get_topology_nodeset
- hwloc_topology_get_allowed_nodeset
- hwloc__check_child_siblings
- hwloc__check_normal_children
- hwloc__check_children_cpusets
- hwloc__check_memory_children
- hwloc__check_io_children
- hwloc__check_misc_children
- hwloc__check_object
- hwloc__check_nodesets
- hwloc__check_level
- hwloc_topology_check
- hwloc_topology_check
1
2
3
4
5
6
7
8
9 #include <private/autogen/config.h>
10
11 #define _ATFILE_SOURCE
12 #include <assert.h>
13 #include <sys/types.h>
14 #ifdef HAVE_DIRENT_H
15 #include <dirent.h>
16 #endif
17 #ifdef HAVE_UNISTD_H
18 #include <unistd.h>
19 #endif
20 #include <string.h>
21 #include <errno.h>
22 #include <stdio.h>
23 #include <sys/stat.h>
24 #include <fcntl.h>
25 #include <limits.h>
26 #include <float.h>
27
28 #include <hwloc.h>
29 #include <private/private.h>
30 #include <private/debug.h>
31 #include <private/misc.h>
32
33 #ifdef HAVE_MACH_MACH_INIT_H
34 #include <mach/mach_init.h>
35 #endif
36 #ifdef HAVE_MACH_MACH_HOST_H
37 #include <mach/mach_host.h>
38 #endif
39
40 #ifdef HAVE_SYS_PARAM_H
41 #include <sys/param.h>
42 #endif
43
44 #ifdef HAVE_SYS_SYSCTL_H
45 #include <sys/sysctl.h>
46 #endif
47
48 #ifdef HWLOC_WIN_SYS
49 #include <windows.h>
50 #endif
51
52 unsigned hwloc_get_api_version(void)
53 {
54 return HWLOC_API_VERSION;
55 }
56
57 int hwloc_topology_abi_check(hwloc_topology_t topology)
58 {
59 return topology->topology_abi != HWLOC_TOPOLOGY_ABI ? -1 : 0;
60 }
61
/* Whether error reporting should be suppressed, as configured by the
 * HWLOC_HIDE_ERRORS environment variable. The variable is parsed once
 * on the first call and the result is cached for the process lifetime. */
int hwloc_hide_errors(void)
{
  static int checked = 0;
  static int hide = 0;

  if (!checked) {
    const char *env = getenv("HWLOC_HIDE_ERRORS");
    hide = env ? atoi(env) : 0;
    checked = 1;
  }
  return hide;
}
74
/* Print a one-time warning about what looks like an operating-system
 * reported topology inconsistency.
 * msg:  description of the problem.
 * line: topology.c line where the problem was detected.
 * Nothing is printed when hwloc_hide_errors() is non-zero, and only the
 * first call ever prints (latched by the static 'reported' flag). */
void hwloc_report_os_error(const char *msg, int line)
{
  static int reported = 0;

  if (!reported && !hwloc_hide_errors()) {
    fprintf(stderr, "****************************************************************************\n");
    fprintf(stderr, "* hwloc %s has encountered what looks like an error from the operating system.\n", HWLOC_VERSION);
    fprintf(stderr, "*\n");
    fprintf(stderr, "* %s\n", msg);
    fprintf(stderr, "* Error occurred in topology.c line %d\n", line);
    fprintf(stderr, "*\n");
    fprintf(stderr, "* The following FAQ entry in the hwloc documentation may help:\n");
    fprintf(stderr, "*   What should I do when hwloc reports \"operating system\" warnings?\n");
    fprintf(stderr, "* Otherwise please report this error message to the hwloc user's mailing list,\n");
#ifdef HWLOC_LINUX_SYS
    fprintf(stderr, "* along with the files generated by the hwloc-gather-topology script.\n");
#else
    fprintf(stderr, "* along with any relevant topology information from your platform.\n");
#endif
    fprintf(stderr, "****************************************************************************\n");
    reported = 1;
  }
}
98
#if defined(HAVE_SYSCTLBYNAME)
/* Read an integer sysctl by name. The kernel may return either a 32-bit
 * or a 64-bit value, so accept both sizes.
 * Returns 0 on success (value stored in *ret), -1 on failure. */
int hwloc_get_sysctlbyname(const char *name, int64_t *ret)
{
  union {
    int32_t i32;
    int64_t i64;
  } val;
  size_t len = sizeof(val);

  if (sysctlbyname(name, &val, &len, NULL, 0))
    return -1;
  if (len == sizeof(val.i32))
    *ret = val.i32;
  else if (len == sizeof(val.i64))
    *ret = val.i64;
  else
    return -1;
  return 0;
}
#endif
122
#if defined(HAVE_SYSCTL)
/* Read an int-sized sysctl designated by a MIB name vector of namelen entries.
 * Returns 0 on success (value stored in *ret), -1 on failure or size mismatch. */
int hwloc_get_sysctl(int name[], unsigned namelen, int *ret)
{
  int val;
  size_t len = sizeof(val);

  if (sysctl(name, namelen, &val, &len, NULL, 0))
    return -1;
  if (len != sizeof(val))
    return -1;
  *ret = val;
  return 0;
}
#endif
136
137
138
139
140
#ifndef HWLOC_WIN_SYS
/* Last-resort guess of the number of processors when no backend provided it.
 * Depending on what configure detected, tries (in order): sysconf() variants,
 * Mach host_info(), sysctlbyname("hw.ncpu"), or sysctl(CTL_HW/HW_NCPU).
 * Returns -1 when no method is available or the query failed. */
int
hwloc_fallback_nbprocessors(struct hwloc_topology *topology __hwloc_attribute_unused) {
  int n;
#if HAVE_DECL__SC_NPROCESSORS_ONLN
  n = sysconf(_SC_NPROCESSORS_ONLN);
#elif HAVE_DECL__SC_NPROC_ONLN
  n = sysconf(_SC_NPROC_ONLN);
#elif HAVE_DECL__SC_NPROCESSORS_CONF
  n = sysconf(_SC_NPROCESSORS_CONF);
#elif HAVE_DECL__SC_NPROC_CONF
  n = sysconf(_SC_NPROC_CONF);
#elif defined(HAVE_HOST_INFO) && HAVE_HOST_INFO
  /* Darwin/Mach: query the host for its basic info */
  struct host_basic_info info;
  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
  host_info(mach_host_self(), HOST_BASIC_INFO, (integer_t*) &info, &count);
  n = info.avail_cpus;
#elif defined(HAVE_SYSCTLBYNAME)
  int64_t nn;
  if (hwloc_get_sysctlbyname("hw.ncpu", &nn))
    nn = -1;
  n = nn;
#elif defined(HAVE_SYSCTL) && HAVE_DECL_CTL_HW && HAVE_DECL_HW_NCPU
  static int name[2] = {CTL_HW, HW_NCPU};
  if (hwloc_get_sysctl(name, sizeof(name)/sizeof(*name), &n))
    n = -1;
#else
#ifdef __GNUC__
#warning No known way to discover number of available processors on this system
#endif
  n = -1;
#endif
  return n;
}
#endif
176
177
178
179
180 void
181 hwloc_setup_pu_level(struct hwloc_topology *topology,
182 unsigned nb_pus)
183 {
184 struct hwloc_obj *obj;
185 unsigned oscpu,cpu;
186
187 hwloc_debug("%s", "\n\n * CPU cpusets *\n\n");
188 for (cpu=0,oscpu=0; cpu<nb_pus; oscpu++)
189 {
190 obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PU, oscpu);
191 obj->cpuset = hwloc_bitmap_alloc();
192 hwloc_bitmap_only(obj->cpuset, oscpu);
193
194 hwloc_debug_2args_bitmap("cpu %u (os %u) has cpuset %s\n",
195 cpu, oscpu, obj->cpuset);
196 hwloc_insert_object_by_cpuset(topology, obj);
197
198 cpu++;
199 }
200 }
201
202
203
/* Iterators over an object's child lists that remain valid when the current
 * child is unlinked (and possibly freed) inside the loop body: 'pchild'
 * always points at the link that targets 'child', and is only advanced when
 * the child is still in place after the iteration. */
#define for_each_child_safe(child, parent, pchild) \
  for (pchild = &(parent)->first_child, child = *pchild; \
       child; \
       /* advance only if 'child' was not removed during this iteration */ \
       (*pchild == child ? pchild = &(child->next_sibling) : NULL), \
       child = *pchild)
/* Same, for the memory children list. */
#define for_each_memory_child_safe(child, parent, pchild) \
  for (pchild = &(parent)->memory_first_child, child = *pchild; \
       child; \
       (*pchild == child ? pchild = &(child->next_sibling) : NULL), \
       child = *pchild)
/* Same, for the I/O children list. */
#define for_each_io_child_safe(child, parent, pchild) \
  for (pchild = &(parent)->io_first_child, child = *pchild; \
       child; \
       (*pchild == child ? pchild = &(child->next_sibling) : NULL), \
       child = *pchild)
/* Same, for the Misc children list. */
#define for_each_misc_child_safe(child, parent, pchild) \
  for (pchild = &(parent)->misc_first_child, child = *pchild; \
       child; \
       (*pchild == child ? pchild = &(child->next_sibling) : NULL), \
       child = *pchild)
232
233 #ifdef HWLOC_DEBUG
234
/* Dump one object (type, OS index, attributes, name/subtype, sets, arity)
 * on the debug output, indented by 2*indent spaces.
 * Only compiled in HWLOC_DEBUG builds. */
static void
hwloc_debug_print_object(int indent __hwloc_attribute_unused, hwloc_obj_t obj)
{
  char type[64], idx[10], attr[1024], *cpuset = NULL;
  hwloc_debug("%*s", 2*indent, "");
  hwloc_obj_type_snprintf(type, sizeof(type), obj, 1);
  if (obj->os_index != HWLOC_UNKNOWN_INDEX)
    snprintf(idx, sizeof(idx), "#%u", obj->os_index);
  else
    *idx = '\0';
  hwloc_obj_attr_snprintf(attr, sizeof(attr), obj, " ", 1);
  hwloc_debug("%s%s%s%s%s", type, idx, *attr ? "(" : "", attr, *attr ? ")" : "");
  if (obj->name)
    hwloc_debug(" name \"%s\"", obj->name);
  if (obj->subtype)
    hwloc_debug(" subtype \"%s\"", obj->subtype);
  /* 'cpuset' is reused as a scratch string for each bitmap below,
   * freed after each print */
  if (obj->cpuset) {
    hwloc_bitmap_asprintf(&cpuset, obj->cpuset);
    hwloc_debug(" cpuset %s", cpuset);
    free(cpuset);
  }
  if (obj->complete_cpuset) {
    hwloc_bitmap_asprintf(&cpuset, obj->complete_cpuset);
    hwloc_debug(" complete %s", cpuset);
    free(cpuset);
  }
  if (obj->nodeset) {
    hwloc_bitmap_asprintf(&cpuset, obj->nodeset);
    hwloc_debug(" nodeset %s", cpuset);
    free(cpuset);
  }
  if (obj->complete_nodeset) {
    hwloc_bitmap_asprintf(&cpuset, obj->complete_nodeset);
    hwloc_debug(" completeN %s", cpuset);
    free(cpuset);
  }
  if (obj->arity)
    hwloc_debug(" arity %u", obj->arity);
  hwloc_debug("%s", "\n");
}
275
/* Recursively dump obj and its entire subtree (normal, memory, I/O and
 * Misc children), one indentation level per depth.
 * Only compiled in HWLOC_DEBUG builds. */
static void
hwloc_debug_print_objects(int indent __hwloc_attribute_unused, hwloc_obj_t obj)
{
  hwloc_obj_t child;
  hwloc_debug_print_object(indent, obj);
  for_each_child (child, obj)
    hwloc_debug_print_objects(indent + 1, child);
  for_each_memory_child (child, obj)
    hwloc_debug_print_objects(indent + 1, child);
  for_each_io_child (child, obj)
    hwloc_debug_print_objects(indent + 1, child);
  for_each_misc_child (child, obj)
    hwloc_debug_print_objects(indent + 1, child);
}
290 #else
291 #define hwloc_debug_print_object(indent, obj) do { } while (0)
292 #define hwloc_debug_print_objects(indent, obj) do { } while (0)
293 #endif
294
295 void hwloc__free_infos(struct hwloc_info_s *infos, unsigned count)
296 {
297 unsigned i;
298 for(i=0; i<count; i++) {
299 free(infos[i].name);
300 free(infos[i].value);
301 }
302 free(infos);
303 }
304
305 int hwloc__add_info(struct hwloc_info_s **infosp, unsigned *countp, const char *name, const char *value)
306 {
307 unsigned count = *countp;
308 struct hwloc_info_s *infos = *infosp;
309 #define OBJECT_INFO_ALLOC 8
310
311 unsigned alloccount = (count + 1 + (OBJECT_INFO_ALLOC-1)) & ~(OBJECT_INFO_ALLOC-1);
312 if (count != alloccount) {
313 struct hwloc_info_s *tmpinfos = realloc(infos, alloccount*sizeof(*infos));
314 if (!tmpinfos)
315
316 goto out_with_array;
317 *infosp = infos = tmpinfos;
318 }
319 infos[count].name = strdup(name);
320 if (!infos[count].name)
321 goto out_with_array;
322 infos[count].value = strdup(value);
323 if (!infos[count].value)
324 goto out_with_name;
325 *countp = count+1;
326 return 0;
327
328 out_with_name:
329 free(infos[count].name);
330 out_with_array:
331
332 return -1;
333 }
334
335 int hwloc__add_info_nodup(struct hwloc_info_s **infosp, unsigned *countp,
336 const char *name, const char *value,
337 int replace)
338 {
339 struct hwloc_info_s *infos = *infosp;
340 unsigned count = *countp;
341 unsigned i;
342 for(i=0; i<count; i++) {
343 if (!strcmp(infos[i].name, name)) {
344 if (replace) {
345 char *new = strdup(value);
346 if (!new)
347 return -1;
348 free(infos[i].value);
349 infos[i].value = new;
350 }
351 return 0;
352 }
353 }
354 return hwloc__add_info(infosp, countp, name, value);
355 }
356
/* Move all infos from the src array into the dst array (transferring
 * ownership of the strings, no duplication), then free the src array.
 * On allocation failure the src infos are DROPPED (freed) and -1 is
 * returned; the dst array is left unchanged. In both cases src is
 * emptied, so the caller must not free it again. */
int hwloc__move_infos(struct hwloc_info_s **dst_infosp, unsigned *dst_countp,
		      struct hwloc_info_s **src_infosp, unsigned *src_countp)
{
  unsigned dst_count = *dst_countp;
  struct hwloc_info_s *dst_infos = *dst_infosp;
  unsigned src_count = *src_countp;
  struct hwloc_info_s *src_infos = *src_infosp;
  unsigned i;
#define OBJECT_INFO_ALLOC 8
  /* round the total size up to the allocation granularity */
  unsigned alloccount = (dst_count + src_count + (OBJECT_INFO_ALLOC-1)) & ~(OBJECT_INFO_ALLOC-1);
  if (dst_count != alloccount) {
    struct hwloc_info_s *tmp_infos = realloc(dst_infos, alloccount*sizeof(*dst_infos));
    if (!tmp_infos)
      /* failed to allocate the merged array, drop the src infos */
      goto drop;
    dst_infos = tmp_infos;
  }
  /* transfer string ownership entry by entry */
  for(i=0; i<src_count; i++, dst_count++) {
    dst_infos[dst_count].name = src_infos[i].name;
    dst_infos[dst_count].value = src_infos[i].value;
  }
  *dst_infosp = dst_infos;
  *dst_countp = dst_count;
  free(src_infos);
  *src_infosp = NULL;
  *src_countp = 0;
  return 0;

 drop:
  /* free the src strings and array; dst is untouched */
  for(i=0; i<src_count; i++) {
    free(src_infos[i].name);
    free(src_infos[i].value);
  }
  free(src_infos);
  *src_infosp = NULL;
  *src_countp = 0;
  return -1;
}
397
/* Public API: attach a duplicated (name,value) info pair to an object.
 * Returns 0 on success, -1 on allocation failure. */
int hwloc_obj_add_info(hwloc_obj_t obj, const char *name, const char *value)
{
  return hwloc__add_info(&obj->infos, &obj->infos_count, name, value);
}
402
403
404 static int hwloc__tma_dup_infos(struct hwloc_tma *tma, hwloc_obj_t new, hwloc_obj_t src)
405 {
406 unsigned i, j;
407 new->infos = hwloc_tma_calloc(tma, src->infos_count * sizeof(*src->infos));
408 if (!new->infos)
409 return -1;
410 for(i=0; i<src->infos_count; i++) {
411 new->infos[i].name = hwloc_tma_strdup(tma, src->infos[i].name);
412 new->infos[i].value = hwloc_tma_strdup(tma, src->infos[i].value);
413 if (!new->infos[i].name || !new->infos[i].value)
414 goto failed;
415 }
416 new->infos_count = src->infos_count;
417 return 0;
418
419 failed:
420 assert(!tma || !tma->dontfree);
421 for(j=0; j<=i; j++) {
422 free(new->infos[i].name);
423 free(new->infos[i].value);
424 }
425 free(new->infos);
426 new->infos = NULL;
427 return -1;
428 }
429
430 static void
431 hwloc__free_object_contents(hwloc_obj_t obj)
432 {
433 switch (obj->type) {
434 case HWLOC_OBJ_NUMANODE:
435 free(obj->attr->numanode.page_types);
436 break;
437 default:
438 break;
439 }
440 hwloc__free_infos(obj->infos, obj->infos_count);
441 free(obj->attr);
442 free(obj->children);
443 free(obj->subtype);
444 free(obj->name);
445 hwloc_bitmap_free(obj->cpuset);
446 hwloc_bitmap_free(obj->complete_cpuset);
447 hwloc_bitmap_free(obj->nodeset);
448 hwloc_bitmap_free(obj->complete_nodeset);
449 }
450
451
/* Free an object that has already been unlinked from the tree
 * (contents first, then the object structure itself). */
void
hwloc_free_unlinked_object(hwloc_obj_t obj)
{
  hwloc__free_object_contents(obj);
  free(obj);
}
458
459
460
461
462
/* Replace 'old' with 'new' in place while keeping old's position in the
 * tree: old's contents are freed, old's linkage pointers are grafted into
 * new, and new is then copied over old. Callers keep using the 'old'
 * pointer, which now carries new's contents; 'new' is zeroed and must be
 * considered dead afterwards. The statement order below is critical. */
static void
hwloc_replace_linked_object(hwloc_obj_t old, hwloc_obj_t new)
{
  /* drop old's attributes/sets/infos; its linkage pointers survive */
  hwloc__free_object_contents(old);
  /* import old's linkage into new so the memcpy below preserves it */
  new->parent = old->parent;
  new->next_sibling = old->next_sibling;
  new->first_child = old->first_child;
  new->memory_first_child = old->memory_first_child;
  new->io_first_child = old->io_first_child;
  new->misc_first_child = old->misc_first_child;
  /* copy new over old, then neutralize new so its (now shared)
   * pointers cannot be freed twice */
  memcpy(old, new, sizeof(*old));

  memset(new, 0,sizeof(*new));
}
480
481
482
483
484
/* Unlink *pobj from its sibling list (by making *pobj point to the next
 * sibling) and free it together with its whole subtree. Children are
 * freed first, bottom-up, using the *_safe iterators since each recursive
 * call unlinks the current child. */
static void
unlink_and_free_object_and_children(hwloc_obj_t *pobj)
{
  hwloc_obj_t obj = *pobj, child, *pchild;

  for_each_child_safe(child, obj, pchild)
    unlink_and_free_object_and_children(pchild);
  for_each_memory_child_safe(child, obj, pchild)
    unlink_and_free_object_and_children(pchild);
  for_each_io_child_safe(child, obj, pchild)
    unlink_and_free_object_and_children(pchild);
  for_each_misc_child_safe(child, obj, pchild)
    unlink_and_free_object_and_children(pchild);

  /* splice the object out of its list, then free it */
  *pobj = obj->next_sibling;
  hwloc_free_unlinked_object(obj);
}
502
503
504
/* Free an object and its whole subtree (the caller's pointer becomes
 * invalid; the object's siblings are not touched). */
void
hwloc_free_object_and_children(hwloc_obj_t obj)
{
  unlink_and_free_object_and_children(&obj);
}
510
511
512
/* Free an object, its whole subtree, and all its following siblings
 * (each with their own subtree). The local 'obj' is advanced to the next
 * sibling by each unlink call. */
void
hwloc_free_object_siblings_and_children(hwloc_obj_t obj)
{
  while (obj)
    unlink_and_free_object_and_children(&obj);
}
519
520
521
522
523 static hwloc_obj_t *
524 insert_siblings_list(hwloc_obj_t *firstp, hwloc_obj_t firstnew, hwloc_obj_t newparent)
525 {
526 hwloc_obj_t tmp;
527 assert(firstnew);
528 *firstp = tmp = firstnew;
529 tmp->parent = newparent;
530 while (tmp->next_sibling) {
531 tmp = tmp->next_sibling;
532 tmp->parent = newparent;
533 }
534 return &tmp->next_sibling;
535 }
536
537
538
539
540
541
/* Prepend the (possibly empty) list firstnew in front of *firstp,
 * reparenting the new elements to newparent and shifting the existing
 * elements' sibling_rank by the number of prepended elements.
 * NOTE(review): the new elements' own sibling_rank is assumed already
 * set by the caller — confirm against call sites. */
static void
prepend_siblings_list(hwloc_obj_t *firstp, hwloc_obj_t firstnew, hwloc_obj_t newparent)
{
  hwloc_obj_t *tmpp, tmp, last;
  unsigned length;

  /* reparent the new list while counting it; 'last'/'tmpp' end on its tail */
  for(length = 0, tmpp = &firstnew, last = NULL ; *tmpp; length++, last = *tmpp, tmpp = &((*tmpp)->next_sibling))
    (*tmpp)->parent = newparent;

  /* shift the ranks of the existing elements */
  for(tmp = *firstp; tmp; tmp = tmp->next_sibling)
    tmp->sibling_rank += length;

  /* attach the old list after the new one */
  *tmpp = *firstp;
  if (*firstp)
    (*firstp)->prev_sibling = last;

  /* the new list becomes the head */
  *firstp = firstnew;
}
564
565
566
567
568
/* Append the (possibly empty) list firstnew at the end of *firstp,
 * reparenting the new elements to newparent and offsetting their
 * sibling_rank by the length of the existing list. */
static void
append_siblings_list(hwloc_obj_t *firstp, hwloc_obj_t firstnew, hwloc_obj_t newparent)
{
  hwloc_obj_t *tmpp, tmp, last;
  unsigned length;

  /* find the tail link of the existing list and count its elements */
  for(length = 0, tmpp = firstp, last = NULL ; *tmpp; length++, last = *tmpp, tmpp = &((*tmpp)->next_sibling));

  /* reparent and re-rank the appended elements */
  for(tmp = firstnew; tmp; tmp = tmp->next_sibling) {
    tmp->parent = newparent;
    tmp->sibling_rank += length;
  }

  /* attach the new list at the tail */
  *tmpp = firstnew;
  if (firstnew)
    firstnew->prev_sibling = last;
}
589
590
591
592
593
594
595
596
597
/* Unlink and free the single object *pparent, reinserting its children
 * in its place. Children of the same kind as the removed object take its
 * position in the parent link *pparent; children of other kinds are
 * appended to the corresponding child list of the removed object's parent.
 * The asserts document which child kinds each object kind may have. */
static void
unlink_and_free_single_object(hwloc_obj_t *pparent)
{
  hwloc_obj_t old = *pparent;
  hwloc_obj_t *lastp;

  if (old->type == HWLOC_OBJ_MISC) {
    /* Misc objects may only have Misc children */
    assert(!old->first_child);

    assert(!old->memory_first_child);

    assert(!old->io_first_child);

    if (old->misc_first_child)
      /* insert the Misc children where the object was */
      lastp = insert_siblings_list(pparent, old->misc_first_child, old->parent);
    else
      lastp = pparent;
    /* re-attach the removed object's following siblings */
    *lastp = old->next_sibling;

  } else if (hwloc__obj_type_is_io(old->type)) {
    /* I/O objects may only have I/O and Misc children */
    assert(!old->first_child);

    assert(!old->memory_first_child);

    if (old->io_first_child)
      /* insert the I/O children where the object was */
      lastp = insert_siblings_list(pparent, old->io_first_child, old->parent);
    else
      lastp = pparent;

    *lastp = old->next_sibling;

    /* Misc children move to the parent's Misc list */
    if (old->misc_first_child)
      append_siblings_list(&old->parent->misc_first_child, old->misc_first_child, old->parent);

  } else if (hwloc__obj_type_is_memory(old->type)) {
    /* memory objects may only have memory and Misc children */
    assert(!old->first_child);

    assert(!old->io_first_child);

    if (old->memory_first_child)
      /* insert the memory children where the object was */
      lastp = insert_siblings_list(pparent, old->memory_first_child, old->parent);
    else
      lastp = pparent;

    *lastp = old->next_sibling;

    /* Misc children move to the parent's Misc list */
    if (old->misc_first_child)
      append_siblings_list(&old->parent->misc_first_child, old->misc_first_child, old->parent);

  } else {
    /* normal object: normal children take its place,
     * the other kinds move to the parent's respective lists */
    if (old->first_child)
      lastp = insert_siblings_list(pparent, old->first_child, old->parent);
    else
      lastp = pparent;

    *lastp = old->next_sibling;

    if (old->memory_first_child)
      append_siblings_list(&old->parent->memory_first_child, old->memory_first_child, old->parent);
    if (old->io_first_child)
      append_siblings_list(&old->parent->io_first_child, old->io_first_child, old->parent);
    if (old->misc_first_child)
      append_siblings_list(&old->parent->misc_first_child, old->misc_first_child, old->parent);
  }

  hwloc_free_unlinked_object(old);
}
686
687
688 static int
689 hwloc__duplicate_object(struct hwloc_topology *newtopology,
690 struct hwloc_obj *newparent,
691 struct hwloc_obj *newobj,
692 struct hwloc_obj *src)
693 {
694 struct hwloc_tma *tma = newtopology->tma;
695 hwloc_obj_t *level;
696 unsigned level_width;
697 size_t len;
698 unsigned i;
699 hwloc_obj_t child, prev;
700 int err = 0;
701
702
703
704
705 assert(!newparent == !!newobj);
706
707 if (!newobj) {
708 newobj = hwloc_alloc_setup_object(newtopology, src->type, src->os_index);
709 if (!newobj)
710 return -1;
711 }
712
713
714 newobj->logical_index = src->logical_index;
715 newobj->depth = src->depth;
716 newobj->sibling_rank = src->sibling_rank;
717
718 newobj->type = src->type;
719 newobj->os_index = src->os_index;
720 newobj->gp_index = src->gp_index;
721 newobj->symmetric_subtree = src->symmetric_subtree;
722
723 if (src->name)
724 newobj->name = hwloc_tma_strdup(tma, src->name);
725 if (src->subtype)
726 newobj->subtype = hwloc_tma_strdup(tma, src->subtype);
727 newobj->userdata = src->userdata;
728
729 newobj->total_memory = src->total_memory;
730
731 memcpy(newobj->attr, src->attr, sizeof(*newobj->attr));
732
733 if (src->type == HWLOC_OBJ_NUMANODE && src->attr->numanode.page_types_len) {
734 len = src->attr->numanode.page_types_len * sizeof(struct hwloc_memory_page_type_s);
735 newobj->attr->numanode.page_types = hwloc_tma_malloc(tma, len);
736 memcpy(newobj->attr->numanode.page_types, src->attr->numanode.page_types, len);
737 }
738
739 newobj->cpuset = hwloc_bitmap_tma_dup(tma, src->cpuset);
740 newobj->complete_cpuset = hwloc_bitmap_tma_dup(tma, src->complete_cpuset);
741 newobj->nodeset = hwloc_bitmap_tma_dup(tma, src->nodeset);
742 newobj->complete_nodeset = hwloc_bitmap_tma_dup(tma, src->complete_nodeset);
743
744 hwloc__tma_dup_infos(tma, newobj, src);
745
746
747 if (src->depth < 0) {
748 i = HWLOC_SLEVEL_FROM_DEPTH(src->depth);
749 level = newtopology->slevels[i].objs;
750 level_width = newtopology->slevels[i].nbobjs;
751
752 if (!newobj->logical_index)
753 newtopology->slevels[i].first = newobj;
754 if (newobj->logical_index == newtopology->slevels[i].nbobjs - 1)
755 newtopology->slevels[i].last = newobj;
756 } else {
757 level = newtopology->levels[src->depth];
758 level_width = newtopology->level_nbobjects[src->depth];
759 }
760
761 assert(newobj->logical_index < level_width);
762 level[newobj->logical_index] = newobj;
763
764
765
766 if (newobj->logical_index > 0 && level[newobj->logical_index-1]) {
767 newobj->prev_cousin = level[newobj->logical_index-1];
768 level[newobj->logical_index-1]->next_cousin = newobj;
769 }
770 if (newobj->logical_index < level_width-1 && level[newobj->logical_index+1]) {
771 newobj->next_cousin = level[newobj->logical_index+1];
772 level[newobj->logical_index+1]->prev_cousin = newobj;
773 }
774
775
776 if (src->arity) {
777 newobj->children = hwloc_tma_malloc(tma, src->arity * sizeof(*newobj->children));
778 if (!newobj->children)
779 return -1;
780 }
781 newobj->arity = src->arity;
782 newobj->memory_arity = src->memory_arity;
783 newobj->io_arity = src->io_arity;
784 newobj->misc_arity = src->misc_arity;
785
786
787 for_each_child(child, src) {
788 err = hwloc__duplicate_object(newtopology, newobj, NULL, child);
789 if (err < 0)
790 goto out_with_children;
791 }
792 for_each_memory_child(child, src) {
793 err = hwloc__duplicate_object(newtopology, newobj, NULL, child);
794 if (err < 0)
795 return err;
796 }
797 for_each_io_child(child, src) {
798 err = hwloc__duplicate_object(newtopology, newobj, NULL, child);
799 if (err < 0)
800 goto out_with_children;
801 }
802 for_each_misc_child(child, src) {
803 err = hwloc__duplicate_object(newtopology, newobj, NULL, child);
804 if (err < 0)
805 goto out_with_children;
806 }
807
808 out_with_children:
809
810
811 if (!err) {
812
813
814
815 if (newobj->arity) {
816 newobj->children[0]->prev_sibling = NULL;
817 for(i=1; i<newobj->arity; i++)
818 newobj->children[i]->prev_sibling = newobj->children[i-1];
819 newobj->last_child = newobj->children[newobj->arity-1];
820 }
821 if (newobj->memory_arity) {
822 child = newobj->memory_first_child;
823 prev = NULL;
824 while (child) {
825 child->prev_sibling = prev;
826 prev = child;
827 child = child->next_sibling;
828 }
829 }
830 if (newobj->io_arity) {
831 child = newobj->io_first_child;
832 prev = NULL;
833 while (child) {
834 child->prev_sibling = prev;
835 prev = child;
836 child = child->next_sibling;
837 }
838 }
839 if (newobj->misc_arity) {
840 child = newobj->misc_first_child;
841 prev = NULL;
842 while (child) {
843 child->prev_sibling = prev;
844 prev = child;
845 child = child->next_sibling;
846 }
847 }
848 }
849
850
851
852
853
854 if (newparent) {
855
856
857
858 hwloc_insert_object_by_parent(newtopology, newparent, newobj);
859
860
861 if (hwloc__obj_type_is_normal(newobj->type))
862 newparent->children[newobj->sibling_rank] = newobj;
863 }
864
865 return err;
866 }
867
868 static int
869 hwloc__topology_init (struct hwloc_topology **topologyp, unsigned nblevels, struct hwloc_tma *tma);
870
871
/* Duplicate a loaded topology into *newp, optionally allocating through
 * the given tma (NULL means plain malloc). Copies flags, filters, support,
 * allowed sets, per-type depths, the level arrays and the whole object
 * tree, then duplicates the distances structures.
 * Returns 0 on success, -1 on error (errno set to EINVAL if old is not
 * loaded; the partial new topology is destroyed on failure). */
int
hwloc__topology_dup(hwloc_topology_t *newp,
		    hwloc_topology_t old,
		    struct hwloc_tma *tma)
{
  hwloc_topology_t new;
  hwloc_obj_t newroot;
  hwloc_obj_t oldroot = hwloc_get_root_obj(old);
  unsigned i;
  int err;

  if (!old->is_loaded) {
    errno = EINVAL;
    return -1;
  }

  err = hwloc__topology_init(&new, old->nb_levels_allocated, tma);
  if (err < 0)
    goto out;

  new->flags = old->flags;
  memcpy(new->type_filter, old->type_filter, sizeof(old->type_filter));
  new->is_thissystem = old->is_thissystem;
  new->is_loaded = 1;
  new->pid = old->pid;
  new->next_gp_index = old->next_gp_index;

  memcpy(&new->binding_hooks, &old->binding_hooks, sizeof(old->binding_hooks));

  memcpy(new->support.discovery, old->support.discovery, sizeof(*old->support.discovery));
  memcpy(new->support.cpubind, old->support.cpubind, sizeof(*old->support.cpubind));
  memcpy(new->support.membind, old->support.membind, sizeof(*old->support.membind));

  new->allowed_cpuset = hwloc_bitmap_tma_dup(tma, old->allowed_cpuset);
  new->allowed_nodeset = hwloc_bitmap_tma_dup(tma, old->allowed_nodeset);

  new->userdata_export_cb = old->userdata_export_cb;
  new->userdata_import_cb = old->userdata_import_cb;
  new->userdata_not_decoded = old->userdata_not_decoded;

  /* machine_memory is only used during discovery, it must be empty here */
  assert(!old->machine_memory.local_memory);
  assert(!old->machine_memory.page_types_len);
  assert(!old->machine_memory.page_types);

  for(i = HWLOC_OBJ_TYPE_MIN; i < HWLOC_OBJ_TYPE_MAX; i++)
    new->type_depth[i] = old->type_depth[i];

  /* pre-size the level arrays; hwloc__duplicate_object() fills them.
   * Level 0 (the root level) was already set up by hwloc__topology_init().
   * NOTE(review): hwloc_tma_calloc results are not checked here — confirm */
  new->nb_levels = old->nb_levels;
  assert(new->nb_levels_allocated >= new->nb_levels);
  for(i=1 ; i<new->nb_levels; i++) {
    new->level_nbobjects[i] = old->level_nbobjects[i];
    new->levels[i] = hwloc_tma_calloc(tma, new->level_nbobjects[i] * sizeof(*new->levels[i]));
  }
  for(i=0; i<HWLOC_NR_SLEVELS; i++) {
    new->slevels[i].nbobjs = old->slevels[i].nbobjs;
    if (new->slevels[i].nbobjs)
      new->slevels[i].objs = hwloc_tma_calloc(tma, new->slevels[i].nbobjs * sizeof(*new->slevels[i].objs));
  }

  /* recursively duplicate the object tree into the preallocated root */
  newroot = hwloc_get_root_obj(new);
  err = hwloc__duplicate_object(new, NULL, newroot, oldroot);
  if (err < 0)
    goto out_with_topology;

  err = hwloc_internal_distances_dup(new, old);
  if (err < 0)
    goto out_with_topology;

  /* the duplicated tree is fully connected */
  new->modified = 0;

  /* no backends are attached to a duplicated topology */
  new->backends = NULL;
  new->get_pci_busid_cpuset_backend = NULL;

#ifndef HWLOC_DEBUG
  if (getenv("HWLOC_DEBUG_CHECK"))
#endif
    hwloc_topology_check(new);

  *newp = new;
  return 0;

 out_with_topology:
  assert(!tma || !tma->dontfree);
  hwloc_topology_destroy(new);
 out:
  return -1;
}
963
/* Public API: duplicate a loaded topology with the default allocator. */
int
hwloc_topology_dup(hwloc_topology_t *newp,
		   hwloc_topology_t old)
{
  return hwloc__topology_dup(newp, old, NULL);
}
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
/* Rank of each object type in the "possible topology depth" order,
 * indexed by hwloc_obj_type_t; obj_order_type[] below is the reverse
 * mapping (and the asserted source of the labels here).
 * NOTE(review): per-entry labels assume the declaration order of
 * hwloc_obj_type_t — confirm against hwloc.h. */
static const unsigned obj_type_order[] = {
  /* Machine */	 0,
  /* Package */	 3,
  /* Core */	 12,
  /* PU */	 16,
  /* L1Cache */	 10,
  /* L2Cache */	 8,
  /* L3Cache */	 6,
  /* L4Cache */	 5,
  /* L5Cache */	 4,
  /* L1iCache */ 11,
  /* L2iCache */ 9,
  /* L3iCache */ 7,
  /* Group */	 1,
  /* NUMANode */ 2,
  /* Bridge */	 13,
  /* PCIDev */	 14,
  /* OSDev */	 15,
  /* Misc */	 17
};
1010
#ifndef NDEBUG
/* Reverse of obj_type_order[]: the object type at each depth order rank.
 * Only used by debug assertions. */
static const hwloc_obj_type_t obj_order_type[] = {
  HWLOC_OBJ_MACHINE,
  HWLOC_OBJ_GROUP,
  HWLOC_OBJ_NUMANODE,
  HWLOC_OBJ_PACKAGE,
  HWLOC_OBJ_L5CACHE,
  HWLOC_OBJ_L4CACHE,
  HWLOC_OBJ_L3CACHE,
  HWLOC_OBJ_L3ICACHE,
  HWLOC_OBJ_L2CACHE,
  HWLOC_OBJ_L2ICACHE,
  HWLOC_OBJ_L1CACHE,
  HWLOC_OBJ_L1ICACHE,
  HWLOC_OBJ_CORE,
  HWLOC_OBJ_BRIDGE,
  HWLOC_OBJ_PCI_DEVICE,
  HWLOC_OBJ_OS_DEVICE,
  HWLOC_OBJ_PU,
  HWLOC_OBJ_MISC
};
#endif
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
/* Per-type priority, indexed by hwloc_obj_type_t; higher values win when
 * deciding which of two same-locality objects to keep.
 * NOTE(review): per-entry labels assume the declaration order of
 * hwloc_obj_type_t — confirm against hwloc.h. */
static const int obj_type_priority[] = {
  /* Machine */	 90,
  /* Package */	 40,
  /* Core */	 60,
  /* PU */	 100,
  /* L1Cache */	 20,
  /* L2Cache */	 20,
  /* L3Cache */	 20,
  /* L4Cache */	 20,
  /* L5Cache */	 20,
  /* L1iCache */ 19,
  /* L2iCache */ 19,
  /* L3iCache */ 19,
  /* Group */	 0,
  /* NUMANode */ 100,
  /* Bridge */	 0,
  /* PCIDev */	 100,
  /* OSDev */	 100,
  /* Misc */	 0
};
1068
1069 int hwloc_compare_types (hwloc_obj_type_t type1, hwloc_obj_type_t type2)
1070 {
1071 unsigned order1 = obj_type_order[type1];
1072 unsigned order2 = obj_type_order[type2];
1073
1074
1075 if (!hwloc__obj_type_is_normal(type1)
1076 && hwloc__obj_type_is_normal(type2) && type2 != HWLOC_OBJ_MACHINE)
1077 return HWLOC_TYPE_UNORDERED;
1078 if (!hwloc__obj_type_is_normal(type2)
1079 && hwloc__obj_type_is_normal(type1) && type1 != HWLOC_OBJ_MACHINE)
1080 return HWLOC_TYPE_UNORDERED;
1081
1082 return order1 - order2;
1083 }
1084
/* Result of comparing two objects by their sets. Values alias the bitmap
 * comparison results so hwloc_bitmap_compare_inclusion() output can be
 * used directly. */
enum hwloc_obj_cmp_e {
  HWLOC_OBJ_EQUAL = HWLOC_BITMAP_EQUAL,			/* identical sets */
  HWLOC_OBJ_INCLUDED = HWLOC_BITMAP_INCLUDED,		/* first included in second */
  HWLOC_OBJ_CONTAINS = HWLOC_BITMAP_CONTAINS,		/* first contains second */
  HWLOC_OBJ_INTERSECTS = HWLOC_BITMAP_INTERSECTS,	/* overlap without inclusion */
  HWLOC_OBJ_DIFFERENT = HWLOC_BITMAP_DIFFERENT		/* no overlap */
};
1092
1093 static enum hwloc_obj_cmp_e
1094 hwloc_type_cmp(hwloc_obj_t obj1, hwloc_obj_t obj2)
1095 {
1096 hwloc_obj_type_t type1 = obj1->type;
1097 hwloc_obj_type_t type2 = obj2->type;
1098 int compare;
1099
1100 compare = hwloc_compare_types(type1, type2);
1101 if (compare == HWLOC_TYPE_UNORDERED)
1102 return HWLOC_OBJ_DIFFERENT;
1103 if (compare > 0)
1104 return HWLOC_OBJ_INCLUDED;
1105 if (compare < 0)
1106 return HWLOC_OBJ_CONTAINS;
1107
1108 if (obj1->type == HWLOC_OBJ_GROUP
1109 && (obj1->attr->group.kind != obj2->attr->group.kind
1110 || obj1->attr->group.subkind != obj2->attr->group.subkind))
1111 return HWLOC_OBJ_DIFFERENT;
1112
1113 return HWLOC_OBJ_EQUAL;
1114 }
1115
1116
1117
1118
1119
/* Compare two (non-special) objects by their cpusets and nodesets combined.
 * Complete sets are preferred when both objects have them. The cpuset
 * verdict is then refined by the nodeset one; contradictory verdicts
 * (one included, the other containing) degrade to INTERSECTS. */
static int
hwloc_obj_cmp_sets(hwloc_obj_t obj1, hwloc_obj_t obj2)
{
  hwloc_bitmap_t set1, set2;
  int res = HWLOC_OBJ_DIFFERENT;

  assert(!hwloc__obj_type_is_special(obj1->type));
  assert(!hwloc__obj_type_is_special(obj2->type));

  /* compare cpusets first */
  if (obj1->complete_cpuset && obj2->complete_cpuset) {
    set1 = obj1->complete_cpuset;
    set2 = obj2->complete_cpuset;
  } else {
    set1 = obj1->cpuset;
    set2 = obj2->cpuset;
  }
  if (set1 && set2 && !hwloc_bitmap_iszero(set1) && !hwloc_bitmap_iszero(set2)) {
    res = hwloc_bitmap_compare_inclusion(set1, set2);
    if (res == HWLOC_OBJ_INTERSECTS)
      return HWLOC_OBJ_INTERSECTS;
  }

  /* then refine with nodesets */
  if (obj1->complete_nodeset && obj2->complete_nodeset) {
    set1 = obj1->complete_nodeset;
    set2 = obj2->complete_nodeset;
  } else {
    set1 = obj1->nodeset;
    set2 = obj2->nodeset;
  }
  if (set1 && set2 && !hwloc_bitmap_iszero(set1) && !hwloc_bitmap_iszero(set2)) {
    int noderes = hwloc_bitmap_compare_inclusion(set1, set2);
    /* combine the nodeset verdict with the cpuset one */
    if (noderes == HWLOC_OBJ_INCLUDED) {
      if (res == HWLOC_OBJ_CONTAINS)
	/* contradiction between cpusets and nodesets */
	return HWLOC_OBJ_INTERSECTS;
      res = HWLOC_OBJ_INCLUDED;

    } else if (noderes == HWLOC_OBJ_CONTAINS) {
      if (res == HWLOC_OBJ_INCLUDED)
	/* contradiction between cpusets and nodesets */
	return HWLOC_OBJ_INTERSECTS;
      res = HWLOC_OBJ_CONTAINS;

    } else if (noderes == HWLOC_OBJ_INTERSECTS) {
      return HWLOC_OBJ_INTERSECTS;

    } else {
      /* nodesets are either EQUAL or DIFFERENT (e.g. CPU-less objects):
       * keep the cpuset-based verdict */
    }
  }

  return res;
}
1177
1178
1179
1180
1181
1182
1183 int
1184 hwloc__object_cpusets_compare_first(hwloc_obj_t obj1, hwloc_obj_t obj2)
1185 {
1186 if (obj1->complete_cpuset && obj2->complete_cpuset)
1187 return hwloc_bitmap_compare_first(obj1->complete_cpuset, obj2->complete_cpuset);
1188 else
1189 return hwloc_bitmap_compare_first(obj1->cpuset, obj2->cpuset);
1190 }
1191
1192
/* Format a short description of OBJ into BUF for insertion error reports:
 * "<type> (P#<idx> cpuset <set>[ nodeset <set>])".
 * The P# part is omitted when the OS index is unknown, the nodeset part
 * when the object has no nodeset (e.g. during early insertion). */
static void
hwloc__report_error_format_obj(char *buf, size_t buflen, hwloc_obj_t obj)
{
  char typestr[64];
  char *cpusetstr;
  char *nodesetstr = NULL;
  hwloc_obj_type_snprintf(typestr, sizeof(typestr), obj, 0);
  /* asprintf-style helpers allocate; freed below */
  hwloc_bitmap_asprintf(&cpusetstr, obj->cpuset);
  if (obj->nodeset) /* may be missing during insert */
    hwloc_bitmap_asprintf(&nodesetstr, obj->nodeset);
  if (obj->os_index != HWLOC_UNKNOWN_INDEX)
    snprintf(buf, buflen, "%s (P#%u cpuset %s%s%s)",
	     typestr, obj->os_index, cpusetstr,
	     nodesetstr ? " nodeset " : "",
	     nodesetstr ? nodesetstr : "");
  else
    snprintf(buf, buflen, "%s (cpuset %s%s%s)",
	     typestr, cpusetstr,
	     nodesetstr ? " nodeset " : "",
	     nodesetstr ? nodesetstr : "");
  free(cpusetstr);
  free(nodesetstr); /* free(NULL) is a no-op */
}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228 static void
1229 merge_insert_equal(hwloc_obj_t new, hwloc_obj_t old)
1230 {
1231 if (old->os_index == HWLOC_UNKNOWN_INDEX)
1232 old->os_index = new->os_index;
1233
1234 if (new->infos_count) {
1235
1236 hwloc__move_infos(&old->infos, &old->infos_count,
1237 &new->infos, &new->infos_count);
1238 }
1239
1240 if (new->name && !old->name) {
1241 old->name = new->name;
1242 new->name = NULL;
1243 }
1244 if (new->subtype && !old->subtype) {
1245 old->subtype = new->subtype;
1246 new->subtype = NULL;
1247 }
1248
1249
1250
1251
1252
1253 switch(new->type) {
1254 case HWLOC_OBJ_NUMANODE:
1255 if (new->attr->numanode.local_memory && !old->attr->numanode.local_memory) {
1256
1257 old->attr->numanode.local_memory = new->attr->numanode.local_memory;
1258 free(old->attr->numanode.page_types);
1259 old->attr->numanode.page_types_len = new->attr->numanode.page_types_len;
1260 old->attr->numanode.page_types = new->attr->numanode.page_types;
1261 new->attr->numanode.page_types = NULL;
1262 new->attr->numanode.page_types_len = 0;
1263 }
1264
1265 break;
1266 case HWLOC_OBJ_L1CACHE:
1267 case HWLOC_OBJ_L2CACHE:
1268 case HWLOC_OBJ_L3CACHE:
1269 case HWLOC_OBJ_L4CACHE:
1270 case HWLOC_OBJ_L5CACHE:
1271 case HWLOC_OBJ_L1ICACHE:
1272 case HWLOC_OBJ_L2ICACHE:
1273 case HWLOC_OBJ_L3ICACHE:
1274 if (!old->attr->cache.size)
1275 old->attr->cache.size = new->attr->cache.size;
1276 if (!old->attr->cache.linesize)
1277 old->attr->cache.size = new->attr->cache.linesize;
1278 if (!old->attr->cache.associativity)
1279 old->attr->cache.size = new->attr->cache.linesize;
1280 break;
1281 default:
1282 break;
1283 }
1284 }
1285
1286
/* NEW and OLD have equal sets; try to merge them when at least one is a
 * Group. Returns the object kept in the tree (OLD, possibly with its
 * contents replaced by NEW's via hwloc_replace_linked_object()), or NULL
 * when no merge applies and insertion must proceed normally. */
static __hwloc_inline hwloc_obj_t
hwloc__insert_try_merge_group(hwloc_obj_t old, hwloc_obj_t new)
{
  if (new->type == HWLOC_OBJ_GROUP) {
    /* Inserting a Group on top of an existing equal object. */

    /* A Memory-kind Group is never merged into a PU. */
    if (old->type == HWLOC_OBJ_PU && new->attr->group.kind == HWLOC_GROUP_KIND_MEMORY)
      /* never merge, continue inserting */
      return NULL;

    /* When both are Groups, a lower kind value wins: replace OLD's
     * contents with NEW's (OLD stays linked in the tree).
     * When OLD is not a Group, NEW brings nothing: just keep OLD.
     * Either way the caller frees NEW. */
    if (old->type == HWLOC_OBJ_GROUP
	&& (new->attr->group.kind < old->attr->group.kind))
      hwloc_replace_linked_object(old, new);

    return old;

  } else if (old->type == HWLOC_OBJ_GROUP) {
    /* Inserting a non-Group object on top of an existing Group. */

    /* A PU never absorbs a Memory-kind Group. */
    if (new->type == HWLOC_OBJ_PU && old->attr->group.kind == HWLOC_GROUP_KIND_MEMORY)
      /* never merge, continue inserting */
      return NULL;

    /* Replace the Group's contents with the real object's;
     * OLD stays linked, the caller frees NEW. */
    hwloc_replace_linked_object(old, new);
    return old;
  }

  /* neither is a Group, nothing to merge here */
  return NULL;
}
1326
1327
1328
1329
1330
1331
1332 static struct hwloc_obj *
1333 hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur, hwloc_obj_t obj,
1334 hwloc_report_error_t report_error)
1335 {
1336 hwloc_obj_t child, next_child = NULL;
1337
1338 hwloc_obj_t *cur_children = &cur->first_child;
1339 hwloc_obj_t *obj_children = &obj->first_child;
1340
1341 hwloc_obj_t *putp = NULL;
1342
1343 assert(!hwloc__obj_type_is_memory(obj->type));
1344
1345
1346
1347
1348 for (child = cur->first_child, child ? next_child = child->next_sibling : NULL;
1349 child;
1350 child = next_child, child ? next_child = child->next_sibling : NULL) {
1351
1352 int res = hwloc_obj_cmp_sets(obj, child);
1353 int setres = res;
1354
1355 if (res == HWLOC_OBJ_EQUAL) {
1356 hwloc_obj_t merged = hwloc__insert_try_merge_group(child, obj);
1357 if (merged)
1358 return merged;
1359
1360 res = hwloc_type_cmp(obj, child);
1361 }
1362
1363 switch (res) {
1364 case HWLOC_OBJ_EQUAL:
1365
1366
1367
1368 merge_insert_equal(obj, child);
1369
1370 return child;
1371
1372 case HWLOC_OBJ_INCLUDED:
1373
1374 return hwloc___insert_object_by_cpuset(topology, child, obj, report_error);
1375
1376 case HWLOC_OBJ_INTERSECTS:
1377 if (report_error) {
1378 char childstr[512];
1379 char objstr[512];
1380 char msg[1024];
1381 hwloc__report_error_format_obj(objstr, sizeof(objstr), obj);
1382 hwloc__report_error_format_obj(childstr, sizeof(childstr), child);
1383 snprintf(msg, sizeof(msg), "%s intersects with %s without inclusion!", objstr, childstr);
1384 report_error(msg, __LINE__);
1385 }
1386 goto putback;
1387
1388 case HWLOC_OBJ_DIFFERENT:
1389
1390 if (!putp && hwloc__object_cpusets_compare_first(obj, child) < 0)
1391
1392 putp = cur_children;
1393
1394 cur_children = &child->next_sibling;
1395 break;
1396
1397 case HWLOC_OBJ_CONTAINS:
1398
1399 *cur_children = child->next_sibling;
1400 child->next_sibling = NULL;
1401
1402 *obj_children = child;
1403 obj_children = &child->next_sibling;
1404 child->parent = obj;
1405 if (setres == HWLOC_OBJ_EQUAL) {
1406 obj->memory_first_child = child->memory_first_child;
1407 child->memory_first_child = NULL;
1408 }
1409 break;
1410 }
1411 }
1412
1413 assert(!*obj_children);
1414 assert(!*cur_children);
1415
1416
1417 if (!putp)
1418 putp = cur_children;
1419 obj->next_sibling = *putp;
1420 *putp = obj;
1421 obj->parent = cur;
1422
1423 topology->modified = 1;
1424 return obj;
1425
1426 putback:
1427
1428 if (putp)
1429 cur_children = putp;
1430 else
1431 cur_children = &cur->first_child;
1432
1433 while ((child = obj->first_child) != NULL) {
1434
1435 obj->first_child = child->next_sibling;
1436 obj->parent = cur;
1437
1438 while (*cur_children && hwloc__object_cpusets_compare_first(*cur_children, child) < 0)
1439 cur_children = &(*cur_children)->next_sibling;
1440 child->next_sibling = *cur_children;
1441 *cur_children = child;
1442 }
1443 return NULL;
1444 }
1445
1446
1447
1448
1449
1450
1451
1452 static struct hwloc_obj *
1453 hwloc__find_obj_covering_memory_cpuset(struct hwloc_topology *topology, hwloc_obj_t parent, hwloc_bitmap_t cpuset)
1454 {
1455 hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, cpuset, parent);
1456 if (!child)
1457 return parent;
1458 if (child && hwloc_bitmap_isequal(child->cpuset, cpuset))
1459 return child;
1460 return hwloc__find_obj_covering_memory_cpuset(topology, child, cpuset);
1461 }
1462
/* Find (or create) the normal parent where a memory object with OBJ's
 * cpuset should be attached. Falls back to an existing larger parent when
 * Groups are filtered out or any allocation/insertion fails; never
 * returns NULL as long as the topology has a root. */
static struct hwloc_obj *
hwloc__find_insert_memory_parent(struct hwloc_topology *topology, hwloc_obj_t obj,
				 hwloc_report_error_t report_error)
{
  hwloc_obj_t parent, group, result;

  if (hwloc_bitmap_iszero(obj->cpuset)) {
    /* CPU-less memory: attach to the root object */
    parent = topology->levels[0][0];

  } else {
    /* find the deepest object covering the memory object's cpuset */
    parent = hwloc__find_obj_covering_memory_cpuset(topology, topology->levels[0][0], obj->cpuset);
    if (!parent) {
      /* fallback to the root object */
      parent = hwloc_get_root_obj(topology);
    }

    if (parent->type == HWLOC_OBJ_PU) {
      /* never attach memory under a PU, use its parent instead */
      parent = parent->parent;
      assert(parent);
    }

    /* a non-root exact cpuset match is good enough, no Group needed */
    if (parent != topology->levels[0][0] && hwloc_bitmap_isequal(parent->cpuset, obj->cpuset))
      /* there's a perfect parent, use it */
      return parent;
  }

  if (!hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP))
    /* Groups are filtered out: attach to the larger parent directly */
    return parent;

  /* Create a Memory Group with the same cpuset as the memory object. */
  group = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX);
  if (!group)
    /* allocation failed, fallback to the larger parent */
    return parent;

  group->attr->group.kind = HWLOC_GROUP_KIND_MEMORY;
  group->cpuset = hwloc_bitmap_dup(obj->cpuset);
  group->complete_cpuset = hwloc_bitmap_dup(obj->complete_cpuset);
  /* Check that dup() succeeded for exactly the sets OBJ has
   * (hwloc_bitmap_dup() returns NULL on failure or on NULL input). */
  if (!group->cpuset != !obj->cpuset
      || !group->complete_cpuset != !obj->complete_cpuset) {
    /* failed to create the group sets, fallback to the larger parent */
    hwloc_free_unlinked_object(group);
    return parent;
  }

  result = hwloc__insert_object_by_cpuset(topology, parent, group, report_error);
  if (!result) {
    /* insertion failed (group already freed), fallback to the larger parent */
    return parent;
  }

  /* an equal object would have been merged, so we must have inserted GROUP */
  assert(result == group);
  return group;
}
1526
1527
/* Attach the memory object OBJ (currently: a NUMA node) at the end of
 * PARENT's memory-children list, and update the root (complete_)nodeset.
 * PARENT must be a normal object.
 * NOTE(review): obj->parent is not assigned here — presumably set later
 * (e.g. during reconnect) or expected from the caller; TODO confirm. */
struct hwloc_obj *
hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
			    hwloc_obj_t obj,
			    hwloc_report_error_t report_error __hwloc_attribute_unused)
{
  hwloc_obj_t *cur_children;

  assert(parent);
  assert(hwloc__obj_type_is_normal(parent->type));

#if 0
  /* Disabled: inheriting the parent cpuset into memory objects.
   * Kept for reference. */
  hwloc_bitmap_copy(obj->cpuset, parent->cpuset);
#endif

  /* only NUMA nodes are attached through this path for now */
  assert(obj->type == HWLOC_OBJ_NUMANODE);
  assert(obj->nodeset);
  /* append at the end of the memory children list (no ordering here) */
  cur_children = &parent->memory_first_child;
  while (*cur_children) {
    cur_children = &(*cur_children)->next_sibling;
  }
  *cur_children = obj;
  obj->next_sibling = NULL;

  /* fill in a missing complete_nodeset from the main nodeset */
  if (!obj->complete_nodeset) {
    obj->complete_nodeset = hwloc_bitmap_dup(obj->nodeset);
  }

  /* propagate the new NUMA node into the root (complete_)nodeset */
  if (obj->type == HWLOC_OBJ_NUMANODE) {
    if (hwloc_bitmap_isset(obj->nodeset, obj->os_index))
      hwloc_bitmap_set(topology->levels[0][0]->nodeset, obj->os_index);
    hwloc_bitmap_set(topology->levels[0][0]->complete_nodeset, obj->os_index);
  }

  topology->modified = 1;
  return obj;
}
1576
1577
/* Insert OBJ below ROOT (or the whole topology when ROOT is NULL), using
 * its sets to locate the insertion point. Memory objects go through the
 * memory-children attachment path. Returns the object kept in the tree
 * (OBJ, or the existing object it was merged into), or NULL on error;
 * OBJ is freed whenever it is not the returned object. */
struct hwloc_obj *
hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root,
			       hwloc_obj_t obj,
			       hwloc_report_error_t report_error)
{
  struct hwloc_obj *result;

#ifdef HWLOC_DEBUG
  assert(!hwloc__obj_type_is_special(obj->type));

  /* we need at least one set to locate the insertion point */
  assert(obj->cpuset || obj->complete_cpuset || obj->nodeset || obj->complete_nodeset);
#endif

  if (hwloc__obj_type_is_memory(obj->type)) {
    if (!root) {
      /* no explicit parent: find (or build) one from the cpuset */
      root = hwloc__find_insert_memory_parent(topology, obj, report_error);
      if (!root) {
	hwloc_free_unlinked_object(obj);
	return NULL;
      }
    }
    return hwloc__attach_memory_object(topology, root, obj, report_error);
  }

  if (!root)
    /* start at the topology root */
    root = topology->levels[0][0];

  result = hwloc___insert_object_by_cpuset(topology, root, obj, report_error);
  if (result && result->type == HWLOC_OBJ_PU) {
    /* add the new PU to the root (complete_)cpuset */
    if (hwloc_bitmap_isset(result->cpuset, result->os_index))
      hwloc_bitmap_set(topology->levels[0][0]->cpuset, result->os_index);
    hwloc_bitmap_set(topology->levels[0][0]->complete_cpuset, result->os_index);
  }
  if (result != obj) {
    /* either insertion failed or OBJ was merged into an existing object */
    hwloc_free_unlinked_object(obj);
  }
  return result;
}
1624
1625
1626
1627 struct hwloc_obj *
1628 hwloc_insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t obj)
1629 {
1630 return hwloc__insert_object_by_cpuset(topology, NULL, obj, hwloc_report_os_error);
1631 }
1632
/* Insert OBJ as the last child of PARENT in the children list matching
 * OBJ's type (misc/io/memory/normal). Also records new NUMA nodes and PUs
 * in the root (complete_)sets. */
void
hwloc_insert_object_by_parent(struct hwloc_topology *topology, hwloc_obj_t parent, hwloc_obj_t obj)
{
  hwloc_obj_t *current;

  if (obj->type == HWLOC_OBJ_MISC) {
    /* append to the end of the misc list */
    for (current = &parent->misc_first_child; *current; current = &(*current)->next_sibling);
  } else if (hwloc__obj_type_is_io(obj->type)) {
    /* append to the end of the I/O list */
    for (current = &parent->io_first_child; *current; current = &(*current)->next_sibling);
  } else if (hwloc__obj_type_is_memory(obj->type)) {
    /* append to the end of the memory list */
    for (current = &parent->memory_first_child; *current; current = &(*current)->next_sibling);
    /* add the new NUMA node to the root (complete_)nodeset */
    if (obj->type == HWLOC_OBJ_NUMANODE) {
      if (hwloc_bitmap_isset(obj->nodeset, obj->os_index))
	hwloc_bitmap_set(topology->levels[0][0]->nodeset, obj->os_index);
      hwloc_bitmap_set(topology->levels[0][0]->complete_nodeset, obj->os_index);
    }
  } else {
    /* Append to the end of the normal list.
     * NOTE(review): unlike hwloc__insert_object_by_cpuset(), no cpuset
     * ordering is enforced here — presumably the caller inserts in order
     * or children get reordered later; TODO confirm. */
    for (current = &parent->first_child; *current; current = &(*current)->next_sibling);
    /* add the new PU to the root (complete_)cpuset */
    if (obj->type == HWLOC_OBJ_PU) {
      if (hwloc_bitmap_isset(obj->cpuset, obj->os_index))
	hwloc_bitmap_set(topology->levels[0][0]->cpuset, obj->os_index);
      hwloc_bitmap_set(topology->levels[0][0]->complete_cpuset, obj->os_index);
    }
  }

  *current = obj;
  obj->parent = parent;
  obj->next_sibling = NULL;
  topology->modified = 1;
}
1674
1675 hwloc_obj_t
1676 hwloc_alloc_setup_object(hwloc_topology_t topology,
1677 hwloc_obj_type_t type, unsigned os_index)
1678 {
1679 struct hwloc_obj *obj = hwloc_tma_malloc(topology->tma, sizeof(*obj));
1680 memset(obj, 0, sizeof(*obj));
1681 obj->type = type;
1682 obj->os_index = os_index;
1683 obj->gp_index = topology->next_gp_index++;
1684 obj->attr = hwloc_tma_malloc(topology->tma, sizeof(*obj->attr));
1685 memset(obj->attr, 0, sizeof(*obj->attr));
1686
1687 return obj;
1688 }
1689
1690 hwloc_obj_t
1691 hwloc_topology_alloc_group_object(struct hwloc_topology *topology)
1692 {
1693 if (!topology->is_loaded) {
1694
1695 errno = EINVAL;
1696 return NULL;
1697 }
1698 return hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX);
1699 }
1700
1701 static void hwloc_propagate_symmetric_subtree(hwloc_topology_t topology, hwloc_obj_t root);
1702 static void propagate_total_memory(hwloc_obj_t obj);
1703 static void hwloc_set_group_depth(hwloc_topology_t topology);
1704
/* Public API: insert a caller-built Group object (from
 * hwloc_topology_alloc_group_object()) into a loaded topology.
 * Clamps OBJ's sets to the root's, inserts by cpuset, then reconnects
 * levels and recomputes symmetric-subtree flags and Group depths.
 * Returns the inserted object, the existing object it was merged into,
 * or NULL with errno set on error (OBJ is freed on failure). */
hwloc_obj_t
hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t obj)
{
  hwloc_obj_t res, root;

  if (!topology->is_loaded) {
    /* not supported on an unloaded topology */
    hwloc_free_unlinked_object(obj);
    errno = EINVAL;
    return NULL;
  }

  if (topology->type_filter[HWLOC_OBJ_GROUP] == HWLOC_TYPE_FILTER_KEEP_NONE) {
    hwloc_free_unlinked_object(obj);
    errno = EINVAL;
    return NULL;
  }

  /* clamp the object's sets inside the root's sets */
  root = hwloc_get_root_obj(topology);
  if (obj->cpuset)
    hwloc_bitmap_and(obj->cpuset, obj->cpuset, root->cpuset);
  if (obj->complete_cpuset)
    hwloc_bitmap_and(obj->complete_cpuset, obj->complete_cpuset, root->complete_cpuset);
  if (obj->nodeset)
    hwloc_bitmap_and(obj->nodeset, obj->nodeset, root->nodeset);
  if (obj->complete_nodeset)
    hwloc_bitmap_and(obj->complete_nodeset, obj->complete_nodeset, root->complete_nodeset);

  /* refuse an object with no set, or only empty sets */
  if ((!obj->cpuset || hwloc_bitmap_iszero(obj->cpuset))
      && (!obj->complete_cpuset || hwloc_bitmap_iszero(obj->complete_cpuset))
      && (!obj->nodeset || hwloc_bitmap_iszero(obj->nodeset))
      && (!obj->complete_nodeset || hwloc_bitmap_iszero(obj->complete_nodeset))) {
    hwloc_free_unlinked_object(obj);
    errno = EINVAL;
    return NULL;
  }

  /* NULL error reporter: don't print conflicts for user-inserted groups */
  res = hwloc__insert_object_by_cpuset(topology, NULL, obj, NULL );
  if (!res)
    return NULL;
  if (res != obj)
    /* merged into an existing object, nothing else to update */
    return res;

  /* properly inserted: fill its sets from the children it grabbed,
   * then rebuild levels */
  hwloc_obj_add_children_sets(obj);
  if (hwloc_topology_reconnect(topology, 0) < 0)
    return NULL;

  hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]);
  hwloc_set_group_depth(topology);

#ifndef HWLOC_DEBUG
  if (getenv("HWLOC_DEBUG_CHECK"))
#endif
    hwloc_topology_check(topology);

  return obj;
}
1764
/* Public API: insert a Misc object named NAME under PARENT.
 * Returns the new object, or NULL (errno=EINVAL) when Misc objects are
 * filtered out or the topology isn't loaded yet. */
hwloc_obj_t
hwloc_topology_insert_misc_object(struct hwloc_topology *topology, hwloc_obj_t parent, const char *name)
{
  hwloc_obj_t obj;

  if (topology->type_filter[HWLOC_OBJ_MISC] == HWLOC_TYPE_FILTER_KEEP_NONE) {
    errno = EINVAL;
    return NULL;
  }

  if (!topology->is_loaded) {
    errno = EINVAL;
    return NULL;
  }

  obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_MISC, HWLOC_UNKNOWN_INDEX);
  if (name)
    obj->name = strdup(name);

  hwloc_insert_object_by_parent(topology, parent, obj);

  /* Full reconnect; could be limited to Misc lists/levels, but this API
   * is not performance-critical. */
  hwloc_topology_reconnect(topology, 0);

#ifndef HWLOC_DEBUG
  if (getenv("HWLOC_DEBUG_CHECK"))
#endif
    hwloc_topology_check(topology);

  return obj;
}
1798
1799
1800
1801
1802
1803
1804 static hwloc_obj_t
1805 hwloc_get_highest_obj_covering_complete_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
1806 {
1807 hwloc_obj_t current = hwloc_get_root_obj(topology);
1808 hwloc_obj_t child;
1809
1810 if (hwloc_bitmap_isequal(set, current->complete_cpuset))
1811
1812 return current;
1813
1814 recurse:
1815
1816 for_each_child(child, current) {
1817 if (hwloc_bitmap_isequal(set, child->complete_cpuset))
1818
1819 return child;
1820 if (!hwloc_bitmap_iszero(child->complete_cpuset) && hwloc_bitmap_isincluded(set, child->complete_cpuset))
1821 break;
1822 }
1823
1824 if (child) {
1825 current = child;
1826 goto recurse;
1827 }
1828
1829
1830 return current;
1831 }
1832
/* Find a normal parent for an I/O hierarchy whose locality is CPUSET,
 * inserting an I/O Group covering exactly that cpuset when possible.
 * CPUSET is modified in place (clamped to the topology's sets).
 * Returns NULL when the remaining locality is empty. */
hwloc_obj_t
hwloc_find_insert_io_parent_by_complete_cpuset(struct hwloc_topology *topology, hwloc_cpuset_t cpuset)
{
  hwloc_obj_t group_obj, largeparent, parent;

  /* restrict to the existing complete cpuset to avoid errors later */
  hwloc_bitmap_and(cpuset, cpuset, hwloc_topology_get_complete_cpuset(topology));
  if (hwloc_bitmap_iszero(cpuset))
    /* remaining locality is empty, report no parent */
    return NULL;

  largeparent = hwloc_get_highest_obj_covering_complete_cpuset(topology, cpuset);
  if (hwloc_bitmap_isequal(largeparent->complete_cpuset, cpuset)
      || !hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP))
    /* a perfect existing parent, or Groups are filtered out anyway */
    return largeparent;

  /* create an intermediate Group covering exactly this cpuset */
  group_obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX);
  if (!group_obj)
    /* allocation failed, fallback to the larger parent */
    return largeparent;

  group_obj->complete_cpuset = hwloc_bitmap_dup(cpuset);
  /* the main cpuset is further clamped to the (allowed) topology cpuset */
  hwloc_bitmap_and(cpuset, cpuset, hwloc_topology_get_topology_cpuset(topology));
  group_obj->cpuset = hwloc_bitmap_dup(cpuset);
  group_obj->attr->group.kind = HWLOC_GROUP_KIND_IO;
  parent = hwloc__insert_object_by_cpuset(topology, largeparent, group_obj, hwloc_report_os_error);
  if (!parent)
    /* insertion failed (group freed), fallback to the larger parent */
    return largeparent;

  /* an equal object would have been merged rather than inserted */
  assert(parent == group_obj);

  /* fill the group's remaining sets from the children it grabbed */
  hwloc_obj_add_children_sets(group_obj);

  return parent;
}
1873
1874 static int hwloc_memory_page_type_compare(const void *_a, const void *_b)
1875 {
1876 const struct hwloc_memory_page_type_s *a = _a;
1877 const struct hwloc_memory_page_type_s *b = _b;
1878
1879 if (!b->size)
1880 return -1;
1881
1882 if (b->size == a->size)
1883 return 0;
1884 return a->size < b->size ? -1 : 1;
1885 }
1886
1887
/* Recursively recompute total_memory for the whole subtree: the sum of
 * all descendants' memory plus the object's own local memory.
 * Also sorts each NUMA node's page_types array by size and truncates
 * trailing zero-size entries. */
static void
propagate_total_memory(hwloc_obj_t obj)
{
  hwloc_obj_t child;
  unsigned i;

  /* reset before summing, this may be a re-run */
  obj->total_memory = 0;

  /* accumulate normal and memory children */
  for_each_child(child, obj) {
    propagate_total_memory(child);
    obj->total_memory += child->total_memory;
  }
  for_each_memory_child(child, obj) {
    propagate_total_memory(child);
    obj->total_memory += child->total_memory;
  }

  /* add our own local memory and tidy page_types */
  if (obj->type == HWLOC_OBJ_NUMANODE) {
    obj->total_memory += obj->attr->numanode.local_memory;

    /* sort page_types by size; zero-size entries end up last
     * (see hwloc_memory_page_type_compare) */
    qsort(obj->attr->numanode.page_types, obj->attr->numanode.page_types_len, sizeof(*obj->attr->numanode.page_types), hwloc_memory_page_type_compare);
    /* shrink the array length past any trailing zero-size entries */
    for(i=obj->attr->numanode.page_types_len; i>=1; i--)
      if (obj->attr->numanode.page_types[i-1].size)
	break;
    obj->attr->numanode.page_types_len = i;
  }
}
1922
1923
1924
1925
/* Propagate set constraints down the tree: each child's sets are clamped
 * to its parent's, and missing complete sets are filled from the main
 * sets. Iterates over normal children first, then memory children, using
 * the in_memory_list/goto trick to share one loop body. */
static void
fixup_sets(hwloc_obj_t obj)
{
  int in_memory_list;
  hwloc_obj_t child;

  child = obj->first_child;
  in_memory_list = 0;
  /* first pass: normal children; second pass: memory children */

 iterate:
  while (child) {
    /* a child's sets must be included in its parent's */
    hwloc_bitmap_and(child->cpuset, child->cpuset, obj->cpuset);
    hwloc_bitmap_and(child->nodeset, child->nodeset, obj->nodeset);
    /* clamp existing complete sets, or default them to the main sets */
    if (child->complete_cpuset) {
      hwloc_bitmap_and(child->complete_cpuset, child->complete_cpuset, obj->complete_cpuset);
    } else {
      child->complete_cpuset = hwloc_bitmap_dup(child->cpuset);
    }
    if (child->complete_nodeset) {
      hwloc_bitmap_and(child->complete_nodeset, child->complete_nodeset, obj->complete_nodeset);
    } else {
      child->complete_nodeset = hwloc_bitmap_dup(child->nodeset);
    }

    fixup_sets(child);
    child = child->next_sibling;
  }

  /* switch to the memory children list, if not done yet */
  if (!in_memory_list && obj->memory_first_child) {
    child = obj->memory_first_child;
    in_memory_list = 1;
    goto iterate;
  }

  /* no sets to fix under I/O or Misc children */
}
1966
1967
/* OR each of SRC's four sets into DST's matching set, allocating DST's
 * sets on demand. Always returns 0. */
int
hwloc_obj_add_other_obj_sets(hwloc_obj_t dst, hwloc_obj_t src)
{
/* OR _src's _set into _dst's, allocating _dst's set when missing. */
#define ADD_OTHER_OBJ_SET(_dst, _src, _set) \
  if ((_src)->_set) { \
    if (!(_dst)->_set) \
      (_dst)->_set = hwloc_bitmap_alloc(); \
    hwloc_bitmap_or((_dst)->_set, (_dst)->_set, (_src)->_set); \
  }
  ADD_OTHER_OBJ_SET(dst, src, cpuset);
  ADD_OTHER_OBJ_SET(dst, src, complete_cpuset);
  ADD_OTHER_OBJ_SET(dst, src, nodeset);
  ADD_OTHER_OBJ_SET(dst, src, complete_nodeset);
  return 0;
}
1983
1984 int
1985 hwloc_obj_add_children_sets(hwloc_obj_t obj)
1986 {
1987 hwloc_obj_t child;
1988 assert(obj->cpuset != NULL);
1989 for_each_child(child, obj) {
1990 assert(child->cpuset != NULL);
1991 hwloc_obj_add_other_obj_sets(obj, child);
1992 }
1993
1994 return 0;
1995 }
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
/* Propagate nodesets through the tree:
 * - seed each object's nodeset from its parent's (top-down),
 * - pull local memory children's nodesets in,
 * - recurse into normal children, then union their nodesets back (bottom-up).
 * Also forces each memory child's cpusets to mirror its parent's. */
static void
propagate_nodeset(hwloc_obj_t obj)
{
  hwloc_obj_t child;

  /* Start our nodeset from the parent's: it is emptied at the root and
   * filled with this branch's local nodes as we recurse down. */
  if (!obj->nodeset)
    obj->nodeset = hwloc_bitmap_alloc();
  if (obj->parent)
    hwloc_bitmap_copy(obj->nodeset, obj->parent->nodeset);
  else
    hwloc_bitmap_zero(obj->nodeset);

  /* Don't reset complete_nodeset from scratch: just guarantee it contains
   * the main nodeset (existing extra bits are preserved). */
  if (!obj->complete_nodeset)
    obj->complete_nodeset = hwloc_bitmap_dup(obj->nodeset);
  else
    hwloc_bitmap_or(obj->complete_nodeset, obj->complete_nodeset, obj->nodeset);

  /* now account for our local memory children */
  for_each_memory_child(child, obj) {
    /* ensure the memory child's complete_nodeset contains its nodeset */
    if (!child->complete_nodeset)
      child->complete_nodeset = hwloc_bitmap_dup(child->nodeset);
    else
      hwloc_bitmap_or(child->complete_nodeset, child->complete_nodeset, child->nodeset);

    /* add its nodes to ours */
    hwloc_bitmap_or(obj->nodeset, obj->nodeset, child->nodeset);
    hwloc_bitmap_or(obj->complete_nodeset, obj->complete_nodeset, child->complete_nodeset);

    /* memory children share their parent's cpusets */
    if (child->cpuset)
      hwloc_bitmap_copy(child->cpuset, obj->cpuset);
    else
      child->cpuset = hwloc_bitmap_dup(obj->cpuset);
    if (child->complete_cpuset)
      hwloc_bitmap_copy(child->complete_cpuset, obj->complete_cpuset);
    else
      child->complete_cpuset = hwloc_bitmap_dup(obj->complete_cpuset);
  }

  /* recurse into normal children */
  for_each_child(child, obj) {
    propagate_nodeset(child);
  }

  /* Second loop on purpose: children nodesets are only final after the
   * recursion above, so the union must happen afterwards. */
  for_each_child(child, obj) {
    hwloc_bitmap_or(obj->nodeset, obj->nodeset, child->nodeset);
    hwloc_bitmap_or(obj->complete_nodeset, obj->complete_nodeset, child->complete_nodeset);
  }

  /* no nodesets under I/O or Misc children */
}
2078
2079 static void
2080 remove_unused_sets(hwloc_topology_t topology, hwloc_obj_t obj)
2081 {
2082 hwloc_obj_t child;
2083
2084 hwloc_bitmap_and(obj->cpuset, obj->cpuset, topology->allowed_cpuset);
2085 hwloc_bitmap_and(obj->nodeset, obj->nodeset, topology->allowed_nodeset);
2086
2087 for_each_child(child, obj)
2088 remove_unused_sets(topology, child);
2089 for_each_memory_child(child, obj)
2090 remove_unused_sets(topology, child);
2091
2092 }
2093
/* Depth-first pass over ROOT's I/O children: record each child's depth
 * below the host, and remove bridges that end up with no I/O children
 * when the bridge type filter is KEEP_IMPORTANT. */
static void
hwloc__filter_bridges(hwloc_topology_t topology, hwloc_obj_t root, unsigned depth)
{
  hwloc_obj_t child, *pchild;

  /* safe iteration: children may be removed during the loop */
  for_each_io_child_safe(child, root, pchild) {
    enum hwloc_type_filter_e filter = topology->type_filter[child->type];

    /* recurse first, so a bridge becomes childless here when
     * everything below it was filtered out */
    hwloc__filter_bridges(topology, child, depth+1);

    /* NOTE(review): written unconditionally, so for non-bridge I/O
     * children this stores into the bridge member of the attr union —
     * presumably a harmless overlap, but confirm. */
    child->attr->bridge.depth = depth;

    if (child->type == HWLOC_OBJ_BRIDGE
	&& filter == HWLOC_TYPE_FILTER_KEEP_IMPORTANT
	&& !child->io_first_child) {
      /* unimportant bridge: nothing behind it, drop it */
      unlink_and_free_single_object(pchild);
      topology->modified = 1;
    }
  }
}
2116
2117 static void
2118 hwloc_filter_bridges(hwloc_topology_t topology, hwloc_obj_t parent)
2119 {
2120 hwloc_obj_t child = parent->first_child;
2121 while (child) {
2122 hwloc_filter_bridges(topology, child);
2123 child = child->next_sibling;
2124 }
2125
2126 hwloc__filter_bridges(topology, parent, 0);
2127 }
2128
2129 void
2130 hwloc__reorder_children(hwloc_obj_t parent)
2131 {
2132
2133 hwloc_obj_t *prev, child, children = parent->first_child;
2134 parent->first_child = NULL;
2135 while (children) {
2136
2137 child = children;
2138 children = child->next_sibling;
2139
2140 prev = &parent->first_child;
2141 while (*prev && hwloc__object_cpusets_compare_first(child, *prev) > 0)
2142 prev = &((*prev)->next_sibling);
2143
2144 child->next_sibling = *prev;
2145 *prev = child;
2146 }
2147
2148 }
2149
2150
2151
2152
2153
/* Remove, bottom-up, objects that have no children left and an empty
 * cpuset (normal objects) or nodeset (memory objects). */
static void
remove_empty(hwloc_topology_t topology, hwloc_obj_t *pobj)
{
  hwloc_obj_t obj = *pobj, child, *pchild;

  /* filter children first, so OBJ itself may become empty below */
  for_each_child_safe(child, obj, pchild)
    remove_empty(topology, pchild);
  for_each_memory_child_safe(child, obj, pchild)
    remove_empty(topology, pchild);

  /* keep objects that still have normal, memory or I/O children.
   * NOTE(review): misc children are not checked here — confirm whether
   * an object whose only children are Misc should survive. */
  if (obj->first_child
      || obj->memory_first_child
      || obj->io_first_child )
    /* ignore this object, it still carries something */
    return;

  if (hwloc__obj_type_is_normal(obj->type)) {
    if (!hwloc_bitmap_iszero(obj->cpuset))
      /* still covers some PUs, keep it */
      return;
  } else {
    assert(hwloc__obj_type_is_memory(obj->type));
    if (!hwloc_bitmap_iszero(obj->nodeset))
      /* still covers some NUMA nodes, keep it */
      return;
  }

  hwloc_debug("%s", "\nRemoving empty object ");
  hwloc_debug_print_object(0, obj);
  unlink_and_free_single_object(pobj);
  topology->modified = 1;
}
2185
2186
2187 static void
2188 hwloc_reset_normal_type_depths(hwloc_topology_t topology)
2189 {
2190 unsigned i;
2191 for (i=HWLOC_OBJ_TYPE_MIN; i<=HWLOC_OBJ_GROUP; i++)
2192 topology->type_depth[i] = HWLOC_TYPE_DEPTH_UNKNOWN;
2193
2194 }
2195
2196
2197 static int
2198 hwloc_compare_levels_structure(hwloc_topology_t topology, unsigned i)
2199 {
2200 int checkmemory = (topology->levels[i][0]->type == HWLOC_OBJ_PU);
2201 unsigned j;
2202
2203 if (topology->level_nbobjects[i-1] != topology->level_nbobjects[i])
2204 return -1;
2205
2206 for(j=0; j<topology->level_nbobjects[i]; j++) {
2207 if (topology->levels[i-1][j]->arity != 1)
2208 return -1;
2209 if (checkmemory && topology->levels[i-1][j]->memory_arity)
2210
2211 return -1;
2212 }
2213
2214 return 0;
2215 }
2216
2217
/* Collapse pairs of adjacent levels with identical structure when one of
 * the two types uses HWLOC_TYPE_FILTER_KEEP_STRUCTURE: the lower-priority
 * level of each mergeable pair is dissolved into the other. When anything
 * was merged, object depths and the type_depth[] cache are rebuilt. */
static void
hwloc_filter_levels_keep_structure(hwloc_topology_t topology)
{
  unsigned i, j;
  int res = 0;

  /* start from the bottom since levels are removed during the walk */
  for(i=topology->nb_levels-1; i>0; i--) {
    int replacechild = 0, replaceparent = 0;
    hwloc_obj_t obj1 = topology->levels[i-1][0];
    hwloc_obj_t obj2 = topology->levels[i][0];
    hwloc_obj_type_t type1 = obj1->type;
    hwloc_obj_type_t type2 = obj2->type;

    /* which of the two levels may be dissolved? */
    if (topology->type_filter[type1] == HWLOC_TYPE_FILTER_KEEP_STRUCTURE)
      /* the parent level may be dropped in favor of its children */
      replaceparent = 1;
    if (topology->type_filter[type2] == HWLOC_TYPE_FILTER_KEEP_STRUCTURE)
      /* the child level may be dropped in favor of its parents */
      replacechild = 1;
    if (!replacechild && !replaceparent)
      /* neither is filtered with KEEP_STRUCTURE, keep both levels */
      continue;

    /* when both are candidates, keep the higher-priority type */
    if (replaceparent && replacechild) {
      if (obj_type_priority[type1] >= obj_type_priority[type2])
	replaceparent = 0;
      else
	replacechild = 0;
    }

    /* only merge levels whose structure is strictly identical */
    if (hwloc_compare_levels_structure(topology, i) < 0)
      continue;
    hwloc_debug("may merge levels #%u=%s and #%u=%s\n",
		i-1, hwloc_obj_type_string(type1), i, hwloc_obj_type_string(type2));

    /* merge each (parent, child) pair of the two levels */
    for(j=0; j<topology->level_nbobjects[i]; j++) {
      hwloc_obj_t parent = topology->levels[i-1][j];
      hwloc_obj_t child = topology->levels[i][j];
      unsigned k;
      if (replacechild) {
	/* dissolve CHILD: move its children up into PARENT */
	parent->first_child = child->first_child;
	parent->last_child = child->last_child;
	parent->arity = child->arity;
	free(parent->children);
	parent->children = child->children;
	child->children = NULL;
	/* reparent the grandchildren */
	for(k=0; k<parent->arity; k++)
	  parent->children[k]->parent = parent;
	/* move CHILD's special children lists up as well */
	if (child->memory_first_child) {
	  append_siblings_list(&parent->memory_first_child, child->memory_first_child, parent);
	  parent->memory_arity += child->memory_arity;
	}
	if (child->io_first_child) {
	  append_siblings_list(&parent->io_first_child, child->io_first_child, parent);
	  parent->io_arity += child->io_arity;
	}
	if (child->misc_first_child) {
	  append_siblings_list(&parent->misc_first_child, child->misc_first_child, parent);
	  parent->misc_arity += child->misc_arity;
	}
	hwloc_free_unlinked_object(child);
      } else {
	/* dissolve PARENT: splice CHILD into the grand-parent */
	if (parent->parent) {
	  parent->parent->children[parent->sibling_rank] = child;
	  child->sibling_rank = parent->sibling_rank;
	  if (!parent->sibling_rank) {
	    parent->parent->first_child = child;
	    /* prev_sibling stays NULL (first child) */
	  } else {
	    child->prev_sibling = parent->parent->children[parent->sibling_rank-1];
	    child->prev_sibling->next_sibling = child;
	  }
	  if (parent->sibling_rank == parent->parent->arity-1) {
	    parent->parent->last_child = child;
	    /* next_sibling stays NULL (last child) */
	  } else {
	    child->next_sibling = parent->parent->children[parent->sibling_rank+1];
	    child->next_sibling->prev_sibling = child;
	  }
	  /* reparent CHILD */
	  child->parent = parent->parent;
	} else {
	  /* PARENT was the root: CHILD becomes the new root */
	  topology->levels[0][0] = child;
	  child->parent = NULL;
	}
	/* move PARENT's special children lists down into CHILD */
	if (parent->memory_first_child) {
	  prepend_siblings_list(&child->memory_first_child, parent->memory_first_child, child);
	  child->memory_arity += parent->memory_arity;
	}
	if (parent->io_first_child) {
	  prepend_siblings_list(&child->io_first_child, parent->io_first_child, child);
	  child->io_arity += parent->io_arity;
	}
	if (parent->misc_first_child) {
	  prepend_siblings_list(&child->misc_first_child, parent->misc_first_child, child);
	  child->misc_arity += parent->misc_arity;
	}
	hwloc_free_unlinked_object(parent);
	/* sibling links of the promoted children fixed below */
      }
    }
    if (replaceparent && i>1) {
      /* fix prev/next sibling pointers of the promoted children from the
       * grand-parent's children arrays */
      for(j=0; j<topology->level_nbobjects[i]; j++) {
	hwloc_obj_t child = topology->levels[i][j];
	unsigned rank = child->sibling_rank;
	child->prev_sibling = rank > 0 ? child->parent->children[rank-1] : NULL;
	child->next_sibling = rank < child->parent->arity-1 ? child->parent->children[rank+1] : NULL;
      }
    }

    /* shift the level arrays to drop the dissolved level */
    if (replaceparent) {
      /* level i-1 disappears, move [i..] down by one */
      free(topology->levels[i-1]);
      memmove(&topology->levels[i-1],
	      &topology->levels[i],
	      (topology->nb_levels-i)*sizeof(topology->levels[i]));
      memmove(&topology->level_nbobjects[i-1],
	      &topology->level_nbobjects[i],
	      (topology->nb_levels-i)*sizeof(topology->level_nbobjects[i]));
      hwloc_debug("removed parent level %s at depth %u\n",
		  hwloc_obj_type_string(type1), i-1);
    } else {
      /* level i disappears, move [i+1..] down by one */
      free(topology->levels[i]);
      memmove(&topology->levels[i],
	      &topology->levels[i+1],
	      (topology->nb_levels-1-i)*sizeof(topology->levels[i]));
      memmove(&topology->level_nbobjects[i],
	      &topology->level_nbobjects[i+1],
	      (topology->nb_levels-1-i)*sizeof(topology->level_nbobjects[i]));
      hwloc_debug("removed child level %s at depth %u\n",
		  hwloc_obj_type_string(type2), i);
    }
    topology->level_nbobjects[topology->nb_levels-1] = 0;
    topology->levels[topology->nb_levels-1] = NULL;
    topology->nb_levels--;

    res++;
  }

  if (res > 0) {
    /* levels changed: recompute object depths and the type_depth cache */
    hwloc_reset_normal_type_depths(topology);
    for(i=0; i<topology->nb_levels; i++) {
      hwloc_obj_type_t type = topology->levels[i][0]->type;
      for(j=0; j<topology->level_nbobjects[i]; j++)
	topology->levels[i][j]->depth = (int)i;
      if (topology->type_depth[type] == HWLOC_TYPE_DEPTH_UNKNOWN)
	topology->type_depth[type] = (int)i;
      else
	topology->type_depth[type] = HWLOC_TYPE_DEPTH_MULTIPLE;
    }
  }
}
2384
/* Compute the symmetric_subtree flag for root and, recursively, for every
 * normal descendant.  A subtree is "symmetric" when all children are
 * symmetric themselves and all sibling subtrees at every depth look alike
 * (same depth and same arity level by level).
 */
static void
hwloc_propagate_symmetric_subtree(hwloc_topology_t topology, hwloc_obj_t root)
{
  hwloc_obj_t child;
  unsigned arity = root->arity;
  int ok;

  /* assume not symmetric until proven otherwise */
  root->symmetric_subtree = 0;

  /* a leaf is trivially symmetric */
  if (!arity)
    goto good;

  /* recurse into all children first; if any child subtree is asymmetric,
   * this one is too.  We still recurse into the remaining children so that
   * their own flags get computed. */
  ok = 1;
  for_each_child(child, root) {
    hwloc_propagate_symmetric_subtree(topology, child);
    if (!child->symmetric_subtree)
      ok = 0;
  }
  if (!ok)
    return;

  /* a single symmetric child makes the parent symmetric as well */
  if (arity == 1)
    goto good;

  /* Walk all children subtrees in lockstep, one depth at a time, and
   * compare them against the first child: they must agree on depth and
   * arity everywhere.  Since each subtree is already known symmetric,
   * following first_child at each step is representative of the whole
   * level. */
  {
    HWLOC_VLA(hwloc_obj_t, array, arity);
    memcpy(array, root->children, arity * sizeof(*array));
    while (1) {
      unsigned i;
      /* any mismatch with child #0 means asymmetric: bail out */
      for(i=1; i<arity; i++)
        if (array[i]->depth != array[0]->depth
            || array[i]->arity != array[0]->arity) {
          return;
        }
      if (!array[0]->arity)
        /* no more children level below, all levels matched */
        break;
      /* descend one level in every subtree */
      for(i=0; i<arity; i++)
        array[i] = array[i]->first_child;
    }
  }

  /* everything matched */
good:
  root->symmetric_subtree = 1;
}
2445
2446 static void hwloc_set_group_depth(hwloc_topology_t topology)
2447 {
2448 unsigned groupdepth = 0;
2449 unsigned i, j;
2450 for(i=0; i<topology->nb_levels; i++)
2451 if (topology->levels[i][0]->type == HWLOC_OBJ_GROUP) {
2452 for (j = 0; j < topology->level_nbobjects[i]; j++)
2453 topology->levels[i][j]->attr->group.depth = groupdepth;
2454 groupdepth++;
2455 }
2456 }
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
/* Recursively (re)build the per-parent child bookkeeping for the whole
 * subtree below parent: sibling_rank, prev_sibling, last_child, arity, and
 * the normal children[] array, plus the arities of the memory/io/misc
 * sibling lists.  The linked lists (first_child/next_sibling) are the
 * source of truth; the children[] array is regenerated from them.
 */
static void
hwloc_connect_children(hwloc_obj_t parent)
{
  unsigned n, oldn = parent->arity;
  hwloc_obj_t child, prev_child;
  int ok;

  /* Walk the normal-children list, numbering siblings and linking them
   * backwards; also check whether the existing children[] array already
   * matches the list so it can be reused as-is. */
  ok = 1;
  prev_child = NULL;
  for (n = 0, child = parent->first_child;
       child;
       n++, prev_child = child, child = child->next_sibling) {
    child->sibling_rank = n;
    child->prev_sibling = prev_child;
    /* stale array if the list grew or an entry differs */
    if (n >= oldn || parent->children[n] != child)
      ok = 0;
    /* recurse before fixing our own array; children are independent */
    hwloc_connect_children(child);
  }
  parent->last_child = prev_child;
  parent->arity = n;
  if (!n) {
    /* no normal children anymore, drop the array */
    free(parent->children);
    parent->children = NULL;
    goto memory;
  }
  if (ok)
    /* existing children[] array is still up-to-date, keep it */
    goto memory;

  /* grow the array if needed (shrinking keeps the old, larger buffer).
   * NOTE(review): this malloc result is not checked before the fill loop
   * below writes into it — confirm against upstream error policy. */
  if (oldn < n) {
    free(parent->children);
    parent->children = malloc(n * sizeof(*parent->children));
  }
  /* refill the array from the authoritative linked list */
  for (n = 0, child = parent->first_child;
       child;
       n++, child = child->next_sibling) {
    parent->children[n] = child;
  }

  /* Memory children: no children[] array is kept for these, only ranks,
   * back links, and the parent pointer. */
 memory:
  prev_child = NULL;
  for (n = 0, child = parent->memory_first_child;
       child;
       n++, prev_child = child, child = child->next_sibling) {
    child->parent = parent;
    child->sibling_rank = n;
    child->prev_sibling = prev_child;
    hwloc_connect_children(child);
  }
  parent->memory_arity = n;

  /* I/O children: same treatment as memory children */
  prev_child = NULL;
  for (n = 0, child = parent->io_first_child;
       child;
       n++, prev_child = child, child = child->next_sibling) {
    child->parent = parent;
    child->sibling_rank = n;
    child->prev_sibling = prev_child;
    hwloc_connect_children(child);
  }
  parent->io_arity = n;

  /* Misc children: same treatment again */
  prev_child = NULL;
  for (n = 0, child = parent->misc_first_child;
       child;
       n++, prev_child = child, child = child->next_sibling) {
    child->parent = parent;
    child->sibling_rank = n;
    child->prev_sibling = prev_child;
    hwloc_connect_children(child);
  }
  parent->misc_arity = n;
}
2555
2556
2557
2558
2559 static int
2560 find_same_type(hwloc_obj_t root, hwloc_obj_t obj)
2561 {
2562 hwloc_obj_t child;
2563
2564 if (hwloc_type_cmp(root, obj) == HWLOC_OBJ_EQUAL)
2565 return 1;
2566
2567 for_each_child (child, root)
2568 if (find_same_type(child, obj))
2569 return 1;
2570
2571 return 0;
2572 }
2573
2574
2575
2576
2577
2578 static unsigned
2579 hwloc_level_take_objects(hwloc_obj_t top_obj,
2580 hwloc_obj_t *current_objs, unsigned n_current_objs,
2581 hwloc_obj_t *taken_objs, unsigned n_taken_objs __hwloc_attribute_unused,
2582 hwloc_obj_t *remaining_objs, unsigned n_remaining_objs __hwloc_attribute_unused)
2583 {
2584 unsigned taken_i = 0;
2585 unsigned new_i = 0;
2586 unsigned i, j;
2587
2588 for (i = 0; i < n_current_objs; i++)
2589 if (hwloc_type_cmp(top_obj, current_objs[i]) == HWLOC_OBJ_EQUAL) {
2590
2591 taken_objs[taken_i++] = current_objs[i];
2592 for (j = 0; j < current_objs[i]->arity; j++)
2593 remaining_objs[new_i++] = current_objs[i]->children[j];
2594 } else {
2595
2596 remaining_objs[new_i++] = current_objs[i];
2597 }
2598
2599 #ifdef HWLOC_DEBUG
2600
2601 assert(taken_i == n_taken_objs);
2602 assert(new_i == n_current_objs - n_taken_objs + n_remaining_objs);
2603 #endif
2604
2605 return new_i;
2606 }
2607
2608 static int
2609 hwloc_build_level_from_list(struct hwloc_special_level_s *slevel)
2610 {
2611 unsigned i, nb;
2612 struct hwloc_obj * obj;
2613
2614
2615 obj = slevel->first;
2616 i = 0;
2617 while (obj) {
2618 i++;
2619 obj = obj->next_cousin;
2620 }
2621 nb = i;
2622
2623 if (nb) {
2624
2625 slevel->objs = malloc(nb * sizeof(struct hwloc_obj *));
2626 obj = slevel->first;
2627 i = 0;
2628 while (obj) {
2629 obj->logical_index = i;
2630 slevel->objs[i] = obj;
2631 i++;
2632 obj = obj->next_cousin;
2633 }
2634 }
2635
2636 slevel->nbobjs = nb;
2637 return 0;
2638 }
2639
2640 static void
2641 hwloc_append_special_object(struct hwloc_special_level_s *level, hwloc_obj_t obj)
2642 {
2643 if (level->first) {
2644 obj->prev_cousin = level->last;
2645 obj->prev_cousin->next_cousin = obj;
2646 level->last = obj;
2647 } else {
2648 obj->prev_cousin = NULL;
2649 level->first = level->last = obj;
2650 }
2651 }
2652
2653
/* Recursively walk the tree below obj and append every special object
 * (NUMA node, Misc, Bridge, PCI device, OS device) to its dedicated
 * special level, setting its virtual depth and resetting next_cousin
 * (hwloc_append_special_object links it).  Recursion only visits the
 * child lists that the current object's type may legally carry.
 */
static void
hwloc_list_special_objects(hwloc_topology_t topology, hwloc_obj_t obj)
{
  hwloc_obj_t child;

  if (obj->type == HWLOC_OBJ_NUMANODE) {
    obj->next_cousin = NULL;
    obj->depth = HWLOC_TYPE_DEPTH_NUMANODE;
    /* insert into the NUMA node special level */
    hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_NUMANODE], obj);

    /* NUMA nodes may have memory and misc children */
    for_each_memory_child(child, obj)
      hwloc_list_special_objects(topology, child);
    for_each_misc_child(child, obj)
      hwloc_list_special_objects(topology, child);

  } else if (obj->type == HWLOC_OBJ_MISC) {
    obj->next_cousin = NULL;
    obj->depth = HWLOC_TYPE_DEPTH_MISC;
    /* insert into the Misc special level */
    hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_MISC], obj);
    /* Misc objects may only have misc children */
    for_each_misc_child(child, obj)
      hwloc_list_special_objects(topology, child);

  } else if (hwloc__obj_type_is_io(obj->type)) {
    obj->next_cousin = NULL;
    /* dispatch to the right I/O special level */
    if (obj->type == HWLOC_OBJ_BRIDGE) {
      obj->depth = HWLOC_TYPE_DEPTH_BRIDGE;
      hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_BRIDGE], obj);

    } else if (obj->type == HWLOC_OBJ_PCI_DEVICE) {
      obj->depth = HWLOC_TYPE_DEPTH_PCI_DEVICE;
      hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_PCIDEV], obj);

    } else if (obj->type == HWLOC_OBJ_OS_DEVICE) {
      obj->depth = HWLOC_TYPE_DEPTH_OS_DEVICE;
      hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_OSDEV], obj);
    }
    /* I/O objects may have I/O and misc children */
    for_each_io_child(child, obj)
      hwloc_list_special_objects(topology, child);
    for_each_misc_child(child, obj)
      hwloc_list_special_objects(topology, child);

  } else {
    /* normal object: recurse into every child list */
    for_each_child(child, obj)
      hwloc_list_special_objects(topology, child);
    for_each_memory_child(child, obj)
      hwloc_list_special_objects(topology, child);
    for_each_io_child(child, obj)
      hwloc_list_special_objects(topology, child);
    for_each_misc_child(child, obj)
      hwloc_list_special_objects(topology, child);
  }
}
2716
2717
2718 static void
2719 hwloc_connect_io_misc_levels(hwloc_topology_t topology)
2720 {
2721 unsigned i;
2722
2723 for(i=0; i<HWLOC_NR_SLEVELS; i++)
2724 free(topology->slevels[i].objs);
2725 memset(&topology->slevels, 0, sizeof(topology->slevels));
2726
2727 hwloc_list_special_objects(topology, topology->levels[0][0]);
2728
2729 for(i=0; i<HWLOC_NR_SLEVELS; i++)
2730 hwloc_build_level_from_list(&topology->slevels[i]);
2731 }
2732
2733
2734
2735
2736
2737 static int
2738 hwloc_connect_levels(hwloc_topology_t topology)
2739 {
2740 unsigned l, i=0;
2741 hwloc_obj_t *objs, *taken_objs, *new_objs, top_obj, root;
2742 unsigned n_objs, n_taken_objs, n_new_objs;
2743
2744
2745 for(l=1; l<topology->nb_levels; l++)
2746 free(topology->levels[l]);
2747 memset(topology->levels+1, 0, (topology->nb_levels-1)*sizeof(*topology->levels));
2748 memset(topology->level_nbobjects+1, 0, (topology->nb_levels-1)*sizeof(*topology->level_nbobjects));
2749 topology->nb_levels = 1;
2750
2751
2752 hwloc_reset_normal_type_depths(topology);
2753
2754
2755 root = topology->levels[0][0];
2756 root->depth = 0;
2757 topology->type_depth[root->type] = 0;
2758
2759 root->logical_index = 0;
2760 root->prev_cousin = NULL;
2761 root->next_cousin = NULL;
2762
2763 root->parent = NULL;
2764 root->sibling_rank = 0;
2765 root->prev_sibling = NULL;
2766 root->next_sibling = NULL;
2767
2768
2769 n_objs = topology->levels[0][0]->arity;
2770 objs = malloc(n_objs * sizeof(objs[0]));
2771 if (!objs) {
2772 errno = ENOMEM;
2773 return -1;
2774 }
2775 memcpy(objs, topology->levels[0][0]->children, n_objs*sizeof(objs[0]));
2776
2777
2778 while (n_objs) {
2779
2780
2781
2782
2783
2784
2785
2786 for (i = 0; i < n_objs; i++)
2787 if (objs[i]->type != HWLOC_OBJ_PU)
2788 break;
2789 top_obj = i == n_objs ? objs[0] : objs[i];
2790
2791
2792 for (i = 0; i < n_objs; i++) {
2793 if (hwloc_type_cmp(top_obj, objs[i]) != HWLOC_OBJ_EQUAL) {
2794 if (find_same_type(objs[i], top_obj)) {
2795
2796
2797 top_obj = objs[i];
2798 }
2799 }
2800 }
2801
2802
2803
2804
2805
2806 n_taken_objs = 0;
2807 n_new_objs = 0;
2808 for (i = 0; i < n_objs; i++)
2809 if (hwloc_type_cmp(top_obj, objs[i]) == HWLOC_OBJ_EQUAL) {
2810 n_taken_objs++;
2811 n_new_objs += objs[i]->arity;
2812 }
2813
2814
2815 taken_objs = malloc((n_taken_objs + 1) * sizeof(taken_objs[0]));
2816
2817 if (n_objs - n_taken_objs + n_new_objs) {
2818 new_objs = malloc((n_objs - n_taken_objs + n_new_objs) * sizeof(new_objs[0]));
2819 } else {
2820 #ifdef HWLOC_DEBUG
2821 assert(!n_new_objs);
2822 assert(n_objs == n_taken_objs);
2823 #endif
2824 new_objs = NULL;
2825 }
2826
2827 n_new_objs = hwloc_level_take_objects(top_obj,
2828 objs, n_objs,
2829 taken_objs, n_taken_objs,
2830 new_objs, n_new_objs);
2831
2832
2833 for (i = 0; i < n_taken_objs; i++) {
2834 taken_objs[i]->depth = (int) topology->nb_levels;
2835 taken_objs[i]->logical_index = i;
2836 if (i) {
2837 taken_objs[i]->prev_cousin = taken_objs[i-1];
2838 taken_objs[i-1]->next_cousin = taken_objs[i];
2839 }
2840 }
2841 taken_objs[0]->prev_cousin = NULL;
2842 taken_objs[n_taken_objs-1]->next_cousin = NULL;
2843
2844
2845 hwloc_debug("--- %s level", hwloc_obj_type_string(top_obj->type));
2846 hwloc_debug(" has number %u\n\n", topology->nb_levels);
2847
2848 if (topology->type_depth[top_obj->type] == HWLOC_TYPE_DEPTH_UNKNOWN)
2849 topology->type_depth[top_obj->type] = (int) topology->nb_levels;
2850 else
2851 topology->type_depth[top_obj->type] = HWLOC_TYPE_DEPTH_MULTIPLE;
2852
2853 taken_objs[n_taken_objs] = NULL;
2854
2855 if (topology->nb_levels == topology->nb_levels_allocated) {
2856
2857 void *tmplevels, *tmpnbobjs;
2858 tmplevels = realloc(topology->levels,
2859 2 * topology->nb_levels_allocated * sizeof(*topology->levels));
2860 tmpnbobjs = realloc(topology->level_nbobjects,
2861 2 * topology->nb_levels_allocated * sizeof(*topology->level_nbobjects));
2862 if (!tmplevels || !tmpnbobjs) {
2863 fprintf(stderr, "hwloc failed to realloc level arrays to %u\n", topology->nb_levels_allocated * 2);
2864
2865
2866 if (tmplevels)
2867 topology->levels = tmplevels;
2868 if (tmpnbobjs)
2869 topology->level_nbobjects = tmpnbobjs;
2870
2871
2872 free(objs);
2873 free(taken_objs);
2874 free(new_objs);
2875 errno = ENOMEM;
2876 return -1;
2877 }
2878 topology->levels = tmplevels;
2879 topology->level_nbobjects = tmpnbobjs;
2880 memset(topology->levels + topology->nb_levels_allocated,
2881 0, topology->nb_levels_allocated * sizeof(*topology->levels));
2882 memset(topology->level_nbobjects + topology->nb_levels_allocated,
2883 0, topology->nb_levels_allocated * sizeof(*topology->level_nbobjects));
2884 topology->nb_levels_allocated *= 2;
2885 }
2886
2887 topology->level_nbobjects[topology->nb_levels] = n_taken_objs;
2888 topology->levels[topology->nb_levels] = taken_objs;
2889
2890 topology->nb_levels++;
2891
2892 free(objs);
2893
2894
2895 objs = new_objs;
2896 n_objs = n_new_objs;
2897 }
2898
2899
2900 free(objs);
2901
2902 return 0;
2903 }
2904
2905 int
2906 hwloc_topology_reconnect(struct hwloc_topology *topology, unsigned long flags)
2907 {
2908 if (flags) {
2909 errno = EINVAL;
2910 return -1;
2911 }
2912 if (!topology->modified)
2913 return 0;
2914
2915 hwloc_connect_children(topology->levels[0][0]);
2916
2917 if (hwloc_connect_levels(topology) < 0)
2918 return -1;
2919
2920 hwloc_connect_io_misc_levels(topology);
2921
2922 topology->modified = 0;
2923
2924 return 0;
2925 }
2926
2927 void hwloc_alloc_root_sets(hwloc_obj_t root)
2928 {
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938 if (!root->cpuset)
2939 root->cpuset = hwloc_bitmap_alloc();
2940 if (!root->complete_cpuset)
2941 root->complete_cpuset = hwloc_bitmap_alloc();
2942 if (!root->nodeset)
2943 root->nodeset = hwloc_bitmap_alloc();
2944 if (!root->complete_nodeset)
2945 root->complete_nodeset = hwloc_bitmap_alloc();
2946 }
2947
2948
/* Main discovery pipeline: run CPU/global backends, sanitize the tree,
 * connect it, run the remaining (I/O, etc.) backends, then finalize
 * levels and per-object attributes.  Returns 0 on success, -1 with errno
 * set on fatal failure.
 */
static int
hwloc_discover(struct hwloc_topology *topology)
{
  struct hwloc_backend *backend;

  topology->modified = 0;

  /* start from "everything allowed"; restricted below */
  topology->allowed_cpuset = hwloc_bitmap_alloc_full();
  topology->allowed_nodeset = hwloc_bitmap_alloc_full();

  /* Phase 1: run CPU and GLOBAL discovery backends only */
  backend = topology->backends;
  while (NULL != backend) {
    if (backend->component->type != HWLOC_DISC_COMPONENT_TYPE_CPU
	&& backend->component->type != HWLOC_DISC_COMPONENT_TYPE_GLOBAL)
      /* I/O-only and similar backends run later */
      goto next_cpubackend;
    if (!backend->discover)
      goto next_cpubackend;
    backend->discover(backend);
    hwloc_debug_print_objects(0, topology->levels[0][0]);

  next_cpubackend:
    backend = backend->next;
  }

  /* a topology without any PU is unusable */
  if (!topology->levels[0][0]->cpuset || hwloc_bitmap_iszero(topology->levels[0][0]->cpuset)) {
    hwloc_debug("%s", "No PU added by any CPU and global backend\n");
    errno = EINVAL;
    return -1;
  }

  /* optionally restrict allowed sets to this system's resources */
  if (topology->binding_hooks.get_allowed_resources && topology->is_thissystem) {
    const char *env = getenv("HWLOC_THISSYSTEM_ALLOWED_RESOURCES");
    if ((env && atoi(env))
	|| (topology->flags & HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES))
      topology->binding_hooks.get_allowed_resources(topology);
  }

  /* If no backend created a NUMA node, synthesize a single one covering
   * the whole machine and move the machine memory attributes into it;
   * otherwise just release the unused machine memory page types. */
  if (hwloc_bitmap_iszero(topology->levels[0][0]->complete_nodeset)) {
    hwloc_obj_t node;
    hwloc_debug("%s", "\nAdd missing single NUMA node\n");
    node = hwloc_alloc_setup_object(topology, HWLOC_OBJ_NUMANODE, 0);
    node->cpuset = hwloc_bitmap_dup(topology->levels[0][0]->cpuset);
    node->nodeset = hwloc_bitmap_alloc();
    /* the single node gets OS index 0 */
    hwloc_bitmap_set(node->nodeset, 0);
    memcpy(&node->attr->numanode, &topology->machine_memory, sizeof(topology->machine_memory));
    memset(&topology->machine_memory, 0, sizeof(topology->machine_memory));
    hwloc_insert_object_by_cpuset(topology, node);
  } else {
    /* real NUMA nodes exist; machine_memory was only a fallback */
    free(topology->machine_memory.page_types);
    memset(&topology->machine_memory, 0, sizeof(topology->machine_memory));
  }

  /* clamp root sets to their complete variants, and allowed sets to the
   * root sets */
  hwloc_debug("%s", "\nFixup root sets\n");
  hwloc_bitmap_and(topology->levels[0][0]->cpuset, topology->levels[0][0]->cpuset, topology->levels[0][0]->complete_cpuset);
  hwloc_bitmap_and(topology->levels[0][0]->nodeset, topology->levels[0][0]->nodeset, topology->levels[0][0]->complete_nodeset);

  hwloc_bitmap_and(topology->allowed_cpuset, topology->allowed_cpuset, topology->levels[0][0]->cpuset);
  hwloc_bitmap_and(topology->allowed_nodeset, topology->allowed_nodeset, topology->levels[0][0]->nodeset);

  hwloc_debug("%s", "\nPropagate sets\n");
  /* fill in nodesets down/up the tree, then repair remaining sets */
  propagate_nodeset(topology->levels[0][0]);
  fixup_sets(topology->levels[0][0]);

  hwloc_debug_print_objects(0, topology->levels[0][0]);

  if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM)) {
    hwloc_debug("%s", "\nRemoving unauthorized sets from all sets\n");
    remove_unused_sets(topology, topology->levels[0][0]);
    hwloc_debug_print_objects(0, topology->levels[0][0]);
  }

  /* If the root is filtered out and has exactly one normal child,
   * promote that child to root (moving the root's special children). */
  if (!hwloc_filter_check_keep_object(topology, topology->levels[0][0])
      && topology->levels[0][0]->first_child && !topology->levels[0][0]->first_child->next_sibling) {
    hwloc_obj_t oldroot = topology->levels[0][0];
    hwloc_obj_t newroot = oldroot->first_child;
    newroot->parent = NULL;
    topology->levels[0][0] = newroot;
    /* keep the old root's memory/io/misc children under the new root */
    if (oldroot->memory_first_child)
      prepend_siblings_list(&newroot->memory_first_child, oldroot->memory_first_child, newroot);
    if (oldroot->io_first_child)
      prepend_siblings_list(&newroot->io_first_child, oldroot->io_first_child, newroot);
    if (oldroot->misc_first_child)
      prepend_siblings_list(&newroot->misc_first_child, oldroot->misc_first_child, newroot);
    hwloc_free_unlinked_object(oldroot);
  }

  /* connect so that I/O backends can look objects up by cpuset */
  hwloc_debug("%s", "\nOk, finished tweaking, now connect\n");
  if (hwloc_topology_reconnect(topology, 0) < 0)
    return -1;
  hwloc_debug_print_objects(0, topology->levels[0][0]);

  /* Phase 2: run the remaining (non-CPU, non-GLOBAL) backends */
  backend = topology->backends;
  while (NULL != backend) {
    if (backend->component->type == HWLOC_DISC_COMPONENT_TYPE_CPU
	|| backend->component->type == HWLOC_DISC_COMPONENT_TYPE_GLOBAL)
      /* already ran in phase 1 */
      goto next_noncpubackend;
    if (!backend->discover)
      goto next_noncpubackend;
    backend->discover(backend);
    hwloc_debug_print_objects(0, topology->levels[0][0]);

  next_noncpubackend:
    backend = backend->next;
  }

  /* attach PCI objects inserted below the root to their actual parents */
  hwloc_pci_belowroot_apply_locality(topology);

  hwloc_debug("%s", "\nNow reconnecting\n");
  hwloc_debug_print_objects(0, topology->levels[0][0]);

  hwloc_debug("%s", "\nRemoving bridge objects if needed\n");
  hwloc_filter_bridges(topology, topology->levels[0][0]);
  hwloc_debug_print_objects(0, topology->levels[0][0]);

  hwloc_debug("%s", "\nRemoving empty objects except numa nodes and PCI devices\n");
  remove_empty(topology, &topology->levels[0][0]);
  if (!topology->levels[0][0]) {
    /* everything got removed: there is no sane recovery */
    fprintf(stderr, "Topology became empty, aborting!\n");
    abort();
  }
  hwloc_debug_print_objects(0, topology->levels[0][0]);

  /* reconnect again after the filtering above */
  if (hwloc_topology_reconnect(topology, 0) < 0)
    return -1;

  hwloc_debug("%s", "\nRemoving levels with HWLOC_TYPE_FILTER_KEEP_STRUCTURE\n");
  hwloc_filter_levels_keep_structure(topology);
  hwloc_debug_print_objects(0, topology->levels[0][0]);

  /* accumulate local memory into total_memory up the tree */
  hwloc_debug("%s", "\nPropagate total memory up\n");
  propagate_total_memory(topology->levels[0][0]);

  /* compute symmetric_subtree flags */
  hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]);

  /* number Group levels top-down */
  hwloc_set_group_depth(topology);

  /* annotate the root with version/process info, except when loading
   * from XML (which carries its own annotations) */
  if (topology->backends
      && strcmp(topology->backends->component->name, "xml")) {
    char *value;

    hwloc_obj_add_info(topology->levels[0][0], "hwlocVersion", HWLOC_VERSION);

    value = hwloc_progname(topology);
    if (value) {
      hwloc_obj_add_info(topology->levels[0][0], "ProcessName", value);
      free(value);
    }
  }

  return 0;
}
3179
3180
3181
3182
/* (Re)initialize a topology to its empty default state: cleared hooks and
 * support structures, a single level containing one Machine root object,
 * and virtual depths for the special types.  Called at init time and when
 * a failed load must restore a usable state.
 */
void
hwloc_topology_setup_defaults(struct hwloc_topology *topology)
{
  struct hwloc_obj *root_obj;

  /* no binding hooks or feature support until backends are set up */
  memset(&topology->binding_hooks, 0, sizeof(topology->binding_hooks));
  memset(topology->support.discovery, 0, sizeof(*topology->support.discovery));
  memset(topology->support.cpubind, 0, sizeof(*topology->support.cpubind));
  memset(topology->support.membind, 0, sizeof(*topology->support.membind));

  /* single level with a single (root) object; gp_index 0 is reserved */
  topology->next_gp_index = 1;
  topology->nb_levels = 1;
  /* NOTE(review): hwloc_tma_malloc result is not checked here — confirm
   * the allocator policy for the tma path */
  topology->levels[0] = hwloc_tma_malloc (topology->tma, sizeof (hwloc_obj_t));
  topology->level_nbobjects[0] = 1;

  /* machine-wide memory attributes, used if no NUMA node is discovered */
  topology->machine_memory.local_memory = 0;
  topology->machine_memory.page_types_len = 0;
  topology->machine_memory.page_types = NULL;

  /* allowed sets are allocated during discovery */
  topology->allowed_cpuset = NULL;
  topology->allowed_nodeset = NULL;

  /* empty special levels */
  memset(&topology->slevels, 0, sizeof(topology->slevels));
  /* the slevel index math relies on the virtual-depth encoding */
  HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_NUMANODE == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_NUMANODE));
  HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_MISC == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_MISC));
  HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_BRIDGE == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_BRIDGE));
  HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_PCIDEV == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_PCI_DEVICE));
  HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_OSDEV == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_OS_DEVICE));

  /* normal types get depths during discovery; special types use fixed
   * virtual depths */
  hwloc_reset_normal_type_depths(topology);
  topology->type_depth[HWLOC_OBJ_NUMANODE] = HWLOC_TYPE_DEPTH_NUMANODE;
  topology->type_depth[HWLOC_OBJ_MISC] = HWLOC_TYPE_DEPTH_MISC;
  topology->type_depth[HWLOC_OBJ_BRIDGE] = HWLOC_TYPE_DEPTH_BRIDGE;
  topology->type_depth[HWLOC_OBJ_PCI_DEVICE] = HWLOC_TYPE_DEPTH_PCI_DEVICE;
  topology->type_depth[HWLOC_OBJ_OS_DEVICE] = HWLOC_TYPE_DEPTH_OS_DEVICE;

  /* create the root Machine object */
  root_obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_MACHINE, 0);
  topology->levels[0][0] = root_obj;
}
3233
3234 static void hwloc__topology_filter_init(struct hwloc_topology *topology);
3235
3236
/* Allocate and initialize a topology structure with room for nblevels
 * levels, using the given topology memory allocator (tma) or the regular
 * heap when tma is NULL.  Returns 0 on success, -1 on allocation failure.
 */
static int
hwloc__topology_init (struct hwloc_topology **topologyp,
		      unsigned nblevels,
		      struct hwloc_tma *tma)
{
  struct hwloc_topology *topology;

  topology = hwloc_tma_malloc (tma, sizeof (struct hwloc_topology));
  if(!topology)
    return -1;

  topology->tma = tma;

  /* global component registry and per-topology backend/PCI state */
  hwloc_components_init();
  hwloc_backends_init(topology);
  hwloc_pci_discovery_init(topology);

  /* default state: not loaded, no flags, assumed to be this system */
  topology->is_loaded = 0;
  topology->flags = 0;
  topology->is_thissystem = 1;
  topology->pid = 0;
  topology->userdata = NULL;
  topology->topology_abi = HWLOC_TOPOLOGY_ABI;
  topology->adopted_shmem_addr = NULL;
  topology->adopted_shmem_length = 0;

  /* support structures; filled in by hwloc_topology_setup_defaults below.
   * NOTE(review): these hwloc_tma_malloc results are not checked — confirm
   * allocator policy */
  topology->support.discovery = hwloc_tma_malloc(tma, sizeof(*topology->support.discovery));
  topology->support.cpubind = hwloc_tma_malloc(tma, sizeof(*topology->support.cpubind));
  topology->support.membind = hwloc_tma_malloc(tma, sizeof(*topology->support.membind));

  /* level arrays grow on demand in hwloc_connect_levels */
  topology->nb_levels_allocated = nblevels;
  topology->levels = hwloc_tma_calloc(tma, topology->nb_levels_allocated * sizeof(*topology->levels));
  topology->level_nbobjects = hwloc_tma_calloc(tma, topology->nb_levels_allocated * sizeof(*topology->level_nbobjects));

  /* default type filters */
  hwloc__topology_filter_init(topology);

  hwloc_internal_distances_init(topology);

  /* XML userdata import/export callbacks are opt-in */
  topology->userdata_export_cb = NULL;
  topology->userdata_import_cb = NULL;
  topology->userdata_not_decoded = 0;

  /* build the minimal root-only tree */
  hwloc_topology_setup_defaults(topology);

  *topologyp = topology;
  return 0;
}
3286
3287 int
3288 hwloc_topology_init (struct hwloc_topology **topologyp)
3289 {
3290 return hwloc__topology_init(topologyp,
3291 16,
3292 NULL);
3293 }
3294
3295 int
3296 hwloc_topology_set_pid(struct hwloc_topology *topology __hwloc_attribute_unused,
3297 hwloc_pid_t pid __hwloc_attribute_unused)
3298 {
3299 if (topology->is_loaded) {
3300 errno = EBUSY;
3301 return -1;
3302 }
3303
3304
3305 #ifdef HWLOC_LINUX_SYS
3306 topology->pid = pid;
3307 return 0;
3308 #else
3309 errno = ENOSYS;
3310 return -1;
3311 #endif
3312 }
3313
3314 int
3315 hwloc_topology_set_synthetic(struct hwloc_topology *topology, const char *description)
3316 {
3317 if (topology->is_loaded) {
3318 errno = EBUSY;
3319 return -1;
3320 }
3321
3322 return hwloc_disc_component_force_enable(topology,
3323 0 ,
3324 -1, "synthetic",
3325 description, NULL, NULL);
3326 }
3327
3328 int
3329 hwloc_topology_set_xml(struct hwloc_topology *topology,
3330 const char *xmlpath)
3331 {
3332 if (topology->is_loaded) {
3333 errno = EBUSY;
3334 return -1;
3335 }
3336
3337 return hwloc_disc_component_force_enable(topology,
3338 0 ,
3339 -1, "xml",
3340 xmlpath, NULL, NULL);
3341 }
3342
3343 int
3344 hwloc_topology_set_xmlbuffer(struct hwloc_topology *topology,
3345 const char *xmlbuffer,
3346 int size)
3347 {
3348 if (topology->is_loaded) {
3349 errno = EBUSY;
3350 return -1;
3351 }
3352
3353 return hwloc_disc_component_force_enable(topology,
3354 0 ,
3355 -1, "xml", NULL,
3356 xmlbuffer, (void*) (uintptr_t) size);
3357 }
3358
3359 int
3360 hwloc_topology_set_flags (struct hwloc_topology *topology, unsigned long flags)
3361 {
3362 if (topology->is_loaded) {
3363
3364 errno = EBUSY;
3365 return -1;
3366 }
3367
3368 if (flags & ~(HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES)) {
3369 errno = EINVAL;
3370 return -1;
3371 }
3372
3373 topology->flags = flags;
3374 return 0;
3375 }
3376
3377 unsigned long
3378 hwloc_topology_get_flags (struct hwloc_topology *topology)
3379 {
3380 return topology->flags;
3381 }
3382
3383 static void
3384 hwloc__topology_filter_init(struct hwloc_topology *topology)
3385 {
3386 hwloc_obj_type_t type;
3387
3388 for(type = HWLOC_OBJ_TYPE_MIN; type < HWLOC_OBJ_TYPE_MAX; type++)
3389 topology->type_filter[type] = HWLOC_TYPE_FILTER_KEEP_ALL;
3390 topology->type_filter[HWLOC_OBJ_L1ICACHE] = HWLOC_TYPE_FILTER_KEEP_NONE;
3391 topology->type_filter[HWLOC_OBJ_L2ICACHE] = HWLOC_TYPE_FILTER_KEEP_NONE;
3392 topology->type_filter[HWLOC_OBJ_L3ICACHE] = HWLOC_TYPE_FILTER_KEEP_NONE;
3393 topology->type_filter[HWLOC_OBJ_GROUP] = HWLOC_TYPE_FILTER_KEEP_STRUCTURE;
3394 topology->type_filter[HWLOC_OBJ_MISC] = HWLOC_TYPE_FILTER_KEEP_NONE;
3395 topology->type_filter[HWLOC_OBJ_BRIDGE] = HWLOC_TYPE_FILTER_KEEP_NONE;
3396 topology->type_filter[HWLOC_OBJ_PCI_DEVICE] = HWLOC_TYPE_FILTER_KEEP_NONE;
3397 topology->type_filter[HWLOC_OBJ_OS_DEVICE] = HWLOC_TYPE_FILTER_KEEP_NONE;
3398 }
3399
3400 static int
3401 hwloc__topology_set_type_filter(struct hwloc_topology *topology, hwloc_obj_type_t type, enum hwloc_type_filter_e filter)
3402 {
3403 if (type == HWLOC_OBJ_PU || type == HWLOC_OBJ_NUMANODE || type == HWLOC_OBJ_MACHINE) {
3404 if (filter != HWLOC_TYPE_FILTER_KEEP_ALL) {
3405
3406 errno = EINVAL;
3407 return -1;
3408 }
3409 } else if (hwloc__obj_type_is_special(type)) {
3410 if (filter == HWLOC_TYPE_FILTER_KEEP_STRUCTURE) {
3411
3412 errno = EINVAL;
3413 return -1;
3414 }
3415 } else if (type == HWLOC_OBJ_GROUP) {
3416 if (filter == HWLOC_TYPE_FILTER_KEEP_ALL) {
3417
3418 errno = EINVAL;
3419 return -1;
3420 }
3421 }
3422
3423
3424 if (!hwloc__obj_type_is_special(type) && filter == HWLOC_TYPE_FILTER_KEEP_IMPORTANT)
3425 filter = HWLOC_TYPE_FILTER_KEEP_ALL;
3426
3427 topology->type_filter[type] = filter;
3428 return 0;
3429 }
3430
3431 int
3432 hwloc_topology_set_type_filter(struct hwloc_topology *topology, hwloc_obj_type_t type, enum hwloc_type_filter_e filter)
3433 {
3434 HWLOC_BUILD_ASSERT(HWLOC_OBJ_TYPE_MIN == 0);
3435 if ((unsigned) type >= HWLOC_OBJ_TYPE_MAX) {
3436 errno = EINVAL;
3437 return -1;
3438 }
3439 if (topology->is_loaded) {
3440 errno = EBUSY;
3441 return -1;
3442 }
3443 return hwloc__topology_set_type_filter(topology, type, filter);
3444 }
3445
3446 int
3447 hwloc_topology_set_all_types_filter(struct hwloc_topology *topology, enum hwloc_type_filter_e filter)
3448 {
3449 hwloc_obj_type_t type;
3450 if (topology->is_loaded) {
3451 errno = EBUSY;
3452 return -1;
3453 }
3454 for(type = HWLOC_OBJ_TYPE_MIN; type < HWLOC_OBJ_TYPE_MAX; type++)
3455 hwloc__topology_set_type_filter(topology, type, filter);
3456 return 0;
3457 }
3458
3459 int
3460 hwloc_topology_set_cache_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter)
3461 {
3462 unsigned i;
3463 for(i=HWLOC_OBJ_L1CACHE; i<HWLOC_OBJ_L3ICACHE; i++)
3464 hwloc_topology_set_type_filter(topology, (hwloc_obj_type_t) i, filter);
3465 return 0;
3466 }
3467
3468 int
3469 hwloc_topology_set_icache_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter)
3470 {
3471 unsigned i;
3472 for(i=HWLOC_OBJ_L1ICACHE; i<HWLOC_OBJ_L3ICACHE; i++)
3473 hwloc_topology_set_type_filter(topology, (hwloc_obj_type_t) i, filter);
3474 return 0;
3475 }
3476
3477 int
3478 hwloc_topology_set_io_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter)
3479 {
3480 hwloc_topology_set_type_filter(topology, HWLOC_OBJ_BRIDGE, filter);
3481 hwloc_topology_set_type_filter(topology, HWLOC_OBJ_PCI_DEVICE, filter);
3482 hwloc_topology_set_type_filter(topology, HWLOC_OBJ_OS_DEVICE, filter);
3483 return 0;
3484 }
3485
3486 int
3487 hwloc_topology_get_type_filter(struct hwloc_topology *topology, hwloc_obj_type_t type, enum hwloc_type_filter_e *filterp)
3488 {
3489 HWLOC_BUILD_ASSERT(HWLOC_OBJ_TYPE_MIN == 0);
3490 if ((unsigned) type >= HWLOC_OBJ_TYPE_MAX) {
3491 errno = EINVAL;
3492 return -1;
3493 }
3494 *filterp = topology->type_filter[type];
3495 return 0;
3496 }
3497
3498 void
3499 hwloc_topology_clear (struct hwloc_topology *topology)
3500 {
3501
3502 unsigned l;
3503 hwloc_internal_distances_destroy(topology);
3504 hwloc_free_object_and_children(topology->levels[0][0]);
3505 hwloc_bitmap_free(topology->allowed_cpuset);
3506 hwloc_bitmap_free(topology->allowed_nodeset);
3507 for (l=0; l<topology->nb_levels; l++)
3508 free(topology->levels[l]);
3509 for(l=0; l<HWLOC_NR_SLEVELS; l++)
3510 free(topology->slevels[l].objs);
3511 free(topology->machine_memory.page_types);
3512 }
3513
3514 void
3515 hwloc_topology_destroy (struct hwloc_topology *topology)
3516 {
3517 if (topology->adopted_shmem_addr) {
3518 hwloc__topology_disadopt(topology);
3519 return;
3520 }
3521
3522 hwloc_backends_disable_all(topology);
3523 hwloc_components_fini();
3524
3525 hwloc_topology_clear(topology);
3526
3527 free(topology->levels);
3528 free(topology->level_nbobjects);
3529
3530 free(topology->support.discovery);
3531 free(topology->support.cpubind);
3532 free(topology->support.membind);
3533 free(topology);
3534 }
3535
/* Public load entry point: select backends (honoring environment-variable
 * overrides), install binding hooks, run discovery, and finalize the
 * topology.  Returns 0 on success; on failure returns -1 after restoring
 * an empty default topology so the handle stays usable.
 */
int
hwloc_topology_load (struct hwloc_topology *topology)
{
  int err;

  if (topology->is_loaded) {
    errno = EBUSY;
    return -1;
  }

  hwloc_internal_distances_prepare(topology);

  if (getenv("HWLOC_XML_USERDATA_NOT_DECODED"))
    topology->userdata_not_decoded = 1;

  /* Environment-driven backend selection, tried in priority order, and
   * only when the application did not force backends already and
   * HWLOC_COMPONENTS does not take over component selection. */
  if (!getenv("HWLOC_COMPONENTS")) {
    /* HWLOC_FSROOT: Linux backend on a fake sysfs/procfs root */
    if (!topology->backends) {
      const char *fsroot_path_env = getenv("HWLOC_FSROOT");
      if (fsroot_path_env)
	hwloc_disc_component_force_enable(topology,
					  1 /* env force */,
					  HWLOC_DISC_COMPONENT_TYPE_CPU, "linux",
					  NULL /* backend takes the env var itself */, NULL, NULL);
    }
    /* HWLOC_CPUID_PATH: x86 backend on dumped cpuid data */
    if (!topology->backends) {
      const char *cpuid_path_env = getenv("HWLOC_CPUID_PATH");
      if (cpuid_path_env)
	hwloc_disc_component_force_enable(topology,
					  1 /* env force */,
					  HWLOC_DISC_COMPONENT_TYPE_CPU, "x86",
					  NULL /* backend takes the env var itself */, NULL, NULL);
    }
    /* HWLOC_SYNTHETIC: synthetic description */
    if (!topology->backends) {
      const char *synthetic_env = getenv("HWLOC_SYNTHETIC");
      if (synthetic_env)
	hwloc_disc_component_force_enable(topology,
					  1 /* env force */,
					  -1, "synthetic",
					  synthetic_env, NULL, NULL);
    }
    /* HWLOC_XMLFILE: load from an XML export */
    if (!topology->backends) {
      const char *xmlpath_env = getenv("HWLOC_XMLFILE");
      if (xmlpath_env)
	hwloc_disc_component_force_enable(topology,
					  1 /* env force */,
					  -1, "xml",
					  xmlpath_env, NULL, NULL);
    }
  }

  /* enable all remaining compatible components */
  hwloc_disc_components_enable_others(topology);
  /* decide whether the topology matches the current system, and gather
   * backend callbacks */
  hwloc_backends_is_thissystem(topology);
  hwloc_backends_find_callbacks(topology);

  /* binding hooks must be ready before discovery (backends may bind) */
  hwloc_set_binding_hooks(topology);

  hwloc_pci_discovery_prepare(topology);

  /* actual discovery */
  err = hwloc_discover(topology);
  if (err < 0)
    goto out;

  hwloc_pci_discovery_exit(topology);

#ifndef HWLOC_DEBUG
  if (getenv("HWLOC_DEBUG_CHECK"))
#endif
    hwloc_topology_check(topology);

  /* distances kept object references from before tweaking; refresh them
   * against the final tree */
  hwloc_internal_distances_invalidate_cached_objs(topology);
  hwloc_internal_distances_refresh(topology);

  topology->is_loaded = 1;
  return 0;

 out:
  /* failure: restore a clean, empty default topology */
  hwloc_pci_discovery_exit(topology);
  hwloc_topology_clear(topology);
  hwloc_topology_setup_defaults(topology);
  hwloc_backends_disable_all(topology);
  return -1;
}
3639
3640
3641
3642
3643
/* Recursively clear the dropped cpuset/nodeset bits from the subtree at *pobj,
 * and remove objects that end up empty.
 * droppednodeset may be NULL when no NUMA node is being removed.
 * NUMA nodes are only removed when HWLOC_RESTRICT_FLAG_REMOVE_CPULESS is set.
 * Sets topology->modified when an object is actually removed. */
static void
restrict_object_by_cpuset(hwloc_topology_t topology, unsigned long flags, hwloc_obj_t *pobj,
                          hwloc_bitmap_t droppedcpuset, hwloc_bitmap_t droppednodeset)
{
  hwloc_obj_t obj = *pobj, child, *pchild;
  int modified = 0;

  if (hwloc_bitmap_intersects(obj->complete_cpuset, droppedcpuset)) {
    /* Some of this object's CPUs are being dropped: clear them. */
    hwloc_bitmap_andnot(obj->cpuset, obj->cpuset, droppedcpuset);
    hwloc_bitmap_andnot(obj->complete_cpuset, obj->complete_cpuset, droppedcpuset);
    modified = 1;
  } else {
    if ((flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS)
        && hwloc_bitmap_iszero(obj->complete_cpuset)) {
      /* Already cpuless; with REMOVE_CPULESS we still recurse so that
       * now-removable children (see the NUMANODE condition below) get dropped. */
      modified = 1;
    }
    /* If cpusets don't intersect the dropped set, nodesets shouldn't either,
     * unless this subtree is entirely cpuless. */
    if (droppednodeset)
      assert(!hwloc_bitmap_intersects(obj->complete_nodeset, droppednodeset)
             || hwloc_bitmap_iszero(obj->complete_cpuset));
  }
  if (droppednodeset) {
    hwloc_bitmap_andnot(obj->nodeset, obj->nodeset, droppednodeset);
    hwloc_bitmap_andnot(obj->complete_nodeset, obj->complete_nodeset, droppednodeset);
  }

  if (modified) {
    /* Recurse into normal and memory children (the only ones with cpusets);
     * _safe iterators because children may be removed during the walk. */
    for_each_child_safe(child, obj, pchild)
      restrict_object_by_cpuset(topology, flags, pchild, droppedcpuset, droppednodeset);
    for_each_memory_child_safe(child, obj, pchild)
      restrict_object_by_cpuset(topology, flags, pchild, droppedcpuset, droppednodeset);
    /* I/O and Misc children carry no cpusets, nothing to restrict there. */
  }

  if (!obj->first_child && !obj->memory_first_child /* arity not up-to-date yet, check list heads */
      && hwloc_bitmap_iszero(obj->cpuset)
      && (obj->type != HWLOC_OBJ_NUMANODE || (flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS))) {
    /* Object became empty: remove it. */
    hwloc_debug("%s", "\nRemoving object during restrict");
    hwloc_debug_print_object(0, obj);

    /* Unless the caller asked for I/O / Misc to be re-attached (ADAPT flags),
     * free those children along with the object. */
    if (!(flags & HWLOC_RESTRICT_FLAG_ADAPT_IO)) {
      hwloc_free_object_siblings_and_children(obj->io_first_child);
      obj->io_first_child = NULL;
    }
    if (!(flags & HWLOC_RESTRICT_FLAG_ADAPT_MISC)) {
      hwloc_free_object_siblings_and_children(obj->misc_first_child);
      obj->misc_first_child = NULL;
    }
    assert(!obj->first_child);
    assert(!obj->memory_first_child);
    unlink_and_free_single_object(pobj);
    topology->modified = 1;
  }
}
3700
/* Restrict the topology to the given cpuset.
 * Returns -1 with errno set to EINVAL on invalid flags, unloaded topology,
 * or if the restriction would remove all allowed PUs or NUMA nodes.
 * On internal failure the topology is reset to an empty default tree. */
int
hwloc_topology_restrict(struct hwloc_topology *topology, hwloc_const_cpuset_t cpuset, unsigned long flags)
{
  hwloc_bitmap_t droppedcpuset, droppednodeset;

  if (!topology->is_loaded) {
    errno = EINVAL;
    return -1;
  }

  if (flags & ~(HWLOC_RESTRICT_FLAG_REMOVE_CPULESS
                |HWLOC_RESTRICT_FLAG_ADAPT_MISC|HWLOC_RESTRICT_FLAG_ADAPT_IO)) {
    errno = EINVAL;
    return -1;
  }

  /* The remaining topology must keep at least one allowed PU. */
  if (!hwloc_bitmap_intersects(cpuset, topology->allowed_cpuset)) {
    errno = EINVAL;
    return -1;
  }

  droppedcpuset = hwloc_bitmap_alloc();
  droppednodeset = hwloc_bitmap_alloc();
  if (!droppedcpuset || !droppednodeset) {
    hwloc_bitmap_free(droppedcpuset);
    hwloc_bitmap_free(droppednodeset);
    return -1;
  }

  /* The set of CPUs to clear from every object. */
  hwloc_bitmap_not(droppedcpuset, cpuset);

  if (flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS) {
    /* Collect the NUMA nodes that are (or will become) cpuless. */
    hwloc_obj_t node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0);
    do {
      /* A node is dropped if its cpuset was already empty or is fully dropped. */
      if (hwloc_bitmap_iszero(node->cpuset)
          || hwloc_bitmap_isincluded(node->cpuset, droppedcpuset))
        hwloc_bitmap_set(droppednodeset, node->os_index);
      node = node->next_cousin;
    } while (node);

    /* Refuse to remove all allowed NUMA nodes. */
    if (hwloc_bitmap_isincluded(topology->allowed_nodeset, droppednodeset)) {
      errno = EINVAL;
      hwloc_bitmap_free(droppedcpuset);
      hwloc_bitmap_free(droppednodeset);
      return -1;
    }
  }

  if (!(flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS)
      || hwloc_bitmap_iszero(droppednodeset)) {
    /* No node removal needed: pass NULL below to leave nodesets untouched. */
    hwloc_bitmap_free(droppednodeset);
    droppednodeset = NULL;
  }

  /* Clear the dropped bits from the whole tree and from the allowed sets. */
  restrict_object_by_cpuset(topology, flags, &topology->levels[0][0], droppedcpuset, droppednodeset);
  hwloc_bitmap_andnot(topology->allowed_cpuset, topology->allowed_cpuset, droppedcpuset);
  if (droppednodeset)
    hwloc_bitmap_andnot(topology->allowed_nodeset, topology->allowed_nodeset, droppednodeset);

  hwloc_bitmap_free(droppedcpuset);
  hwloc_bitmap_free(droppednodeset); /* may already be NULL, as above */

  if (hwloc_topology_reconnect(topology, 0) < 0)
    goto out;

  /* Objects may have been removed: drop object pointers cached in distances. */
  hwloc_internal_distances_invalidate_cached_objs(topology);

  hwloc_filter_levels_keep_structure(topology);
  hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]);
  propagate_total_memory(topology->levels[0][0]);

#ifndef HWLOC_DEBUG
  if (getenv("HWLOC_DEBUG_CHECK"))
#endif
    hwloc_topology_check(topology);

  return 0;

 out:
  /* Unrecoverable failure: re-initialize with an empty default topology. */
  hwloc_topology_clear(topology);
  hwloc_topology_setup_defaults(topology);
  return -1;
}
3791
3792 int
3793 hwloc_topology_is_thissystem(struct hwloc_topology *topology)
3794 {
3795 return topology->is_thissystem;
3796 }
3797
3798 int
3799 hwloc_topology_get_depth(struct hwloc_topology *topology)
3800 {
3801 return (int) topology->nb_levels;
3802 }
3803
3804 const struct hwloc_topology_support *
3805 hwloc_topology_get_support(struct hwloc_topology * topology)
3806 {
3807 return &topology->support;
3808 }
3809
3810 void hwloc_topology_set_userdata(struct hwloc_topology * topology, const void *userdata)
3811 {
3812 topology->userdata = (void *) userdata;
3813 }
3814
3815 void * hwloc_topology_get_userdata(struct hwloc_topology * topology)
3816 {
3817 return topology->userdata;
3818 }
3819
3820 hwloc_const_cpuset_t
3821 hwloc_topology_get_complete_cpuset(hwloc_topology_t topology)
3822 {
3823 return hwloc_get_root_obj(topology)->complete_cpuset;
3824 }
3825
3826 hwloc_const_cpuset_t
3827 hwloc_topology_get_topology_cpuset(hwloc_topology_t topology)
3828 {
3829 return hwloc_get_root_obj(topology)->cpuset;
3830 }
3831
3832 hwloc_const_cpuset_t
3833 hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology)
3834 {
3835 return topology->allowed_cpuset;
3836 }
3837
3838 hwloc_const_nodeset_t
3839 hwloc_topology_get_complete_nodeset(hwloc_topology_t topology)
3840 {
3841 return hwloc_get_root_obj(topology)->complete_nodeset;
3842 }
3843
3844 hwloc_const_nodeset_t
3845 hwloc_topology_get_topology_nodeset(hwloc_topology_t topology)
3846 {
3847 return hwloc_get_root_obj(topology)->nodeset;
3848 }
3849
3850 hwloc_const_nodeset_t
3851 hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology)
3852 {
3853 return topology->allowed_nodeset;
3854 }
3855
3856
3857
3858
3859
3860
3861 #ifndef NDEBUG
3862
3863 static void
3864 hwloc__check_child_siblings(hwloc_obj_t parent, hwloc_obj_t *array,
3865 unsigned arity, unsigned i,
3866 hwloc_obj_t child, hwloc_obj_t prev)
3867 {
3868 assert(child->parent == parent);
3869
3870 assert(child->sibling_rank == i);
3871 if (array)
3872 assert(child == array[i]);
3873
3874 if (prev)
3875 assert(prev->next_sibling == child);
3876 assert(child->prev_sibling == prev);
3877
3878 if (!i)
3879 assert(child->prev_sibling == NULL);
3880 else
3881 assert(child->prev_sibling != NULL);
3882
3883 if (i == arity-1)
3884 assert(child->next_sibling == NULL);
3885 else
3886 assert(child->next_sibling != NULL);
3887 }
3888
/* Forward declaration: hwloc__check_object and the per-child-list checks
 * below are mutually recursive. */
static void
hwloc__check_object(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t obj);
3891
3892
3893 static void
3894 hwloc__check_normal_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent)
3895 {
3896 hwloc_obj_t child, prev;
3897 unsigned j;
3898
3899 if (!parent->arity) {
3900
3901 assert(!parent->children);
3902 assert(!parent->first_child);
3903 assert(!parent->last_child);
3904 return;
3905 }
3906
3907 assert(parent->children);
3908 assert(parent->first_child);
3909 assert(parent->last_child);
3910
3911
3912 for(prev = NULL, child = parent->first_child, j = 0;
3913 child;
3914 prev = child, child = child->next_sibling, j++) {
3915
3916 assert(hwloc__obj_type_is_normal(child->type));
3917
3918 assert(child->depth > parent->depth);
3919
3920 hwloc__check_child_siblings(parent, parent->children, parent->arity, j, child, prev);
3921
3922 hwloc__check_object(topology, gp_indexes, child);
3923 }
3924
3925 assert(j == parent->arity);
3926
3927 assert(parent->first_child == parent->children[0]);
3928 assert(parent->last_child == parent->children[parent->arity-1]);
3929
3930
3931 if (parent->type == HWLOC_OBJ_PU)
3932 assert(!parent->arity);
3933 }
3934
/* Check cpuset consistency between obj and its normal/memory children. */
static void
hwloc__check_children_cpusets(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj)
{
  hwloc_obj_t child;
  int prev_first, prev_empty;

  if (obj->type == HWLOC_OBJ_PU) {
    /* A PU covers exactly the single CPU given by its OS index. */
    assert(hwloc_bitmap_weight(obj->cpuset) == 1);
    assert(hwloc_bitmap_first(obj->cpuset) == (int) obj->os_index);
    assert(hwloc_bitmap_weight(obj->complete_cpuset) == 1);
    assert(hwloc_bitmap_first(obj->complete_cpuset) == (int) obj->os_index);
    if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM)) {
      /* Without WHOLE_SYSTEM, only allowed PUs may remain in the tree. */
      assert(hwloc_bitmap_isset(topology->allowed_cpuset, (int) obj->os_index));
    }
    assert(!obj->arity);
  } else if (hwloc__obj_type_is_memory(obj->type)) {
    /* Memory objects share their parent's cpuset and have no normal children. */
    assert(hwloc_bitmap_isequal(obj->parent->cpuset, obj->cpuset));
    assert(!obj->arity);
  } else if (!hwloc__obj_type_is_special(obj->type)) {
    /* Other normal objects: children cpusets are pairwise disjoint
     * and together cover the parent cpuset exactly. */
    hwloc_bitmap_t set;

    set = hwloc_bitmap_alloc();
    for_each_child(child, obj) {
      assert(!hwloc_bitmap_intersects(set, child->cpuset));
      hwloc_bitmap_or(set, set, child->cpuset);
    }
    assert(hwloc_bitmap_isequal(set, obj->cpuset));
    hwloc_bitmap_free(set);
  }

  /* Every memory child has the same cpuset as its parent. */
  for_each_memory_child(child, obj)
    assert(hwloc_bitmap_isequal(obj->cpuset, child->cpuset));

  /* Normal children are ordered by increasing first bit of their
   * complete_cpuset, with children whose complete_cpuset is empty
   * (first < 0) all grouped at the end. */
  prev_first = -1;
  prev_empty = 0;
  for_each_child(child, obj) {
    int first = hwloc_bitmap_first(child->complete_cpuset);
    if (first >= 0) {
      assert(!prev_empty);
      assert(prev_first < first);
    } else {
      prev_empty = 1;
    }
    prev_first = first;
  }
}
3988
3989 static void
3990 hwloc__check_memory_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent)
3991 {
3992 unsigned j;
3993 hwloc_obj_t child, prev;
3994
3995 if (!parent->memory_arity) {
3996
3997 assert(!parent->memory_first_child);
3998 return;
3999 }
4000
4001 assert(parent->memory_first_child);
4002
4003 for(prev = NULL, child = parent->memory_first_child, j = 0;
4004 child;
4005 prev = child, child = child->next_sibling, j++) {
4006 assert(hwloc__obj_type_is_memory(child->type));
4007
4008 hwloc__check_child_siblings(parent, NULL, parent->memory_arity, j, child, prev);
4009
4010 assert(!child->first_child);
4011 assert(!child->io_first_child);
4012 hwloc__check_object(topology, gp_indexes, child);
4013 }
4014
4015 assert(j == parent->memory_arity);
4016
4017
4018 if (parent->type == HWLOC_OBJ_NUMANODE)
4019 assert(!parent->memory_arity);
4020 }
4021
4022 static void
4023 hwloc__check_io_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent)
4024 {
4025 unsigned j;
4026 hwloc_obj_t child, prev;
4027
4028 if (!parent->io_arity) {
4029
4030 assert(!parent->io_first_child);
4031 return;
4032 }
4033
4034 assert(parent->io_first_child);
4035
4036 for(prev = NULL, child = parent->io_first_child, j = 0;
4037 child;
4038 prev = child, child = child->next_sibling, j++) {
4039
4040 assert(hwloc__obj_type_is_io(child->type));
4041
4042 hwloc__check_child_siblings(parent, NULL, parent->io_arity, j, child, prev);
4043
4044 assert(!child->first_child);
4045 assert(!child->memory_first_child);
4046 hwloc__check_object(topology, gp_indexes, child);
4047 }
4048
4049 assert(j == parent->io_arity);
4050 }
4051
4052 static void
4053 hwloc__check_misc_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent)
4054 {
4055 unsigned j;
4056 hwloc_obj_t child, prev;
4057
4058 if (!parent->misc_arity) {
4059
4060 assert(!parent->misc_first_child);
4061 return;
4062 }
4063
4064 assert(parent->misc_first_child);
4065
4066 for(prev = NULL, child = parent->misc_first_child, j = 0;
4067 child;
4068 prev = child, child = child->next_sibling, j++) {
4069
4070 assert(child->type == HWLOC_OBJ_MISC);
4071
4072 hwloc__check_child_siblings(parent, NULL, parent->misc_arity, j, child, prev);
4073
4074 assert(!child->first_child);
4075 assert(!child->memory_first_child);
4076 assert(!child->io_first_child);
4077 hwloc__check_object(topology, gp_indexes, child);
4078 }
4079
4080 assert(j == parent->misc_arity);
4081 }
4082
/* Recursively check one object: gp_index uniqueness, type/depth invariants,
 * set presence and inclusion, cache attributes, then all children lists. */
static void
hwloc__check_object(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t obj)
{
  /* Global-persistent indexes must be unique across the whole tree. */
  assert(!hwloc_bitmap_isset(gp_indexes, obj->gp_index));
  hwloc_bitmap_set(gp_indexes, obj->gp_index);

  HWLOC_BUILD_ASSERT(HWLOC_OBJ_TYPE_MIN == 0);
  assert((unsigned) obj->type < HWLOC_OBJ_TYPE_MAX);

  /* The object must still satisfy its type filter. */
  assert(hwloc_filter_check_keep_object(topology, obj));

  /* Special (I/O and Misc) objects have no cpuset and a virtual depth;
   * others have cpusets and either the NUMA virtual depth or a normal one. */
  if (hwloc__obj_type_is_special(obj->type)) {
    assert(!obj->cpuset);
    if (obj->type == HWLOC_OBJ_BRIDGE)
      assert(obj->depth == HWLOC_TYPE_DEPTH_BRIDGE);
    else if (obj->type == HWLOC_OBJ_PCI_DEVICE)
      assert(obj->depth == HWLOC_TYPE_DEPTH_PCI_DEVICE);
    else if (obj->type == HWLOC_OBJ_OS_DEVICE)
      assert(obj->depth == HWLOC_TYPE_DEPTH_OS_DEVICE);
    else if (obj->type == HWLOC_OBJ_MISC)
      assert(obj->depth == HWLOC_TYPE_DEPTH_MISC);
  } else {
    assert(obj->cpuset);
    if (obj->type == HWLOC_OBJ_NUMANODE)
      assert(obj->depth == HWLOC_TYPE_DEPTH_NUMANODE);
    else
      assert(obj->depth >= 0);
  }

  /* Group depth must have been assigned (never the -1 placeholder). */
  if (obj->type == HWLOC_OBJ_GROUP) {
    assert(obj->attr->group.depth != (unsigned) -1);
  }

  /* An object has either all four sets or none of them. */
  assert(!!obj->cpuset == !!obj->complete_cpuset);
  assert(!!obj->cpuset == !!obj->nodeset);
  assert(!!obj->nodeset == !!obj->complete_nodeset);

  /* Main sets are subsets of the complete ones. */
  if (obj->cpuset) {
    assert(hwloc_bitmap_isincluded(obj->cpuset, obj->complete_cpuset));
    assert(hwloc_bitmap_isincluded(obj->nodeset, obj->complete_nodeset));
  }

  /* Cache attributes must match the object type (kind and depth). */
  if (hwloc__obj_type_is_cache(obj->type)) {
    if (hwloc__obj_type_is_icache(obj->type))
      assert(obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION);
    else if (hwloc__obj_type_is_dcache(obj->type))
      assert(obj->attr->cache.type == HWLOC_OBJ_CACHE_DATA
             || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED);
    else
      assert(0);
    assert(hwloc_cache_type_by_depth_type(obj->attr->cache.depth, obj->attr->cache.type) == obj->type);
  }

  /* Check all four children lists, recursing into each child,
   * then the cpuset relations between this object and its children. */
  hwloc__check_normal_children(topology, gp_indexes, obj);
  hwloc__check_memory_children(topology, gp_indexes, obj);
  hwloc__check_io_children(topology, gp_indexes, obj);
  hwloc__check_misc_children(topology, gp_indexes, obj);
  hwloc__check_children_cpusets(topology, obj);
}
4149
/* Check nodeset consistency for obj's subtree.
 * parentset (owned by the caller, consumed here as scratch) accumulates
 * nodes seen so far; local memory children and every normal-child subtree
 * must contribute disjoint nodes, and the total must equal obj->nodeset. */
static void
hwloc__check_nodesets(hwloc_topology_t topology, hwloc_obj_t obj, hwloc_bitmap_t parentset)
{
  hwloc_obj_t child;
  int prev_first;

  if (obj->type == HWLOC_OBJ_NUMANODE) {
    /* A NUMA node covers exactly the single node given by its OS index. */
    assert(hwloc_bitmap_weight(obj->nodeset) == 1);
    assert(hwloc_bitmap_first(obj->nodeset) == (int) obj->os_index);
    assert(hwloc_bitmap_weight(obj->complete_nodeset) == 1);
    assert(hwloc_bitmap_first(obj->complete_nodeset) == (int) obj->os_index);
    if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM)) {
      /* Without WHOLE_SYSTEM, only allowed nodes may remain in the tree. */
      assert(hwloc_bitmap_isset(topology->allowed_nodeset, (int) obj->os_index));
    }
    assert(!obj->arity);
    assert(!obj->memory_arity);
    assert(hwloc_bitmap_isincluded(obj->nodeset, parentset));
  } else {
    hwloc_bitmap_t myset;
    hwloc_bitmap_t childset;

    /* Local memory children bring pairwise-disjoint nodes,
     * also disjoint from everything accumulated so far. */
    myset = hwloc_bitmap_alloc();
    for_each_memory_child(child, obj) {
      assert(!hwloc_bitmap_intersects(myset, child->nodeset));
      hwloc_bitmap_or(myset, myset, child->nodeset);
    }
    assert(!hwloc_bitmap_intersects(myset, parentset));
    hwloc_bitmap_or(parentset, parentset, myset);
    hwloc_bitmap_free(myset);

    /* Recurse into normal children; each subtree must contribute
     * nodes disjoint from the other subtrees' contributions. */
    childset = hwloc_bitmap_alloc();
    for_each_child(child, obj) {
      hwloc_bitmap_t set = hwloc_bitmap_dup(parentset);
      hwloc__check_nodesets(topology, child, set);
      /* Keep only what this child subtree added. */
      hwloc_bitmap_andnot(set, set, parentset);
      assert(!hwloc_bitmap_intersects(childset, set));
      hwloc_bitmap_or(childset, childset, set);
      hwloc_bitmap_free(set);
    }

    assert(!hwloc_bitmap_intersects(parentset, childset));
    hwloc_bitmap_or(parentset, parentset, childset);
    hwloc_bitmap_free(childset);

    /* The accumulated set must match this object's nodeset exactly. */
    assert(hwloc_bitmap_isequal(obj->nodeset, parentset));
  }

  /* Memory children are ordered by strictly increasing first bit
   * of their complete_nodeset. */
  prev_first = -1;
  for_each_memory_child(child, obj) {
    int first = hwloc_bitmap_first(child->complete_nodeset);
    assert(prev_first < first);
    prev_first = first;
  }
}
4214
/* Check one level (normal non-negative depth, or special negative depth):
 * cousin links, logical indexes, type/depth consistency, and for special
 * levels the first/last pointers stored in the slevel. */
static void
hwloc__check_level(struct hwloc_topology *topology, int depth,
                   hwloc_obj_t first, hwloc_obj_t last)
{
  unsigned width = hwloc_get_nbobjs_by_depth(topology, depth);
  struct hwloc_obj *prev = NULL;
  hwloc_obj_t obj;
  unsigned j;

  /* Check each object of the level. */
  for(j=0; j<width; j++) {
    obj = hwloc_get_obj_by_depth(topology, depth, j);
    assert(obj);
    assert(obj->depth == depth);
    assert(obj->logical_index == j);
    /* All objects of a level have the same type and are chained via cousins. */
    if (prev) {
      assert(hwloc_type_cmp(obj, prev) == HWLOC_OBJ_EQUAL);
      assert(prev->next_cousin == obj);
    }
    assert(obj->prev_cousin == prev);

    /* NUMA nodes have exactly one bit set, equal to their OS index. */
    if (obj->type == HWLOC_OBJ_NUMANODE) {
      assert(hwloc_bitmap_weight(obj->complete_nodeset) == 1);
      assert(hwloc_bitmap_first(obj->complete_nodeset) == (int) obj->os_index);
    }
    prev = obj;
  }
  if (prev)
    assert(prev->next_cousin == NULL);

  if (width) {
    /* Level endpoints have no cousins outside the level. */
    obj = hwloc_get_obj_by_depth(topology, depth, 0);
    assert(obj);
    assert(!obj->prev_cousin);
    /* Depth-to-type and type-to-depth lookups must be consistent. */
    assert(hwloc_get_depth_type(topology, depth) == obj->type);
    assert(depth == hwloc_get_type_depth(topology, obj->type)
           || HWLOC_TYPE_DEPTH_MULTIPLE == hwloc_get_type_depth(topology, obj->type));
    obj = hwloc_get_obj_by_depth(topology, depth, width-1);
    assert(obj);
    assert(!obj->next_cousin);
  }

  /* For special (negative) depths, first/last must match the slevel pointers;
   * normal levels don't use them. */
  if (depth < 0) {
    assert(first == hwloc_get_obj_by_depth(topology, depth, 0));
    assert(last == hwloc_get_obj_by_depth(topology, depth, width-1));
  } else {
    assert(!first);
    assert(!last);
  }

  /* There must be no object past the level width. */
  obj = hwloc_get_obj_by_depth(topology, depth, width);
  assert(!obj);
}
4275
4276
/* Full consistency check of a loaded topology.
 * Compiled in for debug builds; also run when HWLOC_DEBUG_CHECK is set. */
void
hwloc_topology_check(struct hwloc_topology *topology)
{
  struct hwloc_obj *obj;
  hwloc_bitmap_t gp_indexes, set;
  hwloc_obj_type_t type;
  unsigned i;
  int j, depth;

  /* Cache types must be contiguous: data/unified L1..L5,
   * then instruction L1i..L3i. */
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_L2CACHE == HWLOC_OBJ_L1CACHE + 1);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_L3CACHE == HWLOC_OBJ_L2CACHE + 1);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_L4CACHE == HWLOC_OBJ_L3CACHE + 1);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_L5CACHE == HWLOC_OBJ_L4CACHE + 1);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_L1ICACHE == HWLOC_OBJ_L5CACHE + 1);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_L2ICACHE == HWLOC_OBJ_L1ICACHE + 1);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_L3ICACHE == HWLOC_OBJ_L2ICACHE + 1);

  /* NUMANODE/BRIDGE/PCI/OSDEV/MISC must be the last types, in this order. */
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_NUMANODE + 1 == HWLOC_OBJ_BRIDGE);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_BRIDGE + 1 == HWLOC_OBJ_PCI_DEVICE);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_PCI_DEVICE + 1 == HWLOC_OBJ_OS_DEVICE);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_OS_DEVICE + 1 == HWLOC_OBJ_MISC);
  HWLOC_BUILD_ASSERT(HWLOC_OBJ_MISC + 1 == HWLOC_OBJ_TYPE_MAX);

  /* The ordering/priority arrays must cover every type. */
  HWLOC_BUILD_ASSERT(sizeof(obj_type_order)/sizeof(*obj_type_order) == HWLOC_OBJ_TYPE_MAX);
  HWLOC_BUILD_ASSERT(sizeof(obj_order_type)/sizeof(*obj_order_type) == HWLOC_OBJ_TYPE_MAX);
  HWLOC_BUILD_ASSERT(sizeof(obj_type_priority)/sizeof(*obj_type_priority) == HWLOC_OBJ_TYPE_MAX);

  /* The Group filter may never be KEEP_ALL. */
  assert(topology->type_filter[HWLOC_OBJ_GROUP] != HWLOC_TYPE_FILTER_KEEP_ALL);

  /* obj_type_order and obj_order_type must be inverse permutations. */
  for(type=HWLOC_OBJ_TYPE_MIN; type<HWLOC_OBJ_TYPE_MAX; type++)
    assert(obj_order_type[obj_type_order[type]] == type);
  for(i=HWLOC_OBJ_TYPE_MIN; i<HWLOC_OBJ_TYPE_MAX; i++)
    assert(obj_type_order[obj_order_type[i]] == i);

  depth = hwloc_topology_get_depth(topology);

  /* All pending tree modifications must have been reconnected. */
  assert(!topology->modified);

  /* Top level is Machine, bottom level is PU, and neither type
   * appears anywhere else. */
  assert(hwloc_get_depth_type(topology, 0) == HWLOC_OBJ_MACHINE);

  assert(hwloc_get_depth_type(topology, depth-1) == HWLOC_OBJ_PU);
  assert(hwloc_get_nbobjs_by_depth(topology, depth-1) > 0);
  for(i=0; i<hwloc_get_nbobjs_by_depth(topology, depth-1); i++) {
    obj = hwloc_get_obj_by_depth(topology, depth-1, i);
    assert(obj);
    assert(obj->type == HWLOC_OBJ_PU);
    /* PUs carry no memory children. */
    assert(!obj->memory_first_child);
  }

  for(j=1; j<depth-1; j++) {
    assert(hwloc_get_depth_type(topology, j) != HWLOC_OBJ_PU);
    assert(hwloc_get_depth_type(topology, j) != HWLOC_OBJ_MACHINE);
  }

  /* NUMA nodes live only at their virtual depth, never at a normal one. */
  assert(hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE) == HWLOC_TYPE_DEPTH_NUMANODE);
  assert(hwloc_get_depth_type(topology, HWLOC_TYPE_DEPTH_NUMANODE) == HWLOC_OBJ_NUMANODE);

  for(j=0; j<depth; j++)
    assert(hwloc_get_depth_type(topology, j) != HWLOC_OBJ_NUMANODE);

  /* Exactly one root object, with sets, at depth 0, without parent. */
  assert(hwloc_get_nbobjs_by_depth(topology, 0) == 1);
  obj = hwloc_get_root_obj(topology);
  assert(obj);
  assert(!obj->parent);
  assert(obj->cpuset);
  assert(!obj->depth);

  /* Allowed sets are subsets of the root sets with WHOLE_SYSTEM,
   * equal to them otherwise. */
  if (topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM) {
    assert(hwloc_bitmap_isincluded(topology->allowed_cpuset, obj->cpuset));
    assert(hwloc_bitmap_isincluded(topology->allowed_nodeset, obj->nodeset));
  } else {
    assert(hwloc_bitmap_isequal(topology->allowed_cpuset, obj->cpuset));
    assert(hwloc_bitmap_isequal(topology->allowed_nodeset, obj->nodeset));
  }

  /* Check every normal level, then every special level. */
  for(j=0; j<depth; j++)
    hwloc__check_level(topology, j, NULL, NULL);
  for(j=0; j<HWLOC_NR_SLEVELS; j++)
    hwloc__check_level(topology, HWLOC_SLEVEL_TO_DEPTH(j), topology->slevels[j].first, topology->slevels[j].last);

  /* Recursively check the object tree, verifying gp_index uniqueness. */
  gp_indexes = hwloc_bitmap_alloc();
  hwloc__check_object(topology, gp_indexes, obj);
  hwloc_bitmap_free(gp_indexes);

  /* Recursively check nodeset consistency across the tree. */
  set = hwloc_bitmap_alloc();
  hwloc__check_nodesets(topology, obj, set);
  hwloc_bitmap_free(set);
}
4386
4387 #else
4388
/* Consistency checks are compiled out in non-debug (NDEBUG) builds. */
void
hwloc_topology_check(struct hwloc_topology *topology __hwloc_attribute_unused)
{
}
4393
4394 #endif