This source file includes the following definitions.
- win32mmap
- win32direct_mmap
- win32munmap
- pthread_acquire_lock
- pthread_release_lock
- pthread_try_lock
- win32_acquire_lock
- win32_release_lock
- win32_try_lock
- pthread_acquire_lock
- pthread_release_lock
- pthread_try_lock
- pthread_init_lock
- pthread_islocked
- segment_holding
- has_segment_link
- init_mparams
- change_mparam
- do_check_any_chunk
- do_check_top_chunk
- do_check_mmapped_chunk
- do_check_inuse_chunk
- do_check_free_chunk
- do_check_malloced_chunk
- do_check_tree
- do_check_treebin
- do_check_smallbin
- bin_find
- traverse_and_check
- do_check_malloc_state
- internal_mallinfo
- internal_malloc_stats
- mmap_alloc
- mmap_resize
- init_top
- init_bins
- reset_on_error
- prepend_alloc
- add_segment
- sys_alloc
- release_unused_segments
- sys_trim
- tmalloc_large
- tmalloc_small
- internal_realloc
- internal_memalign
- ialloc
- dlmalloc
- dlfree
- dlcalloc
- dlrealloc
- dlmemalign
- dlindependent_calloc
- dlindependent_comalloc
- dlvalloc
- dlpvalloc
- dlmalloc_trim
- dlmalloc_footprint
- dlmalloc_max_footprint
- dlmallinfo
- dlmalloc_stats
- dlmalloc_usable_size
- dlmallopt
- init_user_mstate
- create_mspace
- create_mspace_with_base
- destroy_mspace
- mspace_malloc
- mspace_free
- mspace_calloc
- mspace_realloc
- mspace_memalign
- mspace_independent_calloc
- mspace_independent_comalloc
- mspace_trim
- mspace_malloc_stats
- mspace_footprint
- mspace_max_footprint
- mspace_mallinfo
- mspace_usable_size
- mspace_mallopt
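
The dl-prefixed functions mirror the standard C allocation API (and simply replace the plain malloc/free names unless USE_DL_PREFIX is set), while the mspace_* functions operate on independently created allocation spaces. A minimal usage sketch of the global-space entry points, assuming this file is compiled with USE_DL_PREFIX so the dl names are exported as real symbols and linked into the caller:

    #include <stddef.h>
    void* dlmalloc(size_t);
    void* dlrealloc(void*, size_t);
    void  dlfree(void*);

    int main(void) {
      void* p = dlmalloc(100);          /* 100 bytes from the global space */
      if (p != 0) {
        void* q = dlrealloc(p, 200);    /* grow the block, possibly moving it */
        dlfree(q != 0 ? q : p);         /* release whichever block survived */
      }
      return 0;
    }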
1 #include "malloc_defs.h"
2
485 #if defined(DARWIN) || defined(_DARWIN)
486
487 #ifndef HAVE_MORECORE
488 #define HAVE_MORECORE 0
489 #define DL_HAVE_MMAP 1
490 #endif
491 #endif
492
493 #ifndef LACKS_SYS_TYPES_H
494 #include <sys/types.h>
495 #endif
496
497
498 #define MAX_SIZE_T (~(size_t)0)
499
500 #ifndef ONLY_MSPACES
501 #define ONLY_MSPACES 0
502 #endif
503 #ifndef MSPACES
504 #if ONLY_MSPACES
505 #define MSPACES 1
506 #else
507 #define MSPACES 0
508 #endif
509 #endif
510 #ifndef MALLOC_ALIGNMENT
511 #define MALLOC_ALIGNMENT ((size_t)8U)
512 #endif
513 #ifndef FOOTERS
514 #define FOOTERS 0
515 #endif
516 #ifndef ABORT
517 #define ABORT abort()
518 #endif
519 #ifndef ABORT_ON_ASSERT_FAILURE
520 #define ABORT_ON_ASSERT_FAILURE 1
521 #endif
522 #ifndef PROCEED_ON_ERROR
523 #define PROCEED_ON_ERROR 0
524 #endif
525 #ifndef USE_LOCKS
526 #define USE_LOCKS 0
527 #endif
528 #ifndef USE_SPIN_LOCKS
529 #if USE_LOCKS && (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(_MSC_VER) && _MSC_VER>=1310)
530 #define USE_SPIN_LOCKS 1
531 #else
532 #define USE_SPIN_LOCKS 0
533 #endif
534 #endif
535 #ifndef INSECURE
536 #define INSECURE 0
537 #endif
538 #ifndef DL_HAVE_MMAP
539 #define DL_HAVE_MMAP 1
540 #endif
541 #ifndef MMAP_CLEARS
542 #define MMAP_CLEARS 1
543 #endif
544 #ifndef DL_HAVE_MREMAP
545 #ifdef linux
546 #define DL_HAVE_MREMAP 1
547 #else
548 #define DL_HAVE_MREMAP 0
549 #endif
550 #endif
551 #ifndef MALLOC_FAILURE_ACTION
552 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
553 #endif
554 #ifndef HAVE_MORECORE
555 #if ONLY_MSPACES
556 #define HAVE_MORECORE 0
557 #else
558 #define HAVE_MORECORE 1
559 #endif
560 #endif
561 #if !HAVE_MORECORE
562 #define MORECORE_CONTIGUOUS 0
563 #else
564 #ifndef MORECORE
565 #define MORECORE sbrk
566 #endif
567 #ifndef MORECORE_CONTIGUOUS
568 #define MORECORE_CONTIGUOUS 1
569 #endif
570 #endif
571 #ifndef DEFAULT_GRANULARITY
572 #if MORECORE_CONTIGUOUS
573 #define DEFAULT_GRANULARITY (0)
574 #else
575 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
576 #endif
577 #endif
578 #ifndef DEFAULT_TRIM_THRESHOLD
579 #ifndef MORECORE_CANNOT_TRIM
580 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
581 #else
582 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
583 #endif
584 #endif
585 #ifndef DEFAULT_MMAP_THRESHOLD
586 #if DL_HAVE_MMAP
587 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
588 #else
589 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
590 #endif
591 #endif
592 #ifndef MAX_RELEASE_CHECK_RATE
593 #if DL_HAVE_MMAP
594 #define MAX_RELEASE_CHECK_RATE 255
595 #else
596 #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
597 #endif
598 #endif
599 #ifndef USE_BUILTIN_FFS
600 #define USE_BUILTIN_FFS 0
601 #endif
602 #ifndef USE_DEV_RANDOM
603 #define USE_DEV_RANDOM 0
604 #endif
605 #ifndef NO_MALLINFO
606 #define NO_MALLINFO 0
607 #endif
608 #ifndef MALLINFO_FIELD_TYPE
609 #define MALLINFO_FIELD_TYPE size_t
610 #endif
611 #ifndef NO_SEGMENT_TRAVERSAL
612 #define NO_SEGMENT_TRAVERSAL 0
613 #endif
614
615
616
617
618
619
620
621
622 #define M_TRIM_THRESHOLD (-1)
623 #define M_GRANULARITY (-2)
624 #define M_MMAP_THRESHOLD (-3)
625
626
627
628 #if !NO_MALLINFO
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653 #ifdef HAVE_USR_INCLUDE_MALLOC_H
654 #include "/usr/include/malloc.h"
655 #else
656
657 struct mallinfo {
658 MALLINFO_FIELD_TYPE arena;
659 MALLINFO_FIELD_TYPE ordblks;
660 MALLINFO_FIELD_TYPE smblks;
661 MALLINFO_FIELD_TYPE hblks;
662 MALLINFO_FIELD_TYPE hblkhd;
663 MALLINFO_FIELD_TYPE usmblks;
664 MALLINFO_FIELD_TYPE fsmblks;
665 MALLINFO_FIELD_TYPE uordblks;
666 MALLINFO_FIELD_TYPE fordblks;
667 MALLINFO_FIELD_TYPE keepcost;
668 };
669
670 #endif
671 #endif
672
673
674
675
676
677
678 #ifndef FORCEINLINE
679 #if defined(__GNUC__)
680 #define FORCEINLINE __inline __attribute__ ((always_inline))
681 #elif defined(_MSC_VER)
682 #define FORCEINLINE __forceinline
683 #endif
684 #endif
685 #ifndef NOINLINE
686 #if defined(__GNUC__)
687 #define NOINLINE __attribute__ ((noinline))
688 #elif defined(_MSC_VER)
689 #define NOINLINE __declspec(noinline)
690 #else
691 #define NOINLINE
692 #endif
693 #endif
694
695 #ifdef __cplusplus
696 extern "C" {
697 #ifndef FORCEINLINE
698 #define FORCEINLINE inline
699 #endif
700 #endif
701 #ifndef FORCEINLINE
702 #define FORCEINLINE
703 #endif
704
705 #if !ONLY_MSPACES
706
707
708
709 #ifndef USE_DL_PREFIX
710 #define dlcalloc calloc
711 #define dlfree free
712 #define dlmalloc malloc
713 #define dlmemalign memalign
714 #define dlrealloc realloc
715 #define dlvalloc valloc
716 #define dlpvalloc pvalloc
717 #define dlmallinfo mallinfo
718 #define dlmallopt mallopt
719 #define dlmalloc_trim malloc_trim
720 #define dlmalloc_stats malloc_stats
721 #define dlmalloc_usable_size malloc_usable_size
722 #define dlmalloc_footprint malloc_footprint
723 #define dlmalloc_max_footprint malloc_max_footprint
724 #define dlindependent_calloc independent_calloc
725 #define dlindependent_comalloc independent_comalloc
726 #endif
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743 void* dlmalloc(size_t);
744
745
746
747
748
749
750
751
752 void dlfree(void*);
753
754
755
756
757
758
759 void* dlcalloc(size_t, size_t);
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784 void* dlrealloc(void*, size_t);
785
786
787
788
789
790
791
792
793
794
795
796
797
798 void* dlmemalign(size_t, size_t);
799
800
801
802
803
804
805 void* dlvalloc(size_t);
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825 int dlmallopt(int, int);
826
827
828
829
830
831
832
833
834
835
836 size_t dlmalloc_footprint(void);
837
838
839
840
841
842
843
844
845
846
847
848
849 size_t dlmalloc_max_footprint(void);
850
851 #if !NO_MALLINFO
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874 struct mallinfo dlmallinfo(void);
875 #endif
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929 void** dlindependent_calloc(size_t, size_t, void**);
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990 void** dlindependent_comalloc(size_t, size_t*, void**);
991
992
993
994
995
996
997
998 void* dlpvalloc(size_t);
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021 int dlmalloc_trim(size_t);
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037 size_t dlmalloc_usable_size(void*);
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058 void dlmalloc_stats(void);
1059
1060 #endif
1061
1062 #if MSPACES
1063
1064
1065
1066
1067
1068 typedef void* mspace;
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081 mspace create_mspace(size_t capacity, int locked);
1082
1083
1084
1085
1086
1087
1088
1089 size_t destroy_mspace(mspace msp);
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
1101
1102
1103
1104
1105
1106 void* mspace_malloc(mspace msp, size_t bytes);
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116 void mspace_free(mspace msp, void* mem);
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
1128
1129
1130
1131
1132
1133 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1134
1135
1136
1137
1138
1139 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1140
1141
1142
1143
1144
1145 void** mspace_independent_calloc(mspace msp, size_t n_elements,
1146 size_t elem_size, void* chunks[]);
1147
1148
1149
1150
1151
1152 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
1153 size_t sizes[], void* chunks[]);
1154
1155
1156
1157
1158
1159 size_t mspace_footprint(mspace msp);
1160
1161
1162
1163
1164
1165 size_t mspace_max_footprint(mspace msp);
1166
1167
1168 #if !NO_MALLINFO
1169
1170
1171
1172
1173 struct mallinfo mspace_mallinfo(mspace msp);
1174 #endif
1175
1176
1177
1178
1179
1180 void mspace_malloc_stats(mspace msp);
1181
1182
1183
1184
1185
1186 int mspace_trim(mspace msp, size_t pad);
1187
1188
1189
1190
1191 int mspace_mallopt(int, int);
1192
1193 #endif
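/* Editor's sketch (hypothetical caller, assumes MSPACES is enabled at compile
   time): an mspace can be carved out of a caller-supplied region instead of
   system memory, e.g.

     static char arena[1u << 20];
     mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
     void*  p  = mspace_malloc(ms, 128);
     mspace_free(ms, p);
     destroy_mspace(ms);     invalidates every block allocated from ms

   create_mspace(0, 0) would instead obtain a default-sized space from the
   system memory routines configured above. */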
1194
1195 #ifdef __cplusplus
1196 };
1197 #endif
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211 #ifdef WIN32
1212 #pragma warning( disable : 4146 )
1213 #endif
1214
1215 #include <stdio.h>
1216
1217 #ifndef LACKS_ERRNO_H
1218 #include <errno.h>
1219 #endif
1220 #if FOOTERS
1221 #include <time.h>
1222 #endif
1223 #ifndef LACKS_STDLIB_H
1224 #include <stdlib.h>
1225 #endif
1226 #ifdef DL_DEBUG
1227 #if ABORT_ON_ASSERT_FAILURE
1228 #define dl_assert(x) if(!(x)) ABORT
1229 #else
1230 #include <assert.h>
1231 #endif
1232 #else
1233 #define dl_assert(x) assert(x)
1234 #include <assert.h>
1235 #endif
1236 #ifndef LACKS_STRING_H
1237 #include <string.h>
1238 #endif
1239 #if USE_BUILTIN_FFS
1240 #ifndef LACKS_STRINGS_H
1241 #include <strings.h>
1242 #endif
1243 #endif
1244 #if DL_HAVE_MMAP
1245 #ifndef LACKS_SYS_MMAN_H
1246 #include <sys/mman.h>
1247 #endif
1248 #ifndef LACKS_FCNTL_H
1249 #include <fcntl.h>
1250 #endif
1251 #endif
1252 #if HAVE_MORECORE
1253 #ifndef LACKS_UNISTD_H
1254 #include <unistd.h>
1255 #else
1256 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1257 extern void* sbrk(ptrdiff_t);
1258 #endif
1259 #endif
1260 #endif
1261
1262
1263 #if USE_LOCKS
1264 #ifndef WIN32
1265 #include <pthread.h>
1266 #if defined (__SVR4) && defined (__sun)
1267 #include <thread.h>
1268 #endif
1269 #else
1270 #ifndef _M_AMD64
1271
1272 #ifdef __cplusplus
1273 extern "C" {
1274 #endif
1275 LONG __cdecl _InterlockedCompareExchange(LPLONG volatile Dest, LONG Exchange, LONG Comp);
1276 LONG __cdecl _InterlockedExchange(LPLONG volatile Target, LONG Value);
1277 #ifdef __cplusplus
1278 }
1279 #endif
1280 #endif
1281 #pragma intrinsic (_InterlockedCompareExchange)
1282 #pragma intrinsic (_InterlockedExchange)
1283 #define interlockedcompareexchange _InterlockedCompareExchange
1284 #define interlockedexchange _InterlockedExchange
1285 #endif
1286 #endif
1287
1288
1289 #if defined(_MSC_VER) && _MSC_VER>=1300
1290 #ifndef BitScanForward
1291 #ifdef __cplusplus
1292 extern "C" {
1293 #endif
1294 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
1295 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
1296 #ifdef __cplusplus
1297 }
1298 #endif
1299
1300 #define BitScanForward _BitScanForward
1301 #define BitScanReverse _BitScanReverse
1302 #pragma intrinsic(_BitScanForward)
1303 #pragma intrinsic(_BitScanReverse)
1304 #endif
1305 #endif
1306
1307 #ifndef WIN32
1308 #ifndef malloc_getpagesize
1309 # ifdef _SC_PAGESIZE
1310 # ifndef _SC_PAGE_SIZE
1311 # define _SC_PAGE_SIZE _SC_PAGESIZE
1312 # endif
1313 # endif
1314 # ifdef _SC_PAGE_SIZE
1315 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1316 # else
1317 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1318 extern size_t getpagesize();
1319 # define malloc_getpagesize getpagesize()
1320 # else
1321 # ifdef WIN32
1322 # define malloc_getpagesize getpagesize()
1323 # else
1324 # ifndef LACKS_SYS_PARAM_H
1325 # include <sys/param.h>
1326 # endif
1327 # ifdef EXEC_PAGESIZE
1328 # define malloc_getpagesize EXEC_PAGESIZE
1329 # else
1330 # ifdef NBPG
1331 # ifndef CLSIZE
1332 # define malloc_getpagesize NBPG
1333 # else
1334 # define malloc_getpagesize (NBPG * CLSIZE)
1335 # endif
1336 # else
1337 # ifdef NBPC
1338 # define malloc_getpagesize NBPC
1339 # else
1340 # ifdef PAGESIZE
1341 # define malloc_getpagesize PAGESIZE
1342 # else
1343 # define malloc_getpagesize ((size_t)4096U)
1344 # endif
1345 # endif
1346 # endif
1347 # endif
1348 # endif
1349 # endif
1350 # endif
1351 #endif
1352 #endif
1353
1354
1355
1356
1357
1358
1359 #define SIZE_T_SIZE (sizeof(size_t))
1360 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1361
1362
1363
1364 #define SIZE_T_ZERO ((size_t)0)
1365 #define SIZE_T_ONE ((size_t)1)
1366 #define SIZE_T_TWO ((size_t)2)
1367 #define SIZE_T_FOUR ((size_t)4)
1368 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1369 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1370 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1371 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1372
1373
1374 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1375
1376
1377 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1378
1379
1380 #define align_offset(A)\
1381 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1382 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
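/* Illustrative: with the default MALLOC_ALIGNMENT of 8, CHUNK_ALIGN_MASK is 7,
   so align_offset(0x1003) == 5 and align_offset(0x1008) == 0, i.e. the number
   of bytes needed to round an address up to the next alignment boundary. */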
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394 #define MFAIL ((void*)(MAX_SIZE_T))
1395 #define CMFAIL ((char*)(MFAIL))
1396
1397 #if !DL_HAVE_MMAP
1398 #define IS_MMAPPED_BIT (SIZE_T_ZERO)
1399 #define USE_MMAP_BIT (SIZE_T_ZERO)
1400 #define CALL_MMAP(s) MFAIL
1401 #define CALL_MUNMAP(a, s) (-1)
1402 #define DIRECT_MMAP(s) MFAIL
1403
1404 #else
1405 #define IS_MMAPPED_BIT (SIZE_T_ONE)
1406 #define USE_MMAP_BIT (SIZE_T_ONE)
1407
1408 #ifndef WIN32
1409 #define CALL_MUNMAP(a, s) munmap((a), (s))
1410 #define MMAP_PROT (PROT_READ|PROT_WRITE)
1411 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1412 #define MAP_ANONYMOUS MAP_ANON
1413 #endif
1414 #ifdef MAP_ANONYMOUS
1415 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1416 #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1417 #else
1418
1419
1420
1421
1422 #define MMAP_FLAGS (MAP_PRIVATE)
1423 static int dev_zero_fd = -1;
1424 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1425 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1426 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1427 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1428 #endif
1429
1430 #define DIRECT_MMAP(s) CALL_MMAP(s)
1431 #else
1432
1433
1434 static FORCEINLINE void* win32mmap(size_t size) {
1435 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
1436 return (ptr != 0)? ptr: MFAIL;
1437 }
1438
1439
1440 static FORCEINLINE void* win32direct_mmap(size_t size) {
1441 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
1442 PAGE_READWRITE);
1443 return (ptr != 0)? ptr: MFAIL;
1444 }
1445
1446
1447 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
1448 MEMORY_BASIC_INFORMATION minfo;
1449 char* cptr = (char*)ptr;
1450 while (size) {
1451 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1452 return -1;
1453 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1454 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1455 return -1;
1456 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1457 return -1;
1458 cptr += minfo.RegionSize;
1459 size -= minfo.RegionSize;
1460 }
1461 return 0;
1462 }
1463
1464 #define CALL_MMAP(s) win32mmap(s)
1465 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
1466 #define DIRECT_MMAP(s) win32direct_mmap(s)
1467 #endif
1468 #endif
1469
1470 #if DL_HAVE_MMAP && DL_HAVE_MREMAP
1471 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1472 #else
1473 #define CALL_MREMAP(addr, osz, nsz, mv) ((void)(addr),(void)(osz), \
1474 (void)(nsz), (void)(mv),MFAIL)
1475 #endif
1476
1477 #if HAVE_MORECORE
1478 #define CALL_MORECORE(S) MORECORE(S)
1479 #else
1480 #define CALL_MORECORE(S) MFAIL
1481 #endif
1482
1483
1484 #define USE_NONCONTIGUOUS_BIT (4U)
1485
1486
1487 #define EXTERN_BIT (8U)
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518 #if USE_LOCKS == 1
1519
1520 #if USE_SPIN_LOCKS
1521 #ifndef WIN32
1522
1523 struct pthread_mlock_t
1524 {
1525 volatile pthread_t threadid;
1526 volatile unsigned int c;
1527 volatile unsigned int l;
1528 };
1529 #define MLOCK_T struct pthread_mlock_t
1530 #define CURRENT_THREAD pthread_self()
1531 #define SPINS_PER_YIELD 63
1532 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
1533 if(CURRENT_THREAD==sl->threadid)
1534 ++sl->c;
1535 else {
1536 int spins = 0;
1537 for (;;) {
1538 int ret;
1539 __asm__ __volatile__ ("lock cmpxchgl %2,(%1)" : "=a" (ret) : "r" (&sl->l), "r" (1), "a" (0));
1540 if(!ret) {
1541 dl_assert(!sl->threadid);
1542 sl->threadid=CURRENT_THREAD;
1543 sl->c=1;
1544 break;
1545 }
1546 if ((++spins & SPINS_PER_YIELD) == 0) {
1547 #if defined (__SVR4) && defined (__sun)
1548 thr_yield();
1549 #else
1550 #ifdef linux
1551 sched_yield();
1552 #else
1553 ;
1554 #endif
1555 #endif
1556 }
1557 }
1558 }
1559
1560 return 0;
1561 }
1562
1563 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
1564 int ret;
1565 dl_assert(CURRENT_THREAD==sl->threadid);
1566 if (!--sl->c) {
1567 sl->threadid=0;
1568 __asm__ __volatile__ ("xchgl %2,(%1)" : "=r" (ret) : "r" (&sl->l), "0" (0));
1569 }
1570 }
1571
1572 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
1573 int ret;
1574 __asm__ __volatile__ ("lock cmpxchgl %2,(%1)" : "=a" (ret) : "r" (&sl->l), "r" (1), "a" (0));
1575 if(!ret){
1576 dl_assert(!sl->threadid);
1577 sl->threadid=CURRENT_THREAD;
1578 sl->c=1;
1579 return 1;
1580 }
1581 return 0;
1582 }
1583
1584 #define INITIAL_LOCK(sl) (memset((sl), 0, sizeof(MLOCK_T)), 0)
1585 #define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
1586 #define RELEASE_LOCK(sl) pthread_release_lock(sl)
1587 #define TRY_LOCK(sl) pthread_try_lock(sl)
1588 #define IS_LOCKED(sl) ((sl)->l)
1589
1590 static MLOCK_T magic_init_mutex = {0, 0, 0 };
1591 #if HAVE_MORECORE
1592 static MLOCK_T morecore_mutex = {0, 0, 0 };
1593 #endif
1594
1595 #else
1596
1597 struct win32_mlock_t
1598 {
1599 volatile long threadid;
1600 volatile unsigned int c;
1601 long l;
1602 };
1603 #define MLOCK_T struct win32_mlock_t
1604 #define CURRENT_THREAD GetCurrentThreadId()
1605 #define SPINS_PER_YIELD 63
1606 static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
1607 long mythreadid=CURRENT_THREAD;
1608 if(mythreadid==sl->threadid)
1609 ++sl->c;
1610 else {
1611 int spins = 0;
1612 for (;;) {
1613 if (!interlockedexchange(&sl->l, 1)) {
1614 dl_assert(!sl->threadid);
1615 sl->threadid=mythreadid;
1616 sl->c=1;
1617 break;
1618 }
1619 if ((++spins & SPINS_PER_YIELD) == 0)
1620 SleepEx(0, FALSE);
1621 }
1622 }
1623 return 0;
1624 }
1625
1626 static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
1627 dl_assert(CURRENT_THREAD==sl->threadid);
1628 if (!--sl->c) {
1629 sl->threadid=0;
1630 interlockedexchange (&sl->l, 0);
1631 }
1632 }
1633
1634 static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
1635 if (!interlockedexchange(&sl->l, 1)){
1636 dl_assert(!sl->threadid);
1637 sl->threadid=CURRENT_THREAD;
1638 sl->c=1;
1639 return 1;
1640 }
1641 return 0;
1642 }
1643
1644 #define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0)
1645 #define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
1646 #define RELEASE_LOCK(sl) win32_release_lock(sl)
1647 #define TRY_LOCK(sl) win32_try_lock(sl)
1648 #define IS_LOCKED(sl) ((sl)->l)
1649
1650 static MLOCK_T magic_init_mutex = {0, 0 };
1651 #if HAVE_MORECORE
1652 static MLOCK_T morecore_mutex = {0, 0 };
1653 #endif
1654
1655 #endif
1656 #else
1657
1658 #ifndef WIN32
1659
1660 struct pthread_mlock_t
1661 {
1662 volatile unsigned int c;
1663 pthread_mutex_t l;
1664 };
1665 #define MLOCK_T struct pthread_mlock_t
1666 #define CURRENT_THREAD pthread_self()
1667 static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) {
1668 if(!pthread_mutex_lock(&(sl)->l)){
1669 sl->c++;
1670 return 0;
1671 }
1672 return 1;
1673 }
1674
1675 static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
1676 --sl->c;
1677 pthread_mutex_unlock(&(sl)->l);
1678 }
1679
1680 static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) {
1681 if(!pthread_mutex_trylock(&(sl)->l)){
1682 sl->c++;
1683 return 1;
1684 }
1685 return 0;
1686 }
1687
1688 static FORCEINLINE int pthread_init_lock (MLOCK_T *sl) {
1689 pthread_mutexattr_t attr;
1690 sl->c=0;
1691 if(pthread_mutexattr_init(&attr)) return 1;
1692 if(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
1693 if(pthread_mutex_init(&sl->l, &attr)) return 1;
1694 pthread_mutexattr_destroy(&attr);
1695 return 0;
1696 }
1697
1698 static FORCEINLINE int pthread_islocked (MLOCK_T *sl) {
1699 if(pthread_try_lock(sl)){ /* acquired: a count above 1 means it was already held */
1700 int ret = (sl->c > 1);
1701 pthread_release_lock(sl);
1702 return ret;
1703 }
1704 return 1; /* trylock failed: held by another thread */
1705 }
1706
1707 #define INITIAL_LOCK(sl) pthread_init_lock(sl)
1708 #define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl)
1709 #define RELEASE_LOCK(sl) pthread_release_lock(sl)
1710 #define TRY_LOCK(sl) pthread_try_lock(sl)
1711 #define IS_LOCKED(sl) pthread_islocked(sl)
1712
1713 static MLOCK_T magic_init_mutex = {0, PTHREAD_MUTEX_INITIALIZER };
1714 #if HAVE_MORECORE
1715 static MLOCK_T morecore_mutex = {0, PTHREAD_MUTEX_INITIALIZER };
1716 #endif
1717
1718 #else
1719
1720 #define MLOCK_T CRITICAL_SECTION
1721 #define CURRENT_THREAD GetCurrentThreadId()
1722 #define MLOCK_T_INITIAL_LOCK_PLACEHOLDER_REMOVED
1723 #define ACQUIRE_LOCK(s) ( (!((s))->DebugInfo ? INITIAL_LOCK((s)) : 0), EnterCriticalSection((s)), 0)
1724 #define RELEASE_LOCK(s) ( LeaveCriticalSection((s)), 0 )
1725 #define TRY_LOCK(s) ( TryEnterCriticalSection((s)) )
1726 #define IS_LOCKED(s) ( (s)->LockCount >= 0 )
1727 #define NULL_LOCK_INITIALIZER
1728 static MLOCK_T magic_init_mutex;
1729 #if HAVE_MORECORE
1730 static MLOCK_T morecore_mutex;
1731 #endif
1732 #endif
1733 #endif
1734 #endif
1735
1736
1737
1738 #if USE_LOCKS > 1
1739
1740
1741
1742
1743
1744
1745
1746
1747 static MLOCK_T magic_init_mutex = NULL_LOCK_INITIALIZER;
1748 #if HAVE_MORECORE
1749 static MLOCK_T morecore_mutex = NULL_LOCK_INITIALIZER;
1750 #endif
1751 #endif
1752
1753
1754
1755
1756 #if USE_LOCKS
1757 #define USE_LOCK_BIT (2U)
1758 #else
1759 #define USE_LOCK_BIT (0U)
1760 #define INITIAL_LOCK(l)
1761 #endif
1762
1763 #if USE_LOCKS && HAVE_MORECORE
1764 #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
1765 #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
1766 #else
1767 #define ACQUIRE_MORECORE_LOCK()
1768 #define RELEASE_MORECORE_LOCK()
1769 #endif
1770
1771 #if USE_LOCKS
1772 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1773 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1774 #else
1775 #define ACQUIRE_MAGIC_INIT_LOCK()
1776 #define RELEASE_MAGIC_INIT_LOCK()
1777 #endif
1778
1917 struct malloc_chunk {
1918 size_t prev_foot;
1919 size_t head;
1920 struct malloc_chunk* fd;
1921 struct malloc_chunk* bk;
1922 };
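/* Editorial note: prev_foot holds the size of the preceding chunk (it is only
   meaningful while that chunk is free, or carries mmap/footer data), head
   holds this chunk's size with its low bits used as the PINUSE/CINUSE flags
   defined below, and fd/bk are free-list links that are valid only while the
   chunk itself is free; for an in-use chunk those two words belong to the
   user's payload. */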
1923
1924 typedef struct malloc_chunk mchunk;
1925 typedef struct malloc_chunk* mchunkptr;
1926 typedef struct malloc_chunk* sbinptr;
1927 typedef unsigned int bindex_t;
1928 typedef unsigned int binmap_t;
1929 typedef unsigned int flag_t;
1930
1931
1932
1933 #define MCHUNK_SIZE (sizeof(mchunk))
1934
1935 #if FOOTERS
1936 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1937 #else
1938 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
1939 #endif
1940
1941
1942 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1943
1944 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
1945
1946
1947 #define MIN_CHUNK_SIZE\
1948 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1949
1950
1951 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
1952 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1953
1954 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
1955
1956
1957 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
1958 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1959
1960
1961 #define pad_request(req) \
1962 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1963
1964
1965 #define request2size(req) \
1966 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
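/* Worked example (assuming the defaults: MALLOC_ALIGNMENT == 8, FOOTERS == 0,
   and a 32-bit size_t): CHUNK_OVERHEAD == 4, MIN_CHUNK_SIZE == 16 and
   MIN_REQUEST == 11, so request2size(5) == 16 while
   request2size(20) == (20 + 4 + 7) & ~7 == 24. */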
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981 #define PINUSE_BIT (SIZE_T_ONE)
1982 #define CINUSE_BIT (SIZE_T_TWO)
1983 #define FLAG4_BIT (SIZE_T_FOUR)
1984 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
1985 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
1986
1987
1988 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
1989
1990
1991 #define cinuse(p) ((p)->head & CINUSE_BIT)
1992 #define pinuse(p) ((p)->head & PINUSE_BIT)
1993 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
1994
1995 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
1996 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
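/* Illustrative: a free 48-byte chunk whose predecessor is in use has
   head == 48 | PINUSE_BIT == 0x31, so chunksize() == 48, pinuse() is nonzero
   and cinuse() is zero; the same chunk while allocated would carry
   head == 48 | PINUSE_BIT | CINUSE_BIT == 0x33. */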
1997
1998
1999 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2000 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
2001
2002
2003 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
2004 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
2005
2006
2007 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
2008
2009
2010 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
2011 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
2012
2013
2014 #define set_size_and_pinuse_of_free_chunk(p, s)\
2015 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
2016
2017
2018 #define set_free_with_pinuse(p, s, n)\
2019 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
2020
2021 #define is_mmapped(p)\
2022 (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
2023
2024
2025 #define overhead_for(p)\
2026 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
2027
2028
2029 #if MMAP_CLEARS
2030 #define calloc_must_clear(p) (!is_mmapped(p))
2031 #else
2032 #define calloc_must_clear(p) (1)
2033 #endif
2034
2126 struct malloc_tree_chunk {
2127
2128 size_t prev_foot;
2129 size_t head;
2130 struct malloc_tree_chunk* fd;
2131 struct malloc_tree_chunk* bk;
2132
2133 struct malloc_tree_chunk* child[2];
2134 struct malloc_tree_chunk* parent;
2135 bindex_t index;
2136 };
2137
2138 typedef struct malloc_tree_chunk tchunk;
2139 typedef struct malloc_tree_chunk* tchunkptr;
2140 typedef struct malloc_tree_chunk* tbinptr;
2141
2142
2143 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
2144
2202 struct malloc_segment {
2203 char* base;
2204 size_t size;
2205 struct malloc_segment* next;
2206 flag_t sflags;
2207 };
2208
2209 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
2210 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
2211
2212 typedef struct malloc_segment msegment;
2213 typedef struct malloc_segment* msegmentptr;
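/* Editorial note: each malloc_segment records one contiguous region obtained
   from the system (via MORECORE, mmap, or a user-supplied base): base/size
   describe the region, next chains the segments of a space together, and
   sflags remembers whether the region was mmapped or externally provided. */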
2214
2300 #define NSMALLBINS (32U)
2301 #define NTREEBINS (32U)
2302 #define SMALLBIN_SHIFT (3U)
2303 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2304 #define TREEBIN_SHIFT (8U)
2305 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2306 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2307 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2308
2309 struct malloc_state {
2310 binmap_t smallmap;
2311 binmap_t treemap;
2312 size_t dvsize;
2313 size_t topsize;
2314 char* least_addr;
2315 mchunkptr dv;
2316 mchunkptr top;
2317 size_t trim_check;
2318 size_t release_checks;
2319 size_t magic;
2320 mchunkptr smallbins[(NSMALLBINS+1)*2];
2321 tbinptr treebins[NTREEBINS];
2322 size_t footprint;
2323 size_t max_footprint;
2324 flag_t mflags;
2325 #if USE_LOCKS
2326 MLOCK_T mutex;
2327 #endif
2328 msegment seg;
2329 void* extp;
2330 size_t exts;
2331 };
2332
2333 typedef struct malloc_state* mstate;
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343 struct malloc_params {
2344 size_t magic;
2345 size_t page_size;
2346 size_t granularity;
2347 size_t mmap_threshold;
2348 size_t trim_threshold;
2349 flag_t default_mflags;
2350 };
2351
2352 static struct malloc_params mparams;
2353
2354 #if !ONLY_MSPACES
2355
2356
2357 static struct malloc_state _gm_;
2358 #define gm (&_gm_)
2359 #define is_global(M) ((M) == &_gm_)
2360
2361 #endif
2362
2363 #define is_initialized(M) ((M)->top != 0)
2364
2365
2366
2367
2368
2369 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2370 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2371 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2372
2373 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2374 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2375 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2376
2377 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2378 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2379
2380 #define set_lock(M,L)\
2381 ((M)->mflags = (L)?\
2382 ((M)->mflags | USE_LOCK_BIT) :\
2383 ((M)->mflags & ~USE_LOCK_BIT))
2384
2385
2386 #define page_align(S)\
2387 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
2388
2389
2390 #define granularity_align(S)\
2391 (((S) + (mparams.granularity - SIZE_T_ONE))\
2392 & ~(mparams.granularity - SIZE_T_ONE))
2393
2394
2395
2396 #ifdef WIN32
2397 #define mmap_align(S) granularity_align(S)
2398 #else
2399 #define mmap_align(S) page_align(S)
2400 #endif
2401
2402 #define is_page_aligned(S)\
2403 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2404 #define is_granularity_aligned(S)\
2405 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
2406
2407
2408 #define segment_holds(S, A)\
2409 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2410
2411
2412 static msegmentptr segment_holding(mstate m, char* addr) {
2413 msegmentptr sp = &m->seg;
2414 for (;;) {
2415 if (addr >= sp->base && addr < sp->base + sp->size)
2416 return sp;
2417 if ((sp = sp->next) == 0)
2418 return 0;
2419 }
2420 }
2421
2422
2423 static int has_segment_link(mstate m, msegmentptr ss) {
2424 msegmentptr sp = &m->seg;
2425 for (;;) {
2426 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
2427 return 1;
2428 if ((sp = sp->next) == 0)
2429 return 0;
2430 }
2431 }
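/* Editorial note: segment_holding() returns the segment record whose address
   range contains addr (or 0 if none), while has_segment_link() reports
   whether some segment record of m is itself stored inside ss, in which case
   ss cannot be released without destroying the segment list. */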
2432
2433 #ifndef MORECORE_CANNOT_TRIM
2434 #define should_trim(M,s) ((s) > (M)->trim_check)
2435 #else
2436 #define should_trim(M,s) (0)
2437 #endif
2438
2439
2440
2441
2442
2443
2444 #define TOP_FOOT_SIZE\
2445 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456 #if USE_LOCKS
2457
2458
2459 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2460
2461 #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2462 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2463 #else
2464
2465 #ifndef PREACTION
2466 #define PREACTION(M) (0)
2467 #endif
2468
2469 #ifndef POSTACTION
2470 #define POSTACTION(M)
2471 #endif
2472
2473 #endif
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483 #if PROCEED_ON_ERROR
2484
2485
2486 int malloc_corruption_error_count;
2487
2488
2489 static void reset_on_error(mstate m);
2490
2491 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2492 #define USAGE_ERROR_ACTION(m, p)
2493
2494 #else
2495
2496 #ifndef CORRUPTION_ERROR_ACTION
2497 #define CORRUPTION_ERROR_ACTION(m) ABORT
2498 #endif
2499
2500 #ifndef USAGE_ERROR_ACTION
2501 #define USAGE_ERROR_ACTION(m,p) ABORT
2502 #endif
2503
2504 #endif
2505
2506
2507
2508 #if ! DL_DEBUG
2509
2510 #define check_free_chunk(M,P)
2511 #define check_inuse_chunk(M,P)
2512 #define check_malloced_chunk(M,P,N)
2513 #define check_mmapped_chunk(M,P)
2514 #define check_malloc_state(M)
2515 #define check_top_chunk(M,P)
2516
2517 #else
2518 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
2519 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2520 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
2521 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2522 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2523 #define check_malloc_state(M) do_check_malloc_state(M)
2524
2525 static void do_check_any_chunk(mstate m, mchunkptr p);
2526 static void do_check_top_chunk(mstate m, mchunkptr p);
2527 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2528 static void do_check_inuse_chunk(mstate m, mchunkptr p);
2529 static void do_check_free_chunk(mstate m, mchunkptr p);
2530 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
2531 static void do_check_tree(mstate m, tchunkptr t);
2532 static void do_check_treebin(mstate m, bindex_t i);
2533 static void do_check_smallbin(mstate m, bindex_t i);
2534 static void do_check_malloc_state(mstate m);
2535 static int bin_find(mstate m, mchunkptr x);
2536 static size_t traverse_and_check(mstate m);
2537 #endif
2538
2539
2540
2541 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2542 #define small_index(s) ((s) >> SMALLBIN_SHIFT)
2543 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2544 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
2545
2546
2547 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2548 #define treebin_at(M,i) (&((M)->treebins[i]))
2549
2550
2551 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2552 #define compute_tree_index(S, I)\
2553 {\
2554 unsigned int X = S >> TREEBIN_SHIFT;\
2555 if (X == 0)\
2556 I = 0;\
2557 else if (X > 0xFFFF)\
2558 I = NTREEBINS-1;\
2559 else {\
2560 unsigned int K;\
2561 __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "g" (X));\
2562 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2563 }\
2564 }
2565
2566 #elif defined(_MSC_VER) && _MSC_VER>=1300
2567 #define compute_tree_index(S, I)\
2568 {\
2569 size_t X = S >> TREEBIN_SHIFT;\
2570 if (X == 0)\
2571 I = 0;\
2572 else if (X > 0xFFFF)\
2573 I = NTREEBINS-1;\
2574 else {\
2575 unsigned int K;\
2576 _BitScanReverse((DWORD *) &K, X);\
2577 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2578 }\
2579 }
2580 #else
2581 #define compute_tree_index(S, I)\
2582 {\
2583 size_t X = S >> TREEBIN_SHIFT;\
2584 if (X == 0)\
2585 I = 0;\
2586 else if (X > 0xFFFF)\
2587 I = NTREEBINS-1;\
2588 else {\
2589 unsigned int Y = (unsigned int)X;\
2590 unsigned int N = ((Y - 0x100) >> 16) & 8;\
2591 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2592 N += K;\
2593 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2594 K = 14 - N + ((Y <<= K) >> 15);\
2595 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2596 }\
2597 }
2598 #endif
2599
2600
2601 #define bit_for_tree_index(i) \
2602 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2603
2604
2605 #define leftshift_for_tree_index(i) \
2606 ((i == NTREEBINS-1)? 0 : \
2607 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2608
2609
2610 #define minsize_for_tree_index(i) \
2611 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2612 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
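/* Illustrative, using the default shifts: chunks below MIN_LARGE_SIZE (256)
   are "small", and small_index(72) == 9 is the bin holding exact 72-byte
   chunks. For large chunks, compute_tree_index maps a 640-byte chunk to tree
   bin 2, whose range is [minsize_for_tree_index(2), minsize_for_tree_index(3))
   == [512, 768). */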
2613
2614
2615
2616
2617
2618 #define idx2bit(i) ((binmap_t)(1) << (i))
2619
2620
2621 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2622 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2623 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2624
2625 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2626 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2627 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2628
2629
2630
2631 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
2632 #define compute_bit2idx(X, I)\
2633 {\
2634 unsigned int J;\
2635 __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "g" (X));\
2636 I = (bindex_t)J;\
2637 }
2638 #elif defined(_MSC_VER) && _MSC_VER>=1300
2639 #define compute_bit2idx(X, I)\
2640 {\
2641 unsigned int J;\
2642 _BitScanForward((DWORD *) &J, X);\
2643 I = (bindex_t)J;\
2644 }
2645
2646 #else
2647 #if USE_BUILTIN_FFS
2648 #define compute_bit2idx(X, I) I = ffs(X)-1
2649
2650 #else
2651 #define compute_bit2idx(X, I)\
2652 {\
2653 unsigned int Y = X - 1;\
2654 unsigned int K = Y >> (16-4) & 16;\
2655 unsigned int N = K; Y >>= K;\
2656 N += K = Y >> (8-3) & 8; Y >>= K;\
2657 N += K = Y >> (4-2) & 4; Y >>= K;\
2658 N += K = Y >> (2-1) & 2; Y >>= K;\
2659 N += K = Y >> (1-0) & 1; Y >>= K;\
2660 I = (bindex_t)(N + Y);\
2661 }
2662 #endif
2663 #endif
2664
2665
2666 #define least_bit(x) ((x) & -(x))
2667
2668
2669 #define left_bits(x) ((x<<1) | -(x<<1))
2670
2671
2672 #define same_or_left_bits(x) ((x) | -(x))
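/* Illustrative, with a 32-bit binmap_t: least_bit(0x0C) == 0x04 isolates the
   lowest set bit, and left_bits(0x08) == 0xFFFFFFF0 selects every bin bit
   strictly to the left of bit 3; combined, these let the allocator jump to
   the smallest non-empty bin that can satisfy a request. */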
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703 #if !INSECURE
2704
2705 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2706
2707 #define ok_next(p, n) ((char*)(p) < (char*)(n))
2708
2709 #define ok_cinuse(p) cinuse(p)
2710
2711 #define ok_pinuse(p) pinuse(p)
2712
2713 #else
2714 #define ok_address(M, a) (1)
2715 #define ok_next(b, n) (1)
2716 #define ok_cinuse(p) (1)
2717 #define ok_pinuse(p) (1)
2718 #endif
2719
2720 #if (FOOTERS && !INSECURE)
2721
2722
2723 #define ok_magic(M) ((char *)(M) >= (char *)gm->least_addr && (M)->magic == mparams.magic)
2724 #else
2725 #define ok_magic(M) (1)
2726 #endif
2727
2728
2729
2730 #if !INSECURE
2731 #if defined(__GNUC__) && __GNUC__ >= 3
2732 #define RTCHECK(e) __builtin_expect(e, 1)
2733 #else
2734 #define RTCHECK(e) (e)
2735 #endif
2736 #else
2737 #define RTCHECK(e) (1)
2738 #endif
2739
2740
2741
2742 #if !FOOTERS
2743
2744 #define mark_inuse_foot(M,p,s)
2745
2746
2747 #define set_inuse(M,p,s)\
2748 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2749 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2750
2751
2752 #define set_inuse_and_pinuse(M,p,s)\
2753 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2754 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2755
2756
2757 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2758 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
2759
2760 #else
2761
2762
2763 #define mark_inuse_foot(M,p,s)\
2764 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
2765
2766 #define get_mstate_for(p)\
2767 ((mstate)(((mchunkptr)((char*)(p) +\
2768 (chunksize(p))))->prev_foot ^ mparams.magic))
2769
2770 #define set_inuse(M,p,s)\
2771 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2772 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2773 mark_inuse_foot(M,p,s))
2774
2775 #define set_inuse_and_pinuse(M,p,s)\
2776 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2777 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2778 mark_inuse_foot(M,p,s))
2779
2780 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2781 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2782 mark_inuse_foot(M, p, s))
2783
2784 #endif
2785
2786
2787
2788
2789 static int init_mparams(void) {
2790 if (mparams.page_size == 0) {
2791 size_t s;
2792
2793 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2794 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
2795 #if MORECORE_CONTIGUOUS
2796 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
2797 #else
2798 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
2799 #endif
2800
2801 #if (FOOTERS && !INSECURE)
2802 {
2803 #if USE_DEV_RANDOM
2804 int fd;
2805 unsigned char buf[sizeof(size_t)];
2806
2807 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
2808 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
2809 s = *((size_t *) buf);
2810 close(fd);
2811 }
2812 else
2813 #endif
2814 s = (size_t)(time(0) ^ (size_t)0x55555555U);
2815
2816 s |= (size_t)8U;
2817 s &= ~(size_t)7U;
2818
2819 }
2820 #else
2821 s = (size_t)0x58585858U;
2822 #endif
2823 ACQUIRE_MAGIC_INIT_LOCK();
2824 if (mparams.magic == 0) {
2825 mparams.magic = s;
2826 #if !ONLY_MSPACES
2827
2828 INITIAL_LOCK(&gm->mutex);
2829 gm->mflags = mparams.default_mflags;
2830 #endif
2831 }
2832 RELEASE_MAGIC_INIT_LOCK();
2833
2834 #ifndef WIN32
2835 mparams.page_size = malloc_getpagesize;
2836 mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
2837 DEFAULT_GRANULARITY : mparams.page_size);
2838 #else
2839 {
2840 SYSTEM_INFO system_info;
2841 GetSystemInfo(&system_info);
2842 mparams.page_size = system_info.dwPageSize;
2843 mparams.granularity = system_info.dwAllocationGranularity;
2844 }
2845 #endif
2846
2847
2848
2849
2850
2851
2852
2853 if ((sizeof(size_t) != sizeof(char*)) ||
2854 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2855 (sizeof(int) < 4) ||
2856 (MALLOC_ALIGNMENT < (size_t)8U) ||
2857 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
2858 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
2859 ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
2860 ((mparams.page_size & (mparams.page_size-SIZE_T_ONE)) != 0))
2861 ABORT;
2862 }
2863 return 0;
2864 }
2865
2866
2867 static int change_mparam(int param_number, int value) {
2868 size_t val = (size_t)value;
2869 init_mparams();
2870 switch(param_number) {
2871 case M_TRIM_THRESHOLD:
2872 mparams.trim_threshold = val;
2873 return 1;
2874 case M_GRANULARITY:
2875 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
2876 mparams.granularity = val;
2877 return 1;
2878 }
2879 else
2880 return 0;
2881 case M_MMAP_THRESHOLD:
2882 mparams.mmap_threshold = val;
2883 return 1;
2884 default:
2885 return 0;
2886 }
2887 }
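/* Illustrative: dlmallopt(M_GRANULARITY, 1 << 16) reaches this function and,
   on a typical 4K-page system, succeeds because 64K is a power of two no
   smaller than the page size, while dlmallopt(M_TRIM_THRESHOLD, -1)
   effectively disables trimming since the value is stored through a size_t
   cast and becomes MAX_SIZE_T. */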
2888
2889 #if DL_DEBUG
2890
2891
2892
2893 static void do_check_any_chunk(mstate m, mchunkptr p) {
2894 dl_assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2895 dl_assert(ok_address(m, p));
2896 }
2897
2898
2899 static void do_check_top_chunk(mstate m, mchunkptr p) {
2900 msegmentptr sp = segment_holding(m, (char*)p);
2901 size_t sz = p->head & ~INUSE_BITS;
2902 dl_assert(sp != 0);
2903 dl_assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2904 dl_assert(ok_address(m, p));
2905 dl_assert(sz == m->topsize);
2906 dl_assert(sz > 0);
2907 dl_assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
2908 dl_assert(pinuse(p));
2909 dl_assert(!pinuse(chunk_plus_offset(p, sz)));
2910 }
2911
2912
2913 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
2914 size_t sz = chunksize(p);
2915 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2916 dl_assert(is_mmapped(p));
2917 dl_assert(use_mmap(m));
2918 dl_assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2919 dl_assert(ok_address(m, p));
2920 dl_assert(!is_small(sz));
2921 dl_assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
2922 dl_assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
2923 dl_assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
2924 }
2925
2926
2927 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
2928 do_check_any_chunk(m, p);
2929 dl_assert(cinuse(p));
2930 dl_assert(next_pinuse(p));
2931
2932 dl_assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
2933 if (is_mmapped(p))
2934 do_check_mmapped_chunk(m, p);
2935 }
2936
2937
2938 static void do_check_free_chunk(mstate m, mchunkptr p) {
2939 size_t sz = chunksize(p);
2940 mchunkptr next = chunk_plus_offset(p, sz);
2941 do_check_any_chunk(m, p);
2942 dl_assert(!cinuse(p));
2943 dl_assert(!next_pinuse(p));
2944 dl_assert(!is_mmapped(p));
2945 if (p != m->dv && p != m->top) {
2946 if (sz >= MIN_CHUNK_SIZE) {
2947 dl_assert((sz & CHUNK_ALIGN_MASK) == 0);
2948 dl_assert(is_aligned(chunk2mem(p)));
2949 dl_assert(next->prev_foot == sz);
2950 dl_assert(pinuse(p));
2951 dl_assert(next == m->top || cinuse(next));
2952 dl_assert(p->fd->bk == p);
2953 dl_assert(p->bk->fd == p);
2954 }
2955 else
2956 dl_assert(sz == SIZE_T_SIZE);
2957 }
2958 }
2959
2960
2961 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
2962 if (mem != 0) {
2963 mchunkptr p = mem2chunk(mem);
2964 size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
2965 do_check_inuse_chunk(m, p);
2966 dl_assert((sz & CHUNK_ALIGN_MASK) == 0);
2967 dl_assert(sz >= MIN_CHUNK_SIZE);
2968 dl_assert(sz >= s);
2969
2970 dl_assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
2971 }
2972 }
2973
2974
2975 static void do_check_tree(mstate m, tchunkptr t) {
2976 tchunkptr head = 0;
2977 tchunkptr u = t;
2978 bindex_t tindex = t->index;
2979 size_t tsize = chunksize(t);
2980 bindex_t idx;
2981 compute_tree_index(tsize, idx);
2982 dl_assert(tindex == idx);
2983 dl_assert(tsize >= MIN_LARGE_SIZE);
2984 dl_assert(tsize >= minsize_for_tree_index(idx));
2985 dl_assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
2986
2987 do {
2988 do_check_any_chunk(m, ((mchunkptr)u));
2989 dl_assert(u->index == tindex);
2990 dl_assert(chunksize(u) == tsize);
2991 dl_assert(!cinuse(u));
2992 dl_assert(!next_pinuse(u));
2993 dl_assert(u->fd->bk == u);
2994 dl_assert(u->bk->fd == u);
2995 if (u->parent == 0) {
2996 dl_assert(u->child[0] == 0);
2997 dl_assert(u->child[1] == 0);
2998 }
2999 else {
3000 dl_assert(head == 0);
3001 head = u;
3002 dl_assert(u->parent != u);
3003 dl_assert(u->parent->child[0] == u ||
3004 u->parent->child[1] == u ||
3005 *((tbinptr*)(u->parent)) == u);
3006 if (u->child[0] != 0) {
3007 dl_assert(u->child[0]->parent == u);
3008 dl_assert(u->child[0] != u);
3009 do_check_tree(m, u->child[0]);
3010 }
3011 if (u->child[1] != 0) {
3012 dl_assert(u->child[1]->parent == u);
3013 dl_assert(u->child[1] != u);
3014 do_check_tree(m, u->child[1]);
3015 }
3016 if (u->child[0] != 0 && u->child[1] != 0) {
3017 dl_assert(chunksize(u->child[0]) < chunksize(u->child[1]));
3018 }
3019 }
3020 u = u->fd;
3021 } while (u != t);
3022 dl_assert(head != 0);
3023 }
3024
3025
3026 static void do_check_treebin(mstate m, bindex_t i) {
3027 tbinptr* tb = treebin_at(m, i);
3028 tchunkptr t = *tb;
3029 int empty = (m->treemap & (1U << i)) == 0;
3030 if (t == 0)
3031 dl_assert(empty);
3032 if (!empty)
3033 do_check_tree(m, t);
3034 }
3035
3036
3037 static void do_check_smallbin(mstate m, bindex_t i) {
3038 sbinptr b = smallbin_at(m, i);
3039 mchunkptr p = b->bk;
3040 unsigned int empty = (m->smallmap & (1U << i)) == 0;
3041 if (p == b)
3042 dl_assert(empty);
3043 if (!empty) {
3044 for (; p != b; p = p->bk) {
3045 size_t size = chunksize(p);
3046 mchunkptr q;
3047
3048 do_check_free_chunk(m, p);
3049
3050 dl_assert(small_index(size) == i);
3051 dl_assert(p->bk == b || chunksize(p->bk) == chunksize(p));
3052
3053 q = next_chunk(p);
3054 if (q->head != FENCEPOST_HEAD)
3055 do_check_inuse_chunk(m, q);
3056 }
3057 }
3058 }
3059
3060
3061 static int bin_find(mstate m, mchunkptr x) {
3062 size_t size = chunksize(x);
3063 if (is_small(size)) {
3064 bindex_t sidx = small_index(size);
3065 sbinptr b = smallbin_at(m, sidx);
3066 if (smallmap_is_marked(m, sidx)) {
3067 mchunkptr p = b;
3068 do {
3069 if (p == x)
3070 return 1;
3071 } while ((p = p->fd) != b);
3072 }
3073 }
3074 else {
3075 bindex_t tidx;
3076 compute_tree_index(size, tidx);
3077 if (treemap_is_marked(m, tidx)) {
3078 tchunkptr t = *treebin_at(m, tidx);
3079 size_t sizebits = size << leftshift_for_tree_index(tidx);
3080 while (t != 0 && chunksize(t) != size) {
3081 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3082 sizebits <<= 1;
3083 }
3084 if (t != 0) {
3085 tchunkptr u = t;
3086 do {
3087 if (u == (tchunkptr)x)
3088 return 1;
3089 } while ((u = u->fd) != t);
3090 }
3091 }
3092 }
3093 return 0;
3094 }
3095
3096
3097 static size_t traverse_and_check(mstate m) {
3098 size_t sum = 0;
3099 if (is_initialized(m)) {
3100 msegmentptr s = &m->seg;
3101 sum += m->topsize + TOP_FOOT_SIZE;
3102 while (s != 0) {
3103 mchunkptr q = align_as_chunk(s->base);
3104 mchunkptr lastq = 0;
3105 dl_assert(pinuse(q));
3106 while (segment_holds(s, q) &&
3107 q != m->top && q->head != FENCEPOST_HEAD) {
3108 sum += chunksize(q);
3109 if (cinuse(q)) {
3110 dl_assert(!bin_find(m, q));
3111 do_check_inuse_chunk(m, q);
3112 }
3113 else {
3114 dl_assert(q == m->dv || bin_find(m, q));
3115 dl_assert(lastq == 0 || cinuse(lastq));
3116 do_check_free_chunk(m, q);
3117 }
3118 lastq = q;
3119 q = next_chunk(q);
3120 }
3121 s = s->next;
3122 }
3123 }
3124 return sum;
3125 }
3126
3127
3128 static void do_check_malloc_state(mstate m) {
3129 bindex_t i;
3130 size_t total;
3131
3132 for (i = 0; i < NSMALLBINS; ++i)
3133 do_check_smallbin(m, i);
3134 for (i = 0; i < NTREEBINS; ++i)
3135 do_check_treebin(m, i);
3136
3137 if (m->dvsize != 0) {
3138 do_check_any_chunk(m, m->dv);
3139 dl_assert(m->dvsize == chunksize(m->dv));
3140 dl_assert(m->dvsize >= MIN_CHUNK_SIZE);
3141 dl_assert(bin_find(m, m->dv) == 0);
3142 }
3143
3144 if (m->top != 0) {
3145 do_check_top_chunk(m, m->top);
3146
3147 dl_assert(m->topsize > 0);
3148 dl_assert(bin_find(m, m->top) == 0);
3149 }
3150
3151 total = traverse_and_check(m);
3152 dl_assert(total <= m->footprint);
3153 dl_assert(m->footprint <= m->max_footprint);
3154 }
3155 #endif
3156
3157
3158
3159 #if !NO_MALLINFO
3160 static struct mallinfo internal_mallinfo(mstate m) {
3161 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
3162 if (!PREACTION(m)) {
3163 check_malloc_state(m);
3164 if (is_initialized(m)) {
3165 size_t nfree = SIZE_T_ONE;
3166 size_t mfree = m->topsize + TOP_FOOT_SIZE;
3167 size_t sum = mfree;
3168 msegmentptr s = &m->seg;
3169 while (s != 0) {
3170 mchunkptr q = align_as_chunk(s->base);
3171 while (segment_holds(s, q) &&
3172 q != m->top && q->head != FENCEPOST_HEAD) {
3173 size_t sz = chunksize(q);
3174 sum += sz;
3175 if (!cinuse(q)) {
3176 mfree += sz;
3177 ++nfree;
3178 }
3179 q = next_chunk(q);
3180 }
3181 s = s->next;
3182 }
3183
3184 nm.arena = sum;
3185 nm.ordblks = nfree;
3186 nm.hblkhd = m->footprint - sum;
3187 nm.usmblks = m->max_footprint;
3188 nm.uordblks = m->footprint - mfree;
3189 nm.fordblks = mfree;
3190 nm.keepcost = m->topsize;
3191 }
3192
3193 POSTACTION(m);
3194 }
3195 return nm;
3196 }
3197 #endif
3198
3199 static void internal_malloc_stats(mstate m) {
3200 if (!PREACTION(m)) {
3201 size_t maxfp = 0;
3202 size_t fp = 0;
3203 size_t used = 0;
3204 check_malloc_state(m);
3205 if (is_initialized(m)) {
3206 msegmentptr s = &m->seg;
3207 maxfp = m->max_footprint;
3208 fp = m->footprint;
3209 used = fp - (m->topsize + TOP_FOOT_SIZE);
3210
3211 while (s != 0) {
3212 mchunkptr q = align_as_chunk(s->base);
3213 while (segment_holds(s, q) &&
3214 q != m->top && q->head != FENCEPOST_HEAD) {
3215 if (!cinuse(q))
3216 used -= chunksize(q);
3217 q = next_chunk(q);
3218 }
3219 s = s->next;
3220 }
3221 }
3222
3223 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
3224 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
3225 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
3226
3227 POSTACTION(m);
3228 }
3229 }
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241 #define insert_small_chunk(M, P, S) {\
3242 bindex_t IDX = small_index(S);\
3243 mchunkptr B = smallbin_at(M, IDX);\
3244 mchunkptr F = B;\
3245 dl_assert(S >= MIN_CHUNK_SIZE);\
3246 if (!smallmap_is_marked(M, IDX))\
3247 mark_smallmap(M, IDX);\
3248 else if (RTCHECK(ok_address(M, B->fd)))\
3249 F = B->fd;\
3250 else {\
3251 CORRUPTION_ERROR_ACTION(M);\
3252 }\
3253 B->fd = P;\
3254 F->bk = P;\
3255 P->fd = F;\
3256 P->bk = B;\
3257 }
3258
3259
3260 #define unlink_small_chunk(M, P, S) {\
3261 mchunkptr F = P->fd;\
3262 mchunkptr B = P->bk;\
3263 bindex_t IDX = small_index(S);\
3264 dl_assert(P != B);\
3265 dl_assert(P != F);\
3266 dl_assert(chunksize(P) == small_index2size(IDX));\
3267 if (F == B)\
3268 clear_smallmap(M, IDX);\
3269 else if (RTCHECK((F == smallbin_at(M,IDX) || ok_address(M, F)) &&\
3270 (B == smallbin_at(M,IDX) || ok_address(M, B)))) {\
3271 F->bk = B;\
3272 B->fd = F;\
3273 }\
3274 else {\
3275 CORRUPTION_ERROR_ACTION(M);\
3276 }\
3277 }
3278
3279
3280 #define unlink_first_small_chunk(M, B, P, IDX) {\
3281 mchunkptr F = P->fd;\
3282 dl_assert(P != B);\
3283 dl_assert(P != F);\
3284 dl_assert(chunksize(P) == small_index2size(IDX));\
3285 if (B == F)\
3286 clear_smallmap(M, IDX);\
3287 else if (RTCHECK(ok_address(M, F))) {\
3288 B->fd = F;\
3289 F->bk = B;\
3290 }\
3291 else {\
3292 CORRUPTION_ERROR_ACTION(M);\
3293 }\
3294 }
3295
3296
3297
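/* Make chunk P of size S the new designated victim (dv), returning any
   previous dv to its smallbin. */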
3298 #define replace_dv(M, P, S) {\
3299 size_t DVS = M->dvsize;\
3300 if (DVS != 0) {\
3301 mchunkptr DV = M->dv;\
3302 dl_assert(is_small(DVS));\
3303 insert_small_chunk(M, DV, DVS);\
3304 }\
3305 M->dvsize = S;\
3306 M->dv = P;\
3307 }
3308
3309
3310
3311
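/* Insert chunk X of size S into its treebin: it becomes the root if
   the bin was empty, a new tree node if no chunk of this size exists,
   or is chained onto the fd/bk list of an equal-sized node. */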
3312 #define insert_large_chunk(M, X, S) {\
3313 tbinptr* H;\
3314 bindex_t IDX;\
3315 compute_tree_index(S, IDX);\
3316 H = treebin_at(M, IDX);\
3317 X->index = IDX;\
3318 X->child[0] = X->child[1] = 0;\
3319 if (!treemap_is_marked(M, IDX)) {\
3320 mark_treemap(M, IDX);\
3321 *H = X;\
3322 X->parent = (tchunkptr)H;\
3323 X->fd = X->bk = X;\
3324 }\
3325 else {\
3326 tchunkptr T = *H;\
3327 size_t K = S << leftshift_for_tree_index(IDX);\
3328 for (;;) {\
3329 if (chunksize(T) != S) {\
3330 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3331 K <<= 1;\
3332 if (*C != 0)\
3333 T = *C;\
3334 else if (RTCHECK(ok_address(M, C))) {\
3335 *C = X;\
3336 X->parent = T;\
3337 X->fd = X->bk = X;\
3338 break;\
3339 }\
3340 else {\
3341 CORRUPTION_ERROR_ACTION(M);\
3342 break;\
3343 }\
3344 }\
3345 else {\
3346 tchunkptr F = T->fd;\
3347 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3348 T->fd = F->bk = X;\
3349 X->fd = F;\
3350 X->bk = T;\
3351 X->parent = 0;\
3352 break;\
3353 }\
3354 else {\
3355 CORRUPTION_ERROR_ACTION(M);\
3356 break;\
3357 }\
3358 }\
3359 }\
3360 }\
3361 }
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
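/* Unlink chunk X from its treebin.  If X has same-sized list siblings
   they are spliced around it; otherwise X's rightmost descendant R
   replaces it in the tree, and parent/child links are repaired. */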
3380 #define unlink_large_chunk(M, X) {\
3381 tchunkptr XP = X->parent;\
3382 tchunkptr R;\
3383 if (X->bk != X) {\
3384 tchunkptr F = X->fd;\
3385 R = X->bk;\
3386 if (RTCHECK(ok_address(M, F))) {\
3387 F->bk = R;\
3388 R->fd = F;\
3389 }\
3390 else {\
3391 CORRUPTION_ERROR_ACTION(M);\
3392 }\
3393 }\
3394 else {\
3395 tchunkptr* RP;\
3396 if (((R = *(RP = &(X->child[1]))) != 0) ||\
3397 ((R = *(RP = &(X->child[0]))) != 0)) {\
3398 tchunkptr* CP;\
3399 while ((*(CP = &(R->child[1])) != 0) ||\
3400 (*(CP = &(R->child[0])) != 0)) {\
3401 R = *(RP = CP);\
3402 }\
3403 if (RTCHECK(ok_address(M, RP)))\
3404 *RP = 0;\
3405 else {\
3406 CORRUPTION_ERROR_ACTION(M);\
3407 }\
3408 }\
3409 }\
3410 if (XP != 0) {\
3411 tbinptr* H = treebin_at(M, X->index);\
3412 if (X == *H) {\
3413 if ((*H = R) == 0) \
3414 clear_treemap(M, X->index);\
3415 }\
3416 else if (RTCHECK(ok_address(M, XP))) {\
3417 if (XP->child[0] == X) \
3418 XP->child[0] = R;\
3419 else \
3420 XP->child[1] = R;\
3421 }\
3422 else\
3423 CORRUPTION_ERROR_ACTION(M);\
3424 if (R != 0) {\
3425 if (RTCHECK(ok_address(M, R))) {\
3426 tchunkptr C0, C1;\
3427 R->parent = XP;\
3428 if ((C0 = X->child[0]) != 0) {\
3429 if (RTCHECK(ok_address(M, C0))) {\
3430 R->child[0] = C0;\
3431 C0->parent = R;\
3432 }\
3433 else\
3434 CORRUPTION_ERROR_ACTION(M);\
3435 }\
3436 if ((C1 = X->child[1]) != 0) {\
3437 if (RTCHECK(ok_address(M, C1))) {\
3438 R->child[1] = C1;\
3439 C1->parent = R;\
3440 }\
3441 else\
3442 CORRUPTION_ERROR_ACTION(M);\
3443 }\
3444 }\
3445 else\
3446 CORRUPTION_ERROR_ACTION(M);\
3447 }\
3448 }\
3449 }
3450
3451
3452
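/* Dispatch to the smallbin or treebin versions based on chunk size. */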
3453 #define insert_chunk(M, P, S)\
3454 if (is_small(S)) insert_small_chunk(M, P, S)\
3455 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3456
3457 #define unlink_chunk(M, P, S)\
3458 if (is_small(S)) unlink_small_chunk(M, P, S)\
3459 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3460
3461
3462
3463
3464 #if ONLY_MSPACES
3465 #define internal_malloc(m, b) mspace_malloc(m, b)
3466 #define internal_free(m, mem) mspace_free(m,mem);
3467 #else
3468 #if MSPACES
3469 #define internal_malloc(m, b)\
3470 (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3471 #define internal_free(m, mem)\
3472 if (m == gm) dlfree(mem); else mspace_free(m,mem);
3473 #else
3474 #define internal_malloc(m, b) dlmalloc(b)
3475 #define internal_free(m, mem) dlfree(mem)
3476 #endif
3477 #endif
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
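/*
  mmap_alloc:
  Directly mmap a chunk large enough for an nb-byte request.  The
  alignment offset is recorded in prev_foot together with
  IS_MMAPPED_BIT, and trailing fenceposts mark the end so the chunk
  can be unmapped individually when freed.
*/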
3492 static void* mmap_alloc(mstate m, size_t nb) {
3493 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3494 if (mmsize > nb) {
3495 char* mm = (char*)(DIRECT_MMAP(mmsize));
3496 if (mm != CMFAIL) {
3497 size_t offset = align_offset(chunk2mem(mm));
3498 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3499 mchunkptr p = (mchunkptr)(mm + offset);
3500 p->prev_foot = offset | IS_MMAPPED_BIT;
3501 (p)->head = (psize|CINUSE_BIT);
3502 mark_inuse_foot(m, p, psize);
3503 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3504 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
3505
3506 if (mm < m->least_addr)
3507 m->least_addr = mm;
3508 if ((m->footprint += mmsize) > m->max_footprint)
3509 m->max_footprint = m->footprint;
3510 dl_assert(is_aligned(chunk2mem(p)));
3511 check_mmapped_chunk(m, p);
3512 return chunk2mem(p);
3513 }
3514 }
3515 return 0;
3516 }
3517
3518
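/* Resize an individually mmapped chunk: keep it if it is still large
   enough, otherwise try CALL_MREMAP.  Returns 0 for small requests or
   when remapping fails. */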
3519 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
3520 size_t oldsize = chunksize(oldp);
3521 if (is_small(nb))
3522 return 0;
3523
3524 if (oldsize >= nb + SIZE_T_SIZE &&
3525 (oldsize - nb) <= (mparams.granularity << 1))
3526 return oldp;
3527 else {
3528 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3529 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3530 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3531 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
3532 oldmmsize, newmmsize, 1);
3533 if (cp != CMFAIL) {
3534 mchunkptr newp = (mchunkptr)(cp + offset);
3535 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3536 newp->head = (psize|CINUSE_BIT);
3537 mark_inuse_foot(m, newp, psize);
3538 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3539 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
3540
3541 if (cp < m->least_addr)
3542 m->least_addr = cp;
3543 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3544 m->max_footprint = m->footprint;
3545 check_mmapped_chunk(m, newp);
3546 return newp;
3547 }
3548 }
3549 return 0;
3550 }
3551
3552
3553
3554
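/* Establish p as the top chunk spanning psize bytes and reset the
   trim check threshold. */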
3555 static void init_top(mstate m, mchunkptr p, size_t psize) {
3556
3557 size_t offset = align_offset(chunk2mem(p));
3558 p = (mchunkptr)((char*)p + offset);
3559 psize -= offset;
3560
3561 m->top = p;
3562 m->topsize = psize;
3563 p->head = psize | PINUSE_BIT;
3564
3565 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3566 m->trim_check = mparams.trim_threshold;
3567 }
3568
3569
3570 static void init_bins(mstate m) {
3571
3572 bindex_t i;
3573 for (i = 0; i < NSMALLBINS; ++i) {
3574 sbinptr bin = smallbin_at(m,i);
3575 bin->fd = bin->bk = bin;
3576 }
3577 }
3578
3579 #if PROCEED_ON_ERROR
3580
3581
3582 static void reset_on_error(mstate m) {
3583 int i;
3584 ++malloc_corruption_error_count;
3585
3586 m->smallbins = m->treebins = 0;
3587 m->dvsize = m->topsize = 0;
3588 m->seg.base = 0;
3589 m->seg.size = 0;
3590 m->seg.next = 0;
3591 m->top = m->dv = 0;
3592 for (i = 0; i < NTREEBINS; ++i)
3593 *treebin_at(m, i) = 0;
3594 init_bins(m);
3595 }
3596 #endif
3597
3598
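/*
  prepend_alloc:
  Allocate nb bytes at newbase, memory obtained directly below an
  existing segment starting at oldbase.  The remainder between the new
  chunk and oldbase is merged with top or dv, or coalesced with the
  old first chunk and freed.
*/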
3599 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
3600 size_t nb) {
3601 mchunkptr p = align_as_chunk(newbase);
3602 mchunkptr oldfirst = align_as_chunk(oldbase);
3603 size_t psize = (char*)oldfirst - (char*)p;
3604 mchunkptr q = chunk_plus_offset(p, nb);
3605 size_t qsize = psize - nb;
3606 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3607
3608 dl_assert((char*)oldfirst > (char*)q);
3609 dl_assert(pinuse(oldfirst));
3610 dl_assert(qsize >= MIN_CHUNK_SIZE);
3611
3612
3613 if (oldfirst == m->top) {
3614 size_t tsize = m->topsize += qsize;
3615 m->top = q;
3616 q->head = tsize | PINUSE_BIT;
3617 check_top_chunk(m, q);
3618 }
3619 else if (oldfirst == m->dv) {
3620 size_t dsize = m->dvsize += qsize;
3621 m->dv = q;
3622 set_size_and_pinuse_of_free_chunk(q, dsize);
3623 }
3624 else {
3625 if (!cinuse(oldfirst)) {
3626 size_t nsize = chunksize(oldfirst);
3627 unlink_chunk(m, oldfirst, nsize);
3628 oldfirst = chunk_plus_offset(oldfirst, nsize);
3629 qsize += nsize;
3630 }
3631 set_free_with_pinuse(q, qsize, oldfirst);
3632 insert_chunk(m, q, qsize);
3633 check_free_chunk(m, q);
3634 }
3635
3636 check_malloced_chunk(m, chunk2mem(p), nb);
3637 return chunk2mem(p);
3638 }
3639
3640
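/*
  add_segment:
  Record the non-contiguous region [tbase, tbase+tsize) as a new
  segment and make it the new top.  The segment record lives in a
  chunk carved from the end of the old top segment, followed by
  fenceposts; any leftover space before it is freed.
*/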
3641 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
3642
3643 char* old_top = (char*)m->top;
3644 msegmentptr oldsp = segment_holding(m, old_top);
3645 char* old_end = oldsp->base + oldsp->size;
3646 size_t ssize = pad_request(sizeof(struct malloc_segment));
3647 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3648 size_t offset = align_offset(chunk2mem(rawsp));
3649 char* asp = rawsp + offset;
3650 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
3651 mchunkptr sp = (mchunkptr)csp;
3652 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
3653 mchunkptr tnext = chunk_plus_offset(sp, ssize);
3654 mchunkptr p = tnext;
3655 int nfences = 0;
3656
3657
3658 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
3659
3660
3661 dl_assert(is_aligned(ss));
3662 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3663 *ss = m->seg;
3664 m->seg.base = tbase;
3665 m->seg.size = tsize;
3666 m->seg.sflags = mmapped;
3667 m->seg.next = ss;
3668
3669
3670 for (;;) {
3671 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3672 p->head = FENCEPOST_HEAD;
3673 ++nfences;
3674 if ((char*)(&(nextp->head)) < old_end)
3675 p = nextp;
3676 else
3677 break;
3678 }
3679 dl_assert(nfences >= 2);
3680
3681
3682 if (csp != old_top) {
3683 mchunkptr q = (mchunkptr)old_top;
3684 size_t psize = csp - old_top;
3685 mchunkptr tn = chunk_plus_offset(q, psize);
3686 set_free_with_pinuse(q, psize, tn);
3687 insert_chunk(m, q, psize);
3688 }
3689
3690 check_top_chunk(m, m->top);
3691 }
3692
3693
3694
3695
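/*
  sys_alloc:
  Get memory from the system for an nb-byte request.  Tries, in order:
  direct mmap for requests at or above the mmap threshold, contiguous
  extension via MORECORE, a fresh mmap, and finally non-contiguous
  MORECORE.  New space is merged into an adjacent segment when
  possible, otherwise installed with add_segment, and the request is
  then served from top.
*/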
3696 static void* sys_alloc(mstate m, size_t nb) {
3697 char* tbase = CMFAIL;
3698 size_t tsize = 0;
3699 flag_t mmap_flag = 0;
3700
3701 init_mparams();
3702
3703
3704 if (use_mmap(m) && nb >= mparams.mmap_threshold) {
3705 void* mem = mmap_alloc(m, nb);
3706 if (mem != 0)
3707 return mem;
3708 }
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
3728 char* br = CMFAIL;
3729 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
3730 size_t asize = 0;
3731 ACQUIRE_MORECORE_LOCK();
3732
3733 if (ss == 0) {
3734 char* base = (char*)CALL_MORECORE(0);
3735 if (base != CMFAIL) {
3736 asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3737
3738 if (!is_page_aligned(base))
3739 asize += (page_align((size_t)base) - (size_t)base);
3740
3741 if (asize < HALF_MAX_SIZE_T &&
3742 (br = (char*)(CALL_MORECORE(asize))) == base) {
3743 tbase = base;
3744 tsize = asize;
3745 }
3746 }
3747 }
3748 else {
3749
3750 asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
3751
3752 if (asize < HALF_MAX_SIZE_T &&
3753 (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
3754 tbase = br;
3755 tsize = asize;
3756 }
3757 }
3758
3759 if (tbase == CMFAIL) {
3760 if (br != CMFAIL) {
3761 if (asize < HALF_MAX_SIZE_T &&
3762 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3763 size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
3764 if (esize < HALF_MAX_SIZE_T) {
3765 char* end = (char*)CALL_MORECORE(esize);
3766 if (end != CMFAIL)
3767 asize += esize;
3768 else {
3769 (void) CALL_MORECORE(-asize);
3770 br = CMFAIL;
3771 }
3772 }
3773 }
3774 }
3775 if (br != CMFAIL) {
3776 tbase = br;
3777 tsize = asize;
3778 }
3779 else
3780 disable_contiguous(m);
3781 }
3782
3783 RELEASE_MORECORE_LOCK();
3784 }
3785
3786 if (DL_HAVE_MMAP && tbase == CMFAIL) {
3787 size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
3788 size_t rsize = granularity_align(req);
3789 if (rsize > nb) {
3790 char* mp = (char*)(CALL_MMAP(rsize));
3791 if (mp != CMFAIL) {
3792 tbase = mp;
3793 tsize = rsize;
3794 mmap_flag = IS_MMAPPED_BIT;
3795 }
3796 }
3797 }
3798
3799 if (HAVE_MORECORE && tbase == CMFAIL) {
3800 size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
3801 if (asize < HALF_MAX_SIZE_T) {
3802 char* br = CMFAIL;
3803 char* end = CMFAIL;
3804 ACQUIRE_MORECORE_LOCK();
3805 br = (char*)(CALL_MORECORE(asize));
3806 end = (char*)(CALL_MORECORE(0));
3807 RELEASE_MORECORE_LOCK();
3808 if (br != CMFAIL && end != CMFAIL && br < end) {
3809 size_t ssize = end - br;
3810 if (ssize > nb + TOP_FOOT_SIZE) {
3811 tbase = br;
3812 tsize = ssize;
3813 }
3814 }
3815 }
3816 }
3817
3818 if (tbase != CMFAIL) {
3819
3820 if ((m->footprint += tsize) > m->max_footprint)
3821 m->max_footprint = m->footprint;
3822
3823 if (!is_initialized(m)) {
3824 m->seg.base = m->least_addr = tbase;
3825 m->seg.size = tsize;
3826 m->seg.sflags = mmap_flag;
3827 m->magic = mparams.magic;
3828 m->release_checks = MAX_RELEASE_CHECK_RATE;
3829 init_bins(m);
3830 #if !ONLY_MSPACES
3831 if (is_global(m))
3832 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
3833 else
3834 #endif
3835 {
3836
3837 mchunkptr mn = next_chunk(mem2chunk(m));
3838 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
3839 }
3840 }
3841
3842 else {
3843
3844 msegmentptr sp = &m->seg;
3845
3846 while (sp != 0 && tbase != sp->base + sp->size)
3847 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
3848 if (sp != 0 &&
3849 !is_extern_segment(sp) &&
3850 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
3851 segment_holds(sp, m->top)) {
3852 sp->size += tsize;
3853 init_top(m, m->top, m->topsize + tsize);
3854 }
3855 else {
3856 if (tbase < m->least_addr)
3857 m->least_addr = tbase;
3858 sp = &m->seg;
3859 while (sp != 0 && sp->base != tbase + tsize)
3860 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
3861 if (sp != 0 &&
3862 !is_extern_segment(sp) &&
3863 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3864 char* oldbase = sp->base;
3865 sp->base = tbase;
3866 sp->size += tsize;
3867 return prepend_alloc(m, tbase, oldbase, nb);
3868 }
3869 else
3870 add_segment(m, tbase, tsize, mmap_flag);
3871 }
3872 }
3873
3874 if (nb < m->topsize) {
3875 size_t rsize = m->topsize -= nb;
3876 mchunkptr p = m->top;
3877 mchunkptr r = m->top = chunk_plus_offset(p, nb);
3878 r->head = rsize | PINUSE_BIT;
3879 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3880 check_top_chunk(m, m->top);
3881 check_malloced_chunk(m, chunk2mem(p), nb);
3882 return chunk2mem(p);
3883 }
3884 }
3885
3886 MALLOC_FAILURE_ACTION;
3887 return 0;
3888 }
3889
3890
3891
3892
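/* Unmap and unlink every mmapped, non-extern segment whose only chunk
   is free; returns the number of bytes released and resets
   release_checks. */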
3893 static size_t release_unused_segments(mstate m) {
3894 size_t released = 0;
3895 int nsegs = 0;
3896 msegmentptr pred = &m->seg;
3897 msegmentptr sp = pred->next;
3898 while (sp != 0) {
3899 char* base = sp->base;
3900 size_t size = sp->size;
3901 msegmentptr next = sp->next;
3902 ++nsegs;
3903 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3904 mchunkptr p = align_as_chunk(base);
3905 size_t psize = chunksize(p);
3906
3907 if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
3908 tchunkptr tp = (tchunkptr)p;
3909 dl_assert(segment_holds(sp, (char*)sp));
3910 if (p == m->dv) {
3911 m->dv = 0;
3912 m->dvsize = 0;
3913 }
3914 else {
3915 unlink_large_chunk(m, tp);
3916 }
3917 if (CALL_MUNMAP(base, size) == 0) {
3918 released += size;
3919 m->footprint -= size;
3920
3921 sp = pred;
3922 sp->next = next;
3923 }
3924 else {
3925 insert_large_chunk(m, tp, psize);
3926 }
3927 }
3928 }
3929 if (NO_SEGMENT_TRAVERSAL)
3930 break;
3931 pred = sp;
3932 sp = next;
3933 }
3934
3935 m->release_checks = ((nsegs > (int)MAX_RELEASE_CHECK_RATE)?
3936 (size_t)nsegs : MAX_RELEASE_CHECK_RATE);
3937 return released;
3938 }
3939
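/* Give back memory beyond pad bytes of top, either by shrinking an
   mmapped segment or via a negative MORECORE, then release unused
   mmapped segments. */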
3940 static int sys_trim(mstate m, size_t pad) {
3941 size_t released = 0;
3942 if (pad < MAX_REQUEST && is_initialized(m)) {
3943 pad += TOP_FOOT_SIZE;
3944
3945 if (m->topsize > pad) {
3946
3947 size_t unit = mparams.granularity;
3948 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3949 SIZE_T_ONE) * unit;
3950 msegmentptr sp = segment_holding(m, (char*)m->top);
3951
3952 if (!is_extern_segment(sp)) {
3953 if (is_mmapped_segment(sp)) {
3954 if (DL_HAVE_MMAP &&
3955 sp->size >= extra &&
3956 !has_segment_link(m, sp)) {
3957 size_t newsize = sp->size - extra;
3958
3959 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
3960 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3961 released = extra;
3962 }
3963 }
3964 }
3965 else if (HAVE_MORECORE) {
3966 if (extra >= HALF_MAX_SIZE_T)
3967 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3968 ACQUIRE_MORECORE_LOCK();
3969 {
3970
3971 char* old_br = (char*)(CALL_MORECORE(0));
3972 if (old_br == sp->base + sp->size) {
3973 char* rel_br = (char*)(CALL_MORECORE(-extra));
3974 char* new_br = (char*)(CALL_MORECORE(0));
3975 if (rel_br != CMFAIL && new_br < old_br)
3976 released = old_br - new_br;
3977 }
3978 }
3979 RELEASE_MORECORE_LOCK();
3980 }
3981 }
3982
3983 if (released != 0) {
3984 sp->size -= released;
3985 m->footprint -= released;
3986 init_top(m, m->top, m->topsize - released);
3987 check_top_chunk(m, m->top);
3988 }
3989 }
3990
3991
3992 if (DL_HAVE_MMAP)
3993 released += release_unused_segments(m);
3994
3995
3996 if (released == 0 && m->topsize > m->trim_check)
3997 m->trim_check = MAX_SIZE_T;
3998 }
3999
4000 return (released != 0)? 1 : 0;
4001 }
4002
4003
4004
4005
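/* Allocate a large request from the best-fitting treebin chunk,
   splitting off the remainder when it is at least MIN_CHUNK_SIZE; the
   chunk is used only if it fits more tightly than the current dv. */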
4006 static void* tmalloc_large(mstate m, size_t nb) {
4007 tchunkptr v = 0;
4008 size_t rsize = -nb;
4009 tchunkptr t;
4010 bindex_t idx;
4011 compute_tree_index(nb, idx);
4012
4013 if ((t = *treebin_at(m, idx)) != 0) {
4014
4015 size_t sizebits = nb << leftshift_for_tree_index(idx);
4016 tchunkptr rst = 0;
4017 for (;;) {
4018 tchunkptr rt;
4019 size_t trem = chunksize(t) - nb;
4020 if (trem < rsize) {
4021 v = t;
4022 if ((rsize = trem) == 0)
4023 break;
4024 }
4025 rt = t->child[1];
4026 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
4027 if (rt != 0 && rt != t)
4028 rst = rt;
4029 if (t == 0) {
4030 t = rst;
4031 break;
4032 }
4033 sizebits <<= 1;
4034 }
4035 }
4036
4037 if (t == 0 && v == 0) {
4038 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
4039 if (leftbits != 0) {
4040 bindex_t i;
4041 binmap_t leastbit = least_bit(leftbits);
4042 compute_bit2idx(leastbit, i);
4043 t = *treebin_at(m, i);
4044 }
4045 }
4046
4047 while (t != 0) {
4048 size_t trem = chunksize(t) - nb;
4049 if (trem < rsize) {
4050 rsize = trem;
4051 v = t;
4052 }
4053 t = leftmost_child(t);
4054 }
4055
4056
4057 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
4058 if (RTCHECK(ok_address(m, v))) {
4059 mchunkptr r = chunk_plus_offset(v, nb);
4060 dl_assert(chunksize(v) == rsize + nb);
4061 if (RTCHECK(ok_next(v, r))) {
4062 unlink_large_chunk(m, v);
4063 if (rsize < MIN_CHUNK_SIZE)
4064 set_inuse_and_pinuse(m, v, (rsize + nb));
4065 else {
4066 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4067 set_size_and_pinuse_of_free_chunk(r, rsize);
4068 insert_chunk(m, r, rsize);
4069 }
4070 return chunk2mem(v);
4071 }
4072 }
4073 CORRUPTION_ERROR_ACTION(m);
4074 }
4075 return 0;
4076 }
4077
4078
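/* Allocate a small request from the smallest chunk in the smallest
   non-empty treebin; any usable remainder becomes the new dv. */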
4079 static void* tmalloc_small(mstate m, size_t nb) {
4080 tchunkptr t, v;
4081 size_t rsize;
4082 bindex_t i;
4083 binmap_t leastbit = least_bit(m->treemap);
4084 compute_bit2idx(leastbit, i);
4085
4086 v = t = *treebin_at(m, i);
4087 rsize = chunksize(t) - nb;
4088
4089 while ((t = leftmost_child(t)) != 0) {
4090 size_t trem = chunksize(t) - nb;
4091 if (trem < rsize) {
4092 rsize = trem;
4093 v = t;
4094 }
4095 }
4096
4097 if (RTCHECK(ok_address(m, v))) {
4098 mchunkptr r = chunk_plus_offset(v, nb);
4099 dl_assert(chunksize(v) == rsize + nb);
4100 if (RTCHECK(ok_next(v, r))) {
4101 unlink_large_chunk(m, v);
4102 if (rsize < MIN_CHUNK_SIZE)
4103 set_inuse_and_pinuse(m, v, (rsize + nb));
4104 else {
4105 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
4106 set_size_and_pinuse_of_free_chunk(r, rsize);
4107 replace_dv(m, r, rsize);
4108 }
4109 return chunk2mem(v);
4110 }
4111 }
4112
4113 CORRUPTION_ERROR_ACTION(m);
4114 return 0;
4115 }
4116
4117
4118
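/*
  internal_realloc:
  Try to resize oldmem in place: mremap an mmapped chunk, shrink and
  free the tail, or grow into an adjacent top chunk.  Otherwise fall
  back to malloc, copy, and free.
*/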
4119 static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
4120 if (bytes >= MAX_REQUEST) {
4121 MALLOC_FAILURE_ACTION;
4122 return 0;
4123 }
4124 if (!PREACTION(m)) {
4125 mchunkptr oldp = mem2chunk(oldmem);
4126 size_t oldsize = chunksize(oldp);
4127 mchunkptr next = chunk_plus_offset(oldp, oldsize);
4128 mchunkptr newp = 0;
4129 void* extra = 0;
4130
4131
4132
4133 if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
4134 ok_next(oldp, next) && ok_pinuse(next))) {
4135 size_t nb = request2size(bytes);
4136 if (is_mmapped(oldp))
4137 newp = mmap_resize(m, oldp, nb);
4138 else if (oldsize >= nb) {
4139 size_t rsize = oldsize - nb;
4140 newp = oldp;
4141 if (rsize >= MIN_CHUNK_SIZE) {
4142 mchunkptr remainder = chunk_plus_offset(newp, nb);
4143 set_inuse(m, newp, nb);
4144 set_inuse(m, remainder, rsize);
4145 extra = chunk2mem(remainder);
4146 }
4147 }
4148 else if (next == m->top && oldsize + m->topsize > nb) {
4149
4150 size_t newsize = oldsize + m->topsize;
4151 size_t newtopsize = newsize - nb;
4152 mchunkptr newtop = chunk_plus_offset(oldp, nb);
4153 set_inuse(m, oldp, nb);
4154 newtop->head = newtopsize |PINUSE_BIT;
4155 m->top = newtop;
4156 m->topsize = newtopsize;
4157 newp = oldp;
4158 }
4159 }
4160 else {
4161 USAGE_ERROR_ACTION(m, oldmem);
4162 POSTACTION(m);
4163 return 0;
4164 }
4165
4166 POSTACTION(m);
4167
4168 if (newp != 0) {
4169 if (extra != 0) {
4170 internal_free(m, extra);
4171 }
4172 check_inuse_chunk(m, newp);
4173 return chunk2mem(newp);
4174 }
4175 else {
4176 void* newmem = internal_malloc(m, bytes);
4177 if (newmem != 0) {
4178 size_t oc = oldsize - overhead_for(oldp);
4179 memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
4180 internal_free(m, oldmem);
4181 }
4182 return newmem;
4183 }
4184 }
4185 return 0;
4186 }
4187
4188
4189
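/*
  internal_memalign:
  Over-allocate by alignment + MIN_CHUNK_SIZE, split off a leading
  chunk to reach an address aligned to the (power-of-two) alignment,
  split off any trailing excess, and free both remainders.
*/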
4190 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
4191 if (alignment <= MALLOC_ALIGNMENT)
4192 return internal_malloc(m, bytes);
4193 if (alignment < MIN_CHUNK_SIZE)
4194 alignment = MIN_CHUNK_SIZE;
4195 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
4196 size_t a = MALLOC_ALIGNMENT << 1;
4197 while (a < alignment) a <<= 1;
4198 alignment = a;
4199 }
4200
4201 if (bytes >= MAX_REQUEST - alignment) {
4202 if (m != 0) {
4203 MALLOC_FAILURE_ACTION;
4204 }
4205 }
4206 else {
4207 size_t nb = request2size(bytes);
4208 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4209 char* mem = (char*)internal_malloc(m, req);
4210 if (mem != 0) {
4211 void* leader = 0;
4212 void* trailer = 0;
4213 mchunkptr p = mem2chunk(mem);
4214
4215 if (PREACTION(m)) return 0;
4216 if ((((size_t)(mem)) % alignment) != 0) {
4217
4218
4219
4220
4221
4222
4223
4224
4225 char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
4226 alignment -
4227 SIZE_T_ONE)) &
4228 -alignment));
4229 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
4230 br : br+alignment;
4231 mchunkptr newp = (mchunkptr)pos;
4232 size_t leadsize = pos - (char*)(p);
4233 size_t newsize = chunksize(p) - leadsize;
4234
4235 if (is_mmapped(p)) {
4236 newp->prev_foot = p->prev_foot + leadsize;
4237 newp->head = (newsize|CINUSE_BIT);
4238 }
4239 else {
4240 set_inuse(m, newp, newsize);
4241 set_inuse(m, p, leadsize);
4242 leader = chunk2mem(p);
4243 }
4244 p = newp;
4245 }
4246
4247
4248 if (!is_mmapped(p)) {
4249 size_t size = chunksize(p);
4250 if (size > nb + MIN_CHUNK_SIZE) {
4251 size_t remainder_size = size - nb;
4252 mchunkptr remainder = chunk_plus_offset(p, nb);
4253 set_inuse(m, p, nb);
4254 set_inuse(m, remainder, remainder_size);
4255 trailer = chunk2mem(remainder);
4256 }
4257 }
4258
4259 dl_assert(chunksize(p) >= nb);
4260 dl_assert((((size_t)(chunk2mem(p))) % alignment) == 0);
4261 check_inuse_chunk(m, p);
4262 POSTACTION(m);
4263 if (leader != 0) {
4264 internal_free(m, leader);
4265 }
4266 if (trailer != 0) {
4267 internal_free(m, trailer);
4268 }
4269 return chunk2mem(p);
4270 }
4271 }
4272 return 0;
4273 }
4274
4275
4276
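/*
  ialloc:
  Common helper for independent_calloc and independent_comalloc.
  opts bit 0 means sizes points to a single element size; bit 1 means
  clear the memory.  A single chunk covering all elements (plus the
  pointer array when chunks == 0) is allocated with mmap disabled,
  then split into individually freeable chunks.
*/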
4277 static void** ialloc(mstate m,
4278 size_t n_elements,
4279 size_t* sizes,
4280 int opts,
4281 void* chunks[]) {
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291 size_t element_size;
4292 size_t contents_size;
4293 size_t array_size;
4294 void* mem;
4295 mchunkptr p;
4296 size_t remainder_size;
4297 void** marray;
4298 mchunkptr array_chunk;
4299 flag_t was_enabled;
4300 size_t size;
4301 size_t i;
4302
4303
4304 if (chunks != 0) {
4305 if (n_elements == 0)
4306 return chunks;
4307 marray = chunks;
4308 array_size = 0;
4309 }
4310 else {
4311
4312 if (n_elements == 0)
4313 return (void**)internal_malloc(m, 0);
4314 marray = 0;
4315 array_size = request2size(n_elements * (sizeof(void*)));
4316 }
4317
4318
4319 if (opts & 0x1) {
4320 element_size = request2size(*sizes);
4321 contents_size = n_elements * element_size;
4322 }
4323 else {
4324 element_size = 0;
4325 contents_size = 0;
4326 for (i = 0; i != n_elements; ++i)
4327 contents_size += request2size(sizes[i]);
4328 }
4329
4330 size = contents_size + array_size;
4331
4332
4333
4334
4335
4336
4337 was_enabled = use_mmap(m);
4338 disable_mmap(m);
4339 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4340 if (was_enabled)
4341 enable_mmap(m);
4342 if (mem == 0)
4343 return 0;
4344
4345 if (PREACTION(m)) return 0;
4346 p = mem2chunk(mem);
4347 remainder_size = chunksize(p);
4348
4349 dl_assert(!is_mmapped(p));
4350
4351 if (opts & 0x2) {
4352 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4353 }
4354
4355
4356 if (marray == 0) {
4357 size_t array_chunk_size;
4358 array_chunk = chunk_plus_offset(p, contents_size);
4359 array_chunk_size = remainder_size - contents_size;
4360 marray = (void**) (chunk2mem(array_chunk));
4361 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4362 remainder_size = contents_size;
4363 }
4364
4365
4366 for (i = 0; ; ++i) {
4367 marray[i] = chunk2mem(p);
4368 if (i != n_elements-1) {
4369 if (element_size != 0)
4370 size = element_size;
4371 else
4372 size = request2size(sizes[i]);
4373 remainder_size -= size;
4374 set_size_and_pinuse_of_inuse_chunk(m, p, size);
4375 p = chunk_plus_offset(p, size);
4376 }
4377 else {
4378 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4379 break;
4380 }
4381 }
4382
4383 #if DL_DEBUG
4384 if (marray != chunks) {
4385
4386 if (element_size != 0) {
4387 dl_assert(remainder_size == element_size);
4388 }
4389 else {
4390 dl_assert(remainder_size == request2size(sizes[i]));
4391 }
4392 check_inuse_chunk(m, mem2chunk(marray));
4393 }
4394 for (i = 0; i != n_elements; ++i)
4395 check_inuse_chunk(m, mem2chunk(marray[i]));
4396
4397 #endif
4398
4399 POSTACTION(m);
4400 return marray;
4401 }
4402
4403
4404
4405
4406 #if !ONLY_MSPACES
4407
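/*
  dlmalloc:
  Small requests are served from an exact-fit or nearby smallbin, then
  from dv or the treebins; large requests take the best-fitting
  treebin chunk.  Remaining requests split the top chunk, and
  sys_alloc is the last resort.
*/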
4408 void* dlmalloc(size_t bytes) {
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432 if (!PREACTION(gm)) {
4433 void* mem;
4434 size_t nb;
4435 if (bytes <= MAX_SMALL_REQUEST) {
4436 bindex_t idx;
4437 binmap_t smallbits;
4438 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4439 idx = small_index(nb);
4440 smallbits = gm->smallmap >> idx;
4441
4442 if ((smallbits & 0x3U) != 0) {
4443 mchunkptr b, p;
4444 idx += ~smallbits & 1;
4445 b = smallbin_at(gm, idx);
4446 p = b->fd;
4447 dl_assert(chunksize(p) == small_index2size(idx));
4448 unlink_first_small_chunk(gm, b, p, idx);
4449 set_inuse_and_pinuse(gm, p, small_index2size(idx));
4450 mem = chunk2mem(p);
4451 check_malloced_chunk(gm, mem, nb);
4452 goto postaction;
4453 }
4454
4455 else if (nb > gm->dvsize) {
4456 if (smallbits != 0) {
4457 mchunkptr b, p, r;
4458 size_t rsize;
4459 bindex_t i;
4460 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4461 binmap_t leastbit = least_bit(leftbits);
4462 compute_bit2idx(leastbit, i);
4463 b = smallbin_at(gm, i);
4464 p = b->fd;
4465 dl_assert(chunksize(p) == small_index2size(i));
4466 unlink_first_small_chunk(gm, b, p, i);
4467 rsize = small_index2size(i) - nb;
4468
4469 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4470 set_inuse_and_pinuse(gm, p, small_index2size(i));
4471 else {
4472 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4473 r = chunk_plus_offset(p, nb);
4474 set_size_and_pinuse_of_free_chunk(r, rsize);
4475 replace_dv(gm, r, rsize);
4476 }
4477 mem = chunk2mem(p);
4478 check_malloced_chunk(gm, mem, nb);
4479 goto postaction;
4480 }
4481
4482 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
4483 check_malloced_chunk(gm, mem, nb);
4484 goto postaction;
4485 }
4486 }
4487 }
4488 else if (bytes >= MAX_REQUEST)
4489 nb = MAX_SIZE_T;
4490 else {
4491 nb = pad_request(bytes);
4492 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4493 check_malloced_chunk(gm, mem, nb);
4494 goto postaction;
4495 }
4496 }
4497
4498 if (nb <= gm->dvsize) {
4499 size_t rsize = gm->dvsize - nb;
4500 mchunkptr p = gm->dv;
4501 if (rsize >= MIN_CHUNK_SIZE) {
4502 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4503 gm->dvsize = rsize;
4504 set_size_and_pinuse_of_free_chunk(r, rsize);
4505 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4506 }
4507 else {
4508 size_t dvs = gm->dvsize;
4509 gm->dvsize = 0;
4510 gm->dv = 0;
4511 set_inuse_and_pinuse(gm, p, dvs);
4512 }
4513 mem = chunk2mem(p);
4514 check_malloced_chunk(gm, mem, nb);
4515 goto postaction;
4516 }
4517
4518 else if (nb < gm->topsize) {
4519 size_t rsize = gm->topsize -= nb;
4520 mchunkptr p = gm->top;
4521 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4522 r->head = rsize | PINUSE_BIT;
4523 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4524 mem = chunk2mem(p);
4525 check_top_chunk(gm, gm->top);
4526 check_malloced_chunk(gm, mem, nb);
4527 goto postaction;
4528 }
4529
4530 mem = sys_alloc(gm, nb);
4531
4532 postaction:
4533 POSTACTION(gm);
4534 return mem;
4535 }
4536
4537 return 0;
4538 }
4539
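/*
  dlfree:
  Coalesce the chunk with free neighbors, consolidating into top or dv
  when adjacent; individually mmapped chunks are unmapped directly.
  The result is placed into a smallbin or treebin, and trimming or
  segment release is triggered when thresholds are reached.
*/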
4540 void dlfree(void* mem) {
4541
4542
4543
4544
4545
4546
4547 if (mem != 0) {
4548 mchunkptr p = mem2chunk(mem);
4549 #if FOOTERS
4550 mstate fm = get_mstate_for(p);
4551 if (!ok_magic(fm)) {
4552 USAGE_ERROR_ACTION(fm, p);
4553 return;
4554 }
4555 #else
4556 #define fm gm
4557 #endif
4558 if (!PREACTION(fm)) {
4559 check_inuse_chunk(fm, p);
4560 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4561 size_t psize = chunksize(p);
4562 mchunkptr next = chunk_plus_offset(p, psize);
4563 if (!pinuse(p)) {
4564 size_t prevsize = p->prev_foot;
4565 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4566 prevsize &= ~IS_MMAPPED_BIT;
4567 psize += prevsize + MMAP_FOOT_PAD;
4568 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4569 fm->footprint -= psize;
4570 goto postaction;
4571 }
4572 else {
4573 mchunkptr prev = chunk_minus_offset(p, prevsize);
4574 psize += prevsize;
4575 p = prev;
4576 if (RTCHECK(ok_address(fm, prev))) {
4577 if (p != fm->dv) {
4578 unlink_chunk(fm, p, prevsize);
4579 }
4580 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4581 fm->dvsize = psize;
4582 set_free_with_pinuse(p, psize, next);
4583 goto postaction;
4584 }
4585 }
4586 else
4587 goto erroraction;
4588 }
4589 }
4590
4591 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4592 if (!cinuse(next)) {
4593 if (next == fm->top) {
4594 size_t tsize = fm->topsize += psize;
4595 fm->top = p;
4596 p->head = tsize | PINUSE_BIT;
4597 if (p == fm->dv) {
4598 fm->dv = 0;
4599 fm->dvsize = 0;
4600 }
4601 if (should_trim(fm, tsize))
4602 sys_trim(fm, 0);
4603 goto postaction;
4604 }
4605 else if (next == fm->dv) {
4606 size_t dsize = fm->dvsize += psize;
4607 fm->dv = p;
4608 set_size_and_pinuse_of_free_chunk(p, dsize);
4609 goto postaction;
4610 }
4611 else {
4612 size_t nsize = chunksize(next);
4613 psize += nsize;
4614 unlink_chunk(fm, next, nsize);
4615 set_size_and_pinuse_of_free_chunk(p, psize);
4616 if (p == fm->dv) {
4617 fm->dvsize = psize;
4618 goto postaction;
4619 }
4620 }
4621 }
4622 else
4623 set_free_with_pinuse(p, psize, next);
4624
4625 if (is_small(psize)) {
4626 insert_small_chunk(fm, p, psize);
4627 check_free_chunk(fm, p);
4628 }
4629 else {
4630 tchunkptr tp = (tchunkptr)p;
4631 insert_large_chunk(fm, tp, psize);
4632 check_free_chunk(fm, p);
4633 if (--fm->release_checks == 0)
4634 release_unused_segments(fm);
4635 }
4636 goto postaction;
4637 }
4638 }
4639 erroraction:
4640 USAGE_ERROR_ACTION(fm, p);
4641 postaction:
4642 POSTACTION(fm);
4643 }
4644 }
4645 #if !FOOTERS
4646 #undef fm
4647 #endif
4648 }
4649
4650 void* dlcalloc(size_t n_elements, size_t elem_size) {
4651 void* mem;
4652 size_t req = 0;
4653 if (n_elements != 0) {
4654 req = n_elements * elem_size;
4655 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4656 (req / n_elements != elem_size))
4657 req = MAX_SIZE_T;
4658 }
4659 mem = dlmalloc(req);
4660 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4661 memset(mem, 0, req);
4662 return mem;
4663 }
4664
4665 void* dlrealloc(void* oldmem, size_t bytes) {
4666
4667 if (oldmem == 0)
4668 return dlmalloc(bytes);
4669 #ifdef REALLOC_ZERO_BYTES_FREES
4670 if (bytes == 0) {
4671 dlfree(oldmem);
4672 return 0;
4673 }
4674 #endif
4675 else {
4676 #if ! FOOTERS
4677 mstate m = gm;
4678 #else
4679
4680 mstate m = get_mstate_for(mem2chunk(oldmem));
4681
4682
4683
4684
4685 if (!ok_magic(m)) {
4686
4687 USAGE_ERROR_ACTION(m, oldmem);
4688 return 0;
4689 }
4690
4691 #endif
4692
4693 return internal_realloc(m, oldmem, bytes);
4694 }
4695 }
4696
4697 void* dlmemalign(size_t alignment, size_t bytes) {
4698 return internal_memalign(gm, alignment, bytes);
4699 }
4700
4701 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
4702 void* chunks[]) {
4703 size_t sz = elem_size;
4704 return ialloc(gm, n_elements, &sz, 3, chunks);
4705 }
4706
4707 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
4708 void* chunks[]) {
4709 return ialloc(gm, n_elements, sizes, 0, chunks);
4710 }
4711
4712 void* dlvalloc(size_t bytes) {
4713 size_t pagesz;
4714 init_mparams();
4715 pagesz = mparams.page_size;
4716 return dlmemalign(pagesz, bytes);
4717 }
4718
4719 void* dlpvalloc(size_t bytes) {
4720 size_t pagesz;
4721 init_mparams();
4722 pagesz = mparams.page_size;
4723 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4724 }
4725
4726 int dlmalloc_trim(size_t pad) {
4727 int result = 0;
4728 if (!PREACTION(gm)) {
4729 result = sys_trim(gm, pad);
4730 POSTACTION(gm);
4731 }
4732 return result;
4733 }
4734
4735 size_t dlmalloc_footprint(void) {
4736 return gm->footprint;
4737 }
4738
4739 size_t dlmalloc_max_footprint(void) {
4740 return gm->max_footprint;
4741 }
4742
4743 #if !NO_MALLINFO
4744 struct mallinfo dlmallinfo(void) {
4745 return internal_mallinfo(gm);
4746 }
4747 #endif
4748
4749 void dlmalloc_stats() {
4750 internal_malloc_stats(gm);
4751 }
4752
4753 size_t dlmalloc_usable_size(void* mem) {
4754 if (mem != 0) {
4755 mchunkptr p = mem2chunk(mem);
4756 if (cinuse(p))
4757 return chunksize(p) - overhead_for(p);
4758 }
4759 return 0;
4760 }
4761
4762 int dlmallopt(int param_number, int value) {
4763 return change_mparam(param_number, value);
4764 }
4765
4766 #endif
4767
4768
4769
4770 #if MSPACES
4771
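/* Carve an mstate from the start of the given base region, initialize
   its bins, and make the rest of the region the top chunk. */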
4772 static mstate init_user_mstate(char* tbase, size_t tsize) {
4773 size_t msize = pad_request(sizeof(struct malloc_state));
4774 mchunkptr mn;
4775 mchunkptr msp = align_as_chunk(tbase);
4776 mstate m = (mstate)(chunk2mem(msp));
4777 memset(m, 0, msize);
4778 INITIAL_LOCK(&m->mutex);
4779 msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
4780 m->seg.base = m->least_addr = tbase;
4781 m->seg.size = m->footprint = m->max_footprint = tsize;
4782 m->magic = mparams.magic;
4783 m->release_checks = MAX_RELEASE_CHECK_RATE;
4784 m->mflags = mparams.default_mflags;
4785 m->extp = 0;
4786 m->exts = 0;
4787 disable_contiguous(m);
4788 init_bins(m);
4789 mn = next_chunk(mem2chunk(m));
4790 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4791 check_top_chunk(m, m->top);
4792 return m;
4793 }
4794
4795 mspace create_mspace(size_t capacity, int locked) {
4796 mstate m = 0;
4797 size_t msize = pad_request(sizeof(struct malloc_state));
4798 init_mparams();
4799
4800 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4801 size_t rs = ((capacity == 0)? mparams.granularity :
4802 (capacity + TOP_FOOT_SIZE + msize));
4803 size_t tsize = granularity_align(rs);
4804 char* tbase = (char*)(CALL_MMAP(tsize));
4805 if (tbase != CMFAIL) {
4806 m = init_user_mstate(tbase, tsize);
4807 m->seg.sflags = IS_MMAPPED_BIT;
4808 set_lock(m, locked);
4809 }
4810 }
4811 return (mspace)m;
4812 }
4813
4814 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4815 mstate m = 0;
4816 size_t msize = pad_request(sizeof(struct malloc_state));
4817 init_mparams();
4818
4819 if (capacity > msize + TOP_FOOT_SIZE &&
4820 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4821 m = init_user_mstate((char*)base, capacity);
4822 m->seg.sflags = EXTERN_BIT;
4823 set_lock(m, locked);
4824 }
4825 return (mspace)m;
4826 }
4827
4828 size_t destroy_mspace(mspace msp) {
4829 size_t freed = 0;
4830 mstate ms = (mstate)msp;
4831 if (ok_magic(ms)) {
4832 msegmentptr sp = &ms->seg;
4833 while (sp != 0) {
4834 char* base = sp->base;
4835 size_t size = sp->size;
4836 flag_t flag = sp->sflags;
4837 sp = sp->next;
4838 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4839 CALL_MUNMAP(base, size) == 0)
4840 freed += size;
4841 }
4842 }
4843 else {
4844 USAGE_ERROR_ACTION(ms,ms);
4845 }
4846 return freed;
4847 }
4848
4849
4850
4851
4852
4853
4854
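/* The mspace_* entry points mirror dlmalloc/dlfree and friends, but
   operate on the supplied mspace after checking its magic number. */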
4855 void* mspace_malloc(mspace msp, size_t bytes) {
4856 mstate ms = (mstate)msp;
4857 if (!ok_magic(ms)) {
4858 USAGE_ERROR_ACTION(ms,ms);
4859 return 0;
4860 }
4861 if (!PREACTION(ms)) {
4862 void* mem;
4863 size_t nb;
4864 if (bytes <= MAX_SMALL_REQUEST) {
4865 bindex_t idx;
4866 binmap_t smallbits;
4867 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4868 idx = small_index(nb);
4869 smallbits = ms->smallmap >> idx;
4870
4871 if ((smallbits & 0x3U) != 0) {
4872 mchunkptr b, p;
4873 idx += ~smallbits & 1;
4874 b = smallbin_at(ms, idx);
4875 p = b->fd;
4876 dl_assert(chunksize(p) == small_index2size(idx));
4877 unlink_first_small_chunk(ms, b, p, idx);
4878 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4879 mem = chunk2mem(p);
4880 check_malloced_chunk(ms, mem, nb);
4881 goto postaction;
4882 }
4883
4884 else if (nb > ms->dvsize) {
4885 if (smallbits != 0) {
4886 mchunkptr b, p, r;
4887 size_t rsize;
4888 bindex_t i;
4889 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4890 binmap_t leastbit = least_bit(leftbits);
4891 compute_bit2idx(leastbit, i);
4892 b = smallbin_at(ms, i);
4893 p = b->fd;
4894 dl_assert(chunksize(p) == small_index2size(i));
4895 unlink_first_small_chunk(ms, b, p, i);
4896 rsize = small_index2size(i) - nb;
4897
4898 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4899 set_inuse_and_pinuse(ms, p, small_index2size(i));
4900 else {
4901 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4902 r = chunk_plus_offset(p, nb);
4903 set_size_and_pinuse_of_free_chunk(r, rsize);
4904 replace_dv(ms, r, rsize);
4905 }
4906 mem = chunk2mem(p);
4907 check_malloced_chunk(ms, mem, nb);
4908 goto postaction;
4909 }
4910
4911 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4912 check_malloced_chunk(ms, mem, nb);
4913 goto postaction;
4914 }
4915 }
4916 }
4917 else if (bytes >= MAX_REQUEST)
4918 nb = MAX_SIZE_T;
4919 else {
4920 nb = pad_request(bytes);
4921 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4922 check_malloced_chunk(ms, mem, nb);
4923 goto postaction;
4924 }
4925 }
4926
4927 if (nb <= ms->dvsize) {
4928 size_t rsize = ms->dvsize - nb;
4929 mchunkptr p = ms->dv;
4930 if (rsize >= MIN_CHUNK_SIZE) {
4931 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4932 ms->dvsize = rsize;
4933 set_size_and_pinuse_of_free_chunk(r, rsize);
4934 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4935 }
4936 else {
4937 size_t dvs = ms->dvsize;
4938 ms->dvsize = 0;
4939 ms->dv = 0;
4940 set_inuse_and_pinuse(ms, p, dvs);
4941 }
4942 mem = chunk2mem(p);
4943 check_malloced_chunk(ms, mem, nb);
4944 goto postaction;
4945 }
4946
4947 else if (nb < ms->topsize) {
4948 size_t rsize = ms->topsize -= nb;
4949 mchunkptr p = ms->top;
4950 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4951 r->head = rsize | PINUSE_BIT;
4952 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4953 mem = chunk2mem(p);
4954 check_top_chunk(ms, ms->top);
4955 check_malloced_chunk(ms, mem, nb);
4956 goto postaction;
4957 }
4958
4959 mem = sys_alloc(ms, nb);
4960
4961 postaction:
4962 POSTACTION(ms);
4963 return mem;
4964 }
4965
4966 return 0;
4967 }
4968
4969 void mspace_free(mspace msp, void* mem) {
4970 if (mem != 0) {
4971 mchunkptr p = mem2chunk(mem);
4972 #if FOOTERS
4973 mstate fm = get_mstate_for(p);
4974 #else
4975 mstate fm = (mstate)msp;
4976 #endif
4977 if (!ok_magic(fm)) {
4978 USAGE_ERROR_ACTION(fm, p);
4979 return;
4980 }
4981 if (!PREACTION(fm)) {
4982 check_inuse_chunk(fm, p);
4983 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4984 size_t psize = chunksize(p);
4985 mchunkptr next = chunk_plus_offset(p, psize);
4986 if (!pinuse(p)) {
4987 size_t prevsize = p->prev_foot;
4988 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4989 prevsize &= ~IS_MMAPPED_BIT;
4990 psize += prevsize + MMAP_FOOT_PAD;
4991 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4992 fm->footprint -= psize;
4993 goto postaction;
4994 }
4995 else {
4996 mchunkptr prev = chunk_minus_offset(p, prevsize);
4997 psize += prevsize;
4998 p = prev;
4999 if (RTCHECK(ok_address(fm, prev))) {
5000 if (p != fm->dv) {
5001 unlink_chunk(fm, p, prevsize);
5002 }
5003 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
5004 fm->dvsize = psize;
5005 set_free_with_pinuse(p, psize, next);
5006 goto postaction;
5007 }
5008 }
5009 else
5010 goto erroraction;
5011 }
5012 }
5013
5014 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
5015 if (!cinuse(next)) {
5016 if (next == fm->top) {
5017 size_t tsize = fm->topsize += psize;
5018 fm->top = p;
5019 p->head = tsize | PINUSE_BIT;
5020 if (p == fm->dv) {
5021 fm->dv = 0;
5022 fm->dvsize = 0;
5023 }
5024 if (should_trim(fm, tsize))
5025 sys_trim(fm, 0);
5026 goto postaction;
5027 }
5028 else if (next == fm->dv) {
5029 size_t dsize = fm->dvsize += psize;
5030 fm->dv = p;
5031 set_size_and_pinuse_of_free_chunk(p, dsize);
5032 goto postaction;
5033 }
5034 else {
5035 size_t nsize = chunksize(next);
5036 psize += nsize;
5037 unlink_chunk(fm, next, nsize);
5038 set_size_and_pinuse_of_free_chunk(p, psize);
5039 if (p == fm->dv) {
5040 fm->dvsize = psize;
5041 goto postaction;
5042 }
5043 }
5044 }
5045 else
5046 set_free_with_pinuse(p, psize, next);
5047
5048 if (is_small(psize)) {
5049 insert_small_chunk(fm, p, psize);
5050 check_free_chunk(fm, p);
5051 }
5052 else {
5053 tchunkptr tp = (tchunkptr)p;
5054 insert_large_chunk(fm, tp, psize);
5055 check_free_chunk(fm, p);
5056 if (--fm->release_checks == 0)
5057 release_unused_segments(fm);
5058 }
5059 goto postaction;
5060 }
5061 }
5062 erroraction:
5063 USAGE_ERROR_ACTION(fm, p);
5064 postaction:
5065 POSTACTION(fm);
5066 }
5067 }
5068 }
5069
5070 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
5071 void* mem;
5072 size_t req = 0;
5073 mstate ms = (mstate)msp;
5074 if (!ok_magic(ms)) {
5075 USAGE_ERROR_ACTION(ms,ms);
5076 return 0;
5077 }
5078 if (n_elements != 0) {
5079 req = n_elements * elem_size;
5080 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
5081 (req / n_elements != elem_size))
5082 req = MAX_SIZE_T;
5083 }
5084 mem = internal_malloc(ms, req);
5085 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
5086 memset(mem, 0, req);
5087 return mem;
5088 }
5089
5090 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
5091 if (oldmem == 0)
5092 return mspace_malloc(msp, bytes);
5093 #ifdef REALLOC_ZERO_BYTES_FREES
5094 if (bytes == 0) {
5095 mspace_free(msp, oldmem);
5096 return 0;
5097 }
5098 #endif
5099 else {
5100 #if FOOTERS
5101 mchunkptr p = mem2chunk(oldmem);
5102 mstate ms = get_mstate_for(p);
5103 #else
5104 mstate ms = (mstate)msp;
5105 #endif
5106 if (!ok_magic(ms)) {
5107 USAGE_ERROR_ACTION(ms,ms);
5108 return 0;
5109 }
5110 return internal_realloc(ms, oldmem, bytes);
5111 }
5112 }
5113
5114 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
5115 mstate ms = (mstate)msp;
5116 if (!ok_magic(ms)) {
5117 USAGE_ERROR_ACTION(ms,ms);
5118 return 0;
5119 }
5120 return internal_memalign(ms, alignment, bytes);
5121 }
5122
5123 void** mspace_independent_calloc(mspace msp, size_t n_elements,
5124 size_t elem_size, void* chunks[]) {
5125 size_t sz = elem_size;
5126 mstate ms = (mstate)msp;
5127 if (!ok_magic(ms)) {
5128 USAGE_ERROR_ACTION(ms,ms);
5129 return 0;
5130 }
5131 return ialloc(ms, n_elements, &sz, 3, chunks);
5132 }
5133
5134 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
5135 size_t sizes[], void* chunks[]) {
5136 mstate ms = (mstate)msp;
5137 if (!ok_magic(ms)) {
5138 USAGE_ERROR_ACTION(ms,ms);
5139 return 0;
5140 }
5141 return ialloc(ms, n_elements, sizes, 0, chunks);
5142 }
5143
5144 int mspace_trim(mspace msp, size_t pad) {
5145 int result = 0;
5146 mstate ms = (mstate)msp;
5147 if (ok_magic(ms)) {
5148 if (!PREACTION(ms)) {
5149 result = sys_trim(ms, pad);
5150 POSTACTION(ms);
5151 }
5152 }
5153 else {
5154 USAGE_ERROR_ACTION(ms,ms);
5155 }
5156 return result;
5157 }
5158
5159 void mspace_malloc_stats(mspace msp) {
5160 mstate ms = (mstate)msp;
5161 if (ok_magic(ms)) {
5162 internal_malloc_stats(ms);
5163 }
5164 else {
5165 USAGE_ERROR_ACTION(ms,ms);
5166 }
5167 }
5168
5169 size_t mspace_footprint(mspace msp) {
5170 size_t result = 0;
5171 mstate ms = (mstate)msp;
5172 if (ok_magic(ms)) {
5173 result = ms->footprint;
5174 }
5175 else {
5176 USAGE_ERROR_ACTION(ms,ms);
5177 }
5178 return result;
5179 }
5180
5181
5182 size_t mspace_max_footprint(mspace msp) {
5183 size_t result = 0;
5184 mstate ms = (mstate)msp;
5185 if (ok_magic(ms)) {
5186 result = ms->max_footprint;
5187 }
5188 else {
5189 USAGE_ERROR_ACTION(ms,ms);
5190 }
5191 return result;
5192 }
5193
5194
5195 #if !NO_MALLINFO
5196 struct mallinfo mspace_mallinfo(mspace msp) {
5197 mstate ms = (mstate)msp;
5198 if (!ok_magic(ms)) {
5199 USAGE_ERROR_ACTION(ms,ms);
5200 }
5201 return internal_mallinfo(ms);
5202 }
5203 #endif
5204
5205 size_t mspace_usable_size(void* mem) {
5206 if (mem != 0) {
5207 mchunkptr p = mem2chunk(mem);
5208 if (cinuse(p))
5209 return chunksize(p) - overhead_for(p);
5210 }
5211 return 0;
5212 }
5213
5214 int mspace_mallopt(int param_number, int value) {
5215 return change_mparam(param_number, value);
5216 }
5217
5218 #endif
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501
5502