00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065
00066
00067
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077
00078
00079
00080
00081
00082
00083
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103
00104
00105
00106
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
00130
00131
00132
00133
00134
00135
00136
00137
00138
00139
00140
00141
00142
00143
00144
00145
00146
00147
00148
00149
00150
00151
00152
00153
00154
00155
00156
00157
00158
00159
00160
00161
00162
00163
00164
00165
00166
00167
00168
00169
00170
00171
00172
00173
00174
00175
00176
00177
00178
00179
00180
00181
00182
00183
00184
00185
00186
00187
00188
00189
00190
00191
00192
00193
00194
00195
00196
00197
00198
00199
00200
00201
00202
00203
00204
00205
00206
00207
00208
00209
00210
00211
00212
00213
00214
00215
00216
00217
00218
00219
00220
00221
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
00237
00238
00239
00240
00241
00242
00243
00244
00245
00246
00247
00248
00249
00250
00251
00252
00253
00254
00255
00256
00257
00258
00259
00260
00261
00262
00263
00264
00265
00266
00267
00268
00269
00270
00271
00272
00273
00274
00275
00276
00277
00278
00279
00280
00281
00282
00283
00284
00285
00286
00287
00288
00289
00290
00291
00292
00293
00294
00295
00296
00297
00298
00299
00300
00301
00302
00303
00304
00305
00306
00307
00308
00309
00310
00311
00312
00313
00314
00315
00316
00317
00318
00319
00320
00321
00322
00323
00324
00325
00326
00327
00328
00329
00330
00331
00332
00333
00334
00335
00336
00337
00338
00339
00340
00341
00342
00343
00344
00345
00346
00347
00348
00349
00350
00351
00352
00353
00354
00355
00356
00357
00358
00359
00360
00361
00362
00363
00364
00365
00366
00367
00368
00369
00370
00371
00372
00373
00374
00375
00376
00377
00378
00379
00380
00381
00382
00383
00384
00385
00386
00387
00388
00389
00390
00391
00392
00393
00394
00395
00396
00397
00398
00399
00400
00401
00402
00403
00404
00405
00406
00407
00408
00409
00410
00411
00412
00413
00414
00415
00416
00417
00418
00419
00420
00421
00422
00423
00424
00425
00426
00427
00428
00429
00430
00431
00432
00433
00434
00435
00436
00437
00438
00439
00440
00441 #include "dlmalloc-config.h"
00442
00443 #ifndef WIN32
00444 #ifdef _WIN32
00445 #define WIN32 1
00446 #endif
00447 #endif
00448 #ifdef WIN32
00449 #define WIN32_LEAN_AND_MEAN
00450 #include <windows.h>
00451 #define HAVE_MMAP 1
00452 #define HAVE_MORECORE 0
00453 #define LACKS_UNISTD_H
00454 #define LACKS_SYS_PARAM_H
00455 #define LACKS_SYS_MMAN_H
00456 #define LACKS_STRING_H
00457 #define LACKS_STRINGS_H
00458 #define LACKS_SYS_TYPES_H
00459 #define LACKS_ERRNO_H
00460 #define MALLOC_FAILURE_ACTION
00461 #define MMAP_CLEARS 0
00462 #endif
00463
00464 #if defined(DARWIN) || defined(_DARWIN)
00465
00466 #ifndef HAVE_MORECORE
00467 #define HAVE_MORECORE 0
00468 #define HAVE_MMAP 1
00469 #endif
00470 #endif
00471
00472 #ifndef LACKS_SYS_TYPES_H
00473 #include <sys/types.h>
00474 #endif
00475
00476
00477 #define MAX_SIZE_T (~(size_t)0)
00478
00479 #ifndef ONLY_MSPACES
00480 #define ONLY_MSPACES 0
00481 #endif
00482 #ifndef MSPACES
00483 #if ONLY_MSPACES
00484 #define MSPACES 1
00485 #else
00486 #define MSPACES 0
00487 #endif
00488 #endif
00489 #ifndef MALLOC_ALIGNMENT
00490 #define MALLOC_ALIGNMENT ((size_t)8U)
00491 #endif
00492 #ifndef FOOTERS
00493 #define FOOTERS 0
00494 #endif
00495 #ifndef ABORT
00496 #define ABORT abort()
00497 #endif
00498 #ifndef ABORT_ON_ASSERT_FAILURE
00499 #define ABORT_ON_ASSERT_FAILURE 1
00500 #endif
00501 #ifndef PROCEED_ON_ERROR
00502 #define PROCEED_ON_ERROR 0
00503 #endif
00504 #ifndef USE_LOCKS
00505 #define USE_LOCKS 0
00506 #endif
00507 #ifndef INSECURE
00508 #define INSECURE 0
00509 #endif
00510 #ifndef HAVE_MMAP
00511 #define HAVE_MMAP 1
00512 #endif
00513 #ifndef MMAP_CLEARS
00514 #define MMAP_CLEARS 1
00515 #endif
00516 #ifndef HAVE_MREMAP
00517 #ifdef linux
00518 #define HAVE_MREMAP 1
00519 #else
00520 #define HAVE_MREMAP 0
00521 #endif
00522 #endif
00523 #ifndef MALLOC_FAILURE_ACTION
00524 #define MALLOC_FAILURE_ACTION errno = ENOMEM;
00525 #endif
00526 #ifndef HAVE_MORECORE
00527 #if ONLY_MSPACES
00528 #define HAVE_MORECORE 0
00529 #else
00530 #define HAVE_MORECORE 1
00531 #endif
00532 #endif
00533 #if !HAVE_MORECORE
00534 #define MORECORE_CONTIGUOUS 0
00535 #else
00536 #ifndef MORECORE
00537 #define MORECORE sbrk
00538 #endif
00539 #ifndef MORECORE_CONTIGUOUS
00540 #define MORECORE_CONTIGUOUS 1
00541 #endif
00542 #endif
00543 #ifndef DEFAULT_GRANULARITY
00544 #if MORECORE_CONTIGUOUS
00545 #define DEFAULT_GRANULARITY (0)
00546 #else
00547 #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
00548 #endif
00549 #endif
00550 #ifndef DEFAULT_TRIM_THRESHOLD
00551 #ifndef MORECORE_CANNOT_TRIM
00552 #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
00553 #else
00554 #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
00555 #endif
00556 #endif
00557 #ifndef DEFAULT_MMAP_THRESHOLD
00558 #if HAVE_MMAP
00559 #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
00560 #else
00561 #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
00562 #endif
00563 #endif
00564 #ifndef USE_BUILTIN_FFS
00565 #define USE_BUILTIN_FFS 0
00566 #endif
00567 #ifndef USE_DEV_RANDOM
00568 #define USE_DEV_RANDOM 0
00569 #endif
00570 #ifndef NO_MALLINFO
00571 #define NO_MALLINFO 0
00572 #endif
00573 #ifndef MALLINFO_FIELD_TYPE
00574 #define MALLINFO_FIELD_TYPE size_t
00575 #endif
00576
00577
00578
00579
00580
00581
00582
00583
00584 #define M_TRIM_THRESHOLD (-1)
00585 #define M_GRANULARITY (-2)
00586 #define M_MMAP_THRESHOLD (-3)
00587
00588
00589
00590 #if !NO_MALLINFO
00591
00592
00593
00594
00595
00596
00597
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
00612
00613
00614
00615 #ifdef HAVE_USR_INCLUDE_MALLOC_H
00616 #include "/usr/include/malloc.h"
00617 #else
00618
/*
  SVID/XPG-style mallinfo statistics record, defined here only when the
  system <malloc.h> is not used.  Field widths are configurable via
  MALLINFO_FIELD_TYPE (defaults to size_t) to avoid the historical
  int-overflow problem on 64-bit systems.
*/
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* unused in this malloc (always 0) */
  MALLINFO_FIELD_TYPE hblks;    /* unused in this malloc (always 0) */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* unused in this malloc (always 0) */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
00631
00632 #endif
00633 #endif
00634
00635 #ifdef __cplusplus
00636 extern "C" {
00637 #endif
00638
00639 #if !ONLY_MSPACES
00640
00641
00642
00643 #ifndef USE_DL_PREFIX
00644 #define dlcalloc calloc
00645 #define dlfree free
00646 #define dlmalloc malloc
00647 #define dlmemalign memalign
00648 #define dlrealloc realloc
00649 #define dlvalloc valloc
00650 #define dlpvalloc pvalloc
00651 #define dlmallinfo mallinfo
00652 #define dlmallopt mallopt
00653 #define dlmalloc_trim malloc_trim
00654 #define dlmalloc_stats malloc_stats
00655 #define dlmalloc_usable_size malloc_usable_size
00656 #define dlmalloc_footprint malloc_footprint
00657 #define dlmalloc_max_footprint malloc_max_footprint
00658 #define dlindependent_calloc independent_calloc
00659 #define dlindependent_comalloc independent_comalloc
00660 #endif
00661
00662
00663
00664
00665
00666
00667
00668
00669
00670
00671
00672
00673
00674
00675
00676
00677 void* dlmalloc(size_t);
00678
00679
00680
00681
00682
00683
00684
00685
00686 void dlfree(void*);
00687
00688
00689
00690
00691
00692
00693 void* dlcalloc(size_t, size_t);
00694
00695
00696
00697
00698
00699
00700
00701
00702
00703
00704
00705
00706
00707
00708
00709
00710
00711
00712
00713
00714
00715
00716
00717
00718 void* dlrealloc(void*, size_t);
00719
00720
00721
00722
00723
00724
00725
00726
00727
00728
00729
00730
00731
00732 void* dlmemalign(size_t, size_t);
00733
00734
00735
00736
00737
00738
00739 void* dlvalloc(size_t);
00740
00741
00742
00743
00744
00745
00746
00747
00748
00749
00750
00751
00752
00753
00754
00755
00756
00757
00758
00759 int dlmallopt(int, int);
00760
00761
00762
00763
00764
00765
00766
00767
00768
00769
00770 size_t dlmalloc_footprint(void);
00771
00772
00773
00774
00775
00776
00777
00778
00779
00780
00781
00782
00783 size_t dlmalloc_max_footprint(void);
00784
00785 #if !NO_MALLINFO
00786
00787
00788
00789
00790
00791
00792
00793
00794
00795
00796
00797
00798
00799
00800
00801
00802
00803
00804
00805
00806
00807
00808 struct mallinfo dlmallinfo(void);
00809 #endif
00810
00811
00812
00813
00814
00815
00816
00817
00818
00819
00820
00821
00822
00823
00824
00825
00826
00827
00828
00829
00830
00831
00832
00833
00834
00835
00836
00837
00838
00839
00840
00841
00842
00843
00844
00845
00846
00847
00848
00849
00850
00851
00852
00853
00854
00855
00856
00857
00858
00859
00860
00861
00862
00863 void** dlindependent_calloc(size_t, size_t, void**);
00864
00865
00866
00867
00868
00869
00870
00871
00872
00873
00874
00875
00876
00877
00878
00879
00880
00881
00882
00883
00884
00885
00886
00887
00888
00889
00890
00891
00892
00893
00894
00895
00896
00897
00898
00899
00900
00901
00902
00903
00904
00905
00906
00907
00908
00909
00910
00911
00912
00913
00914
00915
00916
00917
00918
00919
00920
00921
00922
00923
00924 void** dlindependent_comalloc(size_t, size_t*, void**);
00925
00926
00927
00928
00929
00930
00931
00932 void* dlpvalloc(size_t);
00933
00934
00935
00936
00937
00938
00939
00940
00941
00942
00943
00944
00945
00946
00947
00948
00949
00950
00951
00952
00953
00954
00955 int dlmalloc_trim(size_t);
00956
00957
00958
00959
00960
00961
00962
00963
00964
00965
00966
00967
00968
00969
00970
00971 size_t dlmalloc_usable_size(void*);
00972
00973
00974
00975
00976
00977
00978
00979
00980
00981
00982
00983
00984
00985
00986
00987
00988
00989
00990
00991
00992 void dlmalloc_stats(void);
00993
00994 #endif
00995
00996 #if MSPACES
00997
00998
00999
01000
01001
01002 typedef void* mspace;
01003
01004
01005
01006
01007
01008
01009
01010
01011
01012
01013
01014
01015 mspace create_mspace(size_t capacity, int locked);
01016
01017
01018
01019
01020
01021
01022
01023 size_t destroy_mspace(mspace msp);
01024
01025
01026
01027
01028
01029
01030
01031
01032
01033
01034 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
01035
01036
01037
01038
01039
01040 void* mspace_malloc(mspace msp, size_t bytes);
01041
01042
01043
01044
01045
01046
01047
01048
01049
01050 void mspace_free(mspace msp, void* mem);
01051
01052
01053
01054
01055
01056
01057
01058
01059
01060
01061 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
01062
01063
01064
01065
01066
01067 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
01068
01069
01070
01071
01072
01073 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
01074
01075
01076
01077
01078
01079 void** mspace_independent_calloc(mspace msp, size_t n_elements,
01080 size_t elem_size, void* chunks[]);
01081
01082
01083
01084
01085
01086 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
01087 size_t sizes[], void* chunks[]);
01088
01089
01090
01091
01092
01093 size_t mspace_footprint(mspace msp);
01094
01095
01096
01097
01098
01099 size_t mspace_max_footprint(mspace msp);
01100
01101
01102 #if !NO_MALLINFO
01103
01104
01105
01106
01107 struct mallinfo mspace_mallinfo(mspace msp);
01108 #endif
01109
01110
01111
01112
01113
01114 void mspace_malloc_stats(mspace msp);
01115
01116
01117
01118
01119
01120 int mspace_trim(mspace msp, size_t pad);
01121
01122
01123
01124
01125 int mspace_mallopt(int, int);
01126
01127 #endif
01128
01129 #ifdef __cplusplus
01130 };
01131 #endif
01132
01133
01134
01135
01136
01137
01138
01139
01140
01141
01142
01143
01144
01145 #ifdef WIN32
01146 #pragma warning( disable : 4146 )
01147 #endif
01148
01149 #ifndef LACKS_STDIO_H
01150 #include <stdio.h>
01151 #endif
01152
01153 #ifndef LACKS_ERRNO_H
01154 #include <errno.h>
01155 #endif
01156 #if FOOTERS
01157 #include <time.h>
01158 #endif
01159 #ifndef LACKS_STDLIB_H
01160 #include <stdlib.h>
01161 #endif
01162 #ifdef DEBUG
01163 #if ABORT_ON_ASSERT_FAILURE
01164 #define assert(x) if(!(x)) ABORT
01165 #else
01166 #include <assert.h>
01167 #endif
01168 #else
01169 #define assert(x)
01170 #endif
01171 #ifndef LACKS_STRING_H
01172 #include <string.h>
01173 #endif
01174 #if USE_BUILTIN_FFS
01175 #ifndef LACKS_STRINGS_H
01176 #include <strings.h>
01177 #endif
01178 #endif
01179 #if HAVE_MMAP
01180 #ifndef LACKS_SYS_MMAN_H
01181 #include <sys/mman.h>
01182 #endif
01183 #ifndef LACKS_FCNTL_H
01184 #include <fcntl.h>
01185 #endif
01186 #endif
01187 #if HAVE_MORECORE
01188 #ifndef LACKS_UNISTD_H
01189 #include <unistd.h>
01190 #else
01191 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
01192 extern void* sbrk(ptrdiff_t);
01193 #endif
01194 #endif
01195 #endif
01196
01197 #ifndef WIN32
01198 #ifndef malloc_getpagesize
01199 # ifdef _SC_PAGESIZE
01200 # ifndef _SC_PAGE_SIZE
01201 # define _SC_PAGE_SIZE _SC_PAGESIZE
01202 # endif
01203 # endif
01204 # ifdef _SC_PAGE_SIZE
01205 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
01206 # else
01207 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
01208 extern size_t getpagesize();
01209 # define malloc_getpagesize getpagesize()
01210 # else
01211 # ifdef WIN32
01212 # define malloc_getpagesize getpagesize()
01213 # else
01214 # ifndef LACKS_SYS_PARAM_H
01215 # include <sys/param.h>
01216 # endif
01217 # ifdef EXEC_PAGESIZE
01218 # define malloc_getpagesize EXEC_PAGESIZE
01219 # else
01220 # ifdef NBPG
01221 # ifndef CLSIZE
01222 # define malloc_getpagesize NBPG
01223 # else
01224 # define malloc_getpagesize (NBPG * CLSIZE)
01225 # endif
01226 # else
01227 # ifdef NBPC
01228 # define malloc_getpagesize NBPC
01229 # else
01230 # ifdef PAGESIZE
01231 # define malloc_getpagesize PAGESIZE
01232 # else
01233 # define malloc_getpagesize ((size_t)4096U)
01234 # endif
01235 # endif
01236 # endif
01237 # endif
01238 # endif
01239 # endif
01240 # endif
01241 #endif
01242 #endif
01243
01244
01245
01246
01247 #define SIZE_T_SIZE (sizeof(size_t))
01248 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
01249
01250
01251
01252 #define SIZE_T_ZERO ((size_t)0)
01253 #define SIZE_T_ONE ((size_t)1)
01254 #define SIZE_T_TWO ((size_t)2)
01255 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
01256 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
01257 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
01258 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
01259
01260
01261 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
01262
01263
01264 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
01265
01266
01267 #define align_offset(A)\
01268 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
01269 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
01270
01271
01272
01273
01274
01275
01276
01277
01278
01279
01280
01281 #define MFAIL ((void*)(MAX_SIZE_T))
01282 #define CMFAIL ((char*)(MFAIL))
01283
01284 #if !HAVE_MMAP
01285 #define IS_MMAPPED_BIT (SIZE_T_ZERO)
01286 #define USE_MMAP_BIT (SIZE_T_ZERO)
01287 #define CALL_MMAP(s) MFAIL
01288 #define CALL_MUNMAP(a, s) (-1)
01289 #define DIRECT_MMAP(s) MFAIL
01290
01291 #else
01292 #define IS_MMAPPED_BIT (SIZE_T_ONE)
01293 #define USE_MMAP_BIT (SIZE_T_ONE)
01294
01295 #ifndef WIN32
01296 #define CALL_MUNMAP(a, s) munmap((a), (s))
01297 #define MMAP_PROT (PROT_READ|PROT_WRITE)
01298 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
01299 #define MAP_ANONYMOUS MAP_ANON
01300 #endif
01301 #ifdef MAP_ANONYMOUS
01302 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
01303 #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
01304 #else
01305
01306
01307
01308
01309 #define MMAP_FLAGS (MAP_PRIVATE)
01310 static int dev_zero_fd = -1;
01311 #define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
01312 (dev_zero_fd = open("/dev/zero", O_RDWR), \
01313 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
01314 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
01315 #endif
01316
01317 #define DIRECT_MMAP(s) CALL_MMAP(s)
01318 #else
01319
01320
01321 static void* win32mmap(size_t size) {
01322 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
01323 return (ptr != 0)? ptr: MFAIL;
01324 }
01325
01326
01327 static void* win32direct_mmap(size_t size) {
01328 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
01329 PAGE_READWRITE);
01330 return (ptr != 0)? ptr: MFAIL;
01331 }
01332
01333
/* munmap substitute for Win32.  The [ptr, ptr+size) range handed back
   may span several separate VirtualAlloc allocations, so walk the range
   region by region, verifying each one before releasing it.  Returns 0
   on success, -1 on any inconsistency (munmap's error convention). */
static int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    /* Each region must begin exactly at cptr, be its own allocation
       base, be committed, and fit within the remaining span; anything
       else means we were handed a range we did not allocate. */
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}
01350
01351 #define CALL_MMAP(s) win32mmap(s)
01352 #define CALL_MUNMAP(a, s) win32munmap((a), (s))
01353 #define DIRECT_MMAP(s) win32direct_mmap(s)
01354 #endif
01355 #endif
01356
01357 #if HAVE_MMAP && HAVE_MREMAP
01358 #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
01359 #else
01360 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
01361 #endif
01362
01363 #if HAVE_MORECORE
01364 #define CALL_MORECORE(S) MORECORE(S)
01365 #else
01366 #define CALL_MORECORE(S) MFAIL
01367 #endif
01368
01369
01370 #define USE_NONCONTIGUOUS_BIT (4U)
01371
01372
01373 #define EXTERN_BIT (8U)
01374
01375
01376
01377
01378 #if USE_LOCKS
01379
01380
01381
01382
01383
01384
01385
01386
01387
01388
01389
01390
01391
01392
01393
01394 #ifndef WIN32
01395
01396 #include <pthread.h>
01397 #define MLOCK_T pthread_mutex_t
01398 #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
01399 #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
01400 #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
01401
01402 #if HAVE_MORECORE
01403 static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
01404 #endif
01405
01406 static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
01407
01408 #else
01409
01410
01411
01412
01413
01414 #define MLOCK_T long
/* Spin until the lock word transitions 0 -> 1 atomically; Sleep(0)
   yields the rest of the timeslice between attempts.  Always returns 0
   (success), which is all ACQUIRE_LOCK's callers check for. */
static int win32_acquire_lock (MLOCK_T *sl) {
  for (;;) {
#ifdef InterlockedCompareExchangePointer
    if (!InterlockedCompareExchange(sl, 1, 0))
      return 0;
#else
    /* Older SDKs declare InterlockedCompareExchange over PVOID rather
       than LONG, hence the pointer-flavored casts on this branch. */
    if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
      return 0;
#endif
    Sleep (0);
  }
}
01427
/* Release the spinlock by atomically storing 0 into the lock word. */
static void win32_release_lock (MLOCK_T *sl) {
  InterlockedExchange (sl, 0);
}
01431
01432 #define INITIAL_LOCK(l) *(l)=0
01433 #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
01434 #define RELEASE_LOCK(l) win32_release_lock(l)
01435 #if HAVE_MORECORE
01436 static MLOCK_T morecore_mutex;
01437 #endif
01438 static MLOCK_T magic_init_mutex;
01439 #endif
01440
01441 #define USE_LOCK_BIT (2U)
01442 #else
01443 #define USE_LOCK_BIT (0U)
01444 #define INITIAL_LOCK(l)
01445 #endif
01446
01447 #if USE_LOCKS && HAVE_MORECORE
01448 #define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
01449 #define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
01450 #else
01451 #define ACQUIRE_MORECORE_LOCK()
01452 #define RELEASE_MORECORE_LOCK()
01453 #endif
01454
01455 #if USE_LOCKS
01456 #define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
01457 #define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
01458 #else
01459 #define ACQUIRE_MAGIC_INIT_LOCK()
01460 #define RELEASE_MAGIC_INIT_LOCK()
01461 #endif
01462
01463
01464
01465
01466
01467
01468
01469
01470
01471
01472
01473
01474
01475
01476
01477
01478
01479
01480
01481
01482
01483
01484
01485
01486
01487
01488
01489
01490
01491
01492
01493
01494
01495
01496
01497
01498
01499
01500
01501
01502
01503
01504
01505
01506
01507
01508
01509
01510
01511
01512
01513
01514
01515
01516
01517
01518
01519
01520
01521
01522
01523
01524
01525
01526
01527
01528
01529
01530
01531
01532
01533
01534
01535
01536
01537
01538
01539
01540
01541
01542
01543
01544
01545
01546
01547
01548
01549
01550
01551
01552
01553
01554
01555
01556
01557
01558
01559
01560
01561
01562
01563
01564
01565
01566
01567
01568
01569
01570
01571
01572
01573
01574
01575
01576
01577
01578
01579
01580
01581
01582
01583
01584
01585
01586
01587
01588
01589
01590
01591
01592
01593
01594
01595
01596
01597
01598
01599
01600
/* Header placed at the front of every chunk.  Only prev_foot/head are
   meaningful for in-use chunks; fd/bk are the free-list links and are
   valid only while the chunk is free (their space overlays user data
   when the chunk is allocated). */
struct malloc_chunk {
  size_t               prev_foot;  /* size of previous chunk (if free) */
  size_t               head;       /* size and inuse bits */
  struct malloc_chunk* fd;         /* double links -- used only if free */
  struct malloc_chunk* bk;
};
01607
01608 typedef struct malloc_chunk mchunk;
01609 typedef struct malloc_chunk* mchunkptr;
01610 typedef struct malloc_chunk* sbinptr;
01611 typedef unsigned int bindex_t;
01612 typedef unsigned int binmap_t;
01613 typedef unsigned int flag_t;
01614
01615
01616
01617 #define MCHUNK_SIZE (sizeof(mchunk))
01618
01619 #if FOOTERS
01620 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
01621 #else
01622 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
01623 #endif
01624
01625
01626 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
01627
01628 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
01629
01630
01631 #define MIN_CHUNK_SIZE\
01632 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
01633
01634
01635 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
01636 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
01637
01638 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
01639
01640
01641 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
01642 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
01643
01644
01645 #define pad_request(req) \
01646 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
01647
01648
01649 #define request2size(req) \
01650 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
01651
01652
01653
01654
01655
01656
01657
01658
01659
01660
01661
01662
01663 #define PINUSE_BIT (SIZE_T_ONE)
01664 #define CINUSE_BIT (SIZE_T_TWO)
01665 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
01666
01667
01668 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
01669
01670
01671 #define cinuse(p) ((p)->head & CINUSE_BIT)
01672 #define pinuse(p) ((p)->head & PINUSE_BIT)
01673 #define chunksize(p) ((p)->head & ~(INUSE_BITS))
01674
01675 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
01676 #define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
01677
01678
01679 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
01680 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
01681
01682
01683 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
01684 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
01685
01686
01687 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
01688
01689
01690 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
01691 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
01692
01693
01694 #define set_size_and_pinuse_of_free_chunk(p, s)\
01695 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
01696
01697
01698 #define set_free_with_pinuse(p, s, n)\
01699 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
01700
01701 #define is_mmapped(p)\
01702 (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
01703
01704
01705 #define overhead_for(p)\
01706 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
01707
01708
01709 #if MMAP_CLEARS
01710 #define calloc_must_clear(p) (!is_mmapped(p))
01711 #else
01712 #define calloc_must_clear(p) (1)
01713 #endif
01714
01715
01716
01717
01718
01719
01720
01721
01722
01723
01724
01725
01726
01727
01728
01729
01730
01731
01732
01733
01734
01735
01736
01737
01738
01739
01740
01741
01742
01743
01744
01745
01746
01747
01748
01749
01750
01751
01752
01753
01754
01755
01756
01757
01758
01759
01760
01761
01762
01763
01764
01765
01766
01767
01768
01769
01770
01771
01772
01773
01774
01775
01776
01777
01778
01779
01780
01781
01782
01783
01784
01785
01786
01787
01788
01789
01790
01791
01792
01793
01794
01795
01796
01797
01798
01799
01800
01801
01802
01803
01804
01805
/* Chunk header used for large free chunks kept in bitwise tries
   (treebins).  The first four fields overlay struct malloc_chunk
   exactly, so a tree chunk can be handled as an ordinary chunk. */
struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t                    prev_foot;
  size_t                    head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2]; /* left/right subtrees */
  struct malloc_tree_chunk* parent;   /* parent node in the trie */
  bindex_t                  index;    /* treebin index this chunk lives in */
};
01817
01818 typedef struct malloc_tree_chunk tchunk;
01819 typedef struct malloc_tree_chunk* tchunkptr;
01820 typedef struct malloc_tree_chunk* tbinptr;
01821
01822
01823 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
01824
01825
01826
01827
01828
01829
01830
01831
01832
01833
01834
01835
01836
01837
01838
01839
01840
01841
01842
01843
01844
01845
01846
01847
01848
01849
01850
01851
01852
01853
01854
01855
01856
01857
01858
01859
01860
01861
01862
01863
01864
01865
01866
01867
01868
01869
01870
01871
01872
01873
01874
01875
01876
01877
01878
01879
01880
01881
/* Record describing one contiguous region of memory obtained from the
   system (via MORECORE or mmap).  Segments are kept on a singly linked
   list headed by the embedded `seg` field of malloc_state. */
struct malloc_segment {
  char*        base;             /* base address of the region */
  size_t       size;             /* allocated size of the region */
  struct malloc_segment* next;   /* next segment in list, or 0 */
  flag_t       sflags;           /* IS_MMAPPED_BIT / EXTERN_BIT flags */
};
01888
01889 #define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
01890 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
01891
01892 typedef struct malloc_segment msegment;
01893 typedef struct malloc_segment* msegmentptr;
01894
01895
01896
01897
01898
01899
01900
01901
01902
01903
01904
01905
01906
01907
01908
01909
01910
01911
01912
01913
01914
01915
01916
01917
01918
01919
01920
01921
01922
01923
01924
01925
01926
01927
01928
01929
01930
01931
01932
01933
01934
01935
01936
01937
01938
01939
01940
01941
01942
01943
01944
01945
01946
01947
01948
01949
01950
01951
01952
01953
01954
01955
01956
01957
01958
01959
01960
01961
01962
01963
01964
01965
01966
01967
01968
01969
01970
01971 #define NSMALLBINS (32U)
01972 #define NTREEBINS (32U)
01973 #define SMALLBIN_SHIFT (3U)
01974 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
01975 #define TREEBIN_SHIFT (8U)
01976 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
01977 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
01978 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
01979
/* Per-malloc-space state: bin bookkeeping, top/dv chunks, trim and
   footprint accounting, and the head of the segment list.  The global
   instance is _gm_; with MSPACES each mspace carries its own. */
struct malloc_state {
  binmap_t   smallmap;        /* bitmap of nonempty smallbins */
  binmap_t   treemap;         /* bitmap of nonempty treebins */
  size_t     dvsize;          /* size of the designated-victim chunk */
  size_t     topsize;         /* size of the top (wilderness) chunk */
  char*      least_addr;      /* least address ever obtained from system */
  mchunkptr  dv;              /* designated victim: preferred small-request chunk */
  mchunkptr  top;             /* top chunk; nonzero iff space is initialized */
  size_t     trim_check;      /* topsize threshold that triggers trim checks */
  size_t     magic;           /* consistency/footer check value from mparams */
  mchunkptr  smallbins[(NSMALLBINS+1)*2]; /* packed smallbin list heads */
  tbinptr    treebins[NTREEBINS];         /* treebin trie roots */
  size_t     footprint;       /* current bytes obtained from system */
  size_t     max_footprint;   /* high-water mark of footprint */
  flag_t     mflags;          /* USE_LOCK/USE_MMAP/NONCONTIGUOUS option bits */
#if USE_LOCKS
  MLOCK_T    mutex;           /* per-space lock, used when USE_LOCK_BIT set */
#endif
  msegment   seg;             /* embedded first segment record (list head) */
};
02000
02001 typedef struct malloc_state* mstate;
02002
02003
02004
02005
02006
02007
02008
02009
02010
/* Globally shared tunables, initialized once (init_mparams) and then
   adjustable only via mallopt.  magic == 0 (page_size == 0) marks the
   not-yet-initialized state. */
struct malloc_params {
  size_t magic;           /* random-ish check value; also "initialized" flag */
  size_t page_size;       /* system page size */
  size_t granularity;     /* system allocation unit for new segments */
  size_t mmap_threshold;  /* request size above which mmap is used directly */
  size_t trim_threshold;  /* excess top size that triggers trimming */
  flag_t default_mflags;  /* mflags installed into newly created spaces */
};
02019
02020 static struct malloc_params mparams;
02021
02022
02023 static struct malloc_state _gm_;
02024 #define gm (&_gm_)
02025 #define is_global(M) ((M) == &_gm_)
02026 #define is_initialized(M) ((M)->top != 0)
02027
02028
02029
02030
02031
02032 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
02033 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
02034 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
02035
02036 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
02037 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
02038 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
02039
02040 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
02041 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
02042
02043 #define set_lock(M,L)\
02044 ((M)->mflags = (L)?\
02045 ((M)->mflags | USE_LOCK_BIT) :\
02046 ((M)->mflags & ~USE_LOCK_BIT))
02047
02048
02049 #define page_align(S)\
02050 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
02051
02052
02053 #define granularity_align(S)\
02054 (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
02055
02056 #define is_page_aligned(S)\
02057 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
02058 #define is_granularity_aligned(S)\
02059 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
02060
02061
02062 #define segment_holds(S, A)\
02063 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
02064
02065
02066 static msegmentptr segment_holding(mstate m, char* addr) {
02067 msegmentptr sp = &m->seg;
02068 for (;;) {
02069 if (addr >= sp->base && addr < sp->base + sp->size)
02070 return sp;
02071 if ((sp = sp->next) == 0)
02072 return 0;
02073 }
02074 }
02075
02076
02077 static int has_segment_link(mstate m, msegmentptr ss) {
02078 msegmentptr sp = &m->seg;
02079 for (;;) {
02080 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
02081 return 1;
02082 if ((sp = sp->next) == 0)
02083 return 0;
02084 }
02085 }
02086
02087 #ifndef MORECORE_CANNOT_TRIM
02088 #define should_trim(M,s) ((s) > (M)->trim_check)
02089 #else
02090 #define should_trim(M,s) (0)
02091 #endif
02092
02093
02094
02095
02096
02097
02098 #define TOP_FOOT_SIZE\
02099 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
02100
02101
02102
02103
02104
02105
02106
02107
02108
02109
02110 #if USE_LOCKS
02111
02112
02113 #define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
02114
02115 #define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
02116 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
02117 #else
02118
02119 #ifndef PREACTION
02120 #define PREACTION(M) (0)
02121 #endif
02122
02123 #ifndef POSTACTION
02124 #define POSTACTION(M)
02125 #endif
02126
02127 #endif
02128
02129
02130
02131
02132
02133
02134
02135
02136
02137 #if PROCEED_ON_ERROR
02138
02139
02140 int malloc_corruption_error_count;
02141
02142
02143 static void reset_on_error(mstate m);
02144
02145 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
02146 #define USAGE_ERROR_ACTION(m, p)
02147
02148 #else
02149
02150 #ifndef CORRUPTION_ERROR_ACTION
02151 #define CORRUPTION_ERROR_ACTION(m) ABORT
02152 #endif
02153
02154 #ifndef USAGE_ERROR_ACTION
02155 #define USAGE_ERROR_ACTION(m,p) ABORT
02156 #endif
02157
02158 #endif
02159
02160
02161
/* -------------------------- Debugging setup ---------------------------- */

#if ! DEBUG

/* Without DEBUG, the consistency checks compile away to nothing. */
#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

#else
/* With DEBUG, each check forwards to one of the do_check_* routines. */
#define check_free_chunk(M,P) do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P) do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
#define check_malloc_state(M) do_check_malloc_state(M)

static void   do_check_any_chunk(mstate m, mchunkptr p);
static void   do_check_top_chunk(mstate m, mchunkptr p);
static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
static void   do_check_inuse_chunk(mstate m, mchunkptr p);
static void   do_check_free_chunk(mstate m, mchunkptr p);
static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void   do_check_tree(mstate m, tchunkptr t);
static void   do_check_treebin(mstate m, bindex_t i);
static void   do_check_smallbin(mstate m, bindex_t i);
static void   do_check_malloc_state(mstate m);
static int    bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif
02192
02193
02194
/* ---------------------------- Indexing Bins ---------------------------- */

/* A size is "small" if it maps to one of the NSMALLBINS exact-size bins. */
#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See comments about smallbin repositioning:
   smallbins are stored as pairs of pointers, so a bin "header" is
   addressed at twice the index. */
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))

/* Map a (large) size S to its treebin index I.  The index encodes the
   position of the highest set bit of (S >> TREEBIN_SHIFT) plus one
   extra bit of the size, giving two bins per power of two. */
#if defined(__GNUC__) && defined(i386)
/* x86: use bsrl to find the highest set bit directly. */
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}
#else
/* Portable version: branch-free binary search for the highest set bit. */
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  }\
}
#endif

/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
02252
02253
02254
02255
02256
/* ------------------------ Operations on bin maps ----------------------- */

/* Bit corresponding to given index */
#define idx2bit(i)              ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))

#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))

/* Index of the lowest set bit in X (X must be nonzero). */
#if defined(__GNUC__) && defined(i386)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
  I = (bindex_t)J;\
}

#else
#if USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else
/* Portable fallback: binary search on the isolated low bit.
   Assumes X is a power of two (callers isolate a bit first). */
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif
#endif

/* isolate the least set bit of a bitmap */
#define least_bit(x)         ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x)         ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))
02305
02306
02307
02308
02309
02310
02311
02312
02313
02314
02315
02316
02317
02318
02319
02320
02321
02322
02323
02324
02325
02326
02327
02328
02329
02330
02331
02332
02333
02334
/* ------------------------- Runtime integrity checks -------------------- */

#if !INSECURE
/* Check if address a is at least as high as any address we have seen */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
/* Check if p has its cinuse bit on */
#define ok_cinuse(p)     cinuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p)     pinuse(p)

#else
/* INSECURE builds skip all of the above checks. */
#define ok_address(M, a) (1)
#define ok_next(b, n)    (1)
#define ok_cinuse(p)     (1)
#define ok_pinuse(p)     (1)
#endif

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M)      ((M)->magic == mparams.magic)
#else
#define ok_magic(M)      (1)
#endif


/* In gcc, use __builtin_expect to minimize the effect of checks that are
   almost always true. */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e)  __builtin_expect(e, 1)
#else
#define RTCHECK(e)  (e)
#endif
#else
#define RTCHECK(e)  (1)
#endif
02370
02371
02372
/* -------------------------- In-use bit operations ---------------------- */

#if !FOOTERS

/* Without FOOTERS there is nothing to write into a chunk footer. */
#define mark_inuse_foot(M,p,s)

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#else

/* Set foot of inuse chunk to be xor of mstate and seed: lets free()
   recover (and validate) the owning mstate from the chunk itself. */
#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

/* Recover the owning mstate from a chunk footer written above. */
#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))

#endif
02416
02417
02418
02419
/* ---------------------------- setting mparams -------------------------- */

/*
  Initialize mparams on first use: thresholds, default flags, the magic
  value used for footer checks, the global mstate's lock/flags, and the
  system page size / allocation granularity.  Idempotent; guarded by
  (mparams.page_size == 0) and by the magic-init lock for the magic.
  Always returns 0.
*/
static int init_mparams(void) {
  if (mparams.page_size == 0) {
    size_t s;

    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif

#if (FOOTERS && !INSECURE)
    {
#if USE_DEV_RANDOM
      int fd;
      unsigned char buf[sizeof(size_t)];
      /* Try to use /dev/urandom, else fall back on using time */
      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
        s = *((size_t *) buf);
        close(fd);
      }
      else
#endif
        s = (size_t)(time(0) ^ (size_t)0x55555555U);
      /* force the magic to be nonzero and 8-byte "aligned" so it can
         never collide with a real aligned footer value */
      s |= (size_t)8U;
      s &= ~(size_t)7U;

    }
#else
    /* Non-FOOTERS builds never compare the magic; any value works. */
    s = (size_t)0x58585858U;
#endif
    ACQUIRE_MAGIC_INIT_LOCK();
    if (mparams.magic == 0) {
      mparams.magic = s;
      /* Set up lock for main malloc area */
      INITIAL_LOCK(&gm->mutex);
      gm->mflags = mparams.default_mflags;
    }
    RELEASE_MAGIC_INIT_LOCK();

#ifndef WIN32
    mparams.page_size = malloc_getpagesize;
    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
                           DEFAULT_GRANULARITY : mparams.page_size);
#else
    {
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      mparams.page_size = system_info.dwPageSize;
      mparams.granularity = system_info.dwAllocationGranularity;
    }
#endif

    /* Sanity-check configuration:
       size_t must be no wider than pointer type,
       ints must be at least 4 bytes,
       alignment must be at least 8 and a power of two,
       granularity and page size must be powers of two. */
    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
        (sizeof(int) < 4)  ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE      & (MCHUNK_SIZE-SIZE_T_ONE))      != 0) ||
        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
        ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
      ABORT;
  }
  return 0;
}
02494
02495
02496 static int change_mparam(int param_number, int value) {
02497 size_t val = (size_t)value;
02498 init_mparams();
02499 switch(param_number) {
02500 case M_TRIM_THRESHOLD:
02501 mparams.trim_threshold = val;
02502 return 1;
02503 case M_GRANULARITY:
02504 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
02505 mparams.granularity = val;
02506 return 1;
02507 }
02508 else
02509 return 0;
02510 case M_MMAP_THRESHOLD:
02511 mparams.mmap_threshold = val;
02512 return 1;
02513 default:
02514 return 0;
02515 }
02516 }
02517
#if DEBUG
/* ------------------------- Debugging Support --------------------------- */

/* Check properties of any chunk, whether free, inuse, mmapped etc  */
static void do_check_any_chunk(mstate m, mchunkptr p) {
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
}

/* Check properties of top chunk */
static void do_check_top_chunk(mstate m, mchunkptr p) {
  msegmentptr sp = segment_holding(m, (char*)p);
  size_t  sz = chunksize(p);
  assert(sp != 0);
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(sz == m->topsize);
  assert(sz > 0);
  /* top fills its segment up to the reserved top foot */
  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  assert(pinuse(p));
  assert(!next_pinuse(p));
}

/* Check properties of (inuse) mmapped chunks */
static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  size_t  sz = chunksize(p);
  size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
  assert(is_mmapped(p));
  assert(use_mmap(m));
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(!is_small(sz));
  /* the mapped region is a whole number of pages */
  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  /* trailing fenceposts terminate the mapped region */
  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
}

/* Check properties of inuse chunks */
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  assert(cinuse(p));
  assert(next_pinuse(p));
  /* If not pinuse and not mmapped, previous chunk has OK offset */
  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  if (is_mmapped(p))
    do_check_mmapped_chunk(m, p);
}

/* Check properties of free chunks */
static void do_check_free_chunk(mstate m, mchunkptr p) {
  size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
  mchunkptr next = chunk_plus_offset(p, sz);
  do_check_any_chunk(m, p);
  assert(!cinuse(p));
  assert(!next_pinuse(p));
  assert (!is_mmapped(p));
  if (p != m->dv && p != m->top) {
    if (sz >= MIN_CHUNK_SIZE) {
      assert((sz & CHUNK_ALIGN_MASK) == 0);
      assert(is_aligned(chunk2mem(p)));
      /* free chunks record their size in the following chunk's prev_foot */
      assert(next->prev_foot == sz);
      assert(pinuse(p));
      assert (next == m->top || cinuse(next));
      /* doubly-linked list consistency */
      assert(p->fd->bk == p);
      assert(p->bk->fd == p);
    }
    else  /* markers are always of size SIZE_T_SIZE */
      assert(sz == SIZE_T_SIZE);
  }
}

/* Check properties of malloced chunks at the point they are malloced */
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
    do_check_inuse_chunk(m, p);
    assert((sz & CHUNK_ALIGN_MASK) == 0);
    assert(sz >= MIN_CHUNK_SIZE);
    assert(sz >= s);
    /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
    assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  }
}
02602
02603
/* Check a tree and its subtrees.  */
static void do_check_tree(mstate m, tchunkptr t) {
  tchunkptr head = 0;
  tchunkptr u = t;
  bindex_t tindex = t->index;
  size_t tsize = chunksize(t);
  bindex_t idx;
  compute_tree_index(tsize, idx);
  assert(tindex == idx);
  assert(tsize >= MIN_LARGE_SIZE);
  /* tsize must lie inside the size range covered by bin idx */
  assert(tsize >= minsize_for_tree_index(idx));
  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));

  do { /* traverse the circular fd list of same-sized chunks */
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(u->index == tindex);
    assert(chunksize(u) == tsize);
    assert(!cinuse(u));
    assert(!next_pinuse(u));
    assert(u->fd->bk == u);
    assert(u->bk->fd == u);
    if (u->parent == 0) {
      /* only the list head is linked into the tree; others have no children */
      assert(u->child[0] == 0);
      assert(u->child[1] == 0);
    }
    else {
      assert(head == 0); /* only one node on the list is the tree node */
      head = u;
      assert(u->parent != u);
      /* parent must point back to u via a child slot or the bin header */
      assert (u->parent->child[0] == u ||
              u->parent->child[1] == u ||
              *((tbinptr*)(u->parent)) == u);
      if (u->child[0] != 0) {
        assert(u->child[0]->parent == u);
        assert(u->child[0] != u);
        do_check_tree(m, u->child[0]);
      }
      if (u->child[1] != 0) {
        assert(u->child[1]->parent == u);
        assert(u->child[1] != u);
        do_check_tree(m, u->child[1]);
      }
      if (u->child[0] != 0 && u->child[1] != 0) {
        assert(chunksize(u->child[0]) < chunksize(u->child[1]));
      }
    }
    u = u->fd;
  } while (u != t);
  assert(head != 0);
}
02653
02654
02655 static void do_check_treebin(mstate m, bindex_t i) {
02656 tbinptr* tb = treebin_at(m, i);
02657 tchunkptr t = *tb;
02658 int empty = (m->treemap & (1U << i)) == 0;
02659 if (t == 0)
02660 assert(empty);
02661 if (!empty)
02662 do_check_tree(m, t);
02663 }
02664
02665
/* Check all the chunks in a smallbin. */
static void do_check_smallbin(mstate m, bindex_t i) {
  sbinptr b = smallbin_at(m, i);
  mchunkptr p = b->bk;
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
  if (p == b)
    assert(empty);
  if (!empty) {
    for (; p != b; p = p->bk) {
      size_t size = chunksize(p);
      mchunkptr q;
      /* each chunk claims to be free */
      do_check_free_chunk(m, p);
      /* chunk belongs in this bin */
      assert(small_index(size) == i);
      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
      /* chunk is followed by an inuse chunk */
      q = next_chunk(p);
      if (q->head != FENCEPOST_HEAD)
        do_check_inuse_chunk(m, q);
    }
  }
}
02688
02689
/* Find x in a bin. Used in other check functions.
   Returns 1 if chunk x is currently linked into its small or tree bin. */
static int bin_find(mstate m, mchunkptr x) {
  size_t size = chunksize(x);
  if (is_small(size)) {
    bindex_t sidx = small_index(size);
    sbinptr b = smallbin_at(m, sidx);
    if (smallmap_is_marked(m, sidx)) {
      mchunkptr p = b;
      do {
        if (p == x)
          return 1;
      } while ((p = p->fd) != b);
    }
  }
  else {
    bindex_t tidx;
    compute_tree_index(size, tidx);
    if (treemap_is_marked(m, tidx)) {
      /* descend the tree following the size bits, then scan the
         same-size list at the node found */
      tchunkptr t = *treebin_at(m, tidx);
      size_t sizebits = size << leftshift_for_tree_index(tidx);
      while (t != 0 && chunksize(t) != size) {
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
        sizebits <<= 1;
      }
      if (t != 0) {
        tchunkptr u = t;
        do {
          if (u == (tchunkptr)x)
            return 1;
        } while ((u = u->fd) != t);
      }
    }
  }
  return 0;
}
02724
02725
/* Traverse each chunk and check it; return total of all chunk sizes. */
static size_t traverse_and_check(mstate m) {
  size_t sum = 0;
  if (is_initialized(m)) {
    msegmentptr s = &m->seg;
    sum += m->topsize + TOP_FOOT_SIZE;
    while (s != 0) {
      mchunkptr q = align_as_chunk(s->base);
      mchunkptr lastq = 0;
      assert(pinuse(q));
      /* walk the segment until top or a fencepost terminates it */
      while (segment_holds(s, q) &&
             q != m->top && q->head != FENCEPOST_HEAD) {
        sum += chunksize(q);
        if (cinuse(q)) {
          /* inuse chunks must not also sit in a bin */
          assert(!bin_find(m, q));
          do_check_inuse_chunk(m, q);
        }
        else {
          /* free chunks are either the dv or binned, never adjacent */
          assert(q == m->dv || bin_find(m, q));
          assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
          do_check_free_chunk(m, q);
        }
        lastq = q;
        q = next_chunk(q);
      }
      s = s->next;
    }
  }
  return sum;
}
02755
02756
/* Check all properties of malloc_state. */
static void do_check_malloc_state(mstate m) {
  bindex_t i;
  size_t total;
  /* check bins */
  for (i = 0; i < NSMALLBINS; ++i)
    do_check_smallbin(m, i);
  for (i = 0; i < NTREEBINS; ++i)
    do_check_treebin(m, i);

  if (m->dvsize != 0) { /* check dv chunk */
    do_check_any_chunk(m, m->dv);
    assert(m->dvsize == chunksize(m->dv));
    assert(m->dvsize >= MIN_CHUNK_SIZE);
    assert(bin_find(m, m->dv) == 0);
  }

  if (m->top != 0) {   /* check top chunk */
    do_check_top_chunk(m, m->top);
    assert(m->topsize == chunksize(m->top));
    assert(m->topsize > 0);
    assert(bin_find(m, m->top) == 0);
  }

  total = traverse_and_check(m);
  assert(total <= m->footprint);
  assert(m->footprint <= m->max_footprint);
}
#endif /* DEBUG */
02785
02786
02787
/* ----------------------------- statistics ------------------------------ */

#if !NO_MALLINFO
/* Fill in a struct mallinfo by traversing every segment of m.
   Fields follow the traditional SVID/XPG mallinfo meanings. */
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE; /* top always free */
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      size_t sum = mfree;
      msegmentptr s = &m->seg;
      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
          sum += sz;
          if (!cinuse(q)) {
            mfree += sz;
            ++nfree;
          }
          q = next_chunk(q);
        }
        s = s->next;
      }

      nm.arena    = sum;
      nm.ordblks  = nfree;
      nm.hblkhd   = m->footprint - sum;
      nm.usmblks  = m->max_footprint;
      nm.uordblks = m->footprint - mfree;
      nm.fordblks = mfree;
      nm.keepcost = m->topsize;
    }

    POSTACTION(m);
  }
  return nm;
}
#endif /* !NO_MALLINFO */
02827
/* Print footprint and in-use statistics for m to stderr. */
static void internal_malloc_stats(mstate m) {
  if (!PREACTION(m)) {
    size_t maxfp = 0;
    size_t fp = 0;
    size_t used = 0;
    check_malloc_state(m);
    if (is_initialized(m)) {
      msegmentptr s = &m->seg;
      maxfp = m->max_footprint;
      fp = m->footprint;
      /* start from footprint minus top, then subtract each free chunk */
      used = fp - (m->topsize + TOP_FOOT_SIZE);

      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          if (!cinuse(q))
            used -= chunksize(q);
          q = next_chunk(q);
        }
        s = s->next;
      }
    }

    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
    fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
    fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));

    POSTACTION(m);
  }
}
02859
02860
02861
02862
02863
02864
02865
02866
02867
02868
02869
/* ----------------------- Operations on smallbins ----------------------- */

/*
  Various forms of linking and unlinking are defined as macros.  Even
  the ones for trees usually inline, although not always very smartly.
*/

/* Link a free chunk P of size S into its smallbin. */
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}

/* Unlink a chunk P of size S from its smallbin. */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (F == B)\
    clear_smallmap(M, I);\
  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Unlink the first chunk P from a full smallbin B with index I. */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F)\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F))) {\
    B->fd = F;\
    F->bk = B;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Replace dv node, binning the old one.
   Used only when dvsize known to be small. */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    assert(is_small(DVS));\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}
02937
02938
02939
02940
/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk X of size S into its treebin.  If the bin was empty,
   X becomes the root; otherwise we descend using the size bits and
   either attach X as a new leaf or append it to the same-size list. */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}
02991
02992
02993
02994
02995
02996
02997
02998
02999
03000
03001
03002
03003
03004
03005
03006
03007
03008
/*
  Unlink chunk X from its treebin.  If X has list siblings, splice it
  out of the circular fd/bk list; otherwise replace it in the tree by
  its rightmost descendant R (which preserves the ordering invariant),
  fixing up the bin header or parent pointer and reparenting X's
  children onto R.
*/
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F))) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}
03079
03080
03081
/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }


/* Relays to internal calls to malloc/free from realloc, memalign etc */

#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else
#if MSPACES
/* route to dlmalloc/dlfree only for the global mstate */
#define internal_malloc(m, b)\
   (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
#define internal_free(m, mem)\
   if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
#endif
#endif
03107
03108
03109
03110
03111
03112
03113
03114
03115
03116
03117
03118
03119
03120
/* -----------------------  Direct-mmapping chunks ----------------------- */

/*
  Directly mmapped chunks are set up with an offset to the start of the
  mmapped region stored in the prev_foot field of the chunk.  This allows
  reconstruction of the required argument to MUNMAP when freed, and also
  allows adjustment of the returned chunk to meet alignment requirements.
*/

/* Malloc using mmap.  Returns chunk2mem pointer, or 0 on failure. */
static void* mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (mmsize > nb) {     /* Check for wrap around 0 */
    char* mm = (char*)(DIRECT_MMAP(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset | IS_MMAPPED_BIT;
      (p)->head = (psize|CINUSE_BIT);
      mark_inuse_foot(m, p, psize);
      /* terminate with fenceposts so traversal stops here */
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

      if (mm < m->least_addr)
        m->least_addr = mm;
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
    }
  }
  return 0;
}
03146
03147
/* Realloc using mmap.  Returns the (possibly moved) chunk, or 0 if the
   resize cannot or should not be done via mremap. */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
  size_t oldsize = chunksize(oldp);
  if (is_small(nb)) /* Can't shrink mmap regions below small size */
    return 0;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
    return oldp;
  else {
    size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
                                         CHUNK_ALIGN_MASK);
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, 1);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      newp->head = (psize|CINUSE_BIT);
      mark_inuse_foot(m, newp, psize);
      /* re-terminate the (moved) region with fenceposts */
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

      if (cp < m->least_addr)
        m->least_addr = cp;
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
      return newp;
    }
  }
  return 0;
}
03181
03182
03183
03184
/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize) {
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char*)p + offset);
  psize -= offset;

  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = mparams.trim_threshold; /* reset on each update */
}
03198
03199
03200 static void init_bins(mstate m) {
03201
03202 bindex_t i;
03203 for (i = 0; i < NSMALLBINS; ++i) {
03204 sbinptr bin = smallbin_at(m,i);
03205 bin->fd = bin->bk = bin;
03206 }
03207 }
03208
#if PROCEED_ON_ERROR

/* default corruption action: forget about all existing memory so that
   subsequent calls start from a clean (leaked) state instead of
   touching corrupted structures. */
static void reset_on_error(mstate m) {
  int i;
  ++malloc_corruption_error_count;
  /* Reinitialize fields to forget about all memory.
     BUG FIX: the bin *bitmaps* must be cleared here; smallbins/treebins
     are arrays and are reset via init_bins()/the treebin loop below
     (the old `m->smallbins = m->treebins = 0;` did not compile). */
  m->smallmap = m->treemap = 0;
  m->dvsize = m->topsize = 0;
  m->seg.base = 0;
  m->seg.size = 0;
  m->seg.next = 0;
  m->top = m->dv = 0;
  for (i = 0; i < NTREEBINS; ++i)
    *treebin_at(m, i) = 0;
  init_bins(m);
}
#endif
03227
03228
/* Allocate chunk of size nb and prepend the remainder with chunk of
   size qsize to the existing space starting at oldbase.  Called when a
   new segment was obtained immediately below an existing one. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (char*)oldfirst - (char*)p;
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  assert((char*)oldfirst > (char*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  }
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  }
  else {
    if (!cinuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
    check_free_chunk(m, q);
  }

  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
}
03269
03270
03271
/* Add a segment to hold a new noncontiguous region.  The segment record
   itself is carved out of the top of the new space; the gap between the
   old top and the new segment is fenced off or freed. */
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  /* Determine locations and sizes of segment, fenceposts, old top */
  char* old_top = (char*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char* old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char* asp = rawsp + offset;
  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;
  int nfences = 0;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  assert(is_aligned(ss));
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg; /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;
  m->seg.next = ss;

  /* Insert trailing fenceposts up to the old segment end */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    ++nfences;
    if ((char*)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }
  assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }

  check_top_chunk(m, m->top);
}
03323
03324
03325
03326
/* -------------------------- System allocation -------------------------- */

/*
  Get memory from the system using MORECORE and/or MMAP.  Strategy:
    1. For a large enough request, try a direct mmap.
    2. If contiguous MORECORE is allowed, try extending the current
       segment (or creating the first one) with sbrk-style calls.
    3. Otherwise try a plain MMAP of a fresh region.
    4. Finally fall back on non-contiguous MORECORE.
  Whatever arrives is merged into an existing segment, prepended to one,
  or installed as a brand-new segment; the request is then served from
  top.  Returns chunk2mem pointer or 0 with MALLOC_FAILURE_ACTION.
*/
static void* sys_alloc(mstate m, size_t nb) {
  char* tbase = CMFAIL;
  size_t tsize = 0;
  flag_t mmap_flag = 0;

  init_mparams();

  /* Directly map large chunks */
  if (use_mmap(m) && nb >= mparams.mmap_threshold) {
    void* mem = mmap_alloc(m, nb);
    if (mem != 0)
      return mem;
  }

  /* Try getting memory with a call to MORECORE contiguous to top */
  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
    char* br = CMFAIL;
    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
    size_t asize = 0;
    ACQUIRE_MORECORE_LOCK();

    if (ss == 0) {  /* First time through or recovery */
      char* base = (char*)CALL_MORECORE(0);
      if (base != CMFAIL) {
        asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
        /* Adjust to end on a page boundary */
        if (!is_page_aligned(base))
          asize += (page_align((size_t)base) - (size_t)base);
        /* Can't call MORECORE if size is negative when treated as signed */
        if (asize < HALF_MAX_SIZE_T &&
            (br = (char*)(CALL_MORECORE(asize))) == base) {
          tbase = base;
          tsize = asize;
        }
      }
    }
    else {
      /* Subtract out existing available top space from MORECORE request. */
      asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
      /* Use mem here only if it did continuously extend old space */
      if (asize < HALF_MAX_SIZE_T &&
          (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
        tbase = br;
        tsize = asize;
      }
    }

    if (tbase == CMFAIL) {    /* Cope with partial failure */
      if (br != CMFAIL) {     /* Try to use/extend the space we did get */
        if (asize < HALF_MAX_SIZE_T &&
            asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
          size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
          if (esize < HALF_MAX_SIZE_T) {
            char* end = (char*)CALL_MORECORE(esize);
            if (end != CMFAIL)
              asize += esize;
            else {            /* Can't use; try to release */
              CALL_MORECORE(-asize);
              br = CMFAIL;
            }
          }
        }
      }
      if (br != CMFAIL) {    /* Use the space we did get */
        tbase = br;
        tsize = asize;
      }
      else
        disable_contiguous(m); /* Don't try contiguous path in the future */
    }

    RELEASE_MORECORE_LOCK();
  }

  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
    size_t rsize = granularity_align(req);
    if (rsize > nb) { /* Fail if wraps around zero */
      char* mp = (char*)(CALL_MMAP(rsize));
      if (mp != CMFAIL) {
        tbase = mp;
        tsize = rsize;
        mmap_flag = IS_MMAPPED_BIT;
      }
    }
  }

  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
    size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
    if (asize < HALF_MAX_SIZE_T) {
      char* br = CMFAIL;
      char* end = CMFAIL;
      ACQUIRE_MORECORE_LOCK();
      br = (char*)(CALL_MORECORE(asize));
      end = (char*)(CALL_MORECORE(0));
      RELEASE_MORECORE_LOCK();
      if (br != CMFAIL && end != CMFAIL && br < end) {
        size_t ssize = end - br;
        if (ssize > nb + TOP_FOOT_SIZE) {
          tbase = br;
          tsize = ssize;
        }
      }
    }
  }

  if (tbase != CMFAIL) {

    if ((m->footprint += tsize) > m->max_footprint)
      m->max_footprint = m->footprint;

    if (!is_initialized(m)) { /* first-time initialization */
      m->seg.base = m->least_addr = tbase;
      m->seg.size = tsize;
      m->seg.sflags = mmap_flag;
      m->magic = mparams.magic;
      init_bins(m);
      if (is_global(m))
        init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
      else {
        /* Offset top by embedded malloc_state */
        mchunkptr mn = next_chunk(mem2chunk(m));
        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
      }
    }

    else {
      /* Try to merge with an existing segment */
      msegmentptr sp = &m->seg;
      while (sp != 0 && tbase != sp->base + sp->size)
        sp = sp->next;
      if (sp != 0 &&
          !is_extern_segment(sp) &&
          (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
          segment_holds(sp, m->top)) { /* append */
        sp->size += tsize;
        init_top(m, m->top, m->topsize + tsize);
      }
      else {
        if (tbase < m->least_addr)
          m->least_addr = tbase;
        sp = &m->seg;
        while (sp != 0 && sp->base != tbase + tsize)
          sp = sp->next;
        if (sp != 0 &&
            !is_extern_segment(sp) &&
            (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
          char* oldbase = sp->base;
          sp->base = tbase;
          sp->size += tsize;
          return prepend_alloc(m, tbase, oldbase, nb);
        }
        else
          add_segment(m, tbase, tsize, mmap_flag);
      }
    }

    if (nb < m->topsize) { /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      check_top_chunk(m, m->top);
      check_malloced_chunk(m, chunk2mem(p), nb);
      return chunk2mem(p);
    }
  }

  MALLOC_FAILURE_ACTION;
  return 0;
}
03515
03516
03517
03518
/* Unmap and unlink any mmapped segments that no longer contain used
   chunks.  Returns the total number of bytes released back to the
   system.  Only non-extern mmapped segments are candidates. */
static size_t release_unused_segments(mstate m) {
  size_t released = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap only if the first chunk spans the entire segment
         (minus the segment/top footer) and is free. */
      if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        assert(segment_holds(sp, (char*)sp));
        /* Detach the chunk from whichever structure tracks it:
           either the designated victim or a tree bin. */
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        }
        else {
          unlink_large_chunk(m, tp);
        }
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          m->footprint -= size;
          /* unlink the obsoleted segment record from the list */
          sp = pred;
          sp->next = next;
        }
        else { /* back out if we cannot unmap: re-insert the chunk */
          insert_large_chunk(m, tp, psize);
        }
      }
    }
    pred = sp;
    sp = next;
  }
  return released;
}
03558
/* Give back unused memory from the top chunk to the system, keeping
   at least `pad` bytes of slack.  Returns 1 if any memory was
   released, else 0. */
static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE; /* always keep room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-sized units, keeping at
         least one unit so top never disappears. */
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char*)m->top);

      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
          if (HAVE_MMAP &&
              sp->size >= extra &&
              !has_segment_link(m, sp)) { /* can't shrink if pinned by a link */
            size_t newsize = sp->size - extra;
            /* Prefer mremap; fall back to unmapping the tail. */
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
              released = extra;
            }
          }
        }
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T) /* avoid overflowing the sbrk arg */
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MORECORE_LOCK();
          {
            /* Only trim via MORECORE if the break is still where we
               left it (no foreign sbrk in between). */
            char* old_br = (char*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              char* rel_br = (char*)(CALL_MORECORE(-extra));
              char* new_br = (char*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
            }
          }
          RELEASE_MORECORE_LOCK();
        }
      }

      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);
      }
    }

    /* Also unmap any whole unused mmapped segments. */
    if (HAVE_MMAP)
      released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed calls. */
    if (released == 0)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}
03621
03622
03623
03624
/* Allocate a large request (nb bytes) from the best-fitting chunk in
   a tree bin.  Returns a mem pointer, or 0 if the designated victim
   (dv) would be a better fit, letting the caller use dv instead. */
static void* tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* unsigned negation: acts as "infinity" remainder */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);

  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse this bin's tree looking for a node of size exactly nb,
       steering left/right by the next bit of nb at each level. */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0; /* deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0) /* exact fit: stop immediately */
          break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* fall back to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }

  if (t == 0 && v == 0) { /* bin empty: use root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }

  /* Walk down the leftmost path to find the smallest fitting chunk. */
  while (t != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /* If dv is a better fit, return 0 so malloc will use it instead.
     (m->dvsize - nb may wrap; the unsigned comparison is deliberate.) */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) { /* split v */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE) /* remainder too small to split off */
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        }
        return chunk2mem(v);
      }
    }
    CORRUPTION_ERROR_ACTION(m);
  }
  return 0;
}
03696
03697
/* Allocate a small request from the best-fitting chunk in a tree bin.
   Caller guarantees m->treemap is nonzero. */
static void* tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);

  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  /* Descend the leftmost path, tracking the smallest remainder. */
  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE) /* remainder too small to split off */
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize); /* remainder becomes new designated victim */
      }
      return chunk2mem(v);
    }
  }

  CORRUPTION_ERROR_ACTION(m);
  return 0;
}
03735
03736
03737
/* Resize the chunk holding oldmem to at least `bytes`.  Tries, in
   order: mmap resize, in-place shrink, extension into top; otherwise
   falls back to malloc+copy+free.  Returns new mem pointer or 0. */
static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
  if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
    return 0;
  }
  if (!PREACTION(m)) {
    mchunkptr oldp = mem2chunk(oldmem);
    size_t oldsize = chunksize(oldp);
    mchunkptr next = chunk_plus_offset(oldp, oldsize);
    mchunkptr newp = 0;
    void* extra = 0; /* trailing remainder to free after unlocking */

    /* Sanity-check oldp before touching it. */
    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
                ok_next(oldp, next) && ok_pinuse(next))) {
      size_t nb = request2size(bytes);
      if (is_mmapped(oldp))
        newp = mmap_resize(m, oldp, nb);
      else if (oldsize >= nb) { /* already big enough: maybe split tail */
        size_t rsize = oldsize - nb;
        newp = oldp;
        if (rsize >= MIN_CHUNK_SIZE) {
          mchunkptr remainder = chunk_plus_offset(newp, nb);
          set_inuse(m, newp, nb);
          set_inuse(m, remainder, rsize);
          extra = chunk2mem(remainder);
        }
      }
      else if (next == m->top && oldsize + m->topsize > nb) {
        /* Expand in place into the top chunk. */
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(oldp, nb);
        set_inuse(m, oldp, nb);
        newtop->head = newtopsize |PINUSE_BIT;
        m->top = newtop;
        m->topsize = newtopsize;
        newp = oldp;
      }
    }
    else {
      USAGE_ERROR_ACTION(m, oldmem);
      POSTACTION(m);
      return 0;
    }

    POSTACTION(m);

    if (newp != 0) {
      if (extra != 0) {
        internal_free(m, extra); /* release split-off tail */
      }
      check_inuse_chunk(m, newp);
      return chunk2mem(newp);
    }
    else {
      /* Fall back: allocate fresh, copy the payload, free the old. */
      void* newmem = internal_malloc(m, bytes);
      if (newmem != 0) {
        size_t oc = oldsize - overhead_for(oldp);
        memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
        internal_free(m, oldmem);
      }
      return newmem;
    }
  }
  return 0;
}
03806
03807
03808
/* Allocate `bytes` with the given alignment by over-allocating and
   giving back the misaligned leading/trailing pieces.  Alignment is
   rounded up to a power of two of at least MIN_CHUNK_SIZE. */
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  if (alignment <= MALLOC_ALIGNMENT) /* ordinary malloc already suffices */
    return internal_malloc(m, bytes);
  if (alignment < MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) { /* not a power of 2 */
    size_t a = MALLOC_ALIGNMENT << 1;
    while (a < alignment) a <<= 1;
    alignment = a;
  }

  if (bytes >= MAX_REQUEST - alignment) {
    if (m != 0) { /* fail only if a state was supplied */
      MALLOC_FAILURE_ACTION;
    }
  }
  else {
    size_t nb = request2size(bytes);
    /* Over-allocate enough to guarantee an aligned chunk can be carved. */
    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
    char* mem = (char*)internal_malloc(m, req);
    if (mem != 0) {
      void* leader = 0;  /* misaligned head piece, freed at the end */
      void* trailer = 0; /* surplus tail piece, freed at the end */
      mchunkptr p = mem2chunk(mem);

      if (PREACTION(m)) return 0;
      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
        /* Find an aligned spot inside the chunk.  Since we over-
           allocated, bumping by alignment when the gap is too small
           for a leading free chunk still fits. */
        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
                                                       alignment -
                                                       SIZE_T_ONE)) &
                                             -alignment));
        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
          br : br+alignment;
        mchunkptr newp = (mchunkptr)pos;
        size_t leadsize = pos - (char*)(p);
        size_t newsize = chunksize(p) - leadsize;

        if (is_mmapped(p)) { /* for mmapped chunks, just adjust offset */
          newp->prev_foot = p->prev_foot + leadsize;
          newp->head = (newsize|CINUSE_BIT);
        }
        else { /* otherwise, give back the leader */
          set_inuse(m, newp, newsize);
          set_inuse(m, p, leadsize);
          leader = chunk2mem(p);
        }
        p = newp;
      }

      /* Give back any spare room at the end. */
      if (!is_mmapped(p)) {
        size_t size = chunksize(p);
        if (size > nb + MIN_CHUNK_SIZE) {
          size_t remainder_size = size - nb;
          mchunkptr remainder = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, remainder, remainder_size);
          trailer = chunk2mem(remainder);
        }
      }

      assert (chunksize(p) >= nb);
      assert((((size_t)(chunk2mem(p))) % alignment) == 0);
      check_inuse_chunk(m, p);
      POSTACTION(m);
      /* Free the pieces outside the lock. */
      if (leader != 0) {
        internal_free(m, leader);
      }
      if (trailer != 0) {
        internal_free(m, trailer);
      }
      return chunk2mem(p);
    }
  }
  return 0;
}
03893
03894
03895
/*
  Common support for independent_X routines: allocate n_elements
  chunks in one shot from a single contiguous allocation, then carve
  it up.

  opts bit 0: sizes is a single element size applied to all elements
              (otherwise sizes is an array of n_elements sizes)
  opts bit 1: zero the contents
  chunks:     if non-null, caller-supplied array to hold the pointers;
              otherwise an array is carved out of the allocation too.
*/
static void** ialloc(mstate m,
                     size_t n_elements,
                     size_t* sizes,
                     int opts,
                     void* chunks[]) {

  size_t element_size;   /* chunksize of each element, if all same */
  size_t contents_size;  /* total size of elements */
  size_t array_size;     /* request size of pointer array */
  void* mem;             /* malloced aggregate space */
  mchunkptr p;           /* corresponding chunk */
  size_t remainder_size; /* remaining bytes while splitting */
  void** marray;         /* either "chunks" or malloced ptr array */
  mchunkptr array_chunk; /* chunk for malloced ptr array */
  flag_t was_enabled;    /* to disable mmap */
  size_t size;
  size_t i;

  /* compute array length, if needed */
  if (chunks != 0) {
    if (n_elements == 0)
      return chunks; /* nothing to do */
    marray = chunks;
    array_size = 0;
  }
  else {
    /* if empty req, must still return chunk representing empty array */
    if (n_elements == 0)
      return (void**)internal_malloc(m, 0);
    marray = 0;
    array_size = request2size(n_elements * (sizeof(void*)));
  }

  /* compute total element size */
  if (opts & 0x1) { /* all-same-size */
    element_size = request2size(*sizes);
    contents_size = n_elements * element_size;
  }
  else { /* add up all the sizes */
    element_size = 0;
    contents_size = 0;
    for (i = 0; i != n_elements; ++i)
      contents_size += request2size(sizes[i]);
  }

  size = contents_size + array_size;

  /* Temporarily disable mmap so the allocation is a single ordinary
     (splittable) chunk, then restore the previous setting. */
  was_enabled = use_mmap(m);
  disable_mmap(m);
  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  if (was_enabled)
    enable_mmap(m);
  if (mem == 0)
    return 0;

  if (PREACTION(m)) return 0;
  p = mem2chunk(mem);
  remainder_size = chunksize(p);

  assert(!is_mmapped(p));

  if (opts & 0x2) { /* optionally clear the elements */
    memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
  }

  /* If not provided, allocate the pointer array as final part of chunk */
  if (marray == 0) {
    size_t array_chunk_size;
    array_chunk = chunk_plus_offset(p, contents_size);
    array_chunk_size = remainder_size - contents_size;
    marray = (void**) (chunk2mem(array_chunk));
    set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
    remainder_size = contents_size;
  }

  /* split out elements */
  for (i = 0; ; ++i) {
    marray[i] = chunk2mem(p);
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      else
        size = request2size(sizes[i]);
      remainder_size -= size;
      set_size_and_pinuse_of_inuse_chunk(m, p, size);
      p = chunk_plus_offset(p, size);
    }
    else { /* the final element absorbs any overallocation slop */
      set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
      break;
    }
  }

#if DEBUG
  if (marray != chunks) {
    /* final element must have exactly exhausted the chunk */
    if (element_size != 0) {
      assert(remainder_size == element_size);
    }
    else {
      assert(remainder_size == request2size(sizes[i]));
    }
    check_inuse_chunk(m, mem2chunk(marray));
  }
  for (i = 0; i != n_elements; ++i)
    check_inuse_chunk(m, mem2chunk(marray[i]));

#endif

  POSTACTION(m);
  return marray;
}
04021
04022
04023
04024
04025 #if !ONLY_MSPACES
04026
void* dlmalloc(size_t bytes) {
  /*
     Basic algorithm:
     For small requests: try, in order, a remainderless fit from the
     exact or next small bin, the designated victim (dv), the next
     larger small bin, and the tree bins.  For large requests: try a
     best-fit tree chunk, then dv, then the top chunk.  If all fail,
     fall back to sys_alloc to get memory from the system.
  */
  if (!PREACTION(gm)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;

      /* Remainderless fit to a smallbin (this bin or the next). */
      if ((smallbits & 0x3U) != 0) {
        mchunkptr b, p;
        idx += ~smallbits & 1; /* uses next bin if idx itself empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }

      else if (nb > gm->dvsize) { /* dv can't satisfy: look further */
        if (smallbits != 0) { /* use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }

        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate; forces failure below */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    /* Use the designated victim if it fits. */
    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    /* Otherwise split off the front of the top chunk. */
    else if (nb < gm->topsize) {
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    /* Last resort: get more memory from the system. */
    mem = sys_alloc(gm, nb);

  postaction:
    POSTACTION(gm);
    return mem;
  }

  return 0;
}
04158
void dlfree(void* mem) {
  /*
     Free the chunk holding mem, consolidating with adjacent free
     chunks: coalesce backward with a free predecessor, forward with
     a free successor / dv / top, unmap directly-mmapped chunks, and
     trim top when it grows beyond the trim threshold.
  */
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    /* With footers, recover (and validate) the owning mstate. */
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else
#define fm gm
#endif
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) { /* previous chunk is free: consolidate back */
          size_t prevsize = p->prev_foot;
          if ((prevsize & IS_MMAPPED_BIT) != 0) {
            /* Directly mmapped chunk: unmap the whole mapping. */
            prevsize &= ~IS_MMAPPED_BIT;
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* p is dv and next is in use: just grow dv and stop */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) { /* consolidate forward */
            if (next == fm->top) {
              /* Merge into top; possibly trim. */
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              /* Merge into dv. */
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          insert_chunk(fm, p, psize);
          check_free_chunk(fm, p);
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
#if !FOOTERS
#undef fm
#endif
}
04258
04259 void* dlcalloc(size_t n_elements, size_t elem_size) {
04260 void* mem;
04261 size_t req = 0;
04262 if (n_elements != 0) {
04263 req = n_elements * elem_size;
04264 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
04265 (req / n_elements != elem_size))
04266 req = MAX_SIZE_T;
04267 }
04268 mem = dlmalloc(req);
04269 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
04270 memset(mem, 0, req);
04271 return mem;
04272 }
04273
/* Resize oldmem to `bytes`.  NULL oldmem behaves as malloc; with
   REALLOC_ZERO_BYTES_FREES defined, a zero size behaves as free. */
void* dlrealloc(void* oldmem, size_t bytes) {
  if (oldmem == 0)
    return dlmalloc(bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    dlfree(oldmem);
    return 0;
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  /* NOTE: when REALLOC_ZERO_BYTES_FREES is undefined, this `else`
     binds to `if (oldmem == 0)` above, whose branch returns, so the
     behavior is identical either way. */
  else {
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    /* With footers, recover the owning mstate from the chunk. */
    mstate m = get_mstate_for(mem2chunk(oldmem));
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    return internal_realloc(m, oldmem, bytes);
  }
}
04296
/* Allocate `bytes` aligned to `alignment` from the global mstate. */
void* dlmemalign(size_t alignment, size_t bytes) {
  return internal_memalign(gm, alignment, bytes);
}
04300
/* Allocate n_elements zeroed chunks of elem_size each in one shot.
   opts = 3: bit 0 = all-same-size, bit 1 = clear contents. */
void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void* chunks[]) {
  size_t sz = elem_size; /* ialloc wants a pointer to the size */
  return ialloc(gm, n_elements, &sz, 3, chunks);
}
04306
/* Allocate n_elements chunks with individually specified sizes in
   one shot (opts = 0: per-element sizes, no clearing). */
void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                              void* chunks[]) {
  return ialloc(gm, n_elements, sizes, 0, chunks);
}
04311
/* Allocate `bytes` aligned to the system page size. */
void* dlvalloc(size_t bytes) {
  size_t pagesz;
  init_mparams(); /* ensure mparams.page_size is set */
  pagesz = mparams.page_size;
  return dlmemalign(pagesz, bytes);
}
04318
04319 void* dlpvalloc(size_t bytes) {
04320 size_t pagesz;
04321 init_mparams();
04322 pagesz = mparams.page_size;
04323 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
04324 }
04325
/* Release unused memory from the global mstate, keeping `pad` bytes
   of slack.  Returns 1 if memory was released, else 0. */
int dlmalloc_trim(size_t pad) {
  int result = 0;
  if (!PREACTION(gm)) {
    result = sys_trim(gm, pad);
    POSTACTION(gm);
  }
  return result;
}
04334
/* Current number of bytes obtained from the system. */
size_t dlmalloc_footprint(void) {
  return gm->footprint;
}
04338
/* High-water mark of bytes obtained from the system. */
size_t dlmalloc_max_footprint(void) {
  return gm->max_footprint;
}
04342
04343 #if !NO_MALLINFO
/* Return mallinfo statistics for the global mstate. */
struct mallinfo dlmallinfo(void) {
  return internal_mallinfo(gm);
}
04347 #endif
04348
04349 void dlmalloc_stats() {
04350 internal_malloc_stats(gm);
04351 }
04352
04353 size_t dlmalloc_usable_size(void* mem) {
04354 if (mem != 0) {
04355 mchunkptr p = mem2chunk(mem);
04356 if (cinuse(p))
04357 return chunksize(p) - overhead_for(p);
04358 }
04359 return 0;
04360 }
04361
/* Adjust a tunable malloc parameter; returns nonzero on success. */
int dlmallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}
04365
04366 #endif
04367
04368
04369
04370 #if MSPACES
04371
/* Carve a malloc_state out of the start of [tbase, tbase+tsize) and
   initialize it; the rest of the region becomes the top chunk. */
static mstate init_user_mstate(char* tbase, size_t tsize) {
  size_t msize = pad_request(sizeof(struct malloc_state));
  mchunkptr mn;
  mchunkptr msp = align_as_chunk(tbase);
  mstate m = (mstate)(chunk2mem(msp));
  memset(m, 0, msize);
  INITIAL_LOCK(&m->mutex);
  msp->head = (msize|PINUSE_BIT|CINUSE_BIT); /* state chunk is in use */
  m->seg.base = m->least_addr = tbase;
  m->seg.size = m->footprint = m->max_footprint = tsize;
  m->magic = mparams.magic;
  m->mflags = mparams.default_mflags;
  disable_contiguous(m); /* user mspaces never extend contiguously */
  init_bins(m);
  mn = next_chunk(mem2chunk(m));
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  check_top_chunk(m, m->top);
  return m;
}
04391
/* Create an independent mspace with at least `capacity` usable bytes
   (0 means one granularity unit), backed by mmap.  Returns 0 on
   failure. */
mspace create_mspace(size_t capacity, int locked) {
  mstate m = 0;
  size_t msize = pad_request(sizeof(struct malloc_state));
  init_mparams(); /* ensure page/granularity params are set */
  /* Reject capacities that would overflow when overhead is added. */
  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    size_t rs = ((capacity == 0)? mparams.granularity :
                 (capacity + TOP_FOOT_SIZE + msize));
    size_t tsize = granularity_align(rs);
    char* tbase = (char*)(CALL_MMAP(tsize));
    if (tbase != CMFAIL) {
      m = init_user_mstate(tbase, tsize);
      m->seg.sflags = IS_MMAPPED_BIT; /* so destroy_mspace can unmap */
      set_lock(m, locked);
    }
  }
  return (mspace)m;
}
04410
/* Create an mspace inside caller-supplied memory [base, base+capacity).
   The memory is marked extern so it is never unmapped by the
   allocator.  Returns 0 if capacity is too small or too large. */
mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  mstate m = 0;
  size_t msize = pad_request(sizeof(struct malloc_state));
  init_mparams(); /* ensure page/granularity params are set */

  if (capacity > msize + TOP_FOOT_SIZE &&
      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    m = init_user_mstate((char*)base, capacity);
    m->seg.sflags = EXTERN_BIT; /* caller owns the memory */
    set_lock(m, locked);
  }
  return (mspace)m;
}
04424
/* Destroy an mspace, unmapping every mmapped (non-extern) segment.
   Returns the number of bytes freed back to the system. */
size_t destroy_mspace(mspace msp) {
  size_t freed = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    msegmentptr sp = &ms->seg;
    while (sp != 0) {
      char* base = sp->base;
      size_t size = sp->size;
      flag_t flag = sp->sflags;
      /* advance before unmapping: sp itself lives inside a segment */
      sp = sp->next;
      if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
          CALL_MUNMAP(base, size) == 0)
        freed += size;
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return freed;
}
04445
04446
04447
04448
04449
04450
04451
/* mspace version of dlmalloc: same algorithm, operating on the
   given mspace instead of the global state. */
void* mspace_malloc(mspace msp, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (!PREACTION(ms)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;

      /* Remainderless fit to a smallbin (this bin or the next). */
      if ((smallbits & 0x3U) != 0) {
        mchunkptr b, p;
        idx += ~smallbits & 1; /* uses next bin if idx itself empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }

      else if (nb > ms->dvsize) { /* dv can't satisfy: look further */
        if (smallbits != 0) { /* use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }

        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate; forces failure below */
    else {
      nb = pad_request(bytes);
      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }
    }

    /* Use the designated victim if it fits. */
    if (nb <= ms->dvsize) {
      size_t rsize = ms->dvsize - nb;
      mchunkptr p = ms->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
        ms->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = ms->dvsize;
        ms->dvsize = 0;
        ms->dv = 0;
        set_inuse_and_pinuse(ms, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    /* Otherwise split off the front of the top chunk. */
    else if (nb < ms->topsize) {
      size_t rsize = ms->topsize -= nb;
      mchunkptr p = ms->top;
      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(ms, ms->top);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    /* Last resort: get more memory from the system. */
    mem = sys_alloc(ms, nb);

  postaction:
    POSTACTION(ms);
    return mem;
  }

  return 0;
}
04565
/* mspace version of dlfree: free mem, coalescing with neighbors.
   With FOOTERS the owning mstate is recovered from the chunk itself;
   otherwise msp is trusted (after a magic check). */
void mspace_free(mspace msp, void* mem) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) { /* previous chunk is free: consolidate back */
          size_t prevsize = p->prev_foot;
          if ((prevsize & IS_MMAPPED_BIT) != 0) {
            /* Directly mmapped chunk: unmap the whole mapping. */
            prevsize &= ~IS_MMAPPED_BIT;
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                /* p is dv and next is in use: just grow dv and stop */
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) { /* consolidate forward */
            if (next == fm->top) {
              /* Merge into top; possibly trim. */
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              /* Merge into dv. */
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);
          insert_chunk(fm, p, psize);
          check_free_chunk(fm, p);
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
}
04656
/* mspace version of calloc: allocate and zero n_elements * elem_size
   bytes, with overflow detection on the multiplication. */
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (n_elements != 0) {
    req = n_elements * elem_size;
    /* Cheap screen: divide only when an operand is large enough
       that the product could have wrapped. */
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = internal_malloc(ms, req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    memset(mem, 0, req);
  return mem;
}
04676
/* mspace version of realloc.  NULL oldmem behaves as malloc; with
   REALLOC_ZERO_BYTES_FREES defined, a zero size behaves as free. */
void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  if (oldmem == 0)
    return mspace_malloc(msp, bytes);
#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) {
    mspace_free(msp, oldmem);
    return 0;
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  /* NOTE: when REALLOC_ZERO_BYTES_FREES is undefined, this `else`
     binds to `if (oldmem == 0)` above, whose branch returns, so the
     behavior is identical either way. */
  else {
#if FOOTERS
    /* With footers, recover the owning mstate from the chunk. */
    mchunkptr p = mem2chunk(oldmem);
    mstate ms = get_mstate_for(p);
#else /* FOOTERS */
    mstate ms = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(ms)) {
      USAGE_ERROR_ACTION(ms,ms);
      return 0;
    }
    return internal_realloc(ms, oldmem, bytes);
  }
}
04700
/* mspace version of memalign. */
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  return internal_memalign(ms, alignment, bytes);
}
04709
04710 void** mspace_independent_calloc(mspace msp, size_t n_elements,
04711 size_t elem_size, void* chunks[]) {
04712 size_t sz = elem_size;
04713 mstate ms = (mstate)msp;
04714 if (!ok_magic(ms)) {
04715 USAGE_ERROR_ACTION(ms,ms);
04716 return 0;
04717 }
04718 return ialloc(ms, n_elements, &sz, 3, chunks);
04719 }
04720
04721 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
04722 size_t sizes[], void* chunks[]) {
04723 mstate ms = (mstate)msp;
04724 if (!ok_magic(ms)) {
04725 USAGE_ERROR_ACTION(ms,ms);
04726 return 0;
04727 }
04728 return ialloc(ms, n_elements, sizes, 0, chunks);
04729 }
04730
04731 int mspace_trim(mspace msp, size_t pad) {
04732 int result = 0;
04733 mstate ms = (mstate)msp;
04734 if (ok_magic(ms)) {
04735 if (!PREACTION(ms)) {
04736 result = sys_trim(ms, pad);
04737 POSTACTION(ms);
04738 }
04739 }
04740 else {
04741 USAGE_ERROR_ACTION(ms,ms);
04742 }
04743 return result;
04744 }
04745
04746 void mspace_malloc_stats(mspace msp) {
04747 mstate ms = (mstate)msp;
04748 if (ok_magic(ms)) {
04749 internal_malloc_stats(ms);
04750 }
04751 else {
04752 USAGE_ERROR_ACTION(ms,ms);
04753 }
04754 }
04755
04756 size_t mspace_footprint(mspace msp) {
04757 size_t result;
04758 mstate ms = (mstate)msp;
04759 if (ok_magic(ms)) {
04760 result = ms->footprint;
04761 }
04762 USAGE_ERROR_ACTION(ms,ms);
04763 return result;
04764 }
04765
04766
04767 size_t mspace_max_footprint(mspace msp) {
04768 size_t result;
04769 mstate ms = (mstate)msp;
04770 if (ok_magic(ms)) {
04771 result = ms->max_footprint;
04772 }
04773 USAGE_ERROR_ACTION(ms,ms);
04774 return result;
04775 }
04776
04777
04778 #if !NO_MALLINFO
04779 struct mallinfo mspace_mallinfo(mspace msp) {
04780 mstate ms = (mstate)msp;
04781 if (!ok_magic(ms)) {
04782 USAGE_ERROR_ACTION(ms,ms);
04783 }
04784 return internal_mallinfo(ms);
04785 }
04786 #endif
04787
int mspace_mallopt(int param_number, int value) {
  /* Tune a global malloc parameter; mspaces share mparams, so this is
     independent of any particular mspace.  Returns 1 on success. */
  int changed = change_mparam(param_number, value);
  return changed;
}
04791
04792 #endif
04793
04794
04795
04796
04797
04798
04799
04800
04801
04802
04803
04804
04805
04806
04807
04808
04809
04810
04811
04812
04813
04814
04815
04816
04817
04818
04819
04820
04821
04822
04823
04824
04825
04826
04827
04828
04829
04830
04831
04832
04833
04834
04835
04836
04837
04838
04839
04840
04841
04842
04843
04844
04845
04846
04847
04848
04849
04850
04851
04852
04853
04854
04855
04856
04857
04858
04859
04860
04861
04862
04863
04864
04865
04866
04867
04868
04869
04870
04871
04872
04873
04874
04875
04876
04877
04878
04879
04880
04881
04882
04883
04884
04885
04886
04887
04888
04889
04890
04891
04892
04893
04894
04895
04896
04897
04898
04899
04900
04901
04902
04903
04904
04905
04906
04907
04908
04909
04910
04911
04912
04913
04914
04915
04916
04917
04918
04919
04920
04921
04922
04923
04924
04925
04926
04927
04928
04929
04930
04931
04932
04933
04934
04935
04936
04937
04938
04939
04940
04941
04942
04943
04944
04945
04946
04947
04948
04949
04950
04951
04952
04953
04954
04955
04956
04957
04958
04959
04960
04961
04962
04963
04964
04965
04966
04967
04968
04969
04970
04971
04972
04973
04974
04975
04976
04977
04978
04979
04980
04981
04982
04983
04984
04985
04986
04987
04988
04989
04990
04991
04992
04993
04994
04995
04996
04997
04998
04999
05000
05001
05002
05003
05004
05005
05006
05007
05008
05009
05010
05011
05012
05013
05014
05015
05016
05017
05018
05019
05020
05021
05022
05023
05024
05025
05026
05027
05028
05029
05030
05031
05032
05033
05034
05035
05036
05037
05038
05039
05040
05041
05042
05043
05044
05045
05046
05047
05048
05049
05050
05051
05052
05053
05054
05055
05056
05057
05058
05059
05060
05061
05062
05063
05064
05065