00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065
00066
00067
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077
00078
00079
00080
00081
00082
00083
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103
00104
00105
00106
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
00130
00131
00132
00133
00134
00135
00136
00137
00138
00139
00140
00141
00142
00143
00144
00145
00146
00147
00148
00149
00150
00151
00152
00153
00154
00155
00156
00157
00158
00159
00160
00161
00162
00163
00164
00165
00166
00167
00168
00169
00170
00171
00172
00173
00174
00175
00176
00177
00178
00179
00180
00181
00182
00183
00184
00185
00186
00187
00188
00189
00190
00191
00192
00193
00194
00195
00196
00197
00198
00199
00200
00201
00202
00203
00204
00205
00206
00207 #include "tm.h"
00208 #include "internal.h"
00209
00210
00211
00212
00213
00214
00215 void tm_validate_lists();
00216
00217
00218
00219
00220 static
00221 tm_block *_tm_block_alloc_from_free_list(size_t size);
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
/* Color for nodes allocated outside the sweep phase.
 * ECRU: allocated but not yet proven live. */
#define tm_DEFAULT_ALLOC_COLOR tm_ECRU

/* Color for nodes allocated during sweep.
 * GREY keeps freshly allocated nodes from being reclaimed by the
 * sweep that is still in progress. */
#define tm_SWEEP_ALLOC_COLOR tm_GREY

/* Allocation color used for each collector phase.
 * Presumably indexed by enum tm_phase, with SWEEP as the fifth
 * entry (matches the switch in _tm_phase_init); the trailing -1 is a
 * terminator/guard.  TODO(review): confirm the enum order. */
static const tm_color tm_phase_alloc[] = {
  tm_DEFAULT_ALLOC_COLOR,
  tm_DEFAULT_ALLOC_COLOR,
  tm_DEFAULT_ALLOC_COLOR,
  tm_DEFAULT_ALLOC_COLOR,
  tm_SWEEP_ALLOC_COLOR,
  -1
};

#undef tm_DEFAULT_ALLOC_COLOR
#undef tm_SWEEP_ALLOC_COLOR
00262
00263
00264
00265
00266
00267
00268
00269
00270
00271
/* The single global instance of all allocator/collector state. */
struct tm_data tm;
00273
00274
00275
00276
00277
00278
00279
00280
00281
00282
00283
00284
/*
 * Enter collector phase p: record the phase transition in the
 * statistics, reset the per-phase flags, and initialize whatever
 * iteration state the new phase needs (node-loop cursors, root-scan
 * state).
 */
void _tm_phase_init(int p)
{
  tm_msg("p %s\n", tm_phase_name[p]);

#if 0
  /* Debugging hook: stop at a specific allocation id. */
  if ( tm.alloc_id == 1987 )
    tm_stop();
#endif

  /* Transition counts; the tm_phase_END row/column hold totals. */
  ++ tm.n_phase_transitions[tm.phase][p];
  ++ tm.n_phase_transitions[tm.phase][tm_phase_END];
  ++ tm.n_phase_transitions[tm_phase_END][p];
  ++ tm.n_phase_transitions[tm_phase_END][tm_phase_END];

  /* Leaving SWEEP: restart the allocations-since-sweep counter. */
  if ( tm.phase == tm_SWEEP && tm.phase != p ) {
    tm.alloc_since_sweep = 0;
  }

#if 0
  fprintf(stderr, "\n%s->%s #%lu %lu %lu %lu %lu\n", tm_phase_name[tm.phase], tm_phase_name[p], (unsigned long) tm.alloc_id,
	  (unsigned long) tm.n[tm_WHITE],
	  (unsigned long) tm.n[tm_ECRU],
	  (unsigned long) tm.n[tm_GREY],
	  (unsigned long) tm.n[tm_BLACK],
	  0
	  );
#endif

  switch ( (tm.phase = p) ) {
  case tm_ALLOC:
    /* Plain allocation until memory pressure starts a new cycle. */
    tm.allocating = 1;
    tm.unmarking = 0;
    tm.marking = 0;
    tm.scanning = 0;
    tm.sweeping = 0;
    break;

  case tm_UNMARK:
    /* Turn BLACK (marked) nodes back to ECRU. */
    tm.allocating = 0;
    tm.unmarking = 1;
    tm.marking = 0;
    tm.scanning = 0;
    tm.sweeping = 0;

    /* Begin iterating over the BLACK nodes. */
    tm_node_LOOP_INIT(tm_BLACK);

    /* Nodes-in-use snapshot: everything currently BLACK. */
    tm.n[tm_NU] = tm.n[tm_BLACK];
    break;

  case tm_ROOT:
    /* Mark nodes referenced from the root set. */
    tm.allocating = 0;
    tm.unmarking = 0;
    tm.marking = 1;
    tm.scanning = 0;
    tm.sweeping = 0;

    /* Mutation counters restart with each root scan. */
    tm.stack_mutations = tm.data_mutations = 0;

    /* Prepare the root-set iterator. */
    _tm_root_loop_init();
    break;

  case tm_SCAN:
    /* Scan GREY nodes, marking what they reference. */
    tm.allocating = 0;
    tm.unmarking = 0;
    tm.marking = 1;
    tm.scanning = 1;
    tm.sweeping = 0;

    tm.stack_mutations = tm.data_mutations = 0;

    /* Begin iterating over the GREY nodes. */
    tm_node_LOOP_INIT(tm_GREY);
    break;

  case tm_SWEEP:
    /* Reclaim ECRU nodes; scanning must already be complete. */
    tm.allocating = 0;
    tm.unmarking = 0;
    tm.marking = 0;
    tm.scanning = 0;
    tm.sweeping = 1;

    tm_assert_test(tm.n[tm_GREY] == 0);
    break;

  default:
    tm_fatal();
    break;
  }

  if ( 1 || p == tm_SWEEP ) {
    /* (placeholder; intentionally empty) */
  }

}
00406
00407
00408
00409
00410
00411
00412
00413
00414
00415
/*
 * Reset the incremental block-sweep iterator.
 * The implementation is currently disabled, so this is a no-op kept
 * as a hook at the call sites.
 */
__inline
void _tm_block_sweep_init()
{
#if 0
  tm.bt = tm_list_next(&tm.types);
  tm.bb = tm.bt != tm_list_next(tm.bt) ?
    tm_list_next(&tm.bt->blocks) :
    0;
#endif
}
00426
00427
00428
00429
00430
00431
00432
00433
00434
00435
00436
/*
 * Core color accounting for changing node n (in block b, of type t)
 * to color c: updates the per-allocation counters, the
 * color-transition statistics (relaxed while parceling, when a
 * same-color "transition" is expected), and the global / per-type /
 * per-block color counts, then stamps the color on the node's list
 * header.  Does NOT move the node between color lists -- that is
 * tm_node_set_color's job.
 */
static __inline
void _tm_node_set_color(tm_node *n, tm_block *b, tm_type *t, tm_color c)
{
  int nc = tm_node_color(n);

  tm_assert_test(b);
  tm_assert_test(t);
  tm_assert_test(c <= tm_BLACK);

  /* Per-allocation color counters (cleared at each allocation). */
  ++ tm.alloc_n[c];
  ++ tm.alloc_n[tm_TOTAL];

  /* Transition statistics; skipped during parceling, where the
     new node is already the target color. */
  if ( ! tm.parceling ) {
    tm_assert_test(c != nc);
    ++ tm.n_color_transitions[nc][c];
    ++ tm.n_color_transitions[nc][tm_TOTAL];
    ++ tm.n_color_transitions[tm_TOTAL][c];
    ++ tm.n_color_transitions[tm_TOTAL][tm_TOTAL];
  }

  /* Global color counts. */
  ++ tm.n[c];
  tm_assert_test(tm.n[nc]);
  -- tm.n[nc];

  /* Per-type color counts. */
  ++ t->n[c];
  tm_assert_test(t->n[nc]);
  -- t->n[nc];

  /* Per-block color counts. */
  ++ b->n[c];
  tm_assert_test(b->n[nc]);
  -- b->n[nc];

  tm_list_set_color(n, c);
}
00477
00478
00479
00480
00481
00482
00483
00484
00485
00486
00487 void tm_node_set_color(tm_node *n, tm_block *b, tm_color c)
00488 {
00489 tm_type *t;
00490
00491 tm_assert_test(b);
00492
00493 tm_assert_test(b->type);
00494 t = b->type;
00495
00496 _tm_node_set_color(n, b, t, c);
00497
00498 tm_list_remove_and_append(&t->color_list[c], n);
00499
00500
00501 }
00502
00503
00504
00505
00506
00507
00508
00509
00510
00511
00512
00513
/*
 * Attach block b to type t: record the owner, compute the block's
 * node capacity, make it the block new nodes are parceled from, and
 * update the block counts.
 */
static __inline
void _tm_type_add_block(tm_type *t, tm_block *b)
{
  tm_assert_test(t);
  tm_assert_test(b);
  /* The block must not already belong to a type. */
  tm_assert_test(b->type == 0);

  b->type = t;

  /* Number of nodes (header + data) that fit in the data area. */
  tm_assert_test(! b->n[tm_CAPACITY]);
  b->n[tm_CAPACITY] = (b->end - b->begin) / (sizeof(tm_node) + t->size);

  /* Parcel new nodes from this block. */
  tm_assert_test(! t->parcel_from_block);
  t->parcel_from_block = b;

  /* Per-type and global block counts. */
  ++ t->n[tm_B];
  ++ tm.n[tm_B];

  tm_list_insert(&t->blocks, b);
}
00542
00543
00544
00545
00546
00547 static __inline
00548 void _tm_type_remove_block(tm_type *t, tm_block *b)
00549 {
00550 tm_assert_test(t);
00551 tm_assert_test(b->type);
00552 tm_assert_test(b->type == t);
00553
00554
00555 tm_assert_test(t->n[tm_B]);
00556 -- t->n[tm_B];
00557
00558
00559 tm_assert_test(tm.n[tm_B]);
00560 -- tm.n[tm_B];
00561
00562
00563 if ( t->parcel_from_block == b ) {
00564 t->parcel_from_block = 0;
00565 }
00566
00567
00568 tm_list_remove(b);
00569
00570
00571 b->type = 0;
00572 }
00573
00574
00575
00576
00577
/*
 * (Re)initialize a block's header after it arrives from the OS or the
 * free-block list: assign an id on first use, reset its list node,
 * compute the begin/end of the node data area, reset the parcel
 * pointer and counters, and update the first/last block tracking.
 */
static __inline
void tm_block_init(tm_block *b)
{
  tm_assert_test(b->size);

  /* Assign an id only the first time the block is used. */
  if ( ! b->id ) {
    b->id = ++ tm.block_id;
  }

  tm_list_init(&b->list);
  tm_list_set_color(b, tm_LIVE_BLOCK);

#if tm_name_GUARD
  b->name = "BLOCK";
#endif

  /* Not owned by any type yet. */
  b->type = 0;

  /* Usable node area: everything after the block header. */
  b->begin = (char*) b + tm_block_HDR_SIZE;
  b->end = (char*) b + b->size;

  /* Parceling starts at the beginning of the data area. */
  b->next_parcel = b->begin;

#if tm_block_GUARD
  b->guard1 = b->guard2 = tm_block_hash(b);
#endif

  /* Clear all per-block node counters. */
  memset(b->n, 0, sizeof(b->n));

  _tm_block_sweep_init();

  /* Track the address range of live blocks. */
  tm.block_last = b;
  if ( ! tm.block_first ) {
    tm.block_first = b;
  } else {
#if tm_USE_SBRK
    /* With sbrk, blocks are expected to grow upward in memory. */
    tm_assert((void*) b >= (void*) tm.block_first);
#endif
  }
}
00628
00629
00630
00631
00632
00633 static __inline
00634 size_t tm_block_align_size(size_t size)
00635 {
00636 size_t offset;
00637
00638
00639 if ( (offset = (size % tm_block_SIZE)) )
00640 size += tm_block_SIZE - offset;
00641
00642 return size;
00643 }
00644
00645
00646
00647
00648
00649
00650
/*
 * Try to reuse a block of exactly `size` (block-aligned) bytes from
 * tm.free_blocks.  Returns an initialized block, or 0 when no block
 * of that size is free.
 */
static
tm_block *_tm_block_alloc_from_free_list(size_t size)
{
  tm_block *b = 0;

  size = tm_block_align_size(size);

  /* Scan the free list for a block of the requested size. */
  {
    tm_block *bi;

    tm_list_LOOP(&tm.free_blocks, bi);
    {
      if ( bi->size == size &&
	   tm_list_color(bi) == tm_FREE_BLOCK
	   ) {
	tm_assert_test(tm.free_blocks_n);
	-- tm.free_blocks_n;

	tm_list_remove(bi);

	b = bi;

	tm_msg("b a fl b%p %d\n", (void*) b, tm.free_blocks_n);
	break;
      }
    }
    tm_list_LOOP_END;
  }

  if ( b ) {
    tm_block_init(b);

    /* Account for the block coming back into use. */
    ++ tm.n[tm_B];
    tm.blocks_allocated_since_gc += size / tm_block_SIZE;
  }

  return b;
}
00693
00694
00695
00696
/*
 * Allocate a block of at least `size` bytes: first from the
 * free-block list, then from the OS.  Returns 0 when the OS
 * allocation fails.
 */
static
tm_block *_tm_block_alloc(size_t size)
{
  tm_block *b;

  size = tm_block_align_size(size);

  /* Prefer recycling a free block of the same size. */
  b = _tm_block_alloc_from_free_list(size);

  if ( ! b ) {
    b = _tm_os_alloc_aligned(size);

    if ( ! b )
      return 0;

    /* Fresh OS block: no id yet; tm_block_init assigns one. */
    b->id = 0;

    b->size = size;

    /* OS block/byte usage and their high-water marks. */
    ++ tm.n[tm_B_OS];
    if ( tm.n[tm_B_OS_M] < tm.n[tm_B_OS] )
      tm.n[tm_B_OS_M] = tm.n[tm_B_OS];

    tm.n[tm_b_OS] += size;
    if ( tm.n[tm_b_OS_M] < tm.n[tm_b_OS] )
      tm.n[tm_b_OS_M] = tm.n[tm_b_OS];

    tm_msg("b a os b%p\n", (void*) b);

    tm_block_init(b);

    /* Account for the block coming into use (the free-list path does
       this inside _tm_block_alloc_from_free_list). */
    ++ tm.n[tm_B];
    tm.blocks_allocated_since_gc += size / tm_block_SIZE;
  }

  return b;
}
00750
00751
00752
00753
00754
00755
00756
/*
 * Search every type's block list for a block that could be scavenged
 * for type t: a block of a different type with no live nodes but with
 * parceled nodes (n[tm_TOTAL] non-zero).  Returns the first such
 * block, or 0.
 */
static
tm_block *tm_block_scavenge(tm_type *t)
{
  tm_type *type = 0;

  tm_list_LOOP(&tm.types, type);
  {
    tm_block *block = 0;

    tm_list_LOOP(&type->blocks, block);
    {
      if ( block->type != t &&
	   tm_block_unused(block) &&
	   block->n[tm_TOTAL]
	   ) {
	return block;
      }
    }
    tm_list_LOOP_END;
  }
  tm_list_LOOP_END;

  return 0;
}
00781
00782
00783
00784
00785
00786 static __inline
00787 void _tm_node_delete(tm_node *n, tm_block *b);
00788
00789
00790
00791
00792
00793
00794
00795
00796 static
00797 int _tm_block_unparcel_nodes(tm_block *b)
00798 {
00799 int count = 0, bytes = 0;
00800 tm_type *t = b->type;
00801
00802 tm_assert_test(b->type);
00803
00804
00805
00806
00807
00808 tm_assert_test(tm_block_unused(b));
00809
00810 {
00811 tm_node *n;
00812
00813
00814 n = tm_block_node_begin(b);
00815 while ( (void*) n < tm_block_node_next_parcel(b) ) {
00816
00817 ++ count;
00818 bytes += b->size;
00819
00820 tm_assert_test(tm_node_color(n) == tm_WHITE);
00821 _tm_node_delete(n, b);
00822
00823 n = tm_block_node_next(b, n);
00824 }
00825 }
00826
00827
00828
00829
00830 tm_assert_test(t->n[tm_WHITE] >= count);
00831 t->n[tm_WHITE] -= count;
00832 tm_assert_test(t->n[tm_TOTAL] >= count);
00833 t->n[tm_TOTAL] -= count;
00834
00835
00836 tm_assert_test(b->n[tm_WHITE] >= count);
00837 b->n[tm_WHITE] -= count;
00838 tm_assert_test(b->n[tm_WHITE] == 0);
00839
00840 tm_assert_test(b->n[tm_TOTAL] >= count);
00841 b->n[tm_TOTAL] -= count;
00842 tm_assert_test(b->n[tm_TOTAL] == 0);
00843
00844
00845 tm_assert_test(tm.n[tm_WHITE] >= count);
00846 tm.n[tm_WHITE] -= count;
00847 tm_assert_test(tm.n[tm_TOTAL] >= count);
00848 tm.n[tm_TOTAL] -= count;
00849
00850 return count;
00851 }
00852
00853
00854
00855
00856
/*
 * Detach a live, unused block from its type: give back its parceled
 * nodes, reset its parcel pointer, drop it from the global block
 * count and from the first/last tracking, and mark its page range
 * unused.
 */
static __inline
void _tm_block_reclaim(tm_block *b)
{
  tm_assert_test(b);
  tm_assert_test(tm_list_color(b) == tm_LIVE_BLOCK);

  /* Remove every parceled node from the lists and counters. */
  _tm_block_unparcel_nodes(b);

  b->next_parcel = b->begin;

  tm_assert_test(tm.n[tm_B]);
  -- tm.n[tm_B];

  /* Forget the block in the first/last address tracking. */
  if ( tm.block_last == b ) {
    tm.block_last = 0;
  }
  if ( tm.block_first == b ) {
    tm.block_first = 0;
  }

  _tm_type_remove_block(b->type, b);

  /* The page table no longer holds nodes at these addresses. */
  _tm_page_mark_unused_range(b, b->size);
}
00887
00888
00889
00890
00891
00892
00893
00894
00895
00896 static __inline
00897 void _tm_block_free(tm_block *b)
00898 {
00899 int os_free = 0;
00900
00901 tm_assert(tm_block_unused(b));
00902
00903
00904 _tm_block_reclaim(b);
00905
00906 #if tm_USE_MMAP
00907
00908 if ( tm.free_blocks_n > tm_block_min_free ) {
00909
00910 os_free = 1;
00911 } else {
00912 os_free = 0;
00913 }
00914 #endif
00915
00916 #if tm_USE_SBRK
00917
00918 if ( sbrk(0) == (void*) b->end ) {
00919
00920 tm_assert(tm_ptr_h == b->end);
00921 tm_ptr_h = b;
00922
00923
00924 os_free = 1;
00925 } else {
00926 os_free = 0;
00927 }
00928 #endif
00929
00930
00931 if ( os_free ) {
00932
00933 tm_assert_test(tm.n[tm_B_OS]);
00934 -- tm.n[tm_B_OS];
00935
00936 tm_assert_test(tm.n[tm_b_OS] > b->size);
00937 tm.n[tm_b_OS] -= b->size;
00938
00939 b->id = 0;
00940
00941
00942 _tm_os_free_aligned(b, b->size);
00943
00944 tm_msg("b f os b%p\n", (void*) b);
00945 } else {
00946
00947 tm_list_remove_and_append(&tm.free_blocks, b);
00948 ++ tm.free_blocks_n;
00949
00950
00951 tm_list_set_color(b, tm_FREE_BLOCK);
00952
00953 tm_msg("b f fl b%p %d\n", (void*) b, tm.free_blocks_n);
00954 }
00955
00956
00957 _tm_block_sweep_init();
00958
00959
00960
00961
00962
00963
00964
00965 }
00966
00967
00968
00969
00970
00971 static
00972 tm_block * _tm_block_alloc_for_type(tm_type *t)
00973 {
00974 tm_block *b;
00975
00976
00977 b = _tm_block_alloc(tm_block_SIZE);
00978
00979
00980
00981
00982 if ( b ) {
00983 _tm_type_add_block(t, b);
00984 }
00985
00986
00987
00988 return b;
00989 }
00990
00991
00992
00993
00994
00995
00996
00997
00998
00999
01000
/*
 * Initialize a freshly parceled node: it begins life WHITE (free).
 * Increments the TOTAL and WHITE counters on the type, the block, and
 * the globals, then runs the normal color-setting path to place the
 * node on the type's WHITE list (the redundant WHITE->WHITE
 * "transition" is permitted because tm.parceling is set).
 */
static __inline
void _tm_node_init(tm_node *n, tm_block *b)
{
  tm_type *t;

  tm_assert_test(b);
  tm_assert_test(b->type);

  t = b->type;

  tm_list_init(&n->list);

#if 1
  /* A freshly initialized list node already reads as WHITE. */
  tm_assert_test(tm_list_color(&n->list) == tm_WHITE);
#else
  tm_list_set_color(n, tm_WHITE);
#endif

  /* Per-type counts. */
  ++ t->n[tm_TOTAL];
  ++ t->n[tm_WHITE];

  /* Per-block counts. */
  ++ b->n[tm_TOTAL];
  ++ b->n[tm_WHITE];

  /* Global counts. */
  ++ tm.n[tm_TOTAL];
  ++ tm.n[tm_WHITE];

  /* Place the node on the type's WHITE list. */
  tm_node_set_color(n, b, tm_WHITE);
}
01042
01043
01044
01045
01046
01047 static __inline
01048 void _tm_node_delete(tm_node *n, tm_block *b)
01049 {
01050 tm_type *t;
01051 tm_color nc = tm_node_color(n);
01052
01053
01054 tm_assert_test(b->type);
01055 t = b->type;
01056 tm_assert_test(nc == tm_WHITE);
01057
01058 #if 0
01059
01060 tm_assert_test(t->n[tm_TOTAL]);
01061 -- t->n[tm_TOTAL];
01062 tm_assert_test(t->n[nc]);
01063 -- t->n[nc];
01064
01065
01066 tm_assert_test(b->n[tm_TOTAL] > 0);
01067 -- b->n[tm_TOTAL];
01068 tm_assert_test(b->n[nc]) > 0);
01069 -- b->n[nc];
01070
01071
01072 tm_assert_test(tm.n[tm_TOTAL] > 0);
01073 -- tm.n[tm_TOTAL];
01074 tm_assert_test(tm.n[nc] > 0);
01075 -- tm.n[nc];
01076 #endif
01077
01078
01079 tm_list_remove(n);
01080
01081
01082
01083 #if 0
01084 tm_msg("N d n%p[%lu] t%p\n",
01085 (void*) n,
01086 (unsigned long) t->size,
01087 (void*) t);
01088 #endif
01089 }
01090
01091
01092
01093
01094
01095
/*
 * Parcel up to `left` new nodes of type t out of t->parcel_from_block,
 * allocating a block from the free list if the type has none.
 * tm.parceling is raised around the work so the color bookkeeping
 * relaxes its transition checks.  Returns the number of nodes
 * parceled (0 when no block could be obtained).
 */
static __inline
int _tm_node_parcel_some(tm_type *t, long left)
{
  int count = 0;
  size_t bytes = 0;
  tm_block *b;

  ++ tm.parceling;

  /* Make sure the type has a block to parcel from. */
  if ( ! t->parcel_from_block ) {
    if ( (b = _tm_block_alloc_from_free_list(tm_block_SIZE)) ) {
      _tm_type_add_block(t, b);
    } else {
      goto done;
    }
  }

  b = t->parcel_from_block;

  {
    /* End address of the node currently being carved out. */
    void *pe = tm_block_node_begin(b);

    /* Carve nodes while another whole node still fits in the block. */
    while ( (pe = tm_block_node_next(b, tm_block_node_next_parcel(b)))
	    <= tm_block_node_end(b)
	    ) {
      tm_node *n;

      n = tm_block_node_next_parcel(b);

      /* Advance the block's parcel pointer past this node. */
      b->next_parcel = pe;

      /* Widen the low/high pointer range used to filter candidate
         pointers. */
      {
	void *ptr;

	if ( tm_ptr_l > (ptr = tm_node_ptr(n)) ) {
	  tm_ptr_l = ptr;
	}
	if ( tm_ptr_h < (ptr = pe) ) {
	  tm_ptr_h = ptr;
	}
      }

      /* The node starts WHITE on the type's free list. */
      _tm_node_init(n, b);

      tm_assert_test(tm_node_color(n) == tm_WHITE);

      ++ count;
      bytes += t->size;

      if ( -- left <= 0 ) {
	goto done;
      }
    }

    /* Block exhausted: stop parceling from it. */
    t->parcel_from_block = 0;
  }

 done:
  -- tm.parceling;

#if 0
  if ( count )
    tm_msg("N i n%lu b%lu t%lu\n", count, bytes, t->n[tm_WHITE]);
#endif

  return count;
}
01184
01185
01186
01187
01188
01189
01190 static __inline
01191 int _tm_node_parcel_or_alloc(tm_type *t)
01192 {
01193 int count;
01194
01195 count = _tm_node_parcel_some(t, tm_node_parcel_some_size);
01196 if ( ! count ) {
01197 if ( ! _tm_block_alloc_for_type(t) )
01198 return 0;
01199 count = _tm_node_parcel_some(t, tm_node_parcel_some_size);
01200 }
01201
01202
01203 return count;
01204 }
01205
01206
01207
01208
01209
01210
01211
01212
01213
01214
01215
01216 __inline
01217 void tm_type_init(tm_type *t, size_t size)
01218 {
01219
01220 t->id = ++ tm.type_id;
01221
01222
01223 tm_list_init(&t->list);
01224
01225
01226 tm_list_set_color(&t->list, tm_LIVE_TYPE);
01227 #if tm_name_GUARD
01228 t->name = "TYPE";
01229 #endif
01230 t->size = size;
01231
01232
01233 tm_list_init(&t->blocks);
01234 tm_list_set_color(&t->blocks, tm_LIVE_BLOCK);
01235
01236
01237 memset(t->n, 0, sizeof(t->n));
01238
01239
01240 {
01241 int j;
01242
01243 for ( j = 0; j < sizeof(t->color_list) / sizeof(t->color_list[0]); ++ j ) {
01244 tm_list_init(&t->color_list[j]);
01245 tm_list_set_color(&t->color_list[j], j);
01246 }
01247 }
01248
01249
01250 t->parcel_from_block = 0;
01251
01252
01253 t->desc = 0;
01254 }
01255
01256
01257
01258
01259
/*
 * Take a type header off tm.type_free, initialize it for objects of
 * `size` bytes, link it into tm.types, and pre-parcel an initial
 * batch of nodes so the first allocation is fast.
 *
 * NOTE(review): if tm.type_free is empty, t stays 0 and
 * tm_type_init(t, size) dereferences a null pointer.  Presumably the
 * type reserve is replenished elsewhere -- confirm callers cannot
 * exhaust it.
 */
static __inline
tm_type *tm_type_new(size_t size)
{
  tm_type *t;

  /* Pop a type header from the free list, if any. */
  if ( tm.type_free ) {
    t = tm.type_free;
    tm.type_free = t->hash_next;
    t->hash_next = 0;
  } else {
    t = 0;
  }

  tm_type_init(t, size);

  tm_list_insert(&tm.types, t);

  /* Make some nodes available up front. */
  _tm_node_parcel_or_alloc(t);

  return t;
}
01290
01291
01292
01293
01294
01295 static __inline
01296 int tm_size_hash(size_t size)
01297 {
01298 int i;
01299
01300
01301 tm_assert(size <= tm_block_SIZE_MAX);
01302
01303
01304 i = size / tm_ALLOC_ALIGN;
01305
01306
01307 i %= tm_type_hash_LEN;
01308
01309 return i;
01310 }
01311
01312
01313
01314
01315
/*
 * Find an existing type of exactly this size in the size hash.
 * A hit is moved to the front of its chain (move-to-front heuristic).
 * Returns 0 when no type of this size exists.
 */
static __inline
tm_type *tm_size_to_type_2(size_t size)
{
  tm_type **tp, *t;
  int i;

  i = tm_size_hash(size);

  /* Walk the chain, keeping a pointer to the previous link so the
     hit can be unlinked in place. */
  for ( tp = &tm.type_hash[i]; (t = (*tp)); tp = &t->hash_next ) {
    if ( t->size == size ) {
      /* Unlink and reinsert at the head of the bucket. */
      *tp = t->hash_next;
      t->hash_next = tm.type_hash[i];
      tm.type_hash[i] = t;
      break;
    }
  }

  return t;
}
01338
01339
01340
01341
01342
01343 static __inline
01344 tm_type *tm_type_new_2(size_t size)
01345 {
01346 int i;
01347 tm_type *t;
01348
01349
01350 t = tm_type_new(size);
01351
01352
01353 i = tm_size_hash(t->size);
01354 t->hash_next = tm.type_hash[i];
01355 tm.type_hash[i] = t;
01356
01357 #if 0
01358 tm_msg("t a t%p %lu\n", (void*) t, (unsigned long) size);
01359 #endif
01360
01361 return t;
01362 }
01363
01364
01365
01366
01367
01368 tm_adesc *tm_adesc_for_size(tm_adesc *desc, int force_new)
01369 {
01370 tm_type *t;
01371
01372 if ( ! force_new ) {
01373 t = tm_size_to_type_2(desc->size);
01374 if ( t )
01375 return t->desc;
01376 }
01377
01378 t = tm_type_new_2(desc->size);
01379 t->desc = desc;
01380 t->desc->hidden = t;
01381
01382 return t->desc;
01383 }
01384
01385
01386
01387
01388
01389 static __inline
01390 tm_type *tm_size_to_type(size_t size)
01391 {
01392 tm_type *t;
01393
01394
01395 size = (size + (tm_ALLOC_ALIGN - 1)) & ~(tm_ALLOC_ALIGN - 1);
01396
01397
01398 t = tm_size_to_type_2(size);
01399
01400 if ( t )
01401 return t;
01402
01403
01404 #define POW2(i) if ( size <= (1UL << i) ) size = (1UL << i); else
01405
01406 POW2(3)
01407 POW2(4)
01408 POW2(5)
01409 POW2(6)
01410 POW2(7)
01411 POW2(8)
01412 POW2(9)
01413 POW2(10)
01414 POW2(11)
01415 POW2(12)
01416 POW2(13)
01417 POW2(14)
01418 POW2(15)
01419 POW2(16)
01420 (void) 0;
01421
01422 #undef POW2
01423
01424
01425 t = tm_size_to_type_2(size);
01426
01427
01428 if ( ! t ) {
01429 t = tm_type_new_2(size);
01430 }
01431
01432 tm_assert_test(t->size == size);
01433
01434 return t;
01435 }
01436
01437
01438
01439
01440
01441
01442
01443
01444
01445
01446
01447
01448
01449 static __inline
01450 void _tm_node_sweep(tm_node *n, tm_block *b)
01451 {
01452
01453 tm_assert_test(tm_node_color(n) == tm_ECRU);
01454
01455
01456
01457 memset(tm_node_to_ptr(n), 0, b->type->size);
01458
01459
01460 tm_node_set_color(n, b, tm_WHITE);
01461
01462 #if 0
01463 tm_msg("s n%p\n", n);
01464 #endif
01465
01466
01467 if ( tm_block_unused(b) ) {
01468 _tm_block_free(b);
01469 }
01470 }
01471
01472
01473
01474
01475
/*
 * Sweep up to `left` ECRU nodes back to WHITE using the global ECRU
 * loop cursor.  Returns the number of ECRU nodes remaining.
 */
static
size_t _tm_node_sweep_some(int left)
{
  size_t count = 0, bytes = 0;

  if ( tm.n[tm_ECRU] ) {
    if ( _tm_check_sweep_error() )
      return 0;

    /* tm_node_LOOP binds `t` (type) and `n` (node) in its body. */
    tm_node_LOOP(tm_ECRU);
    {
      tm_assert_test(tm_node_color(n) == tm_ECRU);

      _tm_node_sweep(n, tm_node_to_block(n));

      bytes += t->size;
      ++ count;

      if ( -- left <= 0 ) {
	tm_node_LOOP_BREAK(tm_ECRU);
      }
    }
    tm_node_LOOP_END(tm_ECRU);
  }

  /* Swept bytes are no longer in use. */
  tm.n[tm_b] -= bytes;

#if 0
  if ( count )
    tm_msg("s c%lu b%lu l%lu\n", count, bytes, tm.n[tm_ECRU]);
#endif

  return tm.n[tm_ECRU];
}
01512
01513
01514
01515
01516
01517 static
01518 size_t _tm_node_sweep_some_for_type(tm_type *t, int left)
01519 {
01520 size_t count = 0, bytes = 0;
01521 tm_node *n;
01522
01523 while ( t->n[tm_ECRU] ) {
01524 n = tm_list_first(&t->color_list[tm_ECRU]);
01525 _tm_node_sweep(n, tm_node_to_block(n));
01526
01527 bytes += t->size;
01528 ++ count;
01529
01530 if ( -- left <= 0 ) {
01531 break;
01532 }
01533 }
01534
01535
01536 tm.n[tm_b] -= bytes;
01537
01538 #if 0
01539 if ( count )
01540 tm_msg("s t%p c%lu b%lu l%lu\n", t, count, bytes, tm.n[tm_ECRU]);
01541 #endif
01542
01543
01544 return t->n[tm_ECRU];
01545 }
01546
01547
01548
01549
01550
/*
 * Sweep every ECRU node.  The loop cursor is reinitialized on each
 * pass because sweeping can free blocks and unlink nodes
 * mid-iteration.
 */
static
void _tm_node_sweep_all()
{
  while ( tm.n[tm_ECRU] ) {
    tm_node_LOOP_INIT(tm_ECRU);
    _tm_node_sweep_some(tm.n[tm_ECRU]);
  }
}
01561
01562
01563
01564
01565
01566
01567
01568
01569
01570
01571
01572
01573
01574
/*
 * Placeholder for sweeping a single block; currently never sweeps.
 * The `b` parameter is unused.
 */
static
int _tm_block_sweep_maybe(tm_block *b)
{
  return 0;
}
01580
01581
01582
01583
01584
/*
 * Intended to sweep up to tm_block_sweep_some_size unused blocks per
 * call; the implementation is currently disabled, so this always
 * returns 0 (count is never incremented).
 */
static
int _tm_block_sweep_some()
{
  int count = 0;
  unsigned long bytes = 0;
  int left = tm_block_sweep_some_size;

#if tm_USE_SBRK
  /* (sbrk-specific block sweeping was disabled/removed here.) */
#endif

#if 0
  if ( count )
    tm_msg("b s b%lu b%lu\n", (unsigned long) count, (unsigned long) bytes);
#endif

  return count;
}
01608
01609
01610
01611
01612
/*
 * Turn up to `left` BLACK (marked) nodes back to ECRU using the
 * global BLACK loop cursor.  Returns the number of BLACK nodes
 * remaining.
 */
static
size_t _tm_node_unmark_some(long left)
{
  size_t count = 0, bytes = 0;

  /* tm_node_LOOP binds `t` (type) and `n` (node) in its body. */
  tm_node_LOOP(tm_BLACK);
  {
    tm_assert_test(tm_node_color(n) == tm_BLACK);

    tm_node_set_color(n, tm_node_to_block(n), tm_ECRU);

    bytes += t->size;
    ++ count;

    if ( -- left <= 0 ) {
      tm_node_LOOP_BREAK(tm_BLACK);
    }
  }
  tm_node_LOOP_END(tm_BLACK);

#if 0
  if ( count )
    tm_msg("u n%lu b%lu l%lu\n", count, bytes, tm.n[tm_BLACK]);
#endif

  return tm.n[tm_BLACK];
}
01640
01641
01642
01643
01644
/*
 * Unmark every BLACK node, reinitializing the loop cursor each pass.
 * tm.n[tm_TOTAL] serves as a per-pass budget that bounds the node
 * count.
 */
static
void _tm_node_unmark_all()
{
  while ( tm.n[tm_BLACK] ) {
    tm_node_LOOP_INIT(tm_BLACK);
    _tm_node_unmark_some(tm.n[tm_TOTAL]);
  }
}
01653
01654
01655
01656
01657
01658
01659
01660
01661
01662
01663
01664
01665 static
01666 void _tm_gc_clear_stats()
01667 {
01668 tm.blocks_allocated_since_gc = 0;
01669 tm.blocks_in_use_after_gc = tm.n[tm_B];
01670
01671 tm.nodes_allocated_since_gc = 0;
01672 tm.nodes_in_use_after_gc = tm.n[tm_NU] = tm.n[tm_TOTAL] - tm.n[tm_WHITE];
01673
01674 tm.bytes_allocated_since_gc = 0;
01675 tm.bytes_in_use_after_gc = tm.n[tm_b];
01676 }
01677
01678
01679
01680
01681
01682
01683
01684
01685
01686
01687
01688
/*
 * Run one complete collection cycle: unmark everything, scan roots,
 * propagate marks, sweep, unmark survivors, sweep free blocks, and
 * return to the ALLOC phase.
 *
 * NOTE(review): the `type` parameter is unused; all types are
 * collected regardless -- presumably reserved for future per-type
 * collection.
 */
void _tm_gc_full_type_inner(tm_type *type)
{
  int try = 0;

  tm_msg("gc {\n");

#if tm_TIME_STAT
  tm_time_stat_begin(&tm.ts_gc_inner);
#endif

  _tm_sweep_is_error = 0;

  /* The loop body runs exactly once (try: 0 -> 1). */
  while ( try ++ < 1 ) {
    /* Everything BLACK becomes ECRU. */
    _tm_phase_init(tm_UNMARK);
    _tm_node_unmark_all();
    tm_assert_test(tm.n[tm_BLACK] == 0);

    /* Mark everything reachable from the roots. */
    _tm_phase_init(tm_ROOT);
    _tm_root_scan_all();

    /* Propagate marks until no GREY nodes remain. */
    _tm_phase_init(tm_SCAN);
    _tm_node_scan_all();
    tm_assert_test(tm.n[tm_GREY] == 0);

    /* Reclaim everything still ECRU. */
    _tm_phase_init(tm_SWEEP);
    _tm_node_sweep_all();
    tm_assert_test(tm.n[tm_ECRU] == 0);

    /* Unmark the survivors for the next cycle. */
    _tm_phase_init(tm_UNMARK);
    _tm_node_unmark_all();
    tm_assert_test(tm.n[tm_BLACK] == 0);

    /* Give back unused blocks. */
    while ( _tm_block_sweep_some() ) {
    }

    /* Back to normal allocation. */
    _tm_phase_init(tm_ALLOC);
  }

  _tm_gc_clear_stats();

#if tm_TIME_STAT
  tm_time_stat_end(&tm.ts_gc_inner);
#endif

  tm_msg("gc }\n");
}
01744
01745
01746
01747
01748
01749 void _tm_gc_full_inner()
01750 {
01751 _tm_gc_full_type_inner(0);
01752 }
01753
01754
01755
01756
01757
01758
01759
01760
01761
01762
01763
01764
/*
 * Pop a free (WHITE) node of type t, zero its data area, color it for
 * the current collector phase, and return a pointer to the data area.
 * Returns 0 when the type has no free nodes.
 */
static __inline
void *_tm_type_alloc_node_from_free_list(tm_type *t)
{
  tm_node *n;
  char *ptr;

  if ( ! (n = tm_list_first(&t->color_list[tm_WHITE])) ) {
    return 0;
  }

  /* The node must belong to a block of this type and be free. */
  tm_assert_test(tm_node_to_block(n)->type == t);
  tm_assert_test(tm_node_color(n) == tm_WHITE);

  ptr = tm_node_to_ptr(n);

  /* Hand out zeroed memory. */
  memset(ptr, 0, t->size);

  /* Color depends on the current phase (GREY during sweep;
     see tm_phase_alloc). */
  tm_node_set_color(n, tm_node_to_block(n), tm_phase_alloc[tm.phase]);

  /* Record the data address as in-use in the page table. */
  _tm_page_mark_used(ptr);

  /* Allocation accounting. */
  ++ tm.nodes_allocated_since_gc;
  tm.bytes_allocated_since_gc += t->size;
  tm.n[tm_b] += t->size;

#if 0
  tm_msg("N a n%p[%lu] t%p\n",
	 (void*) n,
	 (unsigned long) t->size,
	 (void*) t);
#endif

  return ptr;
}
01811
01812
01813
01814
01815
01816
01817
01818
01819 static __inline
01820 int _tm_type_memory_pressureQ(tm_type *t)
01821 {
01822 return
01823 (! t->n[tm_WHITE] && ! t->parcel_from_block && ! tm.free_blocks_n)
01824
01825
01826 ;
01827 }
01828
01829
01830
01831
01832
01833
01834
01835 static __inline
01836 int _tm_type_memory_pressureQ_2(tm_type *t)
01837 {
01838 return
01839 _tm_type_memory_pressureQ(t) &&
01840 tm.alloc_since_sweep > 100
01841 ;
01842 }
01843
01844
01845
01846
01847
01848
01849
01850
01851
01852
01853
01854
01855
01856
01857
01858
01859
01860
01861
/*
 * Allocate one node of type t, advancing the collector one increment.
 *
 * Each call first performs a bounded amount of work for the current
 * collector phase (possibly transitioning tm.phase), then takes a
 * node from the type's free (WHITE) list, parceling or allocating a
 * new block if the free list is empty.  Returns a pointer to the
 * node's zeroed data area, or 0 if no memory could be obtained.
 */
void *_tm_alloc_type_inner(tm_type *t)
{
  void *ptr;
  int again;
#if tm_TIME_STAT
  tm_time_stat *ts;
#endif

  /* Unique id for this allocation (used by debugging hooks). */
  ++ tm.alloc_id;
#if 0
  /* Debugging hook: stop at a specific allocation. */
  if ( tm.alloc_id == 5040 )
    tm_stop();
#endif

  /* Reset the per-allocation counters. */
  tm.alloc_pass = 0;
  memset(tm.alloc_n, 0, sizeof(tm.alloc_n));

  ++ tm.alloc_since_sweep;

  ++ tm.alloc_pass;

#if tm_TIME_STAT
  tm_time_stat_begin(ts = &tm.ts_phase[tm.phase]);
#endif

 again:
  again = 0;

  /* -1 means "stay in the current phase". */
  tm.next_phase = (enum tm_phase) -1;

  /* One bounded increment of collector work for the current phase. */
  switch ( tm.phase ) {
  case tm_ALLOC:
    /* Lazily unmark a few nodes; under sustained pressure, begin a
       new collection cycle. */
    _tm_node_unmark_some(tm_node_unmark_some_size);

    if ( _tm_type_memory_pressureQ_2(t) ) {
      tm.next_phase = tm_UNMARK;
    }
    break;

  case tm_UNMARK:
    /* Once all BLACK nodes are ECRU again, move to root marking. */
    if ( ! _tm_node_unmark_some(tm_node_unmark_some_size) ) {
      tm.next_phase = tm_ROOT;
    }

#if 0
    if ( _tm_type_memory_pressureQ(t) ) {
      tm.next_phase = tm_ROOT;
    }
#endif
    break;

  case tm_ROOT:
    /* Scan some marked nodes, then some roots. */
    _tm_node_scan_some(tm_node_scan_some_size);

    if ( tm_root_scan_full ) {
      /* Scan the whole root set in one go. */
      _tm_root_scan_all();
      tm.next_phase = tm_SCAN;
    } else {
      /* Incremental root scanning; done when nothing is left. */
      if ( ! _tm_root_scan_some() ) {
	tm.next_phase = tm_SCAN;
      }
    }
    break;

  case tm_SCAN:
    /* If no GREY nodes were scanned, rescan the roots for
       mutations. */
    if ( ! _tm_node_scan_some(tm_node_scan_some_size) ) {
      _tm_root_scan_all();
    }

    /* Under pressure, finish scanning now so sweeping can start. */
    if ( tm.n[tm_GREY] && _tm_type_memory_pressureQ_2(t) ) {
      _tm_node_scan_all();
    }

    if ( ! tm.n[tm_GREY] ) {
      tm.next_phase = tm_SWEEP;
      again = 1;
    }
    break;

  case tm_SWEEP:
    /* Prefer sweeping nodes of the requested type; fall back to any
       type if this one yields nothing. */
    if ( t->n[tm_TOTAL] && ! _tm_node_sweep_some_for_type(t, tm_node_sweep_some_size) ) {
      _tm_node_sweep_some(tm_node_sweep_some_size);
    }

    /* All ECRU reclaimed: back to plain allocation. */
    if ( ! tm.n[tm_ECRU] ) {
      tm.next_phase = tm_ALLOC;
    }
    break;

  default:
    tm_abort();
    break;
  }

  /* Enter the next phase if one was chosen above. */
  if ( tm.next_phase != (enum tm_phase) -1 )
    _tm_phase_init(tm.next_phase);

  if ( again )
    goto again;

  ++ tm.alloc_by_phase[tm.phase];

  /* Take a free node; if none, parcel or allocate a block and retry
     once. */
  if ( ! (ptr = _tm_type_alloc_node_from_free_list(t)) ) {
    _tm_node_parcel_or_alloc(t);
    ptr = _tm_type_alloc_node_from_free_list(t);
  }

#if tm_ptr_to_node_TEST
  /* Self-test of the pointer -> node mapping around the new node. */
  if ( ptr ) {
    char *p = ptr;
    tm_node *n = (void*) (p - tm_node_HDR_SIZE);
    tm_assert(tm_ptr_to_node(n) == 0);
    tm_assert(tm_ptr_to_node(p) == n);
    tm_assert(tm_ptr_to_node(p + 1) == n);
    tm_assert(tm_ptr_to_node(p + t->size / 2) == n);
    tm_assert(tm_ptr_to_node(p + t->size - 1) == n);
#if tm_ptr_AT_END_IS_VALID
    tm_assert(tm_ptr_to_node(p + t->size) == n);
#else
    tm_assert(tm_ptr_to_node(p + t->size) == 0);
#endif
  }
#endif

#if tm_TIME_STAT
  tm_time_stat_end(ts);
#endif

  /* Sampled allocation log; the sampling ratio decays over time.
     NOTE(review): if fopen fails, fprintf is called with a null
     stream; also the header lists 9 columns but 10 values are
     printed (tm_TOTAL included) and the trailing 0 arg is unused. */
  {
    static FILE *tm_alloc_log;
    static unsigned long log_id = 0;
    static unsigned long log_ratio = 1;

    if ( ! tm_alloc_log ) {
      tm_alloc_log = fopen("/tmp/tm_alloc.log", "w+");
      fprintf(tm_alloc_log, "#ID PTR WHITE ECRU GREY BLACK PHASE BLOCKS FREE_BLOCKS\n");
    }
    if ( tm_alloc_log ) {
      if ( log_id ++ % log_ratio == 0 ) {
	fprintf(tm_alloc_log, "%lu %lu %lu %lu %lu %lu %lu %lu %lu, %lu\n",
		(unsigned long) tm.alloc_id,
		(unsigned long) ptr,
		(unsigned long) tm.n[tm_WHITE],
		(unsigned long) tm.n[tm_ECRU],
		(unsigned long) tm.n[tm_GREY],
		(unsigned long) tm.n[tm_BLACK],
		(unsigned long) tm.n[tm_TOTAL],
		(unsigned long) tm.phase,
		(unsigned long) tm.n[tm_B],
		(unsigned long) tm.free_blocks_n,
		0
		);
      }
      if ( log_ratio < 1000 && log_id / log_ratio > 100 ) {
	log_ratio *= 10;
      }
    }
  }

#if 0
  tm_msg("a %p[%lu]\n", ptr, (unsigned long) t->size);
#endif

  return (void*) ptr;
}
02092
02093
02094
02095
02096
02097 void *_tm_alloc_inner(size_t size)
02098 {
02099 tm_type *type = tm_size_to_type(size);
02100 tm.alloc_request_size = size;
02101 tm.alloc_request_type = type;
02102 return _tm_alloc_type_inner(type);
02103 }
02104
02105
02106
02107
02108
02109 void *_tm_alloc_desc_inner(tm_adesc *desc)
02110 {
02111 tm_type *type = (tm_type*) desc->hidden;
02112 tm.alloc_request_size = type->size;
02113 tm.alloc_request_type = type;
02114 return _tm_alloc_type_inner(type);
02115 }
02116
02117
02118
02119
02120
02121 void *_tm_realloc_inner(void *oldptr, size_t size)
02122 {
02123 char *ptr = 0;
02124 tm_type *t, *oldt;
02125
02126 oldt = tm_node_to_type(tm_pure_ptr_to_node(oldptr));
02127 t = tm_size_to_type(size);
02128
02129 if ( oldt == t ) {
02130 ptr = oldptr;
02131 } else {
02132 ptr = _tm_alloc_inner(size);
02133 memcpy(ptr, oldptr, size < oldt->size ? size : oldt->size);
02134 }
02135
02136 return (void*) ptr;
02137 }
02138
02139
02140
02141
02142
02143
02144
02145
02146
02147
02148 void _tm_free_inner(void *ptr)
02149 {
02150 tm_block *b;
02151 tm_node *n;
02152
02153 n = tm_pure_ptr_to_node(ptr);
02154 b = tm_node_to_block(n);
02155
02156 tm_assert(tm_node_color(n) != tm_WHITE);
02157 tm_node_set_color(n, b, tm_WHITE);
02158 }
02159
02160
02161
02162
02163
02164
02165
02166
02167
02168
02169
02170
/*
 * Zero a window of the current stack frame so stale values left by
 * earlier calls are less likely to be misread as live pointers by the
 * conservative root scan.
 */
void __tm_clear_some_stack_words()
{
  int window[64];

  memset(window, 0, sizeof(window));
}
02176
02177
02178
02179
02180
02181