Data Structures | |
struct | tm_node |
An allocation node representing the data ptr returned from tm_alloc(). More... | |
struct | tm_node_large |
An allocation for a large node. More... | |
struct | tm_node_iterator |
Colored Node Iterator. More... | |
Defines | |
#define | tm_node_color(n) ((tm_color) tm_list_color(n)) |
The color of a tm_node. | |
#define | tm_node_ptr(n) ((void*)(((tm_node*) n) + 1)) |
A pointer to the data of a tm_node. | |
#define | tm_node_type(n) tm_block_type(tm_node_to_block(n)) |
Return the tm_node's tm_type. | |
Functions | |
static __inline void | _tm_node_init (tm_node *n, tm_block *b) |
Initialize a tm_node from a tm_block. | |
static __inline int | _tm_node_parcel_some (tm_type *t, long left) |
Parcel some nodes for a tm_type from a tm_block already allocated from the OS. | |
static __inline int | _tm_node_parcel_or_alloc (tm_type *t) |
Parcel some nodes from an existing tm_block, or allocate a new tm_block and try again. |
#define tm_node_color | ( | n | ) | ((tm_color) tm_list_color(n)) |
The color of a tm_node.
Definition at line 37 of file node.h.
Referenced by _tm_block_unparcel_nodes(), _tm_check_sweep_error(), _tm_free_inner(), _tm_node_delete(), _tm_node_mark(), _tm_node_parcel_some(), _tm_node_scan_some(), _tm_node_set_color(), _tm_node_sweep(), _tm_node_sweep_some(), _tm_node_unmark_some(), _tm_type_alloc_node_from_free_list(), tm_node_iterator_next(), tm_ptr_to_node(), tm_validate_lists(), and tm_write_barrier_node().
#define tm_node_ptr | ( | n | ) | ((void*)(((tm_node*) n) + 1)) |
A pointer to the data of a tm_node.
Definition at line 40 of file node.h.
Referenced by _tm_node_parcel_some(), _tm_node_scan(), and _tm_node_scan_some().
#define tm_node_type | ( | n | ) | tm_block_type(tm_node_to_block(n)) |
Return the tm_node's tm_type.

static __inline void _tm_node_init | ( | tm_node * | n, | tm_block * | b | ) | [static]
Initialize a tm_node from a tm_block.
Set the tm_block.type.
Initialize its list pointers.
Increment type node counts.
Increment block node counts
Increment global node counts.
Place tm_node on tm_type tm_WHITE list.
Definition at line 1002 of file tm.c.
References tm_node::list, tm_data::n, tm_block::n, tm_type::n, tm, tm_assert_test, tm_list_color, tm_list_init(), tm_list_set_color, tm_node_set_color(), tm_TOTAL, tm_WHITE, and tm_block::type.
Referenced by _tm_node_parcel_some().
01003 { 01004 tm_type *t; 01005 01006 tm_assert_test(b); 01007 // _tm_block_validate(b); 01008 tm_assert_test(b->type); 01009 01010 /*! Set the tm_block.type. */ 01011 t = b->type; 01012 01013 /*! Initialize its list pointers. */ 01014 tm_list_init(&n->list); 01015 01016 #if 1 01017 tm_assert_test(tm_list_color(&n->list) == tm_WHITE); 01018 #else 01019 /*! Set the tm_node color to tm_WHITE. */ 01020 tm_list_set_color(n, tm_WHITE); 01021 #endif 01022 01023 /*! Increment type node counts. */ 01024 ++ t->n[tm_TOTAL]; 01025 ++ t->n[tm_WHITE]; 01026 01027 /*! Increment block node counts */ 01028 ++ b->n[tm_TOTAL]; 01029 ++ b->n[tm_WHITE]; 01030 01031 /*! Increment global node counts. */ 01032 ++ tm.n[tm_TOTAL]; 01033 ++ tm.n[tm_WHITE]; 01034 01035 /*! Place tm_node on tm_type tm_WHITE list. */ 01036 tm_node_set_color(n, b, tm_WHITE); 01037 01038 // tm_validate_lists(); 01039 01040 // tm_msg("N n%p t%p\n", (void*) n, (void*) t); 01041 }
static __inline int _tm_node_parcel_or_alloc | ( | tm_type * | t | ) | [static] |
Parcel some nodes from an existing tm_block, or allocate a new tm_block and try again.
Return the number of tm_node parcelled.
Definition at line 1191 of file tm.c.
References _tm_block_alloc_for_type(), _tm_node_parcel_some(), and tm_node_parcel_some_size.
Referenced by _tm_alloc_type_inner(), and tm_type_new().
01192 { 01193 int count; 01194 01195 count = _tm_node_parcel_some(t, tm_node_parcel_some_size); 01196 if ( ! count ) { 01197 if ( ! _tm_block_alloc_for_type(t) ) 01198 return 0; 01199 count = _tm_node_parcel_some(t, tm_node_parcel_some_size); 01200 } 01201 01202 /*! Return the number of tm_node parcelled. */ 01203 return count; 01204 }
static __inline int _tm_node_parcel_some | ( | tm_type * | t, | |
long | left | |||
) | [static] |
Parcel some nodes for a tm_type from a tm_block already allocated from the OS.
If a tm_block is already scheduled for parceling, parcel from it. Otherwise, try to allocate a block from the free list and schedule it for parceling.
Until end of tm_block parcel space is reached,
Parcel a tm_node from the tm_block.
Increment tm_block parcel pointer.
Update global valid node pointer range.
Initialize the tm_node.
Update local accounting.
If enough nodes have been parceled, stop.
If the end of the tm_block was reached, Force a new tm_block allocation next time.
Return the number of tm_nodes actually allocated.
Definition at line 1097 of file tm.c.
References _tm_block_alloc_from_free_list(), _tm_node_init(), _tm_type_add_block(), tm_type::n, tm_data::n, tm_block::next_parcel, tm_type::parcel_from_block, tm_data::parceling, tm_type::size, tm, tm_assert_test, tm_block_node_begin, tm_block_node_end, tm_block_node_next, tm_block_node_next_parcel, tm_block_SIZE, tm_msg(), tm_node_color, tm_node_ptr, tm_ptr_h, tm_ptr_l, and tm_WHITE.
Referenced by _tm_node_parcel_or_alloc().
01098 { 01099 int count = 0; 01100 size_t bytes = 0; 01101 tm_block *b; 01102 01103 ++ tm.parceling; 01104 01105 /** 01106 * If a tm_block is already scheduled for parceling, parcel from it. 01107 * Otherwise, try to allocate a block from the free list and schedule it for parceling. 01108 */ 01109 if ( ! t->parcel_from_block ) { 01110 if ( (b = _tm_block_alloc_from_free_list(tm_block_SIZE)) ) { 01111 _tm_type_add_block(t, b); 01112 } else { 01113 goto done; 01114 } 01115 } 01116 01117 b = t->parcel_from_block; 01118 01119 // _tm_block_validate(b); 01120 01121 { 01122 /*! Until end of tm_block parcel space is reached, */ 01123 void *pe = tm_block_node_begin(b); 01124 01125 while ( (pe = tm_block_node_next(b, tm_block_node_next_parcel(b))) 01126 <= tm_block_node_end(b) 01127 ) { 01128 tm_node *n; 01129 01130 // _tm_block_validate(b); 01131 01132 /*! Parcel a tm_node from the tm_block. */ 01133 n = tm_block_node_next_parcel(b); 01134 01135 /*! Increment tm_block parcel pointer. */ 01136 b->next_parcel = pe; 01137 01138 /*! Update global valid node pointer range. */ 01139 { 01140 void *ptr; 01141 01142 if ( tm_ptr_l > (ptr = tm_node_ptr(n)) ) { 01143 tm_ptr_l = ptr; 01144 } 01145 if ( tm_ptr_h < (ptr = pe) ) { 01146 tm_ptr_h = ptr; 01147 } 01148 } 01149 01150 /*! Initialize the tm_node. */ 01151 _tm_node_init(n, b); 01152 01153 tm_assert_test(tm_node_color(n) == tm_WHITE); 01154 01155 /*! Update local accounting. */ 01156 ++ count; 01157 bytes += t->size; 01158 01159 /*! If enough nodes have been parceled, stop. */ 01160 if ( -- left <= 0 ) { 01161 goto done; 01162 } 01163 } 01164 01165 /** 01166 * If the end of the tm_block was reached, 01167 * Force a new tm_block allocation next time. 01168 */ 01169 t->parcel_from_block = 0; 01170 } 01171 01172 done: 01173 01174 -- tm.parceling; 01175 01176 #if 0 01177 if ( count ) 01178 tm_msg("N i n%lu b%lu t%lu\n", count, bytes, t->n[tm_WHITE]); 01179 #endif 01180 01181 /*! Return the number of tm_nodes actually allocated. 
*/ 01182 return count; 01183 }