Block

Data Structures

struct  tm_block
 A block allocated from the operating system. More...

Defines

#define tm_block_unused(b)   ((b)->n[tm_WHITE] == b->n[tm_TOTAL])
 True if the tm_block has no used nodes; i.e., it can be returned to the OS.
#define tm_block_type(b)   ((b)->type)
#define tm_block_node_begin(b)   ((void*) (b)->begin)
 The beginning address of any tm_nodes parcelled from a tm_block.
#define tm_block_node_end(b)   ((void*) (b)->end)
 The end address of any tm_nodes parcelled from a tm_block.
#define tm_block_node_next_parcel(b)   ((void*) (b)->next_parcel)
 The address of the next tm_node to be parcelled from a tm_block.
#define tm_block_node_size(b)   ((b)->type->size + tm_node_HDR_SIZE)
 The total size of a tm_node: the usable size from the tm_block's tm_type plus the tm_node header.
#define tm_block_node_next(b, n)   ((void*) (((char*) (n)) + tm_block_node_size(b)))
 The address of the next tm_node after n, parcelled from a tm_block.
#define _tm_block_validate(b)   ((void) 0)

Functions

static __inline void _tm_type_add_block (tm_type *t, tm_block *b)
 Add a tm_block to a tm_type.
static __inline void _tm_type_remove_block (tm_type *t, tm_block *b)
 Remove a tm_block from its tm_type.
static __inline void tm_block_init (tm_block *b)
 Initialize a new tm_block.
static __inline size_t tm_block_align_size (size_t size)
 Align size to a multiple of tm_block_SIZE.
static tm_block * _tm_block_alloc_from_free_list (size_t size)
 Allocate a tm_block from the free list.
static tm_block * _tm_block_alloc (size_t size)
 Allocate a tm_block of a given size.
static tm_block * tm_block_scavenge (tm_type *t)
 Scavenges all tm_types for an unused tm_block.
static __inline void _tm_node_delete (tm_node *n, tm_block *b)
 Deletes a WHITE tm_node from a tm_block.
static int _tm_block_unparcel_nodes (tm_block *b)
 Unparcels the tm_nodes in a tm_block.
static __inline void _tm_block_reclaim (tm_block *b)
 Reclaim a live tm_block.
static __inline void _tm_block_free (tm_block *b)
 Frees a block either returning the block to the OS or keeping it on a free list.
static tm_block * _tm_block_alloc_for_type (tm_type *t)
 Allocates a tm_block of tm_block_SIZE for a tm_type.

Define Documentation

#define _tm_block_validate ( b )     ((void) 0)

Definition at line 107 of file block.h.

Referenced by tm_node_to_type(), tm_print_block_stats(), tm_ptr_to_node(), and tm_validate_lists().

#define tm_block_node_begin ( b )     ((void*) (b)->begin)

The beginning address of any tm_nodes parcelled from a tm_block.

Definition at line 80 of file block.h.

Referenced by _tm_block_unparcel_nodes(), _tm_node_parcel_some(), and tm_ptr_to_node().

#define tm_block_node_end ( b )     ((void*) (b)->end)

The end address of any tm_nodes parcelled from a tm_block.

Definition at line 83 of file block.h.

Referenced by _tm_node_parcel_some().

#define tm_block_node_next ( b, n )     ((void*) (((char*) (n)) + tm_block_node_size(b)))

The address of the next tm_node after n, parcelled from a tm_block.

Definition at line 92 of file block.h.

Referenced by _tm_block_unparcel_nodes(), and _tm_node_parcel_some().

#define tm_block_node_next_parcel ( b )     ((void*) (b)->next_parcel)

The address of the next tm_node to be parcelled from a tm_block.

Definition at line 86 of file block.h.

Referenced by _tm_block_unparcel_nodes(), _tm_node_parcel_some(), and tm_ptr_to_node().

#define tm_block_node_size ( b )     ((b)->type->size + tm_node_HDR_SIZE)

The total size of a tm_node: the usable size from the tm_block's tm_type plus the tm_node header.

Definition at line 89 of file block.h.

Referenced by tm_ptr_to_node().
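
Taken together, these macros describe the node layout well enough to walk every tm_node already parcelled from a block. The sketch below is not part of block.h or tm.c; it simply mirrors the loop used by _tm_block_unparcel_nodes() (documented later on this page), with the per-node work left as a placeholder.

static void example_visit_parcelled_nodes(tm_block *b)
{
  /* First tm_node parcelled from the block. */
  tm_node *n = tm_block_node_begin(b);

  /* Nodes below next_parcel have already been parcelled out; the space
   * between next_parcel and tm_block_node_end(b) is still raw. */
  while ( (void*) n < tm_block_node_next_parcel(b) ) {
    /* ... inspect or process n here ... */

    /* Advance by the type's usable size plus tm_node_HDR_SIZE. */
    n = tm_block_node_next(b, n);
  }
}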

#define tm_block_type ( b )     ((b)->type)

Definition at line 77 of file block.h.

#define tm_block_unused ( b )     ((b)->n[tm_WHITE] == b->n[tm_TOTAL])

True if the tm_block has no used nodes; i.e., it can be returned to the OS.

Definition at line 75 of file block.h.

Referenced by _tm_block_free(), _tm_block_unparcel_nodes(), _tm_node_sweep(), and tm_block_scavenge().
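
As the caller list suggests, this predicate gates block reclamation. The fragment below is a hypothetical sketch of that pattern, not code from tm.c; the real checks live in _tm_node_sweep() and _tm_block_free().

static void example_maybe_free_block(tm_block *b)
{
  /* Only a block whose nodes are all tm_WHITE may be recycled. */
  if ( tm_block_unused(b) ) {
    _tm_block_free(b);   /* returns the block to the free list or the OS */
  }
}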


Function Documentation

static tm_block* _tm_block_alloc ( size_t  size  )  [static]

Allocate a tm_block of a given size.

Try to allocate from block free list.

Otherwise, allocate a new tm_block from the OS.

Make sure it's aligned to tm_block_SIZE.

Return 0 if OS denied.

Force allocation of a new block id.

Initialize its size.

Increment OS block stats.

Initialize the tm_block.

Increment global block stats.

Return the new or reused tm_block.

Definition at line 698 of file tm.c.

References _tm_block_alloc_from_free_list(), _tm_os_alloc_aligned(), tm_data::blocks_allocated_since_gc, tm_block::id, tm_data::n, tm_block::size, tm, tm_B, tm_b_OS, tm_B_OS, tm_b_OS_M, tm_B_OS_M, tm_block_align_size(), tm_block_init(), tm_block_SIZE, and tm_msg().

Referenced by _tm_block_alloc_for_type().

00699 {
00700   tm_block *b;
00701 
00702   size = tm_block_align_size(size);
00703 
00704   /*! Try to allocate from block free list. */
00705   b = _tm_block_alloc_from_free_list(size);
00706 
00707   /** 
00708    * Otherwise, allocate a new tm_block from the OS.
00709    */
00710   if ( ! b ) {
00711     /*! Make sure it's aligned to tm_block_SIZE. */
00712     b = _tm_os_alloc_aligned(size);
00713 
00714     /*! Return 0 if OS denied. */
00715     if ( ! b )
00716       return 0;
00717 
00718     /*! Force allocation of a new block id. */
00719     b->id = 0;
00720 
00721     /*! Initialize its size. */
00722     b->size = size;
00723     
00724     /*! Increment OS block stats. */
00725     ++ tm.n[tm_B_OS];
00726     if ( tm.n[tm_B_OS_M] < tm.n[tm_B_OS] )
00727       tm.n[tm_B_OS_M] = tm.n[tm_B_OS];
00728 
00729     tm.n[tm_b_OS] += size;
00730     if ( tm.n[tm_b_OS_M] < tm.n[tm_b_OS] )
00731       tm.n[tm_b_OS_M] = tm.n[tm_b_OS];
00732 
00733     tm_msg("b a os b%p\n", (void*) b);
00734 
00735     /*! Initialize the tm_block. */
00736     tm_block_init(b);
00737     
00738     /*! Increment global block stats. */
00739     ++ tm.n[tm_B];
00740     tm.blocks_allocated_since_gc += size / tm_block_SIZE;
00741   }
00742 
00743   // tm_validate_lists();
00744 
00745   // fprintf(stderr, "  _tm_block_alloc(%p)\n", b);
00746 
00747   /*! Return the new or reused tm_block. */
00748   return b;
00749 }

static tm_block* _tm_block_alloc_for_type ( tm_type * t )  [static]

Allocates a tm_block of tm_block_SIZE for a tm_type.

Allocate a new tm_block from free list or OS.

Add it to the tm_type.blocks list

Definition at line 972 of file tm.c.

References _tm_block_alloc(), _tm_type_add_block(), and tm_block_SIZE.

Referenced by _tm_node_parcel_or_alloc().

00973 {
00974   tm_block *b;
00975   
00976   /*! Allocate a new tm_block from free list or OS. */
00977   b = _tm_block_alloc(tm_block_SIZE);
00978 
00979   // fprintf(stderr, "  _tm_block_alloc(%d) => %p\n", tm_block_SIZE, b);
00980 
00981   /*! Add it to the tm_type.blocks list */
00982   if ( b ) {
00983     _tm_type_add_block(t, b);
00984   }
00985 
00986   // tm_msg("b a b%p t%p\n", (void*) b, (void*) t);
00987 
00988   return b;
00989 }

static tm_block * _tm_block_alloc_from_free_list ( size_t  size  )  [static]

Allocate a tm_block from the free list.

Force allocation to a multiple of tm_block_SIZE.

Scan global tm_block free list for a block of the right size.

Initialize the tm_block.

Increment global block stats.

Return 0 if no free blocks exist.

Definition at line 652 of file tm.c.

References tm_data::blocks_allocated_since_gc, tm_data::free_blocks, tm_data::free_blocks_n, tm_data::n, tm_block::size, tm, tm_assert_test, tm_B, tm_block_align_size(), tm_block_init(), tm_block_SIZE, tm_FREE_BLOCK, tm_list_color, tm_list_LOOP, tm_list_LOOP_END, tm_list_remove(), and tm_msg().

Referenced by _tm_block_alloc(), and _tm_node_parcel_some().

00653 {
00654   tm_block *b = 0;
00655 
00656   size = tm_block_align_size(size);
00657 
00658   /*! Scan global tm_block free list for a block of the right size. */
00659   {
00660     tm_block *bi;
00661 
00662     tm_list_LOOP(&tm.free_blocks, bi);
00663     {
00664       if ( bi->size == size &&
00665            tm_list_color(bi) == tm_FREE_BLOCK
00666            ) {
00667         tm_assert_test(tm.free_blocks_n);
00668         -- tm.free_blocks_n;
00669 
00670         tm_list_remove(bi);
00671 
00672         b = bi;
00673 
00674         tm_msg("b a fl b%p %d\n", (void*) b, tm.free_blocks_n);
00675         break;
00676       }
00677     }
00678     tm_list_LOOP_END;
00679   }
00680   
00681   if ( b ) {
00682     /*! Initialize the tm_block. */
00683     tm_block_init(b);
00684     
00685     /*! Increment global block stats. */
00686     ++ tm.n[tm_B];
00687     tm.blocks_allocated_since_gc += size / tm_block_SIZE;
00688   }
00689 
00690   /*! Return 0 if no free blocks exist. */
00691   return b;
00692 }

static __inline void _tm_block_free ( tm_block * b )  [static]

Frees a block either returning the block to the OS or keeping it on a free list.

  • If using mmap(), the block is returned to the OS only if there are enough blocks on the free list (see: tm_block_min_free).
  • If using sbrk(), the block is returned to the OS only if it was the most recent block allocated by sbrk().

Reclaim tm_block from its tm_type.blocks list.

If using mmap(), reduce calls to _tm_os_free() by keeping tm_block_min_free free blocks.

If block should return to OS,

Decrement global OS block stats.

And return aligned block back to OS.

Otherwise, remove from t->blocks list and add to global free block list.

Mark block as tm_FREE_BLOCK.

Reset block sweep iterator.

Return.

Definition at line 897 of file tm.c.

References _tm_block_reclaim(), _tm_block_sweep_init(), _tm_os_free_aligned(), tm_block::end, tm_data::free_blocks, tm_data::free_blocks_n, tm_block::id, tm_data::n, tm_block::size, tm, tm_assert, tm_assert_test, tm_b_OS, tm_B_OS, tm_block_min_free, tm_block_unused, tm_FREE_BLOCK, tm_list_remove_and_append(), tm_list_set_color, tm_msg(), and tm_ptr_h.

Referenced by _tm_node_sweep().

00898 {
00899   int os_free = 0;
00900 
00901   tm_assert(tm_block_unused(b));
00902 
00903   /*! Reclaim tm_block from its tm_type.blocks list. */
00904   _tm_block_reclaim(b);
00905 
00906 #if tm_USE_MMAP
00907   /*! If using mmap(), reduce calls to _tm_os_free() by keeping tm_block_min_free free blocks. */
00908   if ( tm.free_blocks_n > tm_block_min_free ) {
00909     // fprintf(stderr, "  tm_block_free too many free blocks: %d\n", tm.free_blocks_n);
00910     os_free = 1;
00911   } else {
00912     os_free = 0;
00913   }
00914 #endif
00915 
00916 #if tm_USE_SBRK
00917   /*! If using sbrk() and b was the last block allocated from the OS, */
00918   if ( sbrk(0) == (void*) b->end ) {
00919     /*! Reduce valid node ptr range. */
00920     tm_assert(tm_ptr_h == b->end);
00921     tm_ptr_h = b;
00922 
00923     /*! And plan to return block to OS. */
00924     os_free = 1;
00925   } else {
00926     os_free = 0;
00927   }
00928 #endif
00929 
00930   /*! If block should return to OS, */
00931   if ( os_free ) {
00932     /*! Decrement global OS block stats. */
00933     tm_assert_test(tm.n[tm_B_OS]);
00934     -- tm.n[tm_B_OS];
00935 
00936     tm_assert_test(tm.n[tm_b_OS] > b->size);
00937     tm.n[tm_b_OS] -= b->size;
00938 
00939     b->id = 0;
00940 
00941     /*! And return aligned block back to OS. */
00942     _tm_os_free_aligned(b, b->size);
00943 
00944     tm_msg("b f os b%p\n", (void*) b);
00945   } else {
00946     /*! Otherwise, remove from t->blocks list and add to global free block list. */
00947     tm_list_remove_and_append(&tm.free_blocks, b);
00948     ++ tm.free_blocks_n;
00949 
00950     /*! Mark block as tm_FREE_BLOCK. */
00951     tm_list_set_color(b, tm_FREE_BLOCK);
00952 
00953     tm_msg("b f fl b%p %d\n", (void*) b, tm.free_blocks_n);
00954   }
00955 
00956   /*! Reset block sweep iterator. */
00957   _tm_block_sweep_init();
00958 
00959   // tm_validate_lists();
00960 
00961   // fprintf(stderr, "  _tm_block_free(%p)\n", b);
00962   // tm_msg("b f b%p\n", (void*) b);
00963 
00964   /*! Return. */
00965 }

static __inline void _tm_block_reclaim ( tm_block * b )  [static]

Reclaim a live tm_block.

Unparcel any allocated nodes from type free lists.

Avoid pointers into block.

Decrement global block stats.

Remove reference from tm.block_first and tm.block_last, if necessary.

Remove tm_block from tm_type.

Mark tm_block's pages as unused.

Definition at line 858 of file tm.c.

References _tm_block_unparcel_nodes(), _tm_page_mark_unused_range(), _tm_type_remove_block(), tm_block::begin, tm_data::block_first, tm_data::block_last, tm_data::n, tm_block::next_parcel, tm_block::size, tm, tm_assert_test, tm_B, tm_list_color, tm_LIVE_BLOCK, and tm_block::type.

Referenced by _tm_block_free().

00859 {
00860   tm_assert_test(b);
00861   tm_assert_test(tm_list_color(b) == tm_LIVE_BLOCK);
00862 
00863   /*! Unparcel any allocated nodes from type free lists. */
00864   _tm_block_unparcel_nodes(b);
00865 
00866   /*! Avoid pointers into block. */
00867   b->next_parcel = b->begin;
00868 
00869   /*! Decrement global block stats. */
00870   tm_assert_test(tm.n[tm_B]);
00871   -- tm.n[tm_B];
00872 
00873   /*! Remove reference from tm.block_first and tm.block_last, if necessary. */
00874   if ( tm.block_last == b ) {
00875     tm.block_last = 0;
00876   }
00877   if ( tm.block_first == b ) {
00878     tm.block_first = 0;
00879   }
00880 
00881   /*! Remove tm_block from tm_type. */
00882   _tm_type_remove_block(b->type, b);
00883 
00884   /*! Mark tm_block's pages as unused. */
00885   _tm_page_mark_unused_range(b, b->size);
00886 }

static int _tm_block_unparcel_nodes ( tm_block * b )  [static]

Unparcels the tm_nodes in a tm_block.

  • Removes each tm_node allocated from the tm_block from its WHITE list.
  • All tm_nodes in the tm_block must be WHITE.

If all nodes in the block are free, remove all nodes in the block from any lists.

Start at first tm_node in tm_block.

Remove node from tm_WHITE list and advance.

Decrement tm_WHITE and tm_TOTAL counts:

  • Decrement type node counts.
  • Decrement block node counts.
  • Decrement global node counts.

Definition at line 797 of file tm.c.

References _tm_node_delete(), tm_block::n, tm_type::n, tm_data::n, tm_block::size, tm, tm_assert_test, tm_block_node_begin, tm_block_node_next, tm_block_node_next_parcel, tm_block_unused, tm_node_color, tm_TOTAL, tm_WHITE, and tm_block::type.

Referenced by _tm_block_reclaim().

00798 {
00799   int count = 0, bytes = 0;
00800   tm_type *t = b->type;
00801   
00802   tm_assert_test(b->type);
00803 
00804   /**
00805    * If all nodes in the block are free, 
00806    * remove all nodes in the block from any lists.
00807   */
00808   tm_assert_test(tm_block_unused(b));
00809 
00810   {
00811     tm_node *n;
00812     
00813     /*! Start at first tm_node in tm_block. */
00814     n = tm_block_node_begin(b);
00815     while ( (void*) n < tm_block_node_next_parcel(b) ) {
00816       /*! Remove node from tm_WHITE list and advance. */
00817       ++ count;
00818       bytes += b->size;
00819 
00820       tm_assert_test(tm_node_color(n) == tm_WHITE);
00821       _tm_node_delete(n, b);
00822 
00823       n = tm_block_node_next(b, n);
00824     }
00825   }
00826 
00827   /*! Decrement tm_WHITE and tm_TOTAL counts: */
00828 
00829   /*! - Decrement type node counts. */
00830   tm_assert_test(t->n[tm_WHITE] >= count);
00831   t->n[tm_WHITE] -= count;
00832   tm_assert_test(t->n[tm_TOTAL] >= count);
00833   t->n[tm_TOTAL] -= count;
00834 
00835   /*! - Decrement block node counts. */
00836   tm_assert_test(b->n[tm_WHITE] >= count);
00837   b->n[tm_WHITE] -= count;
00838   tm_assert_test(b->n[tm_WHITE] == 0);
00839 
00840   tm_assert_test(b->n[tm_TOTAL] >= count);
00841   b->n[tm_TOTAL] -= count;
00842   tm_assert_test(b->n[tm_TOTAL] == 0);
00843 
00844   /*! - Decrement global node counts. */
00845   tm_assert_test(tm.n[tm_WHITE] >= count);
00846   tm.n[tm_WHITE] -= count;
00847   tm_assert_test(tm.n[tm_TOTAL] >= count);
00848   tm.n[tm_TOTAL] -= count;
00849 
00850   return count;
00851 }

static __inline void _tm_node_delete ( tm_node * n, tm_block * b )  [static]

Deletes a WHITE tm_node from a tm_block.

Remove tm_node from tm_type's color list.

Definition at line 1048 of file tm.c.

References tm_data::n, tm_block::n, tm_type::n, tm_type::size, tm, tm_assert_test, tm_list_remove(), tm_msg(), tm_node_color, tm_TOTAL, tm_WHITE, and tm_block::type.

Referenced by _tm_block_unparcel_nodes().

01049 {
01050   tm_type *t;
01051   tm_color nc = tm_node_color(n);
01052 
01053   // _tm_block_validate(b);
01054   tm_assert_test(b->type);
01055   t = b->type;
01056   tm_assert_test(nc == tm_WHITE);
01057 
01058 #if 0 /* See _tm_block_unparcel_nodes() */
01059   /*! Decrement type node counts. */
01060   tm_assert_test(t->n[tm_TOTAL]);
01061   -- t->n[tm_TOTAL];
01062   tm_assert_test(t->n[nc]);
01063   -- t->n[nc];
01064 
01065   /*! Decrement block node counts. */
01066   tm_assert_test(b->n[tm_TOTAL] > 0);
01067   -- b->n[tm_TOTAL];
01069   tm_assert_test(b->n[nc] > 0);
01069   -- b->n[nc];
01070 
01071   /*! Decrement global node counts. */
01072   tm_assert_test(tm.n[tm_TOTAL] > 0);
01073   -- tm.n[tm_TOTAL];
01074   tm_assert_test(tm.n[nc] > 0);
01075   -- tm.n[nc];
01076 #endif
01077 
01078   /*! Remove tm_node from tm_type's color list. */
01079   tm_list_remove(n);
01080 
01081   // tm_validate_lists();
01082 
01083 #if 0
01084   tm_msg("N d n%p[%lu] t%p\n", 
01085          (void*) n,
01086          (unsigned long) t->size,
01087          (void*) t);
01088 #endif
01089 }

static __inline void _tm_type_add_block ( tm_type * t, tm_block * b )  [static]

Add a tm_block to a tm_type.

Assert that tm_block is not already associated with a tm_type.

Associate tm_block with the tm_type.

Compute the capacity of this block.

Begin parceling from this block.

Increment type block stats.

Increment global block stats.

Add to type's block list.

Definition at line 515 of file tm.c.

References tm_block::begin, tm_type::blocks, tm_block::end, tm_data::n, tm_type::n, tm_block::n, tm_type::parcel_from_block, tm_type::size, tm, tm_assert_test, tm_B, tm_CAPACITY, tm_list_insert(), and tm_block::type.

Referenced by _tm_block_alloc_for_type(), and _tm_node_parcel_some().

00516 {
00517   tm_assert_test(t);
00518   tm_assert_test(b);
00519   /*! Assert that tm_block is not already associated with a tm_type. */
00520   tm_assert_test(b->type == 0);
00521 
00522   /*! Associate tm_block with the tm_type. */
00523   b->type = t;
00524 
00525   /*! Compute the capacity of this block. */
00526   tm_assert_test(! b->n[tm_CAPACITY]);
00527   b->n[tm_CAPACITY] = (b->end - b->begin) / (sizeof(tm_node) + t->size); 
00528 
00529   /*! Begin parceling from this block. */
00530   tm_assert_test(! t->parcel_from_block);
00531   t->parcel_from_block = b;
00532 
00533   /*! Increment type block stats. */
00534   ++ t->n[tm_B];
00535 
00536   /*! Increment global block stats. */
00537   ++ tm.n[tm_B];
00538 
00539   /*! Add to type's block list. */
00540   tm_list_insert(&t->blocks, b);
00541 }

static __inline void _tm_type_remove_block ( tm_type * t, tm_block * b )  [static]

Remove a tm_block from its tm_type.

Decrement type block stats.

Decrement global block stats.

Do not parcel nodes from it any more.

Remove the tm_block from the tm_type.blocks list.

Disassociate the tm_block from the tm_type.

Definition at line 548 of file tm.c.

References tm_data::n, tm_type::n, tm_type::parcel_from_block, tm, tm_assert_test, tm_B, tm_list_remove(), and tm_block::type.

Referenced by _tm_block_reclaim().

00549 {
00550   tm_assert_test(t);
00551   tm_assert_test(b->type);
00552   tm_assert_test(b->type == t);
00553 
00554   /*! Decrement type block stats. */
00555   tm_assert_test(t->n[tm_B]);
00556   -- t->n[tm_B];
00557 
00558   /*! Decrement global block stats. */
00559   tm_assert_test(tm.n[tm_B]);
00560   -- tm.n[tm_B];
00561 
00562   /*! Do not parcel nodes from it any more. */
00563   if ( t->parcel_from_block == b ) {
00564     t->parcel_from_block = 0;
00565   }
00566 
00567   /*! Remove tm_block from the tm_type.blocks list. */
00568   tm_list_remove(b);
00569 
00570   /*! Disassociate tm_block from the tm_type. */
00571   b->type = 0;
00572 }

static __inline size_t tm_block_align_size ( size_t  size  )  [static]

Align size to a multiple of tm_block_SIZE.

Force allocation to a multiple of tm_block_SIZE.

Definition at line 634 of file tm.c.

References tm_block_SIZE.

Referenced by _tm_block_alloc(), and _tm_block_alloc_from_free_list().

00635 {
00636   size_t offset;
00637 
00638   /*! Force allocation to a multiple of tm_block_SIZE. */
00639   if ( (offset = (size % tm_block_SIZE)) )
00640     size += tm_block_SIZE - offset;
00641 
00642   return size;
00643 }
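
To make the rounding concrete, here is a small worked example. The value 8192 for tm_block_SIZE is hypothetical (the real value comes from the tredmill configuration); only the arithmetic is illustrated.

/* Assuming tm_block_SIZE == 8192 (hypothetical value, for illustration only): */
tm_block_align_size(1);      /* =>  8192 : rounded up to one block        */
tm_block_align_size(8192);   /* =>  8192 : already a multiple, unchanged  */
tm_block_align_size(10000);  /* => 16384 : 10000 % 8192 == 1808,
                                           10000 + (8192 - 1808) == 16384 */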

static __inline void tm_block_init ( tm_block * b )  [static]

Initialize a new tm_block.

Initialize the block id.

Initialize tm_block list pointers.

Mark tm_block as tm_LIVE_BLOCK.

Disassociate the tm_block from any tm_type.

Initialize bounds of the tm_block's allocation space.

Initialize next tm_node parcel to beginning of valid useable space.

Clear tm_block stats.

Reset tm_block sweep iterator.

Remember the first and most recently allocated blocks.

Definition at line 579 of file tm.c.

References _tm_block_sweep_init(), tm_block::begin, tm_data::block_first, tm_data::block_id, tm_data::block_last, tm_block::end, tm_block::id, tm_block::list, tm_block::n, tm_block::next_parcel, tm_block::size, tm, tm_assert, tm_assert_test, tm_block_GUARD, tm_block_HDR_SIZE, tm_list_init(), tm_list_set_color, tm_LIVE_BLOCK, and tm_block::type.

Referenced by _tm_block_alloc(), and _tm_block_alloc_from_free_list().

00580 {
00581   tm_assert_test(b->size);
00582 
00583   /*! Initialize the block id. */
00584   if ( ! b->id ) {
00585     b->id = ++ tm.block_id;
00586   }
00587 
00588   /*! Initialize tm_block list pointers. */
00589   tm_list_init(&b->list);
00590   /*! Mark tm_block as tm_LIVE_BLOCK. */
00591   tm_list_set_color(b, tm_LIVE_BLOCK);
00592 
00593 #if tm_name_GUARD
00594   b->name = "BLOCK";
00595 #endif
00596 
00597   /*! Disassociate the tm_block from any tm_type. */
00598   b->type = 0;
00599 
00600   /*! Initialize bounds of the tm_block's allocation space. */
00601   b->begin = (char*) b + tm_block_HDR_SIZE;
00602   b->end   = (char*) b + b->size;
00603   
00604   /*! Initialize next tm_node parcel to beginning of valid useable space. */
00605   b->next_parcel = b->begin;
00606 
00607 #if tm_block_GUARD
00608   b->guard1 = b->guard2 = tm_block_hash(b);
00609 #endif
00610 
00611   /*! Clear tm_block stats. */
00612   memset(b->n, 0, sizeof(b->n));
00613 
00614   /*! Reset tm_block sweep iterator. */
00615   _tm_block_sweep_init();
00616 
00617   /*! Remember the first block and most recent blocks allocated. */
00618   tm.block_last = b;
00619   if ( ! tm.block_first ) {
00620     tm.block_first = b;
00621   } else {
00622 #if tm_USE_SBRK
00623     /* Make sure heap grows up, other code depends on it. */
00624     tm_assert((void*) b >= (void*) tm.block_first);
00625 #endif
00626   }
00627 }

static tm_block* tm_block_scavenge ( tm_type * t )  [static]

Scavenges all tm_types for an unused tm_block.

NOT USED!

Definition at line 758 of file tm.c.

References tm_type::blocks, tm_block::n, tm, tm_block_unused, tm_list_LOOP, tm_list_LOOP_END, tm_TOTAL, tm_block::type, and tm_data::types.

00759 {
00760   tm_type *type = 0;
00761 
00762   tm_list_LOOP(&tm.types, type);
00763   {
00764     tm_block *block = 0;
00765 
00766     tm_list_LOOP(&type->blocks, block);
00767     {
00768       if ( block->type != t && 
00769            tm_block_unused(block) &&
00770            block->n[tm_TOTAL]
00771            ) {
00772         return block;
00773       }
00774     }
00775     tm_list_LOOP_END;
00776   }
00777   tm_list_LOOP_END;
00778 
00779   return 0;
00780 }


Generated on Mon Jan 25 06:33:12 2010 for TM(tredmill) by  doxygen 1.6.1