/* * RMALLOC by Ronald Kriemann * -------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation (version 2 of the License). * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * Changelog * --------- * * v0.100 - another major rewrite: added coalescing similar to * dmalloc but without caching (deffered) * v0.99 - major rewrite with containers which can be reused by * other size-classes => led to massive fragmentation * - code-cleanup * v0.98.2 - replaced "real" thread-private-data by simulated one * v0.98.1 - active fragmentation statistics with STAT_FRAGMENTATION * (prints "real" consumption) * - moved top_pad into data-segment-structure * v0.98 - used remaining blocks of each data-segment to serve future * requests, thereby reducing fragmentation (left-over-blocks) * v0.97 - small bugfixes * - top_pad is now heap-specific and adjusted dynamicly * (1/TOP_PAD_FRACTION of mem_used) * - changed size-field in nodes to slot; avoided to many calls * to size_to_class * - moved handling of sizes larger than biggest size-class to * operating-system (via malloc or mmap) * - fixed error-handling if no memory is available from the OS * (sends kill to own process instead of exit(1)) * - added another trace-method: trace by allocation * v0.96 - rewritten chunk-handling: heap -> block -> chunk * - exchange of blocks between thread-heaps and global-heaps * to reduce overall memory-consumption * v0.95 - round sizes to next size-class to guarantee, 
that there * is a free chunk in the list (avoid searching) * - increased number of size-classes; changed size_to_class to * use a binary search instead of linear * - ok. maybe 10 MB for the DEFAULT_TOP_PAD is better ? * - implemented trace-functionality for thread-heaps * v0.94 - rearranged code for system-allocation (mmap and malloc had * too much in common to be separated) * - removed most messages when mutices were locked * - set MMAP to be default, even when malloc is not overwritten * - changed "r_mallinfo" to "rmallinfo" and added declaration * in rmalloc.h * v0.93 - changed heap handling, now each thread has a private * heap; if the thread terminates, this heap is then marked * as free and can be used by a new thread * - removed creation of first heap (besides global-heap), this * will now be done on demand (in r_malloc) * - DEFAULT_TOP_PAD reduced to 5 MB (should also be enough) * - heap-list is no longer a circular list (just a linear one) * - heaps can now be aligned to the cache-line-size (ALIGN_TO_CACHE_SIZE) * - added wrapper for mallinfo which calls internal r_mallinfo * (r_mallinfo uses mallinfo-struct with ulong fields) * v0.92 - replaced simulated thread-specific-data by real one * v0.91 - fixed bug/feature when using malloc as system-alloc: only * requested chunks of needed size were allocated, not plus * DEFAULT_TOP_PAD * v0.90 - initial release * * ToDo * ---- * */ /* * the version info */ #define RMALLOC_VERSION "1.0-pre1" /***************************************************************** ***************************************************************** ** ** some options ** ***************************************************************** *****************************************************************/ /* * use rmalloc as a malloc-replacment */ #if defined(__cplusplus) && !defined(OVERLOAD_MALLOC) # define OVERLOAD_MALLOC 0 #else # ifndef OVERLOAD_MALLOC # define OVERLOAD_MALLOC 1 # endif #endif /* * use rmalloc as a new/delete replacment */ 
#if defined(__cplusplus) && !defined(OVERLOAD_NEW) #define OVERLOAD_NEW 1 #endif /* * use pthreads */ #ifndef USE_PTHREAD #define USE_PTHREAD 1 #endif /* * define system-allocation method: MALLOC, MMAP, SBRK */ #if OVERLOAD_MALLOC == 1 # ifndef USE_MMAP # define USE_MMAP 1 # endif # ifndef USE_SBRK # define USE_SBRK 0 # endif #else # ifndef USE_MALLOC # define USE_MALLOC 0 # endif # ifndef USE_MMAP # define USE_MMAP 1 # endif # ifndef USE_SBRK # define USE_SBRK 0 # endif #endif /* * check heap extracted from memory-chunk */ #ifndef CHECK_HEAP #define CHECK_HEAP 0 #endif #include #include #include #include #include #include #include #include #include #include #include #include #if USE_PTHREAD == 1 #include #endif #include "rmalloc.h" #ifdef __cplusplus extern "C" { #endif /***************************************************************** ***************************************************************** ** ** defines and option setting ** ***************************************************************** *****************************************************************/ /* overload malloc functions */ #if OVERLOAD_MALLOC == 1 # define r_malloc malloc # define r_calloc calloc # define r_free free # define r_realloc realloc # define r_mallopt mallopt #endif /* * total number of size classes, * number of small classes and small size * and first "middle" size */ #define NO_OF_CLASSES 194 #define SMALL_SIZE 256 #define MIN_BLOCK_SIZE 288 #define NO_OF_SMALL (SMALL_SIZE / 8) /* size of a container for small blocks and * maximal number of full cont. 
per heap */
#define CONTAINER_SIZE       8192
#define MAX_FULL_CONTAINERS  8

/* container-classes (lists a container can live on inside a heap) */
#define TYPE_FULL      0
#define TYPE_NONEMPTY  1
#define TYPE_EMPTY     2

/*
 * set to 1 to enable statistics
 */
#define STAT_FRAGMENTATION 1

/*
 * set to 1 to enable debugging
 */
#define DEBUG_BLOCK   0
#define DEBUG_CONT    0
#define DEBUG_RMALLOC 0

/* magic numbers for blocks and containers */
#define BLOCK_MAGIC 0xbabecafe
#define CONT_MAGIC  0xcafebabe

/* default trace file */
#define DEFAULT_TRACE_FILE "rmalloc.trc"

/* trace-types: 1 : trace by steps, 2: trace by allocation */
#define TRACE_STEPS      1
#define TRACE_ALLOCATION 2

/* default value for top-pad (extra bytes requested from the OS per grow) */
#define DEFAULT_TOP_PAD 1*1024*1024

/* fraction of the heap, top_pad must not exceed */
#define TOP_PAD_FRACTION 50

/* exit/abort command */
#define ABORT kill( getpid(), SIGKILL )
#define EXIT  exit( 1 )

/* error value in sbrk */
#define SBRK_FAIL -1

/* definitions for mmap-usage */
#if USE_MMAP == 1

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif

#if !defined(MAP_FAILED)
# define MAP_FAILED ((char*)-1)
#endif

#if !defined(MAP_ANONYMOUS)

/* lazily opened /dev/zero descriptor for systems without anonymous mmap */
static int dev_zero_fd = -1;

/* first use opens /dev/zero, later uses reuse the cached descriptor */
#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
            mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
           (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif /* !defined(MAP_ANONYMOUS) */

#endif /* USE_MMAP == 1 */

/*****************************************************************
 *****************************************************************
 **
 ** data-types for rmalloc
 **
 *****************************************************************
 *****************************************************************/

/* internal size and pointer types */
typedef unsigned long internal_size_t;

/* bool looks better than int */
#ifndef __cplusplus
typedef enum { false, true } bool;
#endif

/*
 * mutex and thread-specific-data type
 */
#if USE_PTHREAD == 1
typedef pthread_mutex_t mutex_t;
typedef pthread_key_t   tsd_key_t;
#else
typedef int mutex_t;
typedef int tsd_key_t;
#endif

/*
 * forward declaration of types
 * NOTE(review): "struct heaplist_s" below does not match the later
 * definition/typedef name "heap_list_s"; the stray declaration is unused.
 */
struct sysconf_s;
struct loseg_s;
struct data_seg_s;
struct block_s;
struct container_s;
struct block_storage_s;
struct heap_s;
struct heaplist_s;
struct trace_s;

typedef struct sysconf_s       sysconf_t;
typedef struct loseg_s         loseg_t;
typedef struct data_seg_s      data_seg_t;
typedef struct stat_s          stat_t;
typedef struct block_s         block_t;
typedef struct container_s     container_t;
typedef struct block_storage_s block_storage_t;
typedef struct heap_s          heap_t;
typedef struct heap_list_s     heap_list_t;
typedef struct trace_s         trace_t;

/*
 * for holding info about system
 */
struct sysconf_s
{
    internal_size_t pagesize; /* number of bytes per page */
    internal_size_t top_pad;  /* number of extra bytes to allocated in each sbrk */
};

/*
 * type for a left-over segment
 * (end of a data-segment, which could not be used for an alloc-request)
 */
struct loseg_s
{
    internal_size_t size;          /* size of this segment */
    loseg_t       * prev, * next;  /* pointers for double-linked-list */
};

/*
 *
 * data-structure for a system-heap
 * (represents data-segment)
 */
struct data_seg_s
{
    char            * begin;        /* pointer to the first byte in the data-segment */
    char            * pos;          /* pointer to the first free byte in the data-segment */
    char            * end;          /* pointer to the first byte out of the data-segment */
    internal_size_t   unused;       /* number of bytes unusable in data-segment (fragmented) */
    loseg_t         * loseg_first;  /* list of unused blocks (first/last entries) */
    loseg_t         * loseg_last;
    internal_size_t   top_pad;      /* number of bytes to alloc. in advance */
};

/*
 * holding statistical information
 */
struct stat_s
{
    internal_size_t used[ NO_OF_CLASSES ];  /* number of used chunks */
    internal_size_t free[ NO_OF_CLASSES ];  /* number of free chunks */
    internal_size_t used_mem;               /* total number of bytes used in heap (alloc from system) */
    internal_size_t mem_in_use;             /* current number of bytes, used by application */
    internal_size_t max_in_use;             /* maximal number of bytes, used by application */
    internal_size_t allocated;              /* sum of all allocated memory in heap */
};

/*
 * datatype for memory chunks
 * (the low 3 bits of "size" carry the used/pred-used/wild flags,
 *  see the BLOCK_SIZE/SET_SIZE macros below)
 */
struct block_s
{
    internal_size_t size;     /* original, requested size of block */
    void          * hc_addr;  /* heap/container we belong to */
#if DEBUG_BLOCK == 1
    unsigned        magic;    /* magic number for the block */
    unsigned        __tmp__;  /* for 8-byte-alignment */
#endif
    block_t       * next, * prev;  /* next, prev pointer for list */
};

/* size of extra-data to be stored in each chunk (pointers + size-field);
 * a used block only keeps size + hc_addr, the link pointers overlap the
 * user data area */
#define PREFIX_SIZE     (sizeof(block_t) - (2*sizeof(block_t*)))
#define POSTFIX_SIZE    (sizeof(internal_size_t))
#define EXTRA_DATA_SIZE (PREFIX_SIZE)

/*
 * container for a list of blocks
 */
struct container_s
{
    heap_t      * heap;          /* heap holding this container */
    container_t * next, * prev;  /* data for double-linked-list */
    block_t     * blocks;        /* list of free memory-chunks in container */
    unsigned      free, max;     /* number of free and total number of blocks */
    unsigned      sclass;        /* which size-class represents the container */
#if DEBUG_CONT == 1
    unsigned      magic;         /* magic number for the container */
#else
    unsigned      __tmp__;       /* for 8-byte-alignment */
#endif
};

/*
 * combines different container-types
 * (naming: "empty" = no free block left, "full" = all blocks free)
 */
struct block_storage_s
{
    container_t * empty[ NO_OF_SMALL ];     /* all blocks in container used */
    container_t * nonempty[ NO_OF_SMALL ];  /* at least one block used, but not all */
    container_t * full, * last_full;        /* full container, no block used */
    unsigned      no_of_full;               /* number of full containers */
    block_t     * blocks[ NO_OF_CLASSES ];  /* size-classes for usual blocks */
    block_t     * wilderness;               /* last block of each data-segment */
};

/*
 * type for a heap
 */
struct heap_s
{
    int             id;        /* unique id of heap */
    heap_t        * next;      /* link for the heap-list */
    data_seg_t      data_seg;  /* data segment associated with heap */
    block_storage_t blocks;    /* holds all stored blocks */
    stat_t          stat;      /* status information */
    mutex_t         mutex;     /* mutex for locking heap */
    bool            used;      /* if false, heap is not used */
};

/*
 * list of heaps for the threads
 */
struct heap_list_s
{
    heap_t * heaps;   /* linked list of heaps */
    unsigned nheaps;  /* number of heaps in list */
    mutex_t  mutex;   /* mutex for safety */
};

/*
 * type for tracing memory-allocation
 */
struct trace_s
{
    int             fd;         /* file descriptor of trace-file */
    internal_size_t old_used;   /* old number of used bytes */
    internal_size_t old_free;   /* old number of free bytes */
    internal_size_t old_alloc;  /* old total allocation size (time in MB) */
    unsigned        step;       /* allocation step */
    internal_size_t size;       /* size of chunk to trace */
    unsigned        type;       /* type of trace */
};

/*****************************************************************
 *****************************************************************
 **
 ** thread definition
 **
 *****************************************************************
 *****************************************************************/

#if USE_PTHREAD == 1

/* maximal number of threads */
#define TSD_MAX 256

/*
 * handling of mutices
 */
# define MUTEX_INITIALIZER        PTHREAD_MUTEX_INITIALIZER
# define DEFINE_MUTEX( m )        static mutex_t m = PTHREAD_MUTEX_INITIALIZER
# define MUTEX_INIT(mutex,status) if (( status = pthread_mutex_init( & mutex, NULL )) != 0) \
                                  { fprintf( stderr, "(rmalloc) init_heap : error in mutex_init (%s)\n", \
                                             strerror( status ) ); \
                                    EXIT; }
# define LOCK( m )                pthread_mutex_lock( & m )
# define UNLOCK( m )              pthread_mutex_unlock( & m )
# define TRYLOCK( m )             pthread_mutex_trylock( & m )
# define MUTEX_BUSY               EBUSY
# define IS_LOCKED( m )           TRYLOCK( m ) == MUTEX_BUSY

/*
 * handling thread-specific-data
 * NOTE(review): this is "simulated" TSD — a 256-entry table indexed by a
 * hash of pthread_self(); two threads hashing to the same slot would share
 * a heap entry.  Also the literal 256 should presumably be TSD_MAX.
 */
# define TSD_KEY_INIT( key )       { int i; (*(key)) = 0; for ( i = 0; i < TSD_MAX; i++ ) tsd[i] = NULL; }
# define TSD_GET_DATA( key )       (tsd[ ((unsigned) pthread_self()) % 256 ])
# define TSD_SET_DATA( key, data ) tsd[ ((unsigned) pthread_self()) % 256 ] = ((heap_t*) data);

/*
 * conversion thread <-> heap
 */
# define GET_THR_HEAP()       ((heap_t*) TSD_GET_DATA( heap_key ))
# define SET_THR_HEAP( heap ) TSD_SET_DATA( heap_key, (void*) heap )

#else

/* maximal number of threads */
#define TSD_MAX 256

/*
 * handling of mutices (no-ops in the single-threaded build)
 */
# define MUTEX_INITIALIZER        0
# define DEFINE_MUTEX( m )
# define MUTEX_INIT(mutex,status)
# define LOCK( m )
# define UNLOCK( m )
# define TRYLOCK( m )             1
# define MUTEX_BUSY               0
# define IS_LOCKED( m )           false

/*
 * handling thread-specific-data
 */
# define TSD_KEY_INIT( key )      { int i; (*(key)) = 0; for ( i = 0; i < TSD_MAX; i++ ) tsd[i] = NULL; }

/*
 * conversion thread <-> heap (single thread always uses the global heap)
 */
# define THREAD_ID            0
# define GET_THR_HEAP()       & global_heap
# define SET_THR_HEAP( heap )

#endif  /* USE_PTHREAD == 1 */

/*****************************************************************
 *****************************************************************
 **
 ** heap specific data
 **
 *****************************************************************
 *****************************************************************/

/* lookup table for size classes */
static internal_size_t size_classes[ NO_OF_CLASSES ] = {
          8,     16,     24,     32,     40,     48,     56,     64,     72,
         80,     88,     96,    104,    112,    120,    128,    136,    144,
        152,    160,    168,    176,    184,    192,    200,    208,    216,
        224,    232,    240,    248,    256,    288,    320,    352,    384,
        416,    448,    480,    512,    544,    576,    608,    640,    672,
        704,    736,    768,    800,    832,    864,    896,    928,    960,
        992,   1024,   1056,   1088,   1120,   1152,   1184,   1216,   1248,
       1280,   1408,   1536,   1664,   1792,   1920,   2048,   2176,   2304,
       2432,   2560,   2688,   2816,   2944,   3072,   3200,   3328,   3456,
       3584,   3712,   3840,   3968,   4096,   4224,   4352,   4864,   5376,
       5888,   6400,   6912,   7424,   7936,   8448,   8960,   9472,   9984,
      10496,  11008,  11520,  12032,  12544,  13056,  13568,  14080,  14592,
      15104,  15616,  16128,  16640,  18688,  20736,  22784,  24832,  26880,
      28928,  30976,  33024,  35072,  37120,  39168,  41216,  43264,  45312,
      47360,  49408,  57600,  65792,  73984,  82176,  90368,  98560, 106752,
     114944, 123136, 131328, 139520, 147712, 155904, 164096, 172288, 180480,
     213248, 246016, 278784, 311552, 344320, 377088, 409856, 442624, 475392,
     508160, 540928, 573696, 606464, 639232, 672000, 704768, 775248, 852776,
     938048, 1031856, 1135040, 1248544, 1373400, 1510736, 1661808, 1827992,
    2010792, 2211864, 2433056, 2676360, 2943992, 3238392, 3562232, 3918456,
    4310304, 4741328, 5215464, 5737008, 6310712, 6941784, 7635960, 8399552,
    9239512, 10163456, 11179808, 12297784, 13527560, 14880320, 16368352, 18005184 };

/* holds info about system */
static sysconf_t sys_conf = { 0, DEFAULT_TOP_PAD };

/* global heap (also used as allocator of heap_t records for new threads) */
static heap_t global_heap = { 0, NULL,
                              { NULL, NULL, NULL, 0, NULL, NULL, DEFAULT_TOP_PAD },
                              { { NULL }, { NULL }, NULL, NULL, 0, { NULL }, NULL },
                              { { 0 }, { 0 }, 0, 0, 0, 0 },
                              MUTEX_INITIALIZER,
                              true };

/* counter for the allocated heaps */
static unsigned heap_id = 0;

/* linked list of heaps for threads (global heap is not a part of it) */
static heap_list_t heap_list = { NULL, 0, MUTEX_INITIALIZER };

/* thread specific data */
static heap_t * tsd[ TSD_MAX ];

/* is the heap initialised */
static bool is_initialised = false;

/* how much statistics should be printed */
static unsigned heap_stat_lvl = 0;

/* for memory trace
 * (trace initialiser covers the first 5 of 7 fields; C zero-fills the rest) */
static bool    trace_mem = false;
static trace_t trace     = { -1, 0, 0, 0, 0 };

/* key for accessing thread-specific-data */
static tsd_key_t heap_key;

/*****************************************************************
 *****************************************************************
 **
 ** defines for easier access
 **
 *****************************************************************
 *****************************************************************/

/* block->size stores the byte-size in the upper bits and three flag bits
 * (0x1 = predecessor used, 0x2 = block used, 0x4 = wilderness block) */
#define BLOCK_SIZE( b )    ((b)->size & ~0x7)
#define PRED_SIZE( b )     (*(((internal_size_t*) (b)) - 1))
#define SET_SIZE( b, n )   ((b)->size = (n) + ((b)->size & 0x7))
#define SET_EOB_SIZE( b )  { char * p = ((char*) b) + BLOCK_SIZE( b ) - sizeof(internal_size_t); \
                             *((internal_size_t*) p) = BLOCK_SIZE( b ); }

#define IS_PRED_USED( b )  ((b)->size & 0x1)
#define SET_PRED_USED( b ) ((b)->size |= 0x1)
#define SET_PRED_FREE( b ) ((b)->size &= ~0x1)

#define IS_USED( b )       ((b)->size & 0x2)
#define SET_USED( b )      ((b)->size |= 0x2)
#define SET_FREE( b )      ((b)->size &= ~0x2)

#define IS_WILD( b )       ((b)->size & 0x4)
#define SET_WILD( b )      ((b)->size |= 0x4)
#define SET_TAME( b )      ((b)->size &= ~0x4)

/* user data starts PREFIX_SIZE bytes into the block */
#define BLOCK_TO_PTR( n )  ((void *) (((char*) n) + PREFIX_SIZE))
#define PTR_TO_BLOCK( p )  ((block_t*) (((char*) p) - PREFIX_SIZE))

#ifndef MAX
#define MAX(x,y)           ((x) > (y) ? (x) : (y))
#endif

#if DEBUG_CONT == 1
#define SET_CONT_MAGIC( c )      (c)->magic = CONT_MAGIC
#define CHECK_CONT_MAGIC( c, m ) if ( (c)->magic != CONT_MAGIC ) \
                                 { fprintf( stderr, "(rmalloc) %s : container corrupted\n", m ); return; }
#else
#define SET_CONT_MAGIC( c )
#define CHECK_CONT_MAGIC( c, m )
#endif

#if DEBUG_BLOCK == 1
#define SET_BLOCK_MAGIC( b )      { (b)->magic = BLOCK_MAGIC; (b)->__tmp__ = 0; }
#define CHECK_BLOCK_MAGIC( b, m ) if ( (b)->magic != BLOCK_MAGIC ) \
                                  { fprintf( stderr, "(rmalloc) %s : block corrupted\n", m ); return; }
#else
#define SET_BLOCK_MAGIC( b )
#define CHECK_BLOCK_MAGIC( b, m )
#endif

#if DEBUG_RMALLOC == 1
#define CHECK_RMALLOC check_rmalloc()
#else
#define CHECK_RMALLOC
#endif

/*****************************************************************
 *****************************************************************
 **
 ** forward declarations and inline functions
 **
 *****************************************************************
 *****************************************************************/

/*
 * initialise/finish malloc
 */
static void rmalloc_init   ();
static void rmalloc_finish ();

/*
 * heap management
 */
static void     init_heap     ( heap_t * heap );
static void     insert_heap   ( heap_t * heap );
static heap_t * get_free_heap ();

/*
 * allocation/deallocation
 */
void * r_malloc  ( size_t size );
void * r_calloc  ( size_t nmemb, size_t size);
void   r_free    ( void * ptr );
void * r_realloc ( void * ptr, size_t size );

/********************************************
 * methods for small sizes
 ********************************************/

/*
 * allocation and deallocation
 */
static block_t * small_alloc ( heap_t * heap, unsigned sclass );
static void      small_free  ( block_t * block );

/*
 * container management
 */
static void          move_container   ( container_t * container, unsigned cfrom, unsigned cto );
static void          insert_container ( container_t * container, unsigned cto );
static container_t * alloc_container  ( heap_t * heap, unsigned sclass );
static void          init_container_blocks ( container_t * container );

/********************************************
 * methods for middle (usual) sizes
 ********************************************/

/*
 * allocation and deallocation
 */
static block_t * middle_alloc ( heap_t * heap, internal_size_t size, unsigned sclass );
static void      middle_free  ( heap_t * heap, block_t * block );

/********************************************
 * methods for large (unmanaged) sizes
 ********************************************/

static block_t * vm_alloc   ( internal_size_t size );
static void      vm_dealloc ( block_t * block );

/*
 * trace memory allocation
 */
static void rmalloc_init_trace   ( const char * name );
static void rmalloc_trace        ();
static void rmalloc_finish_trace ();

/*
 * print statistics
 */
static void rmalloc_stat ();

/*
 * translate size into size-class
 */
static unsigned size_to_class ( internal_size_t size );

/*
 * check status of rmalloc
 */
#if DEBUG_RMALLOC == 1
static void check_rmalloc ();
#endif

/*****************************************************************
 *****************************************************************
 **
 ** allocation of memory from system
 **
 *****************************************************************
 *****************************************************************/

/*
 * allocation from system via sbrk / mmap etc.
 */
/*
 * Request "size" bytes (must be a multiple of 8) for the given heap.
 * First tries to carve the request out of the heap's wilderness list
 * (left-over tails of earlier OS allocations); only if that fails does
 * it go to the OS (malloc or mmap, depending on compile options) with
 * top_pad bytes of read-ahead, putting the surplus back on the
 * wilderness list.  Aborts the process if the OS has no memory.
 */
static void *
system_alloc ( heap_t * heap, internal_size_t size )
{
    char            * p;
    data_seg_t      * data_seg;
    block_t         * block, * remainder;
    internal_size_t   n;
#if USE_MALLOC == 1 || USE_SBRK == 1
    DEFINE_MUTEX( mutex );
#endif

    if ( size == 0 )
        return NULL;

    assert( heap != NULL );

    /* get datasegment of given heap */
    data_seg = & heap->data_seg;

    /* adjust n to be multiple of 8 */
    if ( size % 8 != 0 )
    {
        fprintf( stderr, "system_alloc : warning, size is not a multiple of 8\n" );
        size += 8 - (size % 8);
    }

    /*
     * use wilderness of heap to serve malloc request
     */

    block = heap->blocks.wilderness;

    while ( block != NULL )
    {
        if ( BLOCK_SIZE( block ) >= size )
        {
            /* split block to serve request
             * and prepend block for future requests (if large enough) */
            n = BLOCK_SIZE( block ) - size;

            if ( n >= MIN_BLOCK_SIZE )
            {
                /* split block: front part serves the request, the tail
                 * stays (or becomes) the head of the wilderness list */
                block_t * new_block = block;

                SET_SIZE( new_block, size );
                SET_TAME( new_block );

                block = (block_t*) (((char*) block) + size);
                SET_SIZE( block, n );
                block->hc_addr = new_block->hc_addr;
                SET_WILD( block );
                SET_FREE( block );

                if ( new_block != heap->blocks.wilderness )
                {
                    /* remove block and prepend to wilderness */
                    if ( new_block->next != NULL ) new_block->next->prev = new_block->prev;
                    new_block->prev->next = new_block->next;

                    block->next = heap->blocks.wilderness;
                    block->prev = NULL;
                    heap->blocks.wilderness->prev = block;
                    heap->blocks.wilderness = block;
                }
                else
                {
                    /* copy links from old (new) block */
                    block->next = new_block->next;
                    block->prev = new_block->prev;

                    /* reset wilderness */
                    heap->blocks.wilderness = block;

                    if ( block->next != NULL ) block->next->prev = block;  /* prev is NULL !!! */
                }

                return new_block;
            }
            else
            {
                /* block is too small to serve for another request,
                 * treat it as internal fragmentation */
                if ( block->next != NULL ) block->next->prev = block->prev;
                if ( block->prev != NULL ) block->prev->next = block->next;
                else                       heap->blocks.wilderness = block->next;

                return block;
            }

            break;
        }
        else
            block = block->next;
    }

    /*
     * no suitable block was found in the wilderness,
     * create a new block + padding, serve the request
     * and put remainder into wilderness
     */

#if USE_MMAP == 1 || USE_MALLOC == 1

    /*
     * use system-malloc to request a large memory-chunk
     */

    /* adjust top-pad of heap (grows with heap usage, see TOP_PAD_FRACTION) */
    if ( heap->stat.used_mem / TOP_PAD_FRACTION > data_seg->top_pad )
        data_seg->top_pad = heap->stat.used_mem / TOP_PAD_FRACTION;

    n = size + data_seg->top_pad;

    /* adjust size to be multiple of pagesize */
    if ( n % sys_conf.pagesize != 0 )
        n += sys_conf.pagesize - (n % sys_conf.pagesize);

#if USE_MALLOC == 1
    /* call malloc in a thread-safe way */
    LOCK( mutex );
    p = (char*) malloc( n );
    UNLOCK( mutex );

    if ( p == (char*) NULL )
    {
        fprintf( stderr, "(rmalloc) system_alloc : malloc failed (%s)\n", strerror( errno ) );
        ABORT;
    }
#else  /* USE_MMAP == 1 */
    /* map new heap with mmap */
    p = (char*) MMAP( 0, n, PROT_READ | PROT_WRITE, MAP_PRIVATE );

    if ( p == (char*) MAP_FAILED )
    {
        fprintf( stderr, "(rmalloc) system_alloc : mmap failed (%s)\n", strerror( errno ) );
        ABORT;
    }
#endif

#elif USE_SBRK == 1
    /* to be done */
#else
    fprintf( stderr, "(rmalloc) system_alloc : no heap-allocation method defined (MALLOC,MMAP,SBRK)\n" );
    EXIT;
#endif

    /*
     * update statistics
     */

    heap->stat.used_mem += n;

    /*
     * form block and insert remainder to wilderness
     */

    block = (block_t *) p;
    SET_SIZE( block, size );
    block->hc_addr = heap;

    /* this is the first block in the data-segment and therefore has no
     * predecessor.
 * by setting pred_used, we avoid coalescing with it */
    SET_PRED_USED( block );

    remainder = (block_t*) (p + size);
    SET_SIZE( remainder, n - size );
    remainder->hc_addr = heap;
    remainder->prev = NULL;
    remainder->next = heap->blocks.wilderness;
    SET_WILD( remainder );

    if ( remainder->next != NULL )
        remainder->next->prev = remainder;

    heap->blocks.wilderness = remainder;

    return block;
}

/********************************************************************
 ********************************************************************
 **
 ** malloc/free interface
 **
 ********************************************************************
 ********************************************************************/

/*
 * Public allocation entry point.  Adds PREFIX_SIZE of management data,
 * rounds to a multiple of 8, hands oversized requests to vm_alloc,
 * otherwise serves them from the calling thread's private heap
 * (creating/reusing one on first use).
 */
void *
r_malloc ( size_t size )
{
    block_t  * block  = NULL;
    heap_t   * heap   = NULL;
    unsigned   sclass = 0;

    if ( size == 0 )
        return NULL;

    if ( ! is_initialised )
        rmalloc_init();

    CHECK_RMALLOC;

    /* adjust size to hold management data */
    size = size + PREFIX_SIZE;
    size = MAX( size, sizeof(block_t) + POSTFIX_SIZE );

    /* trim size to be multiple of 8 */
    if ( size % 8 != 0 )
        size += 8 - (size % 8);

    /* first check if size exceeds maximal managed size */
    if ( size > size_classes[ NO_OF_CLASSES - 1 ] )
    {
        block_t * block;

        /* request is managed by the operating system */
        block = vm_alloc( size );

        /* block has no heap/container, but remember size */
        SET_SIZE( block, size );
        block->hc_addr = NULL;

        return BLOCK_TO_PTR( block );
    }
    else
    {
        /* round size to next size_class */
        sclass = size_to_class( size );
        /* size = size_classes[ sclass ]; */
    }

    /*
     * get heap for this thread
     */

    /* get private heap of thread or set new heap */
    heap = GET_THR_HEAP();

    if ( heap == NULL )
    {
        /* look for free heap or create new one */
        if ((heap = get_free_heap()) == NULL)
        {
            /* heap records themselves are carved out of the global heap */
            LOCK( global_heap.mutex );
            heap = (heap_t *) system_alloc( (heap_t*) & global_heap, sizeof(heap_t) );
            UNLOCK( global_heap.mutex );

            init_heap( heap );

            /* lock heap BEFORE inserting so we can be sure, no other grabs it */
            LOCK( heap->mutex );
            insert_heap( heap );
        }

        /*
set this heap to be the thread-private heap */ SET_THR_HEAP( heap ); } else LOCK( heap->mutex ); assert( heap != NULL ); /* * get free block, depending on size */ if ( size <= SMALL_SIZE ) block = small_alloc( heap, sclass ); else block = middle_alloc( heap, size, sclass ); assert( block != NULL ); SET_BLOCK_MAGIC( block ); /* update statistics */ heap->stat.mem_in_use += BLOCK_SIZE( block ); heap->stat.max_in_use = MAX( heap->stat.max_in_use, heap->stat.mem_in_use ); heap->stat.allocated += BLOCK_SIZE( block ); if ( trace_mem ) rmalloc_trace(); UNLOCK( heap->mutex ); CHECK_RMALLOC; return BLOCK_TO_PTR( block ); } void * r_calloc ( size_t nmemb, size_t size ) { void * p; if ((nmemb == 0) || (size == 0)) return NULL; p = r_malloc( nmemb * size ); /* memset to zero */ if ( p != NULL ) memset( p, 0, nmemb * size ); return p; } void r_free ( void * ptr ) { block_t * block; if ( ptr == NULL ) return; /* should at least have called malloc for this ptr, if not : ERROR */ if ( ! is_initialised ) { fprintf( stderr, "(rmalloc) free : rmalloc not initialised\n" ); return; } CHECK_RMALLOC; block = PTR_TO_BLOCK( ptr ); CHECK_BLOCK_MAGIC( block, "free" ); /* test whether block is managed by operating system */ if ( block->hc_addr == NULL ) vm_dealloc( block ); else { /* now free block, depending on size */ if ( BLOCK_SIZE( block ) <= SMALL_SIZE ) small_free( block ); else { heap_t * heap = (heap_t*) block->hc_addr; assert( heap != NULL ); /* lock mutex here, cause middle_free is also called * from small_free and mutex is already locked there */ LOCK( heap->mutex ); /* update statistics */ heap->stat.mem_in_use -= BLOCK_SIZE( block ); middle_free( heap, block ); UNLOCK( heap->mutex ); } } CHECK_RMALLOC; } void * r_realloc ( void * ptr, size_t size ) { if ( ptr == NULL ) return r_malloc( size ); else { block_t * block = PTR_TO_BLOCK( ptr ); void * newptr = NULL; internal_size_t old_size = BLOCK_SIZE( block ); /* if new size < old size do nothing */ if ( old_size >= size ) return 
ptr; /* allocate new chunk, copy and free old */ newptr = r_malloc( size ); memcpy( newptr, ptr, old_size ); r_free( ptr ); return newptr; } } /******************************************************************** ******************************************************************** ** ** misc. functions for heap-management ** ******************************************************************** ********************************************************************/ /* * init/finish heap-manager */ static void rmalloc_init () { DEFINE_MUTEX( mutex ); if ( IS_LOCKED( mutex ) ) { fprintf( stderr, "(rmalloc) rmalloc_init : mutex locked\n" ); LOCK( mutex ); } if ( ! is_initialised ) { char * value; sys_conf.pagesize = getpagesize(); /* init tsd-key */ TSD_KEY_INIT( & heap_key ); /* re-initialise global heap */ init_heap( & global_heap ); /* * setup memory trace */ value = getenv( "RMALLOC_TRACE" ); if (( value != NULL ) && ((value[0] == '1') || (value[0] == '2'))) { /* set trace-type */ if ( value[0] == '1' ) trace.type = TRACE_STEPS; else trace.type = TRACE_ALLOCATION; /* get size of trace */ if ((value = getenv( "RMALLOC_TRACE_SIZE" )) != NULL ) { if ( strcmp( value, "all" ) == 0 ) trace.size = 0; else { trace.size = atoi( value ); /* truncate to multiple of 8 not bigger than trace_size */ trace.size = (trace.size / 8) * 8; } } /* get name of tracefile and initialise */ if ((value = getenv( "RMALLOC_TRACE_FILE" )) != NULL ) rmalloc_init_trace( value ); else rmalloc_init_trace( DEFAULT_TRACE_FILE ); } /* * register cleanup-functions */ if ( atexit( rmalloc_finish ) != 0 ) fprintf( stderr, "(rmalloc) init : error in atexit (%s)\n", strerror( errno ) ); if ( atexit( rmalloc_finish_trace ) != 0 ) fprintf( stderr, "(rmalloc) init : error in atexit (%s)\n", strerror( errno ) ); if ((value = getenv( "RMALLOC_STAT" )) != NULL ) { heap_stat_lvl = atoi( value ); if ( heap_stat_lvl > 0 ) { if ( atexit( rmalloc_stat ) != 0 ) fprintf( stderr, "(rmalloc) init : error in atexit (%s)\n", 
                             strerror( errno ) );
            }
        }

        is_initialised = true;
    }

    UNLOCK( mutex );
}

/*
 * end it all
 */
static void
rmalloc_finish ()
{
    /*
     * clean will be called at the end and then
     * the memory will be deallocated by the system
     * so why bother ???
     */
}

/*
 * initialise a heap: assign a fresh id, reset the data-segment,
 * all container/block lists and statistics, and (for non-global
 * heaps) create the per-heap mutex
 */
static void
init_heap ( heap_t * heap )
{
#if USE_PTHREAD == 1
    int      status;
#endif
    unsigned i;

    heap->next = NULL;
    heap->id   = heap_id++;

    heap->data_seg.begin       = NULL;
    heap->data_seg.pos         = NULL;
    heap->data_seg.end         = NULL;
    heap->data_seg.unused      = 0;
    heap->data_seg.loseg_first = NULL;
    heap->data_seg.loseg_last  = NULL;
    heap->data_seg.top_pad     = sys_conf.top_pad;

    for ( i = 0; i < NO_OF_SMALL; i++ )
    {
        heap->blocks.empty[i]    = NULL;
        heap->blocks.nonempty[i] = NULL;
    }

    heap->blocks.full       = NULL;
    heap->blocks.last_full  = NULL;
    heap->blocks.no_of_full = 0;

    for ( i = 0; i < NO_OF_CLASSES; i++ )
    {
        heap->blocks.blocks[i] = NULL;
        heap->stat.free[i]     = 0;
        heap->stat.used[i]     = 0;
    }

    heap->blocks.wilderness = NULL;

    heap->stat.used_mem   = 0;
    heap->stat.mem_in_use = 0;
    heap->stat.max_in_use = 0;
    heap->stat.allocated  = 0;

    heap->used = true;

    /* global heap's mutex is statically initialised */
    if ( heap != & global_heap )
    {
        MUTEX_INIT( heap->mutex, status );
    }
}

/*
 * inserts new heap into heap-list
 */
static void
insert_heap ( heap_t * heap )
{
    LOCK( heap_list.mutex );

    /* prepend into list */
    heap->next      = heap_list.heaps;
    heap_list.heaps = heap;
    heap_list.nheaps++;

    UNLOCK( heap_list.mutex );
}

/*
 * return unused/free heap from heap-list
 * (heaps of terminated threads are marked !used and get recycled here;
 *  the returned heap is immediately marked used)
 */
static heap_t *
get_free_heap ()
{
    heap_t * heap = heap_list.heaps;

    while ( heap != NULL )
    {
        if ( ! heap->used )
        {
            heap->used = true;
            return heap;
        }
        else
            heap = heap->next;
    }

    /* no free heap found */
    return NULL;
}

/*
 * translate size into size-class: small sizes map directly
 * (multiples of 8), larger sizes use a binary search over the
 * size_classes table
 */
static unsigned
size_to_class ( internal_size_t size )
{
    if ( size <= SMALL_SIZE )
        return (size >> 3) - 1;
    else
    {
#if 1
        /*
         * use binary search for size-class
         */
        unsigned lb = (SMALL_SIZE >> 3)-1;
        unsigned ub = NO_OF_CLASSES;
        unsigned split;

        while ( ub - lb > 1 )
        {
            split = (ub + lb) / 2;

            if (( size_classes[ split-1 ] < size ) && (size <= size_classes[ split ]))
                return split;

            if ( size_classes[ split ] < size )
                lb = split;
            else
                ub = split;
        }

        return ub;
#else
        /*
         * use linear search for size-class
         */
        unsigned sclass = SMALL_SIZE >> 3;

        while ((sclass < NO_OF_CLASSES) && (size_classes[ sclass ] < size))
            sclass++;

        return sclass;
#endif
    }
}

/********************************************************************
 ********************************************************************
 **
 ** allocation and deallocation of small sized blocks
 **
 ********************************************************************
 ********************************************************************/

/*
 * malloc/free for small sizes
 * (blocks <= SMALL_SIZE are handed out of per-size-class containers;
 *  a container whose last free block is taken moves to the "empty" list)
 */
static block_t *
small_alloc ( heap_t * heap, unsigned sclass )
{
    container_t * container;
    block_t     * block;

    assert( heap != NULL );

    /*
     * try to use block from non-empty container, if no such container
     * exists, try full ones or transfer a container from the global
     * heap. Only if no container is available at all, allocate a new one.
*/ container = heap->blocks.nonempty[ sclass ]; if ( container == NULL ) { container = alloc_container( heap, sclass ); insert_container( container, TYPE_NONEMPTY ); } assert( container != NULL ); block = container->blocks; container->blocks = block->next; container->free--; heap->stat.free[ container->sclass ]--; heap->stat.used[ container->sclass ]++; assert( block != NULL ); if ( container->free == 0 ) move_container( container, TYPE_NONEMPTY, TYPE_EMPTY ); return block; } static void small_free ( block_t * block ) { container_t * container; heap_t * heap; assert( block != NULL ); container = (container_t*) block->hc_addr; CHECK_CONT_MAGIC( container, "small_free" ); heap = container->heap; LOCK( heap->mutex ); /* update statistics */ heap->stat.mem_in_use -= BLOCK_SIZE( block ); /* * Insert block back into container, if container is full, * move it to the "full"-list and transfer last full * container to global heap if too many full containers exist */ block->next = container->blocks; container->blocks = block; container->free++; heap->stat.free[ container->sclass ]++; heap->stat.used[ container->sclass ]--; /* if free == 1, container was empty before */ if ( container->free == 1 ) move_container( container, TYPE_EMPTY, TYPE_NONEMPTY ); if ( container->free == container->max ) move_container( container, TYPE_NONEMPTY, TYPE_FULL ); UNLOCK( heap->mutex ); } /******************************************************************** ******************************************************************** ** ** container managment ** ******************************************************************** ********************************************************************/ /* * move given container from class "cfrom" to class "cto" */ static void move_container ( container_t * container, unsigned cfrom, unsigned cto ) { heap_t * heap; unsigned sclass; assert( container != NULL ); heap = container->heap; sclass = container->sclass; /* * release container from old class */ if ( 
container->prev != NULL ) container->prev->next = container->next; if ( container->next != NULL ) container->next->prev = container->prev; switch ( cfrom ) { case TYPE_EMPTY : if ( heap->blocks.empty[ sclass ] == container ) heap->blocks.empty[ sclass ] = container->next; break; case TYPE_NONEMPTY : if ( heap->blocks.nonempty[ sclass ] == container ) heap->blocks.nonempty[ sclass ] = container->next; break; case TYPE_FULL : if ( heap->blocks.full == container ) heap->blocks.full = container->next; if ( heap->blocks.last_full == container ) heap->blocks.last_full = container->prev; heap->blocks.no_of_full--; break; default: break; } /* * prepend container to new class */ container->prev = NULL; switch ( cto ) { case TYPE_EMPTY : container->next = heap->blocks.empty[ sclass ]; if ( heap->blocks.empty[ sclass ] != NULL ) heap->blocks.empty[ sclass ]->prev = container; heap->blocks.empty[ sclass ] = container; break; case TYPE_NONEMPTY : container->next = heap->blocks.nonempty[ sclass ]; if ( heap->blocks.nonempty[ sclass ] != NULL ) heap->blocks.nonempty[ sclass ]->prev = container; heap->blocks.nonempty[ sclass ] = container; break; case TYPE_FULL : container->next = heap->blocks.full; if ( heap->blocks.full != NULL ) heap->blocks.full->prev = container; else heap->blocks.last_full = container; heap->blocks.full = container; heap->blocks.no_of_full++; heap->stat.free[ container->sclass ] -= container->max; break; default: break; } /* * check if there are too many full containers in heap * and release last container to middle-malloc */ if (( cto == TYPE_FULL ) && ( heap->blocks.no_of_full > MAX_FULL_CONTAINERS )) { container_t * last = heap->blocks.last_full; heap->blocks.last_full = last->prev; last->prev->next = NULL; heap->blocks.no_of_full--; middle_free( heap, PTR_TO_BLOCK( last ) ); } } /* * insert (prepend) given container into class "cto" */ static void insert_container ( container_t * container, unsigned cto ) { heap_t * heap; unsigned sclass; assert( 
container != NULL ); heap = container->heap; sclass = container->sclass; /* * prepend container to new class */ container->prev = NULL; switch ( cto ) { case TYPE_EMPTY : container->next = heap->blocks.empty[ sclass ]; if ( heap->blocks.empty[ sclass ] != NULL ) heap->blocks.empty[ sclass ]->prev = container; heap->blocks.empty[ sclass ] = container; break; case TYPE_NONEMPTY : container->next = heap->blocks.nonempty[ sclass ]; if ( heap->blocks.nonempty[ sclass ] != NULL ) heap->blocks.nonempty[ sclass ]->prev = container; heap->blocks.nonempty[ sclass ] = container; break; case TYPE_FULL : container->next = heap->blocks.full; if ( heap->blocks.full != NULL ) heap->blocks.full->prev = container; else heap->blocks.last_full = container; heap->blocks.full = container; heap->blocks.no_of_full++; break; default: break; } } /* * return container */ static container_t * alloc_container ( heap_t * heap, unsigned sclass ) { container_t * container; assert( heap != NULL ); if ( heap->blocks.full != NULL ) { /* * use old unused containers */ container = heap->blocks.full; if ( container->next != NULL ) container->next->prev = NULL; else heap->blocks.last_full = NULL; heap->blocks.full = container->next; heap->blocks.no_of_full--; container->next = NULL; container->prev = NULL; if ( container->sclass != sclass ) { container->sclass = sclass; init_container_blocks( container ); } } else { /* * allocate container from middle-malloc */ const internal_size_t csize = EXTRA_DATA_SIZE + sizeof(container_t) + CONTAINER_SIZE; container = (container_t *) BLOCK_TO_PTR( middle_alloc( heap, csize, size_to_class( csize ) ) ); assert( container != NULL ); container->heap = heap; container->next = NULL; container->prev = NULL; container->sclass = sclass; SET_CONT_MAGIC( container ); /* build blocks in container */ init_container_blocks( container ); } return container; } static void init_container_blocks ( container_t * container ) { internal_size_t block_size; unsigned count, i; char * 
addr; block_t * old_block = NULL; assert( container != NULL ); block_size = size_classes[ container->sclass ]; count = CONTAINER_SIZE / block_size; addr = ((char*) container) + sizeof(container_t); container->max = count; container->free = count; container->blocks = (block_t*) addr; for ( i = 0; i < count; i++ ) { block_t * block = (block_t*) addr; SET_SIZE( block, block_size ); block->hc_addr = container; if ( old_block != NULL ) old_block->next = block; old_block = block; SET_BLOCK_MAGIC( block ); addr += block_size; } old_block->next = NULL; container->heap->stat.free[ container->sclass ] += container->max; } /******************************************************************** ******************************************************************** ** ** allocation and deallocation of middle-sized blocks ** ******************************************************************** ********************************************************************/ static block_t * middle_alloc ( heap_t * heap, internal_size_t size, unsigned sclass ) { block_t * block = NULL; unsigned i; assert( heap != NULL ); /* * look for an old block, and begin search with * given size-class */ for ( i = sclass; i < NO_OF_CLASSES; i++ ) { block = heap->blocks.blocks[ i ]; while ( block != NULL ) { if ( BLOCK_SIZE( block ) >= size ) { /* remove block from list */ if ( block->next != NULL ) block->next->prev = block->prev; if ( block->prev != NULL ) block->prev->next = block->next; else heap->blocks.blocks[ i ] = block->next; block->next = NULL; block->prev = NULL; /* update statistics */ heap->stat.free[ i ]--; /* * check if block is from a different size-class * and if the remainder of the block can be used * by another sclass */ if (( i != sclass ) && ( BLOCK_SIZE( block ) - size > MIN_BLOCK_SIZE)) { /* * split block and put remainder into another size-class */ block_t * new_block = (block_t*) (((char*) block) + size); unsigned s; SET_SIZE( new_block, BLOCK_SIZE( block ) - size ); SET_FREE( 
new_block ); SET_TAME( new_block ); SET_BLOCK_MAGIC( new_block ); new_block->hc_addr = heap; /* copy size-information to end of new block */ SET_EOB_SIZE( new_block ); /* put block into proper size-class */ s = size_to_class( BLOCK_SIZE( new_block ) ); new_block->prev = NULL; new_block->next = heap->blocks.blocks[ s ]; if ( new_block->next != NULL ) new_block->next->prev = new_block; heap->blocks.blocks[ s ] = new_block; heap->stat.free[ s ]++; /* adjust blocksize of old block */ SET_SIZE( block, size ); } else sclass = i; /* finish search */ i = NO_OF_CLASSES; break; } else block = block->next; } } /* * allocate block from system */ if ( block == NULL ) { block = (block_t*) system_alloc( heap, size ); assert( block != NULL ); SET_SIZE( block, size ); block->hc_addr = heap; } /* update statistics */ heap->stat.used[ sclass ]++; SET_USED( block ); /* set boundary tag in successor */ if ( ! IS_WILD( block ) ) { block_t * succ = (block_t*) (((char*) block) + BLOCK_SIZE( block )); SET_PRED_USED( succ ); } SET_BLOCK_MAGIC( block ); return block; } static void middle_free ( heap_t * heap, block_t * block ) { unsigned sclass; assert((heap != NULL) && ( block != NULL )); sclass = size_to_class( BLOCK_SIZE( block ) ); heap->stat.used[ sclass ]--; /* * try to coalesce block with succ */ if ( ! IS_WILD( block ) ) { block_t * succ = (block_t*) (((char*) block) + BLOCK_SIZE( block )); if ( ! IS_USED( succ ) ) { if ( ! 
IS_WILD( succ ) ) { /* remove successor from his list */ sclass = size_to_class( BLOCK_SIZE( succ ) ); if ( succ->next != NULL ) succ->next->prev = succ->prev; if ( succ->prev != NULL ) succ->prev->next = succ->next; if ( heap->blocks.blocks[ sclass ] == succ ) heap->blocks.blocks[ sclass ] = succ->next; heap->stat.free[ sclass ]--; } else { /* coalesced block is now in wilderness: * set status and copy links */ SET_WILD( block ); block->next = succ->next; block->prev = succ->prev; if ( heap->blocks.wilderness == succ ) heap->blocks.wilderness = block; } /* coalesce */ SET_SIZE( block, BLOCK_SIZE( block ) + BLOCK_SIZE( succ ) ); } } else { /* * reinsert block into wilderness */ block->next = heap->blocks.wilderness; block->prev = NULL; if ( block->next != NULL ) block->next->prev = block; heap->blocks.wilderness = block; } /* * try to coalesce with predecessor */ if ( ! IS_PRED_USED( block ) ) { internal_size_t psize = PRED_SIZE( block ); block_t * pred = (block_t*) (((char*) block) - psize); /* remove predecessor from his list */ sclass = size_to_class( psize ); if ( pred->next != NULL ) pred->next->prev = pred->prev; if ( pred->prev != NULL ) pred->prev->next = pred->next; if ( heap->blocks.blocks[ sclass ] == pred ) heap->blocks.blocks[ sclass ] = pred->next; heap->stat.free[ sclass ]--; /* coalesce */ SET_SIZE( pred, BLOCK_SIZE( block ) + psize ); /* adjust status and links if block is inside wilderness */ if ( IS_WILD( block ) ) { SET_WILD( pred ); pred->next = block->next; pred->prev = block->prev; if ( heap->blocks.wilderness == block ) heap->blocks.wilderness = pred; } /* replace block */ block = pred; } /* * put block back into wilderness or size-classes */ if ( IS_WILD( block ) ) { /* adjust links in wilderness-list */ if ( block->prev != NULL ) block->prev->next = block; if ( block->next != NULL ) block->next->prev = block; } else { block_t * succ; sclass = size_to_class( BLOCK_SIZE( block ) ); block->next = heap->blocks.blocks[ sclass ]; if ( 
block->next != NULL ) block->next->prev = block; block->prev = NULL; heap->blocks.blocks[ sclass ] = block; heap->stat.free[ sclass ]++; /* copy size-information to end of block */ SET_EOB_SIZE( block ); /* adjust boundary tag */ succ = (block_t*) (((char*) block) + BLOCK_SIZE( block )); SET_PRED_FREE( succ ); } SET_FREE( block ); } /******************************************************************** ******************************************************************** ** ** handling of unmanaged chunks ** ******************************************************************** ********************************************************************/ /* * allocate block directly */ static block_t * vm_alloc ( internal_size_t size ) { #if USE_MALLOC == 1 return (block_t*) malloc( size ); #else #if USE_MMAP == 1 char * p = (char*) MMAP( 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE ); if ( p == (char*) MAP_FAILED ) { fprintf( stderr, "(rmalloc) vm_alloc : mmap failed (%s)\n", strerror( errno ) ); ABORT; } return (block_t*) p; #else fprintf( stderr, "(rmalloc) vm_alloc : no supported allocation method specified\n" ); return NULL; #endif #endif } static void vm_dealloc ( block_t * block ) { #if USE_MALLOC == 1 return free( block ); #else #if USE_MMAP == 1 if ( munmap( (char*) block, BLOCK_SIZE( block ) ) != 0 ) { fprintf( stderr, "(rmalloc) vm_dealloc : munmap failed (%s)\n", strerror( errno ) ); ABORT; } #endif #endif } /******************************************************************** ******************************************************************** ** ** get info and set options from/for malloc ** ******************************************************************** ********************************************************************/ /* * set malloc options */ int r_mallopt ( int param, int val ) { if ( param == M_TOP_PAD ) { if ( val < 0 ) val = 0; sys_conf.top_pad = val; return 0; } return 0; } /* * report memory usage */ struct ul_mallinfo rmallinfo ( void ) { 
struct ul_mallinfo mi; internal_size_t total_size; internal_size_t used_size; internal_size_t free_size; heap_t * heap; LOCK( global_heap.mutex ); total_size = global_heap.stat.used_mem; used_size = global_heap.stat.mem_in_use; free_size = global_heap.stat.used_mem - global_heap.stat.mem_in_use; UNLOCK( global_heap.mutex ); LOCK( heap_list.mutex ); heap = heap_list.heaps; while ( heap ) { LOCK( heap->mutex ); total_size += heap->stat.used_mem; used_size += heap->stat.mem_in_use; free_size += heap->stat.used_mem - heap->stat.mem_in_use; UNLOCK( heap->mutex ); heap = heap->next; } UNLOCK( heap_list.mutex ); mi.arena = total_size; /* total space allocated from system */ mi.ordblks = 0; /* number of non-inuse chunks */ mi.smblks = 0; /* unused -- always zero */ mi.hblks = 0; /* number of mmapped regions */ mi.hblkhd = 0; /* total space in mmapped regions */ mi.usmblks = 0; /* unused -- always zero */ mi.fsmblks = 0; /* unused -- always zero */ mi.uordblks = used_size; /* total allocated space */ mi.fordblks = free_size; /* total non-inuse space */ mi.keepcost = 0; /* top-most, releasable (via malloc_trim) space */ return mi; } /* * wrapper for mallinfo */ #if OVERLOAD_MALLOC == 1 struct mallinfo mallinfo ( void ) { struct mallinfo mi; struct ul_mallinfo ul_mi = rmallinfo(); mi.arena = ul_mi.arena; mi.ordblks = ul_mi.ordblks; mi.smblks = ul_mi.smblks; mi.hblks = ul_mi.hblks; mi.hblkhd = ul_mi.hblkhd; mi.usmblks = ul_mi.usmblks; mi.fsmblks = ul_mi.fsmblks; mi.uordblks = ul_mi.uordblks; mi.fordblks = ul_mi.fordblks; mi.keepcost = ul_mi.keepcost; return mi; } #endif /******************************************************************** ******************************************************************** ** ** tracing ** ******************************************************************** ********************************************************************/ /* * allocation trace */ static void rmalloc_init_trace ( const char * name ) { assert( name != NULL ); if ((trace.fd = 
open( name, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH )) == -1) { fprintf( stderr, "(rmalloc) init_trace : could not open \"%s\" (%s)\n", name, strerror( errno ) ); return; } trace.old_used = 0; trace.old_free = 0; trace.old_alloc = 0; trace.step = 0; trace_mem = true; } /* * get status and write trace-entry */ static void rmalloc_trace () { internal_size_t used, free, alloc; if ( ! trace_mem ) return; if ( trace.size == 0 ) { heap_t * heap = heap_list.heaps; used = global_heap.stat.mem_in_use; free = global_heap.stat.used_mem - global_heap.stat.mem_in_use; alloc = global_heap.stat.allocated; while ( heap != NULL ) { used += heap->stat.mem_in_use; free += (heap->stat.used_mem - heap->stat.mem_in_use); alloc += heap->stat.allocated; heap = heap->next; } } else { unsigned sclass = size_to_class( trace.size ); heap_t * heap = heap_list.heaps; used = global_heap.stat.used[ sclass ] * size_classes[ sclass ]; free = global_heap.stat.free[ sclass ] * size_classes[ sclass ]; alloc = global_heap.stat.allocated; while ( heap != NULL ) { used += heap->stat.used[ sclass ] * size_classes[ sclass ]; free += heap->stat.free[ sclass ] * size_classes[ sclass ]; alloc += heap->stat.allocated; heap = heap->next; } } used /= 1024; free /= 1024; if ( trace.type == TRACE_STEPS ) { if (((used > 0) || (free > 0)) && ((used != trace.old_used) || (free != trace.old_free))) { char buffer[256]; sprintf( buffer, "%ld %ld %ld\n", (long) trace.step, (long) used, (long) free ); write( trace.fd, buffer, strlen(buffer) ); trace.old_used = used; trace.old_free = free; } trace.step++; } else if ( trace.type == TRACE_ALLOCATION ) { alloc /= 1024; if (( alloc != trace.old_alloc ) && ( ((used > 0) || (free > 0)) && ((used != trace.old_used) || (free != trace.old_free)))) { char buffer[256]; sprintf( buffer, "%.3f %ld %ld\n", ((float)alloc)/1024.0, used, free ); write( trace.fd, buffer, strlen(buffer) ); trace.old_used = used; trace.old_free = free; trace.old_alloc = alloc; } } 
} /* * finish tracing */ static void rmalloc_finish_trace () { if ( trace_mem ) { close( trace.fd ); trace_mem = false; } } /******************************************************************** ******************************************************************** ** ** statistical methods ** ******************************************************************** ********************************************************************/ #define BYTE_TO_MB( n ) (((double)(n)) / (1000.0 * 1000.0)) /* * print statistics */ static void print_heap_stat ( heap_t * heap ) { unsigned i; unsigned nwild; internal_size_t swild; block_t * block; if ( heap_stat_lvl <= 1 ) return; /* * statistic for container usage */ if ( heap_stat_lvl >= 3 ) { container_t * cont; fprintf( stderr, " sizeclass | containers |\n" ); fprintf( stderr, " | # empty | # nonempty |\n" ); fprintf( stderr, "-----------+------------+------------+\n" ); for ( i = 0; i <= NO_OF_SMALL; i++ ) { unsigned empty = 0; unsigned nonempty = 0; cont = heap->blocks.empty[i]; while ( cont != NULL ) { empty++; cont = cont->next; } cont = heap->blocks.nonempty[i]; while ( cont != NULL ) { nonempty++; cont = cont->next; } if (( empty != 0 ) || (nonempty != 0)) fprintf( stderr, " %9ld | %10d | %10d |\n", size_classes[i], empty, nonempty ); } fprintf( stderr, "-----------+------------+------------+\n" ); fprintf( stderr, " full blocks : %d\n", heap->blocks.no_of_full ); } /* * statistic for small-usage */ if ( heap_stat_lvl >= 3 ) { fprintf( stderr, " sizeclass | # used | # free | kB | comment \n" ); fprintf( stderr, "-----------+------------+------------+------------+---------------\n" ); for ( i = NO_OF_SMALL; i < NO_OF_CLASSES; i++ ) { block = heap->blocks.blocks[i]; if ( block != NULL ) { internal_size_t mem = 0; while ( block != NULL ) { mem += BLOCK_SIZE( block ); block = block->next; } fprintf( stderr, "%10ld | %10ld | %10ld | %10ld |", (long) size_classes[ i ], (long) heap->stat.used[i], (long) heap->stat.free[i], (long) mem / 
1024 ); if ( heap->stat.used[i] != 0 ) fprintf( stderr, " %ld chunk(s) alive", heap->stat.used[i] ); fprintf( stderr, "\n" ); } } } /* * sum up wilderness */ nwild = 0; swild = 0; block = heap->blocks.wilderness; while ( block != NULL ) { nwild++; swild += BLOCK_SIZE( block ); block = block->next; } /* * output */ if ((heap->stat.mem_in_use != 0) || (heap->stat.used_mem != 0)) fprintf( stderr, " mem in use / allocated = %.2f MB / %.2f MB\n", BYTE_TO_MB(heap->stat.mem_in_use), BYTE_TO_MB(heap->stat.used_mem) ); if ( nwild != 0 ) fprintf( stderr, " wilderness blocks/size = %d / %.2f MB\n", nwild, BYTE_TO_MB(swild) ); fprintf( stderr, " top-pad = %.2f MB\n", BYTE_TO_MB( heap->data_seg.top_pad ) ); } static void rmalloc_stat () { heap_t * heap; if ( heap_stat_lvl == 0 ) return; if ( ! is_initialised ) { fprintf( stderr, "(rmalloc) stat : heap not initialised\n" ); return; } fprintf( stderr, "(rmalloc) rmalloc version %s\n", RMALLOC_VERSION ); /* * print statstics about each heap */ if ( heap_stat_lvl >= 2 ) { print_heap_stat( & global_heap ); heap = heap_list.heaps; while ( heap != NULL ) { fprintf( stderr, "(rmalloc) heap %ld :\n", (long) heap->id ); print_heap_stat( heap ); heap = heap->next; } } if ( heap_stat_lvl >= 1 ) { internal_size_t total_size = global_heap.stat.used_mem; internal_size_t max_use = global_heap.stat.max_in_use; heap = heap_list.heaps; while ( heap != NULL ) { total_size += heap->stat.used_mem; max_use += heap->stat.max_in_use; heap = heap->next; } fprintf( stderr, "(rmalloc) global stat:\n" ); if ( total_size > 0 ) fprintf( stderr, " mem used by rmalloc = %.2f MB\n", BYTE_TO_MB(total_size) ); if ( max_use > 0 ) fprintf( stderr, " mem used by app (frag) = %.2f MB (%.2f)\n", BYTE_TO_MB(max_use), ((double) total_size) / ((double) max_use) ); } } #if DEBUG_RMALLOC == 1 static void check_rmalloc () { /* * check all heaps, all containers and all blocks */ heap_t * heap = heap_list.heaps; while ( heap != NULL ) { container_t * container; block_t * 
block; unsigned i; for ( i = 0; i < NO_OF_SMALL; i++ ) { container = heap->blocks.nonempty[i]; while ( container != NULL ) { #if DEBUG_CONT == 1 if ( container->magic != CONT_MAGIC ) { fprintf( stderr, "(check_rmalloc) nonempty container is corrupted\n" ); continue; } #endif block = container->blocks; while ( block != NULL ) { #if DEBUG_BLOCK == 1 if ( block->magic != BLOCK_MAGIC ) { fprintf( stderr, "(check_rmalloc) small nonempty block is corrupted\n" ); continue; } #endif block = block->next; } container = container->next; } } container = heap->blocks.full; while ( container != NULL ) { #if DEBUG_CONT == 1 if ( container->magic != CONT_MAGIC ) { fprintf( stderr, "(check_rmalloc) full container is corrupted\n" ); continue; } #endif block = container->blocks; while ( block != NULL ) { #if DEBUG_BLOCK == 1 if ( block->magic != BLOCK_MAGIC ) { fprintf( stderr, "(check_rmalloc) small full block is corrupted\n" ); continue; } #endif block = block->next; } container = container->next; } for ( i = NO_OF_SMALL; i < NO_OF_CLASSES; i++ ) { block = heap->blocks.blocks[i]; while ( block != NULL ) { #if DEBUG_BLOCK == 1 if ( block->magic != BLOCK_MAGIC ) { fprintf( stderr, "(check_rmalloc) middle block is corrupted\n" ); continue; } if ( block->__tmp__ != 0 ) { fprintf( stderr, "(check_rmalloc) middle block is corrupted\n" ); continue; } #endif block = block->next; } } heap = heap->next; } } #endif #ifdef __cplusplus } #endif /******************************************************************** ******************************************************************** ** ** C++ memory management ** ******************************************************************** ********************************************************************/ /* * overload global new/delete */ #if defined(__cplusplus) && OVERLOAD_NEW == 1 #include void * operator new ( size_t n ) throw (std::bad_alloc) { return r_malloc( n ); } void * operator new[] ( size_t n ) throw (std::bad_alloc) { return r_malloc( n 
); } void operator delete ( void * p ) throw () { r_free( p ); } void operator delete[] ( void * p ) throw () { r_free( p ); } #endif /* defined(__cplusplus) && OVERLOAD_NEW == 1 */