/* * RMALLOC by Ronald Kriemann * -------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation (version 2 of the License). * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * Documentation * ------------- * * One reason for this version of malloc was a problem I encountered * with one of my other programs when allocating several gigabytes. * Because of the used algorithms I deallocated and allocated most * of this memory (which was used by relativly small chunks). This * behaviour confused all other malloc implementations I tested and * led to unacceptable program-performance. So I decided to do it * by myself and wrote this lib which performed MUCH better, * allthough I have to admit, that one can easily construct programs * where the used strategy leads to a totaly disastrous memory- * consumption. Anyway here are the details: * * This malloc implementation works by "recycling" freed blocks * (this is also responsible for the "r" in rmalloc and not the * R in Ronald ;). We NEVER give a block back to the system, * so be sure that your application uses similar blocksizes during * program- execution. Also, this malloc assumes the overall memory- * consumption of the program to be rather high (several tens or * hundreds of megabytes [or better gigabytes ;]). * * We allocate chunks in heaps, and a heap is built, if a * thread wants to allocate data, but no heap is free (same * as in ptmalloc). 
After using the heap, we save this heap
 * as thread-specific-data and try to use it next time. A heap
 * consists mainly of a data-segment which is allocated from
 * the system via sbrk or mmap. To avoid too many calls to these
 * functions we allocate system-memory with an additional size
 * (top_pad) which is by default 10 MB, but can be changed via
 * mallopt. Be aware that a too small top_pad can increase
 * fragmentation and degrade performance. On the other hand, if
 * you have 8 concurrent threads, you will have at least 90 MB
 * memory consumption with the default top_pad (8*10 for thread-heaps
 * plus global-heap).
 *
 * Besides the user-data requested, we store additional data-fields,
 * namely the size of the chunk, the heap it was allocated in and some
 * status-flags. Because we use only multiples of 8 as chunk-sizes,
 * we have three bits free in each size-field which we use as follows:
 *
 *   BIT 0 : 0 - chunk free
 *           1 - chunk in use
 *   BIT 1 : 0 - chunk MUST not be deallocated (only in small blocks)
 *           1 - chunk can be deallocated
 *   BIT 2 : not used
 *
 * We also use additional fields for management: next-/prev- pointers
 * for lists. These fields are only used when the block is not in use,
 * so we can use the area of the chunk.
 *
 * Because of these data-fields we have a minimal chunk-size of 16 bytes
 * in 32 bit systems and 24 bytes in 64 bit systems.
 *
 * We have size-classes for storing freed chunks (thanks to libhoard for the idea).
 * By this we can handle allocation and deallocation in O(1) and avoid fragmentation
 * by providing appropriate classes. One can also choose between Best Fit and
 * First Fit when allocating in these size-classes (with Best Fit, though, O(1)
 * complexity cannot be assured).
 *
 * We use a special allocation strategy for small blocks (<= SMALL_SIZE)
 * to avoid too small block-requests from the system: if we need new chunks,
 * we allocate more than one at a time.
How many is determined by computing the
 * least common multiple of chunk- and pagesize. This not only
 * helps keeping sbrk-requests low, but also speeds things up.
 * (This procedure must be activated; see HANDLE_SMALL below)
 *
 * Some environment variables are supported for doing a memory-trace or
 * printing statistics:
 *
 *   RMALLOC_TRACE      : not defined or
 *                        0 - no memory tracing
 *                        1 - do a memory-trace
 *   RMALLOC_TRACE_FILE : filename of the tracefile (default = trace)
 *   RMALLOC_TRACE_SIZE : blocksize we should trace (default = 0 - means whole memory)
 *   RMALLOC_STAT       : not defined or
 *                        0 - no statistics will be printed
 *                        1 - print info about allocated space
 *                        2 - print info about number of chunks and allocated memory
 *                        3 - print info about each allocated chunksize
 *   RMALLOC_BUILD_SIZE : 1 - print size-classes-source usable for rmalloc
 *
 *
 * Changelog
 * ---------
 *
 * v0.95  - round sizes to next size-class to guarantee that there
 *          is a free block in the list (avoid searching)
 *        - increased number of size-classes; changed size_to_class to
 *          use a binary search instead of linear
 *        - ok. maybe 10 MB for the DEFAULT_TOP_PAD is better ?
* - implemented trace-functionality for thread-heaps * v0.94 - rearranged code for system-allocation (mmap and malloc had * too much in common to be separated) * - removed most messages when mutices were locked * - set MMAP to be default, even when malloc is not overwritten * - changed "r_mallinfo" to "rmallinfo" and added declaration * in rmalloc.h * v0.93 - changed heap handling, now each thread has a private * heap; if the thread terminates, this heap is then marked * as free and can be used by a new thread * - removed creation of first heap (besides global-heap), this * will now be done on demand (in r_malloc) * - DEFAULT_TOP_PAD reduced to 5 MB (should also be enough) * - heap-list is no longer a circular list (just a linear one) * - heaps can now be aligned to the cache-line-size (ALIGN_TO_CACHE_SIZE) * - added wrapper for mallinfo which calls internal r_mallinfo * (r_mallinfo uses mallinfo-struct with ulong fields) * v0.92 - replaced simulated thread-specific-data by real one * v0.91 - fixed bug/feature when using malloc as system-alloc: only * requested chunks of needed size were allocated, not plus * DEFAULT_TOP_PAD * v0.90 - initial release * * ToDo * ---- * * - implement request of chunks in the order: * local heap -> global heap -> system * global heap is not used now, might reduce fragmentation * - when overloading malloc: pthread_key_create calls malloc * -> recursion in initialisation * - handle size-requests which are larger than maximal size-class */ /* * the version info */ #define RMALLOC_VERSION "0.95" /***************************************************************** ***************************************************************** ** ** some options ** ***************************************************************** *****************************************************************/ /* * use rmalloc as a malloc-replacment */ #if defined(__cplusplus) && !defined(OVERLOAD_MALLOC) # define OVERLOAD_MALLOC 0 #else # ifndef OVERLOAD_MALLOC # define 
OVERLOAD_MALLOC 1 # endif #endif /* * use rmalloc as a new/delete replacment */ #if defined(__cplusplus) && !defined(OVERLOAD_NEW) #define OVERLOAD_NEW 1 #endif /* * use pthreads */ #ifndef USE_PTHREAD #define USE_PTHREAD 1 #endif /* * define system-allocation method: MALLOC, MMAP, SBRK */ #if OVERLOAD_MALLOC == 1 # ifndef USE_MMAP # define USE_MMAP 1 # endif # ifndef USE_SBRK # define USE_SBRK 0 # endif #else # ifndef USE_MALLOC # define USE_MALLOC 0 # endif # ifndef USE_MMAP # define USE_MMAP 1 # endif # ifndef USE_SBRK # define USE_SBRK 0 # endif #endif /* * check heap extracted from memory-chunk */ #ifndef CHECK_HEAP #define CHECK_HEAP 1 #endif #include #include #include #include #include #include #include #include #include #include #include #if USE_PTHREAD == 1 #include #endif #include "rmalloc.h" #ifdef __cplusplus extern "C" { #endif /***************************************************************** ***************************************************************** ** ** defines and option setting ** ***************************************************************** *****************************************************************/ /* overload malloc functions */ #if OVERLOAD_MALLOC == 1 # define r_malloc malloc # define r_calloc calloc # define r_free free # define r_realloc realloc # define r_mallopt mallopt #endif /* number of size classes */ #define NO_OF_CLASSES 223 /* upper limit for small chunks (must be multiple of 8) */ #define SMALL_SIZE 256 /* define to handle small chunks differently */ #define HANDLE_SMALL 1 /* define size of cache-line in bytes */ #define CACHE_LINE_SIZE 64 /* align heaps to cache-line size */ #define ALIGN_TO_CACHE_SIZE 0 /* default trace file */ #define DEFAULT_TRACE_FILE "trace" /* default value for top-pad */ #define DEFAULT_TOP_PAD 10*1024*1024 /* error value in sbrk */ #define SBRK_FAIL -1 /* definitions for mmap-usage */ #if USE_MMAP == 1 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) # define MAP_ANONYMOUS MAP_ANON 
#endif

#if !defined(MAP_FAILED)
# define MAP_FAILED ((char*)-1)
#endif

/* without MAP_ANONYMOUS we map /dev/zero; the fd is opened lazily on
   the first MMAP call and reused afterwards */
#if !defined(MAP_ANONYMOUS)

static int dev_zero_fd = -1;

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
            mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
           (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif /* !defined(MAP_ANONYMOUS) */
#endif /* USE_MMAP == 1 */

/*****************************************************************
 *****************************************************************
 **
 ** data-types for rmalloc
 **
 *****************************************************************
 *****************************************************************/

/* internal size and pointer types */
typedef unsigned long internal_size_t;

/* bool looks better than int */
#ifndef __cplusplus
typedef enum { false, true } bool;
#endif

/*
 * mutex and thread-specific-data type
 */
#if USE_PTHREAD == 1
typedef pthread_mutex_t mutex_t;
typedef pthread_key_t tsd_key_t;
#else
typedef int mutex_t;
typedef int tsd_key_t;
#endif

/*
 * for holding info about system
 */
typedef struct
{
    internal_size_t pagesize;   /* number of bytes per page */
    unsigned int    nprocs;     /* number of processors */
    internal_size_t top_pad;    /* number of extra bytes to allocated in each sbrk */
} sysconf_t;

/*
 * holds size information for a data segment
 */
typedef struct
{
    internal_size_t alloc;      /* number of bytes allocated */
    internal_size_t size;       /* total number of bytes */
} size_info_t;

/*
 * data-structure for a system-heap
 * (represents data-segment)
 */
typedef struct
{
    char        * begin;        /* pointer to the first byte in the data-segment */
    char        * pos;          /* pointer to the first free byte in the data-segment */
    char        * end;          /* pointer to the first byte out of the data-segment */
    size_info_t   local;        /* info about local data-segment */
    size_info_t   global;       /* info about global data-segment */
    internal_size_t unused;     /* number of bytes unusable in data-segment (fragmented) */
} data_seg_t;

/*
 * node for memory chunks; prev/next overlay the user-data area
 * and are therefore only valid while the chunk is free
 */
typedef struct internal_node_struct
{
    /* size of chunk (low 3 bits hold the status flags) */
    internal_size_t size;

    /* heap this chunk belongs to */
    void * heap;

    /* next/prev pointer for list */
    struct internal_node_struct * prev;
    struct internal_node_struct * next;
} node_t;

/* size of extra-data to be stored in each chunk (size + heap fields) */
#define EXTRA_DATA_SIZE (sizeof(internal_size_t) + sizeof(void*))

/*
 * holding statistical information
 */
typedef struct
{
    internal_size_t alloc[ NO_OF_CLASSES+1 ];  /* number of allocated small chunks */
    internal_size_t used[ NO_OF_CLASSES+1 ];   /* number of used small chunks */
    internal_size_t free[ NO_OF_CLASSES+1 ];   /* number of free small chunks */
    internal_size_t used_mem;                  /* total number of bytes used in heap (alloc from system) */
    internal_size_t mem_in_use;                /* number of bytes in heap used by application */
} stat_t;

/*
 * type for a heap
 */
typedef struct heap_s
{
    int             id;                        /* unique id of heap */
    struct heap_s * next;                      /* link for the heap-list */
    data_seg_t      data_seg;                  /* data segment associated with heap */
    node_t        * free[ NO_OF_CLASSES+1 ];   /* free chunks, one list per size-class */
    stat_t          stat;                      /* status information */
    bool            used;                      /* if false, heap is not used */
    mutex_t         mutex;                     /* mutex for locking heap */
} heap_t;

/*
 * list of heaps for the threads
 */
typedef struct
{
    heap_t       * heaps;                      /* linked list of heaps */
    unsigned int   nheaps;                     /* number of heaps in list */
    mutex_t        mutex;                      /* mutex for safety */
} heap_list_t;

/*
 * type for tracing memory-allocation
 */
typedef struct
{
    int             fd;                        /* file descriptor of trace-file */
    internal_size_t old_used;                  /* old number of used bytes */
    internal_size_t old_free;                  /* old number of free bytes */
    unsigned long   step;                      /* number of allocation-steps */
    internal_size_t size;                      /* size of chunk to trace */
} trace_t;

/*****************************************************************
 *****************************************************************
 **
 ** thread definition
 **
 *****************************************************************
 *****************************************************************/

#if USE_PTHREAD == 1

/* maximal number of threads */
#define TSD_MAX 256

/*
 * handling of mutices
 */
# define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
# define DEFINE_MUTEX( m ) static mutex_t m = PTHREAD_MUTEX_INITIALIZER
# define MUTEX_INIT(mutex,status) if (( status = pthread_mutex_init( & mutex, NULL )) != 0) \
                                  { fprintf( stderr, "(rmalloc) init_heap : error in mutex_init (%s)\n", \
                                             strerror( status ) ); \
                                    exit( 1 ); }
# define LOCK( m ) pthread_mutex_lock( & m )
# define UNLOCK( m ) pthread_mutex_unlock( & m )
# define TRYLOCK( m ) pthread_mutex_trylock( & m )
# define MUTEX_BUSY EBUSY
# define IS_LOCKED( m ) TRYLOCK( m ) == MUTEX_BUSY

/*
 * handling thread-specific-data
 */
# define TSD_KEY_INIT( key ) pthread_key_create( key, heap_key_destructor )
# define TSD_GET_DATA( key ) pthread_getspecific( key )
# define TSD_SET_DATA( key, data ) pthread_setspecific( key, (void *) data )

/*
 * conversion thread <-> heap
 */
# define GET_OLD_HEAP() ((heap_t*) TSD_GET_DATA( heap_key ))
# define SET_OLD_HEAP( heap ) TSD_SET_DATA( heap_key, (void*) heap )

#else

/* maximal number of threads */
#define TSD_MAX 256

/*
 * handling of mutices (no-ops in the single-threaded build)
 */
# define MUTEX_INITIALIZER 0
# define DEFINE_MUTEX( m )
# define MUTEX_INIT(mutex,status)
# define LOCK( m )
# define UNLOCK( m )
# define TRYLOCK( m ) 1
# define MUTEX_BUSY 0
# define IS_LOCKED( m ) false

/*
 * handling thread-specific-data
 */
# define TSD_KEY_INIT( key ) { int i; (*(key)) = 0; for ( i = 0; i < TSD_MAX; i++ ) tsd[i] = NULL; }

/*
 * conversion thread <-> heap
 */
# define THREAD_ID 0
# define GET_OLD_HEAP() & global_heap
# define SET_OLD_HEAP( heap )

#endif /* USE_PTHREADS == 1 */

/*****************************************************************
 *****************************************************************
 **
 ** heap specific data
 **
 *****************************************************************
 *****************************************************************/

/* array holding the different size classes */
static internal_size_t size_classes[ NO_OF_CLASSES ] = {
           8,      16,      24,      32,      40,      48,      56,      64,
          72,      80,      88,      96,     104,     112,     120,     128,
         136,     144,     152,     160,     168,     176,     184,     192,
         200,     208,     216,     224,     232,     240,     248,     256,
         320,     384,     448,     512,     576,     640,     704,     768,
         832,     896,     960,    1024,    1088,    1152,    1216,    1280,
        1344,    1408,    1472,    1536,    1600,    1664,    1728,    1792,
        1856,    1920,    1984,    2048,    2112,    2176,    2240,    2304,
        2816,    3328,    3840,    4352,    4864,    5376,    5888,    6400,
        6912,    7424,    7936,    8448,    8960,    9472,    9984,   10496,
       11008,   11520,   12032,   12544,   13056,   13568,   14080,   14592,
       15104,   15616,   16128,   16640,   17152,   17664,   18176,   18688,
       22784,   26880,   30976,   35072,   39168,   43264,   47360,   51456,
       55552,   59648,   63744,   67840,   71936,   76032,   80128,   84224,
       88320,   92416,   96512,  100608,  104704,  108800,  112896,  116992,
      121088,  125184,  129280,  133376,  137472,  141568,  145664,  149760,
      166144,  182528,  198912,  215296,  231680,  248064,  264448,  280832,
      297216,  313600,  329984,  346368,  362752,  379136,  395520,  411904,
      428288,  444672,  461056,  477440,  493824,  510208,  526592,  542976,
      559360,  575744,  592128,  608512,  624896,  641280,  657664,  674048,
      741456,  815600,  897160,  986880, 1085568, 1194120, 1313536, 1444888,
     1589376, 1748312, 1923144, 2115456, 2327000, 2559704, 2815672, 3097240,
     3406960, 3747656, 4122424, 4534664, 4988128, 5486944, 6035632, 6639200,
     7303112, 8033424, 8836768, 9720448, 10692488, 11761736, 12937912, 14231704,
    15654872, 17220360, 18942392, 20836632, 22920296, 25212328, 27733560, 30506912,
    33557608, 36913368, 40604704, 44665168, 49131688, 54044856, 59449344, 65394280,
    71933704, 79127072, 87039784, 95743760, 105318136, 115849944, 127434944, 140178432,
    154196280, 169615904, 186577496, 205235248, 225758768, 248334648, 273168112 };

/* holds info about system */
static sysconf_t sys_conf = { 0, 1, DEFAULT_TOP_PAD };

/* global heap */
static heap_t global_heap = { 0, NULL,
                              { NULL, NULL, NULL, { 0, 0 }, { 0, 0 }, 0 },
                              { 0 },
                              { { 0 }, { 0 }, { 0 }, 0, 0 },
                              true,
                              MUTEX_INITIALIZER };

/* counter for the allocated heaps */
static unsigned int heap_id = 0;

/* linked list of heaps for threads (global heap is not a part of it) */
static heap_list_t heap_list = { NULL, 0, MUTEX_INITIALIZER };

/* thread specific data */
#if USE_PTHREAD == 0
static heap_t * tsd[ TSD_MAX ];
#endif

/* is the heap initialised */
static bool is_initialised = false;

/* how much statistics should be printed */
static unsigned int heap_stat_lvl = 0;

/* for memory trace */
static bool trace_mem = false;
static trace_t trace = { -1, 0, 0, 0, 0 };

/* holds allocations sizes for very small chunks */
static internal_size_t alloc_size[ SMALL_SIZE ];

/* key for accessing thread-specific-data */
static tsd_key_t heap_key;

/*****************************************************************
 *****************************************************************
 **
 ** defines for easier access
 **
 *****************************************************************
 *****************************************************************/

/* user pointer <-> chunk header conversion */
#define NODE_TO_PTR( n ) ((void *) (((char*) n) + EXTRA_DATA_SIZE))
#define PTR_TO_NODE( p ) ((node_t*) (((char*) p) - EXTRA_DATA_SIZE))

/* the low 3 bits of node->size carry flags, hence the mask */
#define NODE_SIZE( n ) ((n)->size & ~0x7)
#define IS_USED( n ) ((n)->size & 0x1)
#define SET_USED( n ) ((n)->size |= 0x1)
#define IS_FREE( n ) (! ((n)->size & 0x1))
#define SET_FREE( n ) ((n)->size &= ~0x1)
#define SET_DEALLOC( n ) ((n)->size |= 0x2)
#define IS_DEALLOC( n ) ((n)->size & 0x2)

#ifndef MAX
#define MAX(x,y) ((x) > (y) ? (x) : (y))
#endif

/*****************************************************************
 *****************************************************************
 **
 ** forward declarations and inline functions
 **
 *****************************************************************
 *****************************************************************/

/*
 * initialise/finish malloc
 */
static void rmalloc_init ();
static void rmalloc_finish ();
static void init_alloc_sizes ();

/*
 * heap management
 */
static void init_heap ( heap_t * heap );
static void insert_heap ( heap_t * heap );
static heap_t * get_free_heap ();
#if USE_PTHREAD == 1
static void heap_key_destructor ( void * arg );
#endif

/*
 * allocation/deallocation
 */
void * r_malloc ( size_t size );
void * r_calloc ( size_t nmemb, size_t size);
void r_free ( void * ptr );
void * r_realloc ( void * ptr, size_t size );

/*
 * free all chunks
 */
static void rmalloc_clean ();

/*
 * alternative handling of small allocation sizes
 */
#if HANDLE_SMALL == 1
static node_t * allocate_small ( heap_t * heap, internal_size_t size );
#endif

/*
 * handling of chunks
 */
static node_t * get_free ( heap_t * heap, internal_size_t size );
static void put_free ( heap_t * heap, node_t * node );
static void store_unused ( void * ptr, internal_size_t size, heap_t * heap );

/*
 * trace memory allocation
 */
static void rmalloc_init_trace ( const char * name );
static void rmalloc_trace ();
static void rmalloc_finish_trace ();

/*
 * print statistics
 */
static void rmalloc_stat ();

/*
 * build size-classes
 */
static void build_size_classes ();

/*
 * translate size into size-class
 */
static unsigned int size_to_class ( internal_size_t size );

/*
 * round size up to nearest size-class
 */
inline internal_size_t
round_size ( internal_size_t size )
{
    if ( size <= SMALL_SIZE )
        return size;
    else
        return size_classes[ size_to_class( size )+1 ];
}

/*****************************************************************
*****************************************************************
**
** allocation of memory from system
**
*****************************************************************
*****************************************************************/

/*
 * allocation from system via sbrk / mmap etc.
 * returns a fresh, 8-byte-granular region of n bytes carved from
 * the heap's data-segment, growing the segment from the OS when
 * the remaining space does not suffice; caller must hold heap->mutex
 */
static void *
system_alloc ( heap_t * heap, internal_size_t n )
{
    char       * tmp;
    data_seg_t * data_seg;

#if USE_MALLOC == 1 || USE_SBRK == 1
    DEFINE_MUTEX( mutex );
#endif

    if ( n == 0 )
        return NULL;

    assert( heap != NULL );

    /* get datasegment of given heap */
    data_seg = & heap->data_seg;

    /* adjust n to be multiple of 8 */
    if ( n % 8 != 0 )
        n += 8 - (n % 8);

    /*
     * check if we have enough free space in heap
     */

    if ( data_seg->local.size - data_seg->local.alloc >= n )
    {
        data_seg->local.alloc += n;
        data_seg->global.alloc += n;
    }
    else
    {
#if USE_MMAP == 1 || USE_MALLOC == 1
        /*
         * use system-malloc to request a large memory-chunk
         */

        /* put left space as chunk into free list of heap */
        if ( data_seg->local.size - data_seg->local.alloc )
        {
            store_unused( (void*) data_seg->pos,
                          data_seg->local.size - data_seg->local.alloc,
                          heap );
        }

        data_seg->local.size = n + sys_conf.top_pad;
        data_seg->local.alloc = n;
        data_seg->global.alloc += n;

        /* adjust size to be multiple of pagesize */
        if ( data_seg->local.size % sys_conf.pagesize != 0 )
            data_seg->local.size += sys_conf.pagesize - (data_seg->local.size % sys_conf.pagesize);

        data_seg->global.size += data_seg->local.size;

#if USE_MALLOC == 1
        /* call malloc in a thread-safe way */
        LOCK( mutex );
        data_seg->begin = (char*) malloc( data_seg->local.size );
        UNLOCK( mutex );

        if ( data_seg->begin == (char*) NULL )
        {
            fprintf( stderr, "(rmalloc) system_alloc : malloc failed (%s)\n",
                     strerror( errno ) );
            exit( 1 );
        }
#else /* USE_MMAP == 1 */
        /* map new heap with mmap */
        data_seg->begin = (char*) MMAP( 0, data_seg->local.size,
                                        PROT_READ | PROT_WRITE, MAP_PRIVATE );

        if ( data_seg->begin == (char*) MAP_FAILED )
        {
            fprintf( stderr, "(rmalloc) system_alloc : mmap failed (%s)\n",
                     strerror( errno ) );
            exit( 1 );
        }
#endif

#if ALIGN_TO_CACHE_SIZE == 1
        /* align heap-begin to size of cache-line */
        if ( ((internal_size_t) data_seg->begin) % CACHE_LINE_SIZE != 0 )
        {
            internal_size_t ofs = ((internal_size_t) data_seg->begin) % CACHE_LINE_SIZE;

            /* adjust beginning of heap */
            data_seg->begin += ofs;
            data_seg->local.alloc += ofs;

            /* fprintf( stderr, "(rmalloc) system_alloc : heap not aligned (diff = %d)\n",
               ((internal_size_t) data_seg->begin) % CACHE_LINE_SIZE ); */
        }
#endif

        data_seg->pos = data_seg->begin;
        data_seg->end = data_seg->begin + data_seg->local.size;

#elif USE_SBRK == 1
        /*
         * check if first call or someone other has called sbrk
         */
        if ((data_seg->begin == NULL) || (((char*) sbrk(0)) != data_seg->end))
        {
            /* put left space as chunk into free list of heap */
            if ( data_seg->local.size - data_seg->local.alloc )
            {
                store_unused( (void*) data_seg->pos,
                              data_seg->local.size - data_seg->local.alloc,
                              heap );
            }

            /*
             * allocate new heap
             */

            data_seg->local.size = n + sys_conf.top_pad;
            data_seg->local.alloc = n;
            data_seg->global.alloc += n;

            /* adjust size to be multiple of pagesize */
            if ( data_seg->local.size % sys_conf.pagesize != 0 )
                data_seg->local.size += sys_conf.pagesize - (data_seg->local.size % sys_conf.pagesize);

            data_seg->global.size += data_seg->local.size;

            LOCK( mutex );
            data_seg->begin = (char*) sbrk( data_seg->local.size );
            UNLOCK( mutex );

            if ( data_seg->begin == (char*) SBRK_FAIL )
            {
                fprintf( stderr, "(rmalloc) system_alloc : sbrk failed (%s)\n",
                         strerror( errno ) );
                exit( 1 );
            }

            /* adjust heap to be aligned by 8 */
            if ( ((internal_size_t) data_seg->begin) % 8 != 0 )
            {
                internal_size_t adjust = 8 - (((internal_size_t) data_seg->begin) % 8);

                data_seg->begin += adjust;

                LOCK( mutex );
                if ( sbrk( adjust ) == (void*) SBRK_FAIL )
                {
                    fprintf( stderr, "(rmalloc) system_alloc : sbrk failed (%s)\n",
                             strerror( errno ) );
                    exit( 1 );
                }
                UNLOCK( mutex );
            }

            LOCK( mutex );
            data_seg->pos = data_seg->begin;
            data_seg->end = (char*) sbrk(0);
            UNLOCK( mutex );
        }
        else
        {
            /*
             * extend old heap
             */

            internal_size_t add = n + sys_conf.top_pad;

            data_seg->local.alloc += n;
            data_seg->global.alloc += n;

            /* adjust size to be multiple of pagesize */
            if ( add % sys_conf.pagesize != 0 )
                add += sys_conf.pagesize - (add % sys_conf.pagesize);

            data_seg->local.size += add;
            data_seg->global.size += add;

            LOCK( mutex );
            if ( sbrk( add ) == (void*) SBRK_FAIL )
            {
                fprintf( stderr, "(rmalloc) system_alloc : sbrk failed (%s)\n",
                         strerror( errno ) );
                exit( 1 );
            }
            data_seg->end = (char*) sbrk(0);
            UNLOCK( mutex );
        }
#else
        fprintf( stderr, "(rmalloc) system_alloc : no heap-allocation method defined (MALLOC,MMAP,SBRK)\n" );
        exit( 1 );
#endif
    }

    /*
     * increment heap and return old position
     */

    tmp = data_seg->pos;
    data_seg->pos += n;

    return (void *) tmp;
}

/********************************************************************
 ********************************************************************
 **
 ** malloc/free interface
 **
 ********************************************************************
 ********************************************************************/

void *
r_malloc ( size_t size )
{
    node_t * node = NULL;
    heap_t * heap = NULL;

    if ( size == 0 )
        return NULL;

    if ( !
is_initialised ) rmalloc_init(); /* adjust size to hold management data */ size = EXTRA_DATA_SIZE + MAX( sizeof(node_t) - EXTRA_DATA_SIZE, size ); /* trim size to be multiple of 8 */ if ( size % 8 != 0 ) size += 8 - (size % 8); /* round size to next size_class */ size = round_size( size ); /* * get heap for this thread */ /* get private heap of thread or set new heap */ heap = GET_OLD_HEAP(); if ( heap == NULL ) { /* look for free heap or create new one */ if ((heap = get_free_heap()) == NULL) { #ifdef DEBUG fprintf( stderr, "(rmalloc) malloc : building new heap\n" ); #endif LOCK( global_heap.mutex ); heap = (heap_t *) system_alloc( & global_heap, sizeof(heap_t) ); UNLOCK( global_heap.mutex ); init_heap( heap ); /* lock heap BEFORE inserting so we can be sure, no other grabs it */ LOCK( heap->mutex ); insert_heap( heap ); } /* set this heap to be the thread-private heap */ SET_OLD_HEAP( heap ); } else LOCK( heap->mutex ); assert( heap != NULL ); /* * get free block if available */ node = get_free( heap, size ); /* if not, allocate new block */ if ( node == NULL ) { unsigned int slot = size_to_class( size ); #if HANDLE_SMALL == 1 if ( size <= SMALL_SIZE ) { node = allocate_small( heap, size ); } else #endif { node = (node_t*) system_alloc( heap, size ); heap->stat.used_mem += size; node->size = size; node->heap = heap; heap->stat.alloc[ slot ]++; } heap->stat.used[ slot ]++; } assert( node != NULL ); node->heap = heap; heap->stat.mem_in_use += NODE_SIZE( node ); if ( trace_mem ) rmalloc_trace(); UNLOCK( heap->mutex ); return NODE_TO_PTR( node ); } void * r_calloc ( size_t nmemb, size_t size) { void * p; if ((nmemb == 0) || (size == 0)) return NULL; p = r_malloc( nmemb * size ); /* memset to zero */ if ( p != NULL ) memset( p, 0, nmemb * size ); return p; } void r_free ( void * ptr ) { node_t * node; internal_size_t size; heap_t * heap = NULL; if ( ptr == NULL ) return; /* should at least have called malloc for this ptr, if not : ERROR */ if ( ! 
is_initialised ) { fprintf( stderr, "(rmalloc) free : rmalloc not initialised\n" ); return; } node = PTR_TO_NODE( ptr ); size = NODE_SIZE( node ); /* * put chunk back into itīs own heap */ heap = (heap_t*) node->heap; /* * check if heap does exist */ #if CHECK_HEAP == 1 if ( ((unsigned long) heap) < 0xff ) { #ifdef DEBUG fprintf( stderr, "(rmalloc) free : node has unknown heap: node = %p, heap = %p\n", node, heap ); #endif return; } else { bool found_heap = false; if ( heap == & global_heap ) found_heap = true; else { heap_t * tmp = heap_list.heaps; while ( tmp != NULL ) { if ( tmp == heap ) { found_heap = true; break; } tmp = tmp->next; } } if ( ! found_heap ) { #ifdef DEBUG fprintf( stderr, "(rmalloc) free : node hase unknown heap: node = %p, heap = %p\n", node, heap ); #endif return; } } #endif LOCK( heap->mutex ); /* move node into free area */ put_free( heap, node ); SET_FREE(node); heap->stat.mem_in_use -= size; if ( trace_mem ) rmalloc_trace(); UNLOCK( heap->mutex ); } void * r_realloc ( void * ptr, size_t size ) { if ( ptr == NULL ) return r_malloc( size ); else { node_t * node = PTR_TO_NODE( ptr ); void * newptr = NULL; /* if new size < old size do nothing */ if ( NODE_SIZE(node) >= size ) return ptr; /* allocate new chunk, copy and free old */ newptr = r_malloc( size ); memcpy( newptr, ptr, NODE_SIZE(node) ); r_free( ptr ); return newptr; } } /******************************************************************** ******************************************************************** ** ** misc. functions for heap-management ** ******************************************************************** ********************************************************************/ /* * init/finish heap-manager */ static void rmalloc_init () { DEFINE_MUTEX( mutex ); if ( IS_LOCKED( mutex ) ) { fprintf( stderr, "(rmalloc) rmalloc_init : mutex locked\n" ); LOCK( mutex ); } if ( ! 
is_initialised )
    {
        char * value;

        sys_conf.pagesize = getpagesize();

        /* init tsd-key */
        TSD_KEY_INIT( & heap_key );

        /* init global heap */
        init_heap( & global_heap );

        /* set up allocation sizes */
        init_alloc_sizes();

        /*
         * shall we build size-classes
         */
        if ((value = getenv( "RMALLOC_BUILD_SIZE" )) != NULL)
        {
            if ( strcmp( value, "1" ) == 0 )
                build_size_classes();
        }

        /*
         * setup memory trace
         */
        value = getenv( "RMALLOC_TRACE" );

        if (( value != NULL ) && (value[0] == '1'))
        {
            /* get size of trace */
            if ((value = getenv( "RMALLOC_TRACE_SIZE" )) != NULL )
            {
                if ( strcmp( value, "all" ) == 0 )
                    trace.size = 0;
                else
                {
                    trace.size = atoi( value );

                    /* truncate to multiple of 8 not bigger than trace_size */
                    trace.size = (trace.size / 8) * 8;
                }
            }

            /* get name of tracefile and initialise */
            if ((value = getenv( "RMALLOC_TRACE_FILE" )) != NULL )
                rmalloc_init_trace( value );
            else
                rmalloc_init_trace( DEFAULT_TRACE_FILE );
        }

        /*
         * register cleanup-functions
         * (atexit runs them in reverse registration order, so the
         * trace is closed before the final cleanup)
         */

        if ( atexit( rmalloc_finish ) != 0 )
            fprintf( stderr, "(rmalloc) init : error in atexit (%s)\n", strerror( errno ) );

        if ( atexit( rmalloc_finish_trace ) != 0 )
            fprintf( stderr, "(rmalloc) init : error in atexit (%s)\n", strerror( errno ) );

        if ((value = getenv( "RMALLOC_STAT" )) != NULL )
        {
            heap_stat_lvl = atoi( value );

            if ( heap_stat_lvl > 0 )
            {
                if ( atexit( rmalloc_stat ) != 0 )
                    fprintf( stderr, "(rmalloc) init : error in atexit (%s)\n", strerror( errno ) );
            }
        }

        is_initialised = true;
    }

    UNLOCK( mutex );
}

/*
 * end it all
 */
static void
rmalloc_finish ()
{
    rmalloc_clean();
}

/*
 * really free all chunks
 */
static void
rmalloc_clean ()
{
    /*
     * clean will be called at the end and then
     * the memory will be deallocated by the system
     * so why bother ???
*/
    return;
}

/*
 * initialise a heap
 */
static void
init_heap ( heap_t * heap )
{
#if USE_PTHREAD == 1
    int status;
#endif
    unsigned int i;

    heap->next = NULL;
    /* NOTE(review): heap_id++ is not synchronised; ids could collide
       if two threads build heaps concurrently -- verify callers hold
       a lock */
    heap->id = heap_id++;

    heap->data_seg.begin = NULL;
    heap->data_seg.pos = NULL;
    heap->data_seg.end = NULL;
    heap->data_seg.global.size = 0;
    heap->data_seg.global.alloc = 0;
    heap->data_seg.local.size = 0;
    heap->data_seg.local.alloc = 0;
    heap->data_seg.unused = 0;

    for ( i = 0; i <= NO_OF_CLASSES; i++ )
    {
        heap->free[i] = NULL;
        heap->stat.alloc[i] = 0;
        heap->stat.free[i] = 0;
        heap->stat.used[i] = 0;
    }

    heap->stat.used_mem = 0;
    heap->stat.mem_in_use = 0;

    heap->used = true;

    /* the global heap's mutex is statically initialised */
    if ( heap != & global_heap )
    {
        MUTEX_INIT( heap->mutex, status );
    }
}

/*
 * inserts new heap into heap-list
 */
static void
insert_heap ( heap_t * heap )
{
    LOCK( heap_list.mutex );

    /* prepend into list */
    heap->next = heap_list.heaps;
    heap_list.heaps = heap;

    heap_list.nheaps++;

    UNLOCK( heap_list.mutex );
}

/*
 * return unused/free heap from heap-list
 */
static heap_t *
get_free_heap ()
{
    heap_t * heap = heap_list.heaps;

    while ( heap != NULL )
    {
        if ( !
heap->used )
        {
            heap->used = true;
            return heap;
        }
        else
            heap = heap->next;
    }

    /* no free heap found */
    return NULL;
}

/*
 * called when a thread is finished and releases
 * local heap into list of unused heaps
 * (TSD destructor: just clears the used-flag, the heap itself survives)
 */

#if USE_PTHREAD == 1
static void
heap_key_destructor ( void * arg )
{
    heap_t * heap = (heap_t *) arg;

    if ( heap != NULL )
        heap->used = false;
}
#endif

/*
 * translate size into size-class
 * small sizes map directly (size/8 - 1); larger sizes are searched
 * in the size_classes table
 */

static unsigned int
size_to_class ( internal_size_t size )
{
    if ( size <= SMALL_SIZE )
        return (size >> 3) - 1;
    else
    {
#if 1
        /*
         * use binary search for size-class
         *
         * NOTE(review): if size exceeds the largest entry of size_classes,
         * "lb = split" can stop making progress and this loop would not
         * terminate — presumably callers never pass such sizes; confirm.
         * The fprintf below only triggers when the loop exits via lb == ub,
         * and looks like leftover diagnostic output.
         */
        unsigned int lb = (SMALL_SIZE >> 3)-1;
        unsigned int ub = NO_OF_CLASSES-1;
        unsigned int split;

        while ( lb != ub )
        {
            split = (ub + lb) / 2;

            if (( size_classes[ split ] <= size ) && (size < size_classes[ split+1 ]))
                return split;

            if ( size_classes[ split ] < size )
                lb = split;
            else
                ub = split;
        }

        fprintf( stderr, "(rmalloc) size_to_class : found size-class %ld\n", size_classes[ lb ] );

        return lb;
#else
        /*
         * use linear search for size-class
         */
        unsigned int sclass = SMALL_SIZE >> 3;

        while ((sclass < NO_OF_CLASSES) && (size_classes[ sclass ] < size))
            sclass++;

        return sclass-1;
#endif
    }
}

/*
 * setup allocation sizes for very small chunks:
 * for every small size (8,16,...,SMALL_SIZE) choose how many chunks to
 * allocate at once so that a 4..8-page block is split with minimal waste
 */

static void
init_alloc_sizes ()
{
    size_t pagesize = getpagesize();
    size_t minsize = sizeof(node_t);
    unsigned int i, j;

    /* adjust to be multiple of 8
     * NOTE(review): minsize is computed but not used below — looks like
     * dead code kept from an earlier version */
    if ( (minsize / 8) * 8 < minsize )
        minsize = ((minsize / 8) + 1) * 8;

    j = 0;
    for ( i = 8; i <= SMALL_SIZE; i += 8 )
    {
        unsigned int size = i;
        unsigned int count = 0;
        unsigned int bsize = 4 * pagesize;
        unsigned int min = size;

        /* compute multiple of pagesize with smallest split */
        while ( bsize <= 8 * pagesize )
        {
            if ( (bsize - ((bsize / size) * size)) < min )
            {
                min = bsize - ((bsize / size) * size);
                count = bsize / pagesize;

                if ( min == 0 )
                    break;
            }

            bsize += pagesize;
        }

        bsize = count * pagesize;
        count = bsize / size;

        /* alloc_size[slot] = number of chunks to allocate in one batch */
        alloc_size[ j++ ] = count;
    }
}

/*
 * allocate big chunk of very small blocks:
 * grabs alloc_size[slot] chunks from the system in one request, threads
 * them into the slot's free-list and hands one back via get_free
 */

#if HANDLE_SMALL == 1
static node_t *
allocate_small ( heap_t * heap, internal_size_t size )
{
    unsigned int slot = (size >> 3) - 1;
    node_t * p;
    node_t * node;
    node_t * last = NULL;
    internal_size_t i;

    assert( heap != NULL );

    /*
     * allocate more than one node
     */

    if ((p = (node_t*) system_alloc( heap, alloc_size[ slot ] * size )) == NULL )
    {
        fprintf( stderr, "(rmalloc) allocate_very_small : no memory available\n" );
        exit( 1 );
    }

    heap->stat.used_mem += size * alloc_size[ slot ];
    heap->stat.alloc[ slot ] += alloc_size[ slot ];
    heap->stat.free[ slot ] += alloc_size[ slot ];

    /*
     * initialise nodes in memory chunk and
     * put them into free list
     */

#define NODE( i ) ((node_t*) (((char*) p) + ((i) * size)))

    node = p;
    for ( i = 0; i < alloc_size[ slot ]; i++ )
    {
        /*
         * set size
         */

        node = NODE( i );
        node->size = size;
        node->heap = heap;

        /* first block mark as being deallocatable */
        if ( i == 0 )
            SET_DEALLOC( node );

        /* set link in list */
        if ( last != NULL )
            last->next = node;

        node->next = NULL;
        node->prev = last;

        last = node;
    }

    /*
     * put list into free list of nodes
     * - method is only called if no free node
     *   available, so we can set directly
     */

    heap->free[ slot ] = p;

    /*
     * return node from free-list
     */

    return get_free( heap, size );
}
#endif

/*
 * return node of suitable size from the heap's free-lists, or NULL;
 * a first-fit hit is unlinked and returned immediately, otherwise the
 * smallest larger chunk seen (best fit) is taken after the scan
 */

static node_t *
get_free ( heap_t * heap, internal_size_t size )
{
    /*
     * search list for best-fit
     */

    node_t * node;
    node_t * fit = NULL;
    unsigned int slot = size_to_class( size );

    assert( heap != NULL );

    if ((node = heap->free[ slot ]) == NULL)
        return NULL;

    /*
     * look for chunk which is large enough
     */

    while ( node != NULL )
    {
        if ( NODE_SIZE(node) >= size ) /* "==" - best fit; ">=" - first fit */
        {
            /*
             * remove node from list
             */

            if ( node->prev != NULL ) node->prev->next = node->next;
            else                      heap->free[ slot ] = node->next;

            if ( node->next != NULL ) node->next->prev = node->prev;

            heap->stat.free[ slot ]--;
            heap->stat.used[ slot ]++;

            return node;
        }
        else if ( NODE_SIZE(node) > size )
        {
            /* remember smallest chunk that is still large enough
             * (only reachable with the "==" best-fit comparison above) */
            if (fit != NULL)
            {
                if (NODE_SIZE(node) < NODE_SIZE(fit))
                    fit = 
node; } else fit = node; } node = node->next; } if ( fit != NULL ) { if ( fit->prev != NULL ) fit->prev->next = fit->next; else heap->free[ slot ] = fit->next; if ( fit->next != NULL ) fit->next->prev = fit->prev; heap->stat.free[ slot ]--; heap->stat.used[ slot ]++; return fit; } return NULL; } /* * insert big node into internal datastructure */ static void put_free ( heap_t * heap, node_t * node ) { unsigned int slot; node_t * first; assert((heap != NULL) && (node != NULL)); slot = size_to_class( NODE_SIZE(node) ); /* * prepend node */ first = heap->free[ slot ]; if (first == NULL) { heap->free[ slot ] = node; node->next = NULL; } else { heap->free[ slot ] = node; node->next = first; first->prev = node; } node->prev = NULL; heap->stat.free[ slot ]++; heap->stat.used[ slot ]--; } /* * insert unused end of data-segment as free chunk */ static void store_unused ( void * ptr, internal_size_t size, heap_t * heap ) { node_t * node = (node_t*) ptr; /* check if we have enough space for a node plus data */ if ( size > (EXTRA_DATA_SIZE + 8) ) { unsigned int slot; size -= EXTRA_DATA_SIZE; slot = size_to_class( size ); /* set node-size */ node->size = size; node->heap = heap; heap->stat.used_mem += size; heap->stat.alloc[ slot ]++; } } /******************************************************************** ******************************************************************** ** ** get info and set options from/for malloc ** ******************************************************************** ********************************************************************/ /* * set malloc options */ int r_mallopt ( int param, int val ) { if ( param == M_TOP_PAD ) { if ( val < 0 ) val = 0; sys_conf.top_pad = val; return 0; } return 0; } /* * report memory usage */ struct ul_mallinfo rmallinfo ( void ) { struct ul_mallinfo mi; internal_size_t total_size; internal_size_t used_size; internal_size_t free_size; heap_t * heap; LOCK( global_heap.mutex ); total_size = 
global_heap.data_seg.global.size; used_size = global_heap.stat.mem_in_use; free_size = global_heap.stat.used_mem - global_heap.stat.mem_in_use; UNLOCK( global_heap.mutex ); LOCK( heap_list.mutex ); heap = heap_list.heaps; while ( heap ) { LOCK( heap->mutex ); total_size += heap->data_seg.global.size; used_size += heap->stat.mem_in_use; free_size += heap->stat.used_mem - heap->stat.mem_in_use; UNLOCK( heap->mutex ); heap = heap->next; } UNLOCK( heap_list.mutex ); mi.arena = total_size; /* total space allocated from system */ mi.ordblks = 0; /* number of non-inuse chunks */ mi.smblks = 0; /* unused -- always zero */ mi.hblks = 0; /* number of mmapped regions */ mi.hblkhd = 0; /* total space in mmapped regions */ mi.usmblks = 0; /* unused -- always zero */ mi.fsmblks = 0; /* unused -- always zero */ mi.uordblks = used_size; /* total allocated space */ mi.fordblks = free_size; /* total non-inuse space */ mi.keepcost = 0; /* top-most, releasable (via malloc_trim) space */ return mi; } /* * wrapper for mallinfo */ #if OVERLOAD_MALLOC == 1 struct mallinfo mallinfo ( void ) { struct mallinfo mi; struct ul_mallinfo ul_mi = rmallinfo(); mi.arena = ul_mi.arena; mi.ordblks = ul_mi.ordblks; mi.smblks = ul_mi.smblks; mi.hblks = ul_mi.hblks; mi.hblkhd = ul_mi.hblkhd; mi.usmblks = ul_mi.usmblks; mi.fsmblks = ul_mi.fsmblks; mi.uordblks = ul_mi.uordblks; mi.fordblks = ul_mi.fordblks; mi.keepcost = ul_mi.keepcost; return mi; } #endif /******************************************************************** ******************************************************************** ** ** tracing ** ******************************************************************** ********************************************************************/ /* * allocation trace */ static void rmalloc_init_trace ( const char * name ) { assert( name != NULL ); if ((trace.fd = open( name, O_WRONLY | O_CREAT | O_TRUNC )) == -1) { fprintf( stderr, "(rmalloc) init_trace : could not open \"%s\" (%s)\n", name, strerror( 
errno ) ); return; } if (fchmod( trace.fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH ) == -1) fprintf( stderr, "(rmalloc) init_trace : could not chmod \"%s\" (%s)\n", name, strerror( errno ) ); trace.step = 0; trace.old_used = 0; trace.old_free = 0; trace_mem = true; } /* * get status and write trace-entry */ static void rmalloc_trace () { internal_size_t used, free; if ( ! trace_mem ) return; if ( trace.size == 0 ) { heap_t * heap = heap_list.heaps; used = global_heap.stat.mem_in_use; free = global_heap.stat.used_mem - global_heap.stat.mem_in_use; while ( heap != NULL ) { used += heap->stat.mem_in_use; free += (heap->stat.used_mem - heap->stat.mem_in_use); heap = heap->next; } } else { unsigned int slot = size_to_class( trace.size ); heap_t * heap = heap_list.heaps; used = global_heap.stat.used[ slot ] * size_classes[ slot ]; free = global_heap.stat.free[ slot ] * size_classes[ slot ]; while ( heap != NULL ) { used += heap->stat.used[ slot ] * size_classes[ slot ]; free += heap->stat.free[ slot ] * size_classes[ slot ]; heap = heap->next; } } used /= 1024; free /= 1024; if (((used > 0) || (free > 0)) && ((used != trace.old_used) || (free != trace.old_free))) { char buffer[256]; sprintf( buffer, "%ld %ld %ld\n", trace.step, used, free ); write( trace.fd, buffer, strlen(buffer) ); trace.old_used = used; trace.old_free = free; } trace.step++; } /* * finish tracing */ static void rmalloc_finish_trace () { if ( trace_mem ) { close( trace.fd ); trace_mem = false; } } /******************************************************************** ******************************************************************** ** ** statistical methods ** ******************************************************************** ********************************************************************/ /* * print statistics */ static void print_heap_stat ( heap_t * heap ) { internal_size_t i; internal_size_t nblocks = 0; internal_size_t sblocks = 0; if ( heap_stat_lvl <= 1 ) return; /* * statistic for 
small-usage */ if ( heap_stat_lvl >= 3 ) { fprintf( stderr, " sizeclass | # alloc | # free | kB | comment \n" ); fprintf( stderr, "-----------+------------+------------+------------+---------------\n" ); for ( i = 0; i <= NO_OF_CLASSES; i++ ) { if (heap->stat.alloc[i] > 0) { fprintf( stderr, "%10ld | %10ld | %10ld | %10ld |", size_classes[ i ], heap->stat.alloc[i], heap->stat.free[i], (heap->stat.free[i] * size_classes[ i ]) / 1024 ); if ( heap->stat.alloc[i] > heap->stat.free[i] ) fprintf( stderr, " %ld chunk(s) alive", heap->stat.alloc[i] - heap->stat.free[i] ); if ( heap->stat.alloc[i] < heap->stat.free[i] ) fprintf( stderr, " ERROR (too many freed)" ); fprintf( stderr, "\n" ); } } } /* * count memory */ for ( i = 0; i <= NO_OF_CLASSES; i++ ) { node_t * node = heap->free[i]; while ( node != NULL ) { sblocks += NODE_SIZE(node); nblocks++; node = node->next; } } /* * output */ #define BYTE_TO_MB( n ) (((double)(n)) / (1000.0 * 1000.0)) if ((heap->stat.mem_in_use != 0) || (heap->stat.used_mem != 0)) fprintf( stderr, " mem in use / allocated = %.2f MB / %.2f MB\n", BYTE_TO_MB(heap->stat.mem_in_use), BYTE_TO_MB(heap->stat.used_mem) ); if (nblocks != 0) { fprintf( stderr, " no / size of free chunks = %ld / %.2f MB\n", (unsigned long) nblocks, BYTE_TO_MB(sblocks) ); } if ( heap->data_seg.unused != 0 ) fprintf( stderr, " fragmentation size = %.2f MB\n", BYTE_TO_MB(heap->data_seg.unused) ); } static void rmalloc_stat () { heap_t * heap; if ( heap_stat_lvl == 0 ) return; if ( ! 
is_initialised ) { fprintf( stderr, "(rmalloc) stat : heap not initialised\n" ); return; } /* * print statstics about each heap */ if ( heap_stat_lvl >= 2 ) { fprintf( stderr, "(rmalloc) global heap :\n" ); print_heap_stat( & global_heap ); heap = heap_list.heaps; while ( heap != NULL ) { fprintf( stderr, "(rmalloc) heap %ld :\n", (long) heap->id ); print_heap_stat( heap ); heap = heap->next; } } if ( heap_stat_lvl >= 1 ) { internal_size_t total_size = global_heap.data_seg.global.size; internal_size_t frag_size = global_heap.data_seg.unused; heap = heap_list.heaps; while ( heap != NULL ) { total_size += heap->data_seg.global.size; frag_size += heap->data_seg.unused; heap = heap->next; } fprintf( stderr, "(rmalloc) global stat:\n" ); if ( total_size > 0 ) fprintf( stderr, " allocated from system = %.2f MB\n", BYTE_TO_MB(total_size) ); if ( frag_size > 0 ) fprintf( stderr, " total fragmentation = %.2f MB\n", BYTE_TO_MB(frag_size) ); } } /******************************************************************** ******************************************************************** ** ** build new size-classes for rmalloc ** ******************************************************************** ********************************************************************/ /* * build size-classes */ static void build_size_classes () { unsigned int i, j; unsigned int count[] = { 32, 32, 32, 32, 32 }; internal_size_t sizes[] = { 8, 64, 512, 4096, 16384 }; /* first entry must be 8 */ internal_size_t size = 0; internal_size_t old = 0; double dsize; double basis = 1.1; /* basis for the power */ unsigned int counter = 0; fprintf( stderr, "static internal_size_t size_classes[ NO_OF_CLASSES ] = { \n" ); for ( i = 0; i < sizeof( count ) / sizeof(int); i++ ) { for ( j = 0; j < count[i]; j++ ) { size += sizes[i]; fprintf( stderr, "%ld, ", size ); counter++; } } dsize = size; while ( size <= 256*1024*1024 ) { dsize *= basis; if ( dsize > ((double) (unsigned long) -1 ) ) { fprintf( stderr, "value too 
high\n" ); break; } size = (unsigned long) dsize; /* round value to the next multiple of 8 */ size += 8 - (size % 8); if ( old != size ) { fprintf( stderr, "%ld", size ); if ( size < 256*1024*1024 ) fprintf( stderr, ", " ); old = size; counter++; } } fprintf( stderr, "\n };\n" ); fprintf( stderr, "#define NO_OF_CLASSES %d\n", counter ); fprintf( stderr, "#define SMALL_SIZE %ld\n", sizes[0]*count[0] ); } #ifdef __cplusplus } #endif /******************************************************************** ******************************************************************** ** ** C++ memory management ** ******************************************************************** ********************************************************************/ /* * overload global new/delete */ #if defined(__cplusplus) && OVERLOAD_NEW == 1 #include void * operator new ( size_t n ) throw (std::bad_alloc) { return r_malloc( n ); } void * operator new[] ( size_t n ) throw (std::bad_alloc) { return r_malloc( n ); } void operator delete ( void * p ) throw () { r_free( p ); } void operator delete[] ( void * p ) throw () { r_free( p ); } #endif /* defined(__cplusplus) && OVERLOAD_NEW == 1 */