[PATCH] Expand INTERNAL_SIZE_T macro.


Hi,

Another malloc macro that causes a bit of confusion is INTERNAL_SIZE_T.
We define it as size_t, and the only way redefining it could be useful
is for a user compiling a custom allocator from this source.

I am in favor of dropping it, so that we no longer have to account for
a possible redefinition.

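For reference, this is the definition being expanded; it has always
defaulted to size_t in glibc builds, so substituting size_t throughout
is purely mechanical:

	#ifndef INTERNAL_SIZE_T
	#define INTERNAL_SIZE_T size_t
	#endif

	/* The corresponding word size */
	#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))

After the patch only the expanded form remains:

	#define SIZE_SZ                (sizeof(size_t))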

	* malloc/hooks.c (mem2chunk_check, top_check, realloc_check):
	Change INTERNAL_SIZE_T to size_t.
	* malloc/malloc.c (SIZE_SZ, REQUEST_OUT_OF_RANGE, malloc_chunk,
	malloc_state, malloc_par, global_max_fast, sysmalloc,
	do_check_free_chunk, do_check_remalloced_chunk,
	do_check_malloced_chunk, do_check_malloc_state, munmap_chunk,
	mremap_chunk, __libc_realloc, __libc_calloc, _int_malloc,
	_int_free, malloc_consolidate, _int_realloc, _int_memalign,
	mtrim, int_mallinfo): Likewise.


diff --git a/malloc/hooks.c b/malloc/hooks.c
index fdacdef..198a6f7 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -153,7 +153,7 @@ internal_function
 mem2chunk_check(void* mem, unsigned char **magic_p)
 {
   mchunkptr p;
-  INTERNAL_SIZE_T sz, c;
+  size_t sz, c;
   unsigned char magic;
 
   if(!aligned_OK(mem)) return NULL;
@@ -209,7 +209,7 @@ top_check(void)
 {
   mchunkptr t = top(&main_arena);
   char* brk, * new_brk;
-  INTERNAL_SIZE_T front_misalign, sbrk_size;
+  size_t front_misalign, sbrk_size;
   unsigned long pagesz = GLRO(dl_pagesize);
 
   if (t == initial_top(&main_arena) ||
@@ -289,7 +289,7 @@ free_check(void* mem, const void *caller)
 static void*
 realloc_check(void* oldmem, size_t bytes, const void *caller)
 {
-  INTERNAL_SIZE_T nb;
+  size_t nb;
   void* newmem = 0;
   unsigned char *magic_p;
 
@@ -309,7 +309,7 @@ realloc_check(void* oldmem, size_t bytes, const void *caller)
     malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
     return malloc_check(bytes, NULL);
   }
-  const INTERNAL_SIZE_T oldsize = chunksize(oldp);
+  const size_t oldsize = chunksize(oldp);
 
   checked_request2size(bytes+1, nb);
   (void)mutex_lock(&main_arena.mutex);
@@ -337,7 +337,7 @@ realloc_check(void* oldmem, size_t bytes, const void *caller)
     }
   } else {
     if (top_check() >= 0) {
-      INTERNAL_SIZE_T nb;
+      size_t nb;
       checked_request2size(bytes + 1, nb);
       newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
     }
diff --git a/malloc/malloc.c b/malloc/malloc.c
index ac8c3f6..622a606 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -93,8 +93,6 @@
 
   Supported pointer representation:       4 or 8 bytes
   Supported size_t  representation:       4 or 8 bytes
-       Note that size_t is allowed to be 4 bytes even if pointers are 8.
-       You can adjust this by defining INTERNAL_SIZE_T
 
   Alignment:                              2 * sizeof(size_t) (default)
        (i.e., 8 byte alignment with 4byte size_t). This suffices for
@@ -170,12 +168,6 @@
 
     HAVE_MREMAP                0
 
-    Changing default word sizes:
-
-    INTERNAL_SIZE_T            size_t
-    MALLOC_ALIGNMENT           MAX (2 * sizeof(INTERNAL_SIZE_T),
-				    __alignof__ (long double))
-
     Configuration and functionality options:
 
     USE_PUBLIC_MALLOC_WRAPPERS NOT defined
@@ -295,43 +287,8 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
 #endif
 
 
-/*
-  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
-  of chunk sizes.
-
-  The default version is the same as size_t.
-
-  While not strictly necessary, it is best to define this as an
-  unsigned type, even if size_t is a signed type. This may avoid some
-  artificial size limitations on some systems.
-
-  On a 64-bit machine, you may be able to reduce malloc overhead by
-  defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
-  expense of not being able to handle more than 2^32 of malloced
-  space. If this limitation is acceptable, you are encouraged to set
-  this unless you are on a platform requiring 16byte alignments. In
-  this case the alignment requirements turn out to negate any
-  potential advantages of decreasing size_t word size.
-
-  Implementors: Beware of the possible combinations of:
-     - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
-       and might be the same width as int or as long
-     - size_t might have different width and signedness as INTERNAL_SIZE_T
-     - int and long might be 32 or 64 bits, and might be the same width
-  To deal with this, most comparisons and difference computations
-  among INTERNAL_SIZE_Ts should cast them to unsigned long, being
-  aware of the fact that casting an unsigned int to a wider long does
-  not sign-extend. (This also makes checking for negative numbers
-  awkward.) Some of these casts result in harmless compiler warnings
-  on some systems.
-*/
-
-#ifndef INTERNAL_SIZE_T
-#define INTERNAL_SIZE_T size_t
-#endif
-
 /* The corresponding word size */
-#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
+#define SIZE_SZ                (sizeof(size_t))
 
 
 /*
@@ -1047,8 +1004,8 @@ typedef struct malloc_chunk* mchunkptr;
 
 static void*  _int_malloc(mstate, size_t);
 static void     _int_free(mstate, mchunkptr, int);
-static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
-			   INTERNAL_SIZE_T);
+static void*  _int_realloc(mstate, mchunkptr, size_t,
+			   size_t);
 static void*  _int_memalign(mstate, size_t, size_t);
 static void*  _mid_memalign(size_t, size_t, void *);
 
@@ -1116,8 +1073,8 @@ static void      free_atfork(void* mem, const void *caller);
 
 struct malloc_chunk {
 
-  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
-  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
+  size_t               prev_size;  /* Size of previous chunk (if free).  */
+  size_t               size;       /* Size in bytes, including overhead. */
 
   struct malloc_chunk* fd;         /* double links -- used only if free. */
   struct malloc_chunk* bk;
@@ -1249,7 +1206,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 
 #define REQUEST_OUT_OF_RANGE(req)                                 \
   ((unsigned long)(req) >=                                        \
-   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
+   (unsigned long)(size_t)(-2 * MINSIZE))
 
 /* pad request bytes into a usable size -- internal version */
 
@@ -1713,18 +1670,18 @@ struct malloc_state {
 #endif
 
   /* Memory allocated from the system in this arena.  */
-  INTERNAL_SIZE_T system_mem;
-  INTERNAL_SIZE_T max_system_mem;
+  size_t system_mem;
+  size_t max_system_mem;
 };
 
 struct malloc_par {
   /* Tunable parameters */
   unsigned long    trim_threshold;
-  INTERNAL_SIZE_T  top_pad;
-  INTERNAL_SIZE_T  mmap_threshold;
+  size_t           top_pad;
+  size_t           mmap_threshold;
 #ifdef PER_THREAD
-  INTERNAL_SIZE_T  arena_test;
-  INTERNAL_SIZE_T  arena_max;
+  size_t           arena_test;
+  size_t           arena_max;
 #endif
 
   /* Memory map support */
@@ -1737,11 +1694,11 @@ struct malloc_par {
   int              no_dyn_threshold;
 
   /* Statistics */
-  INTERNAL_SIZE_T  mmapped_mem;
-  /*INTERNAL_SIZE_T  sbrked_mem;*/
-  /*INTERNAL_SIZE_T  max_sbrked_mem;*/
-  INTERNAL_SIZE_T  max_mmapped_mem;
-  INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */
+  size_t           mmapped_mem;
+  /*size_t           sbrked_mem;*/
+  /*size_t           max_sbrked_mem;*/
+  size_t           max_mmapped_mem;
+  size_t           max_total_mem; /* only kept for NO_THREADS */
 
   /* First address handed out by MORECORE/sbrk.  */
   char*            sbrk_base;
@@ -1782,7 +1739,7 @@ static struct malloc_par mp_ =
 
 
 /* Maximum size of memory handled in fastbins.  */
-static INTERNAL_SIZE_T global_max_fast;
+static size_t global_max_fast;
 
 /*
   Initialize a malloc_state struct.
@@ -1820,7 +1777,7 @@ static void malloc_init_state(mstate av)
    Other internal utilities operating on mstates
 */
 
-static void*  sysmalloc(INTERNAL_SIZE_T, mstate);
+static void*  sysmalloc(size_t, mstate);
 static int      systrim(size_t, mstate);
 static void     malloc_consolidate(mstate);
 
@@ -1965,7 +1922,7 @@ static void do_check_chunk(mstate av, mchunkptr p)
 
 static void do_check_free_chunk(mstate av, mchunkptr p)
 {
-  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
+  size_t sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
   mchunkptr next = chunk_at_offset(p, sz);
 
   do_check_chunk(av, p);
@@ -2034,9 +1991,9 @@ static void do_check_inuse_chunk(mstate av, mchunkptr p)
   Properties of chunks recycled from fastbins
 */
 
-static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
+static void do_check_remalloced_chunk(mstate av, mchunkptr p, size_t s)
 {
-  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
+  size_t sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
 
   if (!chunk_is_mmapped(p)) {
     assert(av == arena_for_chunk(p));
@@ -2062,7 +2019,7 @@ static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
   Properties of nonrecycled chunks at the point they are malloced
 */
 
-static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
+static void do_check_malloced_chunk(mstate av, mchunkptr p, size_t s)
 {
   /* same as recycled case ... */
   do_check_remalloced_chunk(av, p, s);
@@ -2099,12 +2056,12 @@ static void do_check_malloc_state(mstate av)
   mchunkptr q;
   mbinptr b;
   unsigned int idx;
-  INTERNAL_SIZE_T size;
+  size_t size;
   unsigned long total = 0;
   int max_fast_bin;
 
   /* internal size_t must be no wider than pointer type */
-  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
+  assert(sizeof(size_t) <= sizeof(char*));
 
   /* alignment is a power of 2 */
   assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
@@ -2241,10 +2198,10 @@ static void do_check_malloc_state(mstate av)
   be extended or replaced.
 */
 
-static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
+static void* sysmalloc(size_t nb, mstate av)
 {
   mchunkptr       old_top;        /* incoming value of av->top */
-  INTERNAL_SIZE_T old_size;       /* its size */
+  size_t          old_size;       /* its size */
   char*           old_end;        /* its end address */
 
   long            size;           /* arg to first MORECORE or mmap call */
@@ -2253,8 +2210,8 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
   long            correction;     /* arg to 2nd MORECORE call */
   char*           snd_brk;        /* 2nd return val */
 
-  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
-  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
+  size_t          front_misalign; /* unusable bytes at front of new space */
+  size_t          end_misalign;   /* partial page left at end of new space */
   char*           aligned_brk;    /* aligned offset into brk */
 
   mchunkptr       p;              /* the allocated/returned chunk */
@@ -2313,11 +2270,11 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
 	    /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
 	       MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
 	       aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
-	    assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
+	    assert (((size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
 	    front_misalign = 0;
 	  }
 	else
-	  front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
+	  front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK;
 	if (front_misalign > 0) {
 	  correction = MALLOC_ALIGNMENT - front_misalign;
 	  p = (mchunkptr)(mm + correction);
@@ -2547,7 +2504,7 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
 
 	/* Guarantee alignment of first new chunk made from this space */
 
-	front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+	front_misalign = (size_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
 	if (front_misalign > 0) {
 
 	  /*
@@ -2570,7 +2527,7 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
 	correction += old_size;
 
 	/* Extend the end address to hit a page boundary */
-	end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
+	end_misalign = (size_t)(brk + size + correction);
 	correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
 
 	assert(correction >= 0);
@@ -2603,7 +2560,7 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
 	  /* MORECORE/mmap must correctly align */
 	  assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
 	else {
-	  front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+	  front_misalign = (size_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
 	  if (front_misalign > 0) {
 
 	    /*
@@ -2771,7 +2728,7 @@ static void
 internal_function
 munmap_chunk(mchunkptr p)
 {
-  INTERNAL_SIZE_T size = chunksize(p);
+  size_t size = chunksize(p);
 
   assert (chunk_is_mmapped(p));
 
@@ -2805,8 +2762,8 @@ internal_function
 mremap_chunk(mchunkptr p, size_t new_size)
 {
   size_t page_mask = GLRO(dl_pagesize) - 1;
-  INTERNAL_SIZE_T offset = p->prev_size;
-  INTERNAL_SIZE_T size = chunksize(p);
+  size_t offset = p->prev_size;
+  size_t size = chunksize(p);
   char *cp;
 
   assert (chunk_is_mmapped(p));
@@ -2831,7 +2788,7 @@ mremap_chunk(mchunkptr p, size_t new_size)
   assert((p->prev_size == offset));
   set_head(p, (new_size - offset)|IS_MMAPPED);
 
-  INTERNAL_SIZE_T new;
+  size_t new;
   new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
 	+ new_size - size - offset;
   atomic_max (&mp_.max_mmapped_mem, new);
@@ -2917,7 +2874,7 @@ void*
 __libc_realloc(void* oldmem, size_t bytes)
 {
   mstate ar_ptr;
-  INTERNAL_SIZE_T    nb;      /* padded request size */
+  size_t             nb;      /* padded request size */
 
   void* newp;             /* chunk to return */
 
@@ -2936,7 +2893,7 @@ __libc_realloc(void* oldmem, size_t bytes)
   /* chunk corresponding to oldmem */
   const mchunkptr oldp    = mem2chunk(oldmem);
   /* its size */
-  const INTERNAL_SIZE_T oldsize = chunksize(oldp);
+  const size_t oldsize = chunksize(oldp);
 
   /* Little security check which won't hurt performance: the
      allocator never wrapps around at the end of the address space.
@@ -3115,17 +3072,17 @@ __libc_calloc(size_t n, size_t elem_size)
 {
   mstate av;
   mchunkptr oldtop, p;
-  INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
+  size_t bytes, sz, csz, oldtopsize;
   void* mem;
   unsigned long clearsize;
   unsigned long nclears;
-  INTERNAL_SIZE_T* d;
+  size_t* d;
 
   /* size_t is unsigned so the behavior on overflow is defined.  */
   bytes = n * elem_size;
-#define HALF_INTERNAL_SIZE_T \
-  (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
-  if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
+#define HALF_SIZE_T \
+  (((size_t) 1) << (8 * sizeof (size_t) / 2))
+  if (__builtin_expect ((n | elem_size) >= HALF_SIZE_T, 0)) {
     if (elem_size != 0 && bytes / elem_size != n) {
       __set_errno (ENOMEM);
       return 0;
@@ -3202,11 +3159,11 @@ __libc_calloc(size_t n, size_t elem_size)
 #endif
 
   /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
-     contents have an odd number of INTERNAL_SIZE_T-sized words;
+     contents have an odd number of size_t-sized words;
      minimally 3.  */
-  d = (INTERNAL_SIZE_T*)mem;
+  d = (size_t*)mem;
   clearsize = csz - SIZE_SZ;
-  nclears = clearsize / sizeof(INTERNAL_SIZE_T);
+  nclears = clearsize / sizeof(size_t);
   assert(nclears >= 3);
 
   if (nclears > 9)
@@ -3240,12 +3197,12 @@ __libc_calloc(size_t n, size_t elem_size)
 static void*
 _int_malloc(mstate av, size_t bytes)
 {
-  INTERNAL_SIZE_T nb;               /* normalized request size */
+  size_t          nb;               /* normalized request size */
   unsigned int    idx;              /* associated bin index */
   mbinptr         bin;              /* associated bin */
 
   mchunkptr       victim;           /* inspected/selected chunk */
-  INTERNAL_SIZE_T size;             /* its size */
+  size_t          size;             /* its size */
   int             victim_index;     /* its bin index */
 
   mchunkptr       remainder;        /* remainder from a split */
@@ -3724,12 +3681,12 @@ _int_malloc(mstate av, size_t bytes)
 static void
 _int_free(mstate av, mchunkptr p, int have_lock)
 {
-  INTERNAL_SIZE_T size;        /* its size */
+  size_t          size;        /* its size */
   mfastbinptr*    fb;          /* associated fastbin */
   mchunkptr       nextchunk;   /* next contiguous chunk */
-  INTERNAL_SIZE_T nextsize;    /* its size */
+  size_t          nextsize;    /* its size */
   int             nextinuse;   /* true if nextchunk is used */
-  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
+  size_t          prevsize;    /* size of previous contiguous chunk */
   mchunkptr       bck;         /* misc temp for linking */
   mchunkptr       fwd;         /* misc temp for linking */
 
@@ -4019,9 +3976,9 @@ static void malloc_consolidate(mstate av)
 
   /* These have same use as in free() */
   mchunkptr       nextchunk;
-  INTERNAL_SIZE_T size;
-  INTERNAL_SIZE_T nextsize;
-  INTERNAL_SIZE_T prevsize;
+  size_t          size;
+  size_t          nextsize;
+  size_t          prevsize;
   int             nextinuse;
   mchunkptr       bck;
   mchunkptr       fwd;
@@ -4111,11 +4068,11 @@ static void malloc_consolidate(mstate av)
 */
 
 void*
-_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
-	     INTERNAL_SIZE_T nb)
+_int_realloc(mstate av, mchunkptr oldp, size_t oldsize,
+	     size_t nb)
 {
   mchunkptr        newp;            /* chunk to return */
-  INTERNAL_SIZE_T  newsize;         /* its size */
+  size_t           newsize;         /* its size */
   void*          newmem;          /* corresponding user mem */
 
   mchunkptr        next;            /* next contiguous chunk after oldp */
@@ -4127,9 +4084,9 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   mchunkptr        fwd;             /* misc temp for linking */
 
   unsigned long    copysize;        /* bytes to copy */
-  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
-  INTERNAL_SIZE_T* s;               /* copy source */
-  INTERNAL_SIZE_T* d;               /* copy destination */
+  unsigned int     ncopies;         /* size_t words to copy */
+  size_t*          s;               /* copy source */
+  size_t*          d;               /* copy destination */
 
   const char *errstr = NULL;
 
@@ -4149,7 +4106,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   assert (!chunk_is_mmapped(oldp));
 
   next = chunk_at_offset(oldp, oldsize);
-  INTERNAL_SIZE_T nextsize = chunksize(next);
+  size_t nextsize = chunksize(next);
   if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
       || __builtin_expect (nextsize >= av->system_mem, 0))
     {
@@ -4204,13 +4161,13 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
 	/*
 	  Unroll copy of <= 36 bytes (72 if 8byte sizes)
 	  We know that contents have an odd number of
-	  INTERNAL_SIZE_T-sized words; minimally 3.
+	  size_t-sized words; minimally 3.
 	*/
 
 	copysize = oldsize - SIZE_SZ;
-	s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
-	d = (INTERNAL_SIZE_T*)(newmem);
-	ncopies = copysize / sizeof(INTERNAL_SIZE_T);
+	s = (size_t*)(chunk2mem(oldp));
+	d = (size_t*)(newmem);
+	ncopies = copysize / sizeof(size_t);
 	assert(ncopies >= 3);
 
 	if (ncopies > 9)
@@ -4272,16 +4229,16 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
 static void*
 _int_memalign(mstate av, size_t alignment, size_t bytes)
 {
-  INTERNAL_SIZE_T nb;             /* padded  request size */
+  size_t          nb;             /* padded  request size */
   char*           m;              /* memory returned by malloc call */
   mchunkptr       p;              /* corresponding chunk */
   char*           brk;            /* alignment point within p */
   mchunkptr       newp;           /* chunk to return */
-  INTERNAL_SIZE_T newsize;        /* its size */
-  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
+  size_t          newsize;        /* its size */
+  size_t          leadsize;       /* leading space before alignment point */
   mchunkptr       remainder;      /* spare room at end to split off */
   unsigned long   remainder_size; /* its size */
-  INTERNAL_SIZE_T size;
+  size_t          size;
 
 
 
@@ -4378,7 +4335,7 @@ static int mtrim(mstate av, size_t pad)
 
 	for (mchunkptr p = last (bin); p != bin; p = p->bk)
 	  {
-	    INTERNAL_SIZE_T size = chunksize (p);
+	    size_t size = chunksize (p);
 
 	    if (size > psm1 + sizeof (struct malloc_chunk))
 	      {
@@ -4481,8 +4438,8 @@ int_mallinfo(mstate av, struct mallinfo *m)
   size_t i;
   mbinptr b;
   mchunkptr p;
-  INTERNAL_SIZE_T avail;
-  INTERNAL_SIZE_T fastavail;
+  size_t avail;
+  size_t fastavail;
   int nblocks;
   int nfastblocks;
 


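As a side note, the HALF_SIZE_T test in __libc_calloc above is the
usual cheap overflow guard: when both n and elem_size are below
2^(bits-of-size_t / 2), their product cannot wrap, so the exact
division check only runs on the unlikely path. A minimal standalone
sketch of the same idiom (my_calloc is a hypothetical wrapper, not
the glibc code):

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	/* 2^(half the bits of size_t); if both factors are below this
	   bound, their product cannot overflow size_t.  */
	#define HALF_SIZE_T (((size_t) 1) << (8 * sizeof (size_t) / 2))

	static void *
	my_calloc (size_t n, size_t elem_size)
	{
	  size_t bytes = n * elem_size;

	  /* Unlikely path: one factor is large enough that the product
	     may have wrapped; verify with a division.  */
	  if ((n | elem_size) >= HALF_SIZE_T
	      && elem_size != 0 && bytes / elem_size != n)
	    {
	      errno = ENOMEM;
	      return NULL;
	    }

	  void *mem = malloc (bytes);
	  if (mem != NULL)
	    memset (mem, 0, bytes);
	  return mem;
	}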