This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.



Re: [PING^2][PATCH] Reformat malloc to gnu style.


On Thu, Jan 09, 2014 at 04:52:28PM +0530, Siddhesh Poyarekar wrote:
> On Thu, Jan 02, 2014 at 07:55:09PM +0100, Ondřej Bílka wrote:
> > That is missing the point: the current formatting was vastly different
> > from the GNU one, so the goal was not to get it perfect on the first try
> > but to converge toward a solution.
> > 
> > In formatting patches there are three factors in play:
> > 
> > 1) Amount of code formatted correctly.
> > 2) Amount of code left intact.
> > 3) Amount of code formatted incorrectly.
> > 
> > Factors 2 and 3 sum to the amount of code that still needs formatting.
> > If a formatter gets 95% of the code right, then only 5% remains to be
> > fixed, so the follow-up patches are twenty times smaller than they would
> > be otherwise.
> > 
> > For our purposes it is mostly irrelevant whether a flaw was caused by the
> > formatter or simply left over, unless the formatting causes a loss of
> > information.  Where a style allows only one possibility you cannot encode
> > any information, so that is not a factor.
> 
> Ondrej, did you get a chance to work on further fixes to formatting in
> malloc?  I noticed during a rawhide rebase that a number of macros
> remain incorrectly formatted.  It would be nice to have a consistently
> formatted malloc in 2.19.
> 
> Siddhesh

Yes, this additional patch fixes the formatting of the macros.

	* malloc/arena.c (typedef, ptmalloc_unlock_all2,
	shrink_heap): Fix formatting in macros.
	* malloc/hooks.c (__malloc_check_init, memalign_check): Likewise.
	* malloc/mallocbug.c: Likewise.
	* malloc/malloc.c (__malloc_assert, static, malloc_init_state,
	free_perturb, _int_malloc, malloc_info): Likewise.
	* malloc/mcheck.c (struct): Likewise.
	* malloc/memusage.c (struct): Likewise.
	* malloc/memusagestat.c: Likewise.
	* malloc/morecore.c: Likewise.
	* malloc/mtrace.c: Likewise.
	* malloc/obstack.c (enum, _obstack_memory_used): Likewise.
	* malloc/tst-obstack.c: Likewise.
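
For reference, the shape these changes converge to is the usual GNU layout
for multi-statement macros: the replacement text starts on its own line
after the #define, the do/while braces sit on lines of their own, and the
continuation backslashes are aligned to a common column near the right
margin.  A minimal sketch with a made-up macro name (not part of the patch,
and with the backslash column shortened here):

#include <stdlib.h>

/* Hypothetical macro, shown only to illustrate the target layout.  */
#define free_if_set(ptr) \
  do                     \
    {                    \
      if ((ptr) != NULL) \
        free (ptr);      \
    }                    \
  while (0)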


diff --git a/malloc/arena.c b/malloc/arena.c
index 5088a25..29a26cc 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -21,12 +21,12 @@
 
 /* Compile-time constants.  */
 
-#define HEAP_MIN_SIZE (32 * 1024)
+#define HEAP_MIN_SIZE		(32 * 1024)
 #ifndef HEAP_MAX_SIZE
 # ifdef DEFAULT_MMAP_THRESHOLD_MAX
-#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
+#  define HEAP_MAX_SIZE	(2 * DEFAULT_MMAP_THRESHOLD_MAX)
 # else
-#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
+#  define HEAP_MAX_SIZE	(1024 * 1024) /* must be a power of two */
 # endif
 #endif
 
@@ -39,7 +39,7 @@
 
 
 #ifndef THREAD_STATS
-# define THREAD_STATS 0
+# define THREAD_STATS	0
 #endif
 
 /* If THREAD_STATS is non-zero, some statistics on mutex locking are
@@ -47,7 +47,7 @@
 
 /***************************************************************************/
 
-#define top(ar_ptr) ((ar_ptr)->top)
+#define top(ar_ptr)	((ar_ptr)->top)
 
 /* A heap is a single contiguous memory region holding (coalesceable)
    malloc_chunks.  It is allocated with mmap() and always starts at an
@@ -81,9 +81,12 @@ static mstate free_list;
 
 #if THREAD_STATS
 static int stat_n_heaps;
-# define THREAD_STAT(x) x
+# define THREAD_STAT(x)	x
 #else
-# define THREAD_STAT(x) do ; while (0)
+# define THREAD_STAT(x) \
+  do									      \
+    ;									      \
+  while (0)
 #endif
 
 /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
@@ -103,22 +106,31 @@ int __malloc_initialized = -1;
    is just a hint as to how much memory will be required immediately
    in the new arena. */
 
-#define arena_get(ptr, size) do { \
+#define arena_get(ptr, size) \
+  do									      \
+    {									      \
       arena_lookup (ptr);						      \
       arena_lock (ptr, size);						      \
-  } while (0)
+    }									      \
+  while (0)
 
-#define arena_lookup(ptr) do { \
+#define arena_lookup(ptr) \
+  do									      \
+    {									      \
       void *vptr = NULL;						      \
       ptr = (mstate) tsd_getspecific (arena_key, vptr);			      \
-  } while (0)
+    }									      \
+  while (0)
 
-#define arena_lock(ptr, size) do {					      \
+#define arena_lock(ptr, size) \
+  do									      \
+    {									      \
       if (ptr)								      \
-        (void) mutex_lock (&ptr->mutex);				      \
+	(void) mutex_lock (&ptr->mutex);				      \
       else								      \
-        ptr = arena_get2 (ptr, (size), NULL);				      \
-  } while (0)
+	ptr = arena_get2 (ptr, (size), NULL);				      \
+    }									      \
+  while (0)
 
 /* find the heap and corresponding arena for a given ptr */
 
@@ -145,7 +157,7 @@ ATFORK_MEM;
 /* Magic value for the thread-specific arena pointer when
    malloc_atfork() is in use.  */
 
-# define ATFORK_ARENA_PTR ((void *) -1)
+# define ATFORK_ARENA_PTR	((void *) -1)
 
 /* The following hooks are used while the `atfork' handling mechanism
    is active. */
@@ -316,7 +328,7 @@ ptmalloc_unlock_all2 (void)
 
 # else
 
-#  define ptmalloc_unlock_all2 ptmalloc_unlock_all
+#  define ptmalloc_unlock_all2	ptmalloc_unlock_all
 # endif
 #endif  /* !NO_THREADS */
 
@@ -660,11 +672,13 @@ shrink_heap (heap_info *h, long diff)
 /* Delete a heap. */
 
 #define delete_heap(heap) \
-  do {									      \
+  do									      \
+    {									      \
       if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)		      \
-        aligned_heap_area = NULL;					      \
+	aligned_heap_area = NULL;					      \
       __munmap ((char *) (heap), HEAP_MAX_SIZE);			      \
-    } while (0)
+    }									      \
+  while (0)
 
 static int
 internal_function
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 00ee6be..c50204f 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -88,7 +88,7 @@ __malloc_check_init (void)
    overruns.  The goal here is to avoid obscure crashes due to invalid
    usage, unlike in the MALLOC_DEBUG code. */
 
-#define MAGICBYTE(p) ((((size_t) p >> 3) ^ ((size_t) p >> 11)) & 0xFF)
+#define MAGICBYTE(p)	((((size_t) p >> 3) ^ ((size_t) p >> 11)) & 0xFF)
 
 /* Visualize the chunk as being partitioned into blocks of 256 bytes from the
    highest address of the chunk, downwards.  The beginning of each block tells
@@ -446,8 +446,8 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
    use in the recorded state but the user requested malloc checking,
    then the hooks are reset to 0.  */
 
-#define MALLOC_STATE_MAGIC   0x444c4541l
-#define MALLOC_STATE_VERSION (0 * 0x100l + 4l) /* major*0x100 + minor */
+#define MALLOC_STATE_MAGIC	0x444c4541l
+#define MALLOC_STATE_VERSION	(0 * 0x100l + 4l) /* major*0x100 + minor */
 
 struct malloc_save_state
 {
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 813e94e..ccd0ab3 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -209,7 +209,7 @@
 */
 
 #ifndef void
-#define void      void
+#define void	void
 #endif /*void*/
 
 #include <stddef.h>   /* for size_t */
@@ -271,10 +271,10 @@
 */
 
 #ifdef NDEBUG
-# define assert(expr) ((void) 0)
+# define assert(expr)	((void) 0)
 #else
 # define assert(expr) \
-  ((expr)								      \
+  ((expr)									      \
    ? ((void) 0)								      \
    : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
 
@@ -327,11 +327,11 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
 */
 
 #ifndef INTERNAL_SIZE_T
-#define INTERNAL_SIZE_T size_t
+#define INTERNAL_SIZE_T	size_t
 #endif
 
 /* The corresponding word size */
-#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
+#define SIZE_SZ	(sizeof (INTERNAL_SIZE_T))
 
 
 /*
@@ -353,15 +353,16 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
    malloc_set_state than will returning blocks not adequately aligned for
    long double objects under -mlong-double-128.  */
 
-#  define MALLOC_ALIGNMENT       (2 *SIZE_SZ < __alignof__ (long double)      \
-                                  ? __alignof__ (long double) : 2 *SIZE_SZ)
+#  define MALLOC_ALIGNMENT \
+  (2 * SIZE_SZ < __alignof__ (long double)				      \
+   ? __alignof__ (long double) : 2 * SIZE_SZ)
 # else
-#  define MALLOC_ALIGNMENT       (2 *SIZE_SZ)
+#  define MALLOC_ALIGNMENT	(2 * SIZE_SZ)
 # endif
 #endif
 
 /* The corresponding bit mask value */
-#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
+#define MALLOC_ALIGN_MASK	(MALLOC_ALIGNMENT - 1)
 
 
 
@@ -373,7 +374,7 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
 */
 
 #ifndef REALLOC_ZERO_BYTES_FREES
-#define REALLOC_ZERO_BYTES_FREES 1
+#define REALLOC_ZERO_BYTES_FREES	1
 #endif
 
 /*
@@ -393,13 +394,13 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
 */
 
 #ifndef TRIM_FASTBINS
-#define TRIM_FASTBINS  0
+#define TRIM_FASTBINS	0
 #endif
 
 
 /* Definition for getting more memory from the OS.  */
-#define MORECORE         (*__morecore)
-#define MORECORE_FAILURE 0
+#define MORECORE		(*__morecore)
+#define MORECORE_FAILURE	0
 void * __default_morecore (ptrdiff_t);
 void *(*__morecore)(ptrdiff_t) = __default_morecore;
 
@@ -419,7 +420,7 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
 */
 
 #ifndef MORECORE
-#define MORECORE sbrk
+#define MORECORE	sbrk
 #endif
 
 /*
@@ -430,7 +431,7 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
 */
 
 #ifndef MORECORE_FAILURE
-#define MORECORE_FAILURE (-1)
+#define MORECORE_FAILURE	(-1)
 #endif
 
 /*
@@ -444,7 +445,7 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
 */
 
 #ifndef MORECORE_CONTIGUOUS
-#define MORECORE_CONTIGUOUS 1
+#define MORECORE_CONTIGUOUS	1
 #endif
 
 /*
@@ -466,7 +467,7 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
  */
 
 #ifndef MORECORE_CLEARS
-# define MORECORE_CLEARS 1
+# define MORECORE_CLEARS	1
 #endif
 
 
@@ -483,7 +484,7 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
    thus avoid running out of kernel resources.  */
 
 #ifndef MMAP_AS_MORECORE_SIZE
-#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
+#define MMAP_AS_MORECORE_SIZE	(1024 * 1024)
 #endif
 
 /*
@@ -492,7 +493,7 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
 */
 
 #ifndef HAVE_MREMAP
-#define HAVE_MREMAP 0
+#define HAVE_MREMAP	0
 #endif
 
 
@@ -783,11 +784,11 @@ int      __posix_memalign(void **, size_t, size_t);
 
 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
 #ifndef M_MXFAST
-#define M_MXFAST            1
+#define M_MXFAST	1
 #endif
 
 #ifndef DEFAULT_MXFAST
-#define DEFAULT_MXFAST     (64 * SIZE_SZ / 4)
+#define DEFAULT_MXFAST	(64 * SIZE_SZ / 4)
 #endif
 
 
@@ -851,10 +852,10 @@ int      __posix_memalign(void **, size_t, size_t);
   since that memory will immediately be returned to the system.
 */
 
-#define M_TRIM_THRESHOLD       -1
+#define M_TRIM_THRESHOLD	-1
 
 #ifndef DEFAULT_TRIM_THRESHOLD
-#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
+#define DEFAULT_TRIM_THRESHOLD	(128 * 1024)
 #endif
 
 /*
@@ -884,10 +885,10 @@ int      __posix_memalign(void **, size_t, size_t);
   the program needs.
 */
 
-#define M_TOP_PAD              -2
+#define M_TOP_PAD		-2
 
 #ifndef DEFAULT_TOP_PAD
-#define DEFAULT_TOP_PAD        (0)
+#define DEFAULT_TOP_PAD	(0)
 #endif
 
 /*
@@ -896,7 +897,7 @@ int      __posix_memalign(void **, size_t, size_t);
 */
 
 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
-#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
+#define DEFAULT_MMAP_THRESHOLD_MIN	(128 * 1024)
 #endif
 
 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
@@ -905,9 +906,9 @@ int      __posix_memalign(void **, size_t, size_t);
      maximum heap size and its alignment.  Going above 512k (i.e., 1M
      for new heaps) wastes too much address space.  */
 # if __WORDSIZE == 32
-#  define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
+#  define DEFAULT_MMAP_THRESHOLD_MAX	(512 * 1024)
 # else
-#  define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
+#  define DEFAULT_MMAP_THRESHOLD_MAX	(4 * 1024 * 1024 * sizeof (long))
 # endif
 #endif
 
@@ -1003,10 +1004,10 @@ int      __posix_memalign(void **, size_t, size_t);
 
 */
 
-#define M_MMAP_THRESHOLD      -3
+#define M_MMAP_THRESHOLD	-3
 
 #ifndef DEFAULT_MMAP_THRESHOLD
-#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
+#define DEFAULT_MMAP_THRESHOLD	DEFAULT_MMAP_THRESHOLD_MIN
 #endif
 
 /*
@@ -1020,16 +1021,16 @@ int      __posix_memalign(void **, size_t, size_t);
   Setting to 0 disables use of mmap for servicing large requests.
 */
 
-#define M_MMAP_MAX             -4
+#define M_MMAP_MAX		-4
 
 #ifndef DEFAULT_MMAP_MAX
-#define DEFAULT_MMAP_MAX       (65536)
+#define DEFAULT_MMAP_MAX	(65536)
 #endif
 
 #include <malloc.h>
 
 #ifndef RETURN_ADDRESS
-#define RETURN_ADDRESS(X_) (NULL)
+#define RETURN_ADDRESS(X_)	(NULL)
 #endif
 
 /* On some platforms we can compile internal, not exported functions better.
@@ -1079,15 +1080,15 @@ static void      free_atfork(void* mem, const void *caller);
 #include <sys/mman.h>
 
 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
-# define MAP_ANONYMOUS MAP_ANON
+# define MAP_ANONYMOUS	MAP_ANON
 #endif
 
 #ifndef MAP_NORESERVE
-# define MAP_NORESERVE 0
+# define MAP_NORESERVE	0
 #endif
 
 #define MMAP(addr, size, prot, flags) \
- __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
+  __mmap ((addr), (size), (prot), (flags) | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)
 
 
 /*
@@ -1208,23 +1209,23 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 
 /* conversion from malloc headers to user pointers, and back */
 
-#define chunk2mem(p)   ((void*)((char*)(p) + 2*SIZE_SZ))
-#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
+#define chunk2mem(p)	((void *) ((char *) (p) + 2 * SIZE_SZ))
+#define mem2chunk(mem)	((mchunkptr) ((char *) (mem) - 2 * SIZE_SZ))
 
 /* The smallest possible chunk */
-#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))
+#define MIN_CHUNK_SIZE	(offsetof (struct malloc_chunk, fd_nextsize))
 
 /* The smallest size we can malloc is an aligned minimal chunk */
 
-#define MINSIZE  \
-  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
+#define MINSIZE \
+  (unsigned long) (((MIN_CHUNK_SIZE + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
 
 /* Check if m has acceptable alignment */
 
-#define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
+#define aligned_OK(m)	(((unsigned long) (m) & MALLOC_ALIGN_MASK) == 0)
 
 #define misaligned_chunk(p) \
-  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
+  ((uintptr_t) (MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p))	      \
    & MALLOC_ALIGN_MASK)
 
 
@@ -1234,21 +1235,22 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    low enough so that adding MINSIZE will also not wrap around zero.
  */
 
-#define REQUEST_OUT_OF_RANGE(req)                                 \
+#define REQUEST_OUT_OF_RANGE(req) \
   ((unsigned long) (req) >=						      \
    (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
 
 /* pad request bytes into a usable size -- internal version */
 
-#define request2size(req)                                         \
-  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
-   MINSIZE :                                                      \
+#define request2size(req) \
+  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?			      \
+   MINSIZE :								      \
    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
 
 /*  Same, except also perform argument check */
 
-#define checked_request2size(req, sz)                             \
-  if (REQUEST_OUT_OF_RANGE (req)) {					      \
+#define checked_request2size(req, sz) \
+  if (REQUEST_OUT_OF_RANGE (req))						      \
+    {									      \
       __set_errno (ENOMEM);						      \
       return 0;								      \
     }									      \
@@ -1260,26 +1262,26 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 
 
 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
-#define PREV_INUSE 0x1
+#define PREV_INUSE	0x1
 
 /* extract inuse bit of previous chunk */
-#define prev_inuse(p)       ((p)->size & PREV_INUSE)
+#define prev_inuse(p)	((p)->size & PREV_INUSE)
 
 
 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
-#define IS_MMAPPED 0x2
+#define IS_MMAPPED	0x2
 
 /* check for mmap()'ed chunk */
-#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
+#define chunk_is_mmapped(p)	((p)->size & IS_MMAPPED)
 
 
 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
    from a non-main arena.  This is only set immediately before handing
    the chunk to the user, if necessary.  */
-#define NON_MAIN_ARENA 0x4
+#define NON_MAIN_ARENA	0x4
 
 /* check for chunk from non-main arena */
-#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
+#define chunk_non_main_arena(p)	((p)->size & NON_MAIN_ARENA)
 
 
 /*
@@ -1290,52 +1292,52 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    cause helpful core dumps to occur if it is tried by accident by
    people extending or adapting this malloc.
  */
-#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
+#define SIZE_BITS	(PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
 
 /* Get size, ignoring use bits */
-#define chunksize(p)         ((p)->size & ~(SIZE_BITS))
+#define chunksize(p)	((p)->size & ~(SIZE_BITS))
 
 
 /* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
+#define next_chunk(p)	((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
 
 /* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
+#define prev_chunk(p)	((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
 
 /* Treat space at ptr + offset as a chunk */
-#define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))
+#define chunk_at_offset(p, s)	((mchunkptr) (((char *) (p)) + (s)))
 
 /* extract p's inuse bit */
-#define inuse(p)							      \
+#define inuse(p) \
   ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
 
 /* set/clear chunk as being inuse without otherwise disturbing */
-#define set_inuse(p)							      \
+#define set_inuse(p) \
   ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
 
-#define clear_inuse(p)							      \
+#define clear_inuse(p) \
   ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
 
 
 /* check/set/clear inuse bits in known places */
-#define inuse_bit_at_offset(p, s)					      \
+#define inuse_bit_at_offset(p, s) \
   (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
 
-#define set_inuse_bit_at_offset(p, s)					      \
+#define set_inuse_bit_at_offset(p, s) \
   (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
 
-#define clear_inuse_bit_at_offset(p, s)					      \
+#define clear_inuse_bit_at_offset(p, s) \
   (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
 
 
 /* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s)  ((p)->size = (((p)->size & SIZE_BITS) | (s)))
+#define set_head_size(p, s)	((p)->size = (((p)->size & SIZE_BITS) | (s)))
 
 /* Set size/use field */
-#define set_head(p, s)       ((p)->size = (s))
+#define set_head(p, s)	((p)->size = (s))
 
 /* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
+#define set_foot(p, s)	(((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
 
 
 /*
@@ -1394,44 +1396,51 @@ typedef struct malloc_chunk *mbinptr;
 /* addressing -- note that bin_at(0) does not exist */
 #define bin_at(m, i) \
   (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))			      \
-             - offsetof (struct malloc_chunk, fd))
+	     - offsetof (struct malloc_chunk, fd))
 
 /* analog of ++bin */
-#define next_bin(b)  ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
+#define next_bin(b)	((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
 
 /* Reminders about list directionality within bins */
-#define first(b)     ((b)->fd)
-#define last(b)      ((b)->bk)
+#define first(b)	((b)->fd)
+#define last(b)	((b)->bk)
 
 /* Take a chunk off a bin list */
-#define unlink(P, BK, FD) {                                            \
+#define unlink(P, BK, FD) \
+  {									      \
     FD = P->fd;								      \
     BK = P->bk;								      \
     if (__builtin_expect (FD->bk != P || BK->fd != P, 0))		      \
       malloc_printerr (check_action, "corrupted double-linked list", P);      \
-    else {								      \
-        FD->bk = BK;							      \
-        BK->fd = FD;							      \
-        if (!in_smallbin_range (P->size)				      \
-            && __builtin_expect (P->fd_nextsize != NULL, 0)) {		      \
-            assert (P->fd_nextsize->bk_nextsize == P);			      \
-            assert (P->bk_nextsize->fd_nextsize == P);			      \
-            if (FD->fd_nextsize == NULL) {				      \
-                if (P->fd_nextsize == P)				      \
-                  FD->fd_nextsize = FD->bk_nextsize = FD;		      \
-                else {							      \
-                    FD->fd_nextsize = P->fd_nextsize;			      \
-                    FD->bk_nextsize = P->bk_nextsize;			      \
-                    P->fd_nextsize->bk_nextsize = FD;			      \
-                    P->bk_nextsize->fd_nextsize = FD;			      \
-                  }							      \
-              } else {							      \
-                P->fd_nextsize->bk_nextsize = P->bk_nextsize;		      \
-                P->bk_nextsize->fd_nextsize = P->fd_nextsize;		      \
-              }								      \
-          }								      \
+    else								      \
+      {									      \
+	FD->bk = BK;							      \
+	BK->fd = FD;							      \
+	if (!in_smallbin_range (P->size)				      \
+	    && __builtin_expect (P->fd_nextsize != NULL, 0))		      \
+	  {								      \
+	    assert (P->fd_nextsize->bk_nextsize == P);			      \
+	    assert (P->bk_nextsize->fd_nextsize == P);			      \
+	    if (FD->fd_nextsize == NULL)				      \
+	      {								      \
+		if (P->fd_nextsize == P)				      \
+		  FD->fd_nextsize = FD->bk_nextsize = FD;		      \
+		else							      \
+		  {							      \
+		    FD->fd_nextsize = P->fd_nextsize;			      \
+		    FD->bk_nextsize = P->bk_nextsize;			      \
+		    P->fd_nextsize->bk_nextsize = FD;			      \
+		    P->bk_nextsize->fd_nextsize = FD;			      \
+		  }							      \
+	      }								      \
+	    else							      \
+	      {								      \
+		P->fd_nextsize->bk_nextsize = P->bk_nextsize;		      \
+		P->bk_nextsize->fd_nextsize = P->fd_nextsize;		      \
+	      }								      \
+	  }								      \
       }									      \
-}
+  }
 
 /*
    Indexing
@@ -1457,20 +1466,20 @@ typedef struct malloc_chunk *mbinptr;
     a valid chunk size the small bins are bumped up one.
  */
 
-#define NBINS             128
-#define NSMALLBINS         64
-#define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
-#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
-#define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
+#define NBINS			128
+#define NSMALLBINS		64
+#define SMALLBIN_WIDTH		MALLOC_ALIGNMENT
+#define SMALLBIN_CORRECTION	(MALLOC_ALIGNMENT > 2 * SIZE_SZ)
+#define MIN_LARGE_SIZE	((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
 
-#define in_smallbin_range(sz)  \
+#define in_smallbin_range(sz) \
   ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
 
 #define smallbin_index(sz) \
-  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
+  ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3)) \
    + SMALLBIN_CORRECTION)
 
-#define largebin_index_32(sz)                                                \
+#define largebin_index_32(sz) \
   (((((unsigned long) (sz)) >> 6) <= 38) ?  56 + (((unsigned long) (sz)) >> 6) :\
    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
@@ -1478,7 +1487,7 @@ typedef struct malloc_chunk *mbinptr;
    ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
    126)
 
-#define largebin_index_32_big(sz)                                            \
+#define largebin_index_32_big(sz) \
   (((((unsigned long) (sz)) >> 6) <= 45) ?  49 + (((unsigned long) (sz)) >> 6) :\
    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
@@ -1489,7 +1498,7 @@ typedef struct malloc_chunk *mbinptr;
 // XXX It remains to be seen whether it is good to keep the widths of
 // XXX the buckets the same or whether it should be scaled by a factor
 // XXX of two as well.
-#define largebin_index_64(sz)                                                \
+#define largebin_index_64(sz) \
   (((((unsigned long) (sz)) >> 6) <= 48) ?  48 + (((unsigned long) (sz)) >> 6) :\
    ((((unsigned long) (sz)) >> 9) <= 20) ?  91 + (((unsigned long) (sz)) >> 9) :\
    ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
@@ -1498,8 +1507,8 @@ typedef struct malloc_chunk *mbinptr;
    126)
 
 #define largebin_index(sz) \
-  (SIZE_SZ == 8 ? largebin_index_64 (sz)                                     \
-   : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz)                     \
+  (SIZE_SZ == 8 ? largebin_index_64 (sz)					      \
+   : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz)		      \
    : largebin_index_32 (sz))
 
 #define bin_index(sz) \
@@ -1521,7 +1530,7 @@ typedef struct malloc_chunk *mbinptr;
  */
 
 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
-#define unsorted_chunks(M)          (bin_at (M, 1))
+#define unsorted_chunks(M)	(bin_at (M, 1))
 
 /*
    Top
@@ -1542,7 +1551,7 @@ typedef struct malloc_chunk *mbinptr;
  */
 
 /* Conveniently, the unsorted bin can be used as dummy top on first call */
-#define initial_top(M)              (unsorted_chunks (M))
+#define initial_top(M)	(unsorted_chunks (M))
 
 /*
    Binmap
@@ -1556,16 +1565,16 @@ typedef struct malloc_chunk *mbinptr;
  */
 
 /* Conservatively use 32 bits per map word, even if on 64bit system */
-#define BINMAPSHIFT      5
-#define BITSPERMAP       (1U << BINMAPSHIFT)
-#define BINMAPSIZE       (NBINS / BITSPERMAP)
+#define BINMAPSHIFT	5
+#define BITSPERMAP	(1U << BINMAPSHIFT)
+#define BINMAPSIZE	(NBINS / BITSPERMAP)
 
-#define idx2block(i)     ((i) >> BINMAPSHIFT)
-#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
+#define idx2block(i)	((i) >> BINMAPSHIFT)
+#define idx2bit(i)	((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
 
-#define mark_bin(m, i)    ((m)->binmap[idx2block (i)] |= idx2bit (i))
-#define unmark_bin(m, i)  ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
-#define get_binmap(m, i)  ((m)->binmap[idx2block (i)] & idx2bit (i))
+#define mark_bin(m, i)		((m)->binmap[idx2block (i)] |= idx2bit (i))
+#define unmark_bin(m, i)	((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
+#define get_binmap(m, i)	((m)->binmap[idx2block (i)] & idx2bit (i))
 
 /*
    Fastbins
@@ -1585,7 +1594,7 @@ typedef struct malloc_chunk *mbinptr;
  */
 
 typedef struct malloc_chunk *mfastbinptr;
-#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
+#define fastbin(ar_ptr, idx)	((ar_ptr)->fastbinsY[idx])
 
 /* offset 2 to use otherwise unindexable first 2 bins */
 #define fastbin_index(sz) \
@@ -1593,9 +1602,9 @@ typedef struct malloc_chunk *mfastbinptr;
 
 
 /* The maximum fastbin request size we support */
-#define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)
+#define MAX_FAST_SIZE	(80 * SIZE_SZ / 4)
 
-#define NFASTBINS  (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
+#define NFASTBINS	(fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
 
 /*
    FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
@@ -1608,7 +1617,7 @@ typedef struct malloc_chunk *mfastbinptr;
    if trimming is not used.
  */
 
-#define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
+#define FASTBIN_CONSOLIDATION_THRESHOLD	(65536UL)
 
 /*
    Since the lowest 2 bits in max_fast don't matter in size comparisons,
@@ -1625,11 +1634,11 @@ typedef struct malloc_chunk *mfastbinptr;
    initialization checks.
  */
 
-#define FASTCHUNKS_BIT        (1U)
+#define FASTCHUNKS_BIT	(1U)
 
-#define have_fastchunks(M)     (((M)->flags & FASTCHUNKS_BIT) == 0)
-#define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
-#define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
+#define have_fastchunks(M)	(((M)->flags & FASTCHUNKS_BIT) == 0)
+#define clear_fastchunks(M)	catomic_or (&(M)->flags, FASTCHUNKS_BIT)
+#define set_fastchunks(M)	catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
 
 /*
    NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
@@ -1640,12 +1649,12 @@ typedef struct malloc_chunk *mfastbinptr;
    changed dynamically if mmap is ever used as an sbrk substitute.
  */
 
-#define NONCONTIGUOUS_BIT     (2U)
+#define NONCONTIGUOUS_BIT	(2U)
 
-#define contiguous(M)          (((M)->flags & NONCONTIGUOUS_BIT) == 0)
-#define noncontiguous(M)       (((M)->flags & NONCONTIGUOUS_BIT) != 0)
-#define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
-#define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
+#define contiguous(M)		(((M)->flags & NONCONTIGUOUS_BIT) == 0)
+#define noncontiguous(M)	(((M)->flags & NONCONTIGUOUS_BIT) != 0)
+#define set_noncontiguous(M)	((M)->flags |= NONCONTIGUOUS_BIT)
+#define set_contiguous(M)	((M)->flags &= ~NONCONTIGUOUS_BIT)
 
 /*
    Set value of max_fast.
@@ -1656,8 +1665,8 @@ typedef struct malloc_chunk *mfastbinptr;
 
 #define set_max_fast(s) \
   global_max_fast = (((s) == 0)						      \
-                     ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
-#define get_max_fast() global_max_fast
+		     ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
+#define get_max_fast()	global_max_fast
 
 
 /*
@@ -1752,14 +1761,14 @@ static struct malloc_par mp_ =
   .n_mmaps_max = DEFAULT_MMAP_MAX,
   .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
   .trim_threshold = DEFAULT_TRIM_THRESHOLD,
-#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
+#define NARENAS_FROM_NCORES(n)	((n) * (sizeof (long) == 4 ? 2 : 8))
   .arena_test = NARENAS_FROM_NCORES (1)
 };
 
 
 /*  Non public mallopt parameters.  */
-#define M_ARENA_TEST -7
-#define M_ARENA_MAX  -8
+#define M_ARENA_TEST	-7
+#define M_ARENA_MAX	-8
 
 
 /* Maximum size of memory handled in fastbins.  */
@@ -1815,7 +1824,7 @@ static void     malloc_consolidate (mstate);
 #ifndef weak_variable
 /* In GNU libc we want the hook variables to be weak definitions to
    avoid a problem with Emacs.  */
-# define weak_variable weak_function
+# define weak_variable	weak_function
 #endif
 
 /* Forward declarations.  */
@@ -1843,7 +1852,7 @@ void weak_variable (*__after_morecore_hook) (void) = NULL;
 /* ---------------- Error behavior ------------------------------------ */
 
 #ifndef DEFAULT_CHECK_ACTION
-# define DEFAULT_CHECK_ACTION 3
+# define DEFAULT_CHECK_ACTION	3
 #endif
 
 static int check_action = DEFAULT_CHECK_ACTION;
@@ -1895,12 +1904,12 @@ free_perturb (char *p, size_t n)
 
 #else
 
-# define check_chunk(A, P)              do_check_chunk (A, P)
-# define check_free_chunk(A, P)         do_check_free_chunk (A, P)
-# define check_inuse_chunk(A, P)        do_check_inuse_chunk (A, P)
-# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
-# define check_malloced_chunk(A, P, N)   do_check_malloced_chunk (A, P, N)
-# define check_malloc_state(A)         do_check_malloc_state (A)
+# define check_chunk(A, P)		do_check_chunk (A, P)
+# define check_free_chunk(A, P)	do_check_free_chunk (A, P)
+# define check_inuse_chunk(A, P)	do_check_inuse_chunk (A, P)
+# define check_remalloced_chunk(A, P, N)	do_check_remalloced_chunk (A, P, N)
+# define check_malloced_chunk(A, P, N)	do_check_malloced_chunk (A, P, N)
+# define check_malloc_state(A)		do_check_malloc_state (A)
 
 /*
    Properties of all chunks
@@ -3560,7 +3569,7 @@ _int_malloc (mstate av, size_t bytes)
           fwd->bk = victim;
           bck->fd = victim;
 
-#define MAX_ITERS       10000
+#define MAX_ITERS	10000
           if (++iters >= MAX_ITERS)
             break;
         }
@@ -5056,7 +5065,7 @@ malloc_info (int options, FILE *fp)
       size_t total;
       size_t count;
     } sizes[NFASTBINS + NBINS - 1];
-#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
+#define nsizes	(sizeof (sizes) / sizeof (sizes[0]))
 
     mutex_lock (&ar_ptr->mutex);
 
@@ -5127,7 +5136,7 @@ malloc_info (int options, FILE *fp)
 
     for (size_t i = 0; i < nsizes; ++i)
       if (sizes[i].count != 0 && i != NFASTBINS)
-        fprintf (fp, "							      \
+        fprintf (fp, "\
 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                  sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
 
diff --git a/malloc/mallocbug.c b/malloc/mallocbug.c
index 7d19b6f..b2fa3b5 100644
--- a/malloc/mallocbug.c
+++ b/malloc/mallocbug.c
@@ -3,7 +3,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#define size_t unsigned int
+#define size_t	unsigned int
 
 /* Defined as global variables to avoid warnings about unused variables.  */
 char *dummy0;
diff --git a/malloc/mcheck.c b/malloc/mcheck.c
index f4f875e..95d6a08 100644
--- a/malloc/mcheck.c
+++ b/malloc/mcheck.c
@@ -39,11 +39,11 @@ static __ptr_t (*old_realloc_hook) (__ptr_t ptr, size_t size,
 static void (*abortfunc) (enum mcheck_status);
 
 /* Arbitrary magical numbers.  */
-#define MAGICWORD       0xfedabeeb
-#define MAGICFREE       0xd8675309
-#define MAGICBYTE       ((char) 0xd7)
-#define MALLOCFLOOD     ((char) 0x93)
-#define FREEFLOOD       ((char) 0x95)
+#define MAGICWORD	0xfedabeeb
+#define MAGICFREE	0xd8675309
+#define MAGICBYTE	((char) 0xd7)
+#define MALLOCFLOOD	((char) 0x93)
+#define FREEFLOOD	((char) 0x95)
 
 struct hdr
 {
@@ -66,7 +66,7 @@ static int pedantic;
 
 #if defined _LIBC || defined STDC_HEADERS || defined USG
 # include <string.h>
-# define flood memset
+# define flood	memset
 #else
 static void flood (__ptr_t, int, size_t);
 static void flood (ptr, val, size)
diff --git a/malloc/memusage.c b/malloc/memusage.c
index bfbaecc..3ae6bf3 100644
--- a/malloc/memusage.c
+++ b/malloc/memusage.c
@@ -69,7 +69,7 @@ struct header
   size_t magic;
 };
 
-#define MAGIC 0xfeedbeaf
+#define MAGIC	0xfeedbeaf
 
 
 static memusage_cntr_t calls[idx_last];
@@ -89,11 +89,11 @@ static memusage_size_t peak_use[3];
 static __thread uintptr_t start_sp;
 
 /* A few macros to make the source more readable.  */
-#define peak_heap       peak_use[0]
-#define peak_stack      peak_use[1]
-#define peak_total      peak_use[2]
+#define peak_heap	peak_use[0]
+#define peak_stack	peak_use[1]
+#define peak_total	peak_use[2]
 
-#define DEFAULT_BUFFER_SIZE     32768
+#define DEFAULT_BUFFER_SIZE	32768
 static size_t buffer_size;
 
 static int fd = -1;
diff --git a/malloc/memusagestat.c b/malloc/memusagestat.c
index 3e0889e..20900fe 100644
--- a/malloc/memusagestat.c
+++ b/malloc/memusagestat.c
@@ -16,7 +16,7 @@
    You should have received a copy of the GNU General Public License
    along with this program; if not, see <http://www.gnu.org/licenses/>.  */
 
-#define _FILE_OFFSET_BITS 64
+#define _FILE_OFFSET_BITS	64
 
 #include <argp.h>
 #include <assert.h>
@@ -39,14 +39,14 @@
 #include <gdfonts.h>
 
 #include "../version.h"
-#define PACKAGE _libc_intl_domainname
+#define PACKAGE	_libc_intl_domainname
 
 /* Default size of the generated image.  */
-#define XSIZE 800
-#define YSIZE 600
+#define XSIZE	800
+#define YSIZE	600
 
 #ifndef N_
-# define N_(Arg) Arg
+# define N_(Arg)	Arg
 #endif
 
 
@@ -439,7 +439,7 @@ main (int argc, char *argv[])
 
       gdImageString (im_out, gdFontSmall, 40 + (xsize - 39 * 6 - 80) / 2,
                      ysize - 12,
-                     (unsigned char *) "				      \
+                     (unsigned char *) "\
 # memory handling function calls / time", blue);
 
       for (cnt = 0; cnt < 20; cnt += 2)
diff --git a/malloc/morecore.c b/malloc/morecore.c
index efcc615..008ad64 100644
--- a/malloc/morecore.c
+++ b/malloc/morecore.c
@@ -21,7 +21,7 @@
 #endif
 
 #ifndef __GNU_LIBRARY__
-# define __sbrk  sbrk
+# define __sbrk	sbrk
 #endif
 
 #ifdef __GNU_LIBRARY__
@@ -35,7 +35,7 @@ libc_hidden_proto (__sbrk)
 #endif
 
 #ifndef NULL
-# define NULL 0
+# define NULL	0
 #endif
 
 /* Allocate INCREMENT more bytes of data space,
diff --git a/malloc/mtrace.c b/malloc/mtrace.c
index 99ebaff..4d075df 100644
--- a/malloc/mtrace.c
+++ b/malloc/mtrace.c
@@ -36,8 +36,8 @@
 #include <libc-internal.h>
 
 #include <libio/iolibio.h>
-#define setvbuf(s, b, f, l) _IO_setvbuf (s, b, f, l)
-#define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
+#define setvbuf(s, b, f, l)	_IO_setvbuf (s, b, f, l)
+#define fwrite(buf, size, count, fp)	_IO_fwrite (buf, size, count, fp)
 
 #include <kernel-features.h>
 
@@ -45,7 +45,7 @@
 # define attribute_hidden
 #endif
 
-#define TRACE_BUFFER_SIZE 512
+#define TRACE_BUFFER_SIZE	512
 
 static FILE *mallstream;
 static const char mallenv[] = "MALLOC_TRACE";
diff --git a/malloc/obstack.c b/malloc/obstack.c
index 4064f61..6fbcda1 100644
--- a/malloc/obstack.c
+++ b/malloc/obstack.c
@@ -31,7 +31,7 @@
 /* NOTE BEFORE MODIFYING THIS FILE: This version number must be
    incremented whenever callers compiled using an old obstack.h can no
    longer properly call the functions in this obstack.c.  */
-#define OBSTACK_INTERFACE_VERSION 1
+#define OBSTACK_INTERFACE_VERSION	1
 
 /* Comment out all this code if we are using the GNU C Library, and are not
    actually compiling the library itself, and the installed library
@@ -88,7 +88,7 @@ enum
    in such a case, redefine COPYING_UNIT to `long' (if that works)
    or `char' as a last resort.  */
 # ifndef COPYING_UNIT
-#  define COPYING_UNIT int
+#  define COPYING_UNIT	int
 # endif
 
 
@@ -107,7 +107,7 @@ void (*obstack_alloc_failed_handler) (void) = print_and_abort;
 int obstack_exit_failure = EXIT_FAILURE;
 # else
 #  include "exitfail.h"
-#  define obstack_exit_failure exit_failure
+#  define obstack_exit_failure	exit_failure
 # endif
 
 # ifdef _LIBC
@@ -132,12 +132,14 @@ compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0);
    : (*(struct _obstack_chunk *(*)(long))(h)->chunkfun)((size)))
 
 # define CALL_FREEFUN(h, old_chunk) \
-  do { \
+  do									      \
+    {									      \
       if ((h)->use_extra_arg)						      \
-        (*(h)->freefun)((h)->extra_arg, (old_chunk));			      \
+	(*(h)->freefun)((h)->extra_arg, (old_chunk));			      \
       else								      \
-        (*(void (*)(void *))(h)->freefun)((old_chunk));			      \
-    } while (0)
+	(*(void (*)(void *))(h)->freefun)((old_chunk));			      \
+    }									      \
+  while (0)
 
 
 /* Initialize an obstack H for use.  Specify chunk size SIZE (0 means default).
@@ -405,7 +407,7 @@ _obstack_memory_used (struct obstack *h)
 #  include "gettext.h"
 # endif
 # ifndef _
-#  define _(msgid) gettext (msgid)
+#  define _(msgid)	gettext (msgid)
 # endif
 
 # ifdef _LIBC
@@ -415,7 +417,7 @@ _obstack_memory_used (struct obstack *h)
 # ifndef __attribute__
 /* This feature is available in gcc versions 2.5 and later.  */
 #  if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 5)
-#   define __attribute__(Spec) /* empty */
+#   define __attribute__(Spec)	/* empty */
 #  endif
 # endif
 
diff --git a/malloc/tst-obstack.c b/malloc/tst-obstack.c
index 769697f..d8706bf 100644
--- a/malloc/tst-obstack.c
+++ b/malloc/tst-obstack.c
@@ -4,11 +4,11 @@
 #include <stdio.h>
 #include <stdlib.h>
 
-#define obstack_chunk_alloc verbose_malloc
-#define obstack_chunk_free verbose_free
-#define ALIGN_BOUNDARY 64
-#define ALIGN_MASK (ALIGN_BOUNDARY - 1)
-#define OBJECT_SIZE 1000
+#define obstack_chunk_alloc	verbose_malloc
+#define obstack_chunk_free	verbose_free
+#define ALIGN_BOUNDARY		64
+#define ALIGN_MASK		(ALIGN_BOUNDARY - 1)
+#define OBJECT_SIZE		1000
 
 static void *
 verbose_malloc (size_t size)

