This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.



Re: [PING^2][PATCH] Reformat malloc to gnu style.


On Thu, Jan 02, 2014 at 04:42:28PM +1000, Allan McRae wrote:
> On 02/01/14 13:45, Siddhesh Poyarekar wrote:
> > On Thu, Jan 02, 2014 at 09:40:08AM +1000, Allan McRae wrote:
> >> Any opinions on this?   It is a "big" change, but purely reformatting.
> >>  Adding the patch now will make backporting fixes to the 2.19 easier.
> >>
> > 
> > I am in favour of getting the code formatting bits in right now.
> > 
> 
> OK.  Please commit.
> 
> Allan

I committed the hunks that applied without conflict; the reformatting of the
remaining conflicting hunk follows below.  OK to commit this as well?
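
For reference, here is a minimal, hypothetical snippet (not taken from the
patch) illustrating the GNU style conventions the reformatting applies: a
space before the opening parenthesis of a call, the function name starting a
new line after the return type, and braces on their own lines indented under
the controlling statement with the body indented two further spaces:

/* Hypothetical helper, for illustration of GNU style only.  */
static int
clamp_to_max (int value, int max)
{
  if (value > max)
    {
      value = max;
    }
  return value;
}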

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 813e94e..75520c3 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3845,380 +3845,405 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       goto errout;
     }
 
-  check_inuse_chunk(av, p);
+  check_inuse_chunk (av, p);
 
   /*
-    If eligible, place chunk on a fastbin so it can be found
-    and used quickly in malloc.
-  */
+     If eligible, place chunk on a fastbin so it can be found
+     and used quickly in malloc.
+   */
 
-  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
+  if ((unsigned long) (size) <= (unsigned long) (get_max_fast ())
 
 #if TRIM_FASTBINS
       /*
-	If TRIM_FASTBINS set, don't place chunks
-	bordering top into fastbins
-      */
-      && (chunk_at_offset(p, size) != av->top)
+         If TRIM_FASTBINS set, don't place chunks
+         bordering top into fastbins
+       */
+      && (chunk_at_offset (p, size) != av->top)
 #endif
-      ) {
-
-    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
-	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
-			     >= av->system_mem, 0))
-      {
-	/* We might not have a lock at this point and concurrent modifications
-	   of system_mem might have let to a false positive.  Redo the test
-	   after getting the lock.  */
-	if (have_lock
-	    || ({ assert (locked == 0);
-		  mutex_lock(&av->mutex);
-		  locked = 1;
-		  chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
-		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
-	      }))
-	  {
-	    errstr = "free(): invalid next size (fast)";
-	    goto errout;
-	  }
-	if (! have_lock)
-	  {
-	    (void)mutex_unlock(&av->mutex);
-	    locked = 0;
-	  }
-      }
+      )
+    {
+      if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
+	  || __builtin_expect (chunksize (chunk_at_offset (p, size))
+			       >= av->system_mem, 0))
+	{
+	  /* We might not have a lock at this point and concurrent modifications
+	     of system_mem might have let to a false positive.  Redo the test
+	     after getting the lock.  */
+	  if (have_lock
+	      || ({ assert (locked == 0);
+		    mutex_lock (&av->mutex);
+		    locked = 1;
+		    chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
+		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem; }))
+	    {
+	      errstr = "free(): invalid next size (fast)";
+	      goto errout;
+	    }
+	  if (!have_lock)
+	    {
+	      (void) mutex_unlock (&av->mutex);
+	      locked = 0;
+	    }
+	}
 
-    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
+      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
 
-    set_fastchunks(av);
-    unsigned int idx = fastbin_index(size);
-    fb = &fastbin (av, idx);
+      set_fastchunks (av);
+      unsigned int idx = fastbin_index (size);
+      fb = &fastbin (av, idx);
 
-    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
-    mchunkptr old = *fb, old2;
-    unsigned int old_idx = ~0u;
-    do
-      {
-	/* Check that the top of the bin is not the record we are going to add
-	   (i.e., double free).  */
-	if (__builtin_expect (old == p, 0))
-	  {
-	    errstr = "double free or corruption (fasttop)";
-	    goto errout;
-	  }
-	/* Check that size of fastbin chunk at the top is the same as
-	   size of the chunk that we are adding.  We can dereference OLD
-	   only if we have the lock, otherwise it might have already been
-	   deallocated.  See use of OLD_IDX below for the actual check.  */
-	if (have_lock && old != NULL)
-	  old_idx = fastbin_index(chunksize(old));
-	p->fd = old2 = old;
-      }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
+      /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
+      mchunkptr old = *fb, old2;
+      unsigned int old_idx = ~0u;
+      do
+	{
+	  /* Check that the top of the bin is not the record we are going to add
+	     (i.e., double free).  */
+	  if (__builtin_expect (old == p, 0))
+	    {
+	      errstr = "double free or corruption (fasttop)";
+	      goto errout;
+	    }
+	  /* Check that size of fastbin chunk at the top is the same as
+	     size of the chunk that we are adding.  We can dereference OLD
+	     only if we have the lock, otherwise it might have already been
+	     deallocated.  See use of OLD_IDX below for the actual check.  */
+	  if (have_lock && old != NULL)
+	    old_idx = fastbin_index (chunksize (old));
+	  p->fd = old2 = old;
+	}
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
 
-    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
-      {
-	errstr = "invalid fastbin entry (free)";
-	goto errout;
-      }
-  }
+      if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+	{
+	  errstr = "invalid fastbin entry (free)";
+	  goto errout;
+	}
+    }
 
   /*
-    Consolidate other non-mmapped chunks as they arrive.
-  */
+     Consolidate other non-mmapped chunks as they arrive.
+   */
 
-  else if (!chunk_is_mmapped(p)) {
-    if (! have_lock) {
+  else if (!chunk_is_mmapped (p))
+    {
+      if (!have_lock)
+	{
 #if THREAD_STATS
-      if(!mutex_trylock(&av->mutex))
-	++(av->stat_lock_direct);
-      else {
-	(void)mutex_lock(&av->mutex);
-	++(av->stat_lock_wait);
-      }
+	  if (!mutex_trylock (&av->mutex))
+	    ++(av->stat_lock_direct);
+	  else
+	    {
+	      (void) mutex_lock (&av->mutex);
+	      ++(av->stat_lock_wait);
+	    }
 #else
-      (void)mutex_lock(&av->mutex);
+	  (void) mutex_lock (&av->mutex);
 #endif
-      locked = 1;
-    }
-
-    nextchunk = chunk_at_offset(p, size);
-
-    /* Lightweight tests: check whether the block is already the
-       top block.  */
-    if (__builtin_expect (p == av->top, 0))
-      {
-	errstr = "double free or corruption (top)";
-	goto errout;
-      }
-    /* Or whether the next chunk is beyond the boundaries of the arena.  */
-    if (__builtin_expect (contiguous (av)
-			  && (char *) nextchunk
-			  >= ((char *) av->top + chunksize(av->top)), 0))
-      {
-	errstr = "double free or corruption (out)";
-	goto errout;
-      }
-    /* Or whether the block is actually not marked used.  */
-    if (__builtin_expect (!prev_inuse(nextchunk), 0))
-      {
-	errstr = "double free or corruption (!prev)";
-	goto errout;
-      }
-
-    nextsize = chunksize(nextchunk);
-    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
-	|| __builtin_expect (nextsize >= av->system_mem, 0))
-      {
-	errstr = "free(): invalid next size (normal)";
-	goto errout;
-      }
-
-    free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
-
-    /* consolidate backward */
-    if (!prev_inuse(p)) {
-      prevsize = p->prev_size;
-      size += prevsize;
-      p = chunk_at_offset(p, -((long) prevsize));
-      unlink(p, bck, fwd);
-    }
+	  locked = 1;
+	}
 
-    if (nextchunk != av->top) {
-      /* get and clear inuse bit */
-      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+      nextchunk = chunk_at_offset (p, size);
 
-      /* consolidate forward */
-      if (!nextinuse) {
-	unlink(nextchunk, bck, fwd);
-	size += nextsize;
-      } else
-	clear_inuse_bit_at_offset(nextchunk, 0);
+      /* Lightweight tests: check whether the block is already the
+         top block.  */
+      if (__builtin_expect (p == av->top, 0))
+	{
+	  errstr = "double free or corruption (top)";
+	  goto errout;
+	}
+      /* Or whether the next chunk is beyond the boundaries of the arena.  */
+      if (__builtin_expect (contiguous (av)
+			    && (char *) nextchunk
+			    >= ((char *) av->top + chunksize (av->top)), 0))
+	{
+	  errstr = "double free or corruption (out)";
+	  goto errout;
+	}
+      /* Or whether the block is actually not marked used.  */
+      if (__builtin_expect (!prev_inuse (nextchunk), 0))
+	{
+	  errstr = "double free or corruption (!prev)";
+	  goto errout;
+	}
 
-      /*
-	Place the chunk in unsorted chunk list. Chunks are
-	not placed into regular bins until after they have
-	been given one chance to be used in malloc.
-      */
-
-      bck = unsorted_chunks(av);
-      fwd = bck->fd;
-      if (__builtin_expect (fwd->bk != bck, 0))
+      nextsize = chunksize (nextchunk);
+      if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
+	  || __builtin_expect (nextsize >= av->system_mem, 0))
 	{
-	  errstr = "free(): corrupted unsorted chunks";
+	  errstr = "free(): invalid next size (normal)";
 	  goto errout;
 	}
-      p->fd = fwd;
-      p->bk = bck;
-      if (!in_smallbin_range(size))
+
+      free_perturb (chunk2mem (p), size - 2 * SIZE_SZ);
+
+      /* consolidate backward */
+      if (!prev_inuse (p))
 	{
-	  p->fd_nextsize = NULL;
-	  p->bk_nextsize = NULL;
+	  prevsize = p->prev_size;
+	  size += prevsize;
+	  p = chunk_at_offset (p, -((long) prevsize));
+	  unlink (p, bck, fwd);
 	}
-      bck->fd = p;
-      fwd->bk = p;
 
-      set_head(p, size | PREV_INUSE);
-      set_foot(p, size);
+      if (nextchunk != av->top)
+	{
+	  /* get and clear inuse bit */
+	  nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
 
-      check_free_chunk(av, p);
-    }
+	  /* consolidate forward */
+	  if (!nextinuse)
+	    {
+	      unlink (nextchunk, bck, fwd);
+	      size += nextsize;
+	    }
+	  else
+	    clear_inuse_bit_at_offset (nextchunk, 0);
+
+	  /*
+	     Place the chunk in unsorted chunk list. Chunks are
+	     not placed into regular bins until after they have
+	     been given one chance to be used in malloc.
+	   */
+
+	  bck = unsorted_chunks (av);
+	  fwd = bck->fd;
+	  if (__builtin_expect (fwd->bk != bck, 0))
+	    {
+	      errstr = "free(): corrupted unsorted chunks";
+	      goto errout;
+	    }
+	  p->fd = fwd;
+	  p->bk = bck;
+	  if (!in_smallbin_range (size))
+	    {
+	      p->fd_nextsize = NULL;
+	      p->bk_nextsize = NULL;
+	    }
+	  bck->fd = p;
+	  fwd->bk = p;
 
-    /*
-      If the chunk borders the current high end of memory,
-      consolidate into top
-    */
+	  set_head (p, size | PREV_INUSE);
+	  set_foot (p, size);
 
-    else {
-      size += nextsize;
-      set_head(p, size | PREV_INUSE);
-      av->top = p;
-      check_chunk(av, p);
-    }
+	  check_free_chunk (av, p);
+	}
 
-    /*
-      If freeing a large space, consolidate possibly-surrounding
-      chunks. Then, if the total unused topmost memory exceeds trim
-      threshold, ask malloc_trim to reduce top.
+      /*
+         If the chunk borders the current high end of memory,
+         consolidate into top
+       */
 
-      Unless max_fast is 0, we don't know if there are fastbins
-      bordering top, so we cannot tell for sure whether threshold
-      has been reached unless fastbins are consolidated.  But we
-      don't want to consolidate on each free.  As a compromise,
-      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
-      is reached.
-    */
+      else
+	{
+	  size += nextsize;
+	  set_head (p, size | PREV_INUSE);
+	  av->top = p;
+	  check_chunk (av, p);
+	}
+
+      /*
+         If freeing a large space, consolidate possibly-surrounding
+         chunks. Then, if the total unused topmost memory exceeds trim
+         threshold, ask malloc_trim to reduce top.
+
+         Unless max_fast is 0, we don't know if there are fastbins
+         bordering top, so we cannot tell for sure whether threshold
+         has been reached unless fastbins are consolidated.  But we
+         don't want to consolidate on each free.  As a compromise,
+         consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
+         is reached.
+       */
 
-    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
-      if (have_fastchunks(av))
-	malloc_consolidate(av);
+      if ((unsigned long) (size) >= FASTBIN_CONSOLIDATION_THRESHOLD)
+	{
+	  if (have_fastchunks (av))
+	    malloc_consolidate (av);
 
-      if (av == &main_arena) {
+	  if (av == &main_arena)
+	    {
 #ifndef MORECORE_CANNOT_TRIM
-	if ((unsigned long)(chunksize(av->top)) >=
-	    (unsigned long)(mp_.trim_threshold))
-	  systrim(mp_.top_pad, av);
+	      if ((unsigned long) (chunksize (av->top)) >=
+		  (unsigned long) (mp_.trim_threshold))
+		systrim (mp_.top_pad, av);
 #endif
-      } else {
-	/* Always try heap_trim(), even if the top chunk is not
-	   large, because the corresponding heap might go away.  */
-	heap_info *heap = heap_for_ptr(top(av));
-
-	assert(heap->ar_ptr == av);
-	heap_trim(heap, mp_.top_pad);
-      }
-    }
+	    }
+	  else
+	    {
+	      /* Always try heap_trim(), even if the top chunk is not
+	         large, because the corresponding heap might go away.  */
+	      heap_info *heap = heap_for_ptr (top (av));
+
+	      assert (heap->ar_ptr == av);
+	      heap_trim (heap, mp_.top_pad);
+	    }
+	}
 
-    if (! have_lock) {
-      assert (locked);
-      (void)mutex_unlock(&av->mutex);
+      if (!have_lock)
+	{
+	  assert (locked);
+	  (void) mutex_unlock (&av->mutex);
+	}
     }
-  }
   /*
-    If the chunk was allocated via mmap, release via munmap().
-  */
+     If the chunk was allocated via mmap, release via munmap().
+   */
 
-  else {
-    munmap_chunk (p);
-  }
+  else
+    {
+      munmap_chunk (p);
+    }
 }
 
 /*
-  ------------------------- malloc_consolidate -------------------------
+   ------------------------- malloc_consolidate -------------------------
 
-  malloc_consolidate is a specialized version of free() that tears
-  down chunks held in fastbins.  Free itself cannot be used for this
-  purpose since, among other things, it might place chunks back onto
-  fastbins.  So, instead, we need to use a minor variant of the same
-  code.
+   malloc_consolidate is a specialized version of free() that tears
+   down chunks held in fastbins.  Free itself cannot be used for this
+   purpose since, among other things, it might place chunks back onto
+   fastbins.  So, instead, we need to use a minor variant of the same
+   code.
 
-  Also, because this routine needs to be called the first time through
-  malloc anyway, it turns out to be the perfect place to trigger
-  initialization code.
-*/
+   Also, because this routine needs to be called the first time through
+   malloc anyway, it turns out to be the perfect place to trigger
+   initialization code.
+ */
 
-static void malloc_consolidate(mstate av)
+static void
+malloc_consolidate (mstate av)
 {
-  mfastbinptr*    fb;                 /* current fastbin being consolidated */
-  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
-  mchunkptr       p;                  /* current chunk being consolidated */
-  mchunkptr       nextp;              /* next chunk to consolidate */
-  mchunkptr       unsorted_bin;       /* bin header */
-  mchunkptr       first_unsorted;     /* chunk to link to */
+  mfastbinptr *fb;               /* current fastbin being consolidated */
+  mfastbinptr *maxfb;                 /* last fastbin (for loop control) */
+  mchunkptr p;                    /* current chunk being consolidated */
+  mchunkptr nextp;                    /* next chunk to consolidate */
+  mchunkptr unsorted_bin;             /* bin header */
+  mchunkptr first_unsorted;           /* chunk to link to */
 
   /* These have same use as in free() */
-  mchunkptr       nextchunk;
+  mchunkptr nextchunk;
   INTERNAL_SIZE_T size;
   INTERNAL_SIZE_T nextsize;
   INTERNAL_SIZE_T prevsize;
-  int             nextinuse;
-  mchunkptr       bck;
-  mchunkptr       fwd;
+  int nextinuse;
+  mchunkptr bck;
+  mchunkptr fwd;
 
   /*
-    If max_fast is 0, we know that av hasn't
-    yet been initialized, in which case do so below
-  */
-
-  if (get_max_fast () != 0) {
-    clear_fastchunks(av);
-
-    unsorted_bin = unsorted_chunks(av);
-
-    /*
-      Remove each chunk from fast bin and consolidate it, placing it
-      then in unsorted bin. Among other reasons for doing this,
-      placing in unsorted bin avoids needing to calculate actual bins
-      until malloc is sure that chunks aren't immediately going to be
-      reused anyway.
-    */
-
-    maxfb = &fastbin (av, NFASTBINS - 1);
-    fb = &fastbin (av, 0);
-    do {
-      p = atomic_exchange_acq (fb, 0);
-      if (p != 0) {
-	do {
-	  check_inuse_chunk(av, p);
-	  nextp = p->fd;
-
-	  /* Slightly streamlined version of consolidation code in free() */
-	  size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
-	  nextchunk = chunk_at_offset(p, size);
-	  nextsize = chunksize(nextchunk);
-
-	  if (!prev_inuse(p)) {
-	    prevsize = p->prev_size;
-	    size += prevsize;
-	    p = chunk_at_offset(p, -((long) prevsize));
-	    unlink(p, bck, fwd);
-	  }
-
-	  if (nextchunk != av->top) {
-	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
-
-	    if (!nextinuse) {
-	      size += nextsize;
-	      unlink(nextchunk, bck, fwd);
-	    } else
-	      clear_inuse_bit_at_offset(nextchunk, 0);
-
-	    first_unsorted = unsorted_bin->fd;
-	    unsorted_bin->fd = p;
-	    first_unsorted->bk = p;
-
-	    if (!in_smallbin_range (size)) {
-	      p->fd_nextsize = NULL;
-	      p->bk_nextsize = NULL;
-	    }
+     If max_fast is 0, we know that av hasn't
+     yet been initialized, in which case do so below
+   */
 
-	    set_head(p, size | PREV_INUSE);
-	    p->bk = unsorted_bin;
-	    p->fd = first_unsorted;
-	    set_foot(p, size);
-	  }
+  if (get_max_fast () != 0)
+    {
+      clear_fastchunks (av);
 
-	  else {
-	    size += nextsize;
-	    set_head(p, size | PREV_INUSE);
-	    av->top = p;
-	  }
+      unsorted_bin = unsorted_chunks (av);
 
-	} while ( (p = nextp) != 0);
+      /*
+         Remove each chunk from fast bin and consolidate it, placing it
+         then in unsorted bin. Among other reasons for doing this,
+         placing in unsorted bin avoids needing to calculate actual bins
+         until malloc is sure that chunks aren't immediately going to be
+         reused anyway.
+       */
 
-      }
-    } while (fb++ != maxfb);
-  }
-  else {
-    malloc_init_state(av);
-    check_malloc_state(av);
-  }
+      maxfb = &fastbin (av, NFASTBINS - 1);
+      fb = &fastbin (av, 0);
+      do
+	{
+	  p = atomic_exchange_acq (fb, 0);
+	  if (p != 0)
+	    {
+	      do
+		{
+		  check_inuse_chunk (av, p);
+		  nextp = p->fd;
+
+		  /* Slightly streamlined version of consolidation code in free() */
+		  size = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+		  nextchunk = chunk_at_offset (p, size);
+		  nextsize = chunksize (nextchunk);
+
+		  if (!prev_inuse (p))
+		    {
+		      prevsize = p->prev_size;
+		      size += prevsize;
+		      p = chunk_at_offset (p, -((long) prevsize));
+		      unlink (p, bck, fwd);
+		    }
+
+		  if (nextchunk != av->top)
+		    {
+		      nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
+
+		      if (!nextinuse)
+			{
+			  size += nextsize;
+			  unlink (nextchunk, bck, fwd);
+			}
+		      else
+			clear_inuse_bit_at_offset (nextchunk, 0);
+
+		      first_unsorted = unsorted_bin->fd;
+		      unsorted_bin->fd = p;
+		      first_unsorted->bk = p;
+
+		      if (!in_smallbin_range (size))
+			{
+			  p->fd_nextsize = NULL;
+			  p->bk_nextsize = NULL;
+			}
+
+		      set_head (p, size | PREV_INUSE);
+		      p->bk = unsorted_bin;
+		      p->fd = first_unsorted;
+		      set_foot (p, size);
+		    }
+
+		  else
+		    {
+		      size += nextsize;
+		      set_head (p, size | PREV_INUSE);
+		      av->top = p;
+		    }
+		}
+	      while ((p = nextp) != 0);
+	    }
+	}
+      while (fb++ != maxfb);
+    }
+  else
+    {
+      malloc_init_state (av);
+      check_malloc_state (av);
+    }
 }
 
 /*
-  ------------------------------ realloc ------------------------------
-*/
+   ------------------------------ realloc ------------------------------
+ */
 
-void*
-_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
-	     INTERNAL_SIZE_T nb)
+void *
+_int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
+	      INTERNAL_SIZE_T nb)
 {
-  mchunkptr        newp;            /* chunk to return */
-  INTERNAL_SIZE_T  newsize;         /* its size */
-  void*          newmem;          /* corresponding user mem */
+  mchunkptr newp;           /* chunk to return */
+  INTERNAL_SIZE_T newsize;       /* its size */
+  void *newmem;           /* corresponding user mem */
 
-  mchunkptr        next;            /* next contiguous chunk after oldp */
+  mchunkptr next;           /* next contiguous chunk after oldp */
 
-  mchunkptr        remainder;       /* extra space at end of newp */
-  unsigned long    remainder_size;  /* its size */
+  mchunkptr remainder;           /* extra space at end of newp */
+  unsigned long remainder_size;     /* its size */
 
-  mchunkptr        bck;             /* misc temp for linking */
-  mchunkptr        fwd;             /* misc temp for linking */
+  mchunkptr bck;             /* misc temp for linking */
+  mchunkptr fwd;             /* misc temp for linking */
 
-  unsigned long    copysize;        /* bytes to copy */
-  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
-  INTERNAL_SIZE_T* s;               /* copy source */
-  INTERNAL_SIZE_T* d;               /* copy destination */
+  unsigned long copysize;       /* bytes to copy */
+  unsigned int ncopies;          /* INTERNAL_SIZE_T words to copy */
+  INTERNAL_SIZE_T *s;          /* copy source */
+  INTERNAL_SIZE_T *d;          /* copy destination */
 
   const char *errstr = NULL;
 

