This is the mail archive of the binutils-cvs@sourceware.org mailing list for the binutils project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[binutils-gdb/binutils-2_25-branch] Rewrite relro adjusting code


https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=c949cbca06f933420f53f3dcc755e42772fb3071

commit c949cbca06f933420f53f3dcc755e42772fb3071
Author: Alan Modra <amodra@gmail.com>
Date:   Wed Apr 22 22:46:19 2015 +0930

    Rewrite relro adjusting code
    
    The linker tries to put the end of the last section in the relro
    segment exactly on a page boundary, because the relro segment itself
    must end on a page boundary.  If for any reason this can't be done,
    padding is inserted.  Since the end of the relro segment is typically
    between .got and .got.plt, padding effectively increases the size of
    the GOT.  This isn't nice for targets and code models with limited GOT
    addressing.
    
    The problem with the current code is that it doesn't cope very well
    with aligned sections in the relro segment.  When making .got aligned
    to a 256-byte boundary for PowerPC64, I found that the initial
    alignment attempt often failed and the fallback attempt proved
    less than adequate.  This is a particular problem for PowerPC64 since the
    distance between .got and .plt affects the size of plt call stubs,
    leading to "stubs don't match calculated size" errors.
    
    So this rewrite takes a direct approach to calculating a new relro
    base.  Starting from the last section in the segment, we calculate
    where it must start to position its end on the boundary, or as near as
    possible considering alignment requirements.  The new start then
    becomes the goal for the previous section to end, and so on for all
    sections.  This of course ignores the possibility that user scripts
    will place . = ALIGN(xxx); in the relro segment, or provide section
    address expressions.  In those cases we might fail, but the old code
    probably did too, and a fallback is provided.
    
    ld/
    	* ldexp.h (struct ldexp_control): Delete dataseg.min_base.  Add
    	data_seg.relro_offset.
    	* ldexp.c (fold_binary <DATA_SEGMENT_ALIGN>): Don't set min_base.
    	(fold_binary <DATA_SEGMENT_RELRO_END>): Do set relro_offset.
    	* ldlang.c (lang_size_sections): Rewrite code adjusting relro
    	segment base to line up last section on page boundary.

Diff:
---
 ld/ChangeLog | 11 +++++++++
 ld/ldexp.c   |  2 +-
 ld/ldexp.h   |  2 +-
 ld/ldlang.c  | 76 ++++++++++++++++++++++++++++--------------------------------
 4 files changed, 49 insertions(+), 42 deletions(-)

diff --git a/ld/ChangeLog b/ld/ChangeLog
index 1f34f5e..a977fea 100644
--- a/ld/ChangeLog
+++ b/ld/ChangeLog
@@ -1,3 +1,14 @@
+2015-07-10  Alan Modra  <amodra@gmail.com>
+
+	Apply from master.
+	2015-04-22  Alan Modra  <amodra@gmail.com>
+	* ldexp.h (struct ldexp_control): Delete dataseg.min_base.  Add
+	data_seg.relro_offset.
+	* ldexp.c (fold_binary <DATA_SEGMENT_ALIGN>): Don't set min_base.
+	(fold_binary <DATA_SEGMENT_RELRO_END>): Do set relro_offset.
+	* ldlang.c (lang_size_sections): Rewrite code adjusting relro
+	segment base to line up last section on page boundary.
+
 2015-02-11  Alan Modra  <amodra@gmail.com>
 
 	Apply from master.
diff --git a/ld/ldexp.c b/ld/ldexp.c
index b4af893..a8e6d5f 100644
--- a/ld/ldexp.c
+++ b/ld/ldexp.c
@@ -565,7 +565,6 @@ fold_binary (etree_type *tree)
 		  else if (expld.dataseg.phase == exp_dataseg_none)
 		    {
 		      expld.dataseg.phase = exp_dataseg_align_seen;
-		      expld.dataseg.min_base = expld.dot;
 		      expld.dataseg.base = expld.result.value;
 		      expld.dataseg.pagesize = commonpage;
 		      expld.dataseg.maxpagesize = maxpage;
@@ -579,6 +578,7 @@ fold_binary (etree_type *tree)
 
 	case DATA_SEGMENT_RELRO_END:
 	  expld.dataseg.relro = exp_dataseg_relro_end;
+	  expld.dataseg.relro_offset = expld.result.value;
 	  if (expld.phase == lang_first_phase_enum
 	      || expld.section != bfd_abs_section_ptr)
 	    expld.result.valid_p = FALSE;
diff --git a/ld/ldexp.h b/ld/ldexp.h
index fa0dada..3e9f179 100644
--- a/ld/ldexp.h
+++ b/ld/ldexp.h
@@ -156,7 +156,7 @@ struct ldexp_control {
   struct {
     enum phase_enum phase;
 
-    bfd_vma base, min_base, relro_end, end, pagesize, maxpagesize;
+    bfd_vma base, relro_offset, relro_end, end, pagesize, maxpagesize;
 
     enum relro_enum relro;
 
diff --git a/ld/ldlang.c b/ld/ldlang.c
index 2433acf..733b6cb 100644
--- a/ld/ldlang.c
+++ b/ld/ldlang.c
@@ -5379,55 +5379,51 @@ lang_size_sections (bfd_boolean *relax, bfd_boolean check_regions)
   if (expld.dataseg.phase == exp_dataseg_end_seen
       && link_info.relro && expld.dataseg.relro_end)
     {
-      /* If DATA_SEGMENT_ALIGN DATA_SEGMENT_RELRO_END pair was seen, try
-	 to put expld.dataseg.relro_end on a (common) page boundary.  */
-      bfd_vma min_base, relro_end, maxpage;
+      bfd_vma initial_base, relro_end, desired_end;
+      asection *sec;
 
-      expld.dataseg.phase = exp_dataseg_relro_adjust;
-      maxpage = expld.dataseg.maxpagesize;
-      /* MIN_BASE is the absolute minimum address we are allowed to start the
-	 read-write segment (byte before will be mapped read-only).  */
-      min_base = (expld.dataseg.min_base + maxpage - 1) & ~(maxpage - 1);
-      expld.dataseg.base += (-expld.dataseg.relro_end
-			     & (expld.dataseg.pagesize - 1));
       /* Compute the expected PT_GNU_RELRO segment end.  */
       relro_end = ((expld.dataseg.relro_end + expld.dataseg.pagesize - 1)
 		   & ~(expld.dataseg.pagesize - 1));
-      if (min_base + maxpage < expld.dataseg.base)
-	{
-	  expld.dataseg.base -= maxpage;
-	  relro_end -= maxpage;
-	}
+
+      /* Adjust by the offset arg of DATA_SEGMENT_RELRO_END.  */
+      desired_end = relro_end - expld.dataseg.relro_offset;
+
+      /* For sections in the relro segment..  */
+      for (sec = link_info.output_bfd->section_last; sec; sec = sec->prev)
+	if (!IGNORE_SECTION (sec)
+	    && sec->vma >= expld.dataseg.base
+	    && sec->vma < expld.dataseg.relro_end - expld.dataseg.relro_offset)
+	  {
+	    /* Where do we want to put this section so that it ends as
+	       desired?  */
+	    bfd_vma start = sec->vma;
+	    bfd_vma end = start + sec->size;
+	    bfd_vma bump = desired_end - end;
+	    /* We'd like to increase START by BUMP, but we must heed
+	       alignment so the increase might be less than optimum.  */
+	    start += bump & ~(((bfd_vma) 1 << sec->alignment_power) - 1);
+	    /* This is now the desired end for the previous section.  */
+	    desired_end = start;
+	  }
+
+      expld.dataseg.phase = exp_dataseg_relro_adjust;
+      ASSERT (desired_end >= expld.dataseg.base);
+      initial_base = expld.dataseg.base;
+      expld.dataseg.base = desired_end;
       lang_reset_memory_regions ();
       one_lang_size_sections_pass (relax, check_regions);
+
       if (expld.dataseg.relro_end > relro_end)
 	{
-	  /* The alignment of sections between DATA_SEGMENT_ALIGN
-	     and DATA_SEGMENT_RELRO_END can cause excessive padding to
-	     be inserted at DATA_SEGMENT_RELRO_END.  Try to start a
-	     bit lower so that the section alignments will fit in.  */
-	  asection *sec;
-	  unsigned int max_alignment_power = 0;
-
-	  /* Find maximum alignment power of sections between
-	     DATA_SEGMENT_ALIGN and DATA_SEGMENT_RELRO_END.  */
-	  for (sec = link_info.output_bfd->sections; sec; sec = sec->next)
-	    if (sec->vma >= expld.dataseg.base
-		&& sec->vma < expld.dataseg.relro_end
-		&& sec->alignment_power > max_alignment_power)
-	      max_alignment_power = sec->alignment_power;
-
-	  if (((bfd_vma) 1 << max_alignment_power) < expld.dataseg.pagesize)
-	    {
-	      /* Aligning the adjusted base guarantees the padding
-		 between sections won't change.  This is better than
-		 simply subtracting 1 << max_alignment_power which is
-		 what we used to do here.  */
-	      expld.dataseg.base &= ~((1 << max_alignment_power) - 1);
-	      lang_reset_memory_regions ();
-	      one_lang_size_sections_pass (relax, check_regions);
-	    }
+	  /* Assignments to dot, or to output section address in a
+	     user script have increased padding over the original.
+	     Revert.  */
+	  expld.dataseg.base = initial_base;
+	  lang_reset_memory_regions ();
+	  one_lang_size_sections_pass (relax, check_regions);
 	}
+
       link_info.relro_start = expld.dataseg.base;
       link_info.relro_end = expld.dataseg.relro_end;
     }


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]