This is the mail archive of the gdb-patches@sourceware.org mailing list for the GDB project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns


On 05/18/2011 01:14 AM, Ulrich Weigand wrote:
> Yao Qi wrote:
> 

>> +static int
>> +thumb2_copy_preload_reg (struct gdbarch *gdbarch, uint16_t insn1,
>> +			 uint16_t insn2, struct regcache *regs,
>> +			 struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int rn = bits (insn1, 0, 3);
>> +  unsigned int rm = bits (insn2, 0, 3);
>> +
>> +  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload reg",
>> +					dsc);
>> +
>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
>> +			insn1, insn1);
>> +
>> +  dsc->modinsn[0] = insn1 & 0xfff0;
>> +  dsc->modinsn[1] = (insn2 & 0xfff0) | 0x1;
>> +  dsc->numinsns = 2;
>> +
>> +  install_preload_reg (gdbarch, regs, dsc, rn, rm);
>> +  return 0;
>> +}
> 
> Handling of preload instructions seems wrong for a couple of reasons:
> 
> - In Thumb mode, PLD/PLI with register offset must not use PC as offset
>   register, so those can just be copied unmodified.  The only instructions
>   to be treated specially are the "literal" variants, which do encode
>   PC-relative offsets.
> 
>   This means a separate thumb2_copy_preload_reg shouldn't be needed.
> 

Right.  thumb2_copy_preload_reg is removed.

> - However, you cannot just transform a PLD/PLI "literal" (i.e. PC + immediate)
>   into an "immediate" (i.e. register + immediate) version, since in Thumb
>   mode the "literal" version supports a 12-bit immediate, while the immediate
>   version only supports an 8-bit immediate.
> 
>   I guess you could either add the immediate to the PC during preparation
>   stage and then use an "immediate" instruction with immediate zero, or
>   else load the immediate into a second register and use a "register"
>   version of the instruction.
> 

The former may not be correct.  PC should be set at the address of `copy
area' in displaced stepping, instead of any other arbitrary values.  The
alternative to the former approach is to compute the new immediate value
according to the new PC value we will set (new PC value is
dsc->scratch_base).  However, in this way, we have to worry about the
overflow of new computed 12-bit immediate.

The latter one sounds better, because we don't have to worry about
overflow problem, and cleanup_preload can be still used as cleanup
routine in this case.

> 
>> +static int
>> +thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
>> +			      uint16_t insn2, struct regcache *regs,
>> +			      struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int rn = bits (insn1, 0, 3);
>> +
>> +  if (rn == ARM_PC_REGNUM)
>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +					"copro load/store", dsc);
>> +
>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
>> +			"load/store insn %.4x%.4x\n", insn1, insn2);
>> +
>> +  dsc->modinsn[0] = insn1 & 0xfff0;
>> +  dsc->modinsn[1] = insn2;
>> +  dsc->numinsns = 2;
> 
> This doesn't look right: you're replacing the RN register if it is anything
> *but* 15 -- but those cases do not need to be replaced!
> 

Oh, sorry, it is a logic error.  The code should be like

if (rn != ARM_PC_REGNUM)
  return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "copro
load/store", dsc);

>> +static int
>> +thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
>> +		      uint16_t insn2, struct regcache *regs,
>> +		      struct displaced_step_closure *dsc)
>> +
>> +  if (!link && !exchange) /* B */
>> +    {
>> +      cond = bits (insn1, 6, 9);
> 
> Only encoding T3 has condition bits, not T4.
> 

Oh, right.  Fixed.

>> +
>> +  dsc->modinsn[0] = THUMB_NOP;
>> +
>> +  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, 1, offset);
> 
> Why do you always pass 1 for link?  Shouldn't "link" be passed?
> 

"link" should be passed.  Fixed.

>> +static int
>> +thumb2_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn1,
>> +		     uint16_t insn2, struct regcache *regs,
>> +		     struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int op2 = bits (insn2, 4, 7);
>> +  int is_mov = (op2 == 0x0);
>> +  unsigned int rn, rm, rd;
>> +
>> +  rn = bits (insn1, 0, 3); /* Rn */
>> +  rm = bits (insn2, 0, 3); /* Rm */
>> +  rd = bits (insn2, 8, 11); /* Rd */
>> +
>> +  /* In Thumb-2, rn, rm and rd can't be r15.  */
> This isn't quite true ... otherwise we wouldn't need the routine at all.

This line of comment is out of date.  Remove it.

>> +  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM
>> +      && rd != ARM_PC_REGNUM)
>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU reg", dsc);
>> +
>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
>> +			"ALU", insn1, insn2);
>> +
>> +  if (is_mov)
>> +    dsc->modinsn[0] = insn1;
>> +  else
>> +    dsc->modinsn[0] = ((insn1 & 0xfff0) | 0x1);
>> +
>> +  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x2);
>> +  dsc->numinsns = 2;
> 
> This doesn't look right.  It looks like this function is called for all
> instructions in tables A6-22 through A6-26; those encodings differ
> significantly in how their fields are used.  Some of them have the
> Rn, Rm, Rd fields as above, but others just have some of them.  For
> some, a register field content of 15 does indeed refer to the PC and
> needs to be replaced; for others a register field content of 15 means
> instead that a different operation is to be performed (e.g. ADD vs TST,
> EOR vs TEQ ...) and so it must *not* be replaced; and for yet others,
> a register field content of 15 is unpredictable.
> 
> In fact, I think only a very small number of instructions in this
> category actually may refer to the PC (only MOV?), so there needs
> to the be more instruction decoding to actually identify those.
> 

thumb2_copy_alu_reg is called for two groups of instructions,
1.  A6.3.11 Data-processing (shifted register)
2.  A6.3.12 Data-processing (register)

PC is not used in group #2.  Even in group #1, PC is only used in MOV.
This routine thumb2_copy_alu_reg is deleted, and
thumb2_decode_dp_shift_reg is added to decode group #1.

>>  static int
>> +thumb2_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint16_t insn1,
>> +			       uint16_t insn2,  struct regcache *regs,
>> +			       struct displaced_step_closure *dsc,
>> +			       int load, int byte, int usermode, int writeback)
> 
> Hmmm ... this function is called for *halfwords* as well, not just for
> bytes and words.  This means the "byte" operand is no longer sufficient
> to uniquely determine the size -- note that when calling down to the
> install_ routine, xfersize is always set to 1 or 4.
> 

I thought "halfword" could be treated as "word" in this case, so I didn't
distinguish them.  I rename "thumb2_copy_ldr_str_ldrb_strb" to
"thumb2_copy_load_store", and change parameter BYTE to SIZE.  The install_
routine and arm_ routine are updated as well.

>> +{
>> +  int immed = !bit (insn1, 9);
>> +  unsigned int rt = bits (insn2, 12, 15);
>> +  unsigned int rn = bits (insn1, 0, 3);
>> +  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
>> +
>> +  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
> rm shouldn't be checked if immed is true

Fixed.

>> +    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
>> +					dsc);


>> +/* Decode extension register load/store.  Exactly the same as
>> +   arm_decode_ext_reg_ld_st.  */
>> +
>> +static int
>> +thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
>> +			     uint16_t insn2,  struct regcache *regs,
>> +			     struct displaced_step_closure *dsc)
>> +{
>> +
>> +    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
>> +    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
>> +      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
> 
> See the comment at thumb2_copy_copro_load_store: since that function will
> always copy the instruction unmodified, so can this function.
> 
> 

As we discussed VLDR may still use PC, so call
thumb_copy_unmodified_32bit for VSTR in my new patch.

>> +static int
>> +thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
>> +			 uint16_t insn2, struct regcache *regs,
>> +			 struct displaced_step_closure *dsc)
>> +{
[...]
> 
> See above ... I don't think any of those instructions can ever use the PC
> in Thumb mode, so this can be simplified.
> 

It is simplified to some extent in new patch.

>> +	}
>> +    }
>> +  else
>> +    {
>> +      unsigned int op = bit (insn2, 4);
>> +      unsigned int bit_8 = bit (insn1, 8);
>> +
>> +      if (bit_8) /* Advanced SIMD */
>> +	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +					    "neon", dsc);
>> +      else
>> +	{
>> +	  /*coproc is 101x.  */
>> +	  if ((coproc & 0xe) == 0xa)
>> +	    {
>> +	      if (op) /* 8,16,32-bit xfer.  */
>> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						    "neon 8/16/32 bit xfer",
>> +						    dsc);
>> +	      else /* VFP data processing.  */
>> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						    "vfp dataproc", dsc);
>> +	    }
>> +	  else
>> +	    {
>> +	      if (op)
>> +		{
>> +		  if (bit_4) /* MRC/MRC2 */
>> +		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +							"mrc/mrc2", dsc);
>> +		  else /* MCR/MCR2 */
>> +		     return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +							"mcr/mcr2", dsc);
>> +		}
>> +	      else /* CDP/CDP 2 */
>> +		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						    "cdp/cdp2", dsc);
>> +	    }
> 
> Likewise I'm not sure there is any need to decode to such depth, if the
> instruction in the end all can be copied unmodified.

OK.  Patch length can be reduced then.

>>  static int
>> +thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, struct regcache *regs,
>> +			      struct displaced_step_closure *dsc,
>> +			      int rd, unsigned int imm)
>> +{
>> +  /* Encoding T3: ADDS Rd, Rd, #imm */
> Why do you refer to ADDS?  The instruction you generate is ADD (with no S bit),
> which is actually correct -- so it seems just the comment is wrong.

It is a mistake in comment.  ADR doesn't update flags, we don't have S
bit in ADD.

>> +  dsc->modinsn[0] = (0xf100 | rd);
>> +  dsc->modinsn[1] = (0x0 | (rd << 8) | imm);
>> +
>> +  dsc->numinsns = 2;
>> +
>> +  install_pc_relative (gdbarch, regs, dsc, rd);
>> +
>> +  return 0;
>> +}
>> +
>> +static int
>> +thumb_decode_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
>> +				uint16_t insn2, struct regcache *regs,
>> +				struct displaced_step_closure *dsc)
>> +{
>> +  unsigned int rd = bits (insn2, 8, 11);
>> +  /* Since immeidate has the same encoding in both ADR and ADDS, so we simply
> typo
>> +     extract raw immediate encoding rather than computing immediate.  When
>> +     generating ADDS instruction, we can simply perform OR operation to set
>> +     immediate into ADDS.  */
> See above for ADDS vs. ADD.

s/ADDS/ADD/ in comments.

>> +  unsigned int imm = (insn2 & 0x70ff) | (bit (insn1, 10) << 26);
> 
> The last bit will get lost, since thumb_copy_pc_relative_32bit only or's
> the value to the second 16-bit halfword.

Then we have to separately set bit 10 (the i bit) in dsc->modinsn[0]
according to the original insn1's i bit.


>> +  if (debug_displaced)
>> +    fprintf_unfiltered (gdb_stdlog,
>> +			"displaced: copying thumb adr r%d, #%d insn %.4x%.4x\n",
>> +			rd, imm, insn1, insn2);
>> +
>> +  return thumb_copy_pc_relative_32bit (gdbarch, regs, dsc, rd, imm);
>> +}
> 
> B.t.w. I think the distinction between a _decode_ and a _copy_ routine is
> pointless in this case since the _decode_ routine is only ever called for
> one single instruction that matches ... it doesn't actually decode anything.
> 

thumb_decode_pc_relative_32bit is merged to thumb_copy_pc_relative_32bit.

> 
>> +static int
>> +decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
>> +				 uint16_t insn1, uint16_t insn2,
>> +				 struct regcache *regs,
>> +				 struct displaced_step_closure *dsc)
>> +{
>> +  int rd = bits (insn2, 12, 15);
>> +  int user_mode = (bits (insn2, 8, 11) == 0xe);
>> +  int err = 0;
>> +  int writeback = 0;
>> +
>> +  switch (bits (insn1, 5, 6))
>> +    {
>> +    case 0: /* Load byte and memory hints */
>> +      if (rd == 0xf) /* PLD/PLI */
>> +	{
>> +	  if (bits (insn2, 6, 11))
> This check doesn't look right to me.

This part is re-written.

>> +	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
>> +	  else
>> +	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
> 
> In any case, see the comments above on handling preload instructions.  You
> should only need to handle the "literal" variants.
> 

Right.  thumb2_copy_preload_reg is removed, and this part of code is
adjusted as well.

> 
> 
>>  static void
>>  thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
>>  				    uint16_t insn2, struct regcache *regs,
>>  				    struct displaced_step_closure *dsc)
>>  {
>> -  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
>> +  int err = 0;
>> +  unsigned short op = bit (insn2, 15);
>> +  unsigned int op1 = bits (insn1, 11, 12);
>> +
>> +  switch (op1)
>> +    {
>> +    case 1:
>> +      {
>> +	switch (bits (insn1, 9, 10))
>> +	  {
>> +	  case 0: /* load/store multiple */
>> +	    switch (bits (insn1, 7, 8))
>> +	      {
>> +	      case 0: case 3: /* SRS, RFE */
>> +		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
>> +						   "srs/rfe", dsc);
>> +		break;
>> +	      case 1: case 2: /* LDM/STM/PUSH/POP */
>> +		/* These Thumb 32-bit insns have the same encodings as ARM
>> +		   counterparts.  */
> "same encodings" isn't quite true ...

This line of comment is out of date.  Removed.

>> +		err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
>> +	      }
>> +	    break;
> 
> Hmm, it seems this case is missing code to handle the load/store dual,
> load/store exclusive, and table branch instructions (page A6-24 / table A6-17);
> there should be a check whether bit 6 is zero or one somewhere.
> 

routine thumb2_copy_table_branch is added to handle table branch
instructions.  load/store dual and load/store exclusive don't use PC, so
they are copy-unmodified.

>> +	  case 1:
>> +	    /* Data-processing (shift register).  In ARM archtecture reference
>> +	       manual, this entry is
>> +	       "Data-processing (shifted register) on page A6-31".  However,
>> +	    instructions in table A6-31 shows that they are `alu_reg'
>> +	    instructions.  There is no alu_shifted_reg instructions in
>> +	    Thumb-2.  */
> 
> Well ... they are not *register*-shifted register instructions like
> there are in ARM mode (i.e. register shifted by another register),
> but they are still *shifted* register instructions (i.e. register
> shifted by an immediate).
> 

Thanks for the clarification.  Only leave the 1st sentence of comment,
and remove the rest of them.

>> +	    err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs,
>> +					       dsc);
> (see comments at that function ...)

Add a new function thumb2_decode_dp_shift_reg and call it here.

>> +	    break;
>> +	  default: /* Coprocessor instructions */
>> +	    /* Thumb 32bit coprocessor instructions have the same encoding
>> +	       as ARM's.  */
> (see above as to "same encoding" ... also, some ARM coprocessor instruction
> may in fact use the PC, while no Thumb coprocessor instruction can ... so
> there is probably no need to decode them further at this point)

As we discussed, STC/LDC/VLDR may still use PC.  Leave it there.

> 
>> +    case 2: /* op1 = 2 */
>> +      if (op) /* Branch and misc control.  */
>> +	{
>> +	  if (bit (insn2, 14)) /* BLX/BL */
>> +	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
>> +	  else if (!bits (insn2, 12, 14) && bits (insn1, 8, 10) != 0x7)
> I don't understand this condition, but it looks wrong to me ...
> 

This condition is about "Conditional Branch".  The 2nd half of condition
should be "bits (insn1, 7, 9) != 0x7", corresponding to the first line
of table A6-13 "op1 = 0x0, op is not x111xxx".

>> +	      else /* Store single data item */
>> +		{
>> +		  int user_mode = (bits (insn2, 8, 11) == 0xe);
>> +		  int byte = (bits (insn1, 5, 7) == 0
>> +			      || bits (insn1, 5, 7) == 4);
>> +		  int writeback = 0;
>> +
>> +		  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
>> +		    writeback = bit (insn2, 8);
> 
> If things get this complicated, a decode routine might be appropriate.

OK, move these logics into a new function
"decode_thumb_32bit_store_single_data_item".

Note that patch sits on top of this patch,

  [patch] refactor arm-tdep.c:install_ldr_str_ldrb_strb to handle halfword
  http://sourceware.org/ml/gdb-patches/2011-07/msg00183.html

-- 
Yao
         Support displaced stepping for Thumb 32-bit insns.

         * arm-tdep.c (thumb_copy_unmodified_32bit): New.
         (thumb2_copy_preload): New.
         (thumb2_copy_copro_load_store): New.
         (thumb2_copy_b_bl_blx): New.
         (thumb2_copy_alu_imm): New.
         (thumb2_copy_load_store): New.
         (thumb2_copy_block_xfer): New.
         (thumb_32bit_copy_undef): New.
         (thumb_32bit_copy_unpred): New.
         (thumb2_decode_ext_reg_ld_st): New.
         (thumb2_decode_svc_copro): New.
         (decode_thumb_32bit_store_single_data_item): New.
         (thumb_copy_pc_relative_32bit): New.
         (thumb_decode_pc_relative_32bit): New.
         (decode_thumb_32bit_ld_mem_hints): New.
         (thumb2_copy_table_branch): New.
         (thumb_process_displaced_32bit_insn): Process Thumb 32-bit
         instructions.
---
 gdb/arm-tdep.c |  840 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 839 insertions(+), 1 deletions(-)

diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index b0074bd..bd92193 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5341,6 +5341,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2, const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
+			"opcode/class '%s' unmodified\n", insn1, insn2,
+			iname);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
    modification.  */
 static int
@@ -5408,6 +5425,54 @@ arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int u_bit = bit (insn1, 7);
+  int imm12 = bits (insn2, 0, 11);
+  ULONGEST pc_val;
+
+  if (rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload", dsc);
+
+  /* PC is only allowed to be used in PLI (immediate, literal) Encoding T3,
+     and PLD (literal) Encoding T1.  */
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying pld/pli pc (0x%x) %c imm12 %.4x\n",
+			(unsigned int) dsc->insn_addr, u_bit ? '+' : '-',
+			imm12);
+
+  if (!u_bit)
+    imm12 = -1 * imm12;
+
+  /* Rewrite instruction {pli/pld} PC imm12 into:
+     Prepare: tmp[0] <- r0, tmp[1] <- r1, r0 <- pc, r1 <- imm12
+
+     {pli/pld} [r0, r1]
+
+     Cleanup: r0 <- tmp[0], r1 <- tmp[1].  */
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+
+  pc_val = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
+
+  displaced_write_reg (regs, dsc, 0, pc_val, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 1, imm12, CANNOT_WRITE_PC);
+  dsc->u.preload.immed = 0;
+
+  /* {pli/pld} [r0, r1] */
+  dsc->modinsn[0] = insn1 & 0xff00;
+  dsc->modinsn[1] = 0xf001;
+  dsc->numinsns = 2;
+
+  dsc->cleanup = &cleanup_preload;
+  return 0;
+}
+
 /* Preload instructions with register offset.  */
 
 static void
@@ -5517,6 +5582,30 @@ arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
+			      uint16_t insn2, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (rn == ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					"copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.4x%.4x\n", insn1, insn2);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_copro_load_store (gdbarch, regs, dsc, bit (insn1, 9), rn);
+
+  return 0;
+}
+
 /* Clean up branch instructions (actually perform the branch, by setting
    PC).  */
 
@@ -5604,6 +5693,61 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
+		      uint16_t insn2, struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  int link = bit (insn2, 14);
+  int exchange = link && !bit (insn2, 12);
+  int cond = INST_AL;
+  long offset =0;
+  int j1 = bit (insn2, 13);
+  int j2 = bit (insn2, 11);
+  int s = sbits (insn1, 10, 10);
+  int i1 = !(j1 ^ bit (insn1, 10));
+  int i2 = !(j2 ^ bit (insn1, 10));
+
+  if (!link && !exchange) /* B */
+    {
+      offset = (bits (insn2, 0, 10) << 1);
+      if (bit (insn2, 12)) /* Encoding T4 */
+	{
+	  offset |= (bits (insn1, 0, 9) << 12)
+	    | (i2 << 22)
+	    | (i1 << 23)
+	    | (s << 24);
+	  cond = INST_AL;
+	}
+      else /* Encoding T3 */
+	{
+	  offset |= (bits (insn1, 0, 5) << 12)
+	    | (j1 << 18)
+	    | (j2 << 19)
+	    | (s << 20);
+	  cond = bits (insn1, 6, 9);
+	}
+    }
+  else
+    {
+      offset = (bits (insn1, 0, 9) << 12);
+      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
+      offset |= exchange ?
+	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s insn "
+			"%.4x %.4x with offset %.8lx\n",
+			link ? (exchange) ? "blx" : "bl" : "b",
+			insn1, insn2, offset);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
+  return 0;
+}
+
 /* Copy B Thumb instructions.  */
 static int
 thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
@@ -5767,6 +5911,58 @@ arm_copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
   return 0;
 }
 
+static int
+thumb2_copy_alu_imm (struct gdbarch *gdbarch, uint16_t insn1,
+		     uint16_t insn2, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  unsigned int op = bits (insn1, 5, 8);
+  unsigned int rn, rm, rd;
+  ULONGEST rd_val, rn_val;
+
+  rn = bits (insn1, 0, 3); /* Rn */
+  rm = bits (insn2, 0, 3); /* Rm */
+  rd = bits (insn2, 8, 11); /* Rd */
+
+  /* This routine is only called for instruction MOV.  */
+  gdb_assert (op == 0x2 && rn == 0xf);
+
+  if (rm != ARM_PC_REGNUM && rd != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU imm", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
+			"ALU", insn1, insn2);
+
+  /* Instruction is of form:
+
+     <op><cond> rd, [rn,] #imm
+
+     Rewrite as:
+
+     Preparation: tmp1, tmp2 <- r0, r1;
+		  r0, r1 <- rd, rn
+     Insn: <op><cond> r0, r1, #imm
+     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
+  */
+
+  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
+  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
+  rn_val = displaced_read_reg (regs, dsc, rn);
+  rd_val = displaced_read_reg (regs, dsc, rd);
+  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
+  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
+  dsc->rd = rd;
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x1);
+  dsc->numinsns = 2;
+
+  dsc->cleanup = &cleanup_alu_imm;
+
+  return 0;
+}
+
 /* Copy/cleanup arithmetic/logic insns with register RHS.  */
 
 static void
@@ -6135,6 +6331,69 @@ install_load_store (struct gdbarch *gdbarch, struct regcache *regs,
 }
 
 static int
+thumb2_copy_load_store (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc, int load, int size,
+			int usermode, int writeback)
+{
+  int immed = !bit (insn1, 9);
+  unsigned int rt = bits (insn2, 12, 15);
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
+
+  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM
+      && (immed || rm != ARM_PC_REGNUM))
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.4x%.4x\n",
+			load ? (size == 1 ? "ldrb" : (size == 2 ? "ldrh" : "ldr"))
+			: (size == 1 ? "strb" : (size == 2 ? "strh" : "str")),
+			usermode ? "t" : "",
+			rt, rn, insn1, insn2);
+
+  install_load_store (gdbarch, regs, dsc, load, immed, writeback, size,
+		      usermode, rt, rm, rn);
+
+  if (load || rt != ARM_PC_REGNUM)
+    {
+      dsc->u.ldst.restore_r4 = 0;
+
+      if (immed)
+	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = insn2 & 0x0fff;
+	}
+      else
+	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, r3].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
+	}
+
+      dsc->numinsns = 2;
+    }
+  else
+    {
+      /* In Thumb-32 instructions, the behavior is unpredictable when Rt is
+	 PC, while the behavior is undefined when Rn is PC.  In short, neither
+	 Rt nor Rn can be PC.  */
+
+      gdb_assert (0);
+    }
+
+  return 0;
+}
+
+
+static int
 arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
 			    struct regcache *regs,
 			    struct displaced_step_closure *dsc,
@@ -6524,6 +6783,87 @@ arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc)
+{
+  int rn = bits (insn1, 0, 3);
+  int load = bit (insn1, 4);
+  int writeback = bit (insn1, 5);
+
+  /* Block transfers which don't mention PC can be run directly
+     out-of-line.  */
+  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
+
+  if (rn == ARM_PC_REGNUM)
+    {
+      warning (_("displaced: Unpredictable LDM or STM with "
+		 "base register r15"));
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "unpredictable ldm/stm", dsc);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
+			"%.4x%.4x\n", insn1, insn2);
+
+  /* Clear bit 13, since it should always be zero.  */
+  dsc->u.block.regmask = (insn2 & 0xdfff);
+  dsc->u.block.rn = rn;
+
+  dsc->u.block.load = bit (insn1, 4);
+  dsc->u.block.user = bit (insn1, 6);
+  dsc->u.block.increment = bit (insn1, 7);
+  dsc->u.block.before = bit (insn1, 8);
+  dsc->u.block.writeback = writeback;
+  dsc->u.block.cond = INST_AL;
+
+  if (load)
+    {
+      if (dsc->u.block.regmask == 0xffff)
+	{
+	  /* This branch cannot happen.  */
+	  gdb_assert (0);
+	}
+      else
+	{
+	  unsigned int regmask = dsc->u.block.regmask;
+	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
+	  unsigned int to = 0, from = 0, i, new_rn;
+
+	  for (i = 0; i < num_in_list; i++)
+	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
+
+	  if (writeback)
+	    insn1 &= ~(1 << 5);
+
+	  new_regmask = (1 << num_in_list) - 1;
+
+	  if (debug_displaced)
+	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
+				"{..., pc}: original reg list %.4x, modified "
+				"list %.4x\n"), rn, writeback ? "!" : "",
+				(int) dsc->u.block.regmask, new_regmask);
+
+	  dsc->modinsn[0] = insn1;
+	  dsc->modinsn[1] = (new_regmask & 0xffff);
+	  dsc->numinsns = 2;
+
+	  dsc->cleanup = &cleanup_block_load_pc;
+	}
+    }
+  else
+    {
+      dsc->modinsn[0] = insn1;
+      dsc->modinsn[1] = insn2;
+      dsc->numinsns = 2;
+      dsc->cleanup = &cleanup_block_store_pc;
+    }
+  return 0;
+}
+
 /* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
    for Linux, where some SVC instructions must be treated specially.  */
 
@@ -6609,6 +6949,23 @@ arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+                       struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
+                       "%.4x %.4x\n", (unsigned short) insn1,
+                       (unsigned short) insn2);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* Copy unpredictable instructions.  */
 
 static int
@@ -6624,6 +6981,23 @@ arm_copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
   return 0;
 }
 
+static int
+thumb_32bit_copy_unpred (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
+			"%.4x %.4x\n", (unsigned short) insn1,
+			(unsigned short) insn2);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
 /* The decode_* functions are instruction decoding helpers.  They mostly follow
    the presentation in the ARM ARM.  */
 
@@ -7005,6 +7379,91 @@ arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
   return 1;
 }
 
+/* Decode shifted register instructions.  */
+
+static int
+thumb2_decode_dp_shift_reg (struct gdbarch *gdbarch, uint16_t insn1,
+			    uint16_t insn2,  struct regcache *regs,
+			    struct displaced_step_closure *dsc)
+{
+  /* Data processing (shift register) instructions can be grouped according to
+     their encodings:
+
+     1. Insn X Rn :inst1,3-0 Rd: insn2,8-11, Rm: insn2,3-0. Rd=15 & S=1, Insn Y.
+     Rn != PC, Rm ! = PC.
+     X: AND, Y: TST (REG)
+     X: EOR, Y: TEQ (REG)
+     X: ADD, Y: CMN (REG)
+     X: SUB, Y: CMP (REG)
+
+     2. Insn X Rn : insn1,3-0, Rm: insn2, 3-0; Rm != PC, Rn != PC
+     Insn X: TST, TEQ, PKH, CMN, and CMP.
+
+     3. Insn X Rn:inst1,3-0 Rd:insn2,8-11, Rm:insn2, 3-0. Rn != PC, Rd != PC,
+     Rm != PC.
+     X: BIC, ADC, SBC, and RSB.
+
+     4. Insn X Rn:inst1,3-0 Rd:insn2,8-11, Rm:insn2,3-0.  Rd = 15, Insn Y.
+     X: ORR, Y: MOV (REG).
+     X: ORN, Y: MVN (REG).
+
+     5.  Insn X Rd: insn2, 8-11, Rm: insn2, 3-0.
+     X: MVN, Rd != PC, Rm != PC
+     X: MOV: Rd/Rm can be PC.
+
+     PC is only allowed to be used in instruction MOV.
+*/
+
+  unsigned int op = bits (insn1, 5, 8);
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (op == 0x2 && rn == 0xf) /* MOV */
+    return thumb2_copy_alu_imm (gdbarch, insn1, insn2, regs, dsc);
+  else
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					"dp (shift reg)", dsc);
+}
+
+
+/* Decode extension register load/store.  Exactly the same as
+   arm_decode_ext_reg_ld_st.  */
+
+static int
+thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2,  struct regcache *regs,
+			     struct displaced_step_closure *dsc)
+{
+  unsigned int opcode = bits (insn1, 4, 8);
+
+  switch (opcode)
+    {
+    case 0x04: case 0x05:
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vmov", dsc);
+
+    case 0x08: case 0x0c: /* 01x00 */
+    case 0x0a: case 0x0e: /* 01x10 */
+    case 0x12: case 0x16: /* 10x10 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vstm/vpush", dsc);
+
+    case 0x09: case 0x0d: /* 01x01 */
+    case 0x0b: case 0x0f: /* 01x11 */
+    case 0x13: case 0x17: /* 10x11 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vldm/vpop", dsc);
+
+    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vstr", dsc);
+    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
+      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
+    }
+
+  /* Should be unreachable.  */
+  return 1;
+}
+
 static int
 arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
 		      struct regcache *regs, struct displaced_step_closure *dsc)
@@ -7051,6 +7510,49 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
     return arm_copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.  */
 }
 
+static int
+thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int coproc = bits (insn2, 8, 11);
+  unsigned int op1 = bits (insn1, 4, 9);
+  unsigned int bit_5_8 = bits (insn1, 5, 8);
+  unsigned int bit_9 = bit (insn1, 9);
+  unsigned int bit_4 = bit (insn1, 4);
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (bit_9 == 0)
+    {
+      if (bit_5_8 == 2)
+	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					    "neon 64bit xfer/mrrc/mrrc2/mcrr/mcrr2",
+					    dsc);
+      else if (bit_5_8 == 0) /* UNDEFINED.  */
+	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      else
+	{
+	  /* coproc is 101x.  SIMD/VFP, ext registers load/store.  */
+	  if ((coproc & 0xe) == 0xa)
+	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
+						dsc);
+	  else /* coproc is not 101x.  */
+	    {
+	      if (bit_4 == 0) /* STC/STC2.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "stc/stc2", dsc);
+	      else /* LDC/LDC2 {literal, immediate}.  */
+		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+						     regs, dsc);
+	    }
+	}
+    }
+  else
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "coproc", dsc);
+
+  return 0;
+}
+
 static void
 install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
 		     struct displaced_step_closure *dsc, int rd)
@@ -7100,6 +7602,35 @@ thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
 }
 
 static int
+thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+			      uint16_t insn2, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn2, 8, 11);
+  /* Since the immediate has the same encoding in both ADR and ADD, we
+     simply extract the raw immediate encoding rather than computing the
+     immediate value.  When generating the ADD instruction, we can simply
+     OR the immediate into it.  */
+  unsigned int imm_3_8 = insn2 & 0x70ff;
+  unsigned int imm_i = insn1 & 0x0400; /* Clear all bits except bit 10.  */
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d:%d insn %.4x%.4x\n",
+			rd, imm_i, imm_3_8, insn1, insn2);
+
+  /* Encoding T3: ADD Rd, Rd, #imm */
+  dsc->modinsn[0] = (0xf100 | rd | imm_i);
+  dsc->modinsn[1] = ((rd << 8) | imm_3_8);
+
+  dsc->numinsns = 2;
+
+  install_pc_relative (gdbarch, regs, dsc, rd);
+
+  return 0;
+}
+
+static int
 thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
 			      struct regcache *regs,
 			      struct displaced_step_closure *dsc)
@@ -7181,6 +7712,51 @@ thumb_copy_cbnz_cbz (struct gdbarch *gdbarch, uint16_t insn1,
   return 0;
 }
 
+/* Copy Table Branch Byte/Halfword.  */
+static int
+thumb2_copy_table_branch (struct gdbarch *gdbarch, uint16_t insn1,
+			  uint16_t insn2, struct regcache *regs,
+			  struct displaced_step_closure *dsc)
+{
+  ULONGEST rn_val, rm_val;
+  int is_tbh = bit (insn2, 4);
+  CORE_ADDR halfwords = 0;
+  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+
+  rn_val = displaced_read_reg (regs, dsc, bits (insn1, 0, 3));
+  rm_val = displaced_read_reg (regs, dsc, bits (insn2, 0, 3));
+
+  if (is_tbh)
+    {
+      gdb_byte buf[2];
+
+      target_read_memory (rn_val + 2 * rm_val, buf, 2);
+      halfwords = extract_unsigned_integer (buf, 2, byte_order);
+    }
+  else
+    {
+      gdb_byte buf[1];
+
+      target_read_memory (rn_val + rm_val, buf, 1);
+      halfwords = extract_unsigned_integer (buf, 1, byte_order);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: %s base 0x%x offset 0x%x"
+			" halfwords 0x%x\n", is_tbh ? "tbh" : "tbb",
+			(unsigned int) rn_val, (unsigned int) rm_val,
+			(unsigned int) halfwords);
+
+  dsc->u.branch.cond = INST_AL;
+  dsc->u.branch.link = 0;
+  dsc->u.branch.exchange = 0;
+  dsc->u.branch.dest = dsc->insn_addr + 4 + 2 * halfwords;
+
+  dsc->cleanup = &cleanup_branch;
+
+  return 0;
+}
+
 static void
 cleanup_pop_pc_16bit_all (struct gdbarch *gdbarch, struct regcache *regs,
 			  struct displaced_step_closure *dsc)
@@ -7374,12 +7950,274 @@ thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 		    _("thumb_process_displaced_16bit_insn: Instruction decode error"));
 }
 
+static int
+decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
+				 uint16_t insn1, uint16_t insn2,
+				 struct regcache *regs,
+				 struct displaced_step_closure *dsc)
+{
+  int rt = bits (insn2, 12, 15);
+  int rn = bits (insn1, 0, 3);
+  int op1 = bits (insn1, 7, 8);
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int err = 0;
+  int writeback = 0;
+
+  switch (bits (insn1, 5, 6))
+    {
+    case 0: /* Load byte and memory hints */
+      if (rt == 0xf) /* PLD/PLI */
+	{
+	  if (rn == 0xf)
+	    {
+	      /* PLD literal or Encoding T3 of PLI(immediate, literal).  */
+	      return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	    }
+	  else
+	    {
+	      switch (op1)
+		{
+		case 0: case 2:
+		  if (bits (insn2, 8, 11) == 0xe
+		      || (bits (insn2, 8, 11) & 0x9) == 0x9)
+		    return thumb_32bit_copy_unpred (gdbarch, insn1, insn2, dsc);
+		  else
+		    /* PLI/PLD (register, immediate) doesn't use PC.  */
+		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"pli/pld", dsc);
+		  break;
+		case 1: /* PLD/PLDW (immediate) */
+		case 3: /* PLI (immediate, literal) */
+		  return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						      "pli/pld", dsc);
+		  break;
+
+		}
+	    }
+	}
+      else
+	{
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	  return thumb2_copy_load_store (gdbarch, insn1, insn2, regs, dsc, 1, 1,
+					 user_mode, writeback);
+	}
+
+      break;
+    case 1: /* Load halfword and memory hints.  */
+      if (rt == 0xf) /* PLD{W} and Unalloc memory hint.  */
+	{
+	  if (rn == 0xf)
+	    {
+	      if (op1 == 0 || op1 == 1)
+		return thumb_32bit_copy_unpred (gdbarch, insn1, insn2, dsc);
+	      else
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "unalloc memhint", dsc);
+	    }
+	  else
+	    {
+	      if ((op1 == 0 || op1 == 2)
+		  && (bits (insn2, 8, 11) == 0xe
+		      || ((bits (insn2, 8, 11) & 0x9) == 0x9)))
+		return thumb_32bit_copy_unpred (gdbarch, insn1, insn2, dsc);
+	      else
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "pld/unalloc memhint", dsc);
+	    }
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+	  return thumb2_copy_load_store (gdbarch, insn1, insn2, regs, dsc, 1,
+					 2, user_mode, writeback);
+	}
+      break;
+    case 2: /* Load word */
+      {
+	int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	return thumb2_copy_load_store (gdbarch, insn1, insn2, regs, dsc, 1, 4,
+				       user_mode, writeback);
+	break;
+      }
+    default:
+      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      break;
+    }
+  return 0;
+}
+
+
+static int
+decode_thumb_32bit_store_single_data_item (struct gdbarch *gdbarch,
+					   uint16_t insn1, uint16_t insn2,
+					   struct regcache *regs,
+					   struct displaced_step_closure *dsc)
+{
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int size = 0;
+  int writeback = 0;
+  int op1 = bits (insn1, 5, 7);
+
+  switch (op1)
+    {
+    case 0: case 4: size = 1; break;
+    case 1: case 5: size = 2; break;
+    case 2: case 6: size = 4; break;
+    }
+  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
+    writeback = bit (insn2, 8);
+
+  return thumb2_copy_load_store (gdbarch, insn1, insn2, regs,
+				 dsc, 0, size, user_mode,
+				 writeback);
+
+}
+
 static void
 thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
 				    uint16_t insn2, struct regcache *regs,
 				    struct displaced_step_closure *dsc)
 {
-  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+  int err = 0;
+  unsigned short op = bit (insn2, 15);
+  unsigned int op1 = bits (insn1, 11, 12);
+
+  switch (op1)
+    {
+    case 1:
+      {
+	switch (bits (insn1, 9, 10))
+	  {
+	  case 0:
+	    if (bit (insn1, 6))
+	      {
+		/* Load/store {dual, exclusive}, table branch.  */
+		if (bits (insn1, 7, 8) == 1 && bits (insn1, 4, 5) == 1
+		    && bits (insn2, 5, 7) == 0)
+		  err = thumb2_copy_table_branch (gdbarch, insn1, insn2, regs,
+						  dsc);
+		else
+		  /* PC is not allowed to be used in load/store {dual,
+		     exclusive} instructions.  */
+		  err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						     "load/store dual/ex", dsc);
+	      }
+	    else /* load/store multiple */
+	      {
+		switch (bits (insn1, 7, 8))
+		  {
+		  case 0: case 3: /* SRS, RFE */
+		    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						       "srs/rfe", dsc);
+		    break;
+		  case 1: case 2: /* LDM/STM/PUSH/POP */
+		    err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
+		    break;
+		  }
+	      }
+	    break;
+
+	  case 1:
+	    /* Data-processing (shift register).  */
+	    err = thumb2_decode_dp_shift_reg (gdbarch, insn1, insn2, regs,
+					      dsc);
+	    break;
+	  default: /* Coprocessor instructions.  */
+	    /* Thumb 32bit coprocessor instructions have the same encoding
+	       as ARM's.  */
+	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	    break;
+	  }
+      break;
+      }
+    case 2: /* op1 = 2 */
+      if (op) /* Branch and misc control.  */
+	{
+	  if (bit (insn2, 14)) /* BLX/BL */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else if (!bits (insn2, 12, 14) && bits (insn1, 7, 9) != 0x7)
+	    /* Conditional Branch */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "misc ctrl", dsc);
+	}
+      else
+	{
+	  if (bit (insn1, 9)) /* Data processing (plain binary imm).  */
+	    {
+	      int op = bits (insn1, 4, 8);
+	      int rn = bits (insn1, 0, 3);
+	      if ((op == 0 || op == 0xa) && rn == 0xf)
+		err = thumb_copy_pc_relative_32bit (gdbarch, insn1, insn2,
+						    regs, dsc);
+	      else
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "dp/pb", dsc);
+	    }
+	  else /* Data processing (modified immediate) */
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "dp/mi", dsc);
+	}
+      break;
+    case 3: /* op1 = 3 */
+      switch (bits (insn1, 9, 10))
+	{
+	case 0:
+	  if ((bits (insn1, 4, 6) & 0x5) == 0x1)
+	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
+						   regs, dsc);
+	  else
+	    {
+	      if (bit (insn1, 8)) /* NEON Load/Store */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "neon elt/struct load/store",
+						   dsc);
+	      else /* Store single data item */
+		err = decode_thumb_32bit_store_single_data_item (gdbarch,
+								 insn1, insn2,
+								 regs, dsc);
+
+	    }
+	  break;
+	case 1: /* op1 = 3, bits (9, 10) == 1 */
+	  switch (bits (insn1, 7, 8))
+	    {
+	    case 0: case 1: /* Data processing (register) */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "dp(reg)", dsc);
+	      break;
+	    case 2: /* Multiply and absolute difference */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "mul/mua/diff", dsc);
+	      break;
+	    case 3: /* Long multiply and divide */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "lmul/lmua", dsc);
+	      break;
+	    }
+	  break;
+	default: /* Coprocessor instructions */
+	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	  break;
+	}
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
+
 }
 
 static void
-- 
1.7.0.4


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]