This is the mail archive of the
gdb-patches@sourceware.org
mailing list for the GDB project.
Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
- From: Yao Qi <yao at codesourcery dot com>
- To: gdb-patches at sourceware dot org
- Date: Thu, 05 May 2011 21:24:13 +0800
- Subject: Re: [try 2nd 5/8] Displaced stepping for Thumb 32-bit insns
- References: <4D15F9B8.5070705@codesourcery.com> <4D8B4947.1000000@codesourcery.com> <4D8B4E88.9000100@codesourcery.com>
Here is the updated version.
--
Yao (齐尧)
2011-05-05 Yao Qi <yao@codesourcery.com>
Support displaced stepping for Thumb 32-bit insns.
* gdb/arm-tdep.c (thumb_copy_unmodified_32bit): New.
(thumb2_copy_preload): New.
(thumb2_copy_preload_reg): New.
(thumb2_copy_copro_load_store): New.
(thumb2_copy_b_bl_blx): New.
(thumb2_copy_alu_reg): New.
(thumb2_copy_ldr_str_ldrb_strb): New.
(thumb2_copy_block_xfer): New.
(thumb_32bit_copy_undef): New.
(thumb2_decode_ext_reg_ld_st): New.
(thumb2_decode_svc_copro): New.
(thumb_copy_pc_relative_32bit): New.
(thumb_decode_pc_relative_32bit): New.
(decode_thumb_32bit_ld_mem_hints): New.
(thumb_process_displaced_32bit_insn): Process Thumb 32-bit
instructions.
---
gdb/arm-tdep.c | 702 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 701 insertions(+), 1 deletions(-)
diff --git a/gdb/arm-tdep.c b/gdb/arm-tdep.c
index 83ac297..6fb1eaa 100644
--- a/gdb/arm-tdep.c
+++ b/gdb/arm-tdep.c
@@ -5341,6 +5341,23 @@ arm_copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
return 0;
}
+/* Copy a 32-bit Thumb instruction (halfwords INSN1/INSN2) into the
+   scratch space without any modification.  INAME names the opcode or
+   class for the debug trace.  Always returns 0 (success).  */
+static int
+thumb_copy_unmodified_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2, const char *iname,
+			     struct displaced_step_closure *dsc)
+{
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.4x %.4x, "
+			"opcode/class '%s' unmodified\n", insn1, insn2,
+			iname);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
/* Copy 16-bit Thumb(Thumb and 16-bit Thumb-2) instruction without any
modification. */
static int
@@ -5408,6 +5425,27 @@ arm_copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
return 0;
}
+/* Copy a Thumb-2 preload (PLD/PLI immediate) instruction for displaced
+   stepping.  Only the PC-relative form needs rewriting; any other base
+   register can be single-stepped unmodified.  */
+static int
+thumb2_copy_preload (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+		     struct regcache *regs, struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+
+  /* The test was inverted: only Rn == PC requires modification, so copy
+     every other base register unmodified.  */
+  if (rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
+			insn1, insn2);
+
+  /* Clear the base-register field (substituting r0); install_preload
+     arranges for the scratch register to hold the original PC value.  */
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_preload (gdbarch, regs, dsc, rn);
+
+  return 0;
+}
+
/* Preload instructions with register offset. */
static void
@@ -5456,6 +5494,30 @@ arm_copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
return 0;
}
+/* Copy a Thumb-2 preload (PLD/PLI register-offset) instruction for
+   displaced stepping.  If neither Rn nor Rm is the PC, the instruction
+   runs unmodified; otherwise the register fields are rewritten to r0/r1
+   and install_preload_reg supplies the original values.  */
+static int
+thumb2_copy_preload_reg (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);
+
+  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "preload reg",
+					dsc);
+
+  if (debug_displaced)
+    /* Fixed: the second argument printed INSN1 twice instead of
+       INSN1, INSN2.  */
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.4x%.4x\n",
+			insn1, insn2);
+
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = (insn2 & 0xfff0) | 0x1;
+  dsc->numinsns = 2;
+
+  install_preload_reg (gdbarch, regs, dsc, rn, rm);
+  return 0;
+}
+
/* Copy/cleanup coprocessor load and store instructions. */
static void
@@ -5517,6 +5579,30 @@ arm_copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
return 0;
}
+/* Copy Thumb-2 coprocessor load/store (LDC/LDC2/STC/STC2, VLDR/VSTR)
+   instructions for displaced stepping.  Only a PC-relative base needs
+   rewriting; install_copro_load_store sets up the substituted base
+   register.  Bit 9 of INSN1 (the writeback flag) is passed through.  */
+static int
+thumb2_copy_copro_load_store (struct gdbarch *gdbarch, uint16_t insn1,
+			      uint16_t insn2, struct regcache *regs,
+			      struct displaced_step_closure *dsc)
+{
+  unsigned int rn = bits (insn1, 0, 3);
+
+  /* The test was inverted: it copied exactly the PC-relative case
+     unmodified.  Only Rn == PC needs modification.  */
+  if (rn != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					"copro load/store", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
+			"load/store insn %.4x%.4x\n", insn1, insn2);
+
+  /* Clear the base-register field; the scratch register substituted by
+     install_copro_load_store holds the original PC value.  */
+  dsc->modinsn[0] = insn1 & 0xfff0;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  install_copro_load_store (gdbarch, regs, dsc, bit (insn1, 9), rn);
+
+  return 0;
+}
+
/* Clean up branch instructions (actually perform the branch, by setting
PC). */
@@ -5604,6 +5690,58 @@ arm_copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
return 0;
}
+/* Copy Thumb-2 B (encodings T3/T4), BL and BLX immediate branches for
+   displaced stepping.  The branch is replaced with a NOP in the scratch
+   space; install_b_bl_blx performs the actual branch (and link /
+   interworking) at cleanup time using the decoded OFFSET.  */
+static int
+thumb2_copy_b_bl_blx (struct gdbarch *gdbarch, uint16_t insn1,
+		      uint16_t insn2, struct regcache *regs,
+		      struct displaced_step_closure *dsc)
+{
+  int link = bit (insn2, 14);
+  int exchange = link && !bit (insn2, 12);
+  int cond = INST_AL;
+  long offset = 0;
+  int j1 = bit (insn2, 13);
+  int j2 = bit (insn2, 11);
+  int s = sbits (insn1, 10, 10);	/* Sign-extended S bit.  */
+  int i1 = !(j1 ^ bit (insn1, 10));
+  int i2 = !(j2 ^ bit (insn1, 10));
+
+  if (!link && !exchange) /* B */
+    {
+      offset = (bits (insn2, 0, 10) << 1);
+      if (bit (insn2, 12)) /* Encoding T4 */
+	{
+	  offset |= (bits (insn1, 0, 9) << 12)
+	    | (i2 << 22)
+	    | (i1 << 23)
+	    | (s << 24);
+	  /* T4 is unconditional; leave COND as INST_AL.  The condition
+	     field only exists in encoding T3.  */
+	}
+      else /* Encoding T3 */
+	{
+	  offset |= (bits (insn1, 0, 5) << 12)
+	    | (j1 << 18)
+	    | (j2 << 19)
+	    | (s << 20);
+	  cond = bits (insn1, 6, 9);
+	}
+    }
+  else
+    {
+      offset = (bits (insn1, 0, 9) << 12);
+      offset |= ((i2 << 22) | (i1 << 23) | (s << 24));
+      offset |= exchange ?
+	(bits (insn2, 1, 10) << 2) : (bits (insn2, 0, 10) << 1);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
+			"%.4x %.4x with offset %.8lx\n",
+			(exchange) ? "blx" : "bl",
+			insn1, insn2, offset);
+
+  dsc->modinsn[0] = THUMB_NOP;
+
+  /* Pass the decoded LINK flag through instead of hard-coding 1, so a
+     plain B does not clobber LR.  */
+  install_b_bl_blx (gdbarch, regs, dsc, cond, exchange, link, offset);
+  return 0;
+}
+
/* Copy B Thumb instructions. */
static int
thumb_copy_b (struct gdbarch *gdbarch, unsigned short insn,
@@ -5866,6 +6004,41 @@ thumb_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn,
return 0;
}
+/* Copy Thumb-2 data-processing (register) instructions for displaced
+   stepping.  */
+static int
+thumb2_copy_alu_reg (struct gdbarch *gdbarch, uint16_t insn1,
+		     uint16_t insn2, struct regcache *regs,
+		     struct displaced_step_closure *dsc)
+{
+  unsigned int op2 = bits (insn2, 4, 7);
+  int is_mov = (op2 == 0x0);
+  unsigned int rn, rm, rd;
+
+  rn = bits (insn1, 0, 3); /* Rn */
+  rm = bits (insn2, 0, 3); /* Rm */
+  rd = bits (insn2, 8, 11); /* Rd */
+
+  /* If none of Rn, Rm, Rd is the PC, run the instruction out of line
+     unmodified.  NOTE(review): most Thumb-2 ALU (register) encodings
+     make a PC operand UNPREDICTABLE -- confirm the substitution below is
+     only reached for forms where PC is architecturally permitted.  */
+  if (rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM
+      && rd != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ALU reg", dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.4x%.4x\n",
+			"ALU", insn1, insn2);
+
+  /* Rewrite Rd -> r0 (field cleared), Rn -> r1, Rm -> r2; presumably
+     install_alu_reg loads r0/r1/r2 with the original values -- TODO
+     confirm against install_alu_reg.  */
+  if (is_mov)
+    dsc->modinsn[0] = insn1;
+  else
+    dsc->modinsn[0] = ((insn1 & 0xfff0) | 0x1);
+
+  dsc->modinsn[1] = ((insn2 & 0xf0f0) | 0x2);
+  dsc->numinsns = 2;
+
+  install_alu_reg (gdbarch, regs, dsc, rd, rn, rm);
+
+  return 0;
+}
+
/* Cleanup/copy arithmetic/logic insns with shifted register RHS. */
static void
@@ -6135,6 +6308,67 @@ install_ldr_str_ldrb_strb (struct gdbarch *gdbarch, struct regcache *regs,
}
+/* Copy Thumb-2 LDR/STR/LDRB/STRB (immediate or register offset) for
+   displaced stepping.  LOAD/BYTE/USERMODE/WRITEBACK describe the decoded
+   form; instructions not involving the PC run out of line unmodified.  */
static int
+thumb2_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint16_t insn1,
+			       uint16_t insn2, struct regcache *regs,
+			       struct displaced_step_closure *dsc,
+			       int load, int byte, int usermode, int writeback)
+{
+  int immed = !bit (insn1, 9);
+  unsigned int rt = bits (insn2, 12, 15);
+  unsigned int rn = bits (insn1, 0, 3);
+  unsigned int rm = bits (insn2, 0, 3);  /* Only valid if !immed.  */
+
+  /* If no operand register is the PC, no rewriting is needed.  */
+  if (rt != ARM_PC_REGNUM && rn != ARM_PC_REGNUM && rm != ARM_PC_REGNUM)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "load/store",
+					dsc);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying %s%s r%d [r%d] insn %.4x%.4x\n",
+			load ? (byte ? "ldrb" : "ldr")
+			: (byte ? "strb" : "str"), usermode ? "t" : "",
+			rt, rn, insn1, insn2);
+
+  install_ldr_str_ldrb_strb (gdbarch, regs, dsc, load, immed, writeback, byte,
+			     usermode, rt, rm, rn);
+
+  if (load || rt != ARM_PC_REGNUM)
+    {
+      dsc->u.ldst.restore_r4 = 0;
+
+      if (immed)
+	/* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, #imm].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = insn2 & 0x0fff;
+	}
+      else
+	/* {ldr,str}[b]<cond> rt, [rn, rm], etc.
+	   ->
+	   {ldr,str}[b]<cond> r0, [r2, r3].  */
+	{
+	  dsc->modinsn[0] = (insn1 & 0xfff0) | 0x2;
+	  dsc->modinsn[1] = (insn2 & 0x0ff0) | 0x3;
+	}
+
+      dsc->numinsns = 2;
+    }
+  else
+    {
+      /* In Thumb-32 instructions, the behavior is unpredictable when Rt
+	 is PC, while the behavior is undefined when Rn is PC.  In short,
+	 neither Rt nor Rn can be PC for a store, so this branch is
+	 unreachable.  */
+
+      gdb_assert (0);
+    }
+
+  return 0;
+}
+
+
+static int
arm_copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn,
struct regcache *regs,
struct displaced_step_closure *dsc,
@@ -6524,6 +6758,87 @@ arm_copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn,
return 0;
}
+/* Copy Thumb-2 LDM/STM/PUSH/POP (block transfer) for displaced stepping.
+   Transfers that don't involve the PC run unmodified out of line; an LDM
+   that loads the PC is rewritten to load into the lowest registers and
+   fixed up by cleanup_block_load_pc.  */
+static int
+thumb2_copy_block_xfer (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct regcache *regs,
+			struct displaced_step_closure *dsc)
+{
+  int rn = bits (insn1, 0, 3);
+  int load = bit (insn1, 4);
+  int writeback = bit (insn1, 5);
+
+  /* Block transfers which don't mention PC can be run directly
+     out-of-line.  */
+  if (rn != ARM_PC_REGNUM && (insn2 & 0x8000) == 0)
+    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2, "ldm/stm", dsc);
+
+  if (rn == ARM_PC_REGNUM)
+    {
+      warning (_("displaced: Unpredictable LDM or STM with "
+		 "base register r15"));
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "unpredictable ldm/stm", dsc);
+    }
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
+			"%.4x%.4x\n", insn1, insn2);
+
+  /* Clear bit 13, since it should be always zero.  */
+  dsc->u.block.regmask = (insn2 & 0xdfff);
+  dsc->u.block.rn = rn;
+
+  dsc->u.block.load = load;
+  dsc->u.block.user = bit (insn1, 6);
+  dsc->u.block.increment = bit (insn1, 7);
+  dsc->u.block.before = bit (insn1, 8);
+  dsc->u.block.writeback = writeback;
+  dsc->u.block.cond = INST_AL;
+
+  if (load)
+    {
+      if (dsc->u.block.regmask == 0xffff)
+	{
+	  /* Unreachable: bit 13 is cleared above, so the register mask
+	     can never be 0xffff.  */
+	  gdb_assert (0);
+	}
+      else
+	{
+	  unsigned int regmask = dsc->u.block.regmask;
+	  /* Removed the unused locals (to, from, new_rn) and `bit',
+	     which shadowed the bit () helper used throughout.  */
+	  unsigned int num_in_list = bitcount (regmask), new_regmask;
+	  unsigned int i;
+
+	  /* Save the registers we'll clobber; cleanup_block_load_pc
+	     restores them and routes the loaded PC value.  */
+	  for (i = 0; i < num_in_list; i++)
+	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);
+
+	  /* Suppress writeback in the scratch copy.  */
+	  if (writeback)
+	    insn1 &= ~(1 << 5);
+
+	  new_regmask = (1 << num_in_list) - 1;
+
+	  if (debug_displaced)
+	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
+				"{..., pc}: original reg list %.4x, modified "
+				"list %.4x\n"), rn, writeback ? "!" : "",
+				(int) dsc->u.block.regmask, new_regmask);
+
+	  dsc->modinsn[0] = insn1;
+	  dsc->modinsn[1] = (new_regmask & 0xffff);
+	  dsc->numinsns = 2;
+
+	  dsc->cleanup = &cleanup_block_load_pc;
+	}
+    }
+  else
+    {
+      dsc->modinsn[0] = insn1;
+      dsc->modinsn[1] = insn2;
+      dsc->numinsns = 2;
+      dsc->cleanup = &cleanup_block_store_pc;
+    }
+  return 0;
+}
+
/* Cleanup/copy SVC (SWI) instructions. These two functions are overridden
for Linux, where some SVC instructions must be treated specially. */
@@ -6609,6 +6924,23 @@ arm_copy_undef (struct gdbarch *gdbarch, uint32_t insn,
return 0;
}
+/* Copy 32-bit Thumb undefined instructions without modification
+   (cf. arm_copy_undef for the ARM equivalent).  */
+static int
+thumb_32bit_copy_undef (struct gdbarch *gdbarch, uint16_t insn1, uint16_t insn2,
+			struct displaced_step_closure *dsc)
+{
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog, "displaced: copying undefined insn "
+			"%.4x %.4x\n", (unsigned short) insn1,
+			(unsigned short) insn2);
+
+  dsc->modinsn[0] = insn1;
+  dsc->modinsn[1] = insn2;
+  dsc->numinsns = 2;
+
+  return 0;
+}
+
/* Copy unpredictable instructions. */
static int
@@ -7005,6 +7337,43 @@ arm_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
return 1;
}
+/* Decode Thumb-2 extension register (VFP/Neon) load/store.  The opcode
+   layout is exactly the same as the ARM encoding handled by
+   arm_decode_ext_reg_ld_st.  */
+
+static int
+thumb2_decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint16_t insn1,
+			     uint16_t insn2, struct regcache *regs,
+			     struct displaced_step_closure *dsc)
+{
+  unsigned int opcode = bits (insn1, 4, 8);
+
+  switch (opcode)
+    {
+    case 0x04: case 0x05:
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vmov", dsc);
+
+    case 0x08: case 0x0c: /* 01x00 */
+    case 0x0a: case 0x0e: /* 01x10 */
+    case 0x12: case 0x16: /* 10x10 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vstm/vpush", dsc);
+
+    case 0x09: case 0x0d: /* 01x01 */
+    case 0x0b: case 0x0f: /* 01x11 */
+    case 0x13: case 0x17: /* 10x11 */
+      return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					  "vfp/neon vldm/vpop", dsc);
+
+    case 0x10: case 0x14: case 0x18: case 0x1c: /* vstr.  */
+    case 0x11: case 0x15: case 0x19: case 0x1d: /* vldr.  */
+      return thumb2_copy_copro_load_store (gdbarch, insn1, insn2, regs, dsc);
+    }
+
+  /* Should be unreachable.  */
+  return 1;
+}
+
static int
arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
struct regcache *regs, struct displaced_step_closure *dsc)
@@ -7051,6 +7420,102 @@ arm_decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
return arm_copy_undef (gdbarch, insn, dsc); /* Possibly unreachable. */
}
+/* Decode Thumb-2 coprocessor, Advanced SIMD, and floating-point
+   instructions (the A6.3 "coprocessor" space) and dispatch to the
+   appropriate copy routine.  */
+static int
+thumb2_decode_svc_copro (struct gdbarch *gdbarch, uint16_t insn1,
+			 uint16_t insn2, struct regcache *regs,
+			 struct displaced_step_closure *dsc)
+{
+  unsigned int coproc = bits (insn2, 8, 11);
+  unsigned int op1 = bits (insn1, 4, 9);
+  unsigned int bit_5_8 = bits (insn1, 5, 8);
+  unsigned int bit_9 = bit (insn1, 9);
+  unsigned int bit_4 = bit (insn1, 4);
+  unsigned int rn = bits (insn1, 0, 3);
+
+  if (bit_9 == 0)
+    {
+      if (bit_5_8 == 2)
+	{
+	  if ((coproc & 0xe) == 0xa) /* 64-bit xfer.  */
+	    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						"neon 64bit xfer", dsc);
+	  else
+	    {
+	      if (bit_4) /* MRRC/MRRC2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "mrrc/mrrc2", dsc);
+	      else /* MCRR/MCRR2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "mcrr/mcrr2", dsc);
+	    }
+	}
+      else if (bit_5_8 == 0) /* UNDEFINED.  */
+	return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      else
+	{
+	  /* coproc is 101x: SIMD/VFP, extension registers load/store.  */
+	  if ((coproc & 0xe) == 0xa)
+	    return thumb2_decode_ext_reg_ld_st (gdbarch, insn1, insn2, regs,
+						dsc);
+	  else /* coproc is not 101x.  */
+	    {
+	      if (bit_4 == 0) /* STC/STC2.  */
+		return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+						     regs, dsc);
+	      else
+		{
+		  /* NOTE(review): the literal and immediate branches
+		     below are identical and could be merged.  */
+		  if (rn == 0xf) /* LDC/LDC2 literal.  */
+		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+							 regs, dsc);
+		  else /* LDC/LDC2 immediate.  */
+		    return thumb2_copy_copro_load_store (gdbarch, insn1, insn2,
+							 regs, dsc);
+		}
+	    }
+	}
+    }
+  else
+    {
+      unsigned int op = bit (insn2, 4);
+      unsigned int bit_8 = bit (insn1, 8);
+
+      if (bit_8) /* Advanced SIMD */
+	return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					    "neon", dsc);
+      else
+	{
+	  /* coproc is 101x.  */
+	  if ((coproc & 0xe) == 0xa)
+	    {
+	      if (op) /* 8,16,32-bit xfer.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "neon 8/16/32 bit xfer",
+						    dsc);
+	      else /* VFP data processing.  */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "vfp dataproc", dsc);
+	    }
+	  else
+	    {
+	      if (op)
+		{
+		  if (bit_4) /* MRC/MRC2 */
+		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"mrc/mrc2", dsc);
+		  else /* MCR/MCR2 */
+		    return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+							"mcr/mcr2", dsc);
+		}
+	      else /* CDP/CDP 2 */
+		return thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						    "cdp/cdp2", dsc);
+	    }
+	}
+    }
+
+  return 0;
+}
+
static void
install_pc_relative (struct gdbarch *gdbarch, struct regcache *regs,
struct displaced_step_closure *dsc, int rd)
@@ -7100,6 +7565,42 @@ thumb_decode_pc_relative_16bit (struct gdbarch *gdbarch, uint16_t insn,
}
+/* Emit "add rd, rd, #imm" (encoding T3, S=0) into the scratch space so
+   that RD, pre-loaded with the original PC value by install_pc_relative,
+   ends up with the same result as the displaced ADR.  IMM is the raw
+   immediate encoding as packed by the caller: imm8 in bits 0-7, imm3 in
+   bits 12-14, and the i bit in bit 26.  */
static int
+thumb_copy_pc_relative_32bit (struct gdbarch *gdbarch, struct regcache *regs,
+			      struct displaced_step_closure *dsc,
+			      int rd, unsigned int imm)
+{
+  /* Encoding T3: ADD Rd, Rd, #imm.  The i bit belongs in bit 10 of the
+     first halfword; the original code OR'd it into the 16-bit second
+     halfword at bit 26, where it was silently truncated away.  */
+  dsc->modinsn[0] = (0xf100 | ((imm >> 16) & 0x0400) | rd);
+  dsc->modinsn[1] = ((rd << 8) | (imm & 0x70ff));
+
+  dsc->numinsns = 2;
+
+  install_pc_relative (gdbarch, regs, dsc, rd);
+
+  return 0;
+}
+
+/* Decode a 32-bit Thumb ADR (encoding T3) and hand it to
+   thumb_copy_pc_relative_32bit with the raw immediate encoding.  */
+static int
+thumb_decode_pc_relative_32bit (struct gdbarch *gdbarch, uint16_t insn1,
+				uint16_t insn2, struct regcache *regs,
+				struct displaced_step_closure *dsc)
+{
+  unsigned int rd = bits (insn2, 8, 11);
+  /* The immediate has the same encoding in ADR (encoding T3) and in ADD
+     (immediate), so extract the raw immediate encoding rather than
+     computing the immediate value; when generating the ADD instruction
+     the bits can simply be OR'd into place.
+     NOTE(review): the i bit is packed at bit 26, above the 16-bit
+     halfword width -- verify the consumer moves it into bit 10 of the
+     first halfword rather than dropping it.  */
+  unsigned int imm = (insn2 & 0x70ff) | (bit (insn1, 10) << 26);
+
+  if (debug_displaced)
+    fprintf_unfiltered (gdb_stdlog,
+			"displaced: copying thumb adr r%d, #%d insn %.4x%.4x\n",
+			rd, imm, insn1, insn2);
+
+  return thumb_copy_pc_relative_32bit (gdbarch, regs, dsc, rd, imm);
+}
+
+static int
thumb_copy_16bit_ldr_literal (struct gdbarch *gdbarch, unsigned short insn1,
struct regcache *regs,
struct displaced_step_closure *dsc)
@@ -7354,12 +7855,211 @@ thumb_process_displaced_16bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
_("thumb_process_displaced_16bit_insn: Instruction decode error"));
}
+/* Decode Thumb-2 "load byte/halfword/word and memory hints" and dispatch
+   to the matching copy routine.  Removed the unused local ERR; every
+   path returns the dispatched routine's result directly.  */
+static int
+decode_thumb_32bit_ld_mem_hints (struct gdbarch *gdbarch,
+				 uint16_t insn1, uint16_t insn2,
+				 struct regcache *regs,
+				 struct displaced_step_closure *dsc)
+{
+  int rd = bits (insn2, 12, 15);
+  int user_mode = (bits (insn2, 8, 11) == 0xe);
+  int writeback = 0;
+
+  switch (bits (insn1, 5, 6))
+    {
+    case 0: /* Load byte and memory hints */
+      if (rd == 0xf) /* PLD/PLI */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  /* Pick up the writeback (W) bit for the indexed immediate
+	     forms.  */
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+						dsc, 1, 1, user_mode,
+						writeback);
+	}
+
+      break;
+    case 1: /* Load halfword and memory hints */
+      if (rd == 0xf) /* PLD{W} and Unalloc memory hint */
+	{
+	  if (bits (insn2, 6, 11))
+	    return thumb2_copy_preload (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    return thumb2_copy_preload_reg (gdbarch, insn1, insn2, regs, dsc);
+	}
+      else
+	{
+	  int op1 = bits (insn1, 7, 8);
+
+	  if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	    writeback = bit (insn2, 8);
+	  /* NOTE(review): halfword loads are dispatched with BYTE = 0.
+	     The rewritten insn keeps its original opcode bits, but
+	     confirm the copy routine's cleanup handles halfword width.  */
+	  return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs,
+						dsc, 1, 0, user_mode,
+						writeback);
+	}
+      break;
+    case 2: /* Load word */
+      {
+	int op1 = bits (insn1, 7, 8);
+
+	if ((op1 == 0 || op1 == 2) && bit (insn2, 11))
+	  writeback = bit (insn2, 8);
+
+	return thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2, regs, dsc,
+					      1, 0, user_mode, writeback);
+	break;
+      }
+    default:
+      return thumb_32bit_copy_undef (gdbarch, insn1, insn2, dsc);
+      break;
+    }
+  return 0;
+}
+
static void
thumb_process_displaced_32bit_insn (struct gdbarch *gdbarch, uint16_t insn1,
				    uint16_t insn2, struct regcache *regs,
				    struct displaced_step_closure *dsc)
{
-  error (_("Displaced stepping is only supported in ARM mode and Thumb 16bit instructions"));
+  int err = 0;
+  unsigned short op = bit (insn2, 15);
+  unsigned int op1 = bits (insn1, 11, 12);
+
+  /* Dispatch on the top-level Thumb-2 32-bit encoding fields (op1 from
+     the first halfword, op from the second).  */
+  switch (op1)
+    {
+    case 1:
+      {
+	switch (bits (insn1, 9, 10))
+	  {
+	  case 0: /* load/store multiple */
+	    switch (bits (insn1, 7, 8))
+	      {
+	      case 0: case 3: /* SRS, RFE */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "srs/rfe", dsc);
+		break;
+	      case 1: case 2: /* LDM/STM/PUSH/POP */
+		/* These Thumb 32-bit insns have the same encodings as ARM
+		   counterparts.  */
+		err = thumb2_copy_block_xfer (gdbarch, insn1, insn2, regs, dsc);
+	      }
+	    break;
+	  case 1:
+	    /* Data-processing (shift register).  In the ARM architecture
+	       reference manual this entry is "Data-processing (shifted
+	       register) on page A6-31".  However, the instructions in
+	       table A6-31 are `alu_reg' instructions.  There are no
+	       alu_shifted_reg instructions in Thumb-2.  */
+	    err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs,
+				       dsc);
+	    break;
+	  default: /* Coprocessor instructions */
+	    /* Thumb 32bit coprocessor instructions have the same encoding
+	       as ARM's.  */
+	    err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	    break;
+	  }
+      break;
+      }
+    case 2: /* op1 = 2 */
+      if (op) /* Branch and misc control.  */
+	{
+	  if (bit (insn2, 14)) /* BLX/BL */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else if (!bits (insn2, 12, 14) && bits (insn1, 8, 10) != 0x7)
+	    /* Conditional Branch */
+	    err = thumb2_copy_b_bl_blx (gdbarch, insn1, insn2, regs, dsc);
+	  else
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "misc ctrl", dsc);
+	}
+      else
+	{
+	  if (bit (insn1, 9)) /* Data processing (plain binary imm) */
+	    {
+	      int op = bits (insn1, 4, 8);
+	      /* Rn is a 4-bit field (bits 0-3); the original read five
+		 bits, overlapping the op field, so the Rn == 0xf (ADR)
+		 test was decoded against the wrong value.  */
+	      int rn = bits (insn1, 0, 3);
+	      if ((op == 0 || op == 0xa) && rn == 0xf)
+		err = thumb_decode_pc_relative_32bit (gdbarch, insn1, insn2,
+						      regs, dsc);
+	      else
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "dp/pb", dsc);
+	    }
+	  else /* Data processing (modified immediate) */
+	    err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+					       "dp/mi", dsc);
+	}
+      break;
+    case 3: /* op1 = 3 */
+      switch (bits (insn1, 9, 10))
+	{
+	case 0:
+	  if (bit (insn1, 4))
+	    err = decode_thumb_32bit_ld_mem_hints (gdbarch, insn1, insn2,
+						   regs, dsc);
+	  else
+	    {
+	      if (bit (insn1, 8)) /* NEON Load/Store */
+		err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						   "neon elt/struct load/store",
+						   dsc);
+	      else /* Store single data item */
+		{
+		  int user_mode = (bits (insn2, 8, 11) == 0xe);
+		  int byte = (bits (insn1, 5, 7) == 0
+			      || bits (insn1, 5, 7) == 4);
+		  int writeback = 0;
+
+		  if (bits (insn1, 5, 7) < 3 && bit (insn2, 11))
+		    writeback = bit (insn2, 8);
+
+		  err = thumb2_copy_ldr_str_ldrb_strb (gdbarch, insn1, insn2,
+						       regs, dsc, 0, byte,
+						       user_mode, writeback);
+		}
+	    }
+	  break;
+	case 1: /* op1 = 3, bits (9, 10) == 1 */
+	  switch (bits (insn1, 7, 8))
+	    {
+	    case 0: case 1: /* Data processing (register) */
+	      err = thumb2_copy_alu_reg (gdbarch, insn1, insn2, regs, dsc);
+	      break;
+	    case 2: /* Multiply and absolute difference */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "mul/mua/diff", dsc);
+	      break;
+	    case 3: /* Long multiply and divide */
+	      err = thumb_copy_unmodified_32bit (gdbarch, insn1, insn2,
+						 "lmul/lmua", dsc);
+	      break;
+	    }
+	  break;
+	default: /* Coprocessor instructions */
+	  err = thumb2_decode_svc_copro (gdbarch, insn1, insn2, regs, dsc);
+	  break;
+	}
+      break;
+    default:
+      err = 1;
+    }
+
+  if (err)
+    internal_error (__FILE__, __LINE__,
+		    _("thumb_process_displaced_32bit_insn: Instruction decode error"));
+
}
static void
--
1.7.0.4