This is the mail archive of the gdb-patches@sourceware.org mailing list for the GDB project.
[PATCH] arm reversible : <phase_2_complete>
- From: paawan oza <paawan1982 at yahoo dot com>
- To: gdb at sourceware dot org, gdb-patches at sourceware dot org
- Date: Wed, 20 Apr 2011 12:16:05 -0700 (PDT)
- Subject: [PATCH] arm reversible : <phase_2_complete>
- References: <341905.10459.qm@web112513.mail.gq1.yahoo.com> <m3d3m8xdf7.fsf@fleche.redhat.com> <208397.95006.qm@web112517.mail.gq1.yahoo.com> <4DA27006.1080607@codesourcery.com> <763549.92092.qm@web112506.mail.gq1.yahoo.com>
Hi,
I am working on phase-3 now.
If anybody could please start reviewing the phase-2 patch (it is
independent of phase-3 and could be checked in independently too),
I can start addressing review comments as they come in.
In parallel, test cases are also being worked on.
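
For anyone who wants to give the patch a quick spin, a session along these
lines should exercise the new code (just a sketch, assuming an arm-linux
native gdb built with this patch and any small test program):

  (gdb) break main
  (gdb) run
  (gdb) record
  (gdb) next
  (gdb) reverse-next
  (gdb) reverse-stepi

The record/reverse commands themselves come from the existing process
record framework; this patch only adds the ARM instruction decoding
(arm_process_record) behind them.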
The phase-2 patch follows.
PATCH STARTS.
------------------------------
diff -urN arm_orig/arm-linux-tdep.c arm_new/arm-linux-tdep.c
--- arm_orig/arm-linux-tdep.c 2011-03-03 09:21:13.000000000 +0530
+++ arm_new/arm-linux-tdep.c 2011-04-15 13:11:15.000000000 +0530
@@ -998,6 +998,9 @@
set_gdbarch_fetch_tls_load_module_address (gdbarch,
svr4_fetch_objfile_link_map);
+ /* Enable process record. */
+ set_gdbarch_process_record (gdbarch, arm_process_record);
+
tramp_frame_prepend_unwinder (gdbarch,
&arm_linux_sigreturn_tramp_frame);
tramp_frame_prepend_unwinder (gdbarch,
@@ -1025,6 +1028,8 @@
tdep->syscall_next_pc = arm_linux_syscall_next_pc;
+
+ tdep->arm_swi_record = NULL;
}
/* Provide a prototype to silence -Wmissing-prototypes. */
diff -urN arm_orig/arm-tdep.c arm_new/arm-tdep.c
--- arm_orig/arm-tdep.c 2011-03-03 09:21:13.000000000 +0530
+++ arm_new/arm-tdep.c 2011-04-15 13:11:15.000000000 +0530
@@ -54,8 +54,11 @@
#include "gdb_assert.h"
#include "vec.h"
+#include "record.h"
+
#include "features/arm-with-m.c"
+
static int arm_debug;
/* Macros for setting and testing a bit in a minimal symbol that marks
@@ -7929,3 +7932,1692 @@
NULL, /* FIXME: i18n: "ARM debugging is %s. */
&setdebuglist, &showdebuglist);
}
+
+
+
+/* ARM reversible-debugging (process record) data structures. */
+
+#define ARM_INSN_SIZE_BYTES 4
+#define THUMB_INSN_SIZE_BYTES 2
+#define THUMB2_INSN_SIZE_BYTES 4
+#define NO_OF_TYPE_OF_ARM_INSNS 8
+#define NO_OF_TYPE_OF_THUMB_INSNS 8
+#define ARM_RECORD_ARCH_LIST_ADD_REG(regnum) \
+ record_arch_list_add_reg (arm_record.regcache, regnum)
+#define GET_REG_VAL(REGCACHE,NO,BUF) regcache_raw_read (REGCACHE, NO, BUF)
+#define INSN_S_L_BIT_NUM 20
+
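+/* Convention used by the record helpers below: arm_regs[0] holds the
+ number of register entries that follow it, and arm_mems[0].len holds the
+ number of memory entries that follow it; arm_process_record walks both
+ arrays starting at index 1. */
+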
+struct arm_mem_r
+{
+ uint32_t len;
+ CORE_ADDR addr;
+};
+
+typedef struct insn_decode_record_t
+{
+ struct gdbarch *gdbarch;
+ struct regcache *regcache;
+ CORE_ADDR this_addr; /* address of the insn being decoded. */
+ uint32_t arm_insn; /* Should accommodate Thumb as well. */
+ uint32_t cond; /* condition code. */
+ uint32_t id; /* type of insn. */
+ uint32_t opcode; /* insn opcode. */
+ uint32_t decode; /* insn decode bits. */
+ uint32_t *arm_regs; /* registers to be saved for this record. */
+ struct arm_mem_r *arm_mems; /* memory to be saved for this record. */
+} insn_decode_record;
+
+
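+/* Check a "should be one"/"should be zero" encoding constraint on a field
+ of INSN: the LEN bits starting at bit (BIT_NUM - 1) must all be one when
+ SBO is non-zero, or all be zero when SBO is zero. Returns 1 when the
+ constraint holds, 0 otherwise. */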
+static int
+SBO_SBZ (uint32_t insn, uint32_t bit_num, uint32_t len, uint32_t sbo)
+{
+ uint32_t ONES = bits (insn, bit_num - 1, (bit_num -1) + (len - 1));
+
+ if (!len)
+ return 1;
+
+ if (!sbo)
+ ONES = ~ONES;
+
+ while (ONES)
+ {
+ if (!(ONES & sbo))
+ {
+ return 0;
+ }
+ ONES = ONES >> 1;
+ }
+ return 1;
+}
+
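+/* Decode insns in the ARM unconditional/extension space (condition field
+ 0b1111). Recording these is not implemented yet, so -1 is returned for
+ now. */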
+static int
+handle_extension_space (insn_decode_record *arm_record)
+{
+ insn_decode_record *arm_insn_r = arm_record;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
+ struct regcache *reg_cache = arm_insn_r->regcache;
+
+ uint32_t reg_src1 = 0, reg_src2 = 0;
+ uint32_t opcode1 = 0, opcode2 = 0;
+
+ opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
+ if ((3 == opcode1) && (bit (arm_insn_r->arm_insn, 4)))
+ {
+ /* undefined instruction on ARM V5; need to handle if later versions
+ define it. */
+ }
+
+ opcode2 = bits (arm_insn_r->arm_insn, 4, 7);
+
+ if ((!opcode1) && (9 == opcode2))
+ {
+ /* handle arithmetic insn extension space. */
+ }
+
+ opcode1 = bits (arm_insn_r->arm_insn, 26, 27);
+ opcode2 = bits (arm_insn_r->arm_insn, 23, 24);
+
+ if ((!opcode1) && (2 == opcode2) && (!bit (arm_insn_r->arm_insn, 20)))
+ {
+ /* handle control insn extension space. */
+ }
+
+ opcode1 = bits (arm_insn_r->arm_insn, 25, 27);
+ if ((!opcode1) && (bit (arm_insn_r->arm_insn, 7)) \
+ && (bit(arm_insn_r->arm_insn, 4)))
+ {
+ /* handle load/store insn extension space. */
+ }
+
+ opcode1 = bits (arm_insn_r->arm_insn, 23, 27);
+ if ((24 == opcode1) && (bit (arm_insn_r->arm_insn, 21)))
+ {
+ /* handle coprocessor insn extension space. */
+ }
+
+ /* to be done for ARMv5 and later; as of now we return -1. */
+ return -1;
+}
+
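+/* Record handler for ARM insns with bits <27:25> = 000: data-processing
+ (register) insns, multiplies, and the miscellaneous/extra load-store
+ encodings (SWP/SWPB, BX/BLX, BKPT, MRS/MSR, STRH and friends). */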
+static int
+arm_handle_data_proc_misc_load_str_insn (void *data)
+{
+
+ insn_decode_record *arm_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
+ struct regcache *reg_cache = arm_insn_r->regcache;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf[2];
+
+
+ uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
+ uint32_t immed_high = 0, immed_low = 0,offset_8 = 0, tgt_mem_addr = 0;
+ uint32_t opcode1 = 0;
+
+ memset(&u_buf, 0, sizeof(u_buf));
+
+ arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
+ arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
+ opcode1 = bits (arm_insn_r->arm_insn, 20, 24);
+
+ /* data processing insn /multiply insn. */
+ if ((9 == arm_insn_r->decode)
+ && (((4 <= arm_insn_r->opcode) && (7 >= arm_insn_r->opcode))
+ || ((0 == arm_insn_r->opcode) || (1 == arm_insn_r->opcode))))
+ {
+ /* handle multiply instructions. */
+ /* MLA, MUL, SMLAL, SMULL, UMLAL, UMULL. */
+ if ((0 == arm_insn_r->opcode) || (1 == arm_insn_r->opcode))
+ {
+ /* handle MLA and MUL. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 16, 19);
+ arm_insn_r->arm_regs[2] = ARM_PS_REGNUM;
+ }
+ else if ((4 <= arm_insn_r->opcode) && (7 >= arm_insn_r->opcode))
+ {
+ /* handle SMLAL, SMULL, UMLAL, UMULL. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*4);
+ arm_insn_r->arm_regs[0] = 3;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 16, 19);
+ arm_insn_r->arm_regs[2] = bits (arm_insn_r->arm_insn, 12, 15);
+ arm_insn_r->arm_regs[3] = ARM_PS_REGNUM;
+ }
+ }
+ else if ((bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
+ && ((11 == arm_insn_r->decode) || (13 == arm_insn_r->decode)))
+ {
+ /* Handle misc load insns, as the 20th bit (L = 1). */
+ /* LDR can also branch: when MOV LR, PC precedes an LDR that loads
+ R15, the pair emulates a branch and link, so we need to save the
+ CPSR and PC as well. I am not sure this is the right place, as it
+ is the opcode = 010 LDR encoding that actually does this when R15
+ is used. */
+ reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
+ if (15 != reg_dest)
+ {
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ }
+ else
+ {
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = reg_dest;
+ arm_insn_r->arm_regs[2] = ARM_PS_REGNUM;
+ }
+ }
+ else if (((9 == arm_insn_r->opcode) || (11 == arm_insn_r->opcode))
+ && (SBO_SBZ (arm_insn_r->arm_insn, 5, 12, 0))
+ && (SBO_SBZ (arm_insn_r->arm_insn, 13, 4, 1))
+ && (2 == bits (arm_insn_r->arm_insn, 20, 21)))
+ {
+ /* handle MSR insn. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ if (9 == arm_insn_r->opcode)
+ {
+ /* CPSR is going to be changed. */
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ }
+ else
+ {
+ /* SPSR is going to be changed. */
+ /* FIXME (Oza): how do we read the SPSR value? */
+ }
+ }
+ else if ((9 == arm_insn_r->decode)
+ && ((8 == arm_insn_r->opcode) || (10 == arm_insn_r->opcode))
+ && (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)))
+ {
+ /* Handle SWP, SWPB. */
+ /* These insns change both a register and memory. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ /* get memory address given by Rn. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ arm_insn_r->arm_mems[0].len = 1;
+ /* SWP insn ?, swaps word. */
+ if (8 == arm_insn_r->opcode)
+ {
+ arm_insn_r->arm_mems[1].len = 4;
+ }
+ else
+ {
+ /* SWPB insn, swaps only byte. */
+ arm_insn_r->arm_mems[1].len = 1;
+ }
+ arm_insn_r->arm_mems[1].addr = u_buf[0].s_word;
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ }
+ else if ((3 == arm_insn_r->decode) && (0x12 == opcode1)
+ && (SBO_SBZ (arm_insn_r->arm_insn, 9, 12, 1)))
+ {
+ /* handle BLX, branch and link/exchange. */
+ if (9 == arm_insn_r->opcode)
+ {
+ /* The branch state is chosen by bit[0] of Rm, which sets the T bit
+ of the CPSR, and R14 stores the return address. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ arm_insn_r->arm_regs[2] = ARM_LR_REGNUM;
+ }
+ }
+ else if ((7 == arm_insn_r->decode) && (0x12 == opcode1))
+ {
+ /* Handle the enhanced software breakpoint insn, BKPT. */
+ /* CPSR is changed to execute in ARM state, normal interrupts are
+ disabled, and abort mode is entered. */
+ /* The PC is then set according to the (normal or high) vector
+ configuration. */
+ /* FIXME (Oza): what if the user hits a breakpoint and then reverses?
+ In that case we need to go back to the previous CPSR and PC. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ arm_insn_r->arm_regs[2] = ARM_LR_REGNUM;
+ /* save SPSR also; how? */
+ }
+ else if ((11 == arm_insn_r->decode)
+ && (!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM)))
+ {
+ /* Handle the enhanced store insns and DSP insns (e.g. LDRD).
+ The cases below follow the addressing modes of the STRH insn. */
+ if ((14 == arm_insn_r->opcode) || (10 == arm_insn_r->opcode))
+ {
+ /* 1) Handle misc store, immediate offset. */
+ immed_low = bits (arm_insn_r->arm_insn, 0, 3);
+ immed_high = bits (arm_insn_r->arm_insn, 8, 11);
+ reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ if (15 == reg_src1)
+ {
+ /* R15 as Rn means the value used is the current PC + 8. */
+ u_buf[0].s_word = u_buf[0].s_word + 8;
+ }
+ offset_8 = (immed_high << 4) | immed_low;
+ /* calculate target store address. */
+ if (14 == arm_insn_r->opcode)
+ {
+ tgt_mem_addr = u_buf[0].s_word + offset_8;
+ }
+ else
+ {
+ tgt_mem_addr = u_buf[0].s_word - offset_8;
+ }
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ arm_insn_r->arm_mems[0].len = 1;
+ arm_insn_r->arm_mems[1].len = 2;
+ arm_insn_r->arm_mems[1].addr = tgt_mem_addr;
+ }
+ else if ((12 == arm_insn_r->opcode) || (8 == arm_insn_r->opcode))
+ {
+ /* 2) store, register offset. */
+ /* get Rm. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
+ /* get Rn. */
+ reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ GET_REG_VAL (reg_cache, reg_src2, &u_buf[1].buf[0]);
+ if (15 == reg_src2)
+ {
+ /* R15 as Rn means the value used is the current PC + 8. */
+ u_buf[1].s_word = u_buf[1].s_word + 8;
+ }
+ /* calculate target store address, Rn +/- Rm, register offset. */
+ if (12 == arm_insn_r->opcode)
+ {
+ tgt_mem_addr = u_buf[0].s_word + u_buf[1].s_word;
+ }
+ else
+ {
+ tgt_mem_addr = u_buf[1].s_word - u_buf[0].s_word;
+ }
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ arm_insn_r->arm_mems[0].len = 1;
+ arm_insn_r->arm_mems[1].len = 2;
+ arm_insn_r->arm_mems[1].addr = tgt_mem_addr;
+ }
+ else if ((11 == arm_insn_r->opcode) || (15 == arm_insn_r->opcode)
+ || (2 == arm_insn_r->opcode) || (6 == arm_insn_r->opcode))
+ {
+ /* 3) store, immediate pre-indexed. */
+ /* 5) store, immediate post-indexed. */
+ immed_low = bits (arm_insn_r->arm_insn, 0, 3);
+ immed_high = bits (arm_insn_r->arm_insn, 8, 11);
+ offset_8 = (immed_high << 4) | immed_low;
+ reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
+ GET_REG_VAL(reg_cache, reg_src1, &u_buf[0].buf[0]);
+ /* calculate target store address, Rn +/- Rm, register offset. */
+ if ((15 == arm_insn_r->opcode) || (6 == arm_insn_r->opcode))
+ {
+ tgt_mem_addr = u_buf[0].s_word + offset_8;
+ }
+ else
+ {
+ tgt_mem_addr = u_buf[0].s_word - offset_8;
+ }
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ arm_insn_r->arm_mems[0].len = 1;
+ arm_insn_r->arm_mems[1].len = 2;
+ arm_insn_r->arm_mems[1].addr = tgt_mem_addr;
+ /* record Rn also as it changes. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 16, 19);
+ }
+ else if ((9 == arm_insn_r->opcode) || (13 == arm_insn_r->opcode)
+ || (0 == arm_insn_r->opcode) || (4 == arm_insn_r->opcode))
+ {
+ /* 4) store, register pre-indexed. */
+ /* 6) store, register post -indexed. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
+ reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ GET_REG_VAL (reg_cache, reg_src2, &u_buf[1].buf[0]);
+ /* calculate target store address, Rn +/- Rm, register offset. */
+ if ((13 == arm_insn_r->opcode) || (4 == arm_insn_r->opcode))
+ {
+ tgt_mem_addr = u_buf[0].s_word + u_buf[1].s_word;
+ }
+ else
+ {
+ tgt_mem_addr = u_buf[1].s_word - u_buf[0].s_word;
+ }
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ arm_insn_r->arm_mems[0].len = 1;
+ arm_insn_r->arm_mems[1].len = 2;
+ arm_insn_r->arm_mems[1].addr = tgt_mem_addr;
+ /* record Rn also as it changes. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 16, 19);
+ }
+ /* DSP insns (e.g. LDRD) TBD. */
+ }
+ else if ((1 == arm_insn_r->decode) && (0x12 == opcode1)
+ && SBO_SBZ (arm_insn_r->arm_insn, 9, 12, 1))
+ {
+ /* Handle BX, branch and exchange. */
+ /* The branch state is chosen by bit[0] of Rm, which sets the T bit
+ of the CPSR. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ }
+ else if ((1 == arm_insn_r->decode) && (0x16 == opcode1)
+ && SBO_SBZ (arm_insn_r->arm_insn, 9, 4, 1)
+ && SBO_SBZ (arm_insn_r->arm_insn, 17, 4, 1))
+ {
+ /* Count leading zeros: CLZ. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ }
+ else if ((!bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
+ && ((8 == arm_insn_r->opcode) || (10 == arm_insn_r->opcode))
+ && (SBO_SBZ (arm_insn_r->arm_insn, 17, 4, 1))
+ && (SBO_SBZ (arm_insn_r->arm_insn, 1, 12, 0))
+ )
+ {
+ /* handle MRS insn. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ }
+ /* The condition below is always true; anything not matched above
+ falls through to here. */
+ else if (arm_insn_r->opcode <= 15)
+ {
+ /* Normal data-processing insns. */
+ /* In all 11 shifter-operand addressing modes the insn modifies the
+ destination register, given by bits 12-15, and the flags. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ arm_insn_r->arm_regs[2] = ARM_PS_REGNUM;
+ }
+ return 0;
+}
+
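+/* Record handler for ARM insns with bits <27:25> = 001: data-processing
+ (immediate) insns and MSR (immediate). */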
+static int
+arm_handle_data_proc_imm_insn (void *data)
+{
+ insn_decode_record *arm_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
+ struct regcache *reg_cache = arm_insn_r->regcache;
+
+ uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
+ uint32_t immed_high = 0, immed_low = 0, offset_8 = 0, tgt_mem_addr = 0;
+
+ arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
+ arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
+
+ if (((9 == arm_insn_r->opcode) || (11 == arm_insn_r->opcode))
+ && (2 == bits (arm_insn_r->arm_insn, 20, 21))
+ && (SBO_SBZ (arm_insn_r->arm_insn, 13, 4, 1))
+ )
+ {
+ /* handle MSR insn. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ if (9 == arm_insn_r->opcode)
+ {
+ /* CPSR is going to be changed. */
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ }
+ else
+ {
+ /* SPSR is going to be changed. */
+ /* FIXME (Oza): how do we read the SPSR value? */
+ }
+ }
+ /* The condition below is always true; anything not matched above
+ falls through to here. */
+ else if (arm_insn_r->opcode <= 15)
+ {
+ /* Normal data-processing insns. */
+ /* In all 11 shifter-operand addressing modes the insn modifies the
+ destination register, given by bits 12-15, and the flags. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ arm_insn_r->arm_regs[2] = ARM_PS_REGNUM;
+ }
+ return 0;
+}
+
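+/* Record handler for ARM insns with bits <27:25> = 010: load/store with
+ immediate offset (LDR/STR/LDRB/STRB, immediate forms). */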
+static int
+arm_handle_ld_st_imm_offset_insn (void *data)
+{
+ insn_decode_record *arm_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
+ struct regcache *reg_cache = arm_insn_r->regcache;
+
+ uint32_t reg_src1 = 0 , reg_src2= 0, reg_dest = 0;
+ uint32_t immed_high = 0, immed_low = 0, offset_12 = 0, tgt_mem_addr = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ memset(&u_buf, 0, sizeof(u_buf));
+ arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
+ arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
+
+ if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
+ {
+ reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
+ /* LDR can also branch: when MOV LR, PC precedes an LDR that loads
+ R15, the pair emulates a branch and link, so we need to save the
+ CPSR and PC as well. */
+ if (15 != reg_dest)
+ {
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ }
+ else
+ {
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = reg_dest;
+ arm_insn_r->arm_regs[2] = ARM_PS_REGNUM;
+ }
+ }
+ else
+ {
+ if ((8 == arm_insn_r->opcode) || (10 == arm_insn_r->opcode)
+ || (12 == arm_insn_r->opcode) || (14 == arm_insn_r->opcode)
+ || (9 == arm_insn_r->opcode) || (11 == arm_insn_r->opcode)
+ || (13 == arm_insn_r->opcode) || (15 == arm_insn_r->opcode)
+ || (0 == arm_insn_r->opcode) || (2 == arm_insn_r->opcode)
+ || (4 == arm_insn_r->opcode) || (6 == arm_insn_r->opcode)
+ || (1 == arm_insn_r->opcode) || (3 == arm_insn_r->opcode)
+ || (5 == arm_insn_r->opcode) || (7 == arm_insn_r->opcode))
+ {
+ /* store, immediate offset, immediate pre-indexed,
+ immediate post-indexed. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
+ offset_12 = bits (arm_insn_r->arm_insn, 0, 11);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf.buf[0]);
+ /* U == 1 */
+ if (bit (arm_insn_r->arm_insn, 23))
+ {
+ tgt_mem_addr = u_buf.s_word + offset_12;
+ }
+ else
+ {
+ tgt_mem_addr = u_buf.s_word - offset_12;
+ }
+
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ arm_insn_r->arm_mems[0].len = 1;
+
+ switch(arm_insn_r->opcode)
+ {
+ case 8:
+ case 12:
+ case 9:
+ case 13:
+ case 1:
+ case 5:
+ /* STR insn, STRT insn. */
+ arm_insn_r->arm_mems[1].len = 4;
+ break;
+
+ case 10:
+ case 14:
+ case 11:
+ case 15:
+ case 3:
+ case 7:
+ /* STRB insn, STRBT insn. */
+ arm_insn_r->arm_mems[1].len = 1;
+ break;
+
+ default:
+ /* Rest of the insns are unreachable for this addressing mode. */
+ break;
+ }
+ arm_insn_r->arm_mems[1].addr = tgt_mem_addr;
+ if ((9 == arm_insn_r->opcode) || (11 == arm_insn_r->opcode)
+ || (13 == arm_insn_r->opcode) || (15 == arm_insn_r->opcode)
+ || (0 == arm_insn_r->opcode) || (2 == arm_insn_r->opcode)
+ || (4 == arm_insn_r->opcode) || (6 == arm_insn_r->opcode)
+ || (1 == arm_insn_r->opcode) || (3 == arm_insn_r->opcode)
+ || (5 == arm_insn_r->opcode) || (7 == arm_insn_r->opcode))
+ {
+ /* We are handling the pre-indexed and post-indexed modes, where
+ Rn is going to be changed. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = reg_src1;
+ }
+ }
+ }
+ return 0;
+}
+
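+/* Record handler for ARM insns with bits <27:25> = 011: load/store with
+ (scaled) register offset. */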
+static int
+arm_handle_ld_st_reg_offset_insn (void *data)
+{
+ insn_decode_record *arm_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
+ struct regcache *reg_cache = arm_insn_r->regcache;
+
+ uint32_t shift_imm = 0;
+ uint32_t reg_src1 = 0, reg_src2 = 0, reg_dest = 0;
+ uint32_t immed_high = 0, immed_low = 0, offset_12 = 0, tgt_mem_addr = 0;
+
+ union
+ {
+ int32_t signed_word;
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf[2];
+
+ memset(&u_buf, 0, sizeof(u_buf));
+ arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 21, 24);
+ arm_insn_r->decode = bits (arm_insn_r->arm_insn, 4, 7);
+
+ /* Handle the enhanced store insns and the LDRD DSP insn.
+ The cases below follow the addressing modes of the STRH insn. */
+
+ /* LDR or STR? */
+ if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
+ {
+ reg_dest = bits (arm_insn_r->arm_insn, 12, 15);
+ /* LDR can also branch: when MOV LR, PC precedes an LDR that loads
+ R15, the pair emulates a branch and link, so we need to save the
+ CPSR and PC as well. */
+ if (15 != reg_dest)
+ {
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = bits (arm_insn_r->arm_insn, 12, 15);
+ }
+ else
+ {
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = reg_dest;
+ arm_insn_r->arm_regs[2] = ARM_PS_REGNUM;
+ }
+ }
+ else
+ {
+ if ((8 == arm_insn_r->opcode) || (10 == arm_insn_r->opcode)
+ || (12 == arm_insn_r->opcode) || (14 == arm_insn_r->opcode)
+ || (9 == arm_insn_r->opcode) || (11 == arm_insn_r->opcode)
+ || (13 == arm_insn_r->opcode) || (15 == arm_insn_r->opcode)
+ || (0 == arm_insn_r->opcode) || (2 == arm_insn_r->opcode)
+ || (4 == arm_insn_r->opcode) || (6 == arm_insn_r->opcode)
+ || (1 == arm_insn_r->opcode) || (3 == arm_insn_r->opcode)
+ || (5 == arm_insn_r->opcode) || (7 == arm_insn_r->opcode))
+ {
+ if (! bits (arm_insn_r->arm_insn, 4, 11))
+ {
+ /* store insn, register offset and register pre-indexed,
+ register post-indexed. */
+ /* get Rm. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
+ /* get Rn. */
+ reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ GET_REG_VAL (reg_cache, reg_src2, &u_buf[1].buf[0]);
+ if (15 == reg_src2)
+ {
+ /* R15 as Rn means the value used is the current PC + 8. */
+ /* Pre-indexed mode does not reach here; that would be an
+ illegal insn. */
+ u_buf[1].s_word = u_buf[1].s_word + 8;
+ }
+ /* calculate target store address, Rn +/- Rm, register offset. */
+ /* U == 1. */
+ if (bit (arm_insn_r->arm_insn, 23))
+ {
+ tgt_mem_addr = u_buf[0].s_word + u_buf[1].s_word;
+ }
+ else
+ {
+ tgt_mem_addr = u_buf[1].s_word - u_buf[0].s_word;
+ }
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ arm_insn_r->arm_mems[0].len = 1;
+ switch(arm_insn_r->opcode)
+ {
+ case 8:
+ case 12:
+ case 9:
+ case 13:
+ case 1:
+ case 5:
+ /* STR insn, STRT insn. */
+ arm_insn_r->arm_mems[1].len = 4;
+ break;
+
+ case 10:
+ case 14:
+ case 11:
+ case 15:
+ case 3:
+ case 7:
+ /* STRB insn, STRBT insn. */
+ arm_insn_r->arm_mems[1].len = 1;
+ break;
+
+ default:
+ /* rest of the insns are unreachable for this addr mode. */
+ break;
+ }
+ arm_insn_r->arm_mems[1].addr = tgt_mem_addr;
+
+ if ((9 == arm_insn_r->opcode) || (11 == arm_insn_r->opcode)
+ || (13 == arm_insn_r->opcode) || (15 == arm_insn_r->opcode)
+ || (0 == arm_insn_r->opcode) || (2 == arm_insn_r->opcode)
+ || (4 == arm_insn_r->opcode) || (6 == arm_insn_r->opcode)
+ || (1 == arm_insn_r->opcode) || (3 == arm_insn_r->opcode)
+ || (5 == arm_insn_r->opcode) || (7 == arm_insn_r->opcode))
+ {
+ arm_insn_r->arm_regs = \
+ (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ /* Rn is going to be changed in pre-indexed mode and
+ post-indexed mode as well. */
+ arm_insn_r->arm_regs[1] = reg_src2;
+ }
+ }
+ else
+ {
+ /* store insn, scaled register offset; scaled pre-indexed. */
+ offset_12 = bits (arm_insn_r->arm_insn, 5, 6);
+ /* get Rm. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 0, 3);
+ /* get Rn. */
+ reg_src2 = bits (arm_insn_r->arm_insn, 16, 19);
+ /* get shift_imm. */
+ shift_imm = bits (arm_insn_r->arm_insn, 7, 11);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ GET_REG_VAL (reg_cache, reg_src2, &u_buf[1].buf[0]);
+ /* Bits 5-6 give the shift type; offset_12 is reused to hold it. */
+ switch(offset_12)
+ {
+ case 0:
+ /* offset_12 used as index. */
+ offset_12 = u_buf[0].s_word << shift_imm;
+ break;
+
+ case 1:
+ offset_12 = (!shift_imm)?0:u_buf[0].s_word >> shift_imm;
+ break;
+
+ case 2:
+ if (!shift_imm)
+ {
+ if (bit (u_buf[0].s_word, 31))
+ {
+ offset_12 = 0xFFFFFFFF;
+ }
+ else
+ {
+ offset_12 = 0;
+ }
+ }
+ else
+ {
+ /* this is arithmetic shift. */
+ offset_12 = u_buf[0].signed_word >> shift_imm;
+ }
+ break;
+
+ case 3:
+ if (!shift_imm)
+ {
+ GET_REG_VAL (reg_cache, ARM_PS_REGNUM, &u_buf[1].buf[0]);
+ /* get C flag value and shift it by 31. */
+ offset_12 = (((bit (u_buf[1].s_word, 29)) << 31) \
+ | (u_buf[0].s_word) >> 1);
+ }
+ else
+ {
+ offset_12 = (u_buf[0].s_word >> shift_imm)
+ | (u_buf[0].s_word << (32 - shift_imm));
+ }
+ break;
+
+ default:
+ /* unreachable. */
+ break;
+ }
+
+ GET_REG_VAL (reg_cache, reg_src2, &u_buf[1].buf[0]);
+ /* U == 1 */
+ if (bit (arm_insn_r->arm_insn, 23))
+ {
+ tgt_mem_addr = u_buf[1].s_word + offset_12;
+ }
+ else
+ {
+ tgt_mem_addr = u_buf[1].s_word - offset_12;
+ }
+
+ arm_insn_r->arm_mems = (struct arm_mem_r *)
+ xmalloc (sizeof(struct arm_mem_r)*2);
+ arm_insn_r->arm_mems[0].len = 1;
+ arm_insn_r->arm_mems[1].addr = tgt_mem_addr;
+ switch (arm_insn_r->opcode)
+ {
+ case 8:
+ case 12:
+ case 9:
+ case 13:
+ case 1:
+ case 5:
+ /* STR insn, STRT insn. */
+ arm_insn_r->arm_mems[1].len = 4;
+ break;
+
+ case 10:
+ case 14:
+ case 11:
+ case 15:
+ case 3:
+ case 7:
+ /* STRB insn, STRBT insn. */
+ arm_insn_r->arm_mems[1].len = 1;
+ break;
+
+ default:
+ /* rest of the insns are unreachable for this addr mode. */
+ break;
+ }
+ if ((9 == arm_insn_r->opcode) || (11 == arm_insn_r->opcode)
+ || (13 == arm_insn_r->opcode) || (15 == arm_insn_r->opcode)
+ || (0 == arm_insn_r->opcode) || (2 == arm_insn_r->opcode)
+ || (4 == arm_insn_r->opcode) || (6 == arm_insn_r->opcode)
+ || (1 == arm_insn_r->opcode) || (3 == arm_insn_r->opcode)
+ || (5 == arm_insn_r->opcode) || (7 == arm_insn_r->opcode))
+ {
+ arm_insn_r->arm_regs = \
+ (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ /* Rn is going to be changed in register scaled pre-indexed
+ mode, and scaled post indexed mode. */
+ arm_insn_r->arm_regs[1] = reg_src2;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
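+/* Record handler for ARM insns with bits <27:25> = 100: load/store
+ multiple (LDM/STM). */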
+static int
+arm_handle_ld_st_multiple_insn (void *data)
+{
+ insn_decode_record *arm_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
+ struct regcache *reg_cache = arm_insn_r->regcache;
+
+ uint32_t register_list[16]={0}, register_count=0, register_bits=0;
+ uint32_t shift_imm=0;
+ uint32_t reg_src1=0, reg_src2=0, addr_mode=0;
+ uint32_t start_address=0, index = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf[2];
+
+ memset (&u_buf, 0, sizeof(u_buf));
+
+ /* This mode is exclusively for load and store multiple. */
+ /* Handle increment after/before and decrement after/before modes;
+ Rn changes depending on the W bit, but as of now we record Rn
+ anyway, without optimization. */
+
+
+ if (bit (arm_insn_r->arm_insn, INSN_S_L_BIT_NUM))
+ {
+ /* LDR (1,2,3) where LDR (3) changes CPSR too. */
+
+ register_bits = bits (arm_insn_r->arm_insn, 0, 15);
+ /* get Rn. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
+ while (register_bits)
+ {
+ if (register_bits & 0x00000001)
+ register_list[register_count++] = 1;
+ register_bits = register_bits >> 1;
+ }
+
+ /* Extra space for the base register and CPSR; no optimization. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc
+ (sizeof(uint32_t) * (register_count + 3));
+ arm_insn_r->arm_regs[0] = register_count + 2;
+ arm_insn_r->arm_regs[register_count+1] = reg_src1;
+ arm_insn_r->arm_regs[register_count+2] = ARM_PS_REGNUM;
+ for (register_count = 0; register_count < 8; register_count++)
+ {
+ if (register_list[register_count])
+ {
+ arm_insn_r->arm_regs[index] = register_count;
+ index++;
+ }
+ }
+ }
+ else
+ {
+ /* it handles both STM(1) and STM(2). */
+ addr_mode = bits (arm_insn_r->arm_insn, 23, 24);
+
+ register_bits = bits (arm_insn_r->arm_insn, 0, 15);
+ /* get Rn. */
+ reg_src1 = bits (arm_insn_r->arm_insn, 16, 19);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ while (register_bits)
+ {
+ if (register_bits & 0x00000001)
+ register_count++;
+ register_bits = register_bits >> 1;
+ }
+
+ switch(addr_mode)
+ {
+ /* Decrement after. */
+ case 0:
+ start_address = (u_buf[0].s_word) - (register_count * 4) + 4;
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*(register_count+1));
+ arm_insn_r->arm_mems[0].len = register_count;
+ while (register_count)
+ {
+ arm_insn_r->arm_mems[register_count].addr = start_address;
+ arm_insn_r->arm_mems[register_count].len = 4;
+ start_address = start_address + 4;
+ register_count--;
+ }
+ break;
+
+ /* Increment after. */
+ case 1:
+ start_address = u_buf[0].s_word;
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*(register_count+1));
+ arm_insn_r->arm_mems[0].len = register_count;
+ while (register_count)
+ {
+ arm_insn_r->arm_mems[register_count].addr = start_address;
+ arm_insn_r->arm_mems[register_count].len = 4;
+ start_address = start_address + 4;
+ register_count--;
+ }
+ break;
+
+ /* Decrement before. */
+ case 2:
+
+ start_address = (u_buf[0].s_word) - (register_count * 4);
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*(register_count+1));
+ arm_insn_r->arm_mems[0].len = register_count;
+ while (register_count)
+ {
+ arm_insn_r->arm_mems[register_count].addr = start_address;
+ arm_insn_r->arm_mems[register_count].len = 4;
+ start_address = start_address + 4;
+ register_count--;
+ }
+ break;
+
+ /* Increment before. */
+ case 3:
+ start_address = u_buf[0].s_word + 4;
+ arm_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*(register_count+1));
+ arm_insn_r->arm_mems[0].len = register_count;
+ while (register_count)
+ {
+ arm_insn_r->arm_mems[register_count].addr = start_address;
+ arm_insn_r->arm_mems[register_count].len = 4;
+ start_address = start_address + 4;
+ register_count--;
+ }
+ break;
+
+ default:
+ /* unreachable. */
+ break;
+ }
+
+ /* base register also changes; based on condition and W bit. */
+ /* we save it anyway without optimization. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ arm_insn_r->arm_regs[0] = 1;
+ arm_insn_r->arm_regs[1] = reg_src1;
+ }
+ return 0;
+}
+
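+/* Record handler for ARM insns with bits <27:25> = 101: branch insns
+ (B, BL, BLX(1)). */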
+static int
+arm_handle_brn_insn (void *data)
+{
+ insn_decode_record *arm_insn_r = (insn_decode_record*) data;
+ /* Handle B, BL, BLX(1) insns. */
+ /* Without optimization we save the link register, and the CPSR
+ for the insns that change the T bit. */
+ arm_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ arm_insn_r->arm_regs[0] = 2;
+ arm_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ arm_insn_r->arm_regs[2] = ARM_LR_REGNUM;
+
+ return 0;
+}
+
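+/* Record handler for ARM insns with bits <27:25> = 110: coprocessor
+ load/store. Not supported yet, so -1 is returned. */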
+static int
+arm_handle_coproc_insn (void *data)
+{
+ return -1;
+}
+
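+/* Record handler for ARM insns with bits <27:25> = 111: coprocessor
+ data-processing/register transfers and SWI; only SWI is handled, via
+ the tdep->arm_swi_record hook. */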
+static int
+arm_handle_coproc_data_proc_insn (void *data)
+{
+
+ insn_decode_record *arm_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (arm_insn_r->gdbarch);
+ struct regcache *reg_cache = arm_insn_r->regcache;
+
+ uint32_t shift_imm = 0;
+ uint32_t reg_src1 = 0, reg_src2 = 0, addr_mode = 0;
+ uint32_t start_address = 0;
+
+ /* Handle the SWI insn; system calls are recorded here. */
+
+ arm_insn_r->opcode = bits (arm_insn_r->arm_insn, 24, 27);
+ if (15 == arm_insn_r->opcode)
+ {
+ /* Handle the ARM syscall (SWI) insn. */
+ if (tdep->arm_swi_record != NULL)
+ {
+ return tdep->arm_swi_record (reg_cache);
+ }
+ }
+ else
+ {
+ return -1;
+ }
+ }
+
+ return -1;
+}
+
+
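+/* Record handler for Thumb insns with bits <15:13> = 000: shift by
+ immediate and add/subtract (register or small immediate). */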
+static int
+thumb_handle_shift_add_sub_insn (void *data)
+{
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep ( thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = thumb_insn_r->regcache;
+
+ uint32_t reg_src1 = 0;
+
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
+
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ thumb_insn_r->arm_regs[0] = 2;
+ thumb_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ thumb_insn_r->arm_regs[2] = reg_src1;
+
+ return 0;
+}
+
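+/* Record handler for Thumb insns with bits <15:13> = 001: MOV/CMP/ADD/SUB
+ with 8-bit immediate. */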
+static int
+thumb_handle_add_sub_cmp_mov_insn (void *data)
+{
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = thumb_insn_r->regcache;
+
+ uint32_t reg_src1 = 0;
+
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
+
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ thumb_insn_r->arm_regs[0] = 2;
+ thumb_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ thumb_insn_r->arm_regs[2] = reg_src1;
+
+ return 0;
+}
+
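+/* Record handler for Thumb insns with bits <15:13> = 010: data-processing
+ (register), special data/branch-exchange, PC-relative literal load, and
+ load/store with register offset. */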
+static int
+thumb_handle_ld_st_reg_offset_insn (void *data)
+{
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = thumb_insn_r->regcache;
+
+ uint32_t reg_src1 = 0, reg_src2 = 0;
+ uint32_t opcode1 = 0, opcode2 = 0, opcode3 = 0;
+
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf[2];
+
+ if (bit (thumb_insn_r->arm_insn, 12))
+ {
+ /* handle load/store register offset. */
+ opcode1 = bits (thumb_insn_r->arm_insn, 11, 12);
+ /* Bits <12:9> select among the register-offset load/store forms. */
+ opcode2 = bits (thumb_insn_r->arm_insn, 9, 12);
+ if ((opcode2 >= 12) && (opcode2 <= 15))
+ {
+ /* LDR(2), LDRB(2) , LDRH(2), LDRSB, LDRSH. */
+ reg_src1 = bits (thumb_insn_r->arm_insn,0, 2);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = reg_src1;
+ }
+ else if ((opcode2 >= 8) && (opcode2 <= 10))
+ {
+ /* STR(2), STRB(2), STRH(2) . */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
+ reg_src2 = bits (thumb_insn_r->arm_insn, 6, 8);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf[0].buf[0]);
+ GET_REG_VAL (reg_cache, reg_src2, &u_buf[1].buf[0]);
+ thumb_insn_r->arm_mems = (struct arm_mem_r *) \
+ xmalloc (sizeof(struct arm_mem_r) * 2);
+ thumb_insn_r->arm_mems[0].len = 1;
+ thumb_insn_r->arm_mems[0].addr = u_buf[0].s_word+u_buf[1].s_word;
+ if (8 == opcode2)
+ thumb_insn_r->arm_mems[0].len = 4; /* STR (2). */
+ else if (10 == opcode2)
+ thumb_insn_r->arm_mems[0].len = 1; /* STRB (2). */
+ else if (9 == opcode2)
+ thumb_insn_r->arm_mems[0].len = 2; /* STRH (2). */
+ }
+ goto end;
+ }
+ else if (bit (thumb_insn_r->arm_insn, 11))
+ {
+ /* handle load from literal pool. */
+ /* LDR(3). */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = reg_src1;
+ goto end;
+ }
+
+ thumb_insn_r->opcode = bits (thumb_insn_r->arm_insn, 13, 15);
+ opcode1 = bits (thumb_insn_r->arm_insn, 10, 12);
+ if (opcode1)
+ {
+ opcode2 = bits (thumb_insn_r->arm_insn, 8, 9);
+ opcode3 = bits (thumb_insn_r->arm_insn, 0, 2);
+ if ((3 == opcode2) && (!opcode3))
+ {
+ /* branch with exchange. */
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ }
+ else
+ {
+ /* format 8; special data processing insns. */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ thumb_insn_r->arm_regs[0] = 2;
+ thumb_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ thumb_insn_r->arm_regs[2] = reg_src1;
+ }
+ }
+ else
+ {
+ /* format 5; data processing insns. */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
+ if (bit (thumb_insn_r->arm_insn, 7))
+ {
+ reg_src1 = reg_src1 + 8;
+ }
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ thumb_insn_r->arm_regs[0] = 2;
+ thumb_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ thumb_insn_r->arm_regs[2] = reg_src1;
+ }
+
+ end:
+ return 0;
+}
+
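+/* Record handler for Thumb insns with bits <15:13> = 011: load/store word
+ or byte with 5-bit immediate offset (LDR(1)/STR(1)/LDRB(1)/STRB(1)). */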
+static int
+thumb_handle_ld_st_imm_offset_insn (void *data)
+{
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = (struct regcache*) thumb_insn_r->regcache;
+
+ uint32_t reg_val1 = 0;
+ uint32_t reg_src1 = 0;
+ uint32_t opcode = 0, immed_5 = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ opcode = bits (thumb_insn_r->arm_insn, 11, 12);
+
+ if (opcode)
+ {
+ /* LDR(1). */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = reg_src1;
+ }
+ else
+ {
+ /* STR(1). */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
+ immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf.buf[0]);
+ thumb_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ thumb_insn_r->arm_mems[0].len = 1;
+ thumb_insn_r->arm_mems[1].len = 4;
+ thumb_insn_r->arm_mems[1].addr = u_buf.s_word + (immed_5 * 4);
+ }
+
+ return 0;
+}
+
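+/* Record handler for Thumb insns with bits <15:13> = 100: load/store
+ halfword with immediate offset and SP-relative load/store
+ (LDRH(1)/STRH(1), LDR(4)/STR(3)). */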
+static int
+thumb_hamdle_ld_st_stack_insn (void *data)
+{
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = (struct regcache*) thumb_insn_r->regcache;
+
+ uint32_t reg_val1 = 0;
+ uint32_t reg_src1 = 0;
+ uint32_t opcode = 0, immed_8 = 0, immed_5 = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ opcode = bits (thumb_insn_r->arm_insn, 11, 12);
+
+ if (3 == opcode)
+ {
+ /* LDR(4). */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = reg_src1;
+ }
+ else if (1 == opcode)
+ {
+ /* LDRH(1). */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 0, 2);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = reg_src1;
+ }
+ else if (2 == opcode)
+ {
+ /* STR(3). */
+ immed_8 = bits (thumb_insn_r->arm_insn, 0, 7);
+ GET_REG_VAL (reg_cache, ARM_SP_REGNUM, &u_buf.buf[0]);
+ thumb_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ thumb_insn_r->arm_mems[0].len = 1;
+ thumb_insn_r->arm_mems[1].len = 4;
+ thumb_insn_r->arm_mems[1].addr = u_buf.s_word + (immed_8 * 4);
+ }
+ else if (0 == opcode)
+ {
+ /* STRH(1). */
+ immed_5 = bits (thumb_insn_r->arm_insn, 6, 10);
+ reg_src1 = bits (thumb_insn_r->arm_insn, 3, 5);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf.buf[0]);
+ thumb_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*2);
+ thumb_insn_r->arm_mems[0].len = 1;
+ thumb_insn_r->arm_mems[1].len = 2;
+ thumb_insn_r->arm_mems[1].addr = u_buf.s_word + (immed_5 * 2);
+ }
+ return 0;
+}
+
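+/* Record handler for Thumb insns with bits <15:13> = 101: add to SP or PC
+ and the miscellaneous group (SP adjust, PUSH, POP, BKPT). */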
+static int
+thumb_handle_misc_insn (void *data)
+{
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = (struct regcache*) thumb_insn_r->regcache;
+
+ uint32_t reg_val1 = 0;
+ uint32_t reg_src1 = 0;
+ uint32_t opcode = 0, opcode1 = 0, opcode2 = 0, immed_8 = 0, immed_5 = 0;
+ uint32_t register_bits = 0, register_count = 0;
+ uint32_t register_list[8] = {0}, index = 0, start_address = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ opcode = bits (thumb_insn_r->arm_insn, 11, 12);
+ opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
+ opcode2 = bits (thumb_insn_r->arm_insn, 9, 12);
+
+ if (14 == opcode2)
+ {
+ /* POP. */
+ register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
+ while (register_bits)
+ {
+ if (register_bits & 0x00000001)
+ register_list[register_count++] = 1;
+ register_bits = register_bits >> 1;
+ }
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc \
+ (sizeof(uint32_t) * (register_count + 3));
+ thumb_insn_r->arm_regs[0] = register_count + 2;
+ thumb_insn_r->arm_regs[register_count + 1] = ARM_PS_REGNUM;
+ thumb_insn_r->arm_regs[register_count + 2] = ARM_SP_REGNUM;
+ for (register_count = 0; register_count < 8; register_count++)
+ {
+ if (register_list[register_count])
+ {
+ thumb_insn_r->arm_regs[index] = register_count;
+ index++;
+ }
+ }
+ }
+ else if (10 == opcode2)
+ {
+ /* PUSH. */
+ register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
+ /* PUSH stores below the current stack pointer. */
+ GET_REG_VAL (reg_cache, ARM_SP_REGNUM, &u_buf.buf[0]);
+ while (register_bits)
+ {
+ if (register_bits & 0x00000001)
+ register_count++;
+ register_bits = register_bits >> 1;
+ }
+ start_address = u_buf.s_word - \
+ (4 * (bit (thumb_insn_r->arm_insn, 8) + register_count)) ;
+ thumb_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*(register_count + 1));
+ thumb_insn_r->arm_mems[0].len = register_count;
+ while (register_count)
+ {
+ thumb_insn_r->arm_mems[register_count].addr = start_address;
+ thumb_insn_r->arm_mems[register_count].len = 4;
+ start_address = start_address + 4;
+ register_count--;
+ }
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t) * 2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = ARM_SP_REGNUM;
+ }
+ else if (0x1E == opcode1)
+ {
+ /* BKPT insn. */
+ /* Handle the enhanced software breakpoint insn, BKPT. */
+ /* CPSR is changed to execute in ARM state, normal interrupts are
+ disabled, and abort mode is entered. */
+ /* The PC is then set according to the (normal or high) vector
+ configuration. */
+ /* FIXME (Oza): what if the user hits a breakpoint and then reverses?
+ In that case we need to go back to the previous CPSR and PC. */
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ thumb_insn_r->arm_regs[0] = 2;
+ thumb_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ thumb_insn_r->arm_regs[2] = ARM_LR_REGNUM;
+ /* save SPSR also; how?. */
+ }
+ else if ((0 == opcode) || (1 == opcode))
+ {
+ /* ADD(5), ADD(6). */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = reg_src1;
+ }
+ else if (2 == opcode)
+ {
+ /* ADD(7), SUB(4). */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*2);
+ thumb_insn_r->arm_regs[0] = 1;
+ thumb_insn_r->arm_regs[1] = ARM_SP_REGNUM;
+ }
+
+
+ return 0;
+}
+
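+/* Record handler for Thumb insns with bits <15:13> = 110: load/store
+ multiple (LDMIA/STMIA), conditional branch, and SWI. */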
+static int
+thumb_handle_swi_insn (void *data)
+{
+
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = (struct regcache*) thumb_insn_r->regcache;
+
+ uint32_t reg_val1 = 0;
+ uint32_t reg_src1 = 0;
+ uint32_t opcode1 = 0, opcode2 = 0, register_bits = 0, register_count = 0;
+ uint32_t register_list[8] = {0}, index = 0, start_address = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ opcode1 = bits (thumb_insn_r->arm_insn, 8, 12);
+ opcode2 = bits (thumb_insn_r->arm_insn, 11, 12);
+
+ if (1 == opcode2)
+ {
+
+ /* LDMIA. */
+ register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
+ /* get Rn. */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
+ while (register_bits)
+ {
+ if (register_bits & 0x00000001)
+ register_list[register_count++] = 1;
+ register_bits = register_bits >> 1;
+ }
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc \
+ (sizeof(uint32_t) * (register_count + 2));
+ thumb_insn_r->arm_regs[0] = register_count + 1;
+ thumb_insn_r->arm_regs[register_count + 1] = reg_src1;
+ for (register_count = 0; register_count < 8; register_count++)
+ {
+ if (register_list[register_count])
+ {
+ thumb_insn_r->arm_regs[index] = register_count;
+ index++;
+ }
+ }
+ }
+ else if (0 == opcode2)
+ {
+ /* Handle STMIA. */
+ register_bits = bits (thumb_insn_r->arm_insn, 0, 7);
+ /* get Rn. */
+ reg_src1 = bits (thumb_insn_r->arm_insn, 8, 10);
+ GET_REG_VAL (reg_cache, reg_src1, &u_buf.buf[0]);
+ while (register_bits)
+ {
+ if (register_bits & 0x00000001)
+ register_count++;
+ register_bits = register_bits >> 1;
+ }
+ start_address = u_buf.s_word;
+ thumb_insn_r->arm_mems = (struct arm_mem_r *)xmalloc (sizeof(struct
+ arm_mem_r)*(register_count+1));
+ thumb_insn_r->arm_mems[0].len = register_count;
+ while (register_count)
+ {
+ thumb_insn_r->arm_mems[register_count].addr = start_address;
+ thumb_insn_r->arm_mems[register_count].len = 4;
+ start_address = start_address + 4;
+ register_count--;
+ }
+ }
+ else if (0x1F == opcode1)
+ {
+ /* handle arm syscall insn. */
+ if (tdep->arm_swi_record != NULL)
+ {
+ printf("handling syscall swi insn\n");
+ tdep->arm_swi_record(reg_cache);
+ }
+ else
+ {
+ return -1;
+ }
+ }
+
+ /* B(1), the conditional branch, is taken care of automatically in
+ arm_process_record, since the PC is always recorded there. */
+
+ return 0;
+}
+
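+/* Record handler for Thumb insns with bits <15:13> = 111: unconditional
+ branch B(2) and the BL/BLX(1) prefix/suffix halves. */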
+static int
+thumb_handle_branch_insn (void *data)
+{
+ insn_decode_record *thumb_insn_r = (insn_decode_record*) data;
+ struct gdbarch_tdep *tdep = gdbarch_tdep (thumb_insn_r->gdbarch);
+ struct regcache *reg_cache = (struct regcache*) thumb_insn_r->regcache;
+
+ uint32_t reg_val1=0;
+ uint32_t reg_src1=0;
+ uint32_t opcode = 0, immed_5 = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+
+ /* BL , BLX(1). */
+ thumb_insn_r->arm_regs = (uint32_t*)xmalloc (sizeof(uint32_t)*3);
+ thumb_insn_r->arm_regs[0] = 2;
+ thumb_insn_r->arm_regs[1] = ARM_PS_REGNUM;
+ thumb_insn_r->arm_regs[2] = ARM_LR_REGNUM;
+
+ /* B(2) is taken care of automatically in arm_process_record, since
+ the PC is always recorded there. */
+ return 0;
+}
+
+
+
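+/* Read the insn at ARM_RECORD->this_addr (INSN_SIZE selects ARM or Thumb)
+ and dispatch it to the handler chosen by its top-level opcode bits.
+ Returns the handler's result, or -1 if the memory read fails. */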
+static int
+decode_insn (insn_decode_record *arm_record, uint32_t insn_size)
+{
+
+ /* Bits 25-27 (counting from 0) select the type of an ARM instruction. */
+ int (*const arm_handle_insn[NO_OF_TYPE_OF_ARM_INSNS]) (void*) =
+ {
+ arm_handle_data_proc_misc_load_str_insn, /* 000. */
+ arm_handle_data_proc_imm_insn, /* 001. */
+ arm_handle_ld_st_imm_offset_insn, /* 010. */
+ arm_handle_ld_st_reg_offset_insn, /* 011. */
+ arm_handle_ld_st_multiple_insn, /* 100. */
+ arm_handle_brn_insn, /* 101. */
+ arm_handle_coproc_insn, /* 110. */
+ arm_handle_coproc_data_proc_insn /* 111. */
+ };
+
+ /* Bits 13-15 (counting from 0) select the type of a Thumb instruction. */
+ int (*const thumb_handle_insn[NO_OF_TYPE_OF_THUMB_INSNS]) (void*) =
+ {
+ thumb_handle_shift_add_sub_insn, /* 000. */
+ thumb_handle_add_sub_cmp_mov_insn, /* 001. */
+ thumb_handle_ld_st_reg_offset_insn, /* 010. */
+ thumb_handle_ld_st_imm_offset_insn, /* 011. */
+ thumb_handle_ld_st_stack_insn, /* 100. */
+ thumb_handle_misc_insn, /* 101. */
+ thumb_handle_swi_insn, /* 110. */
+ thumb_handle_branch_insn /* 111. */
+ };
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[ARM_INSN_SIZE_BYTES]; /* Big enough for ARM or Thumb. */
+ } u_buf;
+
+ uint32_t ret=0;
+
+ memset (&u_buf, 0, sizeof(u_buf));
+ if (target_read_memory (arm_record->this_addr, &u_buf.buf[0], insn_size))
+ {
+ if (record_debug)
+ {
+ printf_unfiltered (_("Process record: error reading memory at "
+ "addr %s len = %d.\n"),
+ paddress (arm_record->gdbarch, arm_record->this_addr), insn_size);
+ }
+ return -1;
+ }
+ else if (ARM_INSN_SIZE_BYTES == insn_size)
+ {
+ arm_record->arm_insn = u_buf.s_word;
+ arm_record->cond = bits (arm_record->arm_insn, 28, 31);
+ arm_record->id = bits (arm_record->arm_insn, 25, 27);
+ ret = (0x0F != arm_record->cond)? \
+ arm_handle_insn[arm_record->id] ((void*)arm_record) : \
+ handle_extension_space(arm_record);
+ }
+ else if (THUMB_INSN_SIZE_BYTES == insn_size)
+ {
+ arm_record->arm_insn = u_buf.s_word;
+ arm_record->id = bits (arm_record->arm_insn, 13, 15);
+ ret = thumb_handle_insn[arm_record->id] ((void*)arm_record);
+ }
+ return ret;
+}
+
+/* Parse the current instruction, and record the values of the registers
+ and memory that it will change into "record_arch_list".
+ Return -1 if something is wrong. */
+
+int
+arm_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
+ CORE_ADDR insn_addr)
+{
+
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ uint32_t no_of_rec=0;
+ uint32_t ret=0;
+ ULONGEST t_bit = 0;
+
+ union
+ {
+ uint32_t s_word;
+ gdb_byte buf[4];
+ } u_buf;
+
+ insn_decode_record arm_record;
+ memset (&u_buf, 0, sizeof(u_buf));
+
+ memset (&arm_record, 0, sizeof (insn_decode_record));
+ arm_record.regcache = regcache;
+ arm_record.this_addr = insn_addr;
+ arm_record.gdbarch = gdbarch;
+
+
+ if (record_debug > 1)
+ {
+ fprintf_unfiltered (gdb_stdlog, "Process record: arm_process_record "
+ "addr = %s\n",
+ paddress (gdbarch, arm_record.this_addr));
+ }
+
+ /* Check whether the insn is a Thumb or an ARM one. */
+
+ t_bit = arm_psr_thumb_bit (arm_record.gdbarch);
+ GET_REG_VAL (arm_record.regcache, ARM_PS_REGNUM, &u_buf.buf[0]);
+
+ if (!(u_buf.s_word & t_bit))
+ {
+ /* we are decoding arm insn. */
+ ret = decode_insn (&arm_record, ARM_INSN_SIZE_BYTES);
+ }
+ else
+ {
+ /* we are decoding thumb insn. */
+ ret = decode_insn (&arm_record, THUMB_INSN_SIZE_BYTES);
+ }
+
+ /* record registers. */
+ ARM_RECORD_ARCH_LIST_ADD_REG(ARM_PC_REGNUM);
+ if (arm_record.arm_regs)
+ {
+ for (no_of_rec=1;no_of_rec<=arm_record.arm_regs[0];no_of_rec++)
+ {
+ if (ARM_RECORD_ARCH_LIST_ADD_REG (arm_record.arm_regs[no_of_rec]))
+ ret = -1;
+ }
+ }
+ /* record memories. */
+ if (arm_record.arm_mems)
+ {
+ for (no_of_rec=1;no_of_rec<=arm_record.arm_mems[0].len;no_of_rec++)
+ {
+ if (record_arch_list_add_mem \
+ ((CORE_ADDR)arm_record.arm_mems[no_of_rec].addr,
+ arm_record.arm_mems[no_of_rec].len))
+ ret = -1;
+ }
+ }
+
+ if (record_arch_list_add_end ())
+ ret = -1;
+
+ if (arm_record.arm_regs)
+ xfree (arm_record.arm_regs);
+ if (arm_record.arm_mems)
+ xfree (arm_record.arm_mems);
+
+ return ret;
+}
diff -urN arm_orig/arm-tdep.h arm_new/arm-tdep.h
--- arm_orig/arm-tdep.h 2011-03-03 09:21:13.000000000 +0530
+++ arm_new/arm-tdep.h 2011-04-15 13:11:15.000000000 +0530
@@ -200,6 +200,9 @@
/* Return the expected next PC if FRAME is stopped at a syscall
instruction. */
CORE_ADDR (*syscall_next_pc) (struct frame_info *frame);
+
+ /* Parse swi args. */
+ int (*arm_swi_record) (struct regcache *regcache);
};
/* Structures used for displaced stepping. */
@@ -310,6 +313,10 @@
struct displaced_step_closure *,
CORE_ADDR, CORE_ADDR, struct regcache *);
+extern int arm_process_record (struct gdbarch *gdbarch,
+ struct regcache *regcache, CORE_ADDR addr);
+
+
/* Functions exported from armbsd-tdep.h. */
/* Return the appropriate register set for the core section identified