1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
32 #include "breakpoints.h"
33 #include "arm_disassembler.h"
34 #include "binarybuffer.h"
35 #include "algorithm.h"
39 static const uint8_t arm_usr_indices[17] = {
40 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
43 static const uint8_t arm_fiq_indices[8] = {
44 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
47 static const uint8_t arm_irq_indices[3] = {
48 23, 24, ARMV4_5_SPSR_IRQ,
51 static const uint8_t arm_svc_indices[3] = {
52 25, 26, ARMV4_5_SPSR_SVC,
55 static const uint8_t arm_abt_indices[3] = {
56 27, 28, ARMV4_5_SPSR_ABT,
59 static const uint8_t arm_und_indices[3] = {
60 29, 30, ARMV4_5_SPSR_UND,
63 static const uint8_t arm_mon_indices[3] = {
70 /* For user and system modes, these list indices for all registers.
71 * otherwise they're just indices for the shadow registers and SPSR.
73 unsigned short n_indices;
74 const uint8_t *indices;
76 /* Seven modes are standard from ARM7 on. "System" and "User" share
77 * the same registers; other modes shadow from 3 to 8 registers.
81 .psr = ARMV4_5_MODE_USR,
82 .n_indices = ARRAY_SIZE(arm_usr_indices),
83 .indices = arm_usr_indices,
87 .psr = ARMV4_5_MODE_FIQ,
88 .n_indices = ARRAY_SIZE(arm_fiq_indices),
89 .indices = arm_fiq_indices,
93 .psr = ARMV4_5_MODE_SVC,
94 .n_indices = ARRAY_SIZE(arm_svc_indices),
95 .indices = arm_svc_indices,
99 .psr = ARMV4_5_MODE_ABT,
100 .n_indices = ARRAY_SIZE(arm_abt_indices),
101 .indices = arm_abt_indices,
105 .psr = ARMV4_5_MODE_IRQ,
106 .n_indices = ARRAY_SIZE(arm_irq_indices),
107 .indices = arm_irq_indices,
110 .name = "Undefined instruction",
111 .psr = ARMV4_5_MODE_UND,
112 .n_indices = ARRAY_SIZE(arm_und_indices),
113 .indices = arm_und_indices,
117 .psr = ARMV4_5_MODE_SYS,
118 .n_indices = ARRAY_SIZE(arm_usr_indices),
119 .indices = arm_usr_indices,
121 /* TrustZone "Security Extensions" add a secure monitor mode.
122 * This is distinct from a "debug monitor" which can support
123 * non-halting debug, in conjunction with some debuggers.
126 .name = "Secure Monitor",
128 .n_indices = ARRAY_SIZE(arm_mon_indices),
129 .indices = arm_mon_indices,
133 /** Map PSR mode bits to the name of an ARM processor operating mode. */
134 const char *arm_mode_name(unsigned psr_mode)
136 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
137 if (arm_mode_data[i].psr == psr_mode)
138 return arm_mode_data[i].name;
140 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
141 return "UNRECOGNIZED";
144 /** Return true iff the parameter denotes a valid ARM processor mode. */
145 bool is_arm_mode(unsigned psr_mode)
147 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
148 if (arm_mode_data[i].psr == psr_mode)
154 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
155 int armv4_5_mode_to_number(enum armv4_5_mode mode)
158 case ARMV4_5_MODE_ANY:
159 /* map MODE_ANY to user mode */
160 case ARMV4_5_MODE_USR:
162 case ARMV4_5_MODE_FIQ:
164 case ARMV4_5_MODE_IRQ:
166 case ARMV4_5_MODE_SVC:
168 case ARMV4_5_MODE_ABT:
170 case ARMV4_5_MODE_UND:
172 case ARMV4_5_MODE_SYS:
177 LOG_ERROR("invalid mode value encountered %d", mode);
182 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
183 enum armv4_5_mode armv4_5_number_to_mode(int number)
187 return ARMV4_5_MODE_USR;
189 return ARMV4_5_MODE_FIQ;
191 return ARMV4_5_MODE_IRQ;
193 return ARMV4_5_MODE_SVC;
195 return ARMV4_5_MODE_ABT;
197 return ARMV4_5_MODE_UND;
199 return ARMV4_5_MODE_SYS;
203 LOG_ERROR("mode index out of bounds %d", number);
204 return ARMV4_5_MODE_ANY;
/* Printable names for the armv4_5_state enumeration, indexed by its
 * values (ARM, Thumb, Jazelle, ThumbEE).
 */
char* armv4_5_state_strings[] =
{
	"ARM", "Thumb", "Jazelle", "ThumbEE",
};
213 /* Templates for ARM core registers.
215 * NOTE: offsets in this table are coupled to the arm_mode_data
216 * table above, the armv4_5_core_reg_map array below, and also to
217 * the ARMV4_5_*PSR* symols.
219 static const struct {
220 /* The name is used for e.g. the "regs" command. */
223 /* The {cookie, mode} tuple uniquely identifies one register.
224 * In a given mode, cookies 0..15 map to registers R0..R15,
225 * with R13..R15 usually called SP, LR, PC.
227 * MODE_ANY is used as *input* to the mapping, and indicates
228 * various special cases (sigh) and errors.
230 * Cookie 16 is (currently) confusing, since it indicates
231 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
232 * (Exception modes have both CPSR and SPSR registers ...)
235 enum armv4_5_mode mode;
236 } arm_core_regs[] = {
237 { .name = "r0", .cookie = 0, .mode = ARMV4_5_MODE_ANY, },
238 { .name = "r1", .cookie = 1, .mode = ARMV4_5_MODE_ANY, },
239 { .name = "r2", .cookie = 2, .mode = ARMV4_5_MODE_ANY, },
240 { .name = "r3", .cookie = 3, .mode = ARMV4_5_MODE_ANY, },
241 { .name = "r4", .cookie = 4, .mode = ARMV4_5_MODE_ANY, },
242 { .name = "r5", .cookie = 5, .mode = ARMV4_5_MODE_ANY, },
243 { .name = "r6", .cookie = 6, .mode = ARMV4_5_MODE_ANY, },
244 { .name = "r7", .cookie = 7, .mode = ARMV4_5_MODE_ANY, },
246 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
247 * them as MODE_ANY creates special cases.
249 { .name = "r8", .cookie = 8, .mode = ARMV4_5_MODE_ANY, },
250 { .name = "r9", .cookie = 9, .mode = ARMV4_5_MODE_ANY, },
251 { .name = "r10", .cookie = 10, .mode = ARMV4_5_MODE_ANY, },
252 { .name = "r11", .cookie = 11, .mode = ARMV4_5_MODE_ANY, },
253 { .name = "r12", .cookie = 12, .mode = ARMV4_5_MODE_ANY, },
255 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
256 { .name = "sp_usr", .cookie = 13, .mode = ARMV4_5_MODE_USR, },
257 { .name = "lr_usr", .cookie = 14, .mode = ARMV4_5_MODE_USR, },
259 { .name = "pc", .cookie = 15, .mode = ARMV4_5_MODE_ANY, },
261 { .name = "r8_fiq", .cookie = 8, .mode = ARMV4_5_MODE_FIQ, },
262 { .name = "r9_fiq", .cookie = 9, .mode = ARMV4_5_MODE_FIQ, },
263 { .name = "r10_fiq", .cookie = 10, .mode = ARMV4_5_MODE_FIQ, },
264 { .name = "r11_fiq", .cookie = 11, .mode = ARMV4_5_MODE_FIQ, },
265 { .name = "r12_fiq", .cookie = 12, .mode = ARMV4_5_MODE_FIQ, },
267 { .name = "lr_fiq", .cookie = 13, .mode = ARMV4_5_MODE_FIQ, },
268 { .name = "sp_fiq", .cookie = 14, .mode = ARMV4_5_MODE_FIQ, },
270 { .name = "lr_irq", .cookie = 13, .mode = ARMV4_5_MODE_IRQ, },
271 { .name = "sp_irq", .cookie = 14, .mode = ARMV4_5_MODE_IRQ, },
273 { .name = "lr_svc", .cookie = 13, .mode = ARMV4_5_MODE_SVC, },
274 { .name = "sp_svc", .cookie = 14, .mode = ARMV4_5_MODE_SVC, },
276 { .name = "lr_abt", .cookie = 13, .mode = ARMV4_5_MODE_ABT, },
277 { .name = "sp_abt", .cookie = 14, .mode = ARMV4_5_MODE_ABT, },
279 { .name = "lr_und", .cookie = 13, .mode = ARMV4_5_MODE_UND, },
280 { .name = "sp_und", .cookie = 14, .mode = ARMV4_5_MODE_UND, },
282 { .name = "cpsr", .cookie = 16, .mode = ARMV4_5_MODE_ANY, },
283 { .name = "spsr_fiq", .cookie = 16, .mode = ARMV4_5_MODE_FIQ, },
284 { .name = "spsr_irq", .cookie = 16, .mode = ARMV4_5_MODE_IRQ, },
285 { .name = "spsr_svc", .cookie = 16, .mode = ARMV4_5_MODE_SVC, },
286 { .name = "spsr_abt", .cookie = 16, .mode = ARMV4_5_MODE_ABT, },
287 { .name = "spsr_und", .cookie = 16, .mode = ARMV4_5_MODE_UND, },
289 { .name = "lr_mon", .cookie = 13, .mode = ARM_MODE_MON, },
290 { .name = "sp_mon", .cookie = 14, .mode = ARM_MODE_MON, },
291 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
/* map core mode (USR, FIQ, ...) and register number to
 * indices into the register cache
 */
const int armv4_5_core_reg_map[8][17] =
{
	{	/* USR */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* FIQ (8 shadows of USR, vs normal 3) */
		0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
	},
	{	/* IRQ */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
	},
	{	/* SVC */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
	},
	{	/* ABT */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
	},
	{	/* UND */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
	},
	{	/* SYS (same registers as USR) */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
	},
	{	/* MON */
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
	}
};
325 static const uint8_t arm_gdb_dummy_fp_value[12];
328 * Dummy FPA registers are required to support GDB on ARM.
329 * Register packets require eight obsolete FPA register values.
330 * Modern ARM cores use Vector Floating Point (VFP), if they
331 * have any floating point support. VFP is not FPA-compatible.
333 struct reg arm_gdb_dummy_fp_reg =
335 .name = "GDB dummy FPA register",
336 .value = (uint8_t *) arm_gdb_dummy_fp_value,
341 static const uint8_t arm_gdb_dummy_fps_value[4];
344 * Dummy FPA status registers are required to support GDB on ARM.
345 * Register packets require an obsolete FPA status register.
347 struct reg arm_gdb_dummy_fps_reg =
349 .name = "GDB dummy FPA status register",
350 .value = (uint8_t *) arm_gdb_dummy_fps_value,
355 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
357 static void arm_gdb_dummy_init(void)
359 register_init_dummy(&arm_gdb_dummy_fp_reg);
360 register_init_dummy(&arm_gdb_dummy_fps_reg);
363 static int armv4_5_get_core_reg(struct reg *reg)
366 struct armv4_5_core_reg *armv4_5 = reg->arch_info;
367 struct target *target = armv4_5->target;
369 if (target->state != TARGET_HALTED)
371 LOG_ERROR("Target not halted");
372 return ERROR_TARGET_NOT_HALTED;
375 retval = armv4_5->armv4_5_common->read_core_reg(target, armv4_5->num, armv4_5->mode);
376 if (retval == ERROR_OK)
382 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
384 struct armv4_5_core_reg *armv4_5 = reg->arch_info;
385 struct target *target = armv4_5->target;
386 struct armv4_5_common_s *armv4_5_target = target_to_armv4_5(target);
387 uint32_t value = buf_get_u32(buf, 0, 32);
389 if (target->state != TARGET_HALTED)
391 LOG_ERROR("Target not halted");
392 return ERROR_TARGET_NOT_HALTED;
395 if (reg == &armv4_5_target->core_cache->reg_list[ARMV4_5_CPSR])
399 /* T bit should be set */
400 if (armv4_5_target->core_state == ARMV4_5_STATE_ARM)
402 /* change state to Thumb */
403 LOG_DEBUG("changing to Thumb state");
404 armv4_5_target->core_state = ARMV4_5_STATE_THUMB;
409 /* T bit should be cleared */
410 if (armv4_5_target->core_state == ARMV4_5_STATE_THUMB)
412 /* change state to ARM */
413 LOG_DEBUG("changing to ARM state");
414 armv4_5_target->core_state = ARMV4_5_STATE_ARM;
418 if (armv4_5_target->core_mode != (enum armv4_5_mode)(value & 0x1f))
420 LOG_DEBUG("changing ARM core mode to '%s'",
421 arm_mode_name(value & 0x1f));
422 armv4_5_target->core_mode = value & 0x1f;
423 armv4_5_target->write_core_reg(target, 16, ARMV4_5_MODE_ANY, value);
427 buf_set_u32(reg->value, 0, 32, value);
434 static const struct reg_arch_type arm_reg_type = {
435 .get = armv4_5_get_core_reg,
436 .set = armv4_5_set_core_reg,
439 struct reg_cache* armv4_5_build_reg_cache(struct target *target, struct arm *armv4_5_common)
441 int num_regs = ARRAY_SIZE(arm_core_regs);
442 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
443 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
444 struct armv4_5_core_reg *arch_info = calloc(num_regs,
445 sizeof(struct armv4_5_core_reg));
448 if (!cache || !reg_list || !arch_info) {
455 cache->name = "ARM registers";
457 cache->reg_list = reg_list;
460 for (i = 0; i < num_regs; i++)
462 /* Skip registers this core doesn't expose */
463 if (arm_core_regs[i].mode == ARM_MODE_MON
464 && armv4_5_common->core_type != ARM_MODE_MON)
467 /* REVISIT handle Cortex-M, which only shadows R13/SP */
469 arch_info[i].num = arm_core_regs[i].cookie;
470 arch_info[i].mode = arm_core_regs[i].mode;
471 arch_info[i].target = target;
472 arch_info[i].armv4_5_common = armv4_5_common;
474 reg_list[i].name = (char *) arm_core_regs[i].name;
475 reg_list[i].size = 32;
476 reg_list[i].value = &arch_info[i].value;
477 reg_list[i].type = &arm_reg_type;
478 reg_list[i].arch_info = &arch_info[i];
486 int armv4_5_arch_state(struct target *target)
488 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
490 if (armv4_5->common_magic != ARMV4_5_COMMON_MAGIC)
492 LOG_ERROR("BUG: called for a non-ARMv4/5 target");
496 LOG_USER("target halted in %s state due to %s, current mode: %s\ncpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "",
497 armv4_5_state_strings[armv4_5->core_state],
498 Jim_Nvp_value2name_simple(nvp_target_debug_reason, target->debug_reason)->name,
499 arm_mode_name(armv4_5->core_mode),
500 buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32),
501 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
506 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
507 cache->reg_list[armv4_5_core_reg_map[mode][num]]
509 COMMAND_HANDLER(handle_armv4_5_reg_command)
511 struct target *target = get_current_target(CMD_CTX);
512 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
516 if (!is_arm(armv4_5))
518 command_print(CMD_CTX, "current target isn't an ARM");
522 if (target->state != TARGET_HALTED)
524 command_print(CMD_CTX, "error: target must be halted for register accesses");
528 if (!is_arm_mode(armv4_5->core_mode))
531 if (!armv4_5->full_context) {
532 command_print(CMD_CTX, "error: target doesn't support %s",
537 num_regs = armv4_5->core_cache->num_regs;
538 regs = armv4_5->core_cache->reg_list;
540 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
545 /* label this bank of registers (or shadows) */
546 switch (arm_mode_data[mode].psr) {
547 case ARMV4_5_MODE_SYS:
549 case ARMV4_5_MODE_USR:
550 name = "System and User";
554 if (armv4_5->core_type != ARM_MODE_MON)
558 name = arm_mode_data[mode].name;
562 command_print(CMD_CTX, "%s%s mode %sregisters",
565 /* display N rows of up to 4 registers each */
566 for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
570 for (unsigned j = 0; j < 4; j++, i++) {
572 struct reg *reg = regs;
574 if (i >= arm_mode_data[mode].n_indices)
577 reg += arm_mode_data[mode].indices[i];
579 /* REVISIT be smarter about faults... */
581 armv4_5->full_context(target);
583 value = buf_get_u32(reg->value, 0, 32);
584 output_len += snprintf(output + output_len,
585 sizeof(output) - output_len,
586 "%8s: %8.8" PRIx32 " ",
589 command_print(CMD_CTX, "%s", output);
596 COMMAND_HANDLER(handle_armv4_5_core_state_command)
598 struct target *target = get_current_target(CMD_CTX);
599 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
601 if (!is_arm(armv4_5))
603 command_print(CMD_CTX, "current target isn't an ARM");
609 if (strcmp(CMD_ARGV[0], "arm") == 0)
611 armv4_5->core_state = ARMV4_5_STATE_ARM;
613 if (strcmp(CMD_ARGV[0], "thumb") == 0)
615 armv4_5->core_state = ARMV4_5_STATE_THUMB;
619 command_print(CMD_CTX, "core state: %s", armv4_5_state_strings[armv4_5->core_state]);
624 COMMAND_HANDLER(handle_armv4_5_disassemble_command)
626 int retval = ERROR_OK;
627 struct target *target = get_current_target(CMD_CTX);
628 struct arm *arm = target ? target_to_arm(target) : NULL;
634 command_print(CMD_CTX, "current target isn't an ARM");
640 if (strcmp(CMD_ARGV[2], "thumb") != 0)
645 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
648 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
649 if (address & 0x01) {
651 command_print(CMD_CTX, "Disassemble as Thumb");
659 command_print(CMD_CTX,
660 "usage: arm disassemble <address> [<count> ['thumb']]");
665 while (count-- > 0) {
666 struct arm_instruction cur_instruction;
669 /* Always use Thumb2 disassembly for best handling
670 * of 32-bit BL/BLX, and to work with newer cores
671 * (some ARMv6, all ARMv7) that use Thumb2.
673 retval = thumb2_opcode(target, address,
675 if (retval != ERROR_OK)
680 retval = target_read_u32(target, address, &opcode);
681 if (retval != ERROR_OK)
683 retval = arm_evaluate_opcode(opcode, address,
684 &cur_instruction) != ERROR_OK;
685 if (retval != ERROR_OK)
688 command_print(CMD_CTX, "%s", cur_instruction.text);
689 address += cur_instruction.instruction_size;
695 int armv4_5_register_commands(struct command_context *cmd_ctx)
697 struct command *armv4_5_cmd;
699 armv4_5_cmd = register_command(cmd_ctx, NULL, "arm",
701 "generic ARM commands");
703 register_command(cmd_ctx, armv4_5_cmd, "reg",
704 handle_armv4_5_reg_command, COMMAND_EXEC,
705 "display ARM core registers");
706 register_command(cmd_ctx, armv4_5_cmd, "core_state",
707 handle_armv4_5_core_state_command, COMMAND_EXEC,
708 "display/change ARM core state <arm | thumb>");
709 register_command(cmd_ctx, armv4_5_cmd, "disassemble",
710 handle_armv4_5_disassemble_command, COMMAND_EXEC,
711 "disassemble instructions "
712 "<address> [<count> ['thumb']]");
717 int armv4_5_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size)
719 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
722 if (!is_arm_mode(armv4_5->core_mode))
726 *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
728 for (i = 0; i < 16; i++)
730 (*reg_list)[i] = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, i);
733 for (i = 16; i < 24; i++)
735 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
738 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
739 (*reg_list)[25] = &armv4_5->core_cache->reg_list[ARMV4_5_CPSR];
744 /* wait for execution to complete and check exit point */
745 static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
748 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
750 if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
754 if (target->state != TARGET_HALTED)
756 if ((retval = target_halt(target)) != ERROR_OK)
758 if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
762 return ERROR_TARGET_TIMEOUT;
765 /* fast exit: ARMv5+ code can use BKPT */
766 if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
767 0, 32) != exit_point)
769 LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
770 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
771 return ERROR_TARGET_TIMEOUT;
777 int armv4_5_run_algorithm_inner(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info, int (*run_it)(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info))
779 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
780 struct armv4_5_algorithm *armv4_5_algorithm_info = arch_info;
781 enum armv4_5_state core_state = armv4_5->core_state;
782 enum armv4_5_mode core_mode = armv4_5->core_mode;
783 uint32_t context[17];
785 int exit_breakpoint_size = 0;
787 int retval = ERROR_OK;
788 LOG_DEBUG("Running algorithm");
790 if (armv4_5_algorithm_info->common_magic != ARMV4_5_COMMON_MAGIC)
792 LOG_ERROR("current target isn't an ARMV4/5 target");
793 return ERROR_TARGET_INVALID;
796 if (target->state != TARGET_HALTED)
798 LOG_WARNING("target not halted");
799 return ERROR_TARGET_NOT_HALTED;
802 if (!is_arm_mode(armv4_5->core_mode))
805 /* armv5 and later can terminate with BKPT instruction; less overhead */
806 if (!exit_point && armv4_5->is_armv4)
808 LOG_ERROR("ARMv4 target needs HW breakpoint location");
812 for (i = 0; i <= 16; i++)
814 if (!ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).valid)
815 armv4_5->read_core_reg(target, i, armv4_5_algorithm_info->core_mode);
816 context[i] = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32);
818 cpsr = buf_get_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32);
820 for (i = 0; i < num_mem_params; i++)
822 if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
828 for (i = 0; i < num_reg_params; i++)
830 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
833 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
834 return ERROR_INVALID_ARGUMENTS;
837 if (reg->size != reg_params[i].size)
839 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
840 return ERROR_INVALID_ARGUMENTS;
843 if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
849 armv4_5->core_state = armv4_5_algorithm_info->core_state;
850 if (armv4_5->core_state == ARMV4_5_STATE_ARM)
851 exit_breakpoint_size = 4;
852 else if (armv4_5->core_state == ARMV4_5_STATE_THUMB)
853 exit_breakpoint_size = 2;
856 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
857 return ERROR_INVALID_ARGUMENTS;
860 if (armv4_5_algorithm_info->core_mode != ARMV4_5_MODE_ANY)
862 LOG_DEBUG("setting core_mode: 0x%2.2x", armv4_5_algorithm_info->core_mode);
863 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 5, armv4_5_algorithm_info->core_mode);
864 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
865 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
868 /* terminate using a hardware or (ARMv5+) software breakpoint */
869 if (exit_point && (retval = breakpoint_add(target, exit_point,
870 exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
872 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
873 return ERROR_TARGET_FAILURE;
876 if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
881 retval = run_it(target, exit_point, timeout_ms, arch_info);
884 breakpoint_remove(target, exit_point);
886 if (retval != ERROR_OK)
889 for (i = 0; i < num_mem_params; i++)
891 if (mem_params[i].direction != PARAM_OUT)
892 if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
898 for (i = 0; i < num_reg_params; i++)
900 if (reg_params[i].direction != PARAM_OUT)
903 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
906 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
907 retval = ERROR_INVALID_ARGUMENTS;
911 if (reg->size != reg_params[i].size)
913 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
914 retval = ERROR_INVALID_ARGUMENTS;
918 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
922 for (i = 0; i <= 16; i++)
925 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32);
926 if (regvalue != context[i])
928 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).name, context[i]);
929 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).value, 0, 32, context[i]);
930 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).valid = 1;
931 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, armv4_5_algorithm_info->core_mode, i).dirty = 1;
934 buf_set_u32(armv4_5->core_cache->reg_list[ARMV4_5_CPSR].value, 0, 32, cpsr);
935 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].valid = 1;
936 armv4_5->core_cache->reg_list[ARMV4_5_CPSR].dirty = 1;
938 armv4_5->core_state = core_state;
939 armv4_5->core_mode = core_mode;
944 int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
946 return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
950 * Runs ARM code in the target to calculate a CRC32 checksum.
952 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
954 int arm_checksum_memory(struct target *target,
955 uint32_t address, uint32_t count, uint32_t *checksum)
957 struct working_area *crc_algorithm;
958 struct armv4_5_algorithm armv4_5_info;
959 struct reg_param reg_params[2];
963 static const uint32_t arm_crc_code[] = {
964 0xE1A02000, /* mov r2, r0 */
965 0xE3E00000, /* mov r0, #0xffffffff */
966 0xE1A03001, /* mov r3, r1 */
967 0xE3A04000, /* mov r4, #0 */
968 0xEA00000B, /* b ncomp */
970 0xE7D21004, /* ldrb r1, [r2, r4] */
971 0xE59F7030, /* ldr r7, CRC32XOR */
972 0xE0200C01, /* eor r0, r0, r1, asl 24 */
973 0xE3A05000, /* mov r5, #0 */
975 0xE3500000, /* cmp r0, #0 */
976 0xE1A06080, /* mov r6, r0, asl #1 */
977 0xE2855001, /* add r5, r5, #1 */
978 0xE1A00006, /* mov r0, r6 */
979 0xB0260007, /* eorlt r0, r6, r7 */
980 0xE3550008, /* cmp r5, #8 */
981 0x1AFFFFF8, /* bne loop */
982 0xE2844001, /* add r4, r4, #1 */
984 0xE1540003, /* cmp r4, r3 */
985 0x1AFFFFF1, /* bne nbyte */
987 0xEAFFFFFE, /* b end */
989 0x04C11DB7 /* .word 0x04C11DB7 */
992 retval = target_alloc_working_area(target,
993 sizeof(arm_crc_code), &crc_algorithm);
994 if (retval != ERROR_OK)
997 /* convert code into a buffer in target endianness */
998 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
999 retval = target_write_u32(target,
1000 crc_algorithm->address + i * sizeof(uint32_t),
1002 if (retval != ERROR_OK)
1006 armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
1007 armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
1008 armv4_5_info.core_state = ARMV4_5_STATE_ARM;
1010 init_reg_param(®_params[0], "r0", 32, PARAM_IN_OUT);
1011 init_reg_param(®_params[1], "r1", 32, PARAM_OUT);
1013 buf_set_u32(reg_params[0].value, 0, 32, address);
1014 buf_set_u32(reg_params[1].value, 0, 32, count);
1016 /* 20 second timeout/megabyte */
1017 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1019 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1020 crc_algorithm->address,
1021 crc_algorithm->address + sizeof(arm_crc_code) - 8,
1022 timeout, &armv4_5_info);
1023 if (retval != ERROR_OK) {
1024 LOG_ERROR("error executing ARM crc algorithm");
1025 destroy_reg_param(®_params[0]);
1026 destroy_reg_param(®_params[1]);
1027 target_free_working_area(target, crc_algorithm);
1031 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1033 destroy_reg_param(®_params[0]);
1034 destroy_reg_param(®_params[1]);
1036 target_free_working_area(target, crc_algorithm);
1042 * Runs ARM code in the target to check whether a memory block holds
1043 * all ones. NOR flash which has been erased, and thus may be written,
1046 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1048 int arm_blank_check_memory(struct target *target,
1049 uint32_t address, uint32_t count, uint32_t *blank)
1051 struct working_area *check_algorithm;
1052 struct reg_param reg_params[3];
1053 struct armv4_5_algorithm armv4_5_info;
1057 static const uint32_t check_code[] = {
1059 0xe4d03001, /* ldrb r3, [r0], #1 */
1060 0xe0022003, /* and r2, r2, r3 */
1061 0xe2511001, /* subs r1, r1, #1 */
1062 0x1afffffb, /* bne loop */
1064 0xeafffffe /* b end */
1067 /* make sure we have a working area */
1068 retval = target_alloc_working_area(target,
1069 sizeof(check_code), &check_algorithm);
1070 if (retval != ERROR_OK)
1073 /* convert code into a buffer in target endianness */
1074 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1075 retval = target_write_u32(target,
1076 check_algorithm->address
1077 + i * sizeof(uint32_t),
1079 if (retval != ERROR_OK)
1083 armv4_5_info.common_magic = ARMV4_5_COMMON_MAGIC;
1084 armv4_5_info.core_mode = ARMV4_5_MODE_SVC;
1085 armv4_5_info.core_state = ARMV4_5_STATE_ARM;
1087 init_reg_param(®_params[0], "r0", 32, PARAM_OUT);
1088 buf_set_u32(reg_params[0].value, 0, 32, address);
1090 init_reg_param(®_params[1], "r1", 32, PARAM_OUT);
1091 buf_set_u32(reg_params[1].value, 0, 32, count);
1093 init_reg_param(®_params[2], "r2", 32, PARAM_IN_OUT);
1094 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1096 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1097 check_algorithm->address,
1098 check_algorithm->address + sizeof(check_code) - 4,
1099 10000, &armv4_5_info);
1100 if (retval != ERROR_OK) {
1101 destroy_reg_param(®_params[0]);
1102 destroy_reg_param(®_params[1]);
1103 destroy_reg_param(®_params[2]);
1104 target_free_working_area(target, check_algorithm);
1108 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1110 destroy_reg_param(®_params[0]);
1111 destroy_reg_param(®_params[1]);
1112 destroy_reg_param(®_params[2]);
1114 target_free_working_area(target, check_algorithm);
1119 static int arm_full_context(struct target *target)
1121 struct armv4_5_common_s *armv4_5 = target_to_armv4_5(target);
1122 unsigned num_regs = armv4_5->core_cache->num_regs;
1123 struct reg *reg = armv4_5->core_cache->reg_list;
1124 int retval = ERROR_OK;
1126 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1129 retval = armv4_5_get_core_reg(reg);
1134 int armv4_5_init_arch_info(struct target *target, struct arm *armv4_5)
1136 target->arch_info = armv4_5;
1138 armv4_5->common_magic = ARMV4_5_COMMON_MAGIC;
1139 armv4_5->core_state = ARMV4_5_STATE_ARM;
1140 armv4_5->core_mode = ARMV4_5_MODE_USR;
1142 /* core_type may be overridden by subtype logic */
1143 armv4_5->core_type = ARMV4_5_MODE_ANY;
1145 /* default full_context() has no core-specific optimizations */
1146 if (!armv4_5->full_context && armv4_5->read_core_reg)
1147 armv4_5->full_context = arm_full_context;