/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2006 by Magnus Lundin                                   *
 *   lundin@mlu.mine.nu                                                    *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
 *                                                                         *
 *   Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0)              *
 *                                                                         *
 ***************************************************************************/
32 #include "jtag/interface.h"
33 #include "breakpoints.h"
35 #include "target_request.h"
36 #include "target_type.h"
37 #include "arm_adi_v5.h"
38 #include "arm_disassembler.h"
40 #include "arm_opcodes.h"
41 #include "arm_semihosting.h"
42 #include <helper/time_support.h>
/* NOTE: most of this should work fine for the Cortex-M1 and
 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 * Some differences: M0/M1 doesn't have FPB remapping or the
 * DWT tracing/profiling support.  (So the cycle counter will
 * not be usable; the other stuff isn't currently used here.)
 *
 * Although there are some workarounds for errata seen only in r0p0
 * silicon, such old parts are hard to find and thus not much tested
 * anymore.
 */
/* Timeout in milliseconds for a DHCSR S_REGRDY poll during a core
 * register read/write via DCRSR/DCRDR. */
#define DHCSR_S_REGRDY_TIMEOUT (500)
59 /* Supported Cortex-M Cores */
60 static const struct cortex_m_part_info cortex_m_parts[] = {
62 .partno = CORTEX_M0_PARTNO,
67 .partno = CORTEX_M0P_PARTNO,
72 .partno = CORTEX_M1_PARTNO,
77 .partno = CORTEX_M3_PARTNO,
80 .flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
83 .partno = CORTEX_M4_PARTNO,
86 .flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
89 .partno = CORTEX_M7_PARTNO,
92 .flags = CORTEX_M_F_HAS_FPV5,
95 .partno = CORTEX_M23_PARTNO,
100 .partno = CORTEX_M33_PARTNO,
101 .name = "Cortex-M33",
102 .arch = ARM_ARCH_V8M,
103 .flags = CORTEX_M_F_HAS_FPV5,
106 .partno = CORTEX_M35P_PARTNO,
107 .name = "Cortex-M35P",
108 .arch = ARM_ARCH_V8M,
109 .flags = CORTEX_M_F_HAS_FPV5,
112 .partno = CORTEX_M55_PARTNO,
113 .name = "Cortex-M55",
114 .arch = ARM_ARCH_V8M,
115 .flags = CORTEX_M_F_HAS_FPV5,
/* forward declarations */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t num, uint32_t value);
static void cortex_m_dwt_free(struct target *target);
124 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
125 * on a read. Call this helper function each time DHCSR is read
126 * to preserve S_RESET_ST state in case of a reset event was detected.
128 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
131 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
134 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
135 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
137 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
139 struct cortex_m_common *cortex_m = target_to_cm(target);
140 struct armv7m_common *armv7m = target_to_armv7m(target);
142 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
143 &cortex_m->dcb_dhcsr);
144 if (retval != ERROR_OK)
147 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
151 static int cortex_m_load_core_reg_u32(struct target *target,
152 uint32_t regsel, uint32_t *value)
154 struct cortex_m_common *cortex_m = target_to_cm(target);
155 struct armv7m_common *armv7m = target_to_armv7m(target);
157 uint32_t dcrdr, tmp_value;
160 /* because the DCB_DCRDR is used for the emulated dcc channel
161 * we have to save/restore the DCB_DCRDR when used */
162 if (target->dbg_msg_enabled) {
163 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
164 if (retval != ERROR_OK)
168 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
169 if (retval != ERROR_OK)
172 /* check if value from register is ready and pre-read it */
175 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
176 &cortex_m->dcb_dhcsr);
177 if (retval != ERROR_OK)
179 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
181 if (retval != ERROR_OK)
183 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
184 if (cortex_m->dcb_dhcsr & S_REGRDY)
186 cortex_m->slow_register_read = true; /* Polling (still) needed. */
187 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
188 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
189 return ERROR_TIMEOUT_REACHED;
196 if (target->dbg_msg_enabled) {
197 /* restore DCB_DCRDR - this needs to be in a separate
198 * transaction otherwise the emulated DCC channel breaks */
199 if (retval == ERROR_OK)
200 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
206 static int cortex_m_slow_read_all_regs(struct target *target)
208 struct cortex_m_common *cortex_m = target_to_cm(target);
209 struct armv7m_common *armv7m = target_to_armv7m(target);
210 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
212 /* Opportunistically restore fast read, it'll revert to slow
213 * if any register needed polling in cortex_m_load_core_reg_u32(). */
214 cortex_m->slow_register_read = false;
216 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
217 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
219 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
220 if (retval != ERROR_OK)
225 if (!cortex_m->slow_register_read)
226 LOG_DEBUG("Switching back to fast register reads");
231 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
232 uint32_t *reg_value, uint32_t *dhcsr)
234 struct armv7m_common *armv7m = target_to_armv7m(target);
237 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
238 if (retval != ERROR_OK)
241 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
242 if (retval != ERROR_OK)
245 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
248 static int cortex_m_fast_read_all_regs(struct target *target)
250 struct cortex_m_common *cortex_m = target_to_cm(target);
251 struct armv7m_common *armv7m = target_to_armv7m(target);
255 /* because the DCB_DCRDR is used for the emulated dcc channel
256 * we have to save/restore the DCB_DCRDR when used */
257 if (target->dbg_msg_enabled) {
258 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
259 if (retval != ERROR_OK)
263 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
264 const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
265 + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
266 /* we need one 32-bit word for each register except FP D0..D15, which
268 uint32_t r_vals[n_r32];
269 uint32_t dhcsr[n_r32];
271 unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
272 unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
273 for (reg_id = 0; reg_id < num_regs; reg_id++) {
274 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
276 continue; /* skip non existent registers */
279 /* Any 8-bit or shorter register is unpacked from a 32-bit
280 * container register. Skip it now. */
284 uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
285 retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
287 if (retval != ERROR_OK)
291 assert(r->size == 32 || r->size == 64);
293 continue; /* done with 32-bit register */
295 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
296 /* the odd part of FP register (S1, S3...) */
297 retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
299 if (retval != ERROR_OK)
306 retval = dap_run(armv7m->debug_ap->dap);
307 if (retval != ERROR_OK)
310 if (target->dbg_msg_enabled) {
311 /* restore DCB_DCRDR - this needs to be in a separate
312 * transaction otherwise the emulated DCC channel breaks */
313 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
314 if (retval != ERROR_OK)
318 bool not_ready = false;
319 for (unsigned int i = 0; i < wi; i++) {
320 if ((dhcsr[i] & S_REGRDY) == 0) {
322 LOG_DEBUG("Register %u was not ready during fast read", i);
324 cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
328 /* Any register was not ready,
329 * fall back to slow read with S_REGRDY polling */
330 return ERROR_TIMEOUT_REACHED;
333 LOG_DEBUG("read %u 32-bit registers", wi);
335 unsigned int ri = 0; /* read index from r_vals array */
336 for (reg_id = 0; reg_id < num_regs; reg_id++) {
337 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
339 continue; /* skip non existent registers */
343 unsigned int reg32_id;
345 if (armv7m_map_reg_packing(reg_id, ®32_id, &offset)) {
346 /* Unpack a partial register from 32-bit container register */
347 struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];
349 /* The container register ought to precede all regs unpacked
350 * from it in the reg_list. So the value should be ready
353 buf_cpy(r32->value + offset, r->value, r->size);
356 assert(r->size == 32 || r->size == 64);
357 buf_set_u32(r->value, 0, 32, r_vals[ri++]);
360 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
361 /* the odd part of FP register (S1, S3...) */
362 buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
372 static int cortex_m_store_core_reg_u32(struct target *target,
373 uint32_t regsel, uint32_t value)
375 struct cortex_m_common *cortex_m = target_to_cm(target);
376 struct armv7m_common *armv7m = target_to_armv7m(target);
381 /* because the DCB_DCRDR is used for the emulated dcc channel
382 * we have to save/restore the DCB_DCRDR when used */
383 if (target->dbg_msg_enabled) {
384 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
385 if (retval != ERROR_OK)
389 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
390 if (retval != ERROR_OK)
393 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
394 if (retval != ERROR_OK)
397 /* check if value is written into register */
400 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
401 &cortex_m->dcb_dhcsr);
402 if (retval != ERROR_OK)
404 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
405 if (cortex_m->dcb_dhcsr & S_REGRDY)
407 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
408 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
409 return ERROR_TIMEOUT_REACHED;
414 if (target->dbg_msg_enabled) {
415 /* restore DCB_DCRDR - this needs to be in a separate
416 * transaction otherwise the emulated DCC channel breaks */
417 if (retval == ERROR_OK)
418 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
424 static int cortex_m_write_debug_halt_mask(struct target *target,
425 uint32_t mask_on, uint32_t mask_off)
427 struct cortex_m_common *cortex_m = target_to_cm(target);
428 struct armv7m_common *armv7m = &cortex_m->armv7m;
430 /* mask off status bits */
431 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
432 /* create new register mask */
433 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
435 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
438 static int cortex_m_set_maskints(struct target *target, bool mask)
440 struct cortex_m_common *cortex_m = target_to_cm(target);
441 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
442 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
447 static int cortex_m_set_maskints_for_halt(struct target *target)
449 struct cortex_m_common *cortex_m = target_to_cm(target);
450 switch (cortex_m->isrmasking_mode) {
451 case CORTEX_M_ISRMASK_AUTO:
452 /* interrupts taken at resume, whether for step or run -> no mask */
453 return cortex_m_set_maskints(target, false);
455 case CORTEX_M_ISRMASK_OFF:
456 /* interrupts never masked */
457 return cortex_m_set_maskints(target, false);
459 case CORTEX_M_ISRMASK_ON:
460 /* interrupts always masked */
461 return cortex_m_set_maskints(target, true);
463 case CORTEX_M_ISRMASK_STEPONLY:
464 /* interrupts masked for single step only -> mask now if MASKINTS
465 * erratum, otherwise only mask before stepping */
466 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
471 static int cortex_m_set_maskints_for_run(struct target *target)
473 switch (target_to_cm(target)->isrmasking_mode) {
474 case CORTEX_M_ISRMASK_AUTO:
475 /* interrupts taken at resume, whether for step or run -> no mask */
476 return cortex_m_set_maskints(target, false);
478 case CORTEX_M_ISRMASK_OFF:
479 /* interrupts never masked */
480 return cortex_m_set_maskints(target, false);
482 case CORTEX_M_ISRMASK_ON:
483 /* interrupts always masked */
484 return cortex_m_set_maskints(target, true);
486 case CORTEX_M_ISRMASK_STEPONLY:
487 /* interrupts masked for single step only -> no mask */
488 return cortex_m_set_maskints(target, false);
493 static int cortex_m_set_maskints_for_step(struct target *target)
495 switch (target_to_cm(target)->isrmasking_mode) {
496 case CORTEX_M_ISRMASK_AUTO:
497 /* the auto-interrupt should already be done -> mask */
498 return cortex_m_set_maskints(target, true);
500 case CORTEX_M_ISRMASK_OFF:
501 /* interrupts never masked */
502 return cortex_m_set_maskints(target, false);
504 case CORTEX_M_ISRMASK_ON:
505 /* interrupts always masked */
506 return cortex_m_set_maskints(target, true);
508 case CORTEX_M_ISRMASK_STEPONLY:
509 /* interrupts masked for single step only -> mask */
510 return cortex_m_set_maskints(target, true);
515 static int cortex_m_clear_halt(struct target *target)
517 struct cortex_m_common *cortex_m = target_to_cm(target);
518 struct armv7m_common *armv7m = &cortex_m->armv7m;
521 /* clear step if any */
522 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
524 /* Read Debug Fault Status Register */
525 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
526 if (retval != ERROR_OK)
529 /* Clear Debug Fault Status */
530 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
531 if (retval != ERROR_OK)
533 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
538 static int cortex_m_single_step_core(struct target *target)
540 struct cortex_m_common *cortex_m = target_to_cm(target);
543 /* Mask interrupts before clearing halt, if not done already. This avoids
544 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
545 * HALT can put the core into an unknown state.
547 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
548 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
549 if (retval != ERROR_OK)
552 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
553 if (retval != ERROR_OK)
557 /* restore dhcsr reg */
558 cortex_m_clear_halt(target);
563 static int cortex_m_enable_fpb(struct target *target)
565 int retval = target_write_u32(target, FP_CTRL, 3);
566 if (retval != ERROR_OK)
569 /* check the fpb is actually enabled */
571 retval = target_read_u32(target, FP_CTRL, &fpctrl);
572 if (retval != ERROR_OK)
581 static int cortex_m_endreset_event(struct target *target)
585 struct cortex_m_common *cortex_m = target_to_cm(target);
586 struct armv7m_common *armv7m = &cortex_m->armv7m;
587 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
588 struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
589 struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
591 /* REVISIT The four debug monitor bits are currently ignored... */
592 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
593 if (retval != ERROR_OK)
595 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
597 /* this register is used for emulated dcc channel */
598 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
599 if (retval != ERROR_OK)
602 retval = cortex_m_read_dhcsr_atomic_sticky(target);
603 if (retval != ERROR_OK)
606 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
607 /* Enable debug requests */
608 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
609 if (retval != ERROR_OK)
613 /* Restore proper interrupt masking setting for running CPU. */
614 cortex_m_set_maskints_for_run(target);
616 /* Enable features controlled by ITM and DWT blocks, and catch only
617 * the vectors we were told to pay attention to.
619 * Target firmware is responsible for all fault handling policy
620 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
621 * or manual updates to the NVIC SHCSR and CCR registers.
623 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
624 if (retval != ERROR_OK)
627 /* Paranoia: evidently some (early?) chips don't preserve all the
628 * debug state (including FPB, DWT, etc) across reset...
632 retval = cortex_m_enable_fpb(target);
633 if (retval != ERROR_OK) {
634 LOG_ERROR("Failed to enable the FPB");
638 cortex_m->fpb_enabled = true;
640 /* Restore FPB registers */
641 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
642 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
643 if (retval != ERROR_OK)
647 /* Restore DWT registers */
648 for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
649 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
651 if (retval != ERROR_OK)
653 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
655 if (retval != ERROR_OK)
657 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
658 dwt_list[i].function);
659 if (retval != ERROR_OK)
662 retval = dap_run(swjdp);
663 if (retval != ERROR_OK)
666 register_cache_invalidate(armv7m->arm.core_cache);
668 /* make sure we have latest dhcsr flags */
669 retval = cortex_m_read_dhcsr_atomic_sticky(target);
670 if (retval != ERROR_OK)
676 static int cortex_m_examine_debug_reason(struct target *target)
678 struct cortex_m_common *cortex_m = target_to_cm(target);
680 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
681 * only check the debug reason if we don't know it already */
683 if ((target->debug_reason != DBG_REASON_DBGRQ)
684 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
685 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
686 target->debug_reason = DBG_REASON_BREAKPOINT;
687 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WPTANDBKPT;
689 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
690 target->debug_reason = DBG_REASON_WATCHPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
692 target->debug_reason = DBG_REASON_BREAKPOINT;
693 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
694 target->debug_reason = DBG_REASON_DBGRQ;
696 target->debug_reason = DBG_REASON_UNDEFINED;
702 static int cortex_m_examine_exception_reason(struct target *target)
704 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
705 struct armv7m_common *armv7m = target_to_armv7m(target);
706 struct adiv5_dap *swjdp = armv7m->arm.dap;
709 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
710 if (retval != ERROR_OK)
712 switch (armv7m->exception_number) {
715 case 3: /* Hard Fault */
716 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
717 if (retval != ERROR_OK)
719 if (except_sr & 0x40000000) {
720 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
721 if (retval != ERROR_OK)
725 case 4: /* Memory Management */
726 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
727 if (retval != ERROR_OK)
729 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
730 if (retval != ERROR_OK)
733 case 5: /* Bus Fault */
734 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
735 if (retval != ERROR_OK)
737 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
738 if (retval != ERROR_OK)
741 case 6: /* Usage Fault */
742 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
743 if (retval != ERROR_OK)
746 case 7: /* Secure Fault */
747 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
748 if (retval != ERROR_OK)
750 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
751 if (retval != ERROR_OK)
754 case 11: /* SVCall */
756 case 12: /* Debug Monitor */
757 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
758 if (retval != ERROR_OK)
761 case 14: /* PendSV */
763 case 15: /* SysTick */
769 retval = dap_run(swjdp);
770 if (retval == ERROR_OK)
771 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
772 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
773 armv7m_exception_string(armv7m->exception_number),
774 shcsr, except_sr, cfsr, except_ar);
778 static int cortex_m_debug_entry(struct target *target)
782 struct cortex_m_common *cortex_m = target_to_cm(target);
783 struct armv7m_common *armv7m = &cortex_m->armv7m;
784 struct arm *arm = &armv7m->arm;
789 /* Do this really early to minimize the window where the MASKINTS erratum
790 * can pile up pending interrupts. */
791 cortex_m_set_maskints_for_halt(target);
793 cortex_m_clear_halt(target);
795 retval = cortex_m_read_dhcsr_atomic_sticky(target);
796 if (retval != ERROR_OK)
799 retval = armv7m->examine_debug_reason(target);
800 if (retval != ERROR_OK)
803 /* examine PE security state */
804 bool secure_state = false;
805 if (armv7m->arm.arch == ARM_ARCH_V8M) {
808 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
809 if (retval != ERROR_OK)
812 secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
815 /* Load all registers to arm.core_cache */
816 if (!cortex_m->slow_register_read) {
817 retval = cortex_m_fast_read_all_regs(target);
818 if (retval == ERROR_TIMEOUT_REACHED) {
819 cortex_m->slow_register_read = true;
820 LOG_DEBUG("Switched to slow register read");
824 if (cortex_m->slow_register_read)
825 retval = cortex_m_slow_read_all_regs(target);
827 if (retval != ERROR_OK)
831 xPSR = buf_get_u32(r->value, 0, 32);
833 /* Are we in an exception handler */
835 armv7m->exception_number = (xPSR & 0x1FF);
837 arm->core_mode = ARM_MODE_HANDLER;
838 arm->map = armv7m_msp_reg_map;
840 unsigned control = buf_get_u32(arm->core_cache
841 ->reg_list[ARMV7M_CONTROL].value, 0, 3);
843 /* is this thread privileged? */
844 arm->core_mode = control & 1
845 ? ARM_MODE_USER_THREAD
848 /* which stack is it using? */
850 arm->map = armv7m_psp_reg_map;
852 arm->map = armv7m_msp_reg_map;
854 armv7m->exception_number = 0;
857 if (armv7m->exception_number)
858 cortex_m_examine_exception_reason(target);
860 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", cpu in %s state, target->state: %s",
861 arm_mode_name(arm->core_mode),
862 buf_get_u32(arm->pc->value, 0, 32),
863 secure_state ? "Secure" : "Non-Secure",
864 target_state_name(target));
866 if (armv7m->post_debug_entry) {
867 retval = armv7m->post_debug_entry(target);
868 if (retval != ERROR_OK)
875 static int cortex_m_poll(struct target *target)
877 int detected_failure = ERROR_OK;
878 int retval = ERROR_OK;
879 enum target_state prev_target_state = target->state;
880 struct cortex_m_common *cortex_m = target_to_cm(target);
881 struct armv7m_common *armv7m = &cortex_m->armv7m;
883 /* Read from Debug Halting Control and Status Register */
884 retval = cortex_m_read_dhcsr_atomic_sticky(target);
885 if (retval != ERROR_OK) {
886 target->state = TARGET_UNKNOWN;
890 /* Recover from lockup. See ARMv7-M architecture spec,
891 * section B1.5.15 "Unrecoverable exception cases".
893 if (cortex_m->dcb_dhcsr & S_LOCKUP) {
894 LOG_ERROR("%s -- clearing lockup after double fault",
895 target_name(target));
896 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
897 target->debug_reason = DBG_REASON_DBGRQ;
899 /* We have to execute the rest (the "finally" equivalent, but
900 * still throw this exception again).
902 detected_failure = ERROR_FAIL;
904 /* refresh status bits */
905 retval = cortex_m_read_dhcsr_atomic_sticky(target);
906 if (retval != ERROR_OK)
910 if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
911 cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
912 if (target->state != TARGET_RESET) {
913 target->state = TARGET_RESET;
914 LOG_INFO("%s: external reset detected", target_name(target));
919 if (target->state == TARGET_RESET) {
920 /* Cannot switch context while running so endreset is
921 * called with target->state == TARGET_RESET
923 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
924 cortex_m->dcb_dhcsr);
925 retval = cortex_m_endreset_event(target);
926 if (retval != ERROR_OK) {
927 target->state = TARGET_UNKNOWN;
930 target->state = TARGET_RUNNING;
931 prev_target_state = TARGET_RUNNING;
934 if (cortex_m->dcb_dhcsr & S_HALT) {
935 target->state = TARGET_HALTED;
937 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
938 retval = cortex_m_debug_entry(target);
939 if (retval != ERROR_OK)
942 if (arm_semihosting(target, &retval) != 0)
945 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
947 if (prev_target_state == TARGET_DEBUG_RUNNING) {
949 retval = cortex_m_debug_entry(target);
950 if (retval != ERROR_OK)
953 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
957 if (target->state == TARGET_UNKNOWN) {
958 /* Check if processor is retiring instructions or sleeping.
959 * Unlike S_RESET_ST here we test if the target *is* running now,
960 * not if it has been running (possibly in the past). Instructions are
961 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
962 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
964 if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
965 target->state = TARGET_RUNNING;
970 /* Check that target is truly halted, since the target could be resumed externally */
971 if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
972 /* registers are now invalid */
973 register_cache_invalidate(armv7m->arm.core_cache);
975 target->state = TARGET_RUNNING;
976 LOG_WARNING("%s: external resume detected", target_name(target));
977 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
981 /* Did we detect a failure condition that we cleared? */
982 if (detected_failure != ERROR_OK)
983 retval = detected_failure;
987 static int cortex_m_halt(struct target *target)
989 LOG_DEBUG("target->state: %s",
990 target_state_name(target));
992 if (target->state == TARGET_HALTED) {
993 LOG_DEBUG("target was already halted");
997 if (target->state == TARGET_UNKNOWN)
998 LOG_WARNING("target was in unknown state when halt was requested");
1000 if (target->state == TARGET_RESET) {
1001 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1002 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
1003 return ERROR_TARGET_FAILURE;
1005 /* we came here in a reset_halt or reset_init sequence
1006 * debug entry was already prepared in cortex_m3_assert_reset()
1008 target->debug_reason = DBG_REASON_DBGRQ;
1014 /* Write to Debug Halting Control and Status Register */
1015 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1017 /* Do this really early to minimize the window where the MASKINTS erratum
1018 * can pile up pending interrupts. */
1019 cortex_m_set_maskints_for_halt(target);
1021 target->debug_reason = DBG_REASON_DBGRQ;
1026 static int cortex_m_soft_reset_halt(struct target *target)
1028 struct cortex_m_common *cortex_m = target_to_cm(target);
1029 struct armv7m_common *armv7m = &cortex_m->armv7m;
1030 int retval, timeout = 0;
1032 /* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
1033 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
1034 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
1035 * core, not the peripherals */
1036 LOG_DEBUG("soft_reset_halt is discouraged, please use 'reset halt' instead.");
1038 if (!cortex_m->vectreset_supported) {
1039 LOG_ERROR("VECTRESET is not supported on this Cortex-M core");
1044 retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
1045 if (retval != ERROR_OK)
1048 /* Enter debug state on reset; restore DEMCR in endreset_event() */
1049 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
1050 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1051 if (retval != ERROR_OK)
1054 /* Request a core-only reset */
1055 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1056 AIRCR_VECTKEY | AIRCR_VECTRESET);
1057 if (retval != ERROR_OK)
1059 target->state = TARGET_RESET;
1061 /* registers are now invalid */
1062 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1064 while (timeout < 100) {
1065 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1066 if (retval == ERROR_OK) {
1067 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
1068 &cortex_m->nvic_dfsr);
1069 if (retval != ERROR_OK)
1071 if ((cortex_m->dcb_dhcsr & S_HALT)
1072 && (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
1073 LOG_DEBUG("system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
1074 cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
1075 cortex_m_poll(target);
1076 /* FIXME restore user's vector catch config */
1079 LOG_DEBUG("waiting for system reset-halt, "
1080 "DHCSR 0x%08" PRIx32 ", %d ms",
1081 cortex_m->dcb_dhcsr, timeout);
1090 void cortex_m_enable_breakpoints(struct target *target)
1092 struct breakpoint *breakpoint = target->breakpoints;
1094 /* set any pending breakpoints */
1095 while (breakpoint) {
1096 if (!breakpoint->set)
1097 cortex_m_set_breakpoint(target, breakpoint);
1098 breakpoint = breakpoint->next;
1102 static int cortex_m_resume(struct target *target, int current,
1103 target_addr_t address, int handle_breakpoints, int debug_execution)
1105 struct armv7m_common *armv7m = target_to_armv7m(target);
1106 struct breakpoint *breakpoint = NULL;
1110 if (target->state != TARGET_HALTED) {
1111 LOG_WARNING("target not halted");
1112 return ERROR_TARGET_NOT_HALTED;
1115 if (!debug_execution) {
1116 target_free_all_working_areas(target);
1117 cortex_m_enable_breakpoints(target);
1118 cortex_m_enable_watchpoints(target);
1121 if (debug_execution) {
1122 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
1124 /* Disable interrupts */
1125 /* We disable interrupts in the PRIMASK register instead of
1126 * masking with C_MASKINTS. This is probably the same issue
1127 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
1128 * in parallel with disabled interrupts can cause local faults
1131 * This breaks non-debug (application) execution if not
1132 * called from armv7m_start_algorithm() which saves registers.
1134 buf_set_u32(r->value, 0, 1, 1);
1138 /* Make sure we are in Thumb mode, set xPSR.T bit */
1139 /* armv7m_start_algorithm() initializes entire xPSR register.
1140 * This duplicity handles the case when cortex_m_resume()
1141 * is used with the debug_execution flag directly,
1142 * not called through armv7m_start_algorithm().
1144 r = armv7m->arm.cpsr;
1145 buf_set_u32(r->value, 24, 1, 1);
1150 /* current = 1: continue on current pc, otherwise continue at <address> */
1153 buf_set_u32(r->value, 0, 32, address);
1158 /* if we halted last time due to a bkpt instruction
1159 * then we have to manually step over it, otherwise
1160 * the core will break again */
1162 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
1163 && !debug_execution)
1164 armv7m_maybe_skip_bkpt_inst(target, NULL);
1166 resume_pc = buf_get_u32(r->value, 0, 32);
1168 armv7m_restore_context(target);
1170 /* the front-end may request us not to handle breakpoints */
1171 if (handle_breakpoints) {
1172 /* Single step past breakpoint at current address */
1173 breakpoint = breakpoint_find(target, resume_pc);
1175 LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
1176 breakpoint->address,
1177 breakpoint->unique_id);
1178 cortex_m_unset_breakpoint(target, breakpoint);
1179 cortex_m_single_step_core(target);
1180 cortex_m_set_breakpoint(target, breakpoint);
1185 cortex_m_set_maskints_for_run(target);
1186 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1188 target->debug_reason = DBG_REASON_NOTHALTED;
1190 /* registers are now invalid */
1191 register_cache_invalidate(armv7m->arm.core_cache);
1193 if (!debug_execution) {
1194 target->state = TARGET_RUNNING;
1195 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1196 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
1198 target->state = TARGET_DEBUG_RUNNING;
1199 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1200 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
/* int irqstepcount = 0; */

/**
 * Single-step one instruction.
 * current = 1: step at the current pc; otherwise pc is first loaded with
 * <address>.  In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first
 * allowed to run to completion at a temporary breakpoint so stepping does
 * not land inside an ISR (see the long comment below).
 */
static int cortex_m_step(struct target *target, int current,
    target_addr_t address, int handle_breakpoints)
    struct cortex_m_common *cortex_m = target_to_cm(target);
    struct armv7m_common *armv7m = &cortex_m->armv7m;
    struct breakpoint *breakpoint = NULL;
    struct reg *pc = armv7m->arm.pc;
    bool bkpt_inst_found = false;
    bool isr_timed_out = false;

    if (target->state != TARGET_HALTED) {
        LOG_WARNING("target not halted");
        return ERROR_TARGET_NOT_HALTED;

    /* current = 1: continue on current pc, otherwise continue at <address> */
        buf_set_u32(pc->value, 0, 32, address);

    uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

    /* the front-end may request us not to handle breakpoints */
    if (handle_breakpoints) {
        breakpoint = breakpoint_find(target, pc_value);
            cortex_m_unset_breakpoint(target, breakpoint);

    armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

    target->debug_reason = DBG_REASON_SINGLESTEP;

    armv7m_restore_context(target);

    target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

    /* if no bkpt instruction is found at pc then we can perform
     * a normal step, otherwise we have to manually step over the bkpt
     * instruction - as such simulate a step */
    if (bkpt_inst_found == false) {
        if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
            /* Automatic ISR masking mode off: Just step over the next
             * instruction, with interrupts on or off as appropriate. */
            cortex_m_set_maskints_for_step(target);
            cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
            /* Process interrupts during stepping in a way they don't interfere.
             *
             * Set a temporary break point at the current pc and let the core run
             * with interrupts enabled. Pending interrupts get served and we run
             * into the breakpoint again afterwards. Then we step over the next
             * instruction with interrupts disabled.
             *
             * If the pending interrupts don't complete within time, we leave the
             * core running. This may happen if the interrupts trigger faster
             * than the core can process them or the handler doesn't return.
             *
             * If no more breakpoints are available we simply do a step with
             * interrupts enabled.
             *
             * If a break point is already set on the lower half word then a break point on
             * the upper half word will not break again when the core is restarted. So we
             * just step over the instruction with interrupts disabled.
             *
             * The documentation has no information about this, it was found by observation
             * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
             * suffer from this problem.
             *
             * To add some confusion: pc_value has bit 0 always set, while the breakpoint
             * address has it always cleared. The former is done to indicate thumb mode,
             * hence the masking when comparing addresses below.
             */
            if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
                LOG_DEBUG("Stepping over next instruction with interrupts disabled");
                cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
                cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
                /* Re-enable interrupts if appropriate */
                cortex_m_write_debug_halt_mask(target, C_HALT, 0);
                cortex_m_set_maskints_for_halt(target);
                /* Set a temporary break point */
                    retval = cortex_m_set_breakpoint(target, breakpoint);
                    enum breakpoint_type type = BKPT_HARD;
                    if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
                        /* FPB rev.1 cannot handle such addr, try BKPT instr */
                    retval = breakpoint_add(target, pc_value, 2, type);
                bool tmp_bp_set = (retval == ERROR_OK);

                /* No more breakpoints left, just do a step */
                    cortex_m_set_maskints_for_step(target);
                    cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
                    /* Re-enable interrupts if appropriate */
                    cortex_m_write_debug_halt_mask(target, C_HALT, 0);
                    cortex_m_set_maskints_for_halt(target);
                    /* Start the core */
                    LOG_DEBUG("Starting core to serve pending interrupts");
                    int64_t t_start = timeval_ms();
                    cortex_m_set_maskints_for_run(target);
                    cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

                    /* Wait for pending handlers to complete or timeout */
                        retval = cortex_m_read_dhcsr_atomic_sticky(target);
                        if (retval != ERROR_OK) {
                            target->state = TARGET_UNKNOWN;
                        isr_timed_out = ((timeval_ms() - t_start) > 500);
                    } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

                    /* only remove breakpoint if we created it */
                        cortex_m_unset_breakpoint(target, breakpoint);
                        /* Remove the temporary breakpoint */
                        breakpoint_remove(target, pc_value);

                    if (isr_timed_out) {
                        LOG_DEBUG("Interrupt handlers didn't complete within time, "
                            "leaving target running");
                        /* Step over next instruction with interrupts disabled */
                        cortex_m_set_maskints_for_step(target);
                        cortex_m_write_debug_halt_mask(target,
                            C_HALT | C_MASKINTS,
                        cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
                        /* Re-enable interrupts if appropriate */
                        cortex_m_write_debug_halt_mask(target, C_HALT, 0);
                        cortex_m_set_maskints_for_halt(target);

    retval = cortex_m_read_dhcsr_atomic_sticky(target);
    if (retval != ERROR_OK)

    /* registers are now invalid */
    register_cache_invalidate(armv7m->arm.core_cache);

        cortex_m_set_breakpoint(target, breakpoint);

    if (isr_timed_out) {
        /* Leave the core running. The user has to stop execution manually. */
        target->debug_reason = DBG_REASON_NOTHALTED;
        target->state = TARGET_RUNNING;
        LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
            " nvic_icsr = 0x%" PRIx32,
            cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

        retval = cortex_m_debug_entry(target);
        if (retval != ERROR_OK)
    target_call_event_callbacks(target, TARGET_EVENT_HALTED);

    LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
        " nvic_icsr = 0x%" PRIx32,
        cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
/**
 * Assert reset on the target.  Prefers adapter srst when available and
 * permitted by the gating configuration; otherwise falls back to a
 * software reset through NVIC AIRCR (SYSRESETREQ or VECTRESET).
 * AP access errors before the actual reset are stored in 'retval' and
 * reported at the end instead of aborting mid-sequence.
 */
static int cortex_m_assert_reset(struct target *target)
    struct cortex_m_common *cortex_m = target_to_cm(target);
    struct armv7m_common *armv7m = &cortex_m->armv7m;
    enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

    LOG_DEBUG("target->state: %s",
        target_state_name(target));

    enum reset_types jtag_reset_config = jtag_get_reset_config();

    if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
        /* allow scripts to override the reset event */
        target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
        register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
        target->state = TARGET_RESET;

    /* some cores support connecting while srst is asserted
     * use that mode if it has been configured */

    bool srst_asserted = false;

    if (!target_was_examined(target)) {
        if (jtag_reset_config & RESET_HAS_SRST) {
            adapter_assert_reset();
            if (target->reset_halt)
                LOG_ERROR("Target not examined, will not halt after reset!");
            LOG_ERROR("Target not examined, reset NOT asserted!");

    if ((jtag_reset_config & RESET_HAS_SRST) &&
        (jtag_reset_config & RESET_SRST_NO_GATING)) {
        adapter_assert_reset();
        srst_asserted = true;

    /* Enable debug requests */
    int retval = cortex_m_read_dhcsr_atomic_sticky(target);

    /* Store important errors instead of failing and proceed to reset assert */

    if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
        retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

    /* If the processor is sleeping in a WFI or WFE instruction, the
     * C_HALT bit must be asserted to regain control */
    if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
        retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

    mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
    /* Ignore less important errors */

    if (!target->reset_halt) {
        /* Set/Clear C_MASKINTS in a separate operation */
        cortex_m_set_maskints_for_run(target);

        /* clear any debug flags before resuming */
        cortex_m_clear_halt(target);

        /* clear C_HALT in dhcsr reg */
        cortex_m_write_debug_halt_mask(target, 0, C_HALT);
        /* Halt in debug on reset; endreset_event() restores DEMCR.
         *
         * REVISIT catching BUSERR presumably helps to defend against
         * bad vector table entries.  Should this include MMERR or
         * other vector-catch enables as well?
         */
        retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
                TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
        if (retval != ERROR_OK || retval2 != ERROR_OK)
            LOG_INFO("AP write error, reset will not halt");

    if (jtag_reset_config & RESET_HAS_SRST) {
        /* default to asserting srst */
            adapter_assert_reset();
        /* srst is asserted, ignore AP access errors */
        /* Use a standard Cortex-M3 software reset mechanism.
         * We default to using VECTRESET as it is supported on all current cores
         * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
         * This has the disadvantage of not resetting the peripherals, so a
         * reset-init event handler is needed to perform any peripheral resets.
         */
        if (!cortex_m->vectreset_supported
                && reset_config == CORTEX_M_RESET_VECTRESET) {
            reset_config = CORTEX_M_RESET_SYSRESETREQ;
            LOG_WARNING("VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
            LOG_WARNING("Set 'cortex_m reset_config sysresetreq'.");

        LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
            ? "SYSRESETREQ" : "VECTRESET");

        if (reset_config == CORTEX_M_RESET_VECTRESET) {
            LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
                "handler to reset any peripherals or configure hardware srst support.");

        retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
                AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
                ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
        if (retval3 != ERROR_OK)
            LOG_DEBUG("Ignoring AP write error right after reset");

        retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
        if (retval3 != ERROR_OK) {
            LOG_ERROR("DP initialisation failed");
            /* The error return value must not be propagated in this case.
             * SYSRESETREQ or VECTRESET have been possibly triggered
             * so reset processing should continue */
            /* I do not know why this is necessary, but it
             * fixes strange effects (step/resume cause NMI
             * after reset) on LM3S6918 -- Michael Schwingen
             */
            mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);

    target->state = TARGET_RESET;

    register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

    /* now return stored error code if any */
    if (retval != ERROR_OK)

    if (target->reset_halt) {
        retval = target_halt(target);
        if (retval != ERROR_OK)
/**
 * Deassert the adapter reset line and, if srst gated the debug link
 * (no RESET_SRST_NO_GATING) on an already-examined target, reconnect
 * the debug port.
 */
static int cortex_m_deassert_reset(struct target *target)
    struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;

    LOG_DEBUG("target->state: %s",
        target_state_name(target));

    /* deassert reset lines */
    adapter_deassert_reset();

    enum reset_types jtag_reset_config = jtag_get_reset_config();

    if ((jtag_reset_config & RESET_HAS_SRST) &&
        !(jtag_reset_config & RESET_SRST_NO_GATING) &&
        target_was_examined(target)) {

        int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
        if (retval != ERROR_OK) {
            LOG_ERROR("DP initialisation failed");
1577 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1580 unsigned int fp_num = 0;
1581 struct cortex_m_common *cortex_m = target_to_cm(target);
1582 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1584 if (breakpoint->set) {
1585 LOG_WARNING("breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1589 if (breakpoint->type == BKPT_HARD) {
1590 uint32_t fpcr_value;
1591 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1593 if (fp_num >= cortex_m->fp_num_code) {
1594 LOG_ERROR("Can not find free FPB Comparator!");
1595 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1597 breakpoint->set = fp_num + 1;
1598 fpcr_value = breakpoint->address | 1;
1599 if (cortex_m->fp_rev == 0) {
1600 if (breakpoint->address > 0x1FFFFFFF) {
1601 LOG_ERROR("Cortex-M Flash Patch Breakpoint rev.1 cannot handle HW breakpoint above address 0x1FFFFFFE");
1605 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1606 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1607 } else if (cortex_m->fp_rev > 1) {
1608 LOG_ERROR("Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1611 comparator_list[fp_num].used = true;
1612 comparator_list[fp_num].fpcr_value = fpcr_value;
1613 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1614 comparator_list[fp_num].fpcr_value);
1615 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1617 comparator_list[fp_num].fpcr_value);
1618 if (!cortex_m->fpb_enabled) {
1619 LOG_DEBUG("FPB wasn't enabled, do it now");
1620 retval = cortex_m_enable_fpb(target);
1621 if (retval != ERROR_OK) {
1622 LOG_ERROR("Failed to enable the FPB");
1626 cortex_m->fpb_enabled = true;
1628 } else if (breakpoint->type == BKPT_SOFT) {
1631 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1632 * semihosting; don't use that. Otherwise the BKPT
1633 * parameter is arbitrary.
1635 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1636 retval = target_read_memory(target,
1637 breakpoint->address & 0xFFFFFFFE,
1638 breakpoint->length, 1,
1639 breakpoint->orig_instr);
1640 if (retval != ERROR_OK)
1642 retval = target_write_memory(target,
1643 breakpoint->address & 0xFFFFFFFE,
1644 breakpoint->length, 1,
1646 if (retval != ERROR_OK)
1648 breakpoint->set = true;
1651 LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
1652 breakpoint->unique_id,
1653 (int)(breakpoint->type),
1654 breakpoint->address,
/**
 * Remove a previously set breakpoint: free and disable its FPB
 * comparator (hard), or restore the saved original instruction (soft).
 */
int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
    struct cortex_m_common *cortex_m = target_to_cm(target);
    struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;

    if (breakpoint->set <= 0) {
        LOG_WARNING("breakpoint not set");

    LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
        breakpoint->unique_id,
        (int)(breakpoint->type),
        breakpoint->address,

    if (breakpoint->type == BKPT_HARD) {
        /* breakpoint->set stores comparator number + 1 */
        unsigned int fp_num = breakpoint->set - 1;
        if (fp_num >= cortex_m->fp_num_code) {
            LOG_DEBUG("Invalid FP Comparator number in breakpoint");
        comparator_list[fp_num].used = false;
        comparator_list[fp_num].fpcr_value = 0;
        target_write_u32(target, comparator_list[fp_num].fpcr_address,
            comparator_list[fp_num].fpcr_value);
        /* restore original instruction (kept in target endianness) */
        retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
            breakpoint->length, 1,
            breakpoint->orig_instr);
        if (retval != ERROR_OK)

    breakpoint->set = false;
1702 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1704 if (breakpoint->length == 3) {
1705 LOG_DEBUG("Using a two byte breakpoint for 32bit Thumb-2 request");
1706 breakpoint->length = 2;
1709 if ((breakpoint->length != 2)) {
1710 LOG_INFO("only breakpoints of two bytes length supported");
1711 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1714 return cortex_m_set_breakpoint(target, breakpoint);
1717 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1719 if (!breakpoint->set)
1722 return cortex_m_unset_breakpoint(target, breakpoint);
/**
 * Program a DWT comparator to implement <watchpoint>.  The encoding of
 * MASK/FUNCTION differs between the legacy DWT and the ARMv8-M DWT,
 * selected via DWT_DEVARCH below.  watchpoint->set records the claimed
 * comparator number + 1.
 */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
    unsigned int dwt_num = 0;
    struct cortex_m_common *cortex_m = target_to_cm(target);

    /* REVISIT Don't fully trust these "not used" records ... users
     * may set up breakpoints by hand, e.g. dual-address data value
     * watchpoint using comparator #1; comparator #0 matching cycle
     * count; send data trace info through ITM and TPIU; etc
     */
    struct cortex_m_dwt_comparator *comparator;

    /* NOTE(review): 'comparator->used' is evaluated before the bounds
     * test, so the element one past the end of the list is read when
     * all comparators are in use — the operands should be swapped.
     * TODO confirm and fix. */
    for (comparator = cortex_m->dwt_comparator_list;
        comparator->used && dwt_num < cortex_m->dwt_num_comp;
        comparator++, dwt_num++)
    if (dwt_num >= cortex_m->dwt_num_comp) {
        LOG_ERROR("Can not find free DWT Comparator");

    comparator->used = true;
    /* comparator number + 1; 0 means "not set" */
    watchpoint->set = dwt_num + 1;

    comparator->comp = watchpoint->address;
    target_write_u32(target, comparator->dwt_comparator_address + 0,

    if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
        uint32_t mask = 0, temp;

        /* watchpoint params were validated earlier */
        temp = watchpoint->length;

        comparator->mask = mask;
        target_write_u32(target, comparator->dwt_comparator_address + 4,

        switch (watchpoint->rw) {
            comparator->function = 5;
            comparator->function = 6;
            comparator->function = 7;
        /* ARMv8-M DWT encoding */
        uint32_t data_size = watchpoint->length >> 1;
        comparator->mask = (watchpoint->length >> 1) | 1;

        switch (watchpoint->rw) {
            comparator->function = 4;
            comparator->function = 5;
            comparator->function = 6;
        comparator->function = comparator->function | (1 << 4) |

    target_write_u32(target, comparator->dwt_comparator_address + 8,
        comparator->function);

    LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
        watchpoint->unique_id, dwt_num,
        (unsigned) comparator->comp,
        (unsigned) comparator->mask,
        (unsigned) comparator->function);
/**
 * Disable the DWT comparator backing <watchpoint> and release it for
 * reuse.  FUNCTION = 0 turns the comparator off.
 */
static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
    struct cortex_m_common *cortex_m = target_to_cm(target);
    struct cortex_m_dwt_comparator *comparator;

    if (watchpoint->set <= 0) {
        LOG_WARNING("watchpoint (wpid: %d) not set",
            watchpoint->unique_id);

    /* watchpoint->set stores comparator number + 1 */
    unsigned int dwt_num = watchpoint->set - 1;

    LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
        watchpoint->unique_id, dwt_num,
        (unsigned) watchpoint->address);

    if (dwt_num >= cortex_m->dwt_num_comp) {
        LOG_DEBUG("Invalid DWT Comparator number in watchpoint");

    comparator = cortex_m->dwt_comparator_list + dwt_num;
    comparator->used = false;
    comparator->function = 0;
    target_write_u32(target, comparator->dwt_comparator_address + 8,
        comparator->function);

    watchpoint->set = false;
/**
 * Validate a watchpoint request and reserve one DWT comparator for it.
 * Length must be a power of two (address mask), the address must be
 * aligned to the length, and neither value matching nor value masking
 * is supported.  The comparator is programmed later, when the
 * watchpoint is actually armed.
 */
int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
    struct cortex_m_common *cortex_m = target_to_cm(target);

    if (cortex_m->dwt_comp_available < 1) {
        LOG_DEBUG("no comparators?");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    /* hardware doesn't support data value masking */
    if (watchpoint->mask != ~(uint32_t)0) {
        LOG_DEBUG("watchpoint value masks not supported");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    /* hardware allows address masks of up to 32K */
    for (mask = 0; mask < 16; mask++) {
        if ((1u << mask) == watchpoint->length)
        LOG_DEBUG("unsupported watchpoint length");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
    if (watchpoint->address & ((1 << mask) - 1)) {
        LOG_DEBUG("watchpoint address is unaligned");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    /* Caller doesn't seem to be able to describe watching for data
     * values of zero; that flags "no value".
     *
     * REVISIT This DWT may well be able to watch for specific data
     * values.  Requires comparator #1 to set DATAVMATCH and match
     * the data, and another comparator (DATAVADDR0) matching addr.
     */
    if (watchpoint->value) {
        LOG_DEBUG("data value watchpoint not YET supported");
        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

    cortex_m->dwt_comp_available--;
    LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1890 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1892 struct cortex_m_common *cortex_m = target_to_cm(target);
1894 /* REVISIT why check? DWT can be updated with core running ... */
1895 if (target->state != TARGET_HALTED) {
1896 LOG_WARNING("target not halted");
1897 return ERROR_TARGET_NOT_HALTED;
1900 if (watchpoint->set)
1901 cortex_m_unset_watchpoint(target, watchpoint);
1903 cortex_m->dwt_comp_available++;
1904 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
/**
 * After a watchpoint-triggered halt, identify which watchpoint fired by
 * scanning the MATCHED bit (bit 24) of each armed comparator's
 * DWT_FUNCTION register.
 */
int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
    if (target->debug_reason != DBG_REASON_WATCHPOINT)

    struct cortex_m_common *cortex_m = target_to_cm(target);

    for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
        /* wp->set stores comparator number + 1 */
        unsigned int dwt_num = wp->set - 1;
        struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;

        uint32_t dwt_function;
        int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
        if (retval != ERROR_OK)

        /* check the MATCHED bit */
        if (dwt_function & BIT(24)) {
            *hit_watchpoint = wp;
1938 void cortex_m_enable_watchpoints(struct target *target)
1940 struct watchpoint *watchpoint = target->watchpoints;
1942 /* set any pending watchpoints */
1943 while (watchpoint) {
1944 if (!watchpoint->set)
1945 cortex_m_set_watchpoint(target, watchpoint);
1946 watchpoint = watchpoint->next;
1950 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1951 uint32_t size, uint32_t count, uint8_t *buffer)
1953 struct armv7m_common *armv7m = target_to_armv7m(target);
1955 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1956 /* armv6m does not handle unaligned memory access */
1957 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1958 return ERROR_TARGET_UNALIGNED_ACCESS;
1961 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1964 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1965 uint32_t size, uint32_t count, const uint8_t *buffer)
1967 struct armv7m_common *armv7m = target_to_armv7m(target);
1969 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1970 /* armv6m does not handle unaligned memory access */
1971 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1972 return ERROR_TARGET_UNALIGNED_ACCESS;
1975 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
/**
 * Target init callback: build the ARMv7-M register cache and initialize
 * semihosting state.  Hardware probing happens later, in
 * cortex_m_examine().
 */
static int cortex_m_init_target(struct command_context *cmd_ctx,
    struct target *target)
    armv7m_build_reg_cache(target);
    arm_semihosting_init(target);
/**
 * Free per-target resources: the FPB comparator list, the DWT cache and
 * comparators (via cortex_m_dwt_free), the ARMv7-M register cache and
 * the target's private configuration.
 */
void cortex_m_deinit_target(struct target *target)
    struct cortex_m_common *cortex_m = target_to_cm(target);

    free(cortex_m->fp_comparator_list);

    cortex_m_dwt_free(target);
    armv7m_free_reg_cache(target);

    free(target->private_config);
1999 int cortex_m_profiling(struct target *target, uint32_t *samples,
2000 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2002 struct timeval timeout, now;
2003 struct armv7m_common *armv7m = target_to_armv7m(target);
2007 retval = target_read_u32(target, DWT_PCSR, ®_value);
2008 if (retval != ERROR_OK) {
2009 LOG_ERROR("Error while reading PCSR");
2012 if (reg_value == 0) {
2013 LOG_INFO("PCSR sampling not supported on this processor.");
2014 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2017 gettimeofday(&timeout, NULL);
2018 timeval_add_time(&timeout, seconds, 0);
2020 LOG_INFO("Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2022 /* Make sure the target is running */
2023 target_poll(target);
2024 if (target->state == TARGET_HALTED)
2025 retval = target_resume(target, 1, 0, 0, 0);
2027 if (retval != ERROR_OK) {
2028 LOG_ERROR("Error while resuming target");
2032 uint32_t sample_count = 0;
2035 if (armv7m && armv7m->debug_ap) {
2036 uint32_t read_count = max_num_samples - sample_count;
2037 if (read_count > 1024)
2040 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2041 (void *)&samples[sample_count],
2042 4, read_count, DWT_PCSR);
2043 sample_count += read_count;
2045 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2048 if (retval != ERROR_OK) {
2049 LOG_ERROR("Error while reading PCSR");
2054 gettimeofday(&now, NULL);
2055 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2056 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2061 *num_samples = sample_count;
/* REVISIT cache valid/dirty bits are unmaintained.  We could set "valid"
 * on r/w if the core is not running, and clear on resume or reset ... or
 * at least, in a post_restore_context() method.
 */

/* Backing state attached to each entry of the DWT register cache. */
struct dwt_reg_state {
    struct target *target;    /* target the DWT register lives on */
    uint8_t value[4];    /* scratch/cache */
2077 static int cortex_m_dwt_get_reg(struct reg *reg)
2079 struct dwt_reg_state *state = reg->arch_info;
2082 int retval = target_read_u32(state->target, state->addr, &tmp);
2083 if (retval != ERROR_OK)
2086 buf_set_u32(state->value, 0, 32, tmp);
2090 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2092 struct dwt_reg_state *state = reg->arch_info;
2094 return target_write_u32(state->target, state->addr,
2095 buf_get_u32(buf, 0, reg->size));
/* Registers exposed through the "Cortex-M DWT registers" cache:
 * DWT_CTRL and DWT_CYCCNT, plus a comp/mask/function triplet per
 * implemented comparator (generated by the DWT_COMPARATOR macro). */
static const struct dwt_reg dwt_base_regs[] = {
    { DWT_CTRL, "dwt_ctrl", 32, },
    /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
     * increments while the core is asleep.
     */
    { DWT_CYCCNT, "dwt_cyccnt", 32, },
    /* plus some 8 bit counters, useful for profiling with TPIU */

static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
    { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
    { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
    { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
#undef DWT_COMPARATOR
/* Register cache access hooks: DWT registers are read/written directly
 * on the target at the address held in their dwt_reg_state. */
static const struct reg_arch_type dwt_reg_type = {
    .get = cortex_m_dwt_get_reg,
    .set = cortex_m_dwt_set_reg,
/* Allocate backing state for one DWT register described by d and wire it
 * into cache entry r; the state is freed in cortex_m_dwt_free(). */
static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
    struct dwt_reg_state *state;

    state = calloc(1, sizeof(*state));
    state->addr = d->addr;

    r->value = state->value;
    r->arch_info = state;
    r->type = &dwt_reg_type;
/**
 * Probe the DWT unit, build the "Cortex-M DWT registers" cache and the
 * comparator bookkeeping used by the watchpoint code, and clear any
 * comparators left enabled on the target.
 */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
    struct reg_cache *cache;
    struct cortex_m_dwt_comparator *comparator;

    target_read_u32(target, DWT_CTRL, &dwtcr);
    LOG_DEBUG("DWT_CTRL: 0x%" PRIx32, dwtcr);
        LOG_DEBUG("no DWT");

    target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
    LOG_DEBUG("DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

    /* DWT_CTRL[31:28] holds the number of implemented comparators */
    cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
    cm->dwt_comp_available = cm->dwt_num_comp;
    cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
            sizeof(struct cortex_m_dwt_comparator));
    if (!cm->dwt_comparator_list) {
        cm->dwt_num_comp = 0;
        LOG_ERROR("out of mem");

    cache = calloc(1, sizeof(*cache));
        free(cm->dwt_comparator_list);

    cache->name = "Cortex-M DWT registers";
    /* CTRL + CYCCNT, then comp/mask/function per comparator */
    cache->num_regs = 2 + cm->dwt_num_comp * 3;
    cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
    if (!cache->reg_list) {

    for (reg = 0; reg < 2; reg++)
        cortex_m_dwt_addreg(target, cache->reg_list + reg,
            dwt_base_regs + reg);

    comparator = cm->dwt_comparator_list;
    for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
        comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
        for (j = 0; j < 3; j++, reg++)
            cortex_m_dwt_addreg(target, cache->reg_list + reg,
                dwt_comp + 3 * i + j);

        /* make sure we clear any watchpoints enabled on the target */
        target_write_u32(target, comparator->dwt_comparator_address + 8, 0);

    *register_get_last_cache_p(&target->reg_cache) = cache;
    cm->dwt_cache = cache;

    LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
        dwtcr, cm->dwt_num_comp,
        (dwtcr & (0xf << 24)) ? " only" : "/trigger");

    /* REVISIT:  if num_comp > 1, check whether comparator #1 can
     * implement single-address data value watchpoints ... so we
     * won't need to check it later, when asked to set one up.
     */
/**
 * Tear down everything cortex_m_dwt_setup() built: the comparator list
 * and the DWT register cache (including each entry's arch_info state).
 */
static void cortex_m_dwt_free(struct target *target)
    struct cortex_m_common *cm = target_to_cm(target);
    struct reg_cache *cache = cm->dwt_cache;

    free(cm->dwt_comparator_list);
    cm->dwt_comparator_list = NULL;
    cm->dwt_num_comp = 0;

        register_unlink_cache(&target->reg_cache, cache);
        if (cache->reg_list) {
            for (size_t i = 0; i < cache->num_regs; i++)
                free(cache->reg_list[i].arch_info);
            free(cache->reg_list);
    cm->dwt_cache = NULL;
/* Media and FP Feature Registers; the expected reset values below are
 * compared against the hardware in cortex_m_examine() to detect the FPU
 * flavour (FPv4 SP on Cortex-M4, FPv5 SP/DP on Cortex-M7). */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011

#define MVFR0_DEFAULT_M7_SP 0x10110021
#define MVFR0_DEFAULT_M7_DP 0x10110221
#define MVFR1_DEFAULT_M7_SP 0x11000011
#define MVFR1_DEFAULT_M7_DP 0x12000011
2264 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2265 struct adiv5_ap **debug_ap)
2267 if (dap_find_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2270 return dap_find_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
/* Examine handler (shared with hla_target): identify the core from
 * CPUID, probe optional features (FPU, FPB, DWT), enable debug and
 * configure trace on first examination. */
int cortex_m_examine(struct target *target)
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * direct MEM-AP access, so the AP setup below is skipped for it */
	if (!armv7m->is_hla_target) {
		if (cortex_m->apsel == DP_APSEL_INVALID) {
			/* Search for the MEM-AP */
			retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not find MEM-AP to control the core");
			/* user selected the AP explicitly via -ap-num */
			armv7m->debug_ap = dap_ap(swjdp, cortex_m->apsel);

		/* Leave (only) generic DAP stuff for debugport_init(); */
		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)

		/* Get ARCH and CPU types */
		const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;

		/* match the part number against the table of supported cores */
		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (core_partno == cortex_m_parts[n].partno) {
				cortex_m->core_info = &cortex_m_parts[n];

		if (!cortex_m->core_info) {
			LOG_ERROR("Cortex-M PARTNO 0x%x is unrecognized", core_partno);

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_INFO("%s: %s r%" PRId8 "p%" PRId8 " processor detected",
			target_name(target),
			cortex_m->core_info->name,
			(uint8_t)((cpuid >> 20) & 0xf),
			(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1 erratum: single stepping may enter a
		 * pending exception handler; flag it so stepping can mask IRQs */
		cortex_m->maskints_erratum = false;
		if (core_partno == CORTEX_M7_PARTNO) {
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_WARNING("Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
		LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);

		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_DEBUG("%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_DEBUG("%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_DEBUG("%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide ARMv8-M specific registers on pre-v8-M cores */
		if (armv7m->arm.arch != ARM_ARCH_V8M)
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			/* DHCSR writes need the DBGKEY in the upper halfword */
			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
			cortex_m->dcb_dhcsr = dhcsr;

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB: read comparator layout from FP_CTRL */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT: rebuild the watchpoint comparator cache */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_INFO("%s: target has %d breakpoints, %d watchpoints",
			target_name(target),
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
/* Read one data byte plus the control byte from the software DCC
 * channel implemented on DCRDR, then acknowledge the transfer so the
 * target firmware can send the next byte. */
static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
	struct armv7m_common *armv7m = target_to_armv7m(target);

	retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
	if (retval != ERROR_OK)

	/* low byte carries the control/handshake bits, high byte the data */
	dcrdr = target_buffer_get_u16(target, buf);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0)) {
		target_buffer_set_u16(target, buf, 0);
		retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
/* target_request_data callback: pull 'size' 32-bit words from the
 * software DCC channel into 'buffer', one byte per DCC read. */
static int cortex_m_target_request_data(struct target *target,
	uint32_t size, uint8_t *buffer)
	/* each requested word is transferred as four byte reads */
	for (i = 0; i < (size * 4); i++) {
		int retval = cortex_m_dcc_read(target, &data, &ctrl);
		if (retval != ERROR_OK)
/* Periodic timer callback: while the target runs with debug messages
 * enabled, poll the software DCC channel and forward any complete
 * 32-bit request to the target_request layer. */
static int cortex_m_handle_target_request(void *priv)
	struct target *target = priv;
	if (!target_was_examined(target))
	if (!target->dbg_msg_enabled)

	if (target->state == TARGET_RUNNING) {

		retval = cortex_m_dcc_read(target, &data, &ctrl);
		if (retval != ERROR_OK)

		/* check if we have data */
		if (ctrl & (1 << 0)) {

			/* we assume target is quick enough */
			/* assemble the remaining three bytes of the request word */
			for (int i = 1; i <= 3; i++) {
				retval = cortex_m_dcc_read(target, &data, &ctrl);
				if (retval != ERROR_OK)
				request |= ((uint32_t)data << (i * 8));
			target_request(target, request);
/* One-time init of the Cortex-M arch info: wire up the armv7m layer,
 * install the arch-specific callbacks and register the periodic DCC
 * polling timer. */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted;
	 * if not it will use CORTEX_M_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll the DCC channel for debug messages every 1 ms */
	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);
/* target_create handler: validate the ADIv5 private configuration,
 * allocate the Cortex-M private data and initialize the arch info. */
static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
	struct adiv5_private_config *pc;

	pc = (struct adiv5_private_config *)target->private_config;
	if (adiv5_verify_config(pc) != ERROR_OK)

	struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
		/* calloc failure path */
		LOG_ERROR("No memory creating target");

	/* magic lets cortex_m_verify_pointer() validate the target later */
	cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
	cortex_m->apsel = pc->ap_num;

	cortex_m_init_arch_info(target, cortex_m, pc->dap);
2587 /*--------------------------------------------------------------------------*/
2589 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2590 struct cortex_m_common *cm)
2592 if (cm->common_magic != CORTEX_M_COMMON_MAGIC) {
2593 command_print(cmd, "target is not a Cortex-M");
2594 return ERROR_TARGET_INVALID;
 * Only stuff below this line should need to verify that its target
 * is a Cortex-M. Everything else should have indirected through the
 * cortexm_target structure, which is only used with Cortex-M targets.
/* 'cortex_m vector_catch' command: set or display which exception
 * vectors halt the core on entry, via the DEMCR VC_* bits. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* mapping from command keyword to DEMCR vector-catch bit */
	static const struct {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");

	/* read the live DEMCR value from the target */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)

		/* 'all'/'none' shortcuts set or clear every catch bit */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
		/* otherwise OR together the individually named vectors */
		while (CMD_ARGC-- > 0) {
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
				catch |= vec_ids[i].mask;
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;

		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

	/* write, but don't assume it stuck (why not??) */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
	if (retval != ERROR_OK)
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)

	/* FIXME be sure to clear DEMCR on clean server shutdown.
	 * Otherwise the vector catch hardware could fire when there's
	 * no debugger hooked up, causing much confusion...
	 */

	/* print the current catch/ignore state of every vector */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
/* 'cortex_m maskisr' command: set or display how interrupts are
 * masked during stepping (auto / off / on / steponly). */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* keyword <-> enum mapping for the supported masking modes */
	static const struct jim_nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	const struct jim_nvp *n;

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)

	/* changing the mode only makes sense on a halted target */
	if (target->state != TARGET_HALTED) {
		command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);

		/* parse the optional mode argument */
		n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		/* apply the new policy immediately while halted */
		cortex_m_set_maskints_for_halt(target);

	/* report the currently active mode */
	n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);
/* 'cortex_m reset_config' command: select or display the software
 * reset method used when SRST is not used (sysresetreq/vectreset). */
COMMAND_HANDLER(handle_cortex_m_reset_config_command)
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)

	/* parse the optional method argument */
	if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
		cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;

	else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
		/* warn: VECTRESET exists only on ARMv7-M cores */
		if (target_was_examined(target)
			&& !cortex_m->vectreset_supported)
			LOG_WARNING("VECTRESET is not supported on your Cortex-M core!");

		cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

		return ERROR_COMMAND_SYNTAX_ERROR;

	/* report the currently selected method */
	switch (cortex_m->soft_reset_config) {
		case CORTEX_M_RESET_SYSRESETREQ:
			reset_config = "sysresetreq";

		case CORTEX_M_RESET_VECTRESET:
			reset_config = "vectreset";

			reset_config = "unknown";

	command_print(CMD, "cortex_m reset_config %s", reset_config);
/* Subcommands of the 'cortex_m' group: maskisr, vector_catch,
 * reset_config. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		/* COMMAND_ANY: may be issued before the target is examined */
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	COMMAND_REGISTRATION_DONE
/* Top-level command registration: chains the generic ARMv7-M and
 * trace command groups, the deprecated TPIU commands, the 'cortex_m'
 * group itself and the RTT commands. */
static const struct command_registration cortex_m_command_handlers[] = {
		.chain = armv7m_command_handlers,
		.chain = armv7m_trace_command_handlers,
	/* START_DEPRECATED_TPIU */
		.chain = arm_tpiu_deprecated_command_handlers,
	/* END_DEPRECATED_TPIU */
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.chain = cortex_m_exec_command_handlers,
		.chain = rtt_target_command_handlers,
	COMMAND_REGISTRATION_DONE
/* Target driver vtable for (non-HLA) Cortex-M targets. */
struct target_type cortexm_target = {
	/* state polling */
	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	/* software DCC channel */
	.target_request_data = cortex_m_target_request_data,

	/* run control */
	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* reset handling */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB integration */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* on-target algorithm execution (flash drivers etc.) */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* lifecycle */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,