2 * Copyright (C) 2009 by David Brownell
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
22 #include "armv8_dpm.h"
23 #include <jtag/jtag.h>
25 #include "breakpoints.h"
26 #include "target_type.h"
27 #include "armv8_opcodes.h"
29 #include "helper/time_support.h"
/*
 * Rearrange a 32-bit T32 (Thumb-2) opcode into the half-word order
 * expected by the EDITR instruction-transfer register: the two 16-bit
 * halves are swapped.  The macro argument is fully parenthesized so
 * that passing a compound expression cannot change the grouping
 * (standard function-like-macro hygiene; cf. CERT PRE01-C).
 */
#define T32_FMTITR(instr) ((((instr) & 0x0000FFFF) << 16) | (((instr) & 0xFFFF0000) >> 16))
36 * Implements various ARM DPM operations using architectural debug registers.
37 * These routines layer over core-specific communication methods to cope with
38 * implementation differences between cores like ARM1136 and Cortex-A8.
40 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
41 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
42 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B). In OpenOCD, DPM operations
43 * are abstracted through internal programming interfaces to share code and
44 * to minimize needless differences in debug behavior between cores.
48 * Get core state from EDSCR, without necessity to retrieve CPSR
/*
 * Decode the core's execution state from the cached EDSCR value
 * (dpm->dscr) alone: EL is in bits [9:8], the per-EL RW ("register
 * width") bits in [13:10].  Scanning RW top-down for the first clear
 * bit finds the highest EL that is AArch32.
 */
50 enum arm_state armv8_dpm_get_core_state(struct arm_dpm *dpm)
52 int el = (dpm->dscr >> 8) & 0x3;
53 int rw = (dpm->dscr >> 10) & 0xF;
58 /* find the first '0' in DSCR.RW */
59 for (pos = 3; pos >= 0; pos--) {
60 if ((rw & (1 << pos)) == 0)
/* all RW bits set: every exception level executes AArch64 */
65 return ARM_STATE_AARCH64;
70 /*----------------------------------------------------------------------*/
/* Push one 32-bit word into the DCC via the DTRRX register (the
 * debugger-to-core direction of the Debug Communications Channel). */
72 static int dpmv8_write_dcc(struct armv8_common *armv8, uint32_t data)
74 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
75 return mem_ap_write_u32(armv8->debug_ap,
76 armv8->debug_base + CPUV8_DBG_DTRRX, data);
/* Push a 64-bit value into the DCC: low word goes through DTRRX,
 * high word through DTRTX, matching the AArch64 DBGDTR layout. */
79 static int dpmv8_write_dcc_64(struct armv8_common *armv8, uint64_t data)
82 LOG_DEBUG("write DCC 0x%016" PRIx64, data);
/* low 32 bits first */
83 ret = mem_ap_write_u32(armv8->debug_ap,
84 armv8->debug_base + CPUV8_DBG_DTRRX, data);
/* then the high 32 bits */
86 ret = mem_ap_write_u32(armv8->debug_ap,
87 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
/* Read one 32-bit word from the DCC (core-to-debugger direction),
 * waiting up to 1 s for the core to post data in DTRTX. */
91 static int dpmv8_read_dcc(struct armv8_common *armv8, uint32_t *data,
94 uint32_t dscr = DSCR_ITE;
100 /* Wait for DSCR_DTR_TX_FULL: core has placed data in DTRTX */
101 long long then = timeval_ms();
102 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
103 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
104 armv8->debug_base + CPUV8_DBG_DSCR,
106 if (retval != ERROR_OK)
108 if (timeval_ms() > then + 1000) {
109 LOG_ERROR("Timeout waiting for read dcc");
/* fetch the posted word from DTRTX */
114 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
115 armv8->debug_base + CPUV8_DBG_DTRTX,
117 if (retval != ERROR_OK)
119 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
/* Read a 64-bit value from the DCC: low word from DTRTX, high word
 * from DTRRX, combined into *data.  Waits up to 1 s for DTR_TX_FULL. */
127 static int dpmv8_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
130 uint32_t dscr = DSCR_ITE;
137 /* Wait for DSCR_DTR_TX_FULL: core has posted data for the debugger */
138 long long then = timeval_ms();
139 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
140 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141 armv8->debug_base + CPUV8_DBG_DSCR,
143 if (retval != ERROR_OK)
145 if (timeval_ms() > then + 1000) {
146 LOG_ERROR("Timeout waiting for DTR_TX_FULL, dscr = 0x%08" PRIx32, dscr);
/* low half from DTRTX */
151 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
152 armv8->debug_base + CPUV8_DBG_DTRTX,
154 if (retval != ERROR_OK)
/* high half from DTRRX */
157 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
158 armv8->debug_base + CPUV8_DBG_DTRRX,
160 if (retval != ERROR_OK)
/* NOTE(review): reading the uint64_t through a uint32_t* relies on
 * little-endian layout and is a strict-aliasing gray area — works on
 * the supported hosts, but worth confirming against compiler flags. */
163 *data = *(uint32_t *)data | (uint64_t)higher << 32;
164 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
/* Prepare for a burst of DPM operations: wait (up to 1 s) for
 * DSCR.ITE so the instruction pipeline is ready, then drain a stale
 * DTRRX word if one is pending.  Paired with dpmv8_dpm_finish(). */
172 static int dpmv8_dpm_prepare(struct arm_dpm *dpm)
174 struct armv8_common *armv8 = dpm->arm->arch_info;
178 /* set up invariant: ITE is set after every DPM operation */
179 long long then = timeval_ms();
181 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
182 armv8->debug_base + CPUV8_DBG_DSCR,
184 if (retval != ERROR_OK)
186 if ((dscr & DSCR_ITE) != 0)
188 if (timeval_ms() > then + 1000) {
189 LOG_ERROR("Timeout waiting for dpm prepare")
194 /* update the stored copy of dscr */
197 /* this "should never happen" ... */
198 if (dscr & DSCR_DTR_RX_FULL) {
199 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
/* flush the stale word so the next DCC write isn't corrupted */
201 retval = mem_ap_read_u32(armv8->debug_ap,
202 armv8->debug_base + CPUV8_DBG_DTRRX, &dscr);
203 if (retval != ERROR_OK)
/* Counterpart to dpmv8_dpm_prepare(); currently a no-op placeholder. */
210 static int dpmv8_dpm_finish(struct arm_dpm *dpm)
212 /* REVISIT what could be done here? */
/*
 * Feed one instruction to the core through the EDITR register and wait
 * for completion.  Handles the T32 half-word swap for AArch32 cores,
 * tracks EL changes, and routes DSCR.ERR into the exception handler.
 * @param p_dscr optional out-parameter receiving the final DSCR value.
 */
216 static int dpmv8_exec_opcode(struct arm_dpm *dpm,
217 uint32_t opcode, uint32_t *p_dscr)
219 struct armv8_common *armv8 = dpm->arm->arch_info;
220 uint32_t dscr = dpm->dscr;
223 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
228 /* Wait for InstrCompl bit to be set */
229 long long then = timeval_ms();
230 while ((dscr & DSCR_ITE) == 0) {
231 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
232 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
233 if (retval != ERROR_OK) {
234 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
237 if (timeval_ms() > then + 1000) {
238 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
/* AArch32 cores take T32 opcodes with the half-words swapped in ITR */
243 if (armv8_dpm_get_core_state(dpm) != ARM_STATE_AARCH64)
244 opcode = T32_FMTITR(opcode);
246 retval = mem_ap_write_u32(armv8->debug_ap,
247 armv8->debug_base + CPUV8_DBG_ITR, opcode);
248 if (retval != ERROR_OK)
253 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
254 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
255 if (retval != ERROR_OK) {
256 LOG_ERROR("Could not read DSCR register");
259 if (timeval_ms() > then + 1000) {
260 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
263 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
265 /* update dscr and el after each command execution */
267 if (dpm->last_el != ((dscr >> 8) & 3))
268 LOG_DEBUG("EL %i -> %i", dpm->last_el, (dscr >> 8) & 3);
269 dpm->last_el = (dscr >> 8) & 3;
/* instruction faulted: clean up the clobbered debug state */
271 if (dscr & DSCR_ERR) {
272 LOG_ERROR("Opcode 0x%08"PRIx32", DSCR.ERR=1, DSCR.EL=%i", opcode, dpm->last_el);
273 armv8_dpm_handle_exception(dpm);
/* Execute an opcode, discarding the resulting DSCR value. */
283 static int dpmv8_instr_execute(struct arm_dpm *dpm, uint32_t opcode)
285 return dpmv8_exec_opcode(dpm, opcode, NULL);
/* Place a 32-bit word in the DCC, then run an opcode that is expected
 * to consume it (e.g. a DTRRX read on the core side). */
288 static int dpmv8_instr_write_data_dcc(struct arm_dpm *dpm,
289 uint32_t opcode, uint32_t data)
291 struct armv8_common *armv8 = dpm->arm->arch_info;
294 retval = dpmv8_write_dcc(armv8, data);
295 if (retval != ERROR_OK)
/* 0 == NULL here: caller does not need the resulting DSCR */
298 return dpmv8_exec_opcode(dpm, opcode, 0);
/* 64-bit variant of dpmv8_instr_write_data_dcc(). */
301 static int dpmv8_instr_write_data_dcc_64(struct arm_dpm *dpm,
302 uint32_t opcode, uint64_t data)
304 struct armv8_common *armv8 = dpm->arm->arch_info;
307 retval = dpmv8_write_dcc_64(armv8, data);
308 if (retval != ERROR_OK)
311 return dpmv8_exec_opcode(dpm, opcode, 0);
/* Stage a 32-bit value in R0 via the DCC, then execute an opcode that
 * takes its operand from R0.  R0 is the designated scratch register. */
314 static int dpmv8_instr_write_data_r0(struct arm_dpm *dpm,
315 uint32_t opcode, uint32_t data)
317 struct armv8_common *armv8 = dpm->arm->arch_info;
318 uint32_t dscr = DSCR_ITE;
/* transfer data from DCC to R0 */
321 retval = dpmv8_write_dcc(armv8, data);
322 if (retval != ERROR_OK)
325 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, READ_REG_DTRRX), &dscr);
326 if (retval != ERROR_OK)
329 /* then the opcode, taking data from R0 */
330 return dpmv8_exec_opcode(dpm, opcode, &dscr);
/* 64-bit variant of dpmv8_instr_write_data_r0().  On an AArch32 core
 * it falls back to the 32-bit path (only the low 32 bits of data are
 * used there, since the fallback takes a uint32_t). */
333 static int dpmv8_instr_write_data_r0_64(struct arm_dpm *dpm,
334 uint32_t opcode, uint64_t data)
336 struct armv8_common *armv8 = dpm->arm->arch_info;
339 if (dpm->arm->core_state != ARM_STATE_AARCH64)
340 return dpmv8_instr_write_data_r0(dpm, opcode, data);
342 /* transfer data from DCC to R0 */
343 retval = dpmv8_write_dcc_64(armv8, data);
344 if (retval == ERROR_OK)
345 retval = dpmv8_exec_opcode(dpm, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dpm->dscr);
347 /* then the opcode, taking data from R0 */
348 if (retval == ERROR_OK)
349 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
/* Issue DSB + ISB so a just-written CPSR/execution-state change takes
 * effect before further instructions are fed to the core. */
354 static int dpmv8_instr_cpsr_sync(struct arm_dpm *dpm)
357 struct armv8_common *armv8 = dpm->arm->arch_info;
359 /* "Prefetch flush" after modifying execution status in CPSR */
360 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_DSB_SY), &dpm->dscr);
361 if (retval == ERROR_OK)
/* NOTE(review): the ISB result is discarded here; the visible lines
 * suggest only the DSB status is propagated — confirm intended. */
362 dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_ISB_SY), &dpm->dscr);
/* Execute an opcode that emits a 32-bit result into the DCC, then
 * collect that word on the debugger side. */
366 static int dpmv8_instr_read_data_dcc(struct arm_dpm *dpm,
367 uint32_t opcode, uint32_t *data)
369 struct armv8_common *armv8 = dpm->arm->arch_info;
372 /* the opcode, writing data to DCC */
373 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
374 if (retval != ERROR_OK)
377 return dpmv8_read_dcc(armv8, data, &dpm->dscr);
/* 64-bit variant of dpmv8_instr_read_data_dcc(). */
380 static int dpmv8_instr_read_data_dcc_64(struct arm_dpm *dpm,
381 uint32_t opcode, uint64_t *data)
383 struct armv8_common *armv8 = dpm->arm->arch_info;
386 /* the opcode, writing data to DCC */
387 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
388 if (retval != ERROR_OK)
391 return dpmv8_read_dcc_64(armv8, data, &dpm->dscr);
/* Execute an opcode that leaves a 32-bit result in R0, then move R0
 * through the DCC back to the debugger. */
394 static int dpmv8_instr_read_data_r0(struct arm_dpm *dpm,
395 uint32_t opcode, uint32_t *data)
397 struct armv8_common *armv8 = dpm->arm->arch_info;
400 /* the opcode, writing data to R0 */
401 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
402 if (retval != ERROR_OK)
405 /* write R0 to DCC */
406 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, WRITE_REG_DTRTX), &dpm->dscr);
407 if (retval != ERROR_OK)
410 return dpmv8_read_dcc(armv8, data, &dpm->dscr);
/* 64-bit variant of dpmv8_instr_read_data_r0().  On an AArch32 core
 * it reads 32 bits via the fallback path and widens the result. */
413 static int dpmv8_instr_read_data_r0_64(struct arm_dpm *dpm,
414 uint32_t opcode, uint64_t *data)
416 struct armv8_common *armv8 = dpm->arm->arch_info;
419 if (dpm->arm->core_state != ARM_STATE_AARCH64) {
421 retval = dpmv8_instr_read_data_r0(dpm, opcode, &tmp);
422 if (retval == ERROR_OK)
427 /* the opcode, writing data to R0 */
428 retval = dpmv8_exec_opcode(dpm, opcode, &dpm->dscr);
429 if (retval != ERROR_OK)
432 /* write R0 to DCC */
433 retval = dpmv8_exec_opcode(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dpm->dscr);
434 if (retval != ERROR_OK)
437 return dpmv8_read_dcc_64(armv8, data, &dpm->dscr);
/*
 * Program one hardware breakpoint (index 0..15) or watchpoint
 * (index 16..31): write its value register then its control register.
 * @param addr    address to match
 * @param control control-register bits (enable, access mode, length)
 */
441 static int dpmv8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
442 target_addr_t addr, uint32_t control)
444 struct armv8_common *armv8 = dpm->arm->arch_info;
445 uint32_t vr = armv8->debug_base;
446 uint32_t cr = armv8->debug_base;
450 case 0 ... 15: /* breakpoints */
451 vr += CPUV8_DBG_BVR_BASE;
452 cr += CPUV8_DBG_BCR_BASE;
454 case 16 ... 31: /* watchpoints */
455 vr += CPUV8_DBG_WVR_BASE;
456 cr += CPUV8_DBG_WCR_BASE;
465 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
466 (unsigned) vr, (unsigned) cr);
/* value register first, then control (which holds the enable bit) */
468 retval = mem_ap_write_atomic_u32(armv8->debug_ap, vr, addr);
469 if (retval != ERROR_OK)
471 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, control);
/* Disable a hardware breakpoint/watchpoint by zeroing its control
 * register (same index convention as dpmv8_bpwp_enable()). */
475 static int dpmv8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
477 struct armv8_common *armv8 = dpm->arm->arch_info;
482 cr = armv8->debug_base + CPUV8_DBG_BCR_BASE;
485 cr = armv8->debug_base + CPUV8_DBG_WCR_BASE;
493 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
495 /* clear control register */
496 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, 0);
500 * Coprocessor support
503 /* Read coprocessor */
/* Read an AArch32 coprocessor register: execute MRC into R0 and
 * return the value through the DCC. */
504 static int dpmv8_mrc(struct target *target, int cpnum,
505 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
508 struct arm *arm = target_to_arm(target);
509 struct arm_dpm *dpm = arm->dpm;
512 retval = dpm->prepare(dpm);
513 if (retval != ERROR_OK)
516 LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
517 (int) op1, (int) CRn,
518 (int) CRm, (int) op2);
520 /* read coprocessor register into R0; return via DCC */
521 retval = dpm->instr_read_data_r0(dpm,
522 ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
/* always finish, regardless of error */
525 /* (void) */ dpm->finish(dpm);
/* Write an AArch32 coprocessor register: stage the value in R0 via
 * the DCC, then execute MCR from R0. */
529 static int dpmv8_mcr(struct target *target, int cpnum,
530 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
533 struct arm *arm = target_to_arm(target);
534 struct arm_dpm *dpm = arm->dpm;
537 retval = dpm->prepare(dpm);
538 if (retval != ERROR_OK)
541 LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
542 (int) op1, (int) CRn,
543 (int) CRm, (int) op2);
545 /* read DCC into r0; then write coprocessor register from R0 */
546 retval = dpm->instr_write_data_r0(dpm,
547 ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
/* always finish, regardless of error */
550 /* (void) */ dpm->finish(dpm);
/* Read an AArch64 system register: assemble the MRS encoding from the
 * (op0, op1, CRn, CRm, op2) fields and return the value via R0/DCC. */
554 static int dpmv8_mrs(struct target *target, uint32_t op0,
555 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
558 struct arm *arm = target_to_arm(target);
559 struct arm_dpm *dpm = arm->dpm;
563 retval = dpm->prepare(dpm);
564 if (retval != ERROR_OK)
/* pack the system-register fields into the MRS opcode layout */
566 op_code = ((op0 & 0x3) << 19 | (op1 & 0x7) << 16 | (CRn & 0xF) << 12 |\
567 (CRm & 0xF) << 8 | (op2 & 0x7) << 5);
569 LOG_DEBUG("MRS p%d, %d, r0, c%d, c%d, %d", (int)op0,
570 (int) op1, (int) CRn,
571 (int) CRm, (int) op2);
572 /* read coprocessor register into R0; return via DCC */
573 retval = dpm->instr_read_data_r0(dpm,
574 ARMV8_MRS(op_code, 0),
/* always finish, regardless of error */
577 /* (void) */ dpm->finish(dpm);
/* Write an AArch64 system register: stage the value in R0 via the
 * DCC, then execute the assembled MSR encoding. */
581 static int dpmv8_msr(struct target *target, uint32_t op0,
582 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
585 struct arm *arm = target_to_arm(target);
586 struct arm_dpm *dpm = arm->dpm;
590 retval = dpm->prepare(dpm);
591 if (retval != ERROR_OK)
/* pack the system-register fields into the MSR opcode layout */
594 op_code = ((op0 & 0x3) << 19 | (op1 & 0x7) << 16 | (CRn & 0xF) << 12 |\
595 (CRm & 0xF) << 8 | (op2 & 0x7) << 5);
597 LOG_DEBUG("MSR p%d, %d, r0, c%d, c%d, %d", (int)op0,
598 (int) op1, (int) CRn,
599 (int) CRm, (int) op2);
601 /* read DCC into r0; then write coprocessor register from R0 */
602 retval = dpm->instr_write_data_r0(dpm,
603 ARMV8_MSR_GP(op_code, 0),
/* always finish, regardless of error */
606 /* (void) */ dpm->finish(dpm);
610 /*----------------------------------------------------------------------*/
613 * Register access utilities
/*
 * Switch the halted core to the given mode/EL, or restore the saved
 * mode when called with ARM_MODE_ANY.  Raising the EL uses DCPS;
 * lowering it uses DRPS (repeatedly, one level per instruction).
 * After the switch the opcode and register-access tables are
 * re-selected for the resulting execution state.
 */
616 int armv8_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
618 struct armv8_common *armv8 = (struct armv8_common *)dpm->arm->arch_info;
619 int retval = ERROR_OK;
620 unsigned int target_el;
621 enum arm_state core_state;
624 /* restore previous mode */
625 if (mode == ARM_MODE_ANY) {
626 cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);
628 LOG_DEBUG("restoring mode, cpsr = 0x%08"PRIx32, cpsr);
631 LOG_DEBUG("setting mode 0x%"PRIx32, mode);
633 /* else force to the specified mode */
634 if (is_arm_mode(mode))
640 switch (cpsr & 0x1f) {
652 * TODO: handle ARM_MODE_HYP
/* for AArch64, EL lives in CPSR/PSTATE bits [3:2] */
662 target_el = (cpsr >> 2) & 3;
665 if (target_el > SYSTEM_CUREL_EL3) {
666 LOG_ERROR("%s: Invalid target exception level %i", __func__, target_el);
670 LOG_DEBUG("target_el = %i, last_el = %i", target_el, dpm->last_el);
671 if (target_el > dpm->last_el) {
/* raise EL with DCPS<n>; the target level is OR'd into the opcode */
672 retval = dpm->instr_execute(dpm,
673 armv8_opcode(armv8, ARMV8_OPC_DCPS) | target_el);
675 /* DCPS clobbers registers just like an exception taken */
676 armv8_dpm_handle_exception(dpm);
678 core_state = armv8_dpm_get_core_state(dpm);
679 if (core_state != ARM_STATE_AARCH64) {
680 /* cannot do DRPS/ERET when already in EL0 */
681 if (dpm->last_el != 0) {
682 /* load SPSR with the desired mode and execute DRPS */
683 LOG_DEBUG("SPSR = 0x%08"PRIx32, cpsr);
684 retval = dpm->instr_write_data_r0(dpm,
685 ARMV8_MSR_GP_xPSR_T1(1, 0, 15), cpsr);
686 if (retval == ERROR_OK)
687 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
691 * need to execute multiple DRPS instructions until target_el
/* each DRPS drops one EL; bail out if the EL stops changing */
694 while (retval == ERROR_OK && dpm->last_el != target_el) {
695 unsigned int cur_el = dpm->last_el;
696 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
697 if (cur_el == dpm->last_el) {
698 LOG_INFO("Cannot reach EL %i, SPSR corrupted?", target_el);
704 /* On executing DRPS, DSPSR and DLR become UNKNOWN, mark them as dirty */
705 dpm->arm->cpsr->dirty = true;
706 dpm->arm->pc->dirty = true;
709 * re-evaluate the core state, we might be in Aarch32 state now
710 * we rely on dpm->dscr being up-to-date
712 core_state = armv8_dpm_get_core_state(dpm);
713 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
714 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
721 * Common register read, relies on armv8_select_reg_access() having been called.
/* Read one core register through the state-specific accessor and
 * store the value into the register cache entry. */
723 static int dpmv8_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
725 struct armv8_common *armv8 = dpm->arm->arch_info;
729 retval = armv8->read_reg_u64(armv8, regnum, &value_64);
731 if (retval == ERROR_OK) {
734 buf_set_u64(r->value, 0, r->size, value_64);
/* log width-appropriately: 64-bit vs 32-bit register */
736 LOG_DEBUG("READ: %s, %16.8llx", r->name, (unsigned long long) value_64);
738 LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned int) value_64);
744 * Common register write, relies on armv8_select_reg_access() having been called.
/* Write one core register: take the value from the register cache
 * entry and push it through the state-specific accessor. */
746 static int dpmv8_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
748 struct armv8_common *armv8 = dpm->arm->arch_info;
749 int retval = ERROR_FAIL;
752 value_64 = buf_get_u64(r->value, 0, r->size);
754 retval = armv8->write_reg_u64(armv8, regnum, value_64);
755 if (retval == ERROR_OK) {
/* log width-appropriately: 64-bit vs 32-bit register */
758 LOG_DEBUG("WRITE: %s, %16.8llx", r->name, (unsigned long long)value_64);
760 LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned int)value_64);
767 * Read basic registers of the current context: R0 to R15, and CPSR;
768 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
769 * In normal operation this is called on entry to halting debug state,
770 * possibly after some other operations supporting restore of debug state
771 * or making sure the CPU is fully idle (drain write buffer, etc).
/*
 * Read the current-context registers into the cache: R0 first (it is
 * the scratch register), then CPSR (which also sets core mode/state),
 * then every remaining register visible at the current EL.
 */
773 int armv8_dpm_read_current_registers(struct arm_dpm *dpm)
775 struct arm *arm = dpm->arm;
776 struct armv8_common *armv8 = (struct armv8_common *)arm->arch_info;
777 struct reg_cache *cache;
782 retval = dpm->prepare(dpm);
783 if (retval != ERROR_OK)
786 cache = arm->core_cache;
788 /* read R0 first (it's used for scratch), then CPSR */
789 r = cache->reg_list + 0;
791 retval = dpmv8_read_reg(dpm, r, 0);
792 if (retval != ERROR_OK)
797 /* read cpsr to r0 and get it back */
798 retval = dpm->instr_read_data_r0(dpm,
799 armv8_opcode(armv8, READ_REG_DSPSR), &cpsr);
800 if (retval != ERROR_OK)
803 /* update core mode and state */
804 armv8_set_cpsr(arm, cpsr);
806 for (unsigned int i = 1; i < cache->num_regs ; i++) {
807 struct arm_reg *arm_reg;
809 r = armv8_reg_current(arm, i);
814 * Only read registers that are available from the
815 * current EL (or core mode).
817 arm_reg = r->arch_info;
818 if (arm_reg->mode != ARM_MODE_ANY &&
819 dpm->last_el != armv8_curel_from_core_mode(arm_reg->mode))
822 retval = dpmv8_read_reg(dpm, r, i);
823 if (retval != ERROR_OK)
833 /* Avoid needless I/O ... leave breakpoints and watchpoints alone
834 * unless they're removed, or need updating because of single-stepping
835 * or running debugger code.
/*
 * Reconcile one hardware break/watchpoint slot with its desired state:
 * disable it when removed (or at startup), enable it when requested
 * but not yet set, and temporarily disable a set one while single-
 * stepping.  No-op when nothing is dirty.
 */
837 static int dpmv8_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
838 struct dpm_bpwp *xp, int *set_p)
840 int retval = ERROR_OK;
847 /* removed or startup; we must disable it */
852 /* disabled, but we must set it */
853 xp->dirty = disable = false;
858 /* set, but we must temporarily disable it */
859 xp->dirty = disable = true;
864 retval = dpm->bpwp_disable(dpm, xp->number);
866 retval = dpm->bpwp_enable(dpm, xp->number,
867 xp->address, xp->control);
869 if (retval != ERROR_OK)
/* NOTE(review): the argument order looks swapped versus the format
 * string — "%s: ..." should take the target name first, but
 * disable/enable is passed first.  Confirm against upstream fix. */
870 LOG_ERROR("%s: can't %s HW %spoint %d",
871 disable ? "disable" : "enable",
872 target_name(dpm->arm->target),
873 (xp->number < 16) ? "break" : "watch",
879 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp);
882 * Writes all modified core registers for all processor modes. In normal
883 * operation this is called on exit from halting debug state.
885 * @param dpm: represents the processor
886 * @param bpwp: true ensures breakpoints and watchpoints are set,
887 * false ensures they are cleared
/*
 * Flush all dirty cached registers back to the halted core, update
 * hardware break/watchpoints when this DPM manages them, and restore
 * the original core mode.  Write order matters: per-EL registers
 * first, then CPSR, PC, and finally scratch register R0.
 */
889 int armv8_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
891 struct arm *arm = dpm->arm;
892 struct reg_cache *cache = arm->core_cache;
895 retval = dpm->prepare(dpm);
896 if (retval != ERROR_OK)
899 /* If we're managing hardware breakpoints for this core, enable
900 * or disable them as requested.
902 * REVISIT We don't yet manage them for ANY cores. Eventually
903 * we should be able to assume we handle them; but until then,
904 * cope with the hand-crafted breakpoint code.
906 if (arm->target->type->add_breakpoint == dpmv8_add_breakpoint) {
907 for (unsigned i = 0; i < dpm->nbp; i++) {
908 struct dpm_bp *dbp = dpm->dbp + i;
909 struct breakpoint *bp = dbp->bp;
911 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
912 bp ? &bp->set : NULL);
913 if (retval != ERROR_OK)
918 /* enable/disable watchpoints */
919 for (unsigned i = 0; i < dpm->nwp; i++) {
920 struct dpm_wp *dwp = dpm->dwp + i;
921 struct watchpoint *wp = dwp->wp;
923 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
924 wp ? &wp->set : NULL);
925 if (retval != ERROR_OK)
929 /* NOTE: writes to breakpoint and watchpoint registers might
930 * be queued, and need (efficient/batched) flushing later.
933 /* Restore original core mode and state */
934 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
935 if (retval != ERROR_OK)
938 /* check everything except our scratch register R0 */
939 for (unsigned i = 1; i < cache->num_regs; i++) {
942 /* skip PC and CPSR */
943 if (i == ARMV8_PC || i == ARMV8_xPSR)
946 if (!cache->reg_list[i].valid)
949 if (!cache->reg_list[i].dirty)
952 /* skip all registers not on the current EL */
953 r = cache->reg_list[i].arch_info;
954 if (r->mode != ARM_MODE_ANY &&
955 dpm->last_el != armv8_curel_from_core_mode(r->mode))
958 retval = dpmv8_write_reg(dpm, &cache->reg_list[i], i);
959 if (retval != ERROR_OK)
963 /* flush CPSR and PC */
964 if (retval == ERROR_OK)
965 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_xPSR], ARMV8_xPSR);
966 if (retval == ERROR_OK)
967 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_PC], ARMV8_PC);
968 /* flush R0 -- it's *very* dirty by now */
969 if (retval == ERROR_OK)
970 retval = dpmv8_write_reg(dpm, &cache->reg_list[0], 0);
/* make the restored CPSR take effect before resuming */
971 if (retval == ERROR_OK)
972 dpm->instr_cpsr_sync(dpm);
979 * Standard ARM register accessors ... there are three methods
980 * in "struct arm", to support individual read/write and bulk read
/* "struct arm" read_core_reg hook: validate regnum, bracket the read
 * in prepare()/finish(), and delegate to dpmv8_read_reg(). */
984 static int armv8_dpm_read_core_reg(struct target *target, struct reg *r,
985 int regnum, enum arm_mode mode)
987 struct arm *arm = target_to_arm(target);
988 struct arm_dpm *dpm = target_to_arm(target)->dpm;
990 int max = arm->core_cache->num_regs;
992 if (regnum < 0 || regnum >= max)
993 return ERROR_COMMAND_SYNTAX_ERROR;
996 * REVISIT what happens if we try to read SPSR in a core mode
997 * which has no such register?
999 retval = dpm->prepare(dpm);
1000 if (retval != ERROR_OK)
1003 retval = dpmv8_read_reg(dpm, r, regnum);
1004 if (retval != ERROR_OK)
/* always clean up, regardless of error */
1008 /* (void) */ dpm->finish(dpm);
/* "struct arm" write_core_reg hook: validate regnum, bracket the
 * write in prepare()/finish(), and delegate to dpmv8_write_reg(). */
1012 static int armv8_dpm_write_core_reg(struct target *target, struct reg *r,
1013 int regnum, enum arm_mode mode, uint8_t *value)
1015 struct arm *arm = target_to_arm(target);
1016 struct arm_dpm *dpm = target_to_arm(target)->dpm;
1018 int max = arm->core_cache->num_regs;
/* NOTE(review): `regnum > max` admits regnum == max, one past the
 * last cache entry; the read path uses `>= max`.  Likely off-by-one
 * — confirm and align with armv8_dpm_read_core_reg. */
1020 if (regnum < 0 || regnum > max)
1021 return ERROR_COMMAND_SYNTAX_ERROR;
1023 /* REVISIT what happens if we try to write SPSR in a core mode
1024 * which has no such register?
1027 retval = dpm->prepare(dpm);
1028 if (retval != ERROR_OK)
1031 retval = dpmv8_write_reg(dpm, r, regnum);
1033 /* always clean up, regardless of error */
/*
 * Read every not-yet-valid register in the cache, switching core
 * modes as needed so banked registers become accessible, then restore
 * the original mode.
 */
1039 static int armv8_dpm_full_context(struct target *target)
1041 struct arm *arm = target_to_arm(target);
1042 struct arm_dpm *dpm = arm->dpm;
1043 struct reg_cache *cache = arm->core_cache;
1047 retval = dpm->prepare(dpm);
1048 if (retval != ERROR_OK)
1052 enum arm_mode mode = ARM_MODE_ANY;
1056 /* We "know" arm_dpm_read_current_registers() was called so
1057 * the unmapped registers (R0..R7, PC, AND CPSR) and some
1058 * view of R8..R14 are current. We also "know" oddities of
1059 * register mapping: special cases for R8..R12 and SPSR.
1061 * Pick some mode with unread registers and read them all.
1062 * Repeat until done.
1064 for (unsigned i = 0; i < cache->num_regs; i++) {
1067 if (cache->reg_list[i].valid)
1069 r = cache->reg_list[i].arch_info;
1071 /* may need to pick a mode and set CPSR */
1076 /* For regular (ARM_MODE_ANY) R8..R12
1077 * in case we've entered debug state
1078 * in FIQ mode we need to patch mode.
1080 if (mode != ARM_MODE_ANY)
1081 retval = armv8_dpm_modeswitch(dpm, mode);
1083 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_USR);
1085 if (retval != ERROR_OK)
1088 if (r->mode != mode)
1091 /* CPSR was read, so "R16" must mean SPSR */
1092 retval = dpmv8_read_reg(dpm,
1093 &cache->reg_list[i],
1094 (r->num == 16) ? 17 : r->num);
1095 if (retval != ERROR_OK)
/* return to the mode the core was halted in */
1101 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
1102 /* (void) */ dpm->finish(dpm);
1108 /*----------------------------------------------------------------------*/
1111 * Breakpoint and Watchpoint support.
1113 * Hardware {break,watch}points are usually left active, to minimize
1114 * debug entry/exit costs. When they are set or cleared, it's done in
1115 * batches. Also, DPM-conformant hardware can update debug registers
1116 * regardless of whether the CPU is running or halted ... though that
1117 * fact isn't currently leveraged.
/*
 * Compute the value/control register contents for a break- or
 * watchpoint at addr with the given length (1, 2, or 4 bytes within
 * one word), storing them in xp for later programming by
 * write_dirty_registers().
 */
1120 static int dpmv8_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
1121 uint32_t addr, uint32_t length)
1125 control = (1 << 0) /* enable */
1126 | (3 << 1); /* both user and privileged access */
1128 /* Match 1, 2, or all 4 byte addresses in this word.
1130 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
1131 * Support larger length, when addr is suitably aligned. In
1132 * particular, allow watchpoints on 8 byte "double" values.
1134 * REVISIT allow watchpoints on unaligned 2-bit values; and on
1135 * v7 hardware, unaligned 4-byte ones too.
/* byte-address-select bits live at [8:5] */
1139 control |= (1 << (addr & 3)) << 5;
1142 /* require 2-byte alignment */
1144 control |= (3 << (addr & 2)) << 5;
1149 /* require 4-byte alignment */
1151 control |= 0xf << 5;
1156 LOG_ERROR("unsupported {break,watch}point length/alignment");
1157 return ERROR_COMMAND_SYNTAX_ERROR;
1160 /* other shared control bits:
1161 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
1162 * bit 20 == 0 ... not linked to a context ID
1163 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
1166 xp->address = addr & ~3;
1167 xp->control = control;
1170 LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
1171 xp->address, control, xp->number);
1173 /* hardware is updated in write_dirty_registers() */
/* target_type add_breakpoint hook: claim the first free hardware
 * breakpoint slot for bp.  Software breakpoints are not implemented
 * here; a HW slot is used instead. */
1177 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp)
1179 struct arm *arm = target_to_arm(target);
1180 struct arm_dpm *dpm = arm->dpm;
1181 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1184 return ERROR_COMMAND_SYNTAX_ERROR;
1185 if (!dpm->bpwp_enable)
1188 /* FIXME we need a generic solution for software breakpoints. */
1189 if (bp->type == BKPT_SOFT)
1190 LOG_DEBUG("using HW bkpt, not SW...");
1192 for (unsigned i = 0; i < dpm->nbp; i++) {
1193 if (!dpm->dbp[i].bp) {
1194 retval = dpmv8_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
1195 bp->address, bp->length);
1196 if (retval == ERROR_OK)
1197 dpm->dbp[i].bp = bp;
/* target_type remove_breakpoint hook: release bp's slot and mark it
 * dirty so the hardware is cleared on the next register flush. */
1205 static int dpmv8_remove_breakpoint(struct target *target, struct breakpoint *bp)
1207 struct arm *arm = target_to_arm(target);
1208 struct arm_dpm *dpm = arm->dpm;
1209 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1211 for (unsigned i = 0; i < dpm->nbp; i++) {
1212 if (dpm->dbp[i].bp == bp) {
1213 dpm->dbp[i].bp = NULL;
1214 dpm->dbp[i].bpwp.dirty = true;
1216 /* hardware is updated in write_dirty_registers() */
/* Configure watchpoint slot index_t for wp: build the base control
 * value via dpmv8_bpwp_setup(), then add the access-type bits.
 * Data-value matching/masking is not supported by this hardware. */
1225 static int dpmv8_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
1226 struct watchpoint *wp)
1229 struct dpm_wp *dwp = dpm->dwp + index_t;
1232 /* this hardware doesn't support data value matching or masking */
1233 if (wp->value || wp->mask != ~(uint32_t)0) {
1234 LOG_DEBUG("watchpoint values and masking not supported");
1235 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1238 retval = dpmv8_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
1239 if (retval != ERROR_OK)
1242 control = dwp->bpwp.control;
1254 dwp->bpwp.control = control;
1256 dpm->dwp[index_t].wp = wp;
/* target_type add_watchpoint hook: claim the first free hardware
 * watchpoint slot for wp. */
1261 static int dpmv8_add_watchpoint(struct target *target, struct watchpoint *wp)
1263 struct arm *arm = target_to_arm(target);
1264 struct arm_dpm *dpm = arm->dpm;
1265 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1267 if (dpm->bpwp_enable) {
1268 for (unsigned i = 0; i < dpm->nwp; i++) {
1269 if (!dpm->dwp[i].wp) {
1270 retval = dpmv8_watchpoint_setup(dpm, i, wp);
/* target_type remove_watchpoint hook: release wp's slot and mark it
 * dirty so the hardware is cleared on the next register flush. */
1279 static int dpmv8_remove_watchpoint(struct target *target, struct watchpoint *wp)
1281 struct arm *arm = target_to_arm(target);
1282 struct arm_dpm *dpm = arm->dpm;
1283 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1285 for (unsigned i = 0; i < dpm->nwp; i++) {
1286 if (dpm->dwp[i].wp == wp) {
1287 dpm->dwp[i].wp = NULL;
1288 dpm->dwp[i].bpwp.dirty = true;
1290 /* hardware is updated in write_dirty_registers() */
/* Record a watchpoint fault address, adjusting for the per-state PC
 * offset implied by the current core state. */
1299 void armv8_dpm_report_wfar(struct arm_dpm *dpm, uint64_t addr)
1301 switch (dpm->arm->core_state) {
1303 case ARM_STATE_AARCH64:
1306 case ARM_STATE_THUMB:
1307 case ARM_STATE_THUMB_EE:
1310 case ARM_STATE_JAZELLE:
/* unexpected state; leave addr unadjusted */
1314 LOG_DEBUG("Unknown core_state");
1321 * Handle exceptions taken in debug state. This happens mostly for memory
1322 * accesses that violated a MMU policy. Taking an exception while in debug
1323 * state clobbers certain state registers on the target exception level.
1324 * Just mark those registers dirty so that they get restored on resume.
1325 * This works both for Aarch32 and Aarch64 states.
1327 * This function must not perform any actions that trigger another exception
1328 * or a recursion will happen.
/*
 * Recover from an exception taken while in debug state (typically an
 * MMU fault during a debugger-driven memory access): clear the sticky
 * error, mark the registers the exception clobbered at the target EL
 * as dirty, and re-select opcodes/register access for the (possibly
 * changed) execution state.  Must not itself trigger an exception.
 */
1330 void armv8_dpm_handle_exception(struct arm_dpm *dpm)
1332 struct armv8_common *armv8 = dpm->arm->arch_info;
1333 struct reg_cache *cache = dpm->arm->core_cache;
1334 enum arm_state core_state;
/* per-EL list of registers an exception clobbers (PC/PSR plus the
 * ELR/ESR/SPSR of the EL the exception was taken to) */
1339 static const int clobbered_regs_by_el[3][5] = {
1340 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL1, ARMV8_ESR_EL1, ARMV8_SPSR_EL1 },
1341 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL2, ARMV8_ESR_EL2, ARMV8_SPSR_EL2 },
1342 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL3, ARMV8_ESR_EL3, ARMV8_SPSR_EL3 },
1345 el = (dpm->dscr >> 8) & 3;
1347 /* safety check, must not happen since EL0 cannot be a target for an exception */
1348 if (el < SYSTEM_CUREL_EL1 || el > SYSTEM_CUREL_EL3) {
1349 LOG_ERROR("%s: EL %i is invalid, DSCR corrupted?", __func__, el);
1353 /* Clear sticky error */
1354 mem_ap_write_u32(armv8->debug_ap,
1355 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1357 armv8->read_reg_u64(armv8, ARMV8_xPSR, &dlr);
1359 armv8->read_reg_u64(armv8, ARMV8_PC, &dlr);
1361 LOG_DEBUG("Exception taken to EL %i, DLR=0x%016"PRIx64" DSPSR=0x%08"PRIx32,
1364 /* mark all clobbered registers as dirty */
1365 for (int i = 0; i < 5; i++)
1366 cache->reg_list[clobbered_regs_by_el[el-1][i]].dirty = true;
1369 * re-evaluate the core state, we might be in Aarch64 state now
1370 * we rely on dpm->dscr being up-to-date
1372 core_state = armv8_dpm_get_core_state(dpm);
1373 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
1374 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
1377 /*----------------------------------------------------------------------*/
1380 * Other debug and support utilities
/* Cache a freshly-read DSCR, update the tracked EL, and translate the
 * debug-entry method into the target's debug_reason. */
1383 void armv8_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
1385 struct target *target = dpm->arm->target;
1388 dpm->last_el = (dscr >> 8) & 3;
1390 /* Examine debug reason */
1391 switch (DSCR_ENTRY(dscr)) {
1392 /* FALL THROUGH -- assume a v6 core in abort mode */
1393 case DSCRV8_ENTRY_EXT_DEBUG: /* EDBGRQ */
1394 target->debug_reason = DBG_REASON_DBGRQ;
1396 case DSCRV8_ENTRY_HALT_STEP_EXECLU: /* HALT step */
1397 case DSCRV8_ENTRY_HALT_STEP_NORMAL: /* Halt step*/
1398 case DSCRV8_ENTRY_HALT_STEP:
1399 target->debug_reason = DBG_REASON_SINGLESTEP;
1401 case DSCRV8_ENTRY_HLT: /* HLT instruction (software breakpoint) */
1402 case DSCRV8_ENTRY_BKPT: /* SW BKPT (?) */
1403 case DSCRV8_ENTRY_RESET_CATCH: /* Reset catch */
1404 case DSCRV8_ENTRY_OS_UNLOCK: /*OS unlock catch*/
1405 case DSCRV8_ENTRY_EXCEPTION_CATCH: /*exception catch*/
1406 case DSCRV8_ENTRY_SW_ACCESS_DBG: /*SW access dbg register*/
1407 target->debug_reason = DBG_REASON_BREAKPOINT;
1409 case DSCRV8_ENTRY_WATCHPOINT: /* asynch watchpoint */
1410 target->debug_reason = DBG_REASON_WATCHPOINT;
/* unrecognized entry method */
1413 target->debug_reason = DBG_REASON_UNDEFINED;
1419 /*----------------------------------------------------------------------*/
1422 * Setup and management support.
1426 * Hooks up this DPM to its associated target; call only once.
1427 * Initially this only covers the register cache.
1429 * Oh, and watchpoints. Yeah.
/*
 * Wire this DPM into its target: install register/coprocessor access
 * hooks, the DPM operation table, optional breakpoint handlers, the
 * watchpoint handlers, and allocate break/watchpoint bookkeeping
 * sized from DIDR.  Call only once per target.
 */
1431 int armv8_dpm_setup(struct arm_dpm *dpm)
1433 struct arm *arm = dpm->arm;
1434 struct target *target = arm->target;
1435 struct reg_cache *cache;
1438 /* register access setup */
1439 arm->full_context = armv8_dpm_full_context;
1440 arm->read_core_reg = armv8_dpm_read_core_reg;
1441 arm->write_core_reg = armv8_dpm_write_core_reg;
1443 if (arm->core_cache == NULL) {
1444 cache = armv8_build_reg_cache(target);
1449 /* coprocessor access setup */
1450 arm->mrc = dpmv8_mrc;
1451 arm->mcr = dpmv8_mcr;
1452 arm->mrs = dpmv8_mrs;
1453 arm->msr = dpmv8_msr;
1455 dpm->prepare = dpmv8_dpm_prepare;
1456 dpm->finish = dpmv8_dpm_finish;
1458 dpm->instr_execute = dpmv8_instr_execute;
1459 dpm->instr_write_data_dcc = dpmv8_instr_write_data_dcc;
1460 dpm->instr_write_data_dcc_64 = dpmv8_instr_write_data_dcc_64;
1461 dpm->instr_write_data_r0 = dpmv8_instr_write_data_r0;
1462 dpm->instr_write_data_r0_64 = dpmv8_instr_write_data_r0_64;
1463 dpm->instr_cpsr_sync = dpmv8_instr_cpsr_sync;
1465 dpm->instr_read_data_dcc = dpmv8_instr_read_data_dcc;
1466 dpm->instr_read_data_dcc_64 = dpmv8_instr_read_data_dcc_64;
1467 dpm->instr_read_data_r0 = dpmv8_instr_read_data_r0;
1468 dpm->instr_read_data_r0_64 = dpmv8_instr_read_data_r0_64;
1470 dpm->arm_reg_current = armv8_reg_current;
1472 /* dpm->bpwp_enable = dpmv8_bpwp_enable; */
1473 dpm->bpwp_disable = dpmv8_bpwp_disable;
1475 /* breakpoint setup -- optional until it works everywhere */
1476 if (!target->type->add_breakpoint) {
1477 target->type->add_breakpoint = dpmv8_add_breakpoint;
1478 target->type->remove_breakpoint = dpmv8_remove_breakpoint;
1481 /* watchpoint setup */
1482 target->type->add_watchpoint = dpmv8_add_watchpoint;
1483 target->type->remove_watchpoint = dpmv8_remove_watchpoint;
1485 /* FIXME add vector catch support */
/* slot counts come from DIDR: breakpoints at [15:12], watchpoints
 * at [23:20], each field encoding (count - 1) */
1487 dpm->nbp = 1 + ((dpm->didr >> 12) & 0xf);
1488 dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);
1490 dpm->nwp = 1 + ((dpm->didr >> 20) & 0xf);
1491 dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);
1493 if (!dpm->dbp || !dpm->dwp) {
1499 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1500 target_name(target), dpm->nbp, dpm->nwp);
1502 /* REVISIT ... and some of those breakpoints could match
1503 * execution context IDs...
1510 * Reinitializes DPM state at the beginning of a new debug session
1511 * or after a reset which may have affected the debug module.
1513 int armv8_dpm_initialize(struct arm_dpm *dpm)
1515 /* Disable all breakpoints and watchpoints at startup. */
1516 if (dpm->bpwp_disable) {
1519 for (i = 0; i < dpm->nbp; i++) {
1520 dpm->dbp[i].bpwp.number = i;
1521 (void) dpm->bpwp_disable(dpm, i);
1523 for (i = 0; i < dpm->nwp; i++) {
1524 dpm->dwp[i].bpwp.number = 16 + i;
1525 (void) dpm->bpwp_disable(dpm, 16 + i);
1528 LOG_WARNING("%s: can't disable breakpoints and watchpoints",
1529 target_name(dpm->arm->target));