openocd: fix simple cases of Yoda condition
[fw/openocd] / src / target / cortex_a.c
1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   Copyright (C) 2016 Chengyu Zheng                                      *
27  *   chengyu.zheng@polimi.it : watchpoint support                          *
28  *                                                                         *
29  *   This program is free software; you can redistribute it and/or modify  *
30  *   it under the terms of the GNU General Public License as published by  *
31  *   the Free Software Foundation; either version 2 of the License, or     *
32  *   (at your option) any later version.                                   *
33  *                                                                         *
34  *   This program is distributed in the hope that it will be useful,       *
35  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
36  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
37  *   GNU General Public License for more details.                          *
38  *                                                                         *
39  *   You should have received a copy of the GNU General Public License     *
40  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
41  *                                                                         *
42  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
43  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
44  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
45  *   Cortex-A15(tm)TRM, ARM DDI 0438C                                      *
46  *                                                                         *
47  ***************************************************************************/
48
49 #ifdef HAVE_CONFIG_H
50 #include "config.h"
51 #endif
52
53 #include "breakpoints.h"
54 #include "cortex_a.h"
55 #include "register.h"
56 #include "armv7a_mmu.h"
57 #include "target_request.h"
58 #include "target_type.h"
59 #include "arm_opcodes.h"
60 #include "arm_semihosting.h"
61 #include "jtag/interface.h"
62 #include "transport/transport.h"
63 #include "smp.h"
64 #include <helper/bits.h>
65 #include <helper/time_support.h>
66
67 static int cortex_a_poll(struct target *target);
68 static int cortex_a_debug_entry(struct target *target);
69 static int cortex_a_restore_context(struct target *target, bool bpwp);
70 static int cortex_a_set_breakpoint(struct target *target,
71         struct breakpoint *breakpoint, uint8_t matchmode);
72 static int cortex_a_set_context_breakpoint(struct target *target,
73         struct breakpoint *breakpoint, uint8_t matchmode);
74 static int cortex_a_set_hybrid_breakpoint(struct target *target,
75         struct breakpoint *breakpoint);
76 static int cortex_a_unset_breakpoint(struct target *target,
77         struct breakpoint *breakpoint);
78 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
79         uint32_t value, uint32_t *dscr);
80 static int cortex_a_mmu(struct target *target, int *enabled);
81 static int cortex_a_mmu_modify(struct target *target, int enable);
82 static int cortex_a_virt2phys(struct target *target,
83         target_addr_t virt, target_addr_t *phys);
84 static int cortex_a_read_cpu_memory(struct target *target,
85         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
86
87 static unsigned int ilog2(unsigned int x)
88 {
89         unsigned int y = 0;
90         x /= 2;
91         while (x) {
92                 ++y;
93                 x /= 2;
94         }
95         return y;
96 }
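
/* Note: ilog2() above returns floor(log2(x)) for x >= 1, e.g. ilog2(1) == 0,
 * ilog2(5) == 2 and ilog2(8) == 3. */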
97
98 /*  restore cp15_control_reg at resume */
99 static int cortex_a_restore_cp15_control_reg(struct target *target)
100 {
101         int retval = ERROR_OK;
102         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
103         struct armv7a_common *armv7a = target_to_armv7a(target);
104
105         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
106                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
107                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
108                 retval = armv7a->arm.mcr(target, 15,
109                                 0, 0,   /* op1, op2 */
110                                 1, 0,   /* CRn, CRm */
111                                 cortex_a->cp15_control_reg);
112         }
113         return retval;
114 }
115
116 /*
117  * Set up ARM core for memory access.
118  * If !phys_access, switch to SVC mode and make sure MMU is on
119  * If phys_access, switch off mmu
120  */
121 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
122 {
123         struct armv7a_common *armv7a = target_to_armv7a(target);
124         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
125         int mmu_enabled = 0;
126
127         if (phys_access == 0) {
128                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
129                 cortex_a_mmu(target, &mmu_enabled);
130                 if (mmu_enabled)
131                         cortex_a_mmu_modify(target, 1);
132                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
133                         /* overwrite DACR to all-manager */
134                         armv7a->arm.mcr(target, 15,
135                                         0, 0, 3, 0,
136                                         0xFFFFFFFF);
137                 }
138         } else {
139                 cortex_a_mmu(target, &mmu_enabled);
140                 if (mmu_enabled)
141                         cortex_a_mmu_modify(target, 0);
142         }
143         return ERROR_OK;
144 }
145
146 /*
147  * Restore ARM core after memory access.
148  * If !phys_access, switch to previous mode
149  * If phys_access, restore MMU setting
150  */
151 static int cortex_a_post_memaccess(struct target *target, int phys_access)
152 {
153         struct armv7a_common *armv7a = target_to_armv7a(target);
154         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
155
156         if (phys_access == 0) {
157                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
158                         /* restore */
159                         armv7a->arm.mcr(target, 15,
160                                         0, 0, 3, 0,
161                                         cortex_a->cp15_dacr_reg);
162                 }
163                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
164         } else {
165                 int mmu_enabled = 0;
166                 cortex_a_mmu(target, &mmu_enabled);
167                 if (mmu_enabled)
168                         cortex_a_mmu_modify(target, 1);
169         }
170         return ERROR_OK;
171 }
172
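/* Usage sketch (illustrative only, mirroring the callers later in this file):
 * bracket every debugger-driven memory transfer with the pair above, e.g.
 *
 *     cortex_a_prep_memaccess(target, phys_access);
 *     ... mem_ap / DCC transfers ...
 *     cortex_a_post_memaccess(target, phys_access);
 *
 * so core mode, MMU state and (with dacrfixup enabled) DACR are restored to
 * what they were before the access. */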
173
174 /*  modify cp15_control_reg in order to enable or disable mmu for:
175  *  - virt2phys address conversion
176  *  - read or write memory in phys or virt address */
177 static int cortex_a_mmu_modify(struct target *target, int enable)
178 {
179         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
180         struct armv7a_common *armv7a = target_to_armv7a(target);
181         int retval = ERROR_OK;
182         int need_write = 0;
183
184         if (enable) {
185                 /*  refuse: the MMU was disabled when the target stopped */
186                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
187                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
188                         return ERROR_FAIL;
189                 }
190                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
191                         cortex_a->cp15_control_reg_curr |= 0x1U;
192                         need_write = 1;
193                 }
194         } else {
195                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
196                         cortex_a->cp15_control_reg_curr &= ~0x1U;
197                         need_write = 1;
198                 }
199         }
200
201         if (need_write) {
202                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
203                         enable ? "enable mmu" : "disable mmu",
204                         cortex_a->cp15_control_reg_curr);
205
206                 retval = armv7a->arm.mcr(target, 15,
207                                 0, 0,   /* op1, op2 */
208                                 1, 0,   /* CRn, CRm */
209                                 cortex_a->cp15_control_reg_curr);
210         }
211         return retval;
212 }
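
/* Note: bit 0 of the CP15 c1 System Control Register (SCTLR.M) is the MMU
 * enable bit; cortex_a_mmu_modify() only writes the register back when the
 * cached cp15_control_reg_curr value actually changes. */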
213
214 /*
215  * Cortex-A Basic debug access, very low level assumes state is saved
216  */
217 static int cortex_a_init_debug_access(struct target *target)
218 {
219         struct armv7a_common *armv7a = target_to_armv7a(target);
220         uint32_t dscr;
221         int retval;
222
223         /* lock memory-mapped access to debug registers to prevent
224          * software interference */
225         retval = mem_ap_write_u32(armv7a->debug_ap,
226                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
227         if (retval != ERROR_OK)
228                 return retval;
229
230         /* Disable cacheline fills and force cache write-through in debug state */
231         retval = mem_ap_write_u32(armv7a->debug_ap,
232                         armv7a->debug_base + CPUDBG_DSCCR, 0);
233         if (retval != ERROR_OK)
234                 return retval;
235
236         /* Disable TLB lookup and refill/eviction in debug state */
237         retval = mem_ap_write_u32(armv7a->debug_ap,
238                         armv7a->debug_base + CPUDBG_DSMCR, 0);
239         if (retval != ERROR_OK)
240                 return retval;
241
242         retval = dap_run(armv7a->debug_ap->dap);
243         if (retval != ERROR_OK)
244                 return retval;
245
246         /* Enabling of instruction execution in debug mode is done in debug_entry code */
247
248         /* Resync breakpoint registers */
249
250         /* Enable halt for breakpoint, watchpoint and vector catch */
251         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
252                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
253         if (retval != ERROR_OK)
254                 return retval;
255         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
256                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
257         if (retval != ERROR_OK)
258                 return retval;
259
260         /* Since this is likely called from init or reset, update target state information */
261         return cortex_a_poll(target);
262 }
263
264 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
265 {
266         /* Waits until DSCR.InstrCompl becomes 1, indicating the instruction is done.
267          * Writes the final value of DSCR into *dscr. Pass force=true to read DSCR
268          * at least once, rather than trusting the caller-supplied value. */
269         struct armv7a_common *armv7a = target_to_armv7a(target);
270         int retval;
271
272         if (force) {
273                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
274                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
275                 if (retval != ERROR_OK) {
276                         LOG_ERROR("Could not read DSCR register");
277                         return retval;
278                 }
279         }
280
281         retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
282         if (retval != ERROR_OK)
283                 LOG_ERROR("Error waiting for InstrCompl=1");
284         return retval;
285 }
286
287 /* To reduce needless round-trips, pass in a pointer to the current
288  * DSCR value.  Initialize it to zero if you just need to know the
289  * value on return from this function; or DSCR_INSTR_COMP if you
290  * happen to know that no instruction is pending.
291  */
292 static int cortex_a_exec_opcode(struct target *target,
293         uint32_t opcode, uint32_t *dscr_p)
294 {
295         uint32_t dscr;
296         int retval;
297         struct armv7a_common *armv7a = target_to_armv7a(target);
298
299         dscr = dscr_p ? *dscr_p : 0;
300
301         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
302
303         /* Wait for InstrCompl bit to be set */
304         retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
305         if (retval != ERROR_OK)
306                 return retval;
307
308         retval = mem_ap_write_u32(armv7a->debug_ap,
309                         armv7a->debug_base + CPUDBG_ITR, opcode);
310         if (retval != ERROR_OK)
311                 return retval;
312
313         /* Wait for InstrCompl bit to be set */
314         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
315         if (retval != ERROR_OK) {
316                 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
317                 return retval;
318         }
319
320         if (dscr_p)
321                 *dscr_p = dscr;
322
323         return retval;
324 }
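
/* Example (sketch of the caller pattern used below, no new API): issue
 * "MCR p14, 0, R0, c0, c5, 0" to move R0 into the DCC, keeping the cached
 * DSCR value across calls:
 *
 *     uint32_t dscr = DSCR_INSTR_COMP;
 *     retval = cortex_a_exec_opcode(target,
 *                     ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
 */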
325
326 /* Write to memory mapped registers directly with no cache or mmu handling */
327 static int cortex_a_dap_write_memap_register_u32(struct target *target,
328         uint32_t address,
329         uint32_t value)
330 {
331         int retval;
332         struct armv7a_common *armv7a = target_to_armv7a(target);
333
334         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
335
336         return retval;
337 }
338
339 /*
340  * Cortex-A implementation of Debug Programmer's Model
341  *
342  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
343  * so there's no need to poll for it before executing an instruction.
344  *
345  * NOTE that in several of these cases the "stall" mode might be useful.
346  * It'd let us queue a few operations together... prepare/finish might
347  * be the places to enable/disable that mode.
348  */
349
350 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
351 {
352         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
353 }
354
355 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
356 {
357         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
358         return mem_ap_write_u32(a->armv7a_common.debug_ap,
359                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
360 }
361
362 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
363         uint32_t *dscr_p)
364 {
365         uint32_t dscr = DSCR_INSTR_COMP;
366         int retval;
367
368         if (dscr_p)
369                 dscr = *dscr_p;
370
371         /* Wait for DTRTXfull: data ready for the debugger to read */
372         retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
373                         DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
374         if (retval != ERROR_OK) {
375                 LOG_ERROR("Error waiting for read dcc");
376                 return retval;
377         }
378
379         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
380                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
381         if (retval != ERROR_OK)
382                 return retval;
383         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
384
385         if (dscr_p)
386                 *dscr_p = dscr;
387
388         return retval;
389 }
390
391 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
392 {
393         struct cortex_a_common *a = dpm_to_a(dpm);
394         uint32_t dscr;
395         int retval;
396
397         /* set up invariant:  INSTR_COMP is set after every DPM operation */
398         retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
399         if (retval != ERROR_OK) {
400                 LOG_ERROR("Error waiting for dpm prepare");
401                 return retval;
402         }
403
404         /* this "should never happen" ... */
405         if (dscr & DSCR_DTR_RX_FULL) {
406                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
407                 /* Clear DCCRX */
408                 retval = cortex_a_exec_opcode(
409                                 a->armv7a_common.arm.target,
410                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
411                                 &dscr);
412                 if (retval != ERROR_OK)
413                         return retval;
414         }
415
416         return retval;
417 }
418
419 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
420 {
421         /* REVISIT what could be done here? */
422         return ERROR_OK;
423 }
424
425 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
426         uint32_t opcode, uint32_t data)
427 {
428         struct cortex_a_common *a = dpm_to_a(dpm);
429         int retval;
430         uint32_t dscr = DSCR_INSTR_COMP;
431
432         retval = cortex_a_write_dcc(a, data);
433         if (retval != ERROR_OK)
434                 return retval;
435
436         return cortex_a_exec_opcode(
437                         a->armv7a_common.arm.target,
438                         opcode,
439                         &dscr);
440 }
441
442 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
443         uint8_t rt, uint32_t data)
444 {
445         struct cortex_a_common *a = dpm_to_a(dpm);
446         uint32_t dscr = DSCR_INSTR_COMP;
447         int retval;
448
449         if (rt > 15)
450                 return ERROR_TARGET_INVALID;
451
452         retval = cortex_a_write_dcc(a, data);
453         if (retval != ERROR_OK)
454                 return retval;
455
456         /* DCCRX to Rt, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
457         return cortex_a_exec_opcode(
458                         a->armv7a_common.arm.target,
459                         ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
460                         &dscr);
461 }
462
463 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
464         uint32_t opcode, uint32_t data)
465 {
466         struct cortex_a_common *a = dpm_to_a(dpm);
467         uint32_t dscr = DSCR_INSTR_COMP;
468         int retval;
469
470         retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
471         if (retval != ERROR_OK)
472                 return retval;
473
474         /* then the opcode, taking data from R0 */
475         retval = cortex_a_exec_opcode(
476                         a->armv7a_common.arm.target,
477                         opcode,
478                         &dscr);
479
480         return retval;
481 }
482
483 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
484 {
485         struct target *target = dpm->arm->target;
486         uint32_t dscr = DSCR_INSTR_COMP;
487
488         /* "Prefetch flush" after modifying execution status in CPSR */
489         return cortex_a_exec_opcode(target,
490                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
491                         &dscr);
492 }
493
494 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
495         uint32_t opcode, uint32_t *data)
496 {
497         struct cortex_a_common *a = dpm_to_a(dpm);
498         int retval;
499         uint32_t dscr = DSCR_INSTR_COMP;
500
501         /* the opcode, writing data to DCC */
502         retval = cortex_a_exec_opcode(
503                         a->armv7a_common.arm.target,
504                         opcode,
505                         &dscr);
506         if (retval != ERROR_OK)
507                 return retval;
508
509         return cortex_a_read_dcc(a, data, &dscr);
510 }
511
512 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
513         uint8_t rt, uint32_t *data)
514 {
515         struct cortex_a_common *a = dpm_to_a(dpm);
516         uint32_t dscr = DSCR_INSTR_COMP;
517         int retval;
518
519         if (rt > 15)
520                 return ERROR_TARGET_INVALID;
521
522         retval = cortex_a_exec_opcode(
523                         a->armv7a_common.arm.target,
524                         ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
525                         &dscr);
526         if (retval != ERROR_OK)
527                 return retval;
528
529         return cortex_a_read_dcc(a, data, &dscr);
530 }
531
532 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
533         uint32_t opcode, uint32_t *data)
534 {
535         struct cortex_a_common *a = dpm_to_a(dpm);
536         uint32_t dscr = DSCR_INSTR_COMP;
537         int retval;
538
539         /* the opcode, writing data to R0 */
540         retval = cortex_a_exec_opcode(
541                         a->armv7a_common.arm.target,
542                         opcode,
543                         &dscr);
544         if (retval != ERROR_OK)
545                 return retval;
546
547         /* write R0 to DCC */
548         return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
549 }
550
551 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
552         uint32_t addr, uint32_t control)
553 {
554         struct cortex_a_common *a = dpm_to_a(dpm);
555         uint32_t vr = a->armv7a_common.debug_base;
556         uint32_t cr = a->armv7a_common.debug_base;
557         int retval;
558
559         switch (index_t) {
560                 case 0 ... 15:  /* breakpoints */
561                         vr += CPUDBG_BVR_BASE;
562                         cr += CPUDBG_BCR_BASE;
563                         break;
564                 case 16 ... 31: /* watchpoints */
565                         vr += CPUDBG_WVR_BASE;
566                         cr += CPUDBG_WCR_BASE;
567                         index_t -= 16;
568                         break;
569                 default:
570                         return ERROR_FAIL;
571         }
572         vr += 4 * index_t;
573         cr += 4 * index_t;
574
575         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
576                 (unsigned) vr, (unsigned) cr);
577
578         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
579                         vr, addr);
580         if (retval != ERROR_OK)
581                 return retval;
582         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
583                         cr, control);
584         return retval;
585 }
586
587 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
588 {
589         struct cortex_a_common *a = dpm_to_a(dpm);
590         uint32_t cr;
591
592         switch (index_t) {
593                 case 0 ... 15:
594                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
595                         break;
596                 case 16 ... 31:
597                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
598                         index_t -= 16;
599                         break;
600                 default:
601                         return ERROR_FAIL;
602         }
603         cr += 4 * index_t;
604
605         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
606
607         /* clear control register */
608         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
609 }
610
611 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
612 {
613         struct arm_dpm *dpm = &a->armv7a_common.dpm;
614         int retval;
615
616         dpm->arm = &a->armv7a_common.arm;
617         dpm->didr = didr;
618
619         dpm->prepare = cortex_a_dpm_prepare;
620         dpm->finish = cortex_a_dpm_finish;
621
622         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
623         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
624         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
625
626         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
627         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
628
629         dpm->bpwp_enable = cortex_a_bpwp_enable;
630         dpm->bpwp_disable = cortex_a_bpwp_disable;
631
632         retval = arm_dpm_setup(dpm);
633         if (retval == ERROR_OK)
634                 retval = arm_dpm_initialize(dpm);
635
636         return retval;
637 }
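
/* As used here, arm_dpm_setup() wires the DPM into the generic ARM register
 * handling (register cache), and arm_dpm_initialize() is expected to bring the
 * breakpoint/watchpoint comparators to a known, disabled state; see arm_dpm.c
 * for the shared implementation. */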
638 static struct target *get_cortex_a(struct target *target, int32_t coreid)
639 {
640         struct target_list *head;
641         struct target *curr;
642
643         head = target->head;
644         while (head != (struct target_list *)NULL) {
645                 curr = head->target;
646                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
647                         return curr;
648                 head = head->next;
649         }
650         return target;
651 }
652 static int cortex_a_halt(struct target *target);
653
654 static int cortex_a_halt_smp(struct target *target)
655 {
656         int retval = 0;
657         struct target_list *head;
658         struct target *curr;
659         head = target->head;
660         while (head != (struct target_list *)NULL) {
661                 curr = head->target;
662                 if ((curr != target) && (curr->state != TARGET_HALTED)
663                         && target_was_examined(curr))
664                         retval += cortex_a_halt(curr);
665                 head = head->next;
666         }
667         return retval;
668 }
669
670 static int update_halt_gdb(struct target *target)
671 {
672         struct target *gdb_target = NULL;
673         struct target_list *head;
674         struct target *curr;
675         int retval = 0;
676
677         if (target->gdb_service && target->gdb_service->core[0] == -1) {
678                 target->gdb_service->target = target;
679                 target->gdb_service->core[0] = target->coreid;
680                 retval += cortex_a_halt_smp(target);
681         }
682
683         if (target->gdb_service)
684                 gdb_target = target->gdb_service->target;
685
686         foreach_smp_target(head, target->head) {
687                 curr = head->target;
688                 /* skip calling context */
689                 if (curr == target)
690                         continue;
691                 if (!target_was_examined(curr))
692                         continue;
693                 /* skip targets that were already halted */
694                 if (curr->state == TARGET_HALTED)
695                         continue;
696                 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
697                 if (curr == gdb_target)
698                         continue;
699
700                 /* avoid recursion in cortex_a_poll() */
701                 curr->smp = 0;
702                 cortex_a_poll(curr);
703                 curr->smp = 1;
704         }
705
706         /* after all targets were updated, poll the gdb serving target */
707         if (gdb_target != NULL && gdb_target != target)
708                 cortex_a_poll(gdb_target);
709         return retval;
710 }
711
712 /*
713  * Cortex-A Run control
714  */
715
716 static int cortex_a_poll(struct target *target)
717 {
718         int retval = ERROR_OK;
719         uint32_t dscr;
720         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
721         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
722         enum target_state prev_target_state = target->state;
723         /*  toggling to another core is done by gdb as follows: */
724         /*  maint packet J core_id */
725         /*  continue */
726         /*  the next poll triggers a halt event sent to gdb */
727         if ((target->state == TARGET_HALTED) && (target->smp) &&
728                 (target->gdb_service) &&
729                 (target->gdb_service->target == NULL)) {
730                 target->gdb_service->target =
731                         get_cortex_a(target, target->gdb_service->core[1]);
732                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
733                 return retval;
734         }
735         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
736                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
737         if (retval != ERROR_OK)
738                 return retval;
739         cortex_a->cpudbg_dscr = dscr;
740
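        /* DSCR_RUN_MODE() is assumed to extract only the processor state bits
         * (DSCR_CORE_HALTED / DSCR_CORE_RESTARTED) from DSCR, so the comparison
         * below is a pure state check that ignores the other status flags. */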
741         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
742                 if (prev_target_state != TARGET_HALTED) {
743                         /* We have a halting debug event */
744                         LOG_DEBUG("Target halted");
745                         target->state = TARGET_HALTED;
746
747                         retval = cortex_a_debug_entry(target);
748                         if (retval != ERROR_OK)
749                                 return retval;
750
751                         if (target->smp) {
752                                 retval = update_halt_gdb(target);
753                                 if (retval != ERROR_OK)
754                                         return retval;
755                         }
756
757                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
758                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
759                         } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
760                                 if (arm_semihosting(target, &retval) != 0)
761                                         return retval;
762
763                                 target_call_event_callbacks(target,
764                                         TARGET_EVENT_HALTED);
765                         }
766                 }
767         } else
768                 target->state = TARGET_RUNNING;
769
770         return retval;
771 }
772
773 static int cortex_a_halt(struct target *target)
774 {
775         int retval;
776         uint32_t dscr;
777         struct armv7a_common *armv7a = target_to_armv7a(target);
778
779         /*
780          * Tell the core to be halted by writing DRCR with 0x1
781          * and then wait for the core to be halted.
782          */
783         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
784                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
785         if (retval != ERROR_OK)
786                 return retval;
787
788         dscr = 0; /* force read of dscr */
789         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
790                         DSCR_CORE_HALTED, &dscr);
791         if (retval != ERROR_OK) {
792                 LOG_ERROR("Error waiting for halt");
793                 return retval;
794         }
795
796         target->debug_reason = DBG_REASON_DBGRQ;
797
798         return ERROR_OK;
799 }
800
801 static int cortex_a_internal_restore(struct target *target, int current,
802         target_addr_t *address, int handle_breakpoints, int debug_execution)
803 {
804         struct armv7a_common *armv7a = target_to_armv7a(target);
805         struct arm *arm = &armv7a->arm;
806         int retval;
807         uint32_t resume_pc;
808
809         if (!debug_execution)
810                 target_free_all_working_areas(target);
811
812 #if 0
813         if (debug_execution) {
814                 /* Disable interrupts */
815                 /* We disable interrupts in the PRIMASK register instead of
816                  * masking with C_MASKINTS,
817                  * This is probably the same issue as Cortex-M3 Errata 377493:
818                  * C_MASKINTS in parallel with disabled interrupts can cause
819                  * local faults to not be taken. */
820                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
821                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
822                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
823
824                 /* Make sure we are in Thumb mode */
825                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
826                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
827                         32) | (1 << 24));
828                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
829                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
830         }
831 #endif
832
833         /* current = 1: continue on current pc, otherwise continue at <address> */
834         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
835         if (!current)
836                 resume_pc = *address;
837         else
838                 *address = resume_pc;
839
840         /* Make sure that the ARMv7 gdb thumb fixups do not
841          * kill the return address
842          */
843         switch (arm->core_state) {
844                 case ARM_STATE_ARM:
845                         resume_pc &= 0xFFFFFFFC;
846                         break;
847                 case ARM_STATE_THUMB:
848                 case ARM_STATE_THUMB_EE:
849                         /* When the return address is loaded into PC
850                          * bit 0 must be 1 to stay in Thumb state
851                          */
852                         resume_pc |= 0x1;
853                         break;
854                 case ARM_STATE_JAZELLE:
855                         LOG_ERROR("How do I resume into Jazelle state??");
856                         return ERROR_FAIL;
857                 case ARM_STATE_AARCH64:
858                         LOG_ERROR("Shouldn't be in AARCH64 state");
859                         return ERROR_FAIL;
860         }
861         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
862         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
863         arm->pc->dirty = true;
864         arm->pc->valid = true;
865
866         /* restore dpm_mode at system halt */
867         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
868         /* call it now, before restoring the context, because it uses cpu
869          * register r0 for restoring the cp15 control register */
870         retval = cortex_a_restore_cp15_control_reg(target);
871         if (retval != ERROR_OK)
872                 return retval;
873         retval = cortex_a_restore_context(target, handle_breakpoints);
874         if (retval != ERROR_OK)
875                 return retval;
876         target->debug_reason = DBG_REASON_NOTHALTED;
877         target->state = TARGET_RUNNING;
878
879         /* registers are now invalid */
880         register_cache_invalidate(arm->core_cache);
881
882 #if 0
883         /* the front-end may request us not to handle breakpoints */
884         if (handle_breakpoints) {
885                 /* Single step past breakpoint at current address */
886                 breakpoint = breakpoint_find(target, resume_pc);
887                 if (breakpoint) {
888                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
889                         cortex_m3_unset_breakpoint(target, breakpoint);
890                         cortex_m3_single_step_core(target);
891                         cortex_m3_set_breakpoint(target, breakpoint);
892                 }
893         }
894
895 #endif
896         return retval;
897 }
898
899 static int cortex_a_internal_restart(struct target *target)
900 {
901         struct armv7a_common *armv7a = target_to_armv7a(target);
902         struct arm *arm = &armv7a->arm;
903         int retval;
904         uint32_t dscr;
905         /*
906          * Restart core and wait for it to be started.  Clear ITRen and sticky
907          * exception flags: see ARMv7 ARM, C5.9.
908          *
909          * REVISIT: for single stepping, we probably want to
910          * disable IRQs by default, with optional override...
911          */
912
913         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
914                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
915         if (retval != ERROR_OK)
916                 return retval;
917
918         if ((dscr & DSCR_INSTR_COMP) == 0)
919                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
920
921         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
922                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
923         if (retval != ERROR_OK)
924                 return retval;
925
926         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
927                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
928                         DRCR_CLEAR_EXCEPTIONS);
929         if (retval != ERROR_OK)
930                 return retval;
931
932         dscr = 0; /* force read of dscr */
933         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
934                         DSCR_CORE_RESTARTED, &dscr);
935         if (retval != ERROR_OK) {
936                 LOG_ERROR("Error waiting for resume");
937                 return retval;
938         }
939
940         target->debug_reason = DBG_REASON_NOTHALTED;
941         target->state = TARGET_RUNNING;
942
943         /* registers are now invalid */
944         register_cache_invalidate(arm->core_cache);
945
946         return ERROR_OK;
947 }
948
949 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
950 {
951         int retval = 0;
952         struct target_list *head;
953         struct target *curr;
954         target_addr_t address;
955         head = target->head;
956         while (head != (struct target_list *)NULL) {
957                 curr = head->target;
958                 if ((curr != target) && (curr->state != TARGET_RUNNING)
959                         && target_was_examined(curr)) {
960                         /*  resume at the current address, not in step mode */
961                         retval += cortex_a_internal_restore(curr, 1, &address,
962                                         handle_breakpoints, 0);
963                         retval += cortex_a_internal_restart(curr);
964                 }
965                 head = head->next;
966
967         }
968         return retval;
969 }
970
971 static int cortex_a_resume(struct target *target, int current,
972         target_addr_t address, int handle_breakpoints, int debug_execution)
973 {
974         int retval = 0;
975         /* dummy resume for smp toggle in order to reduce gdb impact  */
976         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
977                 /*   simulate a start and halt of target */
978                 target->gdb_service->target = NULL;
979                 target->gdb_service->core[0] = target->gdb_service->core[1];
980                 /*  fake resume: at the next poll we play target core[1], see poll */
981                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
982                 return 0;
983         }
984         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
985         if (target->smp) {
986                 target->gdb_service->core[0] = -1;
987                 retval = cortex_a_restore_smp(target, handle_breakpoints);
988                 if (retval != ERROR_OK)
989                         return retval;
990         }
991         cortex_a_internal_restart(target);
992
993         if (!debug_execution) {
994                 target->state = TARGET_RUNNING;
995                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
996                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
997         } else {
998                 target->state = TARGET_DEBUG_RUNNING;
999                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1000                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1001         }
1002
1003         return ERROR_OK;
1004 }
1005
1006 static int cortex_a_debug_entry(struct target *target)
1007 {
1008         uint32_t dscr;
1009         int retval = ERROR_OK;
1010         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1011         struct armv7a_common *armv7a = target_to_armv7a(target);
1012         struct arm *arm = &armv7a->arm;
1013
1014         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1015
1016         /* REVISIT surely we should not re-read DSCR !! */
1017         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1018                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1019         if (retval != ERROR_OK)
1020                 return retval;
1021
1022         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1023          * imprecise data aborts get discarded by issuing a Data
1024          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1025          */
1026
1027         /* Enable the ITR execution once we are in debug mode */
1028         dscr |= DSCR_ITR_EN;
1029         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1030                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1031         if (retval != ERROR_OK)
1032                 return retval;
1033
1034         /* Examine debug reason */
1035         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1036
1037         /* save address of instruction that triggered the watchpoint? */
1038         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1039                 uint32_t wfar;
1040
1041                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1042                                 armv7a->debug_base + CPUDBG_WFAR,
1043                                 &wfar);
1044                 if (retval != ERROR_OK)
1045                         return retval;
1046                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1047         }
1048
1049         /* First load register accessible through core debug port */
1050         retval = arm_dpm_read_current_registers(&armv7a->dpm);
1051         if (retval != ERROR_OK)
1052                 return retval;
1053
1054         if (arm->spsr) {
1055                 /* read SPSR */
1056                 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1057                 if (retval != ERROR_OK)
1058                         return retval;
1059         }
1060
1061 #if 0
1062 /* TODO, Move this */
1063         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1064         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1065         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1066
1067         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1068         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1069
1070         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1071         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1072 #endif
1073
1074         /* Are we in an exception handler */
1075 /*      armv4_5->exception_number = 0; */
1076         if (armv7a->post_debug_entry) {
1077                 retval = armv7a->post_debug_entry(target);
1078                 if (retval != ERROR_OK)
1079                         return retval;
1080         }
1081
1082         return retval;
1083 }
1084
1085 static int cortex_a_post_debug_entry(struct target *target)
1086 {
1087         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1088         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1089         int retval;
1090
1091         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1092         retval = armv7a->arm.mrc(target, 15,
1093                         0, 0,   /* op1, op2 */
1094                         1, 0,   /* CRn, CRm */
1095                         &cortex_a->cp15_control_reg);
1096         if (retval != ERROR_OK)
1097                 return retval;
1098         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1099         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1100
1101         if (!armv7a->is_armv7r)
1102                 armv7a_read_ttbcr(target);
1103
1104         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1105                 armv7a_identify_cache(target);
1106
1107         if (armv7a->is_armv7r) {
1108                 armv7a->armv7a_mmu.mmu_enabled = 0;
1109         } else {
1110                 armv7a->armv7a_mmu.mmu_enabled =
1111                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1112         }
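        /* SCTLR bit 2 (C) enables the data/unified caches and bit 12 (I) the
         * instruction cache, hence the 0x4 and 0x1000 masks below. */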
1113         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1114                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1115         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1116                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1117         cortex_a->curr_mode = armv7a->arm.core_mode;
1118
1119         /* switch to SVC mode to read DACR */
1120         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1121         armv7a->arm.mrc(target, 15,
1122                         0, 0, 3, 0,
1123                         &cortex_a->cp15_dacr_reg);
1124
1125         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1126                         cortex_a->cp15_dacr_reg);
1127
1128         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1129         return ERROR_OK;
1130 }
1131
1132 static int cortex_a_set_dscr_bits(struct target *target,
1133                 unsigned long bit_mask, unsigned long value)
1134 {
1135         struct armv7a_common *armv7a = target_to_armv7a(target);
1136         uint32_t dscr;
1137
1138         /* Read DSCR */
1139         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1140                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1141         if (retval != ERROR_OK)
1142                 return retval;
1143
1144         /* clear bitfield */
1145         dscr &= ~bit_mask;
1146         /* put new value */
1147         dscr |= value & bit_mask;
1148
1149         /* write new DSCR */
1150         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1151                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1152         return retval;
1153 }
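
/* Example (sketch): cortex_a_step() below uses this helper to mask interrupts
 * around a single step when isrmasking is enabled:
 *
 *     cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);  (mask)
 *     ...single step...
 *     cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);             (restore)
 */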
1154
1155 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1156         int handle_breakpoints)
1157 {
1158         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1159         struct armv7a_common *armv7a = target_to_armv7a(target);
1160         struct arm *arm = &armv7a->arm;
1161         struct breakpoint *breakpoint = NULL;
1162         struct breakpoint stepbreakpoint;
1163         struct reg *r;
1164         int retval;
1165
1166         if (target->state != TARGET_HALTED) {
1167                 LOG_WARNING("target not halted");
1168                 return ERROR_TARGET_NOT_HALTED;
1169         }
1170
1171         /* current = 1: continue on current pc, otherwise continue at <address> */
1172         r = arm->pc;
1173         if (!current)
1174                 buf_set_u32(r->value, 0, 32, address);
1175         else
1176                 address = buf_get_u32(r->value, 0, 32);
1177
1178         /* The front-end may request us not to handle breakpoints.
1179          * But since Cortex-A uses a breakpoint for single stepping,
1180          * we MUST handle breakpoints.
1181          */
1182         handle_breakpoints = 1;
1183         if (handle_breakpoints) {
1184                 breakpoint = breakpoint_find(target, address);
1185                 if (breakpoint)
1186                         cortex_a_unset_breakpoint(target, breakpoint);
1187         }
1188
1189         /* Setup single step breakpoint */
1190         stepbreakpoint.address = address;
1191         stepbreakpoint.asid = 0;
1192         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1193                 ? 2 : 4;
1194         stepbreakpoint.type = BKPT_HARD;
1195         stepbreakpoint.set = 0;
1196
1197         /* Disable interrupts during single step if requested */
1198         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1199                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1200                 if (retval != ERROR_OK)
1201                         return retval;
1202         }
1203
1204         /* Break on IVA mismatch */
1205         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1206
1207         target->debug_reason = DBG_REASON_SINGLESTEP;
1208
1209         retval = cortex_a_resume(target, 1, address, 0, 0);
1210         if (retval != ERROR_OK)
1211                 return retval;
1212
1213         int64_t then = timeval_ms();
1214         while (target->state != TARGET_HALTED) {
1215                 retval = cortex_a_poll(target);
1216                 if (retval != ERROR_OK)
1217                         return retval;
1218                 if (target->state == TARGET_HALTED)
1219                         break;
1220                 if (timeval_ms() > then + 1000) {
1221                         LOG_ERROR("timeout waiting for target halt");
1222                         return ERROR_FAIL;
1223                 }
1224         }
1225
1226         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1227
1228         /* Re-enable interrupts if they were disabled */
1229         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1230                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1231                 if (retval != ERROR_OK)
1232                         return retval;
1233         }
1234
1235
1236         target->debug_reason = DBG_REASON_BREAKPOINT;
1237
1238         if (breakpoint)
1239                 cortex_a_set_breakpoint(target, breakpoint, 0);
1240
1241         if (target->state == TARGET_HALTED)
1242                 LOG_DEBUG("target stepped");
1243
1244         return ERROR_OK;
1245 }
1246
1247 static int cortex_a_restore_context(struct target *target, bool bpwp)
1248 {
1249         struct armv7a_common *armv7a = target_to_armv7a(target);
1250
1251         LOG_DEBUG(" ");
1252
1253         if (armv7a->pre_restore_context)
1254                 armv7a->pre_restore_context(target);
1255
1256         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1257 }
1258
1259 /*
1260  * Cortex-A Breakpoint and watchpoint functions
1261  */
1262
1263 /* Setup hardware Breakpoint Register Pair */
1264 static int cortex_a_set_breakpoint(struct target *target,
1265         struct breakpoint *breakpoint, uint8_t matchmode)
1266 {
1267         int retval;
1268         int brp_i = 0;
1269         uint32_t control;
1270         uint8_t byte_addr_select = 0x0F;
1271         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1272         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1273         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1274
1275         if (breakpoint->set) {
1276                 LOG_WARNING("breakpoint already set");
1277                 return ERROR_OK;
1278         }
1279
1280         if (breakpoint->type == BKPT_HARD) {
1281                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1282                         brp_i++;
1283                 if (brp_i >= cortex_a->brp_num) {
1284                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1285                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1286                 }
1287                 breakpoint->set = brp_i + 1;
1288                 if (breakpoint->length == 2)
1289                         byte_addr_select = (3 << (breakpoint->address & 0x02));
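                /* DBGBCR fields (ARMv7 debug): [22:20] breakpoint type /
                 * match mode, [8:5] byte address select, [2:1] = b11 for any
                 * privilege level, [0] enable. */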
1290                 control = ((matchmode & 0x7) << 20)
1291                         | (byte_addr_select << 5)
1292                         | (3 << 1) | 1;
1293                 brp_list[brp_i].used = true;
1294                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1295                 brp_list[brp_i].control = control;
1296                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1297                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1298                                 brp_list[brp_i].value);
1299                 if (retval != ERROR_OK)
1300                         return retval;
1301                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1302                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1303                                 brp_list[brp_i].control);
1304                 if (retval != ERROR_OK)
1305                         return retval;
1306                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1307                         brp_list[brp_i].control,
1308                         brp_list[brp_i].value);
1309         } else if (breakpoint->type == BKPT_SOFT) {
1310                 uint8_t code[4];
1311                 /* length == 2: Thumb breakpoint */
1312                 if (breakpoint->length == 2)
1313                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1314                 else
1315                 /* length == 3: Thumb-2 breakpoint, actual encoding is
1316                  * a regular Thumb BKPT instruction but we replace a
1317                  * 32bit Thumb-2 instruction, so fix-up the breakpoint
1318                  * length
1319                  */
1320                 if (breakpoint->length == 3) {
1321                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1322                         breakpoint->length = 4;
1323                 } else
1324                         /* length == 4, normal ARM breakpoint */
1325                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1326
1327                 retval = target_read_memory(target,
1328                                 breakpoint->address & 0xFFFFFFFE,
1329                                 breakpoint->length, 1,
1330                                 breakpoint->orig_instr);
1331                 if (retval != ERROR_OK)
1332                         return retval;
1333
1334                 /* make sure data cache is cleaned & invalidated down to PoC */
1335                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1336                         armv7a_cache_flush_virt(target, breakpoint->address,
1337                                                 breakpoint->length);
1338                 }
1339
1340                 retval = target_write_memory(target,
1341                                 breakpoint->address & 0xFFFFFFFE,
1342                                 breakpoint->length, 1, code);
1343                 if (retval != ERROR_OK)
1344                         return retval;
1345
1346                 /* update i-cache at breakpoint location */
1347                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1348                                         breakpoint->length);
1349                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1350                                                  breakpoint->length);
1351
1352                 breakpoint->set = 0x11; /* Any nice value but 0 */
1353         }
1354
1355         return ERROR_OK;
1356 }
1357
1358 static int cortex_a_set_context_breakpoint(struct target *target,
1359         struct breakpoint *breakpoint, uint8_t matchmode)
1360 {
1361         int retval = ERROR_FAIL;
1362         int brp_i = 0;
1363         uint32_t control;
1364         uint8_t byte_addr_select = 0x0F;
1365         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1366         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1367         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1368
1369         if (breakpoint->set) {
1370                 LOG_WARNING("breakpoint already set");
1371                 return retval;
1372         }
1373         /* check available context BRPs; bounds check comes first */
1374         while ((brp_i < cortex_a->brp_num) &&
1375                 (brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
1376                 brp_i++;
1377
1378         if (brp_i >= cortex_a->brp_num) {
1379                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1380                 return ERROR_FAIL;
1381         }
1382
1383         breakpoint->set = brp_i + 1;
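             /* DBGBCR layout (see ARM DDI 0406C): breakpoint type/match mode in
              * bits [22:20], byte address select in bits [8:5], privileged mode
              * control 0b11 in bits [2:1], enable in bit [0]. */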
1384         control = ((matchmode & 0x7) << 20)
1385                 | (byte_addr_select << 5)
1386                 | (3 << 1) | 1;
1387         brp_list[brp_i].used = true;
1388         brp_list[brp_i].value = (breakpoint->asid);
1389         brp_list[brp_i].control = control;
1390         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1391                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1392                         brp_list[brp_i].value);
1393         if (retval != ERROR_OK)
1394                 return retval;
1395         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1396                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1397                         brp_list[brp_i].control);
1398         if (retval != ERROR_OK)
1399                 return retval;
1400         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1401                 brp_list[brp_i].control,
1402                 brp_list[brp_i].value);
1403         return ERROR_OK;
1404
1405 }
1406
1407 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1408 {
1409         int retval = ERROR_FAIL;
1410         int brp_1 = 0;  /* holds the contextID pair */
1411         int brp_2 = 0;  /* holds the IVA pair */
1412         uint32_t control_ctx, control_iva;
1413         uint8_t ctx_byte_addr_select = 0x0F;
1414         uint8_t iva_byte_addr_select = 0x0F;
1415         uint8_t ctx_machmode = 0x03;
1416         uint8_t iva_machmode = 0x01;
1417         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1418         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1419         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1420
1421         if (breakpoint->set) {
1422                 LOG_WARNING("breakpoint already set");
1423                 return retval;
1424         }
1425         /* check available context BRPs; bounds check comes first */
1426         while ((brp_1 < cortex_a->brp_num) &&
1427                 (brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
1428                 brp_1++;
1429
1430         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1431         if (brp_1 >= cortex_a->brp_num) {
1432                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1433                 return ERROR_FAIL;
1434         }
1435
1436         while ((brp_2 < cortex_a->brp_num) &&
1437                 (brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
1438                 brp_2++;
1439
1440         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1441         if (brp_2 >= cortex_a->brp_num) {
1442                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1443                 return ERROR_FAIL;
1444         }
1445
1446         breakpoint->set = brp_1 + 1;
1447         breakpoint->linked_brp = brp_2;
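             /* Program the context BRP: a linked context ID match (BT = 0b011)
              * whose linked BRP number field (bits [19:16]) points at the IVA
              * BRP, so the pair only triggers when both halves match. */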
1448         control_ctx = ((ctx_machmode & 0x7) << 20)
1449                 | (brp_2 << 16)
1450                 | (0 << 14)
1451                 | (ctx_byte_addr_select << 5)
1452                 | (3 << 1) | 1;
1453         brp_list[brp_1].used = true;
1454         brp_list[brp_1].value = (breakpoint->asid);
1455         brp_list[brp_1].control = control_ctx;
1456         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1457                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1458                         brp_list[brp_1].value);
1459         if (retval != ERROR_OK)
1460                 return retval;
1461         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1462                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1463                         brp_list[brp_1].control);
1464         if (retval != ERROR_OK)
1465                 return retval;
1466
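             /* Program the IVA BRP: a linked instruction address match (BT = 0b001)
              * linked back to the context BRP programmed above. */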
1467         control_iva = ((iva_machmode & 0x7) << 20)
1468                 | (brp_1 << 16)
1469                 | (iva_byte_addr_select << 5)
1470                 | (3 << 1) | 1;
1471         brp_list[brp_2].used = true;
1472         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1473         brp_list[brp_2].control = control_iva;
1474         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1475                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1476                         brp_list[brp_2].value);
1477         if (retval != ERROR_OK)
1478                 return retval;
1479         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1480                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1481                         brp_list[brp_2].control);
1482         if (retval != ERROR_OK)
1483                 return retval;
1484
1485         return ERROR_OK;
1486 }
1487
1488 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1489 {
1490         int retval;
1491         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1492         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1493         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1494
1495         if (!breakpoint->set) {
1496                 LOG_WARNING("breakpoint not set");
1497                 return ERROR_OK;
1498         }
1499
1500         if (breakpoint->type == BKPT_HARD) {
1501                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
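                             /* Hybrid breakpoint: both a context BRP and a linked
                              * IVA BRP were programmed, so clear both halves. */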
1502                         int brp_i = breakpoint->set - 1;
1503                         int brp_j = breakpoint->linked_brp;
1504                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1505                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1506                                 return ERROR_OK;
1507                         }
1508                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1509                                 brp_list[brp_i].control, brp_list[brp_i].value);
1510                         brp_list[brp_i].used = false;
1511                         brp_list[brp_i].value = 0;
1512                         brp_list[brp_i].control = 0;
1513                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1514                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1515                                         brp_list[brp_i].control);
1516                         if (retval != ERROR_OK)
1517                                 return retval;
1518                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1519                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1520                                         brp_list[brp_i].value);
1521                         if (retval != ERROR_OK)
1522                                 return retval;
1523                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1524                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1525                                 return ERROR_OK;
1526                         }
1527                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1528                                 brp_list[brp_j].control, brp_list[brp_j].value);
1529                         brp_list[brp_j].used = false;
1530                         brp_list[brp_j].value = 0;
1531                         brp_list[brp_j].control = 0;
1532                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1533                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
1534                                         brp_list[brp_j].control);
1535                         if (retval != ERROR_OK)
1536                                 return retval;
1537                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1538                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
1539                                         brp_list[brp_j].value);
1540                         if (retval != ERROR_OK)
1541                                 return retval;
1542                         breakpoint->linked_brp = 0;
1543                         breakpoint->set = 0;
1544                         return ERROR_OK;
1545
1546                 } else {
1547                         int brp_i = breakpoint->set - 1;
1548                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1549                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1550                                 return ERROR_OK;
1551                         }
1552                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1553                                 brp_list[brp_i].control, brp_list[brp_i].value);
1554                         brp_list[brp_i].used = false;
1555                         brp_list[brp_i].value = 0;
1556                         brp_list[brp_i].control = 0;
1557                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1558                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1559                                         brp_list[brp_i].control);
1560                         if (retval != ERROR_OK)
1561                                 return retval;
1562                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1563                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1564                                         brp_list[brp_i].value);
1565                         if (retval != ERROR_OK)
1566                                 return retval;
1567                         breakpoint->set = 0;
1568                         return ERROR_OK;
1569                 }
1570         } else {
1571
1572                 /* make sure data cache is cleaned & invalidated down to PoC */
1573                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1574                         armv7a_cache_flush_virt(target, breakpoint->address,
1575                                                 breakpoint->length);
1576                 }
1577
1578                 /* restore original instruction (kept in target endianness) */
1579                 if (breakpoint->length == 4) {
1580                         retval = target_write_memory(target,
1581                                         breakpoint->address & 0xFFFFFFFE,
1582                                         4, 1, breakpoint->orig_instr);
1583                         if (retval != ERROR_OK)
1584                                 return retval;
1585                 } else {
1586                         retval = target_write_memory(target,
1587                                         breakpoint->address & 0xFFFFFFFE,
1588                                         2, 1, breakpoint->orig_instr);
1589                         if (retval != ERROR_OK)
1590                                 return retval;
1591                 }
1592
1593                 /* invalidate d-cache and i-cache at the breakpoint location */
1594                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1595                                                  breakpoint->length);
1596                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1597                                                  breakpoint->length);
1598         }
1599         breakpoint->set = 0;
1600
1601         return ERROR_OK;
1602 }
1603
1604 static int cortex_a_add_breakpoint(struct target *target,
1605         struct breakpoint *breakpoint)
1606 {
1607         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1608
1609         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1610                 LOG_INFO("no hardware breakpoint available");
1611                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1612         }
1613
1614         if (breakpoint->type == BKPT_HARD)
1615                 cortex_a->brp_num_available--;
1616
1617         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1618 }
1619
1620 static int cortex_a_add_context_breakpoint(struct target *target,
1621         struct breakpoint *breakpoint)
1622 {
1623         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1624
1625         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1626                 LOG_INFO("no hardware breakpoint available");
1627                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1628         }
1629
1630         if (breakpoint->type == BKPT_HARD)
1631                 cortex_a->brp_num_available--;
1632
1633         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1634 }
1635
1636 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1637         struct breakpoint *breakpoint)
1638 {
1639         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1640
1641         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1642                 LOG_INFO("no hardware breakpoint available");
1643                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1644         }
1645
1646         if (breakpoint->type == BKPT_HARD)
1647                 cortex_a->brp_num_available--;
1648
1649         return cortex_a_set_hybrid_breakpoint(target, breakpoint);      /* linked context + IVA match */
1650 }
1651
1652
1653 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1654 {
1655         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1656
1657 #if 0
1658 /* It is perfectly possible to remove breakpoints while the target is running */
1659         if (target->state != TARGET_HALTED) {
1660                 LOG_WARNING("target not halted");
1661                 return ERROR_TARGET_NOT_HALTED;
1662         }
1663 #endif
1664
1665         if (breakpoint->set) {
1666                 cortex_a_unset_breakpoint(target, breakpoint);
1667                 if (breakpoint->type == BKPT_HARD)
1668                         cortex_a->brp_num_available++;
1669         }
1670
1671
1672         return ERROR_OK;
1673 }
1674
1675 /**
1676  * Sets a watchpoint for a Cortex-A target in one of the watchpoint units.  It is
1677  * considered a bug to call this function when there are no available watchpoint
1678  * units.
1679  *
1680  * @param target Pointer to a Cortex-A target to set a watchpoint on
1681  * @param watchpoint Pointer to the watchpoint to be set
1682  * @return Error status if setting the watchpoint fails, otherwise the result of
1683  * executing the JTAG queue
1684  */
1685 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1686 {
1687         int retval = ERROR_OK;
1688         int wrp_i = 0;
1689         uint32_t control;
1690         uint32_t address;
1691         uint8_t address_mask;
1692         uint8_t byte_address_select;
1693         uint8_t load_store_access_control = 0x3;
1694         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1695         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1696         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1697
1698         if (watchpoint->set) {
1699                 LOG_WARNING("watchpoint already set");
1700                 return retval;
1701         }
1702
1703         /* check available WRPs; bounds check comes first */
1704         while ((wrp_i < cortex_a->wrp_num) && wrp_list[wrp_i].used)
1705                 wrp_i++;
1706
1707         if (wrp_i >= cortex_a->wrp_num) {
1708                 LOG_ERROR("Cannot find a free Watchpoint Register Pair");
1709                 return ERROR_FAIL;
1710         }
1711
1712         if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1713                         (watchpoint->length & (watchpoint->length - 1))) {
1714                 LOG_WARNING("watchpoint length must be a power of 2");
1715                 return ERROR_FAIL;
1716         }
1717
1718         if (watchpoint->address & (watchpoint->length - 1)) {
1719                 LOG_WARNING("watchpoint address must be aligned at length");
1720                 return ERROR_FAIL;
1721         }
1722
1723         /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing?  */
1724         /* handle wp length 1 and 2 through byte select */
1725         switch (watchpoint->length) {
1726         case 1:
1727                 byte_address_select = BIT(watchpoint->address & 0x3);
1728                 address = watchpoint->address & ~0x3;
1729                 address_mask = 0;
1730                 break;
1731
1732         case 2:
1733                 byte_address_select = 0x03 << (watchpoint->address & 0x2);
1734                 address = watchpoint->address & ~0x3;
1735                 address_mask = 0;
1736                 break;
1737
1738         case 4:
1739                 byte_address_select = 0x0f;
1740                 address = watchpoint->address;
1741                 address_mask = 0;
1742                 break;
1743
1744         default:
1745                 byte_address_select = 0xff;
1746                 address = watchpoint->address;
1747                 address_mask = ilog2(watchpoint->length);
1748                 break;
1749         }
1750
1751         watchpoint->set = wrp_i + 1;
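             /* DBGWCR layout (see ARM DDI 0406C): address mask in bits [28:24],
              * byte address select in bits [12:5], load/store access control in
              * bits [4:3] (0b11 = match loads and stores), privileged access
              * control 0b11 in bits [2:1], enable in bit [0]. */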
1752         control = (address_mask << 24) |
1753                 (byte_address_select << 5) |
1754                 (load_store_access_control << 3) |
1755                 (0x3 << 1) | 1;
1756         wrp_list[wrp_i].used = true;
1757         wrp_list[wrp_i].value = address;
1758         wrp_list[wrp_i].control = control;
1759
1760         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1761                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1762                         wrp_list[wrp_i].value);
1763         if (retval != ERROR_OK)
1764                 return retval;
1765
1766         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1767                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1768                         wrp_list[wrp_i].control);
1769         if (retval != ERROR_OK)
1770                 return retval;
1771
1772         LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1773                         wrp_list[wrp_i].control,
1774                         wrp_list[wrp_i].value);
1775
1776         return ERROR_OK;
1777 }
1778
1779 /**
1780  * Unset an existing watchpoint and clear the used watchpoint unit.
1781  *
1782  * @param target Pointer to the target to have the watchpoint removed
1783  * @param watchpoint Pointer to the watchpoint to be removed
1784  * @return Error status while trying to unset the watchpoint or the result of
1785  *         executing the JTAG queue
1786  */
1787 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1788 {
1789         int retval;
1790         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1791         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1792         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1793
1794         if (!watchpoint->set) {
1795                 LOG_WARNING("watchpoint not set");
1796                 return ERROR_OK;
1797         }
1798
1799         int wrp_i = watchpoint->set - 1;
1800         if (wrp_i < 0 || wrp_i >= cortex_a->wrp_num) {
1801                 LOG_DEBUG("Invalid WRP number in watchpoint");
1802                 return ERROR_OK;
1803         }
1804         LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1805                         wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1806         wrp_list[wrp_i].used = false;
1807         wrp_list[wrp_i].value = 0;
1808         wrp_list[wrp_i].control = 0;
1809         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1810                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1811                         wrp_list[wrp_i].control);
1812         if (retval != ERROR_OK)
1813                 return retval;
1814         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1815                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1816                         wrp_list[wrp_i].value);
1817         if (retval != ERROR_OK)
1818                 return retval;
1819         watchpoint->set = 0;
1820
1821         return ERROR_OK;
1822 }
1823
1824 /**
1825  * Add a watchpoint to a Cortex-A target.  If there are no watchpoint units
1826  * available, an error response is returned.
1827  *
1828  * @param target Pointer to the Cortex-A target to add a watchpoint to
1829  * @param watchpoint Pointer to the watchpoint to be added
1830  * @return Error status while trying to add the watchpoint
1831  */
1832 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1833 {
1834         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1835
1836         if (cortex_a->wrp_num_available < 1) {
1837                 LOG_INFO("no hardware watchpoint available");
1838                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1839         }
1840
1841         int retval = cortex_a_set_watchpoint(target, watchpoint);
1842         if (retval != ERROR_OK)
1843                 return retval;
1844
1845         cortex_a->wrp_num_available--;
1846         return ERROR_OK;
1847 }
1848
1849 /**
1850  * Remove a watchpoint from a Cortex-A target.  The watchpoint will be unset and
1851  * the watchpoint unit it occupied will be made available again.
1852  *
1853  * @param target Pointer to the target to remove a watchpoint from
1854  * @param watchpoint Pointer to the watchpoint to be removed
1855  * @return Result of trying to unset the watchpoint
1856  */
1857 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1858 {
1859         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1860
1861         if (watchpoint->set) {
1862                 cortex_a->wrp_num_available++;
1863                 cortex_a_unset_watchpoint(target, watchpoint);
1864         }
1865         return ERROR_OK;
1866 }
1867
1868
1869 /*
1870  * Cortex-A Reset functions
1871  */
1872
1873 static int cortex_a_assert_reset(struct target *target)
1874 {
1875         struct armv7a_common *armv7a = target_to_armv7a(target);
1876
1877         LOG_DEBUG(" ");
1878
1879         /* FIXME when halt is requested, make it work somehow... */
1880
1881         /* This function can be called in "target not examined" state */
1882
1883         /* Issue some kind of warm reset. */
1884         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1885                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1886         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1887                 /* REVISIT handle "pulls" cases, if there's
1888                  * hardware that needs them to work.
1889                  */
1890
1891                 /*
1892                  * FIXME: fix reset when transport is not JTAG. This is a temporary
1893                  * work-around for release v0.10 that is not intended to stay!
1894                  */
1895                 if (!transport_is_jtag() ||
1896                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1897                         adapter_assert_reset();
1898
1899         } else {
1900                 LOG_ERROR("%s: how to reset?", target_name(target));
1901                 return ERROR_FAIL;
1902         }
1903
1904         /* registers are now invalid */
1905         if (target_was_examined(target))
1906                 register_cache_invalidate(armv7a->arm.core_cache);
1907
1908         target->state = TARGET_RESET;
1909
1910         return ERROR_OK;
1911 }
1912
1913 static int cortex_a_deassert_reset(struct target *target)
1914 {
1915         struct armv7a_common *armv7a = target_to_armv7a(target);
1916         int retval;
1917
1918         LOG_DEBUG(" ");
1919
1920         /* be certain SRST is off */
1921         adapter_deassert_reset();
1922
1923         if (target_was_examined(target)) {
1924                 retval = cortex_a_poll(target);
1925                 if (retval != ERROR_OK)
1926                         return retval;
1927         }
1928
1929         if (target->reset_halt) {
1930                 if (target->state != TARGET_HALTED) {
1931                         LOG_WARNING("%s: ran after reset and before halt ...",
1932                                 target_name(target));
1933                         if (target_was_examined(target)) {
1934                                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1935                                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
1936                                 if (retval != ERROR_OK)
1937                                         return retval;
1938                         } else
1939                                 target->state = TARGET_UNKNOWN;
1940                 }
1941         }
1942
1943         return ERROR_OK;
1944 }
1945
1946 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1947 {
1948         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1949          * New desired mode must be in mode. Current value of DSCR must be in
1950          * *dscr, which is updated with new value.
1951          *
1952          * This function elides actually sending the mode-change over the debug
1953          * interface if the mode is already set as desired.
1954          */
1955         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1956         if (new_dscr != *dscr) {
1957                 struct armv7a_common *armv7a = target_to_armv7a(target);
1958                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1959                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1960                 if (retval == ERROR_OK)
1961                         *dscr = new_dscr;
1962                 return retval;
1963         } else {
1964                 return ERROR_OK;
1965         }
1966 }
1967
1968 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1969         uint32_t value, uint32_t *dscr)
1970 {
1971         /* Waits until the specified bit(s) of DSCR take on a specified value. */
1972         struct armv7a_common *armv7a = target_to_armv7a(target);
1973         int64_t then;
1974         int retval;
1975
1976         if ((*dscr & mask) == value)
1977                 return ERROR_OK;
1978
1979         then = timeval_ms();
1980         while (1) {
1981                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1982                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
1983                 if (retval != ERROR_OK) {
1984                         LOG_ERROR("Could not read DSCR register");
1985                         return retval;
1986                 }
1987                 if ((*dscr & mask) == value)
1988                         break;
1989                 if (timeval_ms() > then + 1000) {
1990                         LOG_ERROR("timeout waiting for DSCR bit change");
1991                         return ERROR_FAIL;
1992                 }
1993         }
1994         return ERROR_OK;
1995 }
1996
1997 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1998         uint32_t *data, uint32_t *dscr)
1999 {
2000         int retval;
2001         struct armv7a_common *armv7a = target_to_armv7a(target);
2002
2003         /* Move from coprocessor to R0. */
2004         retval = cortex_a_exec_opcode(target, opcode, dscr);
2005         if (retval != ERROR_OK)
2006                 return retval;
2007
2008         /* Move from R0 to DTRTX. */
2009         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2010         if (retval != ERROR_OK)
2011                 return retval;
2012
2013         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2014          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2015          * must also check TXfull_l). Most of the time this will be free
2016          * because TXfull_l will be set immediately and cached in dscr. */
2017         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2018                         DSCR_DTRTX_FULL_LATCHED, dscr);
2019         if (retval != ERROR_OK)
2020                 return retval;
2021
2022         /* Read the value transferred to DTRTX. */
2023         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2024                         armv7a->debug_base + CPUDBG_DTRTX, data);
2025         if (retval != ERROR_OK)
2026                 return retval;
2027
2028         return ERROR_OK;
2029 }
2030
2031 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2032         uint32_t *dfsr, uint32_t *dscr)
2033 {
2034         int retval;
2035
2036         if (dfar) {
2037                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2038                 if (retval != ERROR_OK)
2039                         return retval;
2040         }
2041
2042         if (dfsr) {
2043                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2044                 if (retval != ERROR_OK)
2045                         return retval;
2046         }
2047
2048         return ERROR_OK;
2049 }
2050
2051 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2052         uint32_t data, uint32_t *dscr)
2053 {
2054         int retval;
2055         struct armv7a_common *armv7a = target_to_armv7a(target);
2056
2057         /* Write the value into DTRRX. */
2058         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2059                         armv7a->debug_base + CPUDBG_DTRRX, data);
2060         if (retval != ERROR_OK)
2061                 return retval;
2062
2063         /* Move from DTRRX to R0. */
2064         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2065         if (retval != ERROR_OK)
2066                 return retval;
2067
2068         /* Move from R0 to coprocessor. */
2069         retval = cortex_a_exec_opcode(target, opcode, dscr);
2070         if (retval != ERROR_OK)
2071                 return retval;
2072
2073         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2074          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2075          * check RXfull_l). Most of the time this will be free because RXfull_l
2076          * will be cleared immediately and cached in dscr. */
2077         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2078         if (retval != ERROR_OK)
2079                 return retval;
2080
2081         return ERROR_OK;
2082 }
2083
2084 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2085         uint32_t dfsr, uint32_t *dscr)
2086 {
2087         int retval;
2088
2089         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2090         if (retval != ERROR_OK)
2091                 return retval;
2092
2093         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2094         if (retval != ERROR_OK)
2095                 return retval;
2096
2097         return ERROR_OK;
2098 }
2099
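/*
 * Translate a Data Fault Status Register value captured after a failed CPU
 * memory access into an OpenOCD error code.  Bit 9 of DFSR selects between
 * the LPAE (long-descriptor) encoding, where the fault status sits in bits
 * [5:0], and the short-descriptor encoding, where it is split across bits
 * [10] and [3:0].
 */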
2100 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2101 {
2102         uint32_t status, upper4;
2103
2104         if (dfsr & (1 << 9)) {
2105                 /* LPAE format. */
2106                 status = dfsr & 0x3f;
2107                 upper4 = status >> 2;
2108                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2109                         return ERROR_TARGET_TRANSLATION_FAULT;
2110                 else if (status == 33)
2111                         return ERROR_TARGET_UNALIGNED_ACCESS;
2112                 else
2113                         return ERROR_TARGET_DATA_ABORT;
2114         } else {
2115                 /* Normal format. */
2116                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2117                 if (status == 1)
2118                         return ERROR_TARGET_UNALIGNED_ACCESS;
2119                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2120                                 status == 9 || status == 11 || status == 13 || status == 15)
2121                         return ERROR_TARGET_TRANSLATION_FAULT;
2122                 else
2123                         return ERROR_TARGET_DATA_ABORT;
2124         }
2125 }
2126
2127 static int cortex_a_write_cpu_memory_slow(struct target *target,
2128         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2129 {
2130         /* Writes count objects of size size from *buffer. Old value of DSCR must
2131          * be in *dscr; updated to new value. This is slow because it works for
2132          * non-word-sized objects. Avoid unaligned accesses as they do not work
2133          * on memory address space without "Normal" attribute. If size == 4 and
2134          * the address is aligned, cortex_a_write_cpu_memory_fast should be
2135          * preferred.
2136          * Preconditions:
2137          * - Address is in R0.
2138          * - R0 is marked dirty.
2139          */
2140         struct armv7a_common *armv7a = target_to_armv7a(target);
2141         struct arm *arm = &armv7a->arm;
2142         int retval;
2143
2144         /* Mark register R1 as dirty, to use for transferring data. */
2145         arm_reg_current(arm, 1)->dirty = true;
2146
2147         /* Switch to non-blocking mode if not already in that mode. */
2148         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2149         if (retval != ERROR_OK)
2150                 return retval;
2151
2152         /* Go through the objects. */
2153         while (count) {
2154                 /* Write the value to store into DTRRX. */
2155                 uint32_t data, opcode;
2156                 if (size == 1)
2157                         data = *buffer;
2158                 else if (size == 2)
2159                         data = target_buffer_get_u16(target, buffer);
2160                 else
2161                         data = target_buffer_get_u32(target, buffer);
2162                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2163                                 armv7a->debug_base + CPUDBG_DTRRX, data);
2164                 if (retval != ERROR_OK)
2165                         return retval;
2166
2167                 /* Transfer the value from DTRRX to R1. */
2168                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2169                 if (retval != ERROR_OK)
2170                         return retval;
2171
2172                 /* Write the value transferred to R1 into memory. */
2173                 if (size == 1)
2174                         opcode = ARMV4_5_STRB_IP(1, 0);
2175                 else if (size == 2)
2176                         opcode = ARMV4_5_STRH_IP(1, 0);
2177                 else
2178                         opcode = ARMV4_5_STRW_IP(1, 0);
2179                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2180                 if (retval != ERROR_OK)
2181                         return retval;
2182
2183                 /* Check for faults and return early. */
2184                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2185                         return ERROR_OK; /* A data fault is not considered a system failure. */
2186
2187                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2188                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2189                  * must also check RXfull_l). Most of the time this will be free
2190                  * because RXfull_l will be cleared immediately and cached in dscr. */
2191                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2192                 if (retval != ERROR_OK)
2193                         return retval;
2194
2195                 /* Advance. */
2196                 buffer += size;
2197                 --count;
2198         }
2199
2200         return ERROR_OK;
2201 }
2202
2203 static int cortex_a_write_cpu_memory_fast(struct target *target,
2204         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2205 {
2206         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2207          * in *dscr; updated to new value. This is fast but only works for
2208          * word-sized objects at aligned addresses.
2209          * Preconditions:
2210          * - Address is in R0 and must be a multiple of 4.
2211          * - R0 is marked dirty.
2212          */
2213         struct armv7a_common *armv7a = target_to_armv7a(target);
2214         int retval;
2215
2216         /* Switch to fast mode if not already in that mode. */
2217         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2218         if (retval != ERROR_OK)
2219                 return retval;
2220
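             /* In DCC fast mode the instruction latched in ITR is re-executed on
              * every write to DTRRX, so a single post-indexed STC (store DTRRX to
              * [R0], then R0 += 4) streams the whole buffer below. */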
2221         /* Latch STC instruction. */
2222         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2223                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2224         if (retval != ERROR_OK)
2225                 return retval;
2226
2227         /* Transfer all the data and issue all the instructions. */
2228         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2229                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
2230 }
2231
2232 static int cortex_a_write_cpu_memory(struct target *target,
2233         uint32_t address, uint32_t size,
2234         uint32_t count, const uint8_t *buffer)
2235 {
2236         /* Write memory through the CPU. */
2237         int retval, final_retval;
2238         struct armv7a_common *armv7a = target_to_armv7a(target);
2239         struct arm *arm = &armv7a->arm;
2240         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2241
2242         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2243                           address, size, count);
2244         if (target->state != TARGET_HALTED) {
2245                 LOG_WARNING("target not halted");
2246                 return ERROR_TARGET_NOT_HALTED;
2247         }
2248
2249         if (!count)
2250                 return ERROR_OK;
2251
2252         /* Clear any abort. */
2253         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2254                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2255         if (retval != ERROR_OK)
2256                 return retval;
2257
2258         /* Read DSCR. */
2259         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2260                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2261         if (retval != ERROR_OK)
2262                 return retval;
2263
2264         /* Switch to non-blocking mode if not already in that mode. */
2265         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2266         if (retval != ERROR_OK)
2267                 goto out;
2268
2269         /* Mark R0 as dirty. */
2270         arm_reg_current(arm, 0)->dirty = true;
2271
2272         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2273         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2274         if (retval != ERROR_OK)
2275                 goto out;
2276
2277         /* Get the memory address into R0. */
2278         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2279                         armv7a->debug_base + CPUDBG_DTRRX, address);
2280         if (retval != ERROR_OK)
2281                 goto out;
2282         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2283         if (retval != ERROR_OK)
2284                 goto out;
2285
2286         if (size == 4 && (address % 4) == 0) {
2287                 /* We are doing a word-aligned transfer, so use fast mode. */
2288                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2289         } else {
2290                 /* Use slow path. Adjust size for aligned accesses */
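                     /* An odd address forces byte-wide accesses; a halfword-aligned
                      * word access is split into twice as many halfword accesses. */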
2291                 switch (address % 4) {
2292                         case 1:
2293                         case 3:
2294                                 count *= size;
2295                                 size = 1;
2296                                 break;
2297                         case 2:
2298                                 if (size == 4) {
2299                                         count *= 2;
2300                                         size = 2;
2301                                 }
                                     break;
2302                         case 0:
2303                         default:
2304                                 break;
2305                 }
2306                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2307         }
2308
2309 out:
2310         final_retval = retval;
2311
2312         /* Switch to non-blocking mode if not already in that mode. */
2313         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2314         if (final_retval == ERROR_OK)
2315                 final_retval = retval;
2316
2317         /* Wait for last issued instruction to complete. */
2318         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2319         if (final_retval == ERROR_OK)
2320                 final_retval = retval;
2321
2322         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2323          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2324          * check RXfull_l). Most of the time this will be free because RXfull_l
2325          * will be cleared immediately and cached in dscr. However, don't do this
2326          * if there is fault, because then the instruction might not have completed
2327          * successfully. */
2328         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2329                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2330                 if (retval != ERROR_OK)
2331                         return retval;
2332         }
2333
2334         /* If there were any sticky abort flags, clear them. */
2335         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2336                 fault_dscr = dscr;
2337                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2338                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2339                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2340         } else {
2341                 fault_dscr = 0;
2342         }
2343
2344         /* Handle synchronous data faults. */
2345         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2346                 if (final_retval == ERROR_OK) {
2347                         /* Final return value will reflect cause of fault. */
2348                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2349                         if (retval == ERROR_OK) {
2350                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2351                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2352                         } else
2353                                 final_retval = retval;
2354                 }
2355                 /* Fault destroyed DFAR/DFSR; restore them. */
2356                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2357                 if (retval != ERROR_OK)
2358                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2359         }
2360
2361         /* Handle asynchronous data faults. */
2362         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2363                 if (final_retval == ERROR_OK)
2364                         /* No other error has been recorded so far, so keep this one. */
2365                         final_retval = ERROR_TARGET_DATA_ABORT;
2366         }
2367
2368         /* If the DCC is nonempty, clear it. */
2369         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2370                 uint32_t dummy;
2371                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2372                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2373                 if (final_retval == ERROR_OK)
2374                         final_retval = retval;
2375         }
2376         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2377                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2378                 if (final_retval == ERROR_OK)
2379                         final_retval = retval;
2380         }
2381
2382         /* Done. */
2383         return final_retval;
2384 }
2385
2386 static int cortex_a_read_cpu_memory_slow(struct target *target,
2387         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2388 {
2389         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2390          * in *dscr; updated to new value. This is slow because it works for
2391          * non-word-sized objects. Avoid unaligned accesses as they do not work
2392          * on memory address space without "Normal" attribute. If size == 4 and
2393          * the address is aligned, cortex_a_read_cpu_memory_fast should be
2394          * preferred.
2395          * Preconditions:
2396          * - Address is in R0.
2397          * - R0 is marked dirty.
2398          */
2399         struct armv7a_common *armv7a = target_to_armv7a(target);
2400         struct arm *arm = &armv7a->arm;
2401         int retval;
2402
2403         /* Mark register R1 as dirty, to use for transferring data. */
2404         arm_reg_current(arm, 1)->dirty = true;
2405
2406         /* Switch to non-blocking mode if not already in that mode. */
2407         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2408         if (retval != ERROR_OK)
2409                 return retval;
2410
2411         /* Go through the objects. */
2412         while (count) {
2413                 /* Issue a load of the appropriate size to R1. */
2414                 uint32_t opcode, data;
2415                 if (size == 1)
2416                         opcode = ARMV4_5_LDRB_IP(1, 0);
2417                 else if (size == 2)
2418                         opcode = ARMV4_5_LDRH_IP(1, 0);
2419                 else
2420                         opcode = ARMV4_5_LDRW_IP(1, 0);
2421                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2422                 if (retval != ERROR_OK)
2423                         return retval;
2424
2425                 /* Issue a write of R1 to DTRTX. */
2426                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2427                 if (retval != ERROR_OK)
2428                         return retval;
2429
2430                 /* Check for faults and return early. */
2431                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2432                         return ERROR_OK; /* A data fault is not considered a system failure. */
2433
2434                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2435                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2436                  * must also check TXfull_l). Most of the time this will be free
2437                  * because TXfull_l will be set immediately and cached in dscr. */
2438                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2439                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2440                 if (retval != ERROR_OK)
2441                         return retval;
2442
2443                 /* Read the value transferred to DTRTX into the buffer. */
2444                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2445                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2446                 if (retval != ERROR_OK)
2447                         return retval;
2448                 if (size == 1)
2449                         *buffer = (uint8_t) data;
2450                 else if (size == 2)
2451                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2452                 else
2453                         target_buffer_set_u32(target, buffer, data);
2454
2455                 /* Advance. */
2456                 buffer += size;
2457                 --count;
2458         }
2459
2460         return ERROR_OK;
2461 }
2462
2463 static int cortex_a_read_cpu_memory_fast(struct target *target,
2464         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2465 {
2466         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2467          * *dscr; updated to new value. This is fast but only works for word-sized
2468          * objects at aligned addresses.
2469          * Preconditions:
2470          * - Address is in R0 and must be a multiple of 4.
2471          * - R0 is marked dirty.
2472          */
2473         struct armv7a_common *armv7a = target_to_armv7a(target);
2474         uint32_t u32;
2475         int retval;
2476
2477         /* Switch to non-blocking mode if not already in that mode. */
2478         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2479         if (retval != ERROR_OK)
2480                 return retval;
2481
2482         /* Issue the LDC instruction via a write to ITR. */
2483         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2484         if (retval != ERROR_OK)
2485                 return retval;
2486
2487         count--;
2488
2489         if (count > 0) {
2490                 /* Switch to fast mode if not already in that mode. */
2491                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2492                 if (retval != ERROR_OK)
2493                         return retval;
2494
2495                 /* Latch LDC instruction. */
2496                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2497                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2498                 if (retval != ERROR_OK)
2499                         return retval;
2500
2501                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2502                  * mode rules, this blocks until the instruction finishes executing and
2503                  * then reissues the read instruction to read the next word from
2504                  * memory. The last read of DTRTX in this call reads the second-to-last
2505                  * word from memory and issues the read instruction for the last word.
2506                  */
2507                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2508                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2509                 if (retval != ERROR_OK)
2510                         return retval;
2511
2512                 /* Advance. */
2513                 buffer += count * 4;
2514         }
2515
2516         /* Wait for last issued instruction to complete. */
2517         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2518         if (retval != ERROR_OK)
2519                 return retval;
2520
2521         /* Switch to non-blocking mode if not already in that mode. */
2522         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2523         if (retval != ERROR_OK)
2524                 return retval;
2525
2526         /* Check for faults and return early. */
2527         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2528                 return ERROR_OK; /* A data fault is not considered a system failure. */
2529
2530         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2531          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2532          * check TXfull_l). Most of the time this will be free because TXfull_l
2533          * will be set immediately and cached in dscr. */
2534         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2535                         DSCR_DTRTX_FULL_LATCHED, dscr);
2536         if (retval != ERROR_OK)
2537                 return retval;
2538
2539         /* Read the value transferred to DTRTX into the buffer. This is the last
2540          * word. */
2541         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2542                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2543         if (retval != ERROR_OK)
2544                 return retval;
2545         target_buffer_set_u32(target, buffer, u32);
2546
2547         return ERROR_OK;
2548 }
2549
2550 static int cortex_a_read_cpu_memory(struct target *target,
2551         uint32_t address, uint32_t size,
2552         uint32_t count, uint8_t *buffer)
2553 {
2554         /* Read memory through the CPU. */
2555         int retval, final_retval;
2556         struct armv7a_common *armv7a = target_to_armv7a(target);
2557         struct arm *arm = &armv7a->arm;
2558         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2559
2560         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2561                           address, size, count);
2562         if (target->state != TARGET_HALTED) {
2563                 LOG_WARNING("target not halted");
2564                 return ERROR_TARGET_NOT_HALTED;
2565         }
2566
2567         if (!count)
2568                 return ERROR_OK;
2569
2570         /* Clear any abort. */
2571         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2572                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2573         if (retval != ERROR_OK)
2574                 return retval;
2575
2576         /* Read DSCR */
2577         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2578                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2579         if (retval != ERROR_OK)
2580                 return retval;
2581
2582         /* Switch to non-blocking mode if not already in that mode. */
2583         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2584         if (retval != ERROR_OK)
2585                 goto out;
2586
2587         /* Mark R0 as dirty. */
2588         arm_reg_current(arm, 0)->dirty = true;
2589
2590         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2591         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2592         if (retval != ERROR_OK)
2593                 goto out;
2594
2595         /* Get the memory address into R0. */
2596         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2597                         armv7a->debug_base + CPUDBG_DTRRX, address);
2598         if (retval != ERROR_OK)
2599                 goto out;
2600         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2601         if (retval != ERROR_OK)
2602                 goto out;
2603
2604         if (size == 4 && (address % 4) == 0) {
2605                 /* We are doing a word-aligned transfer, so use fast mode. */
2606                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2607         } else {
2608                 /* Use the slow path. Adjust the access size so that every access is aligned. */
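                /* For example, a word-sized request at an address with (address % 4) == 2
                 * is carried out as twice as many halfword accesses, and requests at odd
                 * addresses fall back to byte accesses. */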
2609                 switch (address % 4) {
2610                         case 1:
2611                         case 3:
2612                                 count *= size;
2613                                 size = 1;
2614                                 break;
2615                         case 2:
2616                                 if (size == 4) {
2617                                         count *= 2;
2618                                         size = 2;
2619                                 }
2620                                 break;
2621                         case 0:
2622                         default:
2623                                 break;
2624                 }
2625                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2626         }
2627
2628 out:
2629         final_retval = retval;
2630
2631         /* Switch to non-blocking mode if not already in that mode. */
2632         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2633         if (final_retval == ERROR_OK)
2634                 final_retval = retval;
2635
2636         /* Wait for last issued instruction to complete. */
2637         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2638         if (final_retval == ERROR_OK)
2639                 final_retval = retval;
2640
2641         /* If there were any sticky abort flags, clear them. */
2642         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2643                 fault_dscr = dscr;
2644                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2645                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2646                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2647         } else {
2648                 fault_dscr = 0;
2649         }
2650
2651         /* Handle synchronous data faults. */
2652         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2653                 if (final_retval == ERROR_OK) {
2654                         /* Final return value will reflect cause of fault. */
2655                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2656                         if (retval == ERROR_OK) {
2657                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2658                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2659                         } else
2660                                 final_retval = retval;
2661                 }
2662                 /* Fault destroyed DFAR/DFSR; restore them. */
2663                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2664                 if (retval != ERROR_OK)
2665                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2666         }
2667
2668         /* Handle asynchronous data faults. */
2669         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2670                 if (final_retval == ERROR_OK)
2671                         /* No other error has been recorded so far, so keep this one. */
2672                         final_retval = ERROR_TARGET_DATA_ABORT;
2673         }
2674
2675         /* If the DCC is nonempty, clear it. */
2676         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2677                 uint32_t dummy;
2678                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2679                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2680                 if (final_retval == ERROR_OK)
2681                         final_retval = retval;
2682         }
2683         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
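                /* Drain DTRRX by having the core read it into R1
                 * (MRC p14, 0, R1, c0, c5, 0). */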
2684                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2685                 if (final_retval == ERROR_OK)
2686                         final_retval = retval;
2687         }
2688
2689         /* Done. */
2690         return final_retval;
2691 }
2692
2693
2694 /*
2695  * Cortex-A Memory access
2696  *
2697  * This is the same as for the Cortex-M3, but we must also use the
2698  * correct AP number for every access.
2699  */
2700
2701 static int cortex_a_read_phys_memory(struct target *target,
2702         target_addr_t address, uint32_t size,
2703         uint32_t count, uint8_t *buffer)
2704 {
2705         int retval;
2706
2707         if (!count || !buffer)
2708                 return ERROR_COMMAND_SYNTAX_ERROR;
2709
2710         LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2711                 address, size, count);
2712
2713         /* read memory through the CPU */
2714         cortex_a_prep_memaccess(target, 1);
2715         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2716         cortex_a_post_memaccess(target, 1);
2717
2718         return retval;
2719 }
2720
2721 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2722         uint32_t size, uint32_t count, uint8_t *buffer)
2723 {
2724         int retval;
2725
2726         /* cortex_a handles unaligned memory access */
2727         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2728                 address, size, count);
2729
2730         cortex_a_prep_memaccess(target, 0);
2731         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2732         cortex_a_post_memaccess(target, 0);
2733
2734         return retval;
2735 }
2736
2737 static int cortex_a_write_phys_memory(struct target *target,
2738         target_addr_t address, uint32_t size,
2739         uint32_t count, const uint8_t *buffer)
2740 {
2741         int retval;
2742
2743         if (!count || !buffer)
2744                 return ERROR_COMMAND_SYNTAX_ERROR;
2745
2746         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2747                 address, size, count);
2748
2749         /* write memory through the CPU */
2750         cortex_a_prep_memaccess(target, 1);
2751         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2752         cortex_a_post_memaccess(target, 1);
2753
2754         return retval;
2755 }
2756
2757 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2758         uint32_t size, uint32_t count, const uint8_t *buffer)
2759 {
2760         int retval;
2761
2762         /* cortex_a handles unaligned memory access */
2763         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2764                 address, size, count);
2765
2766         /* memory writes bypass the caches, must flush before writing */
2767         armv7a_cache_auto_flush_on_write(target, address, size * count);
2768
2769         cortex_a_prep_memaccess(target, 0);
2770         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2771         cortex_a_post_memaccess(target, 0);
2772         return retval;
2773 }
2774
2775 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2776                                 uint32_t count, uint8_t *buffer)
2777 {
2778         uint32_t size;
2779
2780         /* Align up to a maximum access size of 4 bytes. The loop condition makes
2781          * sure the next pass still has enough data left for the larger size. */
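        /* For example, a 7-byte read starting at 0x1001 issues one byte access at
         * 0x1001 and one halfword access at 0x1002 here, leaving a single word
         * access at 0x1004 to the loop below. */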
2782         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2783                 if (address & size) {
2784                         int retval = target_read_memory(target, address, size, 1, buffer);
2785                         if (retval != ERROR_OK)
2786                                 return retval;
2787                         address += size;
2788                         count -= size;
2789                         buffer += size;
2790                 }
2791         }
2792
2793         /* Read the data with as large access size as possible. */
2794         for (; size > 0; size /= 2) {
2795                 uint32_t aligned = count - count % size;
2796                 if (aligned > 0) {
2797                         int retval = target_read_memory(target, address, size, aligned / size, buffer);
2798                         if (retval != ERROR_OK)
2799                                 return retval;
2800                         address += aligned;
2801                         count -= aligned;
2802                         buffer += aligned;
2803                 }
2804         }
2805
2806         return ERROR_OK;
2807 }
2808
2809 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2810                                  uint32_t count, const uint8_t *buffer)
2811 {
2812         uint32_t size;
2813
2814         /* Align up to a maximum access size of 4 bytes. The loop condition makes
2815          * sure the next pass still has enough data left for the larger size. */
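        /* The split mirrors cortex_a_read_buffer() above: e.g. a 7-byte write
         * starting at 0x1001 issues one byte write and one halfword write here,
         * then a single word write in the loop below. */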
2816         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2817                 if (address & size) {
2818                         int retval = target_write_memory(target, address, size, 1, buffer);
2819                         if (retval != ERROR_OK)
2820                                 return retval;
2821                         address += size;
2822                         count -= size;
2823                         buffer += size;
2824                 }
2825         }
2826
2827         /* Write the data with as large access size as possible. */
2828         for (; size > 0; size /= 2) {
2829                 uint32_t aligned = count - count % size;
2830                 if (aligned > 0) {
2831                         int retval = target_write_memory(target, address, size, aligned / size, buffer);
2832                         if (retval != ERROR_OK)
2833                                 return retval;
2834                         address += aligned;
2835                         count -= aligned;
2836                         buffer += aligned;
2837                 }
2838         }
2839
2840         return ERROR_OK;
2841 }
2842
2843 static int cortex_a_handle_target_request(void *priv)
2844 {
2845         struct target *target = priv;
2846         struct armv7a_common *armv7a = target_to_armv7a(target);
2847         int retval;
2848
2849         if (!target_was_examined(target))
2850                 return ERROR_OK;
2851         if (!target->dbg_msg_enabled)
2852                 return ERROR_OK;
2853
2854         if (target->state == TARGET_RUNNING) {
2855                 uint32_t request;
2856                 uint32_t dscr;
2857                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2858                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2859
2860                 /* check if we have data */
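                /* While DSCR reports DTRTX full, the core has placed a debug message
                 * word in DTRTX; read it out, hand it to the target request layer,
                 * then re-check DSCR. */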
2861                 int64_t then = timeval_ms();
2862                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2863                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2864                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2865                         if (retval == ERROR_OK) {
2866                                 target_request(target, request);
2867                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2868                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2869                         }
2870                         if (timeval_ms() > then + 1000) {
2871                                 LOG_ERROR("Timeout waiting for dtr tx full");
2872                                 return ERROR_FAIL;
2873                         }
2874                 }
2875         }
2876
2877         return ERROR_OK;
2878 }
2879
2880 /*
2881  * Cortex-A target information and configuration
2882  */
2883
2884 static int cortex_a_examine_first(struct target *target)
2885 {
2886         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2887         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2888         struct adiv5_dap *swjdp = armv7a->arm.dap;
2889
2890         int i;
2891         int retval = ERROR_OK;
2892         uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2893
2894         /* Search for the APB-AP - it is needed for access to debug registers */
2895         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2896         if (retval != ERROR_OK) {
2897                 LOG_ERROR("Could not find APB-AP for debug access");
2898                 return retval;
2899         }
2900
2901         retval = mem_ap_init(armv7a->debug_ap);
2902         if (retval != ERROR_OK) {
2903                 LOG_ERROR("Could not initialize the APB-AP");
2904                 return retval;
2905         }
2906
2907         armv7a->debug_ap->memaccess_tck = 80;
2908
2909         if (!target->dbgbase_set) {
2910                 target_addr_t dbgbase;
2911                 /* Get ROM Table base */
2912                 uint32_t apid;
2913                 int32_t coreidx = target->coreid;
2914                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2915                           target->cmd_name);
2916                 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2917                 if (retval != ERROR_OK)
2918                         return retval;
2919                 /* Lookup 0x15 -- Processor DAP */
2920                 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2921                                 &armv7a->debug_base, &coreidx);
2922                 if (retval != ERROR_OK) {
2923                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2924                                   target->cmd_name);
2925                         return retval;
2926                 }
2927                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2928                           target->coreid, armv7a->debug_base);
2929         } else
2930                 armv7a->debug_base = target->dbgbase;
2931
2932         if ((armv7a->debug_base & (1UL<<31)) == 0)
2933                 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2934                             "Please fix the target configuration.", target_name(target));
2935
2936         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2937                         armv7a->debug_base + CPUDBG_DIDR, &didr);
2938         if (retval != ERROR_OK) {
2939                 LOG_DEBUG("Examine %s failed", "DIDR");
2940                 return retval;
2941         }
2942
2943         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2944                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2945         if (retval != ERROR_OK) {
2946                 LOG_DEBUG("Examine %s failed", "CPUID");
2947                 return retval;
2948         }
2949
2950         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2951         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2952
2953         cortex_a->didr = didr;
2954         cortex_a->cpuid = cpuid;
2955
2956         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2957                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2958         if (retval != ERROR_OK)
2959                 return retval;
2960         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
2961
2962         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2963                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2964                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2965                 return ERROR_TARGET_INIT_FAILED;
2966         }
2967
2968         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2969                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2970
2971         /* Read DBGOSLSR and check if OSLK is implemented */
2972         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2973                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2974         if (retval != ERROR_OK)
2975                 return retval;
2976         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2977
2978         /* check if OS Lock is implemented */
2979         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2980                 /* check if OS Lock is set */
2981                 if (dbg_osreg & OSLSR_OSLK) {
2982                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2983
2984                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2985                                                         armv7a->debug_base + CPUDBG_OSLAR,
2986                                                         0);
2987                         if (retval == ERROR_OK)
2988                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2989                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2990
2991                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2992                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2993                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2994                                                 target->coreid);
2995                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2996                                 return ERROR_TARGET_INIT_FAILED;
2997                         }
2998                 }
2999         }
3000
3001         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3002                                  armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
3003         if (retval != ERROR_OK)
3004                 return retval;
3005
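        /* ID_PFR1[7:4] is the Security extensions field and ID_PFR1[15:12] the
         * Virtualization extensions field; a non-zero value means the extension
         * is implemented. */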
3006         if (dbg_idpfr1 & 0x000000f0) {
3007                 LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
3008                                 target->coreid);
3009                 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
3010         }
3011         if (dbg_idpfr1 & 0x0000f000) {
3012                 LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
3013                                 target->coreid);
3014                 /*
3015                  * Overwrite the previous value: the virtualization extensions
3016                  * require the security extensions to be implemented.
3017                  */
3018                 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3019         }
3020
3021         /* Avoid recreating the registers cache */
3022         if (!target_was_examined(target)) {
3023                 retval = cortex_a_dpm_setup(cortex_a, didr);
3024                 if (retval != ERROR_OK)
3025                         return retval;
3026         }
3027
3028         /* Setup Breakpoint Register Pairs */
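        /* Per the ARMv7 debug architecture, DBGDIDR[27:24] holds the number of
         * breakpoint register pairs minus one, and DBGDIDR[23:20] the number of
         * those that support context matching, also minus one. */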
3029         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3030         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3031         cortex_a->brp_num_available = cortex_a->brp_num;
3032         free(cortex_a->brp_list);
3033         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3034 /*      cortex_a->brb_enabled = ????; */
3035         for (i = 0; i < cortex_a->brp_num; i++) {
3036                 cortex_a->brp_list[i].used = false;
3037                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3038                         cortex_a->brp_list[i].type = BRP_NORMAL;
3039                 else
3040                         cortex_a->brp_list[i].type = BRP_CONTEXT;
3041                 cortex_a->brp_list[i].value = 0;
3042                 cortex_a->brp_list[i].control = 0;
3043                 cortex_a->brp_list[i].brpn = i;
3044         }
3045
3046         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3047
3048         /* Setup Watchpoint Register Pairs */
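        /* DBGDIDR[31:28] holds the number of watchpoint register pairs minus one. */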
3049         cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3050         cortex_a->wrp_num_available = cortex_a->wrp_num;
3051         free(cortex_a->wrp_list);
3052         cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3053         for (i = 0; i < cortex_a->wrp_num; i++) {
3054                 cortex_a->wrp_list[i].used = false;
3055                 cortex_a->wrp_list[i].value = 0;
3056                 cortex_a->wrp_list[i].control = 0;
3057                 cortex_a->wrp_list[i].wrpn = i;
3058         }
3059
3060         LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3061
3062         /* select debug_ap as default */
3063         swjdp->apsel = armv7a->debug_ap->ap_num;
3064
3065         target_set_examined(target);
3066         return ERROR_OK;
3067 }
3068
3069 static int cortex_a_examine(struct target *target)
3070 {
3071         int retval = ERROR_OK;
3072
3073         /* Reestablish communication after target reset */
3074         retval = cortex_a_examine_first(target);
3075
3076         /* Configure core debug access */
3077         if (retval == ERROR_OK)
3078                 retval = cortex_a_init_debug_access(target);
3079
3080         return retval;
3081 }
3082
3083 /*
3084  *      Cortex-A target creation and initialization
3085  */
3086
3087 static int cortex_a_init_target(struct command_context *cmd_ctx,
3088         struct target *target)
3089 {
3090         /* examine_first() does a bunch of this */
3091         arm_semihosting_init(target);
3092         return ERROR_OK;
3093 }
3094
3095 static int cortex_a_init_arch_info(struct target *target,
3096         struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3097 {
3098         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3099
3100         /* Setup struct cortex_a_common */
3101         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3102         armv7a->arm.dap = dap;
3103
3104         /* register arch-specific functions */
3105         armv7a->examine_debug_reason = NULL;
3106
3107         armv7a->post_debug_entry = cortex_a_post_debug_entry;
3108
3109         armv7a->pre_restore_context = NULL;
3110
3111         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3112
3113
3114 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
3115
3116         /* REVISIT v7a setup should be in a v7a-specific routine */
3117         armv7a_init_arch_info(target, armv7a);
3118         target_register_timer_callback(cortex_a_handle_target_request, 1,
3119                 TARGET_TIMER_TYPE_PERIODIC, target);
3120
3121         return ERROR_OK;
3122 }
3123
3124 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3125 {
3126         struct cortex_a_common *cortex_a;
3127         struct adiv5_private_config *pc;
3128
3129         if (target->private_config == NULL)
3130                 return ERROR_FAIL;
3131
3132         pc = (struct adiv5_private_config *)target->private_config;
3133
3134         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3135         if (cortex_a == NULL) {
3136                 LOG_ERROR("Out of memory");
3137                 return ERROR_FAIL;
3138         }
3139         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3140         cortex_a->armv7a_common.is_armv7r = false;
3141         cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3142
3143         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3144 }
3145
3146 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3147 {
3148         struct cortex_a_common *cortex_a;
3149         struct adiv5_private_config *pc;
3150
3151         pc = (struct adiv5_private_config *)target->private_config;
3152         if (adiv5_verify_config(pc) != ERROR_OK)
3153                 return ERROR_FAIL;
3154
3155         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3156         if (cortex_a == NULL) {
3157                 LOG_ERROR("Out of memory");
3158                 return ERROR_FAIL;
3159         }
3160         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3161         cortex_a->armv7a_common.is_armv7r = true;
3162
3163         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3164 }
3165
3166 static void cortex_a_deinit_target(struct target *target)
3167 {
3168         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3169         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3170         struct arm_dpm *dpm = &armv7a->dpm;
3171         uint32_t dscr;
3172         int retval;
3173
3174         if (target_was_examined(target)) {
3175                 /* Disable halt for breakpoint, watchpoint and vector catch */
3176                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3177                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
3178                 if (retval == ERROR_OK)
3179                         mem_ap_write_atomic_u32(armv7a->debug_ap,
3180                                         armv7a->debug_base + CPUDBG_DSCR,
3181                                         dscr & ~DSCR_HALT_DBG_MODE);
3182         }
3183
3184         free(cortex_a->wrp_list);
3185         free(cortex_a->brp_list);
3186         arm_free_reg_cache(dpm->arm);
3187         free(dpm->dbp);
3188         free(dpm->dwp);
3189         free(target->private_config);
3190         free(cortex_a);
3191 }
3192
3193 static int cortex_a_mmu(struct target *target, int *enabled)
3194 {
3195         struct armv7a_common *armv7a = target_to_armv7a(target);
3196
3197         if (target->state != TARGET_HALTED) {
3198                 LOG_ERROR("%s: target not halted", __func__);
3199                 return ERROR_TARGET_INVALID;
3200         }
3201
3202         if (armv7a->is_armv7r)
3203                 *enabled = 0;
3204         else
3205                 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3206
3207         return ERROR_OK;
3208 }
3209
3210 static int cortex_a_virt2phys(struct target *target,
3211         target_addr_t virt, target_addr_t *phys)
3212 {
3213         int retval;
3214         int mmu_enabled = 0;
3215
3216         /*
3217          * If the MMU was not enabled at debug entry, there is no
3218          * way of knowing if there was ever a valid configuration
3219          * for it and thus it's not safe to enable it. In this case,
3220          * just return the virtual address as physical.
3221          */
3222         cortex_a_mmu(target, &mmu_enabled);
3223         if (!mmu_enabled) {
3224                 *phys = virt;
3225                 return ERROR_OK;
3226         }
3227
3228         /* The MMU must be enabled in order to get a correct translation */
3229         retval = cortex_a_mmu_modify(target, 1);
3230         if (retval != ERROR_OK)
3231                 return retval;
3232         return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3233                                                     phys, 1);
3234 }
3235
3236 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3237 {
3238         struct target *target = get_current_target(CMD_CTX);
3239         struct armv7a_common *armv7a = target_to_armv7a(target);
3240
3241         return armv7a_handle_cache_info_command(CMD,
3242                         &armv7a->armv7a_mmu.armv7a_cache);
3243 }
3244
3245
3246 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3247 {
3248         struct target *target = get_current_target(CMD_CTX);
3249         if (!target_was_examined(target)) {
3250                 LOG_ERROR("target not examined yet");
3251                 return ERROR_FAIL;
3252         }
3253
3254         return cortex_a_init_debug_access(target);
3255 }
3256
3257 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3258 {
3259         struct target *target = get_current_target(CMD_CTX);
3260         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3261
3262         static const struct jim_nvp nvp_maskisr_modes[] = {
3263                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3264                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3265                 { .name = NULL, .value = -1 },
3266         };
3267         const struct jim_nvp *n;
3268
3269         if (CMD_ARGC > 0) {
3270                 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3271                 if (n->name == NULL) {
3272                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3273                         return ERROR_COMMAND_SYNTAX_ERROR;
3274                 }
3275
3276                 cortex_a->isrmasking_mode = n->value;
3277         }
3278
3279         n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3280         command_print(CMD, "cortex_a interrupt mask %s", n->name);
3281
3282         return ERROR_OK;
3283 }
3284
3285 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3286 {
3287         struct target *target = get_current_target(CMD_CTX);
3288         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3289
3290         static const struct jim_nvp nvp_dacrfixup_modes[] = {
3291                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3292                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3293                 { .name = NULL, .value = -1 },
3294         };
3295         const struct jim_nvp *n;
3296
3297         if (CMD_ARGC > 0) {
3298                 n = jim_nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3299                 if (n->name == NULL)
3300                         return ERROR_COMMAND_SYNTAX_ERROR;
3301                 cortex_a->dacrfixup_mode = n->value;
3302
3303         }
3304
3305         n = jim_nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3306         command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3307
3308         return ERROR_OK;
3309 }
3310
3311 static const struct command_registration cortex_a_exec_command_handlers[] = {
3312         {
3313                 .name = "cache_info",
3314                 .handler = cortex_a_handle_cache_info_command,
3315                 .mode = COMMAND_EXEC,
3316                 .help = "display information about target caches",
3317                 .usage = "",
3318         },
3319         {
3320                 .name = "dbginit",
3321                 .handler = cortex_a_handle_dbginit_command,
3322                 .mode = COMMAND_EXEC,
3323                 .help = "Initialize core debug",
3324                 .usage = "",
3325         },
3326         {
3327                 .name = "maskisr",
3328                 .handler = handle_cortex_a_mask_interrupts_command,
3329                 .mode = COMMAND_ANY,
3330                 .help = "mask cortex_a interrupts",
3331                 .usage = "['on'|'off']",
3332         },
3333         {
3334                 .name = "dacrfixup",
3335                 .handler = handle_cortex_a_dacrfixup_command,
3336                 .mode = COMMAND_ANY,
3337                 .help = "set domain access control (DACR) to all-manager "
3338                         "on memory access",
3339                 .usage = "['on'|'off']",
3340         },
3341         {
3342                 .chain = armv7a_mmu_command_handlers,
3343         },
3344         {
3345                 .chain = smp_command_handlers,
3346         },
3347
3348         COMMAND_REGISTRATION_DONE
3349 };
3350 static const struct command_registration cortex_a_command_handlers[] = {
3351         {
3352                 .chain = arm_command_handlers,
3353         },
3354         {
3355                 .chain = armv7a_command_handlers,
3356         },
3357         {
3358                 .name = "cortex_a",
3359                 .mode = COMMAND_ANY,
3360                 .help = "Cortex-A command group",
3361                 .usage = "",
3362                 .chain = cortex_a_exec_command_handlers,
3363         },
3364         COMMAND_REGISTRATION_DONE
3365 };
3366
3367 struct target_type cortexa_target = {
3368         .name = "cortex_a",
3369
3370         .poll = cortex_a_poll,
3371         .arch_state = armv7a_arch_state,
3372
3373         .halt = cortex_a_halt,
3374         .resume = cortex_a_resume,
3375         .step = cortex_a_step,
3376
3377         .assert_reset = cortex_a_assert_reset,
3378         .deassert_reset = cortex_a_deassert_reset,
3379
3380         /* REVISIT allow exporting VFP3 registers ... */
3381         .get_gdb_arch = arm_get_gdb_arch,
3382         .get_gdb_reg_list = arm_get_gdb_reg_list,
3383
3384         .read_memory = cortex_a_read_memory,
3385         .write_memory = cortex_a_write_memory,
3386
3387         .read_buffer = cortex_a_read_buffer,
3388         .write_buffer = cortex_a_write_buffer,
3389
3390         .checksum_memory = arm_checksum_memory,
3391         .blank_check_memory = arm_blank_check_memory,
3392
3393         .run_algorithm = armv4_5_run_algorithm,
3394
3395         .add_breakpoint = cortex_a_add_breakpoint,
3396         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3397         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3398         .remove_breakpoint = cortex_a_remove_breakpoint,
3399         .add_watchpoint = cortex_a_add_watchpoint,
3400         .remove_watchpoint = cortex_a_remove_watchpoint,
3401
3402         .commands = cortex_a_command_handlers,
3403         .target_create = cortex_a_target_create,
3404         .target_jim_configure = adiv5_jim_configure,
3405         .init_target = cortex_a_init_target,
3406         .examine = cortex_a_examine,
3407         .deinit_target = cortex_a_deinit_target,
3408
3409         .read_phys_memory = cortex_a_read_phys_memory,
3410         .write_phys_memory = cortex_a_write_phys_memory,
3411         .mmu = cortex_a_mmu,
3412         .virt2phys = cortex_a_virt2phys,
3413 };
3414
3415 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3416         {
3417                 .name = "dbginit",
3418                 .handler = cortex_a_handle_dbginit_command,
3419                 .mode = COMMAND_EXEC,
3420                 .help = "Initialize core debug",
3421                 .usage = "",
3422         },
3423         {
3424                 .name = "maskisr",
3425                 .handler = handle_cortex_a_mask_interrupts_command,
3426                 .mode = COMMAND_EXEC,
3427                 .help = "mask cortex_r4 interrupts",
3428                 .usage = "['on'|'off']",
3429         },
3430
3431         COMMAND_REGISTRATION_DONE
3432 };
3433 static const struct command_registration cortex_r4_command_handlers[] = {
3434         {
3435                 .chain = arm_command_handlers,
3436         },
3437         {
3438                 .name = "cortex_r4",
3439                 .mode = COMMAND_ANY,
3440                 .help = "Cortex-R4 command group",
3441                 .usage = "",
3442                 .chain = cortex_r4_exec_command_handlers,
3443         },
3444         COMMAND_REGISTRATION_DONE
3445 };
3446
3447 struct target_type cortexr4_target = {
3448         .name = "cortex_r4",
3449
3450         .poll = cortex_a_poll,
3451         .arch_state = armv7a_arch_state,
3452
3453         .halt = cortex_a_halt,
3454         .resume = cortex_a_resume,
3455         .step = cortex_a_step,
3456
3457         .assert_reset = cortex_a_assert_reset,
3458         .deassert_reset = cortex_a_deassert_reset,
3459
3460         /* REVISIT allow exporting VFP3 registers ... */
3461         .get_gdb_arch = arm_get_gdb_arch,
3462         .get_gdb_reg_list = arm_get_gdb_reg_list,
3463
3464         .read_memory = cortex_a_read_phys_memory,
3465         .write_memory = cortex_a_write_phys_memory,
3466
3467         .checksum_memory = arm_checksum_memory,
3468         .blank_check_memory = arm_blank_check_memory,
3469
3470         .run_algorithm = armv4_5_run_algorithm,
3471
3472         .add_breakpoint = cortex_a_add_breakpoint,
3473         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3474         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3475         .remove_breakpoint = cortex_a_remove_breakpoint,
3476         .add_watchpoint = cortex_a_add_watchpoint,
3477         .remove_watchpoint = cortex_a_remove_watchpoint,
3478
3479         .commands = cortex_r4_command_handlers,
3480         .target_create = cortex_r4_target_create,
3481         .target_jim_configure = adiv5_jim_configure,
3482         .init_target = cortex_a_init_target,
3483         .examine = cortex_a_examine,
3484         .deinit_target = cortex_a_deinit_target,
3485 };