target/cortex_a: use aligned accesses for read/write cpu memory slow
[fw/openocd] / src / target / cortex_a.c
1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   This program is free software; you can redistribute it and/or modify  *
27  *   it under the terms of the GNU General Public License as published by  *
28  *   the Free Software Foundation; either version 2 of the License, or     *
29  *   (at your option) any later version.                                   *
30  *                                                                         *
31  *   This program is distributed in the hope that it will be useful,       *
32  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
33  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
34  *   GNU General Public License for more details.                          *
35  *                                                                         *
36  *   You should have received a copy of the GNU General Public License     *
37  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
38  *                                                                         *
39  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
40  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
41  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
42  *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
43  *                                                                         *
44  ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "jtag/interface.h"
59 #include "transport/transport.h"
60 #include "smp.h"
61 #include <helper/time_support.h>
62
63 static int cortex_a_poll(struct target *target);
64 static int cortex_a_debug_entry(struct target *target);
65 static int cortex_a_restore_context(struct target *target, bool bpwp);
66 static int cortex_a_set_breakpoint(struct target *target,
67         struct breakpoint *breakpoint, uint8_t matchmode);
68 static int cortex_a_set_context_breakpoint(struct target *target,
69         struct breakpoint *breakpoint, uint8_t matchmode);
70 static int cortex_a_set_hybrid_breakpoint(struct target *target,
71         struct breakpoint *breakpoint);
72 static int cortex_a_unset_breakpoint(struct target *target,
73         struct breakpoint *breakpoint);
74 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
75         uint32_t value, uint32_t *dscr);
76 static int cortex_a_mmu(struct target *target, int *enabled);
77 static int cortex_a_mmu_modify(struct target *target, int enable);
78 static int cortex_a_virt2phys(struct target *target,
79         target_addr_t virt, target_addr_t *phys);
80 static int cortex_a_read_cpu_memory(struct target *target,
81         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
82
83
84 /*  restore cp15_control_reg at resume */
85 static int cortex_a_restore_cp15_control_reg(struct target *target)
86 {
87         int retval = ERROR_OK;
88         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
89         struct armv7a_common *armv7a = target_to_armv7a(target);
90
91         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
92                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
93                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
94                 retval = armv7a->arm.mcr(target, 15,
95                                 0, 0,   /* op1, op2 */
96                                 1, 0,   /* CRn, CRm */
97                                 cortex_a->cp15_control_reg);
98         }
99         return retval;
100 }
101
102 /*
103  * Set up ARM core for memory access.
104  * If !phys_access, switch to SVC mode and make sure MMU is on
105  * If phys_access, switch off mmu
106  */
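/*
 * A minimal usage sketch (matching how the memory read/write paths further
 * down this file use it): a call here is paired with cortex_a_post_memaccess()
 * once the access is done, e.g.
 *
 *     cortex_a_prep_memaccess(target, 0);
 *     retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
 *     cortex_a_post_memaccess(target, 0);
 */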
107 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
108 {
109         struct armv7a_common *armv7a = target_to_armv7a(target);
110         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
111         int mmu_enabled = 0;
112
113         if (phys_access == 0) {
114                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
115                 cortex_a_mmu(target, &mmu_enabled);
116                 if (mmu_enabled)
117                         cortex_a_mmu_modify(target, 1);
118                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
119                         /* overwrite DACR to all-manager */
120                         armv7a->arm.mcr(target, 15,
121                                         0, 0, 3, 0,
122                                         0xFFFFFFFF);
123                 }
124         } else {
125                 cortex_a_mmu(target, &mmu_enabled);
126                 if (mmu_enabled)
127                         cortex_a_mmu_modify(target, 0);
128         }
129         return ERROR_OK;
130 }
131
132 /*
133  * Restore ARM core after memory access.
134  * If !phys_access, switch to previous mode
135  * If phys_access, restore MMU setting
136  */
137 static int cortex_a_post_memaccess(struct target *target, int phys_access)
138 {
139         struct armv7a_common *armv7a = target_to_armv7a(target);
140         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
141
142         if (phys_access == 0) {
143                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
144                         /* restore */
145                         armv7a->arm.mcr(target, 15,
146                                         0, 0, 3, 0,
147                                         cortex_a->cp15_dacr_reg);
148                 }
149                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
150         } else {
151                 int mmu_enabled = 0;
152                 cortex_a_mmu(target, &mmu_enabled);
153                 if (mmu_enabled)
154                         cortex_a_mmu_modify(target, 1);
155         }
156         return ERROR_OK;
157 }
158
159
160 /*  modify cp15_control_reg in order to enable or disable mmu for :
161  *  - virt2phys address conversion
162  *  - read or write memory in phys or virt address */
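/*
 * Note: the MMU enable is bit 0 (the M bit) of the CP15 System Control
 * Register, which the MCR below writes as "MCR p15, 0, <Rt>, c1, c0, 0".
 */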
163 static int cortex_a_mmu_modify(struct target *target, int enable)
164 {
165         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
166         struct armv7a_common *armv7a = target_to_armv7a(target);
167         int retval = ERROR_OK;
168         int need_write = 0;
169
170         if (enable) {
171                 /*  refuse to enable the MMU if it was disabled when the target stopped */
172                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
173                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
174                         return ERROR_FAIL;
175                 }
176                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
177                         cortex_a->cp15_control_reg_curr |= 0x1U;
178                         need_write = 1;
179                 }
180         } else {
181                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
182                         cortex_a->cp15_control_reg_curr &= ~0x1U;
183                         need_write = 1;
184                 }
185         }
186
187         if (need_write) {
188                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
189                         enable ? "enable mmu" : "disable mmu",
190                         cortex_a->cp15_control_reg_curr);
191
192                 retval = armv7a->arm.mcr(target, 15,
193                                 0, 0,   /* op1, op2 */
194                                 1, 0,   /* CRn, CRm */
195                                 cortex_a->cp15_control_reg_curr);
196         }
197         return retval;
198 }
199
200 /*
201  * Cortex-A basic debug access, very low level; assumes state is saved
202  */
203 static int cortex_a_init_debug_access(struct target *target)
204 {
205         struct armv7a_common *armv7a = target_to_armv7a(target);
206         uint32_t dscr;
207         int retval;
208
209         /* lock memory-mapped access to debug registers to prevent
210          * software interference */
211         retval = mem_ap_write_u32(armv7a->debug_ap,
212                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
213         if (retval != ERROR_OK)
214                 return retval;
215
216         /* Disable cacheline fills and force cache write-through in debug state */
217         retval = mem_ap_write_u32(armv7a->debug_ap,
218                         armv7a->debug_base + CPUDBG_DSCCR, 0);
219         if (retval != ERROR_OK)
220                 return retval;
221
222         /* Disable TLB lookup and refill/eviction in debug state */
223         retval = mem_ap_write_u32(armv7a->debug_ap,
224                         armv7a->debug_base + CPUDBG_DSMCR, 0);
225         if (retval != ERROR_OK)
226                 return retval;
227
228         retval = dap_run(armv7a->debug_ap->dap);
229         if (retval != ERROR_OK)
230                 return retval;
231
232         /* Enabling of instruction execution in debug mode is done in debug_entry code */
233
234         /* Resync breakpoint registers */
235
236         /* Enable halt for breakpoint, watchpoint and vector catch */
237         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
238                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
239         if (retval != ERROR_OK)
240                 return retval;
241         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
242                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
243         if (retval != ERROR_OK)
244                 return retval;
245
246         /* Since this is likely called from init or reset, update target state information */
247         return cortex_a_poll(target);
248 }
249
250 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
251 {
252         /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
253          * Writes the final value of DSCR into *dscr. Pass force=true to always
254          * read DSCR at least once. */
255         struct armv7a_common *armv7a = target_to_armv7a(target);
256         int retval;
257
258         if (force) {
259                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
260                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
261                 if (retval != ERROR_OK) {
262                         LOG_ERROR("Could not read DSCR register");
263                         return retval;
264                 }
265         }
266
267         retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
268         if (retval != ERROR_OK)
269                 LOG_ERROR("Error waiting for InstrCompl=1");
270         return retval;
271 }
272
273 /* To reduce needless round-trips, pass in a pointer to the current
274  * DSCR value.  Initialize it to zero if you just need to know the
275  * value on return from this function; or DSCR_INSTR_COMP if you
276  * happen to know that no instruction is pending.
277  */
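/*
 * Typical call pattern (a sketch of what cortex_a_instr_write_data_r0()
 * below does): chain two opcodes while carrying the DSCR value along so it
 * is only re-read when needed.
 *
 *     uint32_t dscr = DSCR_INSTR_COMP;
 *     retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
 *     if (retval == ERROR_OK)
 *         retval = cortex_a_exec_opcode(target, opcode, &dscr);
 */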
278 static int cortex_a_exec_opcode(struct target *target,
279         uint32_t opcode, uint32_t *dscr_p)
280 {
281         uint32_t dscr;
282         int retval;
283         struct armv7a_common *armv7a = target_to_armv7a(target);
284
285         dscr = dscr_p ? *dscr_p : 0;
286
287         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
288
289         /* Wait for InstrCompl bit to be set */
290         retval = cortex_a_wait_instrcmpl(target, &dscr, false);
291         if (retval != ERROR_OK)
292                 return retval;
293
294         retval = mem_ap_write_u32(armv7a->debug_ap,
295                         armv7a->debug_base + CPUDBG_ITR, opcode);
296         if (retval != ERROR_OK)
297                 return retval;
298
299         /* Wait for InstrCompl bit to be set */
300         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
301         if (retval != ERROR_OK) {
302                 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
303                 return retval;
304         }
305
306         if (dscr_p)
307                 *dscr_p = dscr;
308
309         return retval;
310 }
311
312 /* Write to memory mapped registers directly with no cache or mmu handling */
313 static int cortex_a_dap_write_memap_register_u32(struct target *target,
314         uint32_t address,
315         uint32_t value)
316 {
317         int retval;
318         struct armv7a_common *armv7a = target_to_armv7a(target);
319
320         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
321
322         return retval;
323 }
324
325 /*
326  * Cortex-A implementation of Debug Programmer's Model
327  *
328  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
329  * so there's no need to poll for it before executing an instruction.
330  *
331  * NOTE that in several of these cases the "stall" mode might be useful.
332  * It'd let us queue a few operations together... prepare/finish might
333  * be the places to enable/disable that mode.
334  */
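/*
 * Host<->core data transfers go through the DCC: the debugger writes DTRRX
 * and the core picks it up with "MRC p14, 0, <Rt>, c0, c5, 0"; the core
 * pushes results back with the matching MCR and the debugger reads them
 * from DTRTX (see cortex_a_write_dcc()/cortex_a_read_dcc() below).
 */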
335
336 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
337 {
338         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
339 }
340
341 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
342 {
343         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
344         return mem_ap_write_u32(a->armv7a_common.debug_ap,
345                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
346 }
347
348 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
349         uint32_t *dscr_p)
350 {
351         uint32_t dscr = DSCR_INSTR_COMP;
352         int retval;
353
354         if (dscr_p)
355                 dscr = *dscr_p;
356
357         /* Wait for DTRRXfull */
358         retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
359                         DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
360         if (retval != ERROR_OK) {
361                 LOG_ERROR("Error waiting for read dcc");
362                 return retval;
363         }
364
365         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
366                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
367         if (retval != ERROR_OK)
368                 return retval;
369         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
370
371         if (dscr_p)
372                 *dscr_p = dscr;
373
374         return retval;
375 }
376
377 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
378 {
379         struct cortex_a_common *a = dpm_to_a(dpm);
380         uint32_t dscr;
381         int retval;
382
383         /* set up invariant:  INSTR_COMP is set after every DPM operation */
384         retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
385         if (retval != ERROR_OK) {
386                 LOG_ERROR("Error waiting for dpm prepare");
387                 return retval;
388         }
389
390         /* this "should never happen" ... */
391         if (dscr & DSCR_DTR_RX_FULL) {
392                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
393                 /* Clear DCCRX */
394                 retval = cortex_a_exec_opcode(
395                                 a->armv7a_common.arm.target,
396                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
397                                 &dscr);
398                 if (retval != ERROR_OK)
399                         return retval;
400         }
401
402         return retval;
403 }
404
405 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
406 {
407         /* REVISIT what could be done here? */
408         return ERROR_OK;
409 }
410
411 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
412         uint32_t opcode, uint32_t data)
413 {
414         struct cortex_a_common *a = dpm_to_a(dpm);
415         int retval;
416         uint32_t dscr = DSCR_INSTR_COMP;
417
418         retval = cortex_a_write_dcc(a, data);
419         if (retval != ERROR_OK)
420                 return retval;
421
422         return cortex_a_exec_opcode(
423                         a->armv7a_common.arm.target,
424                         opcode,
425                         &dscr);
426 }
427
428 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
429         uint32_t opcode, uint32_t data)
430 {
431         struct cortex_a_common *a = dpm_to_a(dpm);
432         uint32_t dscr = DSCR_INSTR_COMP;
433         int retval;
434
435         retval = cortex_a_write_dcc(a, data);
436         if (retval != ERROR_OK)
437                 return retval;
438
439         /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
440         retval = cortex_a_exec_opcode(
441                         a->armv7a_common.arm.target,
442                         ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
443                         &dscr);
444         if (retval != ERROR_OK)
445                 return retval;
446
447         /* then the opcode, taking data from R0 */
448         retval = cortex_a_exec_opcode(
449                         a->armv7a_common.arm.target,
450                         opcode,
451                         &dscr);
452
453         return retval;
454 }
455
456 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
457 {
458         struct target *target = dpm->arm->target;
459         uint32_t dscr = DSCR_INSTR_COMP;
460
461         /* "Prefetch flush" after modifying execution status in CPSR */
462         return cortex_a_exec_opcode(target,
463                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
464                         &dscr);
465 }
466
467 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
468         uint32_t opcode, uint32_t *data)
469 {
470         struct cortex_a_common *a = dpm_to_a(dpm);
471         int retval;
472         uint32_t dscr = DSCR_INSTR_COMP;
473
474         /* the opcode, writing data to DCC */
475         retval = cortex_a_exec_opcode(
476                         a->armv7a_common.arm.target,
477                         opcode,
478                         &dscr);
479         if (retval != ERROR_OK)
480                 return retval;
481
482         return cortex_a_read_dcc(a, data, &dscr);
483 }
484
485
486 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
487         uint32_t opcode, uint32_t *data)
488 {
489         struct cortex_a_common *a = dpm_to_a(dpm);
490         uint32_t dscr = DSCR_INSTR_COMP;
491         int retval;
492
493         /* the opcode, writing data to R0 */
494         retval = cortex_a_exec_opcode(
495                         a->armv7a_common.arm.target,
496                         opcode,
497                         &dscr);
498         if (retval != ERROR_OK)
499                 return retval;
500
501         /* write R0 to DCC */
502         retval = cortex_a_exec_opcode(
503                         a->armv7a_common.arm.target,
504                         ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
505                         &dscr);
506         if (retval != ERROR_OK)
507                 return retval;
508
509         return cortex_a_read_dcc(a, data, &dscr);
510 }
511
512 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
513         uint32_t addr, uint32_t control)
514 {
515         struct cortex_a_common *a = dpm_to_a(dpm);
516         uint32_t vr = a->armv7a_common.debug_base;
517         uint32_t cr = a->armv7a_common.debug_base;
518         int retval;
519
520         switch (index_t) {
521                 case 0 ... 15:  /* breakpoints */
522                         vr += CPUDBG_BVR_BASE;
523                         cr += CPUDBG_BCR_BASE;
524                         break;
525                 case 16 ... 31: /* watchpoints */
526                         vr += CPUDBG_WVR_BASE;
527                         cr += CPUDBG_WCR_BASE;
528                         index_t -= 16;
529                         break;
530                 default:
531                         return ERROR_FAIL;
532         }
533         vr += 4 * index_t;
534         cr += 4 * index_t;
535
536         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
537                 (unsigned) vr, (unsigned) cr);
538
539         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
540                         vr, addr);
541         if (retval != ERROR_OK)
542                 return retval;
543         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
544                         cr, control);
545         return retval;
546 }
547
548 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
549 {
550         struct cortex_a_common *a = dpm_to_a(dpm);
551         uint32_t cr;
552
553         switch (index_t) {
554                 case 0 ... 15:
555                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
556                         break;
557                 case 16 ... 31:
558                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
559                         index_t -= 16;
560                         break;
561                 default:
562                         return ERROR_FAIL;
563         }
564         cr += 4 * index_t;
565
566         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
567
568         /* clear control register */
569         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
570 }
571
572 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
573 {
574         struct arm_dpm *dpm = &a->armv7a_common.dpm;
575         int retval;
576
577         dpm->arm = &a->armv7a_common.arm;
578         dpm->didr = didr;
579
580         dpm->prepare = cortex_a_dpm_prepare;
581         dpm->finish = cortex_a_dpm_finish;
582
583         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
584         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
585         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
586
587         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
588         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
589
590         dpm->bpwp_enable = cortex_a_bpwp_enable;
591         dpm->bpwp_disable = cortex_a_bpwp_disable;
592
593         retval = arm_dpm_setup(dpm);
594         if (retval == ERROR_OK)
595                 retval = arm_dpm_initialize(dpm);
596
597         return retval;
598 }
599 static struct target *get_cortex_a(struct target *target, int32_t coreid)
600 {
601         struct target_list *head;
602         struct target *curr;
603
604         head = target->head;
605         while (head != (struct target_list *)NULL) {
606                 curr = head->target;
607                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
608                         return curr;
609                 head = head->next;
610         }
611         return target;
612 }
613 static int cortex_a_halt(struct target *target);
614
615 static int cortex_a_halt_smp(struct target *target)
616 {
617         int retval = 0;
618         struct target_list *head;
619         struct target *curr;
620         head = target->head;
621         while (head != (struct target_list *)NULL) {
622                 curr = head->target;
623                 if ((curr != target) && (curr->state != TARGET_HALTED)
624                         && target_was_examined(curr))
625                         retval += cortex_a_halt(curr);
626                 head = head->next;
627         }
628         return retval;
629 }
630
631 static int update_halt_gdb(struct target *target)
632 {
633         struct target *gdb_target = NULL;
634         struct target_list *head;
635         struct target *curr;
636         int retval = 0;
637
638         if (target->gdb_service && target->gdb_service->core[0] == -1) {
639                 target->gdb_service->target = target;
640                 target->gdb_service->core[0] = target->coreid;
641                 retval += cortex_a_halt_smp(target);
642         }
643
644         if (target->gdb_service)
645                 gdb_target = target->gdb_service->target;
646
647         foreach_smp_target(head, target->head) {
648                 curr = head->target;
649                 /* skip calling context */
650                 if (curr == target)
651                         continue;
652                 if (!target_was_examined(curr))
653                         continue;
654                 /* skip targets that were already halted */
655                 if (curr->state == TARGET_HALTED)
656                         continue;
657                 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
658                 if (curr == gdb_target)
659                         continue;
660
661                 /* avoid recursion in cortex_a_poll() */
662                 curr->smp = 0;
663                 cortex_a_poll(curr);
664                 curr->smp = 1;
665         }
666
667         /* after all targets were updated, poll the gdb serving target */
668         if (gdb_target != NULL && gdb_target != target)
669                 cortex_a_poll(gdb_target);
670         return retval;
671 }
672
673 /*
674  * Cortex-A Run control
675  */
676
677 static int cortex_a_poll(struct target *target)
678 {
679         int retval = ERROR_OK;
680         uint32_t dscr;
681         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
682         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
683         enum target_state prev_target_state = target->state;
684         /*  toggling to another core is done by gdb as follows: */
685         /*  maint packet J core_id */
686         /*  continue */
687         /*  the next poll triggers a halt event sent to gdb */
688         if ((target->state == TARGET_HALTED) && (target->smp) &&
689                 (target->gdb_service) &&
690                 (target->gdb_service->target == NULL)) {
691                 target->gdb_service->target =
692                         get_cortex_a(target, target->gdb_service->core[1]);
693                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
694                 return retval;
695         }
696         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
697                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
698         if (retval != ERROR_OK)
699                 return retval;
700         cortex_a->cpudbg_dscr = dscr;
701
702         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
703                 if (prev_target_state != TARGET_HALTED) {
704                         /* We have a halting debug event */
705                         LOG_DEBUG("Target halted");
706                         target->state = TARGET_HALTED;
707
708                         retval = cortex_a_debug_entry(target);
709                         if (retval != ERROR_OK)
710                                 return retval;
711
712                         if (target->smp) {
713                                 retval = update_halt_gdb(target);
714                                 if (retval != ERROR_OK)
715                                         return retval;
716                         }
717
718                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
719                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
720                         } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
721                                 if (arm_semihosting(target, &retval) != 0)
722                                         return retval;
723
724                                 target_call_event_callbacks(target,
725                                         TARGET_EVENT_HALTED);
726                         }
727                 }
728         } else
729                 target->state = TARGET_RUNNING;
730
731         return retval;
732 }
733
734 static int cortex_a_halt(struct target *target)
735 {
736         int retval;
737         uint32_t dscr;
738         struct armv7a_common *armv7a = target_to_armv7a(target);
739
740         /*
741          * Tell the core to be halted by writing DRCR with 0x1
742          * and then wait for the core to be halted.
743          */
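        /* DRCR_HALT is the halt request bit (bit 0) of DRCR; completion shows
         * up as the "core halted" flag in DSCR, polled for just below. */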
744         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
745                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
746         if (retval != ERROR_OK)
747                 return retval;
748
749         dscr = 0; /* force read of dscr */
750         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
751                         DSCR_CORE_HALTED, &dscr);
752         if (retval != ERROR_OK) {
753                 LOG_ERROR("Error waiting for halt");
754                 return retval;
755         }
756
757         target->debug_reason = DBG_REASON_DBGRQ;
758
759         return ERROR_OK;
760 }
761
762 static int cortex_a_internal_restore(struct target *target, int current,
763         target_addr_t *address, int handle_breakpoints, int debug_execution)
764 {
765         struct armv7a_common *armv7a = target_to_armv7a(target);
766         struct arm *arm = &armv7a->arm;
767         int retval;
768         uint32_t resume_pc;
769
770         if (!debug_execution)
771                 target_free_all_working_areas(target);
772
773 #if 0
774         if (debug_execution) {
775                 /* Disable interrupts */
776                 /* We disable interrupts in the PRIMASK register instead of
777                  * masking with C_MASKINTS,
778                  * This is probably the same issue as Cortex-M3 Errata 377493:
779                  * C_MASKINTS in parallel with disabled interrupts can cause
780                  * local faults to not be taken. */
781                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
782                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
783                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
784
785                 /* Make sure we are in Thumb mode */
786                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
787                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
788                         32) | (1 << 24));
789                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
790                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
791         }
792 #endif
793
794         /* current = 1: continue on current pc, otherwise continue at <address> */
795         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
796         if (!current)
797                 resume_pc = *address;
798         else
799                 *address = resume_pc;
800
801         /* Make sure that the Armv7 gdb thumb fixups do not
802          * kill the return address
803          */
804         switch (arm->core_state) {
805                 case ARM_STATE_ARM:
806                         resume_pc &= 0xFFFFFFFC;
807                         break;
808                 case ARM_STATE_THUMB:
809                 case ARM_STATE_THUMB_EE:
810                         /* When the return address is loaded into PC
811                          * bit 0 must be 1 to stay in Thumb state
812                          */
813                         resume_pc |= 0x1;
814                         break;
815                 case ARM_STATE_JAZELLE:
816                         LOG_ERROR("How do I resume into Jazelle state??");
817                         return ERROR_FAIL;
818                 case ARM_STATE_AARCH64:
819                         LOG_ERROR("Shoudn't be in AARCH64 state");
820                         return ERROR_FAIL;
821         }
822         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
823         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
824         arm->pc->dirty = true;
825         arm->pc->valid = true;
826
827         /* restore dpm_mode at system halt */
828         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
829         /* call it now, before restoring context, because it uses cpu
830          * register r0 for restoring the cp15 control register */
831         retval = cortex_a_restore_cp15_control_reg(target);
832         if (retval != ERROR_OK)
833                 return retval;
834         retval = cortex_a_restore_context(target, handle_breakpoints);
835         if (retval != ERROR_OK)
836                 return retval;
837         target->debug_reason = DBG_REASON_NOTHALTED;
838         target->state = TARGET_RUNNING;
839
840         /* registers are now invalid */
841         register_cache_invalidate(arm->core_cache);
842
843 #if 0
844         /* the front-end may request us not to handle breakpoints */
845         if (handle_breakpoints) {
846                 /* Single step past breakpoint at current address */
847                 breakpoint = breakpoint_find(target, resume_pc);
848                 if (breakpoint) {
849                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
850                         cortex_m3_unset_breakpoint(target, breakpoint);
851                         cortex_m3_single_step_core(target);
852                         cortex_m3_set_breakpoint(target, breakpoint);
853                 }
854         }
855
856 #endif
857         return retval;
858 }
859
860 static int cortex_a_internal_restart(struct target *target)
861 {
862         struct armv7a_common *armv7a = target_to_armv7a(target);
863         struct arm *arm = &armv7a->arm;
864         int retval;
865         uint32_t dscr;
866         /*
867          * Restart core and wait for it to be started.  Clear ITRen and sticky
868          * exception flags: see ARMv7 ARM, C5.9.
869          *
870          * REVISIT: for single stepping, we probably want to
871          * disable IRQs by default, with optional override...
872          */
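        /* In short: sanity-check InstrCompl, clear ITRen in DSCR, issue the
         * restart + clear-sticky-exceptions request via DRCR, then poll DSCR
         * until the "core restarted" flag appears. */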
873
874         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
875                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
876         if (retval != ERROR_OK)
877                 return retval;
878
879         if ((dscr & DSCR_INSTR_COMP) == 0)
880                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
881
882         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
883                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
884         if (retval != ERROR_OK)
885                 return retval;
886
887         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
888                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
889                         DRCR_CLEAR_EXCEPTIONS);
890         if (retval != ERROR_OK)
891                 return retval;
892
893         dscr = 0; /* force read of dscr */
894         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
895                         DSCR_CORE_RESTARTED, &dscr);
896         if (retval != ERROR_OK) {
897                 LOG_ERROR("Error waiting for resume");
898                 return retval;
899         }
900
901         target->debug_reason = DBG_REASON_NOTHALTED;
902         target->state = TARGET_RUNNING;
903
904         /* registers are now invalid */
905         register_cache_invalidate(arm->core_cache);
906
907         return ERROR_OK;
908 }
909
910 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
911 {
912         int retval = 0;
913         struct target_list *head;
914         struct target *curr;
915         target_addr_t address;
916         head = target->head;
917         while (head != (struct target_list *)NULL) {
918                 curr = head->target;
919                 if ((curr != target) && (curr->state != TARGET_RUNNING)
920                         && target_was_examined(curr)) {
921                         /*  resume at current address, not in step mode */
922                         retval += cortex_a_internal_restore(curr, 1, &address,
923                                         handle_breakpoints, 0);
924                         retval += cortex_a_internal_restart(curr);
925                 }
926                 head = head->next;
927
928         }
929         return retval;
930 }
931
932 static int cortex_a_resume(struct target *target, int current,
933         target_addr_t address, int handle_breakpoints, int debug_execution)
934 {
935         int retval = 0;
936         /* dummy resume for smp toggle in order to reduce gdb impact  */
937         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
938                 /*   simulate a start and halt of target */
939                 target->gdb_service->target = NULL;
940                 target->gdb_service->core[0] = target->gdb_service->core[1];
941                 /*  fake resume; at the next poll we serve target core[1], see poll */
942                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
943                 return 0;
944         }
945         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
946         if (target->smp) {
947                 target->gdb_service->core[0] = -1;
948                 retval = cortex_a_restore_smp(target, handle_breakpoints);
949                 if (retval != ERROR_OK)
950                         return retval;
951         }
952         cortex_a_internal_restart(target);
953
954         if (!debug_execution) {
955                 target->state = TARGET_RUNNING;
956                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
957                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
958         } else {
959                 target->state = TARGET_DEBUG_RUNNING;
960                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
961                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
962         }
963
964         return ERROR_OK;
965 }
966
967 static int cortex_a_debug_entry(struct target *target)
968 {
969         uint32_t dscr;
970         int retval = ERROR_OK;
971         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
972         struct armv7a_common *armv7a = target_to_armv7a(target);
973         struct arm *arm = &armv7a->arm;
974
975         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
976
977         /* REVISIT surely we should not re-read DSCR !! */
978         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
979                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
980         if (retval != ERROR_OK)
981                 return retval;
982
983         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
984          * imprecise data aborts get discarded by issuing a Data
985          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
986          */
987
988         /* Enable the ITR execution once we are in debug mode */
989         dscr |= DSCR_ITR_EN;
990         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
991                         armv7a->debug_base + CPUDBG_DSCR, dscr);
992         if (retval != ERROR_OK)
993                 return retval;
994
995         /* Examine debug reason */
996         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
997
998         /* save address of instruction that triggered the watchpoint? */
999         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1000                 uint32_t wfar;
1001
1002                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1003                                 armv7a->debug_base + CPUDBG_WFAR,
1004                                 &wfar);
1005                 if (retval != ERROR_OK)
1006                         return retval;
1007                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1008         }
1009
1010         /* First load register accessible through core debug port */
1011         retval = arm_dpm_read_current_registers(&armv7a->dpm);
1012         if (retval != ERROR_OK)
1013                 return retval;
1014
1015         if (arm->spsr) {
1016                 /* read SPSR */
1017                 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1018                 if (retval != ERROR_OK)
1019                         return retval;
1020         }
1021
1022 #if 0
1023 /* TODO, Move this */
1024         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1025         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1026         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1027
1028         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1029         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1030
1031         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1032         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1033 #endif
1034
1035         /* Are we in an exception handler */
1036 /*      armv4_5->exception_number = 0; */
1037         if (armv7a->post_debug_entry) {
1038                 retval = armv7a->post_debug_entry(target);
1039                 if (retval != ERROR_OK)
1040                         return retval;
1041         }
1042
1043         return retval;
1044 }
1045
1046 static int cortex_a_post_debug_entry(struct target *target)
1047 {
1048         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1049         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1050         int retval;
1051
1052         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1053         retval = armv7a->arm.mrc(target, 15,
1054                         0, 0,   /* op1, op2 */
1055                         1, 0,   /* CRn, CRm */
1056                         &cortex_a->cp15_control_reg);
1057         if (retval != ERROR_OK)
1058                 return retval;
1059         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1060         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1061
1062         if (!armv7a->is_armv7r)
1063                 armv7a_read_ttbcr(target);
1064
1065         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1066                 armv7a_identify_cache(target);
1067
1068         if (armv7a->is_armv7r) {
1069                 armv7a->armv7a_mmu.mmu_enabled = 0;
1070         } else {
1071                 armv7a->armv7a_mmu.mmu_enabled =
1072                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1073         }
1074         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1075                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1076         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1077                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1078         cortex_a->curr_mode = armv7a->arm.core_mode;
1079
1080         /* switch to SVC mode to read DACR */
1081         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1082         armv7a->arm.mrc(target, 15,
1083                         0, 0, 3, 0,
1084                         &cortex_a->cp15_dacr_reg);
1085
1086         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1087                         cortex_a->cp15_dacr_reg);
1088
1089         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1090         return ERROR_OK;
1091 }
1092
1093 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1094 {
1095         struct armv7a_common *armv7a = target_to_armv7a(target);
1096         uint32_t dscr;
1097
1098         /* Read DSCR */
1099         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1100                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1101         if (ERROR_OK != retval)
1102                 return retval;
1103
1104         /* clear bitfield */
1105         dscr &= ~bit_mask;
1106         /* put new value */
1107         dscr |= value & bit_mask;
1108
1109         /* write new DSCR */
1110         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1111                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1112         return retval;
1113 }
1114
1115 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1116         int handle_breakpoints)
1117 {
1118         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1119         struct armv7a_common *armv7a = target_to_armv7a(target);
1120         struct arm *arm = &armv7a->arm;
1121         struct breakpoint *breakpoint = NULL;
1122         struct breakpoint stepbreakpoint;
1123         struct reg *r;
1124         int retval;
1125
1126         if (target->state != TARGET_HALTED) {
1127                 LOG_WARNING("target not halted");
1128                 return ERROR_TARGET_NOT_HALTED;
1129         }
1130
1131         /* current = 1: continue on current pc, otherwise continue at <address> */
1132         r = arm->pc;
1133         if (!current)
1134                 buf_set_u32(r->value, 0, 32, address);
1135         else
1136                 address = buf_get_u32(r->value, 0, 32);
1137
1138         /* The front-end may request us not to handle breakpoints.
1139          * But since Cortex-A uses breakpoint for single step,
1140          * we MUST handle breakpoints.
1141          */
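        /* Single step is implemented with a hardware breakpoint programmed in
         * address-mismatch mode (matchmode 0x04, see below) on the address
         * being stepped from: the core halts again as soon as it fetches from
         * any other address, normally after executing a single instruction. */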
1142         handle_breakpoints = 1;
1143         if (handle_breakpoints) {
1144                 breakpoint = breakpoint_find(target, address);
1145                 if (breakpoint)
1146                         cortex_a_unset_breakpoint(target, breakpoint);
1147         }
1148
1149         /* Setup single step breakpoint */
1150         stepbreakpoint.address = address;
1151         stepbreakpoint.asid = 0;
1152         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1153                 ? 2 : 4;
1154         stepbreakpoint.type = BKPT_HARD;
1155         stepbreakpoint.set = 0;
1156
1157         /* Disable interrupts during single step if requested */
1158         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1159                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1160                 if (ERROR_OK != retval)
1161                         return retval;
1162         }
1163
1164         /* Break on IVA mismatch */
1165         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1166
1167         target->debug_reason = DBG_REASON_SINGLESTEP;
1168
1169         retval = cortex_a_resume(target, 1, address, 0, 0);
1170         if (retval != ERROR_OK)
1171                 return retval;
1172
1173         int64_t then = timeval_ms();
1174         while (target->state != TARGET_HALTED) {
1175                 retval = cortex_a_poll(target);
1176                 if (retval != ERROR_OK)
1177                         return retval;
1178                 if (target->state == TARGET_HALTED)
1179                         break;
1180                 if (timeval_ms() > then + 1000) {
1181                         LOG_ERROR("timeout waiting for target halt");
1182                         return ERROR_FAIL;
1183                 }
1184         }
1185
1186         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1187
1188         /* Re-enable interrupts if they were disabled */
1189         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1190                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1191                 if (ERROR_OK != retval)
1192                         return retval;
1193         }
1194
1195
1196         target->debug_reason = DBG_REASON_BREAKPOINT;
1197
1198         if (breakpoint)
1199                 cortex_a_set_breakpoint(target, breakpoint, 0);
1200
1201         if (target->state == TARGET_HALTED)
1202                 LOG_DEBUG("target stepped");
1203
1204         return ERROR_OK;
1205 }
1206
1207 static int cortex_a_restore_context(struct target *target, bool bpwp)
1208 {
1209         struct armv7a_common *armv7a = target_to_armv7a(target);
1210
1211         LOG_DEBUG(" ");
1212
1213         if (armv7a->pre_restore_context)
1214                 armv7a->pre_restore_context(target);
1215
1216         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1217 }
1218
1219 /*
1220  * Cortex-A Breakpoint and watchpoint functions
1221  */
1222
1223 /* Setup hardware Breakpoint Register Pair */
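/*
 * The control word assembled below follows the ARMv7 debug BCR layout:
 * the matchmode goes into the breakpoint-type field starting at bit 20,
 * the byte address select into bits [8:5], the privileged mode control
 * into bits [2:1] (0b11 = match in any mode) and bit 0 enables the BRP.
 */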
1224 static int cortex_a_set_breakpoint(struct target *target,
1225         struct breakpoint *breakpoint, uint8_t matchmode)
1226 {
1227         int retval;
1228         int brp_i = 0;
1229         uint32_t control;
1230         uint8_t byte_addr_select = 0x0F;
1231         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1232         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1233         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1234
1235         if (breakpoint->set) {
1236                 LOG_WARNING("breakpoint already set");
1237                 return ERROR_OK;
1238         }
1239
1240         if (breakpoint->type == BKPT_HARD) {
1241                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1242                         brp_i++;
1243                 if (brp_i >= cortex_a->brp_num) {
1244                         LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1245                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1246                 }
1247                 breakpoint->set = brp_i + 1;
1248                 if (breakpoint->length == 2)
1249                         byte_addr_select = (3 << (breakpoint->address & 0x02));
1250                 control = ((matchmode & 0x7) << 20)
1251                         | (byte_addr_select << 5)
1252                         | (3 << 1) | 1;
1253                 brp_list[brp_i].used = 1;
1254                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1255                 brp_list[brp_i].control = control;
1256                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1257                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1258                                 brp_list[brp_i].value);
1259                 if (retval != ERROR_OK)
1260                         return retval;
1261                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1262                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1263                                 brp_list[brp_i].control);
1264                 if (retval != ERROR_OK)
1265                         return retval;
1266                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1267                         brp_list[brp_i].control,
1268                         brp_list[brp_i].value);
1269         } else if (breakpoint->type == BKPT_SOFT) {
1270                 uint8_t code[4];
1271                 /* length == 2: Thumb breakpoint */
1272                 if (breakpoint->length == 2)
1273                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1274                 else
1275                 /* length == 3: Thumb-2 breakpoint, actual encoding is
1276                  * a regular Thumb BKPT instruction but we replace a
1277                  * 32bit Thumb-2 instruction, so fix-up the breakpoint
1278                  * length
1279                  */
1280                 if (breakpoint->length == 3) {
1281                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1282                         breakpoint->length = 4;
1283                 } else
1284                         /* length == 4, normal ARM breakpoint */
1285                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1286
1287                 retval = target_read_memory(target,
1288                                 breakpoint->address & 0xFFFFFFFE,
1289                                 breakpoint->length, 1,
1290                                 breakpoint->orig_instr);
1291                 if (retval != ERROR_OK)
1292                         return retval;
1293
1294                 /* make sure data cache is cleaned & invalidated down to PoC */
1295                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1296                         armv7a_cache_flush_virt(target, breakpoint->address,
1297                                                 breakpoint->length);
1298                 }
1299
1300                 retval = target_write_memory(target,
1301                                 breakpoint->address & 0xFFFFFFFE,
1302                                 breakpoint->length, 1, code);
1303                 if (retval != ERROR_OK)
1304                         return retval;
1305
1306                 /* update i-cache at breakpoint location */
1307                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1308                                         breakpoint->length);
1309                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1310                                                  breakpoint->length);
1311
1312                 breakpoint->set = 0x11; /* Any nice value but 0 */
1313         }
1314
1315         return ERROR_OK;
1316 }
1317
1318 static int cortex_a_set_context_breakpoint(struct target *target,
1319         struct breakpoint *breakpoint, uint8_t matchmode)
1320 {
1321         int retval = ERROR_FAIL;
1322         int brp_i = 0;
1323         uint32_t control;
1324         uint8_t byte_addr_select = 0x0F;
1325         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1326         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1327         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1328
1329         if (breakpoint->set) {
1330                 LOG_WARNING("breakpoint already set");
1331                 return retval;
1332         }
1333         /*check available context BRPs*/
1334         while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1335                 (brp_list[brp_i].type != BRP_CONTEXT)))
1336                 brp_i++;
1337
1338         if (brp_i >= cortex_a->brp_num) {
1339                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1340                 return ERROR_FAIL;
1341         }
1342
1343         breakpoint->set = brp_i + 1;
1344         control = ((matchmode & 0x7) << 20)
1345                 | (byte_addr_select << 5)
1346                 | (3 << 1) | 1;
1347         brp_list[brp_i].used = 1;
1348         brp_list[brp_i].value = (breakpoint->asid);
1349         brp_list[brp_i].control = control;
1350         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1351                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1352                         brp_list[brp_i].value);
1353         if (retval != ERROR_OK)
1354                 return retval;
1355         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1356                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1357                         brp_list[brp_i].control);
1358         if (retval != ERROR_OK)
1359                 return retval;
1360         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1361                 brp_list[brp_i].control,
1362                 brp_list[brp_i].value);
1363         return ERROR_OK;
1364
1365 }
1366
1367 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1368 {
1369         int retval = ERROR_FAIL;
1370         int brp_1 = 0;  /* holds the contextID pair */
1371         int brp_2 = 0;  /* holds the IVA pair */
1372         uint32_t control_CTX, control_IVA;
1373         uint8_t CTX_byte_addr_select = 0x0F;
1374         uint8_t IVA_byte_addr_select = 0x0F;
1375         uint8_t CTX_matchmode = 0x03;
1376         uint8_t IVA_matchmode = 0x01;
1377         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1378         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1379         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1380
1381         if (breakpoint->set) {
1382                 LOG_WARNING("breakpoint already set");
1383                 return retval;
1384         }
1385         /* check available context BRPs */
1386         while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1387                 (brp_list[brp_1].type != BRP_CONTEXT)))
1388                 brp_1++;
1389
1390         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1391         if (brp_1 >= cortex_a->brp_num) {
1392                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1393                 return ERROR_FAIL;
1394         }
1395
1396         while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1397                 (brp_list[brp_2].type != BRP_NORMAL)))
1398                 brp_2++;
1399
1400         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1401         if (brp_2 >= cortex_a->brp_num) {
1402                 LOG_ERROR("Cannot find free Breakpoint Register Pair");
1403                 return ERROR_FAIL;
1404         }
1405
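             /* A hybrid breakpoint links two BRPs: the context-ID BRP is set up
              * for a linked context ID match and the IVA BRP for a linked
              * address match, with the IVA control word's linked-BRP field
              * pointing back at the context-ID BRP, so the core only halts when
              * both the address and the ASID match. */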
1406         breakpoint->set = brp_1 + 1;
1407         breakpoint->linked_BRP = brp_2;
1408         control_CTX = ((CTX_matchmode & 0x7) << 20)
1409                 | (brp_2 << 16)
1410                 | (0 << 14)
1411                 | (CTX_byte_addr_select << 5)
1412                 | (3 << 1) | 1;
1413         brp_list[brp_1].used = 1;
1414         brp_list[brp_1].value = (breakpoint->asid);
1415         brp_list[brp_1].control = control_CTX;
1416         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1417                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1418                         brp_list[brp_1].value);
1419         if (retval != ERROR_OK)
1420                 return retval;
1421         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1422                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1423                         brp_list[brp_1].control);
1424         if (retval != ERROR_OK)
1425                 return retval;
1426
1427         control_IVA = ((IVA_matchmode & 0x7) << 20)
1428                 | (brp_1 << 16)
1429                 | (IVA_byte_addr_select << 5)
1430                 | (3 << 1) | 1;
1431         brp_list[brp_2].used = 1;
1432         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1433         brp_list[brp_2].control = control_IVA;
1434         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1435                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1436                         brp_list[brp_2].value);
1437         if (retval != ERROR_OK)
1438                 return retval;
1439         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1440                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1441                         brp_list[brp_2].control);
1442         if (retval != ERROR_OK)
1443                 return retval;
1444
1445         return ERROR_OK;
1446 }
1447
1448 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1449 {
1450         int retval;
1451         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1452         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1453         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1454
1455         if (!breakpoint->set) {
1456                 LOG_WARNING("breakpoint not set");
1457                 return ERROR_OK;
1458         }
1459
1460         if (breakpoint->type == BKPT_HARD) {
1461                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1462                         int brp_i = breakpoint->set - 1;
1463                         int brp_j = breakpoint->linked_BRP;
1464                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1465                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1466                                 return ERROR_OK;
1467                         }
1468                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1469                                 brp_list[brp_i].control, brp_list[brp_i].value);
1470                         brp_list[brp_i].used = 0;
1471                         brp_list[brp_i].value = 0;
1472                         brp_list[brp_i].control = 0;
1473                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1474                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1475                                         brp_list[brp_i].control);
1476                         if (retval != ERROR_OK)
1477                                 return retval;
1478                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1479                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1480                                         brp_list[brp_i].value);
1481                         if (retval != ERROR_OK)
1482                                 return retval;
1483                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1484                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1485                                 return ERROR_OK;
1486                         }
1487                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1488                                 brp_list[brp_j].control, brp_list[brp_j].value);
1489                         brp_list[brp_j].used = 0;
1490                         brp_list[brp_j].value = 0;
1491                         brp_list[brp_j].control = 0;
1492                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1493                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1494                                         brp_list[brp_j].control);
1495                         if (retval != ERROR_OK)
1496                                 return retval;
1497                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1498                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1499                                         brp_list[brp_j].value);
1500                         if (retval != ERROR_OK)
1501                                 return retval;
1502                         breakpoint->linked_BRP = 0;
1503                         breakpoint->set = 0;
1504                         return ERROR_OK;
1505
1506                 } else {
1507                         int brp_i = breakpoint->set - 1;
1508                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1509                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1510                                 return ERROR_OK;
1511                         }
1512                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1513                                 brp_list[brp_i].control, brp_list[brp_i].value);
1514                         brp_list[brp_i].used = 0;
1515                         brp_list[brp_i].value = 0;
1516                         brp_list[brp_i].control = 0;
1517                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1518                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1519                                         brp_list[brp_i].control);
1520                         if (retval != ERROR_OK)
1521                                 return retval;
1522                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1523                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1524                                         brp_list[brp_i].value);
1525                         if (retval != ERROR_OK)
1526                                 return retval;
1527                         breakpoint->set = 0;
1528                         return ERROR_OK;
1529                 }
1530         } else {
1531
1532                 /* make sure data cache is cleaned & invalidated down to PoC */
1533                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1534                         armv7a_cache_flush_virt(target, breakpoint->address,
1535                                                 breakpoint->length);
1536                 }
1537
1538                 /* restore original instruction (kept in target endianness) */
1539                 if (breakpoint->length == 4) {
1540                         retval = target_write_memory(target,
1541                                         breakpoint->address & 0xFFFFFFFE,
1542                                         4, 1, breakpoint->orig_instr);
1543                         if (retval != ERROR_OK)
1544                                 return retval;
1545                 } else {
1546                         retval = target_write_memory(target,
1547                                         breakpoint->address & 0xFFFFFFFE,
1548                                         2, 1, breakpoint->orig_instr);
1549                         if (retval != ERROR_OK)
1550                                 return retval;
1551                 }
1552
1553                 /* invalidate d-cache and i-cache at the breakpoint location */
1554                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1555                                                  breakpoint->length);
1556                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1557                                                  breakpoint->length);
1558         }
1559         breakpoint->set = 0;
1560
1561         return ERROR_OK;
1562 }
1563
1564 static int cortex_a_add_breakpoint(struct target *target,
1565         struct breakpoint *breakpoint)
1566 {
1567         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1568
1569         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1570                 LOG_INFO("no hardware breakpoint available");
1571                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1572         }
1573
1574         if (breakpoint->type == BKPT_HARD)
1575                 cortex_a->brp_num_available--;
1576
1577         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1578 }
1579
1580 static int cortex_a_add_context_breakpoint(struct target *target,
1581         struct breakpoint *breakpoint)
1582 {
1583         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1584
1585         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1586                 LOG_INFO("no hardware breakpoint available");
1587                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1588         }
1589
1590         if (breakpoint->type == BKPT_HARD)
1591                 cortex_a->brp_num_available--;
1592
1593         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1594 }
1595
1596 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1597         struct breakpoint *breakpoint)
1598 {
1599         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1600
1601         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1602                 LOG_INFO("no hardware breakpoint available");
1603                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1604         }
1605
1606         if (breakpoint->type == BKPT_HARD)
1607                 cortex_a->brp_num_available--;
1608
1609         return cortex_a_set_hybrid_breakpoint(target, breakpoint);      /* ??? */
1610 }
1611
1612
1613 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1614 {
1615         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1616
1617 #if 0
1618 /* It is perfectly possible to remove breakpoints while the target is running */
1619         if (target->state != TARGET_HALTED) {
1620                 LOG_WARNING("target not halted");
1621                 return ERROR_TARGET_NOT_HALTED;
1622         }
1623 #endif
1624
1625         if (breakpoint->set) {
1626                 cortex_a_unset_breakpoint(target, breakpoint);
1627                 if (breakpoint->type == BKPT_HARD)
1628                         cortex_a->brp_num_available++;
1629         }
1630
1631
1632         return ERROR_OK;
1633 }
1634
1635 /*
1636  * Cortex-A Reset functions
1637  */
1638
1639 static int cortex_a_assert_reset(struct target *target)
1640 {
1641         struct armv7a_common *armv7a = target_to_armv7a(target);
1642
1643         LOG_DEBUG(" ");
1644
1645         /* FIXME when halt is requested, make it work somehow... */
1646
1647         /* This function can be called in "target not examined" state */
1648
1649         /* Issue some kind of warm reset. */
1650         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1651                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1652         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1653                 /* REVISIT handle "pulls" cases, if there's
1654                  * hardware that needs them to work.
1655                  */
1656
1657                 /*
1658                  * FIXME: fix reset when transport is SWD. This is a temporary
1659                  * work-around for release v0.10 that is not intended to stay!
1660                  */
1661                 if (transport_is_swd() ||
1662                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1663                         adapter_assert_reset();
1664
1665         } else {
1666                 LOG_ERROR("%s: how to reset?", target_name(target));
1667                 return ERROR_FAIL;
1668         }
1669
1670         /* registers are now invalid */
1671         if (target_was_examined(target))
1672                 register_cache_invalidate(armv7a->arm.core_cache);
1673
1674         target->state = TARGET_RESET;
1675
1676         return ERROR_OK;
1677 }
1678
1679 static int cortex_a_deassert_reset(struct target *target)
1680 {
1681         int retval;
1682
1683         LOG_DEBUG(" ");
1684
1685         /* be certain SRST is off */
1686         adapter_deassert_reset();
1687
1688         if (target_was_examined(target)) {
1689                 retval = cortex_a_poll(target);
1690                 if (retval != ERROR_OK)
1691                         return retval;
1692         }
1693
1694         if (target->reset_halt) {
1695                 if (target->state != TARGET_HALTED) {
1696                         LOG_WARNING("%s: ran after reset and before halt ...",
1697                                 target_name(target));
1698                         if (target_was_examined(target)) {
1699                                 retval = target_halt(target);
1700                                 if (retval != ERROR_OK)
1701                                         return retval;
1702                         } else
1703                                 target->state = TARGET_UNKNOWN;
1704                 }
1705         }
1706
1707         return ERROR_OK;
1708 }
1709
1710 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1711 {
1712         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1713          * New desired mode must be in mode. Current value of DSCR must be in
1714          * *dscr, which is updated with new value.
1715          *
1716          * This function elides actually sending the mode-change over the debug
1717          * interface if the mode is already set as desired.
1718          */
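             /* The modes map onto the DSCR external DCC access mode field:
              * non-blocking completes DTR accesses immediately, stall mode makes
              * external DTR accesses wait for the core, and fast mode re-issues
              * the instruction latched in the ITR on every DTR transfer. */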
1719         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1720         if (new_dscr != *dscr) {
1721                 struct armv7a_common *armv7a = target_to_armv7a(target);
1722                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1723                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1724                 if (retval == ERROR_OK)
1725                         *dscr = new_dscr;
1726                 return retval;
1727         } else {
1728                 return ERROR_OK;
1729         }
1730 }
1731
1732 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1733         uint32_t value, uint32_t *dscr)
1734 {
1735         /* Waits until the specified bit(s) of DSCR take on a specified value. */
1736         struct armv7a_common *armv7a = target_to_armv7a(target);
1737         int64_t then;
1738         int retval;
1739
1740         if ((*dscr & mask) == value)
1741                 return ERROR_OK;
1742
1743         then = timeval_ms();
1744         while (1) {
1745                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1746                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
1747                 if (retval != ERROR_OK) {
1748                         LOG_ERROR("Could not read DSCR register");
1749                         return retval;
1750                 }
1751                 if ((*dscr & mask) == value)
1752                         break;
1753                 if (timeval_ms() > then + 1000) {
1754                         LOG_ERROR("timeout waiting for DSCR bit change");
1755                         return ERROR_FAIL;
1756                 }
1757         }
1758         return ERROR_OK;
1759 }
1760
1761 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1762         uint32_t *data, uint32_t *dscr)
1763 {
1764         int retval;
1765         struct armv7a_common *armv7a = target_to_armv7a(target);
1766
1767         /* Move from coprocessor to R0. */
1768         retval = cortex_a_exec_opcode(target, opcode, dscr);
1769         if (retval != ERROR_OK)
1770                 return retval;
1771
1772         /* Move from R0 to DTRTX. */
1773         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1774         if (retval != ERROR_OK)
1775                 return retval;
1776
1777         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1778          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1779          * must also check TXfull_l). Most of the time this will be free
1780          * because TXfull_l will be set immediately and cached in dscr. */
1781         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1782                         DSCR_DTRTX_FULL_LATCHED, dscr);
1783         if (retval != ERROR_OK)
1784                 return retval;
1785
1786         /* Read the value transferred to DTRTX. */
1787         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1788                         armv7a->debug_base + CPUDBG_DTRTX, data);
1789         if (retval != ERROR_OK)
1790                 return retval;
1791
1792         return ERROR_OK;
1793 }
1794
1795 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1796         uint32_t *dfsr, uint32_t *dscr)
1797 {
1798         int retval;
1799
1800         if (dfar) {
1801                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1802                 if (retval != ERROR_OK)
1803                         return retval;
1804         }
1805
1806         if (dfsr) {
1807                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1808                 if (retval != ERROR_OK)
1809                         return retval;
1810         }
1811
1812         return ERROR_OK;
1813 }
1814
1815 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
1816         uint32_t data, uint32_t *dscr)
1817 {
1818         int retval;
1819         struct armv7a_common *armv7a = target_to_armv7a(target);
1820
1821         /* Write the value into DTRRX. */
1822         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1823                         armv7a->debug_base + CPUDBG_DTRRX, data);
1824         if (retval != ERROR_OK)
1825                 return retval;
1826
1827         /* Move from DTRRX to R0. */
1828         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
1829         if (retval != ERROR_OK)
1830                 return retval;
1831
1832         /* Move from R0 to coprocessor. */
1833         retval = cortex_a_exec_opcode(target, opcode, dscr);
1834         if (retval != ERROR_OK)
1835                 return retval;
1836
1837         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
1838          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
1839          * check RXfull_l). Most of the time this will be free because RXfull_l
1840          * will be cleared immediately and cached in dscr. */
1841         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1842         if (retval != ERROR_OK)
1843                 return retval;
1844
1845         return ERROR_OK;
1846 }
1847
1848 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1849         uint32_t dfsr, uint32_t *dscr)
1850 {
1851         int retval;
1852
1853         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1854         if (retval != ERROR_OK)
1855                 return retval;
1856
1857         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1858         if (retval != ERROR_OK)
1859                 return retval;
1860
1861         return ERROR_OK;
1862 }
1863
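     /* Example decodings matching the checks below: in the short-descriptor
      * format, FS = 0b00001 is an alignment fault (unaligned access), while
      * FS values such as 0b00101/0b00111 are section/page translation faults;
      * everything else is reported as a plain data abort. */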
1864 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1865 {
1866         uint32_t status, upper4;
1867
1868         if (dfsr & (1 << 9)) {
1869                 /* LPAE format. */
1870                 status = dfsr & 0x3f;
1871                 upper4 = status >> 2;
1872                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1873                         return ERROR_TARGET_TRANSLATION_FAULT;
1874                 else if (status == 33)
1875                         return ERROR_TARGET_UNALIGNED_ACCESS;
1876                 else
1877                         return ERROR_TARGET_DATA_ABORT;
1878         } else {
1879                 /* Normal format. */
1880                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1881                 if (status == 1)
1882                         return ERROR_TARGET_UNALIGNED_ACCESS;
1883                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1884                                 status == 9 || status == 11 || status == 13 || status == 15)
1885                         return ERROR_TARGET_TRANSLATION_FAULT;
1886                 else
1887                         return ERROR_TARGET_DATA_ABORT;
1888         }
1889 }
1890
1891 static int cortex_a_write_cpu_memory_slow(struct target *target,
1892         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1893 {
1894         /* Writes count objects of size size from *buffer. Old value of DSCR must
1895          * be in *dscr; updated to new value. This is slow because it works for
1896          * non-word-sized objects. Avoid unaligned accesses as they do not work
1897          * non-word-sized objects. Avoid unaligned accesses, as they do not work
1898          * on memory regions without the "Normal" attribute. If size == 4 and
1899          * preferred.
1900          * Preconditions:
1901          * - Address is in R0.
1902          * - R0 is marked dirty.
1903          */
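             /* Per-object sequence: the host writes the data into DTRRX over the
              * APB-AP, an MRC issued through the ITR moves DTRRX into R1, and a
              * post-indexed STRB/STRH/STR (the *_IP opcodes) stores R1 to [R0]
              * and advances R0 by the access size. */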
1904         struct armv7a_common *armv7a = target_to_armv7a(target);
1905         struct arm *arm = &armv7a->arm;
1906         int retval;
1907
1908         /* Mark register R1 as dirty, to use for transferring data. */
1909         arm_reg_current(arm, 1)->dirty = true;
1910
1911         /* Switch to non-blocking mode if not already in that mode. */
1912         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
1913         if (retval != ERROR_OK)
1914                 return retval;
1915
1916         /* Go through the objects. */
1917         while (count) {
1918                 /* Write the value to store into DTRRX. */
1919                 uint32_t data, opcode;
1920                 if (size == 1)
1921                         data = *buffer;
1922                 else if (size == 2)
1923                         data = target_buffer_get_u16(target, buffer);
1924                 else
1925                         data = target_buffer_get_u32(target, buffer);
1926                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1927                                 armv7a->debug_base + CPUDBG_DTRRX, data);
1928                 if (retval != ERROR_OK)
1929                         return retval;
1930
1931                 /* Transfer the value from DTRRX to R1. */
1932                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
1933                 if (retval != ERROR_OK)
1934                         return retval;
1935
1936                 /* Write the value transferred to R1 into memory. */
1937                 if (size == 1)
1938                         opcode = ARMV4_5_STRB_IP(1, 0);
1939                 else if (size == 2)
1940                         opcode = ARMV4_5_STRH_IP(1, 0);
1941                 else
1942                         opcode = ARMV4_5_STRW_IP(1, 0);
1943                 retval = cortex_a_exec_opcode(target, opcode, dscr);
1944                 if (retval != ERROR_OK)
1945                         return retval;
1946
1947                 /* Check for faults and return early. */
1948                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
1949                         return ERROR_OK; /* A data fault is not considered a system failure. */
1950
1951                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
1952                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1953                  * must also check RXfull_l). Most of the time this will be free
1954                  * because RXfull_l will be cleared immediately and cached in dscr. */
1955                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1956                 if (retval != ERROR_OK)
1957                         return retval;
1958
1959                 /* Advance. */
1960                 buffer += size;
1961                 --count;
1962         }
1963
1964         return ERROR_OK;
1965 }
1966
1967 static int cortex_a_write_cpu_memory_fast(struct target *target,
1968         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1969 {
1970         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
1971          * in *dscr; updated to new value. This is fast but only works for
1972          * word-sized objects at aligned addresses.
1973          * Preconditions:
1974          * - Address is in R0 and must be a multiple of 4.
1975          * - R0 is marked dirty.
1976          */
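             /* In fast mode every write to DTRRX re-executes the instruction
              * latched in the ITR; with STC p14, c5, [R0], #4 latched, the host
              * can simply stream words into the DCC while the core stores each
              * one to memory and post-increments R0. */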
1977         struct armv7a_common *armv7a = target_to_armv7a(target);
1978         int retval;
1979
1980         /* Switch to fast mode if not already in that mode. */
1981         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
1982         if (retval != ERROR_OK)
1983                 return retval;
1984
1985         /* Latch STC instruction. */
1986         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1987                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1988         if (retval != ERROR_OK)
1989                 return retval;
1990
1991         /* Transfer all the data and issue all the instructions. */
1992         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
1993                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
1994 }
1995
1996 static int cortex_a_write_cpu_memory(struct target *target,
1997         uint32_t address, uint32_t size,
1998         uint32_t count, const uint8_t *buffer)
1999 {
2000         /* Write memory through the CPU. */
2001         int retval, final_retval;
2002         struct armv7a_common *armv7a = target_to_armv7a(target);
2003         struct arm *arm = &armv7a->arm;
2004         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2005
2006         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2007                           address, size, count);
2008         if (target->state != TARGET_HALTED) {
2009                 LOG_WARNING("target not halted");
2010                 return ERROR_TARGET_NOT_HALTED;
2011         }
2012
2013         if (!count)
2014                 return ERROR_OK;
2015
2016         /* Clear any abort. */
2017         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2018                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2019         if (retval != ERROR_OK)
2020                 return retval;
2021
2022         /* Read DSCR. */
2023         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2024                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2025         if (retval != ERROR_OK)
2026                 return retval;
2027
2028         /* Switch to non-blocking mode if not already in that mode. */
2029         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2030         if (retval != ERROR_OK)
2031                 goto out;
2032
2033         /* Mark R0 as dirty. */
2034         arm_reg_current(arm, 0)->dirty = true;
2035
2036         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2037         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2038         if (retval != ERROR_OK)
2039                 goto out;
2040
2041         /* Get the memory address into R0. */
2042         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2043                         armv7a->debug_base + CPUDBG_DTRRX, address);
2044         if (retval != ERROR_OK)
2045                 goto out;
2046         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2047         if (retval != ERROR_OK)
2048                 goto out;
2049
2050         if (size == 4 && (address % 4) == 0) {
2051                 /* We are doing a word-aligned transfer, so use fast mode. */
2052                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2053         } else {
2054                 /* Use slow path. Adjust size for aligned accesses */
2055                 switch (address % 4) {
2056                         case 1:
2057                         case 3:
2058                                 count *= size;
2059                                 size = 1;
2060                                 break;
2061                         case 2:
2062                                 if (size == 4) {
2063                                         count *= 2;
2064                                         size = 2;
2065                                 }
2066                         case 0:
2067                         default:
2068                                 break;
2069                 }
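                     /* Example: writing 3 words (size 4) to an address ending in
                      * ...2 becomes 6 naturally aligned halfword stores; an odd
                      * address degrades the transfer to byte stores. */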
2070                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2071         }
2072
2073 out:
2074         final_retval = retval;
2075
2076         /* Switch to non-blocking mode if not already in that mode. */
2077         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2078         if (final_retval == ERROR_OK)
2079                 final_retval = retval;
2080
2081         /* Wait for last issued instruction to complete. */
2082         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2083         if (final_retval == ERROR_OK)
2084                 final_retval = retval;
2085
2086         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2087          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2088          * check RXfull_l). Most of the time this will be free because RXfull_l
2089          * will be cleared immediately and cached in dscr. However, don't do this
2090          * if there is a fault, because then the instruction might not have completed
2091          * successfully. */
2092         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2093                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2094                 if (retval != ERROR_OK)
2095                         return retval;
2096         }
2097
2098         /* If there were any sticky abort flags, clear them. */
2099         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2100                 fault_dscr = dscr;
2101                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2102                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2103                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2104         } else {
2105                 fault_dscr = 0;
2106         }
2107
2108         /* Handle synchronous data faults. */
2109         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2110                 if (final_retval == ERROR_OK) {
2111                         /* Final return value will reflect cause of fault. */
2112                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2113                         if (retval == ERROR_OK) {
2114                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2115                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2116                         } else
2117                                 final_retval = retval;
2118                 }
2119                 /* Fault destroyed DFAR/DFSR; restore them. */
2120                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2121                 if (retval != ERROR_OK)
2122                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2123         }
2124
2125         /* Handle asynchronous data faults. */
2126         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2127                 if (final_retval == ERROR_OK)
2128                         /* No other error has been recorded so far, so keep this one. */
2129                         final_retval = ERROR_TARGET_DATA_ABORT;
2130         }
2131
2132         /* If the DCC is nonempty, clear it. */
2133         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2134                 uint32_t dummy;
2135                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2136                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2137                 if (final_retval == ERROR_OK)
2138                         final_retval = retval;
2139         }
2140         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2141                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2142                 if (final_retval == ERROR_OK)
2143                         final_retval = retval;
2144         }
2145
2146         /* Done. */
2147         return final_retval;
2148 }
2149
2150 static int cortex_a_read_cpu_memory_slow(struct target *target,
2151         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2152 {
2153         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2154          * in *dscr; updated to new value. This is slow because it works for
2155          * non-word-sized objects. Avoid unaligned accesses, as they do not work
2156          * on memory regions without the "Normal" attribute. If size == 4 and
2157          * the address is aligned, cortex_a_read_cpu_memory_fast should be
2158          * preferred.
2159          * Preconditions:
2160          * - Address is in R0.
2161          * - R0 is marked dirty.
2162          */
2163         struct armv7a_common *armv7a = target_to_armv7a(target);
2164         struct arm *arm = &armv7a->arm;
2165         int retval;
2166
2167         /* Mark register R1 as dirty, to use for transferring data. */
2168         arm_reg_current(arm, 1)->dirty = true;
2169
2170         /* Switch to non-blocking mode if not already in that mode. */
2171         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2172         if (retval != ERROR_OK)
2173                 return retval;
2174
2175         /* Go through the objects. */
2176         while (count) {
2177                 /* Issue a load of the appropriate size to R1. */
2178                 uint32_t opcode, data;
2179                 if (size == 1)
2180                         opcode = ARMV4_5_LDRB_IP(1, 0);
2181                 else if (size == 2)
2182                         opcode = ARMV4_5_LDRH_IP(1, 0);
2183                 else
2184                         opcode = ARMV4_5_LDRW_IP(1, 0);
2185                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2186                 if (retval != ERROR_OK)
2187                         return retval;
2188
2189                 /* Issue a write of R1 to DTRTX. */
2190                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2191                 if (retval != ERROR_OK)
2192                         return retval;
2193
2194                 /* Check for faults and return early. */
2195                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2196                         return ERROR_OK; /* A data fault is not considered a system failure. */
2197
2198                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2199                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2200                  * must also check TXfull_l). Most of the time this will be free
2201                  * because TXfull_l will be set immediately and cached in dscr. */
2202                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2203                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2204                 if (retval != ERROR_OK)
2205                         return retval;
2206
2207                 /* Read the value transferred to DTRTX into the buffer. */
2208                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2209                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2210                 if (retval != ERROR_OK)
2211                         return retval;
2212                 if (size == 1)
2213                         *buffer = (uint8_t) data;
2214                 else if (size == 2)
2215                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2216                 else
2217                         target_buffer_set_u32(target, buffer, data);
2218
2219                 /* Advance. */
2220                 buffer += size;
2221                 --count;
2222         }
2223
2224         return ERROR_OK;
2225 }
2226
2227 static int cortex_a_read_cpu_memory_fast(struct target *target,
2228         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2229 {
2230         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2231          * *dscr; updated to new value. This is fast but only works for word-sized
2232          * objects at aligned addresses.
2233          * Preconditions:
2234          * - Address is in R0 and must be a multiple of 4.
2235          * - R0 is marked dirty.
2236          */
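             /* The first LDC below is issued explicitly to start filling DTRTX;
              * in fast mode each subsequent read of DTRTX re-issues the latched
              * LDC, so the buffered read streams all but the last word and the
              * final word is collected after leaving fast mode. */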
2237         struct armv7a_common *armv7a = target_to_armv7a(target);
2238         uint32_t u32;
2239         int retval;
2240
2241         /* Switch to non-blocking mode if not already in that mode. */
2242         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2243         if (retval != ERROR_OK)
2244                 return retval;
2245
2246         /* Issue the LDC instruction via a write to ITR. */
2247         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2248         if (retval != ERROR_OK)
2249                 return retval;
2250
2251         count--;
2252
2253         if (count > 0) {
2254                 /* Switch to fast mode if not already in that mode. */
2255                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2256                 if (retval != ERROR_OK)
2257                         return retval;
2258
2259                 /* Latch LDC instruction. */
2260                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2261                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2262                 if (retval != ERROR_OK)
2263                         return retval;
2264
2265                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2266                  * mode rules, this blocks until the instruction finishes executing and
2267                  * then reissues the read instruction to read the next word from
2268                  * memory. The last read of DTRTX in this call reads the second-to-last
2269                  * word from memory and issues the read instruction for the last word.
2270                  */
2271                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2272                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2273                 if (retval != ERROR_OK)
2274                         return retval;
2275
2276                 /* Advance. */
2277                 buffer += count * 4;
2278         }
2279
2280         /* Wait for last issued instruction to complete. */
2281         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2282         if (retval != ERROR_OK)
2283                 return retval;
2284
2285         /* Switch to non-blocking mode if not already in that mode. */
2286         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2287         if (retval != ERROR_OK)
2288                 return retval;
2289
2290         /* Check for faults and return early. */
2291         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2292                 return ERROR_OK; /* A data fault is not considered a system failure. */
2293
2294         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2295          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2296          * check TXfull_l). Most of the time this will be free because TXfull_l
2297          * will be set immediately and cached in dscr. */
2298         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2299                         DSCR_DTRTX_FULL_LATCHED, dscr);
2300         if (retval != ERROR_OK)
2301                 return retval;
2302
2303         /* Read the value transferred to DTRTX into the buffer. This is the last
2304          * word. */
2305         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2306                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2307         if (retval != ERROR_OK)
2308                 return retval;
2309         target_buffer_set_u32(target, buffer, u32);
2310
2311         return ERROR_OK;
2312 }
2313
2314 static int cortex_a_read_cpu_memory(struct target *target,
2315         uint32_t address, uint32_t size,
2316         uint32_t count, uint8_t *buffer)
2317 {
2318         /* Read memory through the CPU. */
2319         int retval, final_retval;
2320         struct armv7a_common *armv7a = target_to_armv7a(target);
2321         struct arm *arm = &armv7a->arm;
2322         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2323
2324         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2325                           address, size, count);
2326         if (target->state != TARGET_HALTED) {
2327                 LOG_WARNING("target not halted");
2328                 return ERROR_TARGET_NOT_HALTED;
2329         }
2330
2331         if (!count)
2332                 return ERROR_OK;
2333
2334         /* Clear any abort. */
2335         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2336                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2337         if (retval != ERROR_OK)
2338                 return retval;
2339
2340         /* Read DSCR */
2341         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2342                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2343         if (retval != ERROR_OK)
2344                 return retval;
2345
2346         /* Switch to non-blocking mode if not already in that mode. */
2347         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2348         if (retval != ERROR_OK)
2349                 goto out;
2350
2351         /* Mark R0 as dirty. */
2352         arm_reg_current(arm, 0)->dirty = true;
2353
2354         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2355         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2356         if (retval != ERROR_OK)
2357                 goto out;
2358
2359         /* Get the memory address into R0. */
2360         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2361                         armv7a->debug_base + CPUDBG_DTRRX, address);
2362         if (retval != ERROR_OK)
2363                 goto out;
2364         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2365         if (retval != ERROR_OK)
2366                 goto out;
2367
2368         if (size == 4 && (address % 4) == 0) {
2369                 /* We are doing a word-aligned transfer, so use fast mode. */
2370                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2371         } else {
2372                 /* Use slow path. Adjust size for aligned accesses */
2373                 switch (address % 4) {
2374                         case 1:
2375                         case 3:
2376                                 count *= size;
2377                                 size = 1;
2378                                 break;
2379                         case 2:
2380                                 if (size == 4) {
2381                                         count *= 2;
2382                                         size = 2;
2383                                 }
2384                                 break;
2385                         case 0:
2386                         default:
2387                                 break;
2388                 }
2389                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2390         }
2391
2392 out:
2393         final_retval = retval;
2394
2395         /* Switch to non-blocking mode if not already in that mode. */
2396         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2397         if (final_retval == ERROR_OK)
2398                 final_retval = retval;
2399
2400         /* Wait for last issued instruction to complete. */
2401         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2402         if (final_retval == ERROR_OK)
2403                 final_retval = retval;
2404
2405         /* If there were any sticky abort flags, clear them. */
2406         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2407                 fault_dscr = dscr;
2408                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2409                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2410                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2411         } else {
2412                 fault_dscr = 0;
2413         }
2414
2415         /* Handle synchronous data faults. */
2416         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2417                 if (final_retval == ERROR_OK) {
2418                         /* Final return value will reflect cause of fault. */
2419                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2420                         if (retval == ERROR_OK) {
2421                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2422                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2423                         } else
2424                                 final_retval = retval;
2425                 }
2426                 /* Fault destroyed DFAR/DFSR; restore them. */
2427                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2428                 if (retval != ERROR_OK)
2429                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2430         }
2431
2432         /* Handle asynchronous data faults. */
2433         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2434                 if (final_retval == ERROR_OK)
2435                         /* No other error has been recorded so far, so keep this one. */
2436                         final_retval = ERROR_TARGET_DATA_ABORT;
2437         }
2438
2439         /* If the DCC is nonempty, clear it. */
2440         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2441                 uint32_t dummy;
2442                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2443                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2444                 if (final_retval == ERROR_OK)
2445                         final_retval = retval;
2446         }
2447         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2448                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2449                 if (final_retval == ERROR_OK)
2450                         final_retval = retval;
2451         }
2452
2453         /* Done. */
2454         return final_retval;
2455 }
2456
2457
2458 /*
2459  * Cortex-A Memory access
2460  *
2461  * This is the same as for Cortex-M3, but we must also use the
2462  * correct AP number for every access.
2463  */
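     /* The flag passed to cortex_a_prep_memaccess()/cortex_a_post_memaccess()
      * below selects a physical (1) or virtual (0) access, i.e. whether address
      * translation is expected to be bypassed for the duration of the transfer. */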
2464
2465 static int cortex_a_read_phys_memory(struct target *target,
2466         target_addr_t address, uint32_t size,
2467         uint32_t count, uint8_t *buffer)
2468 {
2469         int retval;
2470
2471         if (!count || !buffer)
2472                 return ERROR_COMMAND_SYNTAX_ERROR;
2473
2474         LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2475                 address, size, count);
2476
2477         /* read memory through the CPU */
2478         cortex_a_prep_memaccess(target, 1);
2479         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2480         cortex_a_post_memaccess(target, 1);
2481
2482         return retval;
2483 }
2484
2485 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2486         uint32_t size, uint32_t count, uint8_t *buffer)
2487 {
2488         int retval;
2489
2490         /* cortex_a handles unaligned memory access */
2491         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2492                 address, size, count);
2493
2494         cortex_a_prep_memaccess(target, 0);
2495         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2496         cortex_a_post_memaccess(target, 0);
2497
2498         return retval;
2499 }
2500
2501 static int cortex_a_write_phys_memory(struct target *target,
2502         target_addr_t address, uint32_t size,
2503         uint32_t count, const uint8_t *buffer)
2504 {
2505         int retval;
2506
2507         if (!count || !buffer)
2508                 return ERROR_COMMAND_SYNTAX_ERROR;
2509
2510         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2511                 address, size, count);
2512
2513         /* write memory through the CPU */
2514         cortex_a_prep_memaccess(target, 1);
2515         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2516         cortex_a_post_memaccess(target, 1);
2517
2518         return retval;
2519 }
2520
2521 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2522         uint32_t size, uint32_t count, const uint8_t *buffer)
2523 {
2524         int retval;
2525
2526         /* cortex_a handles unaligned memory access */
2527         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2528                 address, size, count);
2529
2530         /* memory writes bypass the caches, must flush before writing */
2531         armv7a_cache_auto_flush_on_write(target, address, size * count);
2532
2533         cortex_a_prep_memaccess(target, 0);
2534         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2535         cortex_a_post_memaccess(target, 0);
2536         return retval;
2537 }
2538
2539 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2540                                 uint32_t count, uint8_t *buffer)
2541 {
2542         uint32_t size;
2543
2544         /* Align the address up to the maximum access size of 4 bytes. The loop condition
2545          * makes sure the next pass will have something to do with the size we leave to it. */
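             /* Example: address = 0x1001, count = 7 results in a 1-byte read at
              * 0x1001 and a 2-byte read at 0x1002 in this loop, then a single
              * 4-byte read at 0x1004 in the loop below. */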
2546         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2547                 if (address & size) {
2548                         int retval = target_read_memory(target, address, size, 1, buffer);
2549                         if (retval != ERROR_OK)
2550                                 return retval;
2551                         address += size;
2552                         count -= size;
2553                         buffer += size;
2554                 }
2555         }
2556
2557         /* Read the data with as large access size as possible. */
2558         for (; size > 0; size /= 2) {
2559                 uint32_t aligned = count - count % size;
2560                 if (aligned > 0) {
2561                         int retval = target_read_memory(target, address, size, aligned / size, buffer);
2562                         if (retval != ERROR_OK)
2563                                 return retval;
2564                         address += aligned;
2565                         count -= aligned;
2566                         buffer += aligned;
2567                 }
2568         }
2569
2570         return ERROR_OK;
2571 }
2572
2573 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2574                                  uint32_t count, const uint8_t *buffer)
2575 {
2576         uint32_t size;
2577
2578         /* Align the address up to the maximum access size of 4 bytes. The loop condition
2579          * makes sure the next pass will have something to do with the size we leave to it. */
2580         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2581                 if (address & size) {
2582                         int retval = target_write_memory(target, address, size, 1, buffer);
2583                         if (retval != ERROR_OK)
2584                                 return retval;
2585                         address += size;
2586                         count -= size;
2587                         buffer += size;
2588                 }
2589         }
2590
2591         /* Write the data with as large access size as possible. */
2592         for (; size > 0; size /= 2) {
2593                 uint32_t aligned = count - count % size;
2594                 if (aligned > 0) {
2595                         int retval = target_write_memory(target, address, size, aligned / size, buffer);
2596                         if (retval != ERROR_OK)
2597                                 return retval;
2598                         address += aligned;
2599                         count -= aligned;
2600                         buffer += aligned;
2601                 }
2602         }
2603
2604         return ERROR_OK;
2605 }
2606
2607 static int cortex_a_handle_target_request(void *priv)
2608 {
2609         struct target *target = priv;
2610         struct armv7a_common *armv7a = target_to_armv7a(target);
2611         int retval;
2612
2613         if (!target_was_examined(target))
2614                 return ERROR_OK;
2615         if (!target->dbg_msg_enabled)
2616                 return ERROR_OK;
2617
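        /* While the core runs, firmware can pass debug messages to the host over
         * the DCC: it writes a word to DTRTX, which raises the DTRTX-full flag in
         * DSCR. Drain any pending words here and hand them to target_request(). */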
2618         if (target->state == TARGET_RUNNING) {
2619                 uint32_t request;
2620                 uint32_t dscr;
2621                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2622                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2623
2624                 /* check if we have data */
2625                 int64_t then = timeval_ms();
2626                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2627                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2628                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2629                         if (retval == ERROR_OK) {
2630                                 target_request(target, request);
2631                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2632                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2633                         }
2634                         if (timeval_ms() > then + 1000) {
2635                                 LOG_ERROR("Timeout waiting for dtr tx to drain");
2636                                 return ERROR_FAIL;
2637                         }
2638                 }
2639         }
2640
2641         return ERROR_OK;
2642 }
2643
2644 /*
2645  * Cortex-A target information and configuration
2646  */
2647
2648 static int cortex_a_examine_first(struct target *target)
2649 {
2650         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2651         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2652         struct adiv5_dap *swjdp = armv7a->arm.dap;
2653
2654         int i;
2655         int retval = ERROR_OK;
2656         uint32_t didr, cpuid, dbg_osreg;
2657
2658         /* Search for the APB-AP - it is needed for access to debug registers */
2659         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2660         if (retval != ERROR_OK) {
2661                 LOG_ERROR("Could not find APB-AP for debug access");
2662                 return retval;
2663         }
2664
2665         retval = mem_ap_init(armv7a->debug_ap);
2666         if (retval != ERROR_OK) {
2667                 LOG_ERROR("Could not initialize the APB-AP");
2668                 return retval;
2669         }
2670
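        /* Add 80 extra TCK clocks between AP memory bus accesses; the CPU debug
         * registers sit behind a comparatively slow debug APB. */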
2671         armv7a->debug_ap->memaccess_tck = 80;
2672
2673         if (!target->dbgbase_set) {
2674                 uint32_t dbgbase;
2675                 /* Get ROM Table base */
2676                 uint32_t apid;
2677                 int32_t coreidx = target->coreid;
2678                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2679                           target->cmd_name);
2680                 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2681                 if (retval != ERROR_OK)
2682                         return retval;
2683                 /* Lookup CoreSight DEVTYPE 0x15 -- Debug Logic, Processor (the core debug unit) */
2684                 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2685                                 &armv7a->debug_base, &coreidx);
2686                 if (retval != ERROR_OK) {
2687                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2688                                   target->cmd_name);
2689                         return retval;
2690                 }
2691                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2692                           target->coreid, armv7a->debug_base);
2693         } else
2694                 armv7a->debug_base = target->dbgbase;
2695
2696         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2697                         armv7a->debug_base + CPUDBG_DIDR, &didr);
2698         if (retval != ERROR_OK) {
2699                 LOG_DEBUG("Examine %s failed", "DIDR");
2700                 return retval;
2701         }
2702
2703         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2704                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2705         if (retval != ERROR_OK) {
2706                 LOG_DEBUG("Examine %s failed", "CPUID");
2707                 return retval;
2708         }
2709
2710         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2711         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2712
2713         cortex_a->didr = didr;
2714         cortex_a->cpuid = cpuid;
2715
2716         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2717                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2718         if (retval != ERROR_OK)
2719                 return retval;
2720         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
2721
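        /* DBGPRSR bit 0 is the power-up status of the core power domain; with the
         * core powered down the remaining debug registers are not accessible. */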
2722         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2723                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2724                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2725                 return ERROR_TARGET_INIT_FAILED;
2726         }
2727
2728         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2729                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2730
2731         /* Read DBGOSLSR and check if OSLK is implemented */
2732         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2733                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2734         if (retval != ERROR_OK)
2735                 return retval;
2736         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2737
2738         /* check if OS Lock is implemented */
2739         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2740                 /* check if OS Lock is set */
2741                 if (dbg_osreg & OSLSR_OSLK) {
2742                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2743
2744                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2745                                                         armv7a->debug_base + CPUDBG_OSLAR,
2746                                                         0);
2747                         if (retval == ERROR_OK)
2748                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2749                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2750
2751                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2752                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2753                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2754                                                 target->coreid);
2755                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2756                                 return ERROR_TARGET_INIT_FAILED;
2757                         }
2758                 }
2759         }
2760
2761         armv7a->arm.core_type = ARM_MODE_MON;
2762
2763         /* Avoid recreating the register cache */
2764         if (!target_was_examined(target)) {
2765                 retval = cortex_a_dpm_setup(cortex_a, didr);
2766                 if (retval != ERROR_OK)
2767                         return retval;
2768         }
2769
2770         /* Setup Breakpoint Register Pairs */
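        /* DBGDIDR[27:24] encodes the number of breakpoint register pairs minus one;
         * DBGDIDR[23:20] encodes how many of them support Context ID comparison,
         * also minus one. */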
2771         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2772         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2773         cortex_a->brp_num_available = cortex_a->brp_num;
2774         free(cortex_a->brp_list);
2775         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2776 /*      cortex_a->brb_enabled = ????; */
2777         for (i = 0; i < cortex_a->brp_num; i++) {
2778                 cortex_a->brp_list[i].used = 0;
2779                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2780                         cortex_a->brp_list[i].type = BRP_NORMAL;
2781                 else
2782                         cortex_a->brp_list[i].type = BRP_CONTEXT;
2783                 cortex_a->brp_list[i].value = 0;
2784                 cortex_a->brp_list[i].control = 0;
2785                 cortex_a->brp_list[i].BRPn = i;
2786         }
2787
2788         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2789
2790         /* select debug_ap as default */
2791         swjdp->apsel = armv7a->debug_ap->ap_num;
2792
2793         target_set_examined(target);
2794         return ERROR_OK;
2795 }
2796
2797 static int cortex_a_examine(struct target *target)
2798 {
2799         int retval = ERROR_OK;
2800
2801         /* Reestablish communication after target reset */
2802         retval = cortex_a_examine_first(target);
2803
2804         /* Configure core debug access */
2805         if (retval == ERROR_OK)
2806                 retval = cortex_a_init_debug_access(target);
2807
2808         return retval;
2809 }
2810
2811 /*
2812  *      Cortex-A target creation and initialization
2813  */
2814
2815 static int cortex_a_init_target(struct command_context *cmd_ctx,
2816         struct target *target)
2817 {
2818         /* examine_first() does a bunch of this */
2819         arm_semihosting_init(target);
2820         return ERROR_OK;
2821 }
2822
2823 static int cortex_a_init_arch_info(struct target *target,
2824         struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2825 {
2826         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2827
2828         /* Setup struct cortex_a_common */
2829         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2830         armv7a->arm.dap = dap;
2831
2832         /* register arch-specific functions */
2833         armv7a->examine_debug_reason = NULL;
2834
2835         armv7a->post_debug_entry = cortex_a_post_debug_entry;
2836
2837         armv7a->pre_restore_context = NULL;
2838
2839         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2840
2841
2842 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
2843
2844         /* REVISIT v7a setup should be in a v7a-specific routine */
2845         armv7a_init_arch_info(target, armv7a);
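        /* Poll for DCC target_request messages (see cortex_a_handle_target_request()
         * above) roughly once per millisecond. */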
2846         target_register_timer_callback(cortex_a_handle_target_request, 1,
2847                 TARGET_TIMER_TYPE_PERIODIC, target);
2848
2849         return ERROR_OK;
2850 }
2851
2852 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2853 {
2854         struct cortex_a_common *cortex_a;
2855         struct adiv5_private_config *pc;
2856
2857         if (target->private_config == NULL)
2858                 return ERROR_FAIL;
2859
2860         pc = (struct adiv5_private_config *)target->private_config;
2861
2862         cortex_a = calloc(1, sizeof(struct cortex_a_common));
2863         if (cortex_a == NULL) {
2864                 LOG_ERROR("Out of memory");
2865                 return ERROR_FAIL;
2866         }
2867         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2868         cortex_a->armv7a_common.is_armv7r = false;
2869         cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2870
2871         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2872 }
2873
2874 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2875 {
2876         struct cortex_a_common *cortex_a;
2877         struct adiv5_private_config *pc;
2878
2879         pc = (struct adiv5_private_config *)target->private_config;
2880         if (adiv5_verify_config(pc) != ERROR_OK)
2881                 return ERROR_FAIL;
2882
2883         cortex_a = calloc(1, sizeof(struct cortex_a_common));
2884         if (cortex_a == NULL) {
2885                 LOG_ERROR("Out of memory");
2886                 return ERROR_FAIL;
2887         }
2888         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2889         cortex_a->armv7a_common.is_armv7r = true;
2890
2891         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2892 }
2893
2894 static void cortex_a_deinit_target(struct target *target)
2895 {
2896         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2897         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2898         struct arm_dpm *dpm = &armv7a->dpm;
2899         uint32_t dscr;
2900         int retval;
2901
2902         if (target_was_examined(target)) {
2903                 /* Disable halt for breakpoint, watchpoint and vector catch */
2904                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2905                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2906                 if (retval == ERROR_OK)
2907                         mem_ap_write_atomic_u32(armv7a->debug_ap,
2908                                         armv7a->debug_base + CPUDBG_DSCR,
2909                                         dscr & ~DSCR_HALT_DBG_MODE);
2910         }
2911
2912         free(cortex_a->brp_list);
2913         free(dpm->dbp);
2914         free(dpm->dwp);
2915         free(target->private_config);
2916         free(cortex_a);
2917 }
2918
2919 static int cortex_a_mmu(struct target *target, int *enabled)
2920 {
2921         struct armv7a_common *armv7a = target_to_armv7a(target);
2922
2923         if (target->state != TARGET_HALTED) {
2924                 LOG_ERROR("%s: target not halted", __func__);
2925                 return ERROR_TARGET_INVALID;
2926         }
2927
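        /* ARMv7-R cores have an MPU rather than an MMU, so always report the MMU
         * as disabled for them. */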
2928         if (armv7a->is_armv7r)
2929                 *enabled = 0;
2930         else
2931                 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2932
2933         return ERROR_OK;
2934 }
2935
2936 static int cortex_a_virt2phys(struct target *target,
2937         target_addr_t virt, target_addr_t *phys)
2938 {
2939         int retval;
2940         int mmu_enabled = 0;
2941
2942         /*
2943          * If the MMU was not enabled at debug entry, there is no
2944          * way of knowing if there was ever a valid configuration
2945          * for it and thus it's not safe to enable it. In this case,
2946          * just return the virtual address as physical.
2947          */
2948         cortex_a_mmu(target, &mmu_enabled);
2949         if (!mmu_enabled) {
2950                 *phys = virt;
2951                 return ERROR_OK;
2952         }
2953
2954         /* the MMU must be enabled in order to get a correct translation */
2955         retval = cortex_a_mmu_modify(target, 1);
2956         if (retval != ERROR_OK)
2957                 return retval;
2958         return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2959                                                     phys, 1);
2960 }
2961
2962 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2963 {
2964         struct target *target = get_current_target(CMD_CTX);
2965         struct armv7a_common *armv7a = target_to_armv7a(target);
2966
2967         return armv7a_handle_cache_info_command(CMD,
2968                         &armv7a->armv7a_mmu.armv7a_cache);
2969 }
2970
2971
2972 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2973 {
2974         struct target *target = get_current_target(CMD_CTX);
2975         if (!target_was_examined(target)) {
2976                 LOG_ERROR("target not examined yet");
2977                 return ERROR_FAIL;
2978         }
2979
2980         return cortex_a_init_debug_access(target);
2981 }
2982
2983 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
2984 {
2985         struct target *target = get_current_target(CMD_CTX);
2986         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2987
2988         static const Jim_Nvp nvp_maskisr_modes[] = {
2989                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
2990                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
2991                 { .name = NULL, .value = -1 },
2992         };
2993         const Jim_Nvp *n;
2994
2995         if (CMD_ARGC > 0) {
2996                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2997                 if (n->name == NULL) {
2998                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2999                         return ERROR_COMMAND_SYNTAX_ERROR;
3000                 }
3001
3002                 cortex_a->isrmasking_mode = n->value;
3003         }
3004
3005         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3006         command_print(CMD, "cortex_a interrupt mask %s", n->name);
3007
3008         return ERROR_OK;
3009 }
3010
3011 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3012 {
3013         struct target *target = get_current_target(CMD_CTX);
3014         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3015
3016         static const Jim_Nvp nvp_dacrfixup_modes[] = {
3017                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3018                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3019                 { .name = NULL, .value = -1 },
3020         };
3021         const Jim_Nvp *n;
3022
3023         if (CMD_ARGC > 0) {
3024                 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3025                 if (n->name == NULL)
3026                         return ERROR_COMMAND_SYNTAX_ERROR;
3027                 cortex_a->dacrfixup_mode = n->value;
3028
3029         }
3030
3031         n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3032         command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3033
3034         return ERROR_OK;
3035 }
3036
3037 static const struct command_registration cortex_a_exec_command_handlers[] = {
3038         {
3039                 .name = "cache_info",
3040                 .handler = cortex_a_handle_cache_info_command,
3041                 .mode = COMMAND_EXEC,
3042                 .help = "display information about target caches",
3043                 .usage = "",
3044         },
3045         {
3046                 .name = "dbginit",
3047                 .handler = cortex_a_handle_dbginit_command,
3048                 .mode = COMMAND_EXEC,
3049                 .help = "Initialize core debug",
3050                 .usage = "",
3051         },
3052         {
3053                 .name = "maskisr",
3054                 .handler = handle_cortex_a_mask_interrupts_command,
3055                 .mode = COMMAND_ANY,
3056                 .help = "mask cortex_a interrupts",
3057                 .usage = "['on'|'off']",
3058         },
3059         {
3060                 .name = "dacrfixup",
3061                 .handler = handle_cortex_a_dacrfixup_command,
3062                 .mode = COMMAND_ANY,
3063                 .help = "set domain access control (DACR) to all-manager "
3064                         "on memory access",
3065                 .usage = "['on'|'off']",
3066         },
3067         {
3068                 .chain = armv7a_mmu_command_handlers,
3069         },
3070         {
3071                 .chain = smp_command_handlers,
3072         },
3073
3074         COMMAND_REGISTRATION_DONE
3075 };
3076 static const struct command_registration cortex_a_command_handlers[] = {
3077         {
3078                 .chain = arm_command_handlers,
3079         },
3080         {
3081                 .chain = armv7a_command_handlers,
3082         },
3083         {
3084                 .name = "cortex_a",
3085                 .mode = COMMAND_ANY,
3086                 .help = "Cortex-A command group",
3087                 .usage = "",
3088                 .chain = cortex_a_exec_command_handlers,
3089         },
3090         COMMAND_REGISTRATION_DONE
3091 };
3092
3093 struct target_type cortexa_target = {
3094         .name = "cortex_a",
3095         .deprecated_name = "cortex_a8",
3096
3097         .poll = cortex_a_poll,
3098         .arch_state = armv7a_arch_state,
3099
3100         .halt = cortex_a_halt,
3101         .resume = cortex_a_resume,
3102         .step = cortex_a_step,
3103
3104         .assert_reset = cortex_a_assert_reset,
3105         .deassert_reset = cortex_a_deassert_reset,
3106
3107         /* REVISIT allow exporting VFP3 registers ... */
3108         .get_gdb_arch = arm_get_gdb_arch,
3109         .get_gdb_reg_list = arm_get_gdb_reg_list,
3110
3111         .read_memory = cortex_a_read_memory,
3112         .write_memory = cortex_a_write_memory,
3113
3114         .read_buffer = cortex_a_read_buffer,
3115         .write_buffer = cortex_a_write_buffer,
3116
3117         .checksum_memory = arm_checksum_memory,
3118         .blank_check_memory = arm_blank_check_memory,
3119
3120         .run_algorithm = armv4_5_run_algorithm,
3121
3122         .add_breakpoint = cortex_a_add_breakpoint,
3123         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3124         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3125         .remove_breakpoint = cortex_a_remove_breakpoint,
3126         .add_watchpoint = NULL,
3127         .remove_watchpoint = NULL,
3128
3129         .commands = cortex_a_command_handlers,
3130         .target_create = cortex_a_target_create,
3131         .target_jim_configure = adiv5_jim_configure,
3132         .init_target = cortex_a_init_target,
3133         .examine = cortex_a_examine,
3134         .deinit_target = cortex_a_deinit_target,
3135
3136         .read_phys_memory = cortex_a_read_phys_memory,
3137         .write_phys_memory = cortex_a_write_phys_memory,
3138         .mmu = cortex_a_mmu,
3139         .virt2phys = cortex_a_virt2phys,
3140 };
3141
3142 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3143         {
3144                 .name = "dbginit",
3145                 .handler = cortex_a_handle_dbginit_command,
3146                 .mode = COMMAND_EXEC,
3147                 .help = "Initialize core debug",
3148                 .usage = "",
3149         },
3150         {
3151                 .name = "maskisr",
3152                 .handler = handle_cortex_a_mask_interrupts_command,
3153                 .mode = COMMAND_EXEC,
3154                 .help = "mask cortex_r4 interrupts",
3155                 .usage = "['on'|'off']",
3156         },
3157
3158         COMMAND_REGISTRATION_DONE
3159 };
3160 static const struct command_registration cortex_r4_command_handlers[] = {
3161         {
3162                 .chain = arm_command_handlers,
3163         },
3164         {
3165                 .name = "cortex_r4",
3166                 .mode = COMMAND_ANY,
3167                 .help = "Cortex-R4 command group",
3168                 .usage = "",
3169                 .chain = cortex_r4_exec_command_handlers,
3170         },
3171         COMMAND_REGISTRATION_DONE
3172 };
3173
3174 struct target_type cortexr4_target = {
3175         .name = "cortex_r4",
3176
3177         .poll = cortex_a_poll,
3178         .arch_state = armv7a_arch_state,
3179
3180         .halt = cortex_a_halt,
3181         .resume = cortex_a_resume,
3182         .step = cortex_a_step,
3183
3184         .assert_reset = cortex_a_assert_reset,
3185         .deassert_reset = cortex_a_deassert_reset,
3186
3187         /* REVISIT allow exporting VFP3 registers ... */
3188         .get_gdb_arch = arm_get_gdb_arch,
3189         .get_gdb_reg_list = arm_get_gdb_reg_list,
3190
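        /* ARMv7-R has no MMU, so plain memory accesses map directly to the
         * physical-access handlers. */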
3191         .read_memory = cortex_a_read_phys_memory,
3192         .write_memory = cortex_a_write_phys_memory,
3193
3194         .checksum_memory = arm_checksum_memory,
3195         .blank_check_memory = arm_blank_check_memory,
3196
3197         .run_algorithm = armv4_5_run_algorithm,
3198
3199         .add_breakpoint = cortex_a_add_breakpoint,
3200         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3201         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3202         .remove_breakpoint = cortex_a_remove_breakpoint,
3203         .add_watchpoint = NULL,
3204         .remove_watchpoint = NULL,
3205
3206         .commands = cortex_r4_command_handlers,
3207         .target_create = cortex_r4_target_create,
3208         .target_jim_configure = adiv5_jim_configure,
3209         .init_target = cortex_a_init_target,
3210         .examine = cortex_a_examine,
3211         .deinit_target = cortex_a_deinit_target,
3212 };