Use enum for target_register_timer_callback()
[fw/openocd] src/target/cortex_a.c
1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   This program is free software; you can redistribute it and/or modify  *
27  *   it under the terms of the GNU General Public License as published by  *
28  *   the Free Software Foundation; either version 2 of the License, or     *
29  *   (at your option) any later version.                                   *
30  *                                                                         *
31  *   This program is distributed in the hope that it will be useful,       *
32  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
33  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
34  *   GNU General Public License for more details.                          *
35  *                                                                         *
36  *   You should have received a copy of the GNU General Public License     *
37  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
38  *                                                                         *
39  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
40  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
41  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
42  *   Cortex-A15(tm)TRM, ARM DDI 0438C                                      *
43  *                                                                         *
44  ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "armv7a_mmu.h"
54 #include "target_request.h"
55 #include "target_type.h"
56 #include "arm_opcodes.h"
57 #include "arm_semihosting.h"
58 #include "transport/transport.h"
59 #include <helper/time_support.h>
60
61 #define foreach_smp_target(pos, head) \
62         for (pos = head; (pos != NULL); pos = pos->next)
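
/*
 * Illustrative sketch, not part of the original file: typical use of the
 * foreach_smp_target() helper above to walk an SMP target list, in the same
 * way update_halt_gdb() does further below. The function name is invented
 * for illustration only.
 */
static inline int cortex_a_example_count_smp_nodes(struct target *target)
{
        struct target_list *pos;
        int count = 0;

        foreach_smp_target(pos, target->head)
                count++;

        return count;
}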
63
64 static int cortex_a_poll(struct target *target);
65 static int cortex_a_debug_entry(struct target *target);
66 static int cortex_a_restore_context(struct target *target, bool bpwp);
67 static int cortex_a_set_breakpoint(struct target *target,
68         struct breakpoint *breakpoint, uint8_t matchmode);
69 static int cortex_a_set_context_breakpoint(struct target *target,
70         struct breakpoint *breakpoint, uint8_t matchmode);
71 static int cortex_a_set_hybrid_breakpoint(struct target *target,
72         struct breakpoint *breakpoint);
73 static int cortex_a_unset_breakpoint(struct target *target,
74         struct breakpoint *breakpoint);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78         target_addr_t virt, target_addr_t *phys);
79 static int cortex_a_read_cpu_memory(struct target *target,
80         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /*  restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86         int retval = ERROR_OK;
87         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88         struct armv7a_common *armv7a = target_to_armv7a(target);
89
90         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93                 retval = armv7a->arm.mcr(target, 15,
94                                 0, 0,   /* op1, op2 */
95                                 1, 0,   /* CRn, CRm */
96                                 cortex_a->cp15_control_reg);
97         }
98         return retval;
99 }
100
101 /*
102  * Set up ARM core for memory access.
103  * If !phys_access, switch to SVC mode and make sure MMU is on
104  * If phys_access, switch off mmu
105  */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108         struct armv7a_common *armv7a = target_to_armv7a(target);
109         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
110         int mmu_enabled = 0;
111
112         if (phys_access == 0) {
113                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
114                 cortex_a_mmu(target, &mmu_enabled);
115                 if (mmu_enabled)
116                         cortex_a_mmu_modify(target, 1);
117                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
118                         /* overwrite DACR to all-manager */
119                         armv7a->arm.mcr(target, 15,
120                                         0, 0, 3, 0,
121                                         0xFFFFFFFF);
122                 }
123         } else {
124                 cortex_a_mmu(target, &mmu_enabled);
125                 if (mmu_enabled)
126                         cortex_a_mmu_modify(target, 0);
127         }
128         return ERROR_OK;
129 }
130
131 /*
132  * Restore ARM core after memory access.
133  * If !phys_access, switch to previous mode
134  * If phys_access, restore MMU setting
135  */
136 static int cortex_a_post_memaccess(struct target *target, int phys_access)
137 {
138         struct armv7a_common *armv7a = target_to_armv7a(target);
139         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
140
141         if (phys_access == 0) {
142                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
143                         /* restore */
144                         armv7a->arm.mcr(target, 15,
145                                         0, 0, 3, 0,
146                                         cortex_a->cp15_dacr_reg);
147                 }
148                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
149         } else {
150                 int mmu_enabled = 0;
151                 cortex_a_mmu(target, &mmu_enabled);
152                 if (mmu_enabled)
153                         cortex_a_mmu_modify(target, 1);
154         }
155         return ERROR_OK;
156 }
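
/*
 * Illustrative sketch, not part of the original file: the intended pairing of
 * cortex_a_prep_memaccess()/cortex_a_post_memaccess() around a CPU-mode memory
 * access, mirroring how the memory read path further below uses them. The
 * wrapper name is invented for illustration only.
 */
static inline int cortex_a_example_read_virt_memory(struct target *target,
        uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
{
        /* virtual access: switch to SVC mode and make sure the MMU is on */
        int retval = cortex_a_prep_memaccess(target, 0);
        if (retval != ERROR_OK)
                return retval;

        retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);

        /* restore previous mode and MMU setting */
        cortex_a_post_memaccess(target, 0);
        return retval;
}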
157
158
159 /*  modify cp15_control_reg in order to enable or disable mmu for :
160  *  - virt2phys address conversion
161  *  - read or write memory in phys or virt address */
162 static int cortex_a_mmu_modify(struct target *target, int enable)
163 {
164         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
165         struct armv7a_common *armv7a = target_to_armv7a(target);
166         int retval = ERROR_OK;
167         int need_write = 0;
168
169         if (enable) {
170                 /*  refuse to enable the mmu if it was disabled when the target stopped */
171                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
172                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
173                         return ERROR_FAIL;
174                 }
175                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
176                         cortex_a->cp15_control_reg_curr |= 0x1U;
177                         need_write = 1;
178                 }
179         } else {
180                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
181                         cortex_a->cp15_control_reg_curr &= ~0x1U;
182                         need_write = 1;
183                 }
184         }
185
186         if (need_write) {
187                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
188                         enable ? "enable mmu" : "disable mmu",
189                         cortex_a->cp15_control_reg_curr);
190
191                 retval = armv7a->arm.mcr(target, 15,
192                                 0, 0,   /* op1, op2 */
193                                 1, 0,   /* CRn, CRm */
194                                 cortex_a->cp15_control_reg_curr);
195         }
196         return retval;
197 }
198
199 /*
200  * Cortex-A Basic debug access, very low level assumes state is saved
201  */
202 static int cortex_a_init_debug_access(struct target *target)
203 {
204         struct armv7a_common *armv7a = target_to_armv7a(target);
205         uint32_t dscr;
206         int retval;
207
208         /* lock memory-mapped access to debug registers to prevent
209          * software interference */
210         retval = mem_ap_write_u32(armv7a->debug_ap,
211                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
212         if (retval != ERROR_OK)
213                 return retval;
214
215         /* Disable cacheline fills and force cache write-through in debug state */
216         retval = mem_ap_write_u32(armv7a->debug_ap,
217                         armv7a->debug_base + CPUDBG_DSCCR, 0);
218         if (retval != ERROR_OK)
219                 return retval;
220
221         /* Disable TLB lookup and refill/eviction in debug state */
222         retval = mem_ap_write_u32(armv7a->debug_ap,
223                         armv7a->debug_base + CPUDBG_DSMCR, 0);
224         if (retval != ERROR_OK)
225                 return retval;
226
227         retval = dap_run(armv7a->debug_ap->dap);
228         if (retval != ERROR_OK)
229                 return retval;
230
231         /* Enabling of instruction execution in debug mode is done in debug_entry code */
232
233         /* Resync breakpoint registers */
234
235         /* Enable halt for breakpoint, watchpoint and vector catch */
236         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
237                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
238         if (retval != ERROR_OK)
239                 return retval;
240         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
241                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
242         if (retval != ERROR_OK)
243                 return retval;
244
245         /* Since this is likely called from init or reset, update target state information*/
246         return cortex_a_poll(target);
247 }
248
249 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
250 {
251         /* Waits until DSCR.InstrCompl becomes 1, indicating the instruction is done.
252          * Writes the final value of DSCR into *dscr. Pass force=true to force
253          * reading DSCR at least once. */
254         struct armv7a_common *armv7a = target_to_armv7a(target);
255         int64_t then = timeval_ms();
256         while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
257                 force = false;
258                 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
259                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
260                 if (retval != ERROR_OK) {
261                         LOG_ERROR("Could not read DSCR register");
262                         return retval;
263                 }
264                 if (timeval_ms() > then + 1000) {
265                         LOG_ERROR("Timeout waiting for InstrCompl=1");
266                         return ERROR_FAIL;
267                 }
268         }
269         return ERROR_OK;
270 }
271
272 /* To reduce needless round-trips, pass in a pointer to the current
273  * DSCR value.  Initialize it to zero if you just need to know the
274  * value on return from this function; or DSCR_INSTR_COMP if you
275  * happen to know that no instruction is pending.
276  */
277 static int cortex_a_exec_opcode(struct target *target,
278         uint32_t opcode, uint32_t *dscr_p)
279 {
280         uint32_t dscr;
281         int retval;
282         struct armv7a_common *armv7a = target_to_armv7a(target);
283
284         dscr = dscr_p ? *dscr_p : 0;
285
286         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
287
288         /* Wait for InstrCompl bit to be set */
289         retval = cortex_a_wait_instrcmpl(target, &dscr, false);
290         if (retval != ERROR_OK)
291                 return retval;
292
293         retval = mem_ap_write_u32(armv7a->debug_ap,
294                         armv7a->debug_base + CPUDBG_ITR, opcode);
295         if (retval != ERROR_OK)
296                 return retval;
297
298         int64_t then = timeval_ms();
299         do {
300                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
301                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
302                 if (retval != ERROR_OK) {
303                         LOG_ERROR("Could not read DSCR register");
304                         return retval;
305                 }
306                 if (timeval_ms() > then + 1000) {
307                         LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
308                         return ERROR_FAIL;
309                 }
310         } while ((dscr & DSCR_INSTR_COMP) == 0);        /* Wait for InstrCompl bit to be set */
311
312         if (dscr_p)
313                 *dscr_p = dscr;
314
315         return retval;
316 }
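
/*
 * Illustrative sketch, not part of the original file: typical call pattern for
 * cortex_a_exec_opcode(). Seeding dscr with DSCR_INSTR_COMP, as the comment
 * above suggests, skips the initial DSCR read when the caller knows no
 * instruction is pending; the opcode here is the CP15 prefetch flush also used
 * by cortex_a_instr_cpsr_sync() below. The helper name is invented.
 */
static inline int cortex_a_example_prefetch_flush(struct target *target)
{
        uint32_t dscr = DSCR_INSTR_COMP;        /* no instruction pending */

        return cortex_a_exec_opcode(target,
                        ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
                        &dscr);
}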
317
318 /* Write to memory mapped registers directly with no cache or mmu handling */
319 static int cortex_a_dap_write_memap_register_u32(struct target *target,
320         uint32_t address,
321         uint32_t value)
322 {
323         int retval;
324         struct armv7a_common *armv7a = target_to_armv7a(target);
325
326         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
327
328         return retval;
329 }
330
331 /*
332  * Cortex-A implementation of Debug Programmer's Model
333  *
334  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
335  * so there's no need to poll for it before executing an instruction.
336  *
337  * NOTE that in several of these cases the "stall" mode might be useful.
338  * It'd let us queue a few operations together... prepare/finish might
339  * be the places to enable/disable that mode.
340  */
341
342 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
343 {
344         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
345 }
346
347 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
348 {
349         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
350         return mem_ap_write_u32(a->armv7a_common.debug_ap,
351                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
352 }
353
354 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
355         uint32_t *dscr_p)
356 {
357         uint32_t dscr = DSCR_INSTR_COMP;
358         int retval;
359
360         if (dscr_p)
361                 dscr = *dscr_p;
362
363         /* Wait for DTRRXfull */
364         int64_t then = timeval_ms();
365         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
366                 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
367                                 a->armv7a_common.debug_base + CPUDBG_DSCR,
368                                 &dscr);
369                 if (retval != ERROR_OK)
370                         return retval;
371                 if (timeval_ms() > then + 1000) {
372                         LOG_ERROR("Timeout waiting for read dcc");
373                         return ERROR_FAIL;
374                 }
375         }
376
377         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
378                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
379         if (retval != ERROR_OK)
380                 return retval;
381         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
382
383         if (dscr_p)
384                 *dscr_p = dscr;
385
386         return retval;
387 }
388
389 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
390 {
391         struct cortex_a_common *a = dpm_to_a(dpm);
392         uint32_t dscr;
393         int retval;
394
395         /* set up invariant:  INSTR_COMP is set after every DPM operation */
396         int64_t then = timeval_ms();
397         for (;; ) {
398                 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
399                                 a->armv7a_common.debug_base + CPUDBG_DSCR,
400                                 &dscr);
401                 if (retval != ERROR_OK)
402                         return retval;
403                 if ((dscr & DSCR_INSTR_COMP) != 0)
404                         break;
405                 if (timeval_ms() > then + 1000) {
406                         LOG_ERROR("Timeout waiting for dpm prepare");
407                         return ERROR_FAIL;
408                 }
409         }
410
411         /* this "should never happen" ... */
412         if (dscr & DSCR_DTR_RX_FULL) {
413                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
414                 /* Clear DCCRX */
415                 retval = cortex_a_exec_opcode(
416                                 a->armv7a_common.arm.target,
417                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
418                                 &dscr);
419                 if (retval != ERROR_OK)
420                         return retval;
421         }
422
423         return retval;
424 }
425
426 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
427 {
428         /* REVISIT what could be done here? */
429         return ERROR_OK;
430 }
431
432 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
433         uint32_t opcode, uint32_t data)
434 {
435         struct cortex_a_common *a = dpm_to_a(dpm);
436         int retval;
437         uint32_t dscr = DSCR_INSTR_COMP;
438
439         retval = cortex_a_write_dcc(a, data);
440         if (retval != ERROR_OK)
441                 return retval;
442
443         return cortex_a_exec_opcode(
444                         a->armv7a_common.arm.target,
445                         opcode,
446                         &dscr);
447 }
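
/*
 * Illustrative sketch, not part of the original file: loading a core register
 * other than r0 through the DCC, here r1 via "MRC p14, 0, R1, c0, c5, 0",
 * mirroring the r0 variant used throughout this file. The helper name is an
 * assumption for illustration only.
 */
static inline int cortex_a_example_load_r1(struct arm_dpm *dpm, uint32_t value)
{
        return cortex_a_instr_write_data_dcc(dpm,
                        ARMV4_5_MRC(14, 0, 1, 0, 5, 0), value);
}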
448
449 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
450         uint32_t opcode, uint32_t data)
451 {
452         struct cortex_a_common *a = dpm_to_a(dpm);
453         uint32_t dscr = DSCR_INSTR_COMP;
454         int retval;
455
456         retval = cortex_a_write_dcc(a, data);
457         if (retval != ERROR_OK)
458                 return retval;
459
460         /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
461         retval = cortex_a_exec_opcode(
462                         a->armv7a_common.arm.target,
463                         ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
464                         &dscr);
465         if (retval != ERROR_OK)
466                 return retval;
467
468         /* then the opcode, taking data from R0 */
469         retval = cortex_a_exec_opcode(
470                         a->armv7a_common.arm.target,
471                         opcode,
472                         &dscr);
473
474         return retval;
475 }
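
/*
 * Illustrative sketch, not part of the original file: how the r0 helper above
 * can load a CP15 register. "MCR p15, 0, R0, c3, c0, 0" writes DACR from r0,
 * matching the DACR accesses done elsewhere in this file via arm.mcr().
 * The helper name is invented for illustration only.
 */
static inline int cortex_a_example_write_dacr(struct arm_dpm *dpm, uint32_t dacr)
{
        return cortex_a_instr_write_data_r0(dpm,
                        ARMV4_5_MCR(15, 0, 0, 3, 0, 0), dacr);
}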
476
477 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
478 {
479         struct target *target = dpm->arm->target;
480         uint32_t dscr = DSCR_INSTR_COMP;
481
482         /* "Prefetch flush" after modifying execution status in CPSR */
483         return cortex_a_exec_opcode(target,
484                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
485                         &dscr);
486 }
487
488 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
489         uint32_t opcode, uint32_t *data)
490 {
491         struct cortex_a_common *a = dpm_to_a(dpm);
492         int retval;
493         uint32_t dscr = DSCR_INSTR_COMP;
494
495         /* the opcode, writing data to DCC */
496         retval = cortex_a_exec_opcode(
497                         a->armv7a_common.arm.target,
498                         opcode,
499                         &dscr);
500         if (retval != ERROR_OK)
501                 return retval;
502
503         return cortex_a_read_dcc(a, data, &dscr);
504 }
505
506
507 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
508         uint32_t opcode, uint32_t *data)
509 {
510         struct cortex_a_common *a = dpm_to_a(dpm);
511         uint32_t dscr = DSCR_INSTR_COMP;
512         int retval;
513
514         /* the opcode, writing data to R0 */
515         retval = cortex_a_exec_opcode(
516                         a->armv7a_common.arm.target,
517                         opcode,
518                         &dscr);
519         if (retval != ERROR_OK)
520                 return retval;
521
522         /* write R0 to DCC */
523         retval = cortex_a_exec_opcode(
524                         a->armv7a_common.arm.target,
525                         ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
526                         &dscr);
527         if (retval != ERROR_OK)
528                 return retval;
529
530         return cortex_a_read_dcc(a, data, &dscr);
531 }
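
/*
 * Illustrative sketch, not part of the original file: reading a CP15 register
 * through r0 with the helper above. "MRC p15, 0, R0, c1, c0, 0" reads the
 * system control register, the same access cortex_a_post_debug_entry() below
 * performs via arm.mrc(). The helper name is invented.
 */
static inline int cortex_a_example_read_sctlr(struct arm_dpm *dpm, uint32_t *sctlr)
{
        return cortex_a_instr_read_data_r0(dpm,
                        ARMV4_5_MRC(15, 0, 0, 1, 0, 0), sctlr);
}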
532
533 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
534         uint32_t addr, uint32_t control)
535 {
536         struct cortex_a_common *a = dpm_to_a(dpm);
537         uint32_t vr = a->armv7a_common.debug_base;
538         uint32_t cr = a->armv7a_common.debug_base;
539         int retval;
540
541         switch (index_t) {
542                 case 0 ... 15:  /* breakpoints */
543                         vr += CPUDBG_BVR_BASE;
544                         cr += CPUDBG_BCR_BASE;
545                         break;
546                 case 16 ... 31: /* watchpoints */
547                         vr += CPUDBG_WVR_BASE;
548                         cr += CPUDBG_WCR_BASE;
549                         index_t -= 16;
550                         break;
551                 default:
552                         return ERROR_FAIL;
553         }
554         vr += 4 * index_t;
555         cr += 4 * index_t;
556
557         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
558                 (unsigned) vr, (unsigned) cr);
559
560         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
561                         vr, addr);
562         if (retval != ERROR_OK)
563                 return retval;
564         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
565                         cr, control);
566         return retval;
567 }
568
569 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
570 {
571         struct cortex_a_common *a = dpm_to_a(dpm);
572         uint32_t cr;
573
574         switch (index_t) {
575                 case 0 ... 15:
576                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
577                         break;
578                 case 16 ... 31:
579                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
580                         index_t -= 16;
581                         break;
582                 default:
583                         return ERROR_FAIL;
584         }
585         cr += 4 * index_t;
586
587         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
588
589         /* clear control register */
590         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
591 }
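
/*
 * Illustrative sketch, not part of the original file: the index convention
 * used by the two helpers above, where breakpoint units occupy indices 0..15
 * and watchpoint units start at index 16. The control value here is only a
 * placeholder; real values are composed by the generic arm_dpm code.
 */
static inline int cortex_a_example_first_watchpoint(struct arm_dpm *dpm,
        uint32_t addr, uint32_t control)
{
        return cortex_a_bpwp_enable(dpm, 16 /* first watchpoint unit */,
                        addr, control);
}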
592
593 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
594 {
595         struct arm_dpm *dpm = &a->armv7a_common.dpm;
596         int retval;
597
598         dpm->arm = &a->armv7a_common.arm;
599         dpm->didr = didr;
600
601         dpm->prepare = cortex_a_dpm_prepare;
602         dpm->finish = cortex_a_dpm_finish;
603
604         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
605         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
606         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
607
608         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
609         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
610
611         dpm->bpwp_enable = cortex_a_bpwp_enable;
612         dpm->bpwp_disable = cortex_a_bpwp_disable;
613
614         retval = arm_dpm_setup(dpm);
615         if (retval == ERROR_OK)
616                 retval = arm_dpm_initialize(dpm);
617
618         return retval;
619 }
620 static struct target *get_cortex_a(struct target *target, int32_t coreid)
621 {
622         struct target_list *head;
623         struct target *curr;
624
625         head = target->head;
626         while (head != (struct target_list *)NULL) {
627                 curr = head->target;
628                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
629                         return curr;
630                 head = head->next;
631         }
632         return target;
633 }
634 static int cortex_a_halt(struct target *target);
635
636 static int cortex_a_halt_smp(struct target *target)
637 {
638         int retval = 0;
639         struct target_list *head;
640         struct target *curr;
641         head = target->head;
642         while (head != (struct target_list *)NULL) {
643                 curr = head->target;
644                 if ((curr != target) && (curr->state != TARGET_HALTED)
645                         && target_was_examined(curr))
646                         retval += cortex_a_halt(curr);
647                 head = head->next;
648         }
649         return retval;
650 }
651
652 static int update_halt_gdb(struct target *target)
653 {
654         struct target *gdb_target = NULL;
655         struct target_list *head;
656         struct target *curr;
657         int retval = 0;
658
659         if (target->gdb_service && target->gdb_service->core[0] == -1) {
660                 target->gdb_service->target = target;
661                 target->gdb_service->core[0] = target->coreid;
662                 retval += cortex_a_halt_smp(target);
663         }
664
665         if (target->gdb_service)
666                 gdb_target = target->gdb_service->target;
667
668         foreach_smp_target(head, target->head) {
669                 curr = head->target;
670                 /* skip calling context */
671                 if (curr == target)
672                         continue;
673                 if (!target_was_examined(curr))
674                         continue;
675                 /* skip targets that were already halted */
676                 if (curr->state == TARGET_HALTED)
677                         continue;
678                 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
679                 if (curr == gdb_target)
680                         continue;
681
682                 /* avoid recursion in cortex_a_poll() */
683                 curr->smp = 0;
684                 cortex_a_poll(curr);
685                 curr->smp = 1;
686         }
687
688         /* after all targets were updated, poll the gdb serving target */
689         if (gdb_target != NULL && gdb_target != target)
690                 cortex_a_poll(gdb_target);
691         return retval;
692 }
693
694 /*
695  * Cortex-A Run control
696  */
697
698 static int cortex_a_poll(struct target *target)
699 {
700         int retval = ERROR_OK;
701         uint32_t dscr;
702         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
703         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
704         enum target_state prev_target_state = target->state;
705         /*  toggling to another core is done by gdb as follows: */
706         /*  maint packet J core_id */
707         /*  continue */
708         /*  the next poll triggers a halt event sent to gdb */
709         if ((target->state == TARGET_HALTED) && (target->smp) &&
710                 (target->gdb_service) &&
711                 (target->gdb_service->target == NULL)) {
712                 target->gdb_service->target =
713                         get_cortex_a(target, target->gdb_service->core[1]);
714                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
715                 return retval;
716         }
717         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
718                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
719         if (retval != ERROR_OK)
720                 return retval;
721         cortex_a->cpudbg_dscr = dscr;
722
723         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
724                 if (prev_target_state != TARGET_HALTED) {
725                         /* We have a halting debug event */
726                         LOG_DEBUG("Target halted");
727                         target->state = TARGET_HALTED;
728
729                         retval = cortex_a_debug_entry(target);
730                         if (retval != ERROR_OK)
731                                 return retval;
732
733                         if (target->smp) {
734                                 retval = update_halt_gdb(target);
735                                 if (retval != ERROR_OK)
736                                         return retval;
737                         }
738
739                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
740                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
741                         } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
742                                 if (arm_semihosting(target, &retval) != 0)
743                                         return retval;
744
745                                 target_call_event_callbacks(target,
746                                         TARGET_EVENT_HALTED);
747                         }
748                 }
749         } else
750                 target->state = TARGET_RUNNING;
751
752         return retval;
753 }
754
755 static int cortex_a_halt(struct target *target)
756 {
757         int retval = ERROR_OK;
758         uint32_t dscr;
759         struct armv7a_common *armv7a = target_to_armv7a(target);
760
761         /*
762          * Tell the core to be halted by writing DRCR with 0x1
763          * and then wait for the core to be halted.
764          */
765         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
766                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
767         if (retval != ERROR_OK)
768                 return retval;
769
770         int64_t then = timeval_ms();
771         for (;; ) {
772                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
773                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
774                 if (retval != ERROR_OK)
775                         return retval;
776                 if ((dscr & DSCR_CORE_HALTED) != 0)
777                         break;
778                 if (timeval_ms() > then + 1000) {
779                         LOG_ERROR("Timeout waiting for halt");
780                         return ERROR_FAIL;
781                 }
782         }
783
784         target->debug_reason = DBG_REASON_DBGRQ;
785
786         return ERROR_OK;
787 }
788
789 static int cortex_a_internal_restore(struct target *target, int current,
790         target_addr_t *address, int handle_breakpoints, int debug_execution)
791 {
792         struct armv7a_common *armv7a = target_to_armv7a(target);
793         struct arm *arm = &armv7a->arm;
794         int retval;
795         uint32_t resume_pc;
796
797         if (!debug_execution)
798                 target_free_all_working_areas(target);
799
800 #if 0
801         if (debug_execution) {
802                 /* Disable interrupts */
803                 /* We disable interrupts in the PRIMASK register instead of
804                  * masking with C_MASKINTS,
805                  * This is probably the same issue as Cortex-M3 Errata 377493:
806                  * C_MASKINTS in parallel with disabled interrupts can cause
807                  * local faults to not be taken. */
808                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
809                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
810                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
811
812                 /* Make sure we are in Thumb mode */
813                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
814                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
815                         32) | (1 << 24));
816                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
817                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
818         }
819 #endif
820
821         /* current = 1: continue on current pc, otherwise continue at <address> */
822         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
823         if (!current)
824                 resume_pc = *address;
825         else
826                 *address = resume_pc;
827
828         /* Make sure that the ARMv7 gdb thumb fixups do not
829          * kill the return address
830          */
831         switch (arm->core_state) {
832                 case ARM_STATE_ARM:
833                         resume_pc &= 0xFFFFFFFC;
834                         break;
835                 case ARM_STATE_THUMB:
836                 case ARM_STATE_THUMB_EE:
837                         /* When the return address is loaded into PC
838                          * bit 0 must be 1 to stay in Thumb state
839                          */
840                         resume_pc |= 0x1;
841                         break;
842                 case ARM_STATE_JAZELLE:
843                         LOG_ERROR("How do I resume into Jazelle state??");
844                         return ERROR_FAIL;
845                 case ARM_STATE_AARCH64:
846                         LOG_ERROR("Shouldn't be in AARCH64 state");
847                         return ERROR_FAIL;
848         }
849         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
850         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
851         arm->pc->dirty = 1;
852         arm->pc->valid = 1;
853
854         /* restore dpm_mode at system halt */
855         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
856         /* call it now, before restoring context, because it uses cpu
857          * register r0 for restoring cp15 control register */
858         retval = cortex_a_restore_cp15_control_reg(target);
859         if (retval != ERROR_OK)
860                 return retval;
861         retval = cortex_a_restore_context(target, handle_breakpoints);
862         if (retval != ERROR_OK)
863                 return retval;
864         target->debug_reason = DBG_REASON_NOTHALTED;
865         target->state = TARGET_RUNNING;
866
867         /* registers are now invalid */
868         register_cache_invalidate(arm->core_cache);
869
870 #if 0
871         /* the front-end may request us not to handle breakpoints */
872         if (handle_breakpoints) {
873                 /* Single step past breakpoint at current address */
874                 breakpoint = breakpoint_find(target, resume_pc);
875                 if (breakpoint) {
876                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
877                         cortex_m3_unset_breakpoint(target, breakpoint);
878                         cortex_m3_single_step_core(target);
879                         cortex_m3_set_breakpoint(target, breakpoint);
880                 }
881         }
882
883 #endif
884         return retval;
885 }
886
887 static int cortex_a_internal_restart(struct target *target)
888 {
889         struct armv7a_common *armv7a = target_to_armv7a(target);
890         struct arm *arm = &armv7a->arm;
891         int retval;
892         uint32_t dscr;
893         /*
894          * Restart core and wait for it to be started.  Clear ITRen and sticky
895          * exception flags: see ARMv7 ARM, C5.9.
896          *
897          * REVISIT: for single stepping, we probably want to
898          * disable IRQs by default, with optional override...
899          */
900
901         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
902                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
903         if (retval != ERROR_OK)
904                 return retval;
905
906         if ((dscr & DSCR_INSTR_COMP) == 0)
907                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
908
909         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
910                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
911         if (retval != ERROR_OK)
912                 return retval;
913
914         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
915                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
916                         DRCR_CLEAR_EXCEPTIONS);
917         if (retval != ERROR_OK)
918                 return retval;
919
920         int64_t then = timeval_ms();
921         for (;; ) {
922                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
923                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
924                 if (retval != ERROR_OK)
925                         return retval;
926                 if ((dscr & DSCR_CORE_RESTARTED) != 0)
927                         break;
928                 if (timeval_ms() > then + 1000) {
929                         LOG_ERROR("Timeout waiting for resume");
930                         return ERROR_FAIL;
931                 }
932         }
933
934         target->debug_reason = DBG_REASON_NOTHALTED;
935         target->state = TARGET_RUNNING;
936
937         /* registers are now invalid */
938         register_cache_invalidate(arm->core_cache);
939
940         return ERROR_OK;
941 }
942
943 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
944 {
945         int retval = 0;
946         struct target_list *head;
947         struct target *curr;
948         target_addr_t address;
949         head = target->head;
950         while (head != (struct target_list *)NULL) {
951                 curr = head->target;
952                 if ((curr != target) && (curr->state != TARGET_RUNNING)
953                         && target_was_examined(curr)) {
954                         /*  resume current address , not in step mode */
955                         retval += cortex_a_internal_restore(curr, 1, &address,
956                                         handle_breakpoints, 0);
957                         retval += cortex_a_internal_restart(curr);
958                 }
959                 head = head->next;
960
961         }
962         return retval;
963 }
964
965 static int cortex_a_resume(struct target *target, int current,
966         target_addr_t address, int handle_breakpoints, int debug_execution)
967 {
968         int retval = 0;
969         /* dummy resume for smp toggle in order to reduce gdb impact  */
970         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
971                 /*   simulate a start and halt of target */
972                 target->gdb_service->target = NULL;
973                 target->gdb_service->core[0] = target->gdb_service->core[1];
974                 /*  fake resume: at the next poll we serve target core[1], see poll */
975                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
976                 return 0;
977         }
978         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
979         if (target->smp) {
980                 target->gdb_service->core[0] = -1;
981                 retval = cortex_a_restore_smp(target, handle_breakpoints);
982                 if (retval != ERROR_OK)
983                         return retval;
984         }
985         cortex_a_internal_restart(target);
986
987         if (!debug_execution) {
988                 target->state = TARGET_RUNNING;
989                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
990                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
991         } else {
992                 target->state = TARGET_DEBUG_RUNNING;
993                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
994                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
995         }
996
997         return ERROR_OK;
998 }
999
1000 static int cortex_a_debug_entry(struct target *target)
1001 {
1002         uint32_t dscr;
1003         int retval = ERROR_OK;
1004         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1005         struct armv7a_common *armv7a = target_to_armv7a(target);
1006         struct arm *arm = &armv7a->arm;
1007
1008         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1009
1010         /* REVISIT surely we should not re-read DSCR !! */
1011         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1012                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1013         if (retval != ERROR_OK)
1014                 return retval;
1015
1016         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1017          * imprecise data aborts get discarded by issuing a Data
1018          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1019          */
1020
1021         /* Enable the ITR execution once we are in debug mode */
1022         dscr |= DSCR_ITR_EN;
1023         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1024                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1025         if (retval != ERROR_OK)
1026                 return retval;
1027
1028         /* Examine debug reason */
1029         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1030
1031         /* save address of instruction that triggered the watchpoint? */
1032         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1033                 uint32_t wfar;
1034
1035                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1036                                 armv7a->debug_base + CPUDBG_WFAR,
1037                                 &wfar);
1038                 if (retval != ERROR_OK)
1039                         return retval;
1040                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1041         }
1042
1043         /* First load register accessible through core debug port */
1044         retval = arm_dpm_read_current_registers(&armv7a->dpm);
1045         if (retval != ERROR_OK)
1046                 return retval;
1047
1048         if (arm->spsr) {
1049                 /* read SPSR */
1050                 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1051                 if (retval != ERROR_OK)
1052                         return retval;
1053         }
1054
1055 #if 0
1056 /* TODO, Move this */
1057         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1058         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1059         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1060
1061         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1062         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1063
1064         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1065         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1066 #endif
1067
1068         /* Are we in an exception handler */
1069 /*      armv4_5->exception_number = 0; */
1070         if (armv7a->post_debug_entry) {
1071                 retval = armv7a->post_debug_entry(target);
1072                 if (retval != ERROR_OK)
1073                         return retval;
1074         }
1075
1076         return retval;
1077 }
1078
1079 static int cortex_a_post_debug_entry(struct target *target)
1080 {
1081         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1082         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1083         int retval;
1084
1085         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1086         retval = armv7a->arm.mrc(target, 15,
1087                         0, 0,   /* op1, op2 */
1088                         1, 0,   /* CRn, CRm */
1089                         &cortex_a->cp15_control_reg);
1090         if (retval != ERROR_OK)
1091                 return retval;
1092         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1093         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1094
1095         if (!armv7a->is_armv7r)
1096                 armv7a_read_ttbcr(target);
1097
1098         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1099                 armv7a_identify_cache(target);
1100
1101         if (armv7a->is_armv7r) {
1102                 armv7a->armv7a_mmu.mmu_enabled = 0;
1103         } else {
1104                 armv7a->armv7a_mmu.mmu_enabled =
1105                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1106         }
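        /* SCTLR.C (bit 2) is the data/unified cache enable,
         * SCTLR.I (bit 12) the instruction cache enable */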
1107         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1108                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1109         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1110                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1111         cortex_a->curr_mode = armv7a->arm.core_mode;
1112
1113         /* switch to SVC mode to read DACR */
1114         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1115         armv7a->arm.mrc(target, 15,
1116                         0, 0, 3, 0,
1117                         &cortex_a->cp15_dacr_reg);
1118
1119         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1120                         cortex_a->cp15_dacr_reg);
1121
1122         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1123         return ERROR_OK;
1124 }
1125
1126 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1127 {
1128         struct armv7a_common *armv7a = target_to_armv7a(target);
1129         uint32_t dscr;
1130
1131         /* Read DSCR */
1132         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1133                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1134         if (ERROR_OK != retval)
1135                 return retval;
1136
1137         /* clear bitfield */
1138         dscr &= ~bit_mask;
1139         /* put new value */
1140         dscr |= value & bit_mask;
1141
1142         /* write new DSCR */
1143         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1144                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1145         return retval;
1146 }
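
/*
 * Illustrative sketch, not part of the original file: cortex_a_set_dscr_bits()
 * is used below by cortex_a_step() to mask interrupts around a single step;
 * the same read-modify-write pattern works for any DSCR bitfield. The helper
 * name is invented for illustration only.
 */
static inline int cortex_a_example_mask_interrupts(struct target *target, bool mask)
{
        return cortex_a_set_dscr_bits(target, DSCR_INT_DIS,
                        mask ? DSCR_INT_DIS : 0);
}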
1147
1148 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1149         int handle_breakpoints)
1150 {
1151         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1152         struct armv7a_common *armv7a = target_to_armv7a(target);
1153         struct arm *arm = &armv7a->arm;
1154         struct breakpoint *breakpoint = NULL;
1155         struct breakpoint stepbreakpoint;
1156         struct reg *r;
1157         int retval;
1158
1159         if (target->state != TARGET_HALTED) {
1160                 LOG_WARNING("target not halted");
1161                 return ERROR_TARGET_NOT_HALTED;
1162         }
1163
1164         /* current = 1: continue on current pc, otherwise continue at <address> */
1165         r = arm->pc;
1166         if (!current)
1167                 buf_set_u32(r->value, 0, 32, address);
1168         else
1169                 address = buf_get_u32(r->value, 0, 32);
1170
1171         /* The front-end may request us not to handle breakpoints.
1172          * But since Cortex-A uses breakpoint for single step,
1173          * we MUST handle breakpoints.
1174          */
1175         handle_breakpoints = 1;
1176         if (handle_breakpoints) {
1177                 breakpoint = breakpoint_find(target, address);
1178                 if (breakpoint)
1179                         cortex_a_unset_breakpoint(target, breakpoint);
1180         }
1181
1182         /* Setup single step breakpoint */
1183         stepbreakpoint.address = address;
1184         stepbreakpoint.asid = 0;
1185         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1186                 ? 2 : 4;
1187         stepbreakpoint.type = BKPT_HARD;
1188         stepbreakpoint.set = 0;
1189
1190         /* Disable interrupts during single step if requested */
1191         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1192                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1193                 if (ERROR_OK != retval)
1194                         return retval;
1195         }
1196
1197         /* Break on IVA mismatch */
1198         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1199
1200         target->debug_reason = DBG_REASON_SINGLESTEP;
1201
1202         retval = cortex_a_resume(target, 1, address, 0, 0);
1203         if (retval != ERROR_OK)
1204                 return retval;
1205
1206         int64_t then = timeval_ms();
1207         while (target->state != TARGET_HALTED) {
1208                 retval = cortex_a_poll(target);
1209                 if (retval != ERROR_OK)
1210                         return retval;
1211                 if (timeval_ms() > then + 1000) {
1212                         LOG_ERROR("timeout waiting for target halt");
1213                         return ERROR_FAIL;
1214                 }
1215         }
1216
1217         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1218
1219         /* Re-enable interrupts if they were disabled */
1220         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1221                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1222                 if (ERROR_OK != retval)
1223                         return retval;
1224         }
1225
1226
1227         target->debug_reason = DBG_REASON_BREAKPOINT;
1228
1229         if (breakpoint)
1230                 cortex_a_set_breakpoint(target, breakpoint, 0);
1231
1232         if (target->state == TARGET_HALTED)
1233                 LOG_DEBUG("target stepped");
1234
1235         return ERROR_OK;
1236 }
1237
1238 static int cortex_a_restore_context(struct target *target, bool bpwp)
1239 {
1240         struct armv7a_common *armv7a = target_to_armv7a(target);
1241
1242         LOG_DEBUG(" ");
1243
1244         if (armv7a->pre_restore_context)
1245                 armv7a->pre_restore_context(target);
1246
1247         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1248 }
1249
1250 /*
1251  * Cortex-A Breakpoint and watchpoint functions
1252  */
1253
1254 /* Setup hardware Breakpoint Register Pair */
1255 static int cortex_a_set_breakpoint(struct target *target,
1256         struct breakpoint *breakpoint, uint8_t matchmode)
1257 {
1258         int retval;
1259         int brp_i = 0;
1260         uint32_t control;
1261         uint8_t byte_addr_select = 0x0F;
1262         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1263         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1264         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1265
1266         if (breakpoint->set) {
1267                 LOG_WARNING("breakpoint already set");
1268                 return ERROR_OK;
1269         }
1270
1271         if (breakpoint->type == BKPT_HARD) {
1272                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1273                         brp_i++;
1274                 if (brp_i >= cortex_a->brp_num) {
1275                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1276                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1277                 }
1278                 breakpoint->set = brp_i + 1;
1279                 if (breakpoint->length == 2)
1280                         byte_addr_select = (3 << (breakpoint->address & 0x02));
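                /* DBGBCR layout: breakpoint type in [22:20] (matchmode),
                 * byte address select in [8:5], privilege control in [2:1],
                 * enable in bit 0 */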
1281                 control = ((matchmode & 0x7) << 20)
1282                         | (byte_addr_select << 5)
1283                         | (3 << 1) | 1;
1284                 brp_list[brp_i].used = 1;
1285                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1286                 brp_list[brp_i].control = control;
1287                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1288                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1289                                 brp_list[brp_i].value);
1290                 if (retval != ERROR_OK)
1291                         return retval;
1292                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1293                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1294                                 brp_list[brp_i].control);
1295                 if (retval != ERROR_OK)
1296                         return retval;
1297                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1298                         brp_list[brp_i].control,
1299                         brp_list[brp_i].value);
1300         } else if (breakpoint->type == BKPT_SOFT) {
1301                 uint8_t code[4];
1302                 /* length == 2: Thumb breakpoint */
1303                 if (breakpoint->length == 2)
1304                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1305                 else
1306                 /* length == 3: Thumb-2 breakpoint, actual encoding is
1307                  * a regular Thumb BKPT instruction but we replace a
1308                  * 32bit Thumb-2 instruction, so fix-up the breakpoint
1309                  * length
1310                  */
1311                 if (breakpoint->length == 3) {
1312                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1313                         breakpoint->length = 4;
1314                 } else
1315                         /* length == 4, normal ARM breakpoint */
1316                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1317
1318                 retval = target_read_memory(target,
1319                                 breakpoint->address & 0xFFFFFFFE,
1320                                 breakpoint->length, 1,
1321                                 breakpoint->orig_instr);
1322                 if (retval != ERROR_OK)
1323                         return retval;
1324
1325                 /* make sure data cache is cleaned & invalidated down to PoC */
1326                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1327                         armv7a_cache_flush_virt(target, breakpoint->address,
1328                                                 breakpoint->length);
1329                 }
1330
1331                 retval = target_write_memory(target,
1332                                 breakpoint->address & 0xFFFFFFFE,
1333                                 breakpoint->length, 1, code);
1334                 if (retval != ERROR_OK)
1335                         return retval;
1336
1337                 /* update i-cache at breakpoint location */
1338                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1339                                         breakpoint->length);
1340                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1341                                                  breakpoint->length);
1342
1343                 breakpoint->set = 0x11; /* any nonzero value will do */
1344         }
1345
1346         return ERROR_OK;
1347 }
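/* Worked example of the BCR value built above (illustrative numbers only):
 * with exact matchmode 0 and byte address select 0x0f (a 4-byte ARM
 * breakpoint), control = (0x0 << 20) | (0x0f << 5) | (3 << 1) | 1 = 0x1e7;
 * for a Thumb breakpoint at an address with bit 1 set, the byte address
 * select becomes (3 << 2) = 0x0c and control = 0x187. Bits [22:20] hold the
 * matchmode, [8:5] the byte address select, [2:1] enable matching in both
 * privileged and user modes, and bit 0 enables the comparator.
 */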
1348
1349 static int cortex_a_set_context_breakpoint(struct target *target,
1350         struct breakpoint *breakpoint, uint8_t matchmode)
1351 {
1352         int retval = ERROR_FAIL;
1353         int brp_i = 0;
1354         uint32_t control;
1355         uint8_t byte_addr_select = 0x0F;
1356         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1357         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1358         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1359
1360         if (breakpoint->set) {
1361                 LOG_WARNING("breakpoint already set");
1362                 return retval;
1363         }
1364         /*check available context BRPs*/
1365         while ((brp_list[brp_i].used ||
1366                 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1367                 brp_i++;
1368
1369         if (brp_i >= cortex_a->brp_num) {
1370                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1371                 return ERROR_FAIL;
1372         }
1373
1374         breakpoint->set = brp_i + 1;
1375         control = ((matchmode & 0x7) << 20)
1376                 | (byte_addr_select << 5)
1377                 | (3 << 1) | 1;
1378         brp_list[brp_i].used = 1;
1379         brp_list[brp_i].value = (breakpoint->asid);
1380         brp_list[brp_i].control = control;
1381         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1382                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1383                         brp_list[brp_i].value);
1384         if (retval != ERROR_OK)
1385                 return retval;
1386         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1387                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1388                         brp_list[brp_i].control);
1389         if (retval != ERROR_OK)
1390                 return retval;
1391         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1392                 brp_list[brp_i].control,
1393                 brp_list[brp_i].value);
1394         return ERROR_OK;
1395
1396 }
1397
1398 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1399 {
1400         int retval = ERROR_FAIL;
1401         int brp_1 = 0;  /* holds the contextID pair */
1402         int brp_2 = 0;  /* holds the IVA pair */
1403         uint32_t control_CTX, control_IVA;
1404         uint8_t CTX_byte_addr_select = 0x0F;
1405         uint8_t IVA_byte_addr_select = 0x0F;
1406         uint8_t CTX_matchmode = 0x03;
1407         uint8_t IVA_matchmode = 0x01;
1408         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1409         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1410         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1411
1412         if (breakpoint->set) {
1413                 LOG_WARNING("breakpoint already set");
1414                 return retval;
1415         }
1416         /*check available context BRPs*/
1417         while ((brp_list[brp_1].used ||
1418                 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1419                 brp_1++;
1420
1421         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1422         if (brp_1 >= cortex_a->brp_num) {
1423                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1424                 return ERROR_FAIL;
1425         }
1426
1427         while ((brp_list[brp_2].used ||
1428                 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1429                 brp_2++;
1430
1431         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1432         if (brp_2 >= cortex_a->brp_num) {
1433                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1434                 return ERROR_FAIL;
1435         }
1436
1437         breakpoint->set = brp_1 + 1;
1438         breakpoint->linked_BRP = brp_2;
1439         control_CTX = ((CTX_matchmode & 0x7) << 20)
1440                 | (brp_2 << 16)
1441                 | (0 << 14)
1442                 | (CTX_byte_addr_select << 5)
1443                 | (3 << 1) | 1;
1444         brp_list[brp_1].used = 1;
1445         brp_list[brp_1].value = (breakpoint->asid);
1446         brp_list[brp_1].control = control_CTX;
1447         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1448                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1449                         brp_list[brp_1].value);
1450         if (retval != ERROR_OK)
1451                 return retval;
1452         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1453                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1454                         brp_list[brp_1].control);
1455         if (retval != ERROR_OK)
1456                 return retval;
1457
1458         control_IVA = ((IVA_matchmode & 0x7) << 20)
1459                 | (brp_1 << 16)
1460                 | (IVA_byte_addr_select << 5)
1461                 | (3 << 1) | 1;
1462         brp_list[brp_2].used = 1;
1463         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1464         brp_list[brp_2].control = control_IVA;
1465         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1466                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1467                         brp_list[brp_2].value);
1468         if (retval != ERROR_OK)
1469                 return retval;
1470         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1471                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1472                         brp_list[brp_2].control);
1473         if (retval != ERROR_OK)
1474                 return retval;
1475
1476         return ERROR_OK;
1477 }
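/* Illustration of the linked pair programmed above (example numbers only):
 * if the context ID comparator is BRP 2 and the IVA comparator is BRP 0,
 * then control_CTX = (0x3 << 20) | (0 << 16) | (0x0f << 5) | (3 << 1) | 1
 * = 0x3001e7 and control_IVA = (0x1 << 20) | (2 << 16) | (0x0f << 5)
 * | (3 << 1) | 1 = 0x1201e7. Bits [19:16] of each register name the BRP it
 * is linked with, so the breakpoint only fires when both comparators match.
 */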
1478
1479 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1480 {
1481         int retval;
1482         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1483         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1484         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1485
1486         if (!breakpoint->set) {
1487                 LOG_WARNING("breakpoint not set");
1488                 return ERROR_OK;
1489         }
1490
1491         if (breakpoint->type == BKPT_HARD) {
1492                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1493                         int brp_i = breakpoint->set - 1;
1494                         int brp_j = breakpoint->linked_BRP;
1495                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1496                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1497                                 return ERROR_OK;
1498                         }
1499                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1500                                 brp_list[brp_i].control, brp_list[brp_i].value);
1501                         brp_list[brp_i].used = 0;
1502                         brp_list[brp_i].value = 0;
1503                         brp_list[brp_i].control = 0;
1504                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1505                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1506                                         brp_list[brp_i].control);
1507                         if (retval != ERROR_OK)
1508                                 return retval;
1509                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1510                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1511                                         brp_list[brp_i].value);
1512                         if (retval != ERROR_OK)
1513                                 return retval;
1514                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1515                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1516                                 return ERROR_OK;
1517                         }
1518                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1519                                 brp_list[brp_j].control, brp_list[brp_j].value);
1520                         brp_list[brp_j].used = 0;
1521                         brp_list[brp_j].value = 0;
1522                         brp_list[brp_j].control = 0;
1523                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1524                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1525                                         brp_list[brp_j].control);
1526                         if (retval != ERROR_OK)
1527                                 return retval;
1528                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1529                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1530                                         brp_list[brp_j].value);
1531                         if (retval != ERROR_OK)
1532                                 return retval;
1533                         breakpoint->linked_BRP = 0;
1534                         breakpoint->set = 0;
1535                         return ERROR_OK;
1536
1537                 } else {
1538                         int brp_i = breakpoint->set - 1;
1539                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1540                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1541                                 return ERROR_OK;
1542                         }
1543                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1544                                 brp_list[brp_i].control, brp_list[brp_i].value);
1545                         brp_list[brp_i].used = 0;
1546                         brp_list[brp_i].value = 0;
1547                         brp_list[brp_i].control = 0;
1548                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1549                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1550                                         brp_list[brp_i].control);
1551                         if (retval != ERROR_OK)
1552                                 return retval;
1553                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1554                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1555                                         brp_list[brp_i].value);
1556                         if (retval != ERROR_OK)
1557                                 return retval;
1558                         breakpoint->set = 0;
1559                         return ERROR_OK;
1560                 }
1561         } else {
1562
1563                 /* make sure data cache is cleaned & invalidated down to PoC */
1564                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1565                         armv7a_cache_flush_virt(target, breakpoint->address,
1566                                                 breakpoint->length);
1567                 }
1568
1569                 /* restore original instruction (kept in target endianness) */
1570                 if (breakpoint->length == 4) {
1571                         retval = target_write_memory(target,
1572                                         breakpoint->address & 0xFFFFFFFE,
1573                                         4, 1, breakpoint->orig_instr);
1574                         if (retval != ERROR_OK)
1575                                 return retval;
1576                 } else {
1577                         retval = target_write_memory(target,
1578                                         breakpoint->address & 0xFFFFFFFE,
1579                                         2, 1, breakpoint->orig_instr);
1580                         if (retval != ERROR_OK)
1581                                 return retval;
1582                 }
1583
1584                 /* update i-cache at breakpoint location */
1585                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1586                                                  breakpoint->length);
1587                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1588                                                  breakpoint->length);
1589         }
1590         breakpoint->set = 0;
1591
1592         return ERROR_OK;
1593 }
1594
1595 static int cortex_a_add_breakpoint(struct target *target,
1596         struct breakpoint *breakpoint)
1597 {
1598         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1599
1600         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1601                 LOG_INFO("no hardware breakpoint available");
1602                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1603         }
1604
1605         if (breakpoint->type == BKPT_HARD)
1606                 cortex_a->brp_num_available--;
1607
1608         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1609 }
1610
1611 static int cortex_a_add_context_breakpoint(struct target *target,
1612         struct breakpoint *breakpoint)
1613 {
1614         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1615
1616         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1617                 LOG_INFO("no hardware breakpoint available");
1618                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1619         }
1620
1621         if (breakpoint->type == BKPT_HARD)
1622                 cortex_a->brp_num_available--;
1623
1624         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1625 }
1626
1627 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1628         struct breakpoint *breakpoint)
1629 {
1630         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1631
1632         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1633                 LOG_INFO("no hardware breakpoint available");
1634                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1635         }
1636
1637         if (breakpoint->type == BKPT_HARD)
1638                 cortex_a->brp_num_available--;
1639
1640         return cortex_a_set_hybrid_breakpoint(target, breakpoint);      /* context ID + IVA match */
1641 }
1642
1643
1644 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1645 {
1646         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1647
1648 #if 0
1649 /* It is perfectly possible to remove breakpoints while the target is running */
1650         if (target->state != TARGET_HALTED) {
1651                 LOG_WARNING("target not halted");
1652                 return ERROR_TARGET_NOT_HALTED;
1653         }
1654 #endif
1655
1656         if (breakpoint->set) {
1657                 cortex_a_unset_breakpoint(target, breakpoint);
1658                 if (breakpoint->type == BKPT_HARD)
1659                         cortex_a->brp_num_available++;
1660         }
1661
1662
1663         return ERROR_OK;
1664 }
1665
1666 /*
1667  * Cortex-A Reset functions
1668  */
1669
1670 static int cortex_a_assert_reset(struct target *target)
1671 {
1672         struct armv7a_common *armv7a = target_to_armv7a(target);
1673
1674         LOG_DEBUG(" ");
1675
1676         /* FIXME when halt is requested, make it work somehow... */
1677
1678         /* This function can be called in "target not examined" state */
1679
1680         /* Issue some kind of warm reset. */
1681         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1682                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1683         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1684                 /* REVISIT handle "pulls" cases, if there's
1685                  * hardware that needs them to work.
1686                  */
1687
1688                 /*
1689                  * FIXME: fix reset when transport is SWD. This is a temporary
1690                  * work-around for release v0.10 that is not intended to stay!
1691                  */
1692                 if (transport_is_swd() ||
1693                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1694                         jtag_add_reset(0, 1);
1695
1696         } else {
1697                 LOG_ERROR("%s: how to reset?", target_name(target));
1698                 return ERROR_FAIL;
1699         }
1700
1701         /* registers are now invalid */
1702         if (target_was_examined(target))
1703                 register_cache_invalidate(armv7a->arm.core_cache);
1704
1705         target->state = TARGET_RESET;
1706
1707         return ERROR_OK;
1708 }
1709
1710 static int cortex_a_deassert_reset(struct target *target)
1711 {
1712         int retval;
1713
1714         LOG_DEBUG(" ");
1715
1716         /* be certain SRST is off */
1717         jtag_add_reset(0, 0);
1718
1719         if (target_was_examined(target)) {
1720                 retval = cortex_a_poll(target);
1721                 if (retval != ERROR_OK)
1722                         return retval;
1723         }
1724
1725         if (target->reset_halt) {
1726                 if (target->state != TARGET_HALTED) {
1727                         LOG_WARNING("%s: ran after reset and before halt ...",
1728                                 target_name(target));
1729                         if (target_was_examined(target)) {
1730                                 retval = target_halt(target);
1731                                 if (retval != ERROR_OK)
1732                                         return retval;
1733                         } else
1734                                 target->state = TARGET_UNKNOWN;
1735                 }
1736         }
1737
1738         return ERROR_OK;
1739 }
1740
1741 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1742 {
1743         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1744          * New desired mode must be in mode. Current value of DSCR must be in
1745          * *dscr, which is updated with new value.
1746          *
1747          * This function elides actually sending the mode-change over the debug
1748          * interface if the mode is already set as desired.
1749          */
1750         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1751         if (new_dscr != *dscr) {
1752                 struct armv7a_common *armv7a = target_to_armv7a(target);
1753                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1754                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1755                 if (retval == ERROR_OK)
1756                         *dscr = new_dscr;
1757                 return retval;
1758         } else {
1759                 return ERROR_OK;
1760         }
1761 }
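/* Typical usage sketch (identifiers as used elsewhere in this file): read
 * DSCR once, then keep passing the cached copy so redundant mode changes
 * are skipped, e.g.
 *
 *     mem_ap_read_atomic_u32(armv7a->debug_ap,
 *                     armv7a->debug_base + CPUDBG_DSCR, &dscr);
 *     cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, &dscr);
 *     ...issue instructions...
 *     cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
 */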
1762
1763 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1764         uint32_t value, uint32_t *dscr)
1765 {
1766         /* Waits until the specified bit(s) of DSCR take on a specified value. */
1767         struct armv7a_common *armv7a = target_to_armv7a(target);
1768         int64_t then = timeval_ms();
1769         int retval;
1770
1771         while ((*dscr & mask) != value) {
1772                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1773                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
1774                 if (retval != ERROR_OK)
1775                         return retval;
1776                 if (timeval_ms() > then + 1000) {
1777                         LOG_ERROR("timeout waiting for DSCR bit change");
1778                         return ERROR_FAIL;
1779                 }
1780         }
1781         return ERROR_OK;
1782 }
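/* Example calls (mirroring the uses below): wait for TXfull_l with
 * cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
 * DSCR_DTRTX_FULL_LATCHED, &dscr), or for RXfull_l to clear with
 * cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr).
 * The helper gives up after roughly one second.
 */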
1783
1784 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1785         uint32_t *data, uint32_t *dscr)
1786 {
1787         int retval;
1788         struct armv7a_common *armv7a = target_to_armv7a(target);
1789
1790         /* Move from coprocessor to R0. */
1791         retval = cortex_a_exec_opcode(target, opcode, dscr);
1792         if (retval != ERROR_OK)
1793                 return retval;
1794
1795         /* Move from R0 to DTRTX. */
1796         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1797         if (retval != ERROR_OK)
1798                 return retval;
1799
1800         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1801          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1802          * must also check TXfull_l). Most of the time this will be free
1803          * because TXfull_l will be set immediately and cached in dscr. */
1804         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1805                         DSCR_DTRTX_FULL_LATCHED, dscr);
1806         if (retval != ERROR_OK)
1807                 return retval;
1808
1809         /* Read the value transferred to DTRTX. */
1810         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1811                         armv7a->debug_base + CPUDBG_DTRTX, data);
1812         if (retval != ERROR_OK)
1813                 return retval;
1814
1815         return ERROR_OK;
1816 }
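/* For illustration only (hypothetical call, not used elsewhere): assuming
 * the ARMV4_5_MRC(coproc, op1, Rt, CRn, CRm, op2) argument order used in
 * this file, the MIDR could be read the same way with
 * cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 0, 0, 0), &midr, &dscr),
 * i.e. MRC p15, 0, R0, c0, c0, 0 followed by the R0 -> DTRTX transfer above.
 */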
1817
1818 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1819         uint32_t *dfsr, uint32_t *dscr)
1820 {
1821         int retval;
1822
1823         if (dfar) {
1824                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1825                 if (retval != ERROR_OK)
1826                         return retval;
1827         }
1828
1829         if (dfsr) {
1830                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1831                 if (retval != ERROR_OK)
1832                         return retval;
1833         }
1834
1835         return ERROR_OK;
1836 }
1837
1838 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
1839         uint32_t data, uint32_t *dscr)
1840 {
1841         int retval;
1842         struct armv7a_common *armv7a = target_to_armv7a(target);
1843
1844         /* Write the value into DTRRX. */
1845         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1846                         armv7a->debug_base + CPUDBG_DTRRX, data);
1847         if (retval != ERROR_OK)
1848                 return retval;
1849
1850         /* Move from DTRRX to R0. */
1851         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
1852         if (retval != ERROR_OK)
1853                 return retval;
1854
1855         /* Move from R0 to coprocessor. */
1856         retval = cortex_a_exec_opcode(target, opcode, dscr);
1857         if (retval != ERROR_OK)
1858                 return retval;
1859
1860         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
1861          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
1862          * check RXfull_l). Most of the time this will be free because RXfull_l
1863          * will be cleared immediately and cached in dscr. */
1864         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1865         if (retval != ERROR_OK)
1866                 return retval;
1867
1868         return ERROR_OK;
1869 }
1870
1871 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1872         uint32_t dfsr, uint32_t *dscr)
1873 {
1874         int retval;
1875
1876         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1877         if (retval != ERROR_OK)
1878                 return retval;
1879
1880         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1881         if (retval != ERROR_OK)
1882                 return retval;
1883
1884         return ERROR_OK;
1885 }
1886
1887 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1888 {
1889         uint32_t status, upper4;
1890
1891         if (dfsr & (1 << 9)) {
1892                 /* LPAE format. */
1893                 status = dfsr & 0x3f;
1894                 upper4 = status >> 2;
1895                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1896                         return ERROR_TARGET_TRANSLATION_FAULT;
1897                 else if (status == 33)
1898                         return ERROR_TARGET_UNALIGNED_ACCESS;
1899                 else
1900                         return ERROR_TARGET_DATA_ABORT;
1901         } else {
1902                 /* Normal format. */
1903                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1904                 if (status == 1)
1905                         return ERROR_TARGET_UNALIGNED_ACCESS;
1906                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1907                                 status == 9 || status == 11 || status == 13 || status == 15)
1908                         return ERROR_TARGET_TRANSLATION_FAULT;
1909                 else
1910                         return ERROR_TARGET_DATA_ABORT;
1911         }
1912 }
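/* Decoding examples (short-descriptor DFSR values): DFSR = 0x005 gives
 * status 5 (section translation fault) and maps to
 * ERROR_TARGET_TRANSLATION_FAULT; DFSR = 0x001 gives status 1 (alignment
 * fault) and maps to ERROR_TARGET_UNALIGNED_ACCESS; anything not in those
 * lists, e.g. an external abort, falls back to ERROR_TARGET_DATA_ABORT.
 */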
1913
1914 static int cortex_a_write_cpu_memory_slow(struct target *target,
1915         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1916 {
1917         /* Writes count objects of size size from *buffer. Old value of DSCR must
1918          * be in *dscr; updated to new value. This is slow because it works for
1919          * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
1920          * the address is aligned, cortex_a_write_cpu_memory_fast should be
1921          * preferred.
1922          * Preconditions:
1923          * - Address is in R0.
1924          * - R0 is marked dirty.
1925          */
1926         struct armv7a_common *armv7a = target_to_armv7a(target);
1927         struct arm *arm = &armv7a->arm;
1928         int retval;
1929
1930         /* Mark register R1 as dirty, to use for transferring data. */
1931         arm_reg_current(arm, 1)->dirty = true;
1932
1933         /* Switch to non-blocking mode if not already in that mode. */
1934         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
1935         if (retval != ERROR_OK)
1936                 return retval;
1937
1938         /* Go through the objects. */
1939         while (count) {
1940                 /* Write the value to store into DTRRX. */
1941                 uint32_t data, opcode;
1942                 if (size == 1)
1943                         data = *buffer;
1944                 else if (size == 2)
1945                         data = target_buffer_get_u16(target, buffer);
1946                 else
1947                         data = target_buffer_get_u32(target, buffer);
1948                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1949                                 armv7a->debug_base + CPUDBG_DTRRX, data);
1950                 if (retval != ERROR_OK)
1951                         return retval;
1952
1953                 /* Transfer the value from DTRRX to R1. */
1954                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
1955                 if (retval != ERROR_OK)
1956                         return retval;
1957
1958                 /* Write the value transferred to R1 into memory. */
1959                 if (size == 1)
1960                         opcode = ARMV4_5_STRB_IP(1, 0);
1961                 else if (size == 2)
1962                         opcode = ARMV4_5_STRH_IP(1, 0);
1963                 else
1964                         opcode = ARMV4_5_STRW_IP(1, 0);
1965                 retval = cortex_a_exec_opcode(target, opcode, dscr);
1966                 if (retval != ERROR_OK)
1967                         return retval;
1968
1969                 /* Check for faults and return early. */
1970                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
1971                         return ERROR_OK; /* A data fault is not considered a system failure. */
1972
1973                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
1974                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1975                  * must also check RXfull_l). Most of the time this will be free
1976                  * because RXfull_l will be cleared immediately and cached in dscr. */
1977                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1978                 if (retval != ERROR_OK)
1979                         return retval;
1980
1981                 /* Advance. */
1982                 buffer += size;
1983                 --count;
1984         }
1985
1986         return ERROR_OK;
1987 }
1988
1989 static int cortex_a_write_cpu_memory_fast(struct target *target,
1990         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1991 {
1992         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
1993          * in *dscr; updated to new value. This is fast but only works for
1994          * word-sized objects at aligned addresses.
1995          * Preconditions:
1996          * - Address is in R0 and must be a multiple of 4.
1997          * - R0 is marked dirty.
1998          */
1999         struct armv7a_common *armv7a = target_to_armv7a(target);
2000         int retval;
2001
2002         /* Switch to fast mode if not already in that mode. */
2003         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2004         if (retval != ERROR_OK)
2005                 return retval;
2006
2007         /* Latch STC instruction. */
2008         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2009                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2010         if (retval != ERROR_OK)
2011                 return retval;
2012
2013         /* Transfer all the data and issue all the instructions. */
2014         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2015                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
2016 }
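/* Roughly how the fast path works: with the DCC in fast mode, the
 * STC p14, c5, [R0], #4 latched into ITR is re-issued each time DTRRX is
 * written, storing one word to memory and post-incrementing R0, so the
 * single mem_ap_write_buf_noincr() call above streams all 'count' words
 * without touching ITR again (see the ARMv7-A/-R debug fast mode rules).
 */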
2017
2018 static int cortex_a_write_cpu_memory(struct target *target,
2019         uint32_t address, uint32_t size,
2020         uint32_t count, const uint8_t *buffer)
2021 {
2022         /* Write memory through the CPU. */
2023         int retval, final_retval;
2024         struct armv7a_common *armv7a = target_to_armv7a(target);
2025         struct arm *arm = &armv7a->arm;
2026         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2027
2028         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2029                           address, size, count);
2030         if (target->state != TARGET_HALTED) {
2031                 LOG_WARNING("target not halted");
2032                 return ERROR_TARGET_NOT_HALTED;
2033         }
2034
2035         if (!count)
2036                 return ERROR_OK;
2037
2038         /* Clear any abort. */
2039         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2040                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2041         if (retval != ERROR_OK)
2042                 return retval;
2043
2044         /* Read DSCR. */
2045         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2046                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2047         if (retval != ERROR_OK)
2048                 return retval;
2049
2050         /* Switch to non-blocking mode if not already in that mode. */
2051         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2052         if (retval != ERROR_OK)
2053                 goto out;
2054
2055         /* Mark R0 as dirty. */
2056         arm_reg_current(arm, 0)->dirty = true;
2057
2058         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2059         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2060         if (retval != ERROR_OK)
2061                 goto out;
2062
2063         /* Get the memory address into R0. */
2064         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2065                         armv7a->debug_base + CPUDBG_DTRRX, address);
2066         if (retval != ERROR_OK)
2067                 goto out;
2068         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2069         if (retval != ERROR_OK)
2070                 goto out;
2071
2072         if (size == 4 && (address % 4) == 0) {
2073                 /* We are doing a word-aligned transfer, so use fast mode. */
2074                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2075         } else {
2076                 /* Use slow path. */
2077                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2078         }
2079
2080 out:
2081         final_retval = retval;
2082
2083         /* Switch to non-blocking mode if not already in that mode. */
2084         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2085         if (final_retval == ERROR_OK)
2086                 final_retval = retval;
2087
2088         /* Wait for last issued instruction to complete. */
2089         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2090         if (final_retval == ERROR_OK)
2091                 final_retval = retval;
2092
2093         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2094          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2095          * check RXfull_l). Most of the time this will be free because RXfull_l
2096          * will be cleared immediately and cached in dscr. However, don't do this
2097          * if there is a fault, because then the instruction might not have completed
2098          * successfully. */
2099         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2100                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2101                 if (retval != ERROR_OK)
2102                         return retval;
2103         }
2104
2105         /* If there were any sticky abort flags, clear them. */
2106         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2107                 fault_dscr = dscr;
2108                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2109                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2110                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2111         } else {
2112                 fault_dscr = 0;
2113         }
2114
2115         /* Handle synchronous data faults. */
2116         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2117                 if (final_retval == ERROR_OK) {
2118                         /* Final return value will reflect cause of fault. */
2119                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2120                         if (retval == ERROR_OK) {
2121                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2122                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2123                         } else
2124                                 final_retval = retval;
2125                 }
2126                 /* Fault destroyed DFAR/DFSR; restore them. */
2127                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2128                 if (retval != ERROR_OK)
2129                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2130         }
2131
2132         /* Handle asynchronous data faults. */
2133         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2134                 if (final_retval == ERROR_OK)
2135                         /* No other error has been recorded so far, so keep this one. */
2136                         final_retval = ERROR_TARGET_DATA_ABORT;
2137         }
2138
2139         /* If the DCC is nonempty, clear it. */
2140         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2141                 uint32_t dummy;
2142                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2143                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2144                 if (final_retval == ERROR_OK)
2145                         final_retval = retval;
2146         }
2147         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2148                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2149                 if (final_retval == ERROR_OK)
2150                         final_retval = retval;
2151         }
2152
2153         /* Done. */
2154         return final_retval;
2155 }
2156
2157 static int cortex_a_read_cpu_memory_slow(struct target *target,
2158         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2159 {
2160         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2161          * in *dscr; updated to new value. This is slow because it works for
2162          * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2163          * the address is aligned, cortex_a_read_cpu_memory_fast should be
2164          * preferred.
2165          * Preconditions:
2166          * - Address is in R0.
2167          * - R0 is marked dirty.
2168          */
2169         struct armv7a_common *armv7a = target_to_armv7a(target);
2170         struct arm *arm = &armv7a->arm;
2171         int retval;
2172
2173         /* Mark register R1 as dirty, to use for transferring data. */
2174         arm_reg_current(arm, 1)->dirty = true;
2175
2176         /* Switch to non-blocking mode if not already in that mode. */
2177         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2178         if (retval != ERROR_OK)
2179                 return retval;
2180
2181         /* Go through the objects. */
2182         while (count) {
2183                 /* Issue a load of the appropriate size to R1. */
2184                 uint32_t opcode, data;
2185                 if (size == 1)
2186                         opcode = ARMV4_5_LDRB_IP(1, 0);
2187                 else if (size == 2)
2188                         opcode = ARMV4_5_LDRH_IP(1, 0);
2189                 else
2190                         opcode = ARMV4_5_LDRW_IP(1, 0);
2191                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2192                 if (retval != ERROR_OK)
2193                         return retval;
2194
2195                 /* Issue a write of R1 to DTRTX. */
2196                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2197                 if (retval != ERROR_OK)
2198                         return retval;
2199
2200                 /* Check for faults and return early. */
2201                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2202                         return ERROR_OK; /* A data fault is not considered a system failure. */
2203
2204                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2205                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2206                  * must also check TXfull_l). Most of the time this will be free
2207                  * because TXfull_l will be set immediately and cached in dscr. */
2208                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2209                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2210                 if (retval != ERROR_OK)
2211                         return retval;
2212
2213                 /* Read the value transferred to DTRTX into the buffer. */
2214                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2215                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2216                 if (retval != ERROR_OK)
2217                         return retval;
2218                 if (size == 1)
2219                         *buffer = (uint8_t) data;
2220                 else if (size == 2)
2221                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2222                 else
2223                         target_buffer_set_u32(target, buffer, data);
2224
2225                 /* Advance. */
2226                 buffer += size;
2227                 --count;
2228         }
2229
2230         return ERROR_OK;
2231 }
2232
2233 static int cortex_a_read_cpu_memory_fast(struct target *target,
2234         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2235 {
2236         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2237          * *dscr; updated to new value. This is fast but only works for word-sized
2238          * objects at aligned addresses.
2239          * Preconditions:
2240          * - Address is in R0 and must be a multiple of 4.
2241          * - R0 is marked dirty.
2242          */
2243         struct armv7a_common *armv7a = target_to_armv7a(target);
2244         uint32_t u32;
2245         int retval;
2246
2247         /* Switch to non-blocking mode if not already in that mode. */
2248         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2249         if (retval != ERROR_OK)
2250                 return retval;
2251
2252         /* Issue the LDC instruction via a write to ITR. */
2253         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2254         if (retval != ERROR_OK)
2255                 return retval;
2256
2257         count--;
2258
2259         if (count > 0) {
2260                 /* Switch to fast mode if not already in that mode. */
2261                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2262                 if (retval != ERROR_OK)
2263                         return retval;
2264
2265                 /* Latch LDC instruction. */
2266                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2267                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2268                 if (retval != ERROR_OK)
2269                         return retval;
2270
2271                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2272                  * mode rules, this blocks until the instruction finishes executing and
2273                  * then reissues the read instruction to read the next word from
2274                  * memory. The last read of DTRTX in this call reads the second-to-last
2275                  * word from memory and issues the read instruction for the last word.
2276                  */
2277                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2278                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2279                 if (retval != ERROR_OK)
2280                         return retval;
2281
2282                 /* Advance. */
2283                 buffer += count * 4;
2284         }
2285
2286         /* Wait for last issued instruction to complete. */
2287         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2288         if (retval != ERROR_OK)
2289                 return retval;
2290
2291         /* Switch to non-blocking mode if not already in that mode. */
2292         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2293         if (retval != ERROR_OK)
2294                 return retval;
2295
2296         /* Check for faults and return early. */
2297         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2298                 return ERROR_OK; /* A data fault is not considered a system failure. */
2299
2300         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2301          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2302          * check TXfull_l). Most of the time this will be free because TXfull_l
2303          * will be set immediately and cached in dscr. */
2304         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2305                         DSCR_DTRTX_FULL_LATCHED, dscr);
2306         if (retval != ERROR_OK)
2307                 return retval;
2308
2309         /* Read the value transferred to DTRTX into the buffer. This is the last
2310          * word. */
2311         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2312                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2313         if (retval != ERROR_OK)
2314                 return retval;
2315         target_buffer_set_u32(target, buffer, u32);
2316
2317         return ERROR_OK;
2318 }
2319
2320 static int cortex_a_read_cpu_memory(struct target *target,
2321         uint32_t address, uint32_t size,
2322         uint32_t count, uint8_t *buffer)
2323 {
2324         /* Read memory through the CPU. */
2325         int retval, final_retval;
2326         struct armv7a_common *armv7a = target_to_armv7a(target);
2327         struct arm *arm = &armv7a->arm;
2328         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2329
2330         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2331                           address, size, count);
2332         if (target->state != TARGET_HALTED) {
2333                 LOG_WARNING("target not halted");
2334                 return ERROR_TARGET_NOT_HALTED;
2335         }
2336
2337         if (!count)
2338                 return ERROR_OK;
2339
2340         /* Clear any abort. */
2341         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2342                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2343         if (retval != ERROR_OK)
2344                 return retval;
2345
2346         /* Read DSCR */
2347         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2348                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2349         if (retval != ERROR_OK)
2350                 return retval;
2351
2352         /* Switch to non-blocking mode if not already in that mode. */
2353         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2354         if (retval != ERROR_OK)
2355                 goto out;
2356
2357         /* Mark R0 as dirty. */
2358         arm_reg_current(arm, 0)->dirty = true;
2359
2360         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2361         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2362         if (retval != ERROR_OK)
2363                 goto out;
2364
2365         /* Get the memory address into R0. */
2366         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2367                         armv7a->debug_base + CPUDBG_DTRRX, address);
2368         if (retval != ERROR_OK)
2369                 goto out;
2370         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2371         if (retval != ERROR_OK)
2372                 goto out;
2373
2374         if (size == 4 && (address % 4) == 0) {
2375                 /* We are doing a word-aligned transfer, so use fast mode. */
2376                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2377         } else {
2378                 /* Use slow path. */
2379                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2380         }
2381
2382 out:
2383         final_retval = retval;
2384
2385         /* Switch to non-blocking mode if not already in that mode. */
2386         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2387         if (final_retval == ERROR_OK)
2388                 final_retval = retval;
2389
2390         /* Wait for last issued instruction to complete. */
2391         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2392         if (final_retval == ERROR_OK)
2393                 final_retval = retval;
2394
2395         /* If there were any sticky abort flags, clear them. */
2396         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2397                 fault_dscr = dscr;
2398                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2399                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2400                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2401         } else {
2402                 fault_dscr = 0;
2403         }
2404
2405         /* Handle synchronous data faults. */
2406         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2407                 if (final_retval == ERROR_OK) {
2408                         /* Final return value will reflect cause of fault. */
2409                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2410                         if (retval == ERROR_OK) {
2411                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2412                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2413                         } else
2414                                 final_retval = retval;
2415                 }
2416                 /* Fault destroyed DFAR/DFSR; restore them. */
2417                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2418                 if (retval != ERROR_OK)
2419                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2420         }
2421
2422         /* Handle asynchronous data faults. */
2423         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2424                 if (final_retval == ERROR_OK)
2425                         /* No other error has been recorded so far, so keep this one. */
2426                         final_retval = ERROR_TARGET_DATA_ABORT;
2427         }
2428
2429         /* If the DCC is nonempty, clear it. */
2430         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2431                 uint32_t dummy;
2432                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2433                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2434                 if (final_retval == ERROR_OK)
2435                         final_retval = retval;
2436         }
2437         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2438                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2439                 if (final_retval == ERROR_OK)
2440                         final_retval = retval;
2441         }
2442
2443         /* Done. */
2444         return final_retval;
2445 }
2446
2447
2448 /*
2449  * Cortex-A Memory access
2450  *
2451  * This is the same as for Cortex-M3, but we must also use the
2452  * correct AP number for every access.
2453  */
2454
2455 static int cortex_a_read_phys_memory(struct target *target,
2456         target_addr_t address, uint32_t size,
2457         uint32_t count, uint8_t *buffer)
2458 {
2459         int retval;
2460
2461         if (!count || !buffer)
2462                 return ERROR_COMMAND_SYNTAX_ERROR;
2463
2464         LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2465                 address, size, count);
2466
2467         /* read memory through the CPU */
2468         cortex_a_prep_memaccess(target, 1);
2469         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2470         cortex_a_post_memaccess(target, 1);
2471
2472         return retval;
2473 }
2474
2475 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2476         uint32_t size, uint32_t count, uint8_t *buffer)
2477 {
2478         int retval;
2479
2480         /* cortex_a handles unaligned memory access */
2481         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2482                 address, size, count);
2483
2484         cortex_a_prep_memaccess(target, 0);
2485         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2486         cortex_a_post_memaccess(target, 0);
2487
2488         return retval;
2489 }
2490
2491 static int cortex_a_write_phys_memory(struct target *target,
2492         target_addr_t address, uint32_t size,
2493         uint32_t count, const uint8_t *buffer)
2494 {
2495         int retval;
2496
2497         if (!count || !buffer)
2498                 return ERROR_COMMAND_SYNTAX_ERROR;
2499
2500         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2501                 address, size, count);
2502
2503         /* write memory through the CPU */
2504         cortex_a_prep_memaccess(target, 1);
2505         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2506         cortex_a_post_memaccess(target, 1);
2507
2508         return retval;
2509 }
2510
2511 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2512         uint32_t size, uint32_t count, const uint8_t *buffer)
2513 {
2514         int retval;
2515
2516         /* cortex_a handles unaligned memory access */
2517         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2518                 address, size, count);
2519
2520         /* memory writes bypass the caches, must flush before writing */
2521         armv7a_cache_auto_flush_on_write(target, address, size * count);
2522
2523         cortex_a_prep_memaccess(target, 0);
2524         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2525         cortex_a_post_memaccess(target, 0);
2526         return retval;
2527 }
2528
2529 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2530                                 uint32_t count, uint8_t *buffer)
2531 {
2532         uint32_t size;
2533
2534         /* Align up to a maximum access size of 4 bytes. The loop condition makes sure
2535          * that, after any head access, the next (doubled) size still has at least one full access left. */
2536         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2537                 if (address & size) {
2538                         int retval = target_read_memory(target, address, size, 1, buffer);
2539                         if (retval != ERROR_OK)
2540                                 return retval;
2541                         address += size;
2542                         count -= size;
2543                         buffer += size;
2544                 }
2545         }
2546
2547         /* Read the data with as large access size as possible. */
2548         for (; size > 0; size /= 2) {
2549                 uint32_t aligned = count - count % size;
2550                 if (aligned > 0) {
2551                         int retval = target_read_memory(target, address, size, aligned / size, buffer);
2552                         if (retval != ERROR_OK)
2553                                 return retval;
2554                         address += aligned;
2555                         count -= aligned;
2556                         buffer += aligned;
2557                 }
2558         }
2559
2560         return ERROR_OK;
2561 }
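/* Worked example (hypothetical numbers): reading 10 bytes from 0x1003 first
 * issues a single 1-byte access at 0x1003, the head loop then stops at
 * size 4, the tail loop reads two aligned words at 0x1004, and the one
 * remaining byte is read at 0x100c. cortex_a_write_buffer() below splits
 * writes in exactly the same way.
 */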
2562
2563 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2564                                  uint32_t count, const uint8_t *buffer)
2565 {
2566         uint32_t size;
2567
2568         /* Align up to a maximum access size of 4 bytes. The loop condition makes sure
2569          * that, after any head access, the next (doubled) size still has at least one full access left. */
2570         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2571                 if (address & size) {
2572                         int retval = target_write_memory(target, address, size, 1, buffer);
2573                         if (retval != ERROR_OK)
2574                                 return retval;
2575                         address += size;
2576                         count -= size;
2577                         buffer += size;
2578                 }
2579         }
2580
2581         /* Write the data with the largest access size possible. */
2582         for (; size > 0; size /= 2) {
2583                 uint32_t aligned = count - count % size;
2584                 if (aligned > 0) {
2585                         int retval = target_write_memory(target, address, size, aligned / size, buffer);
2586                         if (retval != ERROR_OK)
2587                                 return retval;
2588                         address += aligned;
2589                         count -= aligned;
2590                         buffer += aligned;
2591                 }
2592         }
2593
2594         return ERROR_OK;
2595 }
2596
2597 static int cortex_a_handle_target_request(void *priv)
2598 {
2599         struct target *target = priv;
2600         struct armv7a_common *armv7a = target_to_armv7a(target);
2601         int retval;
2602
2603         if (!target_was_examined(target))
2604                 return ERROR_OK;
2605         if (!target->dbg_msg_enabled)
2606                 return ERROR_OK;
2607
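             /* While the core runs, software on the target can push debug messages
              * through the DCC: DSCR.DTRTXfull flags pending data in DTRTX, which is
              * drained below and handed to the generic target_request() dispatcher. */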
2608         if (target->state == TARGET_RUNNING) {
2609                 uint32_t request;
2610                 uint32_t dscr;
2611                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2612                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2613
2614                 /* check if we have data */
2615                 int64_t then = timeval_ms();
2616                 while (retval == ERROR_OK && (dscr & DSCR_DTR_TX_FULL)) {
2617                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2618                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2619                         if (retval == ERROR_OK) {
2620                                 target_request(target, request);
2621                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2622                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2623                         }
2624                         if (timeval_ms() > then + 1000) {
2625                                 LOG_ERROR("Timeout waiting for dtr tx full");
2626                                 return ERROR_FAIL;
2627                         }
2628                 }
2629         }
2630
2631         return ERROR_OK;
2632 }
2633
2634 /*
2635  * Cortex-A target information and configuration
2636  */
2637
2638 static int cortex_a_examine_first(struct target *target)
2639 {
2640         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2641         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2642         struct adiv5_dap *swjdp = armv7a->arm.dap;
2643
2644         int i;
2645         int retval = ERROR_OK;
2646         uint32_t didr, cpuid, dbg_osreg;
2647
2648         /* Search for the APB-AP - it is needed for access to debug registers */
2649         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2650         if (retval != ERROR_OK) {
2651                 LOG_ERROR("Could not find APB-AP for debug access");
2652                 return retval;
2653         }
2654
2655         retval = mem_ap_init(armv7a->debug_ap);
2656         if (retval != ERROR_OK) {
2657                 LOG_ERROR("Could not initialize the APB-AP");
2658                 return retval;
2659         }
2660
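             /* Allow extra TCK cycles per MEM-AP access; the debug APB behind the
              * APB-AP is slow compared to the adapter clock. */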
2661         armv7a->debug_ap->memaccess_tck = 80;
2662
2663         if (!target->dbgbase_set) {
2664                 uint32_t dbgbase;
2665                 /* Get ROM Table base */
2666                 uint32_t apid;
2667                 int32_t coreidx = target->coreid;
2668                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2669                           target->cmd_name);
2670                 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2671                 if (retval != ERROR_OK)
2672                         return retval;
2673                 /* Look up the core debug unit (CoreSight DEVTYPE 0x15: debug logic, processor) */
2674                 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2675                                 &armv7a->debug_base, &coreidx);
2676                 if (retval != ERROR_OK) {
2677                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2678                                   target->cmd_name);
2679                         return retval;
2680                 }
2681                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2682                           target->coreid, armv7a->debug_base);
2683         } else
2684                 armv7a->debug_base = target->dbgbase;
2685
2686         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2687                         armv7a->debug_base + CPUDBG_DIDR, &didr);
2688         if (retval != ERROR_OK) {
2689                 LOG_DEBUG("Examine %s failed", "DIDR");
2690                 return retval;
2691         }
2692
2693         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2694                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2695         if (retval != ERROR_OK) {
2696                 LOG_DEBUG("Examine %s failed", "CPUID");
2697                 return retval;
2698         }
2699
2700         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2701         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2702
2703         cortex_a->didr = didr;
2704         cortex_a->cpuid = cpuid;
2705
2706         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2707                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2708         if (retval != ERROR_OK)
2709                 return retval;
2710         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
2711
2712         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2713                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2714                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2715                 return ERROR_TARGET_INIT_FAILED;
2716         }
2717
2718         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2719                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2720
2721         /* Read DBGOSLSR and check if OSLK is implemented */
2722         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2723                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2724         if (retval != ERROR_OK)
2725                 return retval;
2726         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2727
2728         /* check if OS Lock is implemented */
2729         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2730                 /* check if OS Lock is set */
2731                 if (dbg_osreg & OSLSR_OSLK) {
2732                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2733
2734                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2735                                                         armv7a->debug_base + CPUDBG_OSLAR,
2736                                                         0);
2737                         if (retval == ERROR_OK)
2738                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2739                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2740
2741                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2742                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2743                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2744                                                 target->coreid);
2745                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2746                                 return ERROR_TARGET_INIT_FAILED;
2747                         }
2748                 }
2749         }
2750
2751         armv7a->arm.core_type = ARM_MODE_MON;
2752
2753         /* Avoid recreating the registers cache */
2754         if (!target_was_examined(target)) {
2755                 retval = cortex_a_dpm_setup(cortex_a, didr);
2756                 if (retval != ERROR_OK)
2757                         return retval;
2758         }
2759
2760         /* Setup Breakpoint Register Pairs */
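             /* DBGDIDR[27:24] = number of breakpoint register pairs minus one,
              * DBGDIDR[23:20] = number of context-matching comparators minus one. */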
2761         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2762         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2763         cortex_a->brp_num_available = cortex_a->brp_num;
2764         free(cortex_a->brp_list);
2765         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2766 /*      cortex_a->brb_enabled = ????; */
2767         for (i = 0; i < cortex_a->brp_num; i++) {
2768                 cortex_a->brp_list[i].used = 0;
2769                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2770                         cortex_a->brp_list[i].type = BRP_NORMAL;
2771                 else
2772                         cortex_a->brp_list[i].type = BRP_CONTEXT;
2773                 cortex_a->brp_list[i].value = 0;
2774                 cortex_a->brp_list[i].control = 0;
2775                 cortex_a->brp_list[i].BRPn = i;
2776         }
2777
2778         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2779
2780         /* select debug_ap as default */
2781         swjdp->apsel = armv7a->debug_ap->ap_num;
2782
2783         target_set_examined(target);
2784         return ERROR_OK;
2785 }
2786
2787 static int cortex_a_examine(struct target *target)
2788 {
2789         int retval = ERROR_OK;
2790
2791         /* Reestablish communication after target reset */
2792         retval = cortex_a_examine_first(target);
2793
2794         /* Configure core debug access */
2795         if (retval == ERROR_OK)
2796                 retval = cortex_a_init_debug_access(target);
2797
2798         return retval;
2799 }
2800
2801 /*
2802  *      Cortex-A target creation and initialization
2803  */
2804
2805 static int cortex_a_init_target(struct command_context *cmd_ctx,
2806         struct target *target)
2807 {
2808         /* examine_first() does a bunch of this */
2809         arm_semihosting_init(target);
2810         return ERROR_OK;
2811 }
2812
2813 static int cortex_a_init_arch_info(struct target *target,
2814         struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2815 {
2816         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2817
2818         /* Setup struct cortex_a_common */
2819         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2820         armv7a->arm.dap = dap;
2821
2822         /* register arch-specific functions */
2823         armv7a->examine_debug_reason = NULL;
2824
2825         armv7a->post_debug_entry = cortex_a_post_debug_entry;
2826
2827         armv7a->pre_restore_context = NULL;
2828
2829         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2830
2832 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
2833
2834         /* REVISIT v7a setup should be in a v7a-specific routine */
2835         armv7a_init_arch_info(target, armv7a);
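             /* Poll the DCC request channel every 1 ms while the target is running. */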
2836         target_register_timer_callback(cortex_a_handle_target_request, 1,
2837                 TARGET_TIMER_TYPE_PERIODIC, target);
2838
2839         return ERROR_OK;
2840 }
2841
2842 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2843 {
2844         struct cortex_a_common *cortex_a;
2845         struct adiv5_private_config *pc;
2846
2847         if (target->private_config == NULL)
2848                 return ERROR_FAIL;
2849
2850         pc = (struct adiv5_private_config *)target->private_config;
2851
2852         cortex_a = calloc(1, sizeof(struct cortex_a_common));
2853         if (cortex_a == NULL) {
2854                 LOG_ERROR("Out of memory");
2855                 return ERROR_FAIL;
2856         }
2857         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2858         cortex_a->armv7a_common.is_armv7r = false;
2859         cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2860
2861         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2862 }
2863
2864 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2865 {
2866         struct cortex_a_common *cortex_a;
2867         struct adiv5_private_config *pc;
2868
2869         pc = (struct adiv5_private_config *)target->private_config;
2870         if (adiv5_verify_config(pc) != ERROR_OK)
2871                 return ERROR_FAIL;
2872
2873         cortex_a = calloc(1, sizeof(struct cortex_a_common));
2874         if (cortex_a == NULL) {
2875                 LOG_ERROR("Out of memory");
2876                 return ERROR_FAIL;
2877         }
2878         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
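             /* Cortex-R4 is an ARMv7-R core; flag it so MMU-specific handling is skipped. */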
2879         cortex_a->armv7a_common.is_armv7r = true;
2880
2881         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2882 }
2883
2884 static void cortex_a_deinit_target(struct target *target)
2885 {
2886         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2887         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2888         struct arm_dpm *dpm = &armv7a->dpm;
2889         uint32_t dscr;
2890         int retval;
2891
2892         if (target_was_examined(target)) {
2893                 /* Disable halt for breakpoint, watchpoint and vector catch */
2894                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2895                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2896                 if (retval == ERROR_OK)
2897                         mem_ap_write_atomic_u32(armv7a->debug_ap,
2898                                         armv7a->debug_base + CPUDBG_DSCR,
2899                                         dscr & ~DSCR_HALT_DBG_MODE);
2900         }
2901
2902         free(cortex_a->brp_list);
2903         free(dpm->dbp);
2904         free(dpm->dwp);
2905         free(target->private_config);
2906         free(cortex_a);
2907 }
2908
2909 static int cortex_a_mmu(struct target *target, int *enabled)
2910 {
2911         struct armv7a_common *armv7a = target_to_armv7a(target);
2912
2913         if (target->state != TARGET_HALTED) {
2914                 LOG_ERROR("%s: target not halted", __func__);
2915                 return ERROR_TARGET_INVALID;
2916         }
2917
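             /* ARMv7-R cores implement an MPU rather than an MMU, so report it disabled. */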
2918         if (armv7a->is_armv7r)
2919                 *enabled = 0;
2920         else
2921                 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2922
2923         return ERROR_OK;
2924 }
2925
2926 static int cortex_a_virt2phys(struct target *target,
2927         target_addr_t virt, target_addr_t *phys)
2928 {
2929         int retval;
2930         int mmu_enabled = 0;
2931
2932         /*
2933          * If the MMU was not enabled at debug entry, there is no
2934          * way of knowing if there was ever a valid configuration
2935          * for it and thus it's not safe to enable it. In this case,
2936          * just return the virtual address as physical.
2937          */
2938         cortex_a_mmu(target, &mmu_enabled);
2939         if (!mmu_enabled) {
2940                 *phys = virt;
2941                 return ERROR_OK;
2942         }
2943
2944         /* the MMU must be enabled in order to get a correct translation */
2945         retval = cortex_a_mmu_modify(target, 1);
2946         if (retval != ERROR_OK)
2947                 return retval;
2948         return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2949                                                     (uint32_t *)phys, 1);
2950 }
2951
2952 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2953 {
2954         struct target *target = get_current_target(CMD_CTX);
2955         struct armv7a_common *armv7a = target_to_armv7a(target);
2956
2957         return armv7a_handle_cache_info_command(CMD_CTX,
2958                         &armv7a->armv7a_mmu.armv7a_cache);
2959 }
2960
2962 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2963 {
2964         struct target *target = get_current_target(CMD_CTX);
2965         if (!target_was_examined(target)) {
2966                 LOG_ERROR("target not examined yet");
2967                 return ERROR_FAIL;
2968         }
2969
2970         return cortex_a_init_debug_access(target);
2971 }
2972 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2973 {
2974         struct target *target = get_current_target(CMD_CTX);
2975         /* check target is an smp target */
2976         struct target_list *head;
2977         struct target *curr;
2978         head = target->head;
2979         target->smp = 0;
2980         if (head != NULL) {
2981                 while (head != NULL) {
2982                         curr = head->target;
2983                         curr->smp = 0;
2984                         head = head->next;
2985                 }
2986                 /* fix the target displayed to the debugger */
2987                 target->gdb_service->target = target;
2988         }
2989         return ERROR_OK;
2990 }
2991
2992 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2993 {
2994         struct target *target = get_current_target(CMD_CTX);
2995         struct target_list *head;
2996         struct target *curr;
2997         head = target->head;
2998         if (head != NULL) {
2999                 target->smp = 1;
3000                 while (head != NULL) {
3001                         curr = head->target;
3002                         curr->smp = 1;
3003                         head = head->next;
3004                 }
3005         }
3006         return ERROR_OK;
3007 }
3008
3009 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3010 {
3011         struct target *target = get_current_target(CMD_CTX);
3012         int retval = ERROR_OK;
3013         struct target_list *head;
3014         head = target->head;
3015         if (head != (struct target_list *)NULL) {
3016                 if (CMD_ARGC == 1) {
3017                         int coreid = 0;
3018                         COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3019                         if (retval != ERROR_OK)
3020                                 return retval;
3021                         target->gdb_service->core[1] = coreid;
3022
3023                 }
3024                 command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3025                         , target->gdb_service->core[1]);
3026         }
3027         return ERROR_OK;
3028 }
3029
3030 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3031 {
3032         struct target *target = get_current_target(CMD_CTX);
3033         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3034
3035         static const Jim_Nvp nvp_maskisr_modes[] = {
3036                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3037                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3038                 { .name = NULL, .value = -1 },
3039         };
3040         const Jim_Nvp *n;
3041
3042         if (CMD_ARGC > 0) {
3043                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3044                 if (n->name == NULL) {
3045                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3046                         return ERROR_COMMAND_SYNTAX_ERROR;
3047                 }
3048
3049                 cortex_a->isrmasking_mode = n->value;
3050         }
3051
3052         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3053         command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3054
3055         return ERROR_OK;
3056 }
3057
3058 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3059 {
3060         struct target *target = get_current_target(CMD_CTX);
3061         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3062
3063         static const Jim_Nvp nvp_dacrfixup_modes[] = {
3064                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3065                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3066                 { .name = NULL, .value = -1 },
3067         };
3068         const Jim_Nvp *n;
3069
3070         if (CMD_ARGC > 0) {
3071                 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3072                 if (n->name == NULL)
3073                         return ERROR_COMMAND_SYNTAX_ERROR;
3074                 cortex_a->dacrfixup_mode = n->value;
3075
3076         }
3077
3078         n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3079         command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3080
3081         return ERROR_OK;
3082 }
3083
3084 static const struct command_registration cortex_a_exec_command_handlers[] = {
3085         {
3086                 .name = "cache_info",
3087                 .handler = cortex_a_handle_cache_info_command,
3088                 .mode = COMMAND_EXEC,
3089                 .help = "display information about target caches",
3090                 .usage = "",
3091         },
3092         {
3093                 .name = "dbginit",
3094                 .handler = cortex_a_handle_dbginit_command,
3095                 .mode = COMMAND_EXEC,
3096                 .help = "Initialize core debug",
3097                 .usage = "",
3098         },
3099         {   .name = "smp_off",
3100             .handler = cortex_a_handle_smp_off_command,
3101             .mode = COMMAND_EXEC,
3102             .help = "Stop smp handling",
3103             .usage = "",},
3104         {
3105                 .name = "smp_on",
3106                 .handler = cortex_a_handle_smp_on_command,
3107                 .mode = COMMAND_EXEC,
3108                 .help = "Restart smp handling",
3109                 .usage = "",
3110         },
3111         {
3112                 .name = "smp_gdb",
3113                 .handler = cortex_a_handle_smp_gdb_command,
3114                 .mode = COMMAND_EXEC,
3115                 .help = "display/fix current core played to gdb",
3116                 .usage = "",
3117         },
3118         {
3119                 .name = "maskisr",
3120                 .handler = handle_cortex_a_mask_interrupts_command,
3121                 .mode = COMMAND_ANY,
3122                 .help = "mask cortex_a interrupts",
3123                 .usage = "['on'|'off']",
3124         },
3125         {
3126                 .name = "dacrfixup",
3127                 .handler = handle_cortex_a_dacrfixup_command,
3128                 .mode = COMMAND_ANY,
3129                 .help = "set domain access control (DACR) to all-manager "
3130                         "on memory access",
3131                 .usage = "['on'|'off']",
3132         },
3133         {
3134                 .chain = armv7a_mmu_command_handlers,
3135         },
3136
3137         COMMAND_REGISTRATION_DONE
3138 };
3139 static const struct command_registration cortex_a_command_handlers[] = {
3140         {
3141                 .chain = arm_command_handlers,
3142         },
3143         {
3144                 .chain = armv7a_command_handlers,
3145         },
3146         {
3147                 .name = "cortex_a",
3148                 .mode = COMMAND_ANY,
3149                 .help = "Cortex-A command group",
3150                 .usage = "",
3151                 .chain = cortex_a_exec_command_handlers,
3152         },
3153         COMMAND_REGISTRATION_DONE
3154 };
3155
3156 struct target_type cortexa_target = {
3157         .name = "cortex_a",
3158         .deprecated_name = "cortex_a8",
3159
3160         .poll = cortex_a_poll,
3161         .arch_state = armv7a_arch_state,
3162
3163         .halt = cortex_a_halt,
3164         .resume = cortex_a_resume,
3165         .step = cortex_a_step,
3166
3167         .assert_reset = cortex_a_assert_reset,
3168         .deassert_reset = cortex_a_deassert_reset,
3169
3170         /* REVISIT allow exporting VFP3 registers ... */
3171         .get_gdb_arch = arm_get_gdb_arch,
3172         .get_gdb_reg_list = arm_get_gdb_reg_list,
3173
3174         .read_memory = cortex_a_read_memory,
3175         .write_memory = cortex_a_write_memory,
3176
3177         .read_buffer = cortex_a_read_buffer,
3178         .write_buffer = cortex_a_write_buffer,
3179
3180         .checksum_memory = arm_checksum_memory,
3181         .blank_check_memory = arm_blank_check_memory,
3182
3183         .run_algorithm = armv4_5_run_algorithm,
3184
3185         .add_breakpoint = cortex_a_add_breakpoint,
3186         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3187         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3188         .remove_breakpoint = cortex_a_remove_breakpoint,
3189         .add_watchpoint = NULL,
3190         .remove_watchpoint = NULL,
3191
3192         .commands = cortex_a_command_handlers,
3193         .target_create = cortex_a_target_create,
3194         .target_jim_configure = adiv5_jim_configure,
3195         .init_target = cortex_a_init_target,
3196         .examine = cortex_a_examine,
3197         .deinit_target = cortex_a_deinit_target,
3198
3199         .read_phys_memory = cortex_a_read_phys_memory,
3200         .write_phys_memory = cortex_a_write_phys_memory,
3201         .mmu = cortex_a_mmu,
3202         .virt2phys = cortex_a_virt2phys,
3203 };
3204
3205 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3206         {
3207                 .name = "dbginit",
3208                 .handler = cortex_a_handle_dbginit_command,
3209                 .mode = COMMAND_EXEC,
3210                 .help = "Initialize core debug",
3211                 .usage = "",
3212         },
3213         {
3214                 .name = "maskisr",
3215                 .handler = handle_cortex_a_mask_interrupts_command,
3216                 .mode = COMMAND_EXEC,
3217                 .help = "mask cortex_r4 interrupts",
3218                 .usage = "['on'|'off']",
3219         },
3220
3221         COMMAND_REGISTRATION_DONE
3222 };
3223 static const struct command_registration cortex_r4_command_handlers[] = {
3224         {
3225                 .chain = arm_command_handlers,
3226         },
3227         {
3228                 .name = "cortex_r4",
3229                 .mode = COMMAND_ANY,
3230                 .help = "Cortex-R4 command group",
3231                 .usage = "",
3232                 .chain = cortex_r4_exec_command_handlers,
3233         },
3234         COMMAND_REGISTRATION_DONE
3235 };
3236
3237 struct target_type cortexr4_target = {
3238         .name = "cortex_r4",
3239
3240         .poll = cortex_a_poll,
3241         .arch_state = armv7a_arch_state,
3242
3243         .halt = cortex_a_halt,
3244         .resume = cortex_a_resume,
3245         .step = cortex_a_step,
3246
3247         .assert_reset = cortex_a_assert_reset,
3248         .deassert_reset = cortex_a_deassert_reset,
3249
3250         /* REVISIT allow exporting VFP3 registers ... */
3251         .get_gdb_arch = arm_get_gdb_arch,
3252         .get_gdb_reg_list = arm_get_gdb_reg_list,
3253
3254         .read_memory = cortex_a_read_phys_memory,
3255         .write_memory = cortex_a_write_phys_memory,
3256
3257         .checksum_memory = arm_checksum_memory,
3258         .blank_check_memory = arm_blank_check_memory,
3259
3260         .run_algorithm = armv4_5_run_algorithm,
3261
3262         .add_breakpoint = cortex_a_add_breakpoint,
3263         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3264         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3265         .remove_breakpoint = cortex_a_remove_breakpoint,
3266         .add_watchpoint = NULL,
3267         .remove_watchpoint = NULL,
3268
3269         .commands = cortex_r4_command_handlers,
3270         .target_create = cortex_r4_target_create,
3271         .target_jim_configure = adiv5_jim_configure,
3272         .init_target = cortex_a_init_target,
3273         .examine = cortex_a_examine,
3274         .deinit_target = cortex_a_deinit_target,
3275 };