1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   Copyright (C) 2016 Chengyu Zheng                                      *
27  *   chengyu.zheng@polimi.it : watchpoint support                          *
28  *                                                                         *
29  *   This program is free software; you can redistribute it and/or modify  *
30  *   it under the terms of the GNU General Public License as published by  *
31  *   the Free Software Foundation; either version 2 of the License, or     *
32  *   (at your option) any later version.                                   *
33  *                                                                         *
34  *   This program is distributed in the hope that it will be useful,       *
35  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
36  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
37  *   GNU General Public License for more details.                          *
38  *                                                                         *
39  *   You should have received a copy of the GNU General Public License     *
40  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
41  *                                                                         *
42  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
43  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
44  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
45  *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
46  *                                                                         *
47  ***************************************************************************/
48
49 #ifdef HAVE_CONFIG_H
50 #include "config.h"
51 #endif
52
53 #include "breakpoints.h"
54 #include "cortex_a.h"
55 #include "register.h"
56 #include "armv7a_mmu.h"
57 #include "target_request.h"
58 #include "target_type.h"
59 #include "arm_opcodes.h"
60 #include "arm_semihosting.h"
61 #include "jtag/interface.h"
62 #include "transport/transport.h"
63 #include "smp.h"
64 #include <helper/time_support.h>
65
66 static int cortex_a_poll(struct target *target);
67 static int cortex_a_debug_entry(struct target *target);
68 static int cortex_a_restore_context(struct target *target, bool bpwp);
69 static int cortex_a_set_breakpoint(struct target *target,
70         struct breakpoint *breakpoint, uint8_t matchmode);
71 static int cortex_a_set_context_breakpoint(struct target *target,
72         struct breakpoint *breakpoint, uint8_t matchmode);
73 static int cortex_a_set_hybrid_breakpoint(struct target *target,
74         struct breakpoint *breakpoint);
75 static int cortex_a_unset_breakpoint(struct target *target,
76         struct breakpoint *breakpoint);
77 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
78         uint32_t value, uint32_t *dscr);
79 static int cortex_a_mmu(struct target *target, int *enabled);
80 static int cortex_a_mmu_modify(struct target *target, int enable);
81 static int cortex_a_virt2phys(struct target *target,
82         target_addr_t virt, target_addr_t *phys);
83 static int cortex_a_read_cpu_memory(struct target *target,
84         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
85
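/* Integer base-2 logarithm: returns floor(log2(x)), or 0 when x is 0 or 1. */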
86 static unsigned int ilog2(unsigned int x)
87 {
88         unsigned int y = 0;
89         x /= 2;
90         while (x) {
91                 ++y;
92                 x /= 2;
93         }
94         return y;
95 }
96
97 /*  restore cp15_control_reg at resume */
98 static int cortex_a_restore_cp15_control_reg(struct target *target)
99 {
100         int retval = ERROR_OK;
101         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
102         struct armv7a_common *armv7a = target_to_armv7a(target);
103
104         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
105                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
106                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
107                 retval = armv7a->arm.mcr(target, 15,
108                                 0, 0,   /* op1, op2 */
109                                 1, 0,   /* CRn, CRm */
110                                 cortex_a->cp15_control_reg);
111         }
112         return retval;
113 }
114
115 /*
116  * Set up the ARM core for memory access.
117  * If !phys_access, switch to SVC mode and make sure the MMU is on.
118  * If phys_access, switch the MMU off.
119  */
120 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
121 {
122         struct armv7a_common *armv7a = target_to_armv7a(target);
123         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
124         int mmu_enabled = 0;
125
126         if (phys_access == 0) {
127                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
128                 cortex_a_mmu(target, &mmu_enabled);
129                 if (mmu_enabled)
130                         cortex_a_mmu_modify(target, 1);
131                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
132                         /* overwrite DACR to all-manager */
133                         armv7a->arm.mcr(target, 15,
134                                         0, 0, 3, 0,
135                                         0xFFFFFFFF);
136                 }
137         } else {
138                 cortex_a_mmu(target, &mmu_enabled);
139                 if (mmu_enabled)
140                         cortex_a_mmu_modify(target, 0);
141         }
142         return ERROR_OK;
143 }
144
145 /*
146  * Restore ARM core after memory access.
147  * If !phys_access, switch to previous mode
148  * If phys_access, restore MMU setting
149  */
150 static int cortex_a_post_memaccess(struct target *target, int phys_access)
151 {
152         struct armv7a_common *armv7a = target_to_armv7a(target);
153         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
154
155         if (phys_access == 0) {
156                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
157                         /* restore */
158                         armv7a->arm.mcr(target, 15,
159                                         0, 0, 3, 0,
160                                         cortex_a->cp15_dacr_reg);
161                 }
162                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
163         } else {
164                 int mmu_enabled = 0;
165                 cortex_a_mmu(target, &mmu_enabled);
166                 if (mmu_enabled)
167                         cortex_a_mmu_modify(target, 1);
168         }
169         return ERROR_OK;
170 }
171
172
173 /*  modify cp15_control_reg to enable or disable the MMU for:
174  *  - virt2phys address conversion
175  *  - reading or writing memory at physical or virtual addresses */
176 static int cortex_a_mmu_modify(struct target *target, int enable)
177 {
178         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
179         struct armv7a_common *armv7a = target_to_armv7a(target);
180         int retval = ERROR_OK;
181         int need_write = 0;
182
183         if (enable) {
184                 /*  the MMU can only be enabled here if it was enabled when the target stopped */
185                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
186                         LOG_ERROR("trying to enable mmu on a target halted with mmu disabled");
187                         return ERROR_FAIL;
188                 }
189                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
190                         cortex_a->cp15_control_reg_curr |= 0x1U;
191                         need_write = 1;
192                 }
193         } else {
194                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
195                         cortex_a->cp15_control_reg_curr &= ~0x1U;
196                         need_write = 1;
197                 }
198         }
199
200         if (need_write) {
201                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
202                         enable ? "enable mmu" : "disable mmu",
203                         cortex_a->cp15_control_reg_curr);
204
205                 retval = armv7a->arm.mcr(target, 15,
206                                 0, 0,   /* op1, op2 */
207                                 1, 0,   /* CRn, CRm */
208                                 cortex_a->cp15_control_reg_curr);
209         }
210         return retval;
211 }
212
213 /*
214  * Cortex-A Basic debug access, very low level assumes state is saved
215  */
216 static int cortex_a_init_debug_access(struct target *target)
217 {
218         struct armv7a_common *armv7a = target_to_armv7a(target);
219         uint32_t dscr;
220         int retval;
221
222         /* lock memory-mapped access to debug registers to prevent
223          * software interference */
224         retval = mem_ap_write_u32(armv7a->debug_ap,
225                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
226         if (retval != ERROR_OK)
227                 return retval;
228
229         /* Disable cacheline fills and force cache write-through in debug state */
230         retval = mem_ap_write_u32(armv7a->debug_ap,
231                         armv7a->debug_base + CPUDBG_DSCCR, 0);
232         if (retval != ERROR_OK)
233                 return retval;
234
235         /* Disable TLB lookup and refill/eviction in debug state */
236         retval = mem_ap_write_u32(armv7a->debug_ap,
237                         armv7a->debug_base + CPUDBG_DSMCR, 0);
238         if (retval != ERROR_OK)
239                 return retval;
240
241         retval = dap_run(armv7a->debug_ap->dap);
242         if (retval != ERROR_OK)
243                 return retval;
244
245         /* Enabling of instruction execution in debug mode is done in debug_entry code */
246
247         /* Resync breakpoint registers */
248
249         /* Enable halt for breakpoint, watchpoint and vector catch */
250         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
251                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
252         if (retval != ERROR_OK)
253                 return retval;
254         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
255                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
256         if (retval != ERROR_OK)
257                 return retval;
258
259         /* Since this is likely called from init or reset, update target state information */
260         return cortex_a_poll(target);
261 }
262
263 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
264 {
265         /* Waits until InstrCompl_l becomes 1, indicating the instruction is done.
266          * Writes the final value of DSCR into *dscr. Pass force to always read
267          * DSCR at least once. */
268         struct armv7a_common *armv7a = target_to_armv7a(target);
269         int retval;
270
271         if (force) {
272                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
273                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
274                 if (retval != ERROR_OK) {
275                         LOG_ERROR("Could not read DSCR register");
276                         return retval;
277                 }
278         }
279
280         retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
281         if (retval != ERROR_OK)
282                 LOG_ERROR("Error waiting for InstrCompl=1");
283         return retval;
284 }
285
286 /* To reduce needless round-trips, pass in a pointer to the current
287  * DSCR value.  Initialize it to zero if you just need to know the
288  * value on return from this function; or DSCR_INSTR_COMP if you
289  * happen to know that no instruction is pending.
290  */
291 static int cortex_a_exec_opcode(struct target *target,
292         uint32_t opcode, uint32_t *dscr_p)
293 {
294         uint32_t dscr;
295         int retval;
296         struct armv7a_common *armv7a = target_to_armv7a(target);
297
298         dscr = dscr_p ? *dscr_p : 0;
299
300         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
301
302         /* Wait for InstrCompl bit to be set */
303         retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
304         if (retval != ERROR_OK)
305                 return retval;
306
307         retval = mem_ap_write_u32(armv7a->debug_ap,
308                         armv7a->debug_base + CPUDBG_ITR, opcode);
309         if (retval != ERROR_OK)
310                 return retval;
311
312         /* Wait for InstrCompl bit to be set */
313         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
314         if (retval != ERROR_OK) {
315                 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
316                 return retval;
317         }
318
319         if (dscr_p)
320                 *dscr_p = dscr;
321
322         return retval;
323 }
324
325 /* Write to memory mapped registers directly with no cache or mmu handling */
326 static int cortex_a_dap_write_memap_register_u32(struct target *target,
327         uint32_t address,
328         uint32_t value)
329 {
330         int retval;
331         struct armv7a_common *armv7a = target_to_armv7a(target);
332
333         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
334
335         return retval;
336 }
337
338 /*
339  * Cortex-A implementation of Debug Programmer's Model
340  *
341  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
342  * so there's no need to poll for it before executing an instruction.
343  *
344  * NOTE that in several of these cases the "stall" mode might be useful.
345  * It'd let us queue a few operations together... prepare/finish might
346  * be the places to enable/disable that mode.
347  */
348
349 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
350 {
351         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
352 }
353
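/* Push a 32-bit word into the DCC by writing the memory-mapped DTRRX
 * register; the core can then pick it up with an MRC p14 issued via the ITR. */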
354 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
355 {
356         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
357         return mem_ap_write_u32(a->armv7a_common.debug_ap,
358                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
359 }
360
361 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
362         uint32_t *dscr_p)
363 {
364         uint32_t dscr = DSCR_INSTR_COMP;
365         int retval;
366
367         if (dscr_p)
368                 dscr = *dscr_p;
369
370         /* Wait for DTRRXfull */
371         retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
372                         DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
373         if (retval != ERROR_OK) {
374                 LOG_ERROR("Error waiting for read dcc");
375                 return retval;
376         }
377
378         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
379                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
380         if (retval != ERROR_OK)
381                 return retval;
382         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
383
384         if (dscr_p)
385                 *dscr_p = dscr;
386
387         return retval;
388 }
389
390 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
391 {
392         struct cortex_a_common *a = dpm_to_a(dpm);
393         uint32_t dscr;
394         int retval;
395
396         /* set up invariant:  INSTR_COMP is set after every DPM operation */
397         retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
398         if (retval != ERROR_OK) {
399                 LOG_ERROR("Error waiting for dpm prepare");
400                 return retval;
401         }
402
403         /* this "should never happen" ... */
404         if (dscr & DSCR_DTR_RX_FULL) {
405                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
406                 /* Clear DCCRX */
407                 retval = cortex_a_exec_opcode(
408                                 a->armv7a_common.arm.target,
409                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
410                                 &dscr);
411                 if (retval != ERROR_OK)
412                         return retval;
413         }
414
415         return retval;
416 }
417
418 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
419 {
420         /* REVISIT what could be done here? */
421         return ERROR_OK;
422 }
423
424 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
425         uint32_t opcode, uint32_t data)
426 {
427         struct cortex_a_common *a = dpm_to_a(dpm);
428         int retval;
429         uint32_t dscr = DSCR_INSTR_COMP;
430
431         retval = cortex_a_write_dcc(a, data);
432         if (retval != ERROR_OK)
433                 return retval;
434
435         return cortex_a_exec_opcode(
436                         a->armv7a_common.arm.target,
437                         opcode,
438                         &dscr);
439 }
440
441 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
442         uint8_t rt, uint32_t data)
443 {
444         struct cortex_a_common *a = dpm_to_a(dpm);
445         uint32_t dscr = DSCR_INSTR_COMP;
446         int retval;
447
448         if (rt > 15)
449                 return ERROR_TARGET_INVALID;
450
451         retval = cortex_a_write_dcc(a, data);
452         if (retval != ERROR_OK)
453                 return retval;
454
455         /* DCCRX to Rt, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
456         return cortex_a_exec_opcode(
457                         a->armv7a_common.arm.target,
458                         ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
459                         &dscr);
460 }
461
462 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
463         uint32_t opcode, uint32_t data)
464 {
465         struct cortex_a_common *a = dpm_to_a(dpm);
466         uint32_t dscr = DSCR_INSTR_COMP;
467         int retval;
468
469         retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
470         if (retval != ERROR_OK)
471                 return retval;
472
473         /* then the opcode, taking data from R0 */
474         retval = cortex_a_exec_opcode(
475                         a->armv7a_common.arm.target,
476                         opcode,
477                         &dscr);
478
479         return retval;
480 }
481
482 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
483 {
484         struct target *target = dpm->arm->target;
485         uint32_t dscr = DSCR_INSTR_COMP;
486
487         /* "Prefetch flush" after modifying execution status in CPSR */
488         return cortex_a_exec_opcode(target,
489                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
490                         &dscr);
491 }
492
493 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
494         uint32_t opcode, uint32_t *data)
495 {
496         struct cortex_a_common *a = dpm_to_a(dpm);
497         int retval;
498         uint32_t dscr = DSCR_INSTR_COMP;
499
500         /* the opcode, writing data to DCC */
501         retval = cortex_a_exec_opcode(
502                         a->armv7a_common.arm.target,
503                         opcode,
504                         &dscr);
505         if (retval != ERROR_OK)
506                 return retval;
507
508         return cortex_a_read_dcc(a, data, &dscr);
509 }
510
511 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
512         uint8_t rt, uint32_t *data)
513 {
514         struct cortex_a_common *a = dpm_to_a(dpm);
515         uint32_t dscr = DSCR_INSTR_COMP;
516         int retval;
517
518         if (rt > 15)
519                 return ERROR_TARGET_INVALID;
520
521         retval = cortex_a_exec_opcode(
522                         a->armv7a_common.arm.target,
523                         ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
524                         &dscr);
525         if (retval != ERROR_OK)
526                 return retval;
527
528         return cortex_a_read_dcc(a, data, &dscr);
529 }
530
531 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
532         uint32_t opcode, uint32_t *data)
533 {
534         struct cortex_a_common *a = dpm_to_a(dpm);
535         uint32_t dscr = DSCR_INSTR_COMP;
536         int retval;
537
538         /* the opcode, writing data to R0 */
539         retval = cortex_a_exec_opcode(
540                         a->armv7a_common.arm.target,
541                         opcode,
542                         &dscr);
543         if (retval != ERROR_OK)
544                 return retval;
545
546         /* write R0 to DCC */
547         return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
548 }
549
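/* Program a breakpoint (index 0..15) or watchpoint (index 16..31) register
 * pair: write the value register first, then the control register. */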
550 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
551         uint32_t addr, uint32_t control)
552 {
553         struct cortex_a_common *a = dpm_to_a(dpm);
554         uint32_t vr = a->armv7a_common.debug_base;
555         uint32_t cr = a->armv7a_common.debug_base;
556         int retval;
557
558         switch (index_t) {
559                 case 0 ... 15:  /* breakpoints */
560                         vr += CPUDBG_BVR_BASE;
561                         cr += CPUDBG_BCR_BASE;
562                         break;
563                 case 16 ... 31: /* watchpoints */
564                         vr += CPUDBG_WVR_BASE;
565                         cr += CPUDBG_WCR_BASE;
566                         index_t -= 16;
567                         break;
568                 default:
569                         return ERROR_FAIL;
570         }
571         vr += 4 * index_t;
572         cr += 4 * index_t;
573
574         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
575                 (unsigned) vr, (unsigned) cr);
576
577         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
578                         vr, addr);
579         if (retval != ERROR_OK)
580                 return retval;
581         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
582                         cr, control);
583         return retval;
584 }
585
586 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
587 {
588         struct cortex_a_common *a = dpm_to_a(dpm);
589         uint32_t cr;
590
591         switch (index_t) {
592                 case 0 ... 15:
593                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
594                         break;
595                 case 16 ... 31:
596                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
597                         index_t -= 16;
598                         break;
599                 default:
600                         return ERROR_FAIL;
601         }
602         cr += 4 * index_t;
603
604         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
605
606         /* clear control register */
607         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
608 }
609
610 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
611 {
612         struct arm_dpm *dpm = &a->armv7a_common.dpm;
613         int retval;
614
615         dpm->arm = &a->armv7a_common.arm;
616         dpm->didr = didr;
617
618         dpm->prepare = cortex_a_dpm_prepare;
619         dpm->finish = cortex_a_dpm_finish;
620
621         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
622         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
623         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
624
625         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
626         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
627
628         dpm->bpwp_enable = cortex_a_bpwp_enable;
629         dpm->bpwp_disable = cortex_a_bpwp_disable;
630
631         retval = arm_dpm_setup(dpm);
632         if (retval == ERROR_OK)
633                 retval = arm_dpm_initialize(dpm);
634
635         return retval;
636 }
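
/* Return the halted core with the given coreid from the SMP target list,
 * or the original target if no such halted core is found. */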
637 static struct target *get_cortex_a(struct target *target, int32_t coreid)
638 {
639         struct target_list *head;
640         struct target *curr;
641
642         head = target->head;
643         while (head != (struct target_list *)NULL) {
644                 curr = head->target;
645                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
646                         return curr;
647                 head = head->next;
648         }
649         return target;
650 }
651 static int cortex_a_halt(struct target *target);
652
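/* Request a halt on every other examined core of the SMP group; the return
 * value accumulates the individual cortex_a_halt() results. */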
653 static int cortex_a_halt_smp(struct target *target)
654 {
655         int retval = 0;
656         struct target_list *head;
657         struct target *curr;
658         head = target->head;
659         while (head != (struct target_list *)NULL) {
660                 curr = head->target;
661                 if ((curr != target) && (curr->state != TARGET_HALTED)
662                         && target_was_examined(curr))
663                         retval += cortex_a_halt(curr);
664                 head = head->next;
665         }
666         return retval;
667 }
668
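/* After one core halts, halt and poll the rest of the SMP group, leaving the
 * core that serves gdb for last so its halt event is reported only once the
 * other cores are up to date. */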
669 static int update_halt_gdb(struct target *target)
670 {
671         struct target *gdb_target = NULL;
672         struct target_list *head;
673         struct target *curr;
674         int retval = 0;
675
676         if (target->gdb_service && target->gdb_service->core[0] == -1) {
677                 target->gdb_service->target = target;
678                 target->gdb_service->core[0] = target->coreid;
679                 retval += cortex_a_halt_smp(target);
680         }
681
682         if (target->gdb_service)
683                 gdb_target = target->gdb_service->target;
684
685         foreach_smp_target(head, target->head) {
686                 curr = head->target;
687                 /* skip calling context */
688                 if (curr == target)
689                         continue;
690                 if (!target_was_examined(curr))
691                         continue;
692                 /* skip targets that were already halted */
693                 if (curr->state == TARGET_HALTED)
694                         continue;
695                 /* Skip gdb_target; it alerts GDB so it has to be polled last */
696                 if (curr == gdb_target)
697                         continue;
698
699                 /* avoid recursion in cortex_a_poll() */
700                 curr->smp = 0;
701                 cortex_a_poll(curr);
702                 curr->smp = 1;
703         }
704
705         /* after all targets were updated, poll the gdb serving target */
706         if (gdb_target != NULL && gdb_target != target)
707                 cortex_a_poll(gdb_target);
708         return retval;
709 }
710
711 /*
712  * Cortex-A Run control
713  */
714
715 static int cortex_a_poll(struct target *target)
716 {
717         int retval = ERROR_OK;
718         uint32_t dscr;
719         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
720         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
721         enum target_state prev_target_state = target->state;
722         /*  gdb toggles to another core as follows: */
723         /*  maint packet J core_id */
724         /*  continue */
725         /*  the next poll triggers a halt event sent to gdb */
726         if ((target->state == TARGET_HALTED) && (target->smp) &&
727                 (target->gdb_service) &&
728                 (target->gdb_service->target == NULL)) {
729                 target->gdb_service->target =
730                         get_cortex_a(target, target->gdb_service->core[1]);
731                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
732                 return retval;
733         }
734         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
735                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
736         if (retval != ERROR_OK)
737                 return retval;
738         cortex_a->cpudbg_dscr = dscr;
739
740         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
741                 if (prev_target_state != TARGET_HALTED) {
742                         /* We have a halting debug event */
743                         LOG_DEBUG("Target halted");
744                         target->state = TARGET_HALTED;
745
746                         retval = cortex_a_debug_entry(target);
747                         if (retval != ERROR_OK)
748                                 return retval;
749
750                         if (target->smp) {
751                                 retval = update_halt_gdb(target);
752                                 if (retval != ERROR_OK)
753                                         return retval;
754                         }
755
756                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
757                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
758                         } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
759                                 if (arm_semihosting(target, &retval) != 0)
760                                         return retval;
761
762                                 target_call_event_callbacks(target,
763                                         TARGET_EVENT_HALTED);
764                         }
765                 }
766         } else
767                 target->state = TARGET_RUNNING;
768
769         return retval;
770 }
771
772 static int cortex_a_halt(struct target *target)
773 {
774         int retval;
775         uint32_t dscr;
776         struct armv7a_common *armv7a = target_to_armv7a(target);
777
778         /*
779          * Tell the core to be halted by writing DRCR with 0x1
780          * and then wait for the core to be halted.
781          */
782         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
783                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
784         if (retval != ERROR_OK)
785                 return retval;
786
787         dscr = 0; /* force read of dscr */
788         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
789                         DSCR_CORE_HALTED, &dscr);
790         if (retval != ERROR_OK) {
791                 LOG_ERROR("Error waiting for halt");
792                 return retval;
793         }
794
795         target->debug_reason = DBG_REASON_DBGRQ;
796
797         return ERROR_OK;
798 }
799
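/* Prepare the core for resuming: fix up the resume PC for the current core
 * state, restore cp15 and dirty registers, and invalidate the register cache.
 * The core itself is restarted separately by cortex_a_internal_restart(). */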
800 static int cortex_a_internal_restore(struct target *target, int current,
801         target_addr_t *address, int handle_breakpoints, int debug_execution)
802 {
803         struct armv7a_common *armv7a = target_to_armv7a(target);
804         struct arm *arm = &armv7a->arm;
805         int retval;
806         uint32_t resume_pc;
807
808         if (!debug_execution)
809                 target_free_all_working_areas(target);
810
811 #if 0
812         if (debug_execution) {
813                 /* Disable interrupts */
814                 /* We disable interrupts in the PRIMASK register instead of
815                  * masking with C_MASKINTS,
816                  * This is probably the same issue as Cortex-M3 Errata 377493:
817                  * C_MASKINTS in parallel with disabled interrupts can cause
818                  * local faults to not be taken. */
819                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
820                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
821                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
822
823                 /* Make sure we are in Thumb mode */
824                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
825                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
826                         32) | (1 << 24));
827                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
828                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
829         }
830 #endif
831
832         /* current = 1: continue on current pc, otherwise continue at <address> */
833         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
834         if (!current)
835                 resume_pc = *address;
836         else
837                 *address = resume_pc;
838
839         /* Make sure that the ARMv7 gdb Thumb fixups do not
840          * kill the return address
841          */
842         switch (arm->core_state) {
843                 case ARM_STATE_ARM:
844                         resume_pc &= 0xFFFFFFFC;
845                         break;
846                 case ARM_STATE_THUMB:
847                 case ARM_STATE_THUMB_EE:
848                         /* When the return address is loaded into PC
849                          * bit 0 must be 1 to stay in Thumb state
850                          */
851                         resume_pc |= 0x1;
852                         break;
853                 case ARM_STATE_JAZELLE:
854                         LOG_ERROR("How do I resume into Jazelle state??");
855                         return ERROR_FAIL;
856                 case ARM_STATE_AARCH64:
857                         LOG_ERROR("Shouldn't be in AARCH64 state");
858                         return ERROR_FAIL;
859         }
860         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
861         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
862         arm->pc->dirty = true;
863         arm->pc->valid = true;
864
865         /* restore dpm_mode at system halt */
866         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
867         /* call this now, before restoring context, because it uses cpu
868          * register r0 to restore the cp15 control register */
869         retval = cortex_a_restore_cp15_control_reg(target);
870         if (retval != ERROR_OK)
871                 return retval;
872         retval = cortex_a_restore_context(target, handle_breakpoints);
873         if (retval != ERROR_OK)
874                 return retval;
875         target->debug_reason = DBG_REASON_NOTHALTED;
876         target->state = TARGET_RUNNING;
877
878         /* registers are now invalid */
879         register_cache_invalidate(arm->core_cache);
880
881 #if 0
882         /* the front-end may request us not to handle breakpoints */
883         if (handle_breakpoints) {
884                 /* Single step past breakpoint at current address */
885                 breakpoint = breakpoint_find(target, resume_pc);
886                 if (breakpoint) {
887                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
888                         cortex_m3_unset_breakpoint(target, breakpoint);
889                         cortex_m3_single_step_core(target);
890                         cortex_m3_set_breakpoint(target, breakpoint);
891                 }
892         }
893
894 #endif
895         return retval;
896 }
897
898 static int cortex_a_internal_restart(struct target *target)
899 {
900         struct armv7a_common *armv7a = target_to_armv7a(target);
901         struct arm *arm = &armv7a->arm;
902         int retval;
903         uint32_t dscr;
904         /*
905          * Restart core and wait for it to be started.  Clear ITRen and sticky
906          * exception flags: see ARMv7 ARM, C5.9.
907          *
908          * REVISIT: for single stepping, we probably want to
909          * disable IRQs by default, with optional override...
910          */
911
912         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
913                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
914         if (retval != ERROR_OK)
915                 return retval;
916
917         if ((dscr & DSCR_INSTR_COMP) == 0)
918                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
919
920         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
921                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
922         if (retval != ERROR_OK)
923                 return retval;
924
925         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
926                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
927                         DRCR_CLEAR_EXCEPTIONS);
928         if (retval != ERROR_OK)
929                 return retval;
930
931         dscr = 0; /* force read of dscr */
932         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
933                         DSCR_CORE_RESTARTED, &dscr);
934         if (retval != ERROR_OK) {
935                 LOG_ERROR("Error waiting for resume");
936                 return retval;
937         }
938
939         target->debug_reason = DBG_REASON_NOTHALTED;
940         target->state = TARGET_RUNNING;
941
942         /* registers are now invalid */
943         register_cache_invalidate(arm->core_cache);
944
945         return ERROR_OK;
946 }
947
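/* Restore and restart every other examined, non-running core of the SMP group
 * at its current pc. */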
948 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
949 {
950         int retval = 0;
951         struct target_list *head;
952         struct target *curr;
953         target_addr_t address;
954         head = target->head;
955         while (head != (struct target_list *)NULL) {
956                 curr = head->target;
957                 if ((curr != target) && (curr->state != TARGET_RUNNING)
958                         && target_was_examined(curr)) {
959                         /*  resume at current address, not in step mode */
960                         retval += cortex_a_internal_restore(curr, 1, &address,
961                                         handle_breakpoints, 0);
962                         retval += cortex_a_internal_restart(curr);
963                 }
964                 head = head->next;
965
966         }
967         return retval;
968 }
969
970 static int cortex_a_resume(struct target *target, int current,
971         target_addr_t address, int handle_breakpoints, int debug_execution)
972 {
973         int retval = 0;
974         /* dummy resume for smp toggle in order to reduce gdb impact  */
975         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
976                 /*   simulate a start and halt of target */
977                 target->gdb_service->target = NULL;
978                 target->gdb_service->core[0] = target->gdb_service->core[1];
979                 /*  fake resume: at the next poll we switch to target core[1], see poll */
980                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
981                 return 0;
982         }
983         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
984         if (target->smp) {
985                 target->gdb_service->core[0] = -1;
986                 retval = cortex_a_restore_smp(target, handle_breakpoints);
987                 if (retval != ERROR_OK)
988                         return retval;
989         }
990         cortex_a_internal_restart(target);
991
992         if (!debug_execution) {
993                 target->state = TARGET_RUNNING;
994                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
995                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
996         } else {
997                 target->state = TARGET_DEBUG_RUNNING;
998                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
999                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1000         }
1001
1002         return ERROR_OK;
1003 }
1004
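/* Called right after the core has halted: enable ITR execution, determine the
 * debug reason, and read the registers reachable through the debug port. */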
1005 static int cortex_a_debug_entry(struct target *target)
1006 {
1007         uint32_t dscr;
1008         int retval = ERROR_OK;
1009         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1010         struct armv7a_common *armv7a = target_to_armv7a(target);
1011         struct arm *arm = &armv7a->arm;
1012
1013         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1014
1015         /* REVISIT surely we should not re-read DSCR !! */
1016         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1017                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1018         if (retval != ERROR_OK)
1019                 return retval;
1020
1021         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1022          * imprecise data aborts get discarded by issuing a Data
1023          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1024          */
1025
1026         /* Enable the ITR execution once we are in debug mode */
1027         dscr |= DSCR_ITR_EN;
1028         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1029                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1030         if (retval != ERROR_OK)
1031                 return retval;
1032
1033         /* Examine debug reason */
1034         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1035
1036         /* save address of instruction that triggered the watchpoint? */
1037         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1038                 uint32_t wfar;
1039
1040                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1041                                 armv7a->debug_base + CPUDBG_WFAR,
1042                                 &wfar);
1043                 if (retval != ERROR_OK)
1044                         return retval;
1045                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1046         }
1047
1048         /* First load register accessible through core debug port */
1049         retval = arm_dpm_read_current_registers(&armv7a->dpm);
1050         if (retval != ERROR_OK)
1051                 return retval;
1052
1053         if (arm->spsr) {
1054                 /* read SPSR */
1055                 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1056                 if (retval != ERROR_OK)
1057                         return retval;
1058         }
1059
1060 #if 0
1061 /* TODO, Move this */
1062         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1063         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1064         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1065
1066         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1067         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1068
1069         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1070         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1071 #endif
1072
1073         /* Are we in an exception handler */
1074 /*      armv4_5->exception_number = 0; */
1075         if (armv7a->post_debug_entry) {
1076                 retval = armv7a->post_debug_entry(target);
1077                 if (retval != ERROR_OK)
1078                         return retval;
1079         }
1080
1081         return retval;
1082 }
1083
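/* Refresh cached MMU/cache state after debug entry: read the CP15 system
 * control register and DACR and update the armv7a mmu/cache flags. */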
1084 static int cortex_a_post_debug_entry(struct target *target)
1085 {
1086         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1087         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1088         int retval;
1089
1090         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1091         retval = armv7a->arm.mrc(target, 15,
1092                         0, 0,   /* op1, op2 */
1093                         1, 0,   /* CRn, CRm */
1094                         &cortex_a->cp15_control_reg);
1095         if (retval != ERROR_OK)
1096                 return retval;
1097         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1098         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1099
1100         if (!armv7a->is_armv7r)
1101                 armv7a_read_ttbcr(target);
1102
1103         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1104                 armv7a_identify_cache(target);
1105
1106         if (armv7a->is_armv7r) {
1107                 armv7a->armv7a_mmu.mmu_enabled = 0;
1108         } else {
1109                 armv7a->armv7a_mmu.mmu_enabled =
1110                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1111         }
1112         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1113                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1114         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1115                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1116         cortex_a->curr_mode = armv7a->arm.core_mode;
1117
1118         /* switch to SVC mode to read DACR */
1119         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1120         armv7a->arm.mrc(target, 15,
1121                         0, 0, 3, 0,
1122                         &cortex_a->cp15_dacr_reg);
1123
1124         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1125                         cortex_a->cp15_dacr_reg);
1126
1127         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1128         return ERROR_OK;
1129 }
1130
1131 static int cortex_a_set_dscr_bits(struct target *target,
1132                 unsigned long bit_mask, unsigned long value)
1133 {
1134         struct armv7a_common *armv7a = target_to_armv7a(target);
1135         uint32_t dscr;
1136
1137         /* Read DSCR */
1138         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1139                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1140         if (ERROR_OK != retval)
1141                 return retval;
1142
1143         /* clear bitfield */
1144         dscr &= ~bit_mask;
1145         /* put new value */
1146         dscr |= value & bit_mask;
1147
1148         /* write new DSCR */
1149         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1150                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1151         return retval;
1152 }
1153
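/* Single step by planting a hardware breakpoint that matches any address
 * except the resume address (IVA mismatch), resuming, and waiting for the
 * core to halt again. */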
1154 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1155         int handle_breakpoints)
1156 {
1157         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1158         struct armv7a_common *armv7a = target_to_armv7a(target);
1159         struct arm *arm = &armv7a->arm;
1160         struct breakpoint *breakpoint = NULL;
1161         struct breakpoint stepbreakpoint;
1162         struct reg *r;
1163         int retval;
1164
1165         if (target->state != TARGET_HALTED) {
1166                 LOG_WARNING("target not halted");
1167                 return ERROR_TARGET_NOT_HALTED;
1168         }
1169
1170         /* current = 1: continue on current pc, otherwise continue at <address> */
1171         r = arm->pc;
1172         if (!current)
1173                 buf_set_u32(r->value, 0, 32, address);
1174         else
1175                 address = buf_get_u32(r->value, 0, 32);
1176
1177         /* The front-end may request us not to handle breakpoints.
1178          * But since Cortex-A uses breakpoint for single step,
1179          * we MUST handle breakpoints.
1180          */
1181         handle_breakpoints = 1;
1182         if (handle_breakpoints) {
1183                 breakpoint = breakpoint_find(target, address);
1184                 if (breakpoint)
1185                         cortex_a_unset_breakpoint(target, breakpoint);
1186         }
1187
1188         /* Setup single step breakpoint */
1189         stepbreakpoint.address = address;
1190         stepbreakpoint.asid = 0;
1191         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1192                 ? 2 : 4;
1193         stepbreakpoint.type = BKPT_HARD;
1194         stepbreakpoint.set = 0;
1195
1196         /* Disable interrupts during single step if requested */
1197         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1198                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1199                 if (ERROR_OK != retval)
1200                         return retval;
1201         }
1202
1203         /* Break on IVA mismatch */
1204         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1205
1206         target->debug_reason = DBG_REASON_SINGLESTEP;
1207
1208         retval = cortex_a_resume(target, 1, address, 0, 0);
1209         if (retval != ERROR_OK)
1210                 return retval;
1211
1212         int64_t then = timeval_ms();
1213         while (target->state != TARGET_HALTED) {
1214                 retval = cortex_a_poll(target);
1215                 if (retval != ERROR_OK)
1216                         return retval;
1217                 if (target->state == TARGET_HALTED)
1218                         break;
1219                 if (timeval_ms() > then + 1000) {
1220                         LOG_ERROR("timeout waiting for target halt");
1221                         return ERROR_FAIL;
1222                 }
1223         }
1224
1225         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1226
1227         /* Re-enable interrupts if they were disabled */
1228         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1229                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1230                 if (ERROR_OK != retval)
1231                         return retval;
1232         }
1233
1234
1235         target->debug_reason = DBG_REASON_BREAKPOINT;
1236
1237         if (breakpoint)
1238                 cortex_a_set_breakpoint(target, breakpoint, 0);
1239
1240         if (target->state == TARGET_HALTED)
1241                 LOG_DEBUG("target stepped");
1242
1243         return ERROR_OK;
1244 }
1245
1246 static int cortex_a_restore_context(struct target *target, bool bpwp)
1247 {
1248         struct armv7a_common *armv7a = target_to_armv7a(target);
1249
1250         LOG_DEBUG(" ");
1251
1252         if (armv7a->pre_restore_context)
1253                 armv7a->pre_restore_context(target);
1254
1255         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1256 }
1257
1258 /*
1259  * Cortex-A Breakpoint and watchpoint functions
1260  */
1261
1262 /* Setup hardware Breakpoint Register Pair */
1263 static int cortex_a_set_breakpoint(struct target *target,
1264         struct breakpoint *breakpoint, uint8_t matchmode)
1265 {
1266         int retval;
1267         int brp_i = 0;
1268         uint32_t control;
1269         uint8_t byte_addr_select = 0x0F;
1270         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1271         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1272         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1273
1274         if (breakpoint->set) {
1275                 LOG_WARNING("breakpoint already set");
1276                 return ERROR_OK;
1277         }
1278
1279         if (breakpoint->type == BKPT_HARD) {
1280                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1281                         brp_i++;
1282                 if (brp_i >= cortex_a->brp_num) {
1283                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1284                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1285                 }
1286                 breakpoint->set = brp_i + 1;
1287                 if (breakpoint->length == 2)
1288                         byte_addr_select = (3 << (breakpoint->address & 0x02));
1289                 control = ((matchmode & 0x7) << 20)
1290                         | (byte_addr_select << 5)
1291                         | (3 << 1) | 1;
1292                 brp_list[brp_i].used = 1;
1293                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1294                 brp_list[brp_i].control = control;
1295                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1296                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1297                                 brp_list[brp_i].value);
1298                 if (retval != ERROR_OK)
1299                         return retval;
1300                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1301                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1302                                 brp_list[brp_i].control);
1303                 if (retval != ERROR_OK)
1304                         return retval;
1305                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1306                         brp_list[brp_i].control,
1307                         brp_list[brp_i].value);
1308         } else if (breakpoint->type == BKPT_SOFT) {
1309                 uint8_t code[4];
1310                 /* length == 2: Thumb breakpoint */
1311                 if (breakpoint->length == 2) {
1312                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1313                 } else if (breakpoint->length == 3) {
1314                         /* length == 3: Thumb-2 breakpoint, actual encoding is
1315                          * a regular Thumb BKPT instruction but we replace a
1316                          * 32bit Thumb-2 instruction, so fix-up the breakpoint
1317                          * length
1318                          */
1319                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1320                         breakpoint->length = 4;
1321                 } else {
1322                         /* length == 4, normal ARM breakpoint */
1323                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1324                 }
1325
1326                 retval = target_read_memory(target,
1327                                 breakpoint->address & 0xFFFFFFFE,
1328                                 breakpoint->length, 1,
1329                                 breakpoint->orig_instr);
1330                 if (retval != ERROR_OK)
1331                         return retval;
1332
1333                 /* make sure data cache is cleaned & invalidated down to PoC */
1334                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1335                         armv7a_cache_flush_virt(target, breakpoint->address,
1336                                                 breakpoint->length);
1337                 }
1338
1339                 retval = target_write_memory(target,
1340                                 breakpoint->address & 0xFFFFFFFE,
1341                                 breakpoint->length, 1, code);
1342                 if (retval != ERROR_OK)
1343                         return retval;
1344
1345                 /* update i-cache at breakpoint location */
1346                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1347                                         breakpoint->length);
1348                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1349                                                  breakpoint->length);
1350
1351                 breakpoint->set = 0x11; /* Any nice value but 0 */
1352         }
1353
1354         return ERROR_OK;
1355 }
1356
1357 static int cortex_a_set_context_breakpoint(struct target *target,
1358         struct breakpoint *breakpoint, uint8_t matchmode)
1359 {
1360         int retval = ERROR_FAIL;
1361         int brp_i = 0;
1362         uint32_t control;
1363         uint8_t byte_addr_select = 0x0F;
1364         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1365         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1366         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1367
1368         if (breakpoint->set) {
1369                 LOG_WARNING("breakpoint already set");
1370                 return retval;
1371         }
1372         /* check available context BRPs */
1373         while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1374                 (brp_list[brp_i].type != BRP_CONTEXT)))
1375                 brp_i++;
1376
1377         if (brp_i >= cortex_a->brp_num) {
1378                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1379                 return ERROR_FAIL;
1380         }
1381
1382         breakpoint->set = brp_i + 1;
1383         control = ((matchmode & 0x7) << 20)
1384                 | (byte_addr_select << 5)
1385                 | (3 << 1) | 1;
1386         brp_list[brp_i].used = 1;
1387         brp_list[brp_i].value = (breakpoint->asid);
1388         brp_list[brp_i].control = control;
1389         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1390                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1391                         brp_list[brp_i].value);
1392         if (retval != ERROR_OK)
1393                 return retval;
1394         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1395                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1396                         brp_list[brp_i].control);
1397         if (retval != ERROR_OK)
1398                 return retval;
1399         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1400                 brp_list[brp_i].control,
1401                 brp_list[brp_i].value);
1402         return ERROR_OK;
1403
1404 }
1405
1406 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1407 {
1408         int retval = ERROR_FAIL;
1409         int brp_1 = 0;  /* holds the contextID pair */
1410         int brp_2 = 0;  /* holds the IVA pair */
1411         uint32_t control_CTX, control_IVA;
1412         uint8_t CTX_byte_addr_select = 0x0F;
1413         uint8_t IVA_byte_addr_select = 0x0F;
1414         uint8_t CTX_machmode = 0x03;
1415         uint8_t IVA_machmode = 0x01;
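             /* DBGBCR breakpoint-type values: 0x03 requests a linked context ID
              * match for the CTX half of the pair and 0x01 a linked instruction
              * address (IVA) match for the other half; each BRP names its partner
              * in the linked-BRP field set up below. */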
1416         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1417         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1418         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1419
1420         if (breakpoint->set) {
1421                 LOG_WARNING("breakpoint already set");
1422                 return retval;
1423         }
1424         /* check available context BRPs */
1425         while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1426                 (brp_list[brp_1].type != BRP_CONTEXT)))
1427                 brp_1++;
1428
1429         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1430         if (brp_1 >= cortex_a->brp_num) {
1431                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1432                 return ERROR_FAIL;
1433         }
1434
1435         while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1436                 (brp_list[brp_2].type != BRP_NORMAL)))
1437                 brp_2++;
1438
1439         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1440         if (brp_2 >= cortex_a->brp_num) {
1441                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1442                 return ERROR_FAIL;
1443         }
1444
1445         breakpoint->set = brp_1 + 1;
1446         breakpoint->linked_BRP = brp_2;
1447         control_CTX = ((CTX_machmode & 0x7) << 20)
1448                 | (brp_2 << 16)
1449                 | (0 << 14)
1450                 | (CTX_byte_addr_select << 5)
1451                 | (3 << 1) | 1;
1452         brp_list[brp_1].used = 1;
1453         brp_list[brp_1].value = (breakpoint->asid);
1454         brp_list[brp_1].control = control_CTX;
1455         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1456                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1457                         brp_list[brp_1].value);
1458         if (retval != ERROR_OK)
1459                 return retval;
1460         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1461                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1462                         brp_list[brp_1].control);
1463         if (retval != ERROR_OK)
1464                 return retval;
1465
1466         control_IVA = ((IVA_machmode & 0x7) << 20)
1467                 | (brp_1 << 16)
1468                 | (IVA_byte_addr_select << 5)
1469                 | (3 << 1) | 1;
1470         brp_list[brp_2].used = 1;
1471         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1472         brp_list[brp_2].control = control_IVA;
1473         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1474                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1475                         brp_list[brp_2].value);
1476         if (retval != ERROR_OK)
1477                 return retval;
1478         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1479                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1480                         brp_list[brp_2].control);
1481         if (retval != ERROR_OK)
1482                 return retval;
1483
1484         return ERROR_OK;
1485 }
1486
1487 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1488 {
1489         int retval;
1490         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1491         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1492         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1493
1494         if (!breakpoint->set) {
1495                 LOG_WARNING("breakpoint not set");
1496                 return ERROR_OK;
1497         }
1498
1499         if (breakpoint->type == BKPT_HARD) {
1500                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1501                         int brp_i = breakpoint->set - 1;
1502                         int brp_j = breakpoint->linked_BRP;
1503                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1504                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1505                                 return ERROR_OK;
1506                         }
1507                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1508                                 brp_list[brp_i].control, brp_list[brp_i].value);
1509                         brp_list[brp_i].used = 0;
1510                         brp_list[brp_i].value = 0;
1511                         brp_list[brp_i].control = 0;
1512                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1513                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1514                                         brp_list[brp_i].control);
1515                         if (retval != ERROR_OK)
1516                                 return retval;
1517                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1518                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1519                                         brp_list[brp_i].value);
1520                         if (retval != ERROR_OK)
1521                                 return retval;
1522                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1523                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1524                                 return ERROR_OK;
1525                         }
1526                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1527                                 brp_list[brp_j].control, brp_list[brp_j].value);
1528                         brp_list[brp_j].used = 0;
1529                         brp_list[brp_j].value = 0;
1530                         brp_list[brp_j].control = 0;
1531                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1532                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1533                                         brp_list[brp_j].control);
1534                         if (retval != ERROR_OK)
1535                                 return retval;
1536                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1537                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1538                                         brp_list[brp_j].value);
1539                         if (retval != ERROR_OK)
1540                                 return retval;
1541                         breakpoint->linked_BRP = 0;
1542                         breakpoint->set = 0;
1543                         return ERROR_OK;
1544
1545                 } else {
1546                         int brp_i = breakpoint->set - 1;
1547                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1548                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1549                                 return ERROR_OK;
1550                         }
1551                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1552                                 brp_list[brp_i].control, brp_list[brp_i].value);
1553                         brp_list[brp_i].used = 0;
1554                         brp_list[brp_i].value = 0;
1555                         brp_list[brp_i].control = 0;
1556                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1557                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1558                                         brp_list[brp_i].control);
1559                         if (retval != ERROR_OK)
1560                                 return retval;
1561                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1562                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1563                                         brp_list[brp_i].value);
1564                         if (retval != ERROR_OK)
1565                                 return retval;
1566                         breakpoint->set = 0;
1567                         return ERROR_OK;
1568                 }
1569         } else {
1570
1571                 /* make sure data cache is cleaned & invalidated down to PoC */
1572                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1573                         armv7a_cache_flush_virt(target, breakpoint->address,
1574                                                 breakpoint->length);
1575                 }
1576
1577                 /* restore original instruction (kept in target endianness) */
1578                 if (breakpoint->length == 4) {
1579                         retval = target_write_memory(target,
1580                                         breakpoint->address & 0xFFFFFFFE,
1581                                         4, 1, breakpoint->orig_instr);
1582                         if (retval != ERROR_OK)
1583                                 return retval;
1584                 } else {
1585                         retval = target_write_memory(target,
1586                                         breakpoint->address & 0xFFFFFFFE,
1587                                         2, 1, breakpoint->orig_instr);
1588                         if (retval != ERROR_OK)
1589                                 return retval;
1590                 }
1591
1592                 /* update i-cache at breakpoint location */
1593                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1594                                                  breakpoint->length);
1595                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1596                                                  breakpoint->length);
1597         }
1598         breakpoint->set = 0;
1599
1600         return ERROR_OK;
1601 }
1602
1603 static int cortex_a_add_breakpoint(struct target *target,
1604         struct breakpoint *breakpoint)
1605 {
1606         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1607
1608         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1609                 LOG_INFO("no hardware breakpoint available");
1610                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1611         }
1612
1613         if (breakpoint->type == BKPT_HARD)
1614                 cortex_a->brp_num_available--;
1615
1616         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1617 }
1618
1619 static int cortex_a_add_context_breakpoint(struct target *target,
1620         struct breakpoint *breakpoint)
1621 {
1622         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1623
1624         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1625                 LOG_INFO("no hardware breakpoint available");
1626                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1627         }
1628
1629         if (breakpoint->type == BKPT_HARD)
1630                 cortex_a->brp_num_available--;
1631
1632         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1633 }
1634
1635 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1636         struct breakpoint *breakpoint)
1637 {
1638         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1639
1640         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1641                 LOG_INFO("no hardware breakpoint available");
1642                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1643         }
1644
1645         if (breakpoint->type == BKPT_HARD)
1646                 cortex_a->brp_num_available--;
1647
1648         return cortex_a_set_hybrid_breakpoint(target, breakpoint);      /* ??? */
1649 }
1650
1651
1652 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1653 {
1654         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1655
1656 #if 0
1657 /* It is perfectly possible to remove breakpoints while the target is running */
1658         if (target->state != TARGET_HALTED) {
1659                 LOG_WARNING("target not halted");
1660                 return ERROR_TARGET_NOT_HALTED;
1661         }
1662 #endif
1663
1664         if (breakpoint->set) {
1665                 cortex_a_unset_breakpoint(target, breakpoint);
1666                 if (breakpoint->type == BKPT_HARD)
1667                         cortex_a->brp_num_available++;
1668         }
1669
1670
1671         return ERROR_OK;
1672 }
1673
1674 /**
1675  * Sets a watchpoint for a Cortex-A target in one of the watchpoint units.  It is
1676  * considered a bug to call this function when there are no available watchpoint
1677  * units.
1678  *
1679  * @param target Pointer to a Cortex-A target to set a watchpoint on
1680  * @param watchpoint Pointer to the watchpoint to be set
1681  * @return Error status if watchpoint set fails or the result of executing the
1682  * JTAG queue
1683  */
1684 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1685 {
1686         int retval = ERROR_OK;
1687         int wrp_i = 0;
1688         uint32_t control;
1689         uint8_t address_mask = ilog2(watchpoint->length);
1690         uint8_t byte_address_select = 0xFF;
1691         uint8_t load_store_access_control = 0x3;
1692         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1693         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1694         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1695
1696         if (watchpoint->set) {
1697                 LOG_WARNING("watchpoint already set");
1698                 return retval;
1699         }
1700
1701         /* check available watchpoint register pairs */
1702         while ((wrp_i < cortex_a->wrp_num) && wrp_list[wrp_i].used)
1703                 wrp_i++;
1704
1705         if (wrp_i >= cortex_a->wrp_num) {
1706                 LOG_ERROR("Cannot find a free Watchpoint Register Pair");
1707                 return ERROR_FAIL;
1708         }
1709
1710         if (address_mask == 0x1 || address_mask == 0x2) {
1711                 LOG_WARNING("watchpoint length must be a power of 2, other than 2 or 4");
1712                 return ERROR_FAIL;
1713         }
1714
1715         watchpoint->set = wrp_i + 1;
1716         control = (address_mask << 24) |
1717                 (byte_address_select << 5) |
1718                 (load_store_access_control << 3) |
1719                 (0x3 << 1) | 1;
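             /* This follows the DBGWCR layout: address mask in bits [28:24], byte
              * address select in bits [12:5], load/store access control in bits
              * [4:3] (0x3 = trigger on both loads and stores), privileged access
              * control in bits [2:1] and the enable bit at [0]. */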
1720         wrp_list[wrp_i].used = 1;
1721         wrp_list[wrp_i].value = (watchpoint->address & 0xFFFFFFFC);
1722         wrp_list[wrp_i].control = control;
1723
1724         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1725                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].WRPn,
1726                         wrp_list[wrp_i].value);
1727         if (retval != ERROR_OK)
1728                 return retval;
1729
1730         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1731                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].WRPn,
1732                         wrp_list[wrp_i].control);
1733         if (retval != ERROR_OK)
1734                 return retval;
1735
1736         LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1737                         wrp_list[wrp_i].control,
1738                         wrp_list[wrp_i].value);
1739
1740         return ERROR_OK;
1741 }
1742
1743 /**
1744  * Unset an existing watchpoint and clear the used watchpoint unit.
1745  *
1746  * @param target Pointer to the target to have the watchpoint removed
1747  * @param watchpoint Pointer to the watchpoint to be removed
1748  * @return Error status while trying to unset the watchpoint or the result of
1749  *         executing the JTAG queue
1750  */
1751 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1752 {
1753         int retval;
1754         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1755         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1756         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1757
1758         if (!watchpoint->set) {
1759                 LOG_WARNING("watchpoint not set");
1760                 return ERROR_OK;
1761         }
1762
1763         int wrp_i = watchpoint->set - 1;
1764         if (wrp_i < 0 || wrp_i >= cortex_a->wrp_num) {
1765                 LOG_DEBUG("Invalid WRP number in watchpoint");
1766                 return ERROR_OK;
1767         }
1768         LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1769                         wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1770         wrp_list[wrp_i].used = 0;
1771         wrp_list[wrp_i].value = 0;
1772         wrp_list[wrp_i].control = 0;
1773         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1774                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].WRPn,
1775                         wrp_list[wrp_i].control);
1776         if (retval != ERROR_OK)
1777                 return retval;
1778         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1779                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].WRPn,
1780                         wrp_list[wrp_i].value);
1781         if (retval != ERROR_OK)
1782                 return retval;
1783         watchpoint->set = 0;
1784
1785         return ERROR_OK;
1786 }
1787
1788 /**
1789  * Add a watchpoint to a Cortex-A target.  If there are no watchpoint units
1790  * available, an error response is returned.
1791  *
1792  * @param target Pointer to the Cortex-A target to add a watchpoint to
1793  * @param watchpoint Pointer to the watchpoint to be added
1794  * @return Error status while trying to add the watchpoint
1795  */
1796 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1797 {
1798         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1799
1800         if (cortex_a->wrp_num_available < 1) {
1801                 LOG_INFO("no hardware watchpoint available");
1802                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1803         }
1804
1805         cortex_a->wrp_num_available--;
1806         return cortex_a_set_watchpoint(target, watchpoint);
1807 }
1808
1809 /**
1810  * Remove a watchpoint from a Cortex-A target.  The watchpoint will be unset and
1811  * the watchpoint unit it used will be freed for reuse.
1812  *
1813  * @param target Pointer to the target to remove a watchpoint from
1814  * @param watchpoint Pointer to the watchpoint to be removed
1815  * @return Result of trying to unset the watchpoint
1816  */
1817 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1818 {
1819         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1820
1821         if (watchpoint->set) {
1822                 cortex_a->wrp_num_available++;
1823                 cortex_a_unset_watchpoint(target, watchpoint);
1824         }
1825         return ERROR_OK;
1826 }
1827
1828
1829 /*
1830  * Cortex-A Reset functions
1831  */
1832
1833 static int cortex_a_assert_reset(struct target *target)
1834 {
1835         struct armv7a_common *armv7a = target_to_armv7a(target);
1836
1837         LOG_DEBUG(" ");
1838
1839         /* FIXME when halt is requested, make it work somehow... */
1840
1841         /* This function can be called in "target not examined" state */
1842
1843         /* Issue some kind of warm reset. */
1844         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1845                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1846         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1847                 /* REVISIT handle "pulls" cases, if there's
1848                  * hardware that needs them to work.
1849                  */
1850
1851                 /*
1852                  * FIXME: fix reset when transport is not JTAG. This is a temporary
1853                  * work-around for release v0.10 that is not intended to stay!
1854                  */
1855                 if (!transport_is_jtag() ||
1856                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1857                         adapter_assert_reset();
1858
1859         } else {
1860                 LOG_ERROR("%s: how to reset?", target_name(target));
1861                 return ERROR_FAIL;
1862         }
1863
1864         /* registers are now invalid */
1865         if (target_was_examined(target))
1866                 register_cache_invalidate(armv7a->arm.core_cache);
1867
1868         target->state = TARGET_RESET;
1869
1870         return ERROR_OK;
1871 }
1872
1873 static int cortex_a_deassert_reset(struct target *target)
1874 {
1875         struct armv7a_common *armv7a = target_to_armv7a(target);
1876         int retval;
1877
1878         LOG_DEBUG(" ");
1879
1880         /* be certain SRST is off */
1881         adapter_deassert_reset();
1882
1883         if (target_was_examined(target)) {
1884                 retval = cortex_a_poll(target);
1885                 if (retval != ERROR_OK)
1886                         return retval;
1887         }
1888
1889         if (target->reset_halt) {
1890                 if (target->state != TARGET_HALTED) {
1891                         LOG_WARNING("%s: ran after reset and before halt ...",
1892                                 target_name(target));
1893                         if (target_was_examined(target)) {
1894                                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1895                                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
1896                                 if (retval != ERROR_OK)
1897                                         return retval;
1898                         } else
1899                                 target->state = TARGET_UNKNOWN;
1900                 }
1901         }
1902
1903         return ERROR_OK;
1904 }
1905
1906 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1907 {
1908         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1909          * The new desired mode is passed in 'mode'. The current value of DSCR
1910          * must be in *dscr, which is updated with the new value.
1911          *
1912          * This function elides actually sending the mode-change over the debug
1913          * interface if the mode is already set as desired.
1914          */
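             /* Non-blocking mode lets DCC accesses complete without stalling the
              * debug port, stall mode makes them wait for the core, and fast mode
              * additionally re-issues the instruction latched in ITR on every DTR
              * access, which the fast memory read/write paths below rely on. */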
1915         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1916         if (new_dscr != *dscr) {
1917                 struct armv7a_common *armv7a = target_to_armv7a(target);
1918                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1919                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1920                 if (retval == ERROR_OK)
1921                         *dscr = new_dscr;
1922                 return retval;
1923         } else {
1924                 return ERROR_OK;
1925         }
1926 }
1927
1928 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1929         uint32_t value, uint32_t *dscr)
1930 {
1931         /* Waits until the specified bit(s) of DSCR take on a specified value. */
1932         struct armv7a_common *armv7a = target_to_armv7a(target);
1933         int64_t then;
1934         int retval;
1935
1936         if ((*dscr & mask) == value)
1937                 return ERROR_OK;
1938
1939         then = timeval_ms();
1940         while (1) {
1941                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1942                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
1943                 if (retval != ERROR_OK) {
1944                         LOG_ERROR("Could not read DSCR register");
1945                         return retval;
1946                 }
1947                 if ((*dscr & mask) == value)
1948                         break;
1949                 if (timeval_ms() > then + 1000) {
1950                         LOG_ERROR("timeout waiting for DSCR bit change");
1951                         return ERROR_FAIL;
1952                 }
1953         }
1954         return ERROR_OK;
1955 }
1956
1957 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1958         uint32_t *data, uint32_t *dscr)
1959 {
1960         int retval;
1961         struct armv7a_common *armv7a = target_to_armv7a(target);
1962
1963         /* Move from coprocessor to R0. */
1964         retval = cortex_a_exec_opcode(target, opcode, dscr);
1965         if (retval != ERROR_OK)
1966                 return retval;
1967
1968         /* Move from R0 to DTRTX. */
1969         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1970         if (retval != ERROR_OK)
1971                 return retval;
1972
1973         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1974          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1975          * must also check TXfull_l). Most of the time this will be free
1976          * because TXfull_l will be set immediately and cached in dscr. */
1977         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1978                         DSCR_DTRTX_FULL_LATCHED, dscr);
1979         if (retval != ERROR_OK)
1980                 return retval;
1981
1982         /* Read the value transferred to DTRTX. */
1983         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1984                         armv7a->debug_base + CPUDBG_DTRTX, data);
1985         if (retval != ERROR_OK)
1986                 return retval;
1987
1988         return ERROR_OK;
1989 }
1990
1991 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1992         uint32_t *dfsr, uint32_t *dscr)
1993 {
1994         int retval;
1995
1996         if (dfar) {
1997                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1998                 if (retval != ERROR_OK)
1999                         return retval;
2000         }
2001
2002         if (dfsr) {
2003                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2004                 if (retval != ERROR_OK)
2005                         return retval;
2006         }
2007
2008         return ERROR_OK;
2009 }
2010
2011 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2012         uint32_t data, uint32_t *dscr)
2013 {
2014         int retval;
2015         struct armv7a_common *armv7a = target_to_armv7a(target);
2016
2017         /* Write the value into DTRRX. */
2018         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2019                         armv7a->debug_base + CPUDBG_DTRRX, data);
2020         if (retval != ERROR_OK)
2021                 return retval;
2022
2023         /* Move from DTRRX to R0. */
2024         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2025         if (retval != ERROR_OK)
2026                 return retval;
2027
2028         /* Move from R0 to coprocessor. */
2029         retval = cortex_a_exec_opcode(target, opcode, dscr);
2030         if (retval != ERROR_OK)
2031                 return retval;
2032
2033         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2034          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2035          * check RXfull_l). Most of the time this will be free because RXfull_l
2036          * will be cleared immediately and cached in dscr. */
2037         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2038         if (retval != ERROR_OK)
2039                 return retval;
2040
2041         return ERROR_OK;
2042 }
2043
2044 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2045         uint32_t dfsr, uint32_t *dscr)
2046 {
2047         int retval;
2048
2049         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2050         if (retval != ERROR_OK)
2051                 return retval;
2052
2053         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2054         if (retval != ERROR_OK)
2055                 return retval;
2056
2057         return ERROR_OK;
2058 }
2059
2060 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2061 {
2062         uint32_t status, upper4;
2063
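             /* DFSR bit [9] selects the long-descriptor (LPAE) fault status
              * encoding; otherwise the short-descriptor encoding applies. */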
2064         if (dfsr & (1 << 9)) {
2065                 /* LPAE format. */
2066                 status = dfsr & 0x3f;
2067                 upper4 = status >> 2;
2068                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2069                         return ERROR_TARGET_TRANSLATION_FAULT;
2070                 else if (status == 33)
2071                         return ERROR_TARGET_UNALIGNED_ACCESS;
2072                 else
2073                         return ERROR_TARGET_DATA_ABORT;
2074         } else {
2075                 /* Normal format. */
2076                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
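                     /* Short-descriptor fault status values (roughly): 1 is an
                      * alignment fault; 5/7 are translation faults, 3/6 access flag
                      * faults, 9/11 domain faults and 13/15 permission faults, all
                      * reported here as translation faults. */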
2077                 if (status == 1)
2078                         return ERROR_TARGET_UNALIGNED_ACCESS;
2079                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2080                                 status == 9 || status == 11 || status == 13 || status == 15)
2081                         return ERROR_TARGET_TRANSLATION_FAULT;
2082                 else
2083                         return ERROR_TARGET_DATA_ABORT;
2084         }
2085 }
2086
2087 static int cortex_a_write_cpu_memory_slow(struct target *target,
2088         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2089 {
2090         /* Writes count objects of size size from *buffer. Old value of DSCR must
2091          * be in *dscr; updated to new value. This is slow because it works for
2092          * non-word-sized objects. Avoid unaligned accesses as they do not work
2093          * non-word-sized objects. Avoid unaligned accesses, as they do not work
2094          * in memory regions without the "Normal" attribute. If size == 4 and
2095          * preferred.
2096          * Preconditions:
2097          * - Address is in R0.
2098          * - R0 is marked dirty.
2099          */
2100         struct armv7a_common *armv7a = target_to_armv7a(target);
2101         struct arm *arm = &armv7a->arm;
2102         int retval;
2103
2104         /* Mark register R1 as dirty, to use for transferring data. */
2105         arm_reg_current(arm, 1)->dirty = true;
2106
2107         /* Switch to non-blocking mode if not already in that mode. */
2108         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2109         if (retval != ERROR_OK)
2110                 return retval;
2111
2112         /* Go through the objects. */
2113         while (count) {
2114                 /* Write the value to store into DTRRX. */
2115                 uint32_t data, opcode;
2116                 if (size == 1)
2117                         data = *buffer;
2118                 else if (size == 2)
2119                         data = target_buffer_get_u16(target, buffer);
2120                 else
2121                         data = target_buffer_get_u32(target, buffer);
2122                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2123                                 armv7a->debug_base + CPUDBG_DTRRX, data);
2124                 if (retval != ERROR_OK)
2125                         return retval;
2126
2127                 /* Transfer the value from DTRRX to R1. */
2128                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2129                 if (retval != ERROR_OK)
2130                         return retval;
2131
2132                 /* Write the value transferred to R1 into memory. */
2133                 if (size == 1)
2134                         opcode = ARMV4_5_STRB_IP(1, 0);
2135                 else if (size == 2)
2136                         opcode = ARMV4_5_STRH_IP(1, 0);
2137                 else
2138                         opcode = ARMV4_5_STRW_IP(1, 0);
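                     /* The *_IP store opcodes use post-indexed addressing, so the
                      * store itself advances R0 by the access size on the target;
                      * only the host-side buffer pointer needs advancing below. */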
2139                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2140                 if (retval != ERROR_OK)
2141                         return retval;
2142
2143                 /* Check for faults and return early. */
2144                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2145                         return ERROR_OK; /* A data fault is not considered a system failure. */
2146
2147                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2148                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2149                  * must also check RXfull_l). Most of the time this will be free
2150                  * because RXfull_l will be cleared immediately and cached in dscr. */
2151                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2152                 if (retval != ERROR_OK)
2153                         return retval;
2154
2155                 /* Advance. */
2156                 buffer += size;
2157                 --count;
2158         }
2159
2160         return ERROR_OK;
2161 }
2162
2163 static int cortex_a_write_cpu_memory_fast(struct target *target,
2164         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2165 {
2166         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2167          * in *dscr; updated to new value. This is fast but only works for
2168          * word-sized objects at aligned addresses.
2169          * Preconditions:
2170          * - Address is in R0 and must be a multiple of 4.
2171          * - R0 is marked dirty.
2172          */
2173         struct armv7a_common *armv7a = target_to_armv7a(target);
2174         int retval;
2175
2176         /* Switch to fast mode if not already in that mode. */
2177         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2178         if (retval != ERROR_OK)
2179                 return retval;
2180
2181         /* Latch STC instruction. */
2182         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2183                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2184         if (retval != ERROR_OK)
2185                 return retval;
2186
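             /* In fast mode every write to DTRRX re-executes the STC latched in
              * ITR, which stores the received word at [R0] and post-increments R0
              * by 4, so a single no-increment buffer write transfers the block. */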
2187         /* Transfer all the data and issue all the instructions. */
2188         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2189                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
2190 }
2191
2192 static int cortex_a_write_cpu_memory(struct target *target,
2193         uint32_t address, uint32_t size,
2194         uint32_t count, const uint8_t *buffer)
2195 {
2196         /* Write memory through the CPU. */
2197         int retval, final_retval;
2198         struct armv7a_common *armv7a = target_to_armv7a(target);
2199         struct arm *arm = &armv7a->arm;
2200         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2201
2202         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2203                           address, size, count);
2204         if (target->state != TARGET_HALTED) {
2205                 LOG_WARNING("target not halted");
2206                 return ERROR_TARGET_NOT_HALTED;
2207         }
2208
2209         if (!count)
2210                 return ERROR_OK;
2211
2212         /* Clear any abort. */
2213         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2214                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2215         if (retval != ERROR_OK)
2216                 return retval;
2217
2218         /* Read DSCR. */
2219         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2220                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2221         if (retval != ERROR_OK)
2222                 return retval;
2223
2224         /* Switch to non-blocking mode if not already in that mode. */
2225         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2226         if (retval != ERROR_OK)
2227                 goto out;
2228
2229         /* Mark R0 as dirty. */
2230         arm_reg_current(arm, 0)->dirty = true;
2231
2232         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2233         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2234         if (retval != ERROR_OK)
2235                 goto out;
2236
2237         /* Get the memory address into R0. */
2238         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2239                         armv7a->debug_base + CPUDBG_DTRRX, address);
2240         if (retval != ERROR_OK)
2241                 goto out;
2242         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2243         if (retval != ERROR_OK)
2244                 goto out;
2245
2246         if (size == 4 && (address % 4) == 0) {
2247                 /* We are doing a word-aligned transfer, so use fast mode. */
2248                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2249         } else {
2250                 /* Use the slow path; adjust size so all accesses are aligned */
2251                 switch (address % 4) {
2252                         case 1:
2253                         case 3:
2254                                 count *= size;
2255                                 size = 1;
2256                                 break;
2257                         case 2:
2258                                 if (size == 4) {
2259                                         count *= 2;
2260                                         size = 2;
2261                                 }
2262                         case 0:
2263                         default:
2264                                 break;
2265                 }
2266                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2267         }
2268
2269 out:
2270         final_retval = retval;
2271
2272         /* Switch to non-blocking mode if not already in that mode. */
2273         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2274         if (final_retval == ERROR_OK)
2275                 final_retval = retval;
2276
2277         /* Wait for last issued instruction to complete. */
2278         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2279         if (final_retval == ERROR_OK)
2280                 final_retval = retval;
2281
2282         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2283          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2284          * check RXfull_l). Most of the time this will be free because RXfull_l
2285          * will be cleared immediately and cached in dscr. However, don't do this
2286          * if there is fault, because then the instruction might not have completed
2287          * if there is a fault, because then the instruction might not have completed
2288         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2289                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2290                 if (retval != ERROR_OK)
2291                         return retval;
2292         }
2293
2294         /* If there were any sticky abort flags, clear them. */
2295         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2296                 fault_dscr = dscr;
2297                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2298                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2299                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2300         } else {
2301                 fault_dscr = 0;
2302         }
2303
2304         /* Handle synchronous data faults. */
2305         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2306                 if (final_retval == ERROR_OK) {
2307                         /* Final return value will reflect cause of fault. */
2308                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2309                         if (retval == ERROR_OK) {
2310                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2311                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2312                         } else
2313                                 final_retval = retval;
2314                 }
2315                 /* Fault destroyed DFAR/DFSR; restore them. */
2316                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2317                 if (retval != ERROR_OK)
2318                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2319         }
2320
2321         /* Handle asynchronous data faults. */
2322         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2323                 if (final_retval == ERROR_OK)
2324                         /* No other error has been recorded so far, so keep this one. */
2325                         final_retval = ERROR_TARGET_DATA_ABORT;
2326         }
2327
2328         /* If the DCC is nonempty, clear it. */
2329         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2330                 uint32_t dummy;
2331                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2332                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2333                 if (final_retval == ERROR_OK)
2334                         final_retval = retval;
2335         }
2336         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2337                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2338                 if (final_retval == ERROR_OK)
2339                         final_retval = retval;
2340         }
2341
2342         /* Done. */
2343         return final_retval;
2344 }
2345
2346 static int cortex_a_read_cpu_memory_slow(struct target *target,
2347         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2348 {
2349         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2350          * in *dscr; updated to new value. This is slow because it works for
2351          * non-word-sized objects. Avoid unaligned accesses as they do not work
2352          * non-word-sized objects. Avoid unaligned accesses, as they do not work
2353          * in memory regions without the "Normal" attribute. If size == 4 and
2354          * preferred.
2355          * Preconditions:
2356          * - Address is in R0.
2357          * - R0 is marked dirty.
2358          */
2359         struct armv7a_common *armv7a = target_to_armv7a(target);
2360         struct arm *arm = &armv7a->arm;
2361         int retval;
2362
2363         /* Mark register R1 as dirty, to use for transferring data. */
2364         arm_reg_current(arm, 1)->dirty = true;
2365
2366         /* Switch to non-blocking mode if not already in that mode. */
2367         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2368         if (retval != ERROR_OK)
2369                 return retval;
2370
2371         /* Go through the objects. */
2372         while (count) {
2373                 /* Issue a load of the appropriate size to R1. */
2374                 uint32_t opcode, data;
2375                 if (size == 1)
2376                         opcode = ARMV4_5_LDRB_IP(1, 0);
2377                 else if (size == 2)
2378                         opcode = ARMV4_5_LDRH_IP(1, 0);
2379                 else
2380                         opcode = ARMV4_5_LDRW_IP(1, 0);
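                     /* As in the write path, the *_IP load opcodes post-increment R0
                      * by the access size, so the target-side address advances on
                      * every iteration of this loop. */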
2381                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2382                 if (retval != ERROR_OK)
2383                         return retval;
2384
2385                 /* Issue a write of R1 to DTRTX. */
2386                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2387                 if (retval != ERROR_OK)
2388                         return retval;
2389
2390                 /* Check for faults and return early. */
2391                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2392                         return ERROR_OK; /* A data fault is not considered a system failure. */
2393
2394                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2395                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2396                  * must also check TXfull_l). Most of the time this will be free
2397                  * because TXfull_l will be set immediately and cached in dscr. */
2398                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2399                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2400                 if (retval != ERROR_OK)
2401                         return retval;
2402
2403                 /* Read the value transferred to DTRTX into the buffer. */
2404                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2405                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2406                 if (retval != ERROR_OK)
2407                         return retval;
2408                 if (size == 1)
2409                         *buffer = (uint8_t) data;
2410                 else if (size == 2)
2411                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2412                 else
2413                         target_buffer_set_u32(target, buffer, data);
2414
2415                 /* Advance. */
2416                 buffer += size;
2417                 --count;
2418         }
2419
2420         return ERROR_OK;
2421 }
2422
2423 static int cortex_a_read_cpu_memory_fast(struct target *target,
2424         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2425 {
2426         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2427          * *dscr; updated to new value. This is fast but only works for word-sized
2428          * objects at aligned addresses.
2429          * Preconditions:
2430          * - Address is in R0 and must be a multiple of 4.
2431          * - R0 is marked dirty.
2432          */
2433         struct armv7a_common *armv7a = target_to_armv7a(target);
2434         uint32_t u32;
2435         int retval;
2436
2437         /* Switch to non-blocking mode if not already in that mode. */
2438         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2439         if (retval != ERROR_OK)
2440                 return retval;
2441
2442         /* Issue the LDC instruction via a write to ITR. */
2443         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2444         if (retval != ERROR_OK)
2445                 return retval;
2446
2447         count--;
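             /* The first load has already been issued above; the block read below
              * streams all but the last word through DTRTX, and the final word is
              * read out separately once the last load completes. */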
2448
2449         if (count > 0) {
2450                 /* Switch to fast mode if not already in that mode. */
2451                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2452                 if (retval != ERROR_OK)
2453                         return retval;
2454
2455                 /* Latch LDC instruction. */
2456                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2457                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2458                 if (retval != ERROR_OK)
2459                         return retval;
2460
2461                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2462                  * mode rules, this blocks until the instruction finishes executing and
2463                  * then reissues the read instruction to read the next word from
2464                  * memory. The last read of DTRTX in this call reads the second-to-last
2465                  * word from memory and issues the read instruction for the last word.
2466                  */
2467                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2468                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2469                 if (retval != ERROR_OK)
2470                         return retval;
2471
2472                 /* Advance. */
2473                 buffer += count * 4;
2474         }
2475
2476         /* Wait for last issued instruction to complete. */
2477         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2478         if (retval != ERROR_OK)
2479                 return retval;
2480
2481         /* Switch to non-blocking mode if not already in that mode. */
2482         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2483         if (retval != ERROR_OK)
2484                 return retval;
2485
2486         /* Check for faults and return early. */
2487         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2488                 return ERROR_OK; /* A data fault is not considered a system failure. */
2489
2490         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2491          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2492          * check TXfull_l). Most of the time this will be free because TXfull_l
2493          * will be set immediately and cached in dscr. */
2494         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2495                         DSCR_DTRTX_FULL_LATCHED, dscr);
2496         if (retval != ERROR_OK)
2497                 return retval;
2498
2499         /* Read the value transferred to DTRTX into the buffer. This is the last
2500          * word. */
2501         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2502                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2503         if (retval != ERROR_OK)
2504                 return retval;
2505         target_buffer_set_u32(target, buffer, u32);
2506
2507         return ERROR_OK;
2508 }
2509
2510 static int cortex_a_read_cpu_memory(struct target *target,
2511         uint32_t address, uint32_t size,
2512         uint32_t count, uint8_t *buffer)
2513 {
2514         /* Read memory through the CPU. */
2515         int retval, final_retval;
2516         struct armv7a_common *armv7a = target_to_armv7a(target);
2517         struct arm *arm = &armv7a->arm;
2518         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2519
2520         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2521                           address, size, count);
2522         if (target->state != TARGET_HALTED) {
2523                 LOG_WARNING("target not halted");
2524                 return ERROR_TARGET_NOT_HALTED;
2525         }
2526
2527         if (!count)
2528                 return ERROR_OK;
2529
2530         /* Clear any abort. */
2531         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2532                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2533         if (retval != ERROR_OK)
2534                 return retval;
2535
2536         /* Read DSCR */
2537         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2538                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2539         if (retval != ERROR_OK)
2540                 return retval;
2541
2542         /* Switch to non-blocking mode if not already in that mode. */
2543         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2544         if (retval != ERROR_OK)
2545                 goto out;
2546
2547         /* Mark R0 as dirty. */
2548         arm_reg_current(arm, 0)->dirty = true;
2549
2550         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2551         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2552         if (retval != ERROR_OK)
2553                 goto out;
2554
2555         /* Get the memory address into R0. */
2556         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2557                         armv7a->debug_base + CPUDBG_DTRRX, address);
2558         if (retval != ERROR_OK)
2559                 goto out;
2560         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2561         if (retval != ERROR_OK)
2562                 goto out;
2563
2564         if (size == 4 && (address % 4) == 0) {
2565                 /* We are doing a word-aligned transfer, so use fast mode. */
2566                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2567         } else {
2568                 /* Use the slow path; adjust size so all accesses are aligned */
2569                 switch (address % 4) {
2570                         case 1:
2571                         case 3:
2572                                 count *= size;
2573                                 size = 1;
2574                                 break;
2575                         case 2:
2576                                 if (size == 4) {
2577                                         count *= 2;
2578                                         size = 2;
2579                                 }
2580                                 break;
2581                         case 0:
2582                         default:
2583                                 break;
2584                 }
2585                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2586         }
2587
2588 out:
2589         final_retval = retval;
2590
2591         /* Switch to non-blocking mode if not already in that mode. */
2592         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2593         if (final_retval == ERROR_OK)
2594                 final_retval = retval;
2595
2596         /* Wait for last issued instruction to complete. */
2597         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2598         if (final_retval == ERROR_OK)
2599                 final_retval = retval;
2600
2601         /* If there were any sticky abort flags, clear them. */
2602         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2603                 fault_dscr = dscr;
2604                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2605                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2606                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2607         } else {
2608                 fault_dscr = 0;
2609         }
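        /* At this point fault_dscr holds the DSCR value seen while the sticky abort
         * flags were still set, or 0 if no abort occurred; the checks below use it
         * to distinguish synchronous (precise) from asynchronous (imprecise) faults. */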
2610
2611         /* Handle synchronous data faults. */
2612         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2613                 if (final_retval == ERROR_OK) {
2614                         /* Final return value will reflect cause of fault. */
2615                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2616                         if (retval == ERROR_OK) {
2617                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2618                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2619                         } else
2620                                 final_retval = retval;
2621                 }
2622                 /* Fault destroyed DFAR/DFSR; restore them. */
2623                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2624                 if (retval != ERROR_OK)
2625                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2626         }
2627
2628         /* Handle asynchronous data faults. */
2629         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2630                 if (final_retval == ERROR_OK)
2631                         /* No other error has been recorded so far, so keep this one. */
2632                         final_retval = ERROR_TARGET_DATA_ABORT;
2633         }
2634
2635         /* If the DCC is nonempty, clear it. */
2636         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2637                 uint32_t dummy;
2638                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2639                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2640                 if (final_retval == ERROR_OK)
2641                         final_retval = retval;
2642         }
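        /* A latched DTRRX is drained by having the core execute
         * "MRC p14, 0, R1, c0, c5, 0", which reads the stale value into R1 and
         * clears the DTRRX-full flag. */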
2643         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2644                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2645                 if (final_retval == ERROR_OK)
2646                         final_retval = retval;
2647         }
2648
2649         /* Done. */
2650         return final_retval;
2651 }
2652
2653
2654 /*
2655  * Cortex-A Memory access
2656  *
2657  * This is the same as on Cortex-M3, but we must also use the correct
2658  * AP number for every access.
2659  */
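
/*
 * The helpers below bracket every transfer with cortex_a_prep_memaccess() and
 * cortex_a_post_memaccess(); the flag passed to them selects a physical (1) or
 * virtual (0) access for the CPU-side transfer.
 */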
2660
2661 static int cortex_a_read_phys_memory(struct target *target,
2662         target_addr_t address, uint32_t size,
2663         uint32_t count, uint8_t *buffer)
2664 {
2665         int retval;
2666
2667         if (!count || !buffer)
2668                 return ERROR_COMMAND_SYNTAX_ERROR;
2669
2670         LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2671                 address, size, count);
2672
2673         /* read memory through the CPU */
2674         cortex_a_prep_memaccess(target, 1);
2675         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2676         cortex_a_post_memaccess(target, 1);
2677
2678         return retval;
2679 }
2680
2681 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2682         uint32_t size, uint32_t count, uint8_t *buffer)
2683 {
2684         int retval;
2685
2686         /* cortex_a handles unaligned memory access */
2687         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2688                 address, size, count);
2689
2690         cortex_a_prep_memaccess(target, 0);
2691         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2692         cortex_a_post_memaccess(target, 0);
2693
2694         return retval;
2695 }
2696
2697 static int cortex_a_write_phys_memory(struct target *target,
2698         target_addr_t address, uint32_t size,
2699         uint32_t count, const uint8_t *buffer)
2700 {
2701         int retval;
2702
2703         if (!count || !buffer)
2704                 return ERROR_COMMAND_SYNTAX_ERROR;
2705
2706         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2707                 address, size, count);
2708
2709         /* write memory through the CPU */
2710         cortex_a_prep_memaccess(target, 1);
2711         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2712         cortex_a_post_memaccess(target, 1);
2713
2714         return retval;
2715 }
2716
2717 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2718         uint32_t size, uint32_t count, const uint8_t *buffer)
2719 {
2720         int retval;
2721
2722         /* cortex_a handles unaligned memory access */
2723         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2724                 address, size, count);
2725
2726         /* Memory writes bypass the caches, so flush the affected range before writing. */
2727         armv7a_cache_auto_flush_on_write(target, address, size * count);
2728
2729         cortex_a_prep_memaccess(target, 0);
2730         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2731         cortex_a_post_memaccess(target, 0);
2732         return retval;
2733 }
2734
2735 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2736                                 uint32_t count, uint8_t *buffer)
2737 {
2738         uint32_t size;
2739
2740         /* Align the address up to the maximum access size of 4 bytes. The loop
2741          * condition ensures the main loop below still has enough data to process. */
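        /* For example (illustrative values): for address 0x1001 and count 10, this
         * loop issues one byte read at 0x1001 and one halfword read at 0x1002,
         * leaving the loop below to read one word at 0x1004, one halfword at
         * 0x1008 and one final byte at 0x100A. */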
2742         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2743                 if (address & size) {
2744                         int retval = target_read_memory(target, address, size, 1, buffer);
2745                         if (retval != ERROR_OK)
2746                                 return retval;
2747                         address += size;
2748                         count -= size;
2749                         buffer += size;
2750                 }
2751         }
2752
2753         /* Read the data with as large access size as possible. */
2754         for (; size > 0; size /= 2) {
2755                 uint32_t aligned = count - count % size;
2756                 if (aligned > 0) {
2757                         int retval = target_read_memory(target, address, size, aligned / size, buffer);
2758                         if (retval != ERROR_OK)
2759                                 return retval;
2760                         address += aligned;
2761                         count -= aligned;
2762                         buffer += aligned;
2763                 }
2764         }
2765
2766         return ERROR_OK;
2767 }
2768
2769 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2770                                  uint32_t count, const uint8_t *buffer)
2771 {
2772         uint32_t size;
2773
2774         /* Align the address up to the maximum access size of 4 bytes. The loop
2775          * condition ensures the main loop below still has enough data to process. */
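        /* The transfer is split exactly as in cortex_a_read_buffer() above; see the
         * worked example there. */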
2776         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2777                 if (address & size) {
2778                         int retval = target_write_memory(target, address, size, 1, buffer);
2779                         if (retval != ERROR_OK)
2780                                 return retval;
2781                         address += size;
2782                         count -= size;
2783                         buffer += size;
2784                 }
2785         }
2786
2787         /* Write the data with as large access size as possible. */
2788         for (; size > 0; size /= 2) {
2789                 uint32_t aligned = count - count % size;
2790                 if (aligned > 0) {
2791                         int retval = target_write_memory(target, address, size, aligned / size, buffer);
2792                         if (retval != ERROR_OK)
2793                                 return retval;
2794                         address += aligned;
2795                         count -= aligned;
2796                         buffer += aligned;
2797                 }
2798         }
2799
2800         return ERROR_OK;
2801 }
2802
2803 static int cortex_a_handle_target_request(void *priv)
2804 {
2805         struct target *target = priv;
2806         struct armv7a_common *armv7a = target_to_armv7a(target);
2807         int retval;
2808
2809         if (!target_was_examined(target))
2810                 return ERROR_OK;
2811         if (!target->dbg_msg_enabled)
2812                 return ERROR_OK;
2813
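        /* While the core is running, debug messages the target posts through the DCC
         * show up as DTRTX-full; read each word and hand it to the generic
         * target_request() dispatcher. */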
2814         if (target->state == TARGET_RUNNING) {
2815                 uint32_t request;
2816                 uint32_t dscr;
2817                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2818                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2819
2820                 /* check if we have data */
2821                 int64_t then = timeval_ms();
2822                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2823                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2824                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2825                         if (retval == ERROR_OK) {
2826                                 target_request(target, request);
2827                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2828                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2829                         }
2830                         if (timeval_ms() > then + 1000) {
2831                                 LOG_ERROR("Timeout waiting for dtr tx full");
2832                                 return ERROR_FAIL;
2833                         }
2834                 }
2835         }
2836
2837         return ERROR_OK;
2838 }
2839
2840 /*
2841  * Cortex-A target information and configuration
2842  */
2843
2844 static int cortex_a_examine_first(struct target *target)
2845 {
2846         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2847         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2848         struct adiv5_dap *swjdp = armv7a->arm.dap;
2849
2850         int i;
2851         int retval = ERROR_OK;
2852         uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2853
2854         /* Search for the APB-AP - it is needed for access to debug registers */
2855         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2856         if (retval != ERROR_OK) {
2857                 LOG_ERROR("Could not find APB-AP for debug access");
2858                 return retval;
2859         }
2860
2861         retval = mem_ap_init(armv7a->debug_ap);
2862         if (retval != ERROR_OK) {
2863                 LOG_ERROR("Could not initialize the APB-AP");
2864                 return retval;
2865         }
2866
2867         armv7a->debug_ap->memaccess_tck = 80;
2868
2869         if (!target->dbgbase_set) {
2870                 uint32_t dbgbase;
2871                 /* Get ROM Table base */
2872                 uint32_t apid;
2873                 int32_t coreidx = target->coreid;
2874                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2875                           target->cmd_name);
2876                 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2877                 if (retval != ERROR_OK)
2878                         return retval;
2879                 /* Lookup 0x15 -- Processor DAP */
2880                 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2881                                 &armv7a->debug_base, &coreidx);
2882                 if (retval != ERROR_OK) {
2883                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2884                                   target->cmd_name);
2885                         return retval;
2886                 }
2887                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2888                           target->coreid, armv7a->debug_base);
2889         } else
2890                 armv7a->debug_base = target->dbgbase;
2891
2892         if ((armv7a->debug_base & (1UL<<31)) == 0)
2893                 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2894                             "Please fix the target configuration.", target_name(target));
2895
2896         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2897                         armv7a->debug_base + CPUDBG_DIDR, &didr);
2898         if (retval != ERROR_OK) {
2899                 LOG_DEBUG("Examine %s failed", "DIDR");
2900                 return retval;
2901         }
2902
2903         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2904                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2905         if (retval != ERROR_OK) {
2906                 LOG_DEBUG("Examine %s failed", "CPUID");
2907                 return retval;
2908         }
2909
2910         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2911         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2912
2913         cortex_a->didr = didr;
2914         cortex_a->cpuid = cpuid;
2915
2916         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2917                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2918         if (retval != ERROR_OK)
2919                 return retval;
2920         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
2921
2922         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2923                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2924                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2925                 return ERROR_TARGET_INIT_FAILED;
2926         }
2927
2928         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2929                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2930
2931         /* Read DBGOSLSR and check if OSLK is implemented */
2932         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2933                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2934         if (retval != ERROR_OK)
2935                 return retval;
2936         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2937
2938         /* check if OS Lock is implemented */
2939         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2940                 /* check if OS Lock is set */
2941                 if (dbg_osreg & OSLSR_OSLK) {
2942                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2943
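                        /* Per the ARMv7 debug architecture, writing any value other
                         * than the 0xC5ACCE55 key to DBGOSLAR clears the OS Lock, so
                         * writing 0 unlocks the debug registers. */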
2944                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2945                                                         armv7a->debug_base + CPUDBG_OSLAR,
2946                                                         0);
2947                         if (retval == ERROR_OK)
2948                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2949                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2950
2951                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2952                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2953                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2954                                                 target->coreid);
2955                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2956                                 return ERROR_TARGET_INIT_FAILED;
2957                         }
2958                 }
2959         }
2960
2961         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2962                                  armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
2963         if (retval != ERROR_OK)
2964                 return retval;
2965
2966         if (dbg_idpfr1 & 0x000000f0) {
2967                 LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
2968                                 target->coreid);
2969                 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
2970         }
2971         if (dbg_idpfr1 & 0x0000f000) {
2972                 LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
2973                                 target->coreid);
2974                 /*
2975                  * Overwrite the previous setting and simplify the checks:
2976                  * the virtualization extensions require the security extensions to be implemented.
2977                  */
2978                 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
2979         }
2980
2981         /* Avoid recreating the registers cache */
2982         if (!target_was_examined(target)) {
2983                 retval = cortex_a_dpm_setup(cortex_a, didr);
2984                 if (retval != ERROR_OK)
2985                         return retval;
2986         }
2987
2988         /* Setup Breakpoint Register Pairs */
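        /* DIDR[27:24] holds the number of breakpoint register pairs minus one,
         * DIDR[23:20] the number of context-matching ones minus one. */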
2989         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2990         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2991         cortex_a->brp_num_available = cortex_a->brp_num;
2992         free(cortex_a->brp_list);
2993         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2994 /*      cortex_a->brb_enabled = ????; */
2995         for (i = 0; i < cortex_a->brp_num; i++) {
2996                 cortex_a->brp_list[i].used = 0;
2997                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2998                         cortex_a->brp_list[i].type = BRP_NORMAL;
2999                 else
3000                         cortex_a->brp_list[i].type = BRP_CONTEXT;
3001                 cortex_a->brp_list[i].value = 0;
3002                 cortex_a->brp_list[i].control = 0;
3003                 cortex_a->brp_list[i].BRPn = i;
3004         }
3005
3006         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3007
3008         /* Setup Watchpoint Register Pairs */
3009         cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3010         cortex_a->wrp_num_available = cortex_a->wrp_num;
3011         free(cortex_a->wrp_list);
3012         cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3013         for (i = 0; i < cortex_a->wrp_num; i++) {
3014                 cortex_a->wrp_list[i].used = 0;
3015                 cortex_a->wrp_list[i].value = 0;
3016                 cortex_a->wrp_list[i].control = 0;
3017                 cortex_a->wrp_list[i].WRPn = i;
3018         }
3019
3020         LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3021
3022         /* select debug_ap as default */
3023         swjdp->apsel = armv7a->debug_ap->ap_num;
3024
3025         target_set_examined(target);
3026         return ERROR_OK;
3027 }
3028
3029 static int cortex_a_examine(struct target *target)
3030 {
3031         int retval = ERROR_OK;
3032
3033         /* Reestablish communication after target reset */
3034         retval = cortex_a_examine_first(target);
3035
3036         /* Configure core debug access */
3037         if (retval == ERROR_OK)
3038                 retval = cortex_a_init_debug_access(target);
3039
3040         return retval;
3041 }
3042
3043 /*
3044  *      Cortex-A target creation and initialization
3045  */
3046
3047 static int cortex_a_init_target(struct command_context *cmd_ctx,
3048         struct target *target)
3049 {
3050         /* examine_first() already performs most of this initialization */
3051         arm_semihosting_init(target);
3052         return ERROR_OK;
3053 }
3054
3055 static int cortex_a_init_arch_info(struct target *target,
3056         struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3057 {
3058         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3059
3060         /* Setup struct cortex_a_common */
3061         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3062         armv7a->arm.dap = dap;
3063
3064         /* register arch-specific functions */
3065         armv7a->examine_debug_reason = NULL;
3066
3067         armv7a->post_debug_entry = cortex_a_post_debug_entry;
3068
3069         armv7a->pre_restore_context = NULL;
3070
3071         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3072
3073
3074 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
3075
3076         /* REVISIT v7a setup should be in a v7a-specific routine */
3077         armv7a_init_arch_info(target, armv7a);
3078         target_register_timer_callback(cortex_a_handle_target_request, 1,
3079                 TARGET_TIMER_TYPE_PERIODIC, target);
3080
3081         return ERROR_OK;
3082 }
3083
3084 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3085 {
3086         struct cortex_a_common *cortex_a;
3087         struct adiv5_private_config *pc;
3088
3089         if (target->private_config == NULL)
3090                 return ERROR_FAIL;
3091
3092         pc = (struct adiv5_private_config *)target->private_config;
3093
3094         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3095         if (cortex_a == NULL) {
3096                 LOG_ERROR("Out of memory");
3097                 return ERROR_FAIL;
3098         }
3099         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3100         cortex_a->armv7a_common.is_armv7r = false;
3101         cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3102
3103         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3104 }
3105
3106 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3107 {
3108         struct cortex_a_common *cortex_a;
3109         struct adiv5_private_config *pc;
3110
3111         pc = (struct adiv5_private_config *)target->private_config;
3112         if (adiv5_verify_config(pc) != ERROR_OK)
3113                 return ERROR_FAIL;
3114
3115         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3116         if (cortex_a == NULL) {
3117                 LOG_ERROR("Out of memory");
3118                 return ERROR_FAIL;
3119         }
3120         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3121         cortex_a->armv7a_common.is_armv7r = true;
3122
3123         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3124 }
3125
3126 static void cortex_a_deinit_target(struct target *target)
3127 {
3128         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3129         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3130         struct arm_dpm *dpm = &armv7a->dpm;
3131         uint32_t dscr;
3132         int retval;
3133
3134         if (target_was_examined(target)) {
3135                 /* Disable halt for breakpoint, watchpoint and vector catch */
3136                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3137                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
3138                 if (retval == ERROR_OK)
3139                         mem_ap_write_atomic_u32(armv7a->debug_ap,
3140                                         armv7a->debug_base + CPUDBG_DSCR,
3141                                         dscr & ~DSCR_HALT_DBG_MODE);
3142         }
3143
3144         free(cortex_a->brp_list);
3145         arm_free_reg_cache(dpm->arm);
3146         free(dpm->dbp);
3147         free(dpm->dwp);
3148         free(target->private_config);
3149         free(cortex_a);
3150 }
3151
3152 static int cortex_a_mmu(struct target *target, int *enabled)
3153 {
3154         struct armv7a_common *armv7a = target_to_armv7a(target);
3155
3156         if (target->state != TARGET_HALTED) {
3157                 LOG_ERROR("%s: target not halted", __func__);
3158                 return ERROR_TARGET_INVALID;
3159         }
3160
3161         if (armv7a->is_armv7r)
3162                 *enabled = 0;
3163         else
3164                 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3165
3166         return ERROR_OK;
3167 }
3168
3169 static int cortex_a_virt2phys(struct target *target,
3170         target_addr_t virt, target_addr_t *phys)
3171 {
3172         int retval;
3173         int mmu_enabled = 0;
3174
3175         /*
3176          * If the MMU was not enabled at debug entry, there is no
3177          * way of knowing if there was ever a valid configuration
3178          * for it and thus it's not safe to enable it. In this case,
3179          * just return the virtual address as physical.
3180          */
3181         cortex_a_mmu(target, &mmu_enabled);
3182         if (!mmu_enabled) {
3183                 *phys = virt;
3184                 return ERROR_OK;
3185         }
3186
3187         /* The MMU must be enabled in order to get a correct translation. */
3188         retval = cortex_a_mmu_modify(target, 1);
3189         if (retval != ERROR_OK)
3190                 return retval;
3191         return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3192                                                     phys, 1);
3193 }
3194
3195 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3196 {
3197         struct target *target = get_current_target(CMD_CTX);
3198         struct armv7a_common *armv7a = target_to_armv7a(target);
3199
3200         return armv7a_handle_cache_info_command(CMD,
3201                         &armv7a->armv7a_mmu.armv7a_cache);
3202 }
3203
3204
3205 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3206 {
3207         struct target *target = get_current_target(CMD_CTX);
3208         if (!target_was_examined(target)) {
3209                 LOG_ERROR("target not examined yet");
3210                 return ERROR_FAIL;
3211         }
3212
3213         return cortex_a_init_debug_access(target);
3214 }
3215
3216 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3217 {
3218         struct target *target = get_current_target(CMD_CTX);
3219         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3220
3221         static const Jim_Nvp nvp_maskisr_modes[] = {
3222                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3223                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3224                 { .name = NULL, .value = -1 },
3225         };
3226         const Jim_Nvp *n;
3227
3228         if (CMD_ARGC > 0) {
3229                 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3230                 if (n->name == NULL) {
3231                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3232                         return ERROR_COMMAND_SYNTAX_ERROR;
3233                 }
3234
3235                 cortex_a->isrmasking_mode = n->value;
3236         }
3237
3238         n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3239         command_print(CMD, "cortex_a interrupt mask %s", n->name);
3240
3241         return ERROR_OK;
3242 }
3243
3244 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3245 {
3246         struct target *target = get_current_target(CMD_CTX);
3247         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3248
3249         static const Jim_Nvp nvp_dacrfixup_modes[] = {
3250                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3251                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3252                 { .name = NULL, .value = -1 },
3253         };
3254         const Jim_Nvp *n;
3255
3256         if (CMD_ARGC > 0) {
3257                 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3258                 if (n->name == NULL)
3259                         return ERROR_COMMAND_SYNTAX_ERROR;
3260                 cortex_a->dacrfixup_mode = n->value;
3261
3262         }
3263
3264         n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3265         command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3266
3267         return ERROR_OK;
3268 }
3269
3270 static const struct command_registration cortex_a_exec_command_handlers[] = {
3271         {
3272                 .name = "cache_info",
3273                 .handler = cortex_a_handle_cache_info_command,
3274                 .mode = COMMAND_EXEC,
3275                 .help = "display information about target caches",
3276                 .usage = "",
3277         },
3278         {
3279                 .name = "dbginit",
3280                 .handler = cortex_a_handle_dbginit_command,
3281                 .mode = COMMAND_EXEC,
3282                 .help = "Initialize core debug",
3283                 .usage = "",
3284         },
3285         {
3286                 .name = "maskisr",
3287                 .handler = handle_cortex_a_mask_interrupts_command,
3288                 .mode = COMMAND_ANY,
3289                 .help = "mask cortex_a interrupts",
3290                 .usage = "['on'|'off']",
3291         },
3292         {
3293                 .name = "dacrfixup",
3294                 .handler = handle_cortex_a_dacrfixup_command,
3295                 .mode = COMMAND_ANY,
3296                 .help = "set domain access control (DACR) to all-manager "
3297                         "on memory access",
3298                 .usage = "['on'|'off']",
3299         },
3300         {
3301                 .chain = armv7a_mmu_command_handlers,
3302         },
3303         {
3304                 .chain = smp_command_handlers,
3305         },
3306
3307         COMMAND_REGISTRATION_DONE
3308 };
3309 static const struct command_registration cortex_a_command_handlers[] = {
3310         {
3311                 .chain = arm_command_handlers,
3312         },
3313         {
3314                 .chain = armv7a_command_handlers,
3315         },
3316         {
3317                 .name = "cortex_a",
3318                 .mode = COMMAND_ANY,
3319                 .help = "Cortex-A command group",
3320                 .usage = "",
3321                 .chain = cortex_a_exec_command_handlers,
3322         },
3323         COMMAND_REGISTRATION_DONE
3324 };
3325
3326 struct target_type cortexa_target = {
3327         .name = "cortex_a",
3328
3329         .poll = cortex_a_poll,
3330         .arch_state = armv7a_arch_state,
3331
3332         .halt = cortex_a_halt,
3333         .resume = cortex_a_resume,
3334         .step = cortex_a_step,
3335
3336         .assert_reset = cortex_a_assert_reset,
3337         .deassert_reset = cortex_a_deassert_reset,
3338
3339         /* REVISIT allow exporting VFP3 registers ... */
3340         .get_gdb_arch = arm_get_gdb_arch,
3341         .get_gdb_reg_list = arm_get_gdb_reg_list,
3342
3343         .read_memory = cortex_a_read_memory,
3344         .write_memory = cortex_a_write_memory,
3345
3346         .read_buffer = cortex_a_read_buffer,
3347         .write_buffer = cortex_a_write_buffer,
3348
3349         .checksum_memory = arm_checksum_memory,
3350         .blank_check_memory = arm_blank_check_memory,
3351
3352         .run_algorithm = armv4_5_run_algorithm,
3353
3354         .add_breakpoint = cortex_a_add_breakpoint,
3355         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3356         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3357         .remove_breakpoint = cortex_a_remove_breakpoint,
3358         .add_watchpoint = cortex_a_add_watchpoint,
3359         .remove_watchpoint = cortex_a_remove_watchpoint,
3360
3361         .commands = cortex_a_command_handlers,
3362         .target_create = cortex_a_target_create,
3363         .target_jim_configure = adiv5_jim_configure,
3364         .init_target = cortex_a_init_target,
3365         .examine = cortex_a_examine,
3366         .deinit_target = cortex_a_deinit_target,
3367
3368         .read_phys_memory = cortex_a_read_phys_memory,
3369         .write_phys_memory = cortex_a_write_phys_memory,
3370         .mmu = cortex_a_mmu,
3371         .virt2phys = cortex_a_virt2phys,
3372 };
3373
3374 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3375         {
3376                 .name = "dbginit",
3377                 .handler = cortex_a_handle_dbginit_command,
3378                 .mode = COMMAND_EXEC,
3379                 .help = "Initialize core debug",
3380                 .usage = "",
3381         },
3382         {
3383                 .name = "maskisr",
3384                 .handler = handle_cortex_a_mask_interrupts_command,
3385                 .mode = COMMAND_EXEC,
3386                 .help = "mask cortex_r4 interrupts",
3387                 .usage = "['on'|'off']",
3388         },
3389
3390         COMMAND_REGISTRATION_DONE
3391 };
3392 static const struct command_registration cortex_r4_command_handlers[] = {
3393         {
3394                 .chain = arm_command_handlers,
3395         },
3396         {
3397                 .name = "cortex_r4",
3398                 .mode = COMMAND_ANY,
3399                 .help = "Cortex-R4 command group",
3400                 .usage = "",
3401                 .chain = cortex_r4_exec_command_handlers,
3402         },
3403         COMMAND_REGISTRATION_DONE
3404 };
3405
3406 struct target_type cortexr4_target = {
3407         .name = "cortex_r4",
3408
3409         .poll = cortex_a_poll,
3410         .arch_state = armv7a_arch_state,
3411
3412         .halt = cortex_a_halt,
3413         .resume = cortex_a_resume,
3414         .step = cortex_a_step,
3415
3416         .assert_reset = cortex_a_assert_reset,
3417         .deassert_reset = cortex_a_deassert_reset,
3418
3419         /* REVISIT allow exporting VFP3 registers ... */
3420         .get_gdb_arch = arm_get_gdb_arch,
3421         .get_gdb_reg_list = arm_get_gdb_reg_list,
3422
3423         .read_memory = cortex_a_read_phys_memory,
3424         .write_memory = cortex_a_write_phys_memory,
3425
3426         .checksum_memory = arm_checksum_memory,
3427         .blank_check_memory = arm_blank_check_memory,
3428
3429         .run_algorithm = armv4_5_run_algorithm,
3430
3431         .add_breakpoint = cortex_a_add_breakpoint,
3432         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3433         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3434         .remove_breakpoint = cortex_a_remove_breakpoint,
3435         .add_watchpoint = cortex_a_add_watchpoint,
3436         .remove_watchpoint = cortex_a_remove_watchpoint,
3437
3438         .commands = cortex_r4_command_handlers,
3439         .target_create = cortex_r4_target_create,
3440         .target_jim_configure = adiv5_jim_configure,
3441         .init_target = cortex_a_init_target,
3442         .examine = cortex_a_examine,
3443         .deinit_target = cortex_a_deinit_target,
3444 };