arm_adi_v5: simplify handling of AP type
[fw/openocd] / src / target / cortex_a.c
1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   Copyright (C) 2016 Chengyu Zheng                                      *
27  *   chengyu.zheng@polimi.it : watchpoint support                          *
28  *                                                                         *
29  *   This program is free software; you can redistribute it and/or modify  *
30  *   it under the terms of the GNU General Public License as published by  *
31  *   the Free Software Foundation; either version 2 of the License, or     *
32  *   (at your option) any later version.                                   *
33  *                                                                         *
34  *   This program is distributed in the hope that it will be useful,       *
35  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
36  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
37  *   GNU General Public License for more details.                          *
38  *                                                                         *
39  *   You should have received a copy of the GNU General Public License     *
40  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
41  *                                                                         *
42  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
43  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
44  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
45  *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
46  *                                                                         *
47  ***************************************************************************/
48
49 #ifdef HAVE_CONFIG_H
50 #include "config.h"
51 #endif
52
53 #include "breakpoints.h"
54 #include "cortex_a.h"
55 #include "register.h"
56 #include "armv7a_mmu.h"
57 #include "target_request.h"
58 #include "target_type.h"
59 #include "arm_coresight.h"
60 #include "arm_opcodes.h"
61 #include "arm_semihosting.h"
62 #include "jtag/interface.h"
63 #include "transport/transport.h"
64 #include "smp.h"
65 #include <helper/bits.h>
66 #include <helper/time_support.h>
67
68 static int cortex_a_poll(struct target *target);
69 static int cortex_a_debug_entry(struct target *target);
70 static int cortex_a_restore_context(struct target *target, bool bpwp);
71 static int cortex_a_set_breakpoint(struct target *target,
72         struct breakpoint *breakpoint, uint8_t matchmode);
73 static int cortex_a_set_context_breakpoint(struct target *target,
74         struct breakpoint *breakpoint, uint8_t matchmode);
75 static int cortex_a_set_hybrid_breakpoint(struct target *target,
76         struct breakpoint *breakpoint);
77 static int cortex_a_unset_breakpoint(struct target *target,
78         struct breakpoint *breakpoint);
79 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
80         uint32_t value, uint32_t *dscr);
81 static int cortex_a_mmu(struct target *target, int *enabled);
82 static int cortex_a_mmu_modify(struct target *target, int enable);
83 static int cortex_a_virt2phys(struct target *target,
84         target_addr_t virt, target_addr_t *phys);
85 static int cortex_a_read_cpu_memory(struct target *target,
86         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
87
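/* Integer base-2 logarithm: returns floor(log2(x)) for x >= 1 (and 0 for x == 0). */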
88 static unsigned int ilog2(unsigned int x)
89 {
90         unsigned int y = 0;
91         x /= 2;
92         while (x) {
93                 ++y;
94                 x /= 2;
95         }
96         return y;
97 }
98
99 /*  restore cp15_control_reg at resume */
100 static int cortex_a_restore_cp15_control_reg(struct target *target)
101 {
102         int retval = ERROR_OK;
103         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
104         struct armv7a_common *armv7a = target_to_armv7a(target);
105
106         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
107                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
108                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
109                 retval = armv7a->arm.mcr(target, 15,
110                                 0, 0,   /* op1, op2 */
111                                 1, 0,   /* CRn, CRm */
112                                 cortex_a->cp15_control_reg);
113         }
114         return retval;
115 }
116
117 /*
118  * Set up ARM core for memory access.
119  * If !phys_access, switch to SVC mode and make sure MMU is on
120  * If phys_access, switch off mmu
121  */
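/*
 * Illustrative sketch of the intended pairing, as the memory read/write
 * handlers elsewhere in this file do:
 *
 *     cortex_a_prep_memaccess(target, 0);     // virtual access: SVC mode, MMU on
 *     retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
 *     cortex_a_post_memaccess(target, 0);     // restore previous mode and MMU state
 */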
122 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
123 {
124         struct armv7a_common *armv7a = target_to_armv7a(target);
125         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
126         int mmu_enabled = 0;
127
128         if (phys_access == 0) {
129                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
130                 cortex_a_mmu(target, &mmu_enabled);
131                 if (mmu_enabled)
132                         cortex_a_mmu_modify(target, 1);
133                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
134                         /* overwrite DACR to all-manager */
135                         armv7a->arm.mcr(target, 15,
136                                         0, 0, 3, 0,
137                                         0xFFFFFFFF);
138                 }
139         } else {
140                 cortex_a_mmu(target, &mmu_enabled);
141                 if (mmu_enabled)
142                         cortex_a_mmu_modify(target, 0);
143         }
144         return ERROR_OK;
145 }
146
147 /*
148  * Restore ARM core after memory access.
149  * If !phys_access, switch to previous mode
150  * If phys_access, restore MMU setting
151  */
152 static int cortex_a_post_memaccess(struct target *target, int phys_access)
153 {
154         struct armv7a_common *armv7a = target_to_armv7a(target);
155         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
156
157         if (phys_access == 0) {
158                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
159                         /* restore */
160                         armv7a->arm.mcr(target, 15,
161                                         0, 0, 3, 0,
162                                         cortex_a->cp15_dacr_reg);
163                 }
164                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
165         } else {
166                 int mmu_enabled = 0;
167                 cortex_a_mmu(target, &mmu_enabled);
168                 if (mmu_enabled)
169                         cortex_a_mmu_modify(target, 1);
170         }
171         return ERROR_OK;
172 }
173
174
175 /*  modify cp15_control_reg in order to enable or disable the MMU for:
176  *  - virt2phys address conversion
177  *  - reading or writing memory at physical or virtual addresses */
178 static int cortex_a_mmu_modify(struct target *target, int enable)
179 {
180         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
181         struct armv7a_common *armv7a = target_to_armv7a(target);
182         int retval = ERROR_OK;
183         int need_write = 0;
184
185         if (enable) {
186                 /*  the MMU can only be re-enabled if it was enabled when the target stopped */
187                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
188                         LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
189                         return ERROR_FAIL;
190                 }
191                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
192                         cortex_a->cp15_control_reg_curr |= 0x1U;
193                         need_write = 1;
194                 }
195         } else {
196                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
197                         cortex_a->cp15_control_reg_curr &= ~0x1U;
198                         need_write = 1;
199                 }
200         }
201
202         if (need_write) {
203                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
204                         enable ? "enable mmu" : "disable mmu",
205                         cortex_a->cp15_control_reg_curr);
206
207                 retval = armv7a->arm.mcr(target, 15,
208                                 0, 0,   /* op1, op2 */
209                                 1, 0,   /* CRn, CRm */
210                                 cortex_a->cp15_control_reg_curr);
211         }
212         return retval;
213 }
214
215 /*
216  * Cortex-A Basic debug access, very low level assumes state is saved
217  */
218 static int cortex_a_init_debug_access(struct target *target)
219 {
220         struct armv7a_common *armv7a = target_to_armv7a(target);
221         uint32_t dscr;
222         int retval;
223
224         /* lock memory-mapped access to debug registers to prevent
225          * software interference */
226         retval = mem_ap_write_u32(armv7a->debug_ap,
227                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
228         if (retval != ERROR_OK)
229                 return retval;
230
231         /* Disable cacheline fills and force cache write-through in debug state */
232         retval = mem_ap_write_u32(armv7a->debug_ap,
233                         armv7a->debug_base + CPUDBG_DSCCR, 0);
234         if (retval != ERROR_OK)
235                 return retval;
236
237         /* Disable TLB lookup and refill/eviction in debug state */
238         retval = mem_ap_write_u32(armv7a->debug_ap,
239                         armv7a->debug_base + CPUDBG_DSMCR, 0);
240         if (retval != ERROR_OK)
241                 return retval;
242
243         retval = dap_run(armv7a->debug_ap->dap);
244         if (retval != ERROR_OK)
245                 return retval;
246
247         /* Enabling of instruction execution in debug mode is done in debug_entry code */
248
249         /* Resync breakpoint registers */
250
251         /* Enable halt for breakpoint, watchpoint and vector catch */
252         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
253                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
254         if (retval != ERROR_OK)
255                 return retval;
256         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
257                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
258         if (retval != ERROR_OK)
259                 return retval;
260
261         /* Since this is likely called from init or reset, update target state information */
262         return cortex_a_poll(target);
263 }
264
265 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
266 {
267         /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
268          * Writes final value of DSCR into *dscr. Pass force to force always
269          * reading DSCR at least once. */
270         struct armv7a_common *armv7a = target_to_armv7a(target);
271         int retval;
272
273         if (force) {
274                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
275                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
276                 if (retval != ERROR_OK) {
277                         LOG_ERROR("Could not read DSCR register");
278                         return retval;
279                 }
280         }
281
282         retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
283         if (retval != ERROR_OK)
284                 LOG_ERROR("Error waiting for InstrCompl=1");
285         return retval;
286 }
287
288 /* To reduce needless round-trips, pass in a pointer to the current
289  * DSCR value.  Initialize it to zero if you just need to know the
290  * value on return from this function; or DSCR_INSTR_COMP if you
291  * happen to know that no instruction is pending.
292  */
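/*
 * Illustrative example: execute an opcode when nothing is known about the
 * current DSCR state (dscr initialized to zero forces at least one read):
 *
 *     uint32_t dscr = 0;
 *     retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
 */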
293 static int cortex_a_exec_opcode(struct target *target,
294         uint32_t opcode, uint32_t *dscr_p)
295 {
296         uint32_t dscr;
297         int retval;
298         struct armv7a_common *armv7a = target_to_armv7a(target);
299
300         dscr = dscr_p ? *dscr_p : 0;
301
302         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
303
304         /* Wait for InstrCompl bit to be set */
305         retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
306         if (retval != ERROR_OK)
307                 return retval;
308
309         retval = mem_ap_write_u32(armv7a->debug_ap,
310                         armv7a->debug_base + CPUDBG_ITR, opcode);
311         if (retval != ERROR_OK)
312                 return retval;
313
314         /* Wait for InstrCompl bit to be set */
315         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
316         if (retval != ERROR_OK) {
317                 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
318                 return retval;
319         }
320
321         if (dscr_p)
322                 *dscr_p = dscr;
323
324         return retval;
325 }
326
327 /* Write to memory mapped registers directly with no cache or mmu handling */
328 static int cortex_a_dap_write_memap_register_u32(struct target *target,
329         uint32_t address,
330         uint32_t value)
331 {
332         int retval;
333         struct armv7a_common *armv7a = target_to_armv7a(target);
334
335         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
336
337         return retval;
338 }
339
340 /*
341  * Cortex-A implementation of Debug Programmer's Model
342  *
343  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
344  * so there's no need to poll for it before executing an instruction.
345  *
346  * NOTE that in several of these cases the "stall" mode might be useful.
347  * It'd let us queue a few operations together... prepare/finish might
348  * be the places to enable/disable that mode.
349  */
350
351 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
352 {
353         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
354 }
355
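/* Note: the DTR registers are named from the core's point of view: the debugger
 * writes DTRRX (which the core receives) and reads DTRTX (which the core
 * transmits), hence write_dcc uses CPUDBG_DTRRX and read_dcc uses CPUDBG_DTRTX. */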
356 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
357 {
358         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
359         return mem_ap_write_u32(a->armv7a_common.debug_ap,
360                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
361 }
362
363 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
364         uint32_t *dscr_p)
365 {
366         uint32_t dscr = DSCR_INSTR_COMP;
367         int retval;
368
369         if (dscr_p)
370                 dscr = *dscr_p;
371
372         /* Wait for DTRRXfull */
373         retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
374                         DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
375         if (retval != ERROR_OK) {
376                 LOG_ERROR("Error waiting for read dcc");
377                 return retval;
378         }
379
380         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
381                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
382         if (retval != ERROR_OK)
383                 return retval;
384         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
385
386         if (dscr_p)
387                 *dscr_p = dscr;
388
389         return retval;
390 }
391
392 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
393 {
394         struct cortex_a_common *a = dpm_to_a(dpm);
395         uint32_t dscr;
396         int retval;
397
398         /* set up invariant:  INSTR_COMP is set after every DPM operation */
399         retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
400         if (retval != ERROR_OK) {
401                 LOG_ERROR("Error waiting for dpm prepare");
402                 return retval;
403         }
404
405         /* this "should never happen" ... */
406         if (dscr & DSCR_DTR_RX_FULL) {
407                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
408                 /* Clear DCCRX */
409                 retval = cortex_a_exec_opcode(
410                                 a->armv7a_common.arm.target,
411                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
412                                 &dscr);
413                 if (retval != ERROR_OK)
414                         return retval;
415         }
416
417         return retval;
418 }
419
420 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
421 {
422         /* REVISIT what could be done here? */
423         return ERROR_OK;
424 }
425
426 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
427         uint32_t opcode, uint32_t data)
428 {
429         struct cortex_a_common *a = dpm_to_a(dpm);
430         int retval;
431         uint32_t dscr = DSCR_INSTR_COMP;
432
433         retval = cortex_a_write_dcc(a, data);
434         if (retval != ERROR_OK)
435                 return retval;
436
437         return cortex_a_exec_opcode(
438                         a->armv7a_common.arm.target,
439                         opcode,
440                         &dscr);
441 }
442
443 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
444         uint8_t rt, uint32_t data)
445 {
446         struct cortex_a_common *a = dpm_to_a(dpm);
447         uint32_t dscr = DSCR_INSTR_COMP;
448         int retval;
449
450         if (rt > 15)
451                 return ERROR_TARGET_INVALID;
452
453         retval = cortex_a_write_dcc(a, data);
454         if (retval != ERROR_OK)
455                 return retval;
456
457         /* DCCRX to Rt, "MRC p14, 0, Rt, c0, c5, 0", 0xEE100E15 for Rt = R0 */
458         return cortex_a_exec_opcode(
459                         a->armv7a_common.arm.target,
460                         ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
461                         &dscr);
462 }
463
464 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
465         uint32_t opcode, uint32_t data)
466 {
467         struct cortex_a_common *a = dpm_to_a(dpm);
468         uint32_t dscr = DSCR_INSTR_COMP;
469         int retval;
470
471         retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
472         if (retval != ERROR_OK)
473                 return retval;
474
475         /* then the opcode, taking data from R0 */
476         retval = cortex_a_exec_opcode(
477                         a->armv7a_common.arm.target,
478                         opcode,
479                         &dscr);
480
481         return retval;
482 }
483
484 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
485 {
486         struct target *target = dpm->arm->target;
487         uint32_t dscr = DSCR_INSTR_COMP;
488
489         /* "Prefetch flush" after modifying execution status in CPSR */
490         return cortex_a_exec_opcode(target,
491                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
492                         &dscr);
493 }
494
495 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
496         uint32_t opcode, uint32_t *data)
497 {
498         struct cortex_a_common *a = dpm_to_a(dpm);
499         int retval;
500         uint32_t dscr = DSCR_INSTR_COMP;
501
502         /* the opcode, writing data to DCC */
503         retval = cortex_a_exec_opcode(
504                         a->armv7a_common.arm.target,
505                         opcode,
506                         &dscr);
507         if (retval != ERROR_OK)
508                 return retval;
509
510         return cortex_a_read_dcc(a, data, &dscr);
511 }
512
513 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
514         uint8_t rt, uint32_t *data)
515 {
516         struct cortex_a_common *a = dpm_to_a(dpm);
517         uint32_t dscr = DSCR_INSTR_COMP;
518         int retval;
519
520         if (rt > 15)
521                 return ERROR_TARGET_INVALID;
522
523         retval = cortex_a_exec_opcode(
524                         a->armv7a_common.arm.target,
525                         ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
526                         &dscr);
527         if (retval != ERROR_OK)
528                 return retval;
529
530         return cortex_a_read_dcc(a, data, &dscr);
531 }
532
533 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
534         uint32_t opcode, uint32_t *data)
535 {
536         struct cortex_a_common *a = dpm_to_a(dpm);
537         uint32_t dscr = DSCR_INSTR_COMP;
538         int retval;
539
540         /* the opcode, writing data to R0 */
541         retval = cortex_a_exec_opcode(
542                         a->armv7a_common.arm.target,
543                         opcode,
544                         &dscr);
545         if (retval != ERROR_OK)
546                 return retval;
547
548         /* write R0 to DCC */
549         return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
550 }
551
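/* The DPM layer uses a single index space for breakpoint/watchpoint units:
 * indices 0..15 address the Breakpoint Register Pairs and 16..31 the
 * Watchpoint Register Pairs, hence the "- 16" adjustment below. */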
552 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
553         uint32_t addr, uint32_t control)
554 {
555         struct cortex_a_common *a = dpm_to_a(dpm);
556         uint32_t vr = a->armv7a_common.debug_base;
557         uint32_t cr = a->armv7a_common.debug_base;
558         int retval;
559
560         switch (index_t) {
561                 case 0 ... 15:  /* breakpoints */
562                         vr += CPUDBG_BVR_BASE;
563                         cr += CPUDBG_BCR_BASE;
564                         break;
565                 case 16 ... 31: /* watchpoints */
566                         vr += CPUDBG_WVR_BASE;
567                         cr += CPUDBG_WCR_BASE;
568                         index_t -= 16;
569                         break;
570                 default:
571                         return ERROR_FAIL;
572         }
573         vr += 4 * index_t;
574         cr += 4 * index_t;
575
576         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
577                 (unsigned) vr, (unsigned) cr);
578
579         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
580                         vr, addr);
581         if (retval != ERROR_OK)
582                 return retval;
583         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
584                         cr, control);
585         return retval;
586 }
587
588 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
589 {
590         struct cortex_a_common *a = dpm_to_a(dpm);
591         uint32_t cr;
592
593         switch (index_t) {
594                 case 0 ... 15:
595                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
596                         break;
597                 case 16 ... 31:
598                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
599                         index_t -= 16;
600                         break;
601                 default:
602                         return ERROR_FAIL;
603         }
604         cr += 4 * index_t;
605
606         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
607
608         /* clear control register */
609         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
610 }
611
612 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
613 {
614         struct arm_dpm *dpm = &a->armv7a_common.dpm;
615         int retval;
616
617         dpm->arm = &a->armv7a_common.arm;
618         dpm->didr = didr;
619
620         dpm->prepare = cortex_a_dpm_prepare;
621         dpm->finish = cortex_a_dpm_finish;
622
623         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
624         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
625         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
626
627         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
628         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
629
630         dpm->bpwp_enable = cortex_a_bpwp_enable;
631         dpm->bpwp_disable = cortex_a_bpwp_disable;
632
633         retval = arm_dpm_setup(dpm);
634         if (retval == ERROR_OK)
635                 retval = arm_dpm_initialize(dpm);
636
637         return retval;
638 }
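
/* Return the core of the SMP group that has the given coreid and is currently
 * halted; fall back to the calling target if no such core is found. */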
639 static struct target *get_cortex_a(struct target *target, int32_t coreid)
640 {
641         struct target_list *head;
642         struct target *curr;
643
644         head = target->head;
645         while (head) {
646                 curr = head->target;
647                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
648                         return curr;
649                 head = head->next;
650         }
651         return target;
652 }
653 static int cortex_a_halt(struct target *target);
654
655 static int cortex_a_halt_smp(struct target *target)
656 {
657         int retval = 0;
658         struct target_list *head;
659         struct target *curr;
660         head = target->head;
661         while (head) {
662                 curr = head->target;
663                 if ((curr != target) && (curr->state != TARGET_HALTED)
664                         && target_was_examined(curr))
665                         retval += cortex_a_halt(curr);
666                 head = head->next;
667         }
668         return retval;
669 }
670
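/* Propagate a halt to the whole SMP group and bring the target states up to
 * date, polling the gdb-serving target last so that its halt event reaches
 * gdb only after all other cores have been updated. */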
671 static int update_halt_gdb(struct target *target)
672 {
673         struct target *gdb_target = NULL;
674         struct target_list *head;
675         struct target *curr;
676         int retval = 0;
677
678         if (target->gdb_service && target->gdb_service->core[0] == -1) {
679                 target->gdb_service->target = target;
680                 target->gdb_service->core[0] = target->coreid;
681                 retval += cortex_a_halt_smp(target);
682         }
683
684         if (target->gdb_service)
685                 gdb_target = target->gdb_service->target;
686
687         foreach_smp_target(head, target->head) {
688                 curr = head->target;
689                 /* skip calling context */
690                 if (curr == target)
691                         continue;
692                 if (!target_was_examined(curr))
693                         continue;
694                 /* skip targets that were already halted */
695                 if (curr->state == TARGET_HALTED)
696                         continue;
697                 /* Skip gdb_target; it alerts GDB so has to be polled as last one */
698                 if (curr == gdb_target)
699                         continue;
700
701                 /* avoid recursion in cortex_a_poll() */
702                 curr->smp = 0;
703                 cortex_a_poll(curr);
704                 curr->smp = 1;
705         }
706
707         /* after all targets were updated, poll the gdb serving target */
708         if (gdb_target && gdb_target != target)
709                 cortex_a_poll(gdb_target);
710         return retval;
711 }
712
713 /*
714  * Cortex-A Run control
715  */
716
717 static int cortex_a_poll(struct target *target)
718 {
719         int retval = ERROR_OK;
720         uint32_t dscr;
721         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
722         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
723         enum target_state prev_target_state = target->state;
724         /*  toggling to another core is done by gdb as follows: */
725         /*  maint packet J core_id */
726         /*  continue */
727         /*  the next poll triggers a halt event sent to gdb */
728         if ((target->state == TARGET_HALTED) && (target->smp) &&
729                 (target->gdb_service) &&
730                 (!target->gdb_service->target)) {
731                 target->gdb_service->target =
732                         get_cortex_a(target, target->gdb_service->core[1]);
733                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
734                 return retval;
735         }
736         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
737                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
738         if (retval != ERROR_OK)
739                 return retval;
740         cortex_a->cpudbg_dscr = dscr;
741
742         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
743                 if (prev_target_state != TARGET_HALTED) {
744                         /* We have a halting debug event */
745                         LOG_DEBUG("Target halted");
746                         target->state = TARGET_HALTED;
747
748                         retval = cortex_a_debug_entry(target);
749                         if (retval != ERROR_OK)
750                                 return retval;
751
752                         if (target->smp) {
753                                 retval = update_halt_gdb(target);
754                                 if (retval != ERROR_OK)
755                                         return retval;
756                         }
757
758                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
759                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
760                         } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
761                                 if (arm_semihosting(target, &retval) != 0)
762                                         return retval;
763
764                                 target_call_event_callbacks(target,
765                                         TARGET_EVENT_HALTED);
766                         }
767                 }
768         } else
769                 target->state = TARGET_RUNNING;
770
771         return retval;
772 }
773
774 static int cortex_a_halt(struct target *target)
775 {
776         int retval;
777         uint32_t dscr;
778         struct armv7a_common *armv7a = target_to_armv7a(target);
779
780         /*
781          * Tell the core to be halted by writing DRCR with 0x1
782          * and then wait for the core to be halted.
783          */
784         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
785                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
786         if (retval != ERROR_OK)
787                 return retval;
788
789         dscr = 0; /* force read of dscr */
790         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
791                         DSCR_CORE_HALTED, &dscr);
792         if (retval != ERROR_OK) {
793                 LOG_ERROR("Error waiting for halt");
794                 return retval;
795         }
796
797         target->debug_reason = DBG_REASON_DBGRQ;
798
799         return ERROR_OK;
800 }
801
802 static int cortex_a_internal_restore(struct target *target, int current,
803         target_addr_t *address, int handle_breakpoints, int debug_execution)
804 {
805         struct armv7a_common *armv7a = target_to_armv7a(target);
806         struct arm *arm = &armv7a->arm;
807         int retval;
808         uint32_t resume_pc;
809
810         if (!debug_execution)
811                 target_free_all_working_areas(target);
812
813 #if 0
814         if (debug_execution) {
815                 /* Disable interrupts */
816                 /* We disable interrupts in the PRIMASK register instead of
817                  * masking with C_MASKINTS,
818                  * This is probably the same issue as Cortex-M3 Errata 377493:
819                  * C_MASKINTS in parallel with disabled interrupts can cause
820                  * local faults to not be taken. */
821                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
822                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
823                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
824
825                 /* Make sure we are in Thumb mode */
826                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
827                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
828                         32) | (1 << 24));
829                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
830                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
831         }
832 #endif
833
834         /* current = 1: continue on current pc, otherwise continue at <address> */
835         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
836         if (!current)
837                 resume_pc = *address;
838         else
839                 *address = resume_pc;
840
841         /* Make sure that the Armv7 gdb thumb fixups do not
842          * kill the return address
843          */
844         switch (arm->core_state) {
845                 case ARM_STATE_ARM:
846                         resume_pc &= 0xFFFFFFFC;
847                         break;
848                 case ARM_STATE_THUMB:
849                 case ARM_STATE_THUMB_EE:
850                         /* When the return address is loaded into PC
851                          * bit 0 must be 1 to stay in Thumb state
852                          */
853                         resume_pc |= 0x1;
854                         break;
855                 case ARM_STATE_JAZELLE:
856                         LOG_ERROR("How do I resume into Jazelle state??");
857                         return ERROR_FAIL;
858                 case ARM_STATE_AARCH64:
859                         LOG_ERROR("Shouldn't be in AARCH64 state");
860                         return ERROR_FAIL;
861         }
862         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
863         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
864         arm->pc->dirty = true;
865         arm->pc->valid = true;
866
867         /* restore dpm_mode at system halt */
868         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
869         /* call it now, before restoring the context, because it uses cpu
870          * register r0 for restoring the cp15 control register */
871         retval = cortex_a_restore_cp15_control_reg(target);
872         if (retval != ERROR_OK)
873                 return retval;
874         retval = cortex_a_restore_context(target, handle_breakpoints);
875         if (retval != ERROR_OK)
876                 return retval;
877         target->debug_reason = DBG_REASON_NOTHALTED;
878         target->state = TARGET_RUNNING;
879
880         /* registers are now invalid */
881         register_cache_invalidate(arm->core_cache);
882
883 #if 0
884         /* the front-end may request us not to handle breakpoints */
885         if (handle_breakpoints) {
886                 /* Single step past breakpoint at current address */
887                 breakpoint = breakpoint_find(target, resume_pc);
888                 if (breakpoint) {
889                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
890                         cortex_m3_unset_breakpoint(target, breakpoint);
891                         cortex_m3_single_step_core(target);
892                         cortex_m3_set_breakpoint(target, breakpoint);
893                 }
894         }
895
896 #endif
897         return retval;
898 }
899
900 static int cortex_a_internal_restart(struct target *target)
901 {
902         struct armv7a_common *armv7a = target_to_armv7a(target);
903         struct arm *arm = &armv7a->arm;
904         int retval;
905         uint32_t dscr;
906         /*
907          * Restart core and wait for it to be started.  Clear ITRen and sticky
908          * exception flags: see ARMv7 ARM, C5.9.
909          *
910          * REVISIT: for single stepping, we probably want to
911          * disable IRQs by default, with optional override...
912          */
913
914         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
915                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
916         if (retval != ERROR_OK)
917                 return retval;
918
919         if ((dscr & DSCR_INSTR_COMP) == 0)
920                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
921
922         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
923                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
924         if (retval != ERROR_OK)
925                 return retval;
926
927         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
928                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
929                         DRCR_CLEAR_EXCEPTIONS);
930         if (retval != ERROR_OK)
931                 return retval;
932
933         dscr = 0; /* force read of dscr */
934         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
935                         DSCR_CORE_RESTARTED, &dscr);
936         if (retval != ERROR_OK) {
937                 LOG_ERROR("Error waiting for resume");
938                 return retval;
939         }
940
941         target->debug_reason = DBG_REASON_NOTHALTED;
942         target->state = TARGET_RUNNING;
943
944         /* registers are now invalid */
945         register_cache_invalidate(arm->core_cache);
946
947         return ERROR_OK;
948 }
949
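/* Resume every other examined, non-running core of the SMP group at its
 * current pc (no single-stepping). */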
950 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
951 {
952         int retval = 0;
953         struct target_list *head;
954         struct target *curr;
955         target_addr_t address;
956         head = target->head;
957         while (head) {
958                 curr = head->target;
959                 if ((curr != target) && (curr->state != TARGET_RUNNING)
960                         && target_was_examined(curr)) {
961                         /*  resume at current address, not in step mode */
962                         retval += cortex_a_internal_restore(curr, 1, &address,
963                                         handle_breakpoints, 0);
964                         retval += cortex_a_internal_restart(curr);
965                 }
966                 head = head->next;
967
968         }
969         return retval;
970 }
971
972 static int cortex_a_resume(struct target *target, int current,
973         target_addr_t address, int handle_breakpoints, int debug_execution)
974 {
975         int retval = 0;
976         /* dummy resume for smp toggle in order to reduce gdb impact  */
977         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
978                 /*   simulate a start and halt of target */
979                 target->gdb_service->target = NULL;
980                 target->gdb_service->core[0] = target->gdb_service->core[1];
981                 /*  fake resume: at the next poll we play target core[1], see poll */
982                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
983                 return 0;
984         }
985         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
986         if (target->smp) {
987                 target->gdb_service->core[0] = -1;
988                 retval = cortex_a_restore_smp(target, handle_breakpoints);
989                 if (retval != ERROR_OK)
990                         return retval;
991         }
992         cortex_a_internal_restart(target);
993
994         if (!debug_execution) {
995                 target->state = TARGET_RUNNING;
996                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
997                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
998         } else {
999                 target->state = TARGET_DEBUG_RUNNING;
1000                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1001                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1002         }
1003
1004         return ERROR_OK;
1005 }
1006
1007 static int cortex_a_debug_entry(struct target *target)
1008 {
1009         uint32_t dscr;
1010         int retval = ERROR_OK;
1011         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1012         struct armv7a_common *armv7a = target_to_armv7a(target);
1013         struct arm *arm = &armv7a->arm;
1014
1015         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1016
1017         /* REVISIT surely we should not re-read DSCR !! */
1018         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1019                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1020         if (retval != ERROR_OK)
1021                 return retval;
1022
1023         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1024          * imprecise data aborts get discarded by issuing a Data
1025          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1026          */
1027
1028         /* Enable the ITR execution once we are in debug mode */
1029         dscr |= DSCR_ITR_EN;
1030         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1031                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1032         if (retval != ERROR_OK)
1033                 return retval;
1034
1035         /* Examine debug reason */
1036         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1037
1038         /* save address of instruction that triggered the watchpoint? */
1039         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1040                 uint32_t wfar;
1041
1042                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1043                                 armv7a->debug_base + CPUDBG_WFAR,
1044                                 &wfar);
1045                 if (retval != ERROR_OK)
1046                         return retval;
1047                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1048         }
1049
1050         /* First load the registers accessible through the core debug port */
1051         retval = arm_dpm_read_current_registers(&armv7a->dpm);
1052         if (retval != ERROR_OK)
1053                 return retval;
1054
1055         if (arm->spsr) {
1056                 /* read SPSR */
1057                 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1058                 if (retval != ERROR_OK)
1059                         return retval;
1060         }
1061
1062 #if 0
1063 /* TODO, Move this */
1064         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1065         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1066         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1067
1068         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1069         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1070
1071         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1072         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1073 #endif
1074
1075         /* Are we in an exception handler */
1076 /*      armv4_5->exception_number = 0; */
1077         if (armv7a->post_debug_entry) {
1078                 retval = armv7a->post_debug_entry(target);
1079                 if (retval != ERROR_OK)
1080                         return retval;
1081         }
1082
1083         return retval;
1084 }
1085
1086 static int cortex_a_post_debug_entry(struct target *target)
1087 {
1088         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1089         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1090         int retval;
1091
1092         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1093         retval = armv7a->arm.mrc(target, 15,
1094                         0, 0,   /* op1, op2 */
1095                         1, 0,   /* CRn, CRm */
1096                         &cortex_a->cp15_control_reg);
1097         if (retval != ERROR_OK)
1098                 return retval;
1099         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1100         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1101
1102         if (!armv7a->is_armv7r)
1103                 armv7a_read_ttbcr(target);
1104
1105         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1106                 armv7a_identify_cache(target);
1107
1108         if (armv7a->is_armv7r) {
1109                 armv7a->armv7a_mmu.mmu_enabled = 0;
1110         } else {
1111                 armv7a->armv7a_mmu.mmu_enabled =
1112                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1113         }
1114         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1115                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1116         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1117                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1118         cortex_a->curr_mode = armv7a->arm.core_mode;
1119
1120         /* switch to SVC mode to read DACR */
1121         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1122         armv7a->arm.mrc(target, 15,
1123                         0, 0, 3, 0,
1124                         &cortex_a->cp15_dacr_reg);
1125
1126         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1127                         cortex_a->cp15_dacr_reg);
1128
1129         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1130         return ERROR_OK;
1131 }
1132
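/* Read-modify-write of DSCR: clear the bits selected by bit_mask, then set
 * them from value. */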
1133 static int cortex_a_set_dscr_bits(struct target *target,
1134                 unsigned long bit_mask, unsigned long value)
1135 {
1136         struct armv7a_common *armv7a = target_to_armv7a(target);
1137         uint32_t dscr;
1138
1139         /* Read DSCR */
1140         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1141                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1142         if (retval != ERROR_OK)
1143                 return retval;
1144
1145         /* clear bitfield */
1146         dscr &= ~bit_mask;
1147         /* put new value */
1148         dscr |= value & bit_mask;
1149
1150         /* write new DSCR */
1151         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1152                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1153         return retval;
1154 }
1155
1156 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1157         int handle_breakpoints)
1158 {
1159         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1160         struct armv7a_common *armv7a = target_to_armv7a(target);
1161         struct arm *arm = &armv7a->arm;
1162         struct breakpoint *breakpoint = NULL;
1163         struct breakpoint stepbreakpoint;
1164         struct reg *r;
1165         int retval;
1166
1167         if (target->state != TARGET_HALTED) {
1168                 LOG_WARNING("target not halted");
1169                 return ERROR_TARGET_NOT_HALTED;
1170         }
1171
1172         /* current = 1: continue on current pc, otherwise continue at <address> */
1173         r = arm->pc;
1174         if (!current)
1175                 buf_set_u32(r->value, 0, 32, address);
1176         else
1177                 address = buf_get_u32(r->value, 0, 32);
1178
1179         /* The front-end may request us not to handle breakpoints.
1180          * But since Cortex-A uses breakpoint for single step,
1181          * we MUST handle breakpoints.
1182          */
1183         handle_breakpoints = 1;
1184         if (handle_breakpoints) {
1185                 breakpoint = breakpoint_find(target, address);
1186                 if (breakpoint)
1187                         cortex_a_unset_breakpoint(target, breakpoint);
1188         }
1189
1190         /* Setup single step breakpoint */
1191         stepbreakpoint.address = address;
1192         stepbreakpoint.asid = 0;
1193         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1194                 ? 2 : 4;
1195         stepbreakpoint.type = BKPT_HARD;
1196         stepbreakpoint.set = 0;
1197
1198         /* Disable interrupts during single step if requested */
1199         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1200                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1201                 if (retval != ERROR_OK)
1202                         return retval;
1203         }
1204
1205         /* Break on IVA mismatch */
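        /* matchmode 0x04 programs the breakpoint unit for instruction address
         * *mismatch*, so the core halts again as soon as it has executed the
         * single instruction at 'address'. */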
1206         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1207
1208         target->debug_reason = DBG_REASON_SINGLESTEP;
1209
1210         retval = cortex_a_resume(target, 1, address, 0, 0);
1211         if (retval != ERROR_OK)
1212                 return retval;
1213
1214         int64_t then = timeval_ms();
1215         while (target->state != TARGET_HALTED) {
1216                 retval = cortex_a_poll(target);
1217                 if (retval != ERROR_OK)
1218                         return retval;
1219                 if (target->state == TARGET_HALTED)
1220                         break;
1221                 if (timeval_ms() > then + 1000) {
1222                         LOG_ERROR("timeout waiting for target halt");
1223                         return ERROR_FAIL;
1224                 }
1225         }
1226
1227         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1228
1229         /* Re-enable interrupts if they were disabled */
1230         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1231                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1232                 if (retval != ERROR_OK)
1233                         return retval;
1234         }
1235
1236
1237         target->debug_reason = DBG_REASON_BREAKPOINT;
1238
1239         if (breakpoint)
1240                 cortex_a_set_breakpoint(target, breakpoint, 0);
1241
1242         if (target->state == TARGET_HALTED)
1243                 LOG_DEBUG("target stepped");
1244
1245         return ERROR_OK;
1246 }
1247
1248 static int cortex_a_restore_context(struct target *target, bool bpwp)
1249 {
1250         struct armv7a_common *armv7a = target_to_armv7a(target);
1251
1252         LOG_DEBUG(" ");
1253
1254         if (armv7a->pre_restore_context)
1255                 armv7a->pre_restore_context(target);
1256
1257         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1258 }
1259
1260 /*
1261  * Cortex-A Breakpoint and watchpoint functions
1262  */
1263
1264 /* Setup hardware Breakpoint Register Pair */
1265 static int cortex_a_set_breakpoint(struct target *target,
1266         struct breakpoint *breakpoint, uint8_t matchmode)
1267 {
1268         int retval;
1269         int brp_i = 0;
1270         uint32_t control;
1271         uint8_t byte_addr_select = 0x0F;
1272         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1273         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1274         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1275
1276         if (breakpoint->set) {
1277                 LOG_WARNING("breakpoint already set");
1278                 return ERROR_OK;
1279         }
1280
1281         if (breakpoint->type == BKPT_HARD) {
1282                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1283                         brp_i++;
1284                 if (brp_i >= cortex_a->brp_num) {
1285                         LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1286                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1287                 }
1288                 breakpoint->set = brp_i + 1;
1289                 if (breakpoint->length == 2)
1290                         byte_addr_select = (3 << (breakpoint->address & 0x02));
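                /* The control word built below packs the DBGBCR fields (informational
                 * sketch): bits [22:20] matchmode / breakpoint type, bits [8:5]
                 * byte address select, bits [2:1] = 0b11 to match in both
                 * privileged and user modes, bit [0] enable. */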
1291                 control = ((matchmode & 0x7) << 20)
1292                         | (byte_addr_select << 5)
1293                         | (3 << 1) | 1;
1294                 brp_list[brp_i].used = true;
1295                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1296                 brp_list[brp_i].control = control;
1297                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1298                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1299                                 brp_list[brp_i].value);
1300                 if (retval != ERROR_OK)
1301                         return retval;
1302                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1303                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1304                                 brp_list[brp_i].control);
1305                 if (retval != ERROR_OK)
1306                         return retval;
1307                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1308                         brp_list[brp_i].control,
1309                         brp_list[brp_i].value);
1310         } else if (breakpoint->type == BKPT_SOFT) {
1311                 uint8_t code[4];
1312                 /* length == 2: Thumb breakpoint */
1313                 if (breakpoint->length == 2) {
1314                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1315                 } else if (breakpoint->length == 3) {
1316                         /* length == 3: Thumb-2 breakpoint, actual encoding is
1317                          * a regular Thumb BKPT instruction but we replace a
1318                          * 32bit Thumb-2 instruction, so fix-up the breakpoint
1319                          * length
1320                          */
1321                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1322                         breakpoint->length = 4;
1323                 } else {
1324                         /* length == 4, normal ARM breakpoint */
1325                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1326                 }
1327
1328                 retval = target_read_memory(target,
1329                                 breakpoint->address & 0xFFFFFFFE,
1330                                 breakpoint->length, 1,
1331                                 breakpoint->orig_instr);
1332                 if (retval != ERROR_OK)
1333                         return retval;
1334
1335                 /* make sure data cache is cleaned & invalidated down to PoC */
1336                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1337                         armv7a_cache_flush_virt(target, breakpoint->address,
1338                                                 breakpoint->length);
1339                 }
1340
1341                 retval = target_write_memory(target,
1342                                 breakpoint->address & 0xFFFFFFFE,
1343                                 breakpoint->length, 1, code);
1344                 if (retval != ERROR_OK)
1345                         return retval;
1346
1347                 /* update i-cache at breakpoint location */
1348                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1349                                         breakpoint->length);
1350                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1351                                                  breakpoint->length);
1352
1353                 breakpoint->set = 0x11; /* Any nice value but 0 */
1354         }
1355
1356         return ERROR_OK;
1357 }
1358
1359 static int cortex_a_set_context_breakpoint(struct target *target,
1360         struct breakpoint *breakpoint, uint8_t matchmode)
1361 {
1362         int retval = ERROR_FAIL;
1363         int brp_i = 0;
1364         uint32_t control;
1365         uint8_t byte_addr_select = 0x0F;
1366         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1367         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1368         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1369
1370         if (breakpoint->set) {
1371                 LOG_WARNING("breakpoint already set");
1372                 return retval;
1373         }
1374         /*check available context BRPs*/
1375         while ((brp_i < cortex_a->brp_num) &&
1376                 (brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
1377                 brp_i++;
1378
1379         if (brp_i >= cortex_a->brp_num) {
1380                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1381                 return ERROR_FAIL;
1382         }
1383
1384         breakpoint->set = brp_i + 1;
1385         control = ((matchmode & 0x7) << 20)
1386                 | (byte_addr_select << 5)
1387                 | (3 << 1) | 1;
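        /* Rough DBGBCR layout, per ARM DDI 0406C: bit [0] enables the BRP,
         * bits [2:1] = 0b11 allow matches from both privileged and user modes,
         * bits [8:5] are the byte address select, and the BT field at
         * bits [23:20] carries the match mode supplied by the caller (the 0x02
         * passed by cortex_a_add_context_breakpoint selects an unlinked
         * context ID compare against the ASID written to the BVR below). */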
1388         brp_list[brp_i].used = true;
1389         brp_list[brp_i].value = (breakpoint->asid);
1390         brp_list[brp_i].control = control;
1391         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1392                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1393                         brp_list[brp_i].value);
1394         if (retval != ERROR_OK)
1395                 return retval;
1396         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1397                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1398                         brp_list[brp_i].control);
1399         if (retval != ERROR_OK)
1400                 return retval;
1401         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1402                 brp_list[brp_i].control,
1403                 brp_list[brp_i].value);
1404         return ERROR_OK;
1405
1406 }
1407
1408 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1409 {
1410         int retval = ERROR_FAIL;
1411         int brp_1 = 0;  /* holds the contextID pair */
1412         int brp_2 = 0;  /* holds the IVA pair */
1413         uint32_t control_ctx, control_iva;
1414         uint8_t ctx_byte_addr_select = 0x0F;
1415         uint8_t iva_byte_addr_select = 0x0F;
1416         uint8_t ctx_matchmode = 0x03;
1417         uint8_t iva_matchmode = 0x01;
1418         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1419         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1420         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1421
1422         if (breakpoint->set) {
1423                 LOG_WARNING("breakpoint already set");
1424                 return retval;
1425         }
1426         /*check available context BRPs*/
1427         while ((brp_1 < cortex_a->brp_num) &&
1428                 (brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
1429                 brp_1++;
1430
1431         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1432         if (brp_1 >= cortex_a->brp_num) {
1433                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1434                 return ERROR_FAIL;
1435         }
1436
1437         while ((brp_2 < cortex_a->brp_num) &&
1438                 (brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
1439                 brp_2++;
1440
1441         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1442         if (brp_2 >= cortex_a->brp_num) {
1443                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1444                 return ERROR_FAIL;
1445         }
1446
1447         breakpoint->set = brp_1 + 1;
1448         breakpoint->linked_brp = brp_2;
1449         control_ctx = ((ctx_matchmode & 0x7) << 20)
1450                 | (brp_2 << 16)
1451                 | (0 << 14)
1452                 | (ctx_byte_addr_select << 5)
1453                 | (3 << 1) | 1;
1454         brp_list[brp_1].used = true;
1455         brp_list[brp_1].value = (breakpoint->asid);
1456         brp_list[brp_1].control = control_ctx;
1457         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1458                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1459                         brp_list[brp_1].value);
1460         if (retval != ERROR_OK)
1461                 return retval;
1462         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1463                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1464                         brp_list[brp_1].control);
1465         if (retval != ERROR_OK)
1466                 return retval;
1467
1468         control_iva = ((iva_matchmode & 0x7) << 20)
1469                 | (brp_1 << 16)
1470                 | (iva_byte_addr_select << 5)
1471                 | (3 << 1) | 1;
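        /* Roughly, per ARM DDI 0406C: the two BRPs are cross-linked through the
         * LBN field (bits [19:16]), and the BT field (bits [23:20]) marks brp_1
         * as a linked context ID compare (0x03) and brp_2 as a linked IVA
         * compare (0x01), so the address match only fires when the ASID in the
         * linked context BRP matches as well. */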
1472         brp_list[brp_2].used = true;
1473         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1474         brp_list[brp_2].control = control_iva;
1475         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1476                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1477                         brp_list[brp_2].value);
1478         if (retval != ERROR_OK)
1479                 return retval;
1480         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1481                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1482                         brp_list[brp_2].control);
1483         if (retval != ERROR_OK)
1484                 return retval;
1485
1486         return ERROR_OK;
1487 }
1488
1489 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1490 {
1491         int retval;
1492         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1493         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1494         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1495
1496         if (!breakpoint->set) {
1497                 LOG_WARNING("breakpoint not set");
1498                 return ERROR_OK;
1499         }
1500
1501         if (breakpoint->type == BKPT_HARD) {
1502                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1503                         int brp_i = breakpoint->set - 1;
1504                         int brp_j = breakpoint->linked_brp;
1505                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1506                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1507                                 return ERROR_OK;
1508                         }
1509                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1510                                 brp_list[brp_i].control, brp_list[brp_i].value);
1511                         brp_list[brp_i].used = false;
1512                         brp_list[brp_i].value = 0;
1513                         brp_list[brp_i].control = 0;
1514                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1515                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1516                                         brp_list[brp_i].control);
1517                         if (retval != ERROR_OK)
1518                                 return retval;
1519                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1520                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1521                                         brp_list[brp_i].value);
1522                         if (retval != ERROR_OK)
1523                                 return retval;
1524                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1525                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1526                                 return ERROR_OK;
1527                         }
1528                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1529                                 brp_list[brp_j].control, brp_list[brp_j].value);
1530                         brp_list[brp_j].used = false;
1531                         brp_list[brp_j].value = 0;
1532                         brp_list[brp_j].control = 0;
1533                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1534                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
1535                                         brp_list[brp_j].control);
1536                         if (retval != ERROR_OK)
1537                                 return retval;
1538                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1539                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
1540                                         brp_list[brp_j].value);
1541                         if (retval != ERROR_OK)
1542                                 return retval;
1543                         breakpoint->linked_brp = 0;
1544                         breakpoint->set = 0;
1545                         return ERROR_OK;
1546
1547                 } else {
1548                         int brp_i = breakpoint->set - 1;
1549                         if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
1550                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1551                                 return ERROR_OK;
1552                         }
1553                         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1554                                 brp_list[brp_i].control, brp_list[brp_i].value);
1555                         brp_list[brp_i].used = false;
1556                         brp_list[brp_i].value = 0;
1557                         brp_list[brp_i].control = 0;
1558                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1559                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1560                                         brp_list[brp_i].control);
1561                         if (retval != ERROR_OK)
1562                                 return retval;
1563                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1564                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1565                                         brp_list[brp_i].value);
1566                         if (retval != ERROR_OK)
1567                                 return retval;
1568                         breakpoint->set = 0;
1569                         return ERROR_OK;
1570                 }
1571         } else {
1572
1573                 /* make sure data cache is cleaned & invalidated down to PoC */
1574                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1575                         armv7a_cache_flush_virt(target, breakpoint->address,
1576                                                 breakpoint->length);
1577                 }
1578
1579                 /* restore original instruction (kept in target endianness) */
1580                 if (breakpoint->length == 4) {
1581                         retval = target_write_memory(target,
1582                                         breakpoint->address & 0xFFFFFFFE,
1583                                         4, 1, breakpoint->orig_instr);
1584                         if (retval != ERROR_OK)
1585                                 return retval;
1586                 } else {
1587                         retval = target_write_memory(target,
1588                                         breakpoint->address & 0xFFFFFFFE,
1589                                         2, 1, breakpoint->orig_instr);
1590                         if (retval != ERROR_OK)
1591                                 return retval;
1592                 }
1593
1594                 /* update i-cache at breakpoint location */
1595                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1596                                                  breakpoint->length);
1597                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1598                                                  breakpoint->length);
1599         }
1600         breakpoint->set = 0;
1601
1602         return ERROR_OK;
1603 }
1604
1605 static int cortex_a_add_breakpoint(struct target *target,
1606         struct breakpoint *breakpoint)
1607 {
1608         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1609
1610         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1611                 LOG_INFO("no hardware breakpoint available");
1612                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1613         }
1614
1615         if (breakpoint->type == BKPT_HARD)
1616                 cortex_a->brp_num_available--;
1617
1618         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1619 }
1620
1621 static int cortex_a_add_context_breakpoint(struct target *target,
1622         struct breakpoint *breakpoint)
1623 {
1624         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1625
1626         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1627                 LOG_INFO("no hardware breakpoint available");
1628                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1629         }
1630
1631         if (breakpoint->type == BKPT_HARD)
1632                 cortex_a->brp_num_available--;
1633
1634         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1635 }
1636
1637 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1638         struct breakpoint *breakpoint)
1639 {
1640         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1641
1642         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1643                 LOG_INFO("no hardware breakpoint available");
1644                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1645         }
1646
1647         if (breakpoint->type == BKPT_HARD)
1648                 cortex_a->brp_num_available--;
1649
1650         return cortex_a_set_hybrid_breakpoint(target, breakpoint);      /* linked context ID + IVA match */
1651 }
1652
1653
1654 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1655 {
1656         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1657
1658 #if 0
1659 /* It is perfectly possible to remove breakpoints while the target is running */
1660         if (target->state != TARGET_HALTED) {
1661                 LOG_WARNING("target not halted");
1662                 return ERROR_TARGET_NOT_HALTED;
1663         }
1664 #endif
1665
1666         if (breakpoint->set) {
1667                 cortex_a_unset_breakpoint(target, breakpoint);
1668                 if (breakpoint->type == BKPT_HARD)
1669                         cortex_a->brp_num_available++;
1670         }
1671
1672
1673         return ERROR_OK;
1674 }
1675
1676 /**
1677  * Sets a watchpoint for a Cortex-A target in one of the watchpoint units.  It is
1678  * considered a bug to call this function when there are no available watchpoint
1679  * units.
1680  *
1681  * @param target Pointer to a Cortex-A target to set a watchpoint on
1682  * @param watchpoint Pointer to the watchpoint to be set
1683  * @return Error status if watchpoint set fails or the result of executing the
1684  * JTAG queue
1685  */
1686 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1687 {
1688         int retval = ERROR_OK;
1689         int wrp_i = 0;
1690         uint32_t control;
1691         uint32_t address;
1692         uint8_t address_mask;
1693         uint8_t byte_address_select;
1694         uint8_t load_store_access_control = 0x3;
1695         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1696         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1697         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1698
1699         if (watchpoint->set) {
1700                 LOG_WARNING("watchpoint already set");
1701                 return retval;
1702         }
1703
1704         /* find a free WRP */
1705         while ((wrp_i < cortex_a->wrp_num) && wrp_list[wrp_i].used)
1706                 wrp_i++;
1707
1708         if (wrp_i >= cortex_a->wrp_num) {
1709                 LOG_ERROR("Cannot find a free Watchpoint Register Pair");
1710                 return ERROR_FAIL;
1711         }
1712
1713         if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1714                         (watchpoint->length & (watchpoint->length - 1))) {
1715                 LOG_WARNING("watchpoint length must be a power of 2");
1716                 return ERROR_FAIL;
1717         }
1718
1719         if (watchpoint->address & (watchpoint->length - 1)) {
1720                 LOG_WARNING("watchpoint address must be aligned to its length");
1721                 return ERROR_FAIL;
1722         }
1723
1724         /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing?  */
1725         /* handle wp length 1 and 2 through byte select */
1726         switch (watchpoint->length) {
1727         case 1:
1728                 byte_address_select = BIT(watchpoint->address & 0x3);
1729                 address = watchpoint->address & ~0x3;
1730                 address_mask = 0;
1731                 break;
1732
1733         case 2:
1734                 byte_address_select = 0x03 << (watchpoint->address & 0x2);
1735                 address = watchpoint->address & ~0x3;
1736                 address_mask = 0;
1737                 break;
1738
1739         case 4:
1740                 byte_address_select = 0x0f;
1741                 address = watchpoint->address;
1742                 address_mask = 0;
1743                 break;
1744
1745         default:
1746                 byte_address_select = 0xff;
1747                 address = watchpoint->address;
1748                 address_mask = ilog2(watchpoint->length);
1749                 break;
1750         }
1751
1752         watchpoint->set = wrp_i + 1;
1753         control = (address_mask << 24) |
1754                 (byte_address_select << 5) |
1755                 (load_store_access_control << 3) |
1756                 (0x3 << 1) | 1;
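        /* Rough DBGWCR layout, per ARM DDI 0406C: bit [0] enables the WRP,
         * bits [2:1] = 0b11 match privileged and user accesses, bits [4:3]
         * select loads/stores (0x3 = both), bits [12:5] are the byte address
         * select and bits [28:24] the address mask, so e.g. a 2-byte watch at
         * offset 2 uses BAS = 0b1100 with mask 0, while an 8-byte watch uses
         * BAS = 0xff with mask = ilog2(8) = 3 to cover a 2^3-byte range. */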
1757         wrp_list[wrp_i].used = true;
1758         wrp_list[wrp_i].value = address;
1759         wrp_list[wrp_i].control = control;
1760
1761         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1762                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1763                         wrp_list[wrp_i].value);
1764         if (retval != ERROR_OK)
1765                 return retval;
1766
1767         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1768                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1769                         wrp_list[wrp_i].control);
1770         if (retval != ERROR_OK)
1771                 return retval;
1772
1773         LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1774                         wrp_list[wrp_i].control,
1775                         wrp_list[wrp_i].value);
1776
1777         return ERROR_OK;
1778 }
1779
1780 /**
1781  * Unset an existing watchpoint and clear the used watchpoint unit.
1782  *
1783  * @param target Pointer to the target to have the watchpoint removed
1784  * @param watchpoint Pointer to the watchpoint to be removed
1785  * @return Error status while trying to unset the watchpoint or the result of
1786  *         executing the JTAG queue
1787  */
1788 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1789 {
1790         int retval;
1791         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1792         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1793         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1794
1795         if (!watchpoint->set) {
1796                 LOG_WARNING("watchpoint not set");
1797                 return ERROR_OK;
1798         }
1799
1800         int wrp_i = watchpoint->set - 1;
1801         if (wrp_i < 0 || wrp_i >= cortex_a->wrp_num) {
1802                 LOG_DEBUG("Invalid WRP number in watchpoint");
1803                 return ERROR_OK;
1804         }
1805         LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1806                         wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1807         wrp_list[wrp_i].used = false;
1808         wrp_list[wrp_i].value = 0;
1809         wrp_list[wrp_i].control = 0;
1810         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1811                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1812                         wrp_list[wrp_i].control);
1813         if (retval != ERROR_OK)
1814                 return retval;
1815         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1816                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1817                         wrp_list[wrp_i].value);
1818         if (retval != ERROR_OK)
1819                 return retval;
1820         watchpoint->set = 0;
1821
1822         return ERROR_OK;
1823 }
1824
1825 /**
1826  * Add a watchpoint to a Cortex-A target.  If there are no watchpoint units
1827  * available, an error response is returned.
1828  *
1829  * @param target Pointer to the Cortex-A target to add a watchpoint to
1830  * @param watchpoint Pointer to the watchpoint to be added
1831  * @return Error status while trying to add the watchpoint
1832  */
1833 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1834 {
1835         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1836
1837         if (cortex_a->wrp_num_available < 1) {
1838                 LOG_INFO("no hardware watchpoint available");
1839                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1840         }
1841
1842         int retval = cortex_a_set_watchpoint(target, watchpoint);
1843         if (retval != ERROR_OK)
1844                 return retval;
1845
1846         cortex_a->wrp_num_available--;
1847         return ERROR_OK;
1848 }
1849
1850 /**
1851  * Remove a watchpoint from a Cortex-A target.  The watchpoint will be unset and
1852  * the watchpoint unit it occupied will be freed for reuse.
1853  *
1854  * @param target Pointer to the target to remove a watchpoint from
1855  * @param watchpoint Pointer to the watchpoint to be removed
1856  * @return Result of trying to unset the watchpoint
1857  */
1858 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1859 {
1860         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1861
1862         if (watchpoint->set) {
1863                 cortex_a->wrp_num_available++;
1864                 cortex_a_unset_watchpoint(target, watchpoint);
1865         }
1866         return ERROR_OK;
1867 }
1868
1869
1870 /*
1871  * Cortex-A Reset functions
1872  */
1873
1874 static int cortex_a_assert_reset(struct target *target)
1875 {
1876         struct armv7a_common *armv7a = target_to_armv7a(target);
1877
1878         LOG_DEBUG(" ");
1879
1880         /* FIXME when halt is requested, make it work somehow... */
1881
1882         /* This function can be called in "target not examined" state */
1883
1884         /* Issue some kind of warm reset. */
1885         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1886                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1887         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1888                 /* REVISIT handle "pulls" cases, if there's
1889                  * hardware that needs them to work.
1890                  */
1891
1892                 /*
1893                  * FIXME: fix reset when transport is not JTAG. This is a temporary
1894                  * work-around for release v0.10 that is not intended to stay!
1895                  */
1896                 if (!transport_is_jtag() ||
1897                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1898                         adapter_assert_reset();
1899
1900         } else {
1901                 LOG_ERROR("%s: how to reset?", target_name(target));
1902                 return ERROR_FAIL;
1903         }
1904
1905         /* registers are now invalid */
1906         if (target_was_examined(target))
1907                 register_cache_invalidate(armv7a->arm.core_cache);
1908
1909         target->state = TARGET_RESET;
1910
1911         return ERROR_OK;
1912 }
1913
1914 static int cortex_a_deassert_reset(struct target *target)
1915 {
1916         struct armv7a_common *armv7a = target_to_armv7a(target);
1917         int retval;
1918
1919         LOG_DEBUG(" ");
1920
1921         /* be certain SRST is off */
1922         adapter_deassert_reset();
1923
1924         if (target_was_examined(target)) {
1925                 retval = cortex_a_poll(target);
1926                 if (retval != ERROR_OK)
1927                         return retval;
1928         }
1929
1930         if (target->reset_halt) {
1931                 if (target->state != TARGET_HALTED) {
1932                         LOG_WARNING("%s: ran after reset and before halt ...",
1933                                 target_name(target));
1934                         if (target_was_examined(target)) {
1935                                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1936                                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
1937                                 if (retval != ERROR_OK)
1938                                         return retval;
1939                         } else
1940                                 target->state = TARGET_UNKNOWN;
1941                 }
1942         }
1943
1944         return ERROR_OK;
1945 }
1946
1947 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1948 {
1949         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1950          * New desired mode must be in mode. Current value of DSCR must be in
1951          * *dscr, which is updated with new value.
1952          *
1953          * This function elides actually sending the mode-change over the debug
1954          * interface if the mode is already set as desired.
1955          */
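        /* Roughly: in non-blocking mode external DTR accesses never stall, in
         * stall mode they stall until the core has drained or filled the DTR,
         * and fast mode additionally re-issues the instruction latched in the
         * ITR on every DTR access, which is what the fast memory read/write
         * paths below rely on. */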
1956         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1957         if (new_dscr != *dscr) {
1958                 struct armv7a_common *armv7a = target_to_armv7a(target);
1959                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1960                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1961                 if (retval == ERROR_OK)
1962                         *dscr = new_dscr;
1963                 return retval;
1964         } else {
1965                 return ERROR_OK;
1966         }
1967 }
1968
1969 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1970         uint32_t value, uint32_t *dscr)
1971 {
1972         /* Waits until the specified bit(s) of DSCR take on a specified value. */
1973         struct armv7a_common *armv7a = target_to_armv7a(target);
1974         int64_t then;
1975         int retval;
1976
1977         if ((*dscr & mask) == value)
1978                 return ERROR_OK;
1979
1980         then = timeval_ms();
1981         while (1) {
1982                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1983                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
1984                 if (retval != ERROR_OK) {
1985                         LOG_ERROR("Could not read DSCR register");
1986                         return retval;
1987                 }
1988                 if ((*dscr & mask) == value)
1989                         break;
1990                 if (timeval_ms() > then + 1000) {
1991                         LOG_ERROR("timeout waiting for DSCR bit change");
1992                         return ERROR_FAIL;
1993                 }
1994         }
1995         return ERROR_OK;
1996 }
1997
1998 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1999         uint32_t *data, uint32_t *dscr)
2000 {
2001         int retval;
2002         struct armv7a_common *armv7a = target_to_armv7a(target);
2003
2004         /* Move from coprocessor to R0. */
2005         retval = cortex_a_exec_opcode(target, opcode, dscr);
2006         if (retval != ERROR_OK)
2007                 return retval;
2008
2009         /* Move from R0 to DTRTX. */
2010         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2011         if (retval != ERROR_OK)
2012                 return retval;
2013
2014         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2015          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2016          * must also check TXfull_l). Most of the time this will be free
2017          * because TXfull_l will be set immediately and cached in dscr. */
2018         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2019                         DSCR_DTRTX_FULL_LATCHED, dscr);
2020         if (retval != ERROR_OK)
2021                 return retval;
2022
2023         /* Read the value transferred to DTRTX. */
2024         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2025                         armv7a->debug_base + CPUDBG_DTRTX, data);
2026         if (retval != ERROR_OK)
2027                 return retval;
2028
2029         return ERROR_OK;
2030 }
2031
2032 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2033         uint32_t *dfsr, uint32_t *dscr)
2034 {
2035         int retval;
2036
2037         if (dfar) {
2038                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2039                 if (retval != ERROR_OK)
2040                         return retval;
2041         }
2042
2043         if (dfsr) {
2044                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2045                 if (retval != ERROR_OK)
2046                         return retval;
2047         }
2048
2049         return ERROR_OK;
2050 }
2051
2052 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2053         uint32_t data, uint32_t *dscr)
2054 {
2055         int retval;
2056         struct armv7a_common *armv7a = target_to_armv7a(target);
2057
2058         /* Write the value into DTRRX. */
2059         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2060                         armv7a->debug_base + CPUDBG_DTRRX, data);
2061         if (retval != ERROR_OK)
2062                 return retval;
2063
2064         /* Move from DTRRX to R0. */
2065         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2066         if (retval != ERROR_OK)
2067                 return retval;
2068
2069         /* Move from R0 to coprocessor. */
2070         retval = cortex_a_exec_opcode(target, opcode, dscr);
2071         if (retval != ERROR_OK)
2072                 return retval;
2073
2074         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2075          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2076          * check RXfull_l). Most of the time this will be free because RXfull_l
2077          * will be cleared immediately and cached in dscr. */
2078         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2079         if (retval != ERROR_OK)
2080                 return retval;
2081
2082         return ERROR_OK;
2083 }
2084
2085 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2086         uint32_t dfsr, uint32_t *dscr)
2087 {
2088         int retval;
2089
2090         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2091         if (retval != ERROR_OK)
2092                 return retval;
2093
2094         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2095         if (retval != ERROR_OK)
2096                 return retval;
2097
2098         return ERROR_OK;
2099 }
2100
2101 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2102 {
2103         uint32_t status, upper4;
2104
2105         if (dfsr & (1 << 9)) {
2106                 /* LPAE format. */
2107                 status = dfsr & 0x3f;
2108                 upper4 = status >> 2;
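                /* For example, a level 1 translation fault reports status
                 * 0b000101 (upper4 == 1), while 0b100001 (33) is an alignment
                 * fault. */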
2109                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2110                         return ERROR_TARGET_TRANSLATION_FAULT;
2111                 else if (status == 33)
2112                         return ERROR_TARGET_UNALIGNED_ACCESS;
2113                 else
2114                         return ERROR_TARGET_DATA_ABORT;
2115         } else {
2116                 /* Normal format. */
2117                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
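                /* status recombines FS[4] (DFSR bit 10) with FS[3:0]; e.g.
                 * 0b00001 is an alignment fault and 0b00101 a section
                 * translation fault. */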
2118                 if (status == 1)
2119                         return ERROR_TARGET_UNALIGNED_ACCESS;
2120                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2121                                 status == 9 || status == 11 || status == 13 || status == 15)
2122                         return ERROR_TARGET_TRANSLATION_FAULT;
2123                 else
2124                         return ERROR_TARGET_DATA_ABORT;
2125         }
2126 }
2127
2128 static int cortex_a_write_cpu_memory_slow(struct target *target,
2129         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2130 {
2131         /* Writes count objects of size size from *buffer. Old value of DSCR must
2132          * be in *dscr; updated to new value. This is slow because it works for
2133          * non-word-sized objects. Avoid unaligned accesses as they do not work
2134          * on memory that lacks the "Normal" attribute. If size == 4 and
2135          * the address is aligned, cortex_a_write_cpu_memory_fast should be
2136          * preferred.
2137          * Preconditions:
2138          * - Address is in R0.
2139          * - R0 is marked dirty.
2140          */
2141         struct armv7a_common *armv7a = target_to_armv7a(target);
2142         struct arm *arm = &armv7a->arm;
2143         int retval;
2144
2145         /* Mark register R1 as dirty, to use for transferring data. */
2146         arm_reg_current(arm, 1)->dirty = true;
2147
2148         /* Switch to non-blocking mode if not already in that mode. */
2149         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2150         if (retval != ERROR_OK)
2151                 return retval;
2152
2153         /* Go through the objects. */
2154         while (count) {
2155                 /* Write the value to store into DTRRX. */
2156                 uint32_t data, opcode;
2157                 if (size == 1)
2158                         data = *buffer;
2159                 else if (size == 2)
2160                         data = target_buffer_get_u16(target, buffer);
2161                 else
2162                         data = target_buffer_get_u32(target, buffer);
2163                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2164                                 armv7a->debug_base + CPUDBG_DTRRX, data);
2165                 if (retval != ERROR_OK)
2166                         return retval;
2167
2168                 /* Transfer the value from DTRRX to R1. */
2169                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2170                 if (retval != ERROR_OK)
2171                         return retval;
2172
2173                 /* Write the value transferred to R1 into memory. */
2174                 if (size == 1)
2175                         opcode = ARMV4_5_STRB_IP(1, 0);
2176                 else if (size == 2)
2177                         opcode = ARMV4_5_STRH_IP(1, 0);
2178                 else
2179                         opcode = ARMV4_5_STRW_IP(1, 0);
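                /* The *_IP helpers appear to encode post-indexed stores
                 * (e.g. STR R1, [R0], #4), so R0 advances by one element per
                 * iteration and never needs to be reloaded. */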
2180                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2181                 if (retval != ERROR_OK)
2182                         return retval;
2183
2184                 /* Check for faults and return early. */
2185                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2186                         return ERROR_OK; /* A data fault is not considered a system failure. */
2187
2188                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2189                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2190                  * must also check RXfull_l). Most of the time this will be free
2191                  * because RXfull_l will be cleared immediately and cached in dscr. */
2192                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2193                 if (retval != ERROR_OK)
2194                         return retval;
2195
2196                 /* Advance. */
2197                 buffer += size;
2198                 --count;
2199         }
2200
2201         return ERROR_OK;
2202 }
2203
2204 static int cortex_a_write_cpu_memory_fast(struct target *target,
2205         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2206 {
2207         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2208          * in *dscr; updated to new value. This is fast but only works for
2209          * word-sized objects at aligned addresses.
2210          * Preconditions:
2211          * - Address is in R0 and must be a multiple of 4.
2212          * - R0 is marked dirty.
2213          */
2214         struct armv7a_common *armv7a = target_to_armv7a(target);
2215         int retval;
2216
2217         /* Switch to fast mode if not already in that mode. */
2218         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2219         if (retval != ERROR_OK)
2220                 return retval;
2221
2222         /* Latch STC instruction. */
2223         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2224                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2225         if (retval != ERROR_OK)
2226                 return retval;
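        /* The latched opcode should be STC p14, c5, [R0], #4, i.e. "store the
         * DTRRX value to [R0] and post-increment R0"; in fast mode every write
         * to DTRRX below re-executes it, streaming the buffer into memory. */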
2227
2228         /* Transfer all the data and issue all the instructions. */
2229         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2230                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
2231 }
2232
2233 static int cortex_a_write_cpu_memory(struct target *target,
2234         uint32_t address, uint32_t size,
2235         uint32_t count, const uint8_t *buffer)
2236 {
2237         /* Write memory through the CPU. */
2238         int retval, final_retval;
2239         struct armv7a_common *armv7a = target_to_armv7a(target);
2240         struct arm *arm = &armv7a->arm;
2241         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2242
2243         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2244                           address, size, count);
2245         if (target->state != TARGET_HALTED) {
2246                 LOG_WARNING("target not halted");
2247                 return ERROR_TARGET_NOT_HALTED;
2248         }
2249
2250         if (!count)
2251                 return ERROR_OK;
2252
2253         /* Clear any abort. */
2254         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2255                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2256         if (retval != ERROR_OK)
2257                 return retval;
2258
2259         /* Read DSCR. */
2260         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2261                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2262         if (retval != ERROR_OK)
2263                 return retval;
2264
2265         /* Switch to non-blocking mode if not already in that mode. */
2266         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2267         if (retval != ERROR_OK)
2268                 goto out;
2269
2270         /* Mark R0 as dirty. */
2271         arm_reg_current(arm, 0)->dirty = true;
2272
2273         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2274         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2275         if (retval != ERROR_OK)
2276                 goto out;
2277
2278         /* Get the memory address into R0. */
2279         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2280                         armv7a->debug_base + CPUDBG_DTRRX, address);
2281         if (retval != ERROR_OK)
2282                 goto out;
2283         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2284         if (retval != ERROR_OK)
2285                 goto out;
2286
2287         if (size == 4 && (address % 4) == 0) {
2288                 /* We are doing a word-aligned transfer, so use fast mode. */
2289                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2290         } else {
2291                 /* Use slow path. Adjust size for aligned accesses */
2292                 switch (address % 4) {
2293                         case 1:
2294                         case 3:
2295                                 count *= size;
2296                                 size = 1;
2297                                 break;
2298                         case 2:
2299                                 if (size == 4) {
2300                                         count *= 2;
2301                                         size = 2;
2302                                 }
                                     break;
2303                         case 0:
2304                         default:
2305                                 break;
2306                 }
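                /* For example, two 32-bit words destined for 0x1001 become
                 * eight byte writes, and a word write to a halfword-aligned
                 * address is split into halfword writes. */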
2307                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2308         }
2309
2310 out:
2311         final_retval = retval;
2312
2313         /* Switch to non-blocking mode if not already in that mode. */
2314         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2315         if (final_retval == ERROR_OK)
2316                 final_retval = retval;
2317
2318         /* Wait for last issued instruction to complete. */
2319         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2320         if (final_retval == ERROR_OK)
2321                 final_retval = retval;
2322
2323         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2324          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2325          * check RXfull_l). Most of the time this will be free because RXfull_l
2326          * will be cleared immediately and cached in dscr. However, don't do this
2327          * if there is fault, because then the instruction might not have completed
2328          * successfully. */
2329         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2330                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2331                 if (retval != ERROR_OK)
2332                         return retval;
2333         }
2334
2335         /* If there were any sticky abort flags, clear them. */
2336         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2337                 fault_dscr = dscr;
2338                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2339                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2340                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2341         } else {
2342                 fault_dscr = 0;
2343         }
2344
2345         /* Handle synchronous data faults. */
2346         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2347                 if (final_retval == ERROR_OK) {
2348                         /* Final return value will reflect cause of fault. */
2349                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2350                         if (retval == ERROR_OK) {
2351                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2352                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2353                         } else
2354                                 final_retval = retval;
2355                 }
2356                 /* Fault destroyed DFAR/DFSR; restore them. */
2357                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2358                 if (retval != ERROR_OK)
2359                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2360         }
2361
2362         /* Handle asynchronous data faults. */
2363         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2364                 if (final_retval == ERROR_OK)
2365                         /* No other error has been recorded so far, so keep this one. */
2366                         final_retval = ERROR_TARGET_DATA_ABORT;
2367         }
2368
2369         /* If the DCC is nonempty, clear it. */
2370         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2371                 uint32_t dummy;
2372                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2373                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2374                 if (final_retval == ERROR_OK)
2375                         final_retval = retval;
2376         }
2377         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2378                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2379                 if (final_retval == ERROR_OK)
2380                         final_retval = retval;
2381         }
2382
2383         /* Done. */
2384         return final_retval;
2385 }
2386
2387 static int cortex_a_read_cpu_memory_slow(struct target *target,
2388         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2389 {
2390         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2391          * in *dscr; updated to new value. This is slow because it works for
2392          * non-word-sized objects. Avoid unaligned accesses as they do not work
2393          * on memory that lacks the "Normal" attribute. If size == 4 and
2394          * the address is aligned, cortex_a_read_cpu_memory_fast should be
2395          * preferred.
2396          * Preconditions:
2397          * - Address is in R0.
2398          * - R0 is marked dirty.
2399          */
2400         struct armv7a_common *armv7a = target_to_armv7a(target);
2401         struct arm *arm = &armv7a->arm;
2402         int retval;
2403
2404         /* Mark register R1 as dirty, to use for transferring data. */
2405         arm_reg_current(arm, 1)->dirty = true;
2406
2407         /* Switch to non-blocking mode if not already in that mode. */
2408         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2409         if (retval != ERROR_OK)
2410                 return retval;
2411
2412         /* Go through the objects. */
2413         while (count) {
2414                 /* Issue a load of the appropriate size to R1. */
2415                 uint32_t opcode, data;
2416                 if (size == 1)
2417                         opcode = ARMV4_5_LDRB_IP(1, 0);
2418                 else if (size == 2)
2419                         opcode = ARMV4_5_LDRH_IP(1, 0);
2420                 else
2421                         opcode = ARMV4_5_LDRW_IP(1, 0);
2422                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2423                 if (retval != ERROR_OK)
2424                         return retval;
2425
2426                 /* Issue a write of R1 to DTRTX. */
2427                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2428                 if (retval != ERROR_OK)
2429                         return retval;
2430
2431                 /* Check for faults and return early. */
2432                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2433                         return ERROR_OK; /* A data fault is not considered a system failure. */
2434
2435                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2436                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2437                  * must also check TXfull_l). Most of the time this will be free
2438                  * because TXfull_l will be set immediately and cached in dscr. */
2439                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2440                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2441                 if (retval != ERROR_OK)
2442                         return retval;
2443
2444                 /* Read the value transferred to DTRTX into the buffer. */
2445                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2446                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2447                 if (retval != ERROR_OK)
2448                         return retval;
2449                 if (size == 1)
2450                         *buffer = (uint8_t) data;
2451                 else if (size == 2)
2452                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2453                 else
2454                         target_buffer_set_u32(target, buffer, data);
2455
2456                 /* Advance. */
2457                 buffer += size;
2458                 --count;
2459         }
2460
2461         return ERROR_OK;
2462 }
2463
2464 static int cortex_a_read_cpu_memory_fast(struct target *target,
2465         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2466 {
2467         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2468          * *dscr; updated to new value. This is fast but only works for word-sized
2469          * objects at aligned addresses.
2470          * Preconditions:
2471          * - Address is in R0 and must be a multiple of 4.
2472          * - R0 is marked dirty.
2473          */
2474         struct armv7a_common *armv7a = target_to_armv7a(target);
2475         uint32_t u32;
2476         int retval;
2477
2478         /* Switch to non-blocking mode if not already in that mode. */
2479         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2480         if (retval != ERROR_OK)
2481                 return retval;
2482
2483         /* Issue the LDC instruction via a write to ITR. */
2484         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2485         if (retval != ERROR_OK)
2486                 return retval;
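        /* This first LDC (presumably LDC p14, c5, [R0], #4) reads word 0 of the
         * transfer into DTRTX and post-increments R0; the remaining words are
         * streamed in fast mode below. */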
2487
2488         count--;
2489
2490         if (count > 0) {
2491                 /* Switch to fast mode if not already in that mode. */
2492                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2493                 if (retval != ERROR_OK)
2494                         return retval;
2495
2496                 /* Latch LDC instruction. */
2497                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2498                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2499                 if (retval != ERROR_OK)
2500                         return retval;
2501
2502                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2503                  * mode rules, this blocks until the instruction finishes executing and
2504                  * then reissues the read instruction to read the next word from
2505                  * memory. The last read of DTRTX in this call reads the second-to-last
2506                  * word from memory and issues the read instruction for the last word.
2507                  */
2508                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2509                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2510                 if (retval != ERROR_OK)
2511                         return retval;
2512
2513                 /* Advance. */
2514                 buffer += count * 4;
2515         }
2516
2517         /* Wait for last issued instruction to complete. */
2518         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2519         if (retval != ERROR_OK)
2520                 return retval;
2521
2522         /* Switch to non-blocking mode if not already in that mode. */
2523         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2524         if (retval != ERROR_OK)
2525                 return retval;
2526
2527         /* Check for faults and return early. */
2528         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2529                 return ERROR_OK; /* A data fault is not considered a system failure. */
2530
2531         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2532          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2533          * check TXfull_l). Most of the time this will be free because TXfull_l
2534          * will be set immediately and cached in dscr. */
2535         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2536                         DSCR_DTRTX_FULL_LATCHED, dscr);
2537         if (retval != ERROR_OK)
2538                 return retval;
2539
2540         /* Read the value transferred to DTRTX into the buffer. This is the last
2541          * word. */
2542         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2543                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2544         if (retval != ERROR_OK)
2545                 return retval;
2546         target_buffer_set_u32(target, buffer, u32);
2547
2548         return ERROR_OK;
2549 }
2550
2551 static int cortex_a_read_cpu_memory(struct target *target,
2552         uint32_t address, uint32_t size,
2553         uint32_t count, uint8_t *buffer)
2554 {
2555         /* Read memory through the CPU. */
2556         int retval, final_retval;
2557         struct armv7a_common *armv7a = target_to_armv7a(target);
2558         struct arm *arm = &armv7a->arm;
2559         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2560
2561         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2562                           address, size, count);
2563         if (target->state != TARGET_HALTED) {
2564                 LOG_WARNING("target not halted");
2565                 return ERROR_TARGET_NOT_HALTED;
2566         }
2567
2568         if (!count)
2569                 return ERROR_OK;
2570
2571         /* Clear any abort. */
2572         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2573                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2574         if (retval != ERROR_OK)
2575                 return retval;
2576
2577         /* Read DSCR */
2578         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2579                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2580         if (retval != ERROR_OK)
2581                 return retval;
2582
2583         /* Switch to non-blocking mode if not already in that mode. */
2584         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2585         if (retval != ERROR_OK)
2586                 goto out;
2587
2588         /* Mark R0 as dirty. */
2589         arm_reg_current(arm, 0)->dirty = true;
2590
2591         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2592         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2593         if (retval != ERROR_OK)
2594                 goto out;
2595
2596         /* Get the memory address into R0. */
2597         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2598                         armv7a->debug_base + CPUDBG_DTRRX, address);
2599         if (retval != ERROR_OK)
2600                 goto out;
2601         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2602         if (retval != ERROR_OK)
2603                 goto out;
2604
2605         if (size == 4 && (address % 4) == 0) {
2606                 /* We are doing a word-aligned transfer, so use fast mode. */
2607                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2608         } else {
2609                 /* Use the slow path. Adjust the access size so that every access stays naturally aligned. */
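                /* For example (illustration only): a word-wide transfer starting at
                 * an address that is only halfword aligned is carried out as twice
                 * as many halfword accesses, and an odd start address falls back to
                 * byte accesses. */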
2610                 switch (address % 4) {
2611                         case 1:
2612                         case 3:
2613                                 count *= size;
2614                                 size = 1;
2615                                 break;
2616                         case 2:
2617                                 if (size == 4) {
2618                                         count *= 2;
2619                                         size = 2;
2620                                 }
2621                                 break;
2622                         case 0:
2623                         default:
2624                                 break;
2625                 }
2626                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2627         }
2628
2629 out:
2630         final_retval = retval;
2631
2632         /* Switch to non-blocking mode if not already in that mode. */
2633         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2634         if (final_retval == ERROR_OK)
2635                 final_retval = retval;
2636
2637         /* Wait for last issued instruction to complete. */
2638         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2639         if (final_retval == ERROR_OK)
2640                 final_retval = retval;
2641
2642         /* If there were any sticky abort flags, clear them. */
2643         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2644                 fault_dscr = dscr;
2645                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2646                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2647                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2648         } else {
2649                 fault_dscr = 0;
2650         }
2651
2652         /* Handle synchronous data faults. */
2653         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2654                 if (final_retval == ERROR_OK) {
2655                         /* Final return value will reflect cause of fault. */
2656                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2657                         if (retval == ERROR_OK) {
2658                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2659                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2660                         } else
2661                                 final_retval = retval;
2662                 }
2663                 /* Fault destroyed DFAR/DFSR; restore them. */
2664                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2665                 if (retval != ERROR_OK)
2666                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2667         }
2668
2669         /* Handle asynchronous data faults. */
2670         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2671                 if (final_retval == ERROR_OK)
2672                         /* No other error has been recorded so far, so keep this one. */
2673                         final_retval = ERROR_TARGET_DATA_ABORT;
2674         }
2675
2676         /* If the DCC is nonempty, clear it. */
2677         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2678                 uint32_t dummy;
2679                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2680                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2681                 if (final_retval == ERROR_OK)
2682                         final_retval = retval;
2683         }
2684         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2685                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2686                 if (final_retval == ERROR_OK)
2687                         final_retval = retval;
2688         }
2689
2690         /* Done. */
2691         return final_retval;
2692 }
2693
2694
2695 /*
2696  * Cortex-A Memory access
2697  *
2698  * This is the same as for Cortex-M3, but we must also use the correct
2699  * AP number for every access.
2700  */
2701
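/*
 * A minimal sketch of the pattern the wrappers below follow (illustration
 * only, error handling omitted):
 *
 *     cortex_a_prep_memaccess(target, phys);   // phys = 1: physical, 0: virtual
 *     retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
 *     cortex_a_post_memaccess(target, phys);
 *
 * The write wrappers call cortex_a_write_cpu_memory() in the same way.
 */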
2702 static int cortex_a_read_phys_memory(struct target *target,
2703         target_addr_t address, uint32_t size,
2704         uint32_t count, uint8_t *buffer)
2705 {
2706         int retval;
2707
2708         if (!count || !buffer)
2709                 return ERROR_COMMAND_SYNTAX_ERROR;
2710
2711         LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2712                 address, size, count);
2713
2714         /* read memory through the CPU */
2715         cortex_a_prep_memaccess(target, 1);
2716         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2717         cortex_a_post_memaccess(target, 1);
2718
2719         return retval;
2720 }
2721
2722 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2723         uint32_t size, uint32_t count, uint8_t *buffer)
2724 {
2725         int retval;
2726
2727         /* cortex_a handles unaligned memory access */
2728         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2729                 address, size, count);
2730
2731         cortex_a_prep_memaccess(target, 0);
2732         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2733         cortex_a_post_memaccess(target, 0);
2734
2735         return retval;
2736 }
2737
2738 static int cortex_a_write_phys_memory(struct target *target,
2739         target_addr_t address, uint32_t size,
2740         uint32_t count, const uint8_t *buffer)
2741 {
2742         int retval;
2743
2744         if (!count || !buffer)
2745                 return ERROR_COMMAND_SYNTAX_ERROR;
2746
2747         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2748                 address, size, count);
2749
2750         /* write memory through the CPU */
2751         cortex_a_prep_memaccess(target, 1);
2752         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2753         cortex_a_post_memaccess(target, 1);
2754
2755         return retval;
2756 }
2757
2758 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2759         uint32_t size, uint32_t count, const uint8_t *buffer)
2760 {
2761         int retval;
2762
2763         /* cortex_a handles unaligned memory access */
2764         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2765                 address, size, count);
2766
2767         /* Memory writes bypass the caches, so flush the affected range before writing. */
2768         armv7a_cache_auto_flush_on_write(target, address, size * count);
2769
2770         cortex_a_prep_memaccess(target, 0);
2771         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2772         cortex_a_post_memaccess(target, 0);
2773         return retval;
2774 }
2775
2776 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2777                                 uint32_t count, uint8_t *buffer)
2778 {
2779         uint32_t size;
2780
2781         /* Align the start address up to the maximum access size of 4 bytes. The
2782          * loop condition ensures that enough data remains for the larger accesses. */
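        /* For example (illustration only): address = 0x1001, count = 7 results in a
         * byte read at 0x1001 and a halfword read at 0x1002, leaving address = 0x1004
         * and count = 4 for the wide-access loop below. */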
2783         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2784                 if (address & size) {
2785                         int retval = target_read_memory(target, address, size, 1, buffer);
2786                         if (retval != ERROR_OK)
2787                                 return retval;
2788                         address += size;
2789                         count -= size;
2790                         buffer += size;
2791                 }
2792         }
2793
2794         /* Read the data using the largest possible access size. */
2795         for (; size > 0; size /= 2) {
2796                 uint32_t aligned = count - count % size;
2797                 if (aligned > 0) {
2798                         int retval = target_read_memory(target, address, size, aligned / size, buffer);
2799                         if (retval != ERROR_OK)
2800                                 return retval;
2801                         address += aligned;
2802                         count -= aligned;
2803                         buffer += aligned;
2804                 }
2805         }
2806
2807         return ERROR_OK;
2808 }
2809
2810 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2811                                  uint32_t count, const uint8_t *buffer)
2812 {
2813         uint32_t size;
2814
2815         /* Align the start address up to the maximum access size of 4 bytes. The
2816          * loop condition ensures that enough data remains for the larger accesses. */
2817         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2818                 if (address & size) {
2819                         int retval = target_write_memory(target, address, size, 1, buffer);
2820                         if (retval != ERROR_OK)
2821                                 return retval;
2822                         address += size;
2823                         count -= size;
2824                         buffer += size;
2825                 }
2826         }
2827
2828         /* Write the data using the largest possible access size. */
2829         for (; size > 0; size /= 2) {
2830                 uint32_t aligned = count - count % size;
2831                 if (aligned > 0) {
2832                         int retval = target_write_memory(target, address, size, aligned / size, buffer);
2833                         if (retval != ERROR_OK)
2834                                 return retval;
2835                         address += aligned;
2836                         count -= aligned;
2837                         buffer += aligned;
2838                 }
2839         }
2840
2841         return ERROR_OK;
2842 }
2843
2844 static int cortex_a_handle_target_request(void *priv)
2845 {
2846         struct target *target = priv;
2847         struct armv7a_common *armv7a = target_to_armv7a(target);
2848         int retval;
2849
2850         if (!target_was_examined(target))
2851                 return ERROR_OK;
2852         if (!target->dbg_msg_enabled)
2853                 return ERROR_OK;
2854
2855         if (target->state == TARGET_RUNNING) {
2856                 uint32_t request;
2857                 uint32_t dscr;
2858                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2859                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2860
2861                 /* While the target keeps DTRTX full, read and dispatch each request word. */
2862                 int64_t then = timeval_ms();
2863                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2864                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2865                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2866                         if (retval == ERROR_OK) {
2867                                 target_request(target, request);
2868                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2869                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2870                         }
2871                         if (timeval_ms() > then + 1000) {
2872                                 LOG_ERROR("Timeout waiting for dtr tx full");
2873                                 return ERROR_FAIL;
2874                         }
2875                 }
2876         }
2877
2878         return ERROR_OK;
2879 }
2880
2881 /*
2882  * Cortex-A target information and configuration
2883  */
2884
2885 static int cortex_a_examine_first(struct target *target)
2886 {
2887         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2888         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2889         struct adiv5_dap *swjdp = armv7a->arm.dap;
2890         struct adiv5_private_config *pc = target->private_config;
2891
2892         int i;
2893         int retval = ERROR_OK;
2894         uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2895
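        /* The debug AP may be fixed in the target configuration (the '-ap-num'
         * option handled by adiv5_jim_configure()); otherwise search the DAP
         * for an APB-AP below. */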
2896         if (pc->ap_num == DP_APSEL_INVALID) {
2897                 /* Search for the APB-AP - it is needed for access to debug registers */
2898                 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2899                 if (retval != ERROR_OK) {
2900                         LOG_ERROR("Could not find APB-AP for debug access");
2901                         return retval;
2902                 }
2903         } else {
2904                 armv7a->debug_ap = dap_ap(swjdp, pc->ap_num);
2905         }
2906
2907         retval = mem_ap_init(armv7a->debug_ap);
2908         if (retval != ERROR_OK) {
2909                 LOG_ERROR("Could not initialize the APB-AP");
2910                 return retval;
2911         }
2912
2913         armv7a->debug_ap->memaccess_tck = 80;
2914
2915         if (!target->dbgbase_set) {
2916                 target_addr_t dbgbase;
2917                 /* Get ROM Table base */
2918                 uint32_t apid;
2919                 int32_t coreidx = target->coreid;
2920                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2921                           target->cmd_name);
2922                 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2923                 if (retval != ERROR_OK)
2924                         return retval;
2925                 /* Lookup Processor DAP */
2926                 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2927                                 &armv7a->debug_base, &coreidx);
2928                 if (retval != ERROR_OK) {
2929                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2930                                   target->cmd_name);
2931                         return retval;
2932                 }
2933                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2934                           target->coreid, armv7a->debug_base);
2935         } else
2936                 armv7a->debug_base = target->dbgbase;
2937
2938         if ((armv7a->debug_base & (1UL<<31)) == 0)
2939                 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2940                             "Please fix the target configuration.", target_name(target));
2941
2942         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2943                         armv7a->debug_base + CPUDBG_DIDR, &didr);
2944         if (retval != ERROR_OK) {
2945                 LOG_DEBUG("Examine %s failed", "DIDR");
2946                 return retval;
2947         }
2948
2949         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2950                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2951         if (retval != ERROR_OK) {
2952                 LOG_DEBUG("Examine %s failed", "CPUID");
2953                 return retval;
2954         }
2955
2956         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2957         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2958
2959         cortex_a->didr = didr;
2960         cortex_a->cpuid = cpuid;
2961
2962         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2963                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2964         if (retval != ERROR_OK)
2965                 return retval;
2966         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
2967
2968         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2969                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2970                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2971                 return ERROR_TARGET_INIT_FAILED;
2972         }
2973
2974         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2975                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2976
2977         /* Read DBGOSLSR and check if OSLK is implemented */
2978         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2979                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2980         if (retval != ERROR_OK)
2981                 return retval;
2982         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2983
2984         /* check if OS Lock is implemented */
2985         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2986                 /* check if OS Lock is set */
2987                 if (dbg_osreg & OSLSR_OSLK) {
2988                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2989
2990                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2991                                                         armv7a->debug_base + CPUDBG_OSLAR,
2992                                                         0);
2993                         if (retval == ERROR_OK)
2994                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2995                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2996
2997                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2998                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2999                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
3000                                                 target->coreid);
3001                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3002                                 return ERROR_TARGET_INIT_FAILED;
3003                         }
3004                 }
3005         }
3006
3007         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3008                                  armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
3009         if (retval != ERROR_OK)
3010                 return retval;
3011
3012         if (dbg_idpfr1 & 0x000000f0) {
3013                 LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
3014                                 target->coreid);
3015                 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
3016         }
3017         if (dbg_idpfr1 & 0x0000f000) {
3018                 LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
3019                                 target->coreid);
3020                 /*
3021                  * Overwrite the value set above and simplify the checks: the
3022                  * virtualization extensions require the security extensions to be implemented.
3023                  */
3024                 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3025         }
3026
3027         /* Avoid recreating the register cache */
3028         if (!target_was_examined(target)) {
3029                 retval = cortex_a_dpm_setup(cortex_a, didr);
3030                 if (retval != ERROR_OK)
3031                         return retval;
3032         }
3033
3034         /* Setup Breakpoint Register Pairs */
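        /* DBGDIDR[31:28], [27:24] and [23:20] encode, respectively, the number of
         * watchpoint register pairs, breakpoint register pairs and context-matching
         * breakpoint pairs, each as the count minus one. */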
3035         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3036         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3037         cortex_a->brp_num_available = cortex_a->brp_num;
3038         free(cortex_a->brp_list);
3039         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3040 /*      cortex_a->brb_enabled = ????; */
3041         for (i = 0; i < cortex_a->brp_num; i++) {
3042                 cortex_a->brp_list[i].used = false;
3043                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3044                         cortex_a->brp_list[i].type = BRP_NORMAL;
3045                 else
3046                         cortex_a->brp_list[i].type = BRP_CONTEXT;
3047                 cortex_a->brp_list[i].value = 0;
3048                 cortex_a->brp_list[i].control = 0;
3049                 cortex_a->brp_list[i].brpn = i;
3050         }
3051
3052         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3053
3054         /* Setup Watchpoint Register Pairs */
3055         cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3056         cortex_a->wrp_num_available = cortex_a->wrp_num;
3057         free(cortex_a->wrp_list);
3058         cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3059         for (i = 0; i < cortex_a->wrp_num; i++) {
3060                 cortex_a->wrp_list[i].used = false;
3061                 cortex_a->wrp_list[i].value = 0;
3062                 cortex_a->wrp_list[i].control = 0;
3063                 cortex_a->wrp_list[i].wrpn = i;
3064         }
3065
3066         LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3067
3068         /* select debug_ap as default */
3069         swjdp->apsel = armv7a->debug_ap->ap_num;
3070
3071         target_set_examined(target);
3072         return ERROR_OK;
3073 }
3074
3075 static int cortex_a_examine(struct target *target)
3076 {
3077         int retval = ERROR_OK;
3078
3079         /* Reestablish communication after target reset */
3080         retval = cortex_a_examine_first(target);
3081
3082         /* Configure core debug access */
3083         if (retval == ERROR_OK)
3084                 retval = cortex_a_init_debug_access(target);
3085
3086         return retval;
3087 }
3088
3089 /*
3090  *      Cortex-A target creation and initialization
3091  */
3092
3093 static int cortex_a_init_target(struct command_context *cmd_ctx,
3094         struct target *target)
3095 {
3096         /* Most of the initialization is already done in examine_first() */
3097         arm_semihosting_init(target);
3098         return ERROR_OK;
3099 }
3100
3101 static int cortex_a_init_arch_info(struct target *target,
3102         struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3103 {
3104         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3105
3106         /* Setup struct cortex_a_common */
3107         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3108         armv7a->arm.dap = dap;
3109
3110         /* register arch-specific functions */
3111         armv7a->examine_debug_reason = NULL;
3112
3113         armv7a->post_debug_entry = cortex_a_post_debug_entry;
3114
3115         armv7a->pre_restore_context = NULL;
3116
3117         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3118
3119
3120 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
3121
3122         /* REVISIT v7a setup should be in a v7a-specific routine */
3123         armv7a_init_arch_info(target, armv7a);
3124         target_register_timer_callback(cortex_a_handle_target_request, 1,
3125                 TARGET_TIMER_TYPE_PERIODIC, target);
3126
3127         return ERROR_OK;
3128 }
3129
3130 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3131 {
3132         struct cortex_a_common *cortex_a;
3133         struct adiv5_private_config *pc;
3134
3135         if (!target->private_config)
3136                 return ERROR_FAIL;
3137
3138         pc = (struct adiv5_private_config *)target->private_config;
3139
3140         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3141         if (!cortex_a) {
3142                 LOG_ERROR("Out of memory");
3143                 return ERROR_FAIL;
3144         }
3145         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3146         cortex_a->armv7a_common.is_armv7r = false;
3147         cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3148
3149         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3150 }
3151
3152 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3153 {
3154         struct cortex_a_common *cortex_a;
3155         struct adiv5_private_config *pc;
3156
3157         pc = (struct adiv5_private_config *)target->private_config;
3158         if (adiv5_verify_config(pc) != ERROR_OK)
3159                 return ERROR_FAIL;
3160
3161         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3162         if (!cortex_a) {
3163                 LOG_ERROR("Out of memory");
3164                 return ERROR_FAIL;
3165         }
3166         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3167         cortex_a->armv7a_common.is_armv7r = true;
3168
3169         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3170 }
3171
3172 static void cortex_a_deinit_target(struct target *target)
3173 {
3174         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3175         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3176         struct arm_dpm *dpm = &armv7a->dpm;
3177         uint32_t dscr;
3178         int retval;
3179
3180         if (target_was_examined(target)) {
3181                 /* Disable halt for breakpoint, watchpoint and vector catch */
3182                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3183                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
3184                 if (retval == ERROR_OK)
3185                         mem_ap_write_atomic_u32(armv7a->debug_ap,
3186                                         armv7a->debug_base + CPUDBG_DSCR,
3187                                         dscr & ~DSCR_HALT_DBG_MODE);
3188         }
3189
3190         free(cortex_a->wrp_list);
3191         free(cortex_a->brp_list);
3192         arm_free_reg_cache(dpm->arm);
3193         free(dpm->dbp);
3194         free(dpm->dwp);
3195         free(target->private_config);
3196         free(cortex_a);
3197 }
3198
3199 static int cortex_a_mmu(struct target *target, int *enabled)
3200 {
3201         struct armv7a_common *armv7a = target_to_armv7a(target);
3202
3203         if (target->state != TARGET_HALTED) {
3204                 LOG_ERROR("%s: target not halted", __func__);
3205                 return ERROR_TARGET_INVALID;
3206         }
3207
3208         if (armv7a->is_armv7r)
3209                 *enabled = 0;
3210         else
3211                 *enabled = armv7a->armv7a_mmu.mmu_enabled;
3212
3213         return ERROR_OK;
3214 }
3215
3216 static int cortex_a_virt2phys(struct target *target,
3217         target_addr_t virt, target_addr_t *phys)
3218 {
3219         int retval;
3220         int mmu_enabled = 0;
3221
3222         /*
3223          * If the MMU was not enabled at debug entry, there is no
3224          * way of knowing if there was ever a valid configuration
3225          * for it and thus it's not safe to enable it. In this case,
3226          * just return the virtual address as physical.
3227          */
3228         cortex_a_mmu(target, &mmu_enabled);
3229         if (!mmu_enabled) {
3230                 *phys = virt;
3231                 return ERROR_OK;
3232         }
3233
3234         /* The MMU must be enabled in order to get a correct translation. */
3235         retval = cortex_a_mmu_modify(target, 1);
3236         if (retval != ERROR_OK)
3237                 return retval;
3238         return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3239                                                     phys, 1);
3240 }
3241
3242 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3243 {
3244         struct target *target = get_current_target(CMD_CTX);
3245         struct armv7a_common *armv7a = target_to_armv7a(target);
3246
3247         return armv7a_handle_cache_info_command(CMD,
3248                         &armv7a->armv7a_mmu.armv7a_cache);
3249 }
3250
3251
3252 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3253 {
3254         struct target *target = get_current_target(CMD_CTX);
3255         if (!target_was_examined(target)) {
3256                 LOG_ERROR("target not examined yet");
3257                 return ERROR_FAIL;
3258         }
3259
3260         return cortex_a_init_debug_access(target);
3261 }
3262
3263 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3264 {
3265         struct target *target = get_current_target(CMD_CTX);
3266         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3267
3268         static const struct jim_nvp nvp_maskisr_modes[] = {
3269                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3270                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3271                 { .name = NULL, .value = -1 },
3272         };
3273         const struct jim_nvp *n;
3274
3275         if (CMD_ARGC > 0) {
3276                 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3277                 if (!n->name) {
3278                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3279                         return ERROR_COMMAND_SYNTAX_ERROR;
3280                 }
3281
3282                 cortex_a->isrmasking_mode = n->value;
3283         }
3284
3285         n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3286         command_print(CMD, "cortex_a interrupt mask %s", n->name);
3287
3288         return ERROR_OK;
3289 }
3290
3291 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3292 {
3293         struct target *target = get_current_target(CMD_CTX);
3294         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3295
3296         static const struct jim_nvp nvp_dacrfixup_modes[] = {
3297                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3298                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3299                 { .name = NULL, .value = -1 },
3300         };
3301         const struct jim_nvp *n;
3302
3303         if (CMD_ARGC > 0) {
3304                 n = jim_nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3305                 if (!n->name)
3306                         return ERROR_COMMAND_SYNTAX_ERROR;
3307                 cortex_a->dacrfixup_mode = n->value;
3308
3309         }
3310
3311         n = jim_nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3312         command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3313
3314         return ERROR_OK;
3315 }
3316
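/*
 * The handlers above are registered below under the "cortex_a" command group,
 * so they can be invoked from the OpenOCD command line, for example:
 *
 *     cortex_a maskisr on
 *     cortex_a dacrfixup off
 *     cortex_a cache_info
 *     cortex_a dbginit
 */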
3317 static const struct command_registration cortex_a_exec_command_handlers[] = {
3318         {
3319                 .name = "cache_info",
3320                 .handler = cortex_a_handle_cache_info_command,
3321                 .mode = COMMAND_EXEC,
3322                 .help = "display information about target caches",
3323                 .usage = "",
3324         },
3325         {
3326                 .name = "dbginit",
3327                 .handler = cortex_a_handle_dbginit_command,
3328                 .mode = COMMAND_EXEC,
3329                 .help = "Initialize core debug",
3330                 .usage = "",
3331         },
3332         {
3333                 .name = "maskisr",
3334                 .handler = handle_cortex_a_mask_interrupts_command,
3335                 .mode = COMMAND_ANY,
3336                 .help = "mask cortex_a interrupts",
3337                 .usage = "['on'|'off']",
3338         },
3339         {
3340                 .name = "dacrfixup",
3341                 .handler = handle_cortex_a_dacrfixup_command,
3342                 .mode = COMMAND_ANY,
3343                 .help = "set domain access control (DACR) to all-manager "
3344                         "on memory access",
3345                 .usage = "['on'|'off']",
3346         },
3347         {
3348                 .chain = armv7a_mmu_command_handlers,
3349         },
3350         {
3351                 .chain = smp_command_handlers,
3352         },
3353
3354         COMMAND_REGISTRATION_DONE
3355 };
3356 static const struct command_registration cortex_a_command_handlers[] = {
3357         {
3358                 .chain = arm_command_handlers,
3359         },
3360         {
3361                 .chain = armv7a_command_handlers,
3362         },
3363         {
3364                 .name = "cortex_a",
3365                 .mode = COMMAND_ANY,
3366                 .help = "Cortex-A command group",
3367                 .usage = "",
3368                 .chain = cortex_a_exec_command_handlers,
3369         },
3370         COMMAND_REGISTRATION_DONE
3371 };
3372
3373 struct target_type cortexa_target = {
3374         .name = "cortex_a",
3375
3376         .poll = cortex_a_poll,
3377         .arch_state = armv7a_arch_state,
3378
3379         .halt = cortex_a_halt,
3380         .resume = cortex_a_resume,
3381         .step = cortex_a_step,
3382
3383         .assert_reset = cortex_a_assert_reset,
3384         .deassert_reset = cortex_a_deassert_reset,
3385
3386         /* REVISIT allow exporting VFP3 registers ... */
3387         .get_gdb_arch = arm_get_gdb_arch,
3388         .get_gdb_reg_list = arm_get_gdb_reg_list,
3389
3390         .read_memory = cortex_a_read_memory,
3391         .write_memory = cortex_a_write_memory,
3392
3393         .read_buffer = cortex_a_read_buffer,
3394         .write_buffer = cortex_a_write_buffer,
3395
3396         .checksum_memory = arm_checksum_memory,
3397         .blank_check_memory = arm_blank_check_memory,
3398
3399         .run_algorithm = armv4_5_run_algorithm,
3400
3401         .add_breakpoint = cortex_a_add_breakpoint,
3402         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3403         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3404         .remove_breakpoint = cortex_a_remove_breakpoint,
3405         .add_watchpoint = cortex_a_add_watchpoint,
3406         .remove_watchpoint = cortex_a_remove_watchpoint,
3407
3408         .commands = cortex_a_command_handlers,
3409         .target_create = cortex_a_target_create,
3410         .target_jim_configure = adiv5_jim_configure,
3411         .init_target = cortex_a_init_target,
3412         .examine = cortex_a_examine,
3413         .deinit_target = cortex_a_deinit_target,
3414
3415         .read_phys_memory = cortex_a_read_phys_memory,
3416         .write_phys_memory = cortex_a_write_phys_memory,
3417         .mmu = cortex_a_mmu,
3418         .virt2phys = cortex_a_virt2phys,
3419 };
3420
3421 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3422         {
3423                 .name = "dbginit",
3424                 .handler = cortex_a_handle_dbginit_command,
3425                 .mode = COMMAND_EXEC,
3426                 .help = "Initialize core debug",
3427                 .usage = "",
3428         },
3429         {
3430                 .name = "maskisr",
3431                 .handler = handle_cortex_a_mask_interrupts_command,
3432                 .mode = COMMAND_EXEC,
3433                 .help = "mask cortex_r4 interrupts",
3434                 .usage = "['on'|'off']",
3435         },
3436
3437         COMMAND_REGISTRATION_DONE
3438 };
3439 static const struct command_registration cortex_r4_command_handlers[] = {
3440         {
3441                 .chain = arm_command_handlers,
3442         },
3443         {
3444                 .name = "cortex_r4",
3445                 .mode = COMMAND_ANY,
3446                 .help = "Cortex-R4 command group",
3447                 .usage = "",
3448                 .chain = cortex_r4_exec_command_handlers,
3449         },
3450         COMMAND_REGISTRATION_DONE
3451 };
3452
3453 struct target_type cortexr4_target = {
3454         .name = "cortex_r4",
3455
3456         .poll = cortex_a_poll,
3457         .arch_state = armv7a_arch_state,
3458
3459         .halt = cortex_a_halt,
3460         .resume = cortex_a_resume,
3461         .step = cortex_a_step,
3462
3463         .assert_reset = cortex_a_assert_reset,
3464         .deassert_reset = cortex_a_deassert_reset,
3465
3466         /* REVISIT allow exporting VFP3 registers ... */
3467         .get_gdb_arch = arm_get_gdb_arch,
3468         .get_gdb_reg_list = arm_get_gdb_reg_list,
3469
3470         .read_memory = cortex_a_read_phys_memory,
3471         .write_memory = cortex_a_write_phys_memory,
3472
3473         .checksum_memory = arm_checksum_memory,
3474         .blank_check_memory = arm_blank_check_memory,
3475
3476         .run_algorithm = armv4_5_run_algorithm,
3477
3478         .add_breakpoint = cortex_a_add_breakpoint,
3479         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3480         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3481         .remove_breakpoint = cortex_a_remove_breakpoint,
3482         .add_watchpoint = cortex_a_add_watchpoint,
3483         .remove_watchpoint = cortex_a_remove_watchpoint,
3484
3485         .commands = cortex_r4_command_handlers,
3486         .target_create = cortex_r4_target_create,
3487         .target_jim_configure = adiv5_jim_configure,
3488         .init_target = cortex_a_init_target,
3489         .examine = cortex_a_examine,
3490         .deinit_target = cortex_a_deinit_target,
3491 };