a20339f4b68a364765b86311e927a7eae4dfebe1
[fw/openocd] / src / target / cortex_a.c
1 /***************************************************************************
2  *   Copyright (C) 2005 by Dominic Rath                                    *
3  *   Dominic.Rath@gmx.de                                                   *
4  *                                                                         *
5  *   Copyright (C) 2006 by Magnus Lundin                                   *
6  *   lundin@mlu.mine.nu                                                    *
7  *                                                                         *
8  *   Copyright (C) 2008 by Spencer Oliver                                  *
9  *   spen@spen-soft.co.uk                                                  *
10  *                                                                         *
11  *   Copyright (C) 2009 by Dirk Behme                                      *
12  *   dirk.behme@gmail.com - copy from cortex_m3                            *
13  *                                                                         *
14  *   Copyright (C) 2010 Øyvind Harboe                                       *
15  *   oyvind.harboe@zylin.com                                               *
16  *                                                                         *
17  *   Copyright (C) ST-Ericsson SA 2011                                     *
18  *   michel.jaouen@stericsson.com : smp minimum support                    *
19  *                                                                         *
20  *   Copyright (C) Broadcom 2012                                           *
21  *   ehunter@broadcom.com : Cortex-R4 support                              *
22  *                                                                         *
23  *   Copyright (C) 2013 Kamal Dasu                                         *
24  *   kdasu.kdev@gmail.com                                                  *
25  *                                                                         *
26  *   Copyright (C) 2016 Chengyu Zheng                                      *
27  *   chengyu.zheng@polimi.it : watchpoint support                          *
28  *                                                                         *
29  *   This program is free software; you can redistribute it and/or modify  *
30  *   it under the terms of the GNU General Public License as published by  *
31  *   the Free Software Foundation; either version 2 of the License, or     *
32  *   (at your option) any later version.                                   *
33  *                                                                         *
34  *   This program is distributed in the hope that it will be useful,       *
35  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
36  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
37  *   GNU General Public License for more details.                          *
38  *                                                                         *
39  *   You should have received a copy of the GNU General Public License     *
40  *   along with this program.  If not, see <http://www.gnu.org/licenses/>. *
41  *                                                                         *
42  *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
43  *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
44  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
45  *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
46  *                                                                         *
47  ***************************************************************************/
48
49 #ifdef HAVE_CONFIG_H
50 #include "config.h"
51 #endif
52
53 #include "breakpoints.h"
54 #include "cortex_a.h"
55 #include "register.h"
56 #include "armv7a_mmu.h"
57 #include "target_request.h"
58 #include "target_type.h"
59 #include "arm_coresight.h"
60 #include "arm_opcodes.h"
61 #include "arm_semihosting.h"
62 #include "jtag/interface.h"
63 #include "transport/transport.h"
64 #include "smp.h"
65 #include <helper/bits.h>
66 #include <helper/time_support.h>
67
68 static int cortex_a_poll(struct target *target);
69 static int cortex_a_debug_entry(struct target *target);
70 static int cortex_a_restore_context(struct target *target, bool bpwp);
71 static int cortex_a_set_breakpoint(struct target *target,
72         struct breakpoint *breakpoint, uint8_t matchmode);
73 static int cortex_a_set_context_breakpoint(struct target *target,
74         struct breakpoint *breakpoint, uint8_t matchmode);
75 static int cortex_a_set_hybrid_breakpoint(struct target *target,
76         struct breakpoint *breakpoint);
77 static int cortex_a_unset_breakpoint(struct target *target,
78         struct breakpoint *breakpoint);
79 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
80         uint32_t value, uint32_t *dscr);
81 static int cortex_a_mmu(struct target *target, int *enabled);
82 static int cortex_a_mmu_modify(struct target *target, int enable);
83 static int cortex_a_virt2phys(struct target *target,
84         target_addr_t virt, target_addr_t *phys);
85 static int cortex_a_read_cpu_memory(struct target *target,
86         uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
87
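/* Integer base-2 logarithm (floor), e.g. ilog2(1) == 0, ilog2(8) == 3, ilog2(10) == 3. */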
88 static unsigned int ilog2(unsigned int x)
89 {
90         unsigned int y = 0;
91         x /= 2;
92         while (x) {
93                 ++y;
94                 x /= 2;
95         }
96         return y;
97 }
98
99 /*  restore cp15_control_reg at resume */
100 static int cortex_a_restore_cp15_control_reg(struct target *target)
101 {
102         int retval = ERROR_OK;
103         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
104         struct armv7a_common *armv7a = target_to_armv7a(target);
105
106         if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
107                 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
108                 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
109                 retval = armv7a->arm.mcr(target, 15,
110                                 0, 0,   /* op1, op2 */
111                                 1, 0,   /* CRn, CRm */
112                                 cortex_a->cp15_control_reg);
113         }
114         return retval;
115 }
116
117 /*
118  * Set up ARM core for memory access.
119  * If !phys_access, switch to SVC mode and make sure the MMU is on.
120  * If phys_access, switch the MMU off.
121  */
122 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
123 {
124         struct armv7a_common *armv7a = target_to_armv7a(target);
125         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
126         int mmu_enabled = 0;
127
128         if (phys_access == 0) {
129                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
130                 cortex_a_mmu(target, &mmu_enabled);
131                 if (mmu_enabled)
132                         cortex_a_mmu_modify(target, 1);
133                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
134                         /* overwrite DACR to all-manager */
135                         armv7a->arm.mcr(target, 15,
136                                         0, 0, 3, 0,
137                                         0xFFFFFFFF);
138                 }
139         } else {
140                 cortex_a_mmu(target, &mmu_enabled);
141                 if (mmu_enabled)
142                         cortex_a_mmu_modify(target, 0);
143         }
144         return ERROR_OK;
145 }
146
147 /*
148  * Restore ARM core after memory access.
149  * If !phys_access, switch back to the previous mode.
150  * If phys_access, restore the MMU setting.
151  */
152 static int cortex_a_post_memaccess(struct target *target, int phys_access)
153 {
154         struct armv7a_common *armv7a = target_to_armv7a(target);
155         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
156
157         if (phys_access == 0) {
158                 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
159                         /* restore */
160                         armv7a->arm.mcr(target, 15,
161                                         0, 0, 3, 0,
162                                         cortex_a->cp15_dacr_reg);
163                 }
164                 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
165         } else {
166                 int mmu_enabled = 0;
167                 cortex_a_mmu(target, &mmu_enabled);
168                 if (mmu_enabled)
169                         cortex_a_mmu_modify(target, 1);
170         }
171         return ERROR_OK;
172 }
173
174
175 /*  Modify cp15_control_reg to enable or disable the MMU for:
176  *  - virt2phys address conversion
177  *  - reading or writing memory at physical or virtual addresses */
178 static int cortex_a_mmu_modify(struct target *target, int enable)
179 {
180         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
181         struct armv7a_common *armv7a = target_to_armv7a(target);
182         int retval = ERROR_OK;
183         int need_write = 0;
184
185         if (enable) {
186                 /*  error out if the MMU was disabled when the target stopped */
187                 if (!(cortex_a->cp15_control_reg & 0x1U)) {
188                         LOG_ERROR("trying to enable the MMU on a target that stopped with the MMU disabled");
189                         return ERROR_FAIL;
190                 }
191                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
192                         cortex_a->cp15_control_reg_curr |= 0x1U;
193                         need_write = 1;
194                 }
195         } else {
196                 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
197                         cortex_a->cp15_control_reg_curr &= ~0x1U;
198                         need_write = 1;
199                 }
200         }
201
202         if (need_write) {
203                 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
204                         enable ? "enable mmu" : "disable mmu",
205                         cortex_a->cp15_control_reg_curr);
206
207                 retval = armv7a->arm.mcr(target, 15,
208                                 0, 0,   /* op1, op2 */
209                                 1, 0,   /* CRn, CRm */
210                                 cortex_a->cp15_control_reg_curr);
211         }
212         return retval;
213 }
214
215 /*
216  * Cortex-A Basic debug access, very low level assumes state is saved
217  */
218 static int cortex_a_init_debug_access(struct target *target)
219 {
220         struct armv7a_common *armv7a = target_to_armv7a(target);
221         uint32_t dscr;
222         int retval;
223
224         /* lock memory-mapped access to debug registers to prevent
225          * software interference */
226         retval = mem_ap_write_u32(armv7a->debug_ap,
227                         armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
228         if (retval != ERROR_OK)
229                 return retval;
230
231         /* Disable cacheline fills and force cache write-through in debug state */
232         retval = mem_ap_write_u32(armv7a->debug_ap,
233                         armv7a->debug_base + CPUDBG_DSCCR, 0);
234         if (retval != ERROR_OK)
235                 return retval;
236
237         /* Disable TLB lookup and refill/eviction in debug state */
238         retval = mem_ap_write_u32(armv7a->debug_ap,
239                         armv7a->debug_base + CPUDBG_DSMCR, 0);
240         if (retval != ERROR_OK)
241                 return retval;
242
243         retval = dap_run(armv7a->debug_ap->dap);
244         if (retval != ERROR_OK)
245                 return retval;
246
247         /* Enabling of instruction execution in debug mode is done in debug_entry code */
248
249         /* Resync breakpoint registers */
250
251         /* Enable halt for breakpoint, watchpoint and vector catch */
252         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
253                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
254         if (retval != ERROR_OK)
255                 return retval;
256         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
257                         armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
258         if (retval != ERROR_OK)
259                 return retval;
260
261         /* Since this is likely called from init or reset, update target state information */
262         return cortex_a_poll(target);
263 }
264
265 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
266 {
267         /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
268          * Writes the final value of DSCR into *dscr.  Pass force=true to force
269          * reading DSCR at least once. */
270         struct armv7a_common *armv7a = target_to_armv7a(target);
271         int retval;
272
273         if (force) {
274                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
275                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
276                 if (retval != ERROR_OK) {
277                         LOG_ERROR("Could not read DSCR register");
278                         return retval;
279                 }
280         }
281
282         retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
283         if (retval != ERROR_OK)
284                 LOG_ERROR("Error waiting for InstrCompl=1");
285         return retval;
286 }
287
288 /* To reduce needless round-trips, pass in a pointer to the current
289  * DSCR value.  Initialize it to zero if you just need to know the
290  * value on return from this function; or DSCR_INSTR_COMP if you
291  * happen to know that no instruction is pending.
292  */
293 static int cortex_a_exec_opcode(struct target *target,
294         uint32_t opcode, uint32_t *dscr_p)
295 {
296         uint32_t dscr;
297         int retval;
298         struct armv7a_common *armv7a = target_to_armv7a(target);
299
300         dscr = dscr_p ? *dscr_p : 0;
301
302         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
303
304         /* Wait for InstrCompl bit to be set */
305         retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
306         if (retval != ERROR_OK)
307                 return retval;
308
309         retval = mem_ap_write_u32(armv7a->debug_ap,
310                         armv7a->debug_base + CPUDBG_ITR, opcode);
311         if (retval != ERROR_OK)
312                 return retval;
313
314         /* Wait for InstrCompl bit to be set */
315         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
316         if (retval != ERROR_OK) {
317                 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
318                 return retval;
319         }
320
321         if (dscr_p)
322                 *dscr_p = dscr;
323
324         return retval;
325 }
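
/* Usage sketch, kept out of the build: issuing two opcodes back to back while
 * sharing one DSCR shadow, as the comment above describes.  Starting from
 * DSCR_INSTR_COMP skips the initial DSCR read; the opcodes chosen here (read
 * DTRRX into r0, then a DSB) are only placeholders for the example. */
#if 0
{
        uint32_t dscr = DSCR_INSTR_COMP;        /* no instruction pending */
        int retval;

        /* MRC p14, 0, r0, c0, c5, 0 - move DTRRX into r0 */
        retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
        if (retval == ERROR_OK)
                /* DSB: MCR p15, 0, r0, c7, c10, 4 */
                retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(15, 0, 0, 7, 10, 4), &dscr);
}
#endif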
326
327 /* Write to memory mapped registers directly with no cache or mmu handling */
328 static int cortex_a_dap_write_memap_register_u32(struct target *target,
329         uint32_t address,
330         uint32_t value)
331 {
332         int retval;
333         struct armv7a_common *armv7a = target_to_armv7a(target);
334
335         retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
336
337         return retval;
338 }
339
340 /*
341  * Cortex-A implementation of Debug Programmer's Model
342  *
343  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
344  * so there's no need to poll for it before executing an instruction.
345  *
346  * NOTE that in several of these cases the "stall" mode might be useful.
347  * It'd let us queue a few operations together... prepare/finish might
348  * be the places to enable/disable that mode.
349  */
350
351 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
352 {
353         return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
354 }
355
356 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
357 {
358         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
359         return mem_ap_write_u32(a->armv7a_common.debug_ap,
360                         a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
361 }
362
363 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
364         uint32_t *dscr_p)
365 {
366         uint32_t dscr = DSCR_INSTR_COMP;
367         int retval;
368
369         if (dscr_p)
370                 dscr = *dscr_p;
371
372         /* Wait for DTRRXfull */
373         retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
374                         DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
375         if (retval != ERROR_OK) {
376                 LOG_ERROR("Error waiting for read dcc");
377                 return retval;
378         }
379
380         retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
381                         a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
382         if (retval != ERROR_OK)
383                 return retval;
384         /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
385
386         if (dscr_p)
387                 *dscr_p = dscr;
388
389         return retval;
390 }
391
392 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
393 {
394         struct cortex_a_common *a = dpm_to_a(dpm);
395         uint32_t dscr;
396         int retval;
397
398         /* set up invariant:  INSTR_COMP is set after every DPM operation */
399         retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
400         if (retval != ERROR_OK) {
401                 LOG_ERROR("Error waiting for dpm prepare");
402                 return retval;
403         }
404
405         /* this "should never happen" ... */
406         if (dscr & DSCR_DTR_RX_FULL) {
407                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
408                 /* Clear DCCRX */
409                 retval = cortex_a_exec_opcode(
410                                 a->armv7a_common.arm.target,
411                                 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
412                                 &dscr);
413                 if (retval != ERROR_OK)
414                         return retval;
415         }
416
417         return retval;
418 }
419
420 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
421 {
422         /* REVISIT what could be done here? */
423         return ERROR_OK;
424 }
425
426 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
427         uint32_t opcode, uint32_t data)
428 {
429         struct cortex_a_common *a = dpm_to_a(dpm);
430         int retval;
431         uint32_t dscr = DSCR_INSTR_COMP;
432
433         retval = cortex_a_write_dcc(a, data);
434         if (retval != ERROR_OK)
435                 return retval;
436
437         return cortex_a_exec_opcode(
438                         a->armv7a_common.arm.target,
439                         opcode,
440                         &dscr);
441 }
442
443 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
444         uint8_t rt, uint32_t data)
445 {
446         struct cortex_a_common *a = dpm_to_a(dpm);
447         uint32_t dscr = DSCR_INSTR_COMP;
448         int retval;
449
450         if (rt > 15)
451                 return ERROR_TARGET_INVALID;
452
453         retval = cortex_a_write_dcc(a, data);
454         if (retval != ERROR_OK)
455                 return retval;
456
457         /* DTRRX to Rt, "MRC p14, 0, Rt, c0, c5, 0", 0xEE100E15 when Rt is r0 */
458         return cortex_a_exec_opcode(
459                         a->armv7a_common.arm.target,
460                         ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
461                         &dscr);
462 }
463
464 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
465         uint32_t opcode, uint32_t data)
466 {
467         struct cortex_a_common *a = dpm_to_a(dpm);
468         uint32_t dscr = DSCR_INSTR_COMP;
469         int retval;
470
471         retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
472         if (retval != ERROR_OK)
473                 return retval;
474
475         /* then the opcode, taking data from R0 */
476         retval = cortex_a_exec_opcode(
477                         a->armv7a_common.arm.target,
478                         opcode,
479                         &dscr);
480
481         return retval;
482 }
483
484 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
485 {
486         struct target *target = dpm->arm->target;
487         uint32_t dscr = DSCR_INSTR_COMP;
488
489         /* "Prefetch flush" after modifying execution status in CPSR */
490         return cortex_a_exec_opcode(target,
491                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
492                         &dscr);
493 }
494
495 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
496         uint32_t opcode, uint32_t *data)
497 {
498         struct cortex_a_common *a = dpm_to_a(dpm);
499         int retval;
500         uint32_t dscr = DSCR_INSTR_COMP;
501
502         /* the opcode, writing data to DCC */
503         retval = cortex_a_exec_opcode(
504                         a->armv7a_common.arm.target,
505                         opcode,
506                         &dscr);
507         if (retval != ERROR_OK)
508                 return retval;
509
510         return cortex_a_read_dcc(a, data, &dscr);
511 }
512
513 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
514         uint8_t rt, uint32_t *data)
515 {
516         struct cortex_a_common *a = dpm_to_a(dpm);
517         uint32_t dscr = DSCR_INSTR_COMP;
518         int retval;
519
520         if (rt > 15)
521                 return ERROR_TARGET_INVALID;
522
523         retval = cortex_a_exec_opcode(
524                         a->armv7a_common.arm.target,
525                         ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
526                         &dscr);
527         if (retval != ERROR_OK)
528                 return retval;
529
530         return cortex_a_read_dcc(a, data, &dscr);
531 }
532
533 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
534         uint32_t opcode, uint32_t *data)
535 {
536         struct cortex_a_common *a = dpm_to_a(dpm);
537         uint32_t dscr = DSCR_INSTR_COMP;
538         int retval;
539
540         /* the opcode, writing data to R0 */
541         retval = cortex_a_exec_opcode(
542                         a->armv7a_common.arm.target,
543                         opcode,
544                         &dscr);
545         if (retval != ERROR_OK)
546                 return retval;
547
548         /* write R0 to DCC */
549         return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
550 }
551
552 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
553         uint32_t addr, uint32_t control)
554 {
555         struct cortex_a_common *a = dpm_to_a(dpm);
556         uint32_t vr = a->armv7a_common.debug_base;
557         uint32_t cr = a->armv7a_common.debug_base;
558         int retval;
559
560         switch (index_t) {
561                 case 0 ... 15:  /* breakpoints */
562                         vr += CPUDBG_BVR_BASE;
563                         cr += CPUDBG_BCR_BASE;
564                         break;
565                 case 16 ... 31: /* watchpoints */
566                         vr += CPUDBG_WVR_BASE;
567                         cr += CPUDBG_WCR_BASE;
568                         index_t -= 16;
569                         break;
570                 default:
571                         return ERROR_FAIL;
572         }
573         vr += 4 * index_t;
574         cr += 4 * index_t;
575
576         LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
577                 (unsigned) vr, (unsigned) cr);
578
579         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
580                         vr, addr);
581         if (retval != ERROR_OK)
582                 return retval;
583         retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
584                         cr, control);
585         return retval;
586 }
587
588 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
589 {
590         struct cortex_a_common *a = dpm_to_a(dpm);
591         uint32_t cr;
592
593         switch (index_t) {
594                 case 0 ... 15:
595                         cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
596                         break;
597                 case 16 ... 31:
598                         cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
599                         index_t -= 16;
600                         break;
601                 default:
602                         return ERROR_FAIL;
603         }
604         cr += 4 * index_t;
605
606         LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
607
608         /* clear control register */
609         return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
610 }
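
/* Worked example of the index mapping used above: index_t 0..15 addresses the
 * breakpoint register pairs, 16..31 the watchpoint register pairs.  For
 * index_t == 17 the watchpoint branch subtracts 16 and lands on
 * debug_base + CPUDBG_WVR_BASE + 4 / CPUDBG_WCR_BASE + 4, i.e. the second
 * watchpoint pair. */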
611
612 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
613 {
614         struct arm_dpm *dpm = &a->armv7a_common.dpm;
615         int retval;
616
617         dpm->arm = &a->armv7a_common.arm;
618         dpm->didr = didr;
619
620         dpm->prepare = cortex_a_dpm_prepare;
621         dpm->finish = cortex_a_dpm_finish;
622
623         dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
624         dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
625         dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
626
627         dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
628         dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
629
630         dpm->bpwp_enable = cortex_a_bpwp_enable;
631         dpm->bpwp_disable = cortex_a_bpwp_disable;
632
633         retval = arm_dpm_setup(dpm);
634         if (retval == ERROR_OK)
635                 retval = arm_dpm_initialize(dpm);
636
637         return retval;
638 }
639 static struct target *get_cortex_a(struct target *target, int32_t coreid)
640 {
641         struct target_list *head;
642
643         foreach_smp_target(head, target->smp_targets) {
644                 struct target *curr = head->target;
645                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
646                         return curr;
647         }
648         return target;
649 }
650 static int cortex_a_halt(struct target *target);
651
652 static int cortex_a_halt_smp(struct target *target)
653 {
654         int retval = 0;
655         struct target_list *head;
656
657         foreach_smp_target(head, target->smp_targets) {
658                 struct target *curr = head->target;
659                 if ((curr != target) && (curr->state != TARGET_HALTED)
660                         && target_was_examined(curr))
661                         retval += cortex_a_halt(curr);
662         }
663         return retval;
664 }
665
666 static int update_halt_gdb(struct target *target)
667 {
668         struct target *gdb_target = NULL;
669         struct target_list *head;
670         struct target *curr;
671         int retval = 0;
672
673         if (target->gdb_service && target->gdb_service->core[0] == -1) {
674                 target->gdb_service->target = target;
675                 target->gdb_service->core[0] = target->coreid;
676                 retval += cortex_a_halt_smp(target);
677         }
678
679         if (target->gdb_service)
680                 gdb_target = target->gdb_service->target;
681
682         foreach_smp_target(head, target->smp_targets) {
683                 curr = head->target;
684                 /* skip calling context */
685                 if (curr == target)
686                         continue;
687                 if (!target_was_examined(curr))
688                         continue;
689                 /* skip targets that were already halted */
690                 if (curr->state == TARGET_HALTED)
691                         continue;
692                 /* Skip gdb_target; it alerts GDB, so it has to be polled last */
693                 if (curr == gdb_target)
694                         continue;
695
696                 /* avoid recursion in cortex_a_poll() */
697                 curr->smp = 0;
698                 cortex_a_poll(curr);
699                 curr->smp = 1;
700         }
701
702         /* after all targets were updated, poll the gdb serving target */
703         if (gdb_target && gdb_target != target)
704                 cortex_a_poll(gdb_target);
705         return retval;
706 }
707
708 /*
709  * Cortex-A Run control
710  */
711
712 static int cortex_a_poll(struct target *target)
713 {
714         int retval = ERROR_OK;
715         uint32_t dscr;
716         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
717         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
718         enum target_state prev_target_state = target->state;
719         /*  GDB toggles to another core as follows: */
720         /*    maint packet J core_id */
721         /*    continue */
722         /*  the next poll then triggers a halt event sent to GDB */
723         if ((target->state == TARGET_HALTED) && (target->smp) &&
724                 (target->gdb_service) &&
725                 (!target->gdb_service->target)) {
726                 target->gdb_service->target =
727                         get_cortex_a(target, target->gdb_service->core[1]);
728                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
729                 return retval;
730         }
731         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
732                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
733         if (retval != ERROR_OK)
734                 return retval;
735         cortex_a->cpudbg_dscr = dscr;
736
737         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
738                 if (prev_target_state != TARGET_HALTED) {
739                         /* We have a halting debug event */
740                         LOG_DEBUG("Target halted");
741                         target->state = TARGET_HALTED;
742
743                         retval = cortex_a_debug_entry(target);
744                         if (retval != ERROR_OK)
745                                 return retval;
746
747                         if (target->smp) {
748                                 retval = update_halt_gdb(target);
749                                 if (retval != ERROR_OK)
750                                         return retval;
751                         }
752
753                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
754                                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
755                         } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
756                                 if (arm_semihosting(target, &retval) != 0)
757                                         return retval;
758
759                                 target_call_event_callbacks(target,
760                                         TARGET_EVENT_HALTED);
761                         }
762                 }
763         } else
764                 target->state = TARGET_RUNNING;
765
766         return retval;
767 }
768
769 static int cortex_a_halt(struct target *target)
770 {
771         int retval;
772         uint32_t dscr;
773         struct armv7a_common *armv7a = target_to_armv7a(target);
774
775         /*
776          * Tell the core to be halted by writing DRCR with 0x1
777          * and then wait for the core to be halted.
778          */
779         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
780                         armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
781         if (retval != ERROR_OK)
782                 return retval;
783
784         dscr = 0; /* force read of dscr */
785         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
786                         DSCR_CORE_HALTED, &dscr);
787         if (retval != ERROR_OK) {
788                 LOG_ERROR("Error waiting for halt");
789                 return retval;
790         }
791
792         target->debug_reason = DBG_REASON_DBGRQ;
793
794         return ERROR_OK;
795 }
796
797 static int cortex_a_internal_restore(struct target *target, int current,
798         target_addr_t *address, int handle_breakpoints, int debug_execution)
799 {
800         struct armv7a_common *armv7a = target_to_armv7a(target);
801         struct arm *arm = &armv7a->arm;
802         int retval;
803         uint32_t resume_pc;
804
805         if (!debug_execution)
806                 target_free_all_working_areas(target);
807
808 #if 0
809         if (debug_execution) {
810                 /* Disable interrupts */
811                 /* We disable interrupts in the PRIMASK register instead of
812                  * masking with C_MASKINTS,
813                  * This is probably the same issue as Cortex-M3 Errata 377493:
814                  * C_MASKINTS in parallel with disabled interrupts can cause
815                  * local faults to not be taken. */
816                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
817                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
818                 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
819
820                 /* Make sure we are in Thumb mode */
821                 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
822                         buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
823                         32) | (1 << 24));
824                 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = true;
825                 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = true;
826         }
827 #endif
828
829         /* current = 1: continue on current pc, otherwise continue at <address> */
830         resume_pc = buf_get_u32(arm->pc->value, 0, 32);
831         if (!current)
832                 resume_pc = *address;
833         else
834                 *address = resume_pc;
835
836         /* Make sure that the ARMv7 GDB Thumb fix-ups do not
837          * clobber the return address
838          */
839         switch (arm->core_state) {
840                 case ARM_STATE_ARM:
841                         resume_pc &= 0xFFFFFFFC;
842                         break;
843                 case ARM_STATE_THUMB:
844                 case ARM_STATE_THUMB_EE:
845                         /* When the return address is loaded into PC
846                          * bit 0 must be 1 to stay in Thumb state
847                          */
848                         resume_pc |= 0x1;
849                         break;
850                 case ARM_STATE_JAZELLE:
851                         LOG_ERROR("How do I resume into Jazelle state??");
852                         return ERROR_FAIL;
853                 case ARM_STATE_AARCH64:
854                         LOG_ERROR("Shouldn't be in AARCH64 state");
855                         return ERROR_FAIL;
856         }
857         LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
858         buf_set_u32(arm->pc->value, 0, 32, resume_pc);
859         arm->pc->dirty = true;
860         arm->pc->valid = true;
861
862         /* restore dpm_mode at system halt */
863         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
864         /* call it now, before restoring the context, because it uses CPU
865          * register r0 to restore the cp15 control register */
866         retval = cortex_a_restore_cp15_control_reg(target);
867         if (retval != ERROR_OK)
868                 return retval;
869         retval = cortex_a_restore_context(target, handle_breakpoints);
870         if (retval != ERROR_OK)
871                 return retval;
872         target->debug_reason = DBG_REASON_NOTHALTED;
873         target->state = TARGET_RUNNING;
874
875         /* registers are now invalid */
876         register_cache_invalidate(arm->core_cache);
877
878 #if 0
879         /* the front-end may request us not to handle breakpoints */
880         if (handle_breakpoints) {
881                 /* Single step past breakpoint at current address */
882                 breakpoint = breakpoint_find(target, resume_pc);
883                 if (breakpoint) {
884                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
885                         cortex_m3_unset_breakpoint(target, breakpoint);
886                         cortex_m3_single_step_core(target);
887                         cortex_m3_set_breakpoint(target, breakpoint);
888                 }
889         }
890
891 #endif
892         return retval;
893 }
894
895 static int cortex_a_internal_restart(struct target *target)
896 {
897         struct armv7a_common *armv7a = target_to_armv7a(target);
898         struct arm *arm = &armv7a->arm;
899         int retval;
900         uint32_t dscr;
901         /*
902          * Restart core and wait for it to be started.  Clear ITRen and sticky
903          * exception flags: see ARMv7 ARM, C5.9.
904          *
905          * REVISIT: for single stepping, we probably want to
906          * disable IRQs by default, with optional override...
907          */
908
909         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
910                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
911         if (retval != ERROR_OK)
912                 return retval;
913
914         if ((dscr & DSCR_INSTR_COMP) == 0)
915                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
916
917         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
918                         armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
919         if (retval != ERROR_OK)
920                 return retval;
921
922         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
923                         armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
924                         DRCR_CLEAR_EXCEPTIONS);
925         if (retval != ERROR_OK)
926                 return retval;
927
928         dscr = 0; /* force read of dscr */
929         retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
930                         DSCR_CORE_RESTARTED, &dscr);
931         if (retval != ERROR_OK) {
932                 LOG_ERROR("Error waiting for resume");
933                 return retval;
934         }
935
936         target->debug_reason = DBG_REASON_NOTHALTED;
937         target->state = TARGET_RUNNING;
938
939         /* registers are now invalid */
940         register_cache_invalidate(arm->core_cache);
941
942         return ERROR_OK;
943 }
944
945 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
946 {
947         int retval = 0;
948         struct target_list *head;
949         target_addr_t address;
950
951         foreach_smp_target(head, target->smp_targets) {
952                 struct target *curr = head->target;
953                 if ((curr != target) && (curr->state != TARGET_RUNNING)
954                         && target_was_examined(curr)) {
955                         /*  resume at the current address, not in step mode */
956                         retval += cortex_a_internal_restore(curr, 1, &address,
957                                         handle_breakpoints, 0);
958                         retval += cortex_a_internal_restart(curr);
959                 }
960         }
961         return retval;
962 }
963
964 static int cortex_a_resume(struct target *target, int current,
965         target_addr_t address, int handle_breakpoints, int debug_execution)
966 {
967         int retval = 0;
968         /* dummy resume for smp toggle in order to reduce gdb impact  */
969         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
970                 /*   simulate a start and halt of target */
971                 target->gdb_service->target = NULL;
972                 target->gdb_service->core[0] = target->gdb_service->core[1];
973                 /*  fake resume: at the next poll we switch to target core[1], see cortex_a_poll() */
974                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
975                 return 0;
976         }
977         cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
978         if (target->smp) {
979                 target->gdb_service->core[0] = -1;
980                 retval = cortex_a_restore_smp(target, handle_breakpoints);
981                 if (retval != ERROR_OK)
982                         return retval;
983         }
984         cortex_a_internal_restart(target);
985
986         if (!debug_execution) {
987                 target->state = TARGET_RUNNING;
988                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
989                 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
990         } else {
991                 target->state = TARGET_DEBUG_RUNNING;
992                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
993                 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
994         }
995
996         return ERROR_OK;
997 }
998
999 static int cortex_a_debug_entry(struct target *target)
1000 {
1001         uint32_t dscr;
1002         int retval = ERROR_OK;
1003         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1004         struct armv7a_common *armv7a = target_to_armv7a(target);
1005         struct arm *arm = &armv7a->arm;
1006
1007         LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1008
1009         /* REVISIT surely we should not re-read DSCR !! */
1010         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1011                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1012         if (retval != ERROR_OK)
1013                 return retval;
1014
1015         /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1016          * imprecise data aborts get discarded by issuing a Data
1017          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1018          */
1019
1020         /* Enable the ITR execution once we are in debug mode */
1021         dscr |= DSCR_ITR_EN;
1022         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1023                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1024         if (retval != ERROR_OK)
1025                 return retval;
1026
1027         /* Examine debug reason */
1028         arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1029
1030         /* save address of instruction that triggered the watchpoint? */
1031         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1032                 uint32_t wfar;
1033
1034                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1035                                 armv7a->debug_base + CPUDBG_WFAR,
1036                                 &wfar);
1037                 if (retval != ERROR_OK)
1038                         return retval;
1039                 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1040         }
1041
1042         /* First load the registers accessible through the core debug port */
1043         retval = arm_dpm_read_current_registers(&armv7a->dpm);
1044         if (retval != ERROR_OK)
1045                 return retval;
1046
1047         if (arm->spsr) {
1048                 /* read SPSR */
1049                 retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1050                 if (retval != ERROR_OK)
1051                         return retval;
1052         }
1053
1054 #if 0
1055 /* TODO, Move this */
1056         uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1057         cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1058         LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1059
1060         cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1061         LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1062
1063         cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1064         LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1065 #endif
1066
1067         /* Are we in an exception handler */
1068 /*      armv4_5->exception_number = 0; */
1069         if (armv7a->post_debug_entry) {
1070                 retval = armv7a->post_debug_entry(target);
1071                 if (retval != ERROR_OK)
1072                         return retval;
1073         }
1074
1075         return retval;
1076 }
1077
1078 static int cortex_a_post_debug_entry(struct target *target)
1079 {
1080         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1081         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1082         int retval;
1083
1084         /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1085         retval = armv7a->arm.mrc(target, 15,
1086                         0, 0,   /* op1, op2 */
1087                         1, 0,   /* CRn, CRm */
1088                         &cortex_a->cp15_control_reg);
1089         if (retval != ERROR_OK)
1090                 return retval;
1091         LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1092         cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1093
1094         if (!armv7a->is_armv7r)
1095                 armv7a_read_ttbcr(target);
1096
1097         if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1098                 armv7a_identify_cache(target);
1099
1100         if (armv7a->is_armv7r) {
1101                 armv7a->armv7a_mmu.mmu_enabled = 0;
1102         } else {
1103                 armv7a->armv7a_mmu.mmu_enabled =
1104                         (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1105         }
1106         armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1107                 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1108         armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1109                 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1110         cortex_a->curr_mode = armv7a->arm.core_mode;
1111
1112         /* switch to SVC mode to read DACR */
1113         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1114         armv7a->arm.mrc(target, 15,
1115                         0, 0, 3, 0,
1116                         &cortex_a->cp15_dacr_reg);
1117
1118         LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1119                         cortex_a->cp15_dacr_reg);
1120
1121         arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1122         return ERROR_OK;
1123 }
1124
1125 static int cortex_a_set_dscr_bits(struct target *target,
1126                 unsigned long bit_mask, unsigned long value)
1127 {
1128         struct armv7a_common *armv7a = target_to_armv7a(target);
1129         uint32_t dscr;
1130
1131         /* Read DSCR */
1132         int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1133                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
1134         if (retval != ERROR_OK)
1135                 return retval;
1136
1137         /* clear bitfield */
1138         dscr &= ~bit_mask;
1139         /* put new value */
1140         dscr |= value & bit_mask;
1141
1142         /* write new DSCR */
1143         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1144                         armv7a->debug_base + CPUDBG_DSCR, dscr);
1145         return retval;
1146 }
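
/* Usage sketch, kept out of the build: masking interrupts around a single step,
 * exactly as cortex_a_step() below does when isrmasking_mode == CORTEX_A_ISRMASK_ON. */
#if 0
        retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
        /* ... perform the step ... */
        retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
#endif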
1147
1148 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1149         int handle_breakpoints)
1150 {
1151         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1152         struct armv7a_common *armv7a = target_to_armv7a(target);
1153         struct arm *arm = &armv7a->arm;
1154         struct breakpoint *breakpoint = NULL;
1155         struct breakpoint stepbreakpoint;
1156         struct reg *r;
1157         int retval;
1158
1159         if (target->state != TARGET_HALTED) {
1160                 LOG_WARNING("target not halted");
1161                 return ERROR_TARGET_NOT_HALTED;
1162         }
1163
1164         /* current = 1: continue on current pc, otherwise continue at <address> */
1165         r = arm->pc;
1166         if (!current)
1167                 buf_set_u32(r->value, 0, 32, address);
1168         else
1169                 address = buf_get_u32(r->value, 0, 32);
1170
1171         /* The front-end may request us not to handle breakpoints.
1172          * But since Cortex-A uses breakpoint for single step,
1173          * we MUST handle breakpoints.
1174          */
1175         handle_breakpoints = 1;
1176         if (handle_breakpoints) {
1177                 breakpoint = breakpoint_find(target, address);
1178                 if (breakpoint)
1179                         cortex_a_unset_breakpoint(target, breakpoint);
1180         }
1181
1182         /* Setup single step breakpoint */
1183         stepbreakpoint.address = address;
1184         stepbreakpoint.asid = 0;
1185         stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1186                 ? 2 : 4;
1187         stepbreakpoint.type = BKPT_HARD;
1188         stepbreakpoint.is_set = false;
1189
1190         /* Disable interrupts during single step if requested */
1191         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1192                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1193                 if (retval != ERROR_OK)
1194                         return retval;
1195         }
1196
1197         /* Break on IVA mismatch */
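        /* matchmode 0x04 programs the BRP for instruction address *mismatch*:
         * the core halts as soon as it executes an instruction fetched from any
         * address other than the one set below, which implements the single step. */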
1198         cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1199
1200         target->debug_reason = DBG_REASON_SINGLESTEP;
1201
1202         retval = cortex_a_resume(target, 1, address, 0, 0);
1203         if (retval != ERROR_OK)
1204                 return retval;
1205
1206         int64_t then = timeval_ms();
1207         while (target->state != TARGET_HALTED) {
1208                 retval = cortex_a_poll(target);
1209                 if (retval != ERROR_OK)
1210                         return retval;
1211                 if (target->state == TARGET_HALTED)
1212                         break;
1213                 if (timeval_ms() > then + 1000) {
1214                         LOG_ERROR("timeout waiting for target halt");
1215                         return ERROR_FAIL;
1216                 }
1217         }
1218
1219         cortex_a_unset_breakpoint(target, &stepbreakpoint);
1220
1221         /* Re-enable interrupts if they were disabled */
1222         if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1223                 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1224                 if (retval != ERROR_OK)
1225                         return retval;
1226         }
1227
1228
1229         target->debug_reason = DBG_REASON_BREAKPOINT;
1230
1231         if (breakpoint)
1232                 cortex_a_set_breakpoint(target, breakpoint, 0);
1233
1234         if (target->state == TARGET_HALTED)
1235                 LOG_DEBUG("target stepped");
1236
1237         return ERROR_OK;
1238 }
1239
1240 static int cortex_a_restore_context(struct target *target, bool bpwp)
1241 {
1242         struct armv7a_common *armv7a = target_to_armv7a(target);
1243
1244         LOG_DEBUG(" ");
1245
1246         if (armv7a->pre_restore_context)
1247                 armv7a->pre_restore_context(target);
1248
1249         return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1250 }
1251
1252 /*
1253  * Cortex-A Breakpoint and watchpoint functions
1254  */
1255
1256 /* Setup hardware Breakpoint Register Pair */
1257 static int cortex_a_set_breakpoint(struct target *target,
1258         struct breakpoint *breakpoint, uint8_t matchmode)
1259 {
1260         int retval;
1261         int brp_i = 0;
1262         uint32_t control;
1263         uint8_t byte_addr_select = 0x0F;
1264         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1265         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1266         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1267
1268         if (breakpoint->is_set) {
1269                 LOG_WARNING("breakpoint already set");
1270                 return ERROR_OK;
1271         }
1272
1273         if (breakpoint->type == BKPT_HARD) {
1274                 while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
1275                         brp_i++;
1276                 if (brp_i >= cortex_a->brp_num) {
1277                         LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1278                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1279                 }
1280                 breakpoint_hw_set(breakpoint, brp_i);
1281                 if (breakpoint->length == 2)
1282                         byte_addr_select = (3 << (breakpoint->address & 0x02));
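                /* DBGBCR layout (ARMv7-A/R ARM): breakpoint type (matchmode)
                 * in bits [23:20] (only [22:20] are driven here), byte address
                 * select in [8:5], privilege mode control in [2:1], enable in
                 * bit [0]. */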
1283                 control = ((matchmode & 0x7) << 20)
1284                         | (byte_addr_select << 5)
1285                         | (3 << 1) | 1;
1286                 brp_list[brp_i].used = true;
1287                 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1288                 brp_list[brp_i].control = control;
1289                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1290                                 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1291                                 brp_list[brp_i].value);
1292                 if (retval != ERROR_OK)
1293                         return retval;
1294                 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1295                                 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1296                                 brp_list[brp_i].control);
1297                 if (retval != ERROR_OK)
1298                         return retval;
1299                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1300                         brp_list[brp_i].control,
1301                         brp_list[brp_i].value);
1302         } else if (breakpoint->type == BKPT_SOFT) {
1303                 uint8_t code[4];
1304                 if (breakpoint->length == 2) {
1305                         /* length == 2: Thumb breakpoint */
1306                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1307                 } else if (breakpoint->length == 3) {
1308                         /* length == 3: Thumb-2 breakpoint, actual encoding is
1309                          * a regular Thumb BKPT instruction but we replace a
1310                          * 32bit Thumb-2 instruction, so fix-up the breakpoint
1311                          * length
1312                          */
1313                         buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1314                         breakpoint->length = 4;
1315                 } else {
1316                         /* length == 4, normal ARM breakpoint */
1317                         buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1318                 }
1319
1320                 retval = target_read_memory(target,
1321                                 breakpoint->address & 0xFFFFFFFE,
1322                                 breakpoint->length, 1,
1323                                 breakpoint->orig_instr);
1324                 if (retval != ERROR_OK)
1325                         return retval;
1326
1327                 /* make sure data cache is cleaned & invalidated down to PoC */
1328                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1329                         armv7a_cache_flush_virt(target, breakpoint->address,
1330                                                 breakpoint->length);
1331                 }
1332
1333                 retval = target_write_memory(target,
1334                                 breakpoint->address & 0xFFFFFFFE,
1335                                 breakpoint->length, 1, code);
1336                 if (retval != ERROR_OK)
1337                         return retval;
1338
1339                 /* update i-cache at breakpoint location */
1340                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1341                                         breakpoint->length);
1342                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1343                                                  breakpoint->length);
1344
1345                 breakpoint->is_set = true;
1346         }
1347
1348         return ERROR_OK;
1349 }
1350
1351 static int cortex_a_set_context_breakpoint(struct target *target,
1352         struct breakpoint *breakpoint, uint8_t matchmode)
1353 {
1354         int retval = ERROR_FAIL;
1355         int brp_i = 0;
1356         uint32_t control;
1357         uint8_t byte_addr_select = 0x0F;
1358         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1359         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1360         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1361
1362         if (breakpoint->is_set) {
1363                 LOG_WARNING("breakpoint already set");
1364                 return retval;
1365         }
1366         /* check available context BRPs */
1367         while ((brp_i < cortex_a->brp_num) && (brp_list[brp_i].used ||
1368                 (brp_list[brp_i].type != BRP_CONTEXT)))
1369                 brp_i++;
1370
1371         if (brp_i >= cortex_a->brp_num) {
1372                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1373                 return ERROR_FAIL;
1374         }
1375
1376         breakpoint_hw_set(breakpoint, brp_i);
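             /* Note: the control word below appears to follow the ARMv7 debug
              * DBGBCR layout (ARM DDI 0406C): bits[22:20] breakpoint type/match
              * mode, bits[8:5] byte address select, bits[2:1] privileged mode
              * control (0b11: match in privileged and user modes), bit[0] enable. */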
1377         control = ((matchmode & 0x7) << 20)
1378                 | (byte_addr_select << 5)
1379                 | (3 << 1) | 1;
1380         brp_list[brp_i].used = true;
1381         brp_list[brp_i].value = (breakpoint->asid);
1382         brp_list[brp_i].control = control;
1383         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1384                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1385                         brp_list[brp_i].value);
1386         if (retval != ERROR_OK)
1387                 return retval;
1388         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1389                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1390                         brp_list[brp_i].control);
1391         if (retval != ERROR_OK)
1392                 return retval;
1393         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1394                 brp_list[brp_i].control,
1395                 brp_list[brp_i].value);
1396         return ERROR_OK;
1397
1398 }
1399
1400 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1401 {
1402         int retval = ERROR_FAIL;
1403         int brp_1 = 0;  /* holds the contextID pair */
1404         int brp_2 = 0;  /* holds the IVA pair */
1405         uint32_t control_ctx, control_iva;
1406         uint8_t ctx_byte_addr_select = 0x0F;
1407         uint8_t iva_byte_addr_select = 0x0F;
1408         uint8_t ctx_matchmode = 0x03;
1409         uint8_t iva_matchmode = 0x01;
1410         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1411         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1412         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1413
1414         if (breakpoint->is_set) {
1415                 LOG_WARNING("breakpoint already set");
1416                 return retval;
1417         }
1418         /* check available context BRPs */
1419         while ((brp_1 < cortex_a->brp_num) && (brp_list[brp_1].used ||
1420                 (brp_list[brp_1].type != BRP_CONTEXT)))
1421                 brp_1++;
1422
1423         LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1424         if (brp_1 >= cortex_a->brp_num) {
1425                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1426                 return ERROR_FAIL;
1427         }
1428
1429         while ((brp_2 < cortex_a->brp_num) && (brp_list[brp_2].used ||
1430                 (brp_list[brp_2].type != BRP_NORMAL)))
1431                 brp_2++;
1432
1433         LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1434         if (brp_2 >= cortex_a->brp_num) {
1435                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1436                 return ERROR_FAIL;
1437         }
1438
1439         breakpoint_hw_set(breakpoint, brp_1);
1440         breakpoint->linked_brp = brp_2;
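             /* Note: the two BRPs appear to be linked through the DBGBCR LBN field
              * (bits[19:16]): each pair names the other, so the breakpoint should
              * only trigger when both the context ID and the instruction address
              * match. */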
1441         control_ctx = ((ctx_matchmode & 0x7) << 20)
1442                 | (brp_2 << 16)
1443                 | (0 << 14)
1444                 | (ctx_byte_addr_select << 5)
1445                 | (3 << 1) | 1;
1446         brp_list[brp_1].used = true;
1447         brp_list[brp_1].value = (breakpoint->asid);
1448         brp_list[brp_1].control = control_ctx;
1449         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1450                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1451                         brp_list[brp_1].value);
1452         if (retval != ERROR_OK)
1453                 return retval;
1454         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1455                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1456                         brp_list[brp_1].control);
1457         if (retval != ERROR_OK)
1458                 return retval;
1459
1460         control_iva = ((iva_matchmode & 0x7) << 20)
1461                 | (brp_1 << 16)
1462                 | (iva_byte_addr_select << 5)
1463                 | (3 << 1) | 1;
1464         brp_list[brp_2].used = true;
1465         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1466         brp_list[brp_2].control = control_iva;
1467         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1468                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1469                         brp_list[brp_2].value);
1470         if (retval != ERROR_OK)
1471                 return retval;
1472         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1473                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1474                         brp_list[brp_2].control);
1475         if (retval != ERROR_OK)
1476                 return retval;
1477
1478         return ERROR_OK;
1479 }
1480
1481 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1482 {
1483         int retval;
1484         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1485         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1486         struct cortex_a_brp *brp_list = cortex_a->brp_list;
1487
1488         if (!breakpoint->is_set) {
1489                 LOG_WARNING("breakpoint not set");
1490                 return ERROR_OK;
1491         }
1492
1493         if (breakpoint->type == BKPT_HARD) {
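                     /* A breakpoint with both an address and an ASID set is a hybrid
                      * breakpoint; it occupies two linked BRPs and both must be freed. */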
1494                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1495                         int brp_i = breakpoint->number;
1496                         int brp_j = breakpoint->linked_brp;
1497                         if (brp_i >= cortex_a->brp_num) {
1498                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1499                                 return ERROR_OK;
1500                         }
1501                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1502                                 brp_list[brp_i].control, brp_list[brp_i].value);
1503                         brp_list[brp_i].used = false;
1504                         brp_list[brp_i].value = 0;
1505                         brp_list[brp_i].control = 0;
1506                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1507                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1508                                         brp_list[brp_i].control);
1509                         if (retval != ERROR_OK)
1510                                 return retval;
1511                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1512                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1513                                         brp_list[brp_i].value);
1514                         if (retval != ERROR_OK)
1515                                 return retval;
1516                         if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1517                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1518                                 return ERROR_OK;
1519                         }
1520                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1521                                 brp_list[brp_j].control, brp_list[brp_j].value);
1522                         brp_list[brp_j].used = false;
1523                         brp_list[brp_j].value = 0;
1524                         brp_list[brp_j].control = 0;
1525                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1526                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
1527                                         brp_list[brp_j].control);
1528                         if (retval != ERROR_OK)
1529                                 return retval;
1530                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1531                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
1532                                         brp_list[brp_j].value);
1533                         if (retval != ERROR_OK)
1534                                 return retval;
1535                         breakpoint->linked_brp = 0;
1536                         breakpoint->is_set = false;
1537                         return ERROR_OK;
1538
1539                 } else {
1540                         int brp_i = breakpoint->number;
1541                         if (brp_i >= cortex_a->brp_num) {
1542                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1543                                 return ERROR_OK;
1544                         }
1545                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1546                                 brp_list[brp_i].control, brp_list[brp_i].value);
1547                         brp_list[brp_i].used = false;
1548                         brp_list[brp_i].value = 0;
1549                         brp_list[brp_i].control = 0;
1550                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1551                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1552                                         brp_list[brp_i].control);
1553                         if (retval != ERROR_OK)
1554                                 return retval;
1555                         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1556                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1557                                         brp_list[brp_i].value);
1558                         if (retval != ERROR_OK)
1559                                 return retval;
1560                         breakpoint->is_set = false;
1561                         return ERROR_OK;
1562                 }
1563         } else {
1564
1565                 /* make sure data cache is cleaned & invalidated down to PoC */
1566                 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1567                         armv7a_cache_flush_virt(target, breakpoint->address,
1568                                                 breakpoint->length);
1569                 }
1570
1571                 /* restore original instruction (kept in target endianness) */
1572                 if (breakpoint->length == 4) {
1573                         retval = target_write_memory(target,
1574                                         breakpoint->address & 0xFFFFFFFE,
1575                                         4, 1, breakpoint->orig_instr);
1576                         if (retval != ERROR_OK)
1577                                 return retval;
1578                 } else {
1579                         retval = target_write_memory(target,
1580                                         breakpoint->address & 0xFFFFFFFE,
1581                                         2, 1, breakpoint->orig_instr);
1582                         if (retval != ERROR_OK)
1583                                 return retval;
1584                 }
1585
1586                 /* update i-cache at breakpoint location */
1587                 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1588                                                  breakpoint->length);
1589                 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1590                                                  breakpoint->length);
1591         }
1592         breakpoint->is_set = false;
1593
1594         return ERROR_OK;
1595 }
1596
1597 static int cortex_a_add_breakpoint(struct target *target,
1598         struct breakpoint *breakpoint)
1599 {
1600         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1601
1602         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1603                 LOG_INFO("no hardware breakpoint available");
1604                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1605         }
1606
1607         if (breakpoint->type == BKPT_HARD)
1608                 cortex_a->brp_num_available--;
1609
1610         return cortex_a_set_breakpoint(target, breakpoint, 0x00);       /* Exact match */
1611 }
1612
1613 static int cortex_a_add_context_breakpoint(struct target *target,
1614         struct breakpoint *breakpoint)
1615 {
1616         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1617
1618         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1619                 LOG_INFO("no hardware breakpoint available");
1620                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1621         }
1622
1623         if (breakpoint->type == BKPT_HARD)
1624                 cortex_a->brp_num_available--;
1625
1626         return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);       /* asid match */
1627 }
1628
1629 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1630         struct breakpoint *breakpoint)
1631 {
1632         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1633
1634         if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1635                 LOG_INFO("no hardware breakpoint available");
1636                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1637         }
1638
1639         if (breakpoint->type == BKPT_HARD)
1640                 cortex_a->brp_num_available--;
1641
1642                 return cortex_a_set_hybrid_breakpoint(target, breakpoint);      /* context ID + IVA match */
1643 }
1644
1645
1646 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1647 {
1648         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1649
1650 #if 0
1651 /* It is perfectly possible to remove breakpoints while the target is running */
1652         if (target->state != TARGET_HALTED) {
1653                 LOG_WARNING("target not halted");
1654                 return ERROR_TARGET_NOT_HALTED;
1655         }
1656 #endif
1657
1658         if (breakpoint->is_set) {
1659                 cortex_a_unset_breakpoint(target, breakpoint);
1660                 if (breakpoint->type == BKPT_HARD)
1661                         cortex_a->brp_num_available++;
1662         }
1663
1664
1665         return ERROR_OK;
1666 }
1667
1668 /**
1669  * Sets a watchpoint for a Cortex-A target in one of the watchpoint units.  It is
1670  * considered a bug to call this function when there are no available watchpoint
1671  * units.
1672  *
1673  * @param target Pointer to a Cortex-A target to set a watchpoint on
1674  * @param watchpoint Pointer to the watchpoint to be set
1675  * @return Error status if watchpoint set fails or the result of executing the
1676  * JTAG queue
1677  */
1678 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1679 {
1680         int retval = ERROR_OK;
1681         int wrp_i = 0;
1682         uint32_t control;
1683         uint32_t address;
1684         uint8_t address_mask;
1685         uint8_t byte_address_select;
1686         uint8_t load_store_access_control = 0x3;
1687         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1688         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1689         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1690
1691         if (watchpoint->is_set) {
1692                 LOG_WARNING("watchpoint already set");
1693                 return retval;
1694         }
1695
1696         /* check available WRPs */
1697         while ((wrp_i < cortex_a->wrp_num) && wrp_list[wrp_i].used)
1698                 wrp_i++;
1699
1700         if (wrp_i >= cortex_a->wrp_num) {
1701                 LOG_ERROR("Cannot find a free Watchpoint Register Pair");
1702                 return ERROR_FAIL;
1703         }
1704
1705         if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1706                         (watchpoint->length & (watchpoint->length - 1))) {
1707                 LOG_WARNING("watchpoint length must be a power of 2");
1708                 return ERROR_FAIL;
1709         }
1710
1711         if (watchpoint->address & (watchpoint->length - 1)) {
1712                 LOG_WARNING("watchpoint address must be aligned to its length");
1713                 return ERROR_FAIL;
1714         }
1715
1716         /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing?  */
1717         /* handle wp length 1 and 2 through byte select */
1718         switch (watchpoint->length) {
1719         case 1:
1720                 byte_address_select = BIT(watchpoint->address & 0x3);
1721                 address = watchpoint->address & ~0x3;
1722                 address_mask = 0;
1723                 break;
1724
1725         case 2:
1726                 byte_address_select = 0x03 << (watchpoint->address & 0x2);
1727                 address = watchpoint->address & ~0x3;
1728                 address_mask = 0;
1729                 break;
1730
1731         case 4:
1732                 byte_address_select = 0x0f;
1733                 address = watchpoint->address;
1734                 address_mask = 0;
1735                 break;
1736
1737         default:
1738                 byte_address_select = 0xff;
1739                 address = watchpoint->address;
1740                 address_mask = ilog2(watchpoint->length);
1741                 break;
1742         }
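             /* Illustrative example: a 2-byte watchpoint at address 0x1002 takes the
              * length-2 case above: byte_address_select = 0x3 << 2 = 0xc (bytes 2-3
              * of the aligned word at 0x1000) and address_mask stays 0. */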
1743
1744         watchpoint_set(watchpoint, wrp_i);
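             /* Note: the control word below appears to follow the ARMv7 debug
              * DBGWCR layout (ARM DDI 0406C): bits[28:24] address mask, bits[12:5]
              * byte address select, bits[4:3] load/store access control, bits[2:1]
              * privileged access control (0b11: privileged and user), bit[0] enable. */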
1745         control = (address_mask << 24) |
1746                 (byte_address_select << 5) |
1747                 (load_store_access_control << 3) |
1748                 (0x3 << 1) | 1;
1749         wrp_list[wrp_i].used = true;
1750         wrp_list[wrp_i].value = address;
1751         wrp_list[wrp_i].control = control;
1752
1753         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1754                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1755                         wrp_list[wrp_i].value);
1756         if (retval != ERROR_OK)
1757                 return retval;
1758
1759         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1760                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1761                         wrp_list[wrp_i].control);
1762         if (retval != ERROR_OK)
1763                 return retval;
1764
1765         LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1766                         wrp_list[wrp_i].control,
1767                         wrp_list[wrp_i].value);
1768
1769         return ERROR_OK;
1770 }
1771
1772 /**
1773  * Unset an existing watchpoint and clear the used watchpoint unit.
1774  *
1775  * @param target Pointer to the target to have the watchpoint removed
1776  * @param watchpoint Pointer to the watchpoint to be removed
1777  * @return Error status while trying to unset the watchpoint or the result of
1778  *         executing the JTAG queue
1779  */
1780 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1781 {
1782         int retval;
1783         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1784         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1785         struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1786
1787         if (!watchpoint->is_set) {
1788                 LOG_WARNING("watchpoint not set");
1789                 return ERROR_OK;
1790         }
1791
1792         int wrp_i = watchpoint->number;
1793         if (wrp_i >= cortex_a->wrp_num) {
1794                 LOG_DEBUG("Invalid WRP number in watchpoint");
1795                 return ERROR_OK;
1796         }
1797         LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1798                         wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1799         wrp_list[wrp_i].used = false;
1800         wrp_list[wrp_i].value = 0;
1801         wrp_list[wrp_i].control = 0;
1802         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1803                         + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1804                         wrp_list[wrp_i].control);
1805         if (retval != ERROR_OK)
1806                 return retval;
1807         retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1808                         + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1809                         wrp_list[wrp_i].value);
1810         if (retval != ERROR_OK)
1811                 return retval;
1812         watchpoint->is_set = false;
1813
1814         return ERROR_OK;
1815 }
1816
1817 /**
1818  * Add a watchpoint to a Cortex-A target.  If there are no watchpoint units
1819  * available, an error response is returned.
1820  *
1821  * @param target Pointer to the Cortex-A target to add a watchpoint to
1822  * @param watchpoint Pointer to the watchpoint to be added
1823  * @return Error status while trying to add the watchpoint
1824  */
1825 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1826 {
1827         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1828
1829         if (cortex_a->wrp_num_available < 1) {
1830                 LOG_INFO("no hardware watchpoint available");
1831                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1832         }
1833
1834         int retval = cortex_a_set_watchpoint(target, watchpoint);
1835         if (retval != ERROR_OK)
1836                 return retval;
1837
1838         cortex_a->wrp_num_available--;
1839         return ERROR_OK;
1840 }
1841
1842 /**
1843  * Remove a watchpoint from a Cortex-A target.  The watchpoint will be unset and
1844  * the watchpoint unit it occupied will be made available again.
1845  *
1846  * @param target Pointer to the target to remove a watchpoint from
1847  * @param watchpoint Pointer to the watchpoint to be removed
1848  * @return Result of trying to unset the watchpoint
1849  */
1850 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1851 {
1852         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1853
1854         if (watchpoint->is_set) {
1855                 cortex_a->wrp_num_available++;
1856                 cortex_a_unset_watchpoint(target, watchpoint);
1857         }
1858         return ERROR_OK;
1859 }
1860
1861
1862 /*
1863  * Cortex-A Reset functions
1864  */
1865
1866 static int cortex_a_assert_reset(struct target *target)
1867 {
1868         struct armv7a_common *armv7a = target_to_armv7a(target);
1869
1870         LOG_DEBUG(" ");
1871
1872         /* FIXME when halt is requested, make it work somehow... */
1873
1874         /* This function can be called in "target not examined" state */
1875
1876         /* Issue some kind of warm reset. */
1877         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1878                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1879         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1880                 /* REVISIT handle "pulls" cases, if there's
1881                  * hardware that needs them to work.
1882                  */
1883
1884                 /*
1885                  * FIXME: fix reset when transport is not JTAG. This is a temporary
1886                  * work-around for release v0.10 that is not intended to stay!
1887                  */
1888                 if (!transport_is_jtag() ||
1889                                 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1890                         adapter_assert_reset();
1891
1892         } else {
1893                 LOG_ERROR("%s: how to reset?", target_name(target));
1894                 return ERROR_FAIL;
1895         }
1896
1897         /* registers are now invalid */
1898         if (target_was_examined(target))
1899                 register_cache_invalidate(armv7a->arm.core_cache);
1900
1901         target->state = TARGET_RESET;
1902
1903         return ERROR_OK;
1904 }
1905
1906 static int cortex_a_deassert_reset(struct target *target)
1907 {
1908         struct armv7a_common *armv7a = target_to_armv7a(target);
1909         int retval;
1910
1911         LOG_DEBUG(" ");
1912
1913         /* be certain SRST is off */
1914         adapter_deassert_reset();
1915
1916         if (target_was_examined(target)) {
1917                 retval = cortex_a_poll(target);
1918                 if (retval != ERROR_OK)
1919                         return retval;
1920         }
1921
1922         if (target->reset_halt) {
1923                 if (target->state != TARGET_HALTED) {
1924                         LOG_WARNING("%s: ran after reset and before halt ...",
1925                                 target_name(target));
1926                         if (target_was_examined(target)) {
1927                                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1928                                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
1929                                 if (retval != ERROR_OK)
1930                                         return retval;
1931                         } else
1932                                 target->state = TARGET_UNKNOWN;
1933                 }
1934         }
1935
1936         return ERROR_OK;
1937 }
1938
1939 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1940 {
1941         /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1942          * The new desired mode must be passed in 'mode'. The current value of DSCR
1943          * must be in *dscr, which is updated with the new value.
1944          *
1945          * This function skips actually sending the mode change over the debug
1946          * interface if the mode is already set as desired.
1947          */
1948         uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1949         if (new_dscr != *dscr) {
1950                 struct armv7a_common *armv7a = target_to_armv7a(target);
1951                 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1952                                 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1953                 if (retval == ERROR_OK)
1954                         *dscr = new_dscr;
1955                 return retval;
1956         } else {
1957                 return ERROR_OK;
1958         }
1959 }
1960
1961 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1962         uint32_t value, uint32_t *dscr)
1963 {
1964         /* Waits until the specified bit(s) of DSCR take on a specified value. */
1965         struct armv7a_common *armv7a = target_to_armv7a(target);
1966         int64_t then;
1967         int retval;
1968
1969         if ((*dscr & mask) == value)
1970                 return ERROR_OK;
1971
1972         then = timeval_ms();
1973         while (1) {
1974                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1975                                 armv7a->debug_base + CPUDBG_DSCR, dscr);
1976                 if (retval != ERROR_OK) {
1977                         LOG_ERROR("Could not read DSCR register");
1978                         return retval;
1979                 }
1980                 if ((*dscr & mask) == value)
1981                         break;
1982                 if (timeval_ms() > then + 1000) {
1983                         LOG_ERROR("timeout waiting for DSCR bit change");
1984                         return ERROR_FAIL;
1985                 }
1986         }
1987         return ERROR_OK;
1988 }
1989
1990 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1991         uint32_t *data, uint32_t *dscr)
1992 {
1993         int retval;
1994         struct armv7a_common *armv7a = target_to_armv7a(target);
1995
1996         /* Move from coprocessor to R0. */
1997         retval = cortex_a_exec_opcode(target, opcode, dscr);
1998         if (retval != ERROR_OK)
1999                 return retval;
2000
2001         /* Move from R0 to DTRTX. */
2002         retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2003         if (retval != ERROR_OK)
2004                 return retval;
2005
2006         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2007          * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2008          * must also check TXfull_l). Most of the time this will be free
2009          * because TXfull_l will be set immediately and cached in dscr. */
2010         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2011                         DSCR_DTRTX_FULL_LATCHED, dscr);
2012         if (retval != ERROR_OK)
2013                 return retval;
2014
2015         /* Read the value transferred to DTRTX. */
2016         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2017                         armv7a->debug_base + CPUDBG_DTRTX, data);
2018         if (retval != ERROR_OK)
2019                 return retval;
2020
2021         return ERROR_OK;
2022 }
2023
2024 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2025         uint32_t *dfsr, uint32_t *dscr)
2026 {
2027         int retval;
2028
2029         if (dfar) {
2030                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2031                 if (retval != ERROR_OK)
2032                         return retval;
2033         }
2034
2035         if (dfsr) {
2036                 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2037                 if (retval != ERROR_OK)
2038                         return retval;
2039         }
2040
2041         return ERROR_OK;
2042 }
2043
2044 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2045         uint32_t data, uint32_t *dscr)
2046 {
2047         int retval;
2048         struct armv7a_common *armv7a = target_to_armv7a(target);
2049
2050         /* Write the value into DTRRX. */
2051         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2052                         armv7a->debug_base + CPUDBG_DTRRX, data);
2053         if (retval != ERROR_OK)
2054                 return retval;
2055
2056         /* Move from DTRRX to R0. */
2057         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2058         if (retval != ERROR_OK)
2059                 return retval;
2060
2061         /* Move from R0 to coprocessor. */
2062         retval = cortex_a_exec_opcode(target, opcode, dscr);
2063         if (retval != ERROR_OK)
2064                 return retval;
2065
2066         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2067          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2068          * check RXfull_l). Most of the time this will be free because RXfull_l
2069          * will be cleared immediately and cached in dscr. */
2070         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2071         if (retval != ERROR_OK)
2072                 return retval;
2073
2074         return ERROR_OK;
2075 }
2076
2077 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2078         uint32_t dfsr, uint32_t *dscr)
2079 {
2080         int retval;
2081
2082         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2083         if (retval != ERROR_OK)
2084                 return retval;
2085
2086         retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2087         if (retval != ERROR_OK)
2088                 return retval;
2089
2090         return ERROR_OK;
2091 }
2092
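     /* Map a Data Fault Status Register value to an OpenOCD error code. Bit 9 of
      * DFSR selects between the long-descriptor (LPAE) encoding and the short
      * "normal" encoding, whose status field spans bits [10] and [3:0]; the fault
      * groupings below reflect our reading of ARM DDI 0406C. */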
2093 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2094 {
2095         uint32_t status, upper4;
2096
2097         if (dfsr & (1 << 9)) {
2098                 /* LPAE format. */
2099                 status = dfsr & 0x3f;
2100                 upper4 = status >> 2;
2101                 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2102                         return ERROR_TARGET_TRANSLATION_FAULT;
2103                 else if (status == 33)
2104                         return ERROR_TARGET_UNALIGNED_ACCESS;
2105                 else
2106                         return ERROR_TARGET_DATA_ABORT;
2107         } else {
2108                 /* Normal format. */
2109                 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2110                 if (status == 1)
2111                         return ERROR_TARGET_UNALIGNED_ACCESS;
2112                 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2113                                 status == 9 || status == 11 || status == 13 || status == 15)
2114                         return ERROR_TARGET_TRANSLATION_FAULT;
2115                 else
2116                         return ERROR_TARGET_DATA_ABORT;
2117         }
2118 }
2119
2120 static int cortex_a_write_cpu_memory_slow(struct target *target,
2121         uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2122 {
2123         /* Writes count objects of size size from *buffer. Old value of DSCR must
2124          * be in *dscr; updated to new value. This is slow because it works for
2125          * non-word-sized objects. Avoid unaligned accesses as they do not work
2126          * on memory address space without "Normal" attribute. If size == 4 and
2127          * the address is aligned, cortex_a_write_cpu_memory_fast should be
2128          * preferred.
2129          * Preconditions:
2130          * - Address is in R0.
2131          * - R0 is marked dirty.
2132          */
2133         struct armv7a_common *armv7a = target_to_armv7a(target);
2134         struct arm *arm = &armv7a->arm;
2135         int retval;
2136
2137         /* Mark register R1 as dirty, to use for transferring data. */
2138         arm_reg_current(arm, 1)->dirty = true;
2139
2140         /* Switch to non-blocking mode if not already in that mode. */
2141         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2142         if (retval != ERROR_OK)
2143                 return retval;
2144
2145         /* Go through the objects. */
2146         while (count) {
2147                 /* Write the value to store into DTRRX. */
2148                 uint32_t data, opcode;
2149                 if (size == 1)
2150                         data = *buffer;
2151                 else if (size == 2)
2152                         data = target_buffer_get_u16(target, buffer);
2153                 else
2154                         data = target_buffer_get_u32(target, buffer);
2155                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2156                                 armv7a->debug_base + CPUDBG_DTRRX, data);
2157                 if (retval != ERROR_OK)
2158                         return retval;
2159
2160                 /* Transfer the value from DTRRX to R1. */
2161                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2162                 if (retval != ERROR_OK)
2163                         return retval;
2164
2165                 /* Write the value transferred to R1 into memory. */
2166                 if (size == 1)
2167                         opcode = ARMV4_5_STRB_IP(1, 0);
2168                 else if (size == 2)
2169                         opcode = ARMV4_5_STRH_IP(1, 0);
2170                 else
2171                         opcode = ARMV4_5_STRW_IP(1, 0);
2172                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2173                 if (retval != ERROR_OK)
2174                         return retval;
2175
2176                 /* Check for faults and return early. */
2177                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2178                         return ERROR_OK; /* A data fault is not considered a system failure. */
2179
2180                 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2181                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2182                  * must also check RXfull_l). Most of the time this will be free
2183                  * because RXfull_l will be cleared immediately and cached in dscr. */
2184                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2185                 if (retval != ERROR_OK)
2186                         return retval;
2187
2188                 /* Advance. */
2189                 buffer += size;
2190                 --count;
2191         }
2192
2193         return ERROR_OK;
2194 }
2195
2196 static int cortex_a_write_cpu_memory_fast(struct target *target,
2197         uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2198 {
2199         /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2200          * in *dscr; updated to new value. This is fast but only works for
2201          * word-sized objects at aligned addresses.
2202          * Preconditions:
2203          * - Address is in R0 and must be a multiple of 4.
2204          * - R0 is marked dirty.
2205          */
2206         struct armv7a_common *armv7a = target_to_armv7a(target);
2207         int retval;
2208
2209         /* Switch to fast mode if not already in that mode. */
2210         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2211         if (retval != ERROR_OK)
2212                 return retval;
2213
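             /* Note (our understanding of DCC fast/download mode): the instruction
              * latched into ITR is re-issued on every DTRRX write, so streaming the
              * buffer into DTRRX below executes one post-indexed STC per word,
              * storing through R0 and advancing it by 4 each time. */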
2214         /* Latch STC instruction. */
2215         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2216                         armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2217         if (retval != ERROR_OK)
2218                 return retval;
2219
2220         /* Transfer all the data and issue all the instructions. */
2221         return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2222                         4, count, armv7a->debug_base + CPUDBG_DTRRX);
2223 }
2224
2225 static int cortex_a_write_cpu_memory(struct target *target,
2226         uint32_t address, uint32_t size,
2227         uint32_t count, const uint8_t *buffer)
2228 {
2229         /* Write memory through the CPU. */
2230         int retval, final_retval;
2231         struct armv7a_common *armv7a = target_to_armv7a(target);
2232         struct arm *arm = &armv7a->arm;
2233         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2234
2235         LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2236                           address, size, count);
2237         if (target->state != TARGET_HALTED) {
2238                 LOG_WARNING("target not halted");
2239                 return ERROR_TARGET_NOT_HALTED;
2240         }
2241
2242         if (!count)
2243                 return ERROR_OK;
2244
2245         /* Clear any abort. */
2246         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2247                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2248         if (retval != ERROR_OK)
2249                 return retval;
2250
2251         /* Read DSCR. */
2252         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2253                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2254         if (retval != ERROR_OK)
2255                 return retval;
2256
2257         /* Switch to non-blocking mode if not already in that mode. */
2258         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2259         if (retval != ERROR_OK)
2260                 goto out;
2261
2262         /* Mark R0 as dirty. */
2263         arm_reg_current(arm, 0)->dirty = true;
2264
2265         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2266         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2267         if (retval != ERROR_OK)
2268                 goto out;
2269
2270         /* Get the memory address into R0. */
2271         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2272                         armv7a->debug_base + CPUDBG_DTRRX, address);
2273         if (retval != ERROR_OK)
2274                 goto out;
2275         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2276         if (retval != ERROR_OK)
2277                 goto out;
2278
2279         if (size == 4 && (address % 4) == 0) {
2280                 /* We are doing a word-aligned transfer, so use fast mode. */
2281                 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2282         } else {
2283                 /* Use slow path. Adjust size for aligned accesses */
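                     /* Illustrative example: three 32-bit words requested at an
                      * address with offset 2 become six halfword accesses (size 2,
                      * count 6), so each access stays naturally aligned. */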
2284                 switch (address % 4) {
2285                         case 1:
2286                         case 3:
2287                                 count *= size;
2288                                 size = 1;
2289                                 break;
2290                         case 2:
2291                                 if (size == 4) {
2292                                         count *= 2;
2293                                         size = 2;
2294                                 }
2295                         case 0:
2296                         default:
2297                                 break;
2298                 }
2299                 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2300         }
2301
2302 out:
2303         final_retval = retval;
2304
2305         /* Switch to non-blocking mode if not already in that mode. */
2306         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2307         if (final_retval == ERROR_OK)
2308                 final_retval = retval;
2309
2310         /* Wait for last issued instruction to complete. */
2311         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2312         if (final_retval == ERROR_OK)
2313                 final_retval = retval;
2314
2315         /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2316          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2317          * check RXfull_l). Most of the time this will be free because RXfull_l
2318          * will be cleared immediately and cached in dscr. However, don't do this
2319          * if there is a fault, because then the instruction might not have completed
2320          * successfully. */
2321         if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2322                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2323                 if (retval != ERROR_OK)
2324                         return retval;
2325         }
2326
2327         /* If there were any sticky abort flags, clear them. */
2328         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2329                 fault_dscr = dscr;
2330                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2331                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2332                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2333         } else {
2334                 fault_dscr = 0;
2335         }
2336
2337         /* Handle synchronous data faults. */
2338         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2339                 if (final_retval == ERROR_OK) {
2340                         /* Final return value will reflect cause of fault. */
2341                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2342                         if (retval == ERROR_OK) {
2343                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2344                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2345                         } else
2346                                 final_retval = retval;
2347                 }
2348                 /* Fault destroyed DFAR/DFSR; restore them. */
2349                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2350                 if (retval != ERROR_OK)
2351                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2352         }
2353
2354         /* Handle asynchronous data faults. */
2355         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2356                 if (final_retval == ERROR_OK)
2357                         /* No other error has been recorded so far, so keep this one. */
2358                         final_retval = ERROR_TARGET_DATA_ABORT;
2359         }
2360
2361         /* If the DCC is nonempty, clear it. */
2362         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2363                 uint32_t dummy;
2364                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2365                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2366                 if (final_retval == ERROR_OK)
2367                         final_retval = retval;
2368         }
2369         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2370                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2371                 if (final_retval == ERROR_OK)
2372                         final_retval = retval;
2373         }
2374
2375         /* Done. */
2376         return final_retval;
2377 }
2378
2379 static int cortex_a_read_cpu_memory_slow(struct target *target,
2380         uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2381 {
2382         /* Reads count objects of size size into *buffer. Old value of DSCR must be
2383          * in *dscr; updated to new value. This is slow because it works for
2384          * non-word-sized objects. Avoid unaligned accesses as they do not work
2385          * on memory address space without "Normal" attribute. If size == 4 and
2386          * the address is aligned, cortex_a_read_cpu_memory_fast should be
2387          * preferred.
2388          * Preconditions:
2389          * - Address is in R0.
2390          * - R0 is marked dirty.
2391          */
2392         struct armv7a_common *armv7a = target_to_armv7a(target);
2393         struct arm *arm = &armv7a->arm;
2394         int retval;
2395
2396         /* Mark register R1 as dirty, to use for transferring data. */
2397         arm_reg_current(arm, 1)->dirty = true;
2398
2399         /* Switch to non-blocking mode if not already in that mode. */
2400         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2401         if (retval != ERROR_OK)
2402                 return retval;
2403
2404         /* Go through the objects. */
2405         while (count) {
2406                 /* Issue a load of the appropriate size to R1. */
2407                 uint32_t opcode, data;
2408                 if (size == 1)
2409                         opcode = ARMV4_5_LDRB_IP(1, 0);
2410                 else if (size == 2)
2411                         opcode = ARMV4_5_LDRH_IP(1, 0);
2412                 else
2413                         opcode = ARMV4_5_LDRW_IP(1, 0);
2414                 retval = cortex_a_exec_opcode(target, opcode, dscr);
2415                 if (retval != ERROR_OK)
2416                         return retval;
2417
2418                 /* Issue a write of R1 to DTRTX. */
2419                 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2420                 if (retval != ERROR_OK)
2421                         return retval;
2422
2423                 /* Check for faults and return early. */
2424                 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2425                         return ERROR_OK; /* A data fault is not considered a system failure. */
2426
2427                 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2428                  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2429                  * must also check TXfull_l). Most of the time this will be free
2430                  * because TXfull_l will be set immediately and cached in dscr. */
2431                 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2432                                 DSCR_DTRTX_FULL_LATCHED, dscr);
2433                 if (retval != ERROR_OK)
2434                         return retval;
2435
2436                 /* Read the value transferred to DTRTX into the buffer. */
2437                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2438                                 armv7a->debug_base + CPUDBG_DTRTX, &data);
2439                 if (retval != ERROR_OK)
2440                         return retval;
2441                 if (size == 1)
2442                         *buffer = (uint8_t) data;
2443                 else if (size == 2)
2444                         target_buffer_set_u16(target, buffer, (uint16_t) data);
2445                 else
2446                         target_buffer_set_u32(target, buffer, data);
2447
2448                 /* Advance. */
2449                 buffer += size;
2450                 --count;
2451         }
2452
2453         return ERROR_OK;
2454 }
2455
2456 static int cortex_a_read_cpu_memory_fast(struct target *target,
2457         uint32_t count, uint8_t *buffer, uint32_t *dscr)
2458 {
2459         /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2460          * *dscr; updated to new value. This is fast but only works for word-sized
2461          * objects at aligned addresses.
2462          * Preconditions:
2463          * - Address is in R0 and must be a multiple of 4.
2464          * - R0 is marked dirty.
2465          */
2466         struct armv7a_common *armv7a = target_to_armv7a(target);
2467         uint32_t u32;
2468         int retval;
2469
2470         /* Switch to non-blocking mode if not already in that mode. */
2471         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2472         if (retval != ERROR_OK)
2473                 return retval;
2474
2475         /* Issue the LDC instruction via a write to ITR. */
2476         retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2477         if (retval != ERROR_OK)
2478                 return retval;
2479
2480         count--;
2481
2482         if (count > 0) {
2483                 /* Switch to fast mode if not already in that mode. */
2484                 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2485                 if (retval != ERROR_OK)
2486                         return retval;
2487
2488                 /* Latch LDC instruction. */
2489                 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2490                                 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2491                 if (retval != ERROR_OK)
2492                         return retval;
2493
2494                 /* Read the value transferred to DTRTX into the buffer. Due to fast
2495                  * mode rules, this blocks until the instruction finishes executing and
2496                  * then reissues the read instruction to read the next word from
2497                  * memory. The last read of DTRTX in this call reads the second-to-last
2498                  * word from memory and issues the read instruction for the last word.
2499                  */
2500                 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2501                                 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2502                 if (retval != ERROR_OK)
2503                         return retval;
2504
2505                 /* Advance. */
2506                 buffer += count * 4;
2507         }
2508
2509         /* Wait for last issued instruction to complete. */
2510         retval = cortex_a_wait_instrcmpl(target, dscr, false);
2511         if (retval != ERROR_OK)
2512                 return retval;
2513
2514         /* Switch to non-blocking mode if not already in that mode. */
2515         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2516         if (retval != ERROR_OK)
2517                 return retval;
2518
2519         /* Check for faults and return early. */
2520         if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2521                 return ERROR_OK; /* A data fault is not considered a system failure. */
2522
2523         /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2524          * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2525          * check TXfull_l). Most of the time this will be free because TXfull_l
2526          * will be set immediately and cached in dscr. */
2527         retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2528                         DSCR_DTRTX_FULL_LATCHED, dscr);
2529         if (retval != ERROR_OK)
2530                 return retval;
2531
2532         /* Read the value transferred to DTRTX into the buffer. This is the last
2533          * word. */
2534         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2535                         armv7a->debug_base + CPUDBG_DTRTX, &u32);
2536         if (retval != ERROR_OK)
2537                 return retval;
2538         target_buffer_set_u32(target, buffer, u32);
2539
2540         return ERROR_OK;
2541 }
2542
2543 static int cortex_a_read_cpu_memory(struct target *target,
2544         uint32_t address, uint32_t size,
2545         uint32_t count, uint8_t *buffer)
2546 {
2547         /* Read memory through the CPU. */
2548         int retval, final_retval;
2549         struct armv7a_common *armv7a = target_to_armv7a(target);
2550         struct arm *arm = &armv7a->arm;
2551         uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2552
2553         LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %"  PRIu32 " count %"  PRIu32,
2554                           address, size, count);
2555         if (target->state != TARGET_HALTED) {
2556                 LOG_WARNING("target not halted");
2557                 return ERROR_TARGET_NOT_HALTED;
2558         }
2559
2560         if (!count)
2561                 return ERROR_OK;
2562
2563         /* Clear any abort. */
2564         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2565                         armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2566         if (retval != ERROR_OK)
2567                 return retval;
2568
2569         /* Read DSCR */
2570         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2571                         armv7a->debug_base + CPUDBG_DSCR, &dscr);
2572         if (retval != ERROR_OK)
2573                 return retval;
2574
2575         /* Switch to non-blocking mode if not already in that mode. */
2576         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2577         if (retval != ERROR_OK)
2578                 goto out;
2579
2580         /* Mark R0 as dirty. */
2581         arm_reg_current(arm, 0)->dirty = true;
2582
2583         /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2584         retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2585         if (retval != ERROR_OK)
2586                 goto out;
2587
2588         /* Get the memory address into R0. */
2589         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2590                         armv7a->debug_base + CPUDBG_DTRRX, address);
2591         if (retval != ERROR_OK)
2592                 goto out;
2593         retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2594         if (retval != ERROR_OK)
2595                 goto out;
2596
2597         if (size == 4 && (address % 4) == 0) {
2598                 /* We are doing a word-aligned transfer, so use fast mode. */
2599                 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2600         } else {
2601                 /* Use the slow path. Reduce the access size so every transfer stays naturally aligned. */
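                /* For example, a size-4, count-2 request at an address ending in ...2
                 * becomes a size-2, count-4 request so each access stays aligned. */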
2602                 switch (address % 4) {
2603                         case 1:
2604                         case 3:
2605                                 count *= size;
2606                                 size = 1;
2607                                 break;
2608                         case 2:
2609                                 if (size == 4) {
2610                                         count *= 2;
2611                                         size = 2;
2612                                 }
2613                                 break;
2614                         case 0:
2615                         default:
2616                                 break;
2617                 }
2618                 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2619         }
2620
2621 out:
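        /* Cleanup path, taken on success and failure alike: restore non-blocking DCC
         * mode, wait for the last issued instruction, translate any sticky abort into
         * an error code, and drain the DCC so later operations start clean. */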
2622         final_retval = retval;
2623
2624         /* Switch to non-blocking mode if not already in that mode. */
2625         retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2626         if (final_retval == ERROR_OK)
2627                 final_retval = retval;
2628
2629         /* Wait for last issued instruction to complete. */
2630         retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2631         if (final_retval == ERROR_OK)
2632                 final_retval = retval;
2633
2634         /* If there were any sticky abort flags, clear them. */
2635         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2636                 fault_dscr = dscr;
2637                 mem_ap_write_atomic_u32(armv7a->debug_ap,
2638                                 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2639                 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2640         } else {
2641                 fault_dscr = 0;
2642         }
2643
2644         /* Handle synchronous data faults. */
2645         if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2646                 if (final_retval == ERROR_OK) {
2647                         /* Final return value will reflect cause of fault. */
2648                         retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2649                         if (retval == ERROR_OK) {
2650                                 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2651                                 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2652                         } else
2653                                 final_retval = retval;
2654                 }
2655                 /* The fault clobbered DFAR/DFSR; restore the original values. */
2656                 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2657                 if (retval != ERROR_OK)
2658                         LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2659         }
2660
2661         /* Handle asynchronous data faults. */
2662         if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2663                 if (final_retval == ERROR_OK)
2664                         /* No other error has been recorded so far, so keep this one. */
2665                         final_retval = ERROR_TARGET_DATA_ABORT;
2666         }
2667
2668         /* If the DCC is nonempty, clear it. */
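        /* A pending DTRTX word is drained from the debugger side below; a word stuck
         * in DTRRX is consumed by having the core execute the MRC p14 transfer. */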
2669         if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2670                 uint32_t dummy;
2671                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2672                                 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2673                 if (final_retval == ERROR_OK)
2674                         final_retval = retval;
2675         }
2676         if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2677                 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2678                 if (final_retval == ERROR_OK)
2679                         final_retval = retval;
2680         }
2681
2682         /* Done. */
2683         return final_retval;
2684 }
2685
2686
2687 /*
2688  * Cortex-A Memory access
2689  *
2690  * This is the same as for Cortex-M3, but we must also use the
2691  * correct AP number for every access.
2692  */
2693
2694 static int cortex_a_read_phys_memory(struct target *target,
2695         target_addr_t address, uint32_t size,
2696         uint32_t count, uint8_t *buffer)
2697 {
2698         int retval;
2699
2700         if (!count || !buffer)
2701                 return ERROR_COMMAND_SYNTAX_ERROR;
2702
2703         LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2704                 address, size, count);
2705
2706         /* read memory through the CPU */
2707         cortex_a_prep_memaccess(target, 1);
2708         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2709         cortex_a_post_memaccess(target, 1);
2710
2711         return retval;
2712 }
2713
2714 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2715         uint32_t size, uint32_t count, uint8_t *buffer)
2716 {
2717         int retval;
2718
2719         /* cortex_a handles unaligned memory access */
2720         LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2721                 address, size, count);
2722
2723         cortex_a_prep_memaccess(target, 0);
2724         retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2725         cortex_a_post_memaccess(target, 0);
2726
2727         return retval;
2728 }
2729
2730 static int cortex_a_write_phys_memory(struct target *target,
2731         target_addr_t address, uint32_t size,
2732         uint32_t count, const uint8_t *buffer)
2733 {
2734         int retval;
2735
2736         if (!count || !buffer)
2737                 return ERROR_COMMAND_SYNTAX_ERROR;
2738
2739         LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2740                 address, size, count);
2741
2742         /* write memory through the CPU */
2743         cortex_a_prep_memaccess(target, 1);
2744         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2745         cortex_a_post_memaccess(target, 1);
2746
2747         return retval;
2748 }
2749
2750 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2751         uint32_t size, uint32_t count, const uint8_t *buffer)
2752 {
2753         int retval;
2754
2755         /* cortex_a handles unaligned memory access */
2756         LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2757                 address, size, count);
2758
2759         /* Memory writes through the CPU bypass the caches, so flush the affected range before writing */
2760         armv7a_cache_auto_flush_on_write(target, address, size * count);
2761
2762         cortex_a_prep_memaccess(target, 0);
2763         retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2764         cortex_a_post_memaccess(target, 0);
2765         return retval;
2766 }
2767
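/*
 * Buffer reads use the widest possible accesses: first bump the address up to
 * 4-byte alignment with byte/halfword reads, then transfer the bulk with
 * descending access sizes (4, 2, 1), each taking as much of the remaining
 * count as it can.
 *
 * Worked example: address = 0x1001, count = 7
 *   1-byte read at 0x1001 -> address 0x1002, 6 bytes left
 *   2-byte read at 0x1002 -> address 0x1004, 4 bytes left
 *   4-byte read at 0x1004 -> done
 */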
2768 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2769                                 uint32_t count, uint8_t *buffer)
2770 {
2771         uint32_t size;
2772
2773         /* Align the address up to a maximum access size of 4 bytes. The loop condition
2774          * ensures the next, larger-size pass still has enough data left to transfer. */
2775         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2776                 if (address & size) {
2777                         int retval = target_read_memory(target, address, size, 1, buffer);
2778                         if (retval != ERROR_OK)
2779                                 return retval;
2780                         address += size;
2781                         count -= size;
2782                         buffer += size;
2783                 }
2784         }
2785
2786         /* Read the data with as large access size as possible. */
2787         for (; size > 0; size /= 2) {
2788                 uint32_t aligned = count - count % size;
2789                 if (aligned > 0) {
2790                         int retval = target_read_memory(target, address, size, aligned / size, buffer);
2791                         if (retval != ERROR_OK)
2792                                 return retval;
2793                         address += aligned;
2794                         count -= aligned;
2795                         buffer += aligned;
2796                 }
2797         }
2798
2799         return ERROR_OK;
2800 }
2801
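/* Mirror image of cortex_a_read_buffer(): align the head with narrow writes,
 * then write the rest with descending access sizes. */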
2802 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2803                                  uint32_t count, const uint8_t *buffer)
2804 {
2805         uint32_t size;
2806
2807         /* Align the address up to a maximum access size of 4 bytes. The loop condition
2808          * ensures the next, larger-size pass still has enough data left to transfer. */
2809         for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2810                 if (address & size) {
2811                         int retval = target_write_memory(target, address, size, 1, buffer);
2812                         if (retval != ERROR_OK)
2813                                 return retval;
2814                         address += size;
2815                         count -= size;
2816                         buffer += size;
2817                 }
2818         }
2819
2820         /* Write the data with as large access size as possible. */
2821         for (; size > 0; size /= 2) {
2822                 uint32_t aligned = count - count % size;
2823                 if (aligned > 0) {
2824                         int retval = target_write_memory(target, address, size, aligned / size, buffer);
2825                         if (retval != ERROR_OK)
2826                                 return retval;
2827                         address += aligned;
2828                         count -= aligned;
2829                         buffer += aligned;
2830                 }
2831         }
2832
2833         return ERROR_OK;
2834 }
2835
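/*
 * Periodic timer callback: while the target is running, drain debug messages
 * that target firmware posts through the DCC (DTRTX) and hand each word to
 * the generic target_request() layer.
 */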
2836 static int cortex_a_handle_target_request(void *priv)
2837 {
2838         struct target *target = priv;
2839         struct armv7a_common *armv7a = target_to_armv7a(target);
2840         int retval;
2841
2842         if (!target_was_examined(target))
2843                 return ERROR_OK;
2844         if (!target->dbg_msg_enabled)
2845                 return ERROR_OK;
2846
2847         if (target->state == TARGET_RUNNING) {
2848                 uint32_t request;
2849                 uint32_t dscr;
2850                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2851                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2852
2853                 /* check if we have data */
2854                 int64_t then = timeval_ms();
2855                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2856                         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2857                                         armv7a->debug_base + CPUDBG_DTRTX, &request);
2858                         if (retval == ERROR_OK) {
2859                                 target_request(target, request);
2860                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2861                                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2862                         }
2863                         if (timeval_ms() > then + 1000) {
2864                                 LOG_ERROR("Timeout waiting for dtr tx full");
2865                                 return ERROR_FAIL;
2866                         }
2867                 }
2868         }
2869
2870         return ERROR_OK;
2871 }
2872
2873 /*
2874  * Cortex-A target information and configuration
2875  */
2876
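/*
 * First-time examination: locate the APB-AP (or reuse the configured AP),
 * determine the core's debug base address from the ROM table if it was not
 * given, read DIDR/CPUID, check power and OS Lock state, and size the
 * breakpoint and watchpoint register banks.
 */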
2877 static int cortex_a_examine_first(struct target *target)
2878 {
2879         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2880         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2881         struct adiv5_dap *swjdp = armv7a->arm.dap;
2882         struct adiv5_private_config *pc = target->private_config;
2883
2884         int i;
2885         int retval = ERROR_OK;
2886         uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2887
2888         if (armv7a->debug_ap) {
2889                 dap_put_ap(armv7a->debug_ap);
2890                 armv7a->debug_ap = NULL;
2891         }
2892
2893         if (pc->ap_num == DP_APSEL_INVALID) {
2894                 /* Search for the APB-AP - it is needed for access to debug registers */
2895                 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2896                 if (retval != ERROR_OK) {
2897                         LOG_ERROR("Could not find APB-AP for debug access");
2898                         return retval;
2899                 }
2900         } else {
2901                 armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2902                 if (!armv7a->debug_ap) {
2903                         LOG_ERROR("Cannot get AP");
2904                         return ERROR_FAIL;
2905                 }
2906         }
2907
2908         retval = mem_ap_init(armv7a->debug_ap);
2909         if (retval != ERROR_OK) {
2910                 LOG_ERROR("Could not initialize the APB-AP");
2911                 return retval;
2912         }
2913
2914         armv7a->debug_ap->memaccess_tck = 80;
2915
2916         if (!target->dbgbase_set) {
2917                 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2918                           target->cmd_name);
2919                 /* Look up the core debug component in the CoreSight ROM table */
2920                 retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2921                                 &armv7a->debug_base, target->coreid);
2922                 if (retval != ERROR_OK) {
2923                         LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2924                                   target->cmd_name);
2925                         return retval;
2926                 }
2927                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2928                           target->coreid, armv7a->debug_base);
2929         } else
2930                 armv7a->debug_base = target->dbgbase;
2931
2932         if ((armv7a->debug_base & (1UL<<31)) == 0)
2933                 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2934                             "Please fix the target configuration.", target_name(target));
2935
2936         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2937                         armv7a->debug_base + CPUDBG_DIDR, &didr);
2938         if (retval != ERROR_OK) {
2939                 LOG_DEBUG("Examine %s failed", "DIDR");
2940                 return retval;
2941         }
2942
2943         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2944                         armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2945         if (retval != ERROR_OK) {
2946                 LOG_DEBUG("Examine %s failed", "CPUID");
2947                 return retval;
2948         }
2949
2950         LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2951         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2952
2953         cortex_a->didr = didr;
2954         cortex_a->cpuid = cpuid;
2955
2956         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2957                                     armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2958         if (retval != ERROR_OK)
2959                 return retval;
2960         LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR  0x%" PRIx32, target->coreid, dbg_osreg);
2961
2962         if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2963                 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2964                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2965                 return ERROR_TARGET_INIT_FAILED;
2966         }
2967
2968         if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2969                 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2970
2971         /* Read DBGOSLSR and check if OSLK is implemented */
2972         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2973                                 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2974         if (retval != ERROR_OK)
2975                 return retval;
2976         LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2977
2978         /* check if OS Lock is implemented */
2979         if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2980                 /* check if OS Lock is set */
2981                 if (dbg_osreg & OSLSR_OSLK) {
2982                         LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2983
2984                         retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2985                                                         armv7a->debug_base + CPUDBG_OSLAR,
2986                                                         0);
2987                         if (retval == ERROR_OK)
2988                                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2989                                                         armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2990
2991                         /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2992                         if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2993                                 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2994                                                 target->coreid);
2995                                 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2996                                 return ERROR_TARGET_INIT_FAILED;
2997                         }
2998                 }
2999         }
3000
3001         retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3002                                  armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
3003         if (retval != ERROR_OK)
3004                 return retval;
3005
3006         if (dbg_idpfr1 & 0x000000f0) {
3007                 LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
3008                                 target->coreid);
3009                 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
3010         }
3011         if (dbg_idpfr1 & 0x0000f000) {
3012                 LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
3013                                 target->coreid);
3014                 /*
3015                  * Overwrite and simplify the checks: the Virtualization Extensions
3016                  * require the Security Extensions to be implemented as well.
3017                  */
3018                 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3019         }
3020
3021         /* Avoid recreating the register cache on re-examination */
3022         if (!target_was_examined(target)) {
3023                 retval = cortex_a_dpm_setup(cortex_a, didr);
3024                 if (retval != ERROR_OK)
3025                         return retval;
3026         }
3027
3028         /* Setup Breakpoint Register Pairs */
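        /* Per the ARMv7 debug architecture, DIDR[27:24] holds the number of breakpoint
         * register pairs minus one, DIDR[23:20] the context-matching pairs minus one,
         * and DIDR[31:28] the watchpoint pairs minus one (used further below). */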
3029         cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3030         cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3031         cortex_a->brp_num_available = cortex_a->brp_num;
3032         free(cortex_a->brp_list);
3033         cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3034 /*      cortex_a->brb_enabled = ????; */
3035         for (i = 0; i < cortex_a->brp_num; i++) {
3036                 cortex_a->brp_list[i].used = false;
3037                 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3038                         cortex_a->brp_list[i].type = BRP_NORMAL;
3039                 else
3040                         cortex_a->brp_list[i].type = BRP_CONTEXT;
3041                 cortex_a->brp_list[i].value = 0;
3042                 cortex_a->brp_list[i].control = 0;
3043                 cortex_a->brp_list[i].brpn = i;
3044         }
3045
3046         LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3047
3048         /* Setup Watchpoint Register Pairs */
3049         cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3050         cortex_a->wrp_num_available = cortex_a->wrp_num;
3051         free(cortex_a->wrp_list);
3052         cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3053         for (i = 0; i < cortex_a->wrp_num; i++) {
3054                 cortex_a->wrp_list[i].used = false;
3055                 cortex_a->wrp_list[i].value = 0;
3056                 cortex_a->wrp_list[i].control = 0;
3057                 cortex_a->wrp_list[i].wrpn = i;
3058         }
3059
3060         LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3061
3062         /* select debug_ap as default */
3063         swjdp->apsel = armv7a->debug_ap->ap_num;
3064
3065         target_set_examined(target);
3066         return ERROR_OK;
3067 }
3068
3069 static int cortex_a_examine(struct target *target)
3070 {
3071         int retval = ERROR_OK;
3072
3073         /* Reestablish communication after target reset */
3074         retval = cortex_a_examine_first(target);
3075
3076         /* Configure core debug access */
3077         if (retval == ERROR_OK)
3078                 retval = cortex_a_init_debug_access(target);
3079
3080         return retval;
3081 }
3082
3083 /*
3084  *      Cortex-A target creation and initialization
3085  */
3086
3087 static int cortex_a_init_target(struct command_context *cmd_ctx,
3088         struct target *target)
3089 {
3090         /* Most of the per-target setup is already done by examine_first() */
3091         arm_semihosting_init(target);
3092         return ERROR_OK;
3093 }
3094
3095 static int cortex_a_init_arch_info(struct target *target,
3096         struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3097 {
3098         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3099
3100         /* Setup struct cortex_a_common */
3101         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3102         armv7a->arm.dap = dap;
3103
3104         /* register arch-specific functions */
3105         armv7a->examine_debug_reason = NULL;
3106
3107         armv7a->post_debug_entry = cortex_a_post_debug_entry;
3108
3109         armv7a->pre_restore_context = NULL;
3110
3111         armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3112
3113
3114 /*      arm7_9->handle_target_request = cortex_a_handle_target_request; */
3115
3116         /* REVISIT v7a setup should be in a v7a-specific routine */
3117         armv7a_init_arch_info(target, armv7a);
3118         target_register_timer_callback(cortex_a_handle_target_request, 1,
3119                 TARGET_TIMER_TYPE_PERIODIC, target);
3120
3121         return ERROR_OK;
3122 }
3123
3124 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3125 {
3126         struct cortex_a_common *cortex_a;
3127         struct adiv5_private_config *pc;
3128
3129         if (!target->private_config)
3130                 return ERROR_FAIL;
3131
3132         pc = (struct adiv5_private_config *)target->private_config;
3133
3134         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3135         if (!cortex_a) {
3136                 LOG_ERROR("Out of memory");
3137                 return ERROR_FAIL;
3138         }
3139         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3140         cortex_a->armv7a_common.is_armv7r = false;
3141         cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3142
3143         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3144 }
3145
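/* Same as cortex_a_target_create(), but the core is flagged as ARMv7-R, so MMU
 * handling and the virtual/physical address distinction are skipped. */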
3146 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3147 {
3148         struct cortex_a_common *cortex_a;
3149         struct adiv5_private_config *pc;
3150
3151         pc = (struct adiv5_private_config *)target->private_config;
3152         if (adiv5_verify_config(pc) != ERROR_OK)
3153                 return ERROR_FAIL;
3154
3155         cortex_a = calloc(1, sizeof(struct cortex_a_common));
3156         if (!cortex_a) {
3157                 LOG_ERROR("Out of memory");
3158                 return ERROR_FAIL;
3159         }
3160         cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3161         cortex_a->armv7a_common.is_armv7r = true;
3162
3163         return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3164 }
3165
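/*
 * Tear down a target: if it was examined, clear DSCR halting-debug mode so the
 * core runs undisturbed after OpenOCD detaches, then release the debug AP and
 * free the per-target data structures.
 */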
3166 static void cortex_a_deinit_target(struct target *target)
3167 {
3168         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3169         struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3170         struct arm_dpm *dpm = &armv7a->dpm;
3171         uint32_t dscr;
3172         int retval;
3173
3174         if (target_was_examined(target)) {
3175                 /* Disable halt for breakpoint, watchpoint and vector catch */
3176                 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3177                                 armv7a->debug_base + CPUDBG_DSCR, &dscr);
3178                 if (retval == ERROR_OK)
3179                         mem_ap_write_atomic_u32(armv7a->debug_ap,
3180                                         armv7a->debug_base + CPUDBG_DSCR,
3181                                         dscr & ~DSCR_HALT_DBG_MODE);
3182         }
3183
3184         if (armv7a->debug_ap)
3185                 dap_put_ap(armv7a->debug_ap);
3186
3187         free(cortex_a->wrp_list);
3188         free(cortex_a->brp_list);
3189         arm_free_reg_cache(dpm->arm);
3190         free(dpm->dbp);
3191         free(dpm->dwp);
3192         free(target->private_config);
3193         free(cortex_a);
3194 }
3195
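/* Report whether address translation is active. ARMv7-R cores have no MMU
 * (only an MPU), so for them this always reports "disabled". */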
3196 static int cortex_a_mmu(struct target *target, int *enabled)
3197 {
3198         struct armv7a_common *armv7a = target_to_armv7a(target);
3199
3200         if (target->state != TARGET_HALTED) {
3201                 LOG_ERROR("%s: target not halted", __func__);
3202                 return ERROR_TARGET_INVALID;
3203         }
3204
3205         if (armv7a->is_armv7r)
3206                 *enabled = 0;
3207         else
3208                 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3209
3210         return ERROR_OK;
3211 }
3212
3213 static int cortex_a_virt2phys(struct target *target,
3214         target_addr_t virt, target_addr_t *phys)
3215 {
3216         int retval;
3217         int mmu_enabled = 0;
3218
3219         /*
3220          * If the MMU was not enabled at debug entry, there is no
3221          * way of knowing if there was ever a valid configuration
3222          * for it and thus it's not safe to enable it. In this case,
3223          * just return the virtual address as physical.
3224          */
3225         cortex_a_mmu(target, &mmu_enabled);
3226         if (!mmu_enabled) {
3227                 *phys = virt;
3228                 return ERROR_OK;
3229         }
3230
3231         /* The MMU must be enabled in order to get a correct translation */
3232         retval = cortex_a_mmu_modify(target, 1);
3233         if (retval != ERROR_OK)
3234                 return retval;
3235         return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3236                                                     phys, 1);
3237 }
3238
3239 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3240 {
3241         struct target *target = get_current_target(CMD_CTX);
3242         struct armv7a_common *armv7a = target_to_armv7a(target);
3243
3244         return armv7a_handle_cache_info_command(CMD,
3245                         &armv7a->armv7a_mmu.armv7a_cache);
3246 }
3247
3248
3249 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3250 {
3251         struct target *target = get_current_target(CMD_CTX);
3252         if (!target_was_examined(target)) {
3253                 LOG_ERROR("target not examined yet");
3254                 return ERROR_FAIL;
3255         }
3256
3257         return cortex_a_init_debug_access(target);
3258 }
3259
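/* "cortex_a maskisr ['on'|'off']": query or set whether interrupts are masked
 * while single-stepping (see cortex_a_step()). */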
3260 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3261 {
3262         struct target *target = get_current_target(CMD_CTX);
3263         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3264
3265         static const struct jim_nvp nvp_maskisr_modes[] = {
3266                 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3267                 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3268                 { .name = NULL, .value = -1 },
3269         };
3270         const struct jim_nvp *n;
3271
3272         if (CMD_ARGC > 0) {
3273                 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3274                 if (!n->name) {
3275                         LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3276                         return ERROR_COMMAND_SYNTAX_ERROR;
3277                 }
3278
3279                 cortex_a->isrmasking_mode = n->value;
3280         }
3281
3282         n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3283         command_print(CMD, "cortex_a interrupt mask %s", n->name);
3284
3285         return ERROR_OK;
3286 }
3287
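/* "cortex_a dacrfixup ['on'|'off']": when enabled, debugger-initiated memory
 * accesses temporarily set the Domain Access Control Register to all-manager
 * so they are not blocked by access permissions. */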
3288 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3289 {
3290         struct target *target = get_current_target(CMD_CTX);
3291         struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3292
3293         static const struct jim_nvp nvp_dacrfixup_modes[] = {
3294                 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3295                 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3296                 { .name = NULL, .value = -1 },
3297         };
3298         const struct jim_nvp *n;
3299
3300         if (CMD_ARGC > 0) {
3301                 n = jim_nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3302                 if (!n->name)
3303                         return ERROR_COMMAND_SYNTAX_ERROR;
3304                 cortex_a->dacrfixup_mode = n->value;
3305
3306         }
3307
3308         n = jim_nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3309         command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3310
3311         return ERROR_OK;
3312 }
3313
3314 static const struct command_registration cortex_a_exec_command_handlers[] = {
3315         {
3316                 .name = "cache_info",
3317                 .handler = cortex_a_handle_cache_info_command,
3318                 .mode = COMMAND_EXEC,
3319                 .help = "display information about target caches",
3320                 .usage = "",
3321         },
3322         {
3323                 .name = "dbginit",
3324                 .handler = cortex_a_handle_dbginit_command,
3325                 .mode = COMMAND_EXEC,
3326                 .help = "Initialize core debug",
3327                 .usage = "",
3328         },
3329         {
3330                 .name = "maskisr",
3331                 .handler = handle_cortex_a_mask_interrupts_command,
3332                 .mode = COMMAND_ANY,
3333                 .help = "mask cortex_a interrupts",
3334                 .usage = "['on'|'off']",
3335         },
3336         {
3337                 .name = "dacrfixup",
3338                 .handler = handle_cortex_a_dacrfixup_command,
3339                 .mode = COMMAND_ANY,
3340                 .help = "set domain access control (DACR) to all-manager "
3341                         "on memory access",
3342                 .usage = "['on'|'off']",
3343         },
3344         {
3345                 .chain = armv7a_mmu_command_handlers,
3346         },
3347         {
3348                 .chain = smp_command_handlers,
3349         },
3350
3351         COMMAND_REGISTRATION_DONE
3352 };
3353 static const struct command_registration cortex_a_command_handlers[] = {
3354         {
3355                 .chain = arm_command_handlers,
3356         },
3357         {
3358                 .chain = armv7a_command_handlers,
3359         },
3360         {
3361                 .name = "cortex_a",
3362                 .mode = COMMAND_ANY,
3363                 .help = "Cortex-A command group",
3364                 .usage = "",
3365                 .chain = cortex_a_exec_command_handlers,
3366         },
3367         COMMAND_REGISTRATION_DONE
3368 };
3369
3370 struct target_type cortexa_target = {
3371         .name = "cortex_a",
3372
3373         .poll = cortex_a_poll,
3374         .arch_state = armv7a_arch_state,
3375
3376         .halt = cortex_a_halt,
3377         .resume = cortex_a_resume,
3378         .step = cortex_a_step,
3379
3380         .assert_reset = cortex_a_assert_reset,
3381         .deassert_reset = cortex_a_deassert_reset,
3382
3383         /* REVISIT allow exporting VFP3 registers ... */
3384         .get_gdb_arch = arm_get_gdb_arch,
3385         .get_gdb_reg_list = arm_get_gdb_reg_list,
3386
3387         .read_memory = cortex_a_read_memory,
3388         .write_memory = cortex_a_write_memory,
3389
3390         .read_buffer = cortex_a_read_buffer,
3391         .write_buffer = cortex_a_write_buffer,
3392
3393         .checksum_memory = arm_checksum_memory,
3394         .blank_check_memory = arm_blank_check_memory,
3395
3396         .run_algorithm = armv4_5_run_algorithm,
3397
3398         .add_breakpoint = cortex_a_add_breakpoint,
3399         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3400         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3401         .remove_breakpoint = cortex_a_remove_breakpoint,
3402         .add_watchpoint = cortex_a_add_watchpoint,
3403         .remove_watchpoint = cortex_a_remove_watchpoint,
3404
3405         .commands = cortex_a_command_handlers,
3406         .target_create = cortex_a_target_create,
3407         .target_jim_configure = adiv5_jim_configure,
3408         .init_target = cortex_a_init_target,
3409         .examine = cortex_a_examine,
3410         .deinit_target = cortex_a_deinit_target,
3411
3412         .read_phys_memory = cortex_a_read_phys_memory,
3413         .write_phys_memory = cortex_a_write_phys_memory,
3414         .mmu = cortex_a_mmu,
3415         .virt2phys = cortex_a_virt2phys,
3416 };
3417
3418 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3419         {
3420                 .name = "dbginit",
3421                 .handler = cortex_a_handle_dbginit_command,
3422                 .mode = COMMAND_EXEC,
3423                 .help = "Initialize core debug",
3424                 .usage = "",
3425         },
3426         {
3427                 .name = "maskisr",
3428                 .handler = handle_cortex_a_mask_interrupts_command,
3429                 .mode = COMMAND_EXEC,
3430                 .help = "mask cortex_r4 interrupts",
3431                 .usage = "['on'|'off']",
3432         },
3433
3434         COMMAND_REGISTRATION_DONE
3435 };
3436 static const struct command_registration cortex_r4_command_handlers[] = {
3437         {
3438                 .chain = arm_command_handlers,
3439         },
3440         {
3441                 .name = "cortex_r4",
3442                 .mode = COMMAND_ANY,
3443                 .help = "Cortex-R4 command group",
3444                 .usage = "",
3445                 .chain = cortex_r4_exec_command_handlers,
3446         },
3447         COMMAND_REGISTRATION_DONE
3448 };
3449
3450 struct target_type cortexr4_target = {
3451         .name = "cortex_r4",
3452
3453         .poll = cortex_a_poll,
3454         .arch_state = armv7a_arch_state,
3455
3456         .halt = cortex_a_halt,
3457         .resume = cortex_a_resume,
3458         .step = cortex_a_step,
3459
3460         .assert_reset = cortex_a_assert_reset,
3461         .deassert_reset = cortex_a_deassert_reset,
3462
3463         /* REVISIT allow exporting VFP3 registers ... */
3464         .get_gdb_arch = arm_get_gdb_arch,
3465         .get_gdb_reg_list = arm_get_gdb_reg_list,
3466
3467         .read_memory = cortex_a_read_phys_memory,
3468         .write_memory = cortex_a_write_phys_memory,
3469
3470         .checksum_memory = arm_checksum_memory,
3471         .blank_check_memory = arm_blank_check_memory,
3472
3473         .run_algorithm = armv4_5_run_algorithm,
3474
3475         .add_breakpoint = cortex_a_add_breakpoint,
3476         .add_context_breakpoint = cortex_a_add_context_breakpoint,
3477         .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3478         .remove_breakpoint = cortex_a_remove_breakpoint,
3479         .add_watchpoint = cortex_a_add_watchpoint,
3480         .remove_watchpoint = cortex_a_remove_watchpoint,
3481
3482         .commands = cortex_r4_command_handlers,
3483         .target_create = cortex_r4_target_create,
3484         .target_jim_configure = adiv5_jim_configure,
3485         .init_target = cortex_a_init_target,
3486         .examine = cortex_a_examine,
3487         .deinit_target = cortex_a_deinit_target,
3488 };