aarch64: introduce dpm extension for ARMv8
[fw/openocd] / src / target / aarch64.c
1 /***************************************************************************
2  *   Copyright (C) 2015 by David Ung                                       *
3  *                                                                         *
4  *   This program is free software; you can redistribute it and/or modify  *
5  *   it under the terms of the GNU General Public License as published by  *
6  *   the Free Software Foundation; either version 2 of the License, or     *
7  *   (at your option) any later version.                                   *
8  *                                                                         *
9  *   This program is distributed in the hope that it will be useful,       *
10  *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
11  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
12  *   GNU General Public License for more details.                          *
13  *                                                                         *
14  *   You should have received a copy of the GNU General Public License     *
15  *   along with this program; if not, write to the                         *
16  *   Free Software Foundation, Inc.,                                       *
17  *                                                                         *
18  ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "arm_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36         struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38         struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40         struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42         struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45         target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ab_memory(struct target *target,
47         uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49         uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53         int retval = ERROR_OK;
54
55         struct aarch64_common *aarch64 = target_to_aarch64(target);
56         struct armv8_common *armv8 = target_to_armv8(target);
57
58         if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59                 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60                 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
61                                                      0xd5181000, /* msr sctlr_el1, x0 */
62                                                      aarch64->system_control_reg);
63         }
64
65         return retval;
66 }
67
68 /*  check the address before an aarch64_apb read/write access with the mmu on,
69  *  to avoid predictable apb data aborts */
70 static int aarch64_check_address(struct target *target, uint32_t address)
71 {
72         /* TODO */
73         return ERROR_OK;
74 }
75 /*  modify system_control_reg to enable or disable the mmu for:
76  *  - virt2phys address conversion
77  *  - reading or writing memory at physical or virtual addresses */
78 static int aarch64_mmu_modify(struct target *target, int enable)
79 {
80         struct aarch64_common *aarch64 = target_to_aarch64(target);
81         struct armv8_common *armv8 = &aarch64->armv8_common;
82         int retval = ERROR_OK;
83
84         if (enable) {
85                 /*  the mmu can only be re-enabled if it was enabled when the target stopped */
86                 if (!(aarch64->system_control_reg & 0x1U)) {
87                         LOG_ERROR("trying to enable mmu on a target stopped with the mmu disabled");
88                         return ERROR_FAIL;
89                 }
90                 if (!(aarch64->system_control_reg_curr & 0x1U)) {
91                         aarch64->system_control_reg_curr |= 0x1U;
92                         retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
93                                                              0xd5181000, /* msr sctlr_el1, x0 */
94                                                              aarch64->system_control_reg_curr);
95                 }
96         } else {
97                 if (aarch64->system_control_reg_curr & 0x4U) {
98                         /*  data cache is active */
99                         aarch64->system_control_reg_curr &= ~0x4U;
100                         /* flush the data cache first, using the armv8 cache handler if present */
101                         if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
102                                 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
103                 }
104                 if ((aarch64->system_control_reg_curr & 0x1U)) {
105                         aarch64->system_control_reg_curr &= ~0x1U;
106                         retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
107                                                              0xd5181000, /* msr sctlr_el1, x0 */
108                                                              aarch64->system_control_reg_curr);
109                 }
110         }
111         return retval;
112 }
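/* A minimal sketch of the intended call pattern (assumed from the comment above
 * aarch64_mmu_modify; the real callers sit in the virt2phys and physical-memory
 * paths further down this file, and phys_addr/buffer below are placeholder names):
 *
 *     retval = aarch64_mmu_modify(target, 0);          // MMU off for a physical access
 *     if (retval == ERROR_OK)
 *             retval = aarch64_read_apb_ab_memory(target, phys_addr,
 *                             size, count, buffer);
 *     ...                                              // restore with aarch64_mmu_modify(target, 1)
 */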
113
114 /*
115  * Basic debug access; very low level, assumes state is saved
116  */
117 static int aarch64_init_debug_access(struct target *target)
118 {
119         struct armv8_common *armv8 = target_to_armv8(target);
120         int retval;
121         uint32_t dummy;
122
123         LOG_DEBUG(" ");
124
125         /* Unlock the debug registers for modification.
126          * The debug port might be uninitialised, so try twice. */
127         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
128                              armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
129         if (retval != ERROR_OK) {
130                 /* try again */
131                 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
132                              armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
133                 if (retval == ERROR_OK)
134                         LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second.");
135         }
136         if (retval != ERROR_OK)
137                 return retval;
138         /* Clear Sticky Power Down status Bit in PRSR to enable access to
139            the registers in the Core Power Domain */
140         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141                         armv8->debug_base + CPUDBG_PRSR, &dummy);
142         if (retval != ERROR_OK)
143                 return retval;
144
145         /* Enabling of instruction execution in debug mode is done in debug_entry code */
146
147         /* Resync breakpoint registers */
148
149         /* Since this is likely called from init or reset, update target state information*/
150         return aarch64_poll(target);
151 }
152
153 /* To reduce needless round-trips, pass in a pointer to the current
154  * DSCR value.  Initialize it to zero if you just need to know the
155  * value on return from this function; or DSCR_INSTR_COMP if you
156  * happen to know that no instruction is pending.
157  */
158 static int aarch64_exec_opcode(struct target *target,
159         uint32_t opcode, uint32_t *dscr_p)
160 {
161         uint32_t dscr;
162         int retval;
163         struct armv8_common *armv8 = target_to_armv8(target);
164         dscr = dscr_p ? *dscr_p : 0;
165
166         LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
167
168         /* Wait for InstrCompl bit to be set */
169         long long then = timeval_ms();
170         while ((dscr & DSCR_INSTR_COMP) == 0) {
171                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
172                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
173                 if (retval != ERROR_OK) {
174                         LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
175                         return retval;
176                 }
177                 if (timeval_ms() > then + 1000) {
178                         LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
179                         return ERROR_FAIL;
180                 }
181         }
182
183         retval = mem_ap_write_u32(armv8->debug_ap,
184                         armv8->debug_base + CPUDBG_ITR, opcode);
185         if (retval != ERROR_OK)
186                 return retval;
187
188         then = timeval_ms();
189         do {
190                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
191                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
192                 if (retval != ERROR_OK) {
193                         LOG_ERROR("Could not read DSCR register");
194                         return retval;
195                 }
196                 if (timeval_ms() > then + 1000) {
197                         LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
198                         return ERROR_FAIL;
199                 }
200         } while ((dscr & DSCR_INSTR_COMP) == 0);        /* Wait for InstrCompl bit to be set */
201
202         if (dscr_p)
203                 *dscr_p = dscr;
204
205         return retval;
206 }
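/* Usage sketch (an assumption, mirroring the note above): passing the same dscr
 * through consecutive calls avoids an extra DSCR read at the top of each call,
 * because the previous call returns with DSCR_INSTR_COMP already set:
 *
 *     uint32_t dscr = DSCR_INSTR_COMP;   // no instruction pending
 *     retval = aarch64_exec_opcode(target, 0xd5130400, &dscr);  // msr dbgdtr_el0, x0
 *     if (retval == ERROR_OK)
 *             retval = aarch64_exec_opcode(target, opcode, &dscr);
 */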
207
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target *target,
210         uint32_t address,
211         uint32_t value)
212 {
213         int retval;
214         struct armv8_common *armv8 = target_to_armv8(target);
215
216         retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
217
218         return retval;
219 }
220
221 /*
222  * AARCH64 implementation of Debug Programmer's Model
223  *
224  * NOTE the invariant:  these routines return with DSCR_INSTR_COMP set,
225  * so there's no need to poll for it before executing an instruction.
226  *
227  * NOTE that in several of these cases the "stall" mode might be useful.
228  * It'd let us queue a few operations together... prepare/finish might
229  * be the places to enable/disable that mode.
230  */
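/* Sketch of how the generic armv8 DPM layer is expected to drive these hooks
 * (an assumption based on the prepare/finish note above, not a call made in
 * this file):
 *
 *     dpm->prepare(dpm);                               // wait for DSCR_INSTR_COMP
 *     dpm->instr_write_data_r0(dpm, opcode, value);    // value -> DCC -> x0 -> opcode
 *     dpm->finish(dpm);                                // currently a no-op
 */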
231
232 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
233 {
234         return container_of(dpm, struct aarch64_common, armv8_common.dpm);
235 }
236
237 static int aarch64_write_dcc(struct aarch64_common *a8, uint32_t data)
238 {
239         LOG_DEBUG("write DCC 0x%08" PRIx32, data);
240         return mem_ap_write_u32(a8->armv8_common.debug_ap,
241                                 a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
242 }
243
244 static int aarch64_write_dcc_64(struct aarch64_common *a8, uint64_t data)
245 {
246         int ret;
247         LOG_DEBUG("write DCC low word 0x%08" PRIx32, (unsigned)data);
248         LOG_DEBUG("write DCC high word 0x%08" PRIx32, (unsigned)(data >> 32));
249         ret = mem_ap_write_u32(a8->armv8_common.debug_ap,
250                                a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
251         ret += mem_ap_write_u32(a8->armv8_common.debug_ap,
252                                 a8->armv8_common.debug_base + CPUDBG_DTRTX, data >> 32);
253         return ret;
254 }
255
256 static int aarch64_read_dcc(struct aarch64_common *a8, uint32_t *data,
257         uint32_t *dscr_p)
258 {
259         uint32_t dscr = DSCR_INSTR_COMP;
260         int retval;
261
262         if (dscr_p)
263                 dscr = *dscr_p;
264
265         /* Wait for DTRTX to be full (data ready to read) */
266         long long then = timeval_ms();
267         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
268                 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
269                                 a8->armv8_common.debug_base + CPUDBG_DSCR,
270                                 &dscr);
271                 if (retval != ERROR_OK)
272                         return retval;
273                 if (timeval_ms() > then + 1000) {
274                         LOG_ERROR("Timeout waiting for read dcc");
275                         return ERROR_FAIL;
276                 }
277         }
278
279         retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
280                                             a8->armv8_common.debug_base + CPUDBG_DTRTX,
281                                             data);
282         if (retval != ERROR_OK)
283                 return retval;
284         LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
285
286         if (dscr_p)
287                 *dscr_p = dscr;
288
289         return retval;
290 }
291 static int aarch64_read_dcc_64(struct aarch64_common *a8, uint64_t *data,
292         uint32_t *dscr_p)
293 {
294         uint32_t dscr = DSCR_INSTR_COMP;
295         uint32_t lower, higher;
296         int retval;
297
298         if (dscr_p)
299                 dscr = *dscr_p;
300
301         /* Wait for DTRTX to be full (data ready to read) */
302         long long then = timeval_ms();
303         while ((dscr & DSCR_DTR_TX_FULL) == 0) {
304                 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
305                                 a8->armv8_common.debug_base + CPUDBG_DSCR,
306                                 &dscr);
307                 if (retval != ERROR_OK)
308                         return retval;
309                 if (timeval_ms() > then + 1000) {
310                         LOG_ERROR("Timeout waiting for read dcc");
311                         return ERROR_FAIL;
312                 }
313         }
314
315         retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
316                                             a8->armv8_common.debug_base + CPUDBG_DTRTX,
317                                             &lower);
318         if (retval != ERROR_OK)
319                 return retval;
320
321         retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
322                                             a8->armv8_common.debug_base + CPUDBG_DTRRX,
323                                             &higher);
324         if (retval != ERROR_OK)
325                 return retval;
326
327         *data = (uint64_t)higher << 32 | lower;
328         LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
329
330         if (dscr_p)
331                 *dscr_p = dscr;
332
333         return retval;
334 }
335
336 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
337 {
338         struct aarch64_common *a8 = dpm_to_a8(dpm);
339         uint32_t dscr;
340         int retval;
341
342         /* set up invariant:  INSTR_COMP is set after every DPM operation */
343         long long then = timeval_ms();
344         for (;; ) {
345                 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
346                                 a8->armv8_common.debug_base + CPUDBG_DSCR,
347                                 &dscr);
348                 if (retval != ERROR_OK)
349                         return retval;
350                 if ((dscr & DSCR_INSTR_COMP) != 0)
351                         break;
352                 if (timeval_ms() > then + 1000) {
353                         LOG_ERROR("Timeout waiting for dpm prepare");
354                         return ERROR_FAIL;
355                 }
356         }
357
358         /* this "should never happen" ... */
359         if (dscr & DSCR_DTR_RX_FULL) {
360                 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
361                 /* Clear DCCRX */
362                 retval = aarch64_exec_opcode(
363                                 a8->armv8_common.arm.target,
364                                 0xd5130400,     /* msr dbgdtr_el0, x0 */
365                                 &dscr);
366                 if (retval != ERROR_OK)
367                         return retval;
368         }
369
370         return retval;
371 }
372
373 static int aarch64_dpm_finish(struct arm_dpm *dpm)
374 {
375         /* REVISIT what could be done here? */
376         return ERROR_OK;
377 }
378
379 static int aarch64_instr_execute(struct arm_dpm *dpm,
380         uint32_t opcode)
381 {
382         struct aarch64_common *a8 = dpm_to_a8(dpm);
383         uint32_t dscr = DSCR_ITE;
384
385         return aarch64_exec_opcode(
386                         a8->armv8_common.arm.target,
387                         opcode,
388                         &dscr);
389 }
390
391 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
392         uint32_t opcode, uint32_t data)
393 {
394         struct aarch64_common *a8 = dpm_to_a8(dpm);
395         int retval;
396         uint32_t dscr = DSCR_INSTR_COMP;
397
398         retval = aarch64_write_dcc(a8, data);
399         if (retval != ERROR_OK)
400                 return retval;
401
402         return aarch64_exec_opcode(
403                         a8->armv8_common.arm.target,
404                         opcode,
405                         &dscr);
406 }
407
408 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
409         uint32_t opcode, uint64_t data)
410 {
411         struct aarch64_common *a8 = dpm_to_a8(dpm);
412         int retval;
413         uint32_t dscr = DSCR_INSTR_COMP;
414
415         retval = aarch64_write_dcc_64(a8, data);
416         if (retval != ERROR_OK)
417                 return retval;
418
419         return aarch64_exec_opcode(
420                         a8->armv8_common.arm.target,
421                         opcode,
422                         &dscr);
423 }
424
425 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
426         uint32_t opcode, uint32_t data)
427 {
428         struct aarch64_common *a8 = dpm_to_a8(dpm);
429         uint32_t dscr = DSCR_INSTR_COMP;
430         int retval;
431
432         retval = aarch64_write_dcc(a8, data);
433         if (retval != ERROR_OK)
434                 return retval;
435
436         retval = aarch64_exec_opcode(
437                         a8->armv8_common.arm.target,
438                         0xd5330500,     /* mrs x0, dbgdtrrx_el0 */
439                         &dscr);
440         if (retval != ERROR_OK)
441                 return retval;
442
443         /* then the opcode, taking data from R0 */
444         retval = aarch64_exec_opcode(
445                         a8->armv8_common.arm.target,
446                         opcode,
447                         &dscr);
448
449         return retval;
450 }
451
452 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
453         uint32_t opcode, uint64_t data)
454 {
455         struct aarch64_common *a8 = dpm_to_a8(dpm);
456         uint32_t dscr = DSCR_INSTR_COMP;
457         int retval;
458
459         retval = aarch64_write_dcc_64(a8, data);
460         if (retval != ERROR_OK)
461                 return retval;
462
463         retval = aarch64_exec_opcode(
464                         a8->armv8_common.arm.target,
465                         0xd5330400,     /* mrs x0, dbgdtr_el0 */
466                         &dscr);
467         if (retval != ERROR_OK)
468                 return retval;
469
470         /* then the opcode, taking data from R0 */
471         retval = aarch64_exec_opcode(
472                         a8->armv8_common.arm.target,
473                         opcode,
474                         &dscr);
475
476         return retval;
477 }
478
479 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
480 {
481         struct target *target = dpm->arm->target;
482         uint32_t dscr = DSCR_INSTR_COMP;
483
484         /* "Prefetch flush" after modifying execution status in CPSR */
485         return aarch64_exec_opcode(target,
486                         ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
487                         &dscr);
488 }
489
490 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
491         uint32_t opcode, uint32_t *data)
492 {
493         struct aarch64_common *a8 = dpm_to_a8(dpm);
494         int retval;
495         uint32_t dscr = DSCR_INSTR_COMP;
496
497         /* the opcode, writing data to DCC */
498         retval = aarch64_exec_opcode(
499                         a8->armv8_common.arm.target,
500                         opcode,
501                         &dscr);
502         if (retval != ERROR_OK)
503                 return retval;
504
505         return aarch64_read_dcc(a8, data, &dscr);
506 }
507
508 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
509         uint32_t opcode, uint64_t *data)
510 {
511         struct aarch64_common *a8 = dpm_to_a8(dpm);
512         int retval;
513         uint32_t dscr = DSCR_INSTR_COMP;
514
515         /* the opcode, writing data to DCC */
516         retval = aarch64_exec_opcode(
517                         a8->armv8_common.arm.target,
518                         opcode,
519                         &dscr);
520         if (retval != ERROR_OK)
521                 return retval;
522
523         return aarch64_read_dcc_64(a8, data, &dscr);
524 }
525
526 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
527         uint32_t opcode, uint32_t *data)
528 {
529         struct aarch64_common *a8 = dpm_to_a8(dpm);
530         uint32_t dscr = DSCR_INSTR_COMP;
531         int retval;
532
533         /* the opcode, writing data to R0 */
534         retval = aarch64_exec_opcode(
535                         a8->armv8_common.arm.target,
536                         opcode,
537                         &dscr);
538         if (retval != ERROR_OK)
539                 return retval;
540
541         /* write R0 to DCC */
542         retval = aarch64_exec_opcode(
543                         a8->armv8_common.arm.target,
544                         0xd5130400,  /* msr dbgdtr_el0, x0 */
545                         &dscr);
546         if (retval != ERROR_OK)
547                 return retval;
548
549         return aarch64_read_dcc(a8, data, &dscr);
550 }
551
552 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
553         uint32_t opcode, uint64_t *data)
554 {
555         struct aarch64_common *a8 = dpm_to_a8(dpm);
556         uint32_t dscr = DSCR_INSTR_COMP;
557         int retval;
558
559         /* the opcode, writing data to R0 */
560         retval = aarch64_exec_opcode(
561                         a8->armv8_common.arm.target,
562                         opcode,
563                         &dscr);
564         if (retval != ERROR_OK)
565                 return retval;
566
567         /* write R0 to DCC */
568         retval = aarch64_exec_opcode(
569                         a8->armv8_common.arm.target,
570                         0xd5130400,  /* msr dbgdtr_el0, x0 */
571                         &dscr);
572         if (retval != ERROR_OK)
573                 return retval;
574
575         return aarch64_read_dcc_64(a8, data, &dscr);
576 }
577
578 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
579         uint32_t addr, uint32_t control)
580 {
581         struct aarch64_common *a8 = dpm_to_a8(dpm);
582         uint32_t vr = a8->armv8_common.debug_base;
583         uint32_t cr = a8->armv8_common.debug_base;
584         int retval;
585
586         switch (index_t) {
587                 case 0 ... 15:  /* breakpoints */
588                         vr += CPUDBG_BVR_BASE;
589                         cr += CPUDBG_BCR_BASE;
590                         break;
591                 case 16 ... 31: /* watchpoints */
592                         vr += CPUDBG_WVR_BASE;
593                         cr += CPUDBG_WCR_BASE;
594                         index_t -= 16;
595                         break;
596                 default:
597                         return ERROR_FAIL;
598         }
599         vr += 4 * index_t;
600         cr += 4 * index_t;
601
602         LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
603                 (unsigned) vr, (unsigned) cr);
604
605         retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
606                         vr, addr);
607         if (retval != ERROR_OK)
608                 return retval;
609         retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
610                         cr, control);
611         return retval;
612 }
613
614 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
615 {
616         return ERROR_OK;
617
618 #if 0
619         struct aarch64_common *a8 = dpm_to_a8(dpm);
620         uint32_t cr;
621
622         switch (index_t) {
623                 case 0 ... 15:
624                         cr = a8->armv8_common.debug_base + CPUDBG_BCR_BASE;
625                         break;
626                 case 16 ... 31:
627                         cr = a8->armv8_common.debug_base + CPUDBG_WCR_BASE;
628                         index_t -= 16;
629                         break;
630                 default:
631                         return ERROR_FAIL;
632         }
633         cr += 4 * index_t;
634
635         LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
636
637         /* clear control register */
638         return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
639 #endif
640 }
641
642 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
643 {
644         struct arm_dpm *dpm = &a8->armv8_common.dpm;
645         int retval;
646
647         dpm->arm = &a8->armv8_common.arm;
648         dpm->didr = debug;
649
650         dpm->prepare = aarch64_dpm_prepare;
651         dpm->finish = aarch64_dpm_finish;
652
653         dpm->instr_execute = aarch64_instr_execute;
654         dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
655         dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
656         dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
657         dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
658         dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
659
660         dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
661         dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
662         dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
663         dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
664
665         dpm->arm_reg_current = armv8_reg_current;
666
667         dpm->bpwp_enable = aarch64_bpwp_enable;
668         dpm->bpwp_disable = aarch64_bpwp_disable;
669
670         retval = armv8_dpm_setup(dpm);
671         if (retval == ERROR_OK)
672                 retval = armv8_dpm_initialize(dpm);
673
674         return retval;
675 }
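/* aarch64_dpm_setup() is presumably wired up from the target examine/init path
 * (outside this excerpt), roughly as follows, where the second argument is the
 * debug ID value stored into dpm->didr:
 *
 *     retval = aarch64_dpm_setup(aarch64, didr);
 */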
676 static struct target *get_aarch64(struct target *target, int32_t coreid)
677 {
678         struct target_list *head;
679         struct target *curr;
680
681         head = target->head;
682         while (head != (struct target_list *)NULL) {
683                 curr = head->target;
684                 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
685                         return curr;
686                 head = head->next;
687         }
688         return target;
689 }
690 static int aarch64_halt(struct target *target);
691
692 static int aarch64_halt_smp(struct target *target)
693 {
694         int retval = 0;
695         struct target_list *head;
696         struct target *curr;
697         head = target->head;
698         while (head != (struct target_list *)NULL) {
699                 curr = head->target;
700                 if ((curr != target) && (curr->state != TARGET_HALTED))
701                         retval += aarch64_halt(curr);
702                 head = head->next;
703         }
704         return retval;
705 }
706
707 static int update_halt_gdb(struct target *target)
708 {
709         int retval = 0;
710         if (target->gdb_service && target->gdb_service->core[0] == -1) {
711                 target->gdb_service->target = target;
712                 target->gdb_service->core[0] = target->coreid;
713                 retval += aarch64_halt_smp(target);
714         }
715         return retval;
716 }
717
718 /*
719  * AArch64 run control
720  */
721
722 static int aarch64_poll(struct target *target)
723 {
724         int retval = ERROR_OK;
725         uint32_t dscr;
726         struct aarch64_common *aarch64 = target_to_aarch64(target);
727         struct armv8_common *armv8 = &aarch64->armv8_common;
728         enum target_state prev_target_state = target->state;
729         /*  toggling to another core is done by gdb as follows: */
730         /*  maint packet J core_id */
731         /*  continue */
732         /*  the next poll triggers a halt event sent to gdb */
733         if ((target->state == TARGET_HALTED) && (target->smp) &&
734                 (target->gdb_service) &&
735                 (target->gdb_service->target == NULL)) {
736                 target->gdb_service->target =
737                         get_aarch64(target, target->gdb_service->core[1]);
738                 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
739                 return retval;
740         }
741         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
742                         armv8->debug_base + CPUDBG_DSCR, &dscr);
743         if (retval != ERROR_OK)
744                 return retval;
745         aarch64->cpudbg_dscr = dscr;
746
747         if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
748                 if (prev_target_state != TARGET_HALTED) {
749                         /* We have a halting debug event */
750                         LOG_DEBUG("Target halted");
751                         target->state = TARGET_HALTED;
752                         if ((prev_target_state == TARGET_RUNNING)
753                                 || (prev_target_state == TARGET_UNKNOWN)
754                                 || (prev_target_state == TARGET_RESET)) {
755                                 retval = aarch64_debug_entry(target);
756                                 if (retval != ERROR_OK)
757                                         return retval;
758                                 if (target->smp) {
759                                         retval = update_halt_gdb(target);
760                                         if (retval != ERROR_OK)
761                                                 return retval;
762                                 }
763                                 target_call_event_callbacks(target,
764                                         TARGET_EVENT_HALTED);
765                         }
766                         if (prev_target_state == TARGET_DEBUG_RUNNING) {
767                                 LOG_DEBUG(" ");
768
769                                 retval = aarch64_debug_entry(target);
770                                 if (retval != ERROR_OK)
771                                         return retval;
772                                 if (target->smp) {
773                                         retval = update_halt_gdb(target);
774                                         if (retval != ERROR_OK)
775                                                 return retval;
776                                 }
777
778                                 target_call_event_callbacks(target,
779                                         TARGET_EVENT_DEBUG_HALTED);
780                         }
781                 }
782         } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
783                 target->state = TARGET_RUNNING;
784         else {
785                 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
786                 target->state = TARGET_UNKNOWN;
787         }
788
789         return retval;
790 }
791
792 static int aarch64_halt(struct target *target)
793 {
794         int retval = ERROR_OK;
795         uint32_t dscr;
796         struct armv8_common *armv8 = target_to_armv8(target);
797
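        /* The 0x10000-offset accesses below look like CoreSight CTI programming
         * (an assumption: 0x0 CTICONTROL, 0x140 CTIGATE, 0xa0/0xa4 CTIOUTEN0/1,
         * 0x20 CTIINEN0, 0x1c CTIAPPPULSE), i.e. the halt request appears to be
         * routed through a cross-trigger interface placed 64 KiB above the
         * debug registers. */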
798         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
799                         armv8->debug_base + 0x10000 + 0, &dscr);
800         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
801                         armv8->debug_base + 0x10000 + 0, 1);
802         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
803                         armv8->debug_base + 0x10000 + 0, &dscr);
804
805         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
806                         armv8->debug_base + 0x10000 + 0x140, &dscr);
807         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
808                         armv8->debug_base + 0x10000 + 0x140, 6);
809         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
810                         armv8->debug_base + 0x10000 + 0x140, &dscr);
811
812         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
813                         armv8->debug_base + 0x10000 + 0xa0, &dscr);
814         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
815                         armv8->debug_base + 0x10000 + 0xa0, 5);
816         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
817                         armv8->debug_base + 0x10000 + 0xa0, &dscr);
818
819         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
820                         armv8->debug_base + 0x10000 + 0xa4, &dscr);
821         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
822                         armv8->debug_base + 0x10000 + 0xa4, 2);
823         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
824                         armv8->debug_base + 0x10000 + 0xa4, &dscr);
825
826         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
827                         armv8->debug_base + 0x10000 + 0x20, &dscr);
828         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
829                         armv8->debug_base + 0x10000 + 0x20, 4);
830         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
831                         armv8->debug_base + 0x10000 + 0x20, &dscr);
832
833         /*
834          * enter halting debug mode
835          */
836         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
837                         armv8->debug_base + CPUDBG_DSCR, &dscr);
838         if (retval != ERROR_OK)
839                 return retval;
840
841         /* STATUS */
842         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
843                         armv8->debug_base + 0x10000 + 0x134, &dscr);
844
845         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
846                         armv8->debug_base + 0x10000 + 0x1c, &dscr);
847         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
848                         armv8->debug_base + 0x10000 + 0x1c, 1);
849         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
850                         armv8->debug_base + 0x10000 + 0x1c, &dscr);
851
852
853         long long then = timeval_ms();
854         for (;; ) {
855                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
856                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
857                 if (retval != ERROR_OK)
858                         return retval;
859                 if ((dscr & DSCR_CORE_HALTED) != 0)
860                         break;
861                 if (timeval_ms() > then + 1000) {
862                         LOG_ERROR("Timeout waiting for halt");
863                         return ERROR_FAIL;
864                 }
865         }
866
867         target->debug_reason = DBG_REASON_DBGRQ;
868
869         return ERROR_OK;
870 }
871
872 static int aarch64_internal_restore(struct target *target, int current,
873         uint64_t *address, int handle_breakpoints, int debug_execution)
874 {
875         struct armv8_common *armv8 = target_to_armv8(target);
876         struct arm *arm = &armv8->arm;
877         int retval;
878         uint64_t resume_pc;
879
880         if (!debug_execution)
881                 target_free_all_working_areas(target);
882
883         /* current = 1: continue on current pc, otherwise continue at <address> */
884         resume_pc = buf_get_u64(arm->pc->value, 0, 64);
885         if (!current)
886                 resume_pc = *address;
887         else
888                 *address = resume_pc;
889
890         /* Make sure that the ARMv7 gdb thumb fixups do not
891          * kill the return address
892          */
893         switch (arm->core_state) {
894                 case ARM_STATE_ARM:
895                         resume_pc &= 0xFFFFFFFC;
896                         break;
897                 case ARM_STATE_AARCH64:
898                         resume_pc &= 0xFFFFFFFFFFFFFFFC;
899                         break;
900                 case ARM_STATE_THUMB:
901                 case ARM_STATE_THUMB_EE:
902                         /* When the return address is loaded into PC
903                          * bit 0 must be 1 to stay in Thumb state
904                          */
905                         resume_pc |= 0x1;
906                         break;
907                 case ARM_STATE_JAZELLE:
908                         LOG_ERROR("How do I resume into Jazelle state??");
909                         return ERROR_FAIL;
910         }
911         LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
912         buf_set_u64(arm->pc->value, 0, 64, resume_pc);
913         arm->pc->dirty = 1;
914         arm->pc->valid = 1;
915         dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
916
917         /* call this now, before restoring the context, because it uses cpu
918          * register r0 to restore the system control register */
919         retval = aarch64_restore_system_control_reg(target);
920         if (retval != ERROR_OK)
921                 return retval;
922         retval = aarch64_restore_context(target, handle_breakpoints);
923         if (retval != ERROR_OK)
924                 return retval;
925         target->debug_reason = DBG_REASON_NOTHALTED;
926         target->state = TARGET_RUNNING;
927
928         /* registers are now invalid */
929         register_cache_invalidate(arm->core_cache);
930
931 #if 0
932         /* the front-end may request us not to handle breakpoints */
933         if (handle_breakpoints) {
934                 /* Single step past breakpoint at current address */
935                 breakpoint = breakpoint_find(target, resume_pc);
936                 if (breakpoint) {
937                         LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
938                         cortex_m3_unset_breakpoint(target, breakpoint);
939                         cortex_m3_single_step_core(target);
940                         cortex_m3_set_breakpoint(target, breakpoint);
941                 }
942         }
943 #endif
944
945         return retval;
946 }
947
948 static int aarch64_internal_restart(struct target *target)
949 {
950         struct armv8_common *armv8 = target_to_armv8(target);
951         struct arm *arm = &armv8->arm;
952         int retval;
953         uint32_t dscr;
954         /*
955          * Restart core and wait for it to be started.  Clear ITRen and sticky
956          * exception flags: see ARMv7 ARM, C5.9.
957          *
958          * REVISIT: for single stepping, we probably want to
959          * disable IRQs by default, with optional override...
960          */
961
962         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
963                         armv8->debug_base + CPUDBG_DSCR, &dscr);
964         if (retval != ERROR_OK)
965                 return retval;
966
967         if ((dscr & DSCR_INSTR_COMP) == 0)
968                 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
969
970         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
971                         armv8->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
972         if (retval != ERROR_OK)
973                 return retval;
974
975         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
976                         armv8->debug_base + CPUDBG_DRCR, DRCR_RESTART |
977                         DRCR_CLEAR_EXCEPTIONS);
978         if (retval != ERROR_OK)
979                 return retval;
980
981         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
982                         armv8->debug_base + 0x10000 + 0x10, 1);
983         if (retval != ERROR_OK)
984                 return retval;
985
986         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
987                         armv8->debug_base + 0x10000 + 0x1c, 2);
988         if (retval != ERROR_OK)
989                 return retval;
990
991         long long then = timeval_ms();
992         for (;; ) {
993                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
994                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
995                 if (retval != ERROR_OK)
996                         return retval;
997                 if ((dscr & DSCR_CORE_RESTARTED) != 0)
998                         break;
999                 if (timeval_ms() > then + 1000) {
1000                         LOG_ERROR("Timeout waiting for resume");
1001                         return ERROR_FAIL;
1002                 }
1003         }
1004
1005         target->debug_reason = DBG_REASON_NOTHALTED;
1006         target->state = TARGET_RUNNING;
1007
1008         /* registers are now invalid */
1009         register_cache_invalidate(arm->core_cache);
1010
1011         return ERROR_OK;
1012 }
1013
1014 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1015 {
1016         int retval = 0;
1017         struct target_list *head;
1018         struct target *curr;
1019         uint64_t address;
1020         head = target->head;
1021         while (head != (struct target_list *)NULL) {
1022                 curr = head->target;
1023                 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1024                         /*  resume at the current address, not in step mode */
1025                         retval += aarch64_internal_restore(curr, 1, &address,
1026                                         handle_breakpoints, 0);
1027                         retval += aarch64_internal_restart(curr);
1028                 }
1029                 head = head->next;
1030
1031         }
1032         return retval;
1033 }
1034
1035 static int aarch64_resume(struct target *target, int current,
1036         target_addr_t address, int handle_breakpoints, int debug_execution)
1037 {
1038         int retval = 0;
1039         uint64_t addr = address;
1040
1041         /* dummy resume for smp toggle in order to reduce gdb impact  */
1042         if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1043                 /*   simulate a start and halt of target */
1044                 target->gdb_service->target = NULL;
1045                 target->gdb_service->core[0] = target->gdb_service->core[1];
1046                 /*  fake resume: at the next poll we serve target core[1], see poll */
1047                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1048                 return 0;
1049         }
1050         aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1051                                  debug_execution);
1052         if (target->smp) {
1053                 target->gdb_service->core[0] = -1;
1054                 retval = aarch64_restore_smp(target, handle_breakpoints);
1055                 if (retval != ERROR_OK)
1056                         return retval;
1057         }
1058         aarch64_internal_restart(target);
1059
1060         if (!debug_execution) {
1061                 target->state = TARGET_RUNNING;
1062                 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1063                 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1064         } else {
1065                 target->state = TARGET_DEBUG_RUNNING;
1066                 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1067                 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1068         }
1069
1070         return ERROR_OK;
1071 }
1072
1073 static int aarch64_debug_entry(struct target *target)
1074 {
1075         uint32_t dscr;
1076         int retval = ERROR_OK;
1077         struct aarch64_common *aarch64 = target_to_aarch64(target);
1078         struct armv8_common *armv8 = target_to_armv8(target);
1079         uint32_t tmp;
1080
1081         LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1082
1083         /* REVISIT surely we should not re-read DSCR !! */
1084         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1085                         armv8->debug_base + CPUDBG_DSCR, &dscr);
1086         if (retval != ERROR_OK)
1087                 return retval;
1088
1089         /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1090          * imprecise data aborts get discarded by issuing a Data
1091          * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1092          */
1093
1094         /* Enable the ITR execution once we are in debug mode */
1095         dscr |= DSCR_ITR_EN;
1096         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1097                         armv8->debug_base + CPUDBG_DSCR, dscr);
1098         if (retval != ERROR_OK)
1099                 return retval;
1100
1101         /* Examine debug reason */
1102         arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1103         mem_ap_read_atomic_u32(armv8->debug_ap,
1104                                    armv8->debug_base + CPUDBG_DESR, &tmp);
1105         if ((tmp & 0x7) == 0x4)
1106                 target->debug_reason = DBG_REASON_SINGLESTEP;
1107
1108         /* save address of instruction that triggered the watchpoint? */
1109         if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1110                 uint32_t wfar;
1111
1112                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1113                                 armv8->debug_base + CPUDBG_WFAR,
1114                                 &wfar);
1115                 if (retval != ERROR_OK)
1116                         return retval;
1117                 arm_dpm_report_wfar(&armv8->dpm, wfar);
1118         }
1119
1120         retval = armv8_dpm_read_current_registers(&armv8->dpm);
1121
1122         if (armv8->post_debug_entry) {
1123                 retval = armv8->post_debug_entry(target);
1124                 if (retval != ERROR_OK)
1125                         return retval;
1126         }
1127
1128         return retval;
1129 }
1130
1131 static int aarch64_post_debug_entry(struct target *target)
1132 {
1133         struct aarch64_common *aarch64 = target_to_aarch64(target);
1134         struct armv8_common *armv8 = &aarch64->armv8_common;
1135         struct armv8_mmu_common *armv8_mmu = &armv8->armv8_mmu;
1136         uint32_t sctlr_el1 = 0;
1137         int retval;
1138
1139         mem_ap_write_atomic_u32(armv8->debug_ap,
1140                                 armv8->debug_base + CPUDBG_DRCR, 1<<2); /* clear sticky exceptions */
1141         retval = aarch64_instr_read_data_r0(armv8->arm.dpm,
1142                                             0xd5381000, &sctlr_el1); /* mrs x0, sctlr_el1 */
1143         if (retval != ERROR_OK)
1144                 return retval;
1145
1146         LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1);
1147         aarch64->system_control_reg = sctlr_el1;
1148         aarch64->system_control_reg_curr = sctlr_el1;
1149         aarch64->curr_mode = armv8->arm.core_mode;
1150
1151         armv8_mmu->mmu_enabled = sctlr_el1 & 0x1U ? 1 : 0;
1152         armv8_mmu->armv8_cache.d_u_cache_enabled = sctlr_el1 & 0x4U ? 1 : 0;
1153         armv8_mmu->armv8_cache.i_cache_enabled = sctlr_el1 & 0x1000U ? 1 : 0;
1154
1155 #if 0
1156         if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1157                 armv8_identify_cache(target);
1158 #endif
1159
1160         return ERROR_OK;
1161 }
1162
1163 static int aarch64_step(struct target *target, int current, target_addr_t address,
1164         int handle_breakpoints)
1165 {
1166         struct armv8_common *armv8 = target_to_armv8(target);
1167         int retval;
1168         uint32_t tmp;
1169
1170         if (target->state != TARGET_HALTED) {
1171                 LOG_WARNING("target not halted");
1172                 return ERROR_TARGET_NOT_HALTED;
1173         }
1174
1175         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1176                         armv8->debug_base + CPUDBG_DECR, &tmp);
1177         if (retval != ERROR_OK)
1178                 return retval;
1179
1180         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1181                         armv8->debug_base + CPUDBG_DECR, (tmp|0x4));
1182         if (retval != ERROR_OK)
1183                 return retval;
1184
1185         target->debug_reason = DBG_REASON_SINGLESTEP;
1186         retval = aarch64_resume(target, 1, address, 0, 0);
1187         if (retval != ERROR_OK)
1188                 return retval;
1189
1190         long long then = timeval_ms();
1191         while (target->state != TARGET_HALTED) {
1192                 mem_ap_read_atomic_u32(armv8->debug_ap,
1193                         armv8->debug_base + CPUDBG_DESR, &tmp);
1194                 LOG_DEBUG("DESR = %#x", tmp);
1195                 retval = aarch64_poll(target);
1196                 if (retval != ERROR_OK)
1197                         return retval;
1198                 if (timeval_ms() > then + 1000) {
1199                         LOG_ERROR("timeout waiting for target halt");
1200                         return ERROR_FAIL;
1201                 }
1202         }
1203
1204         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1205                         armv8->debug_base + CPUDBG_DECR, (tmp&(~0x4)));
1206         if (retval != ERROR_OK)
1207                 return retval;
1208
1209         target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1210         if (target->state == TARGET_HALTED)
1211                 LOG_DEBUG("target stepped");
1212
1213         return ERROR_OK;
1214 }
1215
1216 static int aarch64_restore_context(struct target *target, bool bpwp)
1217 {
1218         struct armv8_common *armv8 = target_to_armv8(target);
1219
1220         LOG_DEBUG(" ");
1221
1222         if (armv8->pre_restore_context)
1223                 armv8->pre_restore_context(target);
1224
1225         return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1226
1227 }
1228
1229 /*
1230  * AArch64 breakpoint and watchpoint functions
1231  */
1232
1233 /* Setup hardware Breakpoint Register Pair */
1234 static int aarch64_set_breakpoint(struct target *target,
1235         struct breakpoint *breakpoint, uint8_t matchmode)
1236 {
1237         int retval;
1238         int brp_i = 0;
1239         uint32_t control;
1240         uint8_t byte_addr_select = 0x0F;
1241         struct aarch64_common *aarch64 = target_to_aarch64(target);
1242         struct armv8_common *armv8 = &aarch64->armv8_common;
1243         struct aarch64_brp *brp_list = aarch64->brp_list;
1244         uint32_t dscr;
1245
1246         if (breakpoint->set) {
1247                 LOG_WARNING("breakpoint already set");
1248                 return ERROR_OK;
1249         }
1250
1251         if (breakpoint->type == BKPT_HARD) {
1252                 int64_t bpt_value;
1253                 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1254                         brp_i++;
1255                 if (brp_i >= aarch64->brp_num) {
1256                         LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1257                         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1258                 }
1259                 breakpoint->set = brp_i + 1;
1260                 if (breakpoint->length == 2)
1261                         byte_addr_select = (3 << (breakpoint->address & 0x02));
1262                 control = ((matchmode & 0x7) << 20)
1263                         | (1 << 13)
1264                         | (byte_addr_select << 5)
1265                         | (3 << 1) | 1;
1266                 brp_list[brp_i].used = 1;
1267                 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1268                 brp_list[brp_i].control = control;
1269                 bpt_value = brp_list[brp_i].value;
1270
1271                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1272                                 + CPUDBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1273                                 (uint32_t)(bpt_value & 0xFFFFFFFF));
1274                 if (retval != ERROR_OK)
1275                         return retval;
1276                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1277                                 + CPUDBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1278                                 (uint32_t)(bpt_value >> 32));
1279                 if (retval != ERROR_OK)
1280                         return retval;
1281
1282                 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1283                                 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1284                                 brp_list[brp_i].control);
1285                 if (retval != ERROR_OK)
1286                         return retval;
1287                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1288                         brp_list[brp_i].control,
1289                         brp_list[brp_i].value);
1290
1291         } else if (breakpoint->type == BKPT_SOFT) {
1292                 uint8_t code[4];
1293                 buf_set_u32(code, 0, 32, 0xD4400000);  /* HLT #0 */
1294
1295                 retval = target_read_memory(target,
1296                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1297                                 breakpoint->length, 1,
1298                                 breakpoint->orig_instr);
1299                 if (retval != ERROR_OK)
1300                         return retval;
1301                 retval = target_write_memory(target,
1302                                 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1303                                 breakpoint->length, 1, code);
1304                 if (retval != ERROR_OK)
1305                         return retval;
1306                 breakpoint->set = 0x11; /* Any nice value but 0 */
1307         }
1308
1309         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1310                                         armv8->debug_base + CPUDBG_DSCR, &dscr);
1311         /* Ensure that halting debug mode is enabled */
1312         dscr = dscr | DSCR_HALT_DBG_MODE;
1313         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1314                                          armv8->debug_base + CPUDBG_DSCR, dscr);
1315         if (retval != ERROR_OK) {
1316                 LOG_DEBUG("Failed to set DSCR.HDE");
1317                 return retval;
1318         }
1319
1320         return ERROR_OK;
1321 }
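/* For reference, the control word assembled above appears to map onto the
 * DBGBCR layout roughly as follows (an assumption inferred from the shifts
 * used, not a definition taken from this file):
 *
 *     bits [23:20]  BT   - matchmode (address vs. context matching)
 *     bit  [13]     HMC  - higher-mode control, set for debugger breakpoints
 *     bits [12:5]   BAS  - byte_addr_select
 *     bits [2:1]    PMC  - 0b11, match at all privilege levels
 *     bit  [0]      E    - enable
 */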
1322
1323 static int aarch64_set_context_breakpoint(struct target *target,
1324         struct breakpoint *breakpoint, uint8_t matchmode)
1325 {
1326         int retval = ERROR_FAIL;
1327         int brp_i = 0;
1328         uint32_t control;
1329         uint8_t byte_addr_select = 0x0F;
1330         struct aarch64_common *aarch64 = target_to_aarch64(target);
1331         struct armv8_common *armv8 = &aarch64->armv8_common;
1332         struct aarch64_brp *brp_list = aarch64->brp_list;
1333
1334         if (breakpoint->set) {
1335                 LOG_WARNING("breakpoint already set");
1336                 return retval;
1337         }
1338         /* check for an available context BRP */
1339         while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1340                 (brp_list[brp_i].type != BRP_CONTEXT)))
1341                 brp_i++;
1342
1343         if (brp_i >= aarch64->brp_num) {
1344                 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1345                 return ERROR_FAIL;
1346         }
1347
1348         breakpoint->set = brp_i + 1;
1349         control = ((matchmode & 0x7) << 20)
1350                 | (byte_addr_select << 5)
1351                 | (3 << 1) | 1;
1352         brp_list[brp_i].used = 1;
1353         brp_list[brp_i].value = (breakpoint->asid);
1354         brp_list[brp_i].control = control;
1355         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1356                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1357                         brp_list[brp_i].value);
1358         if (retval != ERROR_OK)
1359                 return retval;
1360         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1361                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1362                         brp_list[brp_i].control);
1363         if (retval != ERROR_OK)
1364                 return retval;
1365         LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1366                 brp_list[brp_i].control,
1367                 brp_list[brp_i].value);
1368         return ERROR_OK;
1369
1370 }
1371
1372 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1373 {
1374         int retval = ERROR_FAIL;
1375         int brp_1 = 0;  /* holds the contextID pair */
1376         int brp_2 = 0;  /* holds the IVA pair */
1377         uint32_t control_CTX, control_IVA;
1378         uint8_t CTX_byte_addr_select = 0x0F;
1379         uint8_t IVA_byte_addr_select = 0x0F;
1380         uint8_t CTX_machmode = 0x03;
1381         uint8_t IVA_machmode = 0x01;
1382         struct aarch64_common *aarch64 = target_to_aarch64(target);
1383         struct armv8_common *armv8 = &aarch64->armv8_common;
1384         struct aarch64_brp *brp_list = aarch64->brp_list;
1385
1386         if (breakpoint->set) {
1387                 LOG_WARNING("breakpoint already set");
1388                 return retval;
1389         }
1390         /*check available context BRPs*/
1391         while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1392                 (brp_list[brp_1].type != BRP_CONTEXT)))
1393                 brp_1++;
1394
1395         printf("brp(CTX) found num: %d\n", brp_1);
1396         if (brp_1 >= aarch64->brp_num) {
1397                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1398                 return ERROR_FAIL;
1399         }
1400
1401         while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1402                 (brp_list[brp_2].type != BRP_NORMAL)))
1403                 brp_2++;
1404
1405         printf("brp(IVA) found num: %d\n", brp_2);
1406         if (brp_2 >= aarch64->brp_num) {
1407                 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1408                 return ERROR_FAIL;
1409         }
1410
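        /* A hybrid breakpoint links a context-ID comparator (brp_1) with an
         * address comparator (brp_2): the CTX control word below carries the
         * linked BRP number in bits[19:16] and uses the linked context-ID
         * match mode (0x3). */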
1411         breakpoint->set = brp_1 + 1;
1412         breakpoint->linked_BRP = brp_2;
1413         control_CTX = ((CTX_machmode & 0x7) << 20)
1414                 | (brp_2 << 16)
1415                 | (0 << 14)
1416                 | (CTX_byte_addr_select << 5)
1417                 | (3 << 1) | 1;
1418         brp_list[brp_1].used = 1;
1419         brp_list[brp_1].value = (breakpoint->asid);
1420         brp_list[brp_1].control = control_CTX;
1421         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1422                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1423                         brp_list[brp_1].value);
1424         if (retval != ERROR_OK)
1425                 return retval;
1426         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1427                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1428                         brp_list[brp_1].control);
1429         if (retval != ERROR_OK)
1430                 return retval;
1431
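        /* IVA side: address match mode (0x1), linked back to the context
         * comparator brp_1 via bits[19:16]. */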
1432         control_IVA = ((IVA_machmode & 0x7) << 20)
1433                 | (brp_1 << 16)
1434                 | (IVA_byte_addr_select << 5)
1435                 | (3 << 1) | 1;
1436         brp_list[brp_2].used = 1;
1437         brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1438         brp_list[brp_2].control = control_IVA;
1439         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1440                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1441                         brp_list[brp_2].value);
1442         if (retval != ERROR_OK)
1443                 return retval;
1444         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1445                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1446                         brp_list[brp_2].control);
1447         if (retval != ERROR_OK)
1448                 return retval;
1449
1450         return ERROR_OK;
1451 }
1452
1453 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1454 {
1455         int retval;
1456         struct aarch64_common *aarch64 = target_to_aarch64(target);
1457         struct armv8_common *armv8 = &aarch64->armv8_common;
1458         struct aarch64_brp *brp_list = aarch64->brp_list;
1459
1460         if (!breakpoint->set) {
1461                 LOG_WARNING("breakpoint not set");
1462                 return ERROR_OK;
1463         }
1464
1465         if (breakpoint->type == BKPT_HARD) {
1466                 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1467                         int brp_i = breakpoint->set - 1;
1468                         int brp_j = breakpoint->linked_BRP;
1469                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1470                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1471                                 return ERROR_OK;
1472                         }
1473                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1474                                 brp_list[brp_i].control, brp_list[brp_i].value);
1475                         brp_list[brp_i].used = 0;
1476                         brp_list[brp_i].value = 0;
1477                         brp_list[brp_i].control = 0;
1478                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1479                                         + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1480                                         brp_list[brp_i].control);
1481                         if (retval != ERROR_OK)
1482                                 return retval;
1483                         if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1484                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1485                                 return ERROR_OK;
1486                         }
1487                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1488                                 brp_list[brp_j].control, brp_list[brp_j].value);
1489                         brp_list[brp_j].used = 0;
1490                         brp_list[brp_j].value = 0;
1491                         brp_list[brp_j].control = 0;
1492                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1493                                         + CPUDBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1494                                         brp_list[brp_j].control);
1495                         if (retval != ERROR_OK)
1496                                 return retval;
1497                         breakpoint->linked_BRP = 0;
1498                         breakpoint->set = 0;
1499                         return ERROR_OK;
1500
1501                 } else {
1502                         int brp_i = breakpoint->set - 1;
1503                         if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1504                                 LOG_DEBUG("Invalid BRP number in breakpoint");
1505                                 return ERROR_OK;
1506                         }
1507                         LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1508                                 brp_list[brp_i].control, brp_list[brp_i].value);
1509                         brp_list[brp_i].used = 0;
1510                         brp_list[brp_i].value = 0;
1511                         brp_list[brp_i].control = 0;
1512                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1513                                         + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1514                                         brp_list[brp_i].control);
1515                         if (retval != ERROR_OK)
1516                                 return retval;
1517                         retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1518                                         + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1519                                         brp_list[brp_i].value);
1520                         if (retval != ERROR_OK)
1521                                 return retval;
1522                         breakpoint->set = 0;
1523                         return ERROR_OK;
1524                 }
1525         } else {
1526                 /* restore original instruction (kept in target endianness) */
1527                 if (breakpoint->length == 4) {
1528                         retval = target_write_memory(target,
1529                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1530                                         4, 1, breakpoint->orig_instr);
1531                         if (retval != ERROR_OK)
1532                                 return retval;
1533                 } else {
1534                         retval = target_write_memory(target,
1535                                         breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1536                                         2, 1, breakpoint->orig_instr);
1537                         if (retval != ERROR_OK)
1538                                 return retval;
1539                 }
1540         }
1541         breakpoint->set = 0;
1542
1543         return ERROR_OK;
1544 }
1545
1546 static int aarch64_add_breakpoint(struct target *target,
1547         struct breakpoint *breakpoint)
1548 {
1549         struct aarch64_common *aarch64 = target_to_aarch64(target);
1550
1551         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1552                 LOG_INFO("no hardware breakpoint available");
1553                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1554         }
1555
1556         if (breakpoint->type == BKPT_HARD)
1557                 aarch64->brp_num_available--;
1558
1559         return aarch64_set_breakpoint(target, breakpoint, 0x00);        /* Exact match */
1560 }
1561
1562 static int aarch64_add_context_breakpoint(struct target *target,
1563         struct breakpoint *breakpoint)
1564 {
1565         struct aarch64_common *aarch64 = target_to_aarch64(target);
1566
1567         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1568                 LOG_INFO("no hardware breakpoint available");
1569                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1570         }
1571
1572         if (breakpoint->type == BKPT_HARD)
1573                 aarch64->brp_num_available--;
1574
1575         return aarch64_set_context_breakpoint(target, breakpoint, 0x02);        /* asid match */
1576 }
1577
1578 static int aarch64_add_hybrid_breakpoint(struct target *target,
1579         struct breakpoint *breakpoint)
1580 {
1581         struct aarch64_common *aarch64 = target_to_aarch64(target);
1582
1583         if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1584                 LOG_INFO("no hardware breakpoint available");
1585                 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1586         }
1587
1588         if (breakpoint->type == BKPT_HARD)
1589                 aarch64->brp_num_available--;
1590
1591         return aarch64_set_hybrid_breakpoint(target, breakpoint);       /* ??? */
1592 }
1593
1594
1595 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1596 {
1597         struct aarch64_common *aarch64 = target_to_aarch64(target);
1598
1599 #if 0
1600 /* It is perfectly possible to remove breakpoints while the target is running */
1601         if (target->state != TARGET_HALTED) {
1602                 LOG_WARNING("target not halted");
1603                 return ERROR_TARGET_NOT_HALTED;
1604         }
1605 #endif
1606
1607         if (breakpoint->set) {
1608                 aarch64_unset_breakpoint(target, breakpoint);
1609                 if (breakpoint->type == BKPT_HARD)
1610                         aarch64->brp_num_available++;
1611         }
1612
1613         return ERROR_OK;
1614 }
1615
1616 /*
1617  * aarch64 reset functions
1618  */
1619
1620 static int aarch64_assert_reset(struct target *target)
1621 {
1622         struct armv8_common *armv8 = target_to_armv8(target);
1623
1624         LOG_DEBUG(" ");
1625
1626         /* FIXME when halt is requested, make it work somehow... */
1627
1628         /* Issue some kind of warm reset. */
1629         if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1630                 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1631         else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1632                 /* REVISIT handle "pulls" cases, if there's
1633                  * hardware that needs them to work.
1634                  */
1635                 jtag_add_reset(0, 1);
1636         } else {
1637                 LOG_ERROR("%s: how to reset?", target_name(target));
1638                 return ERROR_FAIL;
1639         }
1640
1641         /* registers are now invalid */
1642         register_cache_invalidate(armv8->arm.core_cache);
1643
1644         target->state = TARGET_RESET;
1645
1646         return ERROR_OK;
1647 }
1648
1649 static int aarch64_deassert_reset(struct target *target)
1650 {
1651         int retval;
1652
1653         LOG_DEBUG(" ");
1654
1655         /* be certain SRST is off */
1656         jtag_add_reset(0, 0);
1657
1658         retval = aarch64_poll(target);
1659         if (retval != ERROR_OK)
1660                 return retval;
1661
1662         if (target->reset_halt) {
1663                 if (target->state != TARGET_HALTED) {
1664                         LOG_WARNING("%s: ran after reset and before halt ...",
1665                                 target_name(target));
1666                         retval = target_halt(target);
1667                         if (retval != ERROR_OK)
1668                                 return retval;
1669                 }
1670         }
1671
1672         return ERROR_OK;
1673 }
1674
1675 static int aarch64_write_apb_ab_memory(struct target *target,
1676         uint64_t address, uint32_t size,
1677         uint32_t count, const uint8_t *buffer)
1678 {
1679         /* write memory through APB-AP */
1680         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1681         struct armv8_common *armv8 = target_to_armv8(target);
1682         struct arm *arm = &armv8->arm;
1683         int total_bytes = count * size;
1684         int total_u32;
1685         int start_byte = address & 0x3;
1686         int end_byte   = (address + total_bytes) & 0x3;
1687         struct reg *reg;
1688         uint32_t dscr;
1689         uint8_t *tmp_buff = NULL;
1690         uint32_t i = 0;
1691
1692         LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %"  PRIu32 " count%"  PRIu32,
1693                           address, size, count);
1694         if (target->state != TARGET_HALTED) {
1695                 LOG_WARNING("target not halted");
1696                 return ERROR_TARGET_NOT_HALTED;
1697         }
1698
1699         total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1700
1701         /* Mark registers x0 and x1 as dirty, as they will be used
1702          * for transferring the data.
1703          * They will be restored automatically when exiting
1704          * debug mode
1705          */
1706         reg = armv8_reg_current(arm, 1);
1707         reg->dirty = true;
1708
1709         reg = armv8_reg_current(arm, 0);
1710         reg->dirty = true;
1711
1712         /*  clear any abort  */
1713         retval = mem_ap_write_atomic_u32(armv8->debug_ap, armv8->debug_base + CPUDBG_DRCR, 1<<2);
1714         if (retval != ERROR_OK)
1715                 return retval;
1716
1717         /* This algorithm comes from either :
1718          * Cortex-A8 TRM Example 12-25
1719          * Cortex-R4 TRM Example 11-26
1720          * (slight differences)
1721          */
1722
1723         /* The algorithm only copies 32 bit words, so the buffer
1724          * should be expanded to include the words at either end.
1725          * The first and last words will be read first to avoid
1726          * corruption if needed.
1727          */
1728         tmp_buff = malloc(total_u32 * 4);
             if (tmp_buff == NULL)
                     return ERROR_FAIL;
1729
1730         if ((start_byte != 0) && (total_u32 > 1)) {
1731                 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1732                  * the other bytes in the word.
1733                  */
1734                 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1735                 if (retval != ERROR_OK)
1736                         goto error_free_buff_w;
1737         }
1738
1739         /* If end of write is not aligned, or the write is less than 4 bytes */
1740         if ((end_byte != 0) ||
1741                 ((total_u32 == 1) && (total_bytes != 4))) {
1742
1743                 /* Read the last word to avoid corruption during 32 bit write */
1744                 int mem_offset = (total_u32-1) * 4;
1745                 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1746                 if (retval != ERROR_OK)
1747                         goto error_free_buff_w;
1748         }
1749
1750         /* Copy the write buffer over the top of the temporary buffer */
1751         memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1752
1753         /* We now have a 32 bit aligned buffer that can be written */
1754
1755         /* Read DSCR */
1756         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1757                         armv8->debug_base + CPUDBG_DSCR, &dscr);
1758         if (retval != ERROR_OK)
1759                 goto error_free_buff_w;
1760
1761         /* Set DTR mode to Normal*/
1762         dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1763         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1764                         armv8->debug_base + CPUDBG_DSCR, dscr);
1765         if (retval != ERROR_OK)
1766                 goto error_free_buff_w;
1767
1768         if (size > 4) {
1769                 LOG_WARNING("reading size >4 bytes not yet supported");
1770                 goto error_unset_dtr_w;
1771         }
1772
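        /* Stage the start address in x1 via the DCC: 0xd5330401 decodes to
         * MRS x1, DBGDTR_EL0, so x1 receives the 64-bit value written to the
         * DTR above. x1 is primed with address+4 because the store in the
         * loop below uses a -4 offset. */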
1773         retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330401, address+4);
1774         if (retval != ERROR_OK)
1775                 goto error_unset_dtr_w;
1776
1777         dscr = DSCR_INSTR_COMP;
1778         while (i < count * size) {
1779                 uint32_t val;
1780
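                /* One 32-bit word per iteration: push the data into DTRRX and
                 * move it into w0 (0xd5330500 = MRS x0, DBGDTRRX_EL0), store it
                 * (0xb81fc020 = STUR w0, [x1, #-4]) and advance the pointer
                 * (0x91001021 = ADD x1, x1, #4). */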
1781                 memcpy(&val, &buffer[i], size);
1782                 retval = aarch64_instr_write_data_dcc(arm->dpm, 0xd5330500, val);
1783                 if (retval != ERROR_OK)
1784                         goto error_unset_dtr_w;
1785
1786                 retval = aarch64_exec_opcode(target, 0xb81fc020, &dscr);
1787                 if (retval != ERROR_OK)
1788                         goto error_unset_dtr_w;
1789
1790                 retval = aarch64_exec_opcode(target, 0x91001021, &dscr);
1791                 if (retval != ERROR_OK)
1792                         goto error_unset_dtr_w;
1793
1794                 i += 4;
1795         }
1796
1797         /* Check for sticky abort flags in the DSCR */
1798         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1799                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
1800         if (retval != ERROR_OK)
1801                 goto error_free_buff_w;
1802         if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1803                 /* Abort occurred - clear it and exit */
1804                 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1805                 mem_ap_write_atomic_u32(armv8->debug_ap,
1806                                         armv8->debug_base + CPUDBG_DRCR, 1<<2);
1807                 goto error_free_buff_w;
1808         }
1809
1810         /* Done */
1811         free(tmp_buff);
1812         return ERROR_OK;
1813
1814 error_unset_dtr_w:
1815         /* Unset DTR mode */
1816         mem_ap_read_atomic_u32(armv8->debug_ap,
1817                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
1818         dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1819         mem_ap_write_atomic_u32(armv8->debug_ap,
1820                                 armv8->debug_base + CPUDBG_DSCR, dscr);
1821 error_free_buff_w:
1822         LOG_ERROR("error");
1823         free(tmp_buff);
1824         return ERROR_FAIL;
1825 }
1826
1827 static int aarch64_read_apb_ab_memory(struct target *target,
1828         target_addr_t address, uint32_t size,
1829         uint32_t count, uint8_t *buffer)
1830 {
1831         /* read memory through APB-AP */
1832
1833         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1834         struct armv8_common *armv8 = target_to_armv8(target);
1835         struct arm *arm = &armv8->arm;
1836         struct reg *reg;
1837         uint32_t dscr, val;
1838         uint8_t *tmp_buff = NULL;
1839         uint32_t i = 0;
1840
1841         LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%"  PRIu32,
1842                           address, size, count);
1843         if (target->state != TARGET_HALTED) {
1844                 LOG_WARNING("target not halted");
1845                 return ERROR_TARGET_NOT_HALTED;
1846         }
1847
1848         /* Mark register R0 as dirty, as it will be used
1849          * for transferring the data.
1850          * It will be restored automatically when exiting
1851          * debug mode
1852          */
1853         reg = armv8_reg_current(arm, 0);
1854         reg->dirty = true;
1855
1856         /*  clear any abort  */
1857         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1858                 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1859         if (retval != ERROR_OK)
1860                 goto error_free_buff_r;
1861
1862         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1863                         armv8->debug_base + CPUDBG_DSCR, &dscr);
1864         if (retval != ERROR_OK)
1865                 goto error_unset_dtr_r;
1866
1867         if (size > 4) {
1868                 LOG_WARNING("reading size >4 bytes not yet supported");
1869                 goto error_unset_dtr_r;
1870         }
1871
1872         while (i < count * size) {
1873
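                /* Stage the current address in x0 via the DCC (0xd5330400 =
                 * MRS x0, DBGDTR_EL0), load the word (0xb85fc000 = LDUR w0,
                 * [x0, #-4], leaving the data in x0) and push it back through
                 * the DCC (0xd5130400 = MSR DBGDTR_EL0, x0) so it can be read
                 * from DTRTX. */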
1874                 retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330400, address+4);
1875                 if (retval != ERROR_OK)
1876                         goto error_unset_dtr_r;
1877                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1878                         armv8->debug_base + CPUDBG_DSCR, &dscr);
1879
1880                 dscr = DSCR_INSTR_COMP;
1881                 retval = aarch64_exec_opcode(target, 0xb85fc000, &dscr);
1882                 if (retval != ERROR_OK)
1883                         goto error_unset_dtr_r;
1884                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1885                         armv8->debug_base + CPUDBG_DSCR, &dscr);
1886
1887                 retval = aarch64_instr_read_data_dcc(arm->dpm, 0xd5130400, &val);
1888                 if (retval != ERROR_OK)
1889                         goto error_unset_dtr_r;
1890                 memcpy(&buffer[i], &val, size);
1891                 i += 4;
1892                 address += 4;
1893         }
1894
1895         /* Clear any sticky error */
1896         mem_ap_write_atomic_u32(armv8->debug_ap,
1897                 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1898
1899         /* Done */
1900         return ERROR_OK;
1901
1902 error_unset_dtr_r:
1903         LOG_WARNING("DSCR = 0x%" PRIx32, dscr);
1904         /* Todo: Unset DTR mode */
1905
1906 error_free_buff_r:
1907         LOG_ERROR("error");
1908         free(tmp_buff);
1909
1910         /* Clear any sticky error */
1911         mem_ap_write_atomic_u32(armv8->debug_ap,
1912                 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1913
1914         return ERROR_FAIL;
1915 }
1916
1917 static int aarch64_read_phys_memory(struct target *target,
1918         target_addr_t address, uint32_t size,
1919         uint32_t count, uint8_t *buffer)
1920 {
1921         struct armv8_common *armv8 = target_to_armv8(target);
1922         int retval = ERROR_COMMAND_SYNTAX_ERROR;
1923         struct adiv5_dap *swjdp = armv8->arm.dap;
1924         uint8_t apsel = swjdp->apsel;
1925         LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1926                 address, size, count);
1927
1928         if (count && buffer) {
1929
1930                 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1931
1932                         /* read memory through AHB-AP */
1933                         retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
1934                 } else {
1935                         /* read memory through APB-AP */
1936                         retval = aarch64_mmu_modify(target, 0);
1937                         if (retval != ERROR_OK)
1938                                 return retval;
1939                         retval = aarch64_read_apb_ab_memory(target, address, size, count, buffer);
1940                 }
1941         }
1942         return retval;
1943 }
1944
1945 static int aarch64_read_memory(struct target *target, target_addr_t address,
1946         uint32_t size, uint32_t count, uint8_t *buffer)
1947 {
1948         int mmu_enabled = 0;
1949         target_addr_t virt, phys;
1950         int retval;
1951         struct armv8_common *armv8 = target_to_armv8(target);
1952         struct adiv5_dap *swjdp = armv8->arm.dap;
1953         uint8_t apsel = swjdp->apsel;
1954
1955         /* aarch64 handles unaligned memory access */
1956         LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1957                 size, count);
1958
1959         /* determine if MMU was enabled on target stop */
1960         if (!armv8->is_armv7r) {
1961                 retval = aarch64_mmu(target, &mmu_enabled);
1962                 if (retval != ERROR_OK)
1963                         return retval;
1964         }
1965
1966         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1967                 if (mmu_enabled) {
1968                         virt = address;
1969                         retval = aarch64_virt2phys(target, virt, &phys);
1970                         if (retval != ERROR_OK)
1971                                 return retval;
1972
1973                         LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
1974                                   virt, phys);
1975                         address = phys;
1976                 }
1977                 retval = aarch64_read_phys_memory(target, address, size, count,
1978                                                   buffer);
1979         } else {
1980                 if (mmu_enabled) {
1981                         retval = aarch64_check_address(target, address);
1982                         if (retval != ERROR_OK)
1983                                 return retval;
1984                         /* enable MMU as we could have disabled it for phys
1985                            access */
1986                         retval = aarch64_mmu_modify(target, 1);
1987                         if (retval != ERROR_OK)
1988                                 return retval;
1989                 }
1990                 retval = aarch64_read_apb_ab_memory(target, address, size,
1991                                                     count, buffer);
1992         }
1993         return retval;
1994 }
1995
1996 static int aarch64_write_phys_memory(struct target *target,
1997         target_addr_t address, uint32_t size,
1998         uint32_t count, const uint8_t *buffer)
1999 {
2000         struct armv8_common *armv8 = target_to_armv8(target);
2001         struct adiv5_dap *swjdp = armv8->arm.dap;
2002         int retval = ERROR_COMMAND_SYNTAX_ERROR;
2003         uint8_t apsel = swjdp->apsel;
2004
2005         LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2006                 size, count);
2007
2008         if (count && buffer) {
2009
2010                 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2011
2012                         /* write memory through AHB-AP */
2013                         retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2014                 } else {
2015
2016                         /* write memory through APB-AP */
2017                         if (!armv8->is_armv7r) {
2018                                 retval = aarch64_mmu_modify(target, 0);
2019                                 if (retval != ERROR_OK)
2020                                         return retval;
2021                         }
2022                         return aarch64_write_apb_ab_memory(target, address, size, count, buffer);
2023                 }
2024         }
2025
2026
2027         /* REVISIT this op is generic ARMv7-A/R stuff */
2028         if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2029                 struct arm_dpm *dpm = armv8->arm.dpm;
2030
2031                 retval = dpm->prepare(dpm);
2032                 if (retval != ERROR_OK)
2033                         return retval;
2034
2035                 /* The Cache handling will NOT work with MMU active, the
2036                  * wrong addresses will be invalidated!
2037                  *
2038                  * For both ICache and DCache, walk all cache lines in the
2039                  * address range. Cortex-A8 has fixed 64 byte line length.
2040                  *
2041                  * REVISIT per ARMv7, these may trigger watchpoints ...
2042                  */
2043
2044                 /* invalidate I-Cache */
2045                 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2046                         /* ICIMVAU - Invalidate Cache single entry
2047                          * with MVA to PoU
2048                          *      MCR p15, 0, r0, c7, c5, 1
2049                          */
2050                         for (uint32_t cacheline = address;
2051                                 cacheline < address + size * count;
2052                                 cacheline += 64) {
2053                                 retval = dpm->instr_write_data_r0(dpm,
2054                                                 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2055                                                 cacheline);
2056                                 if (retval != ERROR_OK)
2057                                         return retval;
2058                         }
2059                 }
2060
2061                 /* invalidate D-Cache */
2062                 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2063                         /* DCIMVAC - Invalidate data Cache line
2064                          * with MVA to PoC
2065                          *      MCR p15, 0, r0, c7, c6, 1
2066                          */
2067                         for (uint32_t cacheline = address;
2068                                 cacheline < address + size * count;
2069                                 cacheline += 64) {
2070                                 retval = dpm->instr_write_data_r0(dpm,
2071                                                 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2072                                                 cacheline);
2073                                 if (retval != ERROR_OK)
2074                                         return retval;
2075                         }
2076                 }
2077
2078                 /* (void) */ dpm->finish(dpm);
2079         }
2080
2081         return retval;
2082 }
2083
2084 static int aarch64_write_memory(struct target *target, target_addr_t address,
2085         uint32_t size, uint32_t count, const uint8_t *buffer)
2086 {
2087         int mmu_enabled = 0;
2088         target_addr_t virt, phys;
2089         int retval;
2090         struct armv8_common *armv8 = target_to_armv8(target);
2091         struct adiv5_dap *swjdp = armv8->arm.dap;
2092         uint8_t apsel = swjdp->apsel;
2093
2094         /* aarch64 handles unaligned memory access */
2095         LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2096                   "; count %" PRId32, address, size, count);
2097
2098         /* determine if MMU was enabled on target stop */
2099         if (!armv8->is_armv7r) {
2100                 retval = aarch64_mmu(target, &mmu_enabled);
2101                 if (retval != ERROR_OK)
2102                         return retval;
2103         }
2104
2105         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2106                 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2107                           PRId32 "; count %" PRId32, address, size, count);
2108                 if (mmu_enabled) {
2109                         virt = address;
2110                         retval = aarch64_virt2phys(target, virt, &phys);
2111                         if (retval != ERROR_OK)
2112                                 return retval;
2113
2114                         LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2115                                   TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2116                         address = phys;
2117                 }
2118                 retval = aarch64_write_phys_memory(target, address, size,
2119                                 count, buffer);
2120         } else {
2121                 if (mmu_enabled) {
2122                         retval = aarch64_check_address(target, address);
2123                         if (retval != ERROR_OK)
2124                                 return retval;
2125                         /* enable MMU as we could have disabled it for phys access */
2126                         retval = aarch64_mmu_modify(target, 1);
2127                         if (retval != ERROR_OK)
2128                                 return retval;
2129                 }
2130                 retval = aarch64_write_apb_ab_memory(target, address, size, count, buffer);
2131         }
2132         return retval;
2133 }
2134
2135 static int aarch64_handle_target_request(void *priv)
2136 {
2137         struct target *target = priv;
2138         struct armv8_common *armv8 = target_to_armv8(target);
2139         int retval;
2140
2141         if (!target_was_examined(target))
2142                 return ERROR_OK;
2143         if (!target->dbg_msg_enabled)
2144                 return ERROR_OK;
2145
2146         if (target->state == TARGET_RUNNING) {
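                /* Debug messages from the running core arrive through the DCC:
                 * DSCR.TXfull flags that the core has written DTRTX, which is
                 * then drained below one word at a time. */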
2147                 uint32_t request;
2148                 uint32_t dscr;
2149                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2150                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
2151
2152                 /* check if we have data */
2153                 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2154                         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2155                                         armv8->debug_base + CPUDBG_DTRTX, &request);
2156                         if (retval == ERROR_OK) {
2157                                 target_request(target, request);
2158                                 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2159                                                 armv8->debug_base + CPUDBG_DSCR, &dscr);
2160                         }
2161                 }
2162         }
2163
2164         return ERROR_OK;
2165 }
2166
2167 static int aarch64_examine_first(struct target *target)
2168 {
2169         struct aarch64_common *aarch64 = target_to_aarch64(target);
2170         struct armv8_common *armv8 = &aarch64->armv8_common;
2171         struct adiv5_dap *swjdp = armv8->arm.dap;
2172         int retval = ERROR_OK;
2173         uint32_t pfr, debug, ctypr, ttypr, cpuid;
2174         int i;
2175
2176         /* Make sure the DAP and debug port are initialized
2177          * before any debug register access
2178          */
2179         retval = dap_dp_init(swjdp);
2180         if (retval != ERROR_OK)
2181                 return retval;
2182
2183         /* Search for the APB-AP - it is needed for access to debug registers */
2184         retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2185         if (retval != ERROR_OK) {
2186                 LOG_ERROR("Could not find APB-AP for debug access");
2187                 return retval;
2188         }
2189
2190         retval = mem_ap_init(armv8->debug_ap);
2191         if (retval != ERROR_OK) {
2192                 LOG_ERROR("Could not initialize the APB-AP");
2193                 return retval;
2194         }
2195
2196         armv8->debug_ap->memaccess_tck = 80;
2197
2198         /* Search for the AHB-AP */
2199         armv8->memory_ap_available = false;
2200         retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2201         if (retval == ERROR_OK) {
2202                 retval = mem_ap_init(armv8->memory_ap);
2203                 if (retval == ERROR_OK)
2204                         armv8->memory_ap_available = true;
2205         }
2206         if (retval != ERROR_OK) {
2207                 /* AHB-AP not found or unavailable - use the CPU */
2208                 LOG_DEBUG("No AHB-AP available for memory access");
2209         }
2210
2211
2212         if (!target->dbgbase_set) {
2213                 uint32_t dbgbase;
2214                 /* Get ROM Table base */
2215                 uint32_t apid;
2216                 int32_t coreidx = target->coreid;
2217                 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2218                 if (retval != ERROR_OK)
2219                         return retval;
2220                 /* Lookup 0x15 -- Processor DAP */
2221                 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2222                                 &armv8->debug_base, &coreidx);
2223                 if (retval != ERROR_OK)
2224                         return retval;
2225                 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2226                           coreidx, armv8->debug_base);
2227         } else
2228                 armv8->debug_base = target->dbgbase;
2229
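        /* Release the OS lock: offset 0x300 in the ARMv8 external debug
         * register map is OSLAR_EL1, and writing 0 unlocks the debug
         * registers for external access. */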
2230         retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2231                         armv8->debug_base + 0x300, 0);
2232         if (retval != ERROR_OK) {
2233                 LOG_DEBUG("Examine %s failed", "oslock");
2234                 return retval;
2235         }
2236
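        /* Dump a few raw status registers for diagnostics; in the ARMv8
         * external debug register map 0x88 is EDSCR, 0x314 is EDPRSR and
         * 0x310 is EDPRCR (all read into the same scratch variable). */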
2237         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2238                         armv8->debug_base + 0x88, &cpuid);
2239         LOG_DEBUG("0x88 = %x", cpuid);
2240
2241         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2242                         armv8->debug_base + 0x314, &cpuid);
2243         LOG_DEBUG("0x314 = %x", cpuid);
2244
2245         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2246                         armv8->debug_base + 0x310, &cpuid);
2247         LOG_DEBUG("0x310 = %x", cpuid);
2248         if (retval != ERROR_OK)
2249                 return retval;
2250
2251         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2252                         armv8->debug_base + CPUDBG_CPUID, &cpuid);
2253         if (retval != ERROR_OK) {
2254                 LOG_DEBUG("Examine %s failed", "CPUID");
2255                 return retval;
2256         }
2257
2258         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2259                         armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2260         if (retval != ERROR_OK) {
2261                 LOG_DEBUG("Examine %s failed", "CTYPR");
2262                 return retval;
2263         }
2264
2265         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2266                         armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2267         if (retval != ERROR_OK) {
2268                 LOG_DEBUG("Examine %s failed", "TTYPR");
2269                 return retval;
2270         }
2271
2272         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2273                         armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2274         if (retval != ERROR_OK) {
2275                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2276                 return retval;
2277         }
2278         retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2279                         armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2280         if (retval != ERROR_OK) {
2281                 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2282                 return retval;
2283         }
2284
2285         LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2286         LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2287         LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2288         LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2289         LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2290
2291         armv8->arm.core_type = ARM_MODE_MON;
2292         armv8->arm.core_state = ARM_STATE_AARCH64;
2293         retval = aarch64_dpm_setup(aarch64, debug);
2294         if (retval != ERROR_OK)
2295                 return retval;
2296
2297         /* Setup Breakpoint Register Pairs */
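        /* ID_AA64DFR0_EL1: BRPs (bits[15:12]) and CTX_CMPs (bits[31:28]) each
         * encode the number of comparators minus one. */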
2298         aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2299         aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2300
2301         /* hack - no context bpt support yet */
2302         aarch64->brp_num_context = 0;
2303
2304         aarch64->brp_num_available = aarch64->brp_num;
2305         aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2306         for (i = 0; i < aarch64->brp_num; i++) {
2307                 aarch64->brp_list[i].used = 0;
2308                 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2309                         aarch64->brp_list[i].type = BRP_NORMAL;
2310                 else
2311                         aarch64->brp_list[i].type = BRP_CONTEXT;
2312                 aarch64->brp_list[i].value = 0;
2313                 aarch64->brp_list[i].control = 0;
2314                 aarch64->brp_list[i].BRPn = i;
2315         }
2316
2317         LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2318
2319         target_set_examined(target);
2320         return ERROR_OK;
2321 }
2322
2323 static int aarch64_examine(struct target *target)
2324 {
2325         int retval = ERROR_OK;
2326
2327         /* don't re-probe hardware after each reset */
2328         if (!target_was_examined(target))
2329                 retval = aarch64_examine_first(target);
2330
2331         /* Configure core debug access */
2332         if (retval == ERROR_OK)
2333                 retval = aarch64_init_debug_access(target);
2334
2335         return retval;
2336 }
2337
2338 /*
2339  *      aarch64 target creation and initialization
2340  */
2341
2342 static int aarch64_init_target(struct command_context *cmd_ctx,
2343         struct target *target)
2344 {
2345         /* examine_first() does a bunch of this */
2346         return ERROR_OK;
2347 }
2348
2349 static int aarch64_init_arch_info(struct target *target,
2350         struct aarch64_common *aarch64, struct jtag_tap *tap)
2351 {
2352         struct armv8_common *armv8 = &aarch64->armv8_common;
2353         struct adiv5_dap *dap = armv8->arm.dap;
2354
2355         armv8->arm.dap = dap;
2356
2357         /* Setup struct aarch64_common */
2358         aarch64->common_magic = AARCH64_COMMON_MAGIC;
2359         /*  tap has no dap initialized */
2360         if (!tap->dap) {
2361                 tap->dap = dap_init();
2362
2363                 /* Leave (only) generic DAP stuff for debugport_init() */
2364                 tap->dap->tap = tap;
2365         }
2366
2367         armv8->arm.dap = tap->dap;
2368
2369         aarch64->fast_reg_read = 0;
2370
2371         /* register arch-specific functions */
2372         armv8->examine_debug_reason = NULL;
2373
2374         armv8->post_debug_entry = aarch64_post_debug_entry;
2375
2376         armv8->pre_restore_context = NULL;
2377
2378         armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2379
2380         /* REVISIT v7a setup should be in a v7a-specific routine */
2381         armv8_init_arch_info(target, armv8);
2382         target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2383
2384         return ERROR_OK;
2385 }
2386
2387 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2388 {
2389         struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2390
2391         aarch64->armv8_common.is_armv7r = false;
2392
2393         return aarch64_init_arch_info(target, aarch64, target->tap);
2394 }
2395
2396 static int aarch64_mmu(struct target *target, int *enabled)
2397 {
2398         if (target->state != TARGET_HALTED) {
2399                 LOG_ERROR("%s: target not halted", __func__);
2400                 return ERROR_TARGET_INVALID;
2401         }
2402
2403         *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2404         return ERROR_OK;
2405 }
2406
2407 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2408                              target_addr_t *phys)
2409 {
2410         int retval = ERROR_FAIL;
2411         struct armv8_common *armv8 = target_to_armv8(target);
2412         struct adiv5_dap *swjdp = armv8->arm.dap;
2413         uint8_t apsel = swjdp->apsel;
2414         if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2415                 uint32_t ret;
2416                 retval = armv8_mmu_translate_va(target,
2417                                 virt, &ret);
2418                 if (retval != ERROR_OK)
2419                         goto done;
2420                 *phys = ret;
2421         } else {/*  use this method if armv8->memory_ap is not selected;
2422                  *  the MMU must be enabled in order to get a correct translation */
2423                 retval = aarch64_mmu_modify(target, 1);
2424                 if (retval != ERROR_OK)
2425                         goto done;
2426                 retval = armv8_mmu_translate_va_pa(target, virt,  phys, 1);
2427         }
2428 done:
2429         return retval;
2430 }
2431
2432 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2433 {
2434         struct target *target = get_current_target(CMD_CTX);
2435         struct armv8_common *armv8 = target_to_armv8(target);
2436
2437         return armv8_handle_cache_info_command(CMD_CTX,
2438                         &armv8->armv8_mmu.armv8_cache);
2439 }
2440
2441
2442 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2443 {
2444         struct target *target = get_current_target(CMD_CTX);
2445         if (!target_was_examined(target)) {
2446                 LOG_ERROR("target not examined yet");
2447                 return ERROR_FAIL;
2448         }
2449
2450         return aarch64_init_debug_access(target);
2451 }
2452 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2453 {
2454         struct target *target = get_current_target(CMD_CTX);
2455         /* check target is an smp target */
2456         struct target_list *head;
2457         struct target *curr;
2458         head = target->head;
2459         target->smp = 0;
2460         if (head != (struct target_list *)NULL) {
2461                 while (head != (struct target_list *)NULL) {
2462                         curr = head->target;
2463                         curr->smp = 0;
2464                         head = head->next;
2465                 }
2466                 /*  fixes the target display to the debugger */
2467                 target->gdb_service->target = target;
2468         }
2469         return ERROR_OK;
2470 }
2471
2472 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2473 {
2474         struct target *target = get_current_target(CMD_CTX);
2475         struct target_list *head;
2476         struct target *curr;
2477         head = target->head;
2478         if (head != (struct target_list *)NULL) {
2479                 target->smp = 1;
2480                 while (head != (struct target_list *)NULL) {
2481                         curr = head->target;
2482                         curr->smp = 1;
2483                         head = head->next;
2484                 }
2485         }
2486         return ERROR_OK;
2487 }
2488
2489 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2490 {
2491         struct target *target = get_current_target(CMD_CTX);
2492         int retval = ERROR_OK;
2493         struct target_list *head;
2494         head = target->head;
2495         if (head != (struct target_list *)NULL) {
2496                 if (CMD_ARGC == 1) {
2497                         int coreid = 0;
2498                         COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2499                         if (ERROR_OK != retval)
2500                                 return retval;
2501                         target->gdb_service->core[1] = coreid;
2502
2503                 }
2504                 command_print(CMD_CTX, "gdb coreid  %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2505                         , target->gdb_service->core[1]);
2506         }
2507         return ERROR_OK;
2508 }
2509
2510 static const struct command_registration aarch64_exec_command_handlers[] = {
2511         {
2512                 .name = "cache_info",
2513                 .handler = aarch64_handle_cache_info_command,
2514                 .mode = COMMAND_EXEC,
2515                 .help = "display information about target caches",
2516                 .usage = "",
2517         },
2518         {
2519                 .name = "dbginit",
2520                 .handler = aarch64_handle_dbginit_command,
2521                 .mode = COMMAND_EXEC,
2522                 .help = "Initialize core debug",
2523                 .usage = "",
2524         },
2525         {       .name = "smp_off",
2526                 .handler = aarch64_handle_smp_off_command,
2527                 .mode = COMMAND_EXEC,
2528                 .help = "Stop smp handling",
2529                 .usage = "",
2530         },
2531         {
2532                 .name = "smp_on",
2533                 .handler = aarch64_handle_smp_on_command,
2534                 .mode = COMMAND_EXEC,
2535                 .help = "Restart smp handling",
2536                 .usage = "",
2537         },
2538         {
2539                 .name = "smp_gdb",
2540                 .handler = aarch64_handle_smp_gdb_command,
2541                 .mode = COMMAND_EXEC,
2542                 .help = "display/fix current core played to gdb",
2543                 .usage = "",
2544         },
2545
2546
2547         COMMAND_REGISTRATION_DONE
2548 };
2549 static const struct command_registration aarch64_command_handlers[] = {
2550         {
2551                 .chain = arm_command_handlers,
2552         },
2553         {
2554                 .chain = armv8_command_handlers,
2555         },
2556         {
2557                 .name = "cortex_a",
2558                 .mode = COMMAND_ANY,
2559                 .help = "Cortex-A command group",
2560                 .usage = "",
2561                 .chain = aarch64_exec_command_handlers,
2562         },
2563         COMMAND_REGISTRATION_DONE
2564 };
2565
2566 struct target_type aarch64_target = {
2567         .name = "aarch64",
2568
2569         .poll = aarch64_poll,
2570         .arch_state = armv8_arch_state,
2571
2572         .halt = aarch64_halt,
2573         .resume = aarch64_resume,
2574         .step = aarch64_step,
2575
2576         .assert_reset = aarch64_assert_reset,
2577         .deassert_reset = aarch64_deassert_reset,
2578
2579         /* REVISIT allow exporting VFP3 registers ... */
2580         .get_gdb_reg_list = armv8_get_gdb_reg_list,
2581
2582         .read_memory = aarch64_read_memory,
2583         .write_memory = aarch64_write_memory,
2584
2585         .checksum_memory = arm_checksum_memory,
2586         .blank_check_memory = arm_blank_check_memory,
2587
2588         .run_algorithm = armv4_5_run_algorithm,
2589
2590         .add_breakpoint = aarch64_add_breakpoint,
2591         .add_context_breakpoint = aarch64_add_context_breakpoint,
2592         .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2593         .remove_breakpoint = aarch64_remove_breakpoint,
2594         .add_watchpoint = NULL,
2595         .remove_watchpoint = NULL,
2596
2597         .commands = aarch64_command_handlers,
2598         .target_create = aarch64_target_create,
2599         .init_target = aarch64_init_target,
2600         .examine = aarch64_examine,
2601
2602         .read_phys_memory = aarch64_read_phys_memory,
2603         .write_phys_memory = aarch64_write_phys_memory,
2604         .mmu = aarch64_mmu,
2605         .virt2phys = aarch64_virt2phys,
2606 };